-rw-r--r--.ci/generate_test_report_github.py6
-rw-r--r--.ci/generate_test_report_lib.py164
-rw-r--r--.ci/generate_test_report_lib_test.py370
-rwxr-xr-x.ci/monolithic-linux.sh12
-rwxr-xr-x.ci/monolithic-windows.sh4
-rw-r--r--.ci/utils.sh3
-rw-r--r--.github/workflows/mlir-spirv-tests.yml32
-rw-r--r--.github/workflows/premerge.yaml6
-rw-r--r--bolt/include/bolt/Profile/DataAggregator.h3
-rw-r--r--bolt/lib/Passes/FrameOptimizer.cpp5
-rw-r--r--bolt/lib/Profile/DataAggregator.cpp39
-rw-r--r--bolt/test/AArch64/unsupported-passes.test8
-rw-r--r--bolt/unittests/Core/CMakeLists.txt6
-rw-r--r--bolt/unittests/Profile/CMakeLists.txt1
-rw-r--r--clang-tools-extra/clang-change-namespace/ChangeNamespace.cpp53
-rw-r--r--clang-tools-extra/clang-doc/Serialize.cpp4
-rw-r--r--clang-tools-extra/clang-include-fixer/find-all-symbols/FindAllSymbols.cpp3
-rw-r--r--clang-tools-extra/clang-tidy/ClangTidyDiagnosticConsumer.cpp3
-rw-r--r--clang-tools-extra/clang-tidy/bugprone/CrtpConstructorAccessibilityCheck.cpp6
-rw-r--r--clang-tools-extra/clang-tidy/bugprone/EasilySwappableParametersCheck.cpp14
-rw-r--r--clang-tools-extra/clang-tidy/bugprone/ForwardDeclarationNamespaceCheck.cpp11
-rw-r--r--clang-tools-extra/clang-tidy/bugprone/ForwardingReferenceOverloadCheck.cpp18
-rw-r--r--clang-tools-extra/clang-tidy/bugprone/IncorrectEnableIfCheck.cpp19
-rw-r--r--clang-tools-extra/clang-tidy/bugprone/InvalidEnumDefaultInitializationCheck.cpp8
-rw-r--r--clang-tools-extra/clang-tidy/bugprone/MoveForwardingReferenceCheck.cpp17
-rw-r--r--clang-tools-extra/clang-tidy/bugprone/NarrowingConversionsCheck.cpp15
-rw-r--r--clang-tools-extra/clang-tidy/bugprone/NarrowingConversionsCheck.h2
-rw-r--r--clang-tools-extra/clang-tidy/bugprone/SizeofExpressionCheck.cpp10
-rw-r--r--clang-tools-extra/clang-tidy/cppcoreguidelines/NoSuspendWithLockCheck.cpp4
-rw-r--r--clang-tools-extra/clang-tidy/cppcoreguidelines/ProTypeMemberInitCheck.cpp2
-rw-r--r--clang-tools-extra/clang-tidy/cppcoreguidelines/SlicingCheck.cpp2
-rw-r--r--clang-tools-extra/clang-tidy/fuchsia/MultipleInheritanceCheck.cpp8
-rw-r--r--clang-tools-extra/clang-tidy/google/AvoidCStyleCastsCheck.cpp31
-rw-r--r--clang-tools-extra/clang-tidy/google/ExplicitConstructorCheck.cpp2
-rw-r--r--clang-tools-extra/clang-tidy/google/UpgradeGoogletestCaseCheck.cpp9
-rw-r--r--clang-tools-extra/clang-tidy/misc/ConstCorrectnessCheck.cpp11
-rw-r--r--clang-tools-extra/clang-tidy/misc/MisplacedConstCheck.cpp14
-rw-r--r--clang-tools-extra/clang-tidy/misc/RedundantExpressionCheck.cpp13
-rw-r--r--clang-tools-extra/clang-tidy/misc/UnusedAliasDeclsCheck.cpp10
-rw-r--r--clang-tools-extra/clang-tidy/misc/UnusedUsingDeclsCheck.cpp8
-rw-r--r--clang-tools-extra/clang-tidy/modernize/DeprecatedIosBaseAliasesCheck.cpp11
-rw-r--r--clang-tools-extra/clang-tidy/modernize/PassByValueCheck.cpp3
-rw-r--r--clang-tools-extra/clang-tidy/modernize/ReplaceAutoPtrCheck.cpp15
-rw-r--r--clang-tools-extra/clang-tidy/modernize/TypeTraitsCheck.cpp63
-rw-r--r--clang-tools-extra/clang-tidy/modernize/UseAutoCheck.cpp18
-rw-r--r--clang-tools-extra/clang-tidy/modernize/UseConstraintsCheck.cpp7
-rw-r--r--clang-tools-extra/clang-tidy/modernize/UseEmplaceCheck.cpp53
-rw-r--r--clang-tools-extra/clang-tidy/modernize/UseScopedLockCheck.cpp43
-rw-r--r--clang-tools-extra/clang-tidy/modernize/UseTrailingReturnTypeCheck.cpp93
-rw-r--r--clang-tools-extra/clang-tidy/modernize/UseTransparentFunctorsCheck.cpp16
-rw-r--r--clang-tools-extra/clang-tidy/performance/NoAutomaticMoveCheck.cpp8
-rw-r--r--clang-tools-extra/clang-tidy/portability/StdAllocatorConstCheck.cpp11
-rw-r--r--clang-tools-extra/clang-tidy/readability/ContainerSizeEmptyCheck.cpp11
-rw-r--r--clang-tools-extra/clang-tidy/readability/StaticAccessedThroughInstanceCheck.cpp26
-rw-r--r--clang-tools-extra/clang-tidy/readability/SuspiciousCallArgumentCheck.cpp4
-rw-r--r--clang-tools-extra/clang-tidy/readability/UseStdMinMaxCheck.cpp6
-rw-r--r--clang-tools-extra/clang-tidy/utils/ExceptionSpecAnalyzer.cpp3
-rw-r--r--clang-tools-extra/clang-tidy/utils/FormatStringConverter.cpp5
-rw-r--r--clang-tools-extra/clang-tidy/utils/Matchers.cpp2
-rw-r--r--clang-tools-extra/clang-tidy/utils/RenamerClangTidyCheck.cpp35
-rw-r--r--clang-tools-extra/clang-tidy/utils/TypeTraits.cpp3
-rw-r--r--clang-tools-extra/clangd/AST.cpp94
-rw-r--r--clang-tools-extra/clangd/AST.h3
-rw-r--r--clang-tools-extra/clangd/CMakeLists.txt1
-rw-r--r--clang-tools-extra/clangd/CodeComplete.cpp19
-rw-r--r--clang-tools-extra/clangd/CodeCompletionStrings.cpp60
-rw-r--r--clang-tools-extra/clangd/ConfigFragment.h4
-rw-r--r--clang-tools-extra/clangd/DumpAST.cpp41
-rw-r--r--clang-tools-extra/clangd/FindTarget.cpp123
-rw-r--r--clang-tools-extra/clangd/Headers.cpp7
-rw-r--r--clang-tools-extra/clangd/Hover.cpp302
-rw-r--r--clang-tools-extra/clangd/Hover.h21
-rw-r--r--clang-tools-extra/clangd/IncludeFixer.cpp52
-rw-r--r--clang-tools-extra/clangd/InlayHints.cpp28
-rw-r--r--clang-tools-extra/clangd/Selection.cpp17
-rw-r--r--clang-tools-extra/clangd/SemanticHighlighting.cpp15
-rw-r--r--clang-tools-extra/clangd/SymbolDocumentation.cpp297
-rw-r--r--clang-tools-extra/clangd/SymbolDocumentation.h155
-rw-r--r--clang-tools-extra/clangd/XRefs.cpp13
-rw-r--r--clang-tools-extra/clangd/refactor/tweaks/AddUsing.cpp121
-rw-r--r--clang-tools-extra/clangd/refactor/tweaks/ExtractFunction.cpp7
-rw-r--r--clang-tools-extra/clangd/refactor/tweaks/PopulateSwitch.cpp2
-rw-r--r--clang-tools-extra/clangd/support/Markup.cpp7
-rw-r--r--clang-tools-extra/clangd/unittests/ASTTests.cpp7
-rw-r--r--clang-tools-extra/clangd/unittests/CMakeLists.txt1
-rw-r--r--clang-tools-extra/clangd/unittests/CodeCompleteTests.cpp4
-rw-r--r--clang-tools-extra/clangd/unittests/DumpASTTests.cpp15
-rw-r--r--clang-tools-extra/clangd/unittests/FindTargetTests.cpp4
-rw-r--r--clang-tools-extra/clangd/unittests/HeadersTests.cpp11
-rw-r--r--clang-tools-extra/clangd/unittests/HoverTests.cpp274
-rw-r--r--clang-tools-extra/clangd/unittests/InlayHintTests.cpp9
-rw-r--r--clang-tools-extra/clangd/unittests/QualityTests.cpp14
-rw-r--r--clang-tools-extra/clangd/unittests/SelectionTests.cpp4
-rw-r--r--clang-tools-extra/clangd/unittests/SymbolDocumentationTests.cpp161
-rw-r--r--clang-tools-extra/clangd/unittests/support/MarkupTests.cpp8
-rw-r--r--clang-tools-extra/docs/ReleaseNotes.rst8
-rw-r--r--clang-tools-extra/include-cleaner/include/clang-include-cleaner/Types.h2
-rw-r--r--clang-tools-extra/include-cleaner/lib/WalkAST.cpp28
-rw-r--r--clang-tools-extra/test/clang-tidy/checkers/bugprone/copy-constructor-init.cpp1
-rw-r--r--clang-tools-extra/test/clang-tidy/checkers/bugprone/narrowing-conversions-conditional-expressions.c22
-rw-r--r--clang-tools-extra/test/clang-tidy/checkers/bugprone/narrowing-conversions-conditional-expressions.cpp22
-rw-r--r--clang-tools-extra/test/clang-tidy/checkers/bugprone/unused-local-non-trivial-variable.cpp6
-rw-r--r--clang-tools-extra/test/clang-tidy/checkers/portability/std-allocator-const.cpp22
-rw-r--r--clang-tools-extra/test/clang-tidy/checkers/readability/container-size-empty.cpp13
-rw-r--r--clang/bindings/python/tests/cindex/test_type.py2
-rw-r--r--clang/docs/ClangOffloadBundler.rst8
-rw-r--r--clang/docs/OpenMPSupport.rst6
-rw-r--r--clang/docs/ReleaseNotes.rst31
-rw-r--r--clang/include/clang/AST/ASTConcept.h9
-rw-r--r--clang/include/clang/AST/ASTContext.h203
-rw-r--r--clang/include/clang/AST/ASTImporter.h2
-rw-r--r--clang/include/clang/AST/ASTNodeTraverser.h23
-rw-r--r--clang/include/clang/AST/ASTTypeTraits.h15
-rw-r--r--clang/include/clang/AST/AbstractBasicReader.h39
-rw-r--r--clang/include/clang/AST/AbstractBasicWriter.h35
-rw-r--r--clang/include/clang/AST/CanonicalType.h11
-rw-r--r--clang/include/clang/AST/Comment.h21
-rw-r--r--clang/include/clang/AST/CommentSema.h1
-rw-r--r--clang/include/clang/AST/Decl.h65
-rw-r--r--clang/include/clang/AST/DeclBase.h16
-rw-r--r--clang/include/clang/AST/DeclCXX.h53
-rw-r--r--clang/include/clang/AST/DeclObjC.h3
-rw-r--r--clang/include/clang/AST/DeclTemplate.h44
-rw-r--r--clang/include/clang/AST/DependenceFlags.h2
-rw-r--r--clang/include/clang/AST/DynamicRecursiveASTVisitor.h13
-rw-r--r--clang/include/clang/AST/Expr.h15
-rw-r--r--clang/include/clang/AST/ExprCXX.h8
-rw-r--r--clang/include/clang/AST/JSONNodeDumper.h1
-rw-r--r--clang/include/clang/AST/NestedNameSpecifier.h665
-rw-r--r--clang/include/clang/AST/NestedNameSpecifierBase.h586
-rw-r--r--clang/include/clang/AST/ODRHash.h2
-rw-r--r--clang/include/clang/AST/PrettyPrinter.h11
-rw-r--r--clang/include/clang/AST/PropertiesBase.td5
-rw-r--r--clang/include/clang/AST/QualTypeNames.h10
-rw-r--r--clang/include/clang/AST/RecursiveASTVisitor.h280
-rw-r--r--clang/include/clang/AST/TemplateBase.h40
-rw-r--r--clang/include/clang/AST/TemplateName.h30
-rw-r--r--clang/include/clang/AST/TextNodeDumper.h2
-rw-r--r--clang/include/clang/AST/Type.h766
-rw-r--r--clang/include/clang/AST/TypeLoc.h402
-rw-r--r--clang/include/clang/AST/TypeProperties.td140
-rw-r--r--clang/include/clang/ASTMatchers/ASTMatchers.h159
-rw-r--r--clang/include/clang/ASTMatchers/ASTMatchersInternal.h20
-rw-r--r--clang/include/clang/Analysis/FlowSensitive/ASTOps.h10
-rw-r--r--clang/include/clang/Basic/ABIVersions.def135
-rw-r--r--clang/include/clang/Basic/BuiltinsX86.td46
-rw-r--r--clang/include/clang/Basic/DiagnosticSemaKinds.td6
-rw-r--r--clang/include/clang/Basic/Features.def8
-rw-r--r--clang/include/clang/Basic/LangOptions.h93
-rw-r--r--clang/include/clang/Basic/TypeNodes.td3
-rw-r--r--clang/include/clang/Basic/arm_sve.td4
-rw-r--r--clang/include/clang/CIR/Dialect/IR/CIRAttrs.td32
-rw-r--r--clang/include/clang/CIR/Dialect/IR/CIRDialect.td1
-rw-r--r--clang/include/clang/CIR/Dialect/IR/CIROps.td93
-rw-r--r--clang/include/clang/CIR/MissingFeatures.h2
-rw-r--r--clang/include/clang/Driver/OffloadBundler.h2
-rw-r--r--clang/include/clang/ExtractAPI/DeclarationFragments.h5
-rw-r--r--clang/include/clang/Interpreter/RemoteJITUtils.h12
-rw-r--r--clang/include/clang/Sema/CodeCompleteConsumer.h7
-rw-r--r--clang/include/clang/Sema/DeclSpec.h36
-rw-r--r--clang/include/clang/Sema/HeuristicResolver.h3
-rw-r--r--clang/include/clang/Sema/ParsedTemplate.h26
-rw-r--r--clang/include/clang/Sema/Sema.h38
-rw-r--r--clang/include/clang/Sema/SemaInternal.h8
-rw-r--r--clang/include/clang/Sema/SemaWasm.h3
-rw-r--r--clang/include/clang/Sema/TypoCorrection.h29
-rw-r--r--clang/include/clang/Serialization/ASTRecordReader.h2
-rw-r--r--clang/include/clang/Serialization/ASTRecordWriter.h5
-rw-r--r--clang/include/clang/Serialization/TypeBitCodes.def1
-rw-r--r--clang/include/clang/StaticAnalyzer/Core/Checker.h4
-rw-r--r--clang/include/clang/StaticAnalyzer/Core/CheckerManager.h11
-rw-r--r--clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h2
-rw-r--r--clang/include/clang/Tooling/Refactoring/Lookup.h3
-rw-r--r--clang/include/clang/Tooling/Refactoring/RecursiveSymbolVisitor.h16
-rw-r--r--clang/lib/AST/APValue.cpp3
-rw-r--r--clang/lib/AST/ASTConcept.cpp11
-rw-r--r--clang/lib/AST/ASTContext.cpp1079
-rw-r--r--clang/lib/AST/ASTDiagnostic.cpp49
-rw-r--r--clang/lib/AST/ASTDumper.cpp4
-rw-r--r--clang/lib/AST/ASTImporter.cpp302
-rw-r--r--clang/lib/AST/ASTImporterLookupTable.cpp16
-rw-r--r--clang/lib/AST/ASTStructuralEquivalence.cpp186
-rw-r--r--clang/lib/AST/ASTTypeTraits.cpp36
-rw-r--r--clang/lib/AST/ByteCode/Compiler.cpp85
-rw-r--r--clang/lib/AST/ByteCode/Compiler.h4
-rw-r--r--clang/lib/AST/ByteCode/Context.cpp4
-rw-r--r--clang/lib/AST/ByteCode/Context.h19
-rw-r--r--clang/lib/AST/ByteCode/Descriptor.cpp15
-rw-r--r--clang/lib/AST/ByteCode/Descriptor.h7
-rw-r--r--clang/lib/AST/ByteCode/Disasm.cpp11
-rw-r--r--clang/lib/AST/ByteCode/DynamicAllocator.cpp62
-rw-r--r--clang/lib/AST/ByteCode/DynamicAllocator.h9
-rw-r--r--clang/lib/AST/ByteCode/EvalEmitter.cpp7
-rw-r--r--clang/lib/AST/ByteCode/Interp.cpp258
-rw-r--r--clang/lib/AST/ByteCode/Interp.h54
-rw-r--r--clang/lib/AST/ByteCode/InterpBlock.cpp9
-rw-r--r--clang/lib/AST/ByteCode/InterpBlock.h58
-rw-r--r--clang/lib/AST/ByteCode/InterpBuiltin.cpp26
-rw-r--r--clang/lib/AST/ByteCode/InterpFrame.cpp6
-rw-r--r--clang/lib/AST/ByteCode/InterpFrame.h1
-rw-r--r--clang/lib/AST/ByteCode/InterpState.cpp22
-rw-r--r--clang/lib/AST/ByteCode/MemberPointer.h6
-rw-r--r--clang/lib/AST/ByteCode/Pointer.cpp7
-rw-r--r--clang/lib/AST/ByteCode/Pointer.h9
-rw-r--r--clang/lib/AST/ByteCode/Program.cpp13
-rw-r--r--clang/lib/AST/ByteCode/Record.cpp2
-rw-r--r--clang/lib/AST/CXXInheritance.cpp16
-rw-r--r--clang/lib/AST/Comment.cpp2
-rw-r--r--clang/lib/AST/CommentParser.cpp5
-rw-r--r--clang/lib/AST/CommentSema.cpp21
-rw-r--r--clang/lib/AST/ComparisonCategories.cpp2
-rw-r--r--clang/lib/AST/ComputeDependence.cpp26
-rw-r--r--clang/lib/AST/Decl.cpp110
-rw-r--r--clang/lib/AST/DeclBase.cpp40
-rw-r--r--clang/lib/AST/DeclCXX.cpp104
-rw-r--r--clang/lib/AST/DeclPrinter.cpp60
-rw-r--r--clang/lib/AST/DeclTemplate.cpp72
-rw-r--r--clang/lib/AST/DeclarationName.cpp6
-rw-r--r--clang/lib/AST/DynamicRecursiveASTVisitor.cpp53
-rw-r--r--clang/lib/AST/Expr.cpp25
-rw-r--r--clang/lib/AST/ExprCXX.cpp9
-rw-r--r--clang/lib/AST/ExprConcepts.cpp4
-rw-r--r--clang/lib/AST/ExprConstant.cpp89
-rw-r--r--clang/lib/AST/FormatString.cpp10
-rw-r--r--clang/lib/AST/InheritViz.cpp6
-rw-r--r--clang/lib/AST/ItaniumCXXABI.cpp8
-rw-r--r--clang/lib/AST/ItaniumMangle.cpp194
-rw-r--r--clang/lib/AST/JSONNodeDumper.cpp36
-rw-r--r--clang/lib/AST/MicrosoftMangle.cpp21
-rw-r--r--clang/lib/AST/NestedNameSpecifier.cpp474
-rw-r--r--clang/lib/AST/ODRHash.cpp124
-rw-r--r--clang/lib/AST/OpenMPClause.cpp26
-rw-r--r--clang/lib/AST/ParentMapContext.cpp6
-rw-r--r--clang/lib/AST/PrintfFormatString.cpp2
-rw-r--r--clang/lib/AST/QualTypeNames.cpp368
-rw-r--r--clang/lib/AST/RecordLayoutBuilder.cpp27
-rw-r--r--clang/lib/AST/ScanfFormatString.cpp5
-rw-r--r--clang/lib/AST/StmtPrinter.cpp33
-rw-r--r--clang/lib/AST/StmtProfile.cpp15
-rw-r--r--clang/lib/AST/TemplateBase.cpp32
-rw-r--r--clang/lib/AST/TemplateName.cpp56
-rw-r--r--clang/lib/AST/TextNodeDumper.cpp78
-rw-r--r--clang/lib/AST/Type.cpp474
-rw-r--r--clang/lib/AST/TypeLoc.cpp244
-rw-r--r--clang/lib/AST/TypePrinter.cpp261
-rw-r--r--clang/lib/AST/VTTBuilder.cpp12
-rw-r--r--clang/lib/AST/VTableBuilder.cpp8
-rw-r--r--clang/lib/ASTMatchers/ASTMatchFinder.cpp115
-rw-r--r--clang/lib/ASTMatchers/ASTMatchersInternal.cpp5
-rw-r--r--clang/lib/ASTMatchers/Dynamic/Registry.cpp5
-rw-r--r--clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp6
-rw-r--r--clang/lib/Analysis/ThreadSafety.cpp4
-rw-r--r--clang/lib/Analysis/ThreadSafetyCommon.cpp4
-rw-r--r--clang/lib/Analysis/UnsafeBufferUsage.cpp20
-rw-r--r--clang/lib/Basic/Targets/AVR.h3
-rw-r--r--clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp18
-rw-r--r--clang/lib/CIR/CodeGen/CIRGenCXXExpr.cpp4
-rw-r--r--clang/lib/CIR/CodeGen/CIRGenCall.cpp21
-rw-r--r--clang/lib/CIR/CodeGen/CIRGenClass.cpp13
-rw-r--r--clang/lib/CIR/CodeGen/CIRGenCleanup.cpp71
-rw-r--r--clang/lib/CIR/CodeGen/CIRGenCleanup.h43
-rw-r--r--clang/lib/CIR/CodeGen/CIRGenDecl.cpp6
-rw-r--r--clang/lib/CIR/CodeGen/CIRGenExpr.cpp11
-rw-r--r--clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp5
-rw-r--r--clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp8
-rw-r--r--clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp3
-rw-r--r--clang/lib/CIR/CodeGen/CIRGenFunction.cpp36
-rw-r--r--clang/lib/CIR/CodeGen/CIRGenFunction.h26
-rw-r--r--clang/lib/CIR/CodeGen/CIRGenModule.cpp26
-rw-r--r--clang/lib/CIR/CodeGen/CIRGenModule.h2
-rw-r--r--clang/lib/CIR/CodeGen/CIRGenStmt.cpp2
-rw-r--r--clang/lib/CIR/CodeGen/CIRGenTypes.cpp33
-rw-r--r--clang/lib/CIR/CodeGen/EHScopeStack.h101
-rw-r--r--clang/lib/CIR/CodeGen/TargetInfo.cpp2
-rw-r--r--clang/lib/CIR/Dialect/IR/CIRDialect.cpp21
-rw-r--r--clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp29
-rw-r--r--clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h10
-rw-r--r--clang/lib/CodeGen/ABIInfo.cpp2
-rw-r--r--clang/lib/CodeGen/ABIInfoImpl.cpp36
-rw-r--r--clang/lib/CodeGen/CGBlocks.cpp3
-rw-r--r--clang/lib/CodeGen/CGBuiltin.cpp19
-rw-r--r--clang/lib/CodeGen/CGCUDANV.cpp3
-rw-r--r--clang/lib/CodeGen/CGCXX.cpp19
-rw-r--r--clang/lib/CodeGen/CGCXXABI.cpp4
-rw-r--r--clang/lib/CodeGen/CGCall.cpp77
-rw-r--r--clang/lib/CodeGen/CGCall.h8
-rw-r--r--clang/lib/CodeGen/CGClass.cpp95
-rw-r--r--clang/lib/CodeGen/CGDebugInfo.cpp127
-rw-r--r--clang/lib/CodeGen/CGDecl.cpp46
-rw-r--r--clang/lib/CodeGen/CGExpr.cpp65
-rw-r--r--clang/lib/CodeGen/CGExprAgg.cpp35
-rw-r--r--clang/lib/CodeGen/CGExprCXX.cpp37
-rw-r--r--clang/lib/CodeGen/CGExprConstant.cpp29
-rw-r--r--clang/lib/CodeGen/CGExprScalar.cpp121
-rw-r--r--clang/lib/CodeGen/CGHLSLRuntime.cpp12
-rw-r--r--clang/lib/CodeGen/CGNonTrivialStruct.cpp9
-rw-r--r--clang/lib/CodeGen/CGObjC.cpp4
-rw-r--r--clang/lib/CodeGen/CGObjCMac.cpp11
-rw-r--r--clang/lib/CodeGen/CGObjCRuntime.cpp4
-rw-r--r--clang/lib/CodeGen/CGOpenMPRuntime.cpp35
-rw-r--r--clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp4
-rw-r--r--clang/lib/CodeGen/CGPointerAuth.cpp2
-rw-r--r--clang/lib/CodeGen/CGStmt.cpp4
-rw-r--r--clang/lib/CodeGen/CGStmtOpenMP.cpp9
-rw-r--r--clang/lib/CodeGen/CGVTables.cpp8
-rw-r--r--clang/lib/CodeGen/CodeGenFunction.cpp8
-rw-r--r--clang/lib/CodeGen/CodeGenFunction.h36
-rw-r--r--clang/lib/CodeGen/CodeGenModule.cpp28
-rw-r--r--clang/lib/CodeGen/CodeGenTBAA.cpp13
-rw-r--r--clang/lib/CodeGen/CodeGenTypes.cpp33
-rw-r--r--clang/lib/CodeGen/CoverageMappingGen.cpp10
-rw-r--r--clang/lib/CodeGen/HLSLBufferLayoutBuilder.cpp3
-rw-r--r--clang/lib/CodeGen/ItaniumCXXABI.cpp68
-rw-r--r--clang/lib/CodeGen/MicrosoftCXXABI.cpp25
-rw-r--r--clang/lib/CodeGen/SwiftCallingConv.cpp9
-rw-r--r--clang/lib/CodeGen/TargetBuiltins/WebAssembly.cpp29
-rw-r--r--clang/lib/CodeGen/TargetBuiltins/X86.cpp9
-rw-r--r--clang/lib/CodeGen/Targets/AArch64.cpp9
-rw-r--r--clang/lib/CodeGen/Targets/AMDGPU.cpp6
-rw-r--r--clang/lib/CodeGen/Targets/ARC.cpp5
-rw-r--r--clang/lib/CodeGen/Targets/ARM.cpp9
-rw-r--r--clang/lib/CodeGen/Targets/BPF.cpp5
-rw-r--r--clang/lib/CodeGen/Targets/CSKY.cpp2
-rw-r--r--clang/lib/CodeGen/Targets/Hexagon.cpp5
-rw-r--r--clang/lib/CodeGen/Targets/Lanai.cpp5
-rw-r--r--clang/lib/CodeGen/Targets/LoongArch.cpp10
-rw-r--r--clang/lib/CodeGen/Targets/Mips.cpp8
-rw-r--r--clang/lib/CodeGen/Targets/NVPTX.cpp6
-rw-r--r--clang/lib/CodeGen/Targets/PPC.cpp8
-rw-r--r--clang/lib/CodeGen/Targets/RISCV.cpp10
-rw-r--r--clang/lib/CodeGen/Targets/SPIR.cpp6
-rw-r--r--clang/lib/CodeGen/Targets/Sparc.cpp2
-rw-r--r--clang/lib/CodeGen/Targets/SystemZ.cpp8
-rw-r--r--clang/lib/CodeGen/Targets/WebAssembly.cpp3
-rw-r--r--clang/lib/CodeGen/Targets/X86.cpp59
-rw-r--r--clang/lib/CodeGen/Targets/XCore.cpp4
-rw-r--r--clang/lib/Driver/Compilation.cpp7
-rw-r--r--clang/lib/Driver/ToolChains/Arch/RISCV.cpp11
-rw-r--r--clang/lib/ExtractAPI/DeclarationFragments.cpp110
-rw-r--r--clang/lib/ExtractAPI/TypedefUnderlyingTypeResolver.cpp2
-rw-r--r--clang/lib/Frontend/ASTConsumers.cpp7
-rw-r--r--clang/lib/Frontend/ASTUnit.cpp2
-rw-r--r--clang/lib/Frontend/CompilerInvocation.cpp87
-rw-r--r--clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp36
-rw-r--r--clang/lib/Frontend/Rewrite/RewriteObjC.cpp24
-rw-r--r--clang/lib/Headers/avx10_2_512bf16intrin.h16
-rw-r--r--clang/lib/Headers/avx10_2bf16intrin.h32
-rw-r--r--clang/lib/Headers/avx2intrin.h26
-rw-r--r--clang/lib/Headers/avx512bitalgintrin.h12
-rw-r--r--clang/lib/Headers/avx512bwintrin.h18
-rw-r--r--clang/lib/Headers/avx512dqintrin.h26
-rw-r--r--clang/lib/Headers/avx512fintrin.h143
-rw-r--r--clang/lib/Headers/avx512vlbitalgintrin.h18
-rw-r--r--clang/lib/Headers/avx512vlfp16intrin.h76
-rw-r--r--clang/lib/Headers/avx512vlintrin.h432
-rw-r--r--clang/lib/Headers/avx512vpopcntdqintrin.h1
-rw-r--r--clang/lib/Headers/avx512vpopcntdqvlintrin.h2
-rw-r--r--clang/lib/Headers/avxintrin.h100
-rw-r--r--clang/lib/Headers/cpuid.h5
-rw-r--r--clang/lib/Headers/emmintrin.h40
-rw-r--r--clang/lib/Headers/fma4intrin.h48
-rw-r--r--clang/lib/Headers/fmaintrin.h48
-rw-r--r--clang/lib/Headers/mmintrin.h55
-rw-r--r--clang/lib/Headers/xmmintrin.h7
-rw-r--r--clang/lib/Index/IndexTypeSourceInfo.cpp48
-rw-r--r--clang/lib/Index/USRGeneration.cpp19
-rw-r--r--clang/lib/InstallAPI/Visitor.cpp8
-rw-r--r--clang/lib/Interpreter/InterpreterValuePrinter.cpp8
-rw-r--r--clang/lib/Interpreter/RemoteJITUtils.cpp31
-rw-r--r--clang/lib/Interpreter/Value.cpp6
-rw-r--r--clang/lib/Parse/ParseDeclCXX.cpp2
-rw-r--r--clang/lib/Parse/ParseExprCXX.cpp13
-rw-r--r--clang/lib/Parse/ParseTemplate.cpp29
-rw-r--r--clang/lib/Parse/ParseTentative.cpp2
-rw-r--r--clang/lib/Parse/Parser.cpp9
-rw-r--r--clang/lib/Sema/AnalysisBasedWarnings.cpp11
-rw-r--r--clang/lib/Sema/DeclSpec.cpp29
-rw-r--r--clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.cpp23
-rw-r--r--clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.h2
-rw-r--r--clang/lib/Sema/HeuristicResolver.cpp50
-rw-r--r--clang/lib/Sema/Sema.cpp21
-rw-r--r--clang/lib/Sema/SemaAccess.cpp42
-rw-r--r--clang/lib/Sema/SemaAvailability.cpp4
-rw-r--r--clang/lib/Sema/SemaBPF.cpp6
-rw-r--r--clang/lib/Sema/SemaCUDA.cpp7
-rw-r--r--clang/lib/Sema/SemaCXXScopeSpec.cpp329
-rw-r--r--clang/lib/Sema/SemaCast.cpp51
-rw-r--r--clang/lib/Sema/SemaChecking.cpp97
-rw-r--r--clang/lib/Sema/SemaCodeComplete.cpp181
-rw-r--r--clang/lib/Sema/SemaCoroutine.cpp35
-rw-r--r--clang/lib/Sema/SemaDecl.cpp591
-rw-r--r--clang/lib/Sema/SemaDeclAttr.cpp23
-rw-r--r--clang/lib/Sema/SemaDeclCXX.cpp480
-rw-r--r--clang/lib/Sema/SemaDeclObjC.cpp32
-rw-r--r--clang/lib/Sema/SemaExceptionSpec.cpp5
-rw-r--r--clang/lib/Sema/SemaExpr.cpp125
-rw-r--r--clang/lib/Sema/SemaExprCXX.cpp174
-rw-r--r--clang/lib/Sema/SemaExprMember.cpp7
-rw-r--r--clang/lib/Sema/SemaExprObjC.cpp15
-rw-r--r--clang/lib/Sema/SemaFunctionEffects.cpp8
-rw-r--r--clang/lib/Sema/SemaHLSL.cpp40
-rw-r--r--clang/lib/Sema/SemaInit.cpp145
-rw-r--r--clang/lib/Sema/SemaLambda.cpp8
-rw-r--r--clang/lib/Sema/SemaLookup.cpp233
-rw-r--r--clang/lib/Sema/SemaObjC.cpp7
-rw-r--r--clang/lib/Sema/SemaObjCProperty.cpp4
-rw-r--r--clang/lib/Sema/SemaOpenMP.cpp13
-rw-r--r--clang/lib/Sema/SemaOverload.cpp181
-rw-r--r--clang/lib/Sema/SemaPPC.cpp6
-rw-r--r--clang/lib/Sema/SemaSYCL.cpp2
-rw-r--r--clang/lib/Sema/SemaStmt.cpp40
-rw-r--r--clang/lib/Sema/SemaStmtAsm.cpp10
-rw-r--r--clang/lib/Sema/SemaSwift.cpp5
-rw-r--r--clang/lib/Sema/SemaTemplate.cpp563
-rw-r--r--clang/lib/Sema/SemaTemplateDeduction.cpp173
-rw-r--r--clang/lib/Sema/SemaTemplateDeductionGuide.cpp59
-rw-r--r--clang/lib/Sema/SemaTemplateInstantiate.cpp186
-rw-r--r--clang/lib/Sema/SemaTemplateInstantiateDecl.cpp84
-rw-r--r--clang/lib/Sema/SemaTemplateVariadic.cpp39
-rw-r--r--clang/lib/Sema/SemaType.cpp184
-rw-r--r--clang/lib/Sema/SemaTypeTraits.cpp35
-rw-r--r--clang/lib/Sema/SemaWasm.cpp36
-rw-r--r--clang/lib/Sema/TreeTransform.h990
-rw-r--r--clang/lib/Sema/UsedDeclVisitor.h7
-rw-r--r--clang/lib/Serialization/ASTReader.cpp119
-rw-r--r--clang/lib/Serialization/ASTReaderDecl.cpp26
-rw-r--r--clang/lib/Serialization/ASTWriter.cpp108
-rw-r--r--clang/lib/Serialization/ASTWriterDecl.cpp7
-rw-r--r--clang/lib/Serialization/TemplateArgumentHasher.cpp8
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp3
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp4
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp5
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp5
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/EnumCastOutOfRangeChecker.cpp3
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp5
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp7
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp4
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp5
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp5
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp11
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp6
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp16
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp76
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.h57
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp83
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.h59
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp4
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/StoreToImmutableChecker.cpp51
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp4
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/VforkChecker.cpp5
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp33
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/WebKit/RefCntblBaseVirtualDtorChecker.cpp8
-rw-r--r--clang/lib/StaticAnalyzer/Core/CallEvent.cpp11
-rw-r--r--clang/lib/StaticAnalyzer/Core/CheckerManager.cpp16
-rw-r--r--clang/lib/StaticAnalyzer/Core/ExprEngine.cpp11
-rw-r--r--clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Core/MemRegion.cpp4
-rw-r--r--clang/lib/StaticAnalyzer/Core/RegionStore.cpp5
-rw-r--r--clang/lib/StaticAnalyzer/Core/SValBuilder.cpp4
-rw-r--r--clang/lib/Tooling/ASTDiff/ASTDiff.cpp13
-rw-r--r--clang/lib/Tooling/Refactoring/Lookup.cpp14
-rw-r--r--clang/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp167
-rw-r--r--clang/lib/Tooling/Syntax/BuildTree.cpp171
-rw-r--r--clang/lib/Tooling/Transformer/RangeSelector.cpp8
-rw-r--r--clang/test/AST/ByteCode/arrays.cpp17
-rw-r--r--clang/test/AST/ByteCode/cxx11.cpp30
-rw-r--r--clang/test/AST/ByteCode/cxx23.cpp5
-rw-r--r--clang/test/AST/ByteCode/cxx2a.cpp14
-rw-r--r--clang/test/AST/ByteCode/functions.cpp5
-rw-r--r--clang/test/AST/ByteCode/lifetimes.cpp9
-rw-r--r--clang/test/AST/HLSL/StructuredBuffers-AST.hlsl36
-rw-r--r--clang/test/AST/HLSL/TypedBuffers-AST.hlsl24
-rw-r--r--clang/test/AST/HLSL/cbuffer_and_namespaces.hlsl4
-rw-r--r--clang/test/AST/HLSL/vector-constructors.hlsl8
-rw-r--r--clang/test/AST/arm-mfp8.cpp2
-rw-r--r--clang/test/AST/ast-dump-color.cpp16
-rw-r--r--clang/test/AST/ast-dump-ctad-alias.cpp23
-rw-r--r--clang/test/AST/ast-dump-cxx2b-deducing-this.cpp2
-rw-r--r--clang/test/AST/ast-dump-decl-json.c3
-rw-r--r--clang/test/AST/ast-dump-decl.cpp38
-rw-r--r--clang/test/AST/ast-dump-expr-json.cpp14
-rw-r--r--clang/test/AST/ast-dump-expr.cpp7
-rw-r--r--clang/test/AST/ast-dump-for-range-lifetime.cpp318
-rw-r--r--clang/test/AST/ast-dump-openmp-begin-declare-variant_reference.cpp24
-rw-r--r--clang/test/AST/ast-dump-openmp-begin-declare-variant_template_3.cpp2
-rw-r--r--clang/test/AST/ast-dump-record-definition-data-json.cpp5
-rw-r--r--clang/test/AST/ast-dump-records-json.cpp2
-rw-r--r--clang/test/AST/ast-dump-records.c8
-rw-r--r--clang/test/AST/ast-dump-records.cpp8
-rw-r--r--clang/test/AST/ast-dump-recovery.cpp6
-rw-r--r--clang/test/AST/ast-dump-stmt-json.cpp86
-rw-r--r--clang/test/AST/ast-dump-stmt.m2
-rw-r--r--clang/test/AST/ast-dump-template-decls.cpp3
-rw-r--r--clang/test/AST/ast-dump-template-json-win32-mangler-crash.cpp112
-rw-r--r--clang/test/AST/ast-dump-template-name.cpp47
-rw-r--r--clang/test/AST/ast-dump-templates.cpp225
-rw-r--r--clang/test/AST/ast-dump-traits.cpp5
-rw-r--r--clang/test/AST/ast-dump-types-json.cpp77
-rw-r--r--clang/test/AST/ast-dump-using-template.cpp38
-rw-r--r--clang/test/AST/ast-dump-using.cpp16
-rw-r--r--clang/test/AST/attr-swift_attr.m2
-rw-r--r--clang/test/AST/coroutine-locals-cleanup.cpp4
-rw-r--r--clang/test/AST/cxx2c-variadic-friends.cpp4
-rw-r--r--clang/test/AST/deduction-guides.cpp18
-rw-r--r--clang/test/AST/float16.cpp2
-rw-r--r--clang/test/AST/sourceranges.cpp6
-rw-r--r--clang/test/ASTSYCL/ast-dump-sycl-kernel-call-stmt.cpp8
-rw-r--r--clang/test/Analysis/anonymous-decls.cpp4
-rw-r--r--clang/test/Analysis/malloc-checker-arg-uaf.c44
-rw-r--r--clang/test/C/C11/n1285_1.c44
-rw-r--r--clang/test/CIR/CodeGen/builtin_call.cpp32
-rw-r--r--clang/test/CIR/CodeGen/complex-compound-assignment.cpp208
-rw-r--r--clang/test/CIR/CodeGen/module-asm.c6
-rw-r--r--clang/test/CIR/IR/invalid-vtable.cir9
-rw-r--r--clang/test/CIR/IR/vtable-addrpt.cir23
-rw-r--r--clang/test/CIR/Lowering/module-asm.cir11
-rw-r--r--clang/test/CMakeLists.txt1
-rw-r--r--clang/test/CXX/basic/basic.lookup/basic.lookup.qual/class.qual/p2.cpp2
-rw-r--r--clang/test/CXX/class.access/p6.cpp2
-rw-r--r--clang/test/CXX/class.derived/class.derived.general/p2.cpp8
-rw-r--r--clang/test/CXX/class/class.mem/class.mem.general/p8.cpp8
-rw-r--r--clang/test/CXX/dcl.dcl/dcl.spec/dcl.type/dcl.type.elab/p3.cpp2
-rw-r--r--clang/test/CXX/dcl.decl/dcl.init/dcl.init.ref/p5-examples.cpp2
-rw-r--r--clang/test/CXX/dcl.decl/dcl.meaning/dcl.mptr/p2.cpp2
-rw-r--r--clang/test/CXX/dcl/dcl.fct/p17.cpp2
-rw-r--r--clang/test/CXX/drs/cwg0xx.cpp6
-rw-r--r--clang/test/CXX/drs/cwg12xx.cpp2
-rw-r--r--clang/test/CXX/drs/cwg13xx.cpp2
-rw-r--r--clang/test/CXX/drs/cwg2149.cpp8
-rw-r--r--clang/test/CXX/drs/cwg26xx.cpp8
-rw-r--r--clang/test/CXX/drs/cwg28xx.cpp12
-rw-r--r--clang/test/CXX/drs/cwg2xx.cpp10
-rw-r--r--clang/test/CXX/drs/cwg3xx.cpp8
-rw-r--r--clang/test/CXX/drs/cwg4xx.cpp2
-rw-r--r--clang/test/CXX/drs/cwg6xx.cpp8
-rw-r--r--clang/test/CXX/expr/expr.prim/expr.prim.req/compound-requirement.cpp2
-rw-r--r--clang/test/CXX/over/over.match/over.match.funcs/over.match.class.deduct/p2.cpp2
-rw-r--r--clang/test/CXX/temp/temp.arg/temp.arg.nontype/p5.cpp4
-rw-r--r--clang/test/CXX/temp/temp.constr/temp.constr.decl/func-template-decl.cpp2
-rw-r--r--clang/test/CXX/temp/temp.decls/temp.class.spec/p6.cpp4
-rw-r--r--clang/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.call/p3-0x.cpp4
-rw-r--r--clang/test/CXX/temp/temp.param/p15-cxx0x.cpp4
-rw-r--r--clang/test/CXX/temp/temp.res/temp.dep/temp.dep.type/p4.cpp40
-rw-r--r--clang/test/CodeGen/2007-06-18-SextAttrAggregate.c2
-rw-r--r--clang/test/CodeGen/AArch64/ABI-align-packed.c56
-rw-r--r--clang/test/CodeGen/AArch64/byval-temp.c24
-rw-r--r--clang/test/CodeGen/AArch64/pure-scalable-args.c10
-rw-r--r--clang/test/CodeGen/AArch64/sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.c4
-rw-r--r--clang/test/CodeGen/AArch64/sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp4
-rw-r--r--clang/test/CodeGen/RISCV/riscv-v-lifetime.cpp8
-rw-r--r--clang/test/CodeGen/WebAssembly/builtins-test-fp-sig.c70
-rw-r--r--clang/test/CodeGen/X86/avx-builtins.c32
-rw-r--r--clang/test/CodeGen/X86/avx2-builtins.c8
-rw-r--r--clang/test/CodeGen/X86/avx512-reduceIntrin.c108
-rw-r--r--clang/test/CodeGen/X86/avx512-reduceMinMaxIntrin.c111
-rw-r--r--clang/test/CodeGen/X86/avx512bitalg-builtins.c24
-rw-r--r--clang/test/CodeGen/X86/avx512bw-builtins.c9
-rw-r--r--clang/test/CodeGen/X86/avx512dq-builtins.c458
-rw-r--r--clang/test/CodeGen/X86/avx512f-builtins.c70
-rw-r--r--clang/test/CodeGen/X86/avx512vl-builtins.c2565
-rw-r--r--clang/test/CodeGen/X86/avx512vlbitalg-builtins.c42
-rw-r--r--clang/test/CodeGen/X86/avx512vpopcntdq-builtins.c19
-rw-r--r--clang/test/CodeGen/X86/avx512vpopcntdqvl-builtins.c29
-rw-r--r--clang/test/CodeGen/X86/builtin_test_helpers.h30
-rw-r--r--clang/test/CodeGen/X86/mmx-builtins.c27
-rw-r--r--clang/test/CodeGen/X86/sse2-builtins.c7
-rw-r--r--clang/test/CodeGen/aggregate-assign-call.c22
-rw-r--r--clang/test/CodeGen/attr-counted-by-for-pointers.c8
-rw-r--r--clang/test/CodeGen/attr-counted-by.c8
-rw-r--r--clang/test/CodeGen/builtin-bpf-btf-type-id.c2
-rw-r--r--clang/test/CodeGen/builtins-bpf-preserve-field-info-3.c2
-rw-r--r--clang/test/CodeGen/builtins-wasm.c21
-rw-r--r--clang/test/CodeGen/builtins-x86.c4
-rw-r--r--clang/test/CodeGen/cleanup-destslot-simple.c24
-rw-r--r--clang/test/CodeGen/debug-info-version.c2
-rw-r--r--clang/test/CodeGen/dominating-store-to-return.c8
-rw-r--r--clang/test/CodeGen/lifetime-sanitizer.c8
-rw-r--r--clang/test/CodeGen/lifetime.c8
-rw-r--r--clang/test/CodeGen/lifetime2.c18
-rw-r--r--clang/test/CodeGen/lifetime3.cpp16
-rw-r--r--clang/test/CodeGen/math-libcalls-tbaa.c12
-rw-r--r--clang/test/CodeGen/nofpclass.c8
-rw-r--r--clang/test/CodeGen/target-builtin-noerror.c6
-rw-r--r--clang/test/CodeGen/temporary-lifetime-exceptions.cpp12
-rw-r--r--clang/test/CodeGen/temporary-lifetime.cpp48
-rw-r--r--clang/test/CodeGen/union-tbaa1.c4
-rw-r--r--clang/test/CodeGenCXX/amdgcn_declspec_get.cpp4
-rw-r--r--clang/test/CodeGenCXX/attr-likelihood-iteration-stmt.cpp40
-rw-r--r--clang/test/CodeGenCXX/destructors.cpp14
-rw-r--r--clang/test/CodeGenCXX/microsoft-abi-eh-cleanups.cpp6
-rw-r--r--clang/test/CodeGenCXX/modules-vtable.cppm2
-rw-r--r--clang/test/CodeGenCXX/pr70585.cppm2
-rw-r--r--clang/test/CodeGenCXX/stack-reuse-exceptions.cpp60
-rw-r--r--clang/test/CodeGenCXX/stack-reuse-miscompile.cpp6
-rw-r--r--clang/test/CodeGenObjC/arc-blocks.m18
-rw-r--r--clang/test/CodeGenObjC/arc-precise-lifetime.m68
-rw-r--r--clang/test/CodeGenObjC/arc-ternary-op.m12
-rw-r--r--clang/test/CodeGenObjC/arc.m102
-rw-r--r--clang/test/CodeGenObjC/exceptions.m4
-rw-r--r--clang/test/CodeGenObjCXX/arc-move.mm8
-rw-r--r--clang/test/CodeGenObjCXX/arc-references.mm6
-rw-r--r--clang/test/CodeGenObjCXX/arc.mm6
-rw-r--r--clang/test/CodeGenObjCXX/literals.mm12
-rw-r--r--clang/test/CodeGenOpenCL/amdgpu-enqueue-kernel.cl32
-rw-r--r--clang/test/CodeGenOpenCL/amdgpu-printf.cl4
-rw-r--r--clang/test/CodeGenOpenCL/cl20-device-side-enqueue.cl28
-rw-r--r--clang/test/CodeGenOpenCL/implicit-addrspacecast-function-parameter.cl4
-rw-r--r--clang/test/CoverageMapping/logical.cpp22
-rw-r--r--clang/test/DebugInfo/KeyInstructions/lit.local.cfg2
-rw-r--r--clang/test/Driver/clang-offload-bundler-zlib.c24
-rw-r--r--clang/test/Driver/clang-offload-bundler-zstd.c4
-rw-r--r--clang/test/Driver/cuda-detect-path.cu2
-rw-r--r--clang/test/Driver/hipspv-toolchain.hip2
-rw-r--r--clang/test/Driver/ld-path.c2
-rw-r--r--clang/test/Driver/program-path-priority.c2
-rw-r--r--clang/test/Driver/riscv-cpus.c5
-rw-r--r--clang/test/Driver/spirv-toolchain.cl2
-rw-r--r--clang/test/Headers/__clang_hip_cmath.hip16
-rw-r--r--clang/test/Headers/__clang_hip_math.hip160
-rw-r--r--clang/test/Headers/__cpuidex_conflict.c1
-rw-r--r--clang/test/Index/Core/index-instantiated-source.cpp4
-rw-r--r--clang/test/Index/Core/index-source.cpp4
-rw-r--r--clang/test/Index/c-index-api-loadTU-test.m2
-rw-r--r--clang/test/Index/copy-assignment-operator.cpp4
-rw-r--r--clang/test/Index/index-refs.cpp4
-rw-r--r--clang/test/Index/keep-going.cpp4
-rw-r--r--clang/test/Index/move-assignment-operator.cpp2
-rw-r--r--clang/test/Index/opencl-types.cl16
-rw-r--r--clang/test/Index/paren-type.c2
-rw-r--r--clang/test/Index/print-type-size.cpp8
-rw-r--r--clang/test/Index/print-type.c14
-rw-r--r--clang/test/Index/print-type.cpp56
-rw-r--r--clang/test/Index/recursive-cxx-member-calls.cpp20
-rw-r--r--clang/test/Index/redeclarations.cpp2
-rw-r--r--clang/test/Index/skip-parsed-bodies/compile_commands.json6
-rw-r--r--clang/test/Interpreter/simple-exception.cpp2
-rw-r--r--clang/test/Lexer/cross-windows-on-linux.cpp2
-rw-r--r--clang/test/Lexer/has_feature_cfi.c87
-rw-r--r--clang/test/Misc/diag-template-diffing-cxx11.cpp8
-rw-r--r--clang/test/Modules/modules-merge-enum.m24
-rw-r--r--clang/test/Modules/odr_hash.cpp12
-rw-r--r--clang/test/Modules/pr97313.cppm2
-rw-r--r--clang/test/OpenMP/allocate_modifiers_messages.cpp2
-rw-r--r--clang/test/OpenMP/bug54082.c24
-rw-r--r--clang/test/OpenMP/bug56913.c4
-rw-r--r--clang/test/OpenMP/bug57757.cpp2
-rw-r--r--clang/test/OpenMP/nvptx_target_parallel_reduction_codegen_tbaa_PR46146.cpp192
-rw-r--r--clang/test/OpenMP/target_map_array_of_structs_with_nested_mapper_ast_dump.cpp2
-rw-r--r--clang/test/OpenMP/target_map_array_section_of_structs_with_nested_mapper_ast_dump.cpp2
-rw-r--r--clang/test/PCH/cxx-explicit-specifier.cpp6
-rw-r--r--clang/test/Parser/MicrosoftExtensions.cpp2
-rw-r--r--clang/test/Parser/cxx1z-class-template-argument-deduction.cpp6
-rw-r--r--clang/test/Preprocessor/file_test.c2
-rw-r--r--clang/test/Sema/aarch64-sve-intrinsics/acle_sve_compact.cpp18
-rw-r--r--clang/test/Sema/builtins-wasm.c17
-rw-r--r--clang/test/SemaCXX/MicrosoftExtensions.cpp8
-rw-r--r--clang/test/SemaCXX/builtin-get-vtable-pointer.cpp2
-rw-r--r--clang/test/SemaCXX/class-base-member-init.cpp2
-rw-r--r--clang/test/SemaCXX/co_await-ast.cpp34
-rw-r--r--clang/test/SemaCXX/compound-literal.cpp34
-rw-r--r--clang/test/SemaCXX/constant-expression.cpp2
-rw-r--r--clang/test/SemaCXX/constructor.cpp2
-rw-r--r--clang/test/SemaCXX/coroutine-allocs.cpp6
-rw-r--r--clang/test/SemaCXX/coroutine-traits-undefined-template.cpp2
-rw-r--r--clang/test/SemaCXX/coroutines.cpp4
-rw-r--r--clang/test/SemaCXX/ctad.cpp4
-rw-r--r--clang/test/SemaCXX/cxx1y-variable-templates_in_class.cpp10
-rw-r--r--clang/test/SemaCXX/cxx1z-class-template-argument-deduction.cpp6
-rw-r--r--clang/test/SemaCXX/cxx23-invalid-constexpr.cpp1
-rw-r--r--clang/test/SemaCXX/cxx2a-destroying-delete.cpp2
-rw-r--r--clang/test/SemaCXX/cxx2b-deducing-this.cpp4
-rw-r--r--clang/test/SemaCXX/cxx2c-variadic-friends.cpp2
-rw-r--r--clang/test/SemaCXX/destructor.cpp6
-rw-r--r--clang/test/SemaCXX/elaborated-type-specifier.cpp2
-rw-r--r--clang/test/SemaCXX/enum-scoped.cpp2
-rw-r--r--clang/test/SemaCXX/err_init_conversion_failed.cpp2
-rw-r--r--clang/test/SemaCXX/gh102293.cpp2
-rw-r--r--clang/test/SemaCXX/incomplete-call.cpp2
-rw-r--r--clang/test/SemaCXX/matrix-casts.cpp4
-rw-r--r--clang/test/SemaCXX/nested-name-spec.cpp8
-rw-r--r--clang/test/SemaCXX/pointer-forward-declared-class-conversion.cpp4
-rw-r--r--clang/test/SemaCXX/pseudo-destructors.cpp2
-rw-r--r--clang/test/SemaCXX/return.cpp4
-rw-r--r--clang/test/SemaCXX/static-assert.cpp2
-rw-r--r--clang/test/SemaCXX/sugar-common-types.cpp5
-rw-r--r--clang/test/SemaCXX/sugared-auto.cpp2
-rw-r--r--clang/test/SemaCXX/type-aware-coroutines.cpp8
-rw-r--r--clang/test/SemaCXX/undefined-partial-specialization.cpp2
-rw-r--r--clang/test/SemaCXX/warn-unsafe-buffer-usage-libc-functions.cpp71
-rw-r--r--clang/test/SemaHIP/amdgcnspirv-implicit-alloc-function-calling-conv.hip32
-rw-r--r--clang/test/SemaObjCXX/Inputs/nullability-pragmas-1.h2
-rw-r--r--clang/test/SemaObjCXX/arc-bridged-cast.mm4
-rw-r--r--clang/test/SemaObjCXX/objc-boxed-expressions-nsvalue.mm4
-rw-r--r--clang/test/SemaObjCXX/objcbridge-attribute-arc.mm178
-rw-r--r--clang/test/SemaObjCXX/objcbridge-attribute.mm180
-rw-r--r--clang/test/SemaObjCXX/objcbridge-related-attribute.mm12
-rw-r--r--clang/test/SemaObjCXX/objcbridge-static-cast.mm104
-rw-r--r--clang/test/SemaOpenACC/combined-construct-num_workers-ast.cpp2
-rw-r--r--clang/test/SemaOpenACC/compute-construct-intexpr-clause-ast.cpp2
-rw-r--r--clang/test/SemaOpenACC/compute-construct-varlist-ast.cpp4
-rw-r--r--clang/test/SemaTemplate/aggregate-deduction-candidate.cpp84
-rw-r--r--clang/test/SemaTemplate/ctad.cpp27
-rw-r--r--clang/test/SemaTemplate/current-instantiation.cpp2
-rw-r--r--clang/test/SemaTemplate/deduction-crash.cpp4
-rw-r--r--clang/test/SemaTemplate/deduction-guide.cpp96
-rw-r--r--clang/test/SemaTemplate/dependent-base-classes.cpp10
-rw-r--r--clang/test/SemaTemplate/dependent-names.cpp2
-rw-r--r--clang/test/SemaTemplate/elaborated-type-specifier.cpp11
-rw-r--r--clang/test/SemaTemplate/instantiate-requires-expr.cpp4
-rw-r--r--clang/test/SemaTemplate/make_integer_seq.cpp162
-rw-r--r--clang/test/SemaTemplate/ms-lookup-template-base-classes.cpp2
-rw-r--r--clang/test/SemaTemplate/ms-sizeof-missing-typename.cpp12
-rw-r--r--clang/test/SemaTemplate/nested-implicit-deduction-guides.cpp6
-rw-r--r--clang/test/SemaTemplate/nested-name-spec-template.cpp15
-rw-r--r--clang/test/SemaTemplate/nested-template.cpp4
-rw-r--r--clang/test/SemaTemplate/overload-candidates.cpp12
-rw-r--r--clang/test/SemaTemplate/temp_arg_nontype.cpp2
-rw-r--r--clang/test/SemaTemplate/temp_arg_nontype_cxx1z.cpp5
-rw-r--r--clang/test/SemaTemplate/template-id-expr.cpp2
-rw-r--r--clang/test/SemaTemplate/type_pack_element.cpp86
-rw-r--r--clang/test/SemaTemplate/typename-specifier-4.cpp2
-rw-r--r--clang/test/SemaTemplate/typename-specifier.cpp6
-rw-r--r--clang/test/lit.site.cfg.py.in1
-rw-r--r--clang/tools/libclang/CIndex.cpp168
-rw-r--r--clang/tools/libclang/CIndexCodeCompletion.cpp4
-rw-r--r--clang/tools/libclang/CXCursor.cpp8
-rw-r--r--clang/tools/libclang/CXIndexDataConsumer.cpp25
-rw-r--r--clang/tools/libclang/CXType.cpp36
-rw-r--r--clang/tools/libclang/CursorVisitor.h4
-rw-r--r--clang/unittests/AST/ASTContextParentMapTest.cpp10
-rw-r--r--clang/unittests/AST/ASTExprTest.cpp4
-rw-r--r--clang/unittests/AST/ASTImporterFixtures.h3
-rw-r--r--clang/unittests/AST/ASTImporterTest.cpp132
-rw-r--r--clang/unittests/AST/DeclPrinterTest.cpp62
-rw-r--r--clang/unittests/AST/DeclTest.cpp2
-rw-r--r--clang/unittests/AST/ProfilingTest.cpp8
-rw-r--r--clang/unittests/AST/RandstructTest.cpp2
-rw-r--r--clang/unittests/AST/RecursiveASTVisitorTest.cpp5
-rw-r--r--clang/unittests/AST/SizelessTypesTest.cpp2
-rw-r--r--clang/unittests/AST/StructuralEquivalenceTest.cpp15
-rw-r--r--clang/unittests/AST/TemplateNameTest.cpp13
-rw-r--r--clang/unittests/AST/TypePrinterTest.cpp13
-rw-r--r--clang/unittests/ASTMatchers/ASTMatchersNarrowingTest.cpp25
-rw-r--r--clang/unittests/ASTMatchers/ASTMatchersNodeTest.cpp88
-rw-r--r--clang/unittests/ASTMatchers/ASTMatchersTraversalTest.cpp245
-rw-r--r--clang/unittests/Analysis/FlowSensitive/CachedConstAccessorsLatticeTest.cpp6
-rw-r--r--clang/unittests/Analysis/FlowSensitive/DataflowEnvironmentTest.cpp2
-rw-r--r--clang/unittests/CodeGen/CodeGenExternalTest.cpp5
-rw-r--r--clang/unittests/Index/IndexTests.cpp2
-rw-r--r--clang/unittests/Interpreter/CMakeLists.txt22
-rw-r--r--clang/unittests/Interpreter/InterpreterTest.cpp11
-rw-r--r--clang/unittests/Interpreter/OutOfProcessInterpreterTests.cpp148
-rw-r--r--clang/unittests/Sema/HeuristicResolverTest.cpp32
-rw-r--r--clang/unittests/StaticAnalyzer/ExprEngineVisitTest.cpp24
-rw-r--r--clang/unittests/StaticAnalyzer/SValTest.cpp16
-rw-r--r--clang/unittests/Tooling/LookupTest.cpp18
-rw-r--r--clang/unittests/Tooling/QualTypeNamesTest.cpp40
-rw-r--r--clang/unittests/Tooling/RangeSelectorTest.cpp6
-rw-r--r--clang/unittests/Tooling/RecursiveASTVisitorTestTypeLocVisitor.cpp12
-rw-r--r--clang/unittests/Tooling/RecursiveASTVisitorTests/MemberPointerTypeLoc.cpp2
-rw-r--r--clang/unittests/Tooling/RecursiveASTVisitorTests/NestedNameSpecifiers.cpp19
-rw-r--r--clang/unittests/Tooling/RefactoringTest.cpp9
-rw-r--r--compiler-rt/lib/builtins/aarch64/lse.S4
-rw-r--r--compiler-rt/lib/builtins/aarch64/sme-abi.S4
-rw-r--r--compiler-rt/lib/builtins/assembly.h18
-rw-r--r--compiler-rt/lib/fuzzer/FuzzerUtilPosix.cpp12
-rw-r--r--compiler-rt/lib/hwasan/hwasan_interceptors_vfork.S2
-rw-r--r--compiler-rt/lib/hwasan/hwasan_setjmp_aarch64.S2
-rw-r--r--compiler-rt/lib/hwasan/hwasan_tag_mismatch_aarch64.S2
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h18
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S2
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_rtl_aarch64.S2
-rw-r--r--compiler-rt/lib/ubsan_minimal/ubsan_minimal_handlers.cpp14
-rw-r--r--compiler-rt/lib/xray/xray_fdr_logging.cpp2
-rw-r--r--compiler-rt/test/profile/Linux/coverage_short_circuit.cpp36
-rw-r--r--flang-rt/lib/runtime/CMakeLists.txt32
-rw-r--r--flang-rt/unittests/CMakeLists.txt8
-rw-r--r--flang/CMakeLists.txt2
-rw-r--r--flang/include/flang/Config/config.h.cmake16
-rw-r--r--flang/include/flang/Lower/CUDA.h (renamed from flang/include/flang/Lower/Cuda.h)27
-rw-r--r--flang/include/flang/Lower/ConvertVariable.h6
-rw-r--r--flang/include/flang/Optimizer/Builder/FIRBuilder.h11
-rw-r--r--flang/include/flang/Parser/parse-tree.h6
-rw-r--r--flang/include/flang/Semantics/openmp-utils.h (renamed from flang/lib/Semantics/openmp-utils.h)0
-rw-r--r--flang/lib/Evaluate/tools.cpp2
-rw-r--r--flang/lib/Lower/Allocatable.cpp14
-rw-r--r--flang/lib/Lower/Bridge.cpp5
-rw-r--r--flang/lib/Lower/CMakeLists.txt1
-rw-r--r--flang/lib/Lower/CUDA.cpp157
-rw-r--r--flang/lib/Lower/ConvertVariable.cpp82
-rw-r--r--flang/lib/Lower/OpenMP/OpenMP.cpp24
-rw-r--r--flang/lib/Optimizer/Builder/FIRBuilder.cpp8
-rw-r--r--flang/lib/Optimizer/Transforms/StackArrays.cpp4
-rw-r--r--flang/lib/Parser/openmp-parsers.cpp11
-rw-r--r--flang/lib/Parser/unparse.cpp4
-rw-r--r--flang/lib/Semantics/check-omp-atomic.cpp2
-rw-r--r--flang/lib/Semantics/check-omp-loop.cpp2
-rw-r--r--flang/lib/Semantics/check-omp-metadirective.cpp3
-rw-r--r--flang/lib/Semantics/check-omp-structure.cpp124
-rw-r--r--flang/lib/Semantics/openmp-utils.cpp2
-rw-r--r--flang/lib/Semantics/resolve-directives.cpp369
-rw-r--r--flang/lib/Semantics/resolve-names.cpp69
-rw-r--r--flang/lib/Semantics/unparse-with-symbols.cpp14
-rw-r--r--flang/module/cudadevice.f904
-rw-r--r--flang/test/Driver/atomic-control-options.f9020
-rw-r--r--flang/test/Driver/linker-flags.f902
-rw-r--r--flang/test/Lower/CUDA/cuda-data-transfer.cuf18
-rw-r--r--flang/test/Lower/CUDA/cuda-device-proc.cuf7
-rw-r--r--flang/test/Lower/CUDA/cuda-set-allocator.cuf42
-rw-r--r--flang/test/Lower/do_loop_unstructured.f9019
-rw-r--r--flang/test/Parser/OpenMP/critical-unparse-with-symbols.f904
-rw-r--r--flang/test/Semantics/OpenMP/sync-critical01.f908
-rw-r--r--flang/test/Semantics/OpenMP/sync-critical02.f908
-rw-r--r--flang/test/Transforms/stack-arrays-lifetime.fir8
-rw-r--r--flang/tools/flang-driver/driver.cpp2
-rw-r--r--libc/config/baremetal/aarch64/entrypoints.txt21
-rw-r--r--libc/config/baremetal/arm/entrypoints.txt21
-rw-r--r--libc/config/baremetal/riscv/entrypoints.txt21
-rw-r--r--libc/config/darwin/aarch64/entrypoints.txt21
-rw-r--r--libc/config/darwin/x86_64/entrypoints.txt13
-rw-r--r--libc/config/gpu/amdgpu/entrypoints.txt13
-rw-r--r--libc/config/gpu/nvptx/entrypoints.txt13
-rw-r--r--libc/config/linux/aarch64/entrypoints.txt21
-rw-r--r--libc/config/linux/arm/entrypoints.txt13
-rw-r--r--libc/config/linux/riscv/entrypoints.txt21
-rw-r--r--libc/config/linux/x86_64/entrypoints.txt22
-rw-r--r--libc/config/windows/entrypoints.txt13
-rw-r--r--libc/include/dlfcn.yaml6
-rw-r--r--libc/shared/math.h2
-rw-r--r--libc/shared/math/cbrtf.h23
-rw-r--r--libc/shared/math/cos.h23
-rw-r--r--libc/shared/rpc.h3
-rw-r--r--libc/src/__support/GPU/CMakeLists.txt7
-rw-r--r--libc/src/__support/GPU/allocator.cpp38
-rw-r--r--libc/src/__support/GPU/fixedstack.h111
-rw-r--r--libc/src/__support/math/CMakeLists.txt58
-rw-r--r--libc/src/__support/math/cbrtf.h161
-rw-r--r--libc/src/__support/math/cos.h173
-rw-r--r--libc/src/__support/math/range_reduction_double_common.h (renamed from libc/src/math/generic/range_reduction_double_common.h)24
-rw-r--r--libc/src/__support/math/range_reduction_double_fma.h (renamed from libc/src/math/generic/range_reduction_double_fma.h)18
-rw-r--r--libc/src/__support/math/range_reduction_double_nofma.h (renamed from libc/src/math/generic/range_reduction_double_nofma.h)16
-rw-r--r--libc/src/__support/math/sincos_eval.h (renamed from libc/src/math/generic/sincos_eval.h)8
-rw-r--r--libc/src/dlfcn/CMakeLists.txt5
-rw-r--r--libc/src/dlfcn/dladdr.cpp3
-rw-r--r--libc/src/dlfcn/dladdr.h2
-rw-r--r--libc/src/dlfcn/dlinfo.cpp3
-rw-r--r--libc/src/dlfcn/dlinfo.h2
-rw-r--r--libc/src/dlfcn/dlsym.cpp4
-rw-r--r--libc/src/dlfcn/dlsym.h2
-rw-r--r--libc/src/math/CMakeLists.txt17
-rw-r--r--libc/src/math/bf16add.h21
-rw-r--r--libc/src/math/bf16addf.h21
-rw-r--r--libc/src/math/bf16addf128.h21
-rw-r--r--libc/src/math/bf16addl.h21
-rw-r--r--libc/src/math/bf16sub.h21
-rw-r--r--libc/src/math/bf16subf.h21
-rw-r--r--libc/src/math/bf16subf128.h21
-rw-r--r--libc/src/math/bf16subl.h21
-rw-r--r--libc/src/math/ceilbf16.h21
-rw-r--r--libc/src/math/floorbf16.h21
-rw-r--r--libc/src/math/fmaxbf16.h21
-rw-r--r--libc/src/math/fminbf16.h21
-rw-r--r--libc/src/math/generic/CMakeLists.txt280
-rw-r--r--libc/src/math/generic/bf16add.cpp21
-rw-r--r--libc/src/math/generic/bf16addf.cpp21
-rw-r--r--libc/src/math/generic/bf16addf128.cpp21
-rw-r--r--libc/src/math/generic/bf16addl.cpp21
-rw-r--r--libc/src/math/generic/bf16sub.cpp21
-rw-r--r--libc/src/math/generic/bf16subf.cpp21
-rw-r--r--libc/src/math/generic/bf16subf128.cpp21
-rw-r--r--libc/src/math/generic/bf16subl.cpp21
-rw-r--r--libc/src/math/generic/cbrtf.cpp147
-rw-r--r--libc/src/math/generic/ceilbf16.cpp19
-rw-r--r--libc/src/math/generic/cos.cpp155
-rw-r--r--libc/src/math/generic/floorbf16.cpp21
-rw-r--r--libc/src/math/generic/fmaxbf16.cpp21
-rw-r--r--libc/src/math/generic/fminbf16.cpp21
-rw-r--r--libc/src/math/generic/roundbf16.cpp21
-rw-r--r--libc/src/math/generic/roundevenbf16.cpp21
-rw-r--r--libc/src/math/generic/sin.cpp14
-rw-r--r--libc/src/math/generic/sincos.cpp14
-rw-r--r--libc/src/math/generic/tan.cpp7
-rw-r--r--libc/src/math/generic/truncbf16.cpp21
-rw-r--r--libc/src/math/roundbf16.h22
-rw-r--r--libc/src/math/roundevenbf16.h21
-rw-r--r--libc/src/math/truncbf16.h21
-rw-r--r--libc/test/UnitTest/CMakeLists.txt1
-rw-r--r--libc/test/UnitTest/FEnvSafeTest.cpp6
-rw-r--r--libc/test/UnitTest/FEnvSafeTest.h3
-rw-r--r--libc/test/UnitTest/FPMatcher.h11
-rw-r--r--libc/test/integration/src/__support/GPU/CMakeLists.txt13
-rw-r--r--libc/test/integration/src/__support/GPU/fixedstack_test.cpp44
-rw-r--r--libc/test/shared/CMakeLists.txt2
-rw-r--r--libc/test/shared/shared_math_test.cpp4
-rw-r--r--libc/test/src/math/CMakeLists.txt112
-rw-r--r--libc/test/src/math/bf16add_test.cpp14
-rw-r--r--libc/test/src/math/bf16addf128_test.cpp14
-rw-r--r--libc/test/src/math/bf16addf_test.cpp14
-rw-r--r--libc/test/src/math/bf16addl_test.cpp14
-rw-r--r--libc/test/src/math/bf16sub_test.cpp14
-rw-r--r--libc/test/src/math/bf16subf128_test.cpp14
-rw-r--r--libc/test/src/math/bf16subf_test.cpp14
-rw-r--r--libc/test/src/math/bf16subl_test.cpp14
-rw-r--r--libc/test/src/math/smoke/CMakeLists.txt223
-rw-r--r--libc/test/src/math/smoke/CeilTest.h4
-rw-r--r--libc/test/src/math/smoke/FloorTest.h5
-rw-r--r--libc/test/src/math/smoke/RoundTest.h6
-rw-r--r--libc/test/src/math/smoke/TruncTest.h4
-rw-r--r--libc/test/src/math/smoke/bf16add_test.cpp14
-rw-r--r--libc/test/src/math/smoke/bf16addf128_test.cpp14
-rw-r--r--libc/test/src/math/smoke/bf16addf_test.cpp14
-rw-r--r--libc/test/src/math/smoke/bf16addl_test.cpp14
-rw-r--r--libc/test/src/math/smoke/bf16sub_test.cpp14
-rw-r--r--libc/test/src/math/smoke/bf16subf128_test.cpp14
-rw-r--r--libc/test/src/math/smoke/bf16subf_test.cpp14
-rw-r--r--libc/test/src/math/smoke/bf16subl_test.cpp14
-rw-r--r--libc/test/src/math/smoke/ceilbf16_test.cpp14
-rw-r--r--libc/test/src/math/smoke/floorbf16_test.cpp14
-rw-r--r--libc/test/src/math/smoke/fmaxbf16_test.cpp14
-rw-r--r--libc/test/src/math/smoke/fminbf16_test.cpp14
-rw-r--r--libc/test/src/math/smoke/roundbf16_test.cpp14
-rw-r--r--libc/test/src/math/smoke/roundevenbf16_test.cpp14
-rw-r--r--libc/test/src/math/smoke/truncbf16_test.cpp14
-rw-r--r--libc/utils/MPFRWrapper/MPFRUtils.cpp35
-rw-r--r--libc/utils/hdrgen/hdrgen/header.py5
-rw-r--r--libclc/CMakeLists.txt1
-rw-r--r--libclc/clc/include/clc/shared/binary_decl_with_scalar_second_arg.inc15
-rw-r--r--libcxx/docs/Status/Cxx2cIssues.csv2
-rw-r--r--libcxx/include/CMakeLists.txt2
-rw-r--r--libcxx/include/__expected/expected.h3
-rw-r--r--libcxx/include/__fwd/map.h31
-rw-r--r--libcxx/include/__fwd/set.h30
-rw-r--r--libcxx/include/__tree37
-rw-r--r--libcxx/include/map10
-rw-r--r--libcxx/include/module.modulemap.in2
-rw-r--r--libcxx/include/set6
-rw-r--r--libcxx/test/libcxx/atomics/atomics.types.operations/atomics.types.operations.req/atomic_fetch_add.verify.cpp4
-rw-r--r--libcxx/test/libcxx/atomics/atomics.types.operations/atomics.types.operations.req/atomic_fetch_add_explicit.verify.cpp4
-rw-r--r--libcxx/test/libcxx/atomics/atomics.types.operations/atomics.types.operations.req/atomic_fetch_sub.verify.cpp4
-rw-r--r--libcxx/test/libcxx/atomics/atomics.types.operations/atomics.types.operations.req/atomic_fetch_sub_explicit.verify.cpp4
-rw-r--r--libcxx/test/std/containers/associative/map/map.nonmember/compare.three_way.verify.cpp4
-rw-r--r--libcxx/test/std/containers/associative/map/map.ops/count0.pass.cpp4
-rw-r--r--libcxx/test/std/containers/associative/map/map.ops/equal_range0.pass.cpp7
-rw-r--r--libcxx/test/std/containers/associative/map/map.ops/find0.pass.cpp5
-rw-r--r--libcxx/test/std/containers/associative/map/map.ops/lower_bound0.pass.cpp5
-rw-r--r--libcxx/test/std/containers/associative/map/map.ops/upper_bound0.pass.cpp5
-rw-r--r--libcxx/test/std/containers/associative/multimap/multimap.nonmember/compare.three_way.verify.cpp4
-rw-r--r--libcxx/test/std/containers/associative/multimap/multimap.ops/count0.pass.cpp4
-rw-r--r--libcxx/test/std/containers/associative/multimap/multimap.ops/equal_range0.pass.cpp7
-rw-r--r--libcxx/test/std/containers/associative/multimap/multimap.ops/find0.pass.cpp5
-rw-r--r--libcxx/test/std/containers/associative/multimap/multimap.ops/lower_bound0.pass.cpp5
-rw-r--r--libcxx/test/std/containers/associative/multimap/multimap.ops/upper_bound0.pass.cpp5
-rw-r--r--libcxx/test/std/containers/sequences/array/array.overview/nttp.verify.cpp4
-rw-r--r--libcxx/test/std/numerics/rand/rand.eng/rand.eng.lcong/params.verify.cpp2
-rw-r--r--libcxx/test/std/thread/futures/futures.task/futures.task.members/ctor1.verify.cpp4
-rw-r--r--libcxx/test/std/utilities/expected/expected.expected/ctor/ctor.u.pass.cpp11
-rw-r--r--libcxx/test/std/utilities/utility/pairs/pairs.pair/nttp.verify.cpp4
-rw-r--r--libcxx/test/support/is_transparent.h11
-rw-r--r--lld/ELF/Arch/LoongArch.cpp3
-rw-r--r--lld/ELF/Arch/RISCV.cpp127
-rw-r--r--lld/ELF/BPSectionOrderer.cpp10
-rw-r--r--lld/ELF/LinkerScript.cpp14
-rw-r--r--lld/ELF/OutputSections.cpp14
-rw-r--r--lld/ELF/Target.h3
-rw-r--r--lld/ELF/Writer.cpp4
-rw-r--r--lld/MachO/Driver.cpp26
-rw-r--r--lld/test/ELF/bp-section-orderer.s90
-rw-r--r--lld/test/ELF/riscv-relax-align.s10
-rw-r--r--lld/test/ELF/riscv-relax-emit-relocs.s4
-rw-r--r--lld/test/ELF/riscv-relocatable-align.s131
-rw-r--r--lld/test/MachO/stabs.s16
-rw-r--r--lldb/include/lldb/API/SBTarget.h8
-rw-r--r--lldb/include/lldb/Breakpoint/BreakpointResolver.h9
-rw-r--r--lldb/include/lldb/Breakpoint/BreakpointResolverName.h2
-rw-r--r--lldb/include/lldb/Core/Disassembler.h2
-rw-r--r--lldb/include/lldb/Protocol/MCP/Protocol.h2
-rw-r--r--lldb/include/lldb/Protocol/MCP/Server.h70
-rw-r--r--lldb/include/lldb/Symbol/Symbol.h2
-rw-r--r--lldb/include/lldb/Target/Target.h11
-rw-r--r--lldb/packages/Python/lldbsuite/test/tools/lldb-dap/dap_server.py39
-rw-r--r--lldb/packages/Python/lldbsuite/test/tools/lldb-dap/lldbdap_testcase.py22
-rw-r--r--lldb/source/API/SBTarget.cpp46
-rw-r--r--lldb/source/Breakpoint/BreakpointResolver.cpp39
-rw-r--r--lldb/source/Breakpoint/BreakpointResolverAddress.cpp5
-rw-r--r--lldb/source/Breakpoint/BreakpointResolverName.cpp15
-rw-r--r--lldb/source/Core/DemangledNameInfo.cpp11
-rw-r--r--lldb/source/Core/Disassembler.cpp10
-rw-r--r--lldb/source/Core/DynamicLoader.cpp2
-rw-r--r--lldb/source/Core/Mangled.cpp3
-rw-r--r--lldb/source/Expression/IRExecutionUnit.cpp2
-rw-r--r--lldb/source/Plugins/DynamicLoader/Darwin-Kernel/DynamicLoaderDarwinKernel.cpp3
-rw-r--r--lldb/source/Plugins/DynamicLoader/MacOSX-DYLD/DynamicLoaderMacOS.cpp7
-rw-r--r--lldb/source/Plugins/ExpressionParser/Clang/ClangASTImporter.cpp37
-rw-r--r--lldb/source/Plugins/ExpressionParser/Clang/ClangASTSource.cpp5
-rw-r--r--lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionDeclMap.cpp4
-rw-r--r--lldb/source/Plugins/ExpressionParser/Clang/ClangPersistentVariables.cpp6
-rw-r--r--lldb/source/Plugins/ExpressionParser/Clang/NameSearchContext.cpp2
-rw-r--r--lldb/source/Plugins/Language/CPlusPlus/CPlusPlusLanguage.cpp25
-rw-r--r--lldb/source/Plugins/Language/CPlusPlus/CPlusPlusLanguage.h4
-rw-r--r--lldb/source/Plugins/Language/ObjC/NSDictionary.cpp3
-rw-r--r--lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV1.cpp2
-rw-r--r--lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV2.cpp2
-rw-r--r--lldb/source/Plugins/LanguageRuntime/ObjC/GNUstepObjCRuntime/GNUstepObjCRuntime.cpp3
-rw-r--r--lldb/source/Plugins/Process/Utility/RegisterInfoPOSIX_arm64.h4
-rw-r--r--lldb/source/Plugins/Protocol/MCP/ProtocolServerMCP.cpp252
-rw-r--r--lldb/source/Plugins/Protocol/MCP/ProtocolServerMCP.h55
-rw-r--r--lldb/source/Plugins/RegisterTypeBuilder/RegisterTypeBuilderClang.cpp4
-rw-r--r--lldb/source/Plugins/ScriptInterpreter/Python/PythonDataObjects.cpp28
-rw-r--r--lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPython.cpp40
-rw-r--r--lldb/source/Plugins/StructuredData/DarwinLog/StructuredDataDarwinLog.cpp4
-rw-r--r--lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp54
-rw-r--r--lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.h7
-rw-r--r--lldb/source/Plugins/SymbolFile/NativePDB/PdbAstBuilder.cpp11
-rw-r--r--lldb/source/Plugins/SymbolFile/PDB/PDBASTParser.cpp11
-rw-r--r--lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp243
-rw-r--r--lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.h9
-rw-r--r--lldb/source/Protocol/MCP/CMakeLists.txt1
-rw-r--r--lldb/source/Protocol/MCP/Server.cpp236
-rw-r--r--lldb/source/Target/Target.cpp56
-rw-r--r--lldb/test/API/functionalities/data-formatter/data-formatter-stl/generic/string/TestDataFormatterStdString.py3
-rw-r--r--lldb/test/API/lang/cpp/expr-definition-in-dylib/TestExprDefinitionInDylib.py7
-rw-r--r--lldb/test/API/lang/cpp/expr-definition-in-dylib/lib.cpp10
-rw-r--r--lldb/test/API/lang/cpp/expr-definition-in-dylib/lib.h7
-rw-r--r--lldb/test/API/lang/cpp/expr-definition-in-dylib/main.cpp1
-rw-r--r--lldb/test/API/tools/lldb-dap/breakpoint-assembly/TestDAP_breakpointAssembly.py76
-rw-r--r--lldb/test/API/tools/lldb-dap/completions/TestDAP_completions.py2
-rw-r--r--lldb/test/API/tools/lldb-dap/memory/TestDAP_memory.py2
-rw-r--r--lldb/test/Shell/SymbolFile/DWARF/x86/dwp-foreign-type-units.cpp8
-rw-r--r--lldb/test/Shell/SymbolFile/PDB/ast-restore.test6
-rw-r--r--lldb/test/Shell/SymbolFile/PDB/calling-conventions-arm.test5
-rw-r--r--lldb/test/Shell/SymbolFile/PDB/calling-conventions-x86.test12
-rw-r--r--lldb/test/Shell/SymbolFile/PDB/vbases.test1
-rw-r--r--lldb/tools/lldb-dap/Breakpoint.cpp31
-rw-r--r--lldb/tools/lldb-dap/CMakeLists.txt3
-rw-r--r--lldb/tools/lldb-dap/DAP.cpp13
-rw-r--r--lldb/tools/lldb-dap/Handler/SetBreakpointsRequestHandler.cpp1
-rw-r--r--lldb/tools/lldb-dap/Protocol/DAPTypes.cpp36
-rw-r--r--lldb/tools/lldb-dap/Protocol/DAPTypes.h53
-rw-r--r--lldb/tools/lldb-dap/Protocol/ProtocolTypes.cpp5
-rw-r--r--lldb/tools/lldb-dap/Protocol/ProtocolTypes.h8
-rw-r--r--lldb/tools/lldb-dap/SourceBreakpoint.cpp101
-rw-r--r--lldb/tools/lldb-dap/SourceBreakpoint.h7
-rw-r--r--lldb/tools/lldb-rpc-gen/lldb-rpc-gen.cpp5
-rw-r--r--lldb/unittests/Core/MangledTest.cpp8
-rw-r--r--lldb/unittests/DAP/ProtocolTypesTest.cpp10
-rw-r--r--lldb/unittests/Host/MainLoopTest.cpp3
-rw-r--r--lldb/unittests/Instruction/ARM64/TestAArch64Emulator.cpp6
-rw-r--r--llvm/CMakeLists.txt14
-rw-r--r--llvm/cmake/modules/HandleLLVMOptions.cmake4
-rw-r--r--llvm/cmake/modules/LLVMProcessSources.cmake15
-rw-r--r--llvm/docs/LangRef.rst73
-rw-r--r--llvm/docs/MergeFunctions.rst38
-rw-r--r--llvm/docs/NVPTXUsage.rst9
-rw-r--r--llvm/docs/ReleaseNotes.md8
-rw-r--r--llvm/docs/SourceLevelDebugging.rst172
-rw-r--r--llvm/include/llvm-c/Core.h1
-rw-r--r--llvm/include/llvm/ADT/DenseMap.h29
-rw-r--r--llvm/include/llvm/ADT/SmallPtrSet.h53
-rw-r--r--llvm/include/llvm/ADT/StringMap.h10
-rw-r--r--llvm/include/llvm/ADT/StringRef.h2
-rw-r--r--llvm/include/llvm/Analysis/DXILResource.h4
-rw-r--r--llvm/include/llvm/Analysis/Delinearization.h29
-rw-r--r--llvm/include/llvm/Analysis/DependenceAnalysis.h1802
-rw-r--r--llvm/include/llvm/Analysis/InlineCost.h6
-rw-r--r--llvm/include/llvm/Analysis/LoopInfo.h5
-rw-r--r--llvm/include/llvm/Analysis/TargetTransformInfoImpl.h11
-rw-r--r--llvm/include/llvm/AsmParser/LLToken.h1
-rw-r--r--llvm/include/llvm/BinaryFormat/DXContainer.h3
-rw-r--r--llvm/include/llvm/Bitcode/LLVMBitCodes.h3
-rw-r--r--llvm/include/llvm/CodeGen/BasicTTIImpl.h49
-rw-r--r--llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h17
-rw-r--r--llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h4
-rw-r--r--llvm/include/llvm/CodeGen/SelectionDAG.h13
-rw-r--r--llvm/include/llvm/CodeGen/SelectionDAGISel.h2
-rw-r--r--llvm/include/llvm/CodeGen/SelectionDAGTargetInfo.h4
-rw-r--r--llvm/include/llvm/CodeGen/TargetCallingConv.h17
-rw-r--r--llvm/include/llvm/CodeGen/TargetLowering.h8
-rw-r--r--llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h230
-rw-r--r--llvm/include/llvm/IR/Constants.h2
-rw-r--r--llvm/include/llvm/IR/DebugInfoMetadata.h18
-rw-r--r--llvm/include/llvm/IR/GenericFloatingPointPredicateUtils.h24
-rw-r--r--llvm/include/llvm/IR/IRBuilder.h14
-rw-r--r--llvm/include/llvm/IR/InstVisitor.h1
-rw-r--r--llvm/include/llvm/IR/Instruction.def53
-rw-r--r--llvm/include/llvm/IR/Instructions.h40
-rw-r--r--llvm/include/llvm/IR/Intrinsics.h6
-rw-r--r--llvm/include/llvm/IR/Intrinsics.td16
-rw-r--r--llvm/include/llvm/IR/IntrinsicsAArch64.td32
-rw-r--r--llvm/include/llvm/IR/IntrinsicsNVVM.td13
-rw-r--r--llvm/include/llvm/IR/IntrinsicsRISCV.td13
-rw-r--r--llvm/include/llvm/IR/Operator.h31
-rw-r--r--llvm/include/llvm/IR/RuntimeLibcalls.td3
-rw-r--r--llvm/include/llvm/MC/MCSection.h13
-rw-r--r--llvm/include/llvm/ProfileData/InstrProfWriter.h2
-rw-r--r--llvm/include/llvm/SandboxIR/Instruction.h4
-rw-r--r--llvm/include/llvm/SandboxIR/Values.def1
-rw-r--r--llvm/include/llvm/Support/Atomic.h12
-rw-r--r--llvm/include/llvm/Support/DebugLog.h21
-rw-r--r--llvm/include/llvm/Support/GraphWriter.h50
-rw-r--r--llvm/include/llvm/Target/TargetCallingConv.td6
-rw-r--r--llvm/include/llvm/TextAPI/Architecture.def5
-rw-r--r--llvm/include/llvm/Transforms/Utils/Cloning.h33
-rw-r--r--llvm/include/llvm/Transforms/Utils/LoopUtils.h3
-rw-r--r--llvm/lib/Analysis/CaptureTracking.cpp6
-rw-r--r--llvm/lib/Analysis/ConstantFolding.cpp3
-rw-r--r--llvm/lib/Analysis/Delinearization.cpp200
-rw-r--r--llvm/lib/Analysis/DependenceAnalysis.cpp510
-rw-r--r--llvm/lib/Analysis/Loads.cpp3
-rw-r--r--llvm/lib/Analysis/LoopInfo.cpp22
-rw-r--r--llvm/lib/Analysis/MemoryDependenceAnalysis.cpp10
-rw-r--r--llvm/lib/Analysis/MemoryLocation.cpp2
-rw-r--r--llvm/lib/Analysis/StackLifetime.cpp2
-rw-r--r--llvm/lib/Analysis/ValueTracking.cpp3
-rw-r--r--llvm/lib/Analysis/VectorUtils.cpp6
-rw-r--r--llvm/lib/AsmParser/LLLexer.cpp1
-rw-r--r--llvm/lib/AsmParser/LLParser.cpp2
-rw-r--r--llvm/lib/BinaryFormat/DXContainer.cpp11
-rw-r--r--llvm/lib/Bitcode/Reader/BitcodeReader.cpp1
-rw-r--r--llvm/lib/Bitcode/Writer/BitcodeWriter.cpp1
-rw-r--r--llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp1
-rw-r--r--llvm/lib/CodeGen/AtomicExpandPass.cpp13
-rw-r--r--llvm/lib/CodeGen/GlobalISel/CallLowering.cpp9
-rw-r--r--llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp2
-rw-r--r--llvm/lib/CodeGen/MIRParser/MIRParser.cpp2
-rw-r--r--llvm/lib/CodeGen/RegisterPressure.cpp4
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp45
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp57
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp25
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h1
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp18
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp9
-rw-r--r--llvm/lib/CodeGen/TargetLoweringBase.cpp5
-rw-r--r--llvm/lib/DebugInfo/DWARF/DWARFGdbIndex.cpp22
-rw-r--r--llvm/lib/ExecutionEngine/Orc/EPCIndirectionUtils.cpp2
-rw-r--r--llvm/lib/Frontend/HLSL/HLSLRootSignature.cpp9
-rw-r--r--llvm/lib/Frontend/HLSL/RootSignatureMetadata.cpp11
-rw-r--r--llvm/lib/Frontend/HLSL/RootSignatureValidations.cpp2
-rw-r--r--llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp445
-rw-r--r--llvm/lib/IR/AutoUpgrade.cpp26
-rw-r--r--llvm/lib/IR/ConstantFold.cpp1
-rw-r--r--llvm/lib/IR/ConstantRange.cpp1
-rw-r--r--llvm/lib/IR/Constants.cpp20
-rw-r--r--llvm/lib/IR/DebugInfoMetadata.cpp10
-rw-r--r--llvm/lib/IR/Globals.cpp1
-rw-r--r--llvm/lib/IR/IRBuilder.cpp20
-rw-r--r--llvm/lib/IR/Instruction.cpp1
-rw-r--r--llvm/lib/IR/Instructions.cpp52
-rw-r--r--llvm/lib/IR/LLVMContextImpl.h27
-rw-r--r--llvm/lib/IR/Value.cpp46
-rw-r--r--llvm/lib/IR/Verifier.cpp33
-rw-r--r--llvm/lib/MC/MCObjectStreamer.cpp2
-rw-r--r--llvm/lib/MC/MCSection.cpp2
-rw-r--r--llvm/lib/ProfileData/InstrProfWriter.cpp63
-rw-r--r--llvm/lib/SandboxIR/Context.cpp1
-rw-r--r--llvm/lib/SandboxIR/Instruction.cpp3
-rw-r--r--llvm/lib/Support/SmallPtrSet.cpp12
-rw-r--r--llvm/lib/Support/StringRef.cpp6
-rw-r--r--llvm/lib/Support/regcomp.c2
-rw-r--r--llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp10
-rw-r--r--llvm/lib/Target/AArch64/AArch64ISelLowering.cpp204
-rw-r--r--llvm/lib/Target/AArch64/AArch64InstrFormats.td10
-rw-r--r--llvm/lib/Target/AArch64/AArch64InstrInfo.td4
-rw-r--r--llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp64
-rw-r--r--llvm/lib/Target/AArch64/AArch64Processors.td11
-rw-r--r--llvm/lib/Target/AArch64/AArch64StackTagging.cpp3
-rw-r--r--llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp99
-rw-r--r--llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h8
-rw-r--r--llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp4
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp85
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp12
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp38
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp8
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp16
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPURewriteAGPRCopyMFMA.cpp13
-rw-r--r--llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp37
-rw-r--r--llvm/lib/Target/AMDGPU/GCNRegPressure.cpp56
-rw-r--r--llvm/lib/Target/AMDGPU/GCNRegPressure.h43
-rw-r--r--llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp145
-rw-r--r--llvm/lib/Target/AMDGPU/GCNSchedStrategy.h9
-rw-r--r--llvm/lib/Target/AMDGPU/SIFoldOperands.cpp10
-rw-r--r--llvm/lib/Target/AMDGPU/SIISelLowering.cpp29
-rw-r--r--llvm/lib/Target/AMDGPU/SIInsertHardClauses.cpp6
-rw-r--r--llvm/lib/Target/AMDGPU/SIInstrInfo.cpp67
-rw-r--r--llvm/lib/Target/AMDGPU/SIInstrInfo.h13
-rw-r--r--llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp6
-rw-r--r--llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h15
-rw-r--r--llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp14
-rw-r--r--llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h2
-rw-r--r--llvm/lib/Target/ARM/ARMISelLowering.cpp140
-rw-r--r--llvm/lib/Target/ARM/ARMISelLowering.h3
-rw-r--r--llvm/lib/Target/AVR/AVRISelLowering.cpp7
-rw-r--r--llvm/lib/Target/AVR/AVRISelLowering.h2
-rw-r--r--llvm/lib/Target/AVR/AVRTargetMachine.cpp12
-rw-r--r--llvm/lib/Target/AVR/AVRTargetMachine.h2
-rw-r--r--llvm/lib/Target/AVR/AVRTargetTransformInfo.cpp25
-rw-r--r--llvm/lib/Target/AVR/AVRTargetTransformInfo.h51
-rw-r--r--llvm/lib/Target/AVR/CMakeLists.txt4
-rw-r--r--llvm/lib/Target/DirectX/DXILOpLowering.cpp2
-rw-r--r--llvm/lib/Target/DirectX/DXILWriter/DXILWriterPass.cpp98
-rw-r--r--llvm/lib/Target/DirectX/DirectXTargetTransformInfo.cpp2
-rw-r--r--llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp15
-rw-r--r--llvm/lib/Target/LoongArch/LoongArchISelLowering.h2
-rw-r--r--llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp3
-rw-r--r--llvm/lib/Target/Mips/MipsCCState.cpp5
-rw-r--r--llvm/lib/Target/Mips/MipsCCState.h8
-rw-r--r--llvm/lib/Target/Mips/MipsCallLowering.cpp2
-rw-r--r--llvm/lib/Target/Mips/MipsCallingConv.td8
-rw-r--r--llvm/lib/Target/Mips/MipsISelLowering.cpp2
-rw-r--r--llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp150
-rw-r--r--llvm/lib/Target/NVPTX/NVPTXISelLowering.h6
-rw-r--r--llvm/lib/Target/NVPTX/NVPTXInstrInfo.td44
-rw-r--r--llvm/lib/Target/NVPTX/NVPTXIntrinsics.td62
-rw-r--r--llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp8
-rw-r--r--llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h7
-rw-r--r--llvm/lib/Target/PowerPC/PPCCCState.h30
-rw-r--r--llvm/lib/Target/PowerPC/PPCISelLowering.cpp13
-rw-r--r--llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp17
-rw-r--r--llvm/lib/Target/PowerPC/PPCSelectionDAGInfo.cpp6
-rw-r--r--llvm/lib/Target/PowerPC/PPCSelectionDAGInfo.h5
-rw-r--r--llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp6
-rw-r--r--llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp10
-rw-r--r--llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp32
-rw-r--r--llvm/lib/Target/RISCV/RISCVCallingConv.cpp14
-rw-r--r--llvm/lib/Target/RISCV/RISCVCallingConv.h6
-rw-r--r--llvm/lib/Target/RISCV/RISCVISelLowering.cpp170
-rw-r--r--llvm/lib/Target/RISCV/RISCVInstrInfoZb.td5
-rw-r--r--llvm/lib/Target/RISCV/RISCVSchedSpacemitX60.td200
-rw-r--r--llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h3
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVPrepareFunctions.cpp29
-rw-r--r--llvm/lib/Target/Sparc/SparcISelLowering.cpp2
-rw-r--r--llvm/lib/Target/SystemZ/SystemZCallingConv.h15
-rw-r--r--llvm/lib/Target/SystemZ/SystemZCallingConv.td38
-rw-r--r--llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp3
-rw-r--r--llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.h3
-rw-r--r--llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp14
-rw-r--r--llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.h3
-rw-r--r--llvm/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp9
-rw-r--r--llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp4
-rw-r--r--llvm/lib/Target/X86/GISel/X86CallLowering.cpp5
-rw-r--r--llvm/lib/Transforms/Coroutines/CoroFrame.cpp2
-rw-r--r--llvm/lib/Transforms/IPO/ExpandVariadics.cpp21
-rw-r--r--llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp48
-rw-r--r--llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp2
-rw-r--r--llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp20
-rw-r--r--llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp16
-rw-r--r--llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp2
-rw-r--r--llvm/lib/Transforms/Instrumentation/TypeSanitizer.cpp20
-rw-r--r--llvm/lib/Transforms/Scalar/DFAJumpThreading.cpp38
-rw-r--r--llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp2
-rw-r--r--llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp4
-rw-r--r--llvm/lib/Transforms/Scalar/InferAlignment.cpp49
-rw-r--r--llvm/lib/Transforms/Scalar/LICM.cpp14
-rw-r--r--llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp3
-rw-r--r--llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp6
-rw-r--r--llvm/lib/Transforms/Scalar/NewGVN.cpp2
-rw-r--r--llvm/lib/Transforms/Scalar/SROA.cpp27
-rw-r--r--llvm/lib/Transforms/Utils/CodeExtractor.cpp6
-rw-r--r--llvm/lib/Transforms/Utils/InlineFunction.cpp170
-rw-r--r--llvm/lib/Transforms/Utils/Local.cpp2
-rw-r--r--llvm/lib/Transforms/Utils/MemoryTaggingSupport.cpp2
-rw-r--r--llvm/lib/Transforms/Vectorize/LoopVectorize.cpp138
-rw-r--r--llvm/lib/Transforms/Vectorize/SandboxVectorizer/Legality.cpp1
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlan.cpp23
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlan.h10
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlanConstruction.cpp92
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp61
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlanTransforms.h45
-rw-r--r--llvm/test/Analysis/BasicAA/modref.ll10
-rw-r--r--llvm/test/Analysis/BasicAA/phi-values-usage.ll4
-rw-r--r--llvm/test/Analysis/CallGraph/ignore-assumelike-calls.ll4
-rw-r--r--llvm/test/Analysis/CostModel/AArch64/arith-fp.ll147
-rw-r--r--llvm/test/Analysis/CostModel/AArch64/extract_float.ll15
-rw-r--r--llvm/test/Analysis/CostModel/AArch64/histograms.ll178
-rw-r--r--llvm/test/Analysis/CostModel/AArch64/reduce-fadd.ll68
-rw-r--r--llvm/test/Analysis/CostModel/AArch64/sve-arith-fp.ll52
-rw-r--r--llvm/test/Analysis/CostModel/AArch64/sve-intrinsics.ll24
-rw-r--r--llvm/test/Analysis/CostModel/AArch64/vec3-ops.ll4
-rw-r--r--llvm/test/Analysis/CostModel/SystemZ/intrinsic-cost-crash.ll3
-rw-r--r--llvm/test/Analysis/CostModel/X86/free-intrinsics.ll16
-rw-r--r--llvm/test/Analysis/CostModel/free-intrinsics-datalayout.ll16
-rw-r--r--llvm/test/Analysis/CostModel/free-intrinsics-no_info.ll16
-rw-r--r--llvm/test/Analysis/DDG/basic-loopnest.ll5
-rw-r--r--llvm/test/Analysis/Delinearization/fixed_size_array.ll499
-rw-r--r--llvm/test/Analysis/DependenceAnalysis/Coupled.ll4
-rw-r--r--llvm/test/Analysis/DependenceAnalysis/DADelin.ll85
-rw-r--r--llvm/test/Analysis/IR2Vec/Inputs/dummy_2D_vocab.json1
-rw-r--r--llvm/test/Analysis/IR2Vec/Inputs/reference_default_vocab_print.txt1
-rw-r--r--llvm/test/Analysis/IR2Vec/Inputs/reference_wtd1_vocab_print.txt1
-rw-r--r--llvm/test/Analysis/IR2Vec/Inputs/reference_wtd2_vocab_print.txt1
-rw-r--r--llvm/test/Analysis/KernelInfo/openmp/nvptx.ll18
-rw-r--r--llvm/test/Analysis/LazyValueAnalysis/invalidation.ll8
-rw-r--r--llvm/test/Analysis/MemorySSA/lifetime-simple.ll12
-rw-r--r--llvm/test/Analysis/MemorySSA/phi-translation.ll8
-rw-r--r--llvm/test/Analysis/MemorySSA/pr43044.ll2
-rw-r--r--llvm/test/Analysis/MemorySSA/pr49859.ll22
-rw-r--r--llvm/test/Analysis/MemorySSA/renamephis.ll2
-rw-r--r--llvm/test/Analysis/ScalarEvolution/add-expr-pointer-operand-sorting.ll4
-rw-r--r--llvm/test/Analysis/ScalarEvolution/sdiv.ll8
-rw-r--r--llvm/test/Analysis/ScalarEvolution/srem.ll8
-rw-r--r--llvm/test/Analysis/ScopedNoAliasAA/alias-scope-merging.ll8
-rw-r--r--llvm/test/Analysis/StackSafetyAnalysis/lifetime.ll410
-rw-r--r--llvm/test/Analysis/StackSafetyAnalysis/local.ll56
-rw-r--r--llvm/test/Analysis/ValueTracking/pr152700.ll28
-rw-r--r--llvm/test/Assembler/auto_upgrade_intrinsics.ll12
-rw-r--r--llvm/test/Assembler/autoupgrade-lifetime-intrinsics.ll17
-rw-r--r--llvm/test/Assembler/ptrtoaddr-invalid-constexpr.ll56
-rw-r--r--llvm/test/Assembler/ptrtoaddr-invalid.ll84
-rw-r--r--llvm/test/Assembler/ptrtoaddr.ll27
-rw-r--r--llvm/test/Bitcode/ptrtoaddr.ll27
-rw-r--r--llvm/test/CMakeLists.txt1
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/vararg.mir2
-rw-r--r--llvm/test/CodeGen/AArch64/aarch64-histcnt-dag-combine-hang.ll70
-rw-r--r--llvm/test/CodeGen/AArch64/aarch64-split-logic-bitmask-immediate.ll (renamed from llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll)172
-rw-r--r--llvm/test/CodeGen/AArch64/abd-combine.ll45
-rw-r--r--llvm/test/CodeGen/AArch64/arm64-neon-3vdiff.ll38
-rw-r--r--llvm/test/CodeGen/AArch64/arm64-neon-aba-abd.ll66
-rw-r--r--llvm/test/CodeGen/AArch64/arm64-neon-mul-div.ll12
-rw-r--r--llvm/test/CodeGen/AArch64/arm64-vabs.ll58
-rw-r--r--llvm/test/CodeGen/AArch64/arm64-vmul.ll73
-rw-r--r--llvm/test/CodeGen/AArch64/bsp_implicit_ops.mir20
-rw-r--r--llvm/test/CodeGen/AArch64/lifetime-poison.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/stack-tagging-initializer-merge.ll84
-rw-r--r--llvm/test/CodeGen/AArch64/stack-tagging-merge-past-memcpy.mir8
-rw-r--r--llvm/test/CodeGen/AArch64/stack-tagging-untag-placement.ll14
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.atomic.add.ll473
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.atomic.cmpswap.ll1096
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.ll1219
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.tfe.ll697
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.store.ll1194
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.atomic.add.ll610
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.atomic.cmpswap.ll1168
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.ll1062
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.tfe.ll757
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.store.ll652
-rw-r--r--llvm/test/CodeGen/AMDGPU/bf16-math.ll31
-rw-r--r--llvm/test/CodeGen/AMDGPU/bf16.ll1141
-rw-r--r--llvm/test/CodeGen/AMDGPU/expand-variadic-call.ll116
-rw-r--r--llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll4
-rw-r--r--llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll41
-rw-r--r--llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll5
-rw-r--r--llvm/test/CodeGen/AMDGPU/hard-clauses-gfx1250.mir608
-rw-r--r--llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-agpr-negative-tests.mir524
-rw-r--r--llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-av-with-load-source.mir1044
-rw-r--r--llvm/test/CodeGen/AMDGPU/integer-canonicalizing-src-modifiers.ll237
-rw-r--r--llvm/test/CodeGen/AMDGPU/integer-select-src-modifiers.ll1011
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll1524
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.buffer.load.ll705
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.atomic.fadd.ll99
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.load.tfe.ll1
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.store.ll1
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.atomic.buffer.load.ll705
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2bf16.ll218
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd_nortn.ll128
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd_rtn.ll143
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.load.bf16.ll35
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.store.bf16.ll23
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.atomic.buffer.load.ll868
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.load.tfe.ll1
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.store.ll111
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.atomic.buffer.load.ll868
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fadd_nortn.ll104
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fadd_rtn.ll110
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32.ll420
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32.ll420
-rw-r--r--llvm/test/CodeGen/AMDGPU/load-constant-always-uniform.ll13
-rw-r--r--llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-pointer-ops.ll50
-rw-r--r--llvm/test/CodeGen/AMDGPU/machine-scheduler-sink-trivial-remats-attr.mir256
-rw-r--r--llvm/test/CodeGen/AMDGPU/machine-scheduler-sink-trivial-remats.mir8
-rw-r--r--llvm/test/CodeGen/AMDGPU/mad-mix-bf16.ll5
-rw-r--r--llvm/test/CodeGen/AMDGPU/mad-mix-lo-bf16.ll20
-rw-r--r--llvm/test/CodeGen/AMDGPU/packed-fp32.ll917
-rw-r--r--llvm/test/CodeGen/AMDGPU/ps-shader-arg-count.ll6
-rw-r--r--llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll450
-rw-r--r--llvm/test/CodeGen/AMDGPU/saddsat.ll52
-rw-r--r--llvm/test/CodeGen/AMDGPU/sgpr-count-graphics.ll38
-rw-r--r--llvm/test/CodeGen/AMDGPU/ssubsat.ll378
-rw-r--r--llvm/test/CodeGen/AMDGPU/test_isel_single_lane.ll47
-rw-r--r--llvm/test/CodeGen/AMDGPU/wave_dispatch_regs.ll11
-rw-r--r--llvm/test/CodeGen/AMDGPU/wqm.mir277
-rw-r--r--llvm/test/CodeGen/ARM/scmp.ll48
-rw-r--r--llvm/test/CodeGen/ARM/ucmp.ll36
-rw-r--r--llvm/test/CodeGen/AVR/bug-143247.ll4
-rw-r--r--llvm/test/CodeGen/AVR/cmp.ll15
-rw-r--r--llvm/test/CodeGen/AVR/half.ll534
-rw-r--r--llvm/test/CodeGen/AVR/issue-151080.ll95
-rw-r--r--llvm/test/CodeGen/AVR/llvm.sincos.ll70
-rw-r--r--llvm/test/CodeGen/AVR/load.ll36
-rw-r--r--llvm/test/CodeGen/AVR/shift.ll2
-rw-r--r--llvm/test/CodeGen/AVR/store.ll12
-rw-r--r--llvm/test/CodeGen/BPF/loop-exit-cond.ll16
-rw-r--r--llvm/test/CodeGen/BPF/vla.ll20
-rw-r--r--llvm/test/CodeGen/DirectX/Binding/binding-overlap-7.ll35
-rw-r--r--llvm/test/CodeGen/DirectX/ShaderFlags/lifetimes-noint64op.ll8
-rw-r--r--llvm/test/CodeGen/DirectX/imad.ll102
-rw-r--r--llvm/test/CodeGen/DirectX/legalize-lifetimes-valver-1.5.ll4
-rw-r--r--llvm/test/CodeGen/DirectX/legalize-lifetimes-valver-1.6.ll8
-rw-r--r--llvm/test/CodeGen/DirectX/legalize-memset.ll26
-rw-r--r--llvm/test/CodeGen/DirectX/umad.ll102
-rw-r--r--llvm/test/CodeGen/Generic/half.ll87
-rw-r--r--llvm/test/CodeGen/LoongArch/lasx/shuffle-as-permute-and-shuffle.ll113
-rw-r--r--llvm/test/CodeGen/MIR/AMDGPU/long-branch-reg-all-sgpr-used.ll4
-rw-r--r--llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-after-pei.ll2
-rw-r--r--llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-long-branch-reg-debug.ll2
-rw-r--r--llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-long-branch-reg.ll2
-rw-r--r--llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-no-ir.mir8
-rw-r--r--llvm/test/CodeGen/MIR/AMDGPU/machine-function-info.ll8
-rw-r--r--llvm/test/CodeGen/NVPTX/frameindex-lifetime.ll4
-rw-r--r--llvm/test/CodeGen/NVPTX/prefetch-inferas-test.ll80
-rw-r--r--llvm/test/CodeGen/NVPTX/prefetch.ll43
-rw-r--r--llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll1501
-rw-r--r--llvm/test/CodeGen/NVPTX/variadics-lowering.ll16
-rw-r--r--llvm/test/CodeGen/PowerPC/NoCRFieldRedefWhenSpillingCRBIT.mir8
-rw-r--r--llvm/test/CodeGen/PowerPC/memintr32.ll2
-rw-r--r--llvm/test/CodeGen/PowerPC/memintr64.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/half-convert.ll108
-rw-r--r--llvm/test/CodeGen/RISCV/rv64-half-convert.ll21
-rw-r--r--llvm/test/CodeGen/RISCV/rv64zbkb.ll30
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ssegN-store.ll72
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vxrm-insert-out-of-loop.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll26
-rw-r--r--llvm/test/CodeGen/Thumb/scmp.ll489
-rw-r--r--llvm/test/CodeGen/Thumb/ucmp.ll445
-rw-r--r--llvm/test/CodeGen/WebAssembly/expand-variadic-call.ll116
-rw-r--r--llvm/test/CodeGen/WebAssembly/lower-em-sjlj-alloca.ll12
-rw-r--r--llvm/test/CodeGen/WebAssembly/ref-test-func.ll30
-rw-r--r--llvm/test/CodeGen/WebAssembly/returned.ll4
-rw-r--r--llvm/test/CodeGen/X86/GlobalISel/ptrtoaddr.ll109
-rw-r--r--llvm/test/CodeGen/X86/pr140491-sincos-lifetimes.ll12
-rw-r--r--llvm/test/CodeGen/X86/pr152630.ll34
-rw-r--r--llvm/test/CodeGen/X86/ptrtoaddr.ll113
-rw-r--r--llvm/test/CodeGen/X86/select-optimize.ll6
-rw-r--r--llvm/test/DebugInfo/KeyInstructions/debugify.ll5
-rw-r--r--llvm/test/DebugInfo/KeyInstructions/lit.local.cfg2
-rw-r--r--llvm/test/Instrumentation/AddressSanitizer/lifetime-uar-uas.ll7
-rw-r--r--llvm/test/Instrumentation/AddressSanitizer/lifetime.ll87
-rw-r--r--llvm/test/Instrumentation/AddressSanitizer/remove-memory-effects.ll4
-rw-r--r--llvm/test/Instrumentation/HWAddressSanitizer/RISCV/exception-lifetime.ll16
-rw-r--r--llvm/test/Instrumentation/HWAddressSanitizer/RISCV/use-after-scope-setjmp.ll8
-rw-r--r--llvm/test/Instrumentation/HWAddressSanitizer/X86/alloca.ll16
-rw-r--r--llvm/test/Instrumentation/HWAddressSanitizer/exception-lifetime.ll16
-rw-r--r--llvm/test/Instrumentation/HWAddressSanitizer/stack-coloring.ll12
-rw-r--r--llvm/test/Instrumentation/HWAddressSanitizer/stack-safety-analysis.ll102
-rw-r--r--llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope-setjmp.ll8
-rw-r--r--llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope.ll78
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg-kmsan.ll8
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg.ll8
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg_shadow.ll100
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/ARM32/vararg-arm32.ll12
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/LoongArch/vararg-loongarch64.ll8
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64.ll8
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64el.ll8
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mips.ll12
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mipsel.ll12
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64.ll8
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64le.ll8
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppc.ll12
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppcle.ll12
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/RISCV32/vararg-riscv32.ll12
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg-kernel.ll8
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg.ll8
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/X86/vararg_call.ll8
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/X86/vararg_shadow.ll100
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/alloca.ll18
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/i386/vararg_call.ll20
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/i386/vararg_shadow.ll100
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll12
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/msan_llvm_launder_invariant.ll8
-rw-r--r--llvm/test/Instrumentation/TypeSanitizer/alloca.ll61
-rw-r--r--llvm/test/MC/AMDGPU/gfx1250_asm_vop3p_err.s74
-rw-r--r--llvm/test/MC/Disassembler/RISCV/riscv-mapping-symbols.s20
-rw-r--r--llvm/test/MC/ELF/many-instructions.s3
-rw-r--r--llvm/test/MC/RISCV/Relocations/align-after-relax.s50
-rw-r--r--llvm/test/MC/RISCV/Relocations/align-norvc.s23
-rw-r--r--llvm/test/MC/RISCV/Relocations/mc-dump.s8
-rw-r--r--llvm/test/MC/RISCV/align-option-relax.s23
-rw-r--r--llvm/test/MC/RISCV/align.s45
-rw-r--r--llvm/test/MC/RISCV/cfi-advance.s28
-rw-r--r--llvm/test/MC/RISCV/large-instructions.s29
-rw-r--r--llvm/test/MC/RISCV/large-instructions.test60
-rw-r--r--llvm/test/MC/RISCV/nop-slide.s19
-rw-r--r--llvm/test/MC/RISCV/rvv/vsetvl-invalid.s24
-rw-r--r--llvm/test/TableGen/intrinsic-attrs.td18
-rw-r--r--llvm/test/Transforms/AddDiscriminators/call.ll8
-rw-r--r--llvm/test/Transforms/AtomicExpand/PowerPC/cmpxchg.ll4
-rw-r--r--llvm/test/Transforms/AtomicExpand/RISCV/atomicrmw-fp.ll8
-rw-r--r--llvm/test/Transforms/AtomicExpand/SPARC/libcalls.ll44
-rw-r--r--llvm/test/Transforms/AtomicExpand/X86/expand-atomic-libcall.ll8
-rw-r--r--llvm/test/Transforms/AtomicExpand/Xtensa/atomicrmw-expand.ll240
-rw-r--r--llvm/test/Transforms/Attributor/heap_to_stack.ll2
-rw-r--r--llvm/test/Transforms/Attributor/heap_to_stack_gpu.ll2
-rw-r--r--llvm/test/Transforms/Attributor/liveness.ll16
-rw-r--r--llvm/test/Transforms/Attributor/noalias.ll16
-rw-r--r--llvm/test/Transforms/Attributor/openmp_parallel.ll52
-rw-r--r--llvm/test/Transforms/Attributor/reduced/register_benchmark_test.ll4
-rw-r--r--llvm/test/Transforms/Attributor/value-simplify-pointer-info.ll88
-rw-r--r--llvm/test/Transforms/CodeExtractor/LoopExtractor_alloca.ll12
-rw-r--r--llvm/test/Transforms/CodeExtractor/PartialInlineAlloca.ll8
-rw-r--r--llvm/test/Transforms/CodeExtractor/PartialInlineAlloca2.ll8
-rw-r--r--llvm/test/Transforms/CodeExtractor/PartialInlineAlloca4.ll10
-rw-r--r--llvm/test/Transforms/CodeExtractor/PartialInlineAlloca5.ll8
-rw-r--r--llvm/test/Transforms/CodeExtractor/PartialInlineInvokeProducesOutVal.ll4
-rw-r--r--llvm/test/Transforms/CodeExtractor/live_shrink.ll12
-rw-r--r--llvm/test/Transforms/CodeExtractor/live_shrink_gep.ll8
-rw-r--r--llvm/test/Transforms/CodeExtractor/live_shrink_hoist.ll10
-rw-r--r--llvm/test/Transforms/CodeExtractor/live_shrink_multiple.ll12
-rw-r--r--llvm/test/Transforms/CodeExtractor/live_shrink_unsafe.ll16
-rw-r--r--llvm/test/Transforms/CodeGenPrepare/ARM/tailcall-dup.ll8
-rw-r--r--llvm/test/Transforms/CodeGenPrepare/X86/tailcall-assume-xbb.ll8
-rw-r--r--llvm/test/Transforms/ConstantHoisting/AArch64/const-hoist-intrinsics.ll16
-rw-r--r--llvm/test/Transforms/Coroutines/coro-alloca-06.ll12
-rw-r--r--llvm/test/Transforms/Coroutines/coro-alloca-07.ll6
-rw-r--r--llvm/test/Transforms/Coroutines/coro-alloca-08.ll12
-rw-r--r--llvm/test/Transforms/Coroutines/coro-alloca-09.ll57
-rw-r--r--llvm/test/Transforms/Coroutines/coro-alloca-loop-carried-address.ll8
-rw-r--r--llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-infinite-loop-bug.ll6
-rw-r--r--llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-start-bug.ll8
-rw-r--r--llvm/test/Transforms/Coroutines/coro-byval-param.ll12
-rw-r--r--llvm/test/Transforms/Coroutines/coro-elide-musttail.ll2
-rw-r--r--llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-00.ll16
-rw-r--r--llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-01.ll18
-rw-r--r--llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-02.ll18
-rw-r--r--llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-03.ll16
-rw-r--r--llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-04.ll18
-rw-r--r--llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-05.ll18
-rw-r--r--llvm/test/Transforms/Coroutines/coro-lifetime-end.ll14
-rw-r--r--llvm/test/Transforms/Coroutines/coro-only-destroy-when-complete.ll8
-rw-r--r--llvm/test/Transforms/Coroutines/coro-retcon-unreachable.ll4
-rw-r--r--llvm/test/Transforms/Coroutines/coro-split-02.ll12
-rw-r--r--llvm/test/Transforms/Coroutines/coro-split-dbg.ll4
-rw-r--r--llvm/test/Transforms/Coroutines/coro-split-musttail-chain-pgo-counter-promo.ll22
-rw-r--r--llvm/test/Transforms/Coroutines/coro-split-musttail5.ll8
-rw-r--r--llvm/test/Transforms/Coroutines/coro-split-musttail6.ll12
-rw-r--r--llvm/test/Transforms/Coroutines/coro-split-musttail7.ll12
-rw-r--r--llvm/test/Transforms/Coroutines/coro-split-no-lifetime.ll12
-rw-r--r--llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-01.ll16
-rw-r--r--llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-02.ll10
-rw-r--r--llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-03.ll12
-rw-r--r--llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-04.ll12
-rw-r--r--llvm/test/Transforms/Coroutines/coro-transform-must-elide.ll4
-rw-r--r--llvm/test/Transforms/CorrelatedValuePropagation/alloca.ll8
-rw-r--r--llvm/test/Transforms/DCE/basic.ll8
-rw-r--r--llvm/test/Transforms/DeadStoreElimination/batchaa-caching-new-pointers.ll36
-rw-r--r--llvm/test/Transforms/DeadStoreElimination/captures-before-load.ll12
-rw-r--r--llvm/test/Transforms/DeadStoreElimination/dominate.ll6
-rw-r--r--llvm/test/Transforms/DeadStoreElimination/libcalls.ll44
-rw-r--r--llvm/test/Transforms/DeadStoreElimination/lifetime.ll16
-rw-r--r--llvm/test/Transforms/DeadStoreElimination/memcpy-lifetimes.ll12
-rw-r--r--llvm/test/Transforms/DeadStoreElimination/multiblock-loop-carried-dependence.ll10
-rw-r--r--llvm/test/Transforms/DeadStoreElimination/multiblock-malloc-free.ll4
-rw-r--r--llvm/test/Transforms/DeadStoreElimination/nounwind-invoke.ll6
-rw-r--r--llvm/test/Transforms/DeadStoreElimination/simple.ll12
-rw-r--r--llvm/test/Transforms/DeadStoreElimination/trivial-dse-calls.ll20
-rw-r--r--llvm/test/Transforms/EarlyCSE/memoryssa.ll40
-rw-r--r--llvm/test/Transforms/ExpandVariadics/expand-va-intrinsic-split-linkage.ll16
-rw-r--r--llvm/test/Transforms/ExpandVariadics/expand-va-intrinsic-split-simple.ll32
-rw-r--r--llvm/test/Transforms/ExpandVariadics/indirect-calls.ll8
-rw-r--r--llvm/test/Transforms/ExpandVariadics/intrinsics.ll44
-rw-r--r--llvm/test/Transforms/ExpandVariadics/pass-byval-byref.ll24
-rw-r--r--llvm/test/Transforms/ExpandVariadics/pass-indirect.ll8
-rw-r--r--llvm/test/Transforms/ExpandVariadics/pass-integers.ll60
-rw-r--r--llvm/test/Transforms/FunctionAttrs/nocapture.ll59
-rw-r--r--llvm/test/Transforms/GVN/PRE/phi-translate-2.ll198
-rw-r--r--llvm/test/Transforms/GVN/PRE/phi-translate-add.ll45
-rw-r--r--llvm/test/Transforms/GVN/PRE/phi-translate.ll87
-rw-r--r--llvm/test/Transforms/GVN/PRE/pre-aliasning-path.ll61
-rw-r--r--llvm/test/Transforms/GVN/PRE/pre-basic-add.ll52
-rw-r--r--llvm/test/Transforms/GVN/PRE/pre-jt-add.ll30
-rw-r--r--llvm/test/Transforms/GVN/PRE/pre-load-dbg.ll187
-rw-r--r--llvm/test/Transforms/GVN/PRE/pre-load-guards.ll139
-rw-r--r--llvm/test/Transforms/GVN/PRE/pre-load-implicit-cf-updates.ll112
-rw-r--r--llvm/test/Transforms/GVN/PRE/pre-load.ll1089
-rw-r--r--llvm/test/Transforms/GVN/PRE/pre-loop-load-new-pm.ll12
-rw-r--r--llvm/test/Transforms/GVN/PRE/pre-no-cost-phi.ll25
-rw-r--r--llvm/test/Transforms/GVN/PRE/pre-poison-add.ll83
-rw-r--r--llvm/test/Transforms/GVN/PRE/pre-single-pred.ll90
-rw-r--r--llvm/test/Transforms/GVN/PRE/preserve-tbaa.ll53
-rw-r--r--llvm/test/Transforms/GVN/cond_br2.ll8
-rw-r--r--llvm/test/Transforms/GVN/lifetime-simple.ll36
-rw-r--r--llvm/test/Transforms/GVN/opt-remarks.ll4
-rw-r--r--llvm/test/Transforms/GVN/vscale.ll12
-rw-r--r--llvm/test/Transforms/GVNHoist/pr29034.ll4
-rw-r--r--llvm/test/Transforms/GVNSink/lifetime.ll26
-rw-r--r--llvm/test/Transforms/GlobalOpt/dead-store-status.ll8
-rw-r--r--llvm/test/Transforms/HotColdSplit/lifetime-markers-on-inputs-1.ll28
-rw-r--r--llvm/test/Transforms/HotColdSplit/lifetime-markers-on-inputs-2.ll28
-rw-r--r--llvm/test/Transforms/HotColdSplit/lifetime-markers-on-inputs-3.ll16
-rw-r--r--llvm/test/Transforms/HotColdSplit/sink-multiple-bitcasts-of-allocas-pr42451.ll16
-rw-r--r--llvm/test/Transforms/IRNormalizer/regression-convergence-tokens.ll6
-rw-r--r--llvm/test/Transforms/IRNormalizer/regression-infinite-loop.ll44
-rw-r--r--llvm/test/Transforms/IRNormalizer/reordering-basic.ll14
-rw-r--r--llvm/test/Transforms/IRNormalizer/reordering.ll8
-rw-r--r--llvm/test/Transforms/IROutliner/alloca-addrspace-1.ll8
-rw-r--r--llvm/test/Transforms/IROutliner/alloca-addrspace.ll4
-rw-r--r--llvm/test/Transforms/IROutliner/different-intrinsics.ll24
-rw-r--r--llvm/test/Transforms/IROutliner/different-order-phi-merges.ll8
-rw-r--r--llvm/test/Transforms/IROutliner/duplicate-merging-phis.ll16
-rw-r--r--llvm/test/Transforms/IROutliner/exit-block-phi-node-value-attribution.ll8
-rw-r--r--llvm/test/Transforms/IROutliner/exit-phi-nodes-incoming-value-constant-argument.ll4
-rw-r--r--llvm/test/Transforms/IROutliner/extraction.ll16
-rw-r--r--llvm/test/Transforms/IROutliner/gvn-output-set-overload.ll12
-rw-r--r--llvm/test/Transforms/IROutliner/gvn-phi-debug.ll8
-rw-r--r--llvm/test/Transforms/IROutliner/illegal-assumes.ll16
-rw-r--r--llvm/test/Transforms/IROutliner/illegal-memcpy.ll24
-rw-r--r--llvm/test/Transforms/IROutliner/illegal-memmove.ll24
-rw-r--r--llvm/test/Transforms/IROutliner/illegal-vaarg.ll8
-rw-r--r--llvm/test/Transforms/IROutliner/mismatched-phi-exits-not-in-first-outlined.ll4
-rw-r--r--llvm/test/Transforms/IROutliner/mismatched-phi-exits.ll4
-rw-r--r--llvm/test/Transforms/IROutliner/mismatched-phi-outputs-ordering.ll20
-rw-r--r--llvm/test/Transforms/IROutliner/must-capture-all-phi-nodes-begin.ll8
-rw-r--r--llvm/test/Transforms/IROutliner/no-external-block-entries.ll4
-rw-r--r--llvm/test/Transforms/IROutliner/one-external-incoming-block-phi-node.ll4
-rw-r--r--llvm/test/Transforms/IROutliner/outline-memcpy.ll8
-rw-r--r--llvm/test/Transforms/IROutliner/outline-memmove.ll8
-rw-r--r--llvm/test/Transforms/IROutliner/outline-vaarg-intrinsic.ll8
-rw-r--r--llvm/test/Transforms/IROutliner/outlining-bitcasts.ll16
-rw-r--r--llvm/test/Transforms/IROutliner/outlining-branches-phi-nodes.ll8
-rw-r--r--llvm/test/Transforms/IROutliner/outlining-cost-model.ll16
-rw-r--r--llvm/test/Transforms/IROutliner/outlining-different-output-blocks.ll16
-rw-r--r--llvm/test/Transforms/IROutliner/outlining-exits-to-phi-node.ll8
-rw-r--r--llvm/test/Transforms/IROutliner/outlining-multiple-exits-diff-outputs.ll32
-rw-r--r--llvm/test/Transforms/IROutliner/outlining-multiple-exits-one-output-set.ll16
-rw-r--r--llvm/test/Transforms/IROutliner/outlining-multiple-exits.ll32
-rw-r--r--llvm/test/Transforms/IROutliner/outlining-remapped-outputs.ll16
-rw-r--r--llvm/test/Transforms/IROutliner/outlining-same-output-blocks.ll16
-rw-r--r--llvm/test/Transforms/IROutliner/phi-node-exit-path-order.ll8
-rw-r--r--llvm/test/Transforms/IROutliner/phi-nodes-output-overload.ll8
-rw-r--r--llvm/test/Transforms/IROutliner/phi-nodes-parent-block-referential.ll8
-rw-r--r--llvm/test/Transforms/IROutliner/region-inputs-in-phi-nodes.ll8
-rw-r--r--llvm/test/Transforms/IndVarSimplify/exit_value_test2.ll12
-rw-r--r--llvm/test/Transforms/InferAddressSpaces/AMDGPU/lifetime.ll8
-rw-r--r--llvm/test/Transforms/InferAddressSpaces/NVPTX/lifetime.ll12
-rw-r--r--llvm/test/Transforms/InferAlignment/propagate-from-other-load-stores.ll194
-rw-r--r--llvm/test/Transforms/Inline/AArch64/sve-alloca-merge.ll4
-rw-r--r--llvm/test/Transforms/Inline/ML/bounds-checks-rewards.ll21
-rw-r--r--llvm/test/Transforms/Inline/ML/development-training-log.ll6
-rw-r--r--llvm/test/Transforms/Inline/ML/size-estimator-default.ll4
-rw-r--r--llvm/test/Transforms/Inline/ML/size-estimator-training.ll6
-rw-r--r--llvm/test/Transforms/Inline/ML/state-tracking-coro.ll8
-rw-r--r--llvm/test/Transforms/Inline/SystemZ/inline-target-attr.ll18
-rw-r--r--llvm/test/Transforms/Inline/access-attributes-prop.ll16
-rw-r--r--llvm/test/Transforms/Inline/byval-align.ll4
-rw-r--r--llvm/test/Transforms/Inline/byval-tail-call.ll16
-rw-r--r--llvm/test/Transforms/Inline/byval-with-non-alloca-addrspace.ll4
-rw-r--r--llvm/test/Transforms/Inline/byval.ll12
-rw-r--r--llvm/test/Transforms/Inline/callbr.ll8
-rw-r--r--llvm/test/Transforms/Inline/devirtualize-4.ll20
-rw-r--r--llvm/test/Transforms/Inline/dynamic-alloca-simplified-large.ll4
-rw-r--r--llvm/test/Transforms/Inline/inline-deferred-instsimplify.ll4
-rw-r--r--llvm/test/Transforms/Inline/inline-tail.ll4
-rw-r--r--llvm/test/Transforms/Inline/inlined-mustprogress-loop-metadata.ll12
-rw-r--r--llvm/test/Transforms/Inline/lifetime-no-datalayout.ll4
-rw-r--r--llvm/test/Transforms/Inline/lifetime.ll48
-rw-r--r--llvm/test/Transforms/Inline/no-inline-incompatible-gc.ll16
-rw-r--r--llvm/test/Transforms/Inline/noalias-calls-always.ll8
-rw-r--r--llvm/test/Transforms/Inline/noalias-calls.ll8
-rw-r--r--llvm/test/Transforms/InstCombine/2025-08-06-shufflevector-bitcast-vector-of-pointers.ll15
-rw-r--r--llvm/test/Transforms/InstCombine/assume_inevitable.ll8
-rw-r--r--llvm/test/Transforms/InstCombine/builtin-object-size-custom-dl.ll8
-rw-r--r--llvm/test/Transforms/InstCombine/builtin-object-size-offset.ll12
-rw-r--r--llvm/test/Transforms/InstCombine/builtin-object-size-ptr.ll8
-rw-r--r--llvm/test/Transforms/InstCombine/compare-alloca.ll8
-rw-r--r--llvm/test/Transforms/InstCombine/deadcode.ll8
-rw-r--r--llvm/test/Transforms/InstCombine/lifetime-no-null-opt.ll40
-rw-r--r--llvm/test/Transforms/InstCombine/lifetime-sanitizer.ll20
-rw-r--r--llvm/test/Transforms/InstCombine/lifetime.ll40
-rw-r--r--llvm/test/Transforms/InstCombine/lower-dbg-declare.ll8
-rw-r--r--llvm/test/Transforms/InstCombine/malloc-free.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/memcpy-from-global.ll10
-rw-r--r--llvm/test/Transforms/InstCombine/recurrence-binary-intrinsic.ll8
-rw-r--r--llvm/test/Transforms/InstCombine/sink_sideeffecting_instruction.ll56
-rw-r--r--llvm/test/Transforms/InstCombine/trivial-dse-calls.ll12
-rw-r--r--llvm/test/Transforms/InstCombine/unreachable-alloca-lifetime-markers.ll8
-rw-r--r--llvm/test/Transforms/InstCombine/vararg.ll24
-rw-r--r--llvm/test/Transforms/InstSimplify/floating-point-arithmetic.ll21
-rw-r--r--llvm/test/Transforms/LICM/dropped-tbaa.ll8
-rw-r--r--llvm/test/Transforms/LICM/hoisting-preheader-debugloc.ll8
-rw-r--r--llvm/test/Transforms/LICM/licm-coroutine.ll78
-rw-r--r--llvm/test/Transforms/LICM/loopsink-pr38462.ll8
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/X86/lifetime-use.ll8
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/lsr-comp-time.ll56
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll20
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll11
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/divs-with-scalable-vfs.ll24
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/eliminate-tail-predication.ll12
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/gather-do-not-vectorize-addressing.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/induction-costs-sve.ll21
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/interleave-with-gaps.ll12
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/interleaving-reduction.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/low_trip_count_predicates.ll24
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll36
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll30
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/outer_loop_prefer_scalable.ll48
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-chained.ll72
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll16
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-neon.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll146
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-sub.ll12
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll56
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/pr151664-cost-hoisted-vector-scalable.ll47
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/pr60831-sve-inv-store-crash.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll3
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll30
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/scalable-avoid-scalarization.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/scalable-reduction-inloop-cond.ll8
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll1422
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/simple_early_exit.ll8
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/single-early-exit-interleave.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll24
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/struct-return-cost.ll7
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-cond-inv-loads.ll15
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-inloop-reductions.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-reductions.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-strict-reductions.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect.ll64
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-fixed-width-inorder-core.ll70
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-fneg.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-gather-scatter.ll26
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions-unusual-types.ll12
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll57
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-masked-accesses.ll362
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-inv-store.ll8
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-live-out-pointer-induction.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-low-trip-count.ll119
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-multiexit.ll8
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-runtime-check-size-based-threshold.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-forced.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-optsize.ll8
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll40
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-unroll.ll12
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll87
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-vector-reverse.ll12
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-vscale-based-trip-counts.ll24
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-extractvalue.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll30
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll15
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt-epilogue.ll8
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt-too-many-deps.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll50
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/tail-folding-styles.ll28
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/vplan-printing.ll11
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/wider-VF-for-callinst.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/ARM/mve-known-trip-count.ll12
-rw-r--r--llvm/test/Transforms/LoopVectorize/Hexagon/minimum-vf.ll8
-rw-r--r--llvm/test/Transforms/LoopVectorize/PowerPC/vectorize-only-for-real.ll12
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll75
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/blend-any-of-reduction-cost.ll54
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll524
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll246
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/defaults.ll69
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll432
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll16
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/f16.ll37
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/first-order-recurrence-scalable-vf1.ll3
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/fminimumnum.ll204
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/force-vect-msg.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll53
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll48
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/interleaved-accesses.ll1242
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/interleaved-cost.ll374
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll188
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/lmul.ll99
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll92
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/mask-index-type.ll44
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll82
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll42
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/pr87378-vpinstruction-or-drop-poison-generating-flags.ll56
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/reductions.ll748
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-bf16.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-f16.ll8
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/reg-usage.ll14
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/remark-reductions.ll41
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll488
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/safe-dep-distance.ll102
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/scalable-basics.ll193
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll40
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/select-cmp-reduction.ll236
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/short-trip-count.ll36
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll341
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-bin-unary-ops-args.ll180
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-call-intrinsics.ll90
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll112
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll88
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll40
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll50
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll14
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll168
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll12
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-intermediate-store.ll20
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll12
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll18
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll12
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll12
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll168
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll42
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll25
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll128
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/type-info-cache-evl-crash.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll324
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/vector-loop-backedge-elimination-with-evl.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll12
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/vf-will-not-generate-any-vector-insts.ll32
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/vplan-riscv-vector-reverse.ll32
-rw-r--r--llvm/test/Transforms/LoopVectorize/SystemZ/mem-interleaving-costs-03.ll8
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/pr35432.ll20
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/scev-checks-unprofitable.ll3
-rw-r--r--llvm/test/Transforms/LoopVectorize/dbg-outer-loop-vect.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/first-order-recurrence-scalable-vf1.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/histograms.ll37
-rw-r--r--llvm/test/Transforms/LoopVectorize/intrinsic.ll201
-rw-r--r--llvm/test/Transforms/LoopVectorize/lifetime.ll20
-rw-r--r--llvm/test/Transforms/LoopVectorize/outer_loop_scalable.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/runtime-check.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/scalable-assume.ll14
-rw-r--r--llvm/test/Transforms/LoopVectorize/scalable-first-order-recurrence.ll52
-rw-r--r--llvm/test/Transforms/LoopVectorize/scalable-inductions.ll26
-rw-r--r--llvm/test/Transforms/LoopVectorize/scalable-iv-outside-user.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/scalable-lifetime.ll66
-rw-r--r--llvm/test/Transforms/LoopVectorize/scalable-loop-unpredicated-body-scalar-tail.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/scalable-predication.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/scalable-reduction-inloop.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/scalable-trunc-min-bitwidth.ll8
-rw-r--r--llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll18
-rw-r--r--llvm/test/Transforms/LoopVectorize/vectorize-force-tail-with-evl.ll7
-rw-r--r--llvm/test/Transforms/LoopVectorize/vplan-iv-transforms.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/vplan-predicate-switch.ll11
-rw-r--r--llvm/test/Transforms/LoopVectorize/vplan-printing-before-execute.ll14
-rw-r--r--llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-lifetime-ends.ll54
-rw-r--r--llvm/test/Transforms/Mem2Reg/alloca_addrspace.ll2
-rw-r--r--llvm/test/Transforms/Mem2Reg/ignore-droppable.ll12
-rw-r--r--llvm/test/Transforms/Mem2Reg/ignore-lifetime.ll12
-rw-r--r--llvm/test/Transforms/MemCpyOpt/callslot_badaa.ll10
-rw-r--r--llvm/test/Transforms/MemCpyOpt/capturing-func.ll12
-rw-r--r--llvm/test/Transforms/MemCpyOpt/lifetime-missing.ll4
-rw-r--r--llvm/test/Transforms/MemCpyOpt/lifetime.ll48
-rw-r--r--llvm/test/Transforms/MemCpyOpt/memcpy-byval-forwarding-clobbers.ll18
-rw-r--r--llvm/test/Transforms/MemCpyOpt/memcpy-gep-modification.ll20
-rw-r--r--llvm/test/Transforms/MemCpyOpt/memcpy-to-memset-with-lifetimes.ll16
-rw-r--r--llvm/test/Transforms/MemCpyOpt/memcpy-undef.ll18
-rw-r--r--llvm/test/Transforms/MemCpyOpt/memset-memcpy-oversized.ll12
-rw-r--r--llvm/test/Transforms/MemCpyOpt/pr29105.ll20
-rw-r--r--llvm/test/Transforms/MemCpyOpt/preserve-memssa.ll6
-rw-r--r--llvm/test/Transforms/MemCpyOpt/stack-move.ll570
-rw-r--r--llvm/test/Transforms/MergeICmps/X86/int64-and-ptr.ll4
-rw-r--r--llvm/test/Transforms/MoveAutoInit/clobber.ll20
-rw-r--r--llvm/test/Transforms/NewGVN/coercion-different-ptr.ll6
-rw-r--r--llvm/test/Transforms/NewGVN/cond_br2-xfail.ll8
-rw-r--r--llvm/test/Transforms/NewGVN/lifetime-simple.ll12
-rw-r--r--llvm/test/Transforms/NewGVN/verify-memoryphi.ll6
-rw-r--r--llvm/test/Transforms/NewGVN/vscale.ll8
-rw-r--r--llvm/test/Transforms/ObjCARC/inlined-autorelease-return-value.ll20
-rw-r--r--llvm/test/Transforms/ObjCARC/post-inlining.ll12
-rw-r--r--llvm/test/Transforms/ObjCARC/related-check.ll12
-rw-r--r--llvm/test/Transforms/OpenMP/custom_state_machines_remarks.ll20
-rw-r--r--llvm/test/Transforms/OpenMP/nested_parallelism.ll36
-rw-r--r--llvm/test/Transforms/OpenMP/parallel_deletion.ll36
-rw-r--r--llvm/test/Transforms/OpenMP/parallel_region_merging.ll52
-rw-r--r--llvm/test/Transforms/OpenMP/spmdization.ll4
-rw-r--r--llvm/test/Transforms/OpenMP/spmdization_constant_prop.ll4
-rw-r--r--llvm/test/Transforms/OpenMP/spmdization_indirect.ll4
-rw-r--r--llvm/test/Transforms/OpenMP/spmdization_remarks.ll20
-rw-r--r--llvm/test/Transforms/PGOProfile/consecutive-zeros.ll4
-rw-r--r--llvm/test/Transforms/PGOProfile/entry_alloca.ll8
-rw-r--r--llvm/test/Transforms/PGOProfile/memop_size_annotation.ll4
-rw-r--r--llvm/test/Transforms/PGOProfile/memop_size_opt.ll4
-rw-r--r--llvm/test/Transforms/PGOProfile/misexpect-branch-correct.ll12
-rw-r--r--llvm/test/Transforms/PGOProfile/misexpect-branch-overflow.ll12
-rw-r--r--llvm/test/Transforms/PGOProfile/misexpect-branch-stripped.ll12
-rw-r--r--llvm/test/Transforms/PGOProfile/misexpect-branch-unpredictable.ll12
-rw-r--r--llvm/test/Transforms/PGOProfile/misexpect-branch.ll12
-rw-r--r--llvm/test/Transforms/PGOProfile/misexpect-switch-default.ll20
-rw-r--r--llvm/test/Transforms/PGOProfile/misexpect-switch.ll20
-rw-r--r--llvm/test/Transforms/PartialInlining/switch_stmt.ll4
-rw-r--r--llvm/test/Transforms/PhaseOrdering/AArch64/hoist-runtime-checks.ll20
-rw-r--r--llvm/test/Transforms/PhaseOrdering/AArch64/hoisting-sinking-required-for-vectorization.ll8
-rw-r--r--llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll12
-rw-r--r--llvm/test/Transforms/PhaseOrdering/AArch64/predicated-reduction.ll52
-rw-r--r--llvm/test/Transforms/PhaseOrdering/AArch64/quant_4x4.ll12
-rw-r--r--llvm/test/Transforms/PhaseOrdering/AArch64/reduce_muladd.ll92
-rw-r--r--llvm/test/Transforms/PhaseOrdering/AArch64/reduce_submuladd.ll274
-rw-r--r--llvm/test/Transforms/PhaseOrdering/AArch64/slpordering.ll72
-rw-r--r--llvm/test/Transforms/PhaseOrdering/AArch64/sve-interleave-vectorization.ll4
-rw-r--r--llvm/test/Transforms/PhaseOrdering/AArch64/udotabd.ll16
-rw-r--r--llvm/test/Transforms/PhaseOrdering/ARM/arm_add_q7.ll16
-rw-r--r--llvm/test/Transforms/PhaseOrdering/ARM/arm_fill_q7.ll16
-rw-r--r--llvm/test/Transforms/PhaseOrdering/ARM/arm_mean_q7.ll16
-rw-r--r--llvm/test/Transforms/PhaseOrdering/ARM/arm_mult_q15.ll16
-rw-r--r--llvm/test/Transforms/PhaseOrdering/X86/SROA-after-final-loop-unrolling-2.ll16
-rw-r--r--llvm/test/Transforms/PhaseOrdering/X86/SROA-after-loop-unrolling.ll40
-rw-r--r--llvm/test/Transforms/PhaseOrdering/X86/hoist-load-of-baseptr.ll12
-rw-r--r--llvm/test/Transforms/PhaseOrdering/X86/loop-vectorizer-noalias.ll4
-rw-r--r--llvm/test/Transforms/PhaseOrdering/X86/masked-memory-ops-with-cf.ll2
-rw-r--r--llvm/test/Transforms/PhaseOrdering/X86/pr61061.ll8
-rw-r--r--llvm/test/Transforms/PhaseOrdering/X86/preserve-access-group.ll16
-rw-r--r--llvm/test/Transforms/PhaseOrdering/X86/vdiv-nounroll.ll8
-rw-r--r--llvm/test/Transforms/PhaseOrdering/X86/vec-load-combine.ll20
-rw-r--r--llvm/test/Transforms/PhaseOrdering/assume-explosion.ll4
-rw-r--r--llvm/test/Transforms/PhaseOrdering/dse-ephemeral-value-captures.ll8
-rw-r--r--llvm/test/Transforms/PhaseOrdering/gvn-replacement-vs-hoist.ll8
-rw-r--r--llvm/test/Transforms/PhaseOrdering/instcombine-sroa-inttoptr.ll12
-rw-r--r--llvm/test/Transforms/PhaseOrdering/lifetime-sanitizer.ll20
-rw-r--r--llvm/test/Transforms/PhaseOrdering/loop-access-checks.ll36
-rw-r--r--llvm/test/Transforms/PhaseOrdering/loop-rotation-vs-common-code-hoisting.ll8
-rw-r--r--llvm/test/Transforms/PhaseOrdering/vector-select.ll16
-rw-r--r--llvm/test/Transforms/SLPVectorizer/AArch64/exp.ll279
-rw-r--r--llvm/test/Transforms/SLPVectorizer/AArch64/fround.ll280
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/pr35497.ll137
-rw-r--r--llvm/test/Transforms/SROA/alloca-address-space.ll2
-rw-r--r--llvm/test/Transforms/SROA/basictest.ll34
-rw-r--r--llvm/test/Transforms/SROA/dead-inst.ll12
-rw-r--r--llvm/test/Transforms/SROA/ignore-droppable.ll12
-rw-r--r--llvm/test/Transforms/SROA/lifetime-intrinsic.ll8
-rw-r--r--llvm/test/Transforms/SROA/non-capturing-call-readonly.ll6
-rw-r--r--llvm/test/Transforms/SROA/pr26972.ll4
-rw-r--r--llvm/test/Transforms/SROA/readonlynocapture.ll32
-rw-r--r--llvm/test/Transforms/SROA/select-load.ll14
-rw-r--r--llvm/test/Transforms/SROA/vector-lifetime-intrinsic.ll8
-rw-r--r--llvm/test/Transforms/SROA/vector-promotion.ll2
-rw-r--r--llvm/test/Transforms/SafeStack/ARM/debug.ll8
-rw-r--r--llvm/test/Transforms/SafeStack/X86/call.ll8
-rw-r--r--llvm/test/Transforms/SafeStack/X86/coloring-ssp.ll12
-rw-r--r--llvm/test/Transforms/SafeStack/X86/coloring.ll16
-rw-r--r--llvm/test/Transforms/SafeStack/X86/coloring2.ll162
-rw-r--r--llvm/test/Transforms/SafeStack/X86/debug-loc2.ll4
-rw-r--r--llvm/test/Transforms/SafeStack/X86/layout-frag.ll16
-rw-r--r--llvm/test/Transforms/SafeStack/X86/no-crash-on-lifetime.ll4
-rw-r--r--llvm/test/Transforms/SampleProfile/csspgo-import-list-callee-samples.ll4
-rw-r--r--llvm/test/Transforms/SampleProfile/entry_counts_cold.ll8
-rw-r--r--llvm/test/Transforms/SampleProfile/entry_counts_missing_dbginfo.ll8
-rw-r--r--llvm/test/Transforms/SampleProfile/non-probe-stale-profile-matching.ll4
-rw-r--r--llvm/test/Transforms/SampleProfile/profile-mismatch.ll8
-rw-r--r--llvm/test/Transforms/SampleProfile/pseudo-probe-discriminator.ll4
-rw-r--r--llvm/test/Transforms/SampleProfile/pseudo-probe-icp-factor.ll16
-rw-r--r--llvm/test/Transforms/SampleProfile/pseudo-probe-profile-mismatch.ll8
-rw-r--r--llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching-LCS.ll4
-rw-r--r--llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching.ll4
-rw-r--r--llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-name-similarity.ll4
-rw-r--r--llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-renaming-recursive.ll4
-rw-r--r--llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-renaming.ll4
-rw-r--r--llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-toplev-func.ll4
-rw-r--r--llvm/test/Transforms/SampleProfile/remarks.ll12
-rw-r--r--llvm/test/Transforms/Scalarizer/intrinsics.ll47
-rw-r--r--llvm/test/Transforms/SimplifyCFG/X86/critedge-assume.ll4
-rw-r--r--llvm/test/Transforms/SimplifyCFG/X86/empty-cleanuppad.ll12
-rw-r--r--llvm/test/Transforms/SimplifyCFG/X86/invalidate-dom.ll4
-rw-r--r--llvm/test/Transforms/SimplifyCFG/X86/sink-common-code.ll12
-rw-r--r--llvm/test/Transforms/SimplifyCFG/common-code-hoisting.ll4
-rw-r--r--llvm/test/Transforms/SimplifyCFG/invoke_unwind_lifetime.ll28
-rw-r--r--llvm/test/Transforms/SimplifyCFG/lifetime-landingpad.ll14
-rw-r--r--llvm/test/Transforms/SimplifyCFG/lifetime.ll8
-rw-r--r--llvm/test/Transforms/SimplifyCFG/pr50060-constantfold-loopid.ll20
-rw-r--r--llvm/test/Transforms/SimplifyCFG/sink-and-convert-switch.ll8
-rw-r--r--llvm/test/Transforms/SimplifyCFG/tail-merge-noreturn.ll20
-rw-r--r--llvm/test/Transforms/TailCallElim/tre-byval-parameter-2.ll20
-rw-r--r--llvm/test/Transforms/TailCallElim/tre-byval-parameter.ll20
-rw-r--r--llvm/test/Transforms/TailCallElim/tre-multiple-exits.ll12
-rw-r--r--llvm/test/Transforms/TailCallElim/tre-noncapturing-alloca-calls.ll12
-rw-r--r--llvm/test/Transforms/Util/PredicateInfo/pr33456.ll4
-rw-r--r--llvm/test/Transforms/Util/dbg-call-bitcast.ll12
-rw-r--r--llvm/test/Verifier/intrinsic-immarg.ll20
-rw-r--r--llvm/test/Verifier/opaque-ptr.ll14
-rw-r--r--llvm/test/lit.site.cfg.py.in1
-rw-r--r--llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll12
-rw-r--r--llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.expected20
-rw-r--r--llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.expected20
-rw-r--r--llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.globals.expected20
-rw-r--r--llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.noglobals.expected20
-rw-r--r--llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.transitiveglobals.expected20
-rw-r--r--llvm/test/tools/dxil-dis/lifetimes.ll12
-rw-r--r--llvm/test/tools/llvm-ir2vec/entities.ll79
-rw-r--r--llvm/test/tools/llvm-ir2vec/triplets.ll58
-rw-r--r--llvm/test/tools/llvm-mca/RISCV/SpacemitX60/rvv-arithmetic.s1226
-rw-r--r--llvm/test/tools/llvm-mca/RISCV/SpacemitX60/rvv-bitwise.s458
-rw-r--r--llvm/test/tools/llvm-mca/RISCV/SpacemitX60/rvv-mul-div.s154
-rw-r--r--llvm/test/tools/llvm-profdata/merge-traces.proftext54
-rw-r--r--llvm/test/tools/llvm-profdata/read-traces.proftext21
-rw-r--r--llvm/test/tools/llvm-profdata/trace-limit.proftext2
-rw-r--r--llvm/test/tools/llvm-reduce/operands-to-args-lifetimes.ll8
-rw-r--r--llvm/test/tools/llvm-reduce/reduce-operands-alloca.ll10
-rw-r--r--llvm/tools/llvm-objdump/MachODump.cpp16
-rw-r--r--llvm/tools/llvm-objdump/llvm-objdump.cpp8
-rw-r--r--llvm/unittests/ADT/StringRefTest.cpp13
-rw-r--r--llvm/unittests/Analysis/IR2VecTest.cpp22
-rw-r--r--llvm/unittests/Analysis/MemorySSATest.cpp4
-rw-r--r--llvm/unittests/Analysis/ValueTrackingTest.cpp36
-rw-r--r--llvm/unittests/CodeGen/SelectionDAGTestBase.h7
-rw-r--r--llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp139
-rw-r--r--llvm/unittests/IR/BasicBlockDbgInfoTest.cpp8
-rw-r--r--llvm/unittests/IR/IRBuilderTest.cpp12
-rw-r--r--llvm/unittests/IR/MetadataTest.cpp16
-rw-r--r--llvm/unittests/Support/DebugLogTest.cpp5
-rw-r--r--llvm/unittests/Target/AArch64/AArch64SelectionDAGTest.cpp7
-rw-r--r--llvm/unittests/TextAPI/TextStubV5Tests.cpp27
-rw-r--r--llvm/unittests/Transforms/Utils/CloningTest.cpp4
-rw-r--r--llvm/unittests/Transforms/Vectorize/VPlanTestBase.h9
-rw-r--r--llvm/utils/TableGen/Basic/IntrinsicEmitter.cpp176
-rw-r--r--llvm/utils/gn/secondary/clang-tools-extra/clangd/BUILD.gn1
-rw-r--r--llvm/utils/gn/secondary/clang-tools-extra/clangd/unittests/BUILD.gn1
-rw-r--r--llvm/utils/gn/secondary/clang/test/BUILD.gn1
-rw-r--r--llvm/utils/gn/secondary/clang/unittests/Interpreter/BUILD.gn8
-rw-r--r--llvm/utils/gn/secondary/libcxx/include/BUILD.gn2
-rw-r--r--llvm/utils/gn/secondary/lldb/tools/lldb-dap/BUILD.gn3
-rw-r--r--llvm/utils/gn/secondary/llvm/lib/Target/AVR/BUILD.gn1
-rw-r--r--llvm/utils/gn/secondary/llvm/test/BUILD.gn1
-rw-r--r--llvm/utils/lit/lit/llvm/config.py2
-rw-r--r--mlir/cmake/modules/AddMLIR.cmake44
-rw-r--r--mlir/docs/DialectConversion.md67
-rw-r--r--mlir/include/mlir/Dialect/AMDGPU/IR/AMDGPU.td3
-rw-r--r--mlir/include/mlir/Dialect/CommonFolders.h147
-rw-r--r--mlir/include/mlir/Dialect/GPU/IR/GPUOps.td20
-rw-r--r--mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td14
-rw-r--r--mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td38
-rw-r--r--mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td16
-rw-r--r--mlir/include/mlir/Dialect/Linalg/IR/Linalg.h194
-rw-r--r--mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td2
-rw-r--r--mlir/include/mlir/Dialect/Linalg/IR/LinalgNamedStructuredOps.yaml286
-rw-r--r--mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td9
-rw-r--r--mlir/include/mlir/Dialect/Linalg/Passes.td39
-rw-r--r--mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td5
-rw-r--r--mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h4
-rw-r--r--mlir/include/mlir/Dialect/Vector/IR/VectorOps.h1
-rw-r--r--mlir/include/mlir/Dialect/Vector/IR/VectorOps.td67
-rw-r--r--mlir/include/mlir/Dialect/WasmSSA/IR/WasmSSAOps.td12
-rw-r--r--mlir/include/mlir/Dialect/XeGPU/IR/CMakeLists.txt6
-rw-r--r--mlir/include/mlir/Dialect/XeGPU/IR/XeGPU.h9
-rw-r--r--mlir/include/mlir/Dialect/XeGPU/IR/XeGPUAttrs.td164
-rw-r--r--mlir/include/mlir/Dialect/XeGPU/IR/XeGPUDialect.td12
-rw-r--r--mlir/include/mlir/Dialect/XeGPU/IR/XeGPUOps.td171
-rw-r--r--mlir/include/mlir/IR/PatternMatch.h8
-rw-r--r--mlir/include/mlir/Pass/PassOptions.h2
-rw-r--r--mlir/include/mlir/Transforms/DialectConversion.h17
-rw-r--r--mlir/lib/Conversion/MemRefToSPIRV/MemRefToSPIRV.cpp28
-rw-r--r--mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp5
-rw-r--r--mlir/lib/Dialect/AMDGPU/IR/AMDGPUDialect.cpp4
-rw-r--r--mlir/lib/Dialect/AMDGPU/Transforms/FoldMemRefsOps.cpp109
-rw-r--r--mlir/lib/Dialect/EmitC/IR/EmitC.cpp7
-rw-r--r--mlir/lib/Dialect/GPU/IR/GPUDialect.cpp51
-rw-r--r--mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp9
-rw-r--r--mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp20
-rw-r--r--mlir/lib/Dialect/LLVMIR/Transforms/InlinerInterfaceImpl.cpp2
-rw-r--r--mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp441
-rw-r--r--mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp36
-rw-r--r--mlir/lib/Dialect/Linalg/Transforms/BlockPackMatmul.cpp8
-rw-r--r--mlir/lib/Dialect/Linalg/Transforms/CMakeLists.txt2
-rw-r--r--mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp16
-rw-r--r--mlir/lib/Dialect/Linalg/Transforms/MorphOps.cpp62
-rw-r--r--mlir/lib/Dialect/Linalg/Transforms/NamedToElementwise.cpp98
-rw-r--r--mlir/lib/Dialect/Linalg/Transforms/PadTilingInterface.cpp18
-rw-r--r--mlir/lib/Dialect/Linalg/Transforms/Specialize.cpp11
-rw-r--r--mlir/lib/Dialect/Linalg/Transforms/TransposeMatmul.cpp22
-rw-r--r--mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp8
-rw-r--r--mlir/lib/Dialect/Vector/IR/VectorOps.cpp38
-rw-r--r--mlir/lib/Dialect/WasmSSA/IR/WasmSSAOps.cpp42
-rw-r--r--mlir/lib/Dialect/XeGPU/IR/CMakeLists.txt3
-rw-r--r--mlir/lib/Dialect/XeGPU/IR/XeGPUDialect.cpp197
-rw-r--r--mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp22
-rw-r--r--mlir/lib/Dialect/XeGPU/Transforms/XeGPUWgToSgDistribute.cpp103
-rw-r--r--mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp33
-rw-r--r--mlir/lib/Target/SPIRV/Deserialization/Deserializer.cpp14
-rw-r--r--mlir/lib/Target/SPIRV/Serialization/Serializer.cpp33
-rw-r--r--mlir/lib/Transforms/Utils/DialectConversion.cpp287
-rw-r--r--mlir/python/mlir/dialects/linalg/opdsl/ops/core_named_ops.py93
-rw-r--r--mlir/test/Conversion/MemRefToSPIRV/memref-to-spirv.mlir22
-rw-r--r--mlir/test/Conversion/NVVMToLLVM/nvvm-to-llvm.mlir14
-rw-r--r--mlir/test/Conversion/VectorToLLVM/vector-to-llvm-interface.mlir40
-rw-r--r--mlir/test/Dialect/AMDGPU/amdgpu-fold-memrefs.mlir91
-rw-r--r--mlir/test/Dialect/AMDGPU/invalid.mlir8
-rw-r--r--mlir/test/Dialect/AMDGPU/ops.mlir4
-rw-r--r--mlir/test/Dialect/Arith/canonicalize.mlir14
-rw-r--r--mlir/test/Dialect/GPU/ops.mlir12
-rw-r--r--mlir/test/Dialect/GPU/outlining.mlir30
-rw-r--r--mlir/test/Dialect/LLVMIR/call-intrin.mlir9
-rw-r--r--mlir/test/Dialect/LLVMIR/inlining.mlir18
-rw-r--r--mlir/test/Dialect/LLVMIR/mem2reg.mlir35
-rw-r--r--mlir/test/Dialect/LLVMIR/rocdl.mlir7
-rw-r--r--mlir/test/Dialect/LLVMIR/roundtrip.mlir8
-rw-r--r--mlir/test/Dialect/LLVMIR/sroa.mlir2
-rw-r--r--mlir/test/Dialect/Linalg/block-pack-matmul-layout.mlir50
-rw-r--r--mlir/test/Dialect/Linalg/block-pack-matmul.mlir144
-rw-r--r--mlir/test/Dialect/Linalg/elementwise/named-to-elementwise.mlir56
-rw-r--r--mlir/test/Dialect/Linalg/fold-add-into-dest.mlir30
-rw-r--r--mlir/test/Dialect/Linalg/linalg-morph-category-ops.mlir15
-rw-r--r--mlir/test/Dialect/Linalg/linalg-morph-multi-step.mlir14
-rw-r--r--mlir/test/Dialect/Linalg/named-ops.mlir44
-rw-r--r--mlir/test/Dialect/Linalg/rank-reduce-contraction-ops.mlir85
-rw-r--r--mlir/test/Dialect/Linalg/tile-to-forall.mlir2
-rw-r--r--mlir/test/Dialect/Linalg/transform-op-pad-tiling-interface.mlir12
-rw-r--r--mlir/test/Dialect/Linalg/transform-op-pad.mlir6
-rw-r--r--mlir/test/Dialect/Linalg/transform-op-specialize-matmul.mlir89
-rw-r--r--mlir/test/Dialect/Linalg/transpose-matmul.mlir38
-rw-r--r--mlir/test/Dialect/Linalg/vectorization/linalg-ops-with-patterns.mlir665
-rw-r--r--mlir/test/Dialect/Vector/canonicalize.mlir100
-rw-r--r--mlir/test/Dialect/Vector/invalid.mlir62
-rw-r--r--mlir/test/Dialect/Vector/vector-sink.mlir30
-rw-r--r--mlir/test/Dialect/Vector/vector-transfer-to-vector-load-store.mlir8
-rw-r--r--mlir/test/Dialect/WasmSSA/custom_parser/if.mlir53
-rw-r--r--mlir/test/Dialect/WasmSSA/custom_parser/memory.mlir7
-rw-r--r--mlir/test/Dialect/WasmSSA/custom_parser/table.mlir7
-rw-r--r--mlir/test/Dialect/XeGPU/invalid.mlir29
-rw-r--r--mlir/test/Dialect/XeGPU/layout.mlir23
-rw-r--r--mlir/test/Dialect/XeGPU/ops.mlir36
-rw-r--r--mlir/test/Dialect/XeGPU/xegpu-attr-interface.mlir37
-rw-r--r--mlir/test/Dialect/XeGPU/xegpu-wg-to-sg-rr.mlir27
-rw-r--r--mlir/test/Dialect/XeGPU/xegpu-wg-to-sg.mlir48
-rw-r--r--mlir/test/Dialect/common_folders.mlir22
-rw-r--r--mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/matmul-transpose-a.mlir9
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/ArmSME/outerproduct-f32.mlir4
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/ArmSME/outerproduct-f64.mlir4
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/ArmSME/transfer-write-2d.mlir4
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction.mlir2
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/scalable-interleave.mlir4
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/interleave.mlir4
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/outerproduct-f32.mlir6
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/outerproduct-i64.mlir6
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/transfer-read-1d.mlir4
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/transfer-read-2d.mlir4
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/transfer-read-3d.mlir2
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/transfer-read.mlir2
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/transfer-write.mlir8
-rw-r--r--mlir/test/Pass/pipeline-options-parsing.mlir10
-rw-r--r--mlir/test/Target/LLVMIR/Import/intrinsic-prefer-unregistered.ll8
-rw-r--r--mlir/test/Target/LLVMIR/Import/intrinsic.ll12
-rw-r--r--mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir8
-rw-r--r--mlir/test/Target/LLVMIR/nvvmir-invalid.mlir19
-rw-r--r--mlir/test/Target/LLVMIR/nvvmir.mlir11
-rw-r--r--mlir/test/Target/LLVMIR/omptarget-atomic-capture-control-options.mlir44
-rw-r--r--mlir/test/Target/LLVMIR/omptarget-atomic-update-control-options.mlir36
-rw-r--r--mlir/test/Target/LLVMIR/rocdl.mlir17
-rw-r--r--mlir/test/Target/SPIRV/arm-tensor-constant.mlir56
-rw-r--r--mlir/test/Transforms/test-legalizer-fold-after.mlir9
-rw-r--r--mlir/test/Transforms/test-legalizer-fold-before.mlir9
-rw-r--r--mlir/test/Transforms/test-legalizer-no-fold.mlir12
-rw-r--r--mlir/test/Transforms/test-legalizer.mlir17
-rw-r--r--mlir/test/lib/Dialect/Test/TestOps.td26
-rw-r--r--mlir/test/lib/Dialect/Test/TestPatterns.cpp148
-rw-r--r--mlir/test/lib/Dialect/XeGPU/TestXeGPUTransforms.cpp109
-rw-r--r--mlir/test/lib/Pass/TestPassManager.cpp59
-rw-r--r--mlir/test/python/dialects/linalg/opdsl/test_core_named_ops.py2
-rw-r--r--mlir/utils/tree-sitter-mlir/dialect/linalg.js2
-rw-r--r--mlir/utils/tree-sitter-mlir/queries/highlights.scm2
-rw-r--r--offload/include/Shared/APITypes.h4
-rw-r--r--offload/liboffload/API/Queue.td3
-rw-r--r--offload/liboffload/src/OffloadImpl.cpp47
-rw-r--r--offload/libomptarget/interface.cpp4
-rw-r--r--offload/plugins-nextgen/amdgpu/src/rtl.cpp36
-rw-r--r--offload/plugins-nextgen/common/include/PluginInterface.h33
-rw-r--r--offload/plugins-nextgen/common/src/PluginInterface.cpp38
-rw-r--r--offload/plugins-nextgen/cuda/src/rtl.cpp44
-rw-r--r--offload/plugins-nextgen/host/src/rtl.cpp6
-rw-r--r--offload/unittests/CMakeLists.txt36
-rw-r--r--offload/unittests/Conformance/device_code/CMakeLists.txt8
-rw-r--r--offload/unittests/Conformance/device_code/CUDAMath.cpp178
-rw-r--r--offload/unittests/Conformance/device_code/DeviceAPIs.hpp113
-rw-r--r--offload/unittests/Conformance/device_code/HIPMath.cpp178
-rw-r--r--offload/unittests/Conformance/device_code/KernelRunner.hpp (renamed from offload/unittests/Conformance/device_code/Common.hpp)16
-rw-r--r--offload/unittests/Conformance/device_code/LLVMLibm.cpp5
-rw-r--r--offload/unittests/Conformance/include/mathtest/TestRunner.hpp4
-rw-r--r--offload/unittests/Conformance/tests/CMakeLists.txt5
-rw-r--r--offload/unittests/OffloadAPI/common/Fixtures.hpp18
-rw-r--r--offload/unittests/OffloadAPI/kernel/olLaunchKernel.cpp23
-rw-r--r--offload/unittests/OffloadAPI/queue/olGetQueueInfo.cpp6
-rw-r--r--offload/unittests/OffloadAPI/queue/olGetQueueInfoSize.cpp6
-rw-r--r--utils/bazel/llvm-project-overlay/clang/unittests/BUILD.bazel3
-rw-r--r--utils/bazel/llvm-project-overlay/libc/BUILD.bazel115
-rw-r--r--utils/bazel/llvm-project-overlay/libc/test/UnitTest/BUILD.bazel6
-rw-r--r--utils/bazel/llvm-project-overlay/libc/utils/MPFRWrapper/BUILD.bazel1
-rw-r--r--utils/bazel/llvm-project-overlay/llvm/config.bzl1
-rw-r--r--utils/bazel/llvm-project-overlay/mlir/BUILD.bazel20
-rw-r--r--utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel3
-rw-r--r--utils/bazel/third_party_build/zlib-ng.BUILD10
2178 files changed, 68016 insertions, 41460 deletions
diff --git a/.ci/generate_test_report_github.py b/.ci/generate_test_report_github.py
index 4b7f3a2..7242264 100644
--- a/.ci/generate_test_report_github.py
+++ b/.ci/generate_test_report_github.py
@@ -16,11 +16,13 @@ PLATFORM_TITLES = {
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("return_code", help="The build's return code.", type=int)
- parser.add_argument("junit_files", help="Paths to JUnit report files.", nargs="*")
+ parser.add_argument(
+ "build_test_logs", help="Paths to JUnit report files and ninja logs.", nargs="*"
+ )
args = parser.parse_args()
report = generate_test_report_lib.generate_report_from_files(
- PLATFORM_TITLES[platform.system()], args.return_code, args.junit_files
+ PLATFORM_TITLES[platform.system()], args.return_code, args.build_test_logs
)
print(report)
diff --git a/.ci/generate_test_report_lib.py b/.ci/generate_test_report_lib.py
index 25d810f..d868c08 100644
--- a/.ci/generate_test_report_lib.py
+++ b/.ci/generate_test_report_lib.py
@@ -12,6 +12,84 @@ UNRELATED_FAILURES_STR = (
"https://github.com/llvm/llvm-project/issues and add the "
"`infrastructure` label."
)
+# The maximum number of lines to pull from a ninja failure.
+NINJA_LOG_SIZE_THRESHOLD = 500
+
+
+def _parse_ninja_log(ninja_log: list[str]) -> list[tuple[str, str]]:
+ """Parses an individual ninja log."""
+ failures = []
+ index = 0
+ while index < len(ninja_log):
+ while index < len(ninja_log) and not ninja_log[index].startswith("FAILED:"):
+ index += 1
+ if index == len(ninja_log):
+            # We hit the end of the log without finding a build failure, so
+            # return what we have and move on to the next log.
+ return failures
+ # We are trying to parse cases like the following:
+ #
+ # [4/5] test/4.stamp
+ # FAILED: touch test/4.stamp
+ # touch test/4.stamp
+ #
+        # index will point to the line that starts with FAILED:. The progress
+ # indicator is the line before this ([4/5] test/4.stamp) and contains a pretty
+ # printed version of the target being built (test/4.stamp). We use this line
+ # and remove the progress information to get a succinct name for the target.
+ failing_action = ninja_log[index - 1].split("] ")[1]
+ failure_log = []
+ while (
+ index < len(ninja_log)
+ and not ninja_log[index].startswith("[")
+ and not ninja_log[index].startswith("ninja: build stopped:")
+ and len(failure_log) < NINJA_LOG_SIZE_THRESHOLD
+ ):
+ failure_log.append(ninja_log[index])
+ index += 1
+ failures.append((failing_action, "\n".join(failure_log)))
+ return failures
+
+
+def find_failure_in_ninja_logs(ninja_logs: list[list[str]]) -> list[tuple[str, str]]:
+ """Extracts failure messages from ninja output.
+
+    This function takes stdout/stderr from ninja in the form of a list of files,
+    each represented as a list of lines, and returns tuples containing the name
+    of the failed target and the error message.
+
+ Args:
+      ninja_logs: A list of log files captured from ninja, each represented as a
+        list of lines.
+
+ Returns:
+ A list of tuples. The first string is the name of the target that failed. The
+ second string is the error message.
+ """
+ failures = []
+ for ninja_log in ninja_logs:
+ log_failures = _parse_ninja_log(ninja_log)
+ failures.extend(log_failures)
+ return failures
+
+
+def _format_ninja_failures(ninja_failures: list[tuple[str, str]]) -> list[str]:
+ """Formats ninja failures into summary views for the report."""
+ output = []
+ for build_failure in ninja_failures:
+ failed_action, failure_message = build_failure
+ output.extend(
+ [
+ "<details>",
+ f"<summary>{failed_action}</summary>",
+ "",
+ "```",
+ failure_message,
+ "```",
+ "</details>",
+ ]
+ )
+ return output
# Set size_limit to limit the byte size of the report. The default is 1MB as this
@@ -24,6 +102,7 @@ def generate_report(
title,
return_code,
junit_objects,
+ ninja_logs: list[list[str]],
size_limit=1024 * 1024,
list_failures=True,
):
@@ -61,15 +140,34 @@ def generate_report(
]
)
else:
- report.extend(
- [
- "The build failed before running any tests.",
- "",
- SEE_BUILD_FILE_STR,
- "",
- UNRELATED_FAILURES_STR,
- ]
- )
+ ninja_failures = find_failure_in_ninja_logs(ninja_logs)
+ if not ninja_failures:
+ report.extend(
+ [
+ "The build failed before running any tests. Detailed "
+ "information about the build failure could not be "
+ "automatically obtained.",
+ "",
+ SEE_BUILD_FILE_STR,
+ "",
+ UNRELATED_FAILURES_STR,
+ ]
+ )
+ else:
+ report.extend(
+ [
+ "The build failed before running any tests. Click on a "
+ "failure below to see the details.",
+ "",
+ ]
+ )
+ report.extend(_format_ninja_failures(ninja_failures))
+ report.extend(
+ [
+ "",
+ UNRELATED_FAILURES_STR,
+ ]
+ )
return "\n".join(report)
tests_passed = tests_run - tests_skipped - tests_failed
@@ -114,14 +212,28 @@ def generate_report(
elif return_code != 0:
# No tests failed but the build was in a failed state. Bring this to the user's
# attention.
- report.extend(
- [
- "",
- "All tests passed but another part of the build **failed**.",
- "",
- SEE_BUILD_FILE_STR,
- ]
- )
+ ninja_failures = find_failure_in_ninja_logs(ninja_logs)
+ if not ninja_failures:
+ report.extend(
+ [
+ "",
+ "All tests passed but another part of the build **failed**. "
+ "Information about the build failure could not be automatically "
+ "obtained.",
+ "",
+ SEE_BUILD_FILE_STR,
+ ]
+ )
+ else:
+ report.extend(
+ [
+ "",
+ "All tests passed but another part of the build **failed**. Click on "
+ "a failure below to see the details.",
+ "",
+ ]
+ )
+ report.extend(_format_ninja_failures(ninja_failures))
if failures or return_code != 0:
report.extend(["", UNRELATED_FAILURES_STR])
@@ -139,9 +251,19 @@ def generate_report(
return report
-def generate_report_from_files(title, return_code, junit_files):
+def generate_report_from_files(title, return_code, build_log_files):
+ junit_files = [
+ junit_file for junit_file in build_log_files if junit_file.endswith(".xml")
+ ]
+ ninja_log_files = [
+ ninja_log for ninja_log in build_log_files if ninja_log.endswith(".log")
+ ]
+ ninja_logs = []
+ for ninja_log_file in ninja_log_files:
+ with open(ninja_log_file, "r") as ninja_log_file_handle:
+ ninja_logs.append(
+ [log_line.strip() for log_line in ninja_log_file_handle.readlines()]
+ )
return generate_report(
- title,
- return_code,
- [JUnitXml.fromfile(p) for p in junit_files],
+ title, return_code, [JUnitXml.fromfile(p) for p in junit_files], ninja_logs
)
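
(Illustration only, not part of the patch: a minimal sketch of how the ninja-log parser added above behaves. It assumes the .ci directory is on the Python path; the sample log lines and target names are made up, and the expected output follows from the FAILED:/progress-line handling in _parse_ninja_log.)

from generate_test_report_lib import find_failure_in_ninja_logs

# A made-up ninja log: one progress line, one failure, some compiler output,
# and the final "build stopped" line.
log = [
    "[1/2] obj/foo.o",
    "FAILED: obj/foo.o",
    "clang++ ... exited with code 1",
    "ninja: build stopped: subcommand failed.",
]

# Each failure is a (target, message) tuple; message collection stops at the
# next progress line, at "ninja: build stopped:", or after 500 lines.
print(find_failure_in_ninja_logs([log]))
# [('obj/foo.o', 'FAILED: obj/foo.o\nclang++ ... exited with code 1')]
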
diff --git a/.ci/generate_test_report_lib_test.py b/.ci/generate_test_report_lib_test.py
index eda76ead..466a823 100644
--- a/.ci/generate_test_report_lib_test.py
+++ b/.ci/generate_test_report_lib_test.py
@@ -8,6 +8,8 @@
import unittest
from io import StringIO
from textwrap import dedent
+import tempfile
+import os
from junitparser import JUnitXml
@@ -19,9 +21,114 @@ def junit_from_xml(xml):
class TestReports(unittest.TestCase):
+ def test_find_failure_ninja_logs(self):
+ failures = generate_test_report_lib.find_failure_in_ninja_logs(
+ [
+ [
+ "[1/5] test/1.stamp",
+ "[2/5] test/2.stamp",
+ "[3/5] test/3.stamp",
+ "[4/5] test/4.stamp",
+ "FAILED: touch test/4.stamp",
+ "Wow! This system is really broken!",
+ "[5/5] test/5.stamp",
+ ],
+ ]
+ )
+ self.assertEqual(len(failures), 1)
+ self.assertEqual(
+ failures[0],
+ (
+ "test/4.stamp",
+ dedent(
+ """\
+ FAILED: touch test/4.stamp
+ Wow! This system is really broken!"""
+ ),
+ ),
+ )
+
+ def test_no_failure_ninja_log(self):
+ failures = generate_test_report_lib.find_failure_in_ninja_logs(
+ [
+ [
+ "[1/3] test/1.stamp",
+ "[2/3] test/2.stamp",
+ "[3/3] test/3.stamp",
+ ]
+ ]
+ )
+ self.assertEqual(failures, [])
+
+ def test_ninja_log_end(self):
+ failures = generate_test_report_lib.find_failure_in_ninja_logs(
+ [
+ [
+ "[1/3] test/1.stamp",
+ "[2/3] test/2.stamp",
+ "[3/3] test/3.stamp",
+ "FAILED: touch test/3.stamp",
+ "Wow! This system is really broken!",
+ "ninja: build stopped: subcommand failed.",
+ ]
+ ]
+ )
+ self.assertEqual(len(failures), 1)
+ self.assertEqual(
+ failures[0],
+ (
+ "test/3.stamp",
+ dedent(
+ """\
+ FAILED: touch test/3.stamp
+ Wow! This system is really broken!"""
+ ),
+ ),
+ )
+
+ def test_ninja_log_multiple_failures(self):
+ failures = generate_test_report_lib.find_failure_in_ninja_logs(
+ [
+ [
+ "[1/5] test/1.stamp",
+ "[2/5] test/2.stamp",
+ "FAILED: touch test/2.stamp",
+ "Wow! This system is really broken!",
+ "[3/5] test/3.stamp",
+ "[4/5] test/4.stamp",
+ "FAILED: touch test/4.stamp",
+ "Wow! This system is maybe broken!",
+ "[5/5] test/5.stamp",
+ ]
+ ]
+ )
+ self.assertEqual(len(failures), 2)
+ self.assertEqual(
+ failures[0],
+ (
+ "test/2.stamp",
+ dedent(
+ """\
+ FAILED: touch test/2.stamp
+ Wow! This system is really broken!"""
+ ),
+ ),
+ )
+ self.assertEqual(
+ failures[1],
+ (
+ "test/4.stamp",
+ dedent(
+ """\
+ FAILED: touch test/4.stamp
+ Wow! This system is maybe broken!"""
+ ),
+ ),
+ )
+
def test_title_only(self):
self.assertEqual(
- generate_test_report_lib.generate_report("Foo", 0, []),
+ generate_test_report_lib.generate_report("Foo", 0, [], []),
dedent(
"""\
# Foo
@@ -32,12 +139,12 @@ class TestReports(unittest.TestCase):
def test_title_only_failure(self):
self.assertEqual(
- generate_test_report_lib.generate_report("Foo", 1, []),
+ generate_test_report_lib.generate_report("Foo", 1, [], []),
dedent(
"""\
# Foo
- The build failed before running any tests.
+ The build failed before running any tests. Detailed information about the build failure could not be automatically obtained.
Download the build's log file to see the details.
@@ -45,6 +152,45 @@ class TestReports(unittest.TestCase):
),
)
+ def test_title_only_failure_ninja_log(self):
+ self.assertEqual(
+ generate_test_report_lib.generate_report(
+ "Foo",
+ 1,
+ [],
+ [
+ [
+ "[1/5] test/1.stamp",
+ "[2/5] test/2.stamp",
+ "[3/5] test/3.stamp",
+ "[4/5] test/4.stamp",
+ "FAILED: test/4.stamp",
+ "touch test/4.stamp",
+ "Wow! Risk!",
+ "[5/5] test/5.stamp",
+ ]
+ ],
+ ),
+ dedent(
+ """\
+ # Foo
+
+ The build failed before running any tests. Click on a failure below to see the details.
+
+ <details>
+ <summary>test/4.stamp</summary>
+
+ ```
+ FAILED: test/4.stamp
+ touch test/4.stamp
+ Wow! Risk!
+ ```
+ </details>
+
+ If these failures are unrelated to your changes (for example tests are broken or flaky at HEAD), please open an issue at https://github.com/llvm/llvm-project/issues and add the `infrastructure` label."""
+ ),
+ )
+
def test_no_tests_in_testsuite(self):
self.assertEqual(
generate_test_report_lib.generate_report(
@@ -62,12 +208,13 @@ class TestReports(unittest.TestCase):
)
)
],
+ [],
),
dedent(
"""\
# Foo
- The build failed before running any tests.
+ The build failed before running any tests. Detailed information about the build failure could not be automatically obtained.
Download the build's log file to see the details.
@@ -93,6 +240,7 @@ class TestReports(unittest.TestCase):
)
)
],
+ [],
),
(
dedent(
@@ -122,6 +270,7 @@ class TestReports(unittest.TestCase):
)
)
],
+ [],
),
(
dedent(
@@ -130,7 +279,7 @@ class TestReports(unittest.TestCase):
* 1 test passed
- All tests passed but another part of the build **failed**.
+ All tests passed but another part of the build **failed**. Information about the build failure could not be automatically obtained.
Download the build's log file to see the details.
@@ -139,6 +288,155 @@ class TestReports(unittest.TestCase):
),
)
+ def test_no_failures_build_failed_ninja_log(self):
+ self.assertEqual(
+ generate_test_report_lib.generate_report(
+ "Foo",
+ 1,
+ [
+ junit_from_xml(
+ dedent(
+ """\
+ <?xml version="1.0" encoding="UTF-8"?>
+ <testsuites time="0.00">
+ <testsuite name="Passed" tests="1" failures="0" skipped="0" time="0.00">
+ <testcase classname="Bar/test_1" name="test_1" time="0.00"/>
+ </testsuite>
+ </testsuites>"""
+ )
+ )
+ ],
+ [
+ [
+ "[1/5] test/1.stamp",
+ "[2/5] test/2.stamp",
+ "[3/5] test/3.stamp",
+ "[4/5] test/4.stamp",
+ "FAILED: test/4.stamp",
+ "touch test/4.stamp",
+ "Wow! Close To You!",
+ "[5/5] test/5.stamp",
+ ]
+ ],
+ ),
+ (
+ dedent(
+ """\
+ # Foo
+
+ * 1 test passed
+
+ All tests passed but another part of the build **failed**. Click on a failure below to see the details.
+
+ <details>
+ <summary>test/4.stamp</summary>
+
+ ```
+ FAILED: test/4.stamp
+ touch test/4.stamp
+ Wow! Close To You!
+ ```
+ </details>
+
+ If these failures are unrelated to your changes (for example tests are broken or flaky at HEAD), please open an issue at https://github.com/llvm/llvm-project/issues and add the `infrastructure` label."""
+ )
+ ),
+ )
+
+ def test_no_failures_multiple_build_failed_ninja_log(self):
+ test = generate_test_report_lib.generate_report(
+ "Foo",
+ 1,
+ [
+ junit_from_xml(
+ dedent(
+ """\
+ <?xml version="1.0" encoding="UTF-8"?>
+ <testsuites time="0.00">
+ <testsuite name="Passed" tests="1" failures="0" skipped="0" time="0.00">
+ <testcase classname="Bar/test_1" name="test_1" time="0.00"/>
+ </testsuite>
+ </testsuites>"""
+ )
+ )
+ ],
+ [
+ [
+ "[1/5] test/1.stamp",
+ "[2/5] test/2.stamp",
+ "FAILED: touch test/2.stamp",
+ "Wow! Be Kind!",
+ "[3/5] test/3.stamp",
+ "[4/5] test/4.stamp",
+ "FAILED: touch test/4.stamp",
+ "Wow! I Dare You!",
+ "[5/5] test/5.stamp",
+ ]
+ ],
+ )
+ print(test)
+ self.assertEqual(
+ generate_test_report_lib.generate_report(
+ "Foo",
+ 1,
+ [
+ junit_from_xml(
+ dedent(
+ """\
+ <?xml version="1.0" encoding="UTF-8"?>
+ <testsuites time="0.00">
+ <testsuite name="Passed" tests="1" failures="0" skipped="0" time="0.00">
+ <testcase classname="Bar/test_1" name="test_1" time="0.00"/>
+ </testsuite>
+ </testsuites>"""
+ )
+ )
+ ],
+ [
+ [
+ "[1/5] test/1.stamp",
+ "[2/5] test/2.stamp",
+ "FAILED: touch test/2.stamp",
+ "Wow! Be Kind!",
+ "[3/5] test/3.stamp",
+ "[4/5] test/4.stamp",
+ "FAILED: touch test/4.stamp",
+ "Wow! I Dare You!",
+ "[5/5] test/5.stamp",
+ ]
+ ],
+ ),
+ (
+ dedent(
+ """\
+ # Foo
+
+ * 1 test passed
+
+ All tests passed but another part of the build **failed**. Click on a failure below to see the details.
+
+ <details>
+ <summary>test/2.stamp</summary>
+
+ ```
+ FAILED: touch test/2.stamp
+ Wow! Be Kind!
+ ```
+ </details>
+ <details>
+ <summary>test/4.stamp</summary>
+
+ ```
+ FAILED: touch test/4.stamp
+ Wow! I Dare You!
+ ```
+ </details>
+
+ If these failures are unrelated to your changes (for example tests are broken or flaky at HEAD), please open an issue at https://github.com/llvm/llvm-project/issues and add the `infrastructure` label."""
+ )
+ ),
+ )
+
def test_report_single_file_single_testsuite(self):
self.assertEqual(
generate_test_report_lib.generate_report(
@@ -166,6 +464,7 @@ class TestReports(unittest.TestCase):
)
)
],
+ [],
),
(
dedent(
@@ -261,6 +560,7 @@ class TestReports(unittest.TestCase):
)
)
],
+ [],
),
self.MULTI_SUITE_OUTPUT,
)
@@ -302,6 +602,7 @@ class TestReports(unittest.TestCase):
)
),
],
+ [],
),
self.MULTI_SUITE_OUTPUT,
)
@@ -326,6 +627,7 @@ class TestReports(unittest.TestCase):
)
)
],
+ [],
list_failures=False,
),
(
@@ -362,6 +664,7 @@ class TestReports(unittest.TestCase):
)
)
],
+ [],
list_failures=False,
),
(
@@ -401,6 +704,7 @@ class TestReports(unittest.TestCase):
)
)
],
+ [],
size_limit=512,
),
(
@@ -416,3 +720,59 @@ class TestReports(unittest.TestCase):
)
),
)
+
+ def test_generate_report_end_to_end(self):
+ with tempfile.TemporaryDirectory() as temp_dir:
+ junit_xml_file = os.path.join(temp_dir, "junit.xml")
+ with open(junit_xml_file, "w") as junit_xml_handle:
+ junit_xml_handle.write(
+ dedent(
+ """\
+ <?xml version="1.0" encoding="UTF-8"?>
+ <testsuites time="0.00">
+ <testsuite name="Passed" tests="1" failures="0" skipped="0" time="0.00">
+ <testcase classname="Bar/test_1" name="test_1" time="0.00"/>
+ </testsuite>
+ </testsuites>"""
+ )
+ )
+ ninja_log_file = os.path.join(temp_dir, "ninja.log")
+ with open(ninja_log_file, "w") as ninja_log_handle:
+ ninja_log_handle.write(
+ dedent(
+ """\
+ [1/5] test/1.stamp
+ [2/5] test/2.stamp
+ [3/5] test/3.stamp
+ [4/5] test/4.stamp
+ FAILED: test/4.stamp
+ touch test/4.stamp
+ Wow! That's so True!
+ [5/5] test/5.stamp"""
+ )
+ )
+ self.assertEqual(
+ generate_test_report_lib.generate_report_from_files(
+ "Foo", 1, [junit_xml_file, ninja_log_file]
+ ),
+ dedent(
+ """\
+ # Foo
+
+ * 1 test passed
+
+ All tests passed but another part of the build **failed**. Click on a failure below to see the details.
+
+ <details>
+ <summary>test/4.stamp</summary>
+
+ ```
+ FAILED: test/4.stamp
+ touch test/4.stamp
+ Wow! That's so True!
+ ```
+ </details>
+
+ If these failures are unrelated to your changes (for example tests are broken or flaky at HEAD), please open an issue at https://github.com/llvm/llvm-project/issues and add the `infrastructure` label."""
+ ),
+ )
diff --git a/.ci/monolithic-linux.sh b/.ci/monolithic-linux.sh
index b9afc7a..75729b3 100755
--- a/.ci/monolithic-linux.sh
+++ b/.ci/monolithic-linux.sh
@@ -29,7 +29,7 @@ runtime_targets="${4}"
runtime_targets_needs_reconfig="${5}"
enable_cir="${6}"
-lit_args="-v --xunit-xml-output ${BUILD_DIR}/test-results.xml --use-unique-output-file-name --timeout=1200 --time-tests"
+lit_args="-v --xunit-xml-output ${BUILD_DIR}/test-results.xml --use-unique-output-file-name --timeout=1200 --time-tests --succinct"
start-group "CMake"
export PIP_BREAK_SYSTEM_PACKAGES=1
@@ -65,12 +65,12 @@ cmake -S "${MONOREPO_ROOT}"/llvm -B "${BUILD_DIR}" \
start-group "ninja"
# Targets are not escaped as they are passed as separate arguments.
-ninja -C "${BUILD_DIR}" -k 0 ${targets}
+ninja -C "${BUILD_DIR}" -k 0 ${targets} |& tee ninja.log
if [[ "${runtime_targets}" != "" ]]; then
start-group "ninja Runtimes"
- ninja -C "${BUILD_DIR}" ${runtime_targets}
+ ninja -C "${BUILD_DIR}" ${runtime_targets} |& tee ninja_runtimes.log
fi
# Compiling runtimes with just-built Clang and running their tests
@@ -85,7 +85,8 @@ if [[ "${runtime_targets_needs_reconfig}" != "" ]]; then
start-group "ninja Runtimes C++26"
- ninja -C "${BUILD_DIR}" ${runtime_targets_needs_reconfig}
+ ninja -C "${BUILD_DIR}" ${runtime_targets_needs_reconfig} \
+ |& tee ninja_runtimes_needs_reconfig1.log
start-group "CMake Runtimes Clang Modules"
@@ -96,5 +97,6 @@ if [[ "${runtime_targets_needs_reconfig}" != "" ]]; then
start-group "ninja Runtimes Clang Modules"
- ninja -C "${BUILD_DIR}" ${runtime_targets_needs_reconfig}
+ ninja -C "${BUILD_DIR}" ${runtime_targets_needs_reconfig} \
+ |& tee ninja_runtimes_needs_reconfig2.log
fi
diff --git a/.ci/monolithic-windows.sh b/.ci/monolithic-windows.sh
index 2e81e27..0f3a199 100755
--- a/.ci/monolithic-windows.sh
+++ b/.ci/monolithic-windows.sh
@@ -39,7 +39,7 @@ cmake -S "${MONOREPO_ROOT}"/llvm -B "${BUILD_DIR}" \
-D LLVM_ENABLE_ASSERTIONS=ON \
-D LLVM_BUILD_EXAMPLES=ON \
-D COMPILER_RT_BUILD_LIBFUZZER=OFF \
- -D LLVM_LIT_ARGS="-v --xunit-xml-output ${BUILD_DIR}/test-results.xml --use-unique-output-file-name --timeout=1200 --time-tests" \
+ -D LLVM_LIT_ARGS="-v --xunit-xml-output ${BUILD_DIR}/test-results.xml --use-unique-output-file-name --timeout=1200 --time-tests --succinct" \
-D COMPILER_RT_BUILD_ORC=OFF \
-D CMAKE_C_COMPILER_LAUNCHER=sccache \
-D CMAKE_CXX_COMPILER_LAUNCHER=sccache \
@@ -51,4 +51,4 @@ cmake -S "${MONOREPO_ROOT}"/llvm -B "${BUILD_DIR}" \
start-group "ninja"
# Targets are not escaped as they are passed as separate arguments.
-ninja -C "${BUILD_DIR}" -k 0 ${targets}
+ninja -C "${BUILD_DIR}" -k 0 ${targets} |& tee ninja.log
diff --git a/.ci/utils.sh b/.ci/utils.sh
index 6656ffe..30bf2d9 100644
--- a/.ci/utils.sh
+++ b/.ci/utils.sh
@@ -33,7 +33,8 @@ function at-exit {
if [[ "$GITHUB_STEP_SUMMARY" != "" ]]; then
python "${MONOREPO_ROOT}"/.ci/generate_test_report_github.py \
- $retcode "${BUILD_DIR}"/test-results.*.xml >> $GITHUB_STEP_SUMMARY
+ $retcode "${BUILD_DIR}"/test-results.*.xml "${BUILD_DIR}"/ninja*.log \
+ >> $GITHUB_STEP_SUMMARY
fi
}
trap at-exit EXIT
diff --git a/.github/workflows/mlir-spirv-tests.yml b/.github/workflows/mlir-spirv-tests.yml
new file mode 100644
index 0000000..48b6c69
--- /dev/null
+++ b/.github/workflows/mlir-spirv-tests.yml
@@ -0,0 +1,32 @@
+name: MLIR SPIR-V Tests
+
+permissions:
+ contents: read
+
+on:
+ workflow_dispatch:
+ pull_request:
+ paths:
+ - 'mlir/include/mlir/Dialect/SPIRV/**'
+ - 'mlir/lib/Dialect/SPIRV/**'
+ - 'mlir/include/mlir/Target/SPIRV/**'
+ - 'mlir/lib/Target/SPIRV/**'
+ - 'mlir/test/Target/SPIRV/**'
+ - '.github/workflows/mlir-spirv-tests.yml'
+
+concurrency:
+ # Skip intermediate builds: always.
+ # Cancel intermediate builds: only if it is a pull request build.
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: ${{ startsWith(github.ref, 'refs/pull/') }}
+
+jobs:
+ check_spirv:
+ if: github.repository_owner == 'llvm'
+ name: Test MLIR SPIR-V
+ uses: ./.github/workflows/llvm-project-tests.yml
+ with:
+ build_target: check-mlir
+ projects: mlir
+ extra_cmake_args: '-DLLVM_TARGETS_TO_BUILD="host" -DLLVM_INCLUDE_SPIRV_TOOLS_TESTS=ON'
+ os_list: '["ubuntu-24.04"]'
diff --git a/.github/workflows/premerge.yaml b/.github/workflows/premerge.yaml
index d0518fa..6e59841 100644
--- a/.github/workflows/premerge.yaml
+++ b/.github/workflows/premerge.yaml
@@ -35,9 +35,6 @@ jobs:
with:
fetch-depth: 2
- name: Build and Test
- # Mark the job as a success even if the step fails so that people do
- # not get notified while the new premerge pipeline is in an
- # experimental state.
run: |
git config --global --add safe.directory '*'
@@ -109,9 +106,6 @@ jobs:
echo "windows-projects=${projects_to_build}" >> $GITHUB_OUTPUT
echo "windows-check-targets=${project_check_targets}" >> $GITHUB_OUTPUT
- name: Build and Test
- # Mark the job as a success even if the step fails so that people do
- # not get notified while the new premerge pipeline is in an
- # experimental state.
if: ${{ steps.vars.outputs.windows-projects != '' }}
shell: cmd
run: |
diff --git a/bolt/include/bolt/Profile/DataAggregator.h b/bolt/include/bolt/Profile/DataAggregator.h
index cb1b87f..db0f690 100644
--- a/bolt/include/bolt/Profile/DataAggregator.h
+++ b/bolt/include/bolt/Profile/DataAggregator.h
@@ -502,9 +502,6 @@ private:
/// entries).
void imputeFallThroughs();
- /// Register profiled functions for lite mode.
- void registerProfiledFunctions();
-
/// Debugging dump methods
void dump() const;
void dump(const PerfBranchSample &Sample) const;
diff --git a/bolt/lib/Passes/FrameOptimizer.cpp b/bolt/lib/Passes/FrameOptimizer.cpp
index 81d4d93..b0b7207f 100644
--- a/bolt/lib/Passes/FrameOptimizer.cpp
+++ b/bolt/lib/Passes/FrameOptimizer.cpp
@@ -224,6 +224,11 @@ Error FrameOptimizerPass::runOnFunctions(BinaryContext &BC) {
if (opts::FrameOptimization == FOP_NONE)
return Error::success();
+ if (!BC.isX86()) {
+ BC.errs() << "BOLT-ERROR: " << getName() << " is supported only on X86\n";
+ exit(1);
+ }
+
std::unique_ptr<BinaryFunctionCallGraph> CG;
std::unique_ptr<FrameAnalysis> FA;
std::unique_ptr<RegAnalysis> RA;
diff --git a/bolt/lib/Profile/DataAggregator.cpp b/bolt/lib/Profile/DataAggregator.cpp
index c13fa6d..3604fdd 100644
--- a/bolt/lib/Profile/DataAggregator.cpp
+++ b/bolt/lib/Profile/DataAggregator.cpp
@@ -581,26 +581,6 @@ void DataAggregator::imputeFallThroughs() {
outs() << "BOLT-INFO: imputed " << InferredTraces << " traces\n";
}
-void DataAggregator::registerProfiledFunctions() {
- DenseSet<uint64_t> Addrs;
- for (const auto &Trace : llvm::make_first_range(Traces)) {
- if (Trace.Branch != Trace::FT_ONLY &&
- Trace.Branch != Trace::FT_EXTERNAL_ORIGIN)
- Addrs.insert(Trace.Branch);
- Addrs.insert(Trace.From);
- }
-
- for (const auto [PC, _] : BasicSamples)
- Addrs.insert(PC);
-
- for (const PerfMemSample &MemSample : MemSamples)
- Addrs.insert(MemSample.PC);
-
- for (const uint64_t Addr : Addrs)
- if (BinaryFunction *Func = getBinaryFunctionContainingAddress(Addr))
- Func->setHasProfileAvailable();
-}
-
Error DataAggregator::preprocessProfile(BinaryContext &BC) {
this->BC = &BC;
@@ -623,7 +603,6 @@ Error DataAggregator::preprocessProfile(BinaryContext &BC) {
exit(0);
}
- registerProfiledFunctions();
return Error::success();
}
@@ -1368,6 +1347,10 @@ std::error_code DataAggregator::parseAggregatedLBREntry() {
}
const uint64_t FromOffset = Addr[0]->Offset;
+ BinaryFunction *FromFunc = getBinaryFunctionContainingAddress(FromOffset);
+ if (FromFunc)
+ FromFunc->setHasProfileAvailable();
+
int64_t Count = Counters[0];
int64_t Mispreds = Counters[1];
@@ -1378,6 +1361,11 @@ std::error_code DataAggregator::parseAggregatedLBREntry() {
return std::error_code();
}
+ const uint64_t ToOffset = Addr[1]->Offset;
+ BinaryFunction *ToFunc = getBinaryFunctionContainingAddress(ToOffset);
+ if (ToFunc)
+ ToFunc->setHasProfileAvailable();
+
/// For fall-through types, adjust locations to match Trace container.
if (Type == FT || Type == FT_EXTERNAL_ORIGIN || Type == FT_EXTERNAL_RETURN) {
Addr[2] = Location(Addr[1]->Offset); // Trace To
@@ -1625,6 +1613,9 @@ std::error_code DataAggregator::parseBranchEvents() {
Traces.reserve(TraceMap.size());
for (const auto &[Trace, Info] : TraceMap) {
Traces.emplace_back(Trace, Info);
+ for (const uint64_t Addr : {Trace.Branch, Trace.From})
+ if (BinaryFunction *BF = getBinaryFunctionContainingAddress(Addr))
+ BF->setHasProfileAvailable();
}
clear(TraceMap);
@@ -1685,6 +1676,9 @@ std::error_code DataAggregator::parseBasicEvents() {
continue;
++NumTotalSamples;
+ if (BinaryFunction *BF = getBinaryFunctionContainingAddress(Sample->PC))
+ BF->setHasProfileAvailable();
+
++BasicSamples[Sample->PC];
EventNames.insert(Sample->EventName);
}
@@ -1722,6 +1716,9 @@ std::error_code DataAggregator::parseMemEvents() {
if (std::error_code EC = Sample.getError())
return EC;
+ if (BinaryFunction *BF = getBinaryFunctionContainingAddress(Sample->PC))
+ BF->setHasProfileAvailable();
+
MemSamples.emplace_back(std::move(Sample.get()));
}
diff --git a/bolt/test/AArch64/unsupported-passes.test b/bolt/test/AArch64/unsupported-passes.test
new file mode 100644
index 0000000..886fc1c
--- /dev/null
+++ b/bolt/test/AArch64/unsupported-passes.test
@@ -0,0 +1,8 @@
+// Checks that passes not fully supported on AArch64 are handled appropriately.
+
+// REQUIRES: system-linux,asserts,target=aarch64{{.*}}
+
+RUN: %clang %cflags %p/../Inputs/hello.c -o %t -Wl,-q
+RUN: not llvm-bolt %t -o %t.bolt --frame-opt=all 2>&1 | FileCheck %s
+
+CHECK: BOLT-ERROR: frame-optimizer is supported only on X86
diff --git a/bolt/unittests/Core/CMakeLists.txt b/bolt/unittests/Core/CMakeLists.txt
index 54e8ea1..f10b0d9 100644
--- a/bolt/unittests/Core/CMakeLists.txt
+++ b/bolt/unittests/Core/CMakeLists.txt
@@ -11,6 +11,11 @@ add_bolt_unittest(CoreTests
MemoryMaps.cpp
DynoStats.cpp
+ # FIXME CoreTests uses `llvm::detail::TakeError(llvm::Error)`, but linking
+ # to LLVMTestingSupport introduces a transitive dependency on the
+ # dynamic LLVM library when LLVM_LINK_LLVM_DYLIB is ON.
+ ${LLVM_MAIN_SRC_DIR}/lib/Testing/Support/Error.cpp
+
DISABLE_LLVM_LINK_LLVM_DYLIB
)
@@ -20,7 +25,6 @@ target_link_libraries(CoreTests
LLVMBOLTRewrite
LLVMBOLTProfile
LLVMBOLTUtils
- LLVMTestingSupport
)
foreach (tgt ${BOLT_TARGETS_TO_BUILD})
diff --git a/bolt/unittests/Profile/CMakeLists.txt b/bolt/unittests/Profile/CMakeLists.txt
index ce01c6c4..7b3cbd2 100644
--- a/bolt/unittests/Profile/CMakeLists.txt
+++ b/bolt/unittests/Profile/CMakeLists.txt
@@ -16,7 +16,6 @@ target_link_libraries(ProfileTests
LLVMBOLTCore
LLVMBOLTProfile
LLVMTargetParser
- LLVMTestingSupport
)
foreach (tgt ${BOLT_TARGETS_TO_BUILD})
diff --git a/clang-tools-extra/clang-change-namespace/ChangeNamespace.cpp b/clang-tools-extra/clang-change-namespace/ChangeNamespace.cpp
index 3e367ab..471ca45 100644
--- a/clang-tools-extra/clang-change-namespace/ChangeNamespace.cpp
+++ b/clang-tools-extra/clang-change-namespace/ChangeNamespace.cpp
@@ -31,24 +31,9 @@ llvm::SmallVector<llvm::StringRef, 4> splitSymbolName(llvm::StringRef Name) {
return Splitted;
}
-SourceLocation startLocationForType(TypeLoc TLoc) {
- // For elaborated types (e.g. `struct a::A`) we want the portion after the
- // `struct` but including the namespace qualifier, `a::`.
- if (TLoc.getTypeLocClass() == TypeLoc::Elaborated) {
- NestedNameSpecifierLoc NestedNameSpecifier =
- TLoc.castAs<ElaboratedTypeLoc>().getQualifierLoc();
- if (NestedNameSpecifier.getNestedNameSpecifier())
- return NestedNameSpecifier.getBeginLoc();
- TLoc = TLoc.getNextTypeLoc();
- }
- return TLoc.getBeginLoc();
-}
-
SourceLocation endLocationForType(TypeLoc TLoc) {
- // Dig past any namespace or keyword qualifications.
- while (TLoc.getTypeLocClass() == TypeLoc::Elaborated ||
- TLoc.getTypeLocClass() == TypeLoc::Qualified)
- TLoc = TLoc.getNextTypeLoc();
+ if (auto QTL = TLoc.getAs<QualifiedTypeLoc>())
+ TLoc = QTL.getUnqualifiedLoc();
// The location for template specializations (e.g. Foo<int>) includes the
// templated types in its location range. We want to restrict this to just
@@ -550,8 +535,8 @@ void ChangeNamespaceTool::run(
Result.Nodes.getNodeAs<NestedNameSpecifierLoc>(
"nested_specifier_loc")) {
SourceLocation Start = Specifier->getBeginLoc();
- SourceLocation End = endLocationForType(Specifier->getTypeLoc());
- fixTypeLoc(Result, Start, End, Specifier->getTypeLoc());
+ SourceLocation End = endLocationForType(Specifier->castAsTypeLoc());
+ fixTypeLoc(Result, Start, End, Specifier->castAsTypeLoc());
} else if (const auto *BaseInitializer =
Result.Nodes.getNodeAs<CXXCtorInitializer>(
"base_initializer")) {
@@ -562,19 +547,16 @@ void ChangeNamespaceTool::run(
// filtered by matchers in some cases, e.g. the type is templated. We should
// handle the record type qualifier instead.
TypeLoc Loc = *TLoc;
- while (Loc.getTypeLocClass() == TypeLoc::Qualified)
- Loc = Loc.getNextTypeLoc();
- if (Loc.getTypeLocClass() == TypeLoc::Elaborated) {
- NestedNameSpecifierLoc NestedNameSpecifier =
- Loc.castAs<ElaboratedTypeLoc>().getQualifierLoc();
- // FIXME: avoid changing injected class names.
- if (auto *NNS = NestedNameSpecifier.getNestedNameSpecifier()) {
- const Type *SpecifierType = NNS->getAsType();
- if (SpecifierType && SpecifierType->isRecordType())
- return;
- }
- }
- fixTypeLoc(Result, startLocationForType(Loc), endLocationForType(Loc), Loc);
+ if (auto QTL = Loc.getAs<QualifiedTypeLoc>())
+ Loc = QTL.getUnqualifiedLoc();
+ // FIXME: avoid changing injected class names.
+ if (NestedNameSpecifier NestedNameSpecifier =
+ Loc.getPrefix().getNestedNameSpecifier();
+ NestedNameSpecifier.getKind() == NestedNameSpecifier::Kind::Type &&
+ NestedNameSpecifier.getAsType()->isRecordType())
+ return;
+ fixTypeLoc(Result, Loc.getNonElaboratedBeginLoc(), endLocationForType(Loc),
+ Loc);
} else if (const auto *VarRef =
Result.Nodes.getNodeAs<DeclRefExpr>("var_ref")) {
const auto *Var = Result.Nodes.getNodeAs<VarDecl>("var_decl");
@@ -588,10 +570,9 @@ void ChangeNamespaceTool::run(
} else if (const auto *EnumConstRef =
Result.Nodes.getNodeAs<DeclRefExpr>("enum_const_ref")) {
// Do not rename the reference if it is already scoped by the EnumDecl name.
- if (EnumConstRef->hasQualifier() &&
- EnumConstRef->getQualifier()->getKind() ==
- NestedNameSpecifier::SpecifierKind::TypeSpec &&
- EnumConstRef->getQualifier()->getAsType()->isEnumeralType())
+ if (NestedNameSpecifier Qualifier = EnumConstRef->getQualifier();
+ Qualifier.getKind() == NestedNameSpecifier::Kind::Type &&
+ Qualifier.getAsType()->isEnumeralType())
return;
const auto *EnumConstDecl =
Result.Nodes.getNodeAs<EnumConstantDecl>("enum_const_decl");
diff --git a/clang-tools-extra/clang-doc/Serialize.cpp b/clang-tools-extra/clang-doc/Serialize.cpp
index de73f68..bcab4f1 100644
--- a/clang-tools-extra/clang-doc/Serialize.cpp
+++ b/clang-tools-extra/clang-doc/Serialize.cpp
@@ -902,8 +902,8 @@ parseBases(RecordInfo &I, const CXXRecordDecl *D, bool IsFileInRootDir,
return;
for (const CXXBaseSpecifier &B : D->bases()) {
if (const RecordType *Ty = B.getType()->getAs<RecordType>()) {
- if (const CXXRecordDecl *Base =
- cast_or_null<CXXRecordDecl>(Ty->getDecl()->getDefinition())) {
+ if (const CXXRecordDecl *Base = cast_or_null<CXXRecordDecl>(
+ Ty->getOriginalDecl()->getDefinition())) {
// Initialized without USR and name, this will be set in the following
// if-else stmt.
BaseRecordInfo BI(
diff --git a/clang-tools-extra/clang-include-fixer/find-all-symbols/FindAllSymbols.cpp b/clang-tools-extra/clang-include-fixer/find-all-symbols/FindAllSymbols.cpp
index bb48883..1f30d27 100644
--- a/clang-tools-extra/clang-include-fixer/find-all-symbols/FindAllSymbols.cpp
+++ b/clang-tools-extra/clang-include-fixer/find-all-symbols/FindAllSymbols.cpp
@@ -216,8 +216,7 @@ void FindAllSymbols::registerMatchers(MatchFinder *MatchFinder) {
// Uses of most types: just look at what the typeLoc refers to.
MatchFinder->addMatcher(
typeLoc(isExpansionInMainFile(),
- loc(qualType(allOf(unless(elaboratedType()),
- hasDeclaration(Types.bind("use")))))),
+ loc(qualType(hasDeclaration(Types.bind("use"))))),
this);
// Uses of typedefs: these are often transparent to hasDeclaration, so we need
// to handle them explicitly.
diff --git a/clang-tools-extra/clang-tidy/ClangTidyDiagnosticConsumer.cpp b/clang-tools-extra/clang-tidy/ClangTidyDiagnosticConsumer.cpp
index f9d7597..fac6e04 100644
--- a/clang-tools-extra/clang-tidy/ClangTidyDiagnosticConsumer.cpp
+++ b/clang-tools-extra/clang-tidy/ClangTidyDiagnosticConsumer.cpp
@@ -533,7 +533,8 @@ void ClangTidyDiagnosticConsumer::forwardDiagnostic(const Diagnostic &Info) {
Builder << reinterpret_cast<const NamedDecl *>(Info.getRawArg(Index));
break;
case clang::DiagnosticsEngine::ak_nestednamespec:
- Builder << reinterpret_cast<NestedNameSpecifier *>(Info.getRawArg(Index));
+ Builder << NestedNameSpecifier::getFromVoidPointer(
+ reinterpret_cast<void *>(Info.getRawArg(Index)));
break;
case clang::DiagnosticsEngine::ak_declcontext:
Builder << reinterpret_cast<DeclContext *>(Info.getRawArg(Index));
diff --git a/clang-tools-extra/clang-tidy/bugprone/CrtpConstructorAccessibilityCheck.cpp b/clang-tools-extra/clang-tidy/bugprone/CrtpConstructorAccessibilityCheck.cpp
index 6565fa3..0625468 100644
--- a/clang-tools-extra/clang-tidy/bugprone/CrtpConstructorAccessibilityCheck.cpp
+++ b/clang-tools-extra/clang-tidy/bugprone/CrtpConstructorAccessibilityCheck.cpp
@@ -43,7 +43,8 @@ static bool isDerivedClassBefriended(const CXXRecordDecl *CRTP,
return false;
}
- return FriendType->getType()->getAsCXXRecordDecl() == Derived;
+ return declaresSameEntity(FriendType->getType()->getAsCXXRecordDecl(),
+ Derived);
});
}
@@ -55,7 +56,8 @@ getDerivedParameter(const ClassTemplateSpecializationDecl *CRTP,
CRTP->getTemplateArgs().asArray(), [&](const TemplateArgument &Arg) {
++Idx;
return Arg.getKind() == TemplateArgument::Type &&
- Arg.getAsType()->getAsCXXRecordDecl() == Derived;
+ declaresSameEntity(Arg.getAsType()->getAsCXXRecordDecl(),
+ Derived);
});
return AnyOf ? CRTP->getSpecializedTemplate()
diff --git a/clang-tools-extra/clang-tidy/bugprone/EasilySwappableParametersCheck.cpp b/clang-tools-extra/clang-tidy/bugprone/EasilySwappableParametersCheck.cpp
index a179d4bf..3cacb90 100644
--- a/clang-tools-extra/clang-tidy/bugprone/EasilySwappableParametersCheck.cpp
+++ b/clang-tools-extra/clang-tidy/bugprone/EasilySwappableParametersCheck.cpp
@@ -577,7 +577,7 @@ approximateImplicitConversion(const TheCheck &Check, QualType LType,
ImplicitConversionModellingMode ImplicitMode);
static inline bool isUselessSugar(const Type *T) {
- return isa<AttributedType, DecayedType, ElaboratedType, ParenType>(T);
+ return isa<AttributedType, DecayedType, ParenType>(T);
}
namespace {
@@ -1040,7 +1040,9 @@ approximateStandardConversionSequence(const TheCheck &Check, QualType From,
const auto *ToRecord = To->getAsCXXRecordDecl();
if (isDerivedToBase(FromRecord, ToRecord)) {
LLVM_DEBUG(llvm::dbgs() << "--- approximateStdConv. Derived To Base.\n");
- WorkType = QualType{ToRecord->getTypeForDecl(), FastQualifiersToApply};
+ WorkType = QualType{
+ ToRecord->getASTContext().getCanonicalTagType(ToRecord)->getTypePtr(),
+ FastQualifiersToApply};
}
if (Ctx.getLangOpts().CPlusPlus17 && FromPtr && ToPtr) {
@@ -1072,9 +1074,9 @@ approximateStandardConversionSequence(const TheCheck &Check, QualType From,
WorkType = To;
}
- if (WorkType == To) {
+ if (Ctx.hasSameType(WorkType, To)) {
LLVM_DEBUG(llvm::dbgs() << "<<< approximateStdConv. Reached 'To' type.\n");
- return {WorkType};
+ return {Ctx.getCommonSugaredType(WorkType, To)};
}
LLVM_DEBUG(llvm::dbgs() << "<<< approximateStdConv. Did not reach 'To'.\n");
@@ -1219,7 +1221,7 @@ tryConversionOperators(const TheCheck &Check, const CXXRecordDecl *RD,
if (std::optional<UserDefinedConversionSelector::PreparedConversion>
SelectedConversion = ConversionSet()) {
- QualType RecordType{RD->getTypeForDecl(), 0};
+ CanQualType RecordType = RD->getASTContext().getCanonicalTagType(RD);
ConversionSequence Result{RecordType, ToType};
// The conversion from the operator call's return type to ToType was
@@ -1270,7 +1272,7 @@ tryConvertingConstructors(const TheCheck &Check, QualType FromType,
if (std::optional<UserDefinedConversionSelector::PreparedConversion>
SelectedConversion = ConversionSet()) {
- QualType RecordType{RD->getTypeForDecl(), 0};
+ CanQualType RecordType = RD->getASTContext().getCanonicalTagType(RD);
ConversionSequence Result{FromType, RecordType};
Result.AfterFirstStandard = SelectedConversion->Seq.AfterFirstStandard;
diff --git a/clang-tools-extra/clang-tidy/bugprone/ForwardDeclarationNamespaceCheck.cpp b/clang-tools-extra/clang-tidy/bugprone/ForwardDeclarationNamespaceCheck.cpp
index 75ef628..070ed04 100644
--- a/clang-tools-extra/clang-tidy/bugprone/ForwardDeclarationNamespaceCheck.cpp
+++ b/clang-tools-extra/clang-tidy/bugprone/ForwardDeclarationNamespaceCheck.cpp
@@ -69,10 +69,9 @@ void ForwardDeclarationNamespaceCheck::check(
// struct B { friend A; };
// \endcode
// `A` will not be marked as "referenced" in the AST.
- if (const TypeSourceInfo *Tsi = Decl->getFriendType()) {
- QualType Desugared = Tsi->getType().getDesugaredType(*Result.Context);
- FriendTypes.insert(Desugared.getTypePtr());
- }
+ if (const TypeSourceInfo *Tsi = Decl->getFriendType())
+ FriendTypes.insert(
+ Tsi->getType()->getCanonicalTypeUnqualified().getTypePtr());
}
}
@@ -119,7 +118,9 @@ void ForwardDeclarationNamespaceCheck::onEndOfTranslationUnit() {
if (CurDecl->hasDefinition() || CurDecl->isReferenced()) {
continue; // Skip forward declarations that are used/referenced.
}
- if (FriendTypes.contains(CurDecl->getTypeForDecl())) {
+ if (FriendTypes.contains(CurDecl->getASTContext()
+ .getCanonicalTagType(CurDecl)
+ ->getTypePtr())) {
continue; // Skip forward declarations referenced as friend.
}
if (CurDecl->getLocation().isMacroID() ||
diff --git a/clang-tools-extra/clang-tidy/bugprone/ForwardingReferenceOverloadCheck.cpp b/clang-tools-extra/clang-tidy/bugprone/ForwardingReferenceOverloadCheck.cpp
index 00e8f7e..10b747e 100644
--- a/clang-tools-extra/clang-tidy/bugprone/ForwardingReferenceOverloadCheck.cpp
+++ b/clang-tools-extra/clang-tidy/bugprone/ForwardingReferenceOverloadCheck.cpp
@@ -33,21 +33,17 @@ AST_MATCHER(QualType, isEnableIf) {
BaseType = BaseType->getPointeeType().getTypePtr();
}
// Case: type parameter dependent (enable_if<is_integral<T>>).
- if (const auto *Dependent = BaseType->getAs<DependentNameType>()) {
- BaseType = Dependent->getQualifier()->getAsType();
- }
+ if (const auto *Dependent = BaseType->getAs<DependentNameType>())
+ BaseType = Dependent->getQualifier().getAsType();
if (!BaseType)
return false;
if (CheckTemplate(BaseType->getAs<TemplateSpecializationType>()))
return true; // Case: enable_if_t< >.
- if (const auto *Elaborated = BaseType->getAs<ElaboratedType>()) {
- if (const auto *Q = Elaborated->getQualifier())
- if (const auto *Qualifier = Q->getAsType()) {
- if (CheckTemplate(Qualifier->getAs<TemplateSpecializationType>())) {
- return true; // Case: enable_if< >::type.
- }
- }
- }
+ if (const auto *TT = BaseType->getAs<TypedefType>())
+ if (NestedNameSpecifier Q = TT->getQualifier();
+ Q.getKind() == NestedNameSpecifier::Kind::Type)
+ if (CheckTemplate(Q.getAsType()->getAs<TemplateSpecializationType>()))
+ return true; // Case: enable_if< >::type.
return false;
}
AST_MATCHER_P(TemplateTypeParmDecl, hasDefaultArgument,
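
With ElaboratedType gone, the written qualifier of `enable_if<...>::type` is reached through the TypedefType itself. A self-contained sketch of the lookup used above (the helper name is illustrative):

#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/Type.h"

// Returns the template specialization written as the qualifier of a typedef,
// e.g. the 'enable_if<...>' in 'enable_if<...>::type', or null otherwise.
static const clang::TemplateSpecializationType *
writtenQualifierSpecialization(const clang::Type *BaseType) {
  const auto *TT = BaseType->getAs<clang::TypedefType>();
  if (!TT)
    return nullptr;
  clang::NestedNameSpecifier Q = TT->getQualifier(); // value type, may be null
  if (Q.getKind() != clang::NestedNameSpecifier::Kind::Type)
    return nullptr;
  return Q.getAsType()->getAs<clang::TemplateSpecializationType>();
}
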
diff --git a/clang-tools-extra/clang-tidy/bugprone/IncorrectEnableIfCheck.cpp b/clang-tools-extra/clang-tidy/bugprone/IncorrectEnableIfCheck.cpp
index 75f1107..07cd90d 100644
--- a/clang-tools-extra/clang-tidy/bugprone/IncorrectEnableIfCheck.cpp
+++ b/clang-tools-extra/clang-tidy/bugprone/IncorrectEnableIfCheck.cpp
@@ -32,13 +32,10 @@ AST_MATCHER_P(TemplateTypeParmDecl, hasUnnamedDefaultArgument,
void IncorrectEnableIfCheck::registerMatchers(MatchFinder *Finder) {
Finder->addMatcher(
templateTypeParmDecl(
- hasUnnamedDefaultArgument(
- elaboratedTypeLoc(
- hasNamedTypeLoc(templateSpecializationTypeLoc(
- loc(qualType(hasDeclaration(namedDecl(
- hasName("::std::enable_if"))))))
- .bind("enable_if_specialization")))
- .bind("elaborated")))
+ hasUnnamedDefaultArgument(templateSpecializationTypeLoc(
+ loc(qualType(hasDeclaration(namedDecl(
+ hasName("::std::enable_if"))))))
+ .bind("enable_if_specialization")))
.bind("enable_if"),
this);
}
@@ -46,13 +43,11 @@ void IncorrectEnableIfCheck::registerMatchers(MatchFinder *Finder) {
void IncorrectEnableIfCheck::check(const MatchFinder::MatchResult &Result) {
const auto *EnableIf =
Result.Nodes.getNodeAs<TemplateTypeParmDecl>("enable_if");
- const auto *ElaboratedLoc =
- Result.Nodes.getNodeAs<ElaboratedTypeLoc>("elaborated");
const auto *EnableIfSpecializationLoc =
Result.Nodes.getNodeAs<TemplateSpecializationTypeLoc>(
"enable_if_specialization");
- if (!EnableIf || !ElaboratedLoc || !EnableIfSpecializationLoc)
+ if (!EnableIf || !EnableIfSpecializationLoc)
return;
const SourceManager &SM = *Result.SourceManager;
@@ -62,8 +57,10 @@ void IncorrectEnableIfCheck::check(const MatchFinder::MatchResult &Result) {
auto Diag = diag(EnableIf->getBeginLoc(),
"incorrect std::enable_if usage detected; use "
"'typename std::enable_if<...>::type'");
+ // FIXME: This should handle the enable_if specialization already having an
+ // elaborated keyword.
if (!getLangOpts().CPlusPlus20) {
- Diag << FixItHint::CreateInsertion(ElaboratedLoc->getBeginLoc(),
+ Diag << FixItHint::CreateInsertion(EnableIfSpecializationLoc->getBeginLoc(),
"typename ");
}
Diag << FixItHint::CreateInsertion(RAngleLoc.getLocWithOffset(1), "::type");
diff --git a/clang-tools-extra/clang-tidy/bugprone/InvalidEnumDefaultInitializationCheck.cpp b/clang-tools-extra/clang-tidy/bugprone/InvalidEnumDefaultInitializationCheck.cpp
index f903e63..7d92ef3 100644
--- a/clang-tools-extra/clang-tidy/bugprone/InvalidEnumDefaultInitializationCheck.cpp
+++ b/clang-tools-extra/clang-tidy/bugprone/InvalidEnumDefaultInitializationCheck.cpp
@@ -67,15 +67,15 @@ public:
return Visit(T->getElementType().getTypePtr());
}
bool VisitEnumType(const EnumType *T) {
- if (isCompleteAndHasNoZeroValue(T->getDecl())) {
+ if (isCompleteAndHasNoZeroValue(T->getOriginalDecl())) {
FoundEnum = T;
return true;
}
return false;
}
bool VisitRecordType(const RecordType *T) {
- const RecordDecl *RD = T->getDecl();
- if (RD->isUnion())
+ const RecordDecl *RD = T->getOriginalDecl()->getDefinition();
+ if (!RD || RD->isUnion())
return false;
auto VisitField = [this](const FieldDecl *F) {
return Visit(F->getType().getTypePtr());
@@ -125,7 +125,7 @@ void InvalidEnumDefaultInitializationCheck::check(
if (!Finder.Visit(InitList->getArrayFiller()->getType().getTypePtr()))
return;
InitExpr = InitList;
- Enum = Finder.FoundEnum->getDecl();
+ Enum = Finder.FoundEnum->getOriginalDecl();
}
if (!InitExpr || !Enum)
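
The `getDecl()` calls above become `getOriginalDecl()`, and for record types the definition now has to be requested and null-checked explicitly. A minimal sketch of that idiom outside the check:

#include "clang/AST/Decl.h"
#include "clang/AST/Type.h"

// The declaration a TagType was written against is now getOriginalDecl();
// the definition may not exist, so callers must check the result.
static const clang::RecordDecl *definitionOf(const clang::RecordType *RT) {
  return RT->getOriginalDecl()->getDefinition(); // may be null
}

static const clang::EnumDecl *declOf(const clang::EnumType *ET) {
  return ET->getOriginalDecl();
}
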
diff --git a/clang-tools-extra/clang-tidy/bugprone/MoveForwardingReferenceCheck.cpp b/clang-tools-extra/clang-tidy/bugprone/MoveForwardingReferenceCheck.cpp
index bfa2ab5..5dc988d 100644
--- a/clang-tools-extra/clang-tidy/bugprone/MoveForwardingReferenceCheck.cpp
+++ b/clang-tools-extra/clang-tidy/bugprone/MoveForwardingReferenceCheck.cpp
@@ -39,24 +39,31 @@ static void replaceMoveWithForward(const UnresolvedLookupExpr *Callee,
// std::move(). This will hopefully prevent erroneous replacements if the
// code does unusual things (e.g. create an alias for std::move() in
// another namespace).
- NestedNameSpecifier *NNS = Callee->getQualifier();
- if (!NNS) {
+ NestedNameSpecifier NNS = Callee->getQualifier();
+ switch (NNS.getKind()) {
+ case NestedNameSpecifier::Kind::Null:
// Called as "move" (i.e. presumably the code had a "using std::move;").
// We still conservatively put a "std::" in front of the forward because
// we don't know whether the code also had a "using std::forward;".
Diag << FixItHint::CreateReplacement(CallRange, "std::" + ForwardName);
- } else if (const NamespaceBaseDecl *Namespace = NNS->getAsNamespace()) {
+ break;
+ case NestedNameSpecifier::Kind::Namespace: {
+ auto [Namespace, Prefix] = NNS.getAsNamespaceAndPrefix();
if (Namespace->getName() == "std") {
- if (!NNS->getPrefix()) {
+ if (!Prefix) {
// Called as "std::move".
Diag << FixItHint::CreateReplacement(CallRange,
"std::" + ForwardName);
- } else if (NNS->getPrefix()->getKind() == NestedNameSpecifier::Global) {
+ } else if (Prefix.getKind() == NestedNameSpecifier::Kind::Global) {
// Called as "::std::move".
Diag << FixItHint::CreateReplacement(CallRange,
"::std::" + ForwardName);
}
}
+ break;
+ }
+ default:
+ return;
}
}
}
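
NestedNameSpecifier is now a value type, so the check switches over its kind and uses `getAsNamespaceAndPrefix()` instead of `getAsNamespace()`/`getPrefix()`. A self-contained sketch of that classification (the enum and function names are illustrative only):

#include "clang/AST/ExprCXX.h"
#include "clang/AST/NestedNameSpecifier.h"

enum class StdQualification { Unqualified, Std, GlobalStd, Other };

// Classifies how a call such as 'std::move(x)' was qualified at the call site.
static StdQualification
classifyQualifier(const clang::UnresolvedLookupExpr *Callee) {
  clang::NestedNameSpecifier NNS = Callee->getQualifier();
  switch (NNS.getKind()) {
  case clang::NestedNameSpecifier::Kind::Null:
    return StdQualification::Unqualified; // written as plain 'move'
  case clang::NestedNameSpecifier::Kind::Namespace: {
    auto [Namespace, Prefix] = NNS.getAsNamespaceAndPrefix();
    if (Namespace->getName() != "std")
      return StdQualification::Other;
    if (!Prefix)
      return StdQualification::Std;       // written as 'std::move'
    if (Prefix.getKind() == clang::NestedNameSpecifier::Kind::Global)
      return StdQualification::GlobalStd; // written as '::std::move'
    return StdQualification::Other;
  }
  default:
    return StdQualification::Other;
  }
}
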
diff --git a/clang-tools-extra/clang-tidy/bugprone/NarrowingConversionsCheck.cpp b/clang-tools-extra/clang-tidy/bugprone/NarrowingConversionsCheck.cpp
index 5378223..249c77c 100644
--- a/clang-tools-extra/clang-tidy/bugprone/NarrowingConversionsCheck.cpp
+++ b/clang-tools-extra/clang-tidy/bugprone/NarrowingConversionsCheck.cpp
@@ -555,15 +555,22 @@ bool NarrowingConversionsCheck::handleConditionalOperator(
// We have an expression like so: `output = cond ? lhs : rhs`
// From the point of view of narrowing conversion we treat it as two
// expressions `output = lhs` and `output = rhs`.
- handleBinaryOperator(Context, CO->getLHS()->getExprLoc(), Lhs,
- *CO->getLHS());
- handleBinaryOperator(Context, CO->getRHS()->getExprLoc(), Lhs,
- *CO->getRHS());
+ handleConditionalOperatorArgument(Context, Lhs, CO->getLHS());
+ handleConditionalOperatorArgument(Context, Lhs, CO->getRHS());
return true;
}
return false;
}
+void NarrowingConversionsCheck::handleConditionalOperatorArgument(
+ const ASTContext &Context, const Expr &Lhs, const Expr *Arg) {
+ if (const auto *ICE = llvm::dyn_cast<ImplicitCastExpr>(Arg))
+ if (!Arg->getIntegerConstantExpr(Context))
+ Arg = ICE->getSubExpr();
+
+ handleBinaryOperator(Context, Arg->getExprLoc(), Lhs, *Arg);
+}
+
void NarrowingConversionsCheck::handleImplicitCast(
const ASTContext &Context, const ImplicitCastExpr &Cast) {
if (Cast.getExprLoc().isMacroID())
diff --git a/clang-tools-extra/clang-tidy/bugprone/NarrowingConversionsCheck.h b/clang-tools-extra/clang-tidy/bugprone/NarrowingConversionsCheck.h
index 20403f9..116a8cba 100644
--- a/clang-tools-extra/clang-tidy/bugprone/NarrowingConversionsCheck.h
+++ b/clang-tools-extra/clang-tidy/bugprone/NarrowingConversionsCheck.h
@@ -85,6 +85,8 @@ private:
bool handleConditionalOperator(const ASTContext &Context, const Expr &Lhs,
const Expr &Rhs);
+ void handleConditionalOperatorArgument(const ASTContext &Context,
+ const Expr &Lhs, const Expr *Arg);
void handleImplicitCast(const ASTContext &Context,
const ImplicitCastExpr &Cast);
diff --git a/clang-tools-extra/clang-tidy/bugprone/SizeofExpressionCheck.cpp b/clang-tools-extra/clang-tidy/bugprone/SizeofExpressionCheck.cpp
index 88e048e6..8da6227 100644
--- a/clang-tools-extra/clang-tidy/bugprone/SizeofExpressionCheck.cpp
+++ b/clang-tools-extra/clang-tidy/bugprone/SizeofExpressionCheck.cpp
@@ -425,7 +425,7 @@ void SizeofExpressionCheck::check(const MatchFinder::MatchResult &Result) {
"suspicious usage of 'sizeof(array)/sizeof(...)';"
" denominator differs from the size of array elements")
<< E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange();
- } else if (NumTy && DenomTy && NumTy == DenomTy &&
+ } else if (NumTy && DenomTy && Ctx.hasSameType(NumTy, DenomTy) &&
!NumTy->isDependentType()) {
// Dependent type should not be compared.
diag(E->getOperatorLoc(),
@@ -434,7 +434,7 @@ void SizeofExpressionCheck::check(const MatchFinder::MatchResult &Result) {
<< E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange();
} else if (!WarnOnSizeOfPointer) {
// When 'WarnOnSizeOfPointer' is enabled, these messages become redundant:
- if (PointedTy && DenomTy && PointedTy == DenomTy) {
+ if (PointedTy && DenomTy && Ctx.hasSameType(PointedTy, DenomTy)) {
diag(E->getOperatorLoc(),
"suspicious usage of 'sizeof(...)/sizeof(...)'; size of pointer "
"is divided by size of pointed type")
@@ -463,7 +463,8 @@ void SizeofExpressionCheck::check(const MatchFinder::MatchResult &Result) {
const auto *SizeOfExpr =
Result.Nodes.getNodeAs<UnaryExprOrTypeTraitExpr>("sizeof-ptr-mul-expr");
- if ((LPtrTy == RPtrTy) && (LPtrTy == SizeofArgTy)) {
+ if (Ctx.hasSameType(LPtrTy, RPtrTy) &&
+ Ctx.hasSameType(LPtrTy, SizeofArgTy)) {
diag(SizeOfExpr->getBeginLoc(), "suspicious usage of 'sizeof(...)' in "
"pointer arithmetic")
<< SizeOfExpr->getSourceRange() << E->getOperatorLoc()
@@ -477,7 +478,8 @@ void SizeofExpressionCheck::check(const MatchFinder::MatchResult &Result) {
const auto *SizeOfExpr =
Result.Nodes.getNodeAs<UnaryExprOrTypeTraitExpr>("sizeof-ptr-div-expr");
- if ((LPtrTy == RPtrTy) && (LPtrTy == SizeofArgTy)) {
+ if (Ctx.hasSameType(LPtrTy, RPtrTy) &&
+ Ctx.hasSameType(LPtrTy, SizeofArgTy)) {
diag(SizeOfExpr->getBeginLoc(), "suspicious usage of 'sizeof(...)' in "
"pointer arithmetic")
<< SizeOfExpr->getSourceRange() << E->getOperatorLoc()
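
The comparisons above move from `==` on QualType to `ASTContext::hasSameType()`, because types that differ only in sugar (which the AST now preserves more of) are no longer pointer-identical. A one-function sketch of the distinction:

#include "clang/AST/ASTContext.h"
#include "clang/AST/Type.h"

// 'A == B' is identity of the written type nodes; hasSameType() compares the
// canonical types, ignoring sugar such as typedefs while still distinguishing
// cv-qualification.
static bool typesMatch(const clang::ASTContext &Ctx, clang::QualType A,
                       clang::QualType B) {
  return Ctx.hasSameType(A, B);
}
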
diff --git a/clang-tools-extra/clang-tidy/cppcoreguidelines/NoSuspendWithLockCheck.cpp b/clang-tools-extra/clang-tidy/cppcoreguidelines/NoSuspendWithLockCheck.cpp
index ca29317..29470b1 100644
--- a/clang-tools-extra/clang-tidy/cppcoreguidelines/NoSuspendWithLockCheck.cpp
+++ b/clang-tools-extra/clang-tidy/cppcoreguidelines/NoSuspendWithLockCheck.cpp
@@ -23,9 +23,9 @@ void NoSuspendWithLockCheck::storeOptions(ClangTidyOptions::OptionMap &Opts) {
}
void NoSuspendWithLockCheck::registerMatchers(MatchFinder *Finder) {
- auto LockType = elaboratedType(namesType(templateSpecializationType(
+ auto LockType = templateSpecializationType(
hasDeclaration(namedDecl(matchers::matchesAnyListedName(
- utils::options::parseStringList(LockGuards)))))));
+ utils::options::parseStringList(LockGuards)))));
StatementMatcher Lock =
declStmt(has(varDecl(hasType(LockType)).bind("lock-decl")))
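
Since the `elaboratedType()` wrapper no longer exists in the matcher library, the specialization is matched directly. A small standalone matcher in the same shape (the hard-coded name stands in for the check's configurable option list):

#include "clang/ASTMatchers/ASTMatchers.h"

// Matches 'std::lock_guard<...> l(m);' style declarations without the removed
// elaboratedType(namesType(...)) indirection.
static clang::ast_matchers::StatementMatcher makeLockMatcher() {
  using namespace clang::ast_matchers;
  auto LockType = templateSpecializationType(
      hasDeclaration(namedDecl(hasName("::std::lock_guard"))));
  return declStmt(has(varDecl(hasType(LockType)).bind("lock-decl")));
}
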
diff --git a/clang-tools-extra/clang-tidy/cppcoreguidelines/ProTypeMemberInitCheck.cpp b/clang-tools-extra/clang-tidy/cppcoreguidelines/ProTypeMemberInitCheck.cpp
index b413b12..4060759 100644
--- a/clang-tools-extra/clang-tidy/cppcoreguidelines/ProTypeMemberInitCheck.cpp
+++ b/clang-tools-extra/clang-tidy/cppcoreguidelines/ProTypeMemberInitCheck.cpp
@@ -190,7 +190,7 @@ struct InitializerInsertion {
// Convenience utility to get a RecordDecl from a QualType.
const RecordDecl *getCanonicalRecordDecl(const QualType &Type) {
if (const auto *RT = Type.getCanonicalType()->getAs<RecordType>())
- return RT->getDecl();
+ return RT->getOriginalDecl();
return nullptr;
}
diff --git a/clang-tools-extra/clang-tidy/cppcoreguidelines/SlicingCheck.cpp b/clang-tools-extra/clang-tidy/cppcoreguidelines/SlicingCheck.cpp
index 7675439..40fd15c 100644
--- a/clang-tools-extra/clang-tidy/cppcoreguidelines/SlicingCheck.cpp
+++ b/clang-tools-extra/clang-tidy/cppcoreguidelines/SlicingCheck.cpp
@@ -92,7 +92,7 @@ void SlicingCheck::diagnoseSlicedOverriddenMethods(
for (const auto &Base : DerivedDecl.bases()) {
if (const auto *BaseRecordType = Base.getType()->getAs<RecordType>()) {
if (const auto *BaseRecord = cast_or_null<CXXRecordDecl>(
- BaseRecordType->getDecl()->getDefinition()))
+ BaseRecordType->getOriginalDecl()->getDefinition()))
diagnoseSlicedOverriddenMethods(Call, *BaseRecord, BaseDecl);
}
}
diff --git a/clang-tools-extra/clang-tidy/fuchsia/MultipleInheritanceCheck.cpp b/clang-tools-extra/clang-tidy/fuchsia/MultipleInheritanceCheck.cpp
index 80ff97a..0302a5a 100644
--- a/clang-tools-extra/clang-tidy/fuchsia/MultipleInheritanceCheck.cpp
+++ b/clang-tools-extra/clang-tidy/fuchsia/MultipleInheritanceCheck.cpp
@@ -74,7 +74,7 @@ bool MultipleInheritanceCheck::isInterface(const CXXRecordDecl *Node) {
const auto *Ty = I.getType()->getAs<RecordType>();
if (!Ty)
continue;
- const RecordDecl *D = Ty->getDecl()->getDefinition();
+ const RecordDecl *D = Ty->getOriginalDecl()->getDefinition();
if (!D)
continue;
const auto *Base = cast<CXXRecordDecl>(D);
@@ -106,7 +106,8 @@ void MultipleInheritanceCheck::check(const MatchFinder::MatchResult &Result) {
const auto *Ty = I.getType()->getAs<RecordType>();
if (!Ty)
continue;
- const auto *Base = cast<CXXRecordDecl>(Ty->getDecl()->getDefinition());
+ const auto *Base =
+ cast<CXXRecordDecl>(Ty->getOriginalDecl()->getDefinition());
if (!isInterface(Base))
NumConcrete++;
}
@@ -117,7 +118,8 @@ void MultipleInheritanceCheck::check(const MatchFinder::MatchResult &Result) {
const auto *Ty = V.getType()->getAs<RecordType>();
if (!Ty)
continue;
- const auto *Base = cast<CXXRecordDecl>(Ty->getDecl()->getDefinition());
+ const auto *Base =
+ cast<CXXRecordDecl>(Ty->getOriginalDecl()->getDefinition());
if (!isInterface(Base))
NumConcrete++;
}
diff --git a/clang-tools-extra/clang-tidy/google/AvoidCStyleCastsCheck.cpp b/clang-tools-extra/clang-tidy/google/AvoidCStyleCastsCheck.cpp
index e076b39..14e11eb 100644
--- a/clang-tools-extra/clang-tidy/google/AvoidCStyleCastsCheck.cpp
+++ b/clang-tools-extra/clang-tidy/google/AvoidCStyleCastsCheck.cpp
@@ -89,6 +89,30 @@ static StringRef getDestTypeString(const SourceManager &SM,
SM, LangOpts);
}
+static bool sameTypeAsWritten(QualType X, QualType Y) {
+ if (X.getCanonicalType() != Y.getCanonicalType())
+ return false;
+
+ auto TC = X->getTypeClass();
+ if (TC != Y->getTypeClass())
+ return false;
+
+ switch (TC) {
+ case Type::Typedef:
+ return declaresSameEntity(cast<TypedefType>(X)->getDecl(),
+ cast<TypedefType>(Y)->getDecl());
+ case Type::Pointer:
+ return sameTypeAsWritten(cast<PointerType>(X)->getPointeeType(),
+ cast<PointerType>(Y)->getPointeeType());
+ case Type::RValueReference:
+ case Type::LValueReference:
+ return sameTypeAsWritten(cast<ReferenceType>(X)->getPointeeType(),
+ cast<ReferenceType>(Y)->getPointeeType());
+ default:
+ return true;
+ }
+}
+
void AvoidCStyleCastsCheck::check(const MatchFinder::MatchResult &Result) {
const auto *CastExpr = Result.Nodes.getNodeAs<ExplicitCastExpr>("cast");
@@ -128,12 +152,7 @@ void AvoidCStyleCastsCheck::check(const MatchFinder::MatchResult &Result) {
// case of overloaded functions, so detection of redundant casts is trickier
// in this case. Don't emit "redundant cast" warnings for function
// pointer/reference types.
- QualType Src = SourceTypeAsWritten, Dst = DestTypeAsWritten;
- if (const auto *ElTy = dyn_cast<ElaboratedType>(Src))
- Src = ElTy->getNamedType();
- if (const auto *ElTy = dyn_cast<ElaboratedType>(Dst))
- Dst = ElTy->getNamedType();
- if (Src == Dst) {
+ if (sameTypeAsWritten(SourceTypeAsWritten, DestTypeAsWritten)) {
diag(CastExpr->getBeginLoc(), "redundant cast to the same type")
<< FixItHint::CreateRemoval(ReplaceRange);
return;
diff --git a/clang-tools-extra/clang-tidy/google/ExplicitConstructorCheck.cpp b/clang-tools-extra/clang-tidy/google/ExplicitConstructorCheck.cpp
index 3deea06..68233ec 100644
--- a/clang-tools-extra/clang-tidy/google/ExplicitConstructorCheck.cpp
+++ b/clang-tools-extra/clang-tidy/google/ExplicitConstructorCheck.cpp
@@ -72,7 +72,7 @@ static bool isStdInitializerList(QualType Type) {
}
if (const auto *RT = Type->getAs<RecordType>()) {
if (const auto *Specialization =
- dyn_cast<ClassTemplateSpecializationDecl>(RT->getDecl()))
+ dyn_cast<ClassTemplateSpecializationDecl>(RT->getOriginalDecl()))
return declIsStdInitializerList(Specialization->getSpecializedTemplate());
}
return false;
diff --git a/clang-tools-extra/clang-tidy/google/UpgradeGoogletestCaseCheck.cpp b/clang-tools-extra/clang-tidy/google/UpgradeGoogletestCaseCheck.cpp
index 805dcaf..274b8af 100644
--- a/clang-tools-extra/clang-tidy/google/UpgradeGoogletestCaseCheck.cpp
+++ b/clang-tools-extra/clang-tidy/google/UpgradeGoogletestCaseCheck.cpp
@@ -257,8 +257,13 @@ getAliasNameRange(const MatchFinder::MatchResult &Result) {
return CharSourceRange::getTokenRange(
Using->getNameInfo().getSourceRange());
}
- return CharSourceRange::getTokenRange(
- Result.Nodes.getNodeAs<TypeLoc>("typeloc")->getSourceRange());
+ TypeLoc TL = *Result.Nodes.getNodeAs<TypeLoc>("typeloc");
+ if (auto QTL = TL.getAs<QualifiedTypeLoc>())
+ TL = QTL.getUnqualifiedLoc();
+
+ if (auto TTL = TL.getAs<TypedefTypeLoc>())
+ return CharSourceRange::getTokenRange(TTL.getNameLoc());
+ return CharSourceRange::getTokenRange(TL.castAs<UsingTypeLoc>().getNameLoc());
}
void UpgradeGoogletestCaseCheck::check(const MatchFinder::MatchResult &Result) {
diff --git a/clang-tools-extra/clang-tidy/misc/ConstCorrectnessCheck.cpp b/clang-tools-extra/clang-tidy/misc/ConstCorrectnessCheck.cpp
index 697398a..b32507d 100644
--- a/clang-tools-extra/clang-tidy/misc/ConstCorrectnessCheck.cpp
+++ b/clang-tools-extra/clang-tidy/misc/ConstCorrectnessCheck.cpp
@@ -98,11 +98,12 @@ void ConstCorrectnessCheck::registerMatchers(MatchFinder *Finder) {
hasType(referenceType(pointee(hasCanonicalType(templateTypeParmType())))),
hasType(referenceType(pointee(substTemplateTypeParmType()))));
- const auto AllowedType = hasType(qualType(anyOf(
- hasDeclaration(namedDecl(matchers::matchesAnyListedName(AllowedTypes))),
- references(namedDecl(matchers::matchesAnyListedName(AllowedTypes))),
- pointerType(pointee(hasDeclaration(
- namedDecl(matchers::matchesAnyListedName(AllowedTypes))))))));
+ auto AllowedTypeDecl = namedDecl(
+ anyOf(matchers::matchesAnyListedName(AllowedTypes), usingShadowDecl()));
+
+ const auto AllowedType = hasType(qualType(
+ anyOf(hasDeclaration(AllowedTypeDecl), references(AllowedTypeDecl),
+ pointerType(pointee(hasDeclaration(AllowedTypeDecl))))));
const auto AutoTemplateType = varDecl(
anyOf(hasType(autoType()), hasType(referenceType(pointee(autoType()))),
diff --git a/clang-tools-extra/clang-tidy/misc/MisplacedConstCheck.cpp b/clang-tools-extra/clang-tidy/misc/MisplacedConstCheck.cpp
index 0cdd48c..bb64a56 100644
--- a/clang-tools-extra/clang-tidy/misc/MisplacedConstCheck.cpp
+++ b/clang-tools-extra/clang-tidy/misc/MisplacedConstCheck.cpp
@@ -19,13 +19,13 @@ void MisplacedConstCheck::registerMatchers(MatchFinder *Finder) {
pointee(anyOf(isConstQualified(), ignoringParens(functionType()))))));
Finder->addMatcher(
- valueDecl(hasType(qualType(
- isConstQualified(),
- elaboratedType(namesType(typedefType(hasDeclaration(
- anyOf(typedefDecl(NonConstAndNonFunctionPointerType)
- .bind("typedef"),
- typeAliasDecl(NonConstAndNonFunctionPointerType)
- .bind("typeAlias")))))))))
+ valueDecl(
+ hasType(qualType(isConstQualified(),
+ typedefType(hasDeclaration(anyOf(
+ typedefDecl(NonConstAndNonFunctionPointerType)
+ .bind("typedef"),
+ typeAliasDecl(NonConstAndNonFunctionPointerType)
+ .bind("typeAlias")))))))
.bind("decl"),
this);
}
diff --git a/clang-tools-extra/clang-tidy/misc/RedundantExpressionCheck.cpp b/clang-tools-extra/clang-tidy/misc/RedundantExpressionCheck.cpp
index 9b2af2a..107eda2 100644
--- a/clang-tools-extra/clang-tidy/misc/RedundantExpressionCheck.cpp
+++ b/clang-tools-extra/clang-tidy/misc/RedundantExpressionCheck.cpp
@@ -45,14 +45,6 @@ static bool incrementWithoutOverflow(const APSInt &Value, APSInt &Result) {
return Value < Result;
}
-static bool areEquivalentNameSpecifier(const NestedNameSpecifier *Left,
- const NestedNameSpecifier *Right) {
- llvm::FoldingSetNodeID LeftID, RightID;
- Left->Profile(LeftID);
- Right->Profile(RightID);
- return LeftID == RightID;
-}
-
static bool areEquivalentExpr(const Expr *Left, const Expr *Right) {
if (!Left || !Right)
return !Left && !Right;
@@ -104,9 +96,8 @@ static bool areEquivalentExpr(const Expr *Left, const Expr *Right) {
if (cast<DependentScopeDeclRefExpr>(Left)->getDeclName() !=
cast<DependentScopeDeclRefExpr>(Right)->getDeclName())
return false;
- return areEquivalentNameSpecifier(
- cast<DependentScopeDeclRefExpr>(Left)->getQualifier(),
- cast<DependentScopeDeclRefExpr>(Right)->getQualifier());
+ return cast<DependentScopeDeclRefExpr>(Left)->getQualifier() ==
+ cast<DependentScopeDeclRefExpr>(Right)->getQualifier();
case Stmt::DeclRefExprClass:
return cast<DeclRefExpr>(Left)->getDecl() ==
cast<DeclRefExpr>(Right)->getDecl();
diff --git a/clang-tools-extra/clang-tidy/misc/UnusedAliasDeclsCheck.cpp b/clang-tools-extra/clang-tidy/misc/UnusedAliasDeclsCheck.cpp
index 86992cd..4fa679a 100644
--- a/clang-tools-extra/clang-tidy/misc/UnusedAliasDeclsCheck.cpp
+++ b/clang-tools-extra/clang-tidy/misc/UnusedAliasDeclsCheck.cpp
@@ -35,12 +35,12 @@ void UnusedAliasDeclsCheck::check(const MatchFinder::MatchResult &Result) {
}
if (const auto *NestedName =
- Result.Nodes.getNodeAs<NestedNameSpecifier>("nns")) {
- if (const auto *AliasDecl = dyn_cast_if_present<NamespaceAliasDecl>(
- NestedName->getAsNamespace())) {
+ Result.Nodes.getNodeAs<NestedNameSpecifier>("nns");
+ NestedName &&
+ NestedName->getKind() == NestedNameSpecifier::Kind::Namespace)
+ if (const auto *AliasDecl = dyn_cast<NamespaceAliasDecl>(
+ NestedName->getAsNamespaceAndPrefix().Namespace))
FoundDecls[AliasDecl] = CharSourceRange();
- }
- }
}
void UnusedAliasDeclsCheck::onEndOfTranslationUnit() {
diff --git a/clang-tools-extra/clang-tidy/misc/UnusedUsingDeclsCheck.cpp b/clang-tools-extra/clang-tidy/misc/UnusedUsingDeclsCheck.cpp
index d5c5fa3..8211a0e 100644
--- a/clang-tools-extra/clang-tidy/misc/UnusedUsingDeclsCheck.cpp
+++ b/clang-tools-extra/clang-tidy/misc/UnusedUsingDeclsCheck.cpp
@@ -71,11 +71,7 @@ void UnusedUsingDeclsCheck::registerMatchers(MatchFinder *Finder) {
templateArgument().bind("used")))),
this);
Finder->addMatcher(userDefinedLiteral().bind("used"), this);
- Finder->addMatcher(
- loc(elaboratedType(unless(hasQualifier(nestedNameSpecifier())),
- hasUnqualifiedDesugaredType(
- type(asTagDecl(tagDecl().bind("used")))))),
- this);
+ Finder->addMatcher(loc(asTagDecl(tagDecl().bind("used"))), this);
// Cases where we can identify the UsingShadowDecl directly, rather than
// just its target.
// FIXME: cover more cases in this way, as the AST supports it.
@@ -136,7 +132,7 @@ void UnusedUsingDeclsCheck::check(const MatchFinder::MatchResult &Result) {
}
if (const auto *ECD = dyn_cast<EnumConstantDecl>(Used)) {
if (const auto *ET = ECD->getType()->getAs<EnumType>())
- removeFromFoundDecls(ET->getDecl());
+ removeFromFoundDecls(ET->getOriginalDecl());
}
};
// We rely on the fact that the clang AST is walked in order, usages are only
diff --git a/clang-tools-extra/clang-tidy/modernize/DeprecatedIosBaseAliasesCheck.cpp b/clang-tools-extra/clang-tidy/modernize/DeprecatedIosBaseAliasesCheck.cpp
index f22f48d..2aca610 100644
--- a/clang-tools-extra/clang-tidy/modernize/DeprecatedIosBaseAliasesCheck.cpp
+++ b/clang-tools-extra/clang-tidy/modernize/DeprecatedIosBaseAliasesCheck.cpp
@@ -29,8 +29,7 @@ static std::optional<const char *> getReplacementType(StringRef Type) {
void DeprecatedIosBaseAliasesCheck::registerMatchers(MatchFinder *Finder) {
auto IoStateDecl = typedefDecl(hasAnyName(DeprecatedTypes)).bind("TypeDecl");
- auto IoStateType =
- qualType(hasDeclaration(IoStateDecl), unless(elaboratedType()));
+ auto IoStateType = typedefType(hasDeclaration(IoStateDecl));
Finder->addMatcher(typeLoc(loc(IoStateType)).bind("TypeLoc"), this);
}
@@ -43,12 +42,14 @@ void DeprecatedIosBaseAliasesCheck::check(
StringRef TypeName = Typedef->getName();
auto Replacement = getReplacementType(TypeName);
- const auto *TL = Result.Nodes.getNodeAs<TypeLoc>("TypeLoc");
- SourceLocation IoStateLoc = TL->getBeginLoc();
+ TypeLoc TL = *Result.Nodes.getNodeAs<TypeLoc>("TypeLoc");
+ if (auto QTL = TL.getAs<QualifiedTypeLoc>())
+ TL = QTL.getUnqualifiedLoc();
+ SourceLocation IoStateLoc = TL.castAs<TypedefTypeLoc>().getNameLoc();
// Do not generate fixits for matches depending on template arguments and
// macro expansions.
- bool Fix = Replacement && !TL->getType()->isDependentType();
+ bool Fix = Replacement && !TL.getType()->isDependentType();
if (IoStateLoc.isMacroID()) {
IoStateLoc = SM.getSpellingLoc(IoStateLoc);
Fix = false;
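
Typedef uses are no longer wrapped in an ElaboratedTypeLoc, so the only layer to strip before casting to TypedefTypeLoc is an optional QualifiedTypeLoc. A sketch of that unwrapping (the helper name is illustrative):

#include "clang/AST/TypeLoc.h"

// Strip any written qualification from the TypeLoc and return the typedef's
// name location, falling back to the start of the type otherwise.
static clang::SourceLocation typedefNameLoc(clang::TypeLoc TL) {
  if (auto QTL = TL.getAs<clang::QualifiedTypeLoc>())
    TL = QTL.getUnqualifiedLoc();
  if (auto TTL = TL.getAs<clang::TypedefTypeLoc>())
    return TTL.getNameLoc();
  return TL.getBeginLoc();
}
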
diff --git a/clang-tools-extra/clang-tidy/modernize/PassByValueCheck.cpp b/clang-tools-extra/clang-tidy/modernize/PassByValueCheck.cpp
index 1e271dfa..a54d072 100644
--- a/clang-tools-extra/clang-tidy/modernize/PassByValueCheck.cpp
+++ b/clang-tools-extra/clang-tidy/modernize/PassByValueCheck.cpp
@@ -77,8 +77,7 @@ AST_MATCHER_P(CXXRecordDecl, isMoveConstructibleInBoundCXXRecordDecl, StringRef,
static TypeMatcher notTemplateSpecConstRefType() {
return lValueReferenceType(
- pointee(unless(elaboratedType(namesType(templateSpecializationType()))),
- isConstQualified()));
+ pointee(unless(templateSpecializationType()), isConstQualified()));
}
static TypeMatcher nonConstValueType() {
diff --git a/clang-tools-extra/clang-tidy/modernize/ReplaceAutoPtrCheck.cpp b/clang-tools-extra/clang-tidy/modernize/ReplaceAutoPtrCheck.cpp
index 1ad31d3..f2142b8 100644
--- a/clang-tools-extra/clang-tidy/modernize/ReplaceAutoPtrCheck.cpp
+++ b/clang-tools-extra/clang-tidy/modernize/ReplaceAutoPtrCheck.cpp
@@ -48,7 +48,7 @@ void ReplaceAutoPtrCheck::storeOptions(ClangTidyOptions::OptionMap &Opts) {
void ReplaceAutoPtrCheck::registerMatchers(MatchFinder *Finder) {
auto AutoPtrDecl = recordDecl(hasName("auto_ptr"), isInStdNamespace());
- auto AutoPtrType = qualType(hasDeclaration(AutoPtrDecl));
+ auto AutoPtrType = hasCanonicalType(recordType(hasDeclaration(AutoPtrDecl)));
// std::auto_ptr<int> a;
// ^~~~~~~~~~~~~
@@ -58,11 +58,7 @@ void ReplaceAutoPtrCheck::registerMatchers(MatchFinder *Finder) {
//
// std::auto_ptr<int> fn(std::auto_ptr<int>);
// ^~~~~~~~~~~~~ ^~~~~~~~~~~~~
- Finder->addMatcher(typeLoc(loc(qualType(AutoPtrType,
- // Skip elaboratedType() as the named
- // type will match soon thereafter.
- unless(elaboratedType()))))
- .bind(AutoPtrTokenId),
+ Finder->addMatcher(typeLoc(loc(qualType(AutoPtrType))).bind(AutoPtrTokenId),
this);
// using std::auto_ptr;
@@ -118,10 +114,13 @@ void ReplaceAutoPtrCheck::check(const MatchFinder::MatchResult &Result) {
}
SourceLocation AutoPtrLoc;
- if (const auto *TL = Result.Nodes.getNodeAs<TypeLoc>(AutoPtrTokenId)) {
+ if (const auto *PTL = Result.Nodes.getNodeAs<TypeLoc>(AutoPtrTokenId)) {
+ auto TL = *PTL;
+ if (auto QTL = TL.getAs<QualifiedTypeLoc>())
+ TL = QTL.getUnqualifiedLoc();
// std::auto_ptr<int> i;
// ^
- if (auto Loc = TL->getAs<TemplateSpecializationTypeLoc>())
+ if (auto Loc = TL.getAs<TemplateSpecializationTypeLoc>())
AutoPtrLoc = Loc.getTemplateNameLoc();
} else if (const auto *D =
Result.Nodes.getNodeAs<UsingDecl>(AutoPtrTokenId)) {
diff --git a/clang-tools-extra/clang-tidy/modernize/TypeTraitsCheck.cpp b/clang-tools-extra/clang-tidy/modernize/TypeTraitsCheck.cpp
index ff0b321..de2d42d 100644
--- a/clang-tools-extra/clang-tidy/modernize/TypeTraitsCheck.cpp
+++ b/clang-tools-extra/clang-tidy/modernize/TypeTraitsCheck.cpp
@@ -168,19 +168,6 @@ static DeclarationName getName(const DeclRefExpr &D) {
return D.getDecl()->getDeclName();
}
-static bool isNamedType(const ElaboratedTypeLoc &ETL) {
- if (const auto *TFT =
- ETL.getNamedTypeLoc().getTypePtr()->getAs<TypedefType>()) {
- const TypedefNameDecl *Decl = TFT->getDecl();
- return Decl->getDeclName().isIdentifier() && Decl->getName() == "type";
- }
- return false;
-}
-
-static bool isNamedType(const DependentNameTypeLoc &DTL) {
- return DTL.getTypePtr()->getIdentifier()->getName() == "type";
-}
-
namespace {
AST_POLYMORPHIC_MATCHER(isValue, AST_POLYMORPHIC_SUPPORTED_TYPES(
DeclRefExpr, DependentScopeDeclRefExpr)) {
@@ -188,10 +175,14 @@ AST_POLYMORPHIC_MATCHER(isValue, AST_POLYMORPHIC_SUPPORTED_TYPES(
return Ident && Ident->isStr("value");
}
-AST_POLYMORPHIC_MATCHER(isType,
- AST_POLYMORPHIC_SUPPORTED_TYPES(ElaboratedTypeLoc,
- DependentNameTypeLoc)) {
- return Node.getBeginLoc().isValid() && isNamedType(Node);
+AST_MATCHER(TypeLoc, isType) {
+ if (auto TL = Node.getAs<TypedefTypeLoc>()) {
+ const auto *TD = TL.getDecl();
+ return TD->getDeclName().isIdentifier() && TD->getName() == "type";
+ }
+ if (auto TL = Node.getAs<DependentNameTypeLoc>())
+ return TL.getTypePtr()->getIdentifier()->getName() == "type";
+ return false;
}
} // namespace
@@ -214,10 +205,7 @@ void TypeTraitsCheck::registerMatchers(MatchFinder *Finder) {
.bind(Bind),
this);
}
- Finder->addMatcher(mapAnyOf(elaboratedTypeLoc, dependentNameTypeLoc)
- .with(isType())
- .bind(Bind),
- this);
+ Finder->addMatcher(typeLoc(isType()).bind(Bind), this);
}
static bool isNamedDeclInStdTraitsSet(const NamedDecl *ND,
@@ -226,14 +214,11 @@ static bool isNamedDeclInStdTraitsSet(const NamedDecl *ND,
Set.contains(ND->getName());
}
-static bool checkTemplatedDecl(const NestedNameSpecifier *NNS,
+static bool checkTemplatedDecl(NestedNameSpecifier NNS,
const llvm::StringSet<> &Set) {
- if (!NNS)
+ if (NNS.getKind() != NestedNameSpecifier::Kind::Type)
return false;
- const Type *NNST = NNS->getAsType();
- if (!NNST)
- return false;
- const auto *TST = NNST->getAs<TemplateSpecializationType>();
+ const auto *TST = NNS.getAsType()->getAs<TemplateSpecializationType>();
if (!TST)
return false;
if (const TemplateDecl *TD = TST->getTemplateName().getAsTemplateDecl()) {
@@ -250,8 +235,8 @@ void TypeTraitsCheck::check(const MatchFinder::MatchResult &Result) {
auto EmitValueWarning = [this, &Result](const NestedNameSpecifierLoc &QualLoc,
SourceLocation EndLoc) {
SourceLocation TemplateNameEndLoc;
- if (auto TSTL = QualLoc.getTypeLoc().getAs<TemplateSpecializationTypeLoc>();
- !TSTL.isNull())
+ if (auto TSTL =
+ QualLoc.getAsTypeLoc().getAs<TemplateSpecializationTypeLoc>())
TemplateNameEndLoc = Lexer::getLocForEndOfToken(
TSTL.getTemplateNameLoc(), 0, *Result.SourceManager,
Result.Context->getLangOpts());
@@ -274,8 +259,8 @@ void TypeTraitsCheck::check(const MatchFinder::MatchResult &Result) {
SourceLocation EndLoc,
SourceLocation TypenameLoc) {
SourceLocation TemplateNameEndLoc;
- if (auto TSTL = QualLoc.getTypeLoc().getAs<TemplateSpecializationTypeLoc>();
- !TSTL.isNull())
+ if (auto TSTL =
+ QualLoc.getAsTypeLoc().getAs<TemplateSpecializationTypeLoc>())
TemplateNameEndLoc = Lexer::getLocForEndOfToken(
TSTL.getTemplateNameLoc(), 0, *Result.SourceManager,
Result.Context->getLangOpts());
@@ -301,23 +286,21 @@ void TypeTraitsCheck::check(const MatchFinder::MatchResult &Result) {
if (!DRE->hasQualifier())
return;
if (const auto *CTSD = dyn_cast_if_present<ClassTemplateSpecializationDecl>(
- DRE->getQualifier()->getAsRecordDecl())) {
+ DRE->getQualifier().getAsRecordDecl())) {
if (isNamedDeclInStdTraitsSet(CTSD, ValueTraits))
EmitValueWarning(DRE->getQualifierLoc(), DRE->getEndLoc());
}
return;
}
- if (const auto *ETL = Result.Nodes.getNodeAs<ElaboratedTypeLoc>(Bind)) {
- const NestedNameSpecifierLoc QualLoc = ETL->getQualifierLoc();
- const auto *NNS = QualLoc.getNestedNameSpecifier();
- if (!NNS)
- return;
+ if (const auto *TL = Result.Nodes.getNodeAs<TypedefTypeLoc>(Bind)) {
+ const NestedNameSpecifierLoc QualLoc = TL->getQualifierLoc();
+ NestedNameSpecifier NNS = QualLoc.getNestedNameSpecifier();
if (const auto *CTSD = dyn_cast_if_present<ClassTemplateSpecializationDecl>(
- NNS->getAsRecordDecl())) {
+ NNS.getAsRecordDecl())) {
if (isNamedDeclInStdTraitsSet(CTSD, TypeTraits))
- EmitTypeWarning(ETL->getQualifierLoc(), ETL->getEndLoc(),
- ETL->getElaboratedKeywordLoc());
+ EmitTypeWarning(TL->getQualifierLoc(), TL->getEndLoc(),
+ TL->getElaboratedKeywordLoc());
}
return;
}
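
The qualifier of the bound TypedefTypeLoc is now a value-typed NestedNameSpecifier, and the record it names is reached directly through `getAsRecordDecl()`. A minimal sketch of that step (the function name is illustrative):

#include "clang/AST/DeclTemplate.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "llvm/Support/Casting.h"

// Returns the class template specialization named by a written qualifier such
// as the 'is_same<T, U>' in 'is_same<T, U>::type', or null if there is none.
static const clang::ClassTemplateSpecializationDecl *
qualifierSpecialization(clang::NestedNameSpecifier NNS) {
  return llvm::dyn_cast_if_present<clang::ClassTemplateSpecializationDecl>(
      NNS.getAsRecordDecl());
}
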
diff --git a/clang-tools-extra/clang-tidy/modernize/UseAutoCheck.cpp b/clang-tools-extra/clang-tidy/modernize/UseAutoCheck.cpp
index f4b6308..b601620 100644
--- a/clang-tools-extra/clang-tidy/modernize/UseAutoCheck.cpp
+++ b/clang-tools-extra/clang-tidy/modernize/UseAutoCheck.cpp
@@ -186,16 +186,14 @@ TypeMatcher nestedIterator() {
/// declarations and which name standard iterators for standard containers.
TypeMatcher iteratorFromUsingDeclaration() {
auto HasIteratorDecl = hasDeclaration(namedDecl(hasStdIteratorName()));
- // Types resulting from using declarations are represented by elaboratedType.
- return elaboratedType(
- // Unwrap the nested name specifier to test for one of the standard
- // containers.
- hasQualifier(specifiesType(templateSpecializationType(hasDeclaration(
- namedDecl(hasStdContainerName(), isInStdNamespace()))))),
- // the named type is what comes after the final '::' in the type. It
- // should name one of the standard iterator names.
- namesType(
- anyOf(typedefType(HasIteratorDecl), recordType(HasIteratorDecl))));
+ // Unwrap the nested name specifier to test for one of the standard
+ // containers.
+ auto Qualifier = hasQualifier(specifiesType(templateSpecializationType(
+ hasDeclaration(namedDecl(hasStdContainerName(), isInStdNamespace())))));
+ // the named type is what comes after the final '::' in the type. It should
+ // name one of the standard iterator names.
+ return anyOf(typedefType(HasIteratorDecl, Qualifier),
+ recordType(HasIteratorDecl, Qualifier));
}
/// This matcher returns declaration statements that contain variable
diff --git a/clang-tools-extra/clang-tidy/modernize/UseConstraintsCheck.cpp b/clang-tools-extra/clang-tidy/modernize/UseConstraintsCheck.cpp
index e9b96c4..07274d0 100644
--- a/clang-tools-extra/clang-tidy/modernize/UseConstraintsCheck.cpp
+++ b/clang-tools-extra/clang-tidy/modernize/UseConstraintsCheck.cpp
@@ -60,9 +60,11 @@ matchEnableIfSpecializationImplTypename(TypeLoc TheType) {
Keyword != ElaboratedTypeKeyword::None)) {
return std::nullopt;
}
- TheType = Dep.getQualifierLoc().getTypeLoc();
+ TheType = Dep.getQualifierLoc().getAsTypeLoc();
if (TheType.isNull())
return std::nullopt;
+ } else {
+ return std::nullopt;
}
if (const auto SpecializationLoc =
@@ -89,9 +91,6 @@ matchEnableIfSpecializationImplTypename(TypeLoc TheType) {
static std::optional<TemplateSpecializationTypeLoc>
matchEnableIfSpecializationImplTrait(TypeLoc TheType) {
- if (const auto Elaborated = TheType.getAs<ElaboratedTypeLoc>())
- TheType = Elaborated.getNamedTypeLoc();
-
if (const auto SpecializationLoc =
TheType.getAs<TemplateSpecializationTypeLoc>()) {
diff --git a/clang-tools-extra/clang-tidy/modernize/UseEmplaceCheck.cpp b/clang-tools-extra/clang-tidy/modernize/UseEmplaceCheck.cpp
index aaf24ea..ee49d8a 100644
--- a/clang-tools-extra/clang-tidy/modernize/UseEmplaceCheck.cpp
+++ b/clang-tools-extra/clang-tidy/modernize/UseEmplaceCheck.cpp
@@ -164,10 +164,10 @@ void UseEmplaceCheck::registerMatchers(MatchFinder *Finder) {
auto CallEmplacy = cxxMemberCallExpr(
hasDeclaration(
functionDecl(hasAnyNameIgnoringTemplates(EmplacyFunctions))),
- on(hasTypeOrPointeeType(hasCanonicalType(hasDeclaration(
- has(typedefNameDecl(hasName("value_type"),
- hasType(type(hasUnqualifiedDesugaredType(
- recordType().bind("value_type")))))))))));
+ on(hasTypeOrPointeeType(
+ hasCanonicalType(hasDeclaration(has(typedefNameDecl(
+ hasName("value_type"),
+ hasType(hasCanonicalType(recordType().bind("value_type"))))))))));
// We can't replace push_backs of smart pointer because
// if emplacement fails (f.e. bad_alloc in vector) we will have leak of
@@ -241,17 +241,16 @@ void UseEmplaceCheck::registerMatchers(MatchFinder *Finder) {
auto HasConstructExprWithValueTypeType =
has(ignoringImplicit(cxxConstructExpr(
- SoughtConstructExpr, hasType(type(hasUnqualifiedDesugaredType(
- type(equalsBoundNode("value_type"))))))));
-
- auto HasBracedInitListWithValueTypeType =
- anyOf(allOf(HasConstructInitListExpr,
- has(initListExpr(hasType(type(hasUnqualifiedDesugaredType(
- type(equalsBoundNode("value_type")))))))),
- has(cxxBindTemporaryExpr(
- HasConstructInitListExpr,
- has(initListExpr(hasType(type(hasUnqualifiedDesugaredType(
- type(equalsBoundNode("value_type"))))))))));
+ SoughtConstructExpr,
+ hasType(hasCanonicalType(type(equalsBoundNode("value_type")))))));
+
+ auto HasBracedInitListWithValueTypeType = anyOf(
+ allOf(HasConstructInitListExpr,
+ has(initListExpr(hasType(
+ hasCanonicalType(type(equalsBoundNode("value_type"))))))),
+ has(cxxBindTemporaryExpr(HasConstructInitListExpr,
+ has(initListExpr(hasType(hasCanonicalType(
+ type(equalsBoundNode("value_type")))))))));
auto HasConstructExprWithValueTypeTypeAsLastArgument = hasLastArgument(
materializeTemporaryExpr(
@@ -289,19 +288,17 @@ void UseEmplaceCheck::registerMatchers(MatchFinder *Finder) {
this);
Finder->addMatcher(
- traverse(
- TK_AsIs,
- cxxMemberCallExpr(
- CallEmplacy,
- on(hasType(cxxRecordDecl(has(typedefNameDecl(
- hasName("value_type"),
- hasType(type(
- hasUnqualifiedDesugaredType(recordType(hasDeclaration(
- cxxRecordDecl(hasAnyName(SmallVector<StringRef, 2>(
- TupleTypes.begin(), TupleTypes.end()))))))))))))),
- has(MakeTuple), hasSameNumArgsAsDeclNumParams(),
- unless(isInTemplateInstantiation()))
- .bind("emplacy_call")),
+ traverse(TK_AsIs,
+ cxxMemberCallExpr(
+ CallEmplacy,
+ on(hasType(cxxRecordDecl(has(typedefNameDecl(
+ hasName("value_type"),
+ hasType(hasCanonicalType(recordType(hasDeclaration(
+ cxxRecordDecl(hasAnyName(SmallVector<StringRef, 2>(
+ TupleTypes.begin(), TupleTypes.end())))))))))))),
+ has(MakeTuple), hasSameNumArgsAsDeclNumParams(),
+ unless(isInTemplateInstantiation()))
+ .bind("emplacy_call")),
this);
}
diff --git a/clang-tools-extra/clang-tidy/modernize/UseScopedLockCheck.cpp b/clang-tools-extra/clang-tidy/modernize/UseScopedLockCheck.cpp
index 52e9a9f..5310f2f 100644
--- a/clang-tools-extra/clang-tidy/modernize/UseScopedLockCheck.cpp
+++ b/clang-tools-extra/clang-tidy/modernize/UseScopedLockCheck.cpp
@@ -29,7 +29,7 @@ static bool isLockGuardDecl(const NamedDecl *Decl) {
static bool isLockGuard(const QualType &Type) {
if (const auto *Record = Type->getAs<RecordType>())
- if (const RecordDecl *Decl = Record->getDecl())
+ if (const RecordDecl *Decl = Record->getOriginalDecl())
return isLockGuardDecl(Decl);
if (const auto *TemplateSpecType = Type->getAs<TemplateSpecializationType>())
@@ -89,17 +89,6 @@ findLocksInCompoundStmt(const CompoundStmt *Block,
return LockGuardGroups;
}
-static TemplateSpecializationTypeLoc
-getTemplateLockGuardTypeLoc(const TypeSourceInfo *SourceInfo) {
- const TypeLoc Loc = SourceInfo->getTypeLoc();
-
- const auto ElaboratedLoc = Loc.getAs<ElaboratedTypeLoc>();
- if (!ElaboratedLoc)
- return {};
-
- return ElaboratedLoc.getNamedTypeLoc().getAs<TemplateSpecializationTypeLoc>();
-}
-
// Find the exact source range of the 'lock_guard' token
static SourceRange getLockGuardRange(const TypeSourceInfo *SourceInfo) {
const TypeLoc LockGuardTypeLoc = SourceInfo->getTypeLoc();
@@ -110,7 +99,7 @@ static SourceRange getLockGuardRange(const TypeSourceInfo *SourceInfo) {
// Find the exact source range of the 'lock_guard' name token
static SourceRange getLockGuardNameRange(const TypeSourceInfo *SourceInfo) {
const TemplateSpecializationTypeLoc TemplateLoc =
- getTemplateLockGuardTypeLoc(SourceInfo);
+ SourceInfo->getTypeLoc().getAs<TemplateSpecializationTypeLoc>();
if (!TemplateLoc)
return {};
@@ -136,11 +125,11 @@ void UseScopedLockCheck::registerMatchers(MatchFinder *Finder) {
const auto LockGuardClassDecl =
namedDecl(hasName("lock_guard"), isInStdNamespace());
- const auto LockGuardType = qualType(anyOf(
- hasUnqualifiedDesugaredType(
- recordType(hasDeclaration(LockGuardClassDecl))),
- elaboratedType(namesType(hasUnqualifiedDesugaredType(
- templateSpecializationType(hasDeclaration(LockGuardClassDecl)))))));
+ const auto LockGuardType =
+ qualType(anyOf(hasUnqualifiedDesugaredType(
+ recordType(hasDeclaration(LockGuardClassDecl))),
+ hasUnqualifiedDesugaredType(templateSpecializationType(
+ hasDeclaration(LockGuardClassDecl)))));
const auto LockVarDecl = varDecl(hasType(LockGuardType));
@@ -165,18 +154,16 @@ void UseScopedLockCheck::registerMatchers(MatchFinder *Finder) {
if (WarnOnUsingAndTypedef) {
// Match 'typedef std::lock_guard<std::mutex> Lock'
Finder->addMatcher(typedefDecl(unless(isExpansionInSystemHeader()),
- hasUnderlyingType(LockGuardType))
+ hasType(hasUnderlyingType(LockGuardType)))
.bind("lock-guard-typedef"),
this);
// Match 'using Lock = std::lock_guard<std::mutex>'
- Finder->addMatcher(
- typeAliasDecl(
- unless(isExpansionInSystemHeader()),
- hasType(elaboratedType(namesType(templateSpecializationType(
- hasDeclaration(LockGuardClassDecl))))))
- .bind("lock-guard-using-alias"),
- this);
+ Finder->addMatcher(typeAliasDecl(unless(isExpansionInSystemHeader()),
+ hasType(templateSpecializationType(
+ hasDeclaration(LockGuardClassDecl))))
+ .bind("lock-guard-using-alias"),
+ this);
// Match 'using std::lock_guard'
Finder->addMatcher(
@@ -288,8 +275,8 @@ void UseScopedLockCheck::diagOnSourceInfo(
const ast_matchers::MatchFinder::MatchResult &Result) {
const TypeLoc TL = LockGuardSourceInfo->getTypeLoc();
- if (const auto ElaboratedTL = TL.getAs<ElaboratedTypeLoc>()) {
- auto Diag = diag(ElaboratedTL.getBeginLoc(), UseScopedLockMessage);
+ if (const auto TTL = TL.getAs<TemplateSpecializationTypeLoc>()) {
+ auto Diag = diag(TTL.getBeginLoc(), UseScopedLockMessage);
const SourceRange LockGuardRange =
getLockGuardNameRange(LockGuardSourceInfo);
diff --git a/clang-tools-extra/clang-tidy/modernize/UseTrailingReturnTypeCheck.cpp b/clang-tools-extra/clang-tidy/modernize/UseTrailingReturnTypeCheck.cpp
index ced4825..82f6409 100644
--- a/clang-tools-extra/clang-tidy/modernize/UseTrailingReturnTypeCheck.cpp
+++ b/clang-tools-extra/clang-tidy/modernize/UseTrailingReturnTypeCheck.cpp
@@ -64,66 +64,65 @@ public:
return false;
}
- bool TraverseTypeLoc(TypeLoc TL, bool Elaborated = false) {
+ bool TraverseTypeLoc(TypeLoc TL, bool TraverseQualifier = true) {
if (TL.isNull())
return true;
- if (!Elaborated) {
- switch (TL.getTypeLocClass()) {
- case TypeLoc::Record:
- if (visitUnqualName(
- TL.getAs<RecordTypeLoc>().getTypePtr()->getDecl()->getName()))
- return false;
+ switch (TL.getTypeLocClass()) {
+ case TypeLoc::InjectedClassName:
+ case TypeLoc::Record:
+ case TypeLoc::Enum: {
+ auto TTL = TL.getAs<TagTypeLoc>();
+ const auto *T = TTL.getTypePtr();
+ if (T->getKeyword() != ElaboratedTypeKeyword::None ||
+ TTL.getQualifierLoc())
break;
- case TypeLoc::Enum:
- if (visitUnqualName(
- TL.getAs<EnumTypeLoc>().getTypePtr()->getDecl()->getName()))
- return false;
- break;
- case TypeLoc::TemplateSpecialization:
- if (visitUnqualName(TL.getAs<TemplateSpecializationTypeLoc>()
- .getTypePtr()
- ->getTemplateName()
- .getAsTemplateDecl()
- ->getName()))
- return false;
- break;
- case TypeLoc::Typedef:
- if (visitUnqualName(
- TL.getAs<TypedefTypeLoc>().getTypePtr()->getDecl()->getName()))
- return false;
+ if (visitUnqualName(T->getOriginalDecl()->getName()))
+ return false;
+ break;
+ }
+ case TypeLoc::TemplateSpecialization: {
+ auto TTL = TL.getAs<TemplateSpecializationTypeLoc>();
+ const auto *T = TTL.getTypePtr();
+ if (T->getKeyword() != ElaboratedTypeKeyword::None ||
+ TTL.getQualifierLoc())
break;
- case TypeLoc::Using:
- if (visitUnqualName(TL.getAs<UsingTypeLoc>()
- .getTypePtr()
- ->getFoundDecl()
- ->getName()))
- return false;
+ if (visitUnqualName(T->getTemplateName().getAsTemplateDecl()->getName()))
+ return false;
+ break;
+ }
+ case TypeLoc::Typedef: {
+ auto TTL = TL.getAs<TypedefTypeLoc>();
+ const auto *T = TTL.getTypePtr();
+ if (T->getKeyword() != ElaboratedTypeKeyword::None ||
+ TTL.getQualifierLoc())
break;
- default:
+ if (visitUnqualName(T->getDecl()->getName()))
+ return false;
+ break;
+ }
+ case TypeLoc::Using: {
+ auto TTL = TL.getAs<UsingTypeLoc>();
+ const auto *T = TTL.getTypePtr();
+ if (T->getKeyword() != ElaboratedTypeKeyword::None ||
+ TTL.getQualifierLoc())
break;
- }
+ if (visitUnqualName(T->getDecl()->getName()))
+ return false;
+ break;
+ }
+ default:
+ break;
}
- return RecursiveASTVisitor<UnqualNameVisitor>::TraverseTypeLoc(TL);
+ return RecursiveASTVisitor<UnqualNameVisitor>::TraverseTypeLoc(
+ TL, TraverseQualifier);
}
// Replace the base method in order to call our own
// TraverseTypeLoc().
- bool TraverseQualifiedTypeLoc(QualifiedTypeLoc TL) {
- return TraverseTypeLoc(TL.getUnqualifiedLoc());
- }
-
- // Replace the base version to inform TraverseTypeLoc that the type is
- // elaborated.
- bool TraverseElaboratedTypeLoc(ElaboratedTypeLoc TL) {
- if (TL.getQualifierLoc() &&
- !TraverseNestedNameSpecifierLoc(TL.getQualifierLoc()))
- return false;
- const auto *T = TL.getTypePtr();
- return TraverseTypeLoc(TL.getNamedTypeLoc(),
- T->getKeyword() != ElaboratedTypeKeyword::None ||
- T->getQualifier());
+ bool TraverseQualifiedTypeLoc(QualifiedTypeLoc TL, bool TraverseQualifier) {
+ return TraverseTypeLoc(TL.getUnqualifiedLoc(), TraverseQualifier);
}
bool VisitDeclRefExpr(DeclRefExpr *S) {
diff --git a/clang-tools-extra/clang-tidy/modernize/UseTransparentFunctorsCheck.cpp b/clang-tools-extra/clang-tidy/modernize/UseTransparentFunctorsCheck.cpp
index a053c07..2373a26 100644
--- a/clang-tools-extra/clang-tidy/modernize/UseTransparentFunctorsCheck.cpp
+++ b/clang-tools-extra/clang-tidy/modernize/UseTransparentFunctorsCheck.cpp
@@ -37,15 +37,13 @@ void UseTransparentFunctorsCheck::registerMatchers(MatchFinder *Finder) {
// Non-transparent functor mentioned as a template parameter. FIXIT.
Finder->addMatcher(
- loc(qualType(
- unless(elaboratedType()),
- hasDeclaration(classTemplateSpecializationDecl(
- unless(hasAnyTemplateArgument(templateArgument(refersToType(
- qualType(pointsTo(qualType(isAnyCharacter()))))))),
- hasAnyTemplateArgument(
- templateArgument(refersToType(qualType(hasDeclaration(
- TransparentFunctors))))
- .bind("Functor"))))))
+ loc(qualType(hasDeclaration(classTemplateSpecializationDecl(
+ unless(hasAnyTemplateArgument(templateArgument(refersToType(
+ qualType(pointsTo(qualType(isAnyCharacter()))))))),
+ hasAnyTemplateArgument(
+ templateArgument(refersToType(qualType(
+ hasDeclaration(TransparentFunctors))))
+ .bind("Functor"))))))
.bind("FunctorParentLoc"),
this);
diff --git a/clang-tools-extra/clang-tidy/performance/NoAutomaticMoveCheck.cpp b/clang-tools-extra/clang-tidy/performance/NoAutomaticMoveCheck.cpp
index 7022e9d..1c01899 100644
--- a/clang-tools-extra/clang-tidy/performance/NoAutomaticMoveCheck.cpp
+++ b/clang-tools-extra/clang-tidy/performance/NoAutomaticMoveCheck.cpp
@@ -42,11 +42,11 @@ void NoAutomaticMoveCheck::registerMatchers(MatchFinder *Finder) {
// A matcher for a `DstT::DstT(const Src&)` where DstT also has a
// `DstT::DstT(Src&&)`.
const auto LValueRefCtor = cxxConstructorDecl(
- hasParameter(0,
- hasType(lValueReferenceType(pointee(type().bind("SrcT"))))),
+ hasParameter(0, hasType(hasCanonicalType(
+ lValueReferenceType(pointee(type().bind("SrcT")))))),
ofClass(cxxRecordDecl(hasMethod(cxxConstructorDecl(
- hasParameter(0, hasType(rValueReferenceType(
- pointee(type(equalsBoundNode("SrcT")))))))))));
+ hasParameter(0, hasType(hasCanonicalType(rValueReferenceType(
+ pointee(type(equalsBoundNode("SrcT"))))))))))));
// A matcher for `DstT::DstT(const Src&&)`, which typically comes from an
// instantiation of `template <typename U> DstT::DstT(U&&)`.
diff --git a/clang-tools-extra/clang-tidy/portability/StdAllocatorConstCheck.cpp b/clang-tools-extra/clang-tidy/portability/StdAllocatorConstCheck.cpp
index 3b4d65b..5a3c9a4 100644
--- a/clang-tools-extra/clang-tidy/portability/StdAllocatorConstCheck.cpp
+++ b/clang-tools-extra/clang-tidy/portability/StdAllocatorConstCheck.cpp
@@ -15,10 +15,11 @@ namespace clang::tidy::portability {
void StdAllocatorConstCheck::registerMatchers(MatchFinder *Finder) {
// Match std::allocator<const T>.
- auto AllocatorConst =
+ auto AllocatorConst = qualType(hasCanonicalType(
recordType(hasDeclaration(classTemplateSpecializationDecl(
hasName("::std::allocator"),
- hasTemplateArgument(0, refersToType(qualType(isConstQualified()))))));
+ hasTemplateArgument(0,
+ refersToType(qualType(isConstQualified()))))))));
auto HasContainerName =
hasAnyName("::std::vector", "::std::deque", "::std::list",
@@ -31,8 +32,10 @@ void StdAllocatorConstCheck::registerMatchers(MatchFinder *Finder) {
// aren't caught.
Finder->addMatcher(
typeLoc(
- templateSpecializationTypeLoc(),
- loc(hasUnqualifiedDesugaredType(anyOf(
+ anyOf(templateSpecializationTypeLoc(),
+ qualifiedTypeLoc(
+ hasUnqualifiedLoc(templateSpecializationTypeLoc()))),
+ loc(qualType(anyOf(
recordType(hasDeclaration(classTemplateSpecializationDecl(
HasContainerName,
anyOf(
diff --git a/clang-tools-extra/clang-tidy/readability/ContainerSizeEmptyCheck.cpp b/clang-tools-extra/clang-tidy/readability/ContainerSizeEmptyCheck.cpp
index ce736a8..c4dc319 100644
--- a/clang-tools-extra/clang-tidy/readability/ContainerSizeEmptyCheck.cpp
+++ b/clang-tools-extra/clang-tidy/readability/ContainerSizeEmptyCheck.cpp
@@ -238,9 +238,12 @@ void ContainerSizeEmptyCheck::check(const MatchFinder::MatchResult &Result) {
? MemberCallObject
: (Pointee ? Pointee : Result.Nodes.getNodeAs<Expr>("STLObject"));
FixItHint Hint;
- std::string ReplacementText = std::string(
- Lexer::getSourceText(CharSourceRange::getTokenRange(E->getSourceRange()),
- *Result.SourceManager, getLangOpts()));
+ std::string ReplacementText =
+ E->isImplicitCXXThis()
+ ? ""
+ : std::string(Lexer::getSourceText(
+ CharSourceRange::getTokenRange(E->getSourceRange()),
+ *Result.SourceManager, getLangOpts()));
const auto *OpCallExpr = dyn_cast<CXXOperatorCallExpr>(E);
if (isBinaryOrTernary(E) || isa<UnaryOperator>(E) ||
(OpCallExpr && (OpCallExpr->getOperator() == OO_Star))) {
@@ -251,6 +254,8 @@ void ContainerSizeEmptyCheck::check(const MatchFinder::MatchResult &Result) {
// This can happen if the object is a smart pointer. Don't add anything
// because a '->' is already there (PR#51776), just call the method.
ReplacementText += "empty()";
+ } else if (E->isImplicitCXXThis()) {
+ ReplacementText += "empty()";
} else if (E->getType()->isPointerType())
ReplacementText += "->empty()";
else
diff --git a/clang-tools-extra/clang-tidy/readability/StaticAccessedThroughInstanceCheck.cpp b/clang-tools-extra/clang-tidy/readability/StaticAccessedThroughInstanceCheck.cpp
index fffb136..a7b3c4a 100644
--- a/clang-tools-extra/clang-tidy/readability/StaticAccessedThroughInstanceCheck.cpp
+++ b/clang-tools-extra/clang-tidy/readability/StaticAccessedThroughInstanceCheck.cpp
@@ -19,19 +19,25 @@ namespace {
AST_MATCHER(CXXMethodDecl, isStatic) { return Node.isStatic(); }
} // namespace
-static unsigned getNameSpecifierNestingLevel(const QualType &QType) {
- if (const auto *ElType = QType->getAs<ElaboratedType>()) {
- if (const NestedNameSpecifier *NestedSpecifiers = ElType->getQualifier()) {
- unsigned NameSpecifierNestingLevel = 1;
- do {
- NameSpecifierNestingLevel++;
- NestedSpecifiers = NestedSpecifiers->getPrefix();
- } while (NestedSpecifiers);
-
+static unsigned getNameSpecifierNestingLevel(QualType QType) {
+ unsigned NameSpecifierNestingLevel = 1;
+ for (NestedNameSpecifier Qualifier = QType->getPrefix(); /**/;
+ ++NameSpecifierNestingLevel) {
+ switch (Qualifier.getKind()) {
+ case NestedNameSpecifier::Kind::Null:
return NameSpecifierNestingLevel;
+ case NestedNameSpecifier::Kind::Global:
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
+ return NameSpecifierNestingLevel + 1;
+ case NestedNameSpecifier::Kind::Namespace:
+ Qualifier = Qualifier.getAsNamespaceAndPrefix().Prefix;
+ continue;
+ case NestedNameSpecifier::Kind::Type:
+ Qualifier = Qualifier.getAsType()->getPrefix();
+ continue;
}
+ llvm_unreachable("unhandled nested name specifier kind");
}
- return 0;
}
void StaticAccessedThroughInstanceCheck::storeOptions(
diff --git a/clang-tools-extra/clang-tidy/readability/SuspiciousCallArgumentCheck.cpp b/clang-tools-extra/clang-tidy/readability/SuspiciousCallArgumentCheck.cpp
index 5a04029..447c243 100644
--- a/clang-tools-extra/clang-tidy/readability/SuspiciousCallArgumentCheck.cpp
+++ b/clang-tools-extra/clang-tidy/readability/SuspiciousCallArgumentCheck.cpp
@@ -414,9 +414,9 @@ static bool areTypesCompatible(QualType ArgType, QualType ParamType,
// Arithmetic types are interconvertible, except scoped enums.
if (ParamType->isArithmeticType() && ArgType->isArithmeticType()) {
if ((ParamType->isEnumeralType() &&
- ParamType->castAs<EnumType>()->getDecl()->isScoped()) ||
+ ParamType->castAs<EnumType>()->getOriginalDecl()->isScoped()) ||
(ArgType->isEnumeralType() &&
- ArgType->castAs<EnumType>()->getDecl()->isScoped()))
+ ArgType->castAs<EnumType>()->getOriginalDecl()->isScoped()))
return false;
return true;
diff --git a/clang-tools-extra/clang-tidy/readability/UseStdMinMaxCheck.cpp b/clang-tools-extra/clang-tidy/readability/UseStdMinMaxCheck.cpp
index 6f6b8a8..718467e 100644
--- a/clang-tools-extra/clang-tidy/readability/UseStdMinMaxCheck.cpp
+++ b/clang-tools-extra/clang-tidy/readability/UseStdMinMaxCheck.cpp
@@ -67,11 +67,7 @@ static QualType getNonTemplateAlias(QualType QT) {
if (!TT->getDecl()->getDescribedTemplate() &&
!TT->getDecl()->getDeclContext()->isDependentContext())
return QT;
- QT = TT->getDecl()->getUnderlyingType();
- }
- // cast to elaborated type
- else if (const ElaboratedType *ET = dyn_cast<ElaboratedType>(QT)) {
- QT = ET->getNamedType();
+ QT = TT->desugar();
} else {
break;
}
diff --git a/clang-tools-extra/clang-tidy/utils/ExceptionSpecAnalyzer.cpp b/clang-tools-extra/clang-tidy/utils/ExceptionSpecAnalyzer.cpp
index 0637d0e..aa6aefc 100644
--- a/clang-tools-extra/clang-tidy/utils/ExceptionSpecAnalyzer.cpp
+++ b/clang-tools-extra/clang-tidy/utils/ExceptionSpecAnalyzer.cpp
@@ -66,7 +66,8 @@ ExceptionSpecAnalyzer::analyzeBase(const CXXBaseSpecifier &Base,
if (!RecType)
return State::Unknown;
- const auto *BaseClass = cast<CXXRecordDecl>(RecType->getDecl());
+ const auto *BaseClass =
+ cast<CXXRecordDecl>(RecType->getOriginalDecl())->getDefinitionOrSelf();
return analyzeRecord(BaseClass, Kind);
}
diff --git a/clang-tools-extra/clang-tidy/utils/FormatStringConverter.cpp b/clang-tools-extra/clang-tidy/utils/FormatStringConverter.cpp
index e1c1bee..0df8e91 100644
--- a/clang-tools-extra/clang-tidy/utils/FormatStringConverter.cpp
+++ b/clang-tools-extra/clang-tidy/utils/FormatStringConverter.cpp
@@ -461,8 +461,9 @@ bool FormatStringConverter::emitIntegerArgument(
// the signedness based on the format string, so we need to do the
// same.
if (const auto *ET = ArgType->getAs<EnumType>()) {
- if (const std::optional<std::string> MaybeCastType =
- castTypeForArgument(ArgKind, ET->getDecl()->getIntegerType()))
+ if (const std::optional<std::string> MaybeCastType = castTypeForArgument(
+ ArgKind,
+ ET->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType()))
ArgFixes.emplace_back(
ArgIndex, (Twine("static_cast<") + *MaybeCastType + ">(").str());
else
diff --git a/clang-tools-extra/clang-tidy/utils/Matchers.cpp b/clang-tools-extra/clang-tidy/utils/Matchers.cpp
index 4974a9c..bd7b03e 100644
--- a/clang-tools-extra/clang-tidy/utils/Matchers.cpp
+++ b/clang-tools-extra/clang-tidy/utils/Matchers.cpp
@@ -34,7 +34,7 @@ bool MatchesAnyListedTypeNameMatcher::matches(
PrintingPolicy PrintingPolicyWithSuppressedTag(
Finder->getASTContext().getLangOpts());
PrintingPolicyWithSuppressedTag.PrintAsCanonical = CanonicalTypes;
- PrintingPolicyWithSuppressedTag.SuppressElaboration = true;
+ PrintingPolicyWithSuppressedTag.FullyQualifiedName = true;
PrintingPolicyWithSuppressedTag.SuppressScope = false;
PrintingPolicyWithSuppressedTag.SuppressTagKeyword = true;
PrintingPolicyWithSuppressedTag.SuppressUnwrittenScope = true;
diff --git a/clang-tools-extra/clang-tidy/utils/RenamerClangTidyCheck.cpp b/clang-tools-extra/clang-tidy/utils/RenamerClangTidyCheck.cpp
index eaa04fe..20853dc 100644
--- a/clang-tools-extra/clang-tidy/utils/RenamerClangTidyCheck.cpp
+++ b/clang-tools-extra/clang-tidy/utils/RenamerClangTidyCheck.cpp
@@ -281,9 +281,10 @@ public:
}
bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc Loc) {
- if (const NestedNameSpecifier *Spec = Loc.getNestedNameSpecifier()) {
+ if (NestedNameSpecifier Spec = Loc.getNestedNameSpecifier();
+ Spec.getKind() == NestedNameSpecifier::Kind::Namespace) {
if (const auto *Decl =
- dyn_cast_if_present<NamespaceDecl>(Spec->getAsNamespace()))
+ dyn_cast<NamespaceDecl>(Spec.getAsNamespaceAndPrefix().Namespace))
Check->addUsage(Decl, Loc.getLocalSourceRange(), SM);
}
@@ -323,48 +324,34 @@ public:
}
bool VisitTypedefTypeLoc(const TypedefTypeLoc &Loc) {
- Check->addUsage(Loc.getTypedefNameDecl(), Loc.getSourceRange(), SM);
+ Check->addUsage(Loc.getDecl(), Loc.getNameLoc(), SM);
return true;
}
bool VisitTagTypeLoc(const TagTypeLoc &Loc) {
- Check->addUsage(Loc.getDecl(), Loc.getSourceRange(), SM);
- return true;
- }
-
- bool VisitInjectedClassNameTypeLoc(const InjectedClassNameTypeLoc &Loc) {
- Check->addUsage(Loc.getDecl(), Loc.getSourceRange(), SM);
+ Check->addUsage(Loc.getOriginalDecl(), Loc.getNameLoc(), SM);
return true;
}
bool VisitUnresolvedUsingTypeLoc(const UnresolvedUsingTypeLoc &Loc) {
- Check->addUsage(Loc.getDecl(), Loc.getSourceRange(), SM);
+ Check->addUsage(Loc.getDecl(), Loc.getNameLoc(), SM);
return true;
}
bool VisitTemplateTypeParmTypeLoc(const TemplateTypeParmTypeLoc &Loc) {
- Check->addUsage(Loc.getDecl(), Loc.getSourceRange(), SM);
+ Check->addUsage(Loc.getDecl(), Loc.getNameLoc(), SM);
return true;
}
bool
VisitTemplateSpecializationTypeLoc(const TemplateSpecializationTypeLoc &Loc) {
const TemplateDecl *Decl =
- Loc.getTypePtr()->getTemplateName().getAsTemplateDecl();
+ Loc.getTypePtr()->getTemplateName().getAsTemplateDecl(
+ /*IgnoreDeduced=*/true);
- SourceRange Range(Loc.getTemplateNameLoc(), Loc.getTemplateNameLoc());
- if (const auto *ClassDecl = dyn_cast<TemplateDecl>(Decl)) {
+ if (const auto *ClassDecl = dyn_cast<TemplateDecl>(Decl))
if (const NamedDecl *TemplDecl = ClassDecl->getTemplatedDecl())
- Check->addUsage(TemplDecl, Range, SM);
- }
-
- return true;
- }
-
- bool VisitDependentTemplateSpecializationTypeLoc(
- const DependentTemplateSpecializationTypeLoc &Loc) {
- if (const TagDecl *Decl = Loc.getTypePtr()->getAsTagDecl())
- Check->addUsage(Decl, Loc.getSourceRange(), SM);
+ Check->addUsage(TemplDecl, Loc.getTemplateNameLoc(), SM);
return true;
}
diff --git a/clang-tools-extra/clang-tidy/utils/TypeTraits.cpp b/clang-tools-extra/clang-tidy/utils/TypeTraits.cpp
index 44db0c2..96d3a5b 100644
--- a/clang-tools-extra/clang-tidy/utils/TypeTraits.cpp
+++ b/clang-tools-extra/clang-tidy/utils/TypeTraits.cpp
@@ -124,7 +124,8 @@ bool isTriviallyDefaultConstructible(QualType Type, const ASTContext &Context) {
return true;
if (const auto *RT = CanonicalType->getAs<RecordType>()) {
- return recordIsTriviallyDefaultConstructible(*RT->getDecl(), Context);
+ return recordIsTriviallyDefaultConstructible(
+ *RT->getOriginalDecl()->getDefinitionOrSelf(), Context);
}
// No other types can match.
diff --git a/clang-tools-extra/clangd/AST.cpp b/clang-tools-extra/clangd/AST.cpp
index f2631e5..82aee4c 100644
--- a/clang-tools-extra/clangd/AST.cpp
+++ b/clang-tools-extra/clangd/AST.cpp
@@ -102,54 +102,78 @@ getUsingNamespaceDirectives(const DeclContext *DestContext,
// ancestor is redundant, therefore we stop at lowest common ancestor.
// In addition to that stops early whenever IsVisible returns true. This can be
// used to implement support for "using namespace" decls.
-std::string
-getQualification(ASTContext &Context, const DeclContext *DestContext,
- const DeclContext *SourceContext,
- llvm::function_ref<bool(NestedNameSpecifier *)> IsVisible) {
- std::vector<const NestedNameSpecifier *> Parents;
- bool ReachedNS = false;
+std::string getQualification(ASTContext &Context,
+ const DeclContext *DestContext,
+ const DeclContext *SourceContext,
+ llvm::function_ref<bool(const Decl *)> IsVisible) {
+ std::vector<const Decl *> Parents;
+ [[maybe_unused]] bool ReachedNS = false;
for (const DeclContext *CurContext = SourceContext; CurContext;
CurContext = CurContext->getLookupParent()) {
// Stop once we reach a common ancestor.
if (CurContext->Encloses(DestContext))
break;
- NestedNameSpecifier *NNS = nullptr;
+ const Decl *CurD;
if (auto *TD = llvm::dyn_cast<TagDecl>(CurContext)) {
// There can't be any more tag parents after hitting a namespace.
assert(!ReachedNS);
- (void)ReachedNS;
- NNS = NestedNameSpecifier::Create(Context, nullptr, TD->getTypeForDecl());
+ CurD = TD;
} else if (auto *NSD = llvm::dyn_cast<NamespaceDecl>(CurContext)) {
ReachedNS = true;
- NNS = NestedNameSpecifier::Create(Context, nullptr, NSD);
// Anonymous and inline namespace names are not spelled while qualifying
// a name, so skip those.
if (NSD->isAnonymousNamespace() || NSD->isInlineNamespace())
continue;
+ CurD = NSD;
} else {
// Other types of contexts cannot be spelled in code, just skip over
// them.
continue;
}
// Stop if this namespace is already visible at DestContext.
- if (IsVisible(NNS))
+ if (IsVisible(CurD))
break;
- Parents.push_back(NNS);
+ Parents.push_back(CurD);
+ }
+
+ // Go over the declarations in reverse order, since we stored inner-most
+ // parent first.
+ NestedNameSpecifier Qualifier = std::nullopt;
+ bool IsFirst = true;
+ for (const auto *CurD : llvm::reverse(Parents)) {
+ if (auto *TD = llvm::dyn_cast<TagDecl>(CurD)) {
+ QualType T;
+ if (const auto *RD = dyn_cast<CXXRecordDecl>(TD);
+ ClassTemplateDecl *CTD = RD->getDescribedClassTemplate()) {
+ ArrayRef<TemplateArgument> Args;
+ if (const auto *SD = dyn_cast<ClassTemplateSpecializationDecl>(RD))
+ Args = SD->getTemplateArgs().asArray();
+ else
+ Args = CTD->getTemplateParameters()->getInjectedTemplateArgs(Context);
+ T = Context.getTemplateSpecializationType(
+ ElaboratedTypeKeyword::None,
+ Context.getQualifiedTemplateName(
+ Qualifier, /*TemplateKeyword=*/!IsFirst, TemplateName(CTD)),
+ Args, /*CanonicalArgs=*/{}, Context.getCanonicalTagType(RD));
+ } else {
+ T = Context.getTagType(ElaboratedTypeKeyword::None, Qualifier, TD,
+ /*OwnsTag=*/false);
+ }
+ Qualifier = NestedNameSpecifier(T.getTypePtr());
+ } else {
+ Qualifier =
+ NestedNameSpecifier(Context, cast<NamespaceDecl>(CurD), Qualifier);
+ }
+ IsFirst = false;
}
+ if (!Qualifier)
+ return "";
- // Go over name-specifiers in reverse order to create necessary qualification,
- // since we stored inner-most parent first.
std::string Result;
llvm::raw_string_ostream OS(Result);
- for (const auto *Parent : llvm::reverse(Parents)) {
- if (Parent != *Parents.rbegin() && Parent->isDependent() &&
- Parent->getAsRecordDecl() &&
- Parent->getAsRecordDecl()->getDescribedClassTemplate())
- OS << "template ";
- Parent->print(OS, Context.getPrintingPolicy());
- }
+ Qualifier.print(OS, Context.getPrintingPolicy());
return OS.str();
}
@@ -187,6 +211,7 @@ std::string printQualifiedName(const NamedDecl &ND) {
// include them, but at query time it's hard to find all the inline
// namespaces to query: the preamble doesn't have a dedicated list.
Policy.SuppressUnwrittenScope = true;
+ Policy.SuppressScope = true;
// (unnamed struct), not (unnamed struct at /path/to/foo.cc:42:1).
// In clangd, context is usually available and paths are mostly noise.
Policy.AnonymousTagLocations = false;
@@ -213,8 +238,7 @@ std::string printUsingNamespaceName(const ASTContext &Ctx,
std::string Name;
llvm::raw_string_ostream Out(Name);
- if (auto *Qual = D.getQualifier())
- Qual->print(Out, PP);
+ D.getQualifier().print(Out, PP);
D.getNominatedNamespaceAsWritten()->printName(Out);
return Out.str();
}
@@ -229,8 +253,7 @@ std::string printName(const ASTContext &Ctx, const NamedDecl &ND) {
// Handle 'using namespace'. They all have the same name - <using-directive>.
if (auto *UD = llvm::dyn_cast<UsingDirectiveDecl>(&ND)) {
Out << "using namespace ";
- if (auto *Qual = UD->getQualifier())
- Qual->print(Out, PP);
+ UD->getQualifier().print(Out, PP);
UD->getNominatedNamespaceAsWritten()->printName(Out);
return Out.str();
}
@@ -250,8 +273,7 @@ std::string printName(const ASTContext &Ctx, const NamedDecl &ND) {
}
// Print nested name qualifier if it was written in the source code.
- if (auto *Qualifier = getQualifierLoc(ND).getNestedNameSpecifier())
- Qualifier->print(Out, PP);
+ getQualifierLoc(ND).getNestedNameSpecifier().print(Out, PP);
// Print the name itself.
ND.getDeclName().print(Out, PP);
// Print template arguments.
@@ -391,12 +413,13 @@ preferredIncludeDirective(llvm::StringRef FileName, const LangOptions &LangOpts,
}
std::string printType(const QualType QT, const DeclContext &CurContext,
- const llvm::StringRef Placeholder) {
+ const llvm::StringRef Placeholder, bool FullyQualify) {
std::string Result;
llvm::raw_string_ostream OS(Result);
PrintingPolicy PP(CurContext.getParentASTContext().getPrintingPolicy());
PP.SuppressTagKeyword = true;
PP.SuppressUnwrittenScope = true;
+ PP.FullyQualifiedName = FullyQualify;
class PrintCB : public PrintingCallbacks {
public:
@@ -439,6 +462,7 @@ QualType declaredType(const TypeDecl *D) {
if (const auto *CTSD = llvm::dyn_cast<ClassTemplateSpecializationDecl>(D))
if (const auto *Args = CTSD->getTemplateArgsAsWritten())
return Context.getTemplateSpecializationType(
+ ElaboratedTypeKeyword::None,
TemplateName(CTSD->getSpecializedTemplate()), Args->arguments(),
/*CanonicalArgs=*/{});
return Context.getTypeDeclType(D);
@@ -664,13 +688,10 @@ std::string getQualification(ASTContext &Context,
auto VisibleNamespaceDecls =
getUsingNamespaceDirectives(DestContext, InsertionPoint);
return getQualification(
- Context, DestContext, ND->getDeclContext(),
- [&](NestedNameSpecifier *NNS) {
- const NamespaceDecl *NS =
- dyn_cast_if_present<NamespaceDecl>(NNS->getAsNamespace());
- if (!NS)
+ Context, DestContext, ND->getDeclContext(), [&](const Decl *D) {
+ if (D->getKind() != Decl::Namespace)
return false;
- NS = NS->getCanonicalDecl();
+ const auto *NS = cast<NamespaceDecl>(D)->getCanonicalDecl();
return llvm::any_of(VisibleNamespaceDecls,
[NS](const NamespaceDecl *NSD) {
return NSD->getCanonicalDecl() == NS;
@@ -687,12 +708,11 @@ std::string getQualification(ASTContext &Context,
(void)NS;
}
return getQualification(
- Context, DestContext, ND->getDeclContext(),
- [&](NestedNameSpecifier *NNS) {
+ Context, DestContext, ND->getDeclContext(), [&](const Decl *D) {
return llvm::any_of(VisibleNamespaces, [&](llvm::StringRef Namespace) {
std::string NS;
llvm::raw_string_ostream OS(NS);
- NNS->print(OS, Context.getPrintingPolicy());
+ D->print(OS, Context.getPrintingPolicy());
return OS.str() == Namespace;
});
});
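As a rough illustration of the rewritten qualifier construction (hypothetical declarations; the expected output is an inference, not taken from the patch): the collected parents are walked outer-most first, namespaces become Namespace specifiers and tags become Type specifiers, and the result is printed as a single nested-name-specifier.

namespace a {
namespace b {
struct S {
  struct Inner {};  // a member declared here has SourceContext a::b::S::Inner
};
}  // namespace b
}  // namespace a

// Qualifying a name declared inside a::b::S for use at global scope, with no
// visible namespaces, would be expected to produce the string "a::b::S::".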
diff --git a/clang-tools-extra/clangd/AST.h b/clang-tools-extra/clangd/AST.h
index fb0722d..1538d12 100644
--- a/clang-tools-extra/clangd/AST.h
+++ b/clang-tools-extra/clangd/AST.h
@@ -135,7 +135,8 @@ preferredIncludeDirective(llvm::StringRef FileName, const LangOptions &LangOpts,
/// Returns a QualType as string. The result doesn't contain unwritten scopes
/// like anonymous/inline namespace.
std::string printType(const QualType QT, const DeclContext &CurContext,
- llvm::StringRef Placeholder = "");
+ llvm::StringRef Placeholder = "",
+ bool FullyQualify = false);
/// Indicates if \p D is a template instantiation implicitly generated by the
/// compiler, e.g.
diff --git a/clang-tools-extra/clangd/CMakeLists.txt b/clang-tools-extra/clangd/CMakeLists.txt
index a1e9da4..06920a9 100644
--- a/clang-tools-extra/clangd/CMakeLists.txt
+++ b/clang-tools-extra/clangd/CMakeLists.txt
@@ -108,6 +108,7 @@ add_clang_library(clangDaemon STATIC
SemanticHighlighting.cpp
SemanticSelection.cpp
SourceCode.cpp
+ SymbolDocumentation.cpp
SystemIncludeExtractor.cpp
TidyProvider.cpp
TUScheduler.cpp
diff --git a/clang-tools-extra/clangd/CodeComplete.cpp b/clang-tools-extra/clangd/CodeComplete.cpp
index 9c17b4c..16559e4 100644
--- a/clang-tools-extra/clangd/CodeComplete.cpp
+++ b/clang-tools-extra/clangd/CodeComplete.cpp
@@ -1466,19 +1466,15 @@ bool allowIndex(CodeCompletionContext &CC) {
auto Scope = CC.getCXXScopeSpecifier();
if (!Scope)
return true;
- NestedNameSpecifier *NameSpec = (*Scope)->getScopeRep();
- if (!NameSpec)
- return true;
// We only query the index when qualifier is a namespace.
// If it's a class, we rely solely on sema completions.
- switch (NameSpec->getKind()) {
- case NestedNameSpecifier::Global:
- case NestedNameSpecifier::Namespace:
+ switch ((*Scope)->getScopeRep().getKind()) {
+ case NestedNameSpecifier::Kind::Null:
+ case NestedNameSpecifier::Kind::Global:
+ case NestedNameSpecifier::Kind::Namespace:
return true;
- case NestedNameSpecifier::Super:
- case NestedNameSpecifier::TypeSpec:
- // Unresolved inside a template.
- case NestedNameSpecifier::Identifier:
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
+ case NestedNameSpecifier::Kind::Type:
return false;
}
llvm_unreachable("invalid NestedNameSpecifier kind");
@@ -2434,6 +2430,9 @@ CompletionItem CodeCompletion::render(const CodeCompleteOptions &Opts) const {
}
llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const CodeCompletion &C) {
+ OS << "Signature: " << "\"" << C.Signature << "\", "
+ << "SnippetSuffix: " << "\"" << C.SnippetSuffix << "\""
+ << ", Rendered:";
// For now just lean on CompletionItem.
return OS << C.render(CodeCompleteOptions());
}
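A sketch of the completion sites the reworked allowIndex() switch above distinguishes (hypothetical code; the carets mark completion points):

namespace ns { struct S { static int Member; }; }

void demo() {
  // ns::^      qualifier kind Namespace -> index results allowed
  // ::^        qualifier kind Global    -> index results allowed
  // ns::S::^   qualifier kind Type      -> sema-only completions
  (void)ns::S::Member;
}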
diff --git a/clang-tools-extra/clangd/CodeCompletionStrings.cpp b/clang-tools-extra/clangd/CodeCompletionStrings.cpp
index 9b4442b..d657964 100644
--- a/clang-tools-extra/clangd/CodeCompletionStrings.cpp
+++ b/clang-tools-extra/clangd/CodeCompletionStrings.cpp
@@ -7,13 +7,18 @@
//===----------------------------------------------------------------------===//
#include "CodeCompletionStrings.h"
+#include "Config.h"
+#include "SymbolDocumentation.h"
#include "clang-c/Index.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/Comment.h"
+#include "clang/AST/Decl.h"
#include "clang/AST/RawCommentList.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Sema/CodeCompleteConsumer.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/JSON.h"
+#include "llvm/Support/raw_ostream.h"
#include <limits>
#include <utility>
@@ -100,16 +105,51 @@ std::string getDeclComment(const ASTContext &Ctx, const NamedDecl &Decl) {
// the comments for namespaces.
return "";
}
- const RawComment *RC = getCompletionComment(Ctx, &Decl);
- if (!RC)
- return "";
- // Sanity check that the comment does not come from the PCH. We choose to not
- // write them into PCH, because they are racy and slow to load.
- assert(!Ctx.getSourceManager().isLoadedSourceLocation(RC->getBeginLoc()));
- std::string Doc =
- RC->getFormattedText(Ctx.getSourceManager(), Ctx.getDiagnostics());
- if (!looksLikeDocComment(Doc))
- return "";
+
+ const RawComment *RC = nullptr;
+ const Config &Cfg = Config::current();
+
+ std::string Doc;
+
+ if (Cfg.Documentation.CommentFormat == Config::CommentFormatPolicy::Doxygen &&
+ isa<ParmVarDecl>(Decl)) {
+ // Parameters are documented in their declaration context (function or
+ // template function).
+ const NamedDecl *ND = dyn_cast<NamedDecl>(Decl.getDeclContext());
+ if (!ND)
+ return "";
+
+ RC = getCompletionComment(Ctx, ND);
+ if (!RC)
+ return "";
+
+ // Sanity check that the comment does not come from the PCH. We choose to
+ // not write them into PCH, because they are racy and slow to load.
+ assert(!Ctx.getSourceManager().isLoadedSourceLocation(RC->getBeginLoc()));
+
+ comments::FullComment *FC = RC->parse(Ctx, /*PP=*/nullptr, ND);
+ if (!FC)
+ return "";
+
+ SymbolDocCommentVisitor V(FC, Ctx.getLangOpts().CommentOpts);
+ std::string RawDoc;
+ llvm::raw_string_ostream OS(RawDoc);
+
+ V.parameterDocToString(dyn_cast<ParmVarDecl>(&Decl)->getName(), OS);
+
+ Doc = StringRef(RawDoc).trim().str();
+ } else {
+ RC = getCompletionComment(Ctx, &Decl);
+ if (!RC)
+ return "";
+ // Sanity check that the comment does not come from the PCH. We choose to
+ // not write them into PCH, because they are racy and slow to load.
+ assert(!Ctx.getSourceManager().isLoadedSourceLocation(RC->getBeginLoc()));
+ Doc = RC->getFormattedText(Ctx.getSourceManager(), Ctx.getDiagnostics());
+ if (!looksLikeDocComment(Doc))
+ return "";
+ }
+
// Clang requires source to be UTF-8, but doesn't enforce this in comments.
if (!llvm::json::isUTF8(Doc))
Doc = llvm::json::fixUTF8(Doc);
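A hypothetical input illustrating the new ParmVarDecl path: with the comment format set to Doxygen, a parameter's documentation is pulled from the \param block of the enclosing function's comment rather than from the whole raw comment.

/// Appends items to the container.
/// \param Count number of items to append
/// \param Value the value to copy into each new slot
void append(int Count, int Value);

// Completing or hovering the parameter 'Count' would be expected to show just
// "number of items to append" instead of the full comment text.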
diff --git a/clang-tools-extra/clangd/ConfigFragment.h b/clang-tools-extra/clangd/ConfigFragment.h
index a6a7cd5..0f11f37 100644
--- a/clang-tools-extra/clangd/ConfigFragment.h
+++ b/clang-tools-extra/clangd/ConfigFragment.h
@@ -315,7 +315,7 @@ struct Fragment {
/// AngledHeaders (i.e. a header matches a regex in both QuotedHeaders and
/// AngledHeaders), system headers use <> and non-system headers use "".
/// These can match any suffix of the header file in question.
- /// Matching is performed against the header text, not its absolute path
+ /// Matching is performed against the absolute path of the header
/// within the project.
std::vector<Located<std::string>> QuotedHeaders;
/// List of regexes for headers that should always be included with a
@@ -323,7 +323,7 @@ struct Fragment {
/// AngledHeaders (i.e. a header matches a regex in both QuotedHeaders and
/// AngledHeaders), system headers use <> and non-system headers use "".
/// These can match any suffix of the header file in question.
- /// Matching is performed against the header text, not its absolute path
+ /// Matching is performed against the absolute path of the header
/// within the project.
std::vector<Located<std::string>> AngledHeaders;
};
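A standalone sketch of the suffix matching described above, using llvm::Regex purely for illustration (the actual filter plumbing in clangd lives elsewhere); the point is that a user-supplied regex is applied to the header's absolute path, so suffix-style patterns are the natural form.

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Regex.h"

// Hypothetical QuotedHeaders entry: "foo/public/.*\\.h$"
bool matchesQuotedFilter(llvm::StringRef AbsoluteHeaderPath) {
  llvm::Regex Filter("foo/public/.*\\.h$");
  // "/work/project/foo/public/api.h" -> true; "/usr/include/vector" -> false.
  return Filter.match(AbsoluteHeaderPath);
}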
diff --git a/clang-tools-extra/clangd/DumpAST.cpp b/clang-tools-extra/clangd/DumpAST.cpp
index c6075e7..9a8d41d 100644
--- a/clang-tools-extra/clangd/DumpAST.cpp
+++ b/clang-tools-extra/clangd/DumpAST.cpp
@@ -147,17 +147,17 @@ class DumpVisitor : public RecursiveASTVisitor<DumpVisitor> {
}
llvm_unreachable("Unhandled ArgKind enum");
}
- std::string getKind(const NestedNameSpecifierLoc &NNSL) {
- assert(NNSL.getNestedNameSpecifier());
- switch (NNSL.getNestedNameSpecifier()->getKind()) {
+ std::string getKind(NestedNameSpecifierLoc NNSL) {
+ switch (NNSL.getNestedNameSpecifier().getKind()) {
+ case NestedNameSpecifier::Kind::Null:
+ llvm_unreachable("unexpected null nested name specifier");
#define NNS_KIND(X) \
- case NestedNameSpecifier::X: \
+ case NestedNameSpecifier::Kind::X: \
return #X
- NNS_KIND(Identifier);
NNS_KIND(Namespace);
- NNS_KIND(TypeSpec);
+ NNS_KIND(Type);
NNS_KIND(Global);
- NNS_KIND(Super);
+ NNS_KIND(MicrosoftSuper);
#undef NNS_KIND
}
llvm_unreachable("Unhandled SpecifierKind enum");
@@ -261,7 +261,7 @@ class DumpVisitor : public RecursiveASTVisitor<DumpVisitor> {
return TL.getType().getLocalQualifiers().getAsString(
Ctx.getPrintingPolicy());
if (const auto *TT = dyn_cast<TagType>(TL.getTypePtr()))
- return getDetail(TT->getDecl());
+ return getDetail(TT->getOriginalDecl());
if (const auto *DT = dyn_cast<DeducedType>(TL.getTypePtr()))
if (DT->isDeduced())
return DT->getDeducedType().getAsString(Ctx.getPrintingPolicy());
@@ -273,16 +273,11 @@ class DumpVisitor : public RecursiveASTVisitor<DumpVisitor> {
return getDetail(TT->getDecl());
return "";
}
- std::string getDetail(const NestedNameSpecifierLoc &NNSL) {
- const auto &NNS = *NNSL.getNestedNameSpecifier();
- switch (NNS.getKind()) {
- case NestedNameSpecifier::Identifier:
- return NNS.getAsIdentifier()->getName().str() + "::";
- case NestedNameSpecifier::Namespace:
- return NNS.getAsNamespace()->getNameAsString() + "::";
- default:
+ std::string getDetail(NestedNameSpecifierLoc NNSL) {
+ NestedNameSpecifier NNS = NNSL.getNestedNameSpecifier();
+ if (NNS.getKind() != NestedNameSpecifier::Kind::Namespace)
return "";
- }
+ return NNS.getAsNamespaceAndPrefix().Namespace->getNameAsString() + "::";
}
std::string getDetail(const CXXCtorInitializer *CCI) {
if (FieldDecl *FD = CCI->getAnyMember())
@@ -346,8 +341,10 @@ public:
return !D || isInjectedClassName(D) ||
traverseNode("declaration", D, [&] { Base::TraverseDecl(D); });
}
- bool TraverseTypeLoc(TypeLoc TL) {
- return !TL || traverseNode("type", TL, [&] { Base::TraverseTypeLoc(TL); });
+ bool TraverseTypeLoc(TypeLoc TL, bool TraverseQualifier = true) {
+ return !TL || traverseNode("type", TL, [&] {
+ Base::TraverseTypeLoc(TL, TraverseQualifier);
+ });
}
bool TraverseTemplateName(const TemplateName &TN) {
return traverseNode("template name", TN,
@@ -389,11 +386,11 @@ public:
// This means we'd never see 'int' in 'const int'! Work around that here.
// (The reason for the behavior is to avoid traversing the nested Type twice,
// but we ignore TraverseType anyway).
- bool TraverseQualifiedTypeLoc(QualifiedTypeLoc QTL) {
+ bool TraverseQualifiedTypeLoc(QualifiedTypeLoc QTL, bool TraverseQualifier) {
return TraverseTypeLoc(QTL.getUnqualifiedLoc());
}
// Uninteresting parts of the AST that don't have locations within them.
- bool TraverseNestedNameSpecifier(NestedNameSpecifier *) { return true; }
+ bool TraverseNestedNameSpecifier(NestedNameSpecifier) { return true; }
bool TraverseType(QualType) { return true; }
// OpaqueValueExpr blocks traversal, we must explicitly traverse it.
@@ -420,7 +417,7 @@ ASTNode dumpAST(const DynTypedNode &N, const syntax::TokenBuffer &Tokens,
V.TraverseNestedNameSpecifierLoc(
*const_cast<NestedNameSpecifierLoc *>(NNSL));
else if (const auto *NNS = N.get<NestedNameSpecifier>())
- V.TraverseNestedNameSpecifier(const_cast<NestedNameSpecifier *>(NNS));
+ V.TraverseNestedNameSpecifier(*NNS);
else if (const auto *TL = N.get<TypeLoc>())
V.TraverseTypeLoc(*const_cast<TypeLoc *>(TL));
else if (const auto *QT = N.get<QualType>())
diff --git a/clang-tools-extra/clangd/FindTarget.cpp b/clang-tools-extra/clangd/FindTarget.cpp
index b108957..32018d1 100644
--- a/clang-tools-extra/clangd/FindTarget.cpp
+++ b/clang-tools-extra/clangd/FindTarget.cpp
@@ -366,19 +366,11 @@ public:
Visitor(TargetFinder &Outer, RelSet Flags) : Outer(Outer), Flags(Flags) {}
void VisitTagType(const TagType *TT) {
- Outer.add(TT->getAsTagDecl(), Flags);
- }
-
- void VisitElaboratedType(const ElaboratedType *ET) {
- Outer.add(ET->desugar(), Flags);
+ Outer.add(cast<TagType>(TT)->getOriginalDecl(), Flags);
}
void VisitUsingType(const UsingType *ET) {
- Outer.add(ET->getFoundDecl(), Flags);
- }
-
- void VisitInjectedClassNameType(const InjectedClassNameType *ICNT) {
- Outer.add(ICNT->getDecl(), Flags);
+ Outer.add(ET->getDecl(), Flags);
}
void VisitDecltypeType(const DecltypeType *DTT) {
@@ -483,30 +475,27 @@ public:
Visitor(*this, Flags).Visit(T.getTypePtr());
}
- void add(const NestedNameSpecifier *NNS, RelSet Flags) {
+ void add(NestedNameSpecifier NNS, RelSet Flags) {
if (!NNS)
return;
- debug(*NNS, Flags);
- switch (NNS->getKind()) {
- case NestedNameSpecifier::Namespace:
- add(NNS->getAsNamespace(), Flags);
- return;
- case NestedNameSpecifier::Identifier:
- if (Resolver) {
- add(Resolver->resolveNestedNameSpecifierToType(NNS), Flags);
- }
+ debug(NNS, Flags);
+ switch (NNS.getKind()) {
+ case NestedNameSpecifier::Kind::Namespace:
+ add(NNS.getAsNamespaceAndPrefix().Namespace, Flags);
return;
- case NestedNameSpecifier::TypeSpec:
- add(QualType(NNS->getAsType(), 0), Flags);
+ case NestedNameSpecifier::Kind::Type:
+ add(QualType(NNS.getAsType(), 0), Flags);
return;
- case NestedNameSpecifier::Global:
+ case NestedNameSpecifier::Kind::Global:
// This should be TUDecl, but we can't get a pointer to it!
return;
- case NestedNameSpecifier::Super:
- add(NNS->getAsRecordDecl(), Flags);
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
+ add(NNS.getAsMicrosoftSuper(), Flags);
return;
+ case NestedNameSpecifier::Kind::Null:
+ llvm_unreachable("unexpected null nested name specifier");
}
- llvm_unreachable("unhandled NestedNameSpecifier::SpecifierKind");
+ llvm_unreachable("unhandled NestedNameSpecifier::Kind");
}
void add(const CXXCtorInitializer *CCI, RelSet Flags) {
@@ -555,7 +544,7 @@ allTargetDecls(const DynTypedNode &N, const HeuristicResolver *Resolver) {
else if (const NestedNameSpecifierLoc *NNSL = N.get<NestedNameSpecifierLoc>())
Finder.add(NNSL->getNestedNameSpecifier(), Flags);
else if (const NestedNameSpecifier *NNS = N.get<NestedNameSpecifier>())
- Finder.add(NNS, Flags);
+ Finder.add(*NNS, Flags);
else if (const TypeLoc *TL = N.get<TypeLoc>())
Finder.add(TL->getType(), Flags);
else if (const QualType *QT = N.get<QualType>())
@@ -861,32 +850,25 @@ refInTypeLoc(TypeLoc L, const HeuristicResolver *Resolver) {
const HeuristicResolver *Resolver;
llvm::SmallVector<ReferenceLoc> Refs;
- void VisitElaboratedTypeLoc(ElaboratedTypeLoc L) {
- // We only know about qualifier, rest if filled by inner locations.
- size_t InitialSize = Refs.size();
- Visit(L.getNamedTypeLoc().getUnqualifiedLoc());
- size_t NewSize = Refs.size();
- // Add qualifier for the newly-added refs.
- for (unsigned I = InitialSize; I < NewSize; ++I) {
- ReferenceLoc *Ref = &Refs[I];
- // Fill in the qualifier.
- assert(!Ref->Qualifier.hasQualifier() && "qualifier already set");
- Ref->Qualifier = L.getQualifierLoc();
- }
+ void VisitUnresolvedUsingTypeLoc(UnresolvedUsingTypeLoc L) {
+ Refs.push_back(ReferenceLoc{L.getQualifierLoc(),
+ L.getLocalSourceRange().getBegin(),
+ /*IsDecl=*/false,
+ {L.getDecl()}});
}
void VisitUsingTypeLoc(UsingTypeLoc L) {
- Refs.push_back(ReferenceLoc{NestedNameSpecifierLoc(),
+ Refs.push_back(ReferenceLoc{L.getQualifierLoc(),
L.getLocalSourceRange().getBegin(),
/*IsDecl=*/false,
- {L.getFoundDecl()}});
+ {L.getDecl()}});
}
void VisitTagTypeLoc(TagTypeLoc L) {
- Refs.push_back(ReferenceLoc{NestedNameSpecifierLoc(),
+ Refs.push_back(ReferenceLoc{L.getQualifierLoc(),
L.getNameLoc(),
/*IsDecl=*/false,
- {L.getDecl()}});
+ {L.getOriginalDecl()}});
}
void VisitTemplateTypeParmTypeLoc(TemplateTypeParmTypeLoc L) {
@@ -906,25 +888,18 @@ refInTypeLoc(TypeLoc L, const HeuristicResolver *Resolver) {
// 2. 'vector<int>' with mask 'Underlying'.
// we want to return only #1 in this case.
Refs.push_back(ReferenceLoc{
- NestedNameSpecifierLoc(), L.getTemplateNameLoc(), /*IsDecl=*/false,
+ L.getQualifierLoc(), L.getTemplateNameLoc(), /*IsDecl=*/false,
explicitReferenceTargets(DynTypedNode::create(L.getType()),
DeclRelation::Alias, Resolver)});
}
void VisitDeducedTemplateSpecializationTypeLoc(
DeducedTemplateSpecializationTypeLoc L) {
Refs.push_back(ReferenceLoc{
- NestedNameSpecifierLoc(), L.getNameLoc(), /*IsDecl=*/false,
+ L.getQualifierLoc(), L.getNameLoc(), /*IsDecl=*/false,
explicitReferenceTargets(DynTypedNode::create(L.getType()),
DeclRelation::Alias, Resolver)});
}
- void VisitInjectedClassNameTypeLoc(InjectedClassNameTypeLoc TL) {
- Refs.push_back(ReferenceLoc{NestedNameSpecifierLoc(),
- TL.getNameLoc(),
- /*IsDecl=*/false,
- {TL.getDecl()}});
- }
-
void VisitDependentTemplateSpecializationTypeLoc(
DependentTemplateSpecializationTypeLoc L) {
Refs.push_back(
@@ -943,12 +918,12 @@ refInTypeLoc(TypeLoc L, const HeuristicResolver *Resolver) {
}
void VisitTypedefTypeLoc(TypedefTypeLoc L) {
- if (shouldSkipTypedef(L.getTypedefNameDecl()))
+ if (shouldSkipTypedef(L.getDecl()))
return;
- Refs.push_back(ReferenceLoc{NestedNameSpecifierLoc(),
+ Refs.push_back(ReferenceLoc{L.getQualifierLoc(),
L.getNameLoc(),
/*IsDecl=*/false,
- {L.getTypedefNameDecl()}});
+ {L.getDecl()}});
}
void VisitObjCInterfaceTypeLoc(ObjCInterfaceTypeLoc L) {
@@ -980,17 +955,6 @@ public:
return true;
}
- bool TraverseElaboratedTypeLoc(ElaboratedTypeLoc L) {
- // ElaboratedTypeLoc will reports information for its inner type loc.
- // Otherwise we loose information about inner types loc's qualifier.
- TypeLoc Inner = L.getNamedTypeLoc().getUnqualifiedLoc();
- if (L.getBeginLoc() == Inner.getBeginLoc())
- return RecursiveASTVisitor::TraverseTypeLoc(Inner);
- else
- TypeLocsToSkip.insert(Inner.getBeginLoc());
- return RecursiveASTVisitor::TraverseElaboratedTypeLoc(L);
- }
-
bool VisitStmt(Stmt *S) {
visitNode(DynTypedNode::create(*S));
return true;
@@ -1051,7 +1015,7 @@ public:
return true;
visitNode(DynTypedNode::create(L));
// Inner type is missing information about its qualifier, skip it.
- if (auto TL = L.getTypeLoc())
+ if (auto TL = L.getAsTypeLoc())
TypeLocsToSkip.insert(TL.getBeginLoc());
return RecursiveASTVisitor::TraverseNestedNameSpecifierLoc(L);
}
@@ -1092,12 +1056,21 @@ private:
if (auto *S = N.get<Stmt>())
return refInStmt(S, Resolver);
if (auto *NNSL = N.get<NestedNameSpecifierLoc>()) {
- // (!) 'DeclRelation::Alias' ensures we do not loose namespace aliases.
- return {ReferenceLoc{
- NNSL->getPrefix(), NNSL->getLocalBeginLoc(), false,
- explicitReferenceTargets(
- DynTypedNode::create(*NNSL->getNestedNameSpecifier()),
- DeclRelation::Alias, Resolver)}};
+ // (!) 'DeclRelation::Alias' ensures we do not lose namespace aliases.
+ NestedNameSpecifierLoc Qualifier;
+ SourceLocation NameLoc;
+ if (auto TL = NNSL->getAsTypeLoc()) {
+ Qualifier = TL.getPrefix();
+ NameLoc = TL.getNonPrefixBeginLoc();
+ } else {
+ Qualifier = NNSL->getAsNamespaceAndPrefix().Prefix;
+ NameLoc = NNSL->getLocalBeginLoc();
+ }
+ return {
+ ReferenceLoc{Qualifier, NameLoc, false,
+ explicitReferenceTargets(
+ DynTypedNode::create(NNSL->getNestedNameSpecifier()),
+ DeclRelation::Alias, Resolver)}};
}
if (const TypeLoc *TL = N.get<TypeLoc>())
return refInTypeLoc(*TL, Resolver);
@@ -1210,8 +1183,8 @@ llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, ReferenceLoc R) {
OS << "}";
if (R.Qualifier) {
OS << ", qualifier = '";
- R.Qualifier.getNestedNameSpecifier()->print(OS,
- PrintingPolicy(LangOptions()));
+ R.Qualifier.getNestedNameSpecifier().print(OS,
+ PrintingPolicy(LangOptions()));
OS << "'";
}
if (R.IsDecl)
diff --git a/clang-tools-extra/clangd/Headers.cpp b/clang-tools-extra/clangd/Headers.cpp
index 87fd261b..b9d67cc6 100644
--- a/clang-tools-extra/clangd/Headers.cpp
+++ b/clang-tools-extra/clangd/Headers.cpp
@@ -304,16 +304,17 @@ IncludeInserter::calculateIncludePath(const HeaderFile &InsertedHeader,
// FIXME: should we allow (some limited number of) "../header.h"?
if (llvm::sys::path::is_absolute(Suggested))
return std::nullopt;
+ auto HeaderPath = llvm::sys::path::convert_to_slash(InsertedHeader.File);
bool IsAngled = false;
for (auto &Filter : AngledHeaders) {
- if (Filter(Suggested)) {
+ if (Filter(HeaderPath)) {
IsAngled = true;
break;
}
}
bool IsQuoted = false;
for (auto &Filter : QuotedHeaders) {
- if (Filter(Suggested)) {
+ if (Filter(HeaderPath)) {
IsQuoted = true;
break;
}
@@ -324,7 +325,7 @@ IncludeInserter::calculateIncludePath(const HeaderFile &InsertedHeader,
if (IsAngled && IsQuoted) {
elog("Header '{0}' matches both quoted and angled regexes, default will "
"be used.",
- Suggested);
+ HeaderPath);
}
IsAngled = IsAngledByDefault;
}
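A small sketch of the normalization the hunk above introduces: the regex filters are now run against the inserted header's absolute path, converted to forward slashes, rather than against the suggested include spelling.

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Path.h"
#include <string>

std::string normalizedHeaderPath(llvm::StringRef InsertedHeaderFile) {
  // On Windows, "C:\\proj\\include\\foo.h" becomes "C:/proj/include/foo.h";
  // on POSIX the path is returned unchanged.
  return llvm::sys::path::convert_to_slash(InsertedHeaderFile);
}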
diff --git a/clang-tools-extra/clangd/Hover.cpp b/clang-tools-extra/clangd/Hover.cpp
index 1e0718d..a7cf45c 100644
--- a/clang-tools-extra/clangd/Hover.cpp
+++ b/clang-tools-extra/clangd/Hover.cpp
@@ -18,6 +18,7 @@
#include "Protocol.h"
#include "Selection.h"
#include "SourceCode.h"
+#include "SymbolDocumentation.h"
#include "clang-include-cleaner/Analysis.h"
#include "clang-include-cleaner/IncludeSpeller.h"
#include "clang-include-cleaner/Types.h"
@@ -41,6 +42,7 @@
#include "clang/AST/Type.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/LLVM.h"
+#include "clang/Basic/LangOptions.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
@@ -170,13 +172,14 @@ HoverInfo::PrintedType printType(QualType QT, ASTContext &ASTCtx,
QT = QT->castAs<DecltypeType>()->getUnderlyingType();
HoverInfo::PrintedType Result;
llvm::raw_string_ostream OS(Result.Type);
- // Special case: if the outer type is a tag type without qualifiers, then
- // include the tag for extra clarity.
- // This isn't very idiomatic, so don't attempt it for complex cases, including
- // pointers/references, template specializations, etc.
+ // Special case: if the outer type is a canonical tag type, then include the
+ // tag for extra clarity. This isn't very idiomatic, so don't attempt it for
+ // complex cases, including pointers/references, template specializations,
+ // etc.
if (!QT.isNull() && !QT.hasQualifiers() && PP.SuppressTagKeyword) {
- if (auto *TT = llvm::dyn_cast<TagType>(QT.getTypePtr()))
- OS << TT->getDecl()->getKindName() << " ";
+ if (auto *TT = llvm::dyn_cast<TagType>(QT.getTypePtr());
+ TT && TT->isCanonicalUnqualified())
+ OS << TT->getOriginalDecl()->getKindName() << " ";
}
QT.print(OS, PP);
@@ -452,7 +455,7 @@ std::optional<std::string> printExprValue(const Expr *E,
// Compare to int64_t to avoid bit-width match requirements.
int64_t Val = Constant.Val.getInt().getExtValue();
for (const EnumConstantDecl *ECD :
- T->castAs<EnumType>()->getDecl()->enumerators())
+ T->castAs<EnumType>()->getOriginalDecl()->enumerators())
if (ECD->getInitVal() == Val)
return llvm::formatv("{0} ({1})", ECD->getNameAsString(),
printHex(Constant.Val.getInt()))
@@ -627,6 +630,9 @@ HoverInfo getHoverContents(const NamedDecl *D, const PrintingPolicy &PP,
HI.Name = printName(Ctx, *D);
const auto *CommentD = getDeclForComment(D);
HI.Documentation = getDeclComment(Ctx, *CommentD);
+ // Save the language options so we can create the comments::CommandTraits
+ // needed to parse the documentation.
+ HI.CommentOpts = D->getASTContext().getLangOpts().CommentOpts;
enhanceFromIndex(HI, *CommentD, Index);
if (HI.Documentation.empty())
HI.Documentation = synthesizeDocumentation(D);
@@ -967,10 +973,11 @@ void addLayoutInfo(const NamedDecl &ND, HoverInfo &HI) {
const auto &Ctx = ND.getASTContext();
if (auto *RD = llvm::dyn_cast<RecordDecl>(&ND)) {
- if (auto Size = Ctx.getTypeSizeInCharsIfKnown(RD->getTypeForDecl()))
+ CanQualType RT = Ctx.getCanonicalTagType(RD);
+ if (auto Size = Ctx.getTypeSizeInCharsIfKnown(RT))
HI.Size = Size->getQuantity() * 8;
if (!RD->isDependentType() && RD->isCompleteDefinition())
- HI.Align = Ctx.getTypeAlign(RD->getTypeForDecl());
+ HI.Align = Ctx.getTypeAlign(RT);
return;
}
@@ -1388,9 +1395,100 @@ static std::string formatOffset(uint64_t OffsetInBits) {
return Offset;
}
-markup::Document HoverInfo::present() const {
- markup::Document Output;
+void HoverInfo::calleeArgInfoToMarkupParagraph(markup::Paragraph &P) const {
+ assert(CallPassType);
+ std::string Buffer;
+ llvm::raw_string_ostream OS(Buffer);
+ OS << "Passed ";
+ if (CallPassType->PassBy != HoverInfo::PassType::Value) {
+ OS << "by ";
+ if (CallPassType->PassBy == HoverInfo::PassType::ConstRef)
+ OS << "const ";
+ OS << "reference ";
+ }
+ if (CalleeArgInfo->Name)
+ OS << "as " << CalleeArgInfo->Name;
+ else if (CallPassType->PassBy == HoverInfo::PassType::Value)
+ OS << "by value";
+ if (CallPassType->Converted && CalleeArgInfo->Type)
+ OS << " (converted to " << CalleeArgInfo->Type->Type << ")";
+ P.appendText(OS.str());
+}
+
+void HoverInfo::usedSymbolNamesToMarkup(markup::Document &Output) const {
+ markup::Paragraph &P = Output.addParagraph();
+ P.appendText("provides ");
+
+ const std::vector<std::string>::size_type SymbolNamesLimit = 5;
+ auto Front = llvm::ArrayRef(UsedSymbolNames).take_front(SymbolNamesLimit);
+
+ llvm::interleave(
+ Front, [&](llvm::StringRef Sym) { P.appendCode(Sym); },
+ [&] { P.appendText(", "); });
+ if (UsedSymbolNames.size() > Front.size()) {
+ P.appendText(" and ");
+ P.appendText(std::to_string(UsedSymbolNames.size() - Front.size()));
+ P.appendText(" more");
+ }
+}
+
+void HoverInfo::providerToMarkupParagraph(markup::Document &Output) const {
+ markup::Paragraph &DI = Output.addParagraph();
+ DI.appendText("provided by");
+ DI.appendSpace();
+ DI.appendCode(Provider);
+}
+
+void HoverInfo::definitionScopeToMarkup(markup::Document &Output) const {
+ std::string Buffer;
+
+ // Append scope comment, dropping trailing "::".
+ // Note that we don't print anything for global namespace, to not annoy
+ // non-c++ projects or projects that are not making use of namespaces.
+ if (!LocalScope.empty()) {
+ // Container name, e.g. class, method, function.
+ // We might want to propagate some info about container type to print
+ // function foo, class X, method X::bar, etc.
+ Buffer += "// In " + llvm::StringRef(LocalScope).rtrim(':').str() + '\n';
+ } else if (NamespaceScope && !NamespaceScope->empty()) {
+ Buffer += "// In namespace " +
+ llvm::StringRef(*NamespaceScope).rtrim(':').str() + '\n';
+ }
+
+ if (!AccessSpecifier.empty()) {
+ Buffer += AccessSpecifier + ": ";
+ }
+ Buffer += Definition;
+
+ Output.addCodeBlock(Buffer, DefinitionLanguage);
+}
+
+void HoverInfo::valueToMarkupParagraph(markup::Paragraph &P) const {
+ P.appendText("Value = ");
+ P.appendCode(*Value);
+}
+
+void HoverInfo::offsetToMarkupParagraph(markup::Paragraph &P) const {
+ P.appendText("Offset: " + formatOffset(*Offset));
+}
+
+void HoverInfo::sizeToMarkupParagraph(markup::Paragraph &P) const {
+ P.appendText("Size: " + formatSize(*Size));
+ if (Padding && *Padding != 0) {
+ P.appendText(llvm::formatv(" (+{0} padding)", formatSize(*Padding)).str());
+ }
+ if (Align)
+ P.appendText(", alignment " + formatSize(*Align));
+}
+
+markup::Document HoverInfo::presentDoxygen() const {
+ // NOTE: this function is currently almost identical to presentDefault().
+ // This is to have a minimal change when introducing the doxygen parser.
+ // This function will be changed when rearranging the output for doxygen
+ // parsed documentation.
+
+ markup::Document Output;
// Header contains a text of the form:
// variable `var`
//
@@ -1407,14 +1505,99 @@ markup::Document HoverInfo::present() const {
if (Kind != index::SymbolKind::Unknown)
Header.appendText(index::getSymbolKindString(Kind)).appendSpace();
assert(!Name.empty() && "hover triggered on a nameless symbol");
+
Header.appendCode(Name);
if (!Provider.empty()) {
- markup::Paragraph &DI = Output.addParagraph();
- DI.appendText("provided by");
- DI.appendSpace();
- DI.appendCode(Provider);
+ providerToMarkupParagraph(Output);
+ }
+
+ // Put a linebreak after header to increase readability.
+ Output.addRuler();
+ // Print Types on their own lines to reduce chances of getting line-wrapped by
+ // editor, as they might be long.
+ if (ReturnType) {
+ // For functions we display signature in a list form, e.g.:
+ // → `x`
+ // Parameters:
+ // - `bool param1`
+ // - `int param2 = 5`
+ Output.addParagraph().appendText("→ ").appendCode(
+ llvm::to_string(*ReturnType));
+ }
+
+ SymbolDocCommentVisitor SymbolDoc(Documentation, CommentOpts);
+
+ if (Parameters && !Parameters->empty()) {
+ Output.addParagraph().appendText("Parameters:");
+ markup::BulletList &L = Output.addBulletList();
+ for (const auto &Param : *Parameters) {
+ markup::Paragraph &P = L.addItem().addParagraph();
+ P.appendCode(llvm::to_string(Param));
+
+ if (SymbolDoc.isParameterDocumented(llvm::to_string(Param.Name))) {
+ P.appendText(" -");
+ SymbolDoc.parameterDocToMarkup(llvm::to_string(Param.Name), P);
+ }
+ }
+ }
+ // Don't print Type after Parameters or ReturnType as this will just duplicate
+ // the information
+ if (Type && !ReturnType && !Parameters)
+ Output.addParagraph().appendText("Type: ").appendCode(
+ llvm::to_string(*Type));
+
+ if (Value) {
+ valueToMarkupParagraph(Output.addParagraph());
+ }
+
+ if (Offset)
+ offsetToMarkupParagraph(Output.addParagraph());
+ if (Size) {
+ sizeToMarkupParagraph(Output.addParagraph());
+ }
+
+ if (CalleeArgInfo) {
+ calleeArgInfoToMarkupParagraph(Output.addParagraph());
+ }
+
+ SymbolDoc.docToMarkup(Output);
+
+ if (!Definition.empty()) {
Output.addRuler();
+ definitionScopeToMarkup(Output);
+ }
+
+ if (!UsedSymbolNames.empty()) {
+ Output.addRuler();
+ usedSymbolNamesToMarkup(Output);
+ }
+
+ return Output;
+}
+
+markup::Document HoverInfo::presentDefault() const {
+ markup::Document Output;
+ // Header contains a text of the form:
+ // variable `var`
+ //
+ // class `X`
+ //
+ // function `foo`
+ //
+ // expression
+ //
+ // Note that we are making use of a level-3 heading because VSCode renders
+ // level 1 and 2 headers in a huge font, see
+ // https://github.com/microsoft/vscode/issues/88417 for details.
+ markup::Paragraph &Header = Output.addHeading(3);
+ if (Kind != index::SymbolKind::Unknown)
+ Header.appendText(index::getSymbolKindString(Kind)).appendSpace();
+ assert(!Name.empty() && "hover triggered on a nameless symbol");
+ Header.appendCode(Name);
+
+ if (!Provider.empty()) {
+ providerToMarkupParagraph(Output);
}
// Put a linebreak after header to increase readability.
@@ -1445,41 +1628,17 @@ markup::Document HoverInfo::present() const {
llvm::to_string(*Type));
if (Value) {
- markup::Paragraph &P = Output.addParagraph();
- P.appendText("Value = ");
- P.appendCode(*Value);
+ valueToMarkupParagraph(Output.addParagraph());
}
if (Offset)
- Output.addParagraph().appendText("Offset: " + formatOffset(*Offset));
+ offsetToMarkupParagraph(Output.addParagraph());
if (Size) {
- auto &P = Output.addParagraph().appendText("Size: " + formatSize(*Size));
- if (Padding && *Padding != 0) {
- P.appendText(
- llvm::formatv(" (+{0} padding)", formatSize(*Padding)).str());
- }
- if (Align)
- P.appendText(", alignment " + formatSize(*Align));
+ sizeToMarkupParagraph(Output.addParagraph());
}
if (CalleeArgInfo) {
- assert(CallPassType);
- std::string Buffer;
- llvm::raw_string_ostream OS(Buffer);
- OS << "Passed ";
- if (CallPassType->PassBy != HoverInfo::PassType::Value) {
- OS << "by ";
- if (CallPassType->PassBy == HoverInfo::PassType::ConstRef)
- OS << "const ";
- OS << "reference ";
- }
- if (CalleeArgInfo->Name)
- OS << "as " << CalleeArgInfo->Name;
- else if (CallPassType->PassBy == HoverInfo::PassType::Value)
- OS << "by value";
- if (CallPassType->Converted && CalleeArgInfo->Type)
- OS << " (converted to " << CalleeArgInfo->Type->Type << ")";
- Output.addParagraph().appendText(OS.str());
+ calleeArgInfoToMarkupParagraph(Output.addParagraph());
}
if (!Documentation.empty())
@@ -1487,49 +1646,12 @@ markup::Document HoverInfo::present() const {
if (!Definition.empty()) {
Output.addRuler();
- std::string Buffer;
-
- if (!Definition.empty()) {
- // Append scope comment, dropping trailing "::".
- // Note that we don't print anything for global namespace, to not annoy
- // non-c++ projects or projects that are not making use of namespaces.
- if (!LocalScope.empty()) {
- // Container name, e.g. class, method, function.
- // We might want to propagate some info about container type to print
- // function foo, class X, method X::bar, etc.
- Buffer +=
- "// In " + llvm::StringRef(LocalScope).rtrim(':').str() + '\n';
- } else if (NamespaceScope && !NamespaceScope->empty()) {
- Buffer += "// In namespace " +
- llvm::StringRef(*NamespaceScope).rtrim(':').str() + '\n';
- }
-
- if (!AccessSpecifier.empty()) {
- Buffer += AccessSpecifier + ": ";
- }
-
- Buffer += Definition;
- }
-
- Output.addCodeBlock(Buffer, DefinitionLanguage);
+ definitionScopeToMarkup(Output);
}
if (!UsedSymbolNames.empty()) {
Output.addRuler();
- markup::Paragraph &P = Output.addParagraph();
- P.appendText("provides ");
-
- const std::vector<std::string>::size_type SymbolNamesLimit = 5;
- auto Front = llvm::ArrayRef(UsedSymbolNames).take_front(SymbolNamesLimit);
-
- llvm::interleave(
- Front, [&](llvm::StringRef Sym) { P.appendCode(Sym); },
- [&] { P.appendText(", "); });
- if (UsedSymbolNames.size() > Front.size()) {
- P.appendText(" and ");
- P.appendText(std::to_string(UsedSymbolNames.size() - Front.size()));
- P.appendText(" more");
- }
+ usedSymbolNamesToMarkup(Output);
}
return Output;
@@ -1538,21 +1660,19 @@ markup::Document HoverInfo::present() const {
std::string HoverInfo::present(MarkupKind Kind) const {
if (Kind == MarkupKind::Markdown) {
const Config &Cfg = Config::current();
- if ((Cfg.Documentation.CommentFormat ==
- Config::CommentFormatPolicy::Markdown) ||
- (Cfg.Documentation.CommentFormat ==
- Config::CommentFormatPolicy::Doxygen))
- // If the user prefers Markdown, we use the present() method to generate
- // the Markdown output.
- return present().asMarkdown();
+ if (Cfg.Documentation.CommentFormat ==
+ Config::CommentFormatPolicy::Markdown)
+ return presentDefault().asMarkdown();
+ if (Cfg.Documentation.CommentFormat == Config::CommentFormatPolicy::Doxygen)
+ return presentDoxygen().asMarkdown();
if (Cfg.Documentation.CommentFormat ==
Config::CommentFormatPolicy::PlainText)
// If the user prefers plain text, we use the present() method to generate
// the plain text output.
- return present().asEscapedMarkdown();
+ return presentDefault().asEscapedMarkdown();
}
- return present().asPlainText();
+ return presentDefault().asPlainText();
}
// If the backtick at `Offset` starts a probable quoted range, return the range
diff --git a/clang-tools-extra/clangd/Hover.h b/clang-tools-extra/clangd/Hover.h
index 2f65431..614180a 100644
--- a/clang-tools-extra/clangd/Hover.h
+++ b/clang-tools-extra/clangd/Hover.h
@@ -74,6 +74,8 @@ struct HoverInfo {
std::optional<Range> SymRange;
index::SymbolKind Kind = index::SymbolKind::Unknown;
std::string Documentation;
+ // Required to create a comments::CommandTraits object without the ASTContext.
+ CommentOptions CommentOpts;
/// Source code containing the definition of the symbol.
std::string Definition;
const char *DefinitionLanguage = "cpp";
@@ -118,10 +120,23 @@ struct HoverInfo {
// alphabetical order.
std::vector<std::string> UsedSymbolNames;
- /// Produce a user-readable information.
- markup::Document present() const;
-
+ /// Produce user-readable information based on the specified markup kind.
std::string present(MarkupKind Kind) const;
+
+private:
+ void usedSymbolNamesToMarkup(markup::Document &Output) const;
+ void providerToMarkupParagraph(markup::Document &Output) const;
+ void definitionScopeToMarkup(markup::Document &Output) const;
+ void calleeArgInfoToMarkupParagraph(markup::Paragraph &P) const;
+ void valueToMarkupParagraph(markup::Paragraph &P) const;
+ void offsetToMarkupParagraph(markup::Paragraph &P) const;
+ void sizeToMarkupParagraph(markup::Paragraph &P) const;
+
+ /// Parse and render the hover information as Doxygen documentation.
+ markup::Document presentDoxygen() const;
+
+ /// Render the hover information as the default documentation.
+ markup::Document presentDefault() const;
};
inline bool operator==(const HoverInfo::PrintedType &LHS,
diff --git a/clang-tools-extra/clangd/IncludeFixer.cpp b/clang-tools-extra/clangd/IncludeFixer.cpp
index 50bc2bd..c27d960 100644
--- a/clang-tools-extra/clangd/IncludeFixer.cpp
+++ b/clang-tools-extra/clangd/IncludeFixer.cpp
@@ -173,7 +173,7 @@ std::vector<Fix> IncludeFixer::fix(DiagnosticsEngine::Level DiagLevel,
// `enum x : int;' is not formally an incomplete type.
// We may need a full definition anyway.
if (auto * ET = llvm::dyn_cast<EnumType>(T))
- if (!ET->getDecl()->getDefinition())
+ if (!ET->getOriginalDecl()->getDefinition())
return fixIncompleteType(*T);
}
}
@@ -400,35 +400,35 @@ std::optional<CheapUnresolvedName> extractUnresolvedNameCheaply(
CheapUnresolvedName Result;
Result.Name = Unresolved.getAsString();
if (SS && SS->isNotEmpty()) { // "::" or "ns::"
- if (auto *Nested = SS->getScopeRep()) {
- if (Nested->getKind() == NestedNameSpecifier::Global) {
- Result.ResolvedScope = "";
- } else if (const NamespaceBaseDecl *NSB = Nested->getAsNamespace()) {
- if (const auto *NS = dyn_cast<NamespaceDecl>(NSB)) {
- std::string SpecifiedNS = printNamespaceScope(*NS);
- std::optional<std::string> Spelling = getSpelledSpecifier(*SS, SM);
-
- // Check the specifier spelled in the source.
- // If the resolved scope doesn't end with the spelled scope, the
- // resolved scope may come from a sema typo correction. For example,
- // sema assumes that "clangd::" is a typo of "clang::" and uses
- // "clang::" as the specified scope in:
- // namespace clang { clangd::X; }
- // In this case, we use the "typo" specifier as extra scope instead
- // of using the scope assumed by sema.
- if (!Spelling || llvm::StringRef(SpecifiedNS).ends_with(*Spelling)) {
- Result.ResolvedScope = std::move(SpecifiedNS);
- } else {
- Result.UnresolvedScope = std::move(*Spelling);
- }
+ NestedNameSpecifier Nested = SS->getScopeRep();
+ if (Nested.getKind() == NestedNameSpecifier::Kind::Global) {
+ Result.ResolvedScope = "";
+ } else if (Nested.getKind() == NestedNameSpecifier::Kind::Namespace) {
+ const NamespaceBaseDecl *NSB = Nested.getAsNamespaceAndPrefix().Namespace;
+ if (const auto *NS = dyn_cast<NamespaceDecl>(NSB)) {
+ std::string SpecifiedNS = printNamespaceScope(*NS);
+ std::optional<std::string> Spelling = getSpelledSpecifier(*SS, SM);
+
+ // Check the specifier spelled in the source.
+ // If the resolved scope doesn't end with the spelled scope, the
+ // resolved scope may come from a sema typo correction. For example,
+ // sema assumes that "clangd::" is a typo of "clang::" and uses
+ // "clang::" as the specified scope in:
+ // namespace clang { clangd::X; }
+ // In this case, we use the "typo" specifier as extra scope instead
+ // of using the scope assumed by sema.
+ if (!Spelling || llvm::StringRef(SpecifiedNS).ends_with(*Spelling)) {
+ Result.ResolvedScope = std::move(SpecifiedNS);
} else {
- Result.ResolvedScope = printNamespaceScope(*cast<NamespaceAliasDecl>(NSB)->getNamespace());
+ Result.UnresolvedScope = std::move(*Spelling);
}
} else {
- // We don't fix symbols in scopes that are not top-level e.g. class
- // members, as we don't collect includes for them.
- return std::nullopt;
+ Result.ResolvedScope = printNamespaceScope(*cast<NamespaceAliasDecl>(NSB)->getNamespace());
}
+ } else {
+ // We don't fix symbols in scopes that are not top-level e.g. class
+ // members, as we don't collect includes for them.
+ return std::nullopt;
}
}
diff --git a/clang-tools-extra/clangd/InlayHints.cpp b/clang-tools-extra/clangd/InlayHints.cpp
index 197c62c..cd479e1 100644
--- a/clang-tools-extra/clangd/InlayHints.cpp
+++ b/clang-tools-extra/clangd/InlayHints.cpp
@@ -55,18 +55,24 @@ void stripLeadingUnderscores(StringRef &Name) { Name = Name.ltrim('_'); }
// getDeclForType() returns the decl responsible for Type's spelling.
// This is the inverse of ASTContext::getTypeDeclType().
-template <typename Ty, typename = decltype(((Ty *)nullptr)->getDecl())>
-const NamedDecl *getDeclForTypeImpl(const Ty *T) {
- return T->getDecl();
-}
-const NamedDecl *getDeclForTypeImpl(const void *T) { return nullptr; }
const NamedDecl *getDeclForType(const Type *T) {
switch (T->getTypeClass()) {
-#define ABSTRACT_TYPE(TY, BASE)
-#define TYPE(TY, BASE) \
- case Type::TY: \
- return getDeclForTypeImpl(llvm::cast<TY##Type>(T));
-#include "clang/AST/TypeNodes.inc"
+ case Type::Enum:
+ case Type::Record:
+ case Type::InjectedClassName:
+ return cast<TagType>(T)->getOriginalDecl();
+ case Type::TemplateSpecialization:
+ return cast<TemplateSpecializationType>(T)
+ ->getTemplateName()
+ .getAsTemplateDecl(/*IgnoreDeduced=*/true);
+ case Type::Typedef:
+ return cast<TypedefType>(T)->getDecl();
+ case Type::UnresolvedUsing:
+ return cast<UnresolvedUsingType>(T)->getDecl();
+ case Type::Using:
+ return cast<UsingType>(T)->getDecl();
+ default:
+ return nullptr;
}
llvm_unreachable("Unknown TypeClass enum");
}
@@ -81,8 +87,6 @@ llvm::StringRef getSimpleName(const NamedDecl &D) {
return getSimpleName(D.getDeclName());
}
llvm::StringRef getSimpleName(QualType T) {
- if (const auto *ET = llvm::dyn_cast<ElaboratedType>(T))
- return getSimpleName(ET->getNamedType());
if (const auto *BT = llvm::dyn_cast<BuiltinType>(T)) {
PrintingPolicy PP(LangOptions{});
PP.adjustForCPlusPlus();
diff --git a/clang-tools-extra/clangd/Selection.cpp b/clang-tools-extra/clangd/Selection.cpp
index 277cb87..06165df 100644
--- a/clang-tools-extra/clangd/Selection.cpp
+++ b/clang-tools-extra/clangd/Selection.cpp
@@ -62,7 +62,8 @@ void recordMetrics(const SelectionTree &S, const LangOptions &Lang) {
}
// Return the range covering a node and all its children.
-SourceRange getSourceRange(const DynTypedNode &N) {
+SourceRange getSourceRange(const DynTypedNode &N,
+ bool IncludeQualifier = false) {
// MemberExprs to implicitly access anonymous fields should not claim any
// tokens for themselves. Given:
// struct A { struct { int b; }; };
@@ -80,7 +81,7 @@ SourceRange getSourceRange(const DynTypedNode &N) {
? getSourceRange(DynTypedNode::create(*ME->getBase()))
: SourceRange();
}
- return N.getSourceRange();
+ return N.getSourceRange(IncludeQualifier);
}
// An IntervalSet maintains a set of disjoint subranges of an array.
@@ -643,8 +644,9 @@ public:
}
return traverseNode(X, [&] { return Base::TraverseDecl(X); });
}
- bool TraverseTypeLoc(TypeLoc X) {
- return traverseNode(&X, [&] { return Base::TraverseTypeLoc(X); });
+ bool TraverseTypeLoc(TypeLoc X, bool TraverseQualifier = true) {
+ return traverseNode(
+ &X, [&] { return Base::TraverseTypeLoc(X, TraverseQualifier); });
}
bool TraverseTemplateArgumentLoc(const TemplateArgumentLoc &X) {
return traverseNode(&X,
@@ -690,7 +692,8 @@ public:
// This means we'd never see 'int' in 'const int'! Work around that here.
// (The reason for the behavior is to avoid traversing the nested Type twice,
// but we ignore TraverseType anyway).
- bool TraverseQualifiedTypeLoc(QualifiedTypeLoc QX) {
+ bool TraverseQualifiedTypeLoc(QualifiedTypeLoc QX,
+ bool TraverseQualifier = true) {
return traverseNode<TypeLoc>(
&QX, [&] { return TraverseTypeLoc(QX.getUnqualifiedLoc()); });
}
@@ -698,7 +701,7 @@ public:
return traverseNode(&PL, [&] { return Base::TraverseObjCProtocolLoc(PL); });
}
// Uninteresting parts of the AST that don't have locations within them.
- bool TraverseNestedNameSpecifier(NestedNameSpecifier *) { return true; }
+ bool TraverseNestedNameSpecifier(NestedNameSpecifier) { return true; }
bool TraverseType(QualType) { return true; }
// The DeclStmt for the loop variable claims to cover the whole range
@@ -798,7 +801,7 @@ private:
// An optimization for a common case: nodes outside macro expansions that
// don't intersect the selection may be recursively skipped.
bool canSafelySkipNode(const DynTypedNode &N) {
- SourceRange S = getSourceRange(N);
+ SourceRange S = getSourceRange(N, /*IncludeQualifier=*/true);
if (auto *TL = N.get<TypeLoc>()) {
// FIXME: TypeLoc::getBeginLoc()/getEndLoc() are pretty fragile
// heuristics. We should consider only pruning critical TypeLoc nodes, to
diff --git a/clang-tools-extra/clangd/SemanticHighlighting.cpp b/clang-tools-extra/clangd/SemanticHighlighting.cpp
index e6d5cf7..2b151b1 100644
--- a/clang-tools-extra/clangd/SemanticHighlighting.cpp
+++ b/clang-tools-extra/clangd/SemanticHighlighting.cpp
@@ -1127,21 +1127,6 @@ public:
return RecursiveASTVisitor::TraverseTemplateArgumentLoc(L);
}
- // findExplicitReferences will walk nested-name-specifiers and
- // find anything that can be resolved to a Decl. However, non-leaf
- // components of nested-name-specifiers which are dependent names
- // (kind "Identifier") cannot be resolved to a decl, so we visit
- // them here.
- bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc Q) {
- if (NestedNameSpecifier *NNS = Q.getNestedNameSpecifier()) {
- if (NNS->getKind() == NestedNameSpecifier::Identifier)
- H.addToken(Q.getLocalBeginLoc(), HighlightingKind::Type)
- .addModifier(HighlightingModifier::DependentName)
- .addModifier(HighlightingModifier::ClassScope);
- }
- return RecursiveASTVisitor::TraverseNestedNameSpecifierLoc(Q);
- }
-
private:
HighlightingsBuilder &H;
};
diff --git a/clang-tools-extra/clangd/SymbolDocumentation.cpp b/clang-tools-extra/clangd/SymbolDocumentation.cpp
new file mode 100644
index 0000000..dea637b
--- /dev/null
+++ b/clang-tools-extra/clangd/SymbolDocumentation.cpp
@@ -0,0 +1,297 @@
+//===--- SymbolDocumentation.cpp --------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "SymbolDocumentation.h"
+
+#include "support/Markup.h"
+#include "clang/AST/Comment.h"
+#include "clang/AST/CommentCommandTraits.h"
+#include "clang/AST/CommentVisitor.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringRef.h"
+
+namespace clang {
+namespace clangd {
+namespace {
+
+std::string commandMarkerAsString(comments::CommandMarkerKind CommandMarker) {
+ switch (CommandMarker) {
+ case comments::CommandMarkerKind::CMK_At:
+ return "@";
+ case comments::CommandMarkerKind::CMK_Backslash:
+ return "\\";
+ }
+ llvm_unreachable("Unknown command marker kind");
+}
+
+void commandToMarkup(markup::Paragraph &Out, StringRef Command,
+ comments::CommandMarkerKind CommandMarker,
+ StringRef Args) {
+ Out.appendBoldText(commandMarkerAsString(CommandMarker) + Command.str());
+ if (!Args.empty()) {
+ Out.appendSpace();
+ Out.appendEmphasizedText(Args.str());
+ }
+}
+} // namespace
+
+class ParagraphToMarkupDocument
+ : public comments::ConstCommentVisitor<ParagraphToMarkupDocument> {
+public:
+ ParagraphToMarkupDocument(markup::Paragraph &Out,
+ const comments::CommandTraits &Traits)
+ : Out(Out), Traits(Traits) {}
+
+ void visitParagraphComment(const comments::ParagraphComment *C) {
+ if (!C)
+ return;
+
+ for (const auto *Child = C->child_begin(); Child != C->child_end();
+ ++Child) {
+ visit(*Child);
+ }
+ }
+
+ void visitTextComment(const comments::TextComment *C) {
+ // Always trim leading space after a newline.
+ StringRef Text = C->getText();
+ if (LastChunkEndsWithNewline && C->getText().starts_with(' '))
+ Text = Text.drop_front();
+
+ LastChunkEndsWithNewline = C->hasTrailingNewline();
+ Out.appendText(Text.str() + (LastChunkEndsWithNewline ? "\n" : ""));
+ }
+
+ void visitInlineCommandComment(const comments::InlineCommandComment *C) {
+
+ if (C->getNumArgs() > 0) {
+ std::string ArgText;
+ for (unsigned I = 0; I < C->getNumArgs(); ++I) {
+ if (!ArgText.empty())
+ ArgText += " ";
+ ArgText += C->getArgText(I);
+ }
+
+ switch (C->getRenderKind()) {
+ case comments::InlineCommandRenderKind::Monospaced:
+ Out.appendCode(ArgText);
+ break;
+ case comments::InlineCommandRenderKind::Bold:
+ Out.appendBoldText(ArgText);
+ break;
+ case comments::InlineCommandRenderKind::Emphasized:
+ Out.appendEmphasizedText(ArgText);
+ break;
+ default:
+ commandToMarkup(Out, C->getCommandName(Traits), C->getCommandMarker(),
+ ArgText);
+ break;
+ }
+ } else {
+ if (C->getCommandName(Traits) == "n") {
+ // \n is a special case; it is used to create a new line.
+ Out.appendText(" \n");
+ LastChunkEndsWithNewline = true;
+ return;
+ }
+
+ commandToMarkup(Out, C->getCommandName(Traits), C->getCommandMarker(),
+ "");
+ }
+ }
+
+ void visitHTMLStartTagComment(const comments::HTMLStartTagComment *STC) {
+ std::string TagText = "<" + STC->getTagName().str();
+
+ for (unsigned I = 0; I < STC->getNumAttrs(); ++I) {
+ const comments::HTMLStartTagComment::Attribute &Attr = STC->getAttr(I);
+ TagText += " " + Attr.Name.str() + "=\"" + Attr.Value.str() + "\"";
+ }
+
+ if (STC->isSelfClosing())
+ TagText += " /";
+ TagText += ">";
+
+ LastChunkEndsWithNewline = STC->hasTrailingNewline();
+ Out.appendText(TagText + (LastChunkEndsWithNewline ? "\n" : ""));
+ }
+
+ void visitHTMLEndTagComment(const comments::HTMLEndTagComment *ETC) {
+ LastChunkEndsWithNewline = ETC->hasTrailingNewline();
+ Out.appendText("</" + ETC->getTagName().str() + ">" +
+ (LastChunkEndsWithNewline ? "\n" : ""));
+ }
+
+private:
+ markup::Paragraph &Out;
+ const comments::CommandTraits &Traits;
+
+ /// If true, the next leading space after a new line is trimmed.
+ bool LastChunkEndsWithNewline = false;
+};
+
+class ParagraphToString
+ : public comments::ConstCommentVisitor<ParagraphToString> {
+public:
+ ParagraphToString(llvm::raw_string_ostream &Out,
+ const comments::CommandTraits &Traits)
+ : Out(Out), Traits(Traits) {}
+
+ void visitParagraphComment(const comments::ParagraphComment *C) {
+ if (!C)
+ return;
+
+ for (const auto *Child = C->child_begin(); Child != C->child_end();
+ ++Child) {
+ visit(*Child);
+ }
+ }
+
+ void visitTextComment(const comments::TextComment *C) { Out << C->getText(); }
+
+ void visitInlineCommandComment(const comments::InlineCommandComment *C) {
+ Out << commandMarkerAsString(C->getCommandMarker());
+ Out << C->getCommandName(Traits);
+ if (C->getNumArgs() > 0) {
+ for (unsigned I = 0; I < C->getNumArgs(); ++I)
+ Out << " " << C->getArgText(I);
+ }
+ Out << " ";
+ }
+
+ void visitHTMLStartTagComment(const comments::HTMLStartTagComment *STC) {
+ Out << "<" << STC->getTagName().str();
+
+ for (unsigned I = 0; I < STC->getNumAttrs(); ++I) {
+ const comments::HTMLStartTagComment::Attribute &Attr = STC->getAttr(I);
+ Out << " " << Attr.Name.str();
+ if (!Attr.Value.str().empty())
+ Out << "=\"" << Attr.Value.str() << "\"";
+ }
+
+ if (STC->isSelfClosing())
+ Out << " /";
+ Out << ">";
+
+ Out << (STC->hasTrailingNewline() ? "\n" : "");
+ }
+
+ void visitHTMLEndTagComment(const comments::HTMLEndTagComment *ETC) {
+ Out << "</" << ETC->getTagName().str() << ">"
+ << (ETC->hasTrailingNewline() ? "\n" : "");
+ }
+
+private:
+ llvm::raw_string_ostream &Out;
+ const comments::CommandTraits &Traits;
+};
+
+class BlockCommentToMarkupDocument
+ : public comments::ConstCommentVisitor<BlockCommentToMarkupDocument> {
+public:
+ BlockCommentToMarkupDocument(markup::Document &Out,
+ const comments::CommandTraits &Traits)
+ : Out(Out), Traits(Traits) {}
+
+ void visitBlockCommandComment(const comments::BlockCommandComment *B) {
+
+ switch (B->getCommandID()) {
+ case comments::CommandTraits::KCI_arg:
+ case comments::CommandTraits::KCI_li:
+ // \li and \arg are special cases: they create a list item, which is
+ // rendered as a bullet list in Markdown.
+ ParagraphToMarkupDocument(Out.addBulletList().addItem().addParagraph(),
+ Traits)
+ .visit(B->getParagraph());
+ break;
+ default: {
+ // Some commands have arguments, like \throws.
+ // The arguments are not part of the paragraph.
+ // We need to reconstruct them here.
+ std::string ArgText;
+ for (unsigned I = 0; I < B->getNumArgs(); ++I) {
+ if (!ArgText.empty())
+ ArgText += " ";
+ ArgText += B->getArgText(I);
+ }
+ auto &P = Out.addParagraph();
+ commandToMarkup(P, B->getCommandName(Traits), B->getCommandMarker(),
+ ArgText);
+ if (B->getParagraph() && !B->getParagraph()->isWhitespace()) {
+ // For commands with arguments, the paragraph starts after the first
+ // space. Therefore we need to append a space manually in this case.
+ if (!ArgText.empty())
+ P.appendSpace();
+ ParagraphToMarkupDocument(P, Traits).visit(B->getParagraph());
+ }
+ }
+ }
+ }
+
+ void visitVerbatimBlockComment(const comments::VerbatimBlockComment *VB) {
+ commandToMarkup(Out.addParagraph(), VB->getCommandName(Traits),
+ VB->getCommandMarker(), "");
+
+ std::string VerbatimText;
+
+ for (const auto *LI = VB->child_begin(); LI != VB->child_end(); ++LI) {
+ if (const auto *Line = cast<comments::VerbatimBlockLineComment>(*LI)) {
+ VerbatimText += Line->getText().str() + "\n";
+ }
+ }
+
+ Out.addCodeBlock(VerbatimText, "");
+
+ commandToMarkup(Out.addParagraph(), VB->getCloseName(),
+ VB->getCommandMarker(), "");
+ }
+
+ void visitVerbatimLineComment(const comments::VerbatimLineComment *VL) {
+ auto &P = Out.addParagraph();
+ commandToMarkup(P, VL->getCommandName(Traits), VL->getCommandMarker(), "");
+ P.appendSpace().appendCode(VL->getText().str(), true).appendSpace();
+ }
+
+private:
+ markup::Document &Out;
+ const comments::CommandTraits &Traits;
+ StringRef CommentEscapeMarker;
+};
+
+void SymbolDocCommentVisitor::parameterDocToMarkup(StringRef ParamName,
+ markup::Paragraph &Out) {
+ if (ParamName.empty())
+ return;
+
+ if (const auto *P = Parameters.lookup(ParamName)) {
+ ParagraphToMarkupDocument(Out, Traits).visit(P->getParagraph());
+ }
+}
+
+void SymbolDocCommentVisitor::parameterDocToString(
+ StringRef ParamName, llvm::raw_string_ostream &Out) {
+ if (ParamName.empty())
+ return;
+
+ if (const auto *P = Parameters.lookup(ParamName)) {
+ ParagraphToString(Out, Traits).visit(P->getParagraph());
+ }
+}
+
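+// Renders the block commands and free paragraphs in their original order.
+// Note that \param commands are not emitted here; they are collected in
+// Parameters and rendered separately via parameterDocToMarkup().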
+void SymbolDocCommentVisitor::docToMarkup(markup::Document &Out) {
+ for (unsigned I = 0; I < CommentPartIndex; ++I) {
+ if (const auto *BC = BlockCommands.lookup(I)) {
+ BlockCommentToMarkupDocument(Out, Traits).visit(BC);
+ } else if (const auto *P = FreeParagraphs.lookup(I)) {
+ ParagraphToMarkupDocument(Out.addParagraph(), Traits).visit(P);
+ }
+ }
+}
+
+} // namespace clangd
+} // namespace clang
diff --git a/clang-tools-extra/clangd/SymbolDocumentation.h b/clang-tools-extra/clangd/SymbolDocumentation.h
new file mode 100644
index 0000000..b5120ba
--- /dev/null
+++ b/clang-tools-extra/clangd/SymbolDocumentation.h
@@ -0,0 +1,155 @@
+//===--- SymbolDocumentation.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Class to parse doxygen comments into a flat structure for consumption
+// in e.g. Hover and Code Completion
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_TOOLS_EXTRA_CLANGD_SYMBOLDOCUMENTATION_H
+#define LLVM_CLANG_TOOLS_EXTRA_CLANGD_SYMBOLDOCUMENTATION_H
+
+#include "support/Markup.h"
+#include "clang/AST/Comment.h"
+#include "clang/AST/CommentLexer.h"
+#include "clang/AST/CommentParser.h"
+#include "clang/AST/CommentSema.h"
+#include "clang/AST/CommentVisitor.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/Support/raw_ostream.h"
+#include <string>
+
+namespace clang {
+namespace clangd {
+
+class SymbolDocCommentVisitor
+ : public comments::ConstCommentVisitor<SymbolDocCommentVisitor> {
+public:
+ SymbolDocCommentVisitor(comments::FullComment *FC,
+ const CommentOptions &CommentOpts)
+ : Traits(Allocator, CommentOpts), Allocator() {
+ if (!FC)
+ return;
+
+ for (auto *Block : FC->getBlocks()) {
+ visit(Block);
+ }
+ }
+
+ SymbolDocCommentVisitor(llvm::StringRef Documentation,
+ const CommentOptions &CommentOpts)
+ : Traits(Allocator, CommentOpts), Allocator() {
+
+ if (Documentation.empty())
+ return;
+
+ CommentWithMarkers.reserve(Documentation.size() +
+ Documentation.count('\n') * 3);
+
+ // The comment lexer expects doxygen markers, so add them back.
+ // We need to use the /// style doxygen markers because the comment could
+ // contain the closing tag "*/" of a C-style "/** */" comment, which
+ // would break the parsing if we just enclosed the comment text
+ // with "/** */".
+ CommentWithMarkers = "///";
+ bool NewLine = true;
+ for (char C : Documentation) {
+ if (C == '\n') {
+ CommentWithMarkers += "\n///";
+ NewLine = true;
+ } else {
+ if (NewLine && (C == '<')) {
+ // A comment line starting with '///<' is treated as a doxygen
+ // comment. Therefore add a space to separate the '<' from the comment
+ // marker. This allows HTML tags at the beginning of a line to be
+ // parsed, and the escape marker prevents adding the artificial space
+ // in the markup documentation. The extra space will not be rendered,
+ // since we render it as markdown.
+ CommentWithMarkers += ' ';
+ }
+ CommentWithMarkers += C;
+ NewLine = false;
+ }
+ }
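+ // Illustrative example: the input "brief\n<b>text" becomes
+ // "///brief\n/// <b>text", i.e. every line gets a "///" marker and a
+ // leading '<' is separated from the marker by a space.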
+ SourceManagerForFile SourceMgrForFile("mock_file.cpp", CommentWithMarkers);
+
+ SourceManager &SourceMgr = SourceMgrForFile.get();
+ // The doxygen Sema requires a Diagnostics consumer, since it reports
+ // warnings e.g. when parameters are not documented correctly. These
+ // warnings are not relevant for us, so we can ignore them.
+ SourceMgr.getDiagnostics().setClient(new IgnoringDiagConsumer);
+
+ comments::Sema S(Allocator, SourceMgr, SourceMgr.getDiagnostics(), Traits,
+ /*PP=*/nullptr);
+ comments::Lexer L(Allocator, SourceMgr.getDiagnostics(), Traits,
+ SourceMgr.getLocForStartOfFile(SourceMgr.getMainFileID()),
+ CommentWithMarkers.data(),
+ CommentWithMarkers.data() + CommentWithMarkers.size());
+ comments::Parser P(L, S, Allocator, SourceMgr, SourceMgr.getDiagnostics(),
+ Traits);
+ comments::FullComment *FC = P.parseFullComment();
+
+ if (!FC)
+ return;
+
+ for (auto *Block : FC->getBlocks()) {
+ visit(Block);
+ }
+ }
+
+ bool isParameterDocumented(StringRef ParamName) const {
+ return Parameters.contains(ParamName);
+ }
+
+ void parameterDocToMarkup(StringRef ParamName, markup::Paragraph &Out);
+
+ void parameterDocToString(StringRef ParamName, llvm::raw_string_ostream &Out);
+
+ void docToMarkup(markup::Document &Out);
+
+ void visitBlockCommandComment(const comments::BlockCommandComment *B) {
+ BlockCommands[CommentPartIndex] = B;
+ CommentPartIndex++;
+ }
+
+ void visitParagraphComment(const comments::ParagraphComment *P) {
+ FreeParagraphs[CommentPartIndex] = P;
+ CommentPartIndex++;
+ }
+
+ void visitParamCommandComment(const comments::ParamCommandComment *P) {
+ Parameters[P->getParamNameAsWritten()] = P;
+ }
+
+private:
+ comments::CommandTraits Traits;
+ llvm::BumpPtrAllocator Allocator;
+ std::string CommentWithMarkers;
+
+ /// Index to keep track of the order of the comments.
+ /// We want to rearrange some commands like \\param.
+ /// This index allows us to keep the order of the other comment parts.
+ unsigned CommentPartIndex = 0;
+
+ /// Parsed paragraph(s) of the "param" command(s).
+ llvm::SmallDenseMap<StringRef, const comments::ParamCommandComment *>
+ Parameters;
+
+ /// All the block commands.
+ llvm::SmallDenseMap<unsigned, const comments::BlockCommandComment *>
+ BlockCommands;
+
+ /// All "free" text paragraphs.
+ llvm::SmallDenseMap<unsigned, const comments::ParagraphComment *>
+ FreeParagraphs;
+};
+
+} // namespace clangd
+} // namespace clang
+
+#endif // LLVM_CLANG_TOOLS_EXTRA_CLANGD_SYMBOLDOCUMENTATION_H
diff --git a/clang-tools-extra/clangd/XRefs.cpp b/clang-tools-extra/clangd/XRefs.cpp
index 83a8b72..11ee510 100644
--- a/clang-tools-extra/clangd/XRefs.cpp
+++ b/clang-tools-extra/clangd/XRefs.cpp
@@ -1965,7 +1965,8 @@ std::vector<const CXXRecordDecl *> findRecordTypeAt(ParsedAST &AST,
// Return the type most associated with an AST node.
// This isn't precisely defined: we want "go to type" to do something useful.
-static QualType typeForNode(const SelectionTree::Node *N) {
+static QualType typeForNode(const ASTContext &Ctx,
+ const SelectionTree::Node *N) {
// If we're looking at a namespace qualifier, walk up to what it's qualifying.
// (If we're pointing at a *class* inside a NNS, N will be a TypeLoc).
while (N && N->ASTNode.get<NestedNameSpecifierLoc>())
@@ -1999,10 +2000,13 @@ static QualType typeForNode(const SelectionTree::Node *N) {
if (const Decl *D = N->ASTNode.get<Decl>()) {
struct Visitor : ConstDeclVisitor<Visitor, QualType> {
+ const ASTContext &Ctx;
+ Visitor(const ASTContext &Ctx) : Ctx(Ctx) {}
+
QualType VisitValueDecl(const ValueDecl *D) { return D->getType(); }
// Declaration of a type => that type.
QualType VisitTypeDecl(const TypeDecl *D) {
- return QualType(D->getTypeForDecl(), 0);
+ return Ctx.getTypeDeclType(D);
}
// Exception: alias declaration => the underlying type, not the alias.
QualType VisitTypedefNameDecl(const TypedefNameDecl *D) {
@@ -2012,7 +2016,7 @@ static QualType typeForNode(const SelectionTree::Node *N) {
QualType VisitTemplateDecl(const TemplateDecl *D) {
return Visit(D->getTemplatedDecl());
}
- } V;
+ } V(Ctx);
return V.Visit(D);
}
@@ -2156,7 +2160,8 @@ std::vector<LocatedSymbol> findType(ParsedAST &AST, Position Pos,
// unique_ptr<unique_ptr<T>>. Let's *not* remove them, because it gives you some
// information about the type you may have not known before
// (since unique_ptr<unique_ptr<T>> != unique_ptr<T>).
- for (const QualType& Type : unwrapFindType(typeForNode(N), AST.getHeuristicResolver()))
+ for (const QualType &Type : unwrapFindType(
+ typeForNode(AST.getASTContext(), N), AST.getHeuristicResolver()))
llvm::copy(locateSymbolForType(AST, Type, Index),
std::back_inserter(LocatedSymbols));
diff --git a/clang-tools-extra/clangd/refactor/tweaks/AddUsing.cpp b/clang-tools-extra/clangd/refactor/tweaks/AddUsing.cpp
index 67fc451..f65c74f 100644
--- a/clang-tools-extra/clangd/refactor/tweaks/AddUsing.cpp
+++ b/clang-tools-extra/clangd/refactor/tweaks/AddUsing.cpp
@@ -115,13 +115,6 @@ private:
const SourceManager &SM;
};
-bool isFullyQualified(const NestedNameSpecifier *NNS) {
- if (!NNS)
- return false;
- return NNS->getKind() == NestedNameSpecifier::Global ||
- isFullyQualified(NNS->getPrefix());
-}
-
struct InsertionPointData {
// Location to insert the "using" statement. If invalid then the statement
// should not be inserted at all (it already exists).
@@ -167,18 +160,20 @@ findInsertionPoint(const Tweak::Selection &Inputs,
for (auto &U : Usings) {
// Only "upgrade" to fully qualified is all relevant using decls are fully
// qualified. Otherwise trust what the user typed.
- if (!isFullyQualified(U->getQualifier()))
+ if (!U->getQualifier().isFullyQualified())
AlwaysFullyQualify = false;
if (SM.isBeforeInTranslationUnit(Inputs.Cursor, U->getUsingLoc()))
// "Usings" is sorted, so we're done.
break;
- if (const auto *Namespace = dyn_cast_if_present<NamespaceDecl>(
- U->getQualifier()->getAsNamespace())) {
+ if (NestedNameSpecifier Qualifier = U->getQualifier();
+ Qualifier.getKind() == NestedNameSpecifier::Kind::Namespace) {
+ const auto *Namespace =
+ U->getQualifier().getAsNamespaceAndPrefix().Namespace;
if (Namespace->getCanonicalDecl() ==
QualifierToRemove.getNestedNameSpecifier()
- ->getAsNamespace()
- ->getCanonicalDecl() &&
+ .getAsNamespaceAndPrefix()
+ .Namespace->getCanonicalDecl() &&
U->getName() == Name) {
return InsertionPointData();
}
@@ -232,8 +227,9 @@ findInsertionPoint(const Tweak::Selection &Inputs,
}
bool isNamespaceForbidden(const Tweak::Selection &Inputs,
- const NestedNameSpecifier &Namespace) {
- const auto *NS = dyn_cast<NamespaceDecl>(Namespace.getAsNamespace());
+ NestedNameSpecifier Namespace) {
+ const auto *NS =
+ dyn_cast<NamespaceDecl>(Namespace.getAsNamespaceAndPrefix().Namespace);
if (!NS)
return true;
std::string NamespaceStr = printNamespaceScope(*NS);
@@ -247,11 +243,11 @@ bool isNamespaceForbidden(const Tweak::Selection &Inputs,
return false;
}
-std::string getNNSLAsString(NestedNameSpecifierLoc &NNSL,
+std::string getNNSLAsString(NestedNameSpecifierLoc NNSL,
const PrintingPolicy &Policy) {
std::string Out;
llvm::raw_string_ostream OutStream(Out);
- NNSL.getNestedNameSpecifier()->print(OutStream, Policy);
+ NNSL.getNestedNameSpecifier().print(OutStream, Policy);
return OutStream.str();
}
@@ -276,16 +272,15 @@ bool AddUsing::prepare(const Selection &Inputs) {
continue;
}
if (auto *T = Node->ASTNode.get<TypeLoc>()) {
- if (T->getAs<ElaboratedTypeLoc>()) {
+ // Find the outermost TypeLoc.
+ if (Node->Parent->ASTNode.get<NestedNameSpecifierLoc>())
+ continue;
+ if (isa<TagType, TemplateSpecializationType, TypedefType, UsingType,
+ UnresolvedUsingType>(T->getTypePtr()))
break;
- }
- if (Node->Parent->ASTNode.get<TypeLoc>() ||
- Node->Parent->ASTNode.get<NestedNameSpecifierLoc>()) {
- // Node is TypeLoc, but it's parent is either TypeLoc or
- // NestedNameSpecifier. In both cases, we want to go up, to find
- // the outermost TypeLoc.
+ // Find the outermost TypeLoc.
+ if (Node->Parent->ASTNode.get<TypeLoc>())
continue;
- }
}
break;
}
@@ -307,32 +302,70 @@ bool AddUsing::prepare(const Selection &Inputs) {
MustInsertAfterLoc = D->getDecl()->getBeginLoc();
}
} else if (auto *T = Node->ASTNode.get<TypeLoc>()) {
- if (auto E = T->getAs<ElaboratedTypeLoc>()) {
- QualifierToRemove = E.getQualifierLoc();
-
- SpelledNameRange = E.getSourceRange();
- if (auto T = E.getNamedTypeLoc().getAs<TemplateSpecializationTypeLoc>()) {
- // Remove the template arguments from the name.
- SpelledNameRange.setEnd(T.getLAngleLoc().getLocWithOffset(-1));
- }
-
- if (const auto *ET = E.getTypePtr()) {
- if (const auto *TDT =
- dyn_cast<TypedefType>(ET->getNamedType().getTypePtr())) {
- MustInsertAfterLoc = TDT->getDecl()->getBeginLoc();
- } else if (auto *TD = ET->getAsTagDecl()) {
- MustInsertAfterLoc = TD->getBeginLoc();
- }
- }
+ switch (T->getTypeLocClass()) {
+ case TypeLoc::TemplateSpecialization: {
+ auto TL = T->castAs<TemplateSpecializationTypeLoc>();
+ QualifierToRemove = TL.getQualifierLoc();
+ if (!QualifierToRemove)
+ break;
+ SpelledNameRange = TL.getTemplateNameLoc();
+ if (auto *TD = TL.getTypePtr()->getTemplateName().getAsTemplateDecl(
+ /*IgnoreDeduced=*/true))
+ MustInsertAfterLoc = TD->getBeginLoc();
+ break;
+ }
+ case TypeLoc::Enum:
+ case TypeLoc::Record:
+ case TypeLoc::InjectedClassName: {
+ auto TL = T->castAs<TagTypeLoc>();
+ QualifierToRemove = TL.getQualifierLoc();
+ if (!QualifierToRemove)
+ break;
+ SpelledNameRange = TL.getNameLoc();
+ MustInsertAfterLoc = TL.getOriginalDecl()->getBeginLoc();
+ break;
+ }
+ case TypeLoc::Typedef: {
+ auto TL = T->castAs<TypedefTypeLoc>();
+ QualifierToRemove = TL.getQualifierLoc();
+ if (!QualifierToRemove)
+ break;
+ SpelledNameRange = TL.getNameLoc();
+ MustInsertAfterLoc = TL.getDecl()->getBeginLoc();
+ break;
+ }
+ case TypeLoc::UnresolvedUsing: {
+ auto TL = T->castAs<UnresolvedUsingTypeLoc>();
+ QualifierToRemove = TL.getQualifierLoc();
+ if (!QualifierToRemove)
+ break;
+ SpelledNameRange = TL.getNameLoc();
+ MustInsertAfterLoc = TL.getDecl()->getBeginLoc();
+ break;
+ }
+ case TypeLoc::Using: {
+ auto TL = T->castAs<UsingTypeLoc>();
+ QualifierToRemove = TL.getQualifierLoc();
+ if (!QualifierToRemove)
+ break;
+ SpelledNameRange = TL.getNameLoc();
+ MustInsertAfterLoc = TL.getDecl()->getBeginLoc();
+ break;
+ }
+ default:
+ break;
}
+ if (QualifierToRemove)
+ SpelledNameRange.setBegin(QualifierToRemove.getBeginLoc());
}
if (!QualifierToRemove ||
// FIXME: This only supports removing qualifiers that are made up of just
// namespace names. If qualifier contains a type, we could take the
// longest namespace prefix and remove that.
- !QualifierToRemove.getNestedNameSpecifier()->getAsNamespace() ||
+ QualifierToRemove.getNestedNameSpecifier().getKind() !=
+ NestedNameSpecifier::Kind::Namespace ||
// Respect user config.
- isNamespaceForbidden(Inputs, *QualifierToRemove.getNestedNameSpecifier()))
+ isNamespaceForbidden(Inputs, QualifierToRemove.getNestedNameSpecifier()))
return false;
// Macros are difficult. We only want to offer code action when what's spelled
// under the cursor is a namespace qualifier. If it's a macro that expands to
@@ -384,7 +417,7 @@ Expected<Tweak::Effect> AddUsing::apply(const Selection &Inputs) {
llvm::raw_string_ostream UsingTextStream(UsingText);
UsingTextStream << "using ";
if (InsertionPoint->AlwaysFullyQualify &&
- !isFullyQualified(QualifierToRemove.getNestedNameSpecifier()))
+ !QualifierToRemove.getNestedNameSpecifier().isFullyQualified())
UsingTextStream << "::";
UsingTextStream << QualifierToSpell << SpelledName << ";"
<< InsertionPoint->Suffix;
diff --git a/clang-tools-extra/clangd/refactor/tweaks/ExtractFunction.cpp b/clang-tools-extra/clangd/refactor/tweaks/ExtractFunction.cpp
index cd07cbf..134ae89 100644
--- a/clang-tools-extra/clangd/refactor/tweaks/ExtractFunction.cpp
+++ b/clang-tools-extra/clangd/refactor/tweaks/ExtractFunction.cpp
@@ -362,7 +362,7 @@ struct NewFunction {
SourceLocation DefinitionPoint;
std::optional<SourceLocation> ForwardDeclarationPoint;
const CXXRecordDecl *EnclosingClass = nullptr;
- const NestedNameSpecifier *DefinitionQualifier = nullptr;
+ NestedNameSpecifier DefinitionQualifier = std::nullopt;
const DeclContext *SemanticDC = nullptr;
const DeclContext *SyntacticDC = nullptr;
const DeclContext *ForwardDeclarationSyntacticDC = nullptr;
@@ -455,13 +455,12 @@ std::string NewFunction::renderQualifiers() const {
}
std::string NewFunction::renderDeclarationName(FunctionDeclKind K) const {
- if (DefinitionQualifier == nullptr || K != OutOfLineDefinition) {
+ if (!DefinitionQualifier || K != OutOfLineDefinition)
return Name;
- }
std::string QualifierName;
llvm::raw_string_ostream Oss(QualifierName);
- DefinitionQualifier->print(Oss, *LangOpts);
+ DefinitionQualifier.print(Oss, *LangOpts);
return llvm::formatv("{0}{1}", QualifierName, Name);
}
diff --git a/clang-tools-extra/clangd/refactor/tweaks/PopulateSwitch.cpp b/clang-tools-extra/clangd/refactor/tweaks/PopulateSwitch.cpp
index 43cfc76..7e61696 100644
--- a/clang-tools-extra/clangd/refactor/tweaks/PopulateSwitch.cpp
+++ b/clang-tools-extra/clangd/refactor/tweaks/PopulateSwitch.cpp
@@ -116,7 +116,7 @@ bool PopulateSwitch::prepare(const Selection &Sel) {
EnumT = Cond->getType().getCanonicalType()->getAsAdjusted<EnumType>();
if (!EnumT)
return false;
- EnumD = EnumT->getDecl();
+ EnumD = EnumT->getOriginalDecl();
if (!EnumD || EnumD->isDependentType())
return false;
diff --git a/clang-tools-extra/clangd/support/Markup.cpp b/clang-tools-extra/clangd/support/Markup.cpp
index a130830..89bdc65 100644
--- a/clang-tools-extra/clangd/support/Markup.cpp
+++ b/clang-tools-extra/clangd/support/Markup.cpp
@@ -363,7 +363,12 @@ public:
void renderMarkdown(llvm::raw_ostream &OS) const override {
std::string Marker = getMarkerForCodeBlock(Contents);
// No need to pad from previous blocks, as they should end with a new line.
- OS << Marker << Language << '\n' << Contents << '\n' << Marker << '\n';
+ OS << Marker << Language << '\n' << Contents;
+ if (!Contents.empty() && Contents.back() != '\n')
+ OS << '\n';
+ // Always end with an empty line to separate code blocks from following
+ // paragraphs.
+ OS << Marker << "\n\n";
}
void renderPlainText(llvm::raw_ostream &OS) const override {
diff --git a/clang-tools-extra/clangd/unittests/ASTTests.cpp b/clang-tools-extra/clangd/unittests/ASTTests.cpp
index d0bc3c4..76d46ba 100644
--- a/clang-tools-extra/clangd/unittests/ASTTests.cpp
+++ b/clang-tools-extra/clangd/unittests/ASTTests.cpp
@@ -421,7 +421,7 @@ TEST(ClangdAST, GetQualification) {
{
R"cpp(
namespace ns1 { namespace ns2 { void Foo(); } }
- void insert(); // ns2::Foo
+ void insert(); // ns1::ns2::Foo
namespace ns1 {
void insert(); // ns2::Foo
namespace ns2 {
@@ -429,7 +429,7 @@ TEST(ClangdAST, GetQualification) {
}
}
)cpp",
- {"ns2::", "ns2::", ""},
+ {"ns1::ns2::", "ns2::", ""},
{"ns1::"},
},
{
@@ -531,7 +531,8 @@ TEST(ClangdAST, PrintType) {
ASSERT_EQ(InsertionPoints.size(), Case.Types.size());
for (size_t I = 0, E = InsertionPoints.size(); I != E; ++I) {
const auto *DC = InsertionPoints[I];
- EXPECT_EQ(printType(AST.getASTContext().getTypeDeclType(TargetDecl), *DC),
+ EXPECT_EQ(printType(AST.getASTContext().getTypeDeclType(TargetDecl), *DC,
+ /*Placeholder=*/"", /*FullyQualify=*/true),
Case.Types[I]);
}
}
diff --git a/clang-tools-extra/clangd/unittests/CMakeLists.txt b/clang-tools-extra/clangd/unittests/CMakeLists.txt
index d425070..9656eea 100644
--- a/clang-tools-extra/clangd/unittests/CMakeLists.txt
+++ b/clang-tools-extra/clangd/unittests/CMakeLists.txt
@@ -92,6 +92,7 @@ add_unittest(ClangdUnitTests ClangdTests
SourceCodeTests.cpp
StdLibTests.cpp
SymbolCollectorTests.cpp
+ SymbolDocumentationTests.cpp
SymbolInfoTests.cpp
SyncAPI.cpp
TUSchedulerTests.cpp
diff --git a/clang-tools-extra/clangd/unittests/CodeCompleteTests.cpp b/clang-tools-extra/clangd/unittests/CodeCompleteTests.cpp
index 61bd631..1a1c32c 100644
--- a/clang-tools-extra/clangd/unittests/CodeCompleteTests.cpp
+++ b/clang-tools-extra/clangd/unittests/CodeCompleteTests.cpp
@@ -938,7 +938,7 @@ TEST(CompletionTest, IncludeInsertionRespectsQuotedAngledConfig) {
{
Config C;
C.Style.AngledHeaders.push_back(
- [](auto header) { return header == "bar.h"; });
+ [](auto header) { return header.contains("bar.h"); });
WithContextValue WithCfg(Config::Key, std::move(C));
Results = completions(TU, Test.point(), {Sym});
EXPECT_THAT(Results.Completions,
@@ -947,7 +947,7 @@ TEST(CompletionTest, IncludeInsertionRespectsQuotedAngledConfig) {
{
Config C;
C.Style.QuotedHeaders.push_back(
- [](auto header) { return header == "bar.h"; });
+ [](auto header) { return header.contains("bar.h"); });
WithContextValue WithCfg(Config::Key, std::move(C));
Results = completions(TU, Test.point(), {Sym});
EXPECT_THAT(Results.Completions,
diff --git a/clang-tools-extra/clangd/unittests/DumpASTTests.cpp b/clang-tools-extra/clangd/unittests/DumpASTTests.cpp
index cb2c17a..5c857d0 100644
--- a/clang-tools-extra/clangd/unittests/DumpASTTests.cpp
+++ b/clang-tools-extra/clangd/unittests/DumpASTTests.cpp
@@ -72,15 +72,14 @@ declaration: Namespace - root
expression: BinaryOperator - +
expression: ImplicitCast - LValueToRValue
expression: DeclRef - x
- specifier: TypeSpec
+ specifier: Type
type: Record - S
expression: ImplicitCast - LValueToRValue
expression: Member - x
expression: CXXBindTemporary
expression: CXXTemporaryObject - S
- type: Elaborated
+ type: Record - S
specifier: Namespace - root::
- type: Record - S
)"},
{R"cpp(
namespace root {
@@ -104,14 +103,13 @@ declaration: Namespace - root
expression: BinaryOperator - +
expression: ImplicitCast - LValueToRValue
expression: DeclRef - x
- specifier: TypeSpec
+ specifier: Type
type: Record - S
expression: ImplicitCast - LValueToRValue
expression: Member - x
expression: CXXTemporaryObject - S
- type: Elaborated
+ type: Record - S
specifier: Namespace - root::
- type: Record - S
)"},
{R"cpp(
namespace root {
@@ -138,7 +136,7 @@ declaration: Namespace - root
type: Builtin - unsigned int
statement: Return
expression: DependentScopeDeclRef - value
- specifier: TypeSpec
+ specifier: Type
type: TemplateTypeParm - T
)"},
{R"cpp(
@@ -154,8 +152,7 @@ declaration: Var - root
expression: DeclRef - operator+
expression: MaterializeTemporary - lvalue
expression: CXXTemporaryObject - Foo
- type: Elaborated
- type: Record - Foo
+ type: Record - Foo
expression: IntegerLiteral - 42
)"},
{R"cpp(
diff --git a/clang-tools-extra/clangd/unittests/FindTargetTests.cpp b/clang-tools-extra/clangd/unittests/FindTargetTests.cpp
index 4d77f9d..20fd23e 100644
--- a/clang-tools-extra/clangd/unittests/FindTargetTests.cpp
+++ b/clang-tools-extra/clangd/unittests/FindTargetTests.cpp
@@ -992,7 +992,7 @@ TEST_F(TargetDeclTest, DependentTypes) {
)cpp";
EXPECT_DECLS("DependentNameTypeLoc", "struct B");
- // Heuristic resolution of dependent type name which doesn't get a TypeLoc
+ // Heuristic resolution of dependent type name within a NestedNameSpecifierLoc
Code = R"cpp(
template <typename>
struct A { struct B { struct C {}; }; };
@@ -1000,7 +1000,7 @@ TEST_F(TargetDeclTest, DependentTypes) {
template <typename T>
void foo(typename A<T>::[[B]]::C);
)cpp";
- EXPECT_DECLS("NestedNameSpecifierLoc", "struct B");
+ EXPECT_DECLS("DependentNameTypeLoc", "struct B");
// Heuristic resolution of dependent type name whose qualifier is also
// dependent
diff --git a/clang-tools-extra/clangd/unittests/HeadersTests.cpp b/clang-tools-extra/clangd/unittests/HeadersTests.cpp
index 751383e3..440582e 100644
--- a/clang-tools-extra/clangd/unittests/HeadersTests.cpp
+++ b/clang-tools-extra/clangd/unittests/HeadersTests.cpp
@@ -344,6 +344,17 @@ TEST_F(HeadersTest, ShortenIncludesInSearchPathBracketed) {
EXPECT_EQ(calculate(BarHeader), "<sub/bar.h>");
}
+TEST_F(HeadersTest, ShortenIncludesInSearchPathBracketedFilterByFullPath) {
+ // The filter receives the full path of the header, so it is able to filter
+ // by the parent directory, even if it is part of the include search path.
+ AngledHeaders.push_back([](auto Path) {
+ llvm::Regex Pattern("sub/.*");
+ return Pattern.match(Path);
+ });
+ std::string BarHeader = testPath("sub/bar.h");
+ EXPECT_EQ(calculate(BarHeader), "<bar.h>");
+}
+
TEST_F(HeadersTest, ShortenedIncludeNotInSearchPath) {
std::string BarHeader =
llvm::sys::path::convert_to_slash(testPath("sub-2/bar.h"));
diff --git a/clang-tools-extra/clangd/unittests/HoverTests.cpp b/clang-tools-extra/clangd/unittests/HoverTests.cpp
index 12d260d..6407349 100644
--- a/clang-tools-extra/clangd/unittests/HoverTests.cpp
+++ b/clang-tools-extra/clangd/unittests/HoverTests.cpp
@@ -2894,7 +2894,7 @@ TEST(Hover, All) {
)cpp",
[](HoverInfo &HI) {
HI.Name = "this";
- HI.Definition = "const Foo<T> *";
+ HI.Definition = "const ns::Foo<T> *";
}},
{
R"cpp(// this expr for specialization class
@@ -2910,7 +2910,7 @@ TEST(Hover, All) {
)cpp",
[](HoverInfo &HI) {
HI.Name = "this";
- HI.Definition = "Foo<int> *";
+ HI.Definition = "ns::Foo<int> *";
}},
{
R"cpp(// this expr for partial specialization struct
@@ -2926,7 +2926,7 @@ TEST(Hover, All) {
)cpp",
[](HoverInfo &HI) {
HI.Name = "this";
- HI.Definition = "const Foo<int, F> *";
+ HI.Definition = "const ns::Foo<int, F> *";
}},
{
R"cpp(
@@ -3046,8 +3046,8 @@ TEST(Hover, All) {
HI.Kind = index::SymbolKind::Function;
HI.NamespaceScope = "";
HI.Definition = "MyRect foobar()";
- HI.Type = {"MyRect ()", "MyRect ()"};
- HI.ReturnType = {"MyRect", "MyRect"};
+ HI.Type = {"MyRect ()", "struct MyRect ()"};
+ HI.ReturnType = {"MyRect", "struct MyRect"};
HI.Parameters.emplace();
}},
{R"cpp(
@@ -3762,6 +3762,127 @@ provides Foo, Bar, Baz, Foobar, Qux and 1 more)"}};
}
}
+TEST(Hover, PresentDocumentation) {
+ struct {
+ const std::function<void(HoverInfo &)> Builder;
+ llvm::StringRef ExpectedRender;
+ } Cases[] = {
+ {[](HoverInfo &HI) {
+ HI.Kind = index::SymbolKind::Function;
+ HI.Documentation = "@brief brief doc\n\n"
+ "longer doc";
+ HI.Definition = "void foo()";
+ HI.Name = "foo";
+ },
+ R"(### function `foo`
+
+---
+**@brief** brief doc
+
+longer doc
+
+---
+```cpp
+void foo()
+```)"},
+ {[](HoverInfo &HI) {
+ HI.Kind = index::SymbolKind::Function;
+ HI.Documentation = "@brief brief doc\n\n"
+ "longer doc";
+ HI.Definition = "int foo()";
+ HI.ReturnType = "int";
+ HI.Name = "foo";
+ },
+ R"(### function `foo`
+
+---
+→ `int`
+
+**@brief** brief doc
+
+longer doc
+
+---
+```cpp
+int foo()
+```)"},
+ {[](HoverInfo &HI) {
+ HI.Kind = index::SymbolKind::Function;
+ HI.Documentation = "@brief brief doc\n\n"
+ "longer doc\n@param a this is a param\n@return it "
+ "returns something";
+ HI.Definition = "int foo(int a)";
+ HI.ReturnType = "int";
+ HI.Name = "foo";
+ HI.Parameters.emplace();
+ HI.Parameters->emplace_back();
+ HI.Parameters->back().Type = "int";
+ HI.Parameters->back().Name = "a";
+ },
+ R"(### function `foo`
+
+---
+→ `int`
+
+Parameters:
+
+- `int a` - this is a param
+
+**@brief** brief doc
+
+longer doc
+
+**@return** it returns something
+
+---
+```cpp
+int foo(int a)
+```)"},
+ {[](HoverInfo &HI) {
+ HI.Kind = index::SymbolKind::Function;
+ HI.Documentation = "@brief brief doc\n\n"
+ "longer doc\n@param a this is a param\n@param b "
+ "does not exist\n@return it returns something";
+ HI.Definition = "int foo(int a)";
+ HI.ReturnType = "int";
+ HI.Name = "foo";
+ HI.Parameters.emplace();
+ HI.Parameters->emplace_back();
+ HI.Parameters->back().Type = "int";
+ HI.Parameters->back().Name = "a";
+ },
+ R"(### function `foo`
+
+---
+→ `int`
+
+Parameters:
+
+- `int a` - this is a param
+
+**@brief** brief doc
+
+longer doc
+
+**@return** it returns something
+
+---
+```cpp
+int foo(int a)
+```)"},
+ };
+
+ for (const auto &C : Cases) {
+ HoverInfo HI;
+ C.Builder(HI);
+ Config Cfg;
+ Cfg.Hover.ShowAKA = true;
+ Cfg.Documentation.CommentFormat = Config::CommentFormatPolicy::Doxygen;
+ WithContextValue WithCfg(Config::Key, std::move(Cfg));
+ EXPECT_EQ(HI.present(MarkupKind::Markdown), C.ExpectedRender);
+ }
+}
+
TEST(Hover, ParseDocumentation) {
struct Case {
llvm::StringRef Documentation;
@@ -4339,6 +4460,149 @@ constexpr u64 pow_with_mod(u64 a, u64 b, u64 p) {
EXPECT_TRUE(H->Value);
EXPECT_TRUE(H->Type);
}
+
+TEST(Hover, FunctionParameters) {
+ struct {
+ const char *const Code;
+ const std::function<void(HoverInfo &)> ExpectedBuilder;
+ std::string ExpectedRender;
+ } Cases[] = {
+ {R"cpp(/// Function doc
+ void foo(int [[^a]]);
+ )cpp",
+ [](HoverInfo &HI) {
+ HI.Name = "a";
+ HI.Kind = index::SymbolKind::Parameter;
+ HI.NamespaceScope = "";
+ HI.LocalScope = "foo::";
+ HI.Type = "int";
+ HI.Definition = "int a";
+ HI.Documentation = "";
+ },
+ "### param `a`\n\n---\nType: `int`\n\n---\n```cpp\n// In foo\nint "
+ "a\n```"},
+ {R"cpp(/// Function doc
+ /// @param a this is doc for a
+ void foo(int [[^a]]);
+ )cpp",
+ [](HoverInfo &HI) {
+ HI.Name = "a";
+ HI.Kind = index::SymbolKind::Parameter;
+ HI.NamespaceScope = "";
+ HI.LocalScope = "foo::";
+ HI.Type = "int";
+ HI.Definition = "int a";
+ HI.Documentation = "this is doc for a";
+ },
+ "### param `a`\n\n---\nType: `int`\n\nthis is doc for "
+ "a\n\n---\n```cpp\n// In foo\nint a\n```"},
+ {R"cpp(/// Function doc
+ /// @param b this is doc for b
+ void foo(int [[^a]], int b);
+ )cpp",
+ [](HoverInfo &HI) {
+ HI.Name = "a";
+ HI.Kind = index::SymbolKind::Parameter;
+ HI.NamespaceScope = "";
+ HI.LocalScope = "foo::";
+ HI.Type = "int";
+ HI.Definition = "int a";
+ HI.Documentation = "";
+ },
+ "### param `a`\n\n---\nType: `int`\n\n---\n```cpp\n// In foo\nint "
+ "a\n```"},
+ {R"cpp(/// Function doc
+ /// @param b this is doc for \p b
+ void foo(int a, int [[^b]]);
+ )cpp",
+ [](HoverInfo &HI) {
+ HI.Name = "b";
+ HI.Kind = index::SymbolKind::Parameter;
+ HI.NamespaceScope = "";
+ HI.LocalScope = "foo::";
+ HI.Type = "int";
+ HI.Definition = "int b";
+ HI.Documentation = "this is doc for \\p b";
+ },
+ "### param `b`\n\n---\nType: `int`\n\nthis is doc for "
+ "`b`\n\n---\n```cpp\n// In foo\nint b\n```"},
+ {R"cpp(/// Function doc
+ /// @param b this is doc for \p b
+ template <typename T>
+ void foo(T a, T [[^b]]);
+ )cpp",
+ [](HoverInfo &HI) {
+ HI.Name = "b";
+ HI.Kind = index::SymbolKind::Parameter;
+ HI.NamespaceScope = "";
+ HI.LocalScope = "foo::";
+ HI.Type = "T";
+ HI.Definition = "T b";
+ HI.Documentation = "this is doc for \\p b";
+ },
+ "### param `b`\n\n---\nType: `T`\n\nthis is doc for "
+ "`b`\n\n---\n```cpp\n// In foo\nT b\n```"},
+ {R"cpp(/// Function doc
+ /// @param b this is <b>doc</b> <html-tag attribute/> <another-html-tag attribute="value">for</another-html-tag> \p b
+ void foo(int a, int [[^b]]);
+ )cpp",
+ [](HoverInfo &HI) {
+ HI.Name = "b";
+ HI.Kind = index::SymbolKind::Parameter;
+ HI.NamespaceScope = "";
+ HI.LocalScope = "foo::";
+ HI.Type = "int";
+ HI.Definition = "int b";
+ HI.Documentation =
+ "this is <b>doc</b> <html-tag attribute/> <another-html-tag "
+ "attribute=\"value\">for</another-html-tag> \\p b";
+ },
+ "### param `b`\n\n---\nType: `int`\n\nthis is \\<b>doc\\</b> "
+ "\\<html-tag attribute/> \\<another-html-tag "
+ "attribute=\"value\">for\\</another-html-tag> "
+ "`b`\n\n---\n```cpp\n// In foo\nint b\n```"},
+ };
+
+ // Create a tiny index, so tests above can verify documentation is fetched.
+ Symbol IndexSym = func("indexSymbol");
+ IndexSym.Documentation = "comment from index";
+ SymbolSlab::Builder Symbols;
+ Symbols.insert(IndexSym);
+ auto Index =
+ MemIndex::build(std::move(Symbols).build(), RefSlab(), RelationSlab());
+
+ for (const auto &Case : Cases) {
+ SCOPED_TRACE(Case.Code);
+
+ Annotations T(Case.Code);
+ TestTU TU = TestTU::withCode(T.code());
+ auto AST = TU.build();
+ Config Cfg;
+ Cfg.Hover.ShowAKA = true;
+ Cfg.Documentation.CommentFormat = Config::CommentFormatPolicy::Doxygen;
+ WithContextValue WithCfg(Config::Key, std::move(Cfg));
+ auto H = getHover(AST, T.point(), format::getLLVMStyle(), Index.get());
+ ASSERT_TRUE(H);
+ HoverInfo Expected;
+ Expected.SymRange = T.range();
+ Case.ExpectedBuilder(Expected);
+
+ EXPECT_EQ(H->present(MarkupKind::Markdown), Case.ExpectedRender);
+ EXPECT_EQ(H->NamespaceScope, Expected.NamespaceScope);
+ EXPECT_EQ(H->LocalScope, Expected.LocalScope);
+ EXPECT_EQ(H->Name, Expected.Name);
+ EXPECT_EQ(H->Kind, Expected.Kind);
+ EXPECT_EQ(H->Documentation, Expected.Documentation);
+ EXPECT_EQ(H->Definition, Expected.Definition);
+ EXPECT_EQ(H->Type, Expected.Type);
+ EXPECT_EQ(H->ReturnType, Expected.ReturnType);
+ EXPECT_EQ(H->Parameters, Expected.Parameters);
+ EXPECT_EQ(H->TemplateParameters, Expected.TemplateParameters);
+ EXPECT_EQ(H->SymRange, Expected.SymRange);
+ EXPECT_EQ(H->Value, Expected.Value);
+ }
+}
+
} // namespace
} // namespace clangd
} // namespace clang
diff --git a/clang-tools-extra/clangd/unittests/InlayHintTests.cpp b/clang-tools-extra/clangd/unittests/InlayHintTests.cpp
index e0cd955..99e728c 100644
--- a/clang-tools-extra/clangd/unittests/InlayHintTests.cpp
+++ b/clang-tools-extra/clangd/unittests/InlayHintTests.cpp
@@ -1295,14 +1295,7 @@ TEST(TypeHints, NoQualifiers) {
}
}
)cpp",
- ExpectedHint{": S1", "x"},
- // FIXME: We want to suppress scope specifiers
- // here because we are into the whole
- // brevity thing, but the ElaboratedType
- // printer does not honor the SuppressScope
- // flag by design, so we need to extend the
- // PrintingPolicy to support this use case.
- ExpectedHint{": S2::Inner<int>", "y"});
+ ExpectedHint{": S1", "x"}, ExpectedHint{": Inner<int>", "y"});
}
TEST(TypeHints, Lambda) {
diff --git a/clang-tools-extra/clangd/unittests/QualityTests.cpp b/clang-tools-extra/clangd/unittests/QualityTests.cpp
index 619ea32..4954659 100644
--- a/clang-tools-extra/clangd/unittests/QualityTests.cpp
+++ b/clang-tools-extra/clangd/unittests/QualityTests.cpp
@@ -121,7 +121,9 @@ TEST(QualityTests, SymbolRelevanceSignalExtraction) {
SymbolRelevanceSignals Relevance;
Relevance.merge(CodeCompletionResult(&findDecl(AST, "deprecated"),
- /*Priority=*/42, nullptr, false,
+ /*Priority=*/42,
+ /*Qualifier=*/std::nullopt,
+ /*QualifierIsInformative=*/false,
/*Accessible=*/false));
EXPECT_EQ(Relevance.NameMatch, SymbolRelevanceSignals().NameMatch);
EXPECT_TRUE(Relevance.Forbidden);
@@ -487,13 +489,15 @@ TEST(QualityTests, ItemWithFixItsRankedDown) {
auto AST = Header.build();
SymbolRelevanceSignals RelevanceWithFixIt;
- RelevanceWithFixIt.merge(CodeCompletionResult(&findDecl(AST, "x"), 0, nullptr,
- false, true, {FixItHint{}}));
+ RelevanceWithFixIt.merge(CodeCompletionResult(
+ &findDecl(AST, "x"), /*Priority=*/0, /*Qualifier=*/std::nullopt,
+ /*QualifierIsInformative=*/false, /*Accessible=*/true, {FixItHint{}}));
EXPECT_TRUE(RelevanceWithFixIt.NeedsFixIts);
SymbolRelevanceSignals RelevanceWithoutFixIt;
- RelevanceWithoutFixIt.merge(
- CodeCompletionResult(&findDecl(AST, "x"), 0, nullptr, false, true, {}));
+ RelevanceWithoutFixIt.merge(CodeCompletionResult(
+ &findDecl(AST, "x"), /*Priority=*/0, /*Qualifier=*/std::nullopt,
+ /*QualifierIsInformative=*/false, /*Accessible=*/true, {}));
EXPECT_FALSE(RelevanceWithoutFixIt.NeedsFixIts);
EXPECT_LT(RelevanceWithFixIt.evaluateHeuristics(),
diff --git a/clang-tools-extra/clangd/unittests/SelectionTests.cpp b/clang-tools-extra/clangd/unittests/SelectionTests.cpp
index aaaf758..3df19d8 100644
--- a/clang-tools-extra/clangd/unittests/SelectionTests.cpp
+++ b/clang-tools-extra/clangd/unittests/SelectionTests.cpp
@@ -104,9 +104,9 @@ TEST(SelectionTest, CommonAncestor) {
{
R"cpp(
template <typename T>
- int x = [[T::^U::]]ccc();
+ int x = T::[[^U]]::ccc();
)cpp",
- "NestedNameSpecifierLoc",
+ "DependentNameTypeLoc",
},
{
R"cpp(
diff --git a/clang-tools-extra/clangd/unittests/SymbolDocumentationTests.cpp b/clang-tools-extra/clangd/unittests/SymbolDocumentationTests.cpp
new file mode 100644
index 0000000..69eb13b
--- /dev/null
+++ b/clang-tools-extra/clangd/unittests/SymbolDocumentationTests.cpp
@@ -0,0 +1,161 @@
+//===-- SymbolDocumentationTests.cpp --------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#include "SymbolDocumentation.h"
+
+#include "support/Markup.h"
+#include "clang/Basic/CommentOptions.h"
+#include "llvm/ADT/StringRef.h"
+#include "gtest/gtest.h"
+
+namespace clang {
+namespace clangd {
+
+TEST(SymbolDocumentation, Parse) {
+
+ CommentOptions CommentOpts;
+
+ struct Case {
+ llvm::StringRef Documentation;
+ llvm::StringRef ExpectedRenderEscapedMarkdown;
+ llvm::StringRef ExpectedRenderMarkdown;
+ llvm::StringRef ExpectedRenderPlainText;
+ } Cases[] = {
+ {
+ "foo bar",
+ "foo bar",
+ "foo bar",
+ "foo bar",
+ },
+ {
+ "foo\nbar\n",
+ "foo\nbar",
+ "foo\nbar",
+ "foo bar",
+ },
+ {
+ "foo\n\nbar\n",
+ "foo\n\nbar",
+ "foo\n\nbar",
+ "foo\n\nbar",
+ },
+ {
+ "foo \\p bar baz",
+ "foo `bar` baz",
+ "foo `bar` baz",
+ "foo bar baz",
+ },
+ {
+ "foo \\e bar baz",
+ "foo \\*bar\\* baz",
+ "foo *bar* baz",
+ "foo *bar* baz",
+ },
+ {
+ "foo \\b bar baz",
+ "foo \\*\\*bar\\*\\* baz",
+ "foo **bar** baz",
+ "foo **bar** baz",
+ },
+ {
+ "foo \\ref bar baz",
+ "foo \\*\\*\\\\ref\\*\\* \\*bar\\* baz",
+ "foo **\\ref** *bar* baz",
+ "foo **\\ref** *bar* baz",
+ },
+ {
+ "foo @ref bar baz",
+ "foo \\*\\*@ref\\*\\* \\*bar\\* baz",
+ "foo **@ref** *bar* baz",
+ "foo **@ref** *bar* baz",
+ },
+ {
+ "\\brief this is a \\n\nbrief description",
+ "\\*\\*\\\\brief\\*\\* this is a \nbrief description",
+ "**\\brief** this is a \nbrief description",
+ "**\\brief** this is a\nbrief description",
+ },
+ {
+ "\\throw exception foo",
+ "\\*\\*\\\\throw\\*\\* \\*exception\\* foo",
+ "**\\throw** *exception* foo",
+ "**\\throw** *exception* foo",
+ },
+ {
+ "\\brief this is a brief description\n\n\\li item 1\n\\li item "
+ "2\n\\arg item 3",
+ "\\*\\*\\\\brief\\*\\* this is a brief description\n\n- item 1\n\n- "
+ "item "
+ "2\n\n- "
+ "item 3",
+ "**\\brief** this is a brief description\n\n- item 1\n\n- item "
+ "2\n\n- "
+ "item 3",
+ "**\\brief** this is a brief description\n\n- item 1\n\n- item "
+ "2\n\n- "
+ "item 3",
+ },
+ {
+ "\\defgroup mygroup this is a group\nthis is not a group description",
+ "\\*\\*@defgroup\\*\\* `mygroup this is a group`\n\nthis is not a "
+ "group "
+ "description",
+ "**@defgroup** `mygroup this is a group`\n\nthis is not a group "
+ "description",
+ "**@defgroup** `mygroup this is a group`\n\nthis is not a group "
+ "description",
+ },
+ {
+ "\\verbatim\nthis is a\nverbatim block containing\nsome verbatim "
+ "text\n\\endverbatim",
+ "\\*\\*@verbatim\\*\\*\n\n```\nthis is a\nverbatim block "
+ "containing\nsome "
+ "verbatim text\n```\n\n\\*\\*@endverbatim\\*\\*",
+ "**@verbatim**\n\n```\nthis is a\nverbatim block containing\nsome "
+ "verbatim text\n```\n\n**@endverbatim**",
+ "**@verbatim**\n\nthis is a\nverbatim block containing\nsome "
+ "verbatim text\n\n**@endverbatim**",
+ },
+ {
+ "@param foo this is a parameter\n@param bar this is another "
+ "parameter",
+ "",
+ "",
+ "",
+ },
+ {
+ "@brief brief docs\n\n@param foo this is a parameter\n\nMore "
+ "description\ndocumentation",
+ "\\*\\*@brief\\*\\* brief docs\n\nMore description\ndocumentation",
+ "**@brief** brief docs\n\nMore description\ndocumentation",
+ "**@brief** brief docs\n\nMore description documentation",
+ },
+ {
+ "<b>this is a bold text</b>\nnormal text\n<i>this is an italic "
+ "text</i>\n<code>this is a code block</code>",
+ "\\<b>this is a bold text\\</b>\nnormal text\n\\<i>this is an italic "
+ "text\\</i>\n\\<code>this is a code block\\</code>",
+ "\\<b>this is a bold text\\</b>\nnormal text\n\\<i>this is an italic "
+ "text\\</i>\n\\<code>this is a code block\\</code>",
+ "<b>this is a bold text</b> normal text <i>this is an italic "
+ "text</i> <code>this is a code block</code>",
+ },
+ };
+ for (const auto &C : Cases) {
+ markup::Document Doc;
+ SymbolDocCommentVisitor SymbolDoc(C.Documentation, CommentOpts);
+
+ SymbolDoc.docToMarkup(Doc);
+
+ EXPECT_EQ(Doc.asPlainText(), C.ExpectedRenderPlainText);
+ EXPECT_EQ(Doc.asMarkdown(), C.ExpectedRenderMarkdown);
+ EXPECT_EQ(Doc.asEscapedMarkdown(), C.ExpectedRenderEscapedMarkdown);
+ }
+}
+
+} // namespace clangd
+} // namespace clang
diff --git a/clang-tools-extra/clangd/unittests/support/MarkupTests.cpp b/clang-tools-extra/clangd/unittests/support/MarkupTests.cpp
index 482f230..5f91f31 100644
--- a/clang-tools-extra/clangd/unittests/support/MarkupTests.cpp
+++ b/clang-tools-extra/clangd/unittests/support/MarkupTests.cpp
@@ -463,6 +463,7 @@ TEST(Document, Separators) {
```cpp
test
```
+
bar)md";
EXPECT_EQ(D.asEscapedMarkdown(), ExpectedMarkdown);
EXPECT_EQ(D.asMarkdown(), ExpectedMarkdown);
@@ -559,6 +560,7 @@ foo
bar
baz
```
+
```cpp
foo
```)md";
@@ -571,6 +573,12 @@ foo
foo)pt";
EXPECT_EQ(D.asPlainText(), ExpectedPlainText);
+
+ Document D2;
+ D2.addCodeBlock("");
+ EXPECT_EQ(D2.asEscapedMarkdown(), "```cpp\n```");
+ EXPECT_EQ(D2.asMarkdown(), "```cpp\n```");
+ EXPECT_EQ(D2.asPlainText(), "");
}
TEST(BulletList, Render) {
diff --git a/clang-tools-extra/docs/ReleaseNotes.rst b/clang-tools-extra/docs/ReleaseNotes.rst
index 2dc5c73..187aae2 100644
--- a/clang-tools-extra/docs/ReleaseNotes.rst
+++ b/clang-tools-extra/docs/ReleaseNotes.rst
@@ -134,6 +134,10 @@ Changes in existing checks
<clang-tidy/checks/bugprone/infinite-loop>` check by adding detection for
variables introduced by structured bindings.
+- Improved :doc:`bugprone-narrowing-conversions
+ <clang-tidy/checks/bugprone/narrowing-conversions>` check by fixing a
+ false positive in the analysis of conditional expressions in C.
+
- Improved :doc:`bugprone-reserved-identifier
<clang-tidy/checks/bugprone/reserved-identifier>` check by ignoring
declarations in system headers.
@@ -182,6 +186,10 @@ Changes in existing checks
<clang-tidy/checks/portability/template-virtual-member-function>` check to
avoid false positives on pure virtual member functions.
+- Improved :doc:`readability-container-size-empty
+ <clang-tidy/checks/readability/container-size-empty>` check by correctly
+ generating fix-it hints when the ``size`` method is called through an implicit ``this``.
+
- Improved :doc:`readability-identifier-naming
<clang-tidy/checks/readability/identifier-naming>` check by ignoring
declarations in system headers.
diff --git a/clang-tools-extra/include-cleaner/include/clang-include-cleaner/Types.h b/clang-tools-extra/include-cleaner/include/clang-include-cleaner/Types.h
index 2888e25..057b92c 100644
--- a/clang-tools-extra/include-cleaner/include/clang-include-cleaner/Types.h
+++ b/clang-tools-extra/include-cleaner/include/clang-include-cleaner/Types.h
@@ -136,7 +136,7 @@ struct Header {
}
StringRef verbatim() const { return std::get<Verbatim>(Storage); }
- /// For phiscal files, either absolute path or path relative to the execution
+ /// For physical files, either absolute path or path relative to the execution
/// root. Otherwise just the spelling without surrounding quotes/brackets.
llvm::StringRef resolvedPath() const;
diff --git a/clang-tools-extra/include-cleaner/lib/WalkAST.cpp b/clang-tools-extra/include-cleaner/lib/WalkAST.cpp
index 49cc136..0cbf9a0 100644
--- a/clang-tools-extra/include-cleaner/lib/WalkAST.cpp
+++ b/clang-tools-extra/include-cleaner/lib/WalkAST.cpp
@@ -60,14 +60,10 @@ class ASTWalker : public RecursiveASTVisitor<ASTWalker> {
NamedDecl *getMemberProvider(QualType Base) {
if (Base->isPointerType())
return getMemberProvider(Base->getPointeeType());
- // Unwrap the sugar ElaboratedType.
- if (const auto *ElTy = dyn_cast<ElaboratedType>(Base))
- return getMemberProvider(ElTy->getNamedType());
-
if (const auto *TT = dyn_cast<TypedefType>(Base))
return TT->getDecl();
if (const auto *UT = dyn_cast<UsingType>(Base))
- return UT->getFoundDecl();
+ return UT->getDecl();
// A heuristic: to resolve a template type to **only** its template name.
// We're only using this method for the base type of MemberExpr, in general
// the template provides the member, and the critical case `unique_ptr<Foo>`
@@ -135,16 +131,14 @@ public:
}
bool qualifierIsNamespaceOrNone(DeclRefExpr *DRE) {
- const auto *Qual = DRE->getQualifier();
- if (!Qual)
- return true;
- switch (Qual->getKind()) {
- case NestedNameSpecifier::Namespace:
- case NestedNameSpecifier::Global:
+ NestedNameSpecifier Qual = DRE->getQualifier();
+ switch (Qual.getKind()) {
+ case NestedNameSpecifier::Kind::Null:
+ case NestedNameSpecifier::Kind::Namespace:
+ case NestedNameSpecifier::Kind::Global:
return true;
- case NestedNameSpecifier::TypeSpec:
- case NestedNameSpecifier::Super:
- case NestedNameSpecifier::Identifier:
+ case NestedNameSpecifier::Kind::Type:
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
return false;
}
llvm_unreachable("Unknown value for NestedNameSpecifierKind");
@@ -341,17 +335,17 @@ public:
}
bool VisitUsingTypeLoc(UsingTypeLoc TL) {
- reportType(TL.getNameLoc(), TL.getFoundDecl());
+ reportType(TL.getNameLoc(), TL.getDecl());
return true;
}
bool VisitTagTypeLoc(TagTypeLoc TTL) {
- reportType(TTL.getNameLoc(), TTL.getDecl());
+ reportType(TTL.getNameLoc(), TTL.getOriginalDecl());
return true;
}
bool VisitTypedefTypeLoc(TypedefTypeLoc TTL) {
- reportType(TTL.getNameLoc(), TTL.getTypedefNameDecl());
+ reportType(TTL.getNameLoc(), TTL.getDecl());
return true;
}
diff --git a/clang-tools-extra/test/clang-tidy/checkers/bugprone/copy-constructor-init.cpp b/clang-tools-extra/test/clang-tidy/checkers/bugprone/copy-constructor-init.cpp
index b42d3fc..35314d1 100644
--- a/clang-tools-extra/test/clang-tidy/checkers/bugprone/copy-constructor-init.cpp
+++ b/clang-tools-extra/test/clang-tidy/checkers/bugprone/copy-constructor-init.cpp
@@ -165,7 +165,6 @@ FROMMACRO
class X15 : public CopyableAlias2 {
X15(const X15 &other) {}
// CHECK-MESSAGES: :[[@LINE-1]]:3: warning: calling a base constructor
- // CHECK-FIXES: X15(const X15 &other) : Copyable5(other) {}
};
class X16 : public NonCopyable {
diff --git a/clang-tools-extra/test/clang-tidy/checkers/bugprone/narrowing-conversions-conditional-expressions.c b/clang-tools-extra/test/clang-tidy/checkers/bugprone/narrowing-conversions-conditional-expressions.c
new file mode 100644
index 0000000..ce26b78
--- /dev/null
+++ b/clang-tools-extra/test/clang-tidy/checkers/bugprone/narrowing-conversions-conditional-expressions.c
@@ -0,0 +1,22 @@
+// RUN: %check_clang_tidy %s bugprone-narrowing-conversions %t -- \
+// RUN: -- -target x86_64-unknown-linux
+
+char test_char(int cond, char c) {
+ char ret = cond > 0 ? ':' : c;
+ return ret;
+}
+
+short test_short(int cond, short s) {
+ short ret = cond > 0 ? ':' : s;
+ return ret;
+}
+
+int test_int(int cond, int i) {
+ int ret = cond > 0 ? ':' : i;
+ return ret;
+}
+
+void test(int cond, int i) {
+ char x = cond > 0 ? ':' : i;
+ // CHECK-MESSAGES: :[[@LINE-1]]:29: warning: narrowing conversion from 'int' to signed type 'char' is implementation-defined [bugprone-narrowing-conversions]
+}
diff --git a/clang-tools-extra/test/clang-tidy/checkers/bugprone/narrowing-conversions-conditional-expressions.cpp b/clang-tools-extra/test/clang-tidy/checkers/bugprone/narrowing-conversions-conditional-expressions.cpp
new file mode 100644
index 0000000..ce26b78
--- /dev/null
+++ b/clang-tools-extra/test/clang-tidy/checkers/bugprone/narrowing-conversions-conditional-expressions.cpp
@@ -0,0 +1,22 @@
+// RUN: %check_clang_tidy %s bugprone-narrowing-conversions %t -- \
+// RUN: -- -target x86_64-unknown-linux
+
+char test_char(int cond, char c) {
+ char ret = cond > 0 ? ':' : c;
+ return ret;
+}
+
+short test_short(int cond, short s) {
+ short ret = cond > 0 ? ':' : s;
+ return ret;
+}
+
+int test_int(int cond, int i) {
+ int ret = cond > 0 ? ':' : i;
+ return ret;
+}
+
+void test(int cond, int i) {
+ char x = cond > 0 ? ':' : i;
+ // CHECK-MESSAGES: :[[@LINE-1]]:29: warning: narrowing conversion from 'int' to signed type 'char' is implementation-defined [bugprone-narrowing-conversions]
+}
diff --git a/clang-tools-extra/test/clang-tidy/checkers/bugprone/unused-local-non-trivial-variable.cpp b/clang-tools-extra/test/clang-tidy/checkers/bugprone/unused-local-non-trivial-variable.cpp
index 721c55b..5dd2387 100644
--- a/clang-tools-extra/test/clang-tidy/checkers/bugprone/unused-local-non-trivial-variable.cpp
+++ b/clang-tools-extra/test/clang-tidy/checkers/bugprone/unused-local-non-trivial-variable.cpp
@@ -69,14 +69,14 @@ template<typename T>
T qux(T Generic) {
async::Future<Units> PendingA = acquireUnits();
auto PendingB = acquireUnits();
- // CHECK-MESSAGES: :[[@LINE-1]]:10: warning: unused local variable 'PendingB' of type 'a::Future<Units>' (aka 'Future<Units>') [bugprone-unused-local-non-trivial-variable]
+ // CHECK-MESSAGES: :[[@LINE-1]]:10: warning: unused local variable 'PendingB' of type 'a::Future<Units>' (aka 'async::Future<Units>') [bugprone-unused-local-non-trivial-variable]
async::Future<Units> MustBeUsed;
// CHECK-MESSAGES: :[[@LINE-1]]:26: warning: unused local variable 'MustBeUsed' of type 'async::Future<Units>' [bugprone-unused-local-non-trivial-variable]
PendingA.get();
async::Future<T> TemplateType;
// CHECK-MESSAGES: :[[@LINE-1]]:22: warning: unused local variable 'TemplateType' of type 'async::Future<T>' [bugprone-unused-local-non-trivial-variable]
a::Future<T> AliasTemplateType;
- // CHECK-MESSAGES: :[[@LINE-1]]:18: warning: unused local variable 'AliasTemplateType' of type 'a::Future<T>' (aka 'Future<T>') [bugprone-unused-local-non-trivial-variable]
+ // CHECK-MESSAGES: :[[@LINE-1]]:18: warning: unused local variable 'AliasTemplateType' of type 'a::Future<T>' (aka 'async::Future<T>') [bugprone-unused-local-non-trivial-variable]
[[maybe_unused]] async::Future<Units> MaybeUnused;
return Generic;
}
@@ -86,7 +86,7 @@ async::Future<int> Global;
int bar(int Num) {
a::Future<Units> PendingA = acquireUnits();
a::Future<Units> PendingB = acquireUnits(); // not used at all, unused variable not fired because of destructor side effect
- // CHECK-MESSAGES: :[[@LINE-1]]:22: warning: unused local variable 'PendingB' of type 'a::Future<Units>' (aka 'Future<Units>') [bugprone-unused-local-non-trivial-variable]
+ // CHECK-MESSAGES: :[[@LINE-1]]:22: warning: unused local variable 'PendingB' of type 'a::Future<Units>' (aka 'async::Future<Units>') [bugprone-unused-local-non-trivial-variable]
auto Num2 = PendingA.get();
auto Num3 = qux(Num);
async::Ptr<a::Future<Units>> Shared = async::Ptr<a::Future<Units>>(acquireUnits());
diff --git a/clang-tools-extra/test/clang-tidy/checkers/portability/std-allocator-const.cpp b/clang-tools-extra/test/clang-tidy/checkers/portability/std-allocator-const.cpp
index 732cf5d..a38594a 100644
--- a/clang-tools-extra/test/clang-tidy/checkers/portability/std-allocator-const.cpp
+++ b/clang-tools-extra/test/clang-tidy/checkers/portability/std-allocator-const.cpp
@@ -43,25 +43,25 @@ template <class T>
class allocator {};
void simple(const std::vector<const char> &v, std::deque<const short> *d) {
- // CHECK-MESSAGES: [[#@LINE-1]]:24: warning: container using std::allocator<const T> is a deprecated libc++ extension; remove const for compatibility with other standard libraries
- // CHECK-MESSAGES: [[#@LINE-2]]:52: warning: container
+ // CHECK-MESSAGES: [[#@LINE-1]]:19: warning: container using std::allocator<const T> is a deprecated libc++ extension; remove const for compatibility with other standard libraries
+ // CHECK-MESSAGES: [[#@LINE-2]]:47: warning: container
std::list<const long> l;
- // CHECK-MESSAGES: [[#@LINE-1]]:8: warning: container
+ // CHECK-MESSAGES: [[#@LINE-1]]:3: warning: container
std::multiset<int *const> ms;
- // CHECK-MESSAGES: [[#@LINE-1]]:8: warning: container
+ // CHECK-MESSAGES: [[#@LINE-1]]:3: warning: container
std::set<const std::hash<int>> s;
- // CHECK-MESSAGES: [[#@LINE-1]]:8: warning: container
+ // CHECK-MESSAGES: [[#@LINE-1]]:3: warning: container
std::unordered_multiset<int *const> ums;
- // CHECK-MESSAGES: [[#@LINE-1]]:8: warning: container
+ // CHECK-MESSAGES: [[#@LINE-1]]:3: warning: container
std::unordered_set<const int> us;
- // CHECK-MESSAGES: [[#@LINE-1]]:8: warning: container
+ // CHECK-MESSAGES: [[#@LINE-1]]:3: warning: container
absl::flat_hash_set<const int> fhs;
- // CHECK-MESSAGES: [[#@LINE-1]]:9: warning: container
+ // CHECK-MESSAGES: [[#@LINE-1]]:3: warning: container
using my_vector = std::vector<const int>;
- // CHECK-MESSAGES: [[#@LINE-1]]:26: warning: container
+ // CHECK-MESSAGES: [[#@LINE-1]]:21: warning: container
my_vector v1;
using my_vector2 = my_vector;
@@ -76,7 +76,7 @@ void simple(const std::vector<const char> &v, std::deque<const short> *d) {
template <class T>
void temp1() {
std::vector<const T> v;
- // CHECK-MESSAGES: [[#@LINE-1]]:8: warning: container
+ // CHECK-MESSAGES: [[#@LINE-1]]:3: warning: container
std::vector<T> neg1;
std::forward_list<const T> neg2;
@@ -87,7 +87,7 @@ template <class T>
void temp2() {
// Match std::vector<const dependent> for the uninstantiated temp2.
std::vector<const T> v;
- // CHECK-MESSAGES: [[#@LINE-1]]:8: warning: container
+ // CHECK-MESSAGES: [[#@LINE-1]]:3: warning: container
std::vector<T> neg1;
std::forward_list<const T> neg2;
diff --git a/clang-tools-extra/test/clang-tidy/checkers/readability/container-size-empty.cpp b/clang-tools-extra/test/clang-tidy/checkers/readability/container-size-empty.cpp
index 2fd0b222..b1e6867 100644
--- a/clang-tools-extra/test/clang-tidy/checkers/readability/container-size-empty.cpp
+++ b/clang-tools-extra/test/clang-tidy/checkers/readability/container-size-empty.cpp
@@ -895,3 +895,16 @@ namespace PR94454 {
int operator""_ci() { return 0; }
auto eq = 0_ci == 0;
}
+
+namespace GH152387 {
+
+class foo : public std::string {
+ void doit() {
+ if (!size()) {
+ // CHECK-MESSAGES: :[[@LINE-1]]:10: warning: the 'empty' method should be used to check for emptiness instead of 'size'
+ // CHECK-FIXES: if (empty()) {
+ }
+ }
+};
+
+}
diff --git a/clang/bindings/python/tests/cindex/test_type.py b/clang/bindings/python/tests/cindex/test_type.py
index 34081bb..cc101be 100644
--- a/clang/bindings/python/tests/cindex/test_type.py
+++ b/clang/bindings/python/tests/cindex/test_type.py
@@ -63,7 +63,7 @@ class TestType(unittest.TestCase):
self.assertIsNotNone(fields[1].translation_unit)
self.assertEqual(fields[1].spelling, "b")
self.assertFalse(fields[1].type.is_const_qualified())
- self.assertEqual(fields[1].type.kind, TypeKind.ELABORATED)
+ self.assertEqual(fields[1].type.kind, TypeKind.TYPEDEF)
self.assertEqual(fields[1].type.get_canonical().kind, TypeKind.INT)
self.assertEqual(fields[1].type.get_declaration().spelling, "I")
self.assertEqual(fields[1].type.get_typedef_name(), "I")
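
The binding test update above captures the user-visible effect of the ElaboratedType removal on the Python bindings: a field whose type is written through a typedef now reports ``TypeKind.TYPEDEF`` directly instead of ``TypeKind.ELABORATED``. A minimal standalone sketch of the same behavior, assuming only the standard ``clang.cindex`` API; the file name and source snippet are made up for illustration:

    from clang.cindex import CursorKind, Index, TypeKind

    # A struct field whose type is written through a typedef.
    source = "typedef int I; struct S { int a; I b; };"
    tu = Index.create().parse("example.c", unsaved_files=[("example.c", source)])

    for node in tu.cursor.walk_preorder():
        if node.kind == CursorKind.FIELD_DECL and node.spelling == "b":
            # Previously TypeKind.ELABORATED; the typedef sugar is now reported directly.
            assert node.type.kind == TypeKind.TYPEDEF
            assert node.type.get_canonical().kind == TypeKind.INT
            assert node.type.get_typedef_name() == "I"
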
diff --git a/clang/docs/ClangOffloadBundler.rst b/clang/docs/ClangOffloadBundler.rst
index 62cf164..5570dbb 100644
--- a/clang/docs/ClangOffloadBundler.rst
+++ b/clang/docs/ClangOffloadBundler.rst
@@ -525,15 +525,15 @@ The compressed offload bundle begins with a header followed by the compressed bi
This is a unique identifier to distinguish compressed offload bundles. The value is the string 'CCOB' (Compressed Clang Offload Bundle).
- **Version Number (16-bit unsigned int)**:
- This denotes the version of the compressed offload bundle format. The current version is `2`.
+ This denotes the version of the compressed offload bundle format. The current version is `3`.
- **Compression Method (16-bit unsigned int)**:
This field indicates the compression method used. The value corresponds to either `zlib` or `zstd`, represented as a 16-bit unsigned integer cast from the LLVM compression enumeration.
-- **Total File Size (32-bit unsigned int)**:
+- **Total File Size (unsigned int, 32-bit in v2, 64-bit in v3)**:
This is the total size (in bytes) of the file, including the header. Available in version 2 and above.
-- **Uncompressed Binary Size (32-bit unsigned int)**:
+- **Uncompressed Binary Size (unsigned int, 32-bit in v2, 64-bit in v3)**:
This is the size (in bytes) of the binary data before it was compressed.
- **Hash (64-bit unsigned int)**:
@@ -542,4 +542,4 @@ The compressed offload bundle begins with a header followed by the compressed bi
- **Compressed Data**:
The actual compressed binary data follows the header. Its size can be inferred from the total size of the file minus the header size.
- > **Note**: Version 3 of the format is under development. It uses 64-bit fields for Total File Size and Uncompressed Binary Size to support files larger than 4GB. To experiment with version 3, set the environment variable `COMPRESSED_BUNDLE_FORMAT_VERSION=3`. This support is experimental and not recommended for production use.
+ > **Note**: Version 3 is now the default format. For backward compatibility with older HIP runtimes that support version 2 only, set the environment variable `COMPRESSED_BUNDLE_FORMAT_VERSION=2`.
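
For illustration, a minimal sketch of reading the version 3 header fields described above. The field order follows the documentation; the little-endian byte order, the tight packing, and the function name are assumptions made for this example, not taken from the actual bundler implementation:

    import struct

    def read_ccob_header_v3(data: bytes) -> dict:
        if data[:4] != b"CCOB":
            raise ValueError("not a compressed offload bundle")
        # 16-bit version and compression method follow the 4-byte magic.
        version, method = struct.unpack_from("<HH", data, 4)
        # In v3 the total file size and uncompressed size are 64-bit (32-bit in v2).
        total_size, uncompressed_size, digest = struct.unpack_from("<QQQ", data, 8)
        return {
            "version": version,                 # 3 for this layout
            "compression_method": method,       # zlib or zstd, as a raw enum value
            "total_file_size": total_size,
            "uncompressed_size": uncompressed_size,
            "hash": digest,
            "payload_offset": 32,               # compressed data follows the header
        }
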
diff --git a/clang/docs/OpenMPSupport.rst b/clang/docs/OpenMPSupport.rst
index 58cd10a..670eb82 100644
--- a/clang/docs/OpenMPSupport.rst
+++ b/clang/docs/OpenMPSupport.rst
@@ -256,7 +256,7 @@ implementation.
+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
| device | device-specific environment variables | :none:`unclaimed` | |
+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | omp_target_is_accessible routine | :none:`unclaimed` | |
+| device | omp_target_is_accessible routine | :part:`In Progress` | https://github.com/llvm/llvm-project/pull/138294 |
+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
| device | omp_get_mapped_ptr routine | :good:`done` | D141545 |
+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
@@ -449,7 +449,7 @@ implementation.
+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
| Clarifications to Fortran map semantics | :none:`unclaimed` | :none:`unclaimed` | |
+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| default clause at target construct | :part:`In Progress` | :none:`unclaimed` | |
+| default clause at target construct | :part:`In Progress` | :none:`unclaimed` | |
+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
| ref count update use_device_{ptr, addr} | :none:`unclaimed` | :none:`unclaimed` | |
+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
@@ -476,6 +476,8 @@ implementation.
+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
| Local clause on declare target | :part:`In Progress` | :none:`unclaimed` | |
+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| Changes to omp_target_is_accessible | :part:`In Progress` | :part:`In Progress` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
OpenMP Extensions
=================
diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst
index 0e9fcaa..a8b7a29 100644
--- a/clang/docs/ReleaseNotes.rst
+++ b/clang/docs/ReleaseNotes.rst
@@ -70,12 +70,16 @@ ABI Changes in This Version
AST Dumping Potentially Breaking Changes
----------------------------------------
+- How nested name specifiers are dumped and printed has changed, following the corresponding clang AST changes.
Clang Frontend Potentially Breaking Changes
-------------------------------------------
Clang Python Bindings Potentially Breaking Changes
--------------------------------------------------
+- TypeKind ``ELABORATED`` is no longer used, following the clang AST changes that
+  removed ElaboratedTypes. The value is now unused, and existing users should
+  expect the former underlying type to be reported directly instead.
What's New in Clang |release|?
==============================
@@ -111,13 +115,13 @@ Non-comprehensive list of changes in this release
-------------------------------------------------
- Added ``__builtin_elementwise_minnumnum`` and ``__builtin_elementwise_maxnumnum``.
-- Trapping UBSan (e.g. ``-fsanitize-trap=undefined``) now emits a string describing the reason for
- trapping into the generated debug info. This feature allows debuggers (e.g. LLDB) to display
- the reason for trapping if the trap is reached. The string is currently encoded in the debug
- info as an artificial frame that claims to be inlined at the trap location. The function used
- for the artificial frame is an artificial function whose name encodes the reason for trapping.
- The encoding used is currently the same as ``__builtin_verbose_trap`` but might change in the future.
- This feature is enabled by default but can be disabled by compiling with
+- Trapping UBSan (e.g. ``-fsanitize-trap=undefined``) now emits a string describing the reason for
+ trapping into the generated debug info. This feature allows debuggers (e.g. LLDB) to display
+ the reason for trapping if the trap is reached. The string is currently encoded in the debug
+ info as an artificial frame that claims to be inlined at the trap location. The function used
+ for the artificial frame is an artificial function whose name encodes the reason for trapping.
+ The encoding used is currently the same as ``__builtin_verbose_trap`` but might change in the future.
+ This feature is enabled by default but can be disabled by compiling with
``-fno-sanitize-annotate-debug-info-traps``.
New Compiler Flags
@@ -143,7 +147,7 @@ Improvements to Clang's diagnostics
Moved the warning for a missing (though implied) attribute on a redeclaration into this group.
Added a new warning in this group for the case where the attribute is missing/implicit on
an override of a virtual method.
-- Fixed fix-it hint for fold expressions. Clang now correctly places the suggested right
+- Fixed fix-it hint for fold expressions. Clang now correctly places the suggested right
parenthesis when diagnosing malformed fold expressions. (#GH151787)
Improvements to Clang's time-trace
@@ -184,9 +188,13 @@ Bug Fixes to C++ Support
(``[[assume(expr)]]``) creates temporary objects.
- Fix the dynamic_cast to final class optimization to correctly handle
casts that are guaranteed to fail (#GH137518).
+- Fix bug rejecting partial specialization of variable templates with auto NTTPs (#GH118190).
Bug Fixes to AST Handling
^^^^^^^^^^^^^^^^^^^^^^^^^
+- Fix incorrect name qualifiers applied to alias CTAD. (#GH136624)
+- Fixed ElaboratedTypes appearing within NestedNameSpecifier, which was not a
+  legal representation; this can no longer occur now that ElaboratedTypes have been removed. (#GH43179) (#GH68670) (#GH92757)
Miscellaneous Bug Fixes
^^^^^^^^^^^^^^^^^^^^^^^
@@ -227,6 +235,9 @@ RISC-V Support
- Add support for `__attribute__((interrupt("rnmi")))` to be used with the `Smrnmi` extension.
With this the `Smrnmi` extension is fully supported.
+- Add `-march=unset` to clear any previous `-march=` value. The ISA string will
+  then be computed from `-mcpu` or the platform default.
+
CUDA/HIP Language Changes
^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -256,6 +267,8 @@ Fixed Point Support in Clang
AST Matchers
------------
+- Removed the elaboratedType matchers and made the related nested name specifier
+  changes, following the corresponding changes in the clang AST.
- Ensure ``hasBitWidth`` doesn't crash on bit widths that are dependent on template
parameters.
@@ -280,7 +293,7 @@ New features
Crash and bug fixes
^^^^^^^^^^^^^^^^^^^
-- Fixed a crash in the static analyzer that when the expression in an
+- Fixed a crash in the static analyzer that occurred when the expression in an
``[[assume(expr)]]`` attribute was enclosed in parentheses. (#GH151529)
Improvements
diff --git a/clang/include/clang/AST/ASTConcept.h b/clang/include/clang/AST/ASTConcept.h
index 7ccac44..72da005 100644
--- a/clang/include/clang/AST/ASTConcept.h
+++ b/clang/include/clang/AST/ASTConcept.h
@@ -15,7 +15,7 @@
#define LLVM_CLANG_AST_ASTCONCEPT_H
#include "clang/AST/DeclarationName.h"
-#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/NestedNameSpecifierBase.h"
#include "clang/AST/TemplateBase.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/UnsignedOrNone.h"
@@ -177,12 +177,7 @@ public:
SourceLocation getLocation() const { return getConceptNameLoc(); }
- SourceLocation getBeginLoc() const LLVM_READONLY {
- // Note that if the qualifier is null the template KW must also be null.
- if (auto QualifierLoc = getNestedNameSpecifierLoc())
- return QualifierLoc.getBeginLoc();
- return getConceptNameInfo().getBeginLoc();
- }
+ SourceLocation getBeginLoc() const LLVM_READONLY;
SourceLocation getEndLoc() const LLVM_READONLY {
return getTemplateArgsAsWritten() &&
diff --git a/clang/include/clang/AST/ASTContext.h b/clang/include/clang/AST/ASTContext.h
index 2b7ba41..7c2566a 100644
--- a/clang/include/clang/AST/ASTContext.h
+++ b/clang/include/clang/AST/ASTContext.h
@@ -233,10 +233,11 @@ class ASTContext : public RefCountedBase<ASTContext> {
mutable llvm::ContextualFoldingSet<TemplateSpecializationType, ASTContext&>
TemplateSpecializationTypes;
mutable llvm::FoldingSet<ParenType> ParenTypes{GeneralTypesLog2InitSize};
+ mutable llvm::FoldingSet<TagTypeFoldingSetPlaceholder> TagTypes;
+ mutable llvm::FoldingSet<FoldingSetPlaceholder<UnresolvedUsingType>>
+ UnresolvedUsingTypes;
mutable llvm::FoldingSet<UsingType> UsingTypes;
- mutable llvm::FoldingSet<TypedefType> TypedefTypes;
- mutable llvm::FoldingSet<ElaboratedType> ElaboratedTypes{
- GeneralTypesLog2InitSize};
+ mutable llvm::FoldingSet<FoldingSetPlaceholder<TypedefType>> TypedefTypes;
mutable llvm::FoldingSet<DependentNameType> DependentNameTypes;
mutable llvm::DenseMap<llvm::FoldingSetNodeID,
DependentTemplateSpecializationType *>
@@ -282,11 +283,11 @@ class ASTContext : public RefCountedBase<ASTContext> {
llvm::to_underlying(PredefinedSugarType::Kind::Last) + 1>
PredefinedSugarTypes{};
- /// The set of nested name specifiers.
+ /// Internal storage for NestedNameSpecifiers.
///
/// This set is managed by the NestedNameSpecifier class.
- mutable llvm::FoldingSet<NestedNameSpecifier> NestedNameSpecifiers;
- mutable NestedNameSpecifier *GlobalNestedNameSpecifier = nullptr;
+ mutable llvm::FoldingSet<NamespaceAndPrefixStorage>
+ NamespaceAndPrefixStorages;
/// A cache mapping from RecordDecls to ASTRecordLayouts.
///
@@ -1386,8 +1387,6 @@ private:
/// Return a type with extended qualifiers.
QualType getExtQualType(const Type *Base, Qualifiers Quals) const;
- QualType getTypeDeclTypeSlow(const TypeDecl *Decl) const;
-
QualType getPipeType(QualType T, bool ReadOnly) const;
public:
@@ -1630,7 +1629,7 @@ public:
/// Return the uniqued reference to the type for a member pointer to
/// the specified type in the specified nested name.
- QualType getMemberPointerType(QualType T, NestedNameSpecifier *Qualifier,
+ QualType getMemberPointerType(QualType T, NestedNameSpecifier Qualifier,
const CXXRecordDecl *Cls) const;
/// Return a non-unique reference to the type for a variable array of
@@ -1767,34 +1766,53 @@ private:
bool IsCanon = false) const;
public:
+ QualType getTypeDeclType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier,
+ const TypeDecl *Decl) const;
+
/// Return the unique reference to the type for the specified type
/// declaration.
- QualType getTypeDeclType(const TypeDecl *Decl,
- const TypeDecl *PrevDecl = nullptr) const {
- assert(Decl && "Passed null for Decl param");
- if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
-
- if (PrevDecl) {
- assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl");
- Decl->TypeForDecl = PrevDecl->TypeForDecl;
- return QualType(PrevDecl->TypeForDecl, 0);
- }
+ QualType getTypeDeclType(const TypeDecl *Decl) const;
- return getTypeDeclTypeSlow(Decl);
- }
+ /// Use the normal 'getFooBarType' constructors to obtain these types.
+ QualType getTypeDeclType(const TagDecl *) const = delete;
+ QualType getTypeDeclType(const TypedefDecl *) const = delete;
+ QualType getTypeDeclType(const TypeAliasDecl *) const = delete;
+ QualType getTypeDeclType(const UnresolvedUsingTypenameDecl *) const = delete;
+
+ CanQualType getCanonicalTypeDeclType(const TypeDecl *TD) const;
- QualType getUsingType(const UsingShadowDecl *Found,
- QualType Underlying) const;
+ QualType getUsingType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier, const UsingShadowDecl *D,
+ QualType UnderlyingType = QualType()) const;
/// Return the unique reference to the type for the specified
/// typedef-name decl.
- QualType getTypedefType(const TypedefNameDecl *Decl,
- QualType Underlying = QualType()) const;
+  /// FIXME: TypeMatchesDeclOrNone is a workaround for a serialization issue:
+  /// the decl's underlying type might not be available yet.
+ QualType getTypedefType(
+ ElaboratedTypeKeyword Keyword, NestedNameSpecifier Qualifier,
+ const TypedefNameDecl *Decl, QualType UnderlyingType = QualType(),
+ std::optional<bool> TypeMatchesDeclOrNone = std::nullopt) const;
+
+ CanQualType getCanonicalTagType(const TagDecl *TD) const;
+ QualType getTagType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier, const TagDecl *TD,
+ bool OwnsTag) const;
- QualType getRecordType(const RecordDecl *Decl) const;
+private:
+ UnresolvedUsingType *getUnresolvedUsingTypeInternal(
+ ElaboratedTypeKeyword Keyword, NestedNameSpecifier Qualifier,
+ const UnresolvedUsingTypenameDecl *D, void *InsertPos,
+ const Type *CanonicalType) const;
- QualType getEnumType(const EnumDecl *Decl) const;
+ TagType *getTagTypeInternal(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier, const TagDecl *Tag,
+ bool OwnsTag, bool IsInjected,
+ const Type *CanonicalType,
+ bool WithFoldingSetNode) const;
+public:
/// Compute BestType and BestPromotionType for an enum based on the highest
/// number of negative and positive bits of its elements.
/// Returns true if enum width is too large.
@@ -1843,10 +1861,11 @@ public:
return MembersRepresentableByInt;
}
- QualType
- getUnresolvedUsingType(const UnresolvedUsingTypenameDecl *Decl) const;
-
- QualType getInjectedClassNameType(CXXRecordDecl *Decl, QualType TST) const;
+ CanQualType
+ getCanonicalUnresolvedUsingType(const UnresolvedUsingTypenameDecl *D) const;
+ QualType getUnresolvedUsingType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier,
+ const UnresolvedUsingTypenameDecl *D) const;
QualType getAttributedType(attr::Kind attrKind, QualType modifiedType,
QualType equivalentType,
@@ -1886,18 +1905,20 @@ public:
TemplateName T, ArrayRef<TemplateArgument> CanonicalArgs) const;
QualType
- getTemplateSpecializationType(TemplateName T,
+ getTemplateSpecializationType(ElaboratedTypeKeyword Keyword, TemplateName T,
ArrayRef<TemplateArgument> SpecifiedArgs,
ArrayRef<TemplateArgument> CanonicalArgs,
QualType Underlying = QualType()) const;
QualType
- getTemplateSpecializationType(TemplateName T,
+ getTemplateSpecializationType(ElaboratedTypeKeyword Keyword, TemplateName T,
ArrayRef<TemplateArgumentLoc> SpecifiedArgs,
ArrayRef<TemplateArgument> CanonicalArgs,
QualType Canon = QualType()) const;
TypeSourceInfo *getTemplateSpecializationTypeInfo(
+ ElaboratedTypeKeyword Keyword, SourceLocation ElaboratedKeywordLoc,
+ NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKeywordLoc,
TemplateName T, SourceLocation TLoc,
const TemplateArgumentListInfo &SpecifiedArgs,
ArrayRef<TemplateArgument> CanonicalArgs,
@@ -1908,11 +1929,8 @@ public:
QualType getMacroQualifiedType(QualType UnderlyingTy,
const IdentifierInfo *MacroII) const;
- QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
- NestedNameSpecifier *NNS, QualType NamedType,
- TagDecl *OwnedTagDecl = nullptr) const;
QualType getDependentNameType(ElaboratedTypeKeyword Keyword,
- NestedNameSpecifier *NNS,
+ NestedNameSpecifier NNS,
const IdentifierInfo *Name) const;
QualType getDependentTemplateSpecializationType(
@@ -1999,21 +2017,17 @@ public:
QualType getUnconstrainedType(QualType T) const;
/// C++17 deduced class template specialization type.
- QualType getDeducedTemplateSpecializationType(TemplateName Template,
+ QualType getDeducedTemplateSpecializationType(ElaboratedTypeKeyword Keyword,
+ TemplateName Template,
QualType DeducedType,
bool IsDependent) const;
private:
- QualType getDeducedTemplateSpecializationTypeInternal(TemplateName Template,
- QualType DeducedType,
- bool IsDependent,
- QualType Canon) const;
+ QualType getDeducedTemplateSpecializationTypeInternal(
+ ElaboratedTypeKeyword Keyword, TemplateName Template,
+ QualType DeducedType, bool IsDependent, QualType Canon) const;
public:
- /// Return the unique reference to the type for the specified TagDecl
- /// (struct/union/class/enum) decl.
- QualType getTagDeclType(const TagDecl *Decl) const;
-
/// Return the unique type for "size_t" (C99 7.17), defined in
/// <stddef.h>.
///
@@ -2089,7 +2103,9 @@ public:
/// if it hasn't yet been built.
QualType getRawCFConstantStringType() const {
if (CFConstantStringTypeDecl)
- return getTypedefType(CFConstantStringTypeDecl);
+ return getTypedefType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt,
+ CFConstantStringTypeDecl);
return QualType();
}
void setCFConstantStringType(QualType T);
@@ -2186,10 +2202,11 @@ public:
}
#include "clang/Basic/BuiltinTemplates.inc"
- /// Retrieve the Objective-C "instancetype" type, if already known;
- /// otherwise, returns a NULL type;
+ /// Retrieve the Objective-C "instancetype" type.
QualType getObjCInstanceType() {
- return getTypeDeclType(getObjCInstanceTypeDecl());
+ return getTypedefType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt,
+ getObjCInstanceTypeDecl());
}
/// Retrieve the typedef declaration corresponding to the Objective-C
@@ -2202,7 +2219,8 @@ public:
/// Retrieve the C FILE type.
QualType getFILEType() const {
if (FILEDecl)
- return getTypeDeclType(FILEDecl);
+ return getTypeDeclType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, FILEDecl);
return QualType();
}
@@ -2214,7 +2232,8 @@ public:
/// Retrieve the C jmp_buf type.
QualType getjmp_bufType() const {
if (jmp_bufDecl)
- return getTypeDeclType(jmp_bufDecl);
+ return getTypeDeclType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, jmp_bufDecl);
return QualType();
}
@@ -2226,7 +2245,8 @@ public:
/// Retrieve the C sigjmp_buf type.
QualType getsigjmp_bufType() const {
if (sigjmp_bufDecl)
- return getTypeDeclType(sigjmp_bufDecl);
+ return getTypeDeclType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, sigjmp_bufDecl);
return QualType();
}
@@ -2238,12 +2258,13 @@ public:
/// Retrieve the C ucontext_t type.
QualType getucontext_tType() const {
if (ucontext_tDecl)
- return getTypeDeclType(ucontext_tDecl);
+ return getTypeDeclType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, ucontext_tDecl);
return QualType();
}
/// The result type of logical operations, '<', '>', '!=', etc.
- QualType getLogicalOperationType() const {
+ CanQualType getLogicalOperationType() const {
return getLangOpts().CPlusPlus ? BoolTy : IntTy;
}
@@ -2308,7 +2329,8 @@ public:
/// This is set up lazily, by Sema. \c id is always a (typedef for a)
/// pointer type, a pointer to a struct.
QualType getObjCIdType() const {
- return getTypeDeclType(getObjCIdDecl());
+ return getTypedefType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, getObjCIdDecl());
}
/// Retrieve the typedef corresponding to the predefined 'SEL' type
@@ -2318,7 +2340,8 @@ public:
/// Retrieve the type that corresponds to the predefined Objective-C
/// 'SEL' type.
QualType getObjCSelType() const {
- return getTypeDeclType(getObjCSelDecl());
+ return getTypedefType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, getObjCSelDecl());
}
PointerAuthQualifier getObjCMemberSelTypePtrAuth();
@@ -2332,7 +2355,8 @@ public:
/// This is set up lazily, by Sema. \c Class is always a (typedef for a)
/// pointer type, a pointer to a struct.
QualType getObjCClassType() const {
- return getTypeDeclType(getObjCClassDecl());
+ return getTypedefType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, getObjCClassDecl());
}
/// Retrieve the Objective-C class declaration corresponding to
@@ -2351,7 +2375,8 @@ public:
/// type of 'BOOL' type.
QualType getBOOLType() const {
- return getTypeDeclType(getBOOLDecl());
+ return getTypedefType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, getBOOLDecl());
}
/// Retrieve the type of the Objective-C \c Protocol class.
@@ -2365,7 +2390,8 @@ public:
/// Retrieve the type of the \c __builtin_va_list type.
QualType getBuiltinVaListType() const {
- return getTypeDeclType(getBuiltinVaListDecl());
+ return getTypedefType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, getBuiltinVaListDecl());
}
/// Retrieve the C type declaration corresponding to the predefined
@@ -2379,16 +2405,17 @@ public:
/// Retrieve the type of the \c __builtin_ms_va_list type.
QualType getBuiltinMSVaListType() const {
- return getTypeDeclType(getBuiltinMSVaListDecl());
+ return getTypedefType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, getBuiltinMSVaListDecl());
}
/// Retrieve the implicitly-predeclared 'struct _GUID' declaration.
TagDecl *getMSGuidTagDecl() const { return MSGuidTagDecl; }
/// Retrieve the implicitly-predeclared 'struct _GUID' type.
- QualType getMSGuidType() const {
+ CanQualType getMSGuidType() const {
assert(MSGuidTagDecl && "asked for GUID type but MS extensions disabled");
- return getTagDeclType(MSGuidTagDecl);
+ return getCanonicalTagType(MSGuidTagDecl);
}
/// Retrieve the implicitly-predeclared 'struct type_info' declaration.
@@ -2477,7 +2504,7 @@ public:
UnresolvedSetIterator End) const;
TemplateName getAssumedTemplateName(DeclarationName Name) const;
- TemplateName getQualifiedTemplateName(NestedNameSpecifier *NNS,
+ TemplateName getQualifiedTemplateName(NestedNameSpecifier Qualifier,
bool TemplateKeyword,
TemplateName Template) const;
TemplateName
@@ -2919,32 +2946,6 @@ public:
/// Determine if two types are similar, ignoring only CVR qualifiers.
bool hasCvrSimilarType(QualType T1, QualType T2);
- /// Retrieves the "canonical" nested name specifier for a
- /// given nested name specifier.
- ///
- /// The canonical nested name specifier is a nested name specifier
- /// that uniquely identifies a type or namespace within the type
- /// system. For example, given:
- ///
- /// \code
- /// namespace N {
- /// struct S {
- /// template<typename T> struct X { typename T* type; };
- /// };
- /// }
- ///
- /// template<typename T> struct Y {
- /// typename N::S::X<T>::type member;
- /// };
- /// \endcode
- ///
- /// Here, the nested-name-specifier for N::S::X<T>:: will be
- /// S::X<template-param-0-0>, since 'S' and 'X' are uniquely defined
- /// by declarations in the type system and the canonical type for
- /// the template type parameter 'T' is template-param-0-0.
- NestedNameSpecifier *
- getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const;
-
/// Retrieves the default calling convention for the current context.
///
/// The context's default calling convention may differ from the current
@@ -3158,7 +3159,7 @@ public:
mergeExceptionSpecs(FunctionProtoType::ExceptionSpecInfo ESI1,
FunctionProtoType::ExceptionSpecInfo ESI2,
SmallVectorImpl<QualType> &ExceptionTypeStorage,
- bool AcceptDependent);
+ bool AcceptDependent) const;
// For two "same" types, return a type which has
// the common sugar between them. If Unqualified is true,
@@ -3166,7 +3167,7 @@ public:
// The result will drop the qualifiers which do not occur
// in both types.
QualType getCommonSugaredType(QualType X, QualType Y,
- bool Unqualified = false);
+ bool Unqualified = false) const;
private:
// Helper for integer ordering
@@ -3184,23 +3185,11 @@ public:
bool propertyTypesAreCompatible(QualType, QualType);
bool typesAreBlockPointerCompatible(QualType, QualType);
- bool isObjCIdType(QualType T) const {
- if (const auto *ET = dyn_cast<ElaboratedType>(T))
- T = ET->getNamedType();
- return T == getObjCIdType();
- }
+ bool isObjCIdType(QualType T) const { return T == getObjCIdType(); }
- bool isObjCClassType(QualType T) const {
- if (const auto *ET = dyn_cast<ElaboratedType>(T))
- T = ET->getNamedType();
- return T == getObjCClassType();
- }
+ bool isObjCClassType(QualType T) const { return T == getObjCClassType(); }
- bool isObjCSelType(QualType T) const {
- if (const auto *ET = dyn_cast<ElaboratedType>(T))
- T = ET->getNamedType();
- return T == getObjCSelType();
- }
+ bool isObjCSelType(QualType T) const { return T == getObjCSelType(); }
bool ObjCQualifiedIdTypesAreCompatible(const ObjCObjectPointerType *LHS,
const ObjCObjectPointerType *RHS,
diff --git a/clang/include/clang/AST/ASTImporter.h b/clang/include/clang/AST/ASTImporter.h
index c40b926..4a0ca45 100644
--- a/clang/include/clang/AST/ASTImporter.h
+++ b/clang/include/clang/AST/ASTImporter.h
@@ -404,7 +404,7 @@ class TypeSourceInfo;
///
/// \returns The equivalent nested-name-specifier in the "to"
/// context, or the import error.
- llvm::Expected<NestedNameSpecifier *> Import(NestedNameSpecifier *FromNNS);
+ llvm::Expected<NestedNameSpecifier> Import(NestedNameSpecifier FromNNS);
/// Import the given nested-name-specifier-loc from the "from"
/// context into the "to" context.
diff --git a/clang/include/clang/AST/ASTNodeTraverser.h b/clang/include/clang/AST/ASTNodeTraverser.h
index 8ebabb2..d9dc829 100644
--- a/clang/include/clang/AST/ASTNodeTraverser.h
+++ b/clang/include/clang/AST/ASTNodeTraverser.h
@@ -394,12 +394,14 @@ public:
}
void VisitMemberPointerType(const MemberPointerType *T) {
// FIXME: Provide a NestedNameSpecifier visitor.
- NestedNameSpecifier *Qualifier = T->getQualifier();
- if (NestedNameSpecifier::SpecifierKind K = Qualifier->getKind();
- K == NestedNameSpecifier::TypeSpec)
- Visit(Qualifier->getAsType());
+ NestedNameSpecifier Qualifier = T->getQualifier();
+ if (NestedNameSpecifier::Kind K = Qualifier.getKind();
+ K == NestedNameSpecifier::Kind::Type)
+ Visit(Qualifier.getAsType());
if (T->isSugared())
- Visit(T->getMostRecentCXXRecordDecl()->getTypeForDecl());
+ Visit(cast<MemberPointerType>(T->getCanonicalTypeUnqualified())
+ ->getQualifier()
+ .getAsType());
Visit(T->getPointeeType());
}
void VisitArrayType(const ArrayType *T) { Visit(T->getElementType()); }
@@ -510,7 +512,7 @@ public:
}
void VisitMemberPointerTypeLoc(MemberPointerTypeLoc TL) {
// FIXME: Provide NestedNamespecifierLoc visitor.
- Visit(TL.getQualifierLoc().getTypeLoc());
+ Visit(TL.getQualifierLoc().castAsTypeLoc());
}
void VisitVariableArrayTypeLoc(VariableArrayTypeLoc TL) {
Visit(TL.getSizeExpr());
@@ -772,17 +774,16 @@ public:
}
void VisitUsingShadowDecl(const UsingShadowDecl *D) {
- if (auto *TD = dyn_cast<TypeDecl>(D->getUnderlyingDecl()))
- Visit(TD->getTypeForDecl());
+ Visit(D->getTargetDecl());
}
void VisitFriendDecl(const FriendDecl *D) {
if (D->getFriendType()) {
// Traverse any CXXRecordDecl owned by this type, since
// it will not be in the parent context:
- if (auto *ET = D->getFriendType()->getType()->getAs<ElaboratedType>())
- if (auto *TD = ET->getOwnedTagDecl())
- Visit(TD);
+ if (auto *TT = D->getFriendType()->getType()->getAs<TagType>())
+ if (TT->isTagOwned())
+ Visit(TT->getOriginalDecl());
} else {
Visit(D->getFriendDecl());
}
diff --git a/clang/include/clang/AST/ASTTypeTraits.h b/clang/include/clang/AST/ASTTypeTraits.h
index 3988a15..6f40705 100644
--- a/clang/include/clang/AST/ASTTypeTraits.h
+++ b/clang/include/clang/AST/ASTTypeTraits.h
@@ -307,7 +307,7 @@ public:
/// For nodes which represent textual entities in the source code,
/// return their SourceRange. For all other nodes, return SourceRange().
- SourceRange getSourceRange() const;
+ SourceRange getSourceRange(bool IncludeQualifier = false) const;
/// @{
/// Imposes an order on \c DynTypedNode.
@@ -336,9 +336,9 @@ public:
NodeKind)) {
auto NNSLA = getUnchecked<NestedNameSpecifierLoc>();
auto NNSLB = Other.getUnchecked<NestedNameSpecifierLoc>();
- return std::make_pair(NNSLA.getNestedNameSpecifier(),
+ return std::make_pair(NNSLA.getNestedNameSpecifier().getAsVoidPointer(),
NNSLA.getOpaqueData()) <
- std::make_pair(NNSLB.getNestedNameSpecifier(),
+ std::make_pair(NNSLB.getNestedNameSpecifier().getAsVoidPointer(),
NNSLB.getOpaqueData());
}
@@ -393,8 +393,9 @@ public:
if (ASTNodeKind::getFromNodeKind<NestedNameSpecifierLoc>().isSame(
Val.NodeKind)) {
auto NNSL = Val.getUnchecked<NestedNameSpecifierLoc>();
- return llvm::hash_combine(NNSL.getNestedNameSpecifier(),
- NNSL.getOpaqueData());
+ return llvm::hash_combine(
+ NNSL.getNestedNameSpecifier().getAsVoidPointer(),
+ NNSL.getOpaqueData());
}
assert(Val.getMemoizationData());
@@ -539,8 +540,8 @@ struct DynTypedNode::BaseConverter<
: public DynCastPtrConverter<T, Attr> {};
template <>
-struct DynTypedNode::BaseConverter<
- NestedNameSpecifier, void> : public PtrConverter<NestedNameSpecifier> {};
+struct DynTypedNode::BaseConverter<NestedNameSpecifier, void>
+ : public ValueConverter<NestedNameSpecifier> {};
template <>
struct DynTypedNode::BaseConverter<
diff --git a/clang/include/clang/AST/AbstractBasicReader.h b/clang/include/clang/AST/AbstractBasicReader.h
index 0a2db9e..26052b8 100644
--- a/clang/include/clang/AST/AbstractBasicReader.h
+++ b/clang/include/clang/AST/AbstractBasicReader.h
@@ -197,7 +197,7 @@ public:
unsigned int_ = asImpl().readUInt32();
Decl *decl = asImpl().template readDeclAs<Decl>();
if (auto *recordDecl = dyn_cast<CXXRecordDecl>(decl))
- elemTy = getASTContext().getRecordType(recordDecl);
+ elemTy = getASTContext().getCanonicalTagType(recordDecl);
else
elemTy = cast<ValueDecl>(decl)->getType();
path.push_back(
@@ -252,39 +252,34 @@ public:
return EffectConditionExpr{asImpl().readExprRef()};
}
- NestedNameSpecifier *readNestedNameSpecifier() {
+ NestedNameSpecifier readNestedNameSpecifier() {
auto &ctx = getASTContext();
// We build this up iteratively.
- NestedNameSpecifier *cur = nullptr;
+ NestedNameSpecifier cur = std::nullopt;
uint32_t depth = asImpl().readUInt32();
for (uint32_t i = 0; i != depth; ++i) {
auto kind = asImpl().readNestedNameSpecifierKind();
switch (kind) {
- case NestedNameSpecifier::Identifier:
- cur = NestedNameSpecifier::Create(ctx, cur,
- asImpl().readIdentifier());
+ case NestedNameSpecifier::Kind::Namespace:
+ cur =
+ NestedNameSpecifier(ctx, asImpl().readNamespaceBaseDeclRef(), cur);
continue;
-
- case NestedNameSpecifier::Namespace:
- cur = NestedNameSpecifier::Create(ctx, cur,
- asImpl().readNamespaceBaseDeclRef());
+ case NestedNameSpecifier::Kind::Type:
+ assert(!cur);
+ cur = NestedNameSpecifier(asImpl().readQualType().getTypePtr());
continue;
-
- case NestedNameSpecifier::TypeSpec:
- cur = NestedNameSpecifier::Create(ctx, cur,
- asImpl().readQualType().getTypePtr());
+ case NestedNameSpecifier::Kind::Global:
+ assert(!cur);
+ cur = NestedNameSpecifier::getGlobal();
continue;
-
- case NestedNameSpecifier::Global:
- cur = NestedNameSpecifier::GlobalSpecifier(ctx);
- continue;
-
- case NestedNameSpecifier::Super:
- cur = NestedNameSpecifier::SuperSpecifier(ctx,
- asImpl().readCXXRecordDeclRef());
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
+ assert(!cur);
+ cur = NestedNameSpecifier(asImpl().readCXXRecordDeclRef());
continue;
+ case NestedNameSpecifier::Kind::Null:
+ llvm_unreachable("unexpected null nested name specifier");
}
llvm_unreachable("bad nested name specifier kind");
}
diff --git a/clang/include/clang/AST/AbstractBasicWriter.h b/clang/include/clang/AST/AbstractBasicWriter.h
index c105bbb..d41e655 100644
--- a/clang/include/clang/AST/AbstractBasicWriter.h
+++ b/clang/include/clang/AST/AbstractBasicWriter.h
@@ -181,7 +181,7 @@ public:
const Decl *baseOrMember = elem.getAsBaseOrMember().getPointer();
if (const auto *recordDecl = dyn_cast<CXXRecordDecl>(baseOrMember)) {
asImpl().writeDeclRef(recordDecl);
- elemTy = ctx.getRecordType(recordDecl);
+ elemTy = ctx.getCanonicalTagType(recordDecl);
} else {
const auto *valueDecl = cast<ValueDecl>(baseOrMember);
asImpl().writeDeclRef(valueDecl);
@@ -229,42 +229,43 @@ public:
asImpl().writeExprRef(CE.getCondition());
}
- void writeNestedNameSpecifier(NestedNameSpecifier *NNS) {
+ void writeNestedNameSpecifier(NestedNameSpecifier NNS) {
// Nested name specifiers usually aren't too long. I think that 8 would
// typically accommodate the vast majority.
- SmallVector<NestedNameSpecifier *, 8> nestedNames;
+ SmallVector<NestedNameSpecifier, 8> nestedNames;
// Push each of the NNS's onto a stack for serialization in reverse order.
while (NNS) {
nestedNames.push_back(NNS);
- NNS = NNS->getPrefix();
+ NNS = NNS.getKind() == NestedNameSpecifier::Kind::Namespace
+ ? NNS.getAsNamespaceAndPrefix().Prefix
+ : std::nullopt;
}
asImpl().writeUInt32(nestedNames.size());
while (!nestedNames.empty()) {
NNS = nestedNames.pop_back_val();
- NestedNameSpecifier::SpecifierKind kind = NNS->getKind();
+ NestedNameSpecifier::Kind kind = NNS.getKind();
asImpl().writeNestedNameSpecifierKind(kind);
switch (kind) {
- case NestedNameSpecifier::Identifier:
- asImpl().writeIdentifier(NNS->getAsIdentifier());
+ case NestedNameSpecifier::Kind::Namespace:
+ asImpl().writeNamespaceBaseDeclRef(
+ NNS.getAsNamespaceAndPrefix().Namespace);
continue;
-
- case NestedNameSpecifier::Namespace:
- asImpl().writeNamespaceBaseDeclRef(NNS->getAsNamespace());
- continue;
-
- case NestedNameSpecifier::TypeSpec:
- asImpl().writeQualType(QualType(NNS->getAsType(), 0));
+ case NestedNameSpecifier::Kind::Type:
+ asImpl().writeQualType(QualType(NNS.getAsType(), 0));
continue;
- case NestedNameSpecifier::Global:
+ case NestedNameSpecifier::Kind::Global:
// Don't need to write an associated value.
continue;
- case NestedNameSpecifier::Super:
- asImpl().writeDeclRef(NNS->getAsRecordDecl());
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
+ asImpl().writeDeclRef(NNS.getAsMicrosoftSuper());
continue;
+
+ case NestedNameSpecifier::Kind::Null:
+ llvm_unreachable("unexpected null nested name specifier");
}
llvm_unreachable("bad nested name specifier kind");
}
diff --git a/clang/include/clang/AST/CanonicalType.h b/clang/include/clang/AST/CanonicalType.h
index 35db689..b5a4e94e13 100644
--- a/clang/include/clang/AST/CanonicalType.h
+++ b/clang/include/clang/AST/CanonicalType.h
@@ -453,7 +453,7 @@ template<>
struct CanProxyAdaptor<MemberPointerType>
: public CanProxyBase<MemberPointerType> {
LLVM_CLANG_CANPROXY_TYPE_ACCESSOR(getPointeeType)
- LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(NestedNameSpecifier *, getQualifier)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(NestedNameSpecifier, getQualifier)
LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(const CXXRecordDecl *,
getMostRecentCXXRecordDecl)
};
@@ -551,21 +551,18 @@ struct CanProxyAdaptor<UnaryTransformType>
template<>
struct CanProxyAdaptor<TagType> : public CanProxyBase<TagType> {
- LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(TagDecl *, getDecl)
- LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isBeingDefined)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(TagDecl *, getOriginalDecl)
};
template<>
struct CanProxyAdaptor<RecordType> : public CanProxyBase<RecordType> {
- LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(RecordDecl *, getDecl)
- LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isBeingDefined)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(RecordDecl *, getOriginalDecl)
LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, hasConstFields)
};
template<>
struct CanProxyAdaptor<EnumType> : public CanProxyBase<EnumType> {
- LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(EnumDecl *, getDecl)
- LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isBeingDefined)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(EnumDecl *, getOriginalDecl)
};
template<>
diff --git a/clang/include/clang/AST/Comment.h b/clang/include/clang/AST/Comment.h
index dd99067..5ba95c8 100644
--- a/clang/include/clang/AST/Comment.h
+++ b/clang/include/clang/AST/Comment.h
@@ -19,6 +19,7 @@
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
namespace clang {
class Decl;
@@ -119,6 +120,11 @@ protected:
LLVM_PREFERRED_TYPE(CommandTraits::KnownCommandIDs)
unsigned CommandID : CommandInfo::NumCommandIDBits;
+
+ /// Describes the syntax that was used in a documentation command.
+ /// Contains values from CommandMarkerKind enum.
+ LLVM_PREFERRED_TYPE(CommandMarkerKind)
+ unsigned CommandMarker : 1;
};
enum { NumInlineCommandCommentBits = NumInlineContentCommentBits + 3 +
CommandInfo::NumCommandIDBits };
@@ -347,6 +353,16 @@ public:
InlineCommandCommentBits.RenderKind = llvm::to_underlying(RK);
InlineCommandCommentBits.CommandID = CommandID;
}
+ InlineCommandComment(SourceLocation LocBegin, SourceLocation LocEnd,
+ unsigned CommandID, InlineCommandRenderKind RK,
+ CommandMarkerKind CommandMarker, ArrayRef<Argument> Args)
+ : InlineContentComment(CommentKind::InlineCommandComment, LocBegin,
+ LocEnd),
+ Args(Args) {
+ InlineCommandCommentBits.RenderKind = llvm::to_underlying(RK);
+ InlineCommandCommentBits.CommandID = CommandID;
+ InlineCommandCommentBits.CommandMarker = llvm::to_underlying(CommandMarker);
+ }
static bool classof(const Comment *C) {
return C->getCommentKind() == CommentKind::InlineCommandComment;
@@ -384,6 +400,11 @@ public:
SourceRange getArgRange(unsigned Idx) const {
return Args[Idx].Range;
}
+
+ CommandMarkerKind getCommandMarker() const {
+ return static_cast<CommandMarkerKind>(
+ InlineCommandCommentBits.CommandMarker);
+ }
};
/// Abstract class for opening and closing HTML tags. HTML tags are always
diff --git a/clang/include/clang/AST/CommentSema.h b/clang/include/clang/AST/CommentSema.h
index 916d794..3169e2b 100644
--- a/clang/include/clang/AST/CommentSema.h
+++ b/clang/include/clang/AST/CommentSema.h
@@ -131,6 +131,7 @@ public:
InlineCommandComment *actOnInlineCommand(SourceLocation CommandLocBegin,
SourceLocation CommandLocEnd,
unsigned CommandID,
+ CommandMarkerKind CommandMarker,
ArrayRef<Comment::Argument> Args);
InlineContentComment *actOnUnknownCommand(SourceLocation LocBegin,
diff --git a/clang/include/clang/AST/Decl.h b/clang/include/clang/AST/Decl.h
index 08fe1f8..400d368 100644
--- a/clang/include/clang/AST/Decl.h
+++ b/clang/include/clang/AST/Decl.h
@@ -20,7 +20,7 @@
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/ExternalASTSource.h"
-#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/NestedNameSpecifierBase.h"
#include "clang/AST/Redeclarable.h"
#include "clang/AST/Type.h"
#include "clang/Basic/AddressSpaces.h"
@@ -833,9 +833,9 @@ public:
/// Retrieve the nested-name-specifier that qualifies the name of this
/// declaration, if it was present in the source.
- NestedNameSpecifier *getQualifier() const {
+ NestedNameSpecifier getQualifier() const {
return hasExtInfo() ? getExtInfo()->QualifierLoc.getNestedNameSpecifier()
- : nullptr;
+ : std::nullopt;
}
/// Retrieve the nested-name-specifier (with source-location
@@ -3528,8 +3528,14 @@ public:
// check out ASTContext::getTypeDeclType or one of
// ASTContext::getTypedefType, ASTContext::getRecordType, etc. if you
// already know the specific kind of node this is.
- const Type *getTypeForDecl() const { return TypeForDecl; }
- void setTypeForDecl(const Type *TD) { TypeForDecl = TD; }
+ const Type *getTypeForDecl() const {
+ assert(!isa<TagDecl>(this));
+ return TypeForDecl;
+ }
+ void setTypeForDecl(const Type *TD) {
+ assert(!isa<TagDecl>(this));
+ TypeForDecl = TD;
+ }
SourceLocation getBeginLoc() const LLVM_READONLY { return LocStart; }
void setLocStart(SourceLocation L) { LocStart = L; }
@@ -3635,6 +3641,10 @@ public:
return isTransparentTagSlow();
}
+ // These types are created lazily, use the ASTContext methods to obtain them.
+ const Type *getTypeForDecl() const = delete;
+ void setTypeForDecl(const Type *TD) = delete;
+
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) {
@@ -3754,14 +3764,6 @@ protected:
/// True if this decl is currently being defined.
void setBeingDefined(bool V = true) { TagDeclBits.IsBeingDefined = V; }
- /// Indicates whether it is possible for declarations of this kind
- /// to have an out-of-date definition.
- ///
- /// This option is only enabled when modules are enabled.
- void setMayHaveOutOfDateDef(bool V = true) {
- TagDeclBits.MayHaveOutOfDateDef = V;
- }
-
public:
friend class ASTDeclReader;
friend class ASTDeclWriter;
@@ -3842,12 +3844,6 @@ public:
TagDeclBits.IsFreeStanding = isFreeStanding;
}
- /// Indicates whether it is possible for declarations of this kind
- /// to have an out-of-date definition.
- ///
- /// This option is only enabled when modules are enabled.
- bool mayHaveOutOfDateDef() const { return TagDeclBits.MayHaveOutOfDateDef; }
-
/// Whether this declaration declares a type that is
/// dependent, i.e., a type that somehow depends on template
/// parameters.
@@ -3888,6 +3884,19 @@ public:
/// the struct/union/class/enum.
TagDecl *getDefinition() const;
+ TagDecl *getDefinitionOrSelf() const {
+ if (TagDecl *Def = getDefinition())
+ return Def;
+ return const_cast<TagDecl *>(this);
+ }
+
+ /// Determines whether this entity is in the process of being defined.
+ bool isEntityBeingDefined() const {
+ if (const TagDecl *Def = getDefinition())
+ return Def->isBeingDefined();
+ return false;
+ }
+
StringRef getKindName() const {
return TypeWithKeyword::getTagTypeKindName(getTagKind());
}
@@ -3934,9 +3943,9 @@ public:
/// Retrieve the nested-name-specifier that qualifies the name of this
/// declaration, if it was present in the source.
- NestedNameSpecifier *getQualifier() const {
+ NestedNameSpecifier getQualifier() const {
return hasExtInfo() ? getExtInfo()->QualifierLoc.getNestedNameSpecifier()
- : nullptr;
+ : std::nullopt;
}
/// Retrieve the nested-name-specifier (with source-location
@@ -3958,6 +3967,10 @@ public:
return getExtInfo()->TemplParamLists[i];
}
+ // These types are created lazily, use the ASTContext methods to obtain them.
+ const Type *getTypeForDecl() const = delete;
+ void setTypeForDecl(const Type *TD) = delete;
+
using TypeDecl::printName;
void printName(raw_ostream &OS, const PrintingPolicy &Policy) const override;
@@ -4087,6 +4100,10 @@ public:
return cast_or_null<EnumDecl>(TagDecl::getDefinition());
}
+ EnumDecl *getDefinitionOrSelf() const {
+ return cast_or_null<EnumDecl>(TagDecl::getDefinitionOrSelf());
+ }
+
static EnumDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation StartLoc, SourceLocation IdLoc,
IdentifierInfo *Id, EnumDecl *PrevDecl,
@@ -4469,6 +4486,10 @@ public:
return cast_or_null<RecordDecl>(TagDecl::getDefinition());
}
+ RecordDecl *getDefinitionOrSelf() const {
+ return cast_or_null<RecordDecl>(TagDecl::getDefinitionOrSelf());
+ }
+
/// Returns whether this record is a union, or contains (at any nesting level)
/// a union member. This is used by CMSE to warn about possible information
/// leaks.
@@ -5299,6 +5320,8 @@ void Redeclarable<decl_type>::setPreviousDecl(decl_type *PrevDecl) {
/// We use this function to break a cycle between the inline definitions in
/// Type.h and Decl.h.
inline bool IsEnumDeclComplete(EnumDecl *ED) {
+ if (const auto *Def = ED->getDefinition())
+ return Def->isComplete();
return ED->isComplete();
}
diff --git a/clang/include/clang/AST/DeclBase.h b/clang/include/clang/AST/DeclBase.h
index dd67ebc..c6326a8 100644
--- a/clang/include/clang/AST/DeclBase.h
+++ b/clang/include/clang/AST/DeclBase.h
@@ -410,9 +410,6 @@ protected:
virtual ~Decl();
- /// Update a potentially out-of-date declaration.
- void updateOutOfDate(IdentifierInfo &II) const;
-
Linkage getCachedLinkage() const {
return static_cast<Linkage>(CacheValidAndLinkage);
}
@@ -625,6 +622,12 @@ public:
void setReferenced(bool R = true) { Referenced = R; }
+ /// When doing manipulations which might change the computed linkage,
+ /// such as changing the DeclContext after the declaration has already been
+ /// used, invalidating the cache will make sure its linkage will be
+ /// recomputed.
+ void invalidateCachedLinkage() { setCachedLinkage(Linkage::Invalid); }
+
/// Whether this declaration is a top-level declaration (function,
/// global variable, etc.) that is lexically inside an objc container
/// definition.
@@ -1564,13 +1567,6 @@ protected:
LLVM_PREFERRED_TYPE(bool)
uint64_t IsFreeStanding : 1;
- /// Indicates whether it is possible for declarations of this kind
- /// to have an out-of-date definition.
- ///
- /// This option is only enabled when modules are enabled.
- LLVM_PREFERRED_TYPE(bool)
- uint64_t MayHaveOutOfDateDef : 1;
-
/// Has the full definition of this type been required by a use somewhere in
/// the TU.
LLVM_PREFERRED_TYPE(bool)
diff --git a/clang/include/clang/AST/DeclCXX.h b/clang/include/clang/AST/DeclCXX.h
index 33ae3d6..1d2ef0f 100644
--- a/clang/include/clang/AST/DeclCXX.h
+++ b/clang/include/clang/AST/DeclCXX.h
@@ -545,20 +545,6 @@ public:
return const_cast<CXXRecordDecl*>(this)->getMostRecentDecl();
}
- CXXRecordDecl *getMostRecentNonInjectedDecl() {
- CXXRecordDecl *Recent = getMostRecentDecl();
- while (Recent->isInjectedClassName()) {
- // FIXME: Does injected class name need to be in the redeclarations chain?
- assert(Recent->getPreviousDecl());
- Recent = Recent->getPreviousDecl();
- }
- return Recent;
- }
-
- const CXXRecordDecl *getMostRecentNonInjectedDecl() const {
- return const_cast<CXXRecordDecl*>(this)->getMostRecentNonInjectedDecl();
- }
-
CXXRecordDecl *getDefinition() const {
// We only need an update if we don't already know which
// declaration is the definition.
@@ -566,13 +552,18 @@ public:
return DD ? DD->Definition : nullptr;
}
+ CXXRecordDecl *getDefinitionOrSelf() const {
+ if (auto *Def = getDefinition())
+ return Def;
+ return const_cast<CXXRecordDecl *>(this);
+ }
+
bool hasDefinition() const { return DefinitionData || dataPtr(); }
static CXXRecordDecl *Create(const ASTContext &C, TagKind TK, DeclContext *DC,
SourceLocation StartLoc, SourceLocation IdLoc,
IdentifierInfo *Id,
- CXXRecordDecl *PrevDecl = nullptr,
- bool DelayTypeCreation = false);
+ CXXRecordDecl *PrevDecl = nullptr);
static CXXRecordDecl *CreateLambda(const ASTContext &C, DeclContext *DC,
TypeSourceInfo *Info, SourceLocation Loc,
unsigned DependencyKind, bool IsGeneric,
@@ -1903,6 +1894,20 @@ public:
/// \endcode
bool isInjectedClassName() const;
+  /// Determines whether this declaration is canonically of an injected
+ /// class type. These are non-instantiated class template patterns, which can
+ /// be used from within the class template itself. For example:
+ ///
+ /// \code
+ /// template<class T> struct C {
+ /// C *t; // Here `C *` is a pointer to an injected class type.
+ /// };
+ /// \endcode
+ bool hasInjectedClassType() const;
+
+ CanQualType
+ getCanonicalTemplateSpecializationType(const ASTContext &Ctx) const;
+
// Determine whether this type is an Interface Like type for
// __interface inheritance purposes.
bool isInterfaceLike() const;
@@ -3131,7 +3136,7 @@ public:
/// Retrieve the nested-name-specifier that qualifies the
/// name of the namespace.
- NestedNameSpecifier *getQualifier() const {
+ NestedNameSpecifier getQualifier() const {
return QualifierLoc.getNestedNameSpecifier();
}
@@ -3252,7 +3257,7 @@ public:
/// Retrieve the nested-name-specifier that qualifies the
/// name of the namespace.
- NestedNameSpecifier *getQualifier() const {
+ NestedNameSpecifier getQualifier() const {
return QualifierLoc.getNestedNameSpecifier();
}
@@ -3614,7 +3619,7 @@ public:
NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }
/// Retrieve the nested-name-specifier that qualifies the name.
- NestedNameSpecifier *getQualifier() const {
+ NestedNameSpecifier getQualifier() const {
return QualifierLoc.getNestedNameSpecifier();
}
@@ -3804,13 +3809,11 @@ public:
/// The source location of the 'enum' keyword.
SourceLocation getEnumLoc() const { return EnumLocation; }
void setEnumLoc(SourceLocation L) { EnumLocation = L; }
- NestedNameSpecifier *getQualifier() const {
+ NestedNameSpecifier getQualifier() const {
return getQualifierLoc().getNestedNameSpecifier();
}
NestedNameSpecifierLoc getQualifierLoc() const {
- if (auto ETL = EnumType->getTypeLoc().getAs<ElaboratedTypeLoc>())
- return ETL.getQualifierLoc();
- return NestedNameSpecifierLoc();
+ return getEnumTypeLoc().getPrefix();
}
// Returns the "qualifier::Name" part as a TypeLoc.
TypeLoc getEnumTypeLoc() const {
@@ -3970,7 +3973,7 @@ public:
NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }
/// Retrieve the nested-name-specifier that qualifies the name.
- NestedNameSpecifier *getQualifier() const {
+ NestedNameSpecifier getQualifier() const {
return QualifierLoc.getNestedNameSpecifier();
}
@@ -4060,7 +4063,7 @@ public:
NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }
/// Retrieve the nested-name-specifier that qualifies the name.
- NestedNameSpecifier *getQualifier() const {
+ NestedNameSpecifier getQualifier() const {
return QualifierLoc.getNestedNameSpecifier();
}
diff --git a/clang/include/clang/AST/DeclObjC.h b/clang/include/clang/AST/DeclObjC.h
index 9014d76..2541edb 100644
--- a/clang/include/clang/AST/DeclObjC.h
+++ b/clang/include/clang/AST/DeclObjC.h
@@ -643,6 +643,9 @@ public:
/// from the explicitly-specified bound.
SourceLocation getColonLoc() const { return ColonLoc; }
+ using TypeDecl::getTypeForDecl;
+ using TypeDecl::setTypeForDecl;
+
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == ObjCTypeParam; }
diff --git a/clang/include/clang/AST/DeclTemplate.h b/clang/include/clang/AST/DeclTemplate.h
index 32de203..f298ed8 100644
--- a/clang/include/clang/AST/DeclTemplate.h
+++ b/clang/include/clang/AST/DeclTemplate.h
@@ -1898,14 +1898,14 @@ public:
void getNameForDiagnostic(raw_ostream &OS, const PrintingPolicy &Policy,
bool Qualified) const override;
- // FIXME: This is broken. CXXRecordDecl::getMostRecentDecl() returns a
- // different "most recent" declaration from this function for the same
- // declaration, because we don't override getMostRecentDeclImpl(). But
- // it's not clear that we should override that, because the most recent
- // declaration as a CXXRecordDecl sometimes is the injected-class-name.
ClassTemplateSpecializationDecl *getMostRecentDecl() {
return cast<ClassTemplateSpecializationDecl>(
- getMostRecentNonInjectedDecl());
+ CXXRecordDecl::getMostRecentDecl());
+ }
+
+ ClassTemplateSpecializationDecl *getDefinitionOrSelf() const {
+ return cast<ClassTemplateSpecializationDecl>(
+ CXXRecordDecl::getDefinitionOrSelf());
}
/// Retrieve the template that this specialization specializes.
@@ -2123,10 +2123,13 @@ class ClassTemplatePartialSpecializationDecl
llvm::PointerIntPair<ClassTemplatePartialSpecializationDecl *, 1, bool>
InstantiatedFromMember;
+ mutable CanQualType CanonInjectedTST;
+
ClassTemplatePartialSpecializationDecl(
ASTContext &Context, TagKind TK, DeclContext *DC, SourceLocation StartLoc,
SourceLocation IdLoc, TemplateParameterList *Params,
ClassTemplateDecl *SpecializedTemplate, ArrayRef<TemplateArgument> Args,
+ CanQualType CanonInjectedTST,
ClassTemplatePartialSpecializationDecl *PrevDecl);
ClassTemplatePartialSpecializationDecl(ASTContext &C)
@@ -2143,7 +2146,7 @@ public:
Create(ASTContext &Context, TagKind TK, DeclContext *DC,
SourceLocation StartLoc, SourceLocation IdLoc,
TemplateParameterList *Params, ClassTemplateDecl *SpecializedTemplate,
- ArrayRef<TemplateArgument> Args, QualType CanonInjectedType,
+ ArrayRef<TemplateArgument> Args, CanQualType CanonInjectedTST,
ClassTemplatePartialSpecializationDecl *PrevDecl);
static ClassTemplatePartialSpecializationDecl *
@@ -2160,12 +2163,6 @@ public:
return TemplateParams;
}
- /// Get the template argument list of the template parameter list.
- ArrayRef<TemplateArgument>
- getInjectedTemplateArgs(const ASTContext &Context) const {
- return getTemplateParameters()->getInjectedTemplateArgs(Context);
- }
-
/// \brief All associated constraints of this partial specialization,
/// including the requires clause and any constraints derived from
/// constrained-parameters.
@@ -2247,14 +2244,10 @@ public:
return First->InstantiatedFromMember.setInt(true);
}
- /// Retrieves the injected specialization type for this partial
- /// specialization. This is not the same as the type-decl-type for
- /// this partial specialization, which is an InjectedClassNameType.
- QualType getInjectedSpecializationType() const {
- assert(getTypeForDecl() && "partial specialization has no type set!");
- return cast<InjectedClassNameType>(getTypeForDecl())
- ->getInjectedSpecializationType();
- }
+ /// Retrieves the canonical injected specialization type for this partial
+ /// specialization.
+ CanQualType
+ getCanonicalInjectedSpecializationType(const ASTContext &Ctx) const;
SourceRange getSourceRange() const override LLVM_READONLY;
@@ -2289,8 +2282,8 @@ protected:
llvm::FoldingSetVector<ClassTemplatePartialSpecializationDecl>
PartialSpecializations;
- /// The injected-class-name type for this class template.
- QualType InjectedClassNameType;
+ /// The canonical injected template specialization type for this declaration.
+ CanQualType CanonInjectedTST;
Common() = default;
};
@@ -2427,7 +2420,7 @@ public:
findPartialSpecInstantiatedFromMember(
ClassTemplatePartialSpecializationDecl *D);
- /// Retrieve the template specialization type of the
+ /// Retrieve the canonical template specialization type of the
/// injected-class-name for this class template.
///
/// The injected-class-name for a class template \c X is \c
@@ -2441,7 +2434,8 @@ public:
/// typedef array this_type; // "array" is equivalent to "array<T, N>"
/// };
/// \endcode
- QualType getInjectedClassNameSpecialization();
+ CanQualType
+ getCanonicalInjectedSpecializationType(const ASTContext &Ctx) const;
using spec_iterator = SpecIterator<ClassTemplateSpecializationDecl>;
using spec_range = llvm::iterator_range<spec_iterator>;
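A minimal caller-side sketch (a hypothetical illustration, not part of this patch) of the migration implied by the DeclTemplate.h hunks above: the QualType-returning accessors are removed, and the replacement returns a CanQualType and takes the ASTContext. The function name noteInjectedType is invented; the clang entities are the ones declared above.

#include "clang/AST/DeclTemplate.h"

static void noteInjectedType(const clang::ASTContext &Ctx,
                             const clang::ClassTemplateDecl *CTD) {
  // Previously: QualType T = CTD->getInjectedClassNameSpecialization();
  clang::CanQualType T = CTD->getCanonicalInjectedSpecializationType(Ctx);
  (void)T; // A real caller would use T, e.g. for comparison or profiling.
}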
diff --git a/clang/include/clang/AST/DependenceFlags.h b/clang/include/clang/AST/DependenceFlags.h
index bdcaabc..c439525 100644
--- a/clang/include/clang/AST/DependenceFlags.h
+++ b/clang/include/clang/AST/DependenceFlags.h
@@ -293,7 +293,7 @@ inline TypeDependence toSemanticDependence(TypeDependence D) {
}
inline NestedNameSpecifierDependence
-toNestedNameSpecifierDependendence(TypeDependence D) {
+toNestedNameSpecifierDependence(TypeDependence D) {
return Dependence(D).nestedNameSpecifier();
}
diff --git a/clang/include/clang/AST/DynamicRecursiveASTVisitor.h b/clang/include/clang/AST/DynamicRecursiveASTVisitor.h
index 703cca2..7b5bdca 100644
--- a/clang/include/clang/AST/DynamicRecursiveASTVisitor.h
+++ b/clang/include/clang/AST/DynamicRecursiveASTVisitor.h
@@ -134,8 +134,7 @@ public:
/// Recursively visit a C++ nested-name-specifier.
///
/// \returns false if the visitation was terminated early, true otherwise.
- virtual bool
- TraverseNestedNameSpecifier(MaybeConst<NestedNameSpecifier> *NNS);
+ virtual bool TraverseNestedNameSpecifier(NestedNameSpecifier NNS);
/// Recursively visit a C++ nested-name-specifier with location
/// information.
@@ -181,14 +180,14 @@ public:
///
/// \returns false if the visitation was terminated early, true
/// otherwise (including when the argument is a Null type).
- virtual bool TraverseType(QualType T);
+ virtual bool TraverseType(QualType T, bool TraverseQualifier = true);
/// Recursively visit a type with location, by dispatching to
/// Traverse*TypeLoc() based on the argument type's getTypeClass() property.
///
/// \returns false if the visitation was terminated early, true
/// otherwise (including when the argument is a Null type location).
- virtual bool TraverseTypeLoc(TypeLoc TL);
+ virtual bool TraverseTypeLoc(TypeLoc TL, bool TraverseQualifier = true);
/// Recursively visit an Objective-C protocol reference with location
/// information.
@@ -273,7 +272,8 @@ public:
#define ABSTRACT_TYPE(CLASS, BASE)
#define TYPE(CLASS, BASE) \
bool WalkUpFrom##CLASS##Type(MaybeConst<CLASS##Type> *T); \
- virtual bool Traverse##CLASS##Type(MaybeConst<CLASS##Type> *T);
+ virtual bool Traverse##CLASS##Type(MaybeConst<CLASS##Type> *T, \
+ bool TraverseQualifier = true);
#include "clang/AST/TypeNodes.inc"
#define TYPE(CLASS, BASE) \
@@ -283,7 +283,8 @@ public:
// TypeLocs.
#define ABSTRACT_TYPELOC(CLASS, BASE)
#define TYPELOC(CLASS, BASE) \
- virtual bool Traverse##CLASS##TypeLoc(CLASS##TypeLoc TL);
+ virtual bool Traverse##CLASS##TypeLoc(CLASS##TypeLoc TL, \
+ bool TraverseQualifier);
#include "clang/AST/TypeLocNodes.def"
#define TYPELOC(CLASS, BASE) \
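A hypothetical visitor, shown only to illustrate the TraverseQualifier parameter added to the Traverse*Type/TypeLoc entry points above; MyVisitor and its (empty) behavior are invented.

#include "clang/AST/DynamicRecursiveASTVisitor.h"

struct MyVisitor : clang::DynamicRecursiveASTVisitor {
  bool TraverseTypeLoc(clang::TypeLoc TL, bool TraverseQualifier) override {
    // A real visitor would do its own work here before delegating; both
    // arguments must be forwarded so qualifier traversal stays configurable.
    return DynamicRecursiveASTVisitor::TraverseTypeLoc(TL, TraverseQualifier);
  }
};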
diff --git a/clang/include/clang/AST/Expr.h b/clang/include/clang/AST/Expr.h
index 237b3b2..77d0912 100644
--- a/clang/include/clang/AST/Expr.h
+++ b/clang/include/clang/AST/Expr.h
@@ -1369,7 +1369,7 @@ public:
/// If the name was qualified, retrieves the nested-name-specifier
/// that precedes the name. Otherwise, returns NULL.
- NestedNameSpecifier *getQualifier() const {
+ NestedNameSpecifier getQualifier() const {
return getQualifierLoc().getNestedNameSpecifier();
}
@@ -3398,7 +3398,7 @@ public:
/// If the member name was qualified, retrieves the
/// nested-name-specifier that precedes the member name. Otherwise, returns
/// NULL.
- NestedNameSpecifier *getQualifier() const {
+ NestedNameSpecifier getQualifier() const {
return getQualifierLoc().getNestedNameSpecifier();
}
@@ -3548,6 +3548,7 @@ public:
QualType T, ExprValueKind VK, Expr *init, bool fileScope)
: Expr(CompoundLiteralExprClass, T, VK, OK_Ordinary),
LParenLoc(lparenloc), TInfoAndScope(tinfo, fileScope), Init(init) {
+ assert(Init && "Init is a nullptr");
setDependence(computeDependence(this));
}
@@ -3577,19 +3578,11 @@ public:
APValue &getStaticValue() const;
SourceLocation getBeginLoc() const LLVM_READONLY {
- // FIXME: Init should never be null.
- if (!Init)
- return SourceLocation();
if (LParenLoc.isInvalid())
return Init->getBeginLoc();
return LParenLoc;
}
- SourceLocation getEndLoc() const LLVM_READONLY {
- // FIXME: Init should never be null.
- if (!Init)
- return SourceLocation();
- return Init->getEndLoc();
- }
+ SourceLocation getEndLoc() const LLVM_READONLY { return Init->getEndLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == CompoundLiteralExprClass;
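A minimal sketch, assuming a DeclRefExpr *DRE and a PrintingPolicy supplied by the caller, of the value-semantic getQualifier() introduced in the Expr.h hunks above: absence is now tested through the specifier's boolean conversion rather than a null-pointer comparison. printQualifier is an invented name.

#include "clang/AST/Expr.h"
#include "clang/AST/PrettyPrinter.h"
#include "llvm/Support/raw_ostream.h"

static void printQualifier(const clang::DeclRefExpr *DRE,
                           const clang::PrintingPolicy &Policy) {
  if (clang::NestedNameSpecifier Qual = DRE->getQualifier())
    Qual.print(llvm::outs(), Policy);
}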
diff --git a/clang/include/clang/AST/ExprCXX.h b/clang/include/clang/AST/ExprCXX.h
index 7a26934..5508890 100644
--- a/clang/include/clang/AST/ExprCXX.h
+++ b/clang/include/clang/AST/ExprCXX.h
@@ -2781,7 +2781,7 @@ public:
/// If the member name was qualified, retrieves the
/// nested-name-specifier that precedes the member name. Otherwise, returns
/// null.
- NestedNameSpecifier *getQualifier() const {
+ NestedNameSpecifier getQualifier() const {
return QualifierLoc.getNestedNameSpecifier();
}
@@ -3222,7 +3222,7 @@ public:
SourceLocation getNameLoc() const { return NameInfo.getLoc(); }
/// Fetches the nested-name qualifier, if one was given.
- NestedNameSpecifier *getQualifier() const {
+ NestedNameSpecifier getQualifier() const {
return QualifierLoc.getNestedNameSpecifier();
}
@@ -3540,7 +3540,7 @@ public:
/// Retrieve the nested-name-specifier that qualifies this
/// declaration.
- NestedNameSpecifier *getQualifier() const {
+ NestedNameSpecifier getQualifier() const {
return QualifierLoc.getNestedNameSpecifier();
}
@@ -3955,7 +3955,7 @@ public:
}
/// Retrieve the nested-name-specifier that qualifies the member name.
- NestedNameSpecifier *getQualifier() const {
+ NestedNameSpecifier getQualifier() const {
return QualifierLoc.getNestedNameSpecifier();
}
diff --git a/clang/include/clang/AST/JSONNodeDumper.h b/clang/include/clang/AST/JSONNodeDumper.h
index 570662b..8640780 100644
--- a/clang/include/clang/AST/JSONNodeDumper.h
+++ b/clang/include/clang/AST/JSONNodeDumper.h
@@ -240,7 +240,6 @@ public:
void VisitInjectedClassNameType(const InjectedClassNameType *ICNT);
void VisitObjCInterfaceType(const ObjCInterfaceType *OIT);
void VisitPackExpansionType(const PackExpansionType *PET);
- void VisitElaboratedType(const ElaboratedType *ET);
void VisitMacroQualifiedType(const MacroQualifiedType *MQT);
void VisitMemberPointerType(const MemberPointerType *MPT);
diff --git a/clang/include/clang/AST/NestedNameSpecifier.h b/clang/include/clang/AST/NestedNameSpecifier.h
index 1614f9d..f198a8b 100644
--- a/clang/include/clang/AST/NestedNameSpecifier.h
+++ b/clang/include/clang/AST/NestedNameSpecifier.h
@@ -6,507 +6,266 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines the NestedNameSpecifier class, which represents
-// a C++ nested-name-specifier.
+// This file completes the definition of the NestedNameSpecifier class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_NESTEDNAMESPECIFIER_H
#define LLVM_CLANG_AST_NESTEDNAMESPECIFIER_H
-#include "clang/AST/DependenceFlags.h"
-#include "clang/Basic/Diagnostic.h"
-#include "clang/Basic/SourceLocation.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/NestedNameSpecifierBase.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/TypeLoc.h"
#include "llvm/ADT/DenseMapInfo.h"
-#include "llvm/ADT/FoldingSet.h"
-#include "llvm/ADT/PointerIntPair.h"
-#include "llvm/Support/Compiler.h"
-#include <cstdint>
-#include <cstdlib>
-#include <utility>
namespace clang {
-class ASTContext;
-class CXXRecordDecl;
-class IdentifierInfo;
-class LangOptions;
-class NamespaceBaseDecl;
-struct PrintingPolicy;
-class Type;
-class TypeLoc;
-
-/// Represents a C++ nested name specifier, such as
-/// "\::std::vector<int>::".
-///
-/// C++ nested name specifiers are the prefixes to qualified
-/// names. For example, "foo::" in "foo::x" is a nested name
-/// specifier. Nested name specifiers are made up of a sequence of
-/// specifiers, each of which can be a namespace, type, identifier
-/// (for dependent names), decltype specifier, or the global specifier ('::').
-/// The last two specifiers can only appear at the start of a
-/// nested-namespace-specifier.
-class NestedNameSpecifier : public llvm::FoldingSetNode {
- /// Enumeration describing
- enum StoredSpecifierKind {
- StoredIdentifier = 0,
- StoredDecl = 1,
- StoredTypeSpec = 2
- };
-
- /// The nested name specifier that precedes this nested name
- /// specifier.
- ///
- /// The pointer is the nested-name-specifier that precedes this
- /// one. The integer stores one of the first four values of type
- /// SpecifierKind.
- llvm::PointerIntPair<NestedNameSpecifier *, 2, StoredSpecifierKind> Prefix;
-
- /// The last component in the nested name specifier, which
- /// can be an identifier, a declaration, or a type.
- ///
- /// When the pointer is NULL, this specifier represents the global
- /// specifier '::'. Otherwise, the pointer is one of
- /// IdentifierInfo*, Namespace*, or Type*, depending on the kind of
- /// specifier as encoded within the prefix.
- void* Specifier = nullptr;
-
-public:
- /// The kind of specifier that completes this nested name
- /// specifier.
- enum SpecifierKind {
- /// An identifier, stored as an IdentifierInfo*.
- Identifier,
-
- /// A namespace-like entity, stored as a NamespaceBaseDecl*.
- Namespace,
-
- /// A type, stored as a Type*.
- TypeSpec,
-
- /// The global specifier '::'. There is no stored value.
- Global,
-
- /// Microsoft's '__super' specifier, stored as a CXXRecordDecl* of
- /// the class it appeared in.
- Super
- };
-
-private:
- /// Builds the global specifier.
- NestedNameSpecifier() : Prefix(nullptr, StoredIdentifier) {}
-
- /// Copy constructor used internally to clone nested name
- /// specifiers.
- NestedNameSpecifier(const NestedNameSpecifier &Other) = default;
-
- /// Either find or insert the given nested name specifier
- /// mockup in the given context.
- static NestedNameSpecifier *FindOrInsert(const ASTContext &Context,
- const NestedNameSpecifier &Mockup);
-
-public:
- NestedNameSpecifier &operator=(const NestedNameSpecifier &) = delete;
-
- /// Builds a specifier combining a prefix and an identifier.
- ///
- /// The prefix must be dependent, since nested name specifiers
- /// referencing an identifier are only permitted when the identifier
- /// cannot be resolved.
- static NestedNameSpecifier *Create(const ASTContext &Context,
- NestedNameSpecifier *Prefix,
- const IdentifierInfo *II);
-
- /// Builds a nested name specifier that names a namespace or namespace alias.
- static NestedNameSpecifier *Create(const ASTContext &Context,
- NestedNameSpecifier *Prefix,
- const NamespaceBaseDecl *NS);
-
- /// Builds a nested name specifier that names a type.
- static NestedNameSpecifier *
- Create(const ASTContext &Context, NestedNameSpecifier *Prefix, const Type *T);
-
- /// Builds a specifier that consists of just an identifier.
- ///
- /// The nested-name-specifier is assumed to be dependent, but has no
- /// prefix because the prefix is implied by something outside of the
- /// nested name specifier, e.g., in "x->Base::f", the "x" has a dependent
- /// type.
- static NestedNameSpecifier *Create(const ASTContext &Context,
- const IdentifierInfo *II);
-
- /// Returns the nested name specifier representing the global
- /// scope.
- static NestedNameSpecifier *GlobalSpecifier(const ASTContext &Context);
-
- /// Returns the nested name specifier representing the __super scope
- /// for the given CXXRecordDecl.
- static NestedNameSpecifier *SuperSpecifier(const ASTContext &Context,
- CXXRecordDecl *RD);
-
- /// Return the prefix of this nested name specifier.
- ///
- /// The prefix contains all of the parts of the nested name
- /// specifier that precede this current specifier. For example, for a
- /// nested name specifier that represents "foo::bar::", the current
- /// specifier will contain "bar::" and the prefix will contain
- /// "foo::".
- NestedNameSpecifier *getPrefix() const { return Prefix.getPointer(); }
-
- /// Determine what kind of nested name specifier is stored.
- SpecifierKind getKind() const;
-
- /// Retrieve the identifier stored in this nested name
- /// specifier.
- IdentifierInfo *getAsIdentifier() const {
- if (Prefix.getInt() == StoredIdentifier)
- return (IdentifierInfo *)Specifier;
-
- return nullptr;
+auto NestedNameSpecifier::getKind() const -> Kind {
+ if (!isStoredKind()) {
+ switch (getFlagKind()) {
+ case FlagKind::Null:
+ return Kind::Null;
+ case FlagKind::Global:
+ return Kind::Global;
+ case FlagKind::Invalid:
+ llvm_unreachable("use of invalid NestedNameSpecifier");
+ }
+ llvm_unreachable("unhandled FlagKind");
}
+ switch (auto [K, Ptr] = getStored(); K) {
+ case StoredKind::Type:
+ return Kind::Type;
+ case StoredKind::NamespaceWithGlobal:
+ case StoredKind::NamespaceWithNamespace:
+ return Kind::Namespace;
+ case StoredKind::NamespaceOrSuper:
+ switch (static_cast<const Decl *>(Ptr)->getKind()) {
+ case Decl::Namespace:
+ case Decl::NamespaceAlias:
+ return Kind::Namespace;
+ case Decl::CXXRecord:
+ case Decl::ClassTemplateSpecialization:
+ case Decl::ClassTemplatePartialSpecialization:
+ return Kind::MicrosoftSuper;
+ default:
+ llvm_unreachable("unexpected decl kind");
+ }
+ }
+ llvm_unreachable("unknown StoredKind");
+}
- /// Retrieve the namespace or namespace alias stored in this nested name
- /// specifier.
- NamespaceBaseDecl *getAsNamespace() const;
-
- /// Retrieve the record declaration stored in this nested name
- /// specifier.
- CXXRecordDecl *getAsRecordDecl() const;
-
- /// Retrieve the type stored in this nested name specifier.
- const Type *getAsType() const {
- if (Prefix.getInt() == StoredTypeSpec)
- return (const Type *)Specifier;
+NestedNameSpecifier::NestedNameSpecifier(const Type *T)
+ : NestedNameSpecifier({StoredKind::Type, T}) {
+ assert(getKind() == Kind::Type);
+}
- return nullptr;
+auto NestedNameSpecifier::MakeNamespacePtrKind(
+ const ASTContext &Ctx, const NamespaceBaseDecl *Namespace,
+ NestedNameSpecifier Prefix) -> PtrKind {
+ switch (Prefix.getKind()) {
+ case Kind::Null:
+ return {StoredKind::NamespaceOrSuper, Namespace};
+ case Kind::Global:
+ return {StoredKind::NamespaceWithGlobal, Namespace};
+ case Kind::Namespace:
+ return {StoredKind::NamespaceWithNamespace,
+ MakeNamespaceAndPrefixStorage(Ctx, Namespace, Prefix)};
+ case Kind::MicrosoftSuper:
+ case Kind::Type:
+ llvm_unreachable("invalid prefix for namespace");
}
+ llvm_unreachable("unhandled kind");
+}
- /// Fully translate this nested name specifier to a type.
- /// Unlike getAsType, this will convert this entire nested
- /// name specifier chain into its equivalent type.
- const Type *translateToType(const ASTContext &Context) const;
+/// Builds a nested name specifier that names a namespace.
+NestedNameSpecifier::NestedNameSpecifier(const ASTContext &Ctx,
+ const NamespaceBaseDecl *Namespace,
+ NestedNameSpecifier Prefix)
+ : NestedNameSpecifier(MakeNamespacePtrKind(Ctx, Namespace, Prefix)) {
+ assert(getKind() == Kind::Namespace);
+}
- NestedNameSpecifierDependence getDependence() const;
+/// Builds a nested name specifier that names a class through Microsoft's
+/// __super specifier.
+NestedNameSpecifier::NestedNameSpecifier(CXXRecordDecl *RD)
+ : NestedNameSpecifier({StoredKind::NamespaceOrSuper, RD}) {
+ assert(getKind() == Kind::MicrosoftSuper);
+}
- /// Whether this nested name specifier refers to a dependent
- /// type or not.
- bool isDependent() const;
+CXXRecordDecl *NestedNameSpecifier::getAsRecordDecl() const {
+ switch (getKind()) {
+ case Kind::MicrosoftSuper:
+ return getAsMicrosoftSuper();
+ case Kind::Type:
+ return getAsType()->getAsCXXRecordDecl();
+ case Kind::Global:
+ case Kind::Namespace:
+ case Kind::Null:
+ return nullptr;
+ }
+ llvm_unreachable("Invalid NNS Kind!");
+}
- /// Whether this nested name specifier involves a template
- /// parameter.
- bool isInstantiationDependent() const;
+NestedNameSpecifier NestedNameSpecifier::getCanonical() const {
+ switch (getKind()) {
+ case NestedNameSpecifier::Kind::Null:
+ case NestedNameSpecifier::Kind::Global:
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
+ // These are canonical and unique.
+ return *this;
+ case NestedNameSpecifier::Kind::Namespace: {
+ // For a namespace, the canonical form drops the prefix and uses the
+ // canonical declaration of the named namespace.
+ const NamespaceBaseDecl *ND = getAsNamespaceAndPrefix().Namespace;
+ return NestedNameSpecifier(
+ {StoredKind::NamespaceOrSuper, ND->getNamespace()->getCanonicalDecl()});
+ }
+ case NestedNameSpecifier::Kind::Type:
+ return NestedNameSpecifier(
+ getAsType()->getCanonicalTypeInternal().getTypePtr());
+ }
+ llvm_unreachable("unhandled kind");
+}
- /// Whether this nested-name-specifier contains an unexpanded
- /// parameter pack (for C++11 variadic templates).
- bool containsUnexpandedParameterPack() const;
+bool NestedNameSpecifier::isCanonical() const {
+ return *this == getCanonical();
+}
- /// Whether this nested name specifier contains an error.
- bool containsErrors() const;
+TypeLoc NestedNameSpecifierLoc::castAsTypeLoc() const {
+ return TypeLoc(Qualifier.getAsType(), LoadPointer(/*Offset=*/0));
+}
- /// Print this nested name specifier to the given output stream. If
- /// `ResolveTemplateArguments` is true, we'll print actual types, e.g.
- /// `ns::SomeTemplate<int, MyClass>` instead of
- /// `ns::SomeTemplate<Container::value_type, T>`.
- void print(raw_ostream &OS, const PrintingPolicy &Policy,
- bool ResolveTemplateArguments = false,
- bool PrintFinalScopeResOp = true) const;
+TypeLoc NestedNameSpecifierLoc::getAsTypeLoc() const {
+ if (Qualifier.getKind() != NestedNameSpecifier::Kind::Type)
+ return TypeLoc();
+ return castAsTypeLoc();
+}
- void Profile(llvm::FoldingSetNodeID &ID) const {
- ID.AddPointer(Prefix.getOpaqueValue());
- ID.AddPointer(Specifier);
+unsigned
+NestedNameSpecifierLoc::getLocalDataLength(NestedNameSpecifier Qualifier) {
+ // Location of the trailing '::'.
+ unsigned Length = sizeof(SourceLocation::UIntTy);
+
+ switch (Qualifier.getKind()) {
+ case NestedNameSpecifier::Kind::Global:
+ // Nothing more to add.
+ break;
+
+ case NestedNameSpecifier::Kind::Namespace:
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
+ // The location of the identifier or namespace name.
+ Length += sizeof(SourceLocation::UIntTy);
+ break;
+
+ case NestedNameSpecifier::Kind::Type:
+ // The "void*" that points at the TypeLoc data.
+ // Note: the 'template' keyword is part of the TypeLoc.
+ Length += sizeof(void *);
+ break;
+
+ case NestedNameSpecifier::Kind::Null:
+ llvm_unreachable("Expected a non-NULL qualifier");
}
- /// Dump the nested name specifier to standard output to aid
- /// in debugging.
- void dump(const LangOptions &LO) const;
- void dump() const;
- void dump(llvm::raw_ostream &OS) const;
- void dump(llvm::raw_ostream &OS, const LangOptions &LO) const;
-};
+ return Length;
+}
-/// A C++ nested-name-specifier augmented with source location
-/// information.
-class NestedNameSpecifierLoc {
- NestedNameSpecifier *Qualifier = nullptr;
- void *Data = nullptr;
-
- /// Determines the data length for the last component in the
- /// given nested-name-specifier.
- static unsigned getLocalDataLength(NestedNameSpecifier *Qualifier);
-
- /// Determines the data length for the entire
- /// nested-name-specifier.
- static unsigned getDataLength(NestedNameSpecifier *Qualifier);
-
-public:
- /// Construct an empty nested-name-specifier.
- NestedNameSpecifierLoc() = default;
-
- /// Construct a nested-name-specifier with source location information
- /// from
- NestedNameSpecifierLoc(NestedNameSpecifier *Qualifier, void *Data)
- : Qualifier(Qualifier), Data(Data) {}
-
- /// Evaluates true when this nested-name-specifier location is
- /// non-empty.
- explicit operator bool() const { return Qualifier; }
-
- /// Evaluates true when this nested-name-specifier location is
- /// non-empty.
- bool hasQualifier() const { return Qualifier; }
-
- /// Retrieve the nested-name-specifier to which this instance
- /// refers.
- NestedNameSpecifier *getNestedNameSpecifier() const {
- return Qualifier;
- }
+NamespaceAndPrefixLoc NestedNameSpecifierLoc::castAsNamespaceAndPrefix() const {
+ auto [Namespace, Prefix] = Qualifier.getAsNamespaceAndPrefix();
+ return {Namespace, NestedNameSpecifierLoc(Prefix, Data)};
+}
- /// Retrieve the opaque pointer that refers to source-location data.
- void *getOpaqueData() const { return Data; }
-
- /// Retrieve the source range covering the entirety of this
- /// nested-name-specifier.
- ///
- /// For example, if this instance refers to a nested-name-specifier
- /// \c \::std::vector<int>::, the returned source range would cover
- /// from the initial '::' to the last '::'.
- SourceRange getSourceRange() const LLVM_READONLY {
- return SourceRange(getBeginLoc(), getEndLoc());
- }
+NamespaceAndPrefixLoc NestedNameSpecifierLoc::getAsNamespaceAndPrefix() const {
+ if (Qualifier.getKind() != NestedNameSpecifier::Kind::Namespace)
+ return {};
+ return castAsNamespaceAndPrefix();
+}
- /// Retrieve the source range covering just the last part of
- /// this nested-name-specifier, not including the prefix.
- ///
- /// For example, if this instance refers to a nested-name-specifier
- /// \c \::std::vector<int>::, the returned source range would cover
- /// from "vector" to the last '::'.
- SourceRange getLocalSourceRange() const;
-
- /// Retrieve the location of the beginning of this
- /// nested-name-specifier.
- SourceLocation getBeginLoc() const {
- if (!Qualifier)
- return SourceLocation();
-
- NestedNameSpecifierLoc First = *this;
- while (NestedNameSpecifierLoc Prefix = First.getPrefix())
- First = Prefix;
- return First.getLocalSourceRange().getBegin();
+unsigned NestedNameSpecifierLoc::getDataLength(NestedNameSpecifier Qualifier) {
+ unsigned Length = 0;
+ for (; Qualifier; Qualifier = Qualifier.getAsNamespaceAndPrefix().Prefix) {
+ Length += getLocalDataLength(Qualifier);
+ if (Qualifier.getKind() != NestedNameSpecifier::Kind::Namespace)
+ break;
}
+ return Length;
+}
- /// Retrieve the location of the end of this
- /// nested-name-specifier.
- SourceLocation getEndLoc() const { return getLocalSourceRange().getEnd(); }
+unsigned NestedNameSpecifierLoc::getDataLength() const {
+ return getDataLength(Qualifier);
+}
- /// Retrieve the location of the beginning of this
- /// component of the nested-name-specifier.
- SourceLocation getLocalBeginLoc() const {
- return getLocalSourceRange().getBegin();
+SourceRange NestedNameSpecifierLoc::getLocalSourceRange() const {
+ switch (auto Kind = Qualifier.getKind()) {
+ case NestedNameSpecifier::Kind::Null:
+ return SourceRange();
+ case NestedNameSpecifier::Kind::Global:
+ return LoadSourceLocation(/*Offset=*/0);
+ case NestedNameSpecifier::Kind::Namespace:
+ case NestedNameSpecifier::Kind::MicrosoftSuper: {
+ unsigned Offset =
+ Kind == NestedNameSpecifier::Kind::Namespace
+ ? getDataLength(Qualifier.getAsNamespaceAndPrefix().Prefix)
+ : 0;
+ return SourceRange(
+ LoadSourceLocation(Offset),
+ LoadSourceLocation(Offset + sizeof(SourceLocation::UIntTy)));
}
-
- /// Retrieve the location of the end of this component of the
- /// nested-name-specifier.
- SourceLocation getLocalEndLoc() const {
- return getLocalSourceRange().getEnd();
+ case NestedNameSpecifier::Kind::Type: {
+ // The "void*" that points at the TypeLoc data.
+ // Note: the 'template' keyword is part of the TypeLoc.
+ void *TypeData = LoadPointer(/*Offset=*/0);
+ TypeLoc TL(Qualifier.getAsType(), TypeData);
+ return SourceRange(TL.getBeginLoc(), LoadSourceLocation(sizeof(void *)));
}
-
- /// Return the prefix of this nested-name-specifier.
- ///
- /// For example, if this instance refers to a nested-name-specifier
- /// \c \::std::vector<int>::, the prefix is \c \::std::. Note that the
- /// returned prefix may be empty, if this is the first component of
- /// the nested-name-specifier.
- NestedNameSpecifierLoc getPrefix() const {
- if (!Qualifier)
- return *this;
-
- return NestedNameSpecifierLoc(Qualifier->getPrefix(), Data);
}
- /// For a nested-name-specifier that refers to a type,
- /// retrieve the type with source-location information.
- TypeLoc getTypeLoc() const;
+ llvm_unreachable("Invalid NNS Kind!");
+}
- /// Determines the data length for the entire
- /// nested-name-specifier.
- unsigned getDataLength() const { return getDataLength(Qualifier); }
+SourceRange NestedNameSpecifierLoc::getSourceRange() const {
+ return SourceRange(getBeginLoc(), getEndLoc());
+}
- friend bool operator==(NestedNameSpecifierLoc X,
- NestedNameSpecifierLoc Y) {
- return X.Qualifier == Y.Qualifier && X.Data == Y.Data;
- }
+SourceLocation NestedNameSpecifierLoc::getEndLoc() const {
+ return getLocalSourceRange().getEnd();
+}
- friend bool operator!=(NestedNameSpecifierLoc X,
- NestedNameSpecifierLoc Y) {
- return !(X == Y);
- }
-};
+/// Retrieve the location of the beginning of this
+/// component of the nested-name-specifier.
+SourceLocation NestedNameSpecifierLoc::getLocalBeginLoc() const {
+ return getLocalSourceRange().getBegin();
+}
-/// Class that aids in the construction of nested-name-specifiers along
-/// with source-location information for all of the components of the
+/// Retrieve the location of the end of this component of the
/// nested-name-specifier.
-class NestedNameSpecifierLocBuilder {
- /// The current representation of the nested-name-specifier we're
- /// building.
- NestedNameSpecifier *Representation = nullptr;
-
- /// Buffer used to store source-location information for the
- /// nested-name-specifier.
- ///
- /// Note that we explicitly manage the buffer (rather than using a
- /// SmallVector) because \c Declarator expects it to be possible to memcpy()
- /// a \c CXXScopeSpec, and CXXScopeSpec uses a NestedNameSpecifierLocBuilder.
- char *Buffer = nullptr;
-
- /// The size of the buffer used to store source-location information
- /// for the nested-name-specifier.
- unsigned BufferSize = 0;
-
- /// The capacity of the buffer used to store source-location
- /// information for the nested-name-specifier.
- unsigned BufferCapacity = 0;
-
-public:
- NestedNameSpecifierLocBuilder() = default;
- NestedNameSpecifierLocBuilder(const NestedNameSpecifierLocBuilder &Other);
-
- NestedNameSpecifierLocBuilder &
- operator=(const NestedNameSpecifierLocBuilder &Other);
-
- ~NestedNameSpecifierLocBuilder() {
- if (BufferCapacity)
- free(Buffer);
- }
+SourceLocation NestedNameSpecifierLoc::getLocalEndLoc() const {
+ return getLocalSourceRange().getEnd();
+}
- /// Retrieve the representation of the nested-name-specifier.
- NestedNameSpecifier *getRepresentation() const { return Representation; }
-
- /// Extend the current nested-name-specifier by another
- /// nested-name-specifier component of the form 'type::'.
- ///
- /// \param Context The AST context in which this nested-name-specifier
- /// resides.
- ///
- /// \param TL The TypeLoc that describes the type preceding the '::'.
- ///
- /// \param ColonColonLoc The location of the trailing '::'.
- void Extend(ASTContext &Context, TypeLoc TL, SourceLocation ColonColonLoc);
-
- /// Extend the current nested-name-specifier by another
- /// nested-name-specifier component of the form 'identifier::'.
- ///
- /// \param Context The AST context in which this nested-name-specifier
- /// resides.
- ///
- /// \param Identifier The identifier.
- ///
- /// \param IdentifierLoc The location of the identifier.
- ///
- /// \param ColonColonLoc The location of the trailing '::'.
- void Extend(ASTContext &Context, IdentifierInfo *Identifier,
- SourceLocation IdentifierLoc, SourceLocation ColonColonLoc);
-
- /// Extend the current nested-name-specifier by another
- /// nested-name-specifier component of the form 'namespace::'.
- ///
- /// \param Context The AST context in which this nested-name-specifier
- /// resides.
- ///
- /// \param Namespace The namespace or namespace alias.
- ///
- /// \param NamespaceLoc The location of the namespace name or the namespace
- // alias.
- ///
- /// \param ColonColonLoc The location of the trailing '::'.
- void Extend(ASTContext &Context, NamespaceBaseDecl *Namespace,
- SourceLocation NamespaceLoc, SourceLocation ColonColonLoc);
-
- /// Turn this (empty) nested-name-specifier into the global
- /// nested-name-specifier '::'.
- void MakeGlobal(ASTContext &Context, SourceLocation ColonColonLoc);
-
- /// Turns this (empty) nested-name-specifier into '__super'
- /// nested-name-specifier.
- ///
- /// \param Context The AST context in which this nested-name-specifier
- /// resides.
- ///
- /// \param RD The declaration of the class in which nested-name-specifier
- /// appeared.
- ///
- /// \param SuperLoc The location of the '__super' keyword.
- /// name.
- ///
- /// \param ColonColonLoc The location of the trailing '::'.
- void MakeSuper(ASTContext &Context, CXXRecordDecl *RD,
- SourceLocation SuperLoc, SourceLocation ColonColonLoc);
-
- /// Make a new nested-name-specifier from incomplete source-location
- /// information.
- ///
- /// This routine should be used very, very rarely, in cases where we
- /// need to synthesize a nested-name-specifier. Most code should instead use
- /// \c Adopt() with a proper \c NestedNameSpecifierLoc.
- void MakeTrivial(ASTContext &Context, NestedNameSpecifier *Qualifier,
- SourceRange R);
-
- /// Adopt an existing nested-name-specifier (with source-range
- /// information).
- void Adopt(NestedNameSpecifierLoc Other);
-
- /// Retrieve the source range covered by this nested-name-specifier.
- SourceRange getSourceRange() const LLVM_READONLY {
- return NestedNameSpecifierLoc(Representation, Buffer).getSourceRange();
- }
+SourceRange NestedNameSpecifierLocBuilder::getSourceRange() const {
+ return NestedNameSpecifierLoc(Representation, Buffer).getSourceRange();
+}
- /// Retrieve a nested-name-specifier with location information,
- /// copied into the given AST context.
- ///
- /// \param Context The context into which this nested-name-specifier will be
- /// copied.
- NestedNameSpecifierLoc getWithLocInContext(ASTContext &Context) const;
-
- /// Retrieve a nested-name-specifier with location
- /// information based on the information in this builder.
- ///
- /// This loc will contain references to the builder's internal data and may
- /// be invalidated by any change to the builder.
- NestedNameSpecifierLoc getTemporary() const {
- return NestedNameSpecifierLoc(Representation, Buffer);
- }
+} // namespace clang
+
+namespace llvm {
+
+template <> struct DenseMapInfo<clang::NestedNameSpecifier> {
+ static clang::NestedNameSpecifier getEmptyKey() { return std::nullopt; }
- /// Clear out this builder, and prepare it to build another
- /// nested-name-specifier with source-location information.
- void Clear() {
- Representation = nullptr;
- BufferSize = 0;
+ static clang::NestedNameSpecifier getTombstoneKey() {
+ return clang::NestedNameSpecifier::getInvalid();
}
- /// Retrieve the underlying buffer.
- ///
- /// \returns A pair containing a pointer to the buffer of source-location
- /// data and the size of the source-location data that resides in that
- /// buffer.
- std::pair<char *, unsigned> getBuffer() const {
- return std::make_pair(Buffer, BufferSize);
+ static unsigned getHashValue(const clang::NestedNameSpecifier &V) {
+ return hash_combine(V.getAsVoidPointer());
}
};
-/// Insertion operator for diagnostics. This allows sending
-/// NestedNameSpecifiers into a diagnostic with <<.
-inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &DB,
- NestedNameSpecifier *NNS) {
- DB.AddTaggedVal(reinterpret_cast<uint64_t>(NNS),
- DiagnosticsEngine::ak_nestednamespec);
- return DB;
-}
-
-} // namespace clang
-
-namespace llvm {
-
template <> struct DenseMapInfo<clang::NestedNameSpecifierLoc> {
- using FirstInfo = DenseMapInfo<clang::NestedNameSpecifier *>;
+ using FirstInfo = DenseMapInfo<clang::NestedNameSpecifier>;
using SecondInfo = DenseMapInfo<void *>;
static clang::NestedNameSpecifierLoc getEmptyKey() {
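An invented helper illustrating how the rewritten value-type NestedNameSpecifier above is inspected: getKind() replaces the old pointer checks, and the Null kind stands in for what used to be a null pointer.

#include "clang/AST/NestedNameSpecifier.h"
#include "llvm/Support/ErrorHandling.h"

static const char *describeQualifier(clang::NestedNameSpecifier NNS) {
  switch (NNS.getKind()) {
  case clang::NestedNameSpecifier::Kind::Null:           return "<none>";
  case clang::NestedNameSpecifier::Kind::Global:         return "::";
  case clang::NestedNameSpecifier::Kind::Namespace:      return "namespace";
  case clang::NestedNameSpecifier::Kind::Type:           return "type";
  case clang::NestedNameSpecifier::Kind::MicrosoftSuper: return "__super";
  }
  llvm_unreachable("unhandled NestedNameSpecifier::Kind");
}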
diff --git a/clang/include/clang/AST/NestedNameSpecifierBase.h b/clang/include/clang/AST/NestedNameSpecifierBase.h
new file mode 100644
index 0000000..73c60ba
--- /dev/null
+++ b/clang/include/clang/AST/NestedNameSpecifierBase.h
@@ -0,0 +1,586 @@
+//===- NestedNameSpecifierBase.h - C++ nested name specifiers ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the NestedNameSpecifier class, which represents
+// a C++ nested-name-specifier.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_NESTEDNAMESPECIFIERBASE_H
+#define LLVM_CLANG_AST_NESTEDNAMESPECIFIERBASE_H
+
+#include "clang/AST/DependenceFlags.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/PointerLikeTypeTraits.h"
+#include <cstdint>
+#include <cstdlib>
+#include <utility>
+
+namespace clang {
+
+class ASTContext;
+class CXXRecordDecl;
+class NamedDecl;
+class IdentifierInfo;
+class LangOptions;
+class NamespaceBaseDecl;
+struct PrintingPolicy;
+class Type;
+class TypeLoc;
+
+struct NamespaceAndPrefix;
+struct alignas(8) NamespaceAndPrefixStorage;
+
+/// Represents a C++ nested name specifier, such as
+/// "\::std::vector<int>::".
+///
+/// C++ nested name specifiers are the prefixes to qualified
+/// names. For example, "foo::" in "foo::x" is a nested name
+/// specifier. Nested name specifiers are made up of a sequence of
+/// specifiers, each of which can be a namespace, type, decltype specifier, or
+/// the global specifier ('::'). The last two specifiers can only appear at the
+/// start of a nested-namespace-specifier.
+class NestedNameSpecifier {
+ enum class FlagKind { Null, Global, Invalid };
+ enum class StoredKind {
+ Type,
+ NamespaceOrSuper,
+ NamespaceWithGlobal,
+ NamespaceWithNamespace
+ };
+ static constexpr uintptr_t FlagBits = 2, FlagMask = (1u << FlagBits) - 1u,
+ FlagOffset = 1, PtrOffset = FlagBits + FlagOffset,
+ PtrMask = (1u << PtrOffset) - 1u;
+
+ uintptr_t StoredOrFlag;
+
+ explicit NestedNameSpecifier(uintptr_t StoredOrFlag)
+ : StoredOrFlag(StoredOrFlag) {}
+ struct PtrKind {
+ StoredKind SK;
+ const void *Ptr;
+ };
+ explicit NestedNameSpecifier(PtrKind PK)
+ : StoredOrFlag(uintptr_t(PK.Ptr) | (uintptr_t(PK.SK) << FlagOffset)) {
+ assert(PK.Ptr != nullptr);
+ assert((uintptr_t(PK.Ptr) & ((1u << PtrOffset) - 1u)) == 0);
+ assert((uintptr_t(PK.Ptr) >> PtrOffset) != 0);
+ }
+
+ explicit constexpr NestedNameSpecifier(FlagKind K)
+ : StoredOrFlag(uintptr_t(K) << FlagOffset) {}
+
+ bool isStoredKind() const { return (StoredOrFlag >> PtrOffset) != 0; }
+
+ std::pair<StoredKind, const void *> getStored() const {
+ assert(isStoredKind());
+ return {StoredKind(StoredOrFlag >> FlagOffset & FlagMask),
+ reinterpret_cast<const void *>(StoredOrFlag & ~PtrMask)};
+ }
+
+ FlagKind getFlagKind() const {
+ assert(!isStoredKind());
+ return FlagKind(StoredOrFlag >> FlagOffset);
+ }
+
+ static const NamespaceAndPrefixStorage *
+ MakeNamespaceAndPrefixStorage(const ASTContext &Ctx,
+ const NamespaceBaseDecl *Namespace,
+ NestedNameSpecifier Prefix);
+ static inline PtrKind MakeNamespacePtrKind(const ASTContext &Ctx,
+ const NamespaceBaseDecl *Namespace,
+ NestedNameSpecifier Prefix);
+
+public:
+ static constexpr NestedNameSpecifier getInvalid() {
+ return NestedNameSpecifier(FlagKind::Invalid);
+ }
+
+ static constexpr NestedNameSpecifier getGlobal() {
+ return NestedNameSpecifier(FlagKind::Global);
+ }
+
+ NestedNameSpecifier() : NestedNameSpecifier(FlagKind::Invalid) {}
+
+ /// The kind of specifier that completes this nested name
+ /// specifier.
+ enum class Kind {
+ /// Empty.
+ Null,
+
+ /// The global specifier '::'. There is no stored value.
+ Global,
+
+ /// A type, stored as a Type*.
+ Type,
+
+ /// A namespace-like entity, stored as a NamespaceBaseDecl*.
+ Namespace,
+
+ /// Microsoft's '__super' specifier, stored as a CXXRecordDecl* of
+ /// the class it appeared in.
+ MicrosoftSuper,
+ };
+
+ inline Kind getKind() const;
+
+ NestedNameSpecifier(std::nullopt_t) : StoredOrFlag(0) {}
+
+ explicit inline NestedNameSpecifier(const Type *T);
+
+ /// Builds a nested name specifier that names a namespace.
+ inline NestedNameSpecifier(const ASTContext &Ctx,
+ const NamespaceBaseDecl *Namespace,
+ NestedNameSpecifier Prefix);
+
+ /// Builds a nested name specifier that names a class through Microsoft's
+ /// __super specifier.
+ explicit inline NestedNameSpecifier(CXXRecordDecl *RD);
+
+ explicit operator bool() const { return StoredOrFlag != 0; }
+
+ void *getAsVoidPointer() const {
+ return reinterpret_cast<void *>(StoredOrFlag);
+ }
+ static NestedNameSpecifier getFromVoidPointer(const void *Ptr) {
+ return NestedNameSpecifier(reinterpret_cast<uintptr_t>(Ptr));
+ }
+
+ const Type *getAsType() const {
+ auto [Kind, Ptr] = getStored();
+ assert(Kind == StoredKind::Type);
+ assert(Ptr != nullptr);
+ return static_cast<const Type *>(Ptr);
+ }
+
+ inline NamespaceAndPrefix getAsNamespaceAndPrefix() const;
+
+ CXXRecordDecl *getAsMicrosoftSuper() const {
+ auto [Kind, Ptr] = getStored();
+ assert(Kind == StoredKind::NamespaceOrSuper);
+ assert(Ptr != nullptr);
+ return static_cast<CXXRecordDecl *>(const_cast<void *>(Ptr));
+ }
+
+ /// Retrieve the record declaration stored in this nested name
+ /// specifier, or null.
+ inline CXXRecordDecl *getAsRecordDecl() const;
+
+ friend bool operator==(NestedNameSpecifier LHS, NestedNameSpecifier RHS) {
+ return LHS.StoredOrFlag == RHS.StoredOrFlag;
+ }
+ friend bool operator!=(NestedNameSpecifier LHS, NestedNameSpecifier RHS) {
+ return LHS.StoredOrFlag != RHS.StoredOrFlag;
+ }
+
+ /// Retrieves the "canonical" nested name specifier for a
+ /// given nested name specifier.
+ ///
+ /// The canonical nested name specifier is a nested name specifier
+ /// that uniquely identifies a type or namespace within the type
+ /// system. For example, given:
+ ///
+ /// \code
+ /// namespace N {
+ /// struct S {
+ /// template<typename T> struct X { typedef T *type; };
+ /// };
+ /// }
+ ///
+ /// template<typename T> struct Y {
+ /// typename N::S::X<T>::type member;
+ /// };
+ /// \endcode
+ ///
+ /// Here, the nested-name-specifier for N::S::X<T>:: will be
+ /// S::X<template-param-0-0>, since 'S' and 'X' are uniquely defined
+ /// by declarations in the type system and the canonical type for
+ /// the template type parameter 'T' is template-param-0-0.
+ inline NestedNameSpecifier getCanonical() const;
+
+ /// Whether this nested name specifier is canonical.
+ inline bool isCanonical() const;
+
+ /// Whether this nested name specifier starts with a '::'.
+ bool isFullyQualified() const;
+
+ NestedNameSpecifierDependence getDependence() const;
+
+ /// Whether this nested name specifier refers to a dependent
+ /// type or not.
+ bool isDependent() const {
+ return getDependence() & NestedNameSpecifierDependence::Dependent;
+ }
+
+ /// Whether this nested name specifier involves a template
+ /// parameter.
+ bool isInstantiationDependent() const {
+ return getDependence() & NestedNameSpecifierDependence::Instantiation;
+ }
+
+ /// Whether this nested-name-specifier contains an unexpanded
+ /// parameter pack (for C++11 variadic templates).
+ bool containsUnexpandedParameterPack() const {
+ return getDependence() & NestedNameSpecifierDependence::UnexpandedPack;
+ }
+
+ /// Whether this nested name specifier contains an error.
+ bool containsErrors() const {
+ return getDependence() & NestedNameSpecifierDependence::Error;
+ }
+
+ /// Print this nested name specifier to the given output stream. If
+ /// `ResolveTemplateArguments` is true, we'll print actual types, e.g.
+ /// `ns::SomeTemplate<int, MyClass>` instead of
+ /// `ns::SomeTemplate<Container::value_type, T>`.
+ void print(raw_ostream &OS, const PrintingPolicy &Policy,
+ bool ResolveTemplateArguments = false,
+ bool PrintFinalScopeResOp = true) const;
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger(StoredOrFlag);
+ }
+
+ /// Dump the nested name specifier to aid in debugging.
+ void dump(llvm::raw_ostream *OS = nullptr,
+ const LangOptions *LO = nullptr) const;
+ void dump(const LangOptions &LO) const;
+ void dump(llvm::raw_ostream &OS) const;
+ void dump(llvm::raw_ostream &OS, const LangOptions &LO) const;
+
+ static constexpr auto NumLowBitsAvailable = FlagOffset;
+};
+
+struct NamespaceAndPrefix {
+ const NamespaceBaseDecl *Namespace;
+ NestedNameSpecifier Prefix;
+};
+
+struct alignas(8) NamespaceAndPrefixStorage : NamespaceAndPrefix,
+ llvm::FoldingSetNode {
+ NamespaceAndPrefixStorage(const NamespaceBaseDecl *Namespace,
+ NestedNameSpecifier Prefix)
+ : NamespaceAndPrefix{Namespace, Prefix} {}
+ void Profile(llvm::FoldingSetNodeID &ID) { Profile(ID, Namespace, Prefix); }
+ static void Profile(llvm::FoldingSetNodeID &ID,
+ const NamespaceBaseDecl *Namespace,
+ NestedNameSpecifier Prefix) {
+ ID.AddPointer(Namespace);
+ Prefix.Profile(ID);
+ }
+};
+
+NamespaceAndPrefix NestedNameSpecifier::getAsNamespaceAndPrefix() const {
+ auto [Kind, Ptr] = getStored();
+ switch (Kind) {
+ case StoredKind::NamespaceOrSuper:
+ case StoredKind::NamespaceWithGlobal:
+ return {static_cast<const NamespaceBaseDecl *>(Ptr),
+ Kind == StoredKind::NamespaceWithGlobal
+ ? NestedNameSpecifier::getGlobal()
+ : std::nullopt};
+ case StoredKind::NamespaceWithNamespace:
+ return *static_cast<const NamespaceAndPrefixStorage *>(Ptr);
+ case StoredKind::Type:;
+ }
+ llvm_unreachable("unexpected stored kind");
+}
+
+struct NamespaceAndPrefixLoc;
+
+/// A C++ nested-name-specifier augmented with source location
+/// information.
+class NestedNameSpecifierLoc {
+ NestedNameSpecifier Qualifier = std::nullopt;
+ void *Data = nullptr;
+
+ /// Load a (possibly unaligned) source location from a given address
+ /// and offset.
+ SourceLocation LoadSourceLocation(unsigned Offset) const {
+ SourceLocation::UIntTy Raw;
+ memcpy(&Raw, static_cast<char *>(Data) + Offset, sizeof(Raw));
+ return SourceLocation::getFromRawEncoding(Raw);
+ }
+
+ /// Load a (possibly unaligned) pointer from a given address and
+ /// offset.
+ void *LoadPointer(unsigned Offset) const {
+ void *Result;
+ memcpy(&Result, static_cast<char *>(Data) + Offset, sizeof(void *));
+ return Result;
+ }
+
+ /// Determines the data length for the last component in the
+ /// given nested-name-specifier.
+ static inline unsigned getLocalDataLength(NestedNameSpecifier Qualifier);
+
+ /// Determines the data length for the entire
+ /// nested-name-specifier.
+ static inline unsigned getDataLength(NestedNameSpecifier Qualifier);
+
+public:
+ /// Construct an empty nested-name-specifier.
+ NestedNameSpecifierLoc() = default;
+
+ /// Construct a nested-name-specifier with source location information
+ /// from the given qualifier and opaque pointer to source-location data.
+ NestedNameSpecifierLoc(NestedNameSpecifier Qualifier, void *Data)
+ : Qualifier(Qualifier), Data(Data) {}
+
+ /// Evaluates true when this nested-name-specifier location is
+ /// non-empty.
+ explicit operator bool() const { return bool(Qualifier); }
+
+ /// Evaluates true when this nested-name-specifier location is
+ /// non-empty.
+ bool hasQualifier() const { return bool(Qualifier); }
+
+ /// Retrieve the nested-name-specifier to which this instance
+ /// refers.
+ NestedNameSpecifier getNestedNameSpecifier() const { return Qualifier; }
+
+ /// Retrieve the opaque pointer that refers to source-location data.
+ void *getOpaqueData() const { return Data; }
+
+ /// Retrieve the source range covering the entirety of this
+ /// nested-name-specifier.
+ ///
+ /// For example, if this instance refers to a nested-name-specifier
+ /// \c \::std::vector<int>::, the returned source range would cover
+ /// from the initial '::' to the last '::'.
+ inline SourceRange getSourceRange() const LLVM_READONLY;
+
+ /// Retrieve the source range covering just the last part of
+ /// this nested-name-specifier, not including the prefix.
+ ///
+ /// For example, if this instance refers to a nested-name-specifier
+ /// \c \::std::vector<int>::, the returned source range would cover
+ /// from "vector" to the last '::'.
+ inline SourceRange getLocalSourceRange() const;
+
+ /// Retrieve the location of the beginning of this
+ /// nested-name-specifier.
+ SourceLocation getBeginLoc() const;
+
+ /// Retrieve the location of the end of this
+ /// nested-name-specifier.
+ inline SourceLocation getEndLoc() const;
+
+ /// Retrieve the location of the beginning of this
+ /// component of the nested-name-specifier.
+ inline SourceLocation getLocalBeginLoc() const;
+
+ /// Retrieve the location of the end of this component of the
+ /// nested-name-specifier.
+ inline SourceLocation getLocalEndLoc() const;
+
+ /// For a nested-name-specifier that refers to a namespace,
+ /// retrieve the namespace and its prefix.
+ ///
+ /// For example, if this instance refers to a nested-name-specifier
+ /// \c \::std::chrono::, the prefix is \c \::std::. Note that the
+ /// returned prefix may be empty, if this is the first component of
+ /// the nested-name-specifier.
+ inline NamespaceAndPrefixLoc castAsNamespaceAndPrefix() const;
+ inline NamespaceAndPrefixLoc getAsNamespaceAndPrefix() const;
+
+ /// For a nested-name-specifier that refers to a type,
+ /// retrieve the type with source-location information.
+ inline TypeLoc castAsTypeLoc() const;
+ inline TypeLoc getAsTypeLoc() const;
+
+ /// Determines the data length for the entire
+ /// nested-name-specifier.
+ inline unsigned getDataLength() const;
+
+ friend bool operator==(NestedNameSpecifierLoc X, NestedNameSpecifierLoc Y) {
+ return X.Qualifier == Y.Qualifier && X.Data == Y.Data;
+ }
+
+ friend bool operator!=(NestedNameSpecifierLoc X, NestedNameSpecifierLoc Y) {
+ return !(X == Y);
+ }
+};
+
+struct NamespaceAndPrefixLoc {
+ const NamespaceBaseDecl *Namespace = nullptr;
+ NestedNameSpecifierLoc Prefix;
+
+ explicit operator bool() const { return Namespace != nullptr; }
+};
+
+/// Class that aids in the construction of nested-name-specifiers along
+/// with source-location information for all of the components of the
+/// nested-name-specifier.
+class NestedNameSpecifierLocBuilder {
+ /// The current representation of the nested-name-specifier we're
+ /// building.
+ NestedNameSpecifier Representation = std::nullopt;
+
+ /// Buffer used to store source-location information for the
+ /// nested-name-specifier.
+ ///
+ /// Note that we explicitly manage the buffer (rather than using a
+ /// SmallVector) because \c Declarator expects it to be possible to memcpy()
+ /// a \c CXXScopeSpec, and CXXScopeSpec uses a NestedNameSpecifierLocBuilder.
+ char *Buffer = nullptr;
+
+ /// The size of the buffer used to store source-location information
+ /// for the nested-name-specifier.
+ unsigned BufferSize = 0;
+
+ /// The capacity of the buffer used to store source-location
+ /// information for the nested-name-specifier.
+ unsigned BufferCapacity = 0;
+
+ void PushTrivial(ASTContext &Context, NestedNameSpecifier Qualifier,
+ SourceRange R);
+
+public:
+ NestedNameSpecifierLocBuilder() = default;
+ NestedNameSpecifierLocBuilder(const NestedNameSpecifierLocBuilder &Other);
+
+ NestedNameSpecifierLocBuilder &
+ operator=(const NestedNameSpecifierLocBuilder &Other);
+
+ ~NestedNameSpecifierLocBuilder() {
+ if (BufferCapacity)
+ free(Buffer);
+ }
+
+ /// Retrieve the representation of the nested-name-specifier.
+ NestedNameSpecifier getRepresentation() const { return Representation; }
+
+ /// Make a nested-name-specifier of the form 'type::'.
+ ///
+ /// \param Context The AST context in which this nested-name-specifier
+ /// resides.
+ ///
+ /// \param TL The TypeLoc that describes the type preceding the '::'.
+ ///
+ /// \param ColonColonLoc The location of the trailing '::'.
+ void Make(ASTContext &Context, TypeLoc TL, SourceLocation ColonColonLoc);
+
+ /// Extend the current nested-name-specifier by another
+ /// nested-name-specifier component of the form 'namespace::'.
+ ///
+ /// \param Context The AST context in which this nested-name-specifier
+ /// resides.
+ ///
+ /// \param Namespace The namespace.
+ ///
+ /// \param NamespaceLoc The location of the namespace name.
+ ///
+ /// \param ColonColonLoc The location of the trailing '::'.
+ void Extend(ASTContext &Context, const NamespaceBaseDecl *Namespace,
+ SourceLocation NamespaceLoc, SourceLocation ColonColonLoc);
+
+ /// Turn this (empty) nested-name-specifier into the global
+ /// nested-name-specifier '::'.
+ void MakeGlobal(ASTContext &Context, SourceLocation ColonColonLoc);
+
+ /// Turns this (empty) nested-name-specifier into '__super'
+ /// nested-name-specifier.
+ ///
+ /// \param Context The AST context in which this nested-name-specifier
+ /// resides.
+ ///
+ /// \param RD The declaration of the class in which nested-name-specifier
+ /// appeared.
+ ///
+ /// \param SuperLoc The location of the '__super' keyword.
+ ///
+ /// \param ColonColonLoc The location of the trailing '::'.
+ void MakeMicrosoftSuper(ASTContext &Context, CXXRecordDecl *RD,
+ SourceLocation SuperLoc,
+ SourceLocation ColonColonLoc);
+
+ /// Make a new nested-name-specifier from incomplete source-location
+ /// information.
+ ///
+ /// This routine should be used very, very rarely, in cases where we
+ /// need to synthesize a nested-name-specifier. Most code should instead use
+ /// \c Adopt() with a proper \c NestedNameSpecifierLoc.
+ void MakeTrivial(ASTContext &Context, NestedNameSpecifier Qualifier,
+ SourceRange R) {
+ Representation = Qualifier;
+ BufferSize = 0;
+ PushTrivial(Context, Qualifier, R);
+ }
+
+ /// Adopt an existing nested-name-specifier (with source-range
+ /// information).
+ void Adopt(NestedNameSpecifierLoc Other);
+
+ /// Retrieve the source range covered by this nested-name-specifier.
+ inline SourceRange getSourceRange() const LLVM_READONLY;
+
+ /// Retrieve a nested-name-specifier with location information,
+ /// copied into the given AST context.
+ ///
+ /// \param Context The context into which this nested-name-specifier will be
+ /// copied.
+ NestedNameSpecifierLoc getWithLocInContext(ASTContext &Context) const;
+
+ /// Retrieve a nested-name-specifier with location
+ /// information based on the information in this builder.
+ ///
+ /// This loc will contain references to the builder's internal data and may
+ /// be invalidated by any change to the builder.
+ NestedNameSpecifierLoc getTemporary() const {
+ return NestedNameSpecifierLoc(Representation, Buffer);
+ }
+
+ /// Clear out this builder, and prepare it to build another
+ /// nested-name-specifier with source-location information.
+ void Clear() {
+ Representation = std::nullopt;
+ BufferSize = 0;
+ }
+
+ /// Retrieve the underlying buffer.
+ ///
+ /// \returns A pair containing a pointer to the buffer of source-location
+ /// data and the size of the source-location data that resides in that
+ /// buffer.
+ std::pair<char *, unsigned> getBuffer() const {
+ return std::make_pair(Buffer, BufferSize);
+ }
+};
+
+/// Insertion operator for diagnostics. This allows sending
+/// NestedNameSpecifiers into a diagnostic with <<.
+inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &DB,
+ NestedNameSpecifier NNS) {
+ DB.AddTaggedVal(reinterpret_cast<uintptr_t>(NNS.getAsVoidPointer()),
+ DiagnosticsEngine::ak_nestednamespec);
+ return DB;
+}
+
+} // namespace clang
+
+namespace llvm {
+
+template <> struct PointerLikeTypeTraits<clang::NestedNameSpecifier> {
+ static void *getAsVoidPointer(clang::NestedNameSpecifier P) {
+ return P.getAsVoidPointer();
+ }
+ static clang::NestedNameSpecifier getFromVoidPointer(const void *P) {
+ return clang::NestedNameSpecifier::getFromVoidPointer(P);
+ }
+ static constexpr int NumLowBitsAvailable =
+ clang::NestedNameSpecifier::NumLowBitsAvailable;
+};
+
+} // namespace llvm
+
+#endif // LLVM_CLANG_AST_NESTEDNAMESPECIFIERBASE_H
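A short sketch under the definitions in the new header above: namespace qualifiers chain through NamespaceAndPrefix::Prefix, so a prefix walk becomes the loop below. countNamespaceComponents is an invented name.

#include "clang/AST/NestedNameSpecifier.h"

static unsigned countNamespaceComponents(clang::NestedNameSpecifier NNS) {
  unsigned N = 0;
  while (NNS.getKind() == clang::NestedNameSpecifier::Kind::Namespace) {
    ++N;
    NNS = NNS.getAsNamespaceAndPrefix().Prefix; // Null or Global at the end.
  }
  return N;
}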
diff --git a/clang/include/clang/AST/ODRHash.h b/clang/include/clang/AST/ODRHash.h
index 11f917a..ae3fab6 100644
--- a/clang/include/clang/AST/ODRHash.h
+++ b/clang/include/clang/AST/ODRHash.h
@@ -93,7 +93,7 @@ public:
void AddQualType(QualType T);
void AddStmt(const Stmt *S);
void AddIdentifierInfo(const IdentifierInfo *II);
- void AddNestedNameSpecifier(const NestedNameSpecifier *NNS);
+ void AddNestedNameSpecifier(NestedNameSpecifier NNS);
void AddDependentTemplateName(const DependentTemplateStorage &Name);
void AddTemplateName(TemplateName Name);
void AddDeclarationNameInfo(DeclarationNameInfo NameInfo,
diff --git a/clang/include/clang/AST/PrettyPrinter.h b/clang/include/clang/AST/PrettyPrinter.h
index 875769c..fd995a6 100644
--- a/clang/include/clang/AST/PrettyPrinter.h
+++ b/clang/include/clang/AST/PrettyPrinter.h
@@ -63,9 +63,9 @@ struct PrintingPolicy {
SuppressTagKeyword(LO.CPlusPlus), IncludeTagDefinition(false),
SuppressScope(false), SuppressUnwrittenScope(false),
SuppressInlineNamespace(SuppressInlineNamespaceMode::Redundant),
- SuppressElaboration(false), SuppressInitializers(false),
- ConstantArraySizeAsWritten(false), AnonymousTagLocations(true),
- SuppressStrongLifetime(false), SuppressLifetimeQualifiers(false),
+ SuppressInitializers(false), ConstantArraySizeAsWritten(false),
+ AnonymousTagLocations(true), SuppressStrongLifetime(false),
+ SuppressLifetimeQualifiers(false),
SuppressTemplateArgsInCXXConstructors(false),
SuppressDefaultTemplateArgs(true), Bool(LO.Bool),
Nullptr(LO.CPlusPlus11 || LO.C23), NullptrTypeInNamespace(LO.CPlusPlus),
@@ -150,11 +150,6 @@ struct PrintingPolicy {
LLVM_PREFERRED_TYPE(SuppressInlineNamespaceMode)
unsigned SuppressInlineNamespace : 2;
- /// Ignore qualifiers and tag keywords as specified by elaborated type sugar,
- /// instead letting the underlying type print as normal.
- LLVM_PREFERRED_TYPE(bool)
- unsigned SuppressElaboration : 1;
-
/// Suppress printing of variable initializers.
///
/// This flag is used when printing the loop variable in a for-range
diff --git a/clang/include/clang/AST/PropertiesBase.td b/clang/include/clang/AST/PropertiesBase.td
index 0438e4d..5b10127 100644
--- a/clang/include/clang/AST/PropertiesBase.td
+++ b/clang/include/clang/AST/PropertiesBase.td
@@ -127,9 +127,8 @@ def LValuePathSerializationHelper :
PropertyType<"APValue::LValuePathSerializationHelper"> {
let BufferElementTypes = [ LValuePathEntry ];
}
-def NestedNameSpecifier : PropertyType<"NestedNameSpecifier *">;
-def NestedNameSpecifierKind :
- EnumPropertyType<"NestedNameSpecifier::SpecifierKind">;
+def NestedNameSpecifier : PropertyType<"NestedNameSpecifier">;
+def NestedNameSpecifierKind : EnumPropertyType<"NestedNameSpecifier::Kind">;
def OverloadedOperatorKind : EnumPropertyType;
def Qualifiers : PropertyType;
def QualType : DefaultValuePropertyType;
diff --git a/clang/include/clang/AST/QualTypeNames.h b/clang/include/clang/AST/QualTypeNames.h
index daa86cd..9f5cf04 100644
--- a/clang/include/clang/AST/QualTypeNames.h
+++ b/clang/include/clang/AST/QualTypeNames.h
@@ -87,6 +87,16 @@ std::string getFullyQualifiedName(QualType QT, const ASTContext &Ctx,
/// specifier "::" should be prepended or not.
QualType getFullyQualifiedType(QualType QT, const ASTContext &Ctx,
bool WithGlobalNsPrefix = false);
+
+/// Get the fully qualified name for the declared context of a declaration.
+///
+/// \param[in] Ctx - the ASTContext to be used.
+/// \param[in] Decl - the declaration for which to get the fully qualified name.
+/// \param[in] WithGlobalNsPrefix - If true, then the global namespace
+/// specifier "::" will be prepended to the fully qualified name.
+NestedNameSpecifier
+getFullyQualifiedDeclaredContext(const ASTContext &Ctx, const Decl *Decl,
+ bool WithGlobalNsPrefix = false);
} // end namespace TypeName
} // end namespace clang
#endif // LLVM_CLANG_AST_QUALTYPENAMES_H
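An illustrative call site for the helper declared above (not part of the patch; the wrapper name is invented):

#include "clang/AST/QualTypeNames.h"

// Hypothetical wrapper: name the declared context of D fully qualified,
// prepending the global "::" as requested via WithGlobalNsPrefix.
static clang::NestedNameSpecifier
qualifiedContextOf(const clang::ASTContext &Ctx, const clang::Decl *D) {
  return clang::TypeName::getFullyQualifiedDeclaredContext(
      Ctx, D, /*WithGlobalNsPrefix=*/true);
}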
diff --git a/clang/include/clang/AST/RecursiveASTVisitor.h b/clang/include/clang/AST/RecursiveASTVisitor.h
index 62991d9..248b892 100644
--- a/clang/include/clang/AST/RecursiveASTVisitor.h
+++ b/clang/include/clang/AST/RecursiveASTVisitor.h
@@ -216,14 +216,14 @@ public:
///
/// \returns false if the visitation was terminated early, true
/// otherwise (including when the argument is a Null type).
- bool TraverseType(QualType T);
+ bool TraverseType(QualType T, bool TraverseQualifier = true);
/// Recursively visit a type with location, by dispatching to
/// Traverse*TypeLoc() based on the argument type's getTypeClass() property.
///
/// \returns false if the visitation was terminated early, true
/// otherwise (including when the argument is a Null type location).
- bool TraverseTypeLoc(TypeLoc TL);
+ bool TraverseTypeLoc(TypeLoc TL, bool TraverseQualifier = true);
/// Recursively visit an attribute, by dispatching to
/// Traverse*Attr() based on the argument's dynamic type.
@@ -242,7 +242,7 @@ public:
/// Recursively visit a C++ nested-name-specifier.
///
/// \returns false if the visitation was terminated early, true otherwise.
- bool TraverseNestedNameSpecifier(NestedNameSpecifier *NNS);
+ bool TraverseNestedNameSpecifier(NestedNameSpecifier NNS);
/// Recursively visit a C++ nested-name-specifier with location
/// information.
@@ -389,7 +389,8 @@ public:
// Declare Traverse*() for all concrete Type classes.
#define ABSTRACT_TYPE(CLASS, BASE)
-#define TYPE(CLASS, BASE) bool Traverse##CLASS##Type(CLASS##Type *T);
+#define TYPE(CLASS, BASE) \
+ bool Traverse##CLASS##Type(CLASS##Type *T, bool TraverseQualifier);
#include "clang/AST/TypeNodes.inc"
// The above header #undefs ABSTRACT_TYPE and TYPE upon exit.
@@ -410,7 +411,8 @@ public:
// Declare Traverse*() for all concrete TypeLoc classes.
#define ABSTRACT_TYPELOC(CLASS, BASE)
-#define TYPELOC(CLASS, BASE) bool Traverse##CLASS##TypeLoc(CLASS##TypeLoc TL);
+#define TYPELOC(CLASS, BASE) \
+ bool Traverse##CLASS##TypeLoc(CLASS##TypeLoc TL, bool TraverseQualifier);
#include "clang/AST/TypeLocNodes.def"
// The above header #undefs ABSTRACT_TYPELOC and TYPELOC upon exit.
@@ -499,6 +501,8 @@ private:
bool TraverseOMPExecutableDirective(OMPExecutableDirective *S);
bool TraverseOMPLoopDirective(OMPLoopDirective *S);
bool TraverseOMPClause(OMPClause *C);
+ bool TraverseTagType(TagType *T, bool TraverseQualifier);
+ bool TraverseTagTypeLoc(TagTypeLoc TL, bool TraverseQualifier);
#define GEN_CLANG_CLAUSE_CLASS
#define CLAUSE_CLASS(Enum, Str, Class) bool Visit##Class(Class *C);
#include "llvm/Frontend/OpenMP/OMP.inc"
@@ -698,7 +702,8 @@ RecursiveASTVisitor<Derived>::TraverseStmt(Stmt *S, DataRecursionQueue *Queue) {
}
template <typename Derived>
-bool RecursiveASTVisitor<Derived>::TraverseType(QualType T) {
+bool RecursiveASTVisitor<Derived>::TraverseType(QualType T,
+ bool TraverseQualifier) {
if (T.isNull())
return true;
@@ -707,7 +712,8 @@ bool RecursiveASTVisitor<Derived>::TraverseType(QualType T) {
#define TYPE(CLASS, BASE) \
case Type::CLASS: \
return getDerived().Traverse##CLASS##Type( \
- static_cast<CLASS##Type *>(const_cast<Type *>(T.getTypePtr())));
+ static_cast<CLASS##Type *>(const_cast<Type *>(T.getTypePtr())), \
+ TraverseQualifier);
#include "clang/AST/TypeNodes.inc"
}
@@ -715,7 +721,8 @@ bool RecursiveASTVisitor<Derived>::TraverseType(QualType T) {
}
template <typename Derived>
-bool RecursiveASTVisitor<Derived>::TraverseTypeLoc(TypeLoc TL) {
+bool RecursiveASTVisitor<Derived>::TraverseTypeLoc(TypeLoc TL,
+ bool TraverseQualifier) {
if (TL.isNull())
return true;
@@ -723,7 +730,8 @@ bool RecursiveASTVisitor<Derived>::TraverseTypeLoc(TypeLoc TL) {
#define ABSTRACT_TYPELOC(CLASS, BASE)
#define TYPELOC(CLASS, BASE) \
case TypeLoc::CLASS: \
- return getDerived().Traverse##CLASS##TypeLoc(TL.castAs<CLASS##TypeLoc>());
+ return getDerived().Traverse##CLASS##TypeLoc(TL.castAs<CLASS##TypeLoc>(), \
+ TraverseQualifier);
#include "clang/AST/TypeLocNodes.def"
}
@@ -779,46 +787,43 @@ bool RecursiveASTVisitor<Derived>::TraverseDecl(Decl *D) {
template <typename Derived>
bool RecursiveASTVisitor<Derived>::TraverseNestedNameSpecifier(
- NestedNameSpecifier *NNS) {
- if (!NNS)
+ NestedNameSpecifier NNS) {
+ switch (NNS.getKind()) {
+ case NestedNameSpecifier::Kind::Null:
+ case NestedNameSpecifier::Kind::Global:
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
return true;
-
- if (NNS->getPrefix())
- TRY_TO(TraverseNestedNameSpecifier(NNS->getPrefix()));
-
- switch (NNS->getKind()) {
- case NestedNameSpecifier::Identifier:
- case NestedNameSpecifier::Namespace:
- case NestedNameSpecifier::Global:
- case NestedNameSpecifier::Super:
+ case NestedNameSpecifier::Kind::Namespace:
+ TRY_TO(TraverseNestedNameSpecifier(NNS.getAsNamespaceAndPrefix().Prefix));
+ return true;
+ case NestedNameSpecifier::Kind::Type: {
+ auto *T = const_cast<Type *>(NNS.getAsType());
+ TRY_TO(TraverseNestedNameSpecifier(T->getPrefix()));
+ TRY_TO(TraverseType(QualType(T, 0), /*TraverseQualifier=*/false));
return true;
-
- case NestedNameSpecifier::TypeSpec:
- TRY_TO(TraverseType(QualType(NNS->getAsType(), 0)));
}
-
- return true;
+ }
+ llvm_unreachable("unhandled kind");
}
template <typename Derived>
bool RecursiveASTVisitor<Derived>::TraverseNestedNameSpecifierLoc(
NestedNameSpecifierLoc NNS) {
- if (!NNS)
+ switch (NNS.getNestedNameSpecifier().getKind()) {
+ case NestedNameSpecifier::Kind::Null:
+ case NestedNameSpecifier::Kind::Global:
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
return true;
-
- if (NestedNameSpecifierLoc Prefix = NNS.getPrefix())
- TRY_TO(TraverseNestedNameSpecifierLoc(Prefix));
-
- switch (NNS.getNestedNameSpecifier()->getKind()) {
- case NestedNameSpecifier::Identifier:
- case NestedNameSpecifier::Namespace:
- case NestedNameSpecifier::Global:
- case NestedNameSpecifier::Super:
+ case NestedNameSpecifier::Kind::Namespace:
+ TRY_TO(
+ TraverseNestedNameSpecifierLoc(NNS.castAsNamespaceAndPrefix().Prefix));
return true;
-
- case NestedNameSpecifier::TypeSpec:
- TRY_TO(TraverseTypeLoc(NNS.getTypeLoc()));
- break;
+ case NestedNameSpecifier::Kind::Type: {
+ TypeLoc TL = NNS.castAsTypeLoc();
+ TRY_TO(TraverseNestedNameSpecifierLoc(TL.getPrefix()));
+ TRY_TO(TraverseTypeLoc(TL, /*TraverseQualifier=*/false));
+ return true;
+ }
}
return true;
@@ -975,10 +980,13 @@ RecursiveASTVisitor<Derived>::TraverseLambdaCapture(LambdaExpr *LE,
// This macro makes available a variable T, the passed-in type.
#define DEF_TRAVERSE_TYPE(TYPE, CODE) \
template <typename Derived> \
- bool RecursiveASTVisitor<Derived>::Traverse##TYPE(TYPE *T) { \
+ bool RecursiveASTVisitor<Derived>::Traverse##TYPE(TYPE *T, \
+ bool TraverseQualifier) { \
if (!getDerived().shouldTraversePostOrder()) \
TRY_TO(WalkUpFrom##TYPE(T)); \
- { CODE; } \
+ { \
+ CODE; \
+ } \
if (getDerived().shouldTraversePostOrder()) \
TRY_TO(WalkUpFrom##TYPE(T)); \
return true; \
@@ -1000,10 +1008,11 @@ DEF_TRAVERSE_TYPE(RValueReferenceType,
{ TRY_TO(TraverseType(T->getPointeeType())); })
DEF_TRAVERSE_TYPE(MemberPointerType, {
- TRY_TO(TraverseNestedNameSpecifier(T->getQualifier()));
- if (T->isSugared())
- TRY_TO(TraverseType(
- QualType(T->getMostRecentCXXRecordDecl()->getTypeForDecl(), 0)));
+ NestedNameSpecifier Qualifier =
+ T->isSugared() ? cast<MemberPointerType>(T->getCanonicalTypeUnqualified())
+ ->getQualifier()
+ : T->getQualifier();
+ TRY_TO(TraverseNestedNameSpecifier(Qualifier));
TRY_TO(TraverseType(T->getPointeeType()));
})
@@ -1087,9 +1096,18 @@ DEF_TRAVERSE_TYPE(FunctionProtoType, {
TRY_TO(TraverseStmt(NE));
})
-DEF_TRAVERSE_TYPE(UsingType, {})
-DEF_TRAVERSE_TYPE(UnresolvedUsingType, {})
-DEF_TRAVERSE_TYPE(TypedefType, {})
+DEF_TRAVERSE_TYPE(UsingType, {
+ if (TraverseQualifier)
+ TRY_TO(TraverseNestedNameSpecifier(T->getQualifier()));
+})
+DEF_TRAVERSE_TYPE(UnresolvedUsingType, {
+ if (TraverseQualifier)
+ TRY_TO(TraverseNestedNameSpecifier(T->getQualifier()));
+})
+DEF_TRAVERSE_TYPE(TypedefType, {
+ if (TraverseQualifier)
+ TRY_TO(TraverseNestedNameSpecifier(T->getQualifier()));
+})
DEF_TRAVERSE_TYPE(TypeOfExprType,
{ TRY_TO(TraverseStmt(T->getUnderlyingExpr())); })
@@ -1115,13 +1133,7 @@ DEF_TRAVERSE_TYPE(AutoType, {
TRY_TO(TraverseTemplateArguments(T->getTypeConstraintArguments()));
}
})
-DEF_TRAVERSE_TYPE(DeducedTemplateSpecializationType, {
- TRY_TO(TraverseTemplateName(T->getTemplateName()));
- TRY_TO(TraverseType(T->getDeducedType()));
-})
-DEF_TRAVERSE_TYPE(RecordType, {})
-DEF_TRAVERSE_TYPE(EnumType, {})
DEF_TRAVERSE_TYPE(TemplateTypeParmType, {})
DEF_TRAVERSE_TYPE(SubstTemplateTypeParmType, {
TRY_TO(TraverseType(T->getReplacementType()));
@@ -1130,13 +1142,6 @@ DEF_TRAVERSE_TYPE(SubstTemplateTypeParmPackType, {
TRY_TO(TraverseTemplateArgument(T->getArgumentPack()));
})
-DEF_TRAVERSE_TYPE(TemplateSpecializationType, {
- TRY_TO(TraverseTemplateName(T->getTemplateName()));
- TRY_TO(TraverseTemplateArguments(T->template_arguments()));
-})
-
-DEF_TRAVERSE_TYPE(InjectedClassNameType, {})
-
DEF_TRAVERSE_TYPE(AttributedType,
{ TRY_TO(TraverseType(T->getModifiedType())); })
@@ -1165,22 +1170,54 @@ DEF_TRAVERSE_TYPE(ParenType, { TRY_TO(TraverseType(T->getInnerType())); })
DEF_TRAVERSE_TYPE(MacroQualifiedType,
{ TRY_TO(TraverseType(T->getUnderlyingType())); })
-DEF_TRAVERSE_TYPE(ElaboratedType, {
- if (T->getQualifier()) {
+template <typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseTagType(TagType *T,
+ bool TraverseQualifier) {
+ if (TraverseQualifier)
TRY_TO(TraverseNestedNameSpecifier(T->getQualifier()));
- }
- TRY_TO(TraverseType(T->getNamedType()));
-})
+ return true;
+}
+
+DEF_TRAVERSE_TYPE(EnumType, { TRY_TO(TraverseTagType(T, TraverseQualifier)); })
+DEF_TRAVERSE_TYPE(RecordType,
+ { TRY_TO(TraverseTagType(T, TraverseQualifier)); })
+DEF_TRAVERSE_TYPE(InjectedClassNameType,
+ { TRY_TO(TraverseTagType(T, TraverseQualifier)); })
-DEF_TRAVERSE_TYPE(DependentNameType,
- { TRY_TO(TraverseNestedNameSpecifier(T->getQualifier())); })
+DEF_TRAVERSE_TYPE(DependentNameType, {
+ if (TraverseQualifier)
+ TRY_TO(TraverseNestedNameSpecifier(T->getQualifier()));
+})
DEF_TRAVERSE_TYPE(DependentTemplateSpecializationType, {
const DependentTemplateStorage &S = T->getDependentTemplateName();
- TRY_TO(TraverseNestedNameSpecifier(S.getQualifier()));
+ if (TraverseQualifier)
+ TRY_TO(TraverseNestedNameSpecifier(S.getQualifier()));
TRY_TO(TraverseTemplateArguments(T->template_arguments()));
})
+DEF_TRAVERSE_TYPE(TemplateSpecializationType, {
+ if (TraverseQualifier) {
+ TRY_TO(TraverseTemplateName(T->getTemplateName()));
+ } else {
+ // FIXME: Try to preserve the rest of the template name.
+ TRY_TO(TraverseTemplateName(TemplateName(
+ T->getTemplateName().getAsTemplateDecl(/*IgnoreDeduced=*/true))));
+ }
+ TRY_TO(TraverseTemplateArguments(T->template_arguments()));
+})
+
+DEF_TRAVERSE_TYPE(DeducedTemplateSpecializationType, {
+ if (TraverseQualifier) {
+ TRY_TO(TraverseTemplateName(T->getTemplateName()));
+ } else {
+ // FIXME: Try to preserve the rest of the template name.
+ TRY_TO(TraverseTemplateName(TemplateName(
+ T->getTemplateName().getAsTemplateDecl(/*IgnoreDeduced=*/true))));
+ }
+ TRY_TO(TraverseType(T->getDeducedType()));
+})
+
DEF_TRAVERSE_TYPE(PackExpansionType, { TRY_TO(TraverseType(T->getPattern())); })
DEF_TRAVERSE_TYPE(ObjCTypeParamType, {})
@@ -1221,13 +1258,16 @@ DEF_TRAVERSE_TYPE(PredefinedSugarType, {})
// continue to work.
#define DEF_TRAVERSE_TYPELOC(TYPE, CODE) \
template <typename Derived> \
- bool RecursiveASTVisitor<Derived>::Traverse##TYPE##Loc(TYPE##Loc TL) { \
+ bool RecursiveASTVisitor<Derived>::Traverse##TYPE##Loc( \
+ TYPE##Loc TL, bool TraverseQualifier) { \
if (!getDerived().shouldTraversePostOrder()) { \
TRY_TO(WalkUpFrom##TYPE##Loc(TL)); \
if (getDerived().shouldWalkTypesOfTypeLocs()) \
TRY_TO(WalkUpFrom##TYPE(const_cast<TYPE *>(TL.getTypePtr()))); \
} \
- { CODE; } \
+ { \
+ CODE; \
+ } \
if (getDerived().shouldTraversePostOrder()) { \
TRY_TO(WalkUpFrom##TYPE##Loc(TL)); \
if (getDerived().shouldWalkTypesOfTypeLocs()) \
@@ -1237,8 +1277,10 @@ DEF_TRAVERSE_TYPE(PredefinedSugarType, {})
}
template <typename Derived>
-bool
-RecursiveASTVisitor<Derived>::TraverseQualifiedTypeLoc(QualifiedTypeLoc TL) {
+bool RecursiveASTVisitor<Derived>::TraverseQualifiedTypeLoc(
+ QualifiedTypeLoc TL, bool TraverseQualifier) {
+ assert(TraverseQualifier &&
+ "Qualifiers should never occur within NestedNameSpecifiers");
// Move this over to the 'main' typeloc tree. Note that this is a
// move -- we pretend that we were really looking at the unqualified
// typeloc all along -- rather than a recursion, so we don't follow
@@ -1391,9 +1433,21 @@ DEF_TRAVERSE_TYPELOC(FunctionProtoType, {
TRY_TO(TraverseStmt(NE));
})
-DEF_TRAVERSE_TYPELOC(UsingType, {})
-DEF_TRAVERSE_TYPELOC(UnresolvedUsingType, {})
-DEF_TRAVERSE_TYPELOC(TypedefType, {})
+DEF_TRAVERSE_TYPELOC(UsingType, {
+ if (NestedNameSpecifierLoc QualifierLoc = TL.getQualifierLoc();
+ TraverseQualifier && QualifierLoc)
+ TRY_TO(TraverseNestedNameSpecifierLoc(QualifierLoc));
+})
+DEF_TRAVERSE_TYPELOC(UnresolvedUsingType, {
+ if (NestedNameSpecifierLoc QualifierLoc = TL.getQualifierLoc();
+ TraverseQualifier && QualifierLoc)
+ TRY_TO(TraverseNestedNameSpecifierLoc(QualifierLoc));
+})
+DEF_TRAVERSE_TYPELOC(TypedefType, {
+ if (NestedNameSpecifierLoc QualifierLoc = TL.getQualifierLoc();
+ TraverseQualifier && QualifierLoc)
+ TRY_TO(TraverseNestedNameSpecifierLoc(QualifierLoc));
+})
DEF_TRAVERSE_TYPELOC(TypeOfExprType,
{ TRY_TO(TraverseStmt(TL.getUnderlyingExpr())); })
@@ -1423,13 +1477,6 @@ DEF_TRAVERSE_TYPELOC(AutoType, {
}
})
-DEF_TRAVERSE_TYPELOC(DeducedTemplateSpecializationType, {
- TRY_TO(TraverseTemplateName(TL.getTypePtr()->getTemplateName()));
- TRY_TO(TraverseType(TL.getTypePtr()->getDeducedType()));
-})
-
-DEF_TRAVERSE_TYPELOC(RecordType, {})
-DEF_TRAVERSE_TYPELOC(EnumType, {})
DEF_TRAVERSE_TYPELOC(TemplateTypeParmType, {})
DEF_TRAVERSE_TYPELOC(SubstTemplateTypeParmType, {
TRY_TO(TraverseType(TL.getTypePtr()->getReplacementType()));
@@ -1438,16 +1485,6 @@ DEF_TRAVERSE_TYPELOC(SubstTemplateTypeParmPackType, {
TRY_TO(TraverseTemplateArgument(TL.getTypePtr()->getArgumentPack()));
})
-// FIXME: use the loc for the template name?
-DEF_TRAVERSE_TYPELOC(TemplateSpecializationType, {
- TRY_TO(TraverseTemplateName(TL.getTypePtr()->getTemplateName()));
- for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I) {
- TRY_TO(TraverseTemplateArgumentLoc(TL.getArgLoc(I)));
- }
-})
-
-DEF_TRAVERSE_TYPELOC(InjectedClassNameType, {})
-
DEF_TRAVERSE_TYPELOC(ParenType, { TRY_TO(TraverseTypeLoc(TL.getInnerLoc())); })
DEF_TRAVERSE_TYPELOC(MacroQualifiedType,
@@ -1468,27 +1505,63 @@ DEF_TRAVERSE_TYPELOC(HLSLAttributedResourceType,
DEF_TRAVERSE_TYPELOC(HLSLInlineSpirvType,
{ TRY_TO(TraverseType(TL.getType())); })
-DEF_TRAVERSE_TYPELOC(ElaboratedType, {
- if (TL.getQualifierLoc()) {
- TRY_TO(TraverseNestedNameSpecifierLoc(TL.getQualifierLoc()));
- }
- TRY_TO(TraverseTypeLoc(TL.getNamedTypeLoc()));
-})
+template <typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseTagTypeLoc(TagTypeLoc TL,
+ bool TraverseQualifier) {
+ if (NestedNameSpecifierLoc QualifierLoc = TL.getQualifierLoc();
+ TraverseQualifier && QualifierLoc)
+ TRY_TO(TraverseNestedNameSpecifierLoc(QualifierLoc));
+ return true;
+}
+
+DEF_TRAVERSE_TYPELOC(EnumType,
+ { TRY_TO(TraverseTagTypeLoc(TL, TraverseQualifier)); })
+DEF_TRAVERSE_TYPELOC(RecordType,
+ { TRY_TO(TraverseTagTypeLoc(TL, TraverseQualifier)); })
+DEF_TRAVERSE_TYPELOC(InjectedClassNameType,
+ { TRY_TO(TraverseTagTypeLoc(TL, TraverseQualifier)); })
DEF_TRAVERSE_TYPELOC(DependentNameType, {
- TRY_TO(TraverseNestedNameSpecifierLoc(TL.getQualifierLoc()));
+ if (TraverseQualifier)
+ TRY_TO(TraverseNestedNameSpecifierLoc(TL.getQualifierLoc()));
})
DEF_TRAVERSE_TYPELOC(DependentTemplateSpecializationType, {
- if (TL.getQualifierLoc()) {
+ if (TraverseQualifier)
TRY_TO(TraverseNestedNameSpecifierLoc(TL.getQualifierLoc()));
+
+ for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I) {
+ TRY_TO(TraverseTemplateArgumentLoc(TL.getArgLoc(I)));
}
+})
+
+DEF_TRAVERSE_TYPELOC(TemplateSpecializationType, {
+ if (TraverseQualifier)
+ TRY_TO(TraverseNestedNameSpecifierLoc(TL.getQualifierLoc()));
+
+ // FIXME: Try to preserve the rest of the template name.
+ TRY_TO(TraverseTemplateName(
+ TemplateName(TL.getTypePtr()->getTemplateName().getAsTemplateDecl(
+ /*IgnoreDeduced=*/true))));
for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I) {
TRY_TO(TraverseTemplateArgumentLoc(TL.getArgLoc(I)));
}
})
+DEF_TRAVERSE_TYPELOC(DeducedTemplateSpecializationType, {
+ if (TraverseQualifier)
+ TRY_TO(TraverseNestedNameSpecifierLoc(TL.getQualifierLoc()));
+
+ const auto *T = TL.getTypePtr();
+ // FIXME: Try to preserve the rest of the template name.
+ TRY_TO(
+ TraverseTemplateName(TemplateName(T->getTemplateName().getAsTemplateDecl(
+ /*IgnoreDeduced=*/true))));
+
+ TRY_TO(TraverseType(T->getDeducedType()));
+})
+
DEF_TRAVERSE_TYPELOC(PackExpansionType,
{ TRY_TO(TraverseTypeLoc(TL.getPatternLoc())); })
@@ -1631,8 +1704,9 @@ DEF_TRAVERSE_DECL(FriendDecl, {
TRY_TO(TraverseTypeLoc(D->getFriendType()->getTypeLoc()));
// Traverse any CXXRecordDecl owned by this type, since
// it will not be in the parent context:
- if (auto *ET = D->getFriendType()->getType()->getAs<ElaboratedType>())
- TRY_TO(TraverseDecl(ET->getOwnedTagDecl()));
+ if (auto *TT = D->getFriendType()->getType()->getAs<TagType>();
+ TT && TT->isTagOwned())
+ TRY_TO(TraverseDecl(TT->getOriginalDecl()));
} else {
TRY_TO(TraverseDecl(D->getFriendDecl()));
}
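For orientation, a hypothetical visitor written against the updated signatures above (not part of the patch): TraverseNestedNameSpecifier now takes the specifier by value, and qualifier traversal of type nodes is gated by the new TraverseQualifier flag.

#include "clang/AST/RecursiveASTVisitor.h"

// Invented example: count namespace qualifiers encountered while traversing.
class QualifierCounter : public clang::RecursiveASTVisitor<QualifierCounter> {
public:
  bool TraverseNestedNameSpecifier(clang::NestedNameSpecifier NNS) {
    if (NNS.getKind() == clang::NestedNameSpecifier::Kind::Namespace)
      ++NamespaceCount;
    // Delegate to the base implementation, which walks prefixes and type
    // qualifiers as implemented in the hunks above.
    return RecursiveASTVisitor::TraverseNestedNameSpecifier(NNS);
  }
  unsigned NamespaceCount = 0;
};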
diff --git a/clang/include/clang/AST/TemplateBase.h b/clang/include/clang/AST/TemplateBase.h
index eb384ea..69481e8 100644
--- a/clang/include/clang/AST/TemplateBase.h
+++ b/clang/include/clang/AST/TemplateBase.h
@@ -15,7 +15,7 @@
#define LLVM_CLANG_AST_TEMPLATEBASE_H
#include "clang/AST/DependenceFlags.h"
-#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/NestedNameSpecifierBase.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
#include "clang/Basic/LLVM.h"
@@ -478,31 +478,25 @@ public:
/// Location information for a TemplateArgument.
struct TemplateArgumentLocInfo {
-private:
struct TemplateTemplateArgLocInfo {
- // FIXME: We'd like to just use the qualifier in the TemplateName,
- // but template arguments get canonicalized too quickly.
- NestedNameSpecifier *Qualifier;
void *QualifierLocData;
+ SourceLocation TemplateKwLoc;
SourceLocation TemplateNameLoc;
SourceLocation EllipsisLoc;
};
- llvm::PointerUnion<TemplateTemplateArgLocInfo *, Expr *, TypeSourceInfo *>
- Pointer;
-
TemplateTemplateArgLocInfo *getTemplate() const {
return cast<TemplateTemplateArgLocInfo *>(Pointer);
}
-public:
TemplateArgumentLocInfo() {}
TemplateArgumentLocInfo(TypeSourceInfo *Declarator) { Pointer = Declarator; }
TemplateArgumentLocInfo(Expr *E) { Pointer = E; }
// Ctx is used for allocation -- this case is unusually large and also rare,
// so we store the payload out-of-line.
- TemplateArgumentLocInfo(ASTContext &Ctx, NestedNameSpecifierLoc QualifierLoc,
+ TemplateArgumentLocInfo(ASTContext &Ctx, SourceLocation TemplateKwLoc,
+ NestedNameSpecifierLoc QualifierLoc,
SourceLocation TemplateNameLoc,
SourceLocation EllipsisLoc);
@@ -512,10 +506,8 @@ public:
Expr *getAsExpr() const { return cast<Expr *>(Pointer); }
- NestedNameSpecifierLoc getTemplateQualifierLoc() const {
- const auto *Template = getTemplate();
- return NestedNameSpecifierLoc(Template->Qualifier,
- Template->QualifierLocData);
+ SourceLocation getTemplateKwLoc() const {
+ return getTemplate()->TemplateKwLoc;
}
SourceLocation getTemplateNameLoc() const {
@@ -525,6 +517,10 @@ public:
SourceLocation getTemplateEllipsisLoc() const {
return getTemplate()->EllipsisLoc;
}
+
+private:
+ llvm::PointerUnion<TemplateTemplateArgLocInfo *, Expr *, TypeSourceInfo *>
+ Pointer;
};
/// Location wrapper for a TemplateArgument. TemplateArgument is to
@@ -558,14 +554,10 @@ public:
}
TemplateArgumentLoc(ASTContext &Ctx, const TemplateArgument &Argument,
+ SourceLocation TemplateKWLoc,
NestedNameSpecifierLoc QualifierLoc,
SourceLocation TemplateNameLoc,
- SourceLocation EllipsisLoc = SourceLocation())
- : Argument(Argument),
- LocInfo(Ctx, QualifierLoc, TemplateNameLoc, EllipsisLoc) {
- assert(Argument.getKind() == TemplateArgument::Template ||
- Argument.getKind() == TemplateArgument::TemplateExpansion);
- }
+ SourceLocation EllipsisLoc = SourceLocation());
/// - Fetches the primary location of the argument.
SourceLocation getLocation() const {
@@ -614,13 +606,15 @@ public:
return LocInfo.getAsExpr();
}
- NestedNameSpecifierLoc getTemplateQualifierLoc() const {
+ SourceLocation getTemplateKWLoc() const {
if (Argument.getKind() != TemplateArgument::Template &&
Argument.getKind() != TemplateArgument::TemplateExpansion)
- return NestedNameSpecifierLoc();
- return LocInfo.getTemplateQualifierLoc();
+ return SourceLocation();
+ return LocInfo.getTemplateKwLoc();
}
+ NestedNameSpecifierLoc getTemplateQualifierLoc() const;
+
SourceLocation getTemplateNameLoc() const {
if (Argument.getKind() != TemplateArgument::Template &&
Argument.getKind() != TemplateArgument::TemplateExpansion)
diff --git a/clang/include/clang/AST/TemplateName.h b/clang/include/clang/AST/TemplateName.h
index 63949f8..37ea401 100644
--- a/clang/include/clang/AST/TemplateName.h
+++ b/clang/include/clang/AST/TemplateName.h
@@ -14,7 +14,7 @@
#define LLVM_CLANG_AST_TEMPLATENAME_H
#include "clang/AST/DependenceFlags.h"
-#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/NestedNameSpecifierBase.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/UnsignedOrNone.h"
@@ -335,10 +335,18 @@ public:
/// structure, if any.
QualifiedTemplateName *getAsQualifiedTemplateName() const;
+ /// Retrieve the underlying qualified template name,
+ /// looking through underlying nodes.
+ QualifiedTemplateName *getAsAdjustedQualifiedTemplateName() const;
+
/// Retrieve the underlying dependent template name
/// structure, if any.
DependentTemplateName *getAsDependentTemplateName() const;
+ // Retrieve the qualifier stored in either an underlying DependentTemplateName
+ // or QualifiedTemplateName.
+ NestedNameSpecifier getQualifier() const;
+
/// Retrieve the using shadow declaration through which the underlying
/// template declaration is introduced, if any.
UsingShadowDecl *getAsUsingShadowDecl() const;
@@ -503,7 +511,7 @@ class QualifiedTemplateName : public llvm::FoldingSetNode {
/// "template" keyword is always redundant in this case (otherwise,
/// the template name would be a dependent name and we would express
/// this name with DependentTemplateName).
- llvm::PointerIntPair<NestedNameSpecifier *, 1> Qualifier;
+ llvm::PointerIntPair<NestedNameSpecifier, 1, bool> Qualifier;
/// The underlying template name, it is either
/// 1) a Template -- a template declaration that this qualified name refers
@@ -512,7 +520,7 @@ class QualifiedTemplateName : public llvm::FoldingSetNode {
/// using-shadow declaration.
TemplateName UnderlyingTemplate;
- QualifiedTemplateName(NestedNameSpecifier *NNS, bool TemplateKeyword,
+ QualifiedTemplateName(NestedNameSpecifier NNS, bool TemplateKeyword,
TemplateName Template)
: Qualifier(NNS, TemplateKeyword ? 1 : 0), UnderlyingTemplate(Template) {
assert(UnderlyingTemplate.getKind() == TemplateName::Template ||
@@ -521,7 +529,7 @@ class QualifiedTemplateName : public llvm::FoldingSetNode {
public:
/// Return the nested name specifier that qualifies this name.
- NestedNameSpecifier *getQualifier() const { return Qualifier.getPointer(); }
+ NestedNameSpecifier getQualifier() const { return Qualifier.getPointer(); }
/// Whether the template name was prefixed by the "template"
/// keyword.
@@ -534,9 +542,9 @@ public:
Profile(ID, getQualifier(), hasTemplateKeyword(), UnderlyingTemplate);
}
- static void Profile(llvm::FoldingSetNodeID &ID, NestedNameSpecifier *NNS,
+ static void Profile(llvm::FoldingSetNodeID &ID, NestedNameSpecifier NNS,
bool TemplateKeyword, TemplateName TN) {
- ID.AddPointer(NNS);
+ NNS.Profile(ID);
ID.AddBoolean(TemplateKeyword);
ID.AddPointer(TN.getAsVoidPointer());
}
@@ -585,18 +593,18 @@ class DependentTemplateStorage {
///
/// The bit stored in this qualifier describes whether the \c Name field
/// was preceeded by a template keyword.
- llvm::PointerIntPair<NestedNameSpecifier *, 1, bool> Qualifier;
+ llvm::PointerIntPair<NestedNameSpecifier, 1, bool> Qualifier;
/// The dependent template name.
IdentifierOrOverloadedOperator Name;
public:
- DependentTemplateStorage(NestedNameSpecifier *Qualifier,
+ DependentTemplateStorage(NestedNameSpecifier Qualifier,
IdentifierOrOverloadedOperator Name,
bool HasTemplateKeyword);
/// Return the nested name specifier that qualifies this name.
- NestedNameSpecifier *getQualifier() const { return Qualifier.getPointer(); }
+ NestedNameSpecifier getQualifier() const { return Qualifier.getPointer(); }
IdentifierOrOverloadedOperator getName() const { return Name; }
@@ -609,10 +617,10 @@ public:
Profile(ID, getQualifier(), getName(), hasTemplateKeyword());
}
- static void Profile(llvm::FoldingSetNodeID &ID, NestedNameSpecifier *NNS,
+ static void Profile(llvm::FoldingSetNodeID &ID, NestedNameSpecifier NNS,
IdentifierOrOverloadedOperator Name,
bool HasTemplateKeyword) {
- ID.AddPointer(NNS);
+ NNS.Profile(ID);
ID.AddBoolean(HasTemplateKeyword);
Name.Profile(ID);
}
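A small illustration of the value-type qualifier accessor added above (not part of the patch; the helper name is invented, and the contextual bool conversion on NestedNameSpecifier is assumed from its use elsewhere in this diff):

#include "clang/AST/TemplateName.h"

// Hypothetical helper: the qualifier stored in a QualifiedTemplateName or
// DependentTemplateName is now returned by value rather than as a pointer.
static bool isTypeQualifiedTemplateName(clang::TemplateName TN) {
  clang::NestedNameSpecifier Q = TN.getQualifier();
  return Q && Q.getKind() == clang::NestedNameSpecifier::Kind::Type;
}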
diff --git a/clang/include/clang/AST/TextNodeDumper.h b/clang/include/clang/AST/TextNodeDumper.h
index 1917a8a..6d279511 100644
--- a/clang/include/clang/AST/TextNodeDumper.h
+++ b/clang/include/clang/AST/TextNodeDumper.h
@@ -211,7 +211,7 @@ public:
void dumpAccessSpecifier(AccessSpecifier AS);
void dumpCleanupObject(const ExprWithCleanups::CleanupObject &C);
void dumpTemplateSpecializationKind(TemplateSpecializationKind TSK);
- void dumpNestedNameSpecifier(const NestedNameSpecifier *NNS);
+ void dumpNestedNameSpecifier(NestedNameSpecifier NNS);
void dumpConceptReference(const ConceptReference *R);
void dumpTemplateArgument(const TemplateArgument &TA);
void dumpBareTemplateName(TemplateName TN);
diff --git a/clang/include/clang/AST/Type.h b/clang/include/clang/AST/Type.h
index 12dce30..c4c23c8 100644
--- a/clang/include/clang/AST/Type.h
+++ b/clang/include/clang/AST/Type.h
@@ -18,7 +18,7 @@
#define LLVM_CLANG_AST_TYPE_H
#include "clang/AST/DependenceFlags.h"
-#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/NestedNameSpecifierBase.h"
#include "clang/AST/TemplateName.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/AttrKinds.h"
@@ -133,6 +133,7 @@ struct PrintingPolicy;
class RecordDecl;
class Stmt;
class TagDecl;
+class ClassTemplateDecl;
class TemplateArgument;
class TemplateArgumentListInfo;
class TemplateArgumentLoc;
@@ -2044,8 +2045,8 @@ protected:
unsigned InnerRef : 1;
};
- class TypeWithKeywordBitfields {
- friend class TypeWithKeyword;
+ class KeywordWrapperBitfields {
+ template <class> friend class KeywordWrapper;
LLVM_PREFERRED_TYPE(TypeBitfields)
unsigned : NumTypeBits;
@@ -2057,15 +2058,23 @@ protected:
enum { NumTypeWithKeywordBits = NumTypeBits + 8 };
- class ElaboratedTypeBitfields {
- friend class ElaboratedType;
+ class TagTypeBitfields {
+ friend class TagType;
- LLVM_PREFERRED_TYPE(TypeWithKeywordBitfields)
+ LLVM_PREFERRED_TYPE(KeywordWrapperBitfields)
unsigned : NumTypeWithKeywordBits;
- /// Whether the ElaboratedType has a trailing OwnedTagDecl.
+ /// Whether the TagType has a trailing Qualifier.
LLVM_PREFERRED_TYPE(bool)
- unsigned HasOwnedTagDecl : 1;
+ unsigned HasQualifier : 1;
+
+ /// Whether the TagType owns the Tag.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned OwnsTag : 1;
+
+ /// Whether the TagType was created from an injected name.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned IsInjected : 1;
};
class VectorTypeBitfields {
@@ -2124,22 +2133,37 @@ protected:
unsigned Kind : 1;
};
+ class UnresolvedUsingBitfields {
+ friend class UnresolvedUsingType;
+
+ LLVM_PREFERRED_TYPE(KeywordWrapperBitfields)
+ unsigned : NumTypeWithKeywordBits;
+
+ /// True if there is a non-null qualifier.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned hasQualifier : 1;
+ };
+
class UsingBitfields {
friend class UsingType;
- LLVM_PREFERRED_TYPE(TypeBitfields)
- unsigned : NumTypeBits;
+ LLVM_PREFERRED_TYPE(KeywordWrapperBitfields)
+ unsigned : NumTypeWithKeywordBits;
- /// True if the underlying type is different from the declared one.
+ /// True if there is a non-null qualifier.
LLVM_PREFERRED_TYPE(bool)
- unsigned hasTypeDifferentFromDecl : 1;
+ unsigned hasQualifier : 1;
};
class TypedefBitfields {
friend class TypedefType;
- LLVM_PREFERRED_TYPE(TypeBitfields)
- unsigned : NumTypeBits;
+ LLVM_PREFERRED_TYPE(KeywordWrapperBitfields)
+ unsigned : NumTypeWithKeywordBits;
+
+ /// True if there is a non-null qualifier.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned hasQualifier : 1;
/// True if the underlying type is different from the declared one.
LLVM_PREFERRED_TYPE(bool)
@@ -2205,8 +2229,8 @@ protected:
class TemplateSpecializationTypeBitfields {
friend class TemplateSpecializationType;
- LLVM_PREFERRED_TYPE(TypeBitfields)
- unsigned : NumTypeBits;
+ LLVM_PREFERRED_TYPE(KeywordWrapperBitfields)
+ unsigned : NumTypeWithKeywordBits;
/// Whether this template specialization type is a substituted type alias.
LLVM_PREFERRED_TYPE(bool)
@@ -2225,7 +2249,7 @@ protected:
class DependentTemplateSpecializationTypeBitfields {
friend class DependentTemplateSpecializationType;
- LLVM_PREFERRED_TYPE(TypeWithKeywordBitfields)
+ LLVM_PREFERRED_TYPE(KeywordWrapperBitfields)
unsigned : NumTypeWithKeywordBits;
/// The number of template arguments named in this class template
@@ -2305,13 +2329,14 @@ protected:
AutoTypeBitfields AutoTypeBits;
TypeOfBitfields TypeOfBits;
TypedefBitfields TypedefBits;
+ UnresolvedUsingBitfields UnresolvedUsingBits;
UsingBitfields UsingBits;
BuiltinTypeBitfields BuiltinTypeBits;
FunctionTypeBitfields FunctionTypeBits;
ObjCObjectTypeBitfields ObjCObjectTypeBits;
ReferenceTypeBitfields ReferenceTypeBits;
- TypeWithKeywordBitfields TypeWithKeywordBits;
- ElaboratedTypeBitfields ElaboratedTypeBits;
+ KeywordWrapperBitfields KeywordWrapperBits;
+ TagTypeBitfields TagTypeBits;
VectorTypeBitfields VectorTypeBits;
TemplateTypeParmTypeBitfields TemplateTypeParmTypeBits;
SubstTemplateTypeParmTypeBitfields SubstTemplateTypeParmTypeBits;
@@ -2724,6 +2749,7 @@ public:
bool isHLSLAttributedResourceType() const;
bool isHLSLInlineSpirvType() const;
bool isHLSLResourceRecord() const;
+ bool isHLSLResourceRecordArray() const;
bool isHLSLIntangibleType()
const; // Any HLSL intangible type (builtin, array, class)
@@ -2931,6 +2957,11 @@ public:
/// qualifiers from the outermost type.
const ArrayType *castAsArrayTypeUnsafe() const;
+ /// If this type represents a qualified-id, this returns its nested name
+ /// specifier. For example, for the qualified-id "foo::bar::baz", this returns
+ /// "foo::bar". Returns null if this type represents an unqualified-id.
+ NestedNameSpecifier getPrefix() const;
+
/// Determine whether this type had the specified attribute applied to it
/// (looking through top-level type sugar).
bool hasAttr(attr::Kind AK) const;
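A hypothetical use of the new accessor documented above (not from the patch; the helper name is invented and the contextual bool conversion is assumed):

#include "clang/AST/Type.h"

// For a type written as "foo::bar::baz", getPrefix() names "foo::bar"; for an
// unqualified-id it yields a null specifier.
static bool isWrittenWithQualifier(clang::QualType T) {
  return bool(T->getPrefix());
}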
@@ -3608,12 +3639,12 @@ class MemberPointerType : public Type, public llvm::FoldingSetNode {
/// The class of which the pointee is a member. Must ultimately be a
/// CXXRecordType, but could be a typedef or a template parameter too.
- NestedNameSpecifier *Qualifier;
+ NestedNameSpecifier Qualifier;
- MemberPointerType(QualType Pointee, NestedNameSpecifier *Qualifier,
+ MemberPointerType(QualType Pointee, NestedNameSpecifier Qualifier,
QualType CanonicalPtr)
: Type(MemberPointer, CanonicalPtr,
- (toTypeDependence(Qualifier->getDependence()) &
+ (toTypeDependence(Qualifier.getDependence()) &
~TypeDependence::VariablyModified) |
Pointee->getDependence()),
PointeeType(Pointee), Qualifier(Qualifier) {}
@@ -3633,7 +3664,7 @@ public:
return !PointeeType->isFunctionProtoType();
}
- NestedNameSpecifier *getQualifier() const { return Qualifier; }
+ NestedNameSpecifier getQualifier() const { return Qualifier; }
/// Note: this can trigger extra deserialization when external AST sources are
/// used. Prefer `getCXXRecordDecl()` unless you really need the most recent
/// decl.
@@ -3652,7 +3683,7 @@ public:
}
static void Profile(llvm::FoldingSetNodeID &ID, QualType Pointee,
- const NestedNameSpecifier *Qualifier,
+ const NestedNameSpecifier Qualifier,
const CXXRecordDecl *Cls);
static bool classof(const Type *T) {
@@ -5767,84 +5798,254 @@ public:
bool Canonical);
};
+/// The elaboration keyword that precedes a qualified type name or
+/// introduces an elaborated-type-specifier.
+enum class ElaboratedTypeKeyword {
+ /// The "struct" keyword introduces the elaborated-type-specifier.
+ Struct,
+
+ /// The "__interface" keyword introduces the elaborated-type-specifier.
+ Interface,
+
+ /// The "union" keyword introduces the elaborated-type-specifier.
+ Union,
+
+ /// The "class" keyword introduces the elaborated-type-specifier.
+ Class,
+
+ /// The "enum" keyword introduces the elaborated-type-specifier.
+ Enum,
+
+ /// The "typename" keyword precedes the qualified type name, e.g.,
+ /// \c typename T::type.
+ Typename,
+
+ /// No keyword precedes the qualified type name.
+ None
+};
+
+/// The kind of a tag type.
+enum class TagTypeKind {
+ /// The "struct" keyword.
+ Struct,
+
+ /// The "__interface" keyword.
+ Interface,
+
+ /// The "union" keyword.
+ Union,
+
+ /// The "class" keyword.
+ Class,
+
+ /// The "enum" keyword.
+ Enum
+};
+
+/// Provides a few static helpers for converting and printing
+/// elaborated type keyword and tag type kind enumerations.
+struct KeywordHelpers {
+ /// Converts a type specifier (DeclSpec::TST) into an elaborated type keyword.
+ static ElaboratedTypeKeyword getKeywordForTypeSpec(unsigned TypeSpec);
+
+ /// Converts a type specifier (DeclSpec::TST) into a tag type kind.
+ /// It is an error to provide a type specifier which *isn't* a tag kind here.
+ static TagTypeKind getTagTypeKindForTypeSpec(unsigned TypeSpec);
+
+ /// Converts a TagTypeKind into an elaborated type keyword.
+ static ElaboratedTypeKeyword getKeywordForTagTypeKind(TagTypeKind Tag);
+
+ /// Converts an elaborated type keyword into a TagTypeKind.
+ /// It is an error to provide an elaborated type keyword
+ /// which *isn't* a tag kind here.
+ static TagTypeKind getTagTypeKindForKeyword(ElaboratedTypeKeyword Keyword);
+
+ static bool KeywordIsTagTypeKind(ElaboratedTypeKeyword Keyword);
+
+ static StringRef getKeywordName(ElaboratedTypeKeyword Keyword);
+
+ static StringRef getTagTypeKindName(TagTypeKind Kind) {
+ return getKeywordName(getKeywordForTagTypeKind(Kind));
+ }
+};
+
+template <class T> class KeywordWrapper : public T, public KeywordHelpers {
+protected:
+ template <class... As>
+ KeywordWrapper(ElaboratedTypeKeyword Keyword, As &&...as)
+ : T(std::forward<As>(as)...) {
+ this->KeywordWrapperBits.Keyword = llvm::to_underlying(Keyword);
+ }
+
+public:
+ ElaboratedTypeKeyword getKeyword() const {
+ return static_cast<ElaboratedTypeKeyword>(this->KeywordWrapperBits.Keyword);
+ }
+
+ class CannotCastToThisType {};
+ static CannotCastToThisType classof(const T *);
+};
+
+/// A helper class for Type nodes having an ElaboratedTypeKeyword.
+/// The keyword is stored in the free bits of the base class.
+class TypeWithKeyword : public KeywordWrapper<Type> {
+protected:
+ TypeWithKeyword(ElaboratedTypeKeyword Keyword, TypeClass tc,
+ QualType Canonical, TypeDependence Dependence)
+ : KeywordWrapper(Keyword, tc, Canonical, Dependence) {}
+};
+
+template <class T> struct FoldingSetPlaceholder : llvm::FoldingSetNode {
+ void Profile(llvm::FoldingSetNodeID &ID) { getType()->Profile(ID); }
+
+ inline const T *getType() const {
+ constexpr unsigned long Offset =
+ llvm::alignTo(sizeof(T), alignof(FoldingSetPlaceholder));
+ const auto *Addr = reinterpret_cast<const T *>(
+ reinterpret_cast<const char *>(this) - Offset);
+ assert(llvm::isAddrAligned(llvm::Align(alignof(T)), Addr));
+ return Addr;
+ }
+};
+
/// Represents the dependent type named by a dependently-scoped
/// typename using declaration, e.g.
/// using typename Base<T>::foo;
///
/// Template instantiation turns these into the underlying type.
-class UnresolvedUsingType : public Type {
+class UnresolvedUsingType final
+ : public TypeWithKeyword,
+ private llvm::TrailingObjects<UnresolvedUsingType,
+ FoldingSetPlaceholder<UnresolvedUsingType>,
+ NestedNameSpecifier> {
friend class ASTContext; // ASTContext creates these.
+ friend TrailingObjects;
UnresolvedUsingTypenameDecl *Decl;
- UnresolvedUsingType(const UnresolvedUsingTypenameDecl *D)
- : Type(UnresolvedUsing, QualType(),
- TypeDependence::DependentInstantiation),
- Decl(const_cast<UnresolvedUsingTypenameDecl *>(D)) {}
+ unsigned numTrailingObjects(
+ OverloadToken<FoldingSetPlaceholder<UnresolvedUsingType>>) const {
+ assert(UnresolvedUsingBits.hasQualifier ||
+ getKeyword() != ElaboratedTypeKeyword::None);
+ return 1;
+ }
+
+ FoldingSetPlaceholder<UnresolvedUsingType> *getFoldingSetPlaceholder() {
+ assert(numTrailingObjects(
+ OverloadToken<FoldingSetPlaceholder<UnresolvedUsingType>>{}) ==
+ 1);
+ return getTrailingObjects<FoldingSetPlaceholder<UnresolvedUsingType>>();
+ }
+
+ UnresolvedUsingType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier,
+ const UnresolvedUsingTypenameDecl *D,
+ const Type *CanonicalType);
public:
+ NestedNameSpecifier getQualifier() const {
+ return UnresolvedUsingBits.hasQualifier
+ ? *getTrailingObjects<NestedNameSpecifier>()
+ : std::nullopt;
+ }
+
UnresolvedUsingTypenameDecl *getDecl() const { return Decl; }
bool isSugared() const { return false; }
QualType desugar() const { return QualType(this, 0); }
- static bool classof(const Type *T) {
- return T->getTypeClass() == UnresolvedUsing;
+ static void Profile(llvm::FoldingSetNodeID &ID, ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier,
+ const UnresolvedUsingTypenameDecl *D) {
+ static_assert(llvm::to_underlying(ElaboratedTypeKeyword::None) <= 7);
+ ID.AddInteger(uintptr_t(D) | llvm::to_underlying(Keyword));
+ if (Qualifier)
+ Qualifier.Profile(ID);
}
- void Profile(llvm::FoldingSetNodeID &ID) {
- return Profile(ID, Decl);
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ Profile(ID, getKeyword(), getQualifier(), getDecl());
}
- static void Profile(llvm::FoldingSetNodeID &ID,
- UnresolvedUsingTypenameDecl *D) {
- ID.AddPointer(D);
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == UnresolvedUsing;
}
};
-class UsingType final : public Type,
+class UsingType final : public TypeWithKeyword,
public llvm::FoldingSetNode,
- private llvm::TrailingObjects<UsingType, QualType> {
- UsingShadowDecl *Found;
+ llvm::TrailingObjects<UsingType, NestedNameSpecifier> {
+ UsingShadowDecl *D;
+ QualType UnderlyingType;
+
friend class ASTContext; // ASTContext creates these.
friend TrailingObjects;
- UsingType(const UsingShadowDecl *Found, QualType Underlying, QualType Canon);
+ UsingType(ElaboratedTypeKeyword Keyword, NestedNameSpecifier Qualifier,
+ const UsingShadowDecl *D, QualType UnderlyingType);
public:
- UsingShadowDecl *getFoundDecl() const { return Found; }
- QualType getUnderlyingType() const;
-
- bool isSugared() const { return true; }
+ NestedNameSpecifier getQualifier() const {
+ return UsingBits.hasQualifier ? *getTrailingObjects() : std::nullopt;
+ }
- // This always has the 'same' type as declared, but not necessarily identical.
- QualType desugar() const { return getUnderlyingType(); }
+ UsingShadowDecl *getDecl() const { return D; }
- // Internal helper, for debugging purposes.
- bool typeMatchesDecl() const { return !UsingBits.hasTypeDifferentFromDecl; }
+ QualType desugar() const { return UnderlyingType; }
+ bool isSugared() const { return true; }
- void Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, Found, getUnderlyingType());
+ static void Profile(llvm::FoldingSetNodeID &ID, ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier, const UsingShadowDecl *D,
+ QualType UnderlyingType) {
+ static_assert(llvm::to_underlying(ElaboratedTypeKeyword::None) <= 7);
+ ID.AddInteger(uintptr_t(D) | llvm::to_underlying(Keyword));
+ UnderlyingType.Profile(ID);
+ if (Qualifier)
+ Qualifier.Profile(ID);
}
- static void Profile(llvm::FoldingSetNodeID &ID, const UsingShadowDecl *Found,
- QualType Underlying) {
- ID.AddPointer(Found);
- Underlying.Profile(ID);
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ Profile(ID, getKeyword(), getQualifier(), D, desugar());
}
static bool classof(const Type *T) { return T->getTypeClass() == Using; }
};
-class TypedefType final : public Type,
- public llvm::FoldingSetNode,
- private llvm::TrailingObjects<TypedefType, QualType> {
+class TypedefType final
+ : public TypeWithKeyword,
+ private llvm::TrailingObjects<TypedefType,
+ FoldingSetPlaceholder<TypedefType>,
+ NestedNameSpecifier, QualType> {
TypedefNameDecl *Decl;
friend class ASTContext; // ASTContext creates these.
friend TrailingObjects;
- TypedefType(TypeClass tc, const TypedefNameDecl *D, QualType UnderlyingType,
- bool HasTypeDifferentFromDecl);
+ unsigned
+ numTrailingObjects(OverloadToken<FoldingSetPlaceholder<TypedefType>>) const {
+ assert(TypedefBits.hasQualifier || TypedefBits.hasTypeDifferentFromDecl ||
+ getKeyword() != ElaboratedTypeKeyword::None);
+ return 1;
+ }
+
+ unsigned numTrailingObjects(OverloadToken<NestedNameSpecifier>) const {
+ return TypedefBits.hasQualifier;
+ }
+
+ TypedefType(TypeClass TC, ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier, const TypedefNameDecl *D,
+ QualType UnderlyingType, bool HasTypeDifferentFromDecl);
+
+ FoldingSetPlaceholder<TypedefType> *getFoldingSetPlaceholder() {
+ assert(numTrailingObjects(
+ OverloadToken<FoldingSetPlaceholder<TypedefType>>{}) == 1);
+ return getTrailingObjects<FoldingSetPlaceholder<TypedefType>>();
+ }
public:
+ NestedNameSpecifier getQualifier() const {
+ return TypedefBits.hasQualifier ? *getTrailingObjects<NestedNameSpecifier>()
+ : std::nullopt;
+ }
+
TypedefNameDecl *getDecl() const { return Decl; }
bool isSugared() const { return true; }
@@ -5855,16 +6056,25 @@ public:
// Internal helper, for debugging purposes.
bool typeMatchesDecl() const { return !TypedefBits.hasTypeDifferentFromDecl; }
- void Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, Decl, typeMatchesDecl() ? QualType() : desugar());
- }
- static void Profile(llvm::FoldingSetNodeID &ID, const TypedefNameDecl *Decl,
- QualType Underlying) {
- ID.AddPointer(Decl);
+ static void Profile(llvm::FoldingSetNodeID &ID, ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier,
+ const TypedefNameDecl *Decl, QualType Underlying) {
+
+ ID.AddInteger(uintptr_t(Decl) | (Keyword != ElaboratedTypeKeyword::None) |
+ (!Qualifier << 1));
+ if (Keyword != ElaboratedTypeKeyword::None)
+ ID.AddInteger(llvm::to_underlying(Keyword));
+ if (Qualifier)
+ Qualifier.Profile(ID);
if (!Underlying.isNull())
Underlying.Profile(ID);
}
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ Profile(ID, getKeyword(), getQualifier(), getDecl(),
+ typeMatchesDecl() ? QualType() : desugar());
+ }
+
static bool classof(const Type *T) { return T->getTypeClass() == Typedef; }
};
@@ -6131,71 +6341,143 @@ public:
}
};
-class TagType : public Type {
- friend class ASTReader;
- template <class T> friend class serialization::AbstractTypeReader;
+class TagType : public TypeWithKeyword {
+ friend class ASTContext; // ASTContext creates these.
/// Stores the TagDecl associated with this type. The decl may point to any
/// TagDecl that declares the entity.
TagDecl *decl;
+ void *getTrailingPointer() const;
+ NestedNameSpecifier &getTrailingQualifier() const;
+
protected:
- TagType(TypeClass TC, const TagDecl *D, QualType can);
+ TagType(TypeClass TC, ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier, const TagDecl *TD, bool OwnsTag,
+ bool IsInjected, const Type *CanonicalType);
public:
- TagDecl *getDecl() const;
+ TagDecl *getOriginalDecl() const { return decl; }
+
+ NestedNameSpecifier getQualifier() const;
- /// Determines whether this type is in the process of being defined.
- bool isBeingDefined() const;
+ /// Does the TagType own this declaration of the Tag?
+ bool isTagOwned() const { return TagTypeBits.OwnsTag; }
+
+ bool isInjected() const { return TagTypeBits.IsInjected; }
+
+ ClassTemplateDecl *getTemplateDecl() const;
+ TemplateName getTemplateName(const ASTContext &Ctx) const;
+ ArrayRef<TemplateArgument> getTemplateArgs(const ASTContext &Ctx) const;
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return getCanonicalTypeInternal(); }
static bool classof(const Type *T) {
- return T->getTypeClass() == Enum || T->getTypeClass() == Record;
+ return T->getTypeClass() == Enum || T->getTypeClass() == Record ||
+ T->getTypeClass() == InjectedClassName;
+ }
+};
+
+struct TagTypeFoldingSetPlaceholder : public llvm::FoldingSetNode {
+ static constexpr size_t getOffset() {
+ return alignof(TagType) -
+ (sizeof(TagTypeFoldingSetPlaceholder) % alignof(TagType));
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier, const TagDecl *Tag,
+ bool OwnsTag, bool IsInjected) {
+ ID.AddInteger(uintptr_t(Tag) | OwnsTag | (IsInjected << 1) |
+ ((Keyword != ElaboratedTypeKeyword::None) << 2));
+ if (Keyword != ElaboratedTypeKeyword::None)
+ ID.AddInteger(llvm::to_underlying(Keyword));
+ if (Qualifier)
+ Qualifier.Profile(ID);
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ const TagType *T = getTagType();
+ Profile(ID, T->getKeyword(), T->getQualifier(), T->getOriginalDecl(),
+ T->isTagOwned(), T->isInjected());
+ }
+
+ TagType *getTagType() {
+ return reinterpret_cast<TagType *>(reinterpret_cast<char *>(this + 1) +
+ getOffset());
+ }
+ const TagType *getTagType() const {
+ return const_cast<TagTypeFoldingSetPlaceholder *>(this)->getTagType();
+ }
+ static TagTypeFoldingSetPlaceholder *fromTagType(TagType *T) {
+ return reinterpret_cast<TagTypeFoldingSetPlaceholder *>(
+ reinterpret_cast<char *>(T) - getOffset()) -
+ 1;
}
};
/// A helper class that allows the use of isa/cast/dyncast
/// to detect TagType objects of structs/unions/classes.
-class RecordType : public TagType {
-protected:
- friend class ASTContext; // ASTContext creates these.
-
- explicit RecordType(const RecordDecl *D)
- : TagType(Record, reinterpret_cast<const TagDecl*>(D), QualType()) {}
- explicit RecordType(TypeClass TC, RecordDecl *D)
- : TagType(TC, reinterpret_cast<const TagDecl*>(D), QualType()) {}
+class RecordType final : public TagType {
+ using TagType::TagType;
public:
- RecordDecl *getDecl() const {
- return reinterpret_cast<RecordDecl*>(TagType::getDecl());
+ RecordDecl *getOriginalDecl() const {
+ return reinterpret_cast<RecordDecl *>(TagType::getOriginalDecl());
}
/// Recursively check all fields in the record for const-ness. If any field
/// is declared const, return true. Otherwise, return false.
bool hasConstFields() const;
- bool isSugared() const { return false; }
- QualType desugar() const { return QualType(this, 0); }
-
static bool classof(const Type *T) { return T->getTypeClass() == Record; }
};
/// A helper class that allows the use of isa/cast/dyncast
/// to detect TagType objects of enums.
-class EnumType : public TagType {
+class EnumType final : public TagType {
+ using TagType::TagType;
+
+public:
+ EnumDecl *getOriginalDecl() const {
+ return reinterpret_cast<EnumDecl *>(TagType::getOriginalDecl());
+ }
+
+ static bool classof(const Type *T) { return T->getTypeClass() == Enum; }
+};
+
+/// The injected class name of a C++ class template or class
+/// template partial specialization. Used to record that a type was
+/// spelled with a bare identifier rather than as a template-id; the
+/// equivalent for non-templated classes is just RecordType.
+///
+/// Injected class name types are always dependent. Template
+/// instantiation turns these into RecordTypes.
+///
+/// Injected class name types are always canonical. This works
+/// because it is impossible to compare an injected class name type
+/// with the corresponding non-injected template type, for the same
+/// reason that it is impossible to directly compare template
+/// parameters from different dependent contexts: injected class name
+/// types can only occur within the scope of a particular templated
+/// declaration, and within that scope every template specialization
+/// will canonicalize to the injected class name (when appropriate
+/// according to the rules of the language).
+class InjectedClassNameType final : public TagType {
friend class ASTContext; // ASTContext creates these.
- explicit EnumType(const EnumDecl *D)
- : TagType(Enum, reinterpret_cast<const TagDecl*>(D), QualType()) {}
+ InjectedClassNameType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier, const TagDecl *TD,
+ bool IsInjected, const Type *CanonicalType);
public:
- EnumDecl *getDecl() const {
- return reinterpret_cast<EnumDecl*>(TagType::getDecl());
+ CXXRecordDecl *getOriginalDecl() const {
+ return reinterpret_cast<CXXRecordDecl *>(TagType::getOriginalDecl());
}
- bool isSugared() const { return false; }
- QualType desugar() const { return QualType(this, 0); }
-
- static bool classof(const Type *T) { return T->getTypeClass() == Enum; }
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == InjectedClassName;
+ }
};
/// An attributed type is a type to which a type attribute has been applied.
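To summarize how the reworked TagType above is meant to be consumed, a hypothetical sketch (not part of the patch; the helper name is invented):

// The written qualifier, tag ownership, and injected-name flag now live on
// TagType itself, where ElaboratedType used to carry them; getOriginalDecl()
// replaces the old getDecl().
static bool isOwnedQualifiedTag(const clang::TagType *TT) {
  return TT->isTagOwned() && bool(TT->getQualifier());
}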
@@ -6806,34 +7088,38 @@ public:
};
/// Represents a C++17 deduced template specialization type.
-class DeducedTemplateSpecializationType : public DeducedType,
+class DeducedTemplateSpecializationType : public KeywordWrapper<DeducedType>,
public llvm::FoldingSetNode {
friend class ASTContext; // ASTContext creates these
/// The name of the template whose arguments will be deduced.
TemplateName Template;
- DeducedTemplateSpecializationType(TemplateName Template,
+ DeducedTemplateSpecializationType(ElaboratedTypeKeyword Keyword,
+ TemplateName Template,
QualType DeducedAsType,
bool IsDeducedAsDependent, QualType Canon)
- : DeducedType(DeducedTemplateSpecialization, DeducedAsType,
- toTypeDependence(Template.getDependence()) |
- (IsDeducedAsDependent
- ? TypeDependence::DependentInstantiation
- : TypeDependence::None),
- Canon),
+ : KeywordWrapper(Keyword, DeducedTemplateSpecialization, DeducedAsType,
+ toTypeDependence(Template.getDependence()) |
+ (IsDeducedAsDependent
+ ? TypeDependence::DependentInstantiation
+ : TypeDependence::None),
+ Canon),
Template(Template) {}
public:
/// Retrieve the name of the template that we are deducing.
- TemplateName getTemplateName() const { return Template;}
+ TemplateName getTemplateName() const { return Template; }
void Profile(llvm::FoldingSetNodeID &ID) const {
- Profile(ID, getTemplateName(), getDeducedType(), isDependentType());
+ Profile(ID, getKeyword(), getTemplateName(), getDeducedType(),
+ isDependentType());
}
- static void Profile(llvm::FoldingSetNodeID &ID, TemplateName Template,
- QualType Deduced, bool IsDependent) {
+ static void Profile(llvm::FoldingSetNodeID &ID, ElaboratedTypeKeyword Keyword,
+ TemplateName Template, QualType Deduced,
+ bool IsDependent) {
+ ID.AddInteger(llvm::to_underlying(Keyword));
Template.Profile(ID);
Deduced.Profile(ID);
ID.AddBoolean(IsDependent || Template.isDependent());
@@ -6864,7 +7150,8 @@ public:
/// TemplateArguments, followed by a QualType representing the
/// non-canonical aliased type when the template is a type alias
/// template.
-class TemplateSpecializationType : public Type, public llvm::FoldingSetNode {
+class TemplateSpecializationType : public TypeWithKeyword,
+ public llvm::FoldingSetNode {
friend class ASTContext; // ASTContext creates these
/// The name of the template being specialized. This is
@@ -6876,8 +7163,8 @@ class TemplateSpecializationType : public Type, public llvm::FoldingSetNode {
/// replacement must, recursively, be one of these).
TemplateName Template;
- TemplateSpecializationType(TemplateName T, bool IsAlias,
- ArrayRef<TemplateArgument> Args,
+ TemplateSpecializationType(ElaboratedTypeKeyword Keyword, TemplateName T,
+ bool IsAlias, ArrayRef<TemplateArgument> Args,
QualType Underlying);
public:
@@ -6978,241 +7265,6 @@ bool isSubstitutedDefaultArgument(ASTContext &Ctx, TemplateArgument Arg,
ArrayRef<TemplateArgument> Args,
unsigned Depth);
-/// The injected class name of a C++ class template or class
-/// template partial specialization. Used to record that a type was
-/// spelled with a bare identifier rather than as a template-id; the
-/// equivalent for non-templated classes is just RecordType.
-///
-/// Injected class name types are always dependent. Template
-/// instantiation turns these into RecordTypes.
-///
-/// Injected class name types are always canonical. This works
-/// because it is impossible to compare an injected class name type
-/// with the corresponding non-injected template type, for the same
-/// reason that it is impossible to directly compare template
-/// parameters from different dependent contexts: injected class name
-/// types can only occur within the scope of a particular templated
-/// declaration, and within that scope every template specialization
-/// will canonicalize to the injected class name (when appropriate
-/// according to the rules of the language).
-class InjectedClassNameType : public Type {
- friend class ASTContext; // ASTContext creates these.
- friend class ASTNodeImporter;
- friend class ASTReader; // FIXME: ASTContext::getInjectedClassNameType is not
- // currently suitable for AST reading, too much
- // interdependencies.
- template <class T> friend class serialization::AbstractTypeReader;
-
- CXXRecordDecl *Decl;
-
- /// The template specialization which this type represents.
- /// For example, in
- /// template <class T> class A { ... };
- /// this is A<T>, whereas in
- /// template <class X, class Y> class A<B<X,Y> > { ... };
- /// this is A<B<X,Y> >.
- ///
- /// It is always unqualified, always a template specialization type,
- /// and always dependent.
- QualType InjectedType;
-
- InjectedClassNameType(CXXRecordDecl *D, QualType TST)
- : Type(InjectedClassName, QualType(),
- TypeDependence::DependentInstantiation),
- Decl(D), InjectedType(TST) {
- assert(isa<TemplateSpecializationType>(TST));
- assert(!TST.hasQualifiers());
- assert(TST->isDependentType());
- }
-
-public:
- QualType getInjectedSpecializationType() const { return InjectedType; }
-
- const TemplateSpecializationType *getInjectedTST() const {
- return cast<TemplateSpecializationType>(InjectedType.getTypePtr());
- }
-
- TemplateName getTemplateName() const {
- return getInjectedTST()->getTemplateName();
- }
-
- CXXRecordDecl *getDecl() const;
-
- bool isSugared() const { return false; }
- QualType desugar() const { return QualType(this, 0); }
-
- static bool classof(const Type *T) {
- return T->getTypeClass() == InjectedClassName;
- }
-};
-
-/// The elaboration keyword that precedes a qualified type name or
-/// introduces an elaborated-type-specifier.
-enum class ElaboratedTypeKeyword {
- /// The "struct" keyword introduces the elaborated-type-specifier.
- Struct,
-
- /// The "__interface" keyword introduces the elaborated-type-specifier.
- Interface,
-
- /// The "union" keyword introduces the elaborated-type-specifier.
- Union,
-
- /// The "class" keyword introduces the elaborated-type-specifier.
- Class,
-
- /// The "enum" keyword introduces the elaborated-type-specifier.
- Enum,
-
- /// The "typename" keyword precedes the qualified type name, e.g.,
- /// \c typename T::type.
- Typename,
-
- /// No keyword precedes the qualified type name.
- None
-};
-
-/// The kind of a tag type.
-enum class TagTypeKind {
- /// The "struct" keyword.
- Struct,
-
- /// The "__interface" keyword.
- Interface,
-
- /// The "union" keyword.
- Union,
-
- /// The "class" keyword.
- Class,
-
- /// The "enum" keyword.
- Enum
-};
-
-/// A helper class for Type nodes having an ElaboratedTypeKeyword.
-/// The keyword in stored in the free bits of the base class.
-/// Also provides a few static helpers for converting and printing
-/// elaborated type keyword and tag type kind enumerations.
-class TypeWithKeyword : public Type {
-protected:
- TypeWithKeyword(ElaboratedTypeKeyword Keyword, TypeClass tc,
- QualType Canonical, TypeDependence Dependence)
- : Type(tc, Canonical, Dependence) {
- TypeWithKeywordBits.Keyword = llvm::to_underlying(Keyword);
- }
-
-public:
- ElaboratedTypeKeyword getKeyword() const {
- return static_cast<ElaboratedTypeKeyword>(TypeWithKeywordBits.Keyword);
- }
-
- /// Converts a type specifier (DeclSpec::TST) into an elaborated type keyword.
- static ElaboratedTypeKeyword getKeywordForTypeSpec(unsigned TypeSpec);
-
- /// Converts a type specifier (DeclSpec::TST) into a tag type kind.
- /// It is an error to provide a type specifier which *isn't* a tag kind here.
- static TagTypeKind getTagTypeKindForTypeSpec(unsigned TypeSpec);
-
- /// Converts a TagTypeKind into an elaborated type keyword.
- static ElaboratedTypeKeyword getKeywordForTagTypeKind(TagTypeKind Tag);
-
- /// Converts an elaborated type keyword into a TagTypeKind.
- /// It is an error to provide an elaborated type keyword
- /// which *isn't* a tag kind here.
- static TagTypeKind getTagTypeKindForKeyword(ElaboratedTypeKeyword Keyword);
-
- static bool KeywordIsTagTypeKind(ElaboratedTypeKeyword Keyword);
-
- static StringRef getKeywordName(ElaboratedTypeKeyword Keyword);
-
- static StringRef getTagTypeKindName(TagTypeKind Kind) {
- return getKeywordName(getKeywordForTagTypeKind(Kind));
- }
-
- class CannotCastToThisType {};
- static CannotCastToThisType classof(const Type *);
-};
-
-/// Represents a type that was referred to using an elaborated type
-/// keyword, e.g., struct S, or via a qualified name, e.g., N::M::type,
-/// or both.
-///
-/// This type is used to keep track of a type name as written in the
-/// source code, including tag keywords and any nested-name-specifiers.
-/// The type itself is always "sugar", used to express what was written
-/// in the source code but containing no additional semantic information.
-class ElaboratedType final
- : public TypeWithKeyword,
- public llvm::FoldingSetNode,
- private llvm::TrailingObjects<ElaboratedType, TagDecl *> {
- friend class ASTContext; // ASTContext creates these
- friend TrailingObjects;
-
- /// The nested name specifier containing the qualifier.
- NestedNameSpecifier *NNS;
-
- /// The type that this qualified name refers to.
- QualType NamedType;
-
- /// The (re)declaration of this tag type owned by this occurrence is stored
- /// as a trailing object if there is one. Use getOwnedTagDecl to obtain
- /// it, or obtain a null pointer if there is none.
-
- ElaboratedType(ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS,
- QualType NamedType, QualType CanonType, TagDecl *OwnedTagDecl)
- : TypeWithKeyword(Keyword, Elaborated, CanonType,
- // Any semantic dependence on the qualifier will have
- // been incorporated into NamedType. We still need to
- // track syntactic (instantiation / error / pack)
- // dependence on the qualifier.
- NamedType->getDependence() |
- (NNS ? toSyntacticDependence(
- toTypeDependence(NNS->getDependence()))
- : TypeDependence::None)),
- NNS(NNS), NamedType(NamedType) {
- ElaboratedTypeBits.HasOwnedTagDecl = false;
- if (OwnedTagDecl) {
- ElaboratedTypeBits.HasOwnedTagDecl = true;
- *getTrailingObjects() = OwnedTagDecl;
- }
- }
-
-public:
- /// Retrieve the qualification on this type.
- NestedNameSpecifier *getQualifier() const { return NNS; }
-
- /// Retrieve the type named by the qualified-id.
- QualType getNamedType() const { return NamedType; }
-
- /// Remove a single level of sugar.
- QualType desugar() const { return getNamedType(); }
-
- /// Returns whether this type directly provides sugar.
- bool isSugared() const { return true; }
-
- /// Return the (re)declaration of this type owned by this occurrence of this
- /// type, or nullptr if there is none.
- TagDecl *getOwnedTagDecl() const {
- return ElaboratedTypeBits.HasOwnedTagDecl ? *getTrailingObjects() : nullptr;
- }
-
- void Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, getKeyword(), NNS, NamedType, getOwnedTagDecl());
- }
-
- static void Profile(llvm::FoldingSetNodeID &ID, ElaboratedTypeKeyword Keyword,
- NestedNameSpecifier *NNS, QualType NamedType,
- TagDecl *OwnedTagDecl) {
- ID.AddInteger(llvm::to_underlying(Keyword));
- ID.AddPointer(NNS);
- NamedType.Profile(ID);
- ID.AddPointer(OwnedTagDecl);
- }
-
- static bool classof(const Type *T) { return T->getTypeClass() == Elaborated; }
-};
-
/// Represents a qualified type name for which the type name is
/// dependent.
///
@@ -7229,24 +7281,24 @@ class DependentNameType : public TypeWithKeyword, public llvm::FoldingSetNode {
friend class ASTContext; // ASTContext creates these
/// The nested name specifier containing the qualifier.
- NestedNameSpecifier *NNS;
+ NestedNameSpecifier NNS;
/// The type that this typename specifier refers to.
const IdentifierInfo *Name;
- DependentNameType(ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS,
+ DependentNameType(ElaboratedTypeKeyword Keyword, NestedNameSpecifier NNS,
const IdentifierInfo *Name, QualType CanonType)
: TypeWithKeyword(Keyword, DependentName, CanonType,
TypeDependence::DependentInstantiation |
- toTypeDependence(NNS->getDependence())),
+ (NNS ? toTypeDependence(NNS.getDependence())
+ : TypeDependence::Dependent)),
NNS(NNS), Name(Name) {
- assert(NNS);
assert(Name);
}
public:
/// Retrieve the qualification on this type.
- NestedNameSpecifier *getQualifier() const { return NNS; }
+ NestedNameSpecifier getQualifier() const { return NNS; }
/// Retrieve the identifier that terminates this type name.
/// For example, "type" in "typename T::type".
@@ -7262,9 +7314,9 @@ public:
}
static void Profile(llvm::FoldingSetNodeID &ID, ElaboratedTypeKeyword Keyword,
- NestedNameSpecifier *NNS, const IdentifierInfo *Name) {
+ NestedNameSpecifier NNS, const IdentifierInfo *Name) {
ID.AddInteger(llvm::to_underlying(Keyword));
- ID.AddPointer(NNS);
+ NNS.Profile(ID);
ID.AddPointer(Name);
}
@@ -8815,8 +8867,8 @@ inline bool Type::isIntegerType() const {
if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType)) {
// Incomplete enum types are not treated as integer types.
// FIXME: In C++, enum types are never integer types.
- return IsEnumDeclComplete(ET->getDecl()) &&
- !IsEnumDeclScoped(ET->getDecl());
+ return IsEnumDeclComplete(ET->getOriginalDecl()) &&
+ !IsEnumDeclScoped(ET->getOriginalDecl());
}
return isBitIntType();
}
@@ -8874,7 +8926,7 @@ inline bool Type::isScalarType() const {
if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
// Enums are scalar types, but only if they are defined. Incomplete enums
// are not treated as scalar types.
- return IsEnumDeclComplete(ET->getDecl());
+ return IsEnumDeclComplete(ET->getOriginalDecl());
return isa<PointerType>(CanonicalType) ||
isa<BlockPointerType>(CanonicalType) ||
isa<MemberPointerType>(CanonicalType) ||
@@ -8890,7 +8942,7 @@ inline bool Type::isIntegralOrEnumerationType() const {
// Check for a complete enum type; incomplete enum types are not properly an
// enumeration type in the sense required here.
if (const auto *ET = dyn_cast<EnumType>(CanonicalType))
- return IsEnumDeclComplete(ET->getDecl());
+ return IsEnumDeclComplete(ET->getOriginalDecl());
return isBitIntType();
}
@@ -9026,8 +9078,6 @@ template <typename T> const T *Type::getAsAdjusted() const {
Ty = A->getWrappedType().getTypePtr();
else if (const auto *A = dyn_cast<HLSLAttributedResourceType>(Ty))
Ty = A->getWrappedType().getTypePtr();
- else if (const auto *E = dyn_cast<ElaboratedType>(Ty))
- Ty = E->desugar().getTypePtr();
else if (const auto *P = dyn_cast<ParenType>(Ty))
Ty = P->desugar().getTypePtr();
else if (const auto *A = dyn_cast<AdjustedType>(Ty))
diff --git a/clang/include/clang/AST/TypeLoc.h b/clang/include/clang/AST/TypeLoc.h
index 52ef7ac..934aa14 100644
--- a/clang/include/clang/AST/TypeLoc.h
+++ b/clang/include/clang/AST/TypeLoc.h
@@ -16,7 +16,7 @@
#include "clang/AST/ASTConcept.h"
#include "clang/AST/DeclarationName.h"
-#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/NestedNameSpecifierBase.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/Type.h"
#include "clang/Basic/LLVM.h"
@@ -193,6 +193,21 @@ public:
/// Get the SourceLocation of the template keyword (if any).
SourceLocation getTemplateKeywordLoc() const;
+  /// If this type represents a qualified-id, this returns its nested name
+  /// specifier. For example, for the qualified-id "foo::bar::baz", this returns
+  /// "foo::bar". Returns null if this type represents an unqualified-id.
+ NestedNameSpecifierLoc getPrefix() const;
+
+ /// This returns the position of the type after any elaboration, such as the
+  /// 'struct' keyword, and name qualifiers. This will be the 'template' keyword
+  /// if present, or the name location otherwise.
+ SourceLocation getNonPrefixBeginLoc() const;
+
+ /// This returns the position of the type after any elaboration, such as the
+  /// 'struct' keyword. This may be the position of the name qualifiers, the
+  /// 'template' keyword, or the name location.
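+  ///
+  /// As a rough illustration (a hypothetical example, not taken from this
+  /// patch): for a type written "typename T::template X<int>", getPrefix()
+  /// would cover "T::", getNonElaboratedBeginLoc() would point at "T", and
+  /// getNonPrefixBeginLoc() would point at the 'template' keyword.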
+ SourceLocation getNonElaboratedBeginLoc() const;
+
/// Initializes this to state that every location in this
/// type is the given location.
///
@@ -679,62 +694,164 @@ public:
}
};
-/// Wrapper for source info for types used via transparent aliases.
-class UsingTypeLoc : public InheritingConcreteTypeLoc<TypeSpecTypeLoc,
- UsingTypeLoc, UsingType> {
-public:
- QualType getUnderlyingType() const {
- return getTypePtr()->getUnderlyingType();
+struct ElaboratedNameLocInfo {
+ SourceLocation NameLoc;
+ SourceLocation ElaboratedKeywordLoc;
+
+ ElaboratedNameLocInfo() = default;
+ ElaboratedNameLocInfo(SourceLocation ElaboratedKeywordLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation NameLoc)
+ : NameLoc(NameLoc), ElaboratedKeywordLoc(ElaboratedKeywordLoc),
+ QualifierData(QualifierLoc.getOpaqueData()) {}
+ ElaboratedNameLocInfo(ASTContext &Context, ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier, SourceLocation Loc)
+ : NameLoc(Loc),
+ ElaboratedKeywordLoc(
+ Keyword != ElaboratedTypeKeyword::None ? Loc : SourceLocation()),
+ QualifierData(getTrivialQualifierData(Context, Qualifier, Loc)) {}
+
+ NestedNameSpecifierLoc getQualifierLoc(NestedNameSpecifier Qualifier) const {
+ assert(!Qualifier == !QualifierData);
+ return NestedNameSpecifierLoc(Qualifier, QualifierData);
+ }
+
+ SourceRange getLocalSourceRange(NestedNameSpecifier Qualifier) const {
+ SourceLocation BeginLoc = ElaboratedKeywordLoc;
+ if (NestedNameSpecifierLoc QualifierLoc = getQualifierLoc(Qualifier);
+ BeginLoc.isInvalid() && Qualifier)
+ BeginLoc = QualifierLoc.getBeginLoc();
+ if (BeginLoc.isInvalid())
+ BeginLoc = NameLoc;
+ return SourceRange(BeginLoc, NameLoc);
}
- UsingShadowDecl *getFoundDecl() const { return getTypePtr()->getFoundDecl(); }
-};
-/// Wrapper for source info for typedefs.
-class TypedefTypeLoc : public InheritingConcreteTypeLoc<TypeSpecTypeLoc,
- TypedefTypeLoc,
- TypedefType> {
-public:
- TypedefNameDecl *getTypedefNameDecl() const {
- return getTypePtr()->getDecl();
+private:
+ void *QualifierData;
+
+ static void *getTrivialQualifierData(ASTContext &Context,
+ NestedNameSpecifier Qualifier,
+ SourceLocation Loc) {
+ if (!Qualifier)
+ return nullptr;
+ NestedNameSpecifierLocBuilder Builder;
+ Builder.MakeTrivial(Context, Qualifier, Loc);
+ return Builder.getWithLocInContext(Context).getOpaqueData();
}
};
-/// Wrapper for source info for injected class names of class
-/// templates.
-class InjectedClassNameTypeLoc :
- public InheritingConcreteTypeLoc<TypeSpecTypeLoc,
- InjectedClassNameTypeLoc,
- InjectedClassNameType> {
+template <class TL, class T>
+class ElaboratedNameTypeLoc
+ : public ConcreteTypeLoc<UnqualTypeLoc, TL, T, ElaboratedNameLocInfo> {
public:
- CXXRecordDecl *getDecl() const {
- return getTypePtr()->getDecl();
+ auto *getDecl() const { return this->getTypePtr()->getDecl(); }
+
+ void set(SourceLocation ElaboratedKeywordLoc,
+ NestedNameSpecifierLoc QualifierLoc, SourceLocation NameLoc) {
+ assert(QualifierLoc.getNestedNameSpecifier() ==
+ this->getTypePtr()->getQualifier());
+ *this->getLocalData() =
+ ElaboratedNameLocInfo(ElaboratedKeywordLoc, QualifierLoc, NameLoc);
+ }
+
+ SourceLocation getElaboratedKeywordLoc() const {
+ return this->getLocalData()->ElaboratedKeywordLoc;
+ }
+
+ NestedNameSpecifierLoc getQualifierLoc() const {
+ return this->getLocalData()->getQualifierLoc(
+ this->getTypePtr()->getQualifier());
+ }
+
+ SourceLocation getNameLoc() const { return this->getLocalData()->NameLoc; }
+
+ SourceRange getLocalSourceRange() const {
+ return this->getLocalData()->getLocalSourceRange(
+ this->getTypePtr()->getQualifier());
+ }
+
+ void initializeLocal(ASTContext &Context, SourceLocation Loc) {
+ const auto *Ptr = this->getTypePtr();
+ *this->getLocalData() = ElaboratedNameLocInfo(Context, Ptr->getKeyword(),
+ Ptr->getQualifier(), Loc);
}
};
+/// Wrapper for source info for typedefs.
+class TypedefTypeLoc
+ : public ElaboratedNameTypeLoc<TypedefTypeLoc, TypedefType> {};
+
/// Wrapper for source info for unresolved typename using decls.
-class UnresolvedUsingTypeLoc :
- public InheritingConcreteTypeLoc<TypeSpecTypeLoc,
- UnresolvedUsingTypeLoc,
- UnresolvedUsingType> {
-public:
- UnresolvedUsingTypenameDecl *getDecl() const {
- return getTypePtr()->getDecl();
- }
+class UnresolvedUsingTypeLoc
+ : public ElaboratedNameTypeLoc<UnresolvedUsingTypeLoc,
+ UnresolvedUsingType> {};
+
+/// Wrapper for source info for types used via transparent aliases.
+class UsingTypeLoc : public ElaboratedNameTypeLoc<UsingTypeLoc, UsingType> {};
+
+struct TagTypeLocInfo {
+ SourceLocation NameLoc;
+ SourceLocation ElaboratedKWLoc;
+ void *QualifierData;
};
-/// Wrapper for source info for tag types. Note that this only
-/// records source info for the name itself; a type written 'struct foo'
-/// should be represented as an ElaboratedTypeLoc. We currently
-/// only do that when C++ is enabled because of the expense of
-/// creating an ElaboratedType node for so many type references in C.
-class TagTypeLoc : public InheritingConcreteTypeLoc<TypeSpecTypeLoc,
- TagTypeLoc,
- TagType> {
+class TagTypeLoc : public ConcreteTypeLoc<UnqualTypeLoc, TagTypeLoc, TagType,
+ TagTypeLocInfo> {
public:
- TagDecl *getDecl() const { return getTypePtr()->getDecl(); }
+ TagDecl *getOriginalDecl() const { return getTypePtr()->getOriginalDecl(); }
/// True if the tag was defined in this type specifier.
bool isDefinition() const;
+
+ SourceLocation getElaboratedKeywordLoc() const {
+ return getLocalData()->ElaboratedKWLoc;
+ }
+
+ void setElaboratedKeywordLoc(SourceLocation Loc) {
+ getLocalData()->ElaboratedKWLoc = Loc;
+ }
+
+ NestedNameSpecifierLoc getQualifierLoc() const {
+ NestedNameSpecifier Qualifier = getTypePtr()->getQualifier();
+ void *QualifierData = getLocalData()->QualifierData;
+ assert(!Qualifier == !QualifierData);
+ return NestedNameSpecifierLoc(Qualifier, QualifierData);
+ }
+
+ void setQualifierLoc(NestedNameSpecifierLoc QualifierLoc) {
+ assert(QualifierLoc.getNestedNameSpecifier() ==
+ getTypePtr()->getQualifier());
+ getLocalData()->QualifierData = QualifierLoc.getOpaqueData();
+ }
+
+ SourceLocation getNameLoc() const { return getLocalData()->NameLoc; }
+
+ void setNameLoc(SourceLocation Loc) { getLocalData()->NameLoc = Loc; }
+
+ SourceRange getLocalSourceRange() const {
+ SourceLocation BeginLoc = getElaboratedKeywordLoc();
+ if (NestedNameSpecifierLoc Qualifier = getQualifierLoc();
+ BeginLoc.isInvalid() && Qualifier)
+ BeginLoc = Qualifier.getBeginLoc();
+ if (BeginLoc.isInvalid())
+ BeginLoc = getNameLoc();
+ return SourceRange(BeginLoc, getNameLoc());
+ }
+
+ void initializeLocal(ASTContext &Context, SourceLocation Loc) {
+ setElaboratedKeywordLoc(getTypePtr()->getKeyword() !=
+ ElaboratedTypeKeyword::None
+ ? Loc
+ : SourceLocation());
+ if (NestedNameSpecifier Qualifier = getTypePtr()->getQualifier()) {
+ NestedNameSpecifierLocBuilder Builder;
+ Builder.MakeTrivial(Context, Qualifier, Loc);
+ setQualifierLoc(Builder.getWithLocInContext(Context));
+ } else {
+ getLocalData()->QualifierData = nullptr;
+ }
+ setNameLoc(Loc);
+ }
};
/// Wrapper for source info for record types.
@@ -742,7 +859,9 @@ class RecordTypeLoc : public InheritingConcreteTypeLoc<TagTypeLoc,
RecordTypeLoc,
RecordType> {
public:
- RecordDecl *getDecl() const { return getTypePtr()->getDecl(); }
+ RecordDecl *getOriginalDecl() const {
+ return getTypePtr()->getOriginalDecl();
+ }
};
/// Wrapper for source info for enum types.
@@ -750,7 +869,18 @@ class EnumTypeLoc : public InheritingConcreteTypeLoc<TagTypeLoc,
EnumTypeLoc,
EnumType> {
public:
- EnumDecl *getDecl() const { return getTypePtr()->getDecl(); }
+ EnumDecl *getOriginalDecl() const { return getTypePtr()->getOriginalDecl(); }
+};
+
+/// Wrapper for source info for injected class names of class
+/// templates.
+class InjectedClassNameTypeLoc
+ : public InheritingConcreteTypeLoc<TagTypeLoc, InjectedClassNameTypeLoc,
+ InjectedClassNameType> {
+public:
+ CXXRecordDecl *getOriginalDecl() const {
+ return getTypePtr()->getOriginalDecl();
+ }
};
/// Wrapper for template type parameters.
@@ -1405,7 +1535,7 @@ public:
void initializeLocal(ASTContext &Context, SourceLocation Loc) {
setSigilLoc(Loc);
- if (auto *Qualifier = getTypePtr()->getQualifier()) {
+ if (NestedNameSpecifier Qualifier = getTypePtr()->getQualifier()) {
NestedNameSpecifierLocBuilder Builder;
Builder.MakeTrivial(Context, Qualifier, Loc);
setQualifierLoc(Builder.getWithLocInContext(Context));
@@ -1701,9 +1831,11 @@ struct TemplateNameLocInfo {
};
struct TemplateSpecializationLocInfo : TemplateNameLocInfo {
+ SourceRange SR;
+ SourceLocation ElaboratedKWLoc;
SourceLocation TemplateKWLoc;
SourceLocation LAngleLoc;
- SourceLocation RAngleLoc;
+ void *QualifierData;
};
class TemplateSpecializationTypeLoc :
@@ -1712,54 +1844,53 @@ class TemplateSpecializationTypeLoc :
TemplateSpecializationType,
TemplateSpecializationLocInfo> {
public:
- SourceLocation getTemplateKeywordLoc() const {
- return getLocalData()->TemplateKWLoc;
- }
+ void set(SourceLocation ElaboratedKeywordLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKeywordLoc, SourceLocation NameLoc,
+ SourceLocation LAngleLoc, SourceLocation RAngleLoc);
- void setTemplateKeywordLoc(SourceLocation Loc) {
- getLocalData()->TemplateKWLoc = Loc;
- }
+ void set(SourceLocation ElaboratedKeywordLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKeywordLoc, SourceLocation NameLoc,
+ const TemplateArgumentListInfo &TAL);
- SourceLocation getLAngleLoc() const {
- return getLocalData()->LAngleLoc;
+ SourceLocation getElaboratedKeywordLoc() const {
+ return getLocalData()->ElaboratedKWLoc;
}
- void setLAngleLoc(SourceLocation Loc) {
- getLocalData()->LAngleLoc = Loc;
- }
+ NestedNameSpecifierLoc getQualifierLoc() const {
+ if (!getLocalData()->QualifierData)
+ return NestedNameSpecifierLoc();
- SourceLocation getRAngleLoc() const {
- return getLocalData()->RAngleLoc;
+ auto *QTN =
+ getTypePtr()->getTemplateName().getAsAdjustedQualifiedTemplateName();
+ assert(QTN && "missing qualification");
+ return NestedNameSpecifierLoc(QTN->getQualifier(),
+ getLocalData()->QualifierData);
}
- void setRAngleLoc(SourceLocation Loc) {
- getLocalData()->RAngleLoc = Loc;
+ SourceLocation getTemplateKeywordLoc() const {
+ return getLocalData()->TemplateKWLoc;
}
+ SourceLocation getTemplateNameLoc() const { return getLocalData()->NameLoc; }
+
+ SourceLocation getLAngleLoc() const { return getLocalData()->LAngleLoc; }
+
unsigned getNumArgs() const {
return getTypePtr()->template_arguments().size();
}
- void setArgLocInfo(unsigned i, TemplateArgumentLocInfo AI) {
- getArgInfos()[i] = AI;
- }
-
- TemplateArgumentLocInfo getArgLocInfo(unsigned i) const {
- return getArgInfos()[i];
+ MutableArrayRef<TemplateArgumentLocInfo> getArgLocInfos() {
+ return {getArgInfos(), getNumArgs()};
}
TemplateArgumentLoc getArgLoc(unsigned i) const {
return TemplateArgumentLoc(getTypePtr()->template_arguments()[i],
- getArgLocInfo(i));
+ getArgInfos()[i]);
}
- SourceLocation getTemplateNameLoc() const {
- return getLocalData()->NameLoc;
- }
-
- void setTemplateNameLoc(SourceLocation Loc) {
- getLocalData()->NameLoc = Loc;
- }
+ SourceLocation getRAngleLoc() const { return getLocalData()->SR.getEnd(); }
/// - Copy the location information from the given info.
void copy(TemplateSpecializationTypeLoc Loc) {
@@ -1773,21 +1904,9 @@ public:
memcpy(Data, Loc.Data, size);
}
- SourceRange getLocalSourceRange() const {
- if (getTemplateKeywordLoc().isValid())
- return SourceRange(getTemplateKeywordLoc(), getRAngleLoc());
- else
- return SourceRange(getTemplateNameLoc(), getRAngleLoc());
- }
+ SourceRange getLocalSourceRange() const { return getLocalData()->SR; }
- void initializeLocal(ASTContext &Context, SourceLocation Loc) {
- setTemplateKeywordLoc(SourceLocation());
- setTemplateNameLoc(Loc);
- setLAngleLoc(Loc);
- setRAngleLoc(Loc);
- initializeArgLocs(Context, getTypePtr()->template_arguments(),
- getArgInfos(), Loc);
- }
+ void initializeLocal(ASTContext &Context, SourceLocation Loc);
static void initializeArgLocs(ASTContext &Context,
ArrayRef<TemplateArgument> Args,
@@ -2346,99 +2465,68 @@ public:
void initializeLocal(ASTContext &Context, SourceLocation Loc);
};
-class DeducedTemplateSpecializationTypeLoc
- : public InheritingConcreteTypeLoc<DeducedTypeLoc,
- DeducedTemplateSpecializationTypeLoc,
- DeducedTemplateSpecializationType> {
-public:
- SourceLocation getTemplateNameLoc() const {
- return getNameLoc();
- }
-
- void setTemplateNameLoc(SourceLocation Loc) {
- setNameLoc(Loc);
- }
-};
-
-struct ElaboratedLocInfo {
+struct DeducedTemplateSpecializationLocInfo : TypeSpecLocInfo {
SourceLocation ElaboratedKWLoc;
-
/// Data associated with the nested-name-specifier location.
void *QualifierData;
};
-class ElaboratedTypeLoc : public ConcreteTypeLoc<UnqualTypeLoc,
- ElaboratedTypeLoc,
- ElaboratedType,
- ElaboratedLocInfo> {
+class DeducedTemplateSpecializationTypeLoc
+ : public ConcreteTypeLoc<DeducedTypeLoc,
+ DeducedTemplateSpecializationTypeLoc,
+ DeducedTemplateSpecializationType,
+ DeducedTemplateSpecializationLocInfo> {
public:
SourceLocation getElaboratedKeywordLoc() const {
- return !isEmpty() ? getLocalData()->ElaboratedKWLoc : SourceLocation();
+ return getLocalData()->ElaboratedKWLoc;
}
void setElaboratedKeywordLoc(SourceLocation Loc) {
- if (isEmpty()) {
- assert(Loc.isInvalid());
- return;
- }
getLocalData()->ElaboratedKWLoc = Loc;
}
+ SourceLocation getTemplateNameLoc() const { return getNameLoc(); }
+
+ void setTemplateNameLoc(SourceLocation Loc) { setNameLoc(Loc); }
+
NestedNameSpecifierLoc getQualifierLoc() const {
- return !isEmpty() ? NestedNameSpecifierLoc(getTypePtr()->getQualifier(),
- getLocalData()->QualifierData)
- : NestedNameSpecifierLoc();
+ void *Data = getLocalData()->QualifierData;
+ if (!Data)
+ return NestedNameSpecifierLoc();
+ NestedNameSpecifier Qualifier = getTypePtr()
+ ->getTemplateName()
+ .getAsAdjustedQualifiedTemplateName()
+ ->getQualifier();
+ return NestedNameSpecifierLoc(Qualifier, Data);
}
void setQualifierLoc(NestedNameSpecifierLoc QualifierLoc) {
- assert(QualifierLoc.getNestedNameSpecifier() ==
- getTypePtr()->getQualifier() &&
- "Inconsistent nested-name-specifier pointer");
- if (isEmpty()) {
- assert(!QualifierLoc.hasQualifier());
+ if (!QualifierLoc) {
+ // Even if we have a nested-name-specifier in the dependent
+ // template specialization type, we won't record the nested-name-specifier
+ // location information when this type-source location information is
+ // part of a nested-name-specifier.
+ getLocalData()->QualifierData = nullptr;
return;
}
- getLocalData()->QualifierData = QualifierLoc.getOpaqueData();
- }
- SourceRange getLocalSourceRange() const {
- if (getElaboratedKeywordLoc().isValid())
- if (getQualifierLoc())
- return SourceRange(getElaboratedKeywordLoc(),
- getQualifierLoc().getEndLoc());
- else
- return SourceRange(getElaboratedKeywordLoc());
- else
- return getQualifierLoc().getSourceRange();
+ assert(QualifierLoc.getNestedNameSpecifier() ==
+ getTypePtr()
+ ->getTemplateName()
+ .getAsAdjustedQualifiedTemplateName()
+ ->getQualifier() &&
+ "Inconsistent nested-name-specifier pointer");
+ getLocalData()->QualifierData = QualifierLoc.getOpaqueData();
}
void initializeLocal(ASTContext &Context, SourceLocation Loc);
+};
- TypeLoc getNamedTypeLoc() const { return getInnerTypeLoc(); }
-
- QualType getInnerType() const { return getTypePtr()->getNamedType(); }
-
- bool isEmpty() const {
- return getTypePtr()->getKeyword() == ElaboratedTypeKeyword::None &&
- !getTypePtr()->getQualifier();
- }
-
- unsigned getLocalDataAlignment() const {
- // FIXME: We want to return 1 here in the empty case, but
- // there are bugs in how alignment is handled in TypeLocs
- // that prevent this from working.
- return ConcreteTypeLoc::getLocalDataAlignment();
- }
-
- unsigned getLocalDataSize() const {
- return !isEmpty() ? ConcreteTypeLoc::getLocalDataSize() : 0;
- }
+struct ElaboratedLocInfo {
+ SourceLocation ElaboratedKWLoc;
- void copy(ElaboratedTypeLoc Loc) {
- unsigned size = getFullDataSize();
- assert(size == Loc.getFullDataSize());
- memcpy(Data, Loc.Data, size);
- }
+ /// Data associated with the nested-name-specifier location.
+ void *QualifierData;
};
// This is exactly the structure of an ElaboratedTypeLoc whose inner
@@ -2749,8 +2837,6 @@ inline T TypeLoc::getAsAdjusted() const {
Cur = ATL.getWrappedLoc();
else if (auto ATL = Cur.getAs<HLSLAttributedResourceTypeLoc>())
Cur = ATL.getWrappedLoc();
- else if (auto ETL = Cur.getAs<ElaboratedTypeLoc>())
- Cur = ETL.getNamedTypeLoc();
else if (auto ATL = Cur.getAs<AdjustedTypeLoc>())
Cur = ATL.getOriginalLoc();
else if (auto MQL = Cur.getAs<MacroQualifiedTypeLoc>())
diff --git a/clang/include/clang/AST/TypeProperties.td b/clang/include/clang/AST/TypeProperties.td
index 3373e96..388f6dd 100644
--- a/clang/include/clang/AST/TypeProperties.td
+++ b/clang/include/clang/AST/TypeProperties.td
@@ -379,38 +379,59 @@ let Class = AtomicType in {
}
let Class = UnresolvedUsingType in {
- def : Property<"declaration", DeclRef> {
- let Read = [{ node->getDecl() }];
+ def : Property<"IsCanonical", Bool> {
+ let Read = [{ node->isCanonicalUnqualified() }];
}
-
+ def : Property<"Keyword", ElaboratedTypeKeyword> {
+ let Conditional = [{ !IsCanonical }];
+ let Read = [{ node->getKeyword() }];
+ }
+ def : Property<"Qualifier", NestedNameSpecifier> {
+ let Conditional = [{ !IsCanonical }];
+ let Read = [{ node->getQualifier() }];
+ }
+ def : Property<"D", DeclRef> { let Read = [{ node->getDecl() }]; }
def : Creator<[{
- return ctx.getUnresolvedUsingType(cast<UnresolvedUsingTypenameDecl>(declaration));
+ auto *UD = cast<UnresolvedUsingTypenameDecl>(D);
+ return IsCanonical ? ctx.getCanonicalUnresolvedUsingType(UD) : ctx.getUnresolvedUsingType(*Keyword, *Qualifier, UD);
}]>;
}
let Class = UsingType in {
- def : Property<"foundDeclaration", UsingShadowDeclRef> {
- let Read = [{ node->getFoundDecl() }];
+ def : Property<"Keyword", ElaboratedTypeKeyword> {
+ let Read = [{ node->getKeyword() }];
}
- def : Property<"underlyingType", QualType> {
- let Read = [{ node->getUnderlyingType() }];
+ def : Property<"Qualifier", NestedNameSpecifier> {
+ let Read = [{ node->getQualifier() }];
+ }
+ def : Property<"D", UsingShadowDeclRef> { let Read = [{ node->getDecl() }]; }
+ def : Property<"UnderlyingType", QualType> {
+ let Read = [{ node->desugar() }];
}
-
def : Creator<[{
- return ctx.getUsingType(foundDeclaration, underlyingType);
+ return ctx.getUsingType(Keyword, Qualifier, D, UnderlyingType);
}]>;
}
let Class = TypedefType in {
+ def : Property<"Keyword", ElaboratedTypeKeyword> {
+ let Read = [{ node->getKeyword() }];
+ }
+ def : Property<"Qualifier", NestedNameSpecifier> {
+ let Read = [{ node->getQualifier() }];
+ }
def : Property<"declaration", DeclRef> {
let Read = [{ node->getDecl() }];
}
- def : Property<"underlyingType", QualType> {
+ def : Property<"UnderlyingType", QualType> {
let Read = [{ node->desugar() }];
}
+ def : Property<"TypeMatchesDecl", Bool> {
+ let Read = [{ node->typeMatchesDecl() }];
+ }
def : Creator<[{
- return ctx.getTypedefType(cast<TypedefNameDecl>(declaration), underlyingType);
+ return ctx.getTypedefType(Keyword, Qualifier, cast<TypedefNameDecl>(declaration), UnderlyingType, TypeMatchesDecl);
}]>;
}
@@ -520,6 +541,9 @@ let Class = AutoType in {
}
let Class = DeducedTemplateSpecializationType in {
+ def : Property<"keyword", ElaboratedTypeKeyword> {
+ let Read = [{ node->getKeyword() }];
+ }
def : Property<"templateName", Optional<TemplateName>> {
let Read = [{ makeOptionalFromNullable(node->getTemplateName()) }];
}
@@ -533,97 +557,42 @@ let Class = DeducedTemplateSpecializationType in {
}
def : Creator<[{
- return ctx.getDeducedTemplateSpecializationType(
+ return ctx.getDeducedTemplateSpecializationType(keyword,
makeNullableFromOptional(templateName),
deducedType, dependent);
}]>;
}
let Class = TagType in {
- def : Property<"dependent", Bool> {
- let Read = [{ node->isDependentType() }];
+ def : Property<"IsCanonical", Bool> {
+ let Read = [{ node->isCanonicalUnqualified() }];
}
- def : Property<"declaration", DeclRef> {
- // We don't know which declaration was originally referenced here, and we
- // cannot reference a declaration that follows the use (because that can
- // introduce deserialization cycles), so conservatively generate a
- // reference to the first declaration.
- // FIXME: If this is a reference to a class template specialization, that
- // can still introduce a deserialization cycle.
- let Read = [{ node->getDecl()->getCanonicalDecl() }];
+ def : Property<"Keyword", ElaboratedTypeKeyword> {
+ let Conditional = [{ !IsCanonical }];
+ let Read = [{ node->getKeyword() }];
+ }
+ def : Property<"Qualifier", NestedNameSpecifier> {
+ let Conditional = [{ !IsCanonical }];
+ let Read = [{ node->getQualifier() }];
}
+ def : Property<"TD", TagDeclRef> { let Read = [{ node->getOriginalDecl() }]; }
}
let Class = EnumType in {
+ def : Property<"OwnsTag", Bool> { let Read = [{ node->isTagOwned() }]; }
def : Creator<[{
- QualType result = ctx.getEnumType(cast<EnumDecl>(declaration));
- if (dependent)
- const_cast<Type *>(result.getTypePtr())
- ->addDependence(TypeDependence::DependentInstantiation);
- return result;
+ return IsCanonical ? ctx.getCanonicalTagType(TD) : ctx.getTagType(*Keyword, *Qualifier, TD, OwnsTag);
}]>;
}
-
let Class = RecordType in {
+ def : Property<"OwnsTag", Bool> { let Read = [{ node->isTagOwned() }]; }
def : Creator<[{
- auto record = cast<RecordDecl>(declaration);
- QualType result = ctx.getRecordType(record);
- if (dependent)
- const_cast<Type *>(result.getTypePtr())
- ->addDependence(TypeDependence::DependentInstantiation);
- return result;
+ return IsCanonical ? ctx.getCanonicalTagType(TD) : ctx.getTagType(*Keyword, *Qualifier, TD, OwnsTag);
}]>;
}
-
-let Class = ElaboratedType in {
- def : Property<"keyword", ElaboratedTypeKeyword> {
- let Read = [{ node->getKeyword() }];
- }
- def : Property<"qualifier", NestedNameSpecifier> {
- let Read = [{ node->getQualifier() }];
- }
- def : Property<"namedType", QualType> {
- let Read = [{ node->getNamedType() }];
- }
- def : Property<"ownedTag", Optional<TagDeclRef>> {
- let Read = [{ makeOptionalFromPointer(
- const_cast<const TagDecl *>(node->getOwnedTagDecl())) }];
- }
-
- def : Creator<[{
- return ctx.getElaboratedType(keyword, qualifier, namedType,
- makePointerFromOptional(ownedTag));
- }]>;
-}
-
let Class = InjectedClassNameType in {
- def : Property<"declaration", DeclRef> {
- // FIXME: drilling down to the canonical declaration is what the
- // existing serialization code was doing, but it's not clear why.
- let Read = [{ node->getDecl()->getCanonicalDecl() }];
- }
- def : Property<"injectedSpecializationType", QualType> {
- let Read = [{ node->getInjectedSpecializationType() }];
- }
-
def : Creator<[{
- // FIXME: ASTContext::getInjectedClassNameType is not currently suitable
- // for AST reading, too much interdependencies.
- const Type *T = nullptr;
- auto typeDecl = cast<CXXRecordDecl>(declaration);
- for (auto *DI = typeDecl; DI; DI = DI->getPreviousDecl()) {
- if (const Type *existing = DI->getTypeForDecl()) {
- T = existing;
- break;
- }
- }
- if (!T) {
- T = new (ctx, TypeAlignment)
- InjectedClassNameType(typeDecl, injectedSpecializationType);
- for (auto *DI = typeDecl; DI; DI = DI->getPreviousDecl())
- DI->setTypeForDecl(T);
- }
- return QualType(T, 0);
+ return IsCanonical ? ctx.getCanonicalTagType(TD) : ctx.getTagType(*Keyword, *Qualifier, TD, /*OwnsTag=*/false);
}]>;
}
@@ -741,6 +710,9 @@ let Class = DependentAddressSpaceType in {
}
let Class = TemplateSpecializationType in {
+ def : Property<"keyword", ElaboratedTypeKeyword> {
+ let Read = [{ node->getKeyword() }];
+ }
def : Property<"templateName", TemplateName> {
let Read = [{ node->getTemplateName() }];
}
@@ -753,7 +725,7 @@ let Class = TemplateSpecializationType in {
}
def : Creator<[{
- return ctx.getTemplateSpecializationType(templateName, args, {}, UnderlyingType);
+ return ctx.getTemplateSpecializationType(keyword, templateName, args, {}, UnderlyingType);
}]>;
}
diff --git a/clang/include/clang/ASTMatchers/ASTMatchers.h b/clang/include/clang/ASTMatchers/ASTMatchers.h
index 08c898f..cbd931c 100644
--- a/clang/include/clang/ASTMatchers/ASTMatchers.h
+++ b/clang/include/clang/ASTMatchers/ASTMatchers.h
@@ -222,6 +222,19 @@ extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefNameDecl>
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasDecl>
typeAliasDecl;
+/// \brief Matches shadow declarations introduced into a scope by a
+/// (resolved) using declaration.
+///
+/// Given
+/// \code
+/// namespace n { int f; }
+/// namespace declToImport { using n::f; }
+/// \endcode
+/// usingShadowDecl()
+/// matches \code f \endcode
+extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingShadowDecl>
+ usingShadowDecl;
+
/// Matches type alias template declarations.
///
/// typeAliasTemplateDecl() matches
@@ -3740,7 +3753,7 @@ extern const internal::VariadicOperatorMatcherFunc<1, 1> unless;
/// Matcher<MemberExpr>, Matcher<QualType>, Matcher<RecordType>,
/// Matcher<TagType>, Matcher<TemplateSpecializationType>,
/// Matcher<TemplateTypeParmType>, Matcher<TypedefType>,
-/// Matcher<UnresolvedUsingType>
+/// Matcher<UnresolvedUsingType>, Matcher<UsingType>
inline internal::PolymorphicMatcher<
internal::HasDeclarationMatcher,
void(internal::HasDeclarationSupportedTypes), internal::Matcher<Decl>>
@@ -4375,7 +4388,13 @@ AST_POLYMORPHIC_MATCHER_P(throughUsingDecl,
AST_POLYMORPHIC_SUPPORTED_TYPES(DeclRefExpr,
UsingType),
internal::Matcher<UsingShadowDecl>, Inner) {
- const NamedDecl *FoundDecl = Node.getFoundDecl();
+ const NamedDecl *FoundDecl;
+ if constexpr (std::is_same_v<NodeType, UsingType>) {
+ FoundDecl = Node.getDecl();
+ } else {
+ static_assert(std::is_same_v<NodeType, DeclRefExpr>);
+ FoundDecl = Node.getFoundDecl();
+ }
if (const UsingShadowDecl *UsingDecl = dyn_cast<UsingShadowDecl>(FoundDecl))
return Inner.matches(*UsingDecl, Finder, Builder);
return false;
@@ -7004,37 +7023,6 @@ AST_POLYMORPHIC_MATCHER_P2(
InnerMatcher.matches(Args[Index], Finder, Builder);
}
-/// Matches C or C++ elaborated `TypeLoc`s.
-///
-/// Given
-/// \code
-/// struct s {};
-/// struct s ss;
-/// \endcode
-/// elaboratedTypeLoc()
-/// matches the `TypeLoc` of the variable declaration of `ss`.
-extern const internal::VariadicDynCastAllOfMatcher<TypeLoc, ElaboratedTypeLoc>
- elaboratedTypeLoc;
-
-/// Matches elaborated `TypeLoc`s that have a named `TypeLoc` matching
-/// `InnerMatcher`.
-///
-/// Given
-/// \code
-/// template <typename T>
-/// class C {};
-/// class C<int> c;
-///
-/// class D {};
-/// class D d;
-/// \endcode
-/// elaboratedTypeLoc(hasNamedTypeLoc(templateSpecializationTypeLoc()));
-/// matches the `TypeLoc` of the variable declaration of `c`, but not `d`.
-AST_MATCHER_P(ElaboratedTypeLoc, hasNamedTypeLoc, internal::Matcher<TypeLoc>,
- InnerMatcher) {
- return InnerMatcher.matches(Node.getNamedTypeLoc(), Finder, Builder);
-}
-
/// Matches type \c bool.
///
/// Given
@@ -7301,7 +7289,7 @@ extern const AstTypeMatcher<DecltypeType> decltypeType;
AST_TYPE_TRAVERSE_MATCHER(hasDeducedType, getDeducedType,
AST_POLYMORPHIC_SUPPORTED_TYPES(AutoType));
-/// Matches \c DecltypeType or \c UsingType nodes to find the underlying type.
+/// Matches \c QualType nodes to find the underlying type.
///
/// Given
/// \code
@@ -7311,10 +7299,13 @@ AST_TYPE_TRAVERSE_MATCHER(hasDeducedType, getDeducedType,
/// decltypeType(hasUnderlyingType(isInteger()))
/// matches the type of "a"
///
-/// Usable as: Matcher<DecltypeType>, Matcher<UsingType>
-AST_TYPE_TRAVERSE_MATCHER(hasUnderlyingType, getUnderlyingType,
- AST_POLYMORPHIC_SUPPORTED_TYPES(DecltypeType,
- UsingType));
+/// Usable as: Matcher<QualType>
+AST_MATCHER_P(Type, hasUnderlyingType, internal::Matcher<QualType>, Inner) {
+ QualType QT = Node.getLocallyUnqualifiedSingleStepDesugaredType();
+ if (QT == QualType(&Node, 0))
+ return false;
+ return Inner.matches(QT, Finder, Builder);
+}
/// Matches \c FunctionType nodes.
///
@@ -7593,27 +7584,7 @@ extern const AstTypeMatcher<RecordType> recordType;
/// and \c c.
extern const AstTypeMatcher<TagType> tagType;
-/// Matches types specified with an elaborated type keyword or with a
-/// qualified name.
-///
-/// Given
-/// \code
-/// namespace N {
-/// namespace M {
-/// class D {};
-/// }
-/// }
-/// class C {};
-///
-/// class C c;
-/// N::M::D d;
-/// \endcode
-///
-/// \c elaboratedType() matches the type of the variable declarations of both
-/// \c c and \c d.
-extern const AstTypeMatcher<ElaboratedType> elaboratedType;
-
-/// Matches ElaboratedTypes whose qualifier, a NestedNameSpecifier,
+/// Matches Types whose qualifier, a NestedNameSpecifier,
/// matches \c InnerMatcher if the qualifier exists.
///
/// Given
@@ -7628,34 +7599,14 @@ extern const AstTypeMatcher<ElaboratedType> elaboratedType;
///
/// \c elaboratedType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N"))))
/// matches the type of the variable declaration of \c d.
-AST_MATCHER_P(ElaboratedType, hasQualifier,
- internal::Matcher<NestedNameSpecifier>, InnerMatcher) {
- if (const NestedNameSpecifier *Qualifier = Node.getQualifier())
- return InnerMatcher.matches(*Qualifier, Finder, Builder);
+AST_MATCHER_P(Type, hasQualifier, internal::Matcher<NestedNameSpecifier>,
+ InnerMatcher) {
+ if (NestedNameSpecifier Qualifier = Node.getPrefix())
+ return InnerMatcher.matches(Qualifier, Finder, Builder);
return false;
}
-/// Matches ElaboratedTypes whose named type matches \c InnerMatcher.
-///
-/// Given
-/// \code
-/// namespace N {
-/// namespace M {
-/// class D {};
-/// }
-/// }
-/// N::M::D d;
-/// \endcode
-///
-/// \c elaboratedType(namesType(recordType(
-/// hasDeclaration(namedDecl(hasName("D")))))) matches the type of the variable
-/// declaration of \c d.
-AST_MATCHER_P(ElaboratedType, namesType, internal::Matcher<QualType>,
- InnerMatcher) {
- return InnerMatcher.matches(Node.getNamedType(), Finder, Builder);
-}
-
/// Matches types specified through a using declaration.
///
/// Given
@@ -7824,7 +7775,7 @@ AST_MATCHER_FUNCTION_P_OVERLOAD(
/// matches "A::"
AST_MATCHER_P(NestedNameSpecifier, specifiesType,
internal::Matcher<QualType>, InnerMatcher) {
- if (!Node.getAsType())
+ if (Node.getKind() != NestedNameSpecifier::Kind::Type)
return false;
return InnerMatcher.matches(QualType(Node.getAsType(), 0), Finder, Builder);
}
@@ -7842,8 +7793,12 @@ AST_MATCHER_P(NestedNameSpecifier, specifiesType,
/// matches "A::"
AST_MATCHER_P(NestedNameSpecifierLoc, specifiesTypeLoc,
internal::Matcher<TypeLoc>, InnerMatcher) {
- return Node && Node.getNestedNameSpecifier()->getAsType() &&
- InnerMatcher.matches(Node.getTypeLoc(), Finder, Builder);
+ if (!Node)
+ return false;
+ TypeLoc TL = Node.getAsTypeLoc();
+ if (!TL)
+ return false;
+ return InnerMatcher.matches(TL, Finder, Builder);
}
/// Matches on the prefix of a \c NestedNameSpecifier.
@@ -7858,10 +7813,21 @@ AST_MATCHER_P(NestedNameSpecifierLoc, specifiesTypeLoc,
AST_MATCHER_P_OVERLOAD(NestedNameSpecifier, hasPrefix,
internal::Matcher<NestedNameSpecifier>, InnerMatcher,
0) {
- const NestedNameSpecifier *NextNode = Node.getPrefix();
+ NestedNameSpecifier NextNode = std::nullopt;
+ switch (Node.getKind()) {
+ case NestedNameSpecifier::Kind::Namespace:
+ NextNode = Node.getAsNamespaceAndPrefix().Prefix;
+ break;
+ case NestedNameSpecifier::Kind::Type:
+ NextNode = Node.getAsType()->getPrefix();
+ break;
+ default:
+ break;
+ }
+
if (!NextNode)
return false;
- return InnerMatcher.matches(*NextNode, Finder, Builder);
+ return InnerMatcher.matches(NextNode, Finder, Builder);
}
/// Matches on the prefix of a \c NestedNameSpecifierLoc.
@@ -7876,7 +7842,12 @@ AST_MATCHER_P_OVERLOAD(NestedNameSpecifier, hasPrefix,
AST_MATCHER_P_OVERLOAD(NestedNameSpecifierLoc, hasPrefix,
internal::Matcher<NestedNameSpecifierLoc>, InnerMatcher,
1) {
- NestedNameSpecifierLoc NextNode = Node.getPrefix();
+ NestedNameSpecifierLoc NextNode;
+ if (TypeLoc TL = Node.getAsTypeLoc())
+ NextNode = TL.getPrefix();
+ else
+ NextNode = Node.getAsNamespaceAndPrefix().Prefix;
+
if (!NextNode)
return false;
return InnerMatcher.matches(NextNode, Finder, Builder);
@@ -7894,9 +7865,13 @@ AST_MATCHER_P_OVERLOAD(NestedNameSpecifierLoc, hasPrefix,
/// matches "ns::"
AST_MATCHER_P(NestedNameSpecifier, specifiesNamespace,
internal::Matcher<NamespaceDecl>, InnerMatcher) {
- if (auto *NS = dyn_cast_if_present<NamespaceDecl>(Node.getAsNamespace()))
- return InnerMatcher.matches(*NS, Finder, Builder);
- return false;
+ if (Node.getKind() != NestedNameSpecifier::Kind::Namespace)
+ return false;
+ const auto *Namespace =
+ dyn_cast<NamespaceDecl>(Node.getAsNamespaceAndPrefix().Namespace);
+ if (!Namespace)
+ return false;
+ return InnerMatcher.matches(*Namespace, Finder, Builder);
}
/// Matches attributes.
diff --git a/clang/include/clang/ASTMatchers/ASTMatchersInternal.h b/clang/include/clang/ASTMatchers/ASTMatchersInternal.h
index 5df2294..1ab6f11 100644
--- a/clang/include/clang/ASTMatchers/ASTMatchersInternal.h
+++ b/clang/include/clang/ASTMatchers/ASTMatchersInternal.h
@@ -1017,10 +1017,7 @@ private:
// First, for any types that have a declaration, extract the declaration and
// match on it.
if (const auto *S = dyn_cast<TagType>(&Node)) {
- return matchesDecl(S->getDecl(), Finder, Builder);
- }
- if (const auto *S = dyn_cast<InjectedClassNameType>(&Node)) {
- return matchesDecl(S->getDecl(), Finder, Builder);
+ return matchesDecl(S->getOriginalDecl(), Finder, Builder);
}
if (const auto *S = dyn_cast<TemplateTypeParmType>(&Node)) {
return matchesDecl(S->getDecl(), Finder, Builder);
@@ -1031,6 +1028,9 @@ private:
if (const auto *S = dyn_cast<UnresolvedUsingType>(&Node)) {
return matchesDecl(S->getDecl(), Finder, Builder);
}
+ if (const auto *S = dyn_cast<UsingType>(&Node)) {
+ return matchesDecl(S->getDecl(), Finder, Builder);
+ }
if (const auto *S = dyn_cast<ObjCObjectType>(&Node)) {
return matchesDecl(S->getInterface(), Finder, Builder);
}
@@ -1066,12 +1066,6 @@ private:
Builder);
}
- // FIXME: We desugar elaborated types. This makes the assumption that users
- // do never want to match on whether a type is elaborated - there are
- // arguments for both sides; for now, continue desugaring.
- if (const auto *S = dyn_cast<ElaboratedType>(&Node)) {
- return matchesSpecialized(S->desugar(), Finder, Builder);
- }
// Similarly types found via using declarations.
// These are *usually* meaningless sugar, and this matches the historical
// behavior prior to the introduction of UsingType.
@@ -1211,8 +1205,8 @@ using AdaptativeDefaultToTypes =
/// All types that are supported by HasDeclarationMatcher above.
using HasDeclarationSupportedTypes =
TypeList<CallExpr, CXXConstructExpr, CXXNewExpr, DeclRefExpr, EnumType,
- ElaboratedType, InjectedClassNameType, LabelStmt, AddrLabelExpr,
- MemberExpr, QualType, RecordType, TagType,
+ InjectedClassNameType, LabelStmt, AddrLabelExpr, MemberExpr,
+ QualType, RecordType, TagType, UsingType,
TemplateSpecializationType, TemplateTypeParmType, TypedefType,
UnresolvedUsingType, ObjCIvarRefExpr, ObjCInterfaceDecl>;
@@ -1789,7 +1783,7 @@ public:
private:
static DynTypedNode extract(const NestedNameSpecifierLoc &Loc) {
- return DynTypedNode::create(*Loc.getNestedNameSpecifier());
+ return DynTypedNode::create(Loc.getNestedNameSpecifier());
}
};
diff --git a/clang/include/clang/Analysis/FlowSensitive/ASTOps.h b/clang/include/clang/Analysis/FlowSensitive/ASTOps.h
index 8c7ee86..a404b06 100644
--- a/clang/include/clang/Analysis/FlowSensitive/ASTOps.h
+++ b/clang/include/clang/Analysis/FlowSensitive/ASTOps.h
@@ -112,8 +112,14 @@ public:
// fields that are only used in these.
// Note: The operand of the `noexcept` operator is an unevaluated operand, but
// nevertheless it appears in the Clang CFG, so we don't exclude it here.
- bool TraverseDecltypeTypeLoc(DecltypeTypeLoc) override { return true; }
- bool TraverseTypeOfExprTypeLoc(TypeOfExprTypeLoc) override { return true; }
+ bool TraverseDecltypeTypeLoc(DecltypeTypeLoc,
+ bool TraverseQualifier) override {
+ return true;
+ }
+ bool TraverseTypeOfExprTypeLoc(TypeOfExprTypeLoc,
+ bool TraverseQualifier) override {
+ return true;
+ }
bool TraverseCXXTypeidExpr(CXXTypeidExpr *TIE) override {
if (TIE->isPotentiallyEvaluated())
return DynamicRecursiveASTVisitor::TraverseCXXTypeidExpr(TIE);
diff --git a/clang/include/clang/Basic/ABIVersions.def b/clang/include/clang/Basic/ABIVersions.def
new file mode 100644
index 0000000..f6524bc
--- /dev/null
+++ b/clang/include/clang/Basic/ABIVersions.def
@@ -0,0 +1,135 @@
+//===--- ABIVersions.def - Clang ABI Versions Database ----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file enumerates Clang ABI versions.
+//
+//===----------------------------------------------------------------------===//
+//
+/// @file ABIVersions.def
+///
+/// In this file, each of the Clang ABI Versions is enumerated with the
+/// ABI_VER_MAJOR_MINOR, ABI_VER_MAJOR, or ABI_VER_LATEST macro.
+///
+/// ABI_VER_MAJOR is used when the minor version is 0 or can be omitted.
+///
+/// The first argument of ABI_VER_MAJOR_MINOR and ABI_VER_MAJOR is the major
+/// version.
+///
+/// The second argument of ABI_VER_MAJOR_MINOR is the minor version.
+///
+/// The first argument of ABI_VER_LATEST is an identifier `Latest`.
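+///
+/// A typical consumer defines the three macros and then includes this file.
+/// The following is only an illustrative sketch; the macro bodies and the
+/// enum name ClangABI are hypothetical and not part of this file:
+/// \code
+///   #define ABI_VER_MAJOR_MINOR(Major, Minor) Ver##Major##_##Minor,
+///   #define ABI_VER_MAJOR(Major) Ver##Major,
+///   #define ABI_VER_LATEST(Latest) Latest,
+///   enum class ClangABI {
+///   #include "clang/Basic/ABIVersions.def"
+///   };
+/// \endcode
+/// If none of the three macros is defined, they all default to expanding to
+/// nothing; defining only a subset triggers the error check below. All three
+/// macros are #undef'd at the end of this file.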
+
+#if defined(ABI_VER_MAJOR_MINOR) != defined(ABI_VER_MAJOR) || \
+ defined(ABI_VER_MAJOR) != defined(ABI_VER_LATEST)
+# error ABI_VER_MAJOR_MINOR, ABI_VER_MAJOR and ABI_VER_LATEST should be defined simultaneously
+#endif
+
+#ifndef ABI_VER_MAJOR_MINOR
+# define ABI_VER_MAJOR_MINOR(Major, Minor)
+#endif
+
+#ifndef ABI_VER_MAJOR
+# define ABI_VER_MAJOR(Major)
+#endif
+
+#ifndef ABI_VER_LATEST
+# define ABI_VER_LATEST(Latest)
+#endif
+
+/// Attempt to be ABI-compatible with code generated by Clang 3.8.x
+/// (SVN r257626). This causes <1 x long long> to be passed in an integer
+/// register instead of an SSE register on x86_64.
+ABI_VER_MAJOR_MINOR(3, 8)
+
+/// Attempt to be ABI-compatible with code generated by Clang 4.0.x
+/// (SVN r291814). This causes move operations to be ignored when determining
+/// whether a class type can be passed or returned directly.
+ABI_VER_MAJOR(4)
+
+/// Attempt to be ABI-compatible with code generated by Clang 6.0.x
+/// (SVN r321711). This causes determination of whether a type is
+/// standard-layout to ignore collisions between empty base classes and between
+/// base classes and member subobjects, which affects whether we reuse base
+/// class tail padding in some ABIs.
+ABI_VER_MAJOR(6)
+
+/// Attempt to be ABI-compatible with code generated by Clang 7.0.x
+/// (SVN r338536). This causes alignof (C++) and _Alignof (C11) to be compatible
+/// with __alignof (i.e., return the preferred alignment) rather than returning
+/// the required alignment.
+ABI_VER_MAJOR(7)
+
+/// Attempt to be ABI-compatible with code generated by Clang 9.0.x
+/// (SVN r351319). This causes vectors of __int128 to be passed in memory
+/// instead of passing in multiple scalar registers on x86_64 on Linux and
+/// NetBSD.
+ABI_VER_MAJOR(9)
+
+/// Attempt to be ABI-compatible with code generated by Clang 11.0.x
+/// (git 2e10b7a39b93). This causes clang to pass unions with a 256-bit vector
+/// member on the stack instead of using registers, to not properly mangle
+/// substitutions for template names in some cases, and to mangle declaration
+/// template arguments without a cast to the parameter type even when that can
+/// lead to mangling collisions.
+ABI_VER_MAJOR(11)
+
+/// Attempt to be ABI-compatible with code generated by Clang 12.0.x
+/// (git 8e464dd76bef). This causes clang to mangle lambdas within global-scope
+/// inline variables incorrectly.
+ABI_VER_MAJOR(12)
+
+/// Attempt to be ABI-compatible with code generated by Clang 14.0.x.
+/// This causes clang to:
+/// - mangle dependent nested names incorrectly.
+/// - make trivial only those defaulted copy constructors with a
+/// parameter-type-list equivalent to the parameter-type-list of an implicit
+/// declaration.
+ABI_VER_MAJOR(14)
+
+/// Attempt to be ABI-compatible with code generated by Clang 15.0.x.
+/// This causes clang to:
+/// - Reverse the implementation for CWG692, CWG1395 and CWG1432.
+/// - pack non-POD members of packed structs.
+/// - consider classes with defaulted special member functions non-pod.
+ABI_VER_MAJOR(15)
+
+/// Attempt to be ABI-compatible with code generated by Clang 17.0.x.
+/// This causes clang to revert some fixes to its implementation of the Itanium
+/// name mangling scheme, with the consequence that overloaded function
+/// templates are mangled the same if they differ only by:
+/// - constraints
+/// - whether a non-type template parameter has a deduced type
+/// - the parameter list of a template template parameter
+ABI_VER_MAJOR(17)
+
+/// Attempt to be ABI-compatible with code generated by Clang 18.0.x.
+/// This causes clang to revert some fixes to the mangling of lambdas in the
+/// initializers of members of local classes.
+ABI_VER_MAJOR(18)
+
+/// Attempt to be ABI-compatible with code generated by Clang 19.0.x.
+/// This causes clang to:
+/// - Incorrectly mangle the 'base type' substitutions of the CXX construction
+/// vtable because it hasn't added 'type' as a substitution.
+/// - Skip mangling enclosing class templates of member-like friend function
+/// templates.
+/// - Ignore empty struct arguments in C++ mode for ARM, instead of passing
+/// them as if they had a size of 1 byte.
+ABI_VER_MAJOR(19)
+
+/// Attempt to be ABI-compatible with code generated by Clang 20.0.x.
+/// This causes clang to:
+/// - Incorrectly return C++ records in AVX registers on x86_64.
+ABI_VER_MAJOR(20)
+
+/// Conform to the underlying platform's C and C++ ABIs as closely as we can.
+ABI_VER_LATEST(Latest)
+
+#undef ABI_VER_MAJOR_MINOR
+#undef ABI_VER_MAJOR
+#undef ABI_VER_LATEST
diff --git a/clang/include/clang/Basic/BuiltinsX86.td b/clang/include/clang/Basic/BuiltinsX86.td
index a4acc72..3efc0be 100644
--- a/clang/include/clang/Basic/BuiltinsX86.td
+++ b/clang/include/clang/Basic/BuiltinsX86.td
@@ -93,13 +93,11 @@ let Attributes = [Const, NoThrow, RequiredVectorWidth<128>] in {
}
let Features = "sse2" in {
- def pmulhw128 : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Vector<8, short>)">;
def pavgb128 : X86Builtin<"_Vector<16, char>(_Vector<16, char>, _Vector<16, char>)">;
def pavgw128 : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Vector<8, short>)">;
def packsswb128 : X86Builtin<"_Vector<16, char>(_Vector<8, short>, _Vector<8, short>)">;
def packssdw128 : X86Builtin<"_Vector<8, short>(_Vector<4, int>, _Vector<4, int>)">;
def packuswb128 : X86Builtin<"_Vector<16, char>(_Vector<8, short>, _Vector<8, short>)">;
- def pmulhuw128 : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Vector<8, short>)">;
def vec_ext_v2di : X86Builtin<"long long int(_Vector<2, long long int>, _Constant int)">;
def vec_ext_v4si : X86Builtin<"int(_Vector<4, int>, _Constant int)">;
def vec_ext_v4sf : X86Builtin<"float(_Vector<4, float>, _Constant int)">;
@@ -107,6 +105,11 @@ let Attributes = [Const, NoThrow, RequiredVectorWidth<128>] in {
def vec_set_v8hi : X86Builtin<"_Vector<8, short>(_Vector<8, short>, short, _Constant int)">;
}
+ let Features = "sse2", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
+ def pmulhw128 : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Vector<8, short>)">;
+ def pmulhuw128 : X86Builtin<"_Vector<8, unsigned short>(_Vector<8, unsigned short>, _Vector<8, unsigned short>)">;
+ }
+
let Features = "sse3" in {
foreach Op = ["addsub", "hadd", "hsub"] in {
def Op#ps : X86Builtin<"_Vector<4, float>(_Vector<4, float>, _Vector<4, float>)">;
@@ -579,8 +582,6 @@ let Features = "avx2", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] i
def pmovmskb256 : X86Builtin<"int(_Vector<32, char>)">;
def pmuldq256 : X86Builtin<"_Vector<4, long long int>(_Vector<8, int>, _Vector<8, int>)">;
def pmulhrsw256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, _Vector<16, short>)">;
- def pmulhuw256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, _Vector<16, short>)">;
- def pmulhw256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, _Vector<16, short>)">;
def pmuludq256 : X86Builtin<"_Vector<4, long long int>(_Vector<8, int>, _Vector<8, int>)">;
def psadbw256 : X86Builtin<"_Vector<4, long long int>(_Vector<32, char>, _Vector<32, char>)">;
def pshufb256 : X86Builtin<"_Vector<32, char>(_Vector<32, char>, _Vector<32, char>)">;
@@ -619,6 +620,11 @@ let Features = "avx2", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] i
def insert128i256 : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, _Vector<2, long long int>, _Constant int)">;
}
+let Features = "avx2", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in {
+ def pmulhuw256 : X86Builtin<"_Vector<16, unsigned short>(_Vector<16, unsigned short>, _Vector<16, unsigned short>)">;
+ def pmulhw256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, _Vector<16, short>)">;
+}
+
let Features = "avx2", Attributes = [NoThrow, RequiredVectorWidth<256>] in {
def maskloadd256 : X86Builtin<"_Vector<8, int>(_Vector<8, int const *>, _Vector<8, int>)">;
def maskloadq256 : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int const *>, _Vector<4, long long int>)">;
@@ -878,11 +884,6 @@ let Features = "sha", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in
def sha256msg2 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<4, int>)">;
}
-let Features = "fma|fma4", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
- def vfmaddps : X86Builtin<"_Vector<4, float>(_Vector<4, float>, _Vector<4, float>, _Vector<4, float>)">;
- def vfmaddpd : X86Builtin<"_Vector<2, double>(_Vector<2, double>, _Vector<2, double>, _Vector<2, double>)">;
-}
-
let Features = "fma", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
def vfmaddss3 : X86Builtin<"_Vector<4, float>(_Vector<4, float>, _Vector<4, float>, _Vector<4, float>)">;
def vfmaddsd3 : X86Builtin<"_Vector<2, double>(_Vector<2, double>, _Vector<2, double>, _Vector<2, double>)">;
@@ -898,9 +899,8 @@ let Features = "fma|fma4", Attributes = [NoThrow, Const, RequiredVectorWidth<128
def vfmaddsubpd : X86Builtin<"_Vector<2, double>(_Vector<2, double>, _Vector<2, double>, _Vector<2, double>)">;
}
-let Features = "fma|fma4", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
- def vfmaddps256 : X86Builtin<"_Vector<8, float>(_Vector<8, float>, _Vector<8, float>, _Vector<8, float>)">;
- def vfmaddpd256 : X86Builtin<"_Vector<4, double>(_Vector<4, double>, _Vector<4, double>, _Vector<4, double>)">;
+let Features = "fma|fma4",
+ Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
def vfmaddsubps256 : X86Builtin<"_Vector<8, float>(_Vector<8, float>, _Vector<8, float>, _Vector<8, float>)">;
def vfmaddsubpd256 : X86Builtin<"_Vector<4, double>(_Vector<4, double>, _Vector<4, double>, _Vector<4, double>)">;
}
@@ -1429,7 +1429,10 @@ let Features = "avx512bitalg,evex512", Attributes = [NoThrow, Const, RequiredVec
let Features = "avx512bw,evex512", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
def pmulhrsw512 : X86Builtin<"_Vector<32, short>(_Vector<32, short>, _Vector<32, short>)">;
- def pmulhuw512 : X86Builtin<"_Vector<32, short>(_Vector<32, short>, _Vector<32, short>)">;
+}
+
+let Features = "avx512bw,evex512", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
+ def pmulhuw512 : X86Builtin<"_Vector<32, unsigned short>(_Vector<32, unsigned short>, _Vector<32, unsigned short>)">;
def pmulhw512 : X86Builtin<"_Vector<32, short>(_Vector<32, short>, _Vector<32, short>)">;
}
@@ -4140,14 +4143,6 @@ let Features = "avx512fp16,evex512", Attributes = [NoThrow, Const, RequiredVecto
def vcvtps2phx512_mask : X86Builtin<"_Vector<16, _Float16>(_Vector<16, float>, _Vector<16, _Float16>, unsigned short, _Constant int)">;
}
-let Features = "avx512fp16,avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
- def vfmaddph : X86Builtin<"_Vector<8, _Float16>(_Vector<8, _Float16>, _Vector<8, _Float16>, _Vector<8, _Float16>)">;
-}
-
-let Features = "avx512fp16,avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
- def vfmaddph256 : X86Builtin<"_Vector<16, _Float16>(_Vector<16, _Float16>, _Vector<16, _Float16>, _Vector<16, _Float16>)">;
-}
-
let Features = "avx512fp16,evex512", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
def vfmaddph512_mask : X86Builtin<"_Vector<32, _Float16>(_Vector<32, _Float16>, _Vector<32, _Float16>, _Vector<32, _Float16>, unsigned int, _Constant int)">;
def vfmaddph512_mask3 : X86Builtin<"_Vector<32, _Float16>(_Vector<32, _Float16>, _Vector<32, _Float16>, _Vector<32, _Float16>, unsigned int, _Constant int)">;
@@ -5373,13 +5368,4 @@ let Features = "avx10.2-256", Attributes = [NoThrow, Const, RequiredVectorWidth<
let Features = "avx10.2-512", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
def vsqrtbf16512 : X86Builtin<"_Vector<32, __bf16>(_Vector<32, __bf16>)">;
- def vfmaddbf16512 : X86Builtin<"_Vector<32, __bf16>(_Vector<32, __bf16>, _Vector<32, __bf16>, _Vector<32, __bf16>)">;
-}
-
-let Features = "avx10.2-256", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
- def vfmaddbf16256 : X86Builtin<"_Vector<16, __bf16>(_Vector<16, __bf16>, _Vector<16, __bf16>, _Vector<16, __bf16>)">;
-}
-
-let Features = "avx10.2-256", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
- def vfmaddbf16128 : X86Builtin<"_Vector<8, __bf16>(_Vector<8, __bf16>, _Vector<8, __bf16>, _Vector<8, __bf16>)">;
}
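Not part of the patch, but for context: marking the `pmulhw`/`pmulhuw` builtins `Constexpr` above lets them participate in constant evaluation. A minimal sketch, assuming SSE2 is enabled for the translation unit; the vector typedef and values are illustrative:

```c++
// Hypothetical example; assumes compilation with SSE2 available.
typedef unsigned short v8u16 __attribute__((vector_size(16)));

constexpr v8u16 A = {40000, 2, 3, 4, 5, 6, 7, 8};
constexpr v8u16 B = {40000, 2, 3, 4, 5, 6, 7, 8};
// Each lane is the high 16 bits of the unsigned 16-bit product:
// 40000 * 40000 = 1600000000, and 1600000000 >> 16 == 24414.
constexpr v8u16 Hi = __builtin_ia32_pmulhuw128(A, B);
static_assert(Hi[0] == 24414, "foldable at compile time with this change");
```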
diff --git a/clang/include/clang/Basic/DiagnosticSemaKinds.td b/clang/include/clang/Basic/DiagnosticSemaKinds.td
index cf23594..116341f 100644
--- a/clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -13234,9 +13234,9 @@ def err_wasm_builtin_arg_must_match_table_element_type : Error <
"%ordinal0 argument must match the element type of the WebAssembly table in the %ordinal1 argument">;
def err_wasm_builtin_arg_must_be_integer_type : Error <
"%ordinal0 argument must be an integer">;
-def err_wasm_builtin_test_fp_sig_cannot_include_reference_type
- : Error<"not supported for "
- "function pointers with a reference type %select{return "
+def err_wasm_builtin_test_fp_sig_cannot_include_struct_or_union
+ : Error<"not supported with the multivalue ABI for "
+ "function pointers with a struct/union as %select{return "
"value|parameter}0">;
// OpenACC diagnostics.
diff --git a/clang/include/clang/Basic/Features.def b/clang/include/clang/Basic/Features.def
index 72f2361..c58e3f2 100644
--- a/clang/include/clang/Basic/Features.def
+++ b/clang/include/clang/Basic/Features.def
@@ -303,6 +303,14 @@ FEATURE(is_trivially_assignable, LangOpts.CPlusPlus)
FEATURE(is_trivially_constructible, LangOpts.CPlusPlus)
FEATURE(is_trivially_copyable, LangOpts.CPlusPlus)
FEATURE(is_union, LangOpts.CPlusPlus)
+FEATURE(cfi_sanitizer, LangOpts.Sanitize.hasOneOf(SanitizerKind::CFI))
+FEATURE(cfi_cast_strict_sanitizer, LangOpts.Sanitize.has(SanitizerKind::CFICastStrict))
+FEATURE(cfi_derived_cast_sanitizer, LangOpts.Sanitize.has(SanitizerKind::CFIDerivedCast))
+FEATURE(cfi_icall_sanitizer, LangOpts.Sanitize.has(SanitizerKind::CFIICall))
+FEATURE(cfi_mfcall_sanitizer, LangOpts.Sanitize.has(SanitizerKind::CFIMFCall))
+FEATURE(cfi_unrelated_cast_sanitizer, LangOpts.Sanitize.has(SanitizerKind::CFIUnrelatedCast))
+FEATURE(cfi_nvcall_sanitizer, LangOpts.Sanitize.has(SanitizerKind::CFINVCall))
+FEATURE(cfi_vcall_sanitizer, LangOpts.Sanitize.has(SanitizerKind::CFIVCall))
FEATURE(kcfi, LangOpts.Sanitize.has(SanitizerKind::KCFI))
FEATURE(kcfi_arity, LangOpts.Sanitize.has(SanitizerKind::KCFI))
FEATURE(modules, LangOpts.Modules)
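For context (not from the patch): entries in Features.def are what `__has_feature` reports, so the new names above let code detect individual CFI sanitizers at preprocessing time. A minimal sketch; the macro name is an illustrative assumption:

```c++
#if defined(__has_feature)
#  if __has_feature(cfi_icall_sanitizer)
     // Built with indirect-call CFI enabled; e.g. keep function-pointer
     // casts signature-correct in this configuration.
#    define MYPROJ_HAS_CFI_ICALL 1
#  endif
#endif
```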
diff --git a/clang/include/clang/Basic/LangOptions.h b/clang/include/clang/Basic/LangOptions.h
index 0407897..569584b 100644
--- a/clang/include/clang/Basic/LangOptions.h
+++ b/clang/include/clang/Basic/LangOptions.h
@@ -186,95 +186,10 @@ public:
/// Clang versions with different platform ABI conformance.
enum class ClangABI {
- /// Attempt to be ABI-compatible with code generated by Clang 3.8.x
- /// (SVN r257626). This causes <1 x long long> to be passed in an
- /// integer register instead of an SSE register on x64_64.
- Ver3_8,
-
- /// Attempt to be ABI-compatible with code generated by Clang 4.0.x
- /// (SVN r291814). This causes move operations to be ignored when
- /// determining whether a class type can be passed or returned directly.
- Ver4,
-
- /// Attempt to be ABI-compatible with code generated by Clang 6.0.x
- /// (SVN r321711). This causes determination of whether a type is
- /// standard-layout to ignore collisions between empty base classes
- /// and between base classes and member subobjects, which affects
- /// whether we reuse base class tail padding in some ABIs.
- Ver6,
-
- /// Attempt to be ABI-compatible with code generated by Clang 7.0.x
- /// (SVN r338536). This causes alignof (C++) and _Alignof (C11) to be
- /// compatible with __alignof (i.e., return the preferred alignment)
- /// rather than returning the required alignment.
- Ver7,
-
- /// Attempt to be ABI-compatible with code generated by Clang 9.0.x
- /// (SVN r351319). This causes vectors of __int128 to be passed in memory
- /// instead of passing in multiple scalar registers on x86_64 on Linux and
- /// NetBSD.
- Ver9,
-
- /// Attempt to be ABI-compatible with code generated by Clang 11.0.x
- /// (git 2e10b7a39b93). This causes clang to pass unions with a 256-bit
- /// vector member on the stack instead of using registers, to not properly
- /// mangle substitutions for template names in some cases, and to mangle
- /// declaration template arguments without a cast to the parameter type
- /// even when that can lead to mangling collisions.
- Ver11,
-
- /// Attempt to be ABI-compatible with code generated by Clang 12.0.x
- /// (git 8e464dd76bef). This causes clang to mangle lambdas within
- /// global-scope inline variables incorrectly.
- Ver12,
-
- /// Attempt to be ABI-compatible with code generated by Clang 14.0.x.
- /// This causes clang to:
- /// - mangle dependent nested names incorrectly.
- /// - make trivial only those defaulted copy constructors with a
- /// parameter-type-list equivalent to the parameter-type-list of an
- /// implicit declaration.
- Ver14,
-
- /// Attempt to be ABI-compatible with code generated by Clang 15.0.x.
- /// This causes clang to:
- /// - Reverse the implementation for DR692, DR1395 and DR1432.
- /// - pack non-POD members of packed structs.
- /// - consider classes with defaulted special member functions non-pod.
- Ver15,
-
- /// Attempt to be ABI-compatible with code generated by Clang 17.0.x.
- /// This causes clang to revert some fixes to its implementation of the
- /// Itanium name mangling scheme, with the consequence that overloaded
- /// function templates are mangled the same if they differ only by:
- /// - constraints
- /// - whether a non-type template parameter has a deduced type
- /// - the parameter list of a template template parameter
- Ver17,
-
- /// Attempt to be ABI-compatible with code generated by Clang 18.0.x.
- /// This causes clang to revert some fixes to the mangling of lambdas
- /// in the initializers of members of local classes.
- Ver18,
-
- /// Attempt to be ABI-compatible with code generated by Clang 19.0.x.
- /// This causes clang to:
- /// - Incorrectly mangle the 'base type' substitutions of the CXX
- /// construction vtable because it hasn't added 'type' as a substitution.
- /// - Skip mangling enclosing class templates of member-like friend
- /// function templates.
- /// - Ignore empty struct arguments in C++ mode for ARM, instead of
- /// passing them as if they had a size of 1 byte.
- Ver19,
-
- /// Attempt to be ABI-compatible with code generated by Clang 20.0.x.
- /// This causes clang to:
- /// - Incorrectly return C++ records in AVX registers on x86_64.
- Ver20,
-
- /// Conform to the underlying platform's C and C++ ABIs as closely
- /// as we can.
- Latest
+#define ABI_VER_MAJOR_MINOR(Major, Minor) Ver##Major##_##Minor,
+#define ABI_VER_MAJOR(Major) Ver##Major,
+#define ABI_VER_LATEST(Latest) Latest
+#include "clang/Basic/ABIVersions.def"
};
enum class CoreFoundationABI {
diff --git a/clang/include/clang/Basic/TypeNodes.td b/clang/include/clang/Basic/TypeNodes.td
index 971ce54..e4960ec6 100644
--- a/clang/include/clang/Basic/TypeNodes.td
+++ b/clang/include/clang/Basic/TypeNodes.td
@@ -90,7 +90,7 @@ def UnaryTransformType : TypeNode<Type>, NeverCanonicalUnlessDependent;
def TagType : TypeNode<Type, 1>;
def RecordType : TypeNode<TagType>, LeafType;
def EnumType : TypeNode<TagType>, LeafType;
-def ElaboratedType : TypeNode<Type>, NeverCanonical;
+def InjectedClassNameType : TypeNode<TagType>, AlwaysDependent, LeafType;
def AttributedType : TypeNode<Type>, NeverCanonical;
def BTFTagAttributedType : TypeNode<Type>, NeverCanonical;
def HLSLAttributedResourceType : TypeNode<Type>;
@@ -102,7 +102,6 @@ def TemplateSpecializationType : TypeNode<Type>, NeverCanonicalUnlessDependent;
def DeducedType : TypeNode<Type, 1>;
def AutoType : TypeNode<DeducedType>;
def DeducedTemplateSpecializationType : TypeNode<DeducedType>;
-def InjectedClassNameType : TypeNode<Type>, AlwaysDependent, LeafType;
def DependentNameType : TypeNode<Type>, AlwaysDependent;
def DependentTemplateSpecializationType : TypeNode<Type>, AlwaysDependent;
def PackExpansionType : TypeNode<Type>, AlwaysDependent;
diff --git a/clang/include/clang/Basic/arm_sve.td b/clang/include/clang/Basic/arm_sve.td
index 07786c6..7513a3e 100644
--- a/clang/include/clang/Basic/arm_sve.td
+++ b/clang/include/clang/Basic/arm_sve.td
@@ -980,8 +980,8 @@ defm SVCLASTA_N : SVEPerm<"svclasta[_n_{d}]", "sPsd", "aarch64_sve_clasta_n">;
defm SVCLASTB : SVEPerm<"svclastb[_{d}]", "dPdd", "aarch64_sve_clastb">;
defm SVCLASTB_N : SVEPerm<"svclastb[_n_{d}]", "sPsd", "aarch64_sve_clastb_n">;
-let SVETargetGuard = "sve", SMETargetGuard = InvalidMode in {
-def SVCOMPACT : SInst<"svcompact[_{d}]", "dPd", "ilUiUlfd", MergeNone, "aarch64_sve_compact">;
+let SVETargetGuard = "sve", SMETargetGuard = "sme2p2" in {
+def SVCOMPACT : SInst<"svcompact[_{d}]", "dPd", "ilUiUlfd", MergeNone, "aarch64_sve_compact", [VerifyRuntimeMode]>;
}
// Note: svdup_lane is implemented using the intrinsic for TBL to represent a
diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td
index 588fb0d..3d34d77 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td
@@ -516,4 +516,36 @@ def CIR_BitfieldInfoAttr : CIR_Attr<"BitfieldInfo", "bitfield_info"> {
];
}
+//===----------------------------------------------------------------------===//
+// AddressPointAttr
+//===----------------------------------------------------------------------===//
+
+def CIR_AddressPointAttr : CIR_Attr<"AddressPoint", "address_point"> {
+ let summary = "Address point attribute";
+
+ let description = [{
+ Attribute specifying the address point within a C++ virtual table (vtable).
+
+ The `index` (vtable index) parameter identifies which vtable to use within a
+ vtable group, while the `offset` (address point index) specifies the offset
+ within that vtable where the address begins.
+
+ Example:
+ ```mlir
+ cir.global linkonce_odr @_ZTV1B = ...
+ ...
+ %3 = cir.vtable.address_point(@_ZTV1B,
+ address_point = <index = 0, offset = 2>)
+ : !cir.vptr
+ ```
+ }];
+
+ let parameters = (ins "int32_t":$index,
+ "int32_t":$offset);
+
+ let assemblyFormat = [{
+ `<` struct($index, $offset) `>`
+ }];
+}
+
#endif // CLANG_CIR_DIALECT_IR_CIRATTRS_TD
diff --git a/clang/include/clang/CIR/Dialect/IR/CIRDialect.td b/clang/include/clang/CIR/Dialect/IR/CIRDialect.td
index 3fdbf65..fdba4e4 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIRDialect.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIRDialect.td
@@ -40,6 +40,7 @@ def CIR_Dialect : Dialect {
static llvm::StringRef getCalleeAttrName() { return "callee"; }
static llvm::StringRef getNoThrowAttrName() { return "nothrow"; }
static llvm::StringRef getSideEffectAttrName() { return "side_effect"; }
+ static llvm::StringRef getModuleLevelAsmAttrName() { return "cir.module_asm"; }
void registerAttributes();
void registerTypes();
diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td
index 72841a1..51cef23 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROps.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -1692,6 +1692,49 @@ def CIR_GetGlobalOp : CIR_Op<"get_global", [
}
//===----------------------------------------------------------------------===//
+// VTableAddrPointOp
+//===----------------------------------------------------------------------===//
+
+def CIR_VTableAddrPointOp : CIR_Op<"vtable.address_point", [
+ Pure, DeclareOpInterfaceMethods<SymbolUserOpInterface>
+]> {
+ let summary = "Get the vtable (global variable) address point";
+ let description = [{
+ The `vtable.address_point` operation retrieves the "effective" address
+    (address point) of a C++ virtual table. An object's internal `__vptr`
+    gets initialized with the value returned by this operation.
+
+ `address_point.index` (vtable index) provides the appropriate vtable within
+    the vtable group (as specified by the Itanium ABI), and `address_point.offset`
+    (address point index) selects the actual address point within that vtable.
+
+ The return type is always `!cir.vptr`.
+
+ Example:
+ ```mlir
+ cir.global linkonce_odr @_ZTV1B = ...
+ ...
+ %3 = cir.vtable.address_point(@_ZTV1B,
+ address_point = <index = 0, offset = 2>) : !cir.vptr
+ ```
+ }];
+
+ let arguments = (ins
+ FlatSymbolRefAttr:$name,
+ CIR_AddressPointAttr:$address_point
+ );
+
+ let results = (outs Res<CIR_VPtrType, "", []>:$addr);
+
+ let assemblyFormat = [{
+ `(`
+ $name `,` `address_point` `=` $address_point
+ `)`
+ `:` qualified(type($addr)) attr-dict
+ }];
+}
+
+//===----------------------------------------------------------------------===//
// SetBitfieldOp
//===----------------------------------------------------------------------===//
@@ -3143,6 +3186,56 @@ def CIR_AssumeOp : CIR_Op<"assume"> {
}];
}
+def CIR_AssumeAlignedOp : CIR_Op<"assume_aligned", [
+ Pure, AllTypesMatch<["pointer", "result"]>
+]> {
+ let summary = "Tell the optimizer that a pointer is aligned";
+ let description = [{
+ The `cir.assume_aligned` operation takes two or three arguments. The first
+ argument `pointer` gives the pointer value whose alignment is to be assumed,
+    and the second argument `alignment` is an integer attribute that gives the
+ assumed alignment.
+
+    The `offset` argument is optional. If given, it represents the misalignment
+    offset. When it is present, this operation tells the optimizer that the
+    pointer is always misaligned from the alignment by `offset` bytes, that is,
+    the pointer yielded by `(char *)pointer - offset` is aligned to the
+    specified alignment. Note that the `offset` argument is an SSA value rather
+    than an attribute, which means a dynamically determined value can be passed
+    as the misalignment offset.
+
+ The result of this operation has the same value as the `pointer` argument,
+ but it additionally carries any alignment information indicated by this
+ operation.
+
+ This operation corresponds to the `__builtin_assume_aligned` builtin
+ function.
+
+ Example:
+
+ ```mlir
+ // Assume that %0 is a CIR pointer value of type !cir.ptr<!s32i>
+ %1 = cir.assume_aligned %0 alignment 16 : !cir.ptr<!s32i>
+
+ // With a misalignment offset of 4 bytes:
+ %2 = cir.const #cir.int<4> : !u64i
+ %3 = cir.assume_aligned %0 alignment 16 [offset %2 : !u64i] : !cir.ptr<!s32i>
+ ```
+ }];
+
+ let arguments = (ins CIR_PointerType:$pointer,
+ I64Attr:$alignment,
+ Optional<CIR_IntType>:$offset);
+ let results = (outs CIR_PointerType:$result);
+
+ let assemblyFormat = [{
+ $pointer
+ `alignment` $alignment
+ (`[` `offset` $offset^ `:` type($offset) `]`)?
+ `:` qualified(type($pointer)) attr-dict
+ }];
+}
+
def CIR_AssumeSepStorageOp : CIR_Op<"assume_separate_storage", [
SameTypeOperands
]> {
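A brief source-level sketch (not from the patch) of the builtin that `cir.assume_aligned` models, per its description above; the function names are illustrative:

```c++
void consume(int *p);

void example(int *p, unsigned long off) {
  // Two-argument form: assume p is 16-byte aligned.
  int *q = (int *)__builtin_assume_aligned(p, 16);
  // Three-argument form: assume (char *)p - off is 16-byte aligned.
  int *r = (int *)__builtin_assume_aligned(p, 16, off);
  consume(q);
  consume(r);
}
```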
diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h
index 27dd181..fcc8ce7 100644
--- a/clang/include/clang/CIR/MissingFeatures.h
+++ b/clang/include/clang/CIR/MissingFeatures.h
@@ -199,6 +199,7 @@ struct MissingFeatures {
static bool dataLayoutTypeAllocSize() { return false; }
static bool deferredCXXGlobalInit() { return false; }
static bool ehCleanupFlags() { return false; }
+ static bool ehCleanupScope() { return false; }
static bool ehstackBranches() { return false; }
static bool emitCheckedInBoundsGEP() { return false; }
static bool emitCondLikelihoodViaExpectIntrinsic() { return false; }
@@ -253,6 +254,7 @@ struct MissingFeatures {
static bool thunks() { return false; }
static bool tryEmitAsConstant() { return false; }
static bool typeChecks() { return false; }
+ static bool vtableInitializer() { return false; }
static bool weakRefReference() { return false; }
static bool writebacks() { return false; }
static bool appleKext() { return false; }
diff --git a/clang/include/clang/Driver/OffloadBundler.h b/clang/include/clang/Driver/OffloadBundler.h
index 667156a..e7306ce 100644
--- a/clang/include/clang/Driver/OffloadBundler.h
+++ b/clang/include/clang/Driver/OffloadBundler.h
@@ -120,7 +120,7 @@ public:
static llvm::Expected<CompressedBundleHeader> tryParse(llvm::StringRef);
};
- static inline const uint16_t DefaultVersion = 2;
+ static inline const uint16_t DefaultVersion = 3;
static llvm::Expected<std::unique_ptr<llvm::MemoryBuffer>>
compress(llvm::compression::Params P, const llvm::MemoryBuffer &Input,
diff --git a/clang/include/clang/ExtractAPI/DeclarationFragments.h b/clang/include/clang/ExtractAPI/DeclarationFragments.h
index 4ac7444..4859225 100644
--- a/clang/include/clang/ExtractAPI/DeclarationFragments.h
+++ b/clang/include/clang/ExtractAPI/DeclarationFragments.h
@@ -440,9 +440,8 @@ private:
DeclarationFragments &);
/// Build DeclarationFragments for a NestedNameSpecifier.
- static DeclarationFragments getFragmentsForNNS(const NestedNameSpecifier *,
- ASTContext &,
- DeclarationFragments &);
+ static DeclarationFragments
+ getFragmentsForNNS(NestedNameSpecifier, ASTContext &, DeclarationFragments &);
/// Build DeclarationFragments for Qualifiers.
static DeclarationFragments getFragmentsForQualifiers(const Qualifiers quals);
diff --git a/clang/include/clang/Interpreter/RemoteJITUtils.h b/clang/include/clang/Interpreter/RemoteJITUtils.h
index 8705a3b..bc71232 100644
--- a/clang/include/clang/Interpreter/RemoteJITUtils.h
+++ b/clang/include/clang/Interpreter/RemoteJITUtils.h
@@ -26,7 +26,8 @@
llvm::Expected<std::unique_ptr<llvm::orc::SimpleRemoteEPC>>
launchExecutor(llvm::StringRef ExecutablePath, bool UseSharedMemory,
- llvm::StringRef SlabAllocateSizeString);
+ llvm::StringRef SlabAllocateSizeString,
+ std::function<void()> CustomizeFork = nullptr);
/// Create a JITLinkExecutor that connects to the given network address
/// through a TCP socket. A valid NetworkAddress provides hostname and port,
@@ -35,4 +36,13 @@ llvm::Expected<std::unique_ptr<llvm::orc::SimpleRemoteEPC>>
connectTCPSocket(llvm::StringRef NetworkAddress, bool UseSharedMemory,
llvm::StringRef SlabAllocateSizeString);
+#ifdef LLVM_ON_UNIX
+/// Returns the PID of the last launched executor.
+pid_t getLastLaunchedExecutorPID();
+
+/// Returns the PID of the nth launched executor.
+/// Indexing is 1-based.
+pid_t getNthLaunchedExecutorPID(int n);
+#endif
+
#endif // LLVM_CLANG_INTERPRETER_REMOTEJITUTILS_H
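A hedged usage sketch for the new executor-PID helpers; the liveness check via `kill(pid, 0)` is an illustrative assumption, not something the patch adds:

```c++
#include "clang/Interpreter/RemoteJITUtils.h"

#ifdef LLVM_ON_UNIX
#include <signal.h>

// After launchExecutor() has forked one or more out-of-process executors,
// tests can refer to them by PID (illustrative helper, not from the patch).
static bool lastExecutorStillAlive() {
  pid_t PID = getLastLaunchedExecutorPID();
  return PID > 0 && ::kill(PID, 0) == 0; // signal 0: existence check only
}
#endif
```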
diff --git a/clang/include/clang/Sema/CodeCompleteConsumer.h b/clang/include/clang/Sema/CodeCompleteConsumer.h
index 2dd2759..c26f4e3 100644
--- a/clang/include/clang/Sema/CodeCompleteConsumer.h
+++ b/clang/include/clang/Sema/CodeCompleteConsumer.h
@@ -162,7 +162,8 @@ SimplifiedTypeClass getSimplifiedTypeClass(CanQualType T);
/// Determine the type that this declaration will have if it is used
/// as a type or in an expression.
-QualType getDeclUsageType(ASTContext &C, const NamedDecl *ND);
+QualType getDeclUsageType(ASTContext &C, NestedNameSpecifier Qualifier,
+ const NamedDecl *ND);
/// Determine the priority to be given to a macro code completion result
/// with the given name.
@@ -867,7 +868,7 @@ public:
/// If the result should have a nested-name-specifier, this is it.
/// When \c QualifierIsInformative, the nested-name-specifier is
/// informative rather than required.
- NestedNameSpecifier *Qualifier = nullptr;
+ NestedNameSpecifier Qualifier = std::nullopt;
/// If this Decl was unshadowed by using declaration, this can store a
/// pointer to the UsingShadowDecl which was used in the unshadowing process.
@@ -882,7 +883,7 @@ public:
/// Build a result that refers to a declaration.
CodeCompletionResult(const NamedDecl *Declaration, unsigned Priority,
- NestedNameSpecifier *Qualifier = nullptr,
+ NestedNameSpecifier Qualifier = std::nullopt,
bool QualifierIsInformative = false,
bool Accessible = true,
std::vector<FixItHint> FixIts = std::vector<FixItHint>())
diff --git a/clang/include/clang/Sema/DeclSpec.h b/clang/include/clang/Sema/DeclSpec.h
index e568081..c1a99a1 100644
--- a/clang/include/clang/Sema/DeclSpec.h
+++ b/clang/include/clang/Sema/DeclSpec.h
@@ -91,12 +91,11 @@ public:
}
/// Retrieve the representation of the nested-name-specifier.
- NestedNameSpecifier *getScopeRep() const {
+ NestedNameSpecifier getScopeRep() const {
return Builder.getRepresentation();
}
- /// Extend the current nested-name-specifier by another
- /// nested-name-specifier component of the form 'type::'.
+ /// Make a nested-name-specifier of the form 'type::'.
///
/// \param Context The AST context in which this nested-name-specifier
/// resides.
@@ -106,21 +105,7 @@ public:
/// \param TL The TypeLoc that describes the type preceding the '::'.
///
/// \param ColonColonLoc The location of the trailing '::'.
- void Extend(ASTContext &Context, TypeLoc TL, SourceLocation ColonColonLoc);
-
- /// Extend the current nested-name-specifier by another
- /// nested-name-specifier component of the form 'identifier::'.
- ///
- /// \param Context The AST context in which this nested-name-specifier
- /// resides.
- ///
- /// \param Identifier The identifier.
- ///
- /// \param IdentifierLoc The location of the identifier.
- ///
- /// \param ColonColonLoc The location of the trailing '::'.
- void Extend(ASTContext &Context, IdentifierInfo *Identifier,
- SourceLocation IdentifierLoc, SourceLocation ColonColonLoc);
+ void Make(ASTContext &Context, TypeLoc TL, SourceLocation ColonColonLoc);
/// Extend the current nested-name-specifier by another
/// nested-name-specifier component of the form 'namespace::'.
@@ -154,8 +139,9 @@ public:
/// name.
///
/// \param ColonColonLoc The location of the trailing '::'.
- void MakeSuper(ASTContext &Context, CXXRecordDecl *RD,
- SourceLocation SuperLoc, SourceLocation ColonColonLoc);
+ void MakeMicrosoftSuper(ASTContext &Context, CXXRecordDecl *RD,
+ SourceLocation SuperLoc,
+ SourceLocation ColonColonLoc);
/// Make a new nested-name-specifier from incomplete source-location
/// information.
@@ -163,7 +149,7 @@ public:
/// FIXME: This routine should be used very, very rarely, in cases where we
/// need to synthesize a nested-name-specifier. Most code should instead use
/// \c Adopt() with a proper \c NestedNameSpecifierLoc.
- void MakeTrivial(ASTContext &Context, NestedNameSpecifier *Qualifier,
+ void MakeTrivial(ASTContext &Context, NestedNameSpecifier Qualifier,
SourceRange R);
/// Adopt an existing nested-name-specifier (with source-range
@@ -189,14 +175,14 @@ public:
SourceLocation getLastQualifierNameLoc() const;
/// No scope specifier.
- bool isEmpty() const { return Range.isInvalid() && getScopeRep() == nullptr; }
+ bool isEmpty() const { return Range.isInvalid() && !getScopeRep(); }
/// A scope specifier is present, but may be valid or invalid.
bool isNotEmpty() const { return !isEmpty(); }
/// An error occurred during parsing of the scope specifier.
- bool isInvalid() const { return Range.isValid() && getScopeRep() == nullptr; }
+ bool isInvalid() const { return Range.isValid() && !getScopeRep(); }
/// A scope specifier is present, and it refers to a real scope.
- bool isValid() const { return getScopeRep() != nullptr; }
+ bool isValid() const { return bool(getScopeRep()); }
/// Indicate that this nested-name-specifier is invalid.
void SetInvalid(SourceRange R) {
@@ -209,7 +195,7 @@ public:
/// Deprecated. Some call sites intend isNotEmpty() while others intend
/// isValid().
- bool isSet() const { return getScopeRep() != nullptr; }
+ bool isSet() const { return bool(getScopeRep()); }
void clear() {
Range = SourceRange();
diff --git a/clang/include/clang/Sema/HeuristicResolver.h b/clang/include/clang/Sema/HeuristicResolver.h
index e193c0b..71588be 100644
--- a/clang/include/clang/Sema/HeuristicResolver.h
+++ b/clang/include/clang/Sema/HeuristicResolver.h
@@ -67,8 +67,7 @@ public:
// Try to heuristically resolve a dependent nested name specifier
// to the type it likely denotes. Note that *dependent* name specifiers always
// denote types, not namespaces.
- QualType
- resolveNestedNameSpecifierToType(const NestedNameSpecifier *NNS) const;
+ QualType resolveNestedNameSpecifierToType(NestedNameSpecifier NNS) const;
// Perform an imprecise lookup of a dependent name in `RD`.
// This function does not follow strict semantic rules and should be used
diff --git a/clang/include/clang/Sema/ParsedTemplate.h b/clang/include/clang/Sema/ParsedTemplate.h
index 3a8050f..4a3df78 100644
--- a/clang/include/clang/Sema/ParsedTemplate.h
+++ b/clang/include/clang/Sema/ParsedTemplate.h
@@ -48,8 +48,8 @@ namespace clang {
///
/// \param Arg the template type argument or non-type template argument.
/// \param Loc the location of the type.
- ParsedTemplateArgument(KindType Kind, void *Arg, SourceLocation Loc)
- : Kind(Kind), Arg(Arg), Loc(Loc) { }
+ ParsedTemplateArgument(KindType Kind, void *Arg, SourceLocation NameLoc)
+ : Kind(Kind), Arg(Arg), NameLoc(NameLoc) {}
/// Create a template template argument.
///
@@ -60,11 +60,11 @@ namespace clang {
/// argument refers.
///
/// \param TemplateLoc the location of the template name.
- ParsedTemplateArgument(const CXXScopeSpec &SS,
- ParsedTemplateTy Template,
- SourceLocation TemplateLoc)
- : Kind(ParsedTemplateArgument::Template),
- Arg(Template.getAsOpaquePtr()), SS(SS), Loc(TemplateLoc) {}
+ ParsedTemplateArgument(SourceLocation TemplateKwLoc, const CXXScopeSpec &SS,
+ ParsedTemplateTy Template, SourceLocation NameLoc)
+ : Kind(ParsedTemplateArgument::Template),
+ Arg(Template.getAsOpaquePtr()), SS(SS), TemplateKwLoc(TemplateKwLoc),
+ NameLoc(NameLoc) {}
/// Determine whether the given template argument is invalid.
bool isInvalid() const { return Arg == nullptr; }
@@ -91,7 +91,10 @@ namespace clang {
}
/// Retrieve the location of the template argument.
- SourceLocation getLocation() const { return Loc; }
+ SourceLocation getTemplateKwLoc() const { return TemplateKwLoc; }
+
+ /// Retrieve the location of the template argument.
+ SourceLocation getNameLoc() const { return NameLoc; }
/// Retrieve the nested-name-specifier that precedes the template
/// name in a template template argument.
@@ -128,8 +131,11 @@ namespace clang {
/// argument.
CXXScopeSpec SS;
- /// the location of the template argument.
- SourceLocation Loc;
+ /// the location of the template keyword.
+ SourceLocation TemplateKwLoc;
+
+ /// the location of the template name.
+ SourceLocation NameLoc;
/// The ellipsis location that can accompany a template template
/// argument (turning it into a template template argument expansion).
diff --git a/clang/include/clang/Sema/Sema.h b/clang/include/clang/Sema/Sema.h
index 5211373..1dfc276 100644
--- a/clang/include/clang/Sema/Sema.h
+++ b/clang/include/clang/Sema/Sema.h
@@ -3225,7 +3225,7 @@ public:
/// current instantiation (C++0x [temp.dep.type]p1).
///
/// \param NNS a dependent nested name specifier.
- CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
+ CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
@@ -3262,7 +3262,7 @@ public:
/// (e.g., Base::), perform name lookup for that identifier as a
/// nested-name-specifier within the given scope, and return the result of
/// that name lookup.
- NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
+ NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
@@ -3581,8 +3581,8 @@ public:
/// Returns the TypeDeclType for the given type declaration,
/// as ASTContext::getTypeDeclType would, but
/// performs the required semantic checks for name lookup of said entity.
- QualType getTypeDeclType(DeclContext *LookupCtx, DiagCtorKind DCK,
- TypeDecl *TD, SourceLocation NameLoc);
+ void checkTypeDeclType(DeclContext *LookupCtx, DiagCtorKind DCK, TypeDecl *TD,
+ SourceLocation NameLoc);
/// If the identifier refers to a type name within this scope,
/// return the declaration of that type.
@@ -7618,7 +7618,7 @@ public:
/// "real" base class is checked as appropriate when checking the access of
/// the member name.
ExprResult PerformObjectMemberConversion(Expr *From,
- NestedNameSpecifier *Qualifier,
+ NestedNameSpecifier Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
@@ -10210,7 +10210,7 @@ public:
ExprResult InitializeExplicitObjectArgument(Sema &S, Expr *Obj,
FunctionDecl *Fun);
ExprResult PerformImplicitObjectArgumentInitialization(
- Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl,
+ Expr *From, NestedNameSpecifier Qualifier, NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// PerformContextuallyConvertToBool - Perform a contextual conversion
@@ -11618,13 +11618,16 @@ public:
void NoteAllFoundTemplates(TemplateName Name);
- QualType CheckTemplateIdType(TemplateName Template,
+ QualType CheckTemplateIdType(ElaboratedTypeKeyword Keyword,
+ TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
- ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
- TemplateTy Template, const IdentifierInfo *TemplateII,
+ ActOnTemplateIdType(Scope *S, ElaboratedTypeKeyword ElaboratedKeyword,
+ SourceLocation ElaboratedKeywordLoc, CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc, TemplateTy Template,
+ const IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false, bool IsClassName = false,
@@ -11858,8 +11861,8 @@ public:
/// argument, substitute into that default template argument and
/// return the corresponding template argument.
TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(
- TemplateDecl *Template, SourceLocation TemplateLoc,
- SourceLocation RAngleLoc, Decl *Param,
+ TemplateDecl *Template, SourceLocation TemplateKWLoc,
+ SourceLocation TemplateNameLoc, SourceLocation RAngleLoc, Decl *Param,
ArrayRef<TemplateArgument> SugaredConverted,
ArrayRef<TemplateArgument> CanonicalConverted, bool &HasDefaultArg);
@@ -13762,8 +13765,9 @@ public:
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
- SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
- SourceLocation Loc,
+ SubstTemplateName(SourceLocation TemplateKWLoc,
+ NestedNameSpecifierLoc &QualifierLoc, TemplateName Name,
+ SourceLocation NameLoc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool SubstTypeConstraint(TemplateTypeParmDecl *Inst, const TypeConstraint *TC,
@@ -15179,14 +15183,6 @@ public:
return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser);
}
- /// Retrieve a version of the type 'T' that is elaborated by Keyword,
- /// qualified by the nested-name-specifier contained in SS, and that is
- /// (re)declared by OwnedTagDecl, which is nullptr if this is not a
- /// (re)declaration.
- QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
- const CXXScopeSpec &SS, QualType T,
- TagDecl *OwnedTagDecl = nullptr);
-
// Returns the underlying type of a decltype with the given expression.
QualType getDecltypeForExpr(Expr *E);
diff --git a/clang/include/clang/Sema/SemaInternal.h b/clang/include/clang/Sema/SemaInternal.h
index 42c9469..f7c67c1 100644
--- a/clang/include/clang/Sema/SemaInternal.h
+++ b/clang/include/clang/Sema/SemaInternal.h
@@ -209,7 +209,7 @@ private:
class NamespaceSpecifierSet {
struct SpecifierInfo {
DeclContext* DeclCtx;
- NestedNameSpecifier* NameSpecifier;
+ NestedNameSpecifier NameSpecifier;
unsigned EditDistance;
};
@@ -229,9 +229,9 @@ private:
static DeclContextList buildContextChain(DeclContext *Start);
unsigned buildNestedNameSpecifier(DeclContextList &DeclChain,
- NestedNameSpecifier *&NNS);
+ NestedNameSpecifier &NNS);
- public:
+ public:
NamespaceSpecifierSet(ASTContext &Context, DeclContext *CurContext,
CXXScopeSpec *CurScopeSpec);
@@ -276,7 +276,7 @@ private:
};
void addName(StringRef Name, NamedDecl *ND,
- NestedNameSpecifier *NNS = nullptr, bool isKeyword = false);
+ NestedNameSpecifier NNS = std::nullopt, bool isKeyword = false);
/// Find any visible decls for the given typo correction candidate.
/// If none are found, it to the set of candidates for which qualified lookups
diff --git a/clang/include/clang/Sema/SemaWasm.h b/clang/include/clang/Sema/SemaWasm.h
index 8c0639f..f825907 100644
--- a/clang/include/clang/Sema/SemaWasm.h
+++ b/clang/include/clang/Sema/SemaWasm.h
@@ -37,7 +37,8 @@ public:
bool BuiltinWasmTableGrow(CallExpr *TheCall);
bool BuiltinWasmTableFill(CallExpr *TheCall);
bool BuiltinWasmTableCopy(CallExpr *TheCall);
- bool BuiltinWasmTestFunctionPointerSignature(CallExpr *TheCall);
+ bool BuiltinWasmTestFunctionPointerSignature(const TargetInfo &TI,
+ CallExpr *TheCall);
WebAssemblyImportNameAttr *
mergeImportNameAttr(Decl *D, const WebAssemblyImportNameAttr &AL);
diff --git a/clang/include/clang/Sema/TypoCorrection.h b/clang/include/clang/Sema/TypoCorrection.h
index 09de164..1d780c4 100644
--- a/clang/include/clang/Sema/TypoCorrection.h
+++ b/clang/include/clang/Sema/TypoCorrection.h
@@ -57,15 +57,15 @@ public:
static const unsigned CallbackDistanceWeight = 150U;
TypoCorrection(const DeclarationName &Name, NamedDecl *NameDecl,
- NestedNameSpecifier *NNS = nullptr, unsigned CharDistance = 0,
- unsigned QualifierDistance = 0)
+ NestedNameSpecifier NNS = std::nullopt,
+ unsigned CharDistance = 0, unsigned QualifierDistance = 0)
: CorrectionName(Name), CorrectionNameSpec(NNS),
CharDistance(CharDistance), QualifierDistance(QualifierDistance) {
if (NameDecl)
CorrectionDecls.push_back(NameDecl);
}
- TypoCorrection(NamedDecl *Name, NestedNameSpecifier *NNS = nullptr,
+ TypoCorrection(NamedDecl *Name, NestedNameSpecifier NNS = std::nullopt,
unsigned CharDistance = 0)
: CorrectionName(Name->getDeclName()), CorrectionNameSpec(NNS),
CharDistance(CharDistance) {
@@ -73,7 +73,7 @@ public:
CorrectionDecls.push_back(Name);
}
- TypoCorrection(DeclarationName Name, NestedNameSpecifier *NNS = nullptr,
+ TypoCorrection(DeclarationName Name, NestedNameSpecifier NNS = std::nullopt,
unsigned CharDistance = 0)
: CorrectionName(Name), CorrectionNameSpec(NNS),
CharDistance(CharDistance) {}
@@ -88,13 +88,13 @@ public:
}
/// Gets the NestedNameSpecifier needed to use the typo correction
- NestedNameSpecifier *getCorrectionSpecifier() const {
+ NestedNameSpecifier getCorrectionSpecifier() const {
return CorrectionNameSpec;
}
- void setCorrectionSpecifier(NestedNameSpecifier *NNS) {
+ void setCorrectionSpecifier(NestedNameSpecifier NNS) {
CorrectionNameSpec = NNS;
- ForceSpecifierReplacement = (NNS != nullptr);
+ ForceSpecifierReplacement = !!NNS;
}
void WillReplaceSpecifier(bool ForceReplacement) {
@@ -264,7 +264,7 @@ private:
// Results.
DeclarationName CorrectionName;
- NestedNameSpecifier *CorrectionNameSpec = nullptr;
+ NestedNameSpecifier CorrectionNameSpec = std::nullopt;
SmallVector<NamedDecl *, 1> CorrectionDecls;
unsigned CharDistance = 0;
unsigned QualifierDistance = 0;
@@ -282,8 +282,9 @@ class CorrectionCandidateCallback {
public:
static const unsigned InvalidDistance = TypoCorrection::InvalidDistance;
- explicit CorrectionCandidateCallback(const IdentifierInfo *Typo = nullptr,
- NestedNameSpecifier *TypoNNS = nullptr)
+ explicit CorrectionCandidateCallback(
+ const IdentifierInfo *Typo = nullptr,
+ NestedNameSpecifier TypoNNS = std::nullopt)
: Typo(Typo), TypoNNS(TypoNNS) {}
virtual ~CorrectionCandidateCallback() = default;
@@ -320,7 +321,7 @@ public:
virtual std::unique_ptr<CorrectionCandidateCallback> clone() = 0;
void setTypoName(const IdentifierInfo *II) { Typo = II; }
- void setTypoNNS(NestedNameSpecifier *NNS) { TypoNNS = NNS; }
+ void setTypoNNS(NestedNameSpecifier NNS) { TypoNNS = NNS; }
// Flags for context-dependent keywords. WantFunctionLikeCasts is only
// used/meaningful when WantCXXNamedCasts is false.
@@ -346,13 +347,13 @@ protected:
}
const IdentifierInfo *Typo;
- NestedNameSpecifier *TypoNNS;
+ NestedNameSpecifier TypoNNS;
};
class DefaultFilterCCC final : public CorrectionCandidateCallback {
public:
explicit DefaultFilterCCC(const IdentifierInfo *Typo = nullptr,
- NestedNameSpecifier *TypoNNS = nullptr)
+ NestedNameSpecifier TypoNNS = std::nullopt)
: CorrectionCandidateCallback(Typo, TypoNNS) {}
std::unique_ptr<CorrectionCandidateCallback> clone() override {
@@ -366,7 +367,7 @@ template <class C>
class DeclFilterCCC final : public CorrectionCandidateCallback {
public:
explicit DeclFilterCCC(const IdentifierInfo *Typo = nullptr,
- NestedNameSpecifier *TypoNNS = nullptr)
+ NestedNameSpecifier TypoNNS = std::nullopt)
: CorrectionCandidateCallback(Typo, TypoNNS) {}
bool ValidateCandidate(const TypoCorrection &candidate) override {
diff --git a/clang/include/clang/Serialization/ASTRecordReader.h b/clang/include/clang/Serialization/ASTRecordReader.h
index 1472497..aed1b7d 100644
--- a/clang/include/clang/Serialization/ASTRecordReader.h
+++ b/clang/include/clang/Serialization/ASTRecordReader.h
@@ -223,7 +223,7 @@ public:
void readQualifierInfo(QualifierInfo &Info);
/// Return a nested name specifier, advancing Idx.
- // NestedNameSpecifier *readNestedNameSpecifier(); (inherited)
+ // NestedNameSpecifier readNestedNameSpecifier(); (inherited)
NestedNameSpecifierLoc readNestedNameSpecifierLoc();
diff --git a/clang/include/clang/Serialization/ASTRecordWriter.h b/clang/include/clang/Serialization/ASTRecordWriter.h
index ee005ec..9849ea6 100644
--- a/clang/include/clang/Serialization/ASTRecordWriter.h
+++ b/clang/include/clang/Serialization/ASTRecordWriter.h
@@ -247,8 +247,7 @@ public:
void AddTypeLoc(TypeLoc TL);
/// Emits a template argument location info.
- void AddTemplateArgumentLocInfo(TemplateArgument::ArgKind Kind,
- const TemplateArgumentLocInfo &Arg);
+ void AddTemplateArgumentLocInfo(const TemplateArgumentLoc &Arg);
/// Emits a template argument location.
void AddTemplateArgumentLoc(const TemplateArgumentLoc &Arg);
@@ -280,7 +279,7 @@ public:
void AddQualifierInfo(const QualifierInfo &Info);
/// Emit a nested name specifier.
- void AddNestedNameSpecifier(NestedNameSpecifier *NNS) {
+ void AddNestedNameSpecifier(NestedNameSpecifier NNS) {
writeNestedNameSpecifier(NNS);
}
diff --git a/clang/include/clang/Serialization/TypeBitCodes.def b/clang/include/clang/Serialization/TypeBitCodes.def
index 613eb6a..8af32db 100644
--- a/clang/include/clang/Serialization/TypeBitCodes.def
+++ b/clang/include/clang/Serialization/TypeBitCodes.def
@@ -32,7 +32,6 @@ TYPE_BIT_CODE(Enum, ENUM, 20)
TYPE_BIT_CODE(ObjCInterface, OBJC_INTERFACE, 21)
TYPE_BIT_CODE(ObjCObjectPointer, OBJC_OBJECT_POINTER, 22)
TYPE_BIT_CODE(Decltype, DECLTYPE, 23)
-TYPE_BIT_CODE(Elaborated, ELABORATED, 24)
TYPE_BIT_CODE(SubstTemplateTypeParm, SUBST_TEMPLATE_TYPE_PARM, 25)
TYPE_BIT_CODE(UnresolvedUsing, UNRESOLVED_USING, 26)
TYPE_BIT_CODE(InjectedClassName, INJECTED_CLASS_NAME, 27)
diff --git a/clang/include/clang/StaticAnalyzer/Core/Checker.h b/clang/include/clang/StaticAnalyzer/Core/Checker.h
index 31cc095c..d9a7c00 100644
--- a/clang/include/clang/StaticAnalyzer/Core/Checker.h
+++ b/clang/include/clang/StaticAnalyzer/Core/Checker.h
@@ -209,8 +209,8 @@ public:
class Bind {
template <typename CHECKER>
static void _checkBind(void *checker, SVal location, SVal val, const Stmt *S,
- CheckerContext &C) {
- ((const CHECKER *)checker)->checkBind(location, val, S, C);
+ bool AtDeclInit, CheckerContext &C) {
+ ((const CHECKER *)checker)->checkBind(location, val, S, AtDeclInit, C);
}
public:
diff --git a/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h b/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h
index c8e6f12..bf33ce6 100644
--- a/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h
+++ b/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h
@@ -338,10 +338,9 @@ public:
ExprEngine &Eng);
/// Run checkers for binding of a value to a location.
- void runCheckersForBind(ExplodedNodeSet &Dst,
- const ExplodedNodeSet &Src,
- SVal location, SVal val,
- const Stmt *S, ExprEngine &Eng,
+ void runCheckersForBind(ExplodedNodeSet &Dst, const ExplodedNodeSet &Src,
+ SVal location, SVal val, const Stmt *S,
+ bool AtDeclInit, ExprEngine &Eng,
const ProgramPoint &PP);
/// Run checkers after taking a control flow edge.
@@ -499,8 +498,8 @@ public:
using CheckLocationFunc = CheckerFn<void(SVal location, bool isLoad,
const Stmt *S, CheckerContext &)>;
- using CheckBindFunc =
- CheckerFn<void(SVal location, SVal val, const Stmt *S, CheckerContext &)>;
+ using CheckBindFunc = CheckerFn<void(SVal location, SVal val, const Stmt *S,
+ bool AtDeclInit, CheckerContext &)>;
using CheckBlockEntranceFunc =
CheckerFn<void(const BlockEntrance &, CheckerContext &)>;
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
index fbb3434..2335588 100644
--- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
+++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
@@ -660,7 +660,7 @@ private:
/// evalBind - Handle the semantics of binding a value to a specific location.
/// This method is used by evalStore, VisitDeclStmt, and others.
void evalBind(ExplodedNodeSet &Dst, const Stmt *StoreE, ExplodedNode *Pred,
- SVal location, SVal Val, bool atDeclInit = false,
+ SVal location, SVal Val, bool AtDeclInit = false,
const ProgramPoint *PP = nullptr);
ProgramStateRef
diff --git a/clang/include/clang/Tooling/Refactoring/Lookup.h b/clang/include/clang/Tooling/Refactoring/Lookup.h
index dcb40b7e..fe0df86 100644
--- a/clang/include/clang/Tooling/Refactoring/Lookup.h
+++ b/clang/include/clang/Tooling/Refactoring/Lookup.h
@@ -38,8 +38,7 @@ namespace tooling {
/// \param ReplacementString The replacement nested name. Must be fully
/// qualified including a leading "::".
/// \returns The new name to be inserted in place of the current nested name.
-std::string replaceNestedName(const NestedNameSpecifier *Use,
- SourceLocation UseLoc,
+std::string replaceNestedName(NestedNameSpecifier Use, SourceLocation UseLoc,
const DeclContext *UseContext,
const NamedDecl *FromDecl,
StringRef ReplacementString);
diff --git a/clang/include/clang/Tooling/Refactoring/RecursiveSymbolVisitor.h b/clang/include/clang/Tooling/Refactoring/RecursiveSymbolVisitor.h
index 271232e..319569f 100644
--- a/clang/include/clang/Tooling/Refactoring/RecursiveSymbolVisitor.h
+++ b/clang/include/clang/Tooling/Refactoring/RecursiveSymbolVisitor.h
@@ -108,19 +108,21 @@ public:
bool VisitTypedefTypeLoc(TypedefTypeLoc TL) {
const SourceLocation TypeEndLoc =
Lexer::getLocForEndOfToken(TL.getBeginLoc(), 0, SM, LangOpts);
- return visit(TL.getTypedefNameDecl(), TL.getBeginLoc(), TypeEndLoc);
+ return visit(TL.getDecl(), TL.getBeginLoc(), TypeEndLoc);
}
- bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS) {
+ bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc QualifierLoc) {
// The base visitor will visit NNSL prefixes, so we should only look at
// the current NNS.
- if (NNS) {
- const auto *ND = dyn_cast_if_present<NamespaceDecl>(
- NNS.getNestedNameSpecifier()->getAsNamespace());
- if (!visit(ND, NNS.getLocalBeginLoc(), NNS.getLocalEndLoc()))
+ if (NestedNameSpecifier Qualifier = QualifierLoc.getNestedNameSpecifier();
+ Qualifier.getKind() == NestedNameSpecifier::Kind::Namespace) {
+ const auto *ND = dyn_cast<NamespaceDecl>(
+ Qualifier.getAsNamespaceAndPrefix().Namespace);
+ if (!visit(ND, QualifierLoc.getLocalBeginLoc(),
+ QualifierLoc.getLocalEndLoc()))
return false;
}
- return BaseType::TraverseNestedNameSpecifierLoc(NNS);
+ return BaseType::TraverseNestedNameSpecifierLoc(QualifierLoc);
}
bool VisitDesignatedInitExpr(const DesignatedInitExpr *E) {
diff --git a/clang/lib/AST/APValue.cpp b/clang/lib/AST/APValue.cpp
index ee3dc84..2d62209 100644
--- a/clang/lib/AST/APValue.cpp
+++ b/clang/lib/AST/APValue.cpp
@@ -902,8 +902,9 @@ void APValue::printPretty(raw_ostream &Out, const PrintingPolicy &Policy,
}
case APValue::Struct: {
Out << '{';
- const RecordDecl *RD = Ty->castAs<RecordType>()->getDecl();
bool First = true;
+ const RecordDecl *RD =
+ Ty->castAs<RecordType>()->getOriginalDecl()->getDefinitionOrSelf();
if (unsigned N = getStructNumBases()) {
const CXXRecordDecl *CD = cast<CXXRecordDecl>(RD);
CXXRecordDecl::base_class_const_iterator BI = CD->bases_begin();
diff --git a/clang/lib/AST/ASTConcept.cpp b/clang/lib/AST/ASTConcept.cpp
index 2243ac0..d658890 100644
--- a/clang/lib/AST/ASTConcept.cpp
+++ b/clang/lib/AST/ASTConcept.cpp
@@ -14,6 +14,7 @@
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ExprConcepts.h"
+#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/PrettyPrinter.h"
#include "llvm/ADT/StringExtras.h"
@@ -92,10 +93,16 @@ ConceptReference::Create(const ASTContext &C, NestedNameSpecifierLoc NNS,
FoundDecl, NamedConcept, ArgsAsWritten);
}
+SourceLocation ConceptReference::getBeginLoc() const {
+ // Note that if the qualifier is null the template KW must also be null.
+ if (auto QualifierLoc = getNestedNameSpecifierLoc())
+ return QualifierLoc.getBeginLoc();
+ return getConceptNameInfo().getBeginLoc();
+}
+
void ConceptReference::print(llvm::raw_ostream &OS,
const PrintingPolicy &Policy) const {
- if (NestedNameSpec)
- NestedNameSpec.getNestedNameSpecifier()->print(OS, Policy);
+ NestedNameSpec.getNestedNameSpecifier().print(OS, Policy);
ConceptName.printName(OS, Policy);
if (hasExplicitTemplateArgs()) {
OS << "<";
diff --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp
index 3a16111..95dd426 100644
--- a/clang/lib/AST/ASTContext.cpp
+++ b/clang/lib/AST/ASTContext.cpp
@@ -654,7 +654,7 @@ comments::FullComment *ASTContext::getCommentForDecl(
// does not have one of its own.
QualType QT = TD->getUnderlyingType();
if (const auto *TT = QT->getAs<TagType>())
- if (const Decl *TD = TT->getDecl())
+ if (const Decl *TD = TT->getOriginalDecl())
if (comments::FullComment *FC = getCommentForDecl(TD, PP))
return cloneFullComment(FC, D);
}
@@ -1933,10 +1933,12 @@ TypeInfoChars ASTContext::getTypeInfoDataSizeInChars(QualType T) const {
// of a base-class subobject. We decide whether that's possible
// during class layout, so here we can just trust the layout results.
if (getLangOpts().CPlusPlus) {
- if (const auto *RT = T->getAs<RecordType>();
- RT && !RT->getDecl()->isInvalidDecl()) {
- const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl());
- Info.Width = layout.getDataSize();
+ if (const auto *RT = T->getAs<RecordType>()) {
+ const auto *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
+ if (!RD->isInvalidDecl()) {
+ const ASTRecordLayout &layout = getASTRecordLayout(RD);
+ Info.Width = layout.getDataSize();
+ }
}
}
@@ -2003,8 +2005,9 @@ bool ASTContext::isPromotableIntegerType(QualType T) const {
// Enumerated types are promotable to their compatible integer types
// (C99 6.3.1.1) a.k.a. its underlying type (C++ [conv.prom]p2).
if (const auto *ET = T->getAs<EnumType>()) {
- if (T->isDependentType() || ET->getDecl()->getPromotionType().isNull() ||
- ET->getDecl()->isScoped())
+ const EnumDecl *ED = ET->getOriginalDecl()->getDefinitionOrSelf();
+ if (T->isDependentType() || ED->getPromotionType().isNull() ||
+ ED->isScoped())
return false;
return true;
@@ -2040,8 +2043,8 @@ unsigned ASTContext::getTypeAlignIfKnown(QualType T,
return Align;
// Otherwise, see if the declaration of the type had an attribute.
- if (const auto *TT = T->getAs<TagType>())
- return TT->getDecl()->getMaxAlignment();
+ if (const auto *TD = T->getAsTagDecl())
+ return TD->getMaxAlignment();
return 0;
}
@@ -2472,15 +2475,16 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
case Type::Record:
case Type::Enum: {
const auto *TT = cast<TagType>(T);
+ const TagDecl *TD = TT->getOriginalDecl()->getDefinitionOrSelf();
- if (TT->getDecl()->isInvalidDecl()) {
+ if (TD->isInvalidDecl()) {
Width = 8;
Align = 8;
break;
}
- if (const auto *ET = dyn_cast<EnumType>(TT)) {
- const EnumDecl *ED = ET->getDecl();
+ if (isa<EnumType>(TT)) {
+ const EnumDecl *ED = cast<EnumDecl>(TD);
TypeInfo Info =
getTypeInfo(ED->getIntegerType()->getUnqualifiedDesugaredType());
if (unsigned AttrAlign = ED->getMaxAlignment()) {
@@ -2490,8 +2494,7 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
return Info;
}
- const auto *RT = cast<RecordType>(TT);
- const RecordDecl *RD = RT->getDecl();
+ const auto *RD = cast<RecordDecl>(TD);
const ASTRecordLayout &Layout = getASTRecordLayout(RD);
Width = toBits(Layout.getSize());
Align = toBits(Layout.getAlignment());
@@ -2543,9 +2546,6 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
break;
}
- case Type::Elaborated:
- return getTypeInfo(cast<ElaboratedType>(T)->getNamedType().getTypePtr());
-
case Type::Attributed:
return getTypeInfo(
cast<AttributedType>(T)->getEquivalentType().getTypePtr());
@@ -2619,8 +2619,7 @@ unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const {
unsigned UnadjustedAlign;
if (const auto *RT = T->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
- const ASTRecordLayout &Layout = getASTRecordLayout(RD);
+ const ASTRecordLayout &Layout = getASTRecordLayout(RT->getOriginalDecl());
UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
} else if (const auto *ObjCI = T->getAs<ObjCInterfaceType>()) {
const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
@@ -2696,7 +2695,7 @@ unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
return ABIAlign;
if (const auto *RT = T->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
// When used as part of a typedef, or together with a 'packed' attribute,
// the 'aligned' attribute can be used to decrease alignment. Note that the
@@ -2719,7 +2718,10 @@ unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
if (const auto *CT = T->getAs<ComplexType>())
T = CT->getElementType().getTypePtr();
if (const auto *ET = T->getAs<EnumType>())
- T = ET->getDecl()->getIntegerType().getTypePtr();
+ T = ET->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->getIntegerType()
+ .getTypePtr();
if (T->isSpecificBuiltinType(BuiltinType::Double) ||
T->isSpecificBuiltinType(BuiltinType::LongLong) ||
T->isSpecificBuiltinType(BuiltinType::ULongLong) ||
@@ -2851,7 +2853,8 @@ static bool unionHasUniqueObjectRepresentations(const ASTContext &Context,
const RecordDecl *RD,
bool CheckIfTriviallyCopyable) {
assert(RD->isUnion() && "Must be union type");
- CharUnits UnionSize = Context.getTypeSizeInChars(RD->getTypeForDecl());
+ CharUnits UnionSize =
+ Context.getTypeSizeInChars(Context.getCanonicalTagType(RD));
for (const auto *Field : RD->fields()) {
if (!Context.hasUniqueObjectRepresentations(Field->getType(),
@@ -3045,7 +3048,8 @@ bool ASTContext::hasUniqueObjectRepresentations(
return !ABI->getMemberPointerInfo(MPT).HasPadding;
if (Ty->isRecordType()) {
- const RecordDecl *Record = Ty->castAs<RecordType>()->getDecl();
+ const RecordDecl *Record =
+ Ty->castAs<RecordType>()->getOriginalDecl()->getDefinitionOrSelf();
if (Record->isInvalidDecl())
return false;
@@ -3418,7 +3422,10 @@ static void encodeTypeForFunctionPointerAuth(const ASTContext &Ctx,
// type, or an unsigned integer type.
//
// So we have to treat enum types as integers.
- QualType UnderlyingType = cast<EnumType>(T)->getDecl()->getIntegerType();
+ QualType UnderlyingType = cast<EnumType>(T)
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->getIntegerType();
return encodeTypeForFunctionPointerAuth(
Ctx, OS, UnderlyingType.isNull() ? Ctx.IntTy : UnderlyingType);
}
@@ -3456,7 +3463,7 @@ static void encodeTypeForFunctionPointerAuth(const ASTContext &Ctx,
OS << "M";
const auto *MPT = T->castAs<MemberPointerType>();
encodeTypeForFunctionPointerAuth(
- Ctx, OS, QualType(MPT->getQualifier()->getAsType(), 0));
+ Ctx, OS, QualType(MPT->getQualifier().getAsType(), 0));
encodeTypeForFunctionPointerAuth(Ctx, OS, MPT->getPointeeType());
return;
}
@@ -3562,7 +3569,8 @@ static void encodeTypeForFunctionPointerAuth(const ASTContext &Ctx,
llvm_unreachable("should never get here");
}
case Type::Record: {
- const RecordDecl *RD = T->castAs<RecordType>()->getDecl();
+ const RecordDecl *RD =
+ T->castAs<RecordType>()->getOriginalDecl()->getDefinitionOrSelf();
const IdentifierInfo *II = RD->getIdentifier();
// In C++, an immediate typedef of an anonymous struct or union
@@ -3740,12 +3748,6 @@ ASTContext::adjustType(QualType Orig,
adjustType(BTFT->getWrappedType(), Adjust));
}
- case Type::Elaborated: {
- const auto *ET = cast<ElaboratedType>(Orig);
- return getElaboratedType(ET->getKeyword(), ET->getQualifier(),
- adjustType(ET->getNamedType(), Adjust));
- }
-
case Type::Paren:
return getParenType(
adjustType(cast<ParenType>(Orig)->getInnerType(), Adjust));
@@ -4163,14 +4165,13 @@ QualType ASTContext::getRValueReferenceType(QualType T) const {
}
QualType ASTContext::getMemberPointerType(QualType T,
- NestedNameSpecifier *Qualifier,
+ NestedNameSpecifier Qualifier,
const CXXRecordDecl *Cls) const {
if (!Qualifier) {
assert(Cls && "At least one of Qualifier or Cls must be provided");
- Qualifier = NestedNameSpecifier::Create(*this, /*Prefix=*/nullptr,
- getTypeDeclType(Cls).getTypePtr());
+ Qualifier = NestedNameSpecifier(getCanonicalTagType(Cls).getTypePtr());
} else if (!Cls) {
- Cls = Qualifier->getAsRecordDecl();
+ Cls = Qualifier.getAsRecordDecl();
}
// Unique pointers, to guarantee there is only one pointer of a particular
// structure.
@@ -4182,12 +4183,11 @@ QualType ASTContext::getMemberPointerType(QualType T,
MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
return QualType(PT, 0);
- NestedNameSpecifier *CanonicalQualifier = [&] {
+ NestedNameSpecifier CanonicalQualifier = [&] {
if (!Cls)
- return getCanonicalNestedNameSpecifier(Qualifier);
- NestedNameSpecifier *R = NestedNameSpecifier::Create(
- *this, /*Prefix=*/nullptr, Cls->getCanonicalDecl()->getTypeForDecl());
- assert(R == getCanonicalNestedNameSpecifier(R));
+ return Qualifier.getCanonical();
+ NestedNameSpecifier R(getCanonicalTagType(Cls).getTypePtr());
+ assert(R.isCanonical());
return R;
}();
// If the pointee or class type isn't canonical, this won't be a canonical
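A minimal caller-side sketch of the value-semantics NestedNameSpecifier API this hunk switches to; the names Ctx, Cls, and FieldTy are assumed for illustration only:

  NestedNameSpecifier Qualifier(Ctx.getCanonicalTagType(Cls).getTypePtr());
  assert(Qualifier.isCanonical());
  QualType MemPtr = Ctx.getMemberPointerType(FieldTy, Qualifier, /*Cls=*/nullptr);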
@@ -5245,7 +5245,6 @@ ASTContext::getPredefinedSugarType(PredefinedSugarType::Kind KD) const {
}
llvm_unreachable("unexpected kind");
};
-
auto *New = new (*this, alignof(PredefinedSugarType))
PredefinedSugarType(KD, &Idents.get(PredefinedSugarType::getName(KD)),
getCanonicalType(*this, static_cast<Kind>(KD)));
@@ -5254,153 +5253,297 @@ ASTContext::getPredefinedSugarType(PredefinedSugarType::Kind KD) const {
return QualType(New, 0);
}
-#ifndef NDEBUG
-static bool NeedsInjectedClassNameType(const RecordDecl *D) {
- if (!isa<CXXRecordDecl>(D)) return false;
- const auto *RD = cast<CXXRecordDecl>(D);
- if (isa<ClassTemplatePartialSpecializationDecl>(RD))
- return true;
- if (RD->getDescribedClassTemplate() &&
- !isa<ClassTemplateSpecializationDecl>(RD))
- return true;
- return false;
-}
-#endif
+QualType ASTContext::getTypeDeclType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier,
+ const TypeDecl *Decl) const {
+ if (auto *Tag = dyn_cast<TagDecl>(Decl))
+ return getTagType(Keyword, Qualifier, Tag,
+ /*OwnsTag=*/false);
+ if (auto *Typedef = dyn_cast<TypedefNameDecl>(Decl))
+ return getTypedefType(Keyword, Qualifier, Typedef);
+ if (auto *UD = dyn_cast<UnresolvedUsingTypenameDecl>(Decl))
+ return getUnresolvedUsingType(Keyword, Qualifier, UD);
-/// getInjectedClassNameType - Return the unique reference to the
-/// injected class name type for the specified templated declaration.
-QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl,
- QualType TST) const {
- assert(NeedsInjectedClassNameType(Decl));
- if (Decl->TypeForDecl) {
- assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
- } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDecl()) {
- assert(PrevDecl->TypeForDecl && "previous declaration has no type");
- Decl->TypeForDecl = PrevDecl->TypeForDecl;
- assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
- } else {
- Type *newType = new (*this, alignof(InjectedClassNameType))
- InjectedClassNameType(Decl, TST);
- Decl->TypeForDecl = newType;
- Types.push_back(newType);
- }
+ assert(Keyword == ElaboratedTypeKeyword::None);
+ assert(!Qualifier);
return QualType(Decl->TypeForDecl, 0);
}
-/// getTypeDeclType - Return the unique reference to the type for the
-/// specified type declaration.
-QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const {
- assert(Decl && "Passed null for Decl param");
- assert(!Decl->TypeForDecl && "TypeForDecl present in slow case");
-
- if (const auto *Typedef = dyn_cast<TypedefNameDecl>(Decl))
- return getTypedefType(Typedef);
-
- assert(!isa<TemplateTypeParmDecl>(Decl) &&
- "Template type parameter types are always available.");
-
- if (const auto *Record = dyn_cast<RecordDecl>(Decl)) {
- assert(Record->isFirstDecl() && "struct/union has previous declaration");
- assert(!NeedsInjectedClassNameType(Record));
- return getRecordType(Record);
- } else if (const auto *Enum = dyn_cast<EnumDecl>(Decl)) {
- assert(Enum->isFirstDecl() && "enum has previous declaration");
- return getEnumType(Enum);
- } else if (const auto *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) {
- return getUnresolvedUsingType(Using);
- } else
- llvm_unreachable("TypeDecl without a type?");
-
+CanQualType ASTContext::getCanonicalTypeDeclType(const TypeDecl *TD) const {
+ if (auto *Tag = dyn_cast<TagDecl>(TD))
+ return getCanonicalTagType(Tag);
+ if (auto *TN = dyn_cast<TypedefNameDecl>(TD))
+ return getCanonicalType(TN->getUnderlyingType());
+ if (const auto *UD = dyn_cast<UnresolvedUsingTypenameDecl>(TD))
+ return getCanonicalUnresolvedUsingType(UD);
+ assert(TD->TypeForDecl);
+ return TD->TypeForDecl->getCanonicalTypeUnqualified();
+}
+
+QualType ASTContext::getTypeDeclType(const TypeDecl *Decl) const {
+ if (const auto *TD = dyn_cast<TagDecl>(Decl))
+ return getCanonicalTagType(TD);
+ if (const auto *TD = dyn_cast<TypedefNameDecl>(Decl);
+ isa_and_nonnull<TypedefDecl, TypeAliasDecl>(TD))
+ return getTypedefType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, TD);
+ if (const auto *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Decl))
+ return getCanonicalUnresolvedUsingType(Using);
+
+ assert(Decl->TypeForDecl);
return QualType(Decl->TypeForDecl, 0);
}
/// getTypedefType - Return the unique reference to the type for the
/// specified typedef name decl.
-QualType ASTContext::getTypedefType(const TypedefNameDecl *Decl,
- QualType Underlying) const {
- if (!Decl->TypeForDecl) {
- if (Underlying.isNull())
- Underlying = Decl->getUnderlyingType();
- auto *NewType = new (*this, alignof(TypedefType)) TypedefType(
- Type::Typedef, Decl, Underlying, /*HasTypeDifferentFromDecl=*/false);
- Decl->TypeForDecl = NewType;
+QualType
+ASTContext::getTypedefType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier,
+ const TypedefNameDecl *Decl, QualType UnderlyingType,
+ std::optional<bool> TypeMatchesDeclOrNone) const {
+ if (!TypeMatchesDeclOrNone) {
+ QualType DeclUnderlyingType = Decl->getUnderlyingType();
+ assert(!DeclUnderlyingType.isNull());
+ if (UnderlyingType.isNull())
+ UnderlyingType = DeclUnderlyingType;
+ else
+ assert(hasSameType(UnderlyingType, DeclUnderlyingType));
+ TypeMatchesDeclOrNone = UnderlyingType == DeclUnderlyingType;
+ } else {
+ // FIXME: This is a workaround for a serialization cycle: assume the decl
+ // underlying type is not available; don't touch it.
+ assert(!UnderlyingType.isNull());
+ }
+
+ if (Keyword == ElaboratedTypeKeyword::None && !Qualifier &&
+ *TypeMatchesDeclOrNone) {
+ if (Decl->TypeForDecl)
+ return QualType(Decl->TypeForDecl, 0);
+
+ auto *NewType = new (*this, alignof(TypedefType))
+ TypedefType(Type::Typedef, Keyword, Qualifier, Decl, UnderlyingType,
+ !*TypeMatchesDeclOrNone);
+
Types.push_back(NewType);
+ Decl->TypeForDecl = NewType;
return QualType(NewType, 0);
}
- if (Underlying.isNull() || Decl->getUnderlyingType() == Underlying)
- return QualType(Decl->TypeForDecl, 0);
- assert(hasSameType(Decl->getUnderlyingType(), Underlying));
llvm::FoldingSetNodeID ID;
- TypedefType::Profile(ID, Decl, Underlying);
+ TypedefType::Profile(ID, Keyword, Qualifier, Decl, UnderlyingType);
void *InsertPos = nullptr;
- if (TypedefType *T = TypedefTypes.FindNodeOrInsertPos(ID, InsertPos)) {
- assert(!T->typeMatchesDecl() &&
- "non-divergent case should be handled with TypeDecl");
- return QualType(T, 0);
- }
+ if (FoldingSetPlaceholder<TypedefType> *Placeholder =
+ TypedefTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(Placeholder->getType(), 0);
- void *Mem = Allocate(TypedefType::totalSizeToAlloc<QualType>(true),
- alignof(TypedefType));
- auto *NewType = new (Mem) TypedefType(Type::Typedef, Decl, Underlying,
- /*HasTypeDifferentFromDecl=*/true);
- TypedefTypes.InsertNode(NewType, InsertPos);
+ void *Mem =
+ Allocate(TypedefType::totalSizeToAlloc<FoldingSetPlaceholder<TypedefType>,
+ NestedNameSpecifier, QualType>(
+ 1, !!Qualifier, !*TypeMatchesDeclOrNone),
+ alignof(TypedefType));
+ auto *NewType =
+ new (Mem) TypedefType(Type::Typedef, Keyword, Qualifier, Decl,
+ UnderlyingType, !*TypeMatchesDeclOrNone);
+ auto *Placeholder = new (NewType->getFoldingSetPlaceholder())
+ FoldingSetPlaceholder<TypedefType>();
+ TypedefTypes.InsertNode(Placeholder, InsertPos);
Types.push_back(NewType);
return QualType(NewType, 0);
}
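A short sketch of how call sites spell the new getTypedefType signature (mirroring the builtin va_list call sites later in this patch); Ctx and TD are assumed names:

  QualType T = Ctx.getTypedefType(ElaboratedTypeKeyword::None,
                                  /*Qualifier=*/std::nullopt, TD);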
-QualType ASTContext::getUsingType(const UsingShadowDecl *Found,
- QualType Underlying) const {
+QualType ASTContext::getUsingType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier,
+ const UsingShadowDecl *D,
+ QualType UnderlyingType) const {
+ // FIXME: This is expensive to compute every time!
+ if (UnderlyingType.isNull()) {
+ const auto *UD = cast<UsingDecl>(D->getIntroducer());
+ UnderlyingType =
+ getTypeDeclType(UD->hasTypename() ? ElaboratedTypeKeyword::Typename
+ : ElaboratedTypeKeyword::None,
+ UD->getQualifier(), cast<TypeDecl>(D->getTargetDecl()));
+ }
+
llvm::FoldingSetNodeID ID;
- UsingType::Profile(ID, Found, Underlying);
+ UsingType::Profile(ID, Keyword, Qualifier, D, UnderlyingType);
void *InsertPos = nullptr;
- if (UsingType *T = UsingTypes.FindNodeOrInsertPos(ID, InsertPos))
+ if (const UsingType *T = UsingTypes.FindNodeOrInsertPos(ID, InsertPos))
return QualType(T, 0);
- const Type *TypeForDecl =
- cast<TypeDecl>(Found->getTargetDecl())->getTypeForDecl();
+ assert(!UnderlyingType.hasLocalQualifiers());
- assert(!Underlying.hasLocalQualifiers());
- QualType Canon = Underlying->getCanonicalTypeInternal();
- assert(TypeForDecl->getCanonicalTypeInternal() == Canon);
+ assert(
+ hasSameType(getCanonicalTypeDeclType(cast<TypeDecl>(D->getTargetDecl())),
+ UnderlyingType));
- if (Underlying.getTypePtr() == TypeForDecl)
- Underlying = QualType();
void *Mem =
- Allocate(UsingType::totalSizeToAlloc<QualType>(!Underlying.isNull()),
+ Allocate(UsingType::totalSizeToAlloc<NestedNameSpecifier>(!!Qualifier),
alignof(UsingType));
- UsingType *NewType = new (Mem) UsingType(Found, Underlying, Canon);
- Types.push_back(NewType);
- UsingTypes.InsertNode(NewType, InsertPos);
- return QualType(NewType, 0);
+ UsingType *T = new (Mem) UsingType(Keyword, Qualifier, D, UnderlyingType);
+ Types.push_back(T);
+ UsingTypes.InsertNode(T, InsertPos);
+ return QualType(T, 0);
}
-QualType ASTContext::getRecordType(const RecordDecl *Decl) const {
- if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
+TagType *ASTContext::getTagTypeInternal(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier,
+ const TagDecl *TD, bool OwnsTag,
+ bool IsInjected,
+ const Type *CanonicalType,
+ bool WithFoldingSetNode) const {
+ auto [TC, Size] = [&] {
+ switch (TD->getDeclKind()) {
+ case Decl::Enum:
+ static_assert(alignof(EnumType) == alignof(TagType));
+ return std::make_tuple(Type::Enum, sizeof(EnumType));
+ case Decl::ClassTemplatePartialSpecialization:
+ case Decl::ClassTemplateSpecialization:
+ case Decl::CXXRecord:
+ static_assert(alignof(RecordType) == alignof(TagType));
+ static_assert(alignof(InjectedClassNameType) == alignof(TagType));
+ if (cast<CXXRecordDecl>(TD)->hasInjectedClassType())
+ return std::make_tuple(Type::InjectedClassName,
+ sizeof(InjectedClassNameType));
+ [[fallthrough]];
+ case Decl::Record:
+ return std::make_tuple(Type::Record, sizeof(RecordType));
+ default:
+ llvm_unreachable("unexpected decl kind");
+ }
+ }();
- if (const RecordDecl *PrevDecl = Decl->getPreviousDecl())
- if (PrevDecl->TypeForDecl)
- return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
+ if (Qualifier) {
+ static_assert(alignof(NestedNameSpecifier) <= alignof(TagType));
+ Size = llvm::alignTo(Size, alignof(NestedNameSpecifier)) +
+ sizeof(NestedNameSpecifier);
+ }
+ void *Mem;
+ if (WithFoldingSetNode) {
+ // FIXME: It would be more profitable to tail allocate the folding set node
+ // from the type, instead of the other way around, due to the greater
+ // alignment requirements of the type. But this makes it harder to deal with
+ // the different type node sizes. This would require either uniquing from
+ // different folding sets, or having the folding set accept a
+ // contextual parameter which is not fixed at construction.
+ Mem = Allocate(
+ sizeof(TagTypeFoldingSetPlaceholder) +
+ TagTypeFoldingSetPlaceholder::getOffset() + Size,
+ std::max(alignof(TagTypeFoldingSetPlaceholder), alignof(TagType)));
+ auto *T = new (Mem) TagTypeFoldingSetPlaceholder();
+ Mem = T->getTagType();
+ } else {
+ Mem = Allocate(Size, alignof(TagType));
+ }
+
+ auto *T = [&, TC = TC]() -> TagType * {
+ switch (TC) {
+ case Type::Enum: {
+ assert(isa<EnumDecl>(TD));
+ auto *T = new (Mem) EnumType(TC, Keyword, Qualifier, TD, OwnsTag,
+ IsInjected, CanonicalType);
+ assert(reinterpret_cast<void *>(T) ==
+ reinterpret_cast<void *>(static_cast<TagType *>(T)) &&
+ "TagType must be the first base of EnumType");
+ return T;
+ }
+ case Type::Record: {
+ assert(isa<RecordDecl>(TD));
+ auto *T = new (Mem) RecordType(TC, Keyword, Qualifier, TD, OwnsTag,
+ IsInjected, CanonicalType);
+ assert(reinterpret_cast<void *>(T) ==
+ reinterpret_cast<void *>(static_cast<TagType *>(T)) &&
+ "TagType must be the first base of RecordType");
+ return T;
+ }
+ case Type::InjectedClassName: {
+ auto *T = new (Mem) InjectedClassNameType(Keyword, Qualifier, TD,
+ IsInjected, CanonicalType);
+ assert(reinterpret_cast<void *>(T) ==
+ reinterpret_cast<void *>(static_cast<TagType *>(T)) &&
+ "TagType must be the first base of InjectedClassNameType");
+ return T;
+ }
+ default:
+ llvm_unreachable("unexpected type class");
+ }
+ }();
+ assert(T->getKeyword() == Keyword);
+ assert(T->getQualifier() == Qualifier);
+ assert(T->getOriginalDecl() == TD);
+ assert(T->isInjected() == IsInjected);
+ assert(T->isTagOwned() == OwnsTag);
+ assert((T->isCanonicalUnqualified()
+ ? QualType()
+ : T->getCanonicalTypeInternal()) == QualType(CanonicalType, 0));
+ Types.push_back(T);
+ return T;
+}
- auto *newType = new (*this, alignof(RecordType)) RecordType(Decl);
- Decl->TypeForDecl = newType;
- Types.push_back(newType);
- return QualType(newType, 0);
+static bool getNonInjectedClassName(const TagDecl *&TD) {
+ if (const auto *RD = dyn_cast<CXXRecordDecl>(TD);
+ RD && RD->isInjectedClassName()) {
+ TD = cast<TagDecl>(RD->getDeclContext());
+ return true;
+ }
+ return false;
}
-QualType ASTContext::getEnumType(const EnumDecl *Decl) const {
- if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
+CanQualType ASTContext::getCanonicalTagType(const TagDecl *TD) const {
+ ::getNonInjectedClassName(TD);
+ TD = TD->getCanonicalDecl();
+ if (TD->TypeForDecl)
+ return TD->TypeForDecl->getCanonicalTypeUnqualified();
+
+ const Type *CanonicalType = getTagTypeInternal(
+ ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, TD,
+ /*OwnsTag=*/false, /*IsInjected=*/false, /*CanonicalType=*/nullptr,
+ /*WithFoldingSetNode=*/false);
+ TD->TypeForDecl = CanonicalType;
+ return CanQualType::CreateUnsafe(QualType(CanonicalType, 0));
+}
+
+QualType ASTContext::getTagType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier,
+ const TagDecl *TD, bool OwnsTag) const {
+ ElaboratedTypeKeyword PreferredKeyword =
+ getLangOpts().CPlusPlus
+ ? ElaboratedTypeKeyword::None
+ : KeywordHelpers::getKeywordForTagTypeKind(TD->getTagKind());
+
+ if (Keyword == PreferredKeyword && !Qualifier && !OwnsTag) {
+ if (const Type *T = TD->TypeForDecl; T && !T->isCanonicalUnqualified())
+ return QualType(T, 0);
+
+ bool IsInjected = ::getNonInjectedClassName(TD);
+ const Type *CanonicalType = getCanonicalTagType(TD).getTypePtr();
+ const Type *T =
+ getTagTypeInternal(Keyword,
+ /*Qualifier=*/std::nullopt, TD,
+ /*OwnsTag=*/false, IsInjected, CanonicalType,
+ /*WithFoldingSetNode=*/false);
+ TD->TypeForDecl = T;
+ return QualType(T, 0);
+ }
+
+ bool IsInjected = ::getNonInjectedClassName(TD);
- if (const EnumDecl *PrevDecl = Decl->getPreviousDecl())
- if (PrevDecl->TypeForDecl)
- return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
+ llvm::FoldingSetNodeID ID;
+ TagTypeFoldingSetPlaceholder::Profile(ID, Keyword, Qualifier, TD, OwnsTag,
+ IsInjected);
- auto *newType = new (*this, alignof(EnumType)) EnumType(Decl);
- Decl->TypeForDecl = newType;
- Types.push_back(newType);
- return QualType(newType, 0);
+ void *InsertPos = nullptr;
+ if (TagTypeFoldingSetPlaceholder *T =
+ TagTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(T->getTagType(), 0);
+
+ const Type *CanonicalType = getCanonicalTagType(TD).getTypePtr();
+ TagType *T = getTagTypeInternal(Keyword, Qualifier, TD, OwnsTag, IsInjected,
+ CanonicalType, /*WithFoldingSetNode=*/true);
+ TagTypes.InsertNode(TagTypeFoldingSetPlaceholder::fromTagType(T), InsertPos);
+ return QualType(T, 0);
}
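A sketch contrasting the two tag-type entry points introduced above, assuming Ctx, Qualifier, and TD as caller-side names:

  CanQualType Canon = Ctx.getCanonicalTagType(TD);   // canonical, no written sugar
  QualType Sugared = Ctx.getTagType(ElaboratedTypeKeyword::Struct, Qualifier, TD,
                                    /*OwnsTag=*/false); // keeps keyword and qualifier
  assert(Ctx.hasSameType(Canon, Sugared));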
bool ASTContext::computeBestEnumTypes(bool IsPacked, unsigned NumNegativeBits,
@@ -5495,21 +5638,69 @@ bool ASTContext::isRepresentableIntegerValue(llvm::APSInt &Value, QualType T) {
return Value.getSignificantBits() <= BitWidth;
}
-QualType ASTContext::getUnresolvedUsingType(
- const UnresolvedUsingTypenameDecl *Decl) const {
- if (Decl->TypeForDecl)
- return QualType(Decl->TypeForDecl, 0);
+UnresolvedUsingType *ASTContext::getUnresolvedUsingTypeInternal(
+ ElaboratedTypeKeyword Keyword, NestedNameSpecifier Qualifier,
+ const UnresolvedUsingTypenameDecl *D, void *InsertPos,
+ const Type *CanonicalType) const {
+ void *Mem = Allocate(
+ UnresolvedUsingType::totalSizeToAlloc<
+ FoldingSetPlaceholder<UnresolvedUsingType>, NestedNameSpecifier>(
+ !!InsertPos, !!Qualifier),
+ alignof(UnresolvedUsingType));
+ auto *T = new (Mem) UnresolvedUsingType(Keyword, Qualifier, D, CanonicalType);
+ if (InsertPos) {
+ auto *Placeholder = new (T->getFoldingSetPlaceholder())
+ FoldingSetPlaceholder<UnresolvedUsingType>();
+ UnresolvedUsingTypes.InsertNode(Placeholder, InsertPos);
+ }
+ Types.push_back(T);
+ return T;
+}
- if (const UnresolvedUsingTypenameDecl *CanonicalDecl =
- Decl->getCanonicalDecl())
- if (CanonicalDecl->TypeForDecl)
- return QualType(Decl->TypeForDecl = CanonicalDecl->TypeForDecl, 0);
+CanQualType ASTContext::getCanonicalUnresolvedUsingType(
+ const UnresolvedUsingTypenameDecl *D) const {
+ D = D->getCanonicalDecl();
+ if (D->TypeForDecl)
+ return D->TypeForDecl->getCanonicalTypeUnqualified();
- Type *newType =
- new (*this, alignof(UnresolvedUsingType)) UnresolvedUsingType(Decl);
- Decl->TypeForDecl = newType;
- Types.push_back(newType);
- return QualType(newType, 0);
+ const Type *CanonicalType = getUnresolvedUsingTypeInternal(
+ ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, D,
+ /*InsertPos=*/nullptr, /*CanonicalType=*/nullptr);
+ D->TypeForDecl = CanonicalType;
+ return CanQualType::CreateUnsafe(QualType(CanonicalType, 0));
+}
+
+QualType
+ASTContext::getUnresolvedUsingType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier,
+ const UnresolvedUsingTypenameDecl *D) const {
+ if (Keyword == ElaboratedTypeKeyword::None && !Qualifier) {
+ if (const Type *T = D->TypeForDecl; T && !T->isCanonicalUnqualified())
+ return QualType(T, 0);
+
+ const Type *CanonicalType = getCanonicalUnresolvedUsingType(D).getTypePtr();
+ const Type *T =
+ getUnresolvedUsingTypeInternal(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, D,
+ /*InsertPos=*/nullptr, CanonicalType);
+ D->TypeForDecl = T;
+ return QualType(T, 0);
+ }
+
+ llvm::FoldingSetNodeID ID;
+ UnresolvedUsingType::Profile(ID, Keyword, Qualifier, D);
+
+ void *InsertPos = nullptr;
+ if (FoldingSetPlaceholder<UnresolvedUsingType> *Placeholder =
+ UnresolvedUsingTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(Placeholder->getType(), 0);
+ assert(InsertPos);
+
+ const Type *CanonicalType = getCanonicalUnresolvedUsingType(D).getTypePtr();
+ const Type *T = getUnresolvedUsingTypeInternal(Keyword, Qualifier, D,
+ InsertPos, CanonicalType);
+ return QualType(T, 0);
}
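The unresolved-using path mirrors the fast-path/folding-set split used for tag types above; a caller-side sketch with Ctx, Qualifier, and D assumed:

  QualType T = Ctx.getUnresolvedUsingType(ElaboratedTypeKeyword::Typename, Qualifier, D);
  assert(Ctx.hasSameType(T, Ctx.getCanonicalUnresolvedUsingType(D)));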
QualType ASTContext::getAttributedType(attr::Kind attrKind,
@@ -5729,34 +5920,32 @@ QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index,
}
TypeSourceInfo *ASTContext::getTemplateSpecializationTypeInfo(
+ ElaboratedTypeKeyword Keyword, SourceLocation ElaboratedKeywordLoc,
+ NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKeywordLoc,
TemplateName Name, SourceLocation NameLoc,
const TemplateArgumentListInfo &SpecifiedArgs,
ArrayRef<TemplateArgument> CanonicalArgs, QualType Underlying) const {
- QualType TST = getTemplateSpecializationType(Name, SpecifiedArgs.arguments(),
- CanonicalArgs, Underlying);
+ QualType TST = getTemplateSpecializationType(
+ Keyword, Name, SpecifiedArgs.arguments(), CanonicalArgs, Underlying);
TypeSourceInfo *DI = CreateTypeSourceInfo(TST);
- TemplateSpecializationTypeLoc TL =
- DI->getTypeLoc().castAs<TemplateSpecializationTypeLoc>();
- TL.setTemplateKeywordLoc(SourceLocation());
- TL.setTemplateNameLoc(NameLoc);
- TL.setLAngleLoc(SpecifiedArgs.getLAngleLoc());
- TL.setRAngleLoc(SpecifiedArgs.getRAngleLoc());
- for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
- TL.setArgLocInfo(i, SpecifiedArgs[i].getLocInfo());
+ DI->getTypeLoc().castAs<TemplateSpecializationTypeLoc>().set(
+ ElaboratedKeywordLoc, QualifierLoc, TemplateKeywordLoc, NameLoc,
+ SpecifiedArgs);
return DI;
}
QualType ASTContext::getTemplateSpecializationType(
- TemplateName Template, ArrayRef<TemplateArgumentLoc> SpecifiedArgs,
+ ElaboratedTypeKeyword Keyword, TemplateName Template,
+ ArrayRef<TemplateArgumentLoc> SpecifiedArgs,
ArrayRef<TemplateArgument> CanonicalArgs, QualType Underlying) const {
SmallVector<TemplateArgument, 4> SpecifiedArgVec;
SpecifiedArgVec.reserve(SpecifiedArgs.size());
for (const TemplateArgumentLoc &Arg : SpecifiedArgs)
SpecifiedArgVec.push_back(Arg.getArgument());
- return getTemplateSpecializationType(Template, SpecifiedArgVec, CanonicalArgs,
- Underlying);
+ return getTemplateSpecializationType(Keyword, Template, SpecifiedArgVec,
+ CanonicalArgs, Underlying);
}
[[maybe_unused]] static bool
@@ -5787,7 +5976,8 @@ QualType ASTContext::getCanonicalTemplateSpecializationType(
sizeof(TemplateArgument) * Args.size(),
alignof(TemplateSpecializationType));
auto *Spec = new (Mem)
- TemplateSpecializationType(Template, /*IsAlias=*/false, Args, QualType());
+ TemplateSpecializationType(ElaboratedTypeKeyword::None, Template,
+ /*IsAlias=*/false, Args, QualType());
assert(Spec->isDependentType() &&
"canonical template specialization must be dependent");
Types.push_back(Spec);
@@ -5796,7 +5986,8 @@ QualType ASTContext::getCanonicalTemplateSpecializationType(
}
QualType ASTContext::getTemplateSpecializationType(
- TemplateName Template, ArrayRef<TemplateArgument> SpecifiedArgs,
+ ElaboratedTypeKeyword Keyword, TemplateName Template,
+ ArrayRef<TemplateArgument> SpecifiedArgs,
ArrayRef<TemplateArgument> CanonicalArgs, QualType Underlying) const {
assert(!Template.getUnderlying().getAsDependentTemplateName() &&
"No dependent template names here!");
@@ -5806,7 +5997,8 @@ QualType ASTContext::getTemplateSpecializationType(
if (Underlying.isNull()) {
TemplateName CanonTemplate =
getCanonicalTemplateName(Template, /*IgnoreDeduced=*/true);
- bool NonCanonical = Template != CanonTemplate;
+ bool NonCanonical =
+ Template != CanonTemplate || Keyword != ElaboratedTypeKeyword::None;
SmallVector<TemplateArgument, 4> CanonArgsVec;
if (CanonicalArgs.empty()) {
CanonArgsVec = SmallVector<TemplateArgument, 4>(SpecifiedArgs);
@@ -5837,42 +6029,12 @@ QualType ASTContext::getTemplateSpecializationType(
sizeof(TemplateArgument) * SpecifiedArgs.size() +
(IsTypeAlias ? sizeof(QualType) : 0),
alignof(TemplateSpecializationType));
- auto *Spec = new (Mem) TemplateSpecializationType(Template, IsTypeAlias,
- SpecifiedArgs, Underlying);
+ auto *Spec = new (Mem) TemplateSpecializationType(
+ Keyword, Template, IsTypeAlias, SpecifiedArgs, Underlying);
Types.push_back(Spec);
return QualType(Spec, 0);
}
-QualType ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword,
- NestedNameSpecifier *NNS,
- QualType NamedType,
- TagDecl *OwnedTagDecl) const {
- llvm::FoldingSetNodeID ID;
- ElaboratedType::Profile(ID, Keyword, NNS, NamedType, OwnedTagDecl);
-
- void *InsertPos = nullptr;
- ElaboratedType *T = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
- if (T)
- return QualType(T, 0);
-
- QualType Canon = NamedType;
- if (!Canon.isCanonical()) {
- Canon = getCanonicalType(NamedType);
- ElaboratedType *CheckT = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
- assert(!CheckT && "Elaborated canonical type broken");
- (void)CheckT;
- }
-
- void *Mem =
- Allocate(ElaboratedType::totalSizeToAlloc<TagDecl *>(!!OwnedTagDecl),
- alignof(ElaboratedType));
- T = new (Mem) ElaboratedType(Keyword, NNS, NamedType, Canon, OwnedTagDecl);
-
- Types.push_back(T);
- ElaboratedTypes.InsertNode(T, InsertPos);
- return QualType(T, 0);
-}
-
QualType
ASTContext::getParenType(QualType InnerType) const {
llvm::FoldingSetNodeID ID;
@@ -5935,7 +6097,7 @@ getCanonicalElaboratedTypeKeyword(ElaboratedTypeKeyword Keyword) {
}
QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword,
- NestedNameSpecifier *NNS,
+ NestedNameSpecifier NNS,
const IdentifierInfo *Name) const {
llvm::FoldingSetNodeID ID;
DependentNameType::Profile(ID, Keyword, NNS, Name);
@@ -5947,7 +6109,7 @@ QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword,
ElaboratedTypeKeyword CanonKeyword =
getCanonicalElaboratedTypeKeyword(Keyword);
- NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
+ NestedNameSpecifier CanonNNS = NNS.getCanonical();
QualType Canon;
if (CanonKeyword != Keyword || CanonNNS != NNS) {
@@ -5985,13 +6147,13 @@ QualType ASTContext::getDependentTemplateSpecializationType(
T_iter != DependentTemplateSpecializationTypes.end())
return QualType(T_iter->getSecond(), 0);
- NestedNameSpecifier *NNS = Name.getQualifier();
+ NestedNameSpecifier NNS = Name.getQualifier();
QualType Canon;
if (!IsCanonical) {
ElaboratedTypeKeyword CanonKeyword =
getCanonicalElaboratedTypeKeyword(Keyword);
- NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
+ NestedNameSpecifier CanonNNS = NNS.getCanonical();
bool AnyNonCanonArgs = false;
auto CanonArgs =
::getCanonicalTemplateArguments(*this, Args, AnyNonCanonArgs);
@@ -6006,7 +6168,7 @@ QualType ASTContext::getDependentTemplateSpecializationType(
} else {
assert(Keyword == getCanonicalElaboratedTypeKeyword(Keyword));
assert(Name.hasTemplateKeyword());
- assert(NNS == getCanonicalNestedNameSpecifier(NNS));
+ assert(NNS.isCanonical());
#ifndef NDEBUG
for (const auto &Arg : Args)
assert(Arg.structurallyEquals(getCanonicalTemplateArgument(Arg)));
@@ -6062,7 +6224,8 @@ TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) const {
} else {
auto *TTP = cast<TemplateTemplateParmDecl>(Param);
TemplateName Name = getQualifiedTemplateName(
- nullptr, /*TemplateKeyword=*/false, TemplateName(TTP));
+ /*Qualifier=*/std::nullopt, /*TemplateKeyword=*/false,
+ TemplateName(TTP));
if (TTP->isParameterPack())
Arg = TemplateArgument(Name, /*NumExpansions=*/std::nullopt);
else
@@ -6334,11 +6497,11 @@ void ASTContext::adjustObjCTypeParamBoundType(const ObjCTypeParamDecl *Orig,
ObjCTypeParamDecl *New) const {
New->setTypeSourceInfo(getTrivialTypeSourceInfo(Orig->getUnderlyingType()));
// Update TypeForDecl after updating TypeSourceInfo.
- auto NewTypeParamTy = cast<ObjCTypeParamType>(New->getTypeForDecl());
+ auto *NewTypeParamTy = cast<ObjCTypeParamType>(New->TypeForDecl);
SmallVector<ObjCProtocolDecl *, 8> protocols;
protocols.append(NewTypeParamTy->qual_begin(), NewTypeParamTy->qual_end());
QualType UpdatedTy = getObjCTypeParamType(New, protocols);
- New->setTypeForDecl(UpdatedTy.getTypePtr());
+ New->TypeForDecl = UpdatedTy.getTypePtr();
}
/// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's
@@ -6737,20 +6900,20 @@ QualType ASTContext::getUnconstrainedType(QualType T) const {
}
QualType ASTContext::getDeducedTemplateSpecializationTypeInternal(
- TemplateName Template, QualType DeducedType, bool IsDependent,
- QualType Canon) const {
+ ElaboratedTypeKeyword Keyword, TemplateName Template, QualType DeducedType,
+ bool IsDependent, QualType Canon) const {
// Look in the folding set for an existing type.
void *InsertPos = nullptr;
llvm::FoldingSetNodeID ID;
- DeducedTemplateSpecializationType::Profile(ID, Template, DeducedType,
+ DeducedTemplateSpecializationType::Profile(ID, Keyword, Template, DeducedType,
IsDependent);
if (DeducedTemplateSpecializationType *DTST =
DeducedTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos))
return QualType(DTST, 0);
auto *DTST = new (*this, alignof(DeducedTemplateSpecializationType))
- DeducedTemplateSpecializationType(Template, DeducedType, IsDependent,
- Canon);
+ DeducedTemplateSpecializationType(Keyword, Template, DeducedType,
+ IsDependent, Canon);
#ifndef NDEBUG
llvm::FoldingSetNodeID TempID;
@@ -6766,14 +6929,20 @@ QualType ASTContext::getDeducedTemplateSpecializationTypeInternal(
/// which has been deduced to the given type, or to the canonical undeduced
/// such type, or the canonical deduced-but-dependent such type.
QualType ASTContext::getDeducedTemplateSpecializationType(
- TemplateName Template, QualType DeducedType, bool IsDependent) const {
- QualType Canon = DeducedType.isNull()
- ? getDeducedTemplateSpecializationTypeInternal(
- getCanonicalTemplateName(Template), QualType(),
- IsDependent, QualType())
- : DeducedType.getCanonicalType();
- return getDeducedTemplateSpecializationTypeInternal(Template, DeducedType,
- IsDependent, Canon);
+ ElaboratedTypeKeyword Keyword, TemplateName Template, QualType DeducedType,
+ bool IsDependent) const {
+ // FIXME: This could save an extra hash table lookup if it handled the case
+ // where all the parameters are already canonical.
+ // FIXME: Can this be formed from a DependentTemplateName, such that the
+ // keyword should be part of the canonical type?
+ QualType Canon =
+ DeducedType.isNull()
+ ? getDeducedTemplateSpecializationTypeInternal(
+ ElaboratedTypeKeyword::None, getCanonicalTemplateName(Template),
+ QualType(), IsDependent, QualType())
+ : DeducedType.getCanonicalType();
+ return getDeducedTemplateSpecializationTypeInternal(
+ Keyword, Template, DeducedType, IsDependent, Canon);
}
/// getAtomicType - Return the uniqued reference to the atomic type for
@@ -6823,15 +6992,6 @@ QualType ASTContext::getAutoRRefDeductType() const {
return AutoRRefDeductTy;
}
-/// getTagDeclType - Return the unique reference to the type for the
-/// specified TagDecl (struct/union/class/enum) decl.
-QualType ASTContext::getTagDeclType(const TagDecl *Decl) const {
- assert(Decl);
- // FIXME: What is the design on getTagDeclType when it requires casting
- // away const? mutable?
- return getTypeDeclType(const_cast<TagDecl*>(Decl));
-}
-
/// getSizeType - Return the unique type for "size_t" (C99 7.17), the result
/// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and
/// needs to agree with the definition in <stddef.h>.
@@ -7053,8 +7213,8 @@ bool ASTContext::UnwrapSimilarTypes(QualType &T1, QualType &T2,
*RD2 = T2MPType->getMostRecentCXXRecordDecl();
RD1 != RD2 && RD1->getCanonicalDecl() != RD2->getCanonicalDecl())
return false;
- if (getCanonicalNestedNameSpecifier(T1MPType->getQualifier()) !=
- getCanonicalNestedNameSpecifier(T2MPType->getQualifier()))
+ if (T1MPType->getQualifier().getCanonical() !=
+ T2MPType->getQualifier().getCanonical())
return false;
T1 = T1MPType->getPointeeType();
T2 = T2MPType->getPointeeType();
@@ -7211,9 +7371,8 @@ TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name,
case TemplateName::DependentTemplate: {
DependentTemplateName *DTN = Name.getAsDependentTemplateName();
assert(DTN && "Non-dependent template names must refer to template decls.");
- NestedNameSpecifier *Qualifier = DTN->getQualifier();
- NestedNameSpecifier *CanonQualifier =
- getCanonicalNestedNameSpecifier(Qualifier);
+ NestedNameSpecifier Qualifier = DTN->getQualifier();
+ NestedNameSpecifier CanonQualifier = Qualifier.getCanonical();
if (Qualifier != CanonQualifier || !DTN->hasTemplateKeyword())
return getDependentTemplateName({CanonQualifier, DTN->getName(),
/*HasTemplateKeyword=*/true});
@@ -7430,38 +7589,40 @@ bool ASTContext::isSameDefaultTemplateArgument(const NamedDecl *X,
return hasSameTemplateName(TAX.getAsTemplate(), TAY.getAsTemplate());
}
-static bool isSameQualifier(const NestedNameSpecifier *X,
- const NestedNameSpecifier *Y) {
- if (X->getKind() != Y->getKind())
+static bool isSameQualifier(const NestedNameSpecifier X,
+ const NestedNameSpecifier Y) {
+ if (X == Y)
+ return true;
+ if (!X || !Y)
+ return false;
+
+ auto Kind = X.getKind();
+ if (Kind != Y.getKind())
return false;
// FIXME: For namespaces and types, we're permitted to check that the entity
// is named via the same tokens. We should probably do so.
- switch (X->getKind()) {
- case NestedNameSpecifier::Identifier:
- if (X->getAsIdentifier() != Y->getAsIdentifier())
- return false;
- break;
- case NestedNameSpecifier::Namespace:
- if (!declaresSameEntity(X->getAsNamespace(), Y->getAsNamespace()))
+ switch (Kind) {
+ case NestedNameSpecifier::Kind::Namespace: {
+ auto [NamespaceX, PrefixX] = X.getAsNamespaceAndPrefix();
+ auto [NamespaceY, PrefixY] = Y.getAsNamespaceAndPrefix();
+ if (!declaresSameEntity(NamespaceX->getNamespace(),
+ NamespaceY->getNamespace()))
return false;
- break;
- case NestedNameSpecifier::TypeSpec:
- if (X->getAsType()->getCanonicalTypeInternal() !=
- Y->getAsType()->getCanonicalTypeInternal())
+ return isSameQualifier(PrefixX, PrefixY);
+ }
+ case NestedNameSpecifier::Kind::Type: {
+ const auto *TX = X.getAsType(), *TY = Y.getAsType();
+ if (TX->getCanonicalTypeInternal() != TY->getCanonicalTypeInternal())
return false;
- break;
- case NestedNameSpecifier::Global:
- case NestedNameSpecifier::Super:
+ return isSameQualifier(TX->getPrefix(), TY->getPrefix());
+ }
+ case NestedNameSpecifier::Kind::Null:
+ case NestedNameSpecifier::Kind::Global:
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
return true;
}
-
- // Recurse into earlier portion of NNS, if any.
- auto *PX = X->getPrefix();
- auto *PY = Y->getPrefix();
- if (PX && PY)
- return isSameQualifier(PX, PY);
- return !PX && !PY;
+ llvm_unreachable("unhandled qualifier kind");
}
static bool hasSameCudaAttrs(const FunctionDecl *A, const FunctionDecl *B) {
@@ -7854,63 +8015,6 @@ bool ASTContext::isSameTemplateArgument(const TemplateArgument &Arg1,
llvm_unreachable("Unhandled template argument kind");
}
-NestedNameSpecifier *
-ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const {
- if (!NNS)
- return nullptr;
-
- switch (NNS->getKind()) {
- case NestedNameSpecifier::Identifier:
- // Canonicalize the prefix but keep the identifier the same.
- return NestedNameSpecifier::Create(*this,
- getCanonicalNestedNameSpecifier(NNS->getPrefix()),
- NNS->getAsIdentifier());
-
- case NestedNameSpecifier::Namespace:
- // A namespace is canonical; build a nested-name-specifier with
- // this namespace and no prefix.
- return NestedNameSpecifier::Create(
- *this, nullptr, NNS->getAsNamespace()->getNamespace()->getFirstDecl());
-
- // The difference between TypeSpec and TypeSpecWithTemplate is that the
- // latter will have the 'template' keyword when printed.
- case NestedNameSpecifier::TypeSpec: {
- const Type *T = getCanonicalType(NNS->getAsType());
-
- // If we have some kind of dependent-named type (e.g., "typename T::type"),
- // break it apart into its prefix and identifier, then reconsititute those
- // as the canonical nested-name-specifier. This is required to canonicalize
- // a dependent nested-name-specifier involving typedefs of dependent-name
- // types, e.g.,
- // typedef typename T::type T1;
- // typedef typename T1::type T2;
- if (const auto *DNT = T->getAs<DependentNameType>())
- return NestedNameSpecifier::Create(*this, DNT->getQualifier(),
- DNT->getIdentifier());
- if (const auto *DTST = T->getAs<DependentTemplateSpecializationType>()) {
- const DependentTemplateStorage &DTN = DTST->getDependentTemplateName();
- QualType NewT = getDependentTemplateSpecializationType(
- ElaboratedTypeKeyword::None,
- {/*NNS=*/nullptr, DTN.getName(), /*HasTemplateKeyword=*/true},
- DTST->template_arguments(), /*IsCanonical=*/true);
- assert(NewT.isCanonical());
- NestedNameSpecifier *Prefix = DTN.getQualifier();
- if (!Prefix)
- Prefix = getCanonicalNestedNameSpecifier(NNS->getPrefix());
- return NestedNameSpecifier::Create(*this, Prefix, NewT.getTypePtr());
- }
- return NestedNameSpecifier::Create(*this, nullptr, T);
- }
-
- case NestedNameSpecifier::Global:
- case NestedNameSpecifier::Super:
- // The global specifier and __super specifer are canonical and unique.
- return NNS;
- }
-
- llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
-}
-
const ArrayType *ASTContext::getAsArrayType(QualType T) const {
// Handle the non-qualified case efficiently.
if (!T.hasLocalQualifiers()) {
@@ -8229,7 +8333,7 @@ QualType ASTContext::getPromotedIntegerType(QualType Promotable) const {
assert(!Promotable.isNull());
assert(isPromotableIntegerType(Promotable));
if (const auto *ET = Promotable->getAs<EnumType>())
- return ET->getDecl()->getPromotionType();
+ return ET->getOriginalDecl()->getDefinitionOrSelf()->getPromotionType();
if (const auto *BT = Promotable->getAs<BuiltinType>()) {
// C++ [conv.prom]: A prvalue of type char16_t, char32_t, or wchar_t
@@ -8288,8 +8392,9 @@ Qualifiers::ObjCLifetime ASTContext::getInnerObjCOwnership(QualType T) const {
static const Type *getIntegerTypeForEnum(const EnumType *ET) {
// Incomplete enum types are not treated as integer types.
// FIXME: In C++, enum types are never integer types.
- if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped())
- return ET->getDecl()->getIntegerType().getTypePtr();
+ const EnumDecl *ED = ET->getOriginalDecl()->getDefinitionOrSelf();
+ if (ED->isComplete() && !ED->isScoped())
+ return ED->getIntegerType().getTypePtr();
return nullptr;
}
@@ -8418,7 +8523,7 @@ TypedefDecl *ASTContext::getCFConstantStringDecl() const {
CFConstantStringTagDecl->completeDefinition();
// This type is designed to be compatible with NSConstantString, but cannot
// use the same name, since NSConstantString is an interface.
- auto tagType = getTagDeclType(CFConstantStringTagDecl);
+ CanQualType tagType = getCanonicalTagType(CFConstantStringTagDecl);
CFConstantStringTypeDecl =
buildImplicitTypedef(tagType, "__NSConstantString");
@@ -8433,14 +8538,15 @@ RecordDecl *ASTContext::getCFConstantStringTagDecl() const {
// getCFConstantStringType - Return the type used for constant CFStrings.
QualType ASTContext::getCFConstantStringType() const {
- return getTypedefType(getCFConstantStringDecl());
+ return getTypedefType(ElaboratedTypeKeyword::None, /*Qualifier=*/std::nullopt,
+ getCFConstantStringDecl());
}
QualType ASTContext::getObjCSuperType() const {
if (ObjCSuperType.isNull()) {
RecordDecl *ObjCSuperTypeDecl = buildImplicitRecord("objc_super");
getTranslationUnitDecl()->addDecl(ObjCSuperTypeDecl);
- ObjCSuperType = getTagDeclType(ObjCSuperTypeDecl);
+ ObjCSuperType = getCanonicalTagType(ObjCSuperTypeDecl);
}
return ObjCSuperType;
}
@@ -8449,12 +8555,12 @@ void ASTContext::setCFConstantStringType(QualType T) {
const auto *TD = T->castAs<TypedefType>();
CFConstantStringTypeDecl = cast<TypedefDecl>(TD->getDecl());
const auto *TagType = TD->castAs<RecordType>();
- CFConstantStringTagDecl = TagType->getDecl();
+ CFConstantStringTagDecl = TagType->getOriginalDecl()->getDefinitionOrSelf();
}
QualType ASTContext::getBlockDescriptorType() const {
if (BlockDescriptorType)
- return getTagDeclType(BlockDescriptorType);
+ return getCanonicalTagType(BlockDescriptorType);
RecordDecl *RD;
// FIXME: Needs the FlagAppleBlock bit.
@@ -8484,12 +8590,12 @@ QualType ASTContext::getBlockDescriptorType() const {
BlockDescriptorType = RD;
- return getTagDeclType(BlockDescriptorType);
+ return getCanonicalTagType(BlockDescriptorType);
}
QualType ASTContext::getBlockDescriptorExtendedType() const {
if (BlockDescriptorExtendedType)
- return getTagDeclType(BlockDescriptorExtendedType);
+ return getCanonicalTagType(BlockDescriptorExtendedType);
RecordDecl *RD;
// FIXME: Needs the FlagAppleBlock bit.
@@ -8523,7 +8629,7 @@ QualType ASTContext::getBlockDescriptorExtendedType() const {
RD->completeDefinition();
BlockDescriptorExtendedType = RD;
- return getTagDeclType(BlockDescriptorExtendedType);
+ return getCanonicalTagType(BlockDescriptorExtendedType);
}
OpenCLTypeKind ASTContext::getOpenCLTypeKind(const Type *T) const {
@@ -9161,7 +9267,7 @@ static char getObjCEncodingForPrimitiveType(const ASTContext *C,
}
static char ObjCEncodingForEnumType(const ASTContext *C, const EnumType *ET) {
- EnumDecl *Enum = ET->getDecl();
+ EnumDecl *Enum = ET->getOriginalDecl()->getDefinitionOrSelf();
// The encoding of an non-fixed enum type is always 'i', regardless of size.
if (!Enum->isFixed())
@@ -9330,13 +9436,14 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S,
return;
}
} else if (const auto *RTy = PointeeTy->getAs<RecordType>()) {
+ const IdentifierInfo *II = RTy->getOriginalDecl()->getIdentifier();
// GCC binary compat: Need to convert "struct objc_class *" to "#".
- if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_class")) {
+ if (II == &Idents.get("objc_class")) {
S += '#';
return;
}
// GCC binary compat: Need to convert "struct objc_object *" to "@".
- if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_object")) {
+ if (II == &Idents.get("objc_object")) {
S += '@';
return;
}
@@ -9401,7 +9508,7 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S,
return;
case Type::Record: {
- RecordDecl *RDecl = cast<RecordType>(CT)->getDecl();
+ RecordDecl *RDecl = cast<RecordType>(CT)->getOriginalDecl();
S += RDecl->isUnion() ? '(' : '{';
// Anonymous structures print as '?'
if (const IdentifierInfo *II = RDecl->getIdentifier()) {
@@ -9900,7 +10007,7 @@ CreateAArch64ABIBuiltinVaListDecl(const ASTContext *Context) {
}
VaListTagDecl->completeDefinition();
Context->VaListTagDecl = VaListTagDecl;
- QualType VaListTagType = Context->getRecordType(VaListTagDecl);
+ CanQualType VaListTagType = Context->getCanonicalTagType(VaListTagDecl);
// } __builtin_va_list;
return Context->buildImplicitTypedef(VaListTagType, "__builtin_va_list");
@@ -9952,14 +10059,15 @@ static TypedefDecl *CreatePowerABIBuiltinVaListDecl(const ASTContext *Context) {
}
VaListTagDecl->completeDefinition();
Context->VaListTagDecl = VaListTagDecl;
- QualType VaListTagType = Context->getRecordType(VaListTagDecl);
+ CanQualType VaListTagType = Context->getCanonicalTagType(VaListTagDecl);
// } __va_list_tag;
TypedefDecl *VaListTagTypedefDecl =
Context->buildImplicitTypedef(VaListTagType, "__va_list_tag");
QualType VaListTagTypedefType =
- Context->getTypedefType(VaListTagTypedefDecl);
+ Context->getTypedefType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, VaListTagTypedefDecl);
// typedef __va_list_tag __builtin_va_list[1];
llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1);
@@ -10011,7 +10119,7 @@ CreateX86_64ABIBuiltinVaListDecl(const ASTContext *Context) {
}
VaListTagDecl->completeDefinition();
Context->VaListTagDecl = VaListTagDecl;
- QualType VaListTagType = Context->getRecordType(VaListTagDecl);
+ CanQualType VaListTagType = Context->getCanonicalTagType(VaListTagDecl);
// };
@@ -10059,7 +10167,7 @@ CreateAAPCSABIBuiltinVaListDecl(const ASTContext *Context) {
Context->VaListTagDecl = VaListDecl;
// typedef struct __va_list __builtin_va_list;
- QualType T = Context->getRecordType(VaListDecl);
+ CanQualType T = Context->getCanonicalTagType(VaListDecl);
return Context->buildImplicitTypedef(T, "__builtin_va_list");
}
@@ -10106,7 +10214,7 @@ CreateSystemZBuiltinVaListDecl(const ASTContext *Context) {
}
VaListTagDecl->completeDefinition();
Context->VaListTagDecl = VaListTagDecl;
- QualType VaListTagType = Context->getRecordType(VaListTagDecl);
+ CanQualType VaListTagType = Context->getCanonicalTagType(VaListTagDecl);
// };
@@ -10153,13 +10261,15 @@ static TypedefDecl *CreateHexagonBuiltinVaListDecl(const ASTContext *Context) {
}
VaListTagDecl->completeDefinition();
Context->VaListTagDecl = VaListTagDecl;
- QualType VaListTagType = Context->getRecordType(VaListTagDecl);
+ CanQualType VaListTagType = Context->getCanonicalTagType(VaListTagDecl);
// } __va_list_tag;
TypedefDecl *VaListTagTypedefDecl =
Context->buildImplicitTypedef(VaListTagType, "__va_list_tag");
- QualType VaListTagTypedefType = Context->getTypedefType(VaListTagTypedefDecl);
+ QualType VaListTagTypedefType =
+ Context->getTypedefType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, VaListTagTypedefDecl);
// typedef __va_list_tag __builtin_va_list[1];
llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1);
@@ -10197,7 +10307,7 @@ CreateXtensaABIBuiltinVaListDecl(const ASTContext *Context) {
}
VaListTagDecl->completeDefinition();
Context->VaListTagDecl = VaListTagDecl;
- QualType VaListTagType = Context->getRecordType(VaListTagDecl);
+ CanQualType VaListTagType = Context->getCanonicalTagType(VaListTagDecl);
// } __va_list_tag;
TypedefDecl *VaListTagTypedefDecl =
@@ -10311,7 +10421,7 @@ TemplateName ASTContext::getAssumedTemplateName(DeclarationName Name) const {
/// Retrieve the template name that represents a qualified
/// template name such as \c std::vector.
-TemplateName ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS,
+TemplateName ASTContext::getQualifiedTemplateName(NestedNameSpecifier Qualifier,
bool TemplateKeyword,
TemplateName Template) const {
assert(Template.getKind() == TemplateName::Template ||
@@ -10319,14 +10429,14 @@ TemplateName ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS,
// FIXME: Canonicalization?
llvm::FoldingSetNodeID ID;
- QualifiedTemplateName::Profile(ID, NNS, TemplateKeyword, Template);
+ QualifiedTemplateName::Profile(ID, Qualifier, TemplateKeyword, Template);
void *InsertPos = nullptr;
QualifiedTemplateName *QTN =
- QualifiedTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
+ QualifiedTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
if (!QTN) {
QTN = new (*this, alignof(QualifiedTemplateName))
- QualifiedTemplateName(NNS, TemplateKeyword, Template);
+ QualifiedTemplateName(Qualifier, TemplateKeyword, Template);
QualifiedTemplateNames.InsertNode(QTN, InsertPos);
}
@@ -11296,7 +11406,7 @@ QualType ASTContext::mergeTransparentUnionType(QualType T, QualType SubType,
bool OfBlockPointer,
bool Unqualified) {
if (const RecordType *UT = T->getAsUnionType()) {
- RecordDecl *UD = UT->getDecl();
+ RecordDecl *UD = UT->getOriginalDecl()->getMostRecentDecl();
if (UD->hasAttr<TransparentUnionAttr>()) {
for (const auto *I : UD->fields()) {
QualType ET = I->getType().getUnqualifiedType();
@@ -11528,7 +11638,8 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
// Look at the converted type of enum types, since that is the type used
// to pass enum values.
if (const auto *Enum = paramTy->getAs<EnumType>()) {
- paramTy = Enum->getDecl()->getIntegerType();
+ paramTy =
+ Enum->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
if (paramTy.isNull())
return {};
}
@@ -11560,7 +11671,8 @@ static QualType mergeEnumWithInteger(ASTContext &Context, const EnumType *ET,
// a signed integer type, or an unsigned integer type.
// Compatibility is based on the underlying type, not the promotion
// type.
- QualType underlyingType = ET->getDecl()->getIntegerType();
+ QualType underlyingType =
+ ET->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
if (underlyingType.isNull())
return {};
if (Context.hasSameType(underlyingType, other))
@@ -12119,7 +12231,7 @@ QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) {
unsigned ASTContext::getIntWidth(QualType T) const {
if (const auto *ET = T->getAs<EnumType>())
- T = ET->getDecl()->getIntegerType();
+ T = ET->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
if (T->isBooleanType())
return 1;
if (const auto *EIT = T->getAs<BitIntType>())
@@ -12145,7 +12257,7 @@ QualType ASTContext::getCorrespondingUnsignedType(QualType T) const {
// For enums, get the underlying integer type of the enum, and let the general
// integer type signchanging code handle it.
if (const auto *ETy = T->getAs<EnumType>())
- T = ETy->getDecl()->getIntegerType();
+ T = ETy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
switch (T->castAs<BuiltinType>()->getKind()) {
case BuiltinType::Char_U:
@@ -12219,7 +12331,7 @@ QualType ASTContext::getCorrespondingSignedType(QualType T) const {
// For enums, get the underlying integer type of the enum, and let the general
// integer type signchanging code handle it.
if (const auto *ETy = T->getAs<EnumType>())
- T = ETy->getDecl()->getIntegerType();
+ T = ETy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
switch (T->castAs<BuiltinType>()->getKind()) {
case BuiltinType::Char_S:
@@ -13573,7 +13685,7 @@ static T *getCommonDeclChecked(T *X, T *Y) {
const_cast<Decl *>(cast<Decl>(Y))));
}
-static TemplateName getCommonTemplateName(ASTContext &Ctx, TemplateName X,
+static TemplateName getCommonTemplateName(const ASTContext &Ctx, TemplateName X,
TemplateName Y,
bool IgnoreDeduced = false) {
if (X.getAsVoidPointer() == Y.getAsVoidPointer())
@@ -13588,7 +13700,7 @@ static TemplateName getCommonTemplateName(ASTContext &Ctx, TemplateName X,
return CX;
}
-static TemplateName getCommonTemplateNameChecked(ASTContext &Ctx,
+static TemplateName getCommonTemplateNameChecked(const ASTContext &Ctx,
TemplateName X, TemplateName Y,
bool IgnoreDeduced) {
TemplateName R = getCommonTemplateName(Ctx, X, Y, IgnoreDeduced);
@@ -13596,7 +13708,7 @@ static TemplateName getCommonTemplateNameChecked(ASTContext &Ctx,
return R;
}
-static auto getCommonTypes(ASTContext &Ctx, ArrayRef<QualType> Xs,
+static auto getCommonTypes(const ASTContext &Ctx, ArrayRef<QualType> Xs,
ArrayRef<QualType> Ys, bool Unqualified = false) {
assert(Xs.size() == Ys.size());
SmallVector<QualType, 8> Rs(Xs.size());
@@ -13611,7 +13723,7 @@ static SourceLocation getCommonAttrLoc(const T *X, const T *Y) {
: SourceLocation();
}
-static TemplateArgument getCommonTemplateArgument(ASTContext &Ctx,
+static TemplateArgument getCommonTemplateArgument(const ASTContext &Ctx,
const TemplateArgument &X,
const TemplateArgument &Y) {
if (X.getKind() != Y.getKind())
@@ -13657,7 +13769,7 @@ static TemplateArgument getCommonTemplateArgument(ASTContext &Ctx,
}
}
-static bool getCommonTemplateArguments(ASTContext &Ctx,
+static bool getCommonTemplateArguments(const ASTContext &Ctx,
SmallVectorImpl<TemplateArgument> &R,
ArrayRef<TemplateArgument> Xs,
ArrayRef<TemplateArgument> Ys) {
@@ -13672,7 +13784,7 @@ static bool getCommonTemplateArguments(ASTContext &Ctx,
return false;
}
-static auto getCommonTemplateArguments(ASTContext &Ctx,
+static auto getCommonTemplateArguments(const ASTContext &Ctx,
ArrayRef<TemplateArgument> Xs,
ArrayRef<TemplateArgument> Ys) {
SmallVector<TemplateArgument, 8> R;
@@ -13683,163 +13795,101 @@ static auto getCommonTemplateArguments(ASTContext &Ctx,
}
template <class T>
-static ElaboratedTypeKeyword getCommonTypeKeyword(const T *X, const T *Y) {
- return X->getKeyword() == Y->getKeyword() ? X->getKeyword()
- : ElaboratedTypeKeyword::None;
+static ElaboratedTypeKeyword getCommonTypeKeyword(const T *X, const T *Y,
+ bool IsSame) {
+ ElaboratedTypeKeyword KX = X->getKeyword(), KY = Y->getKeyword();
+ if (KX == KY)
+ return KX;
+ KX = getCanonicalElaboratedTypeKeyword(KX);
+ assert(!IsSame || KX == getCanonicalElaboratedTypeKeyword(KY));
+ return KX;
}
/// Returns a NestedNameSpecifier which has only the common sugar
/// present in both NNS1 and NNS2.
-static NestedNameSpecifier *getCommonNNS(ASTContext &Ctx,
- NestedNameSpecifier *NNS1,
- NestedNameSpecifier *NNS2,
- bool IsSame) {
+static NestedNameSpecifier getCommonNNS(const ASTContext &Ctx,
+ NestedNameSpecifier NNS1,
+ NestedNameSpecifier NNS2, bool IsSame) {
// If they are identical, all sugar is common.
if (NNS1 == NNS2)
return NNS1;
- // IsSame implies both NNSes are equivalent.
- NestedNameSpecifier *Canon = Ctx.getCanonicalNestedNameSpecifier(NNS1);
- if (Canon != Ctx.getCanonicalNestedNameSpecifier(NNS2)) {
+ // IsSame implies both Qualifiers are equivalent.
+ NestedNameSpecifier Canon = NNS1.getCanonical();
+ if (Canon != NNS2.getCanonical()) {
assert(!IsSame && "Should be the same NestedNameSpecifier");
// If they are not the same, there is nothing to unify.
- // FIXME: It would be useful here if we could represent a canonically
- // empty NNS, which is not identical to an empty-as-written NNS.
- return nullptr;
+ return std::nullopt;
}
- NestedNameSpecifier *R = nullptr;
- NestedNameSpecifier::SpecifierKind K1 = NNS1->getKind(), K2 = NNS2->getKind();
- switch (K1) {
- case NestedNameSpecifier::SpecifierKind::Identifier: {
- assert(K2 == NestedNameSpecifier::SpecifierKind::Identifier);
- IdentifierInfo *II = NNS1->getAsIdentifier();
- assert(II == NNS2->getAsIdentifier());
- // For an identifier, the prefixes are significant, so they must be the
- // same.
- NestedNameSpecifier *P = ::getCommonNNS(Ctx, NNS1->getPrefix(),
- NNS2->getPrefix(), /*IsSame=*/true);
- R = NestedNameSpecifier::Create(Ctx, P, II);
- break;
- }
- case NestedNameSpecifier::SpecifierKind::Namespace: {
- assert(K2 == NestedNameSpecifier::SpecifierKind::Namespace);
- // The prefixes for namespaces are not significant, its declaration
- // identifies it uniquely.
- NestedNameSpecifier *P =
- ::getCommonNNS(Ctx, NNS1->getPrefix(), NNS2->getPrefix(),
- /*IsSame=*/false);
- NamespaceBaseDecl *Namespace1 = NNS1->getAsNamespace(),
- *Namespace2 = NNS2->getAsNamespace();
+ NestedNameSpecifier R = std::nullopt;
+ NestedNameSpecifier::Kind Kind = NNS1.getKind();
+ assert(Kind == NNS2.getKind());
+ switch (Kind) {
+ case NestedNameSpecifier::Kind::Namespace: {
+ auto [Namespace1, Prefix1] = NNS1.getAsNamespaceAndPrefix();
+ auto [Namespace2, Prefix2] = NNS2.getAsNamespaceAndPrefix();
auto Kind = Namespace1->getKind();
if (Kind != Namespace2->getKind() ||
(Kind == Decl::NamespaceAlias &&
!declaresSameEntity(Namespace1, Namespace2))) {
- R = NestedNameSpecifier::Create(
- Ctx, P,
+ R = NestedNameSpecifier(
+ Ctx,
::getCommonDeclChecked(Namespace1->getNamespace(),
- Namespace2->getNamespace()));
+ Namespace2->getNamespace()),
+ /*Prefix=*/std::nullopt);
break;
}
- R = NestedNameSpecifier::Create(
- Ctx, P, ::getCommonDeclChecked(Namespace1, Namespace2));
+ // The prefixes for namespaces are not significant; its declaration
+ // identifies it uniquely.
+ NestedNameSpecifier Prefix = ::getCommonNNS(Ctx, Prefix1, Prefix2,
+ /*IsSame=*/false);
+ R = NestedNameSpecifier(Ctx, ::getCommonDeclChecked(Namespace1, Namespace2),
+ Prefix);
break;
}
- case NestedNameSpecifier::SpecifierKind::TypeSpec: {
- // FIXME: See comment below, on Super case.
- if (K2 == NestedNameSpecifier::SpecifierKind::Super)
- return Ctx.getCanonicalNestedNameSpecifier(NNS1);
-
- assert(K2 == NestedNameSpecifier::SpecifierKind::TypeSpec);
-
- const Type *T1 = NNS1->getAsType(), *T2 = NNS2->getAsType();
- if (T1 == T2) {
- // If the types are indentical, then only the prefixes differ.
- // A well-formed NNS never has these types, as they have
- // special normalized forms.
- assert((!isa<DependentNameType, ElaboratedType>(T1)));
- // Only for a DependentTemplateSpecializationType the prefix
- // is actually significant. A DependentName, which would be another
- // plausible case, cannot occur here, as explained above.
- bool IsSame = isa<DependentTemplateSpecializationType>(T1);
- NestedNameSpecifier *P =
- ::getCommonNNS(Ctx, NNS1->getPrefix(), NNS2->getPrefix(), IsSame);
- R = NestedNameSpecifier::Create(Ctx, P, T1);
- break;
- }
- // TODO: Try to salvage the original prefix.
- // If getCommonSugaredType removed any top level sugar, the original prefix
- // is not applicable anymore.
+ case NestedNameSpecifier::Kind::Type: {
+ const Type *T1 = NNS1.getAsType(), *T2 = NNS2.getAsType();
const Type *T = Ctx.getCommonSugaredType(QualType(T1, 0), QualType(T2, 0),
/*Unqualified=*/true)
.getTypePtr();
-
- // A NestedNameSpecifier has special normalization rules for certain types.
- switch (T->getTypeClass()) {
- case Type::Elaborated: {
- // An ElaboratedType is stripped off, it's Qualifier becomes the prefix.
- auto *ET = cast<ElaboratedType>(T);
- R = NestedNameSpecifier::Create(Ctx, ET->getQualifier(),
- ET->getNamedType().getTypePtr());
- break;
- }
- case Type::DependentName: {
- // A DependentName is turned into an Identifier NNS.
- auto *DN = cast<DependentNameType>(T);
- R = NestedNameSpecifier::Create(Ctx, DN->getQualifier(),
- DN->getIdentifier());
- break;
- }
- case Type::DependentTemplateSpecialization: {
- // A DependentTemplateSpecializationType loses it's Qualifier, which
- // is turned into the prefix.
- auto *DTST = cast<DependentTemplateSpecializationType>(T);
- const DependentTemplateStorage &DTN = DTST->getDependentTemplateName();
- DependentTemplateStorage NewDTN(/*Qualifier=*/nullptr, DTN.getName(),
- DTN.hasTemplateKeyword());
- T = Ctx.getDependentTemplateSpecializationType(DTST->getKeyword(), NewDTN,
- DTST->template_arguments())
- .getTypePtr();
- R = NestedNameSpecifier::Create(Ctx, DTN.getQualifier(), T);
- break;
- }
- default:
- R = NestedNameSpecifier::Create(Ctx, /*Prefix=*/nullptr, T);
- break;
- }
+ R = NestedNameSpecifier(T);
break;
}
- case NestedNameSpecifier::SpecifierKind::Super:
+ case NestedNameSpecifier::Kind::MicrosoftSuper: {
// FIXME: Can __super even be used with data members?
// If it's only usable in functions, we will never see it here,
// unless we save the qualifiers used in function types.
// In that case, it might be possible NNS2 is a type,
// in which case we should degrade the result to
// a CXXRecordType.
- return Ctx.getCanonicalNestedNameSpecifier(NNS1);
- case NestedNameSpecifier::SpecifierKind::Global:
- // The global NNS is a singleton.
- assert(K2 == NestedNameSpecifier::SpecifierKind::Global &&
- "Global NNS cannot be equivalent to any other kind");
- llvm_unreachable("Global NestedNameSpecifiers did not compare equal");
- }
- assert(Ctx.getCanonicalNestedNameSpecifier(R) == Canon);
+ R = NestedNameSpecifier(getCommonDeclChecked(NNS1.getAsMicrosoftSuper(),
+ NNS2.getAsMicrosoftSuper()));
+ break;
+ }
+ case NestedNameSpecifier::Kind::Null:
+ case NestedNameSpecifier::Kind::Global:
+ // These are singletons.
+ llvm_unreachable("singletons did not compare equal");
+ }
+ assert(R.getCanonical() == Canon);
return R;
}
template <class T>
-static NestedNameSpecifier *getCommonQualifier(ASTContext &Ctx, const T *X,
- const T *Y, bool IsSame) {
+static NestedNameSpecifier getCommonQualifier(const ASTContext &Ctx, const T *X,
+ const T *Y, bool IsSame) {
return ::getCommonNNS(Ctx, X->getQualifier(), Y->getQualifier(), IsSame);
}
template <class T>
-static QualType getCommonElementType(ASTContext &Ctx, const T *X, const T *Y) {
+static QualType getCommonElementType(const ASTContext &Ctx, const T *X,
+ const T *Y) {
return Ctx.getCommonSugaredType(X->getElementType(), Y->getElementType());
}
template <class T>
-static QualType getCommonArrayElementType(ASTContext &Ctx, const T *X,
+static QualType getCommonArrayElementType(const ASTContext &Ctx, const T *X,
Qualifiers &QX, const T *Y,
Qualifiers &QY) {
QualType EX = X->getElementType(), EY = Y->getElementType();
@@ -13856,11 +13906,13 @@ static QualType getCommonArrayElementType(ASTContext &Ctx, const T *X,
}
template <class T>
-static QualType getCommonPointeeType(ASTContext &Ctx, const T *X, const T *Y) {
+static QualType getCommonPointeeType(const ASTContext &Ctx, const T *X,
+ const T *Y) {
return Ctx.getCommonSugaredType(X->getPointeeType(), Y->getPointeeType());
}
-template <class T> static auto *getCommonSizeExpr(ASTContext &Ctx, T *X, T *Y) {
+template <class T>
+static auto *getCommonSizeExpr(const ASTContext &Ctx, T *X, T *Y) {
assert(Ctx.hasSameExpr(X->getSizeExpr(), Y->getSizeExpr()));
return X->getSizeExpr();
}
@@ -13880,8 +13932,9 @@ static auto getCommonIndexTypeCVRQualifiers(const ArrayType *X,
// each type (in a canonical sense) only once, in the order they appear
// from X to Y. If they occur in both X and Y, the result will contain
// the common sugared type between them.
-static void mergeTypeLists(ASTContext &Ctx, SmallVectorImpl<QualType> &Out,
- ArrayRef<QualType> X, ArrayRef<QualType> Y) {
+static void mergeTypeLists(const ASTContext &Ctx,
+ SmallVectorImpl<QualType> &Out, ArrayRef<QualType> X,
+ ArrayRef<QualType> Y) {
llvm::DenseMap<QualType, unsigned> Found;
for (auto Ts : {X, Y}) {
for (QualType T : Ts) {
@@ -13900,7 +13953,7 @@ FunctionProtoType::ExceptionSpecInfo
ASTContext::mergeExceptionSpecs(FunctionProtoType::ExceptionSpecInfo ESI1,
FunctionProtoType::ExceptionSpecInfo ESI2,
SmallVectorImpl<QualType> &ExceptionTypeStorage,
- bool AcceptDependent) {
+ bool AcceptDependent) const {
ExceptionSpecificationType EST1 = ESI1.Type, EST2 = ESI2.Type;
// If either of them can throw anything, that is the result.
@@ -13964,7 +14017,7 @@ ASTContext::mergeExceptionSpecs(FunctionProtoType::ExceptionSpecInfo ESI1,
llvm_unreachable("invalid ExceptionSpecificationType");
}
-static QualType getCommonNonSugarTypeNode(ASTContext &Ctx, const Type *X,
+static QualType getCommonNonSugarTypeNode(const ASTContext &Ctx, const Type *X,
Qualifiers &QX, const Type *Y,
Qualifiers &QY) {
Type::TypeClass TC = X->getTypeClass();
@@ -13982,10 +14035,8 @@ static QualType getCommonNonSugarTypeNode(ASTContext &Ctx, const Type *X,
SUGAR_FREE_TYPE(Builtin)
SUGAR_FREE_TYPE(DeducedTemplateSpecialization)
SUGAR_FREE_TYPE(DependentBitInt)
- SUGAR_FREE_TYPE(Enum)
SUGAR_FREE_TYPE(BitInt)
SUGAR_FREE_TYPE(ObjCInterface)
- SUGAR_FREE_TYPE(Record)
SUGAR_FREE_TYPE(SubstTemplateTypeParmPack)
SUGAR_FREE_TYPE(UnresolvedUsing)
SUGAR_FREE_TYPE(HLSLAttributedResource)
@@ -14203,13 +14254,15 @@ static QualType getCommonNonSugarTypeNode(ASTContext &Ctx, const Type *X,
getCommonElementType(Ctx, VX, VY), getCommonSizeExpr(Ctx, VX, VY),
getCommonAttrLoc(VX, VY), VX->getVectorKind());
}
+ case Type::Enum:
+ case Type::Record:
case Type::InjectedClassName: {
- const auto *IX = cast<InjectedClassNameType>(X),
- *IY = cast<InjectedClassNameType>(Y);
- return Ctx.getInjectedClassNameType(
- getCommonDeclChecked(IX->getDecl(), IY->getDecl()),
- Ctx.getCommonSugaredType(IX->getInjectedSpecializationType(),
- IY->getInjectedSpecializationType()));
+ const auto *TX = cast<TagType>(X), *TY = cast<TagType>(Y);
+ return Ctx.getTagType(
+ ::getCommonTypeKeyword(TX, TY, /*IsSame=*/false),
+ ::getCommonQualifier(Ctx, TX, TY, /*IsSame=*/false),
+ ::getCommonDeclChecked(TX->getOriginalDecl(), TY->getOriginalDecl()),
+ /*OwnedTag=*/false);
}
case Type::TemplateSpecialization: {
const auto *TX = cast<TemplateSpecializationType>(X),
@@ -14217,6 +14270,7 @@ static QualType getCommonNonSugarTypeNode(ASTContext &Ctx, const Type *X,
auto As = getCommonTemplateArguments(Ctx, TX->template_arguments(),
TY->template_arguments());
return Ctx.getTemplateSpecializationType(
+ getCommonTypeKeyword(TX, TY, /*IsSame=*/false),
::getCommonTemplateNameChecked(Ctx, TX->getTemplateName(),
TY->getTemplateName(),
/*IgnoreDeduced=*/true),
@@ -14244,7 +14298,7 @@ static QualType getCommonNonSugarTypeNode(ASTContext &Ctx, const Type *X,
*NY = cast<DependentNameType>(Y);
assert(NX->getIdentifier() == NY->getIdentifier());
return Ctx.getDependentNameType(
- getCommonTypeKeyword(NX, NY),
+ getCommonTypeKeyword(NX, NY, /*IsSame=*/true),
getCommonQualifier(Ctx, NX, NY, /*IsSame=*/true), NX->getIdentifier());
}
case Type::DependentTemplateSpecialization: {
@@ -14260,7 +14314,7 @@ static QualType getCommonNonSugarTypeNode(ASTContext &Ctx, const Type *X,
/*IsSame=*/true),
SX.getName(), SX.hasTemplateKeyword() || SY.hasTemplateKeyword());
return Ctx.getDependentTemplateSpecializationType(
- getCommonTypeKeyword(TX, TY), Name, As);
+ getCommonTypeKeyword(TX, TY, /*IsSame=*/true), Name, As);
}
case Type::UnaryTransform: {
const auto *TX = cast<UnaryTransformType>(X),
@@ -14301,7 +14355,7 @@ static QualType getCommonNonSugarTypeNode(ASTContext &Ctx, const Type *X,
llvm_unreachable("Unknown Type Class");
}
-static QualType getCommonSugarTypeNode(ASTContext &Ctx, const Type *X,
+static QualType getCommonSugarTypeNode(const ASTContext &Ctx, const Type *X,
const Type *Y,
SplitQualType Underlying) {
Type::TypeClass TC = X->getTypeClass();
@@ -14413,15 +14467,6 @@ static QualType getCommonSugarTypeNode(ASTContext &Ctx, const Type *X,
case Type::DeducedTemplateSpecialization:
// FIXME: Try to merge these.
return QualType();
-
- case Type::Elaborated: {
- const auto *EX = cast<ElaboratedType>(X), *EY = cast<ElaboratedType>(Y);
- return Ctx.getElaboratedType(
- ::getCommonTypeKeyword(EX, EY),
- ::getCommonQualifier(Ctx, EX, EY, /*IsSame=*/false),
- Ctx.getQualifiedType(Underlying),
- ::getCommonDecl(EX->getOwnedTagDecl(), EY->getOwnedTagDecl()));
- }
case Type::MacroQualified: {
const auto *MX = cast<MacroQualifiedType>(X),
*MY = cast<MacroQualifiedType>(Y);
@@ -14465,16 +14510,19 @@ static QualType getCommonSugarTypeNode(ASTContext &Ctx, const Type *X,
if (getCommonTemplateArguments(Ctx, As, TX->template_arguments(),
TY->template_arguments()))
return QualType();
- return Ctx.getTemplateSpecializationType(CTN, As,
- /*CanonicalArgs=*/{},
- Ctx.getQualifiedType(Underlying));
+ return Ctx.getTemplateSpecializationType(
+ getCommonTypeKeyword(TX, TY, /*IsSame=*/false), CTN, As,
+ /*CanonicalArgs=*/{}, Ctx.getQualifiedType(Underlying));
}
case Type::Typedef: {
const auto *TX = cast<TypedefType>(X), *TY = cast<TypedefType>(Y);
const TypedefNameDecl *CD = ::getCommonDecl(TX->getDecl(), TY->getDecl());
if (!CD)
return QualType();
- return Ctx.getTypedefType(CD, Ctx.getQualifiedType(Underlying));
+ return Ctx.getTypedefType(
+ ::getCommonTypeKeyword(TX, TY, /*IsSame=*/false),
+ ::getCommonQualifier(Ctx, TX, TY, /*IsSame=*/false), CD,
+ Ctx.getQualifiedType(Underlying));
}
case Type::TypeOf: {
// The common sugar between two typeof expressions, where one is
@@ -14505,11 +14553,12 @@ static QualType getCommonSugarTypeNode(ASTContext &Ctx, const Type *X,
}
case Type::Using: {
const auto *UX = cast<UsingType>(X), *UY = cast<UsingType>(Y);
- const UsingShadowDecl *CD =
- ::getCommonDecl(UX->getFoundDecl(), UY->getFoundDecl());
+ const UsingShadowDecl *CD = ::getCommonDecl(UX->getDecl(), UY->getDecl());
if (!CD)
return QualType();
- return Ctx.getUsingType(CD, Ctx.getQualifiedType(Underlying));
+ return Ctx.getUsingType(::getCommonTypeKeyword(UX, UY, /*IsSame=*/false),
+ ::getCommonQualifier(Ctx, UX, UY, /*IsSame=*/false),
+ CD, Ctx.getQualifiedType(Underlying));
}
case Type::MemberPointer: {
const auto *PX = cast<MemberPointerType>(X),
@@ -14568,7 +14617,7 @@ static auto unwrapSugar(SplitQualType &T, Qualifiers &QTotal) {
}
QualType ASTContext::getCommonSugaredType(QualType X, QualType Y,
- bool Unqualified) {
+ bool Unqualified) const {
assert(Unqualified ? hasSameUnqualifiedType(X, Y) : hasSameType(X, Y));
if (X == Y)
return X;
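
For readers tracking the NestedNameSpecifier rework in the hunks above, here is a minimal illustrative sketch of the new value-type call shape; it is not part of the patch and assumes only the constructor and Kind enumeration visible in the diff.

// Illustrative sketch only -- assumes the value-type NestedNameSpecifier API
// introduced by this patch (constructor taking a namespace decl plus an
// optional prefix, and the scoped Kind enumeration).
#include "clang/AST/ASTContext.h"
#include "clang/AST/NestedNameSpecifier.h"

using namespace clang;

// Build a qualifier for `N::` with no prefix; previously this was
// NestedNameSpecifier::Create(Ctx, /*Prefix=*/nullptr, N).
static NestedNameSpecifier makeNamespaceQualifier(const ASTContext &Ctx,
                                                  NamespaceDecl *N) {
  return NestedNameSpecifier(Ctx, N, /*Prefix=*/std::nullopt);
}

// The specifier is now a value; the empty state is Kind::Null.
static bool isEmptyQualifier(NestedNameSpecifier NNS) {
  return NNS.getKind() == NestedNameSpecifier::Kind::Null;
}
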
diff --git a/clang/lib/AST/ASTDiagnostic.cpp b/clang/lib/AST/ASTDiagnostic.cpp
index 2ef0c31..d7fd411 100644
--- a/clang/lib/AST/ASTDiagnostic.cpp
+++ b/clang/lib/AST/ASTDiagnostic.cpp
@@ -36,11 +36,6 @@ QualType clang::desugarForDiagnostic(ASTContext &Context, QualType QT,
while (true) {
const Type *Ty = QC.strip(QT);
- // Don't aka just because we saw an elaborated type...
- if (const ElaboratedType *ET = dyn_cast<ElaboratedType>(Ty)) {
- QT = ET->desugar();
- continue;
- }
// ... or a using type ...
if (const UsingType *UT = dyn_cast<UsingType>(Ty)) {
QT = UT->desugar();
@@ -130,7 +125,8 @@ QualType clang::desugarForDiagnostic(ASTContext &Context, QualType QT,
if (DesugarArgument) {
ShouldAKA = true;
QT = Context.getTemplateSpecializationType(
- TST->getTemplateName(), Args, /*CanonicalArgs=*/{}, QT);
+ TST->getKeyword(), TST->getTemplateName(), Args,
+ /*CanonicalArgs=*/{}, QT);
}
break;
}
@@ -200,7 +196,8 @@ break; \
// Don't desugar through the primary typedef of an anonymous type.
if (const TagType *UTT = Underlying->getAs<TagType>())
if (const TypedefType *QTT = dyn_cast<TypedefType>(QT))
- if (UTT->getDecl()->getTypedefNameForAnonDecl() == QTT->getDecl())
+ if (UTT->getOriginalDecl()->getTypedefNameForAnonDecl() ==
+ QTT->getDecl())
break;
// Record that we actually looked through an opaque type here.
@@ -461,13 +458,12 @@ void clang::FormatASTNodeDiagnosticArgument(
ND->getNameForDiagnostic(OS, Context.getPrintingPolicy(), Qualified);
break;
}
- case DiagnosticsEngine::ak_nestednamespec: {
- NestedNameSpecifier *NNS = reinterpret_cast<NestedNameSpecifier*>(Val);
- NNS->print(OS, Context.getPrintingPolicy(),
+ case DiagnosticsEngine::ak_nestednamespec:
+ NestedNameSpecifier::getFromVoidPointer(reinterpret_cast<void *>(Val))
+ .print(OS, Context.getPrintingPolicy(),
/*ResolveTemplateArguments=*/false,
/*PrintFinalScopeResOp=*/false);
break;
- }
case DiagnosticsEngine::ak_declcontext: {
DeclContext *DC = reinterpret_cast<DeclContext *> (Val);
assert(DC && "Should never have a null declaration context");
@@ -484,9 +480,8 @@ void clang::FormatASTNodeDiagnosticArgument(
} else if (isLambdaCallOperator(DC)) {
OS << "lambda expression";
} else if (TypeDecl *Type = dyn_cast<TypeDecl>(DC)) {
- OS << ConvertTypeToDiagnosticString(Context,
- Context.getTypeDeclType(Type),
- PrevArgs, QualTypeVals);
+ OS << ConvertTypeToDiagnosticString(
+ Context, Context.getTypeDeclType(Type), PrevArgs, QualTypeVals);
} else {
assert(isa<NamedDecl>(DC) && "Expected a NamedDecl");
NamedDecl *ND = cast<NamedDecl>(DC);
@@ -1158,12 +1153,13 @@ class TemplateDiff {
return nullptr;
const ClassTemplateSpecializationDecl *CTSD =
- dyn_cast<ClassTemplateSpecializationDecl>(RT->getDecl());
+ dyn_cast<ClassTemplateSpecializationDecl>(RT->getOriginalDecl());
if (!CTSD)
return nullptr;
Ty = Context.getTemplateSpecializationType(
+ ElaboratedTypeKeyword::None,
TemplateName(CTSD->getSpecializedTemplate()),
CTSD->getTemplateArgs().asArray(), /*CanonicalArgs=*/{},
Ty.getLocalUnqualifiedType().getCanonicalType());
@@ -1743,25 +1739,10 @@ class TemplateDiff {
std::string FromTypeStr = FromType.isNull() ? "(no argument)"
: FromType.getAsString(Policy);
- std::string ToTypeStr = ToType.isNull() ? "(no argument)"
- : ToType.getAsString(Policy);
- // Print without ElaboratedType sugar if it is better.
+ std::string ToTypeStr =
+ ToType.isNull() ? "(no argument)" : ToType.getAsString(Policy);
// TODO: merge this with other aka printing above.
if (FromTypeStr == ToTypeStr) {
- const auto *FromElTy = dyn_cast<ElaboratedType>(FromType),
- *ToElTy = dyn_cast<ElaboratedType>(ToType);
- if (FromElTy || ToElTy) {
- std::string FromNamedTypeStr =
- FromElTy ? FromElTy->getNamedType().getAsString(Policy)
- : FromTypeStr;
- std::string ToNamedTypeStr =
- ToElTy ? ToElTy->getNamedType().getAsString(Policy) : ToTypeStr;
- if (FromNamedTypeStr != ToNamedTypeStr) {
- FromTypeStr = FromNamedTypeStr;
- ToTypeStr = ToNamedTypeStr;
- goto PrintTypes;
- }
- }
// Switch to canonical typename if it is better.
std::string FromCanTypeStr =
FromType.getCanonicalType().getAsString(Policy);
@@ -1772,8 +1753,8 @@ class TemplateDiff {
}
}
- PrintTypes:
- if (PrintTree) OS << '[';
+ if (PrintTree)
+ OS << '[';
OS << (FromDefault ? "(default) " : "");
Bold();
OS << FromTypeStr;
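
A hedged sketch of how a printer now consumes the specifier by value, matching the ak_nestednamespec handling above; the print() arguments are taken from that hunk, and the helper name is hypothetical.

// Illustrative sketch only -- mirrors the print() call shown in the
// ak_nestednamespec case above; the wrapper itself is made up.
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/PrettyPrinter.h"
#include "llvm/Support/raw_ostream.h"

using namespace clang;

static void printQualifier(NestedNameSpecifier NNS, const PrintingPolicy &PP) {
  NNS.print(llvm::outs(), PP,
            /*ResolveTemplateArguments=*/false,
            /*PrintFinalScopeResOp=*/false);
}
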
diff --git a/clang/lib/AST/ASTDumper.cpp b/clang/lib/AST/ASTDumper.cpp
index 5e4487e..8e651a0 100644
--- a/clang/lib/AST/ASTDumper.cpp
+++ b/clang/lib/AST/ASTDumper.cpp
@@ -117,7 +117,9 @@ void ASTDumper::dumpTemplateDeclSpecialization(const SpecializationDecl *D,
// FIXME: The redecls() range sometimes has elements of a less-specific
// type. (In particular, ClassTemplateSpecializationDecl::redecls() gives
// us TagDecls, and should give CXXRecordDecls).
- auto *Redecl = cast<SpecializationDecl>(RedeclWithBadType);
+ auto *Redecl = dyn_cast<SpecializationDecl>(RedeclWithBadType);
+ if (!Redecl)
+ continue;
switch (Redecl->getTemplateSpecializationKind()) {
case TSK_ExplicitInstantiationDeclaration:
case TSK_ExplicitInstantiationDefinition:
diff --git a/clang/lib/AST/ASTImporter.cpp b/clang/lib/AST/ASTImporter.cpp
index 8e2927b..315ead9 100644
--- a/clang/lib/AST/ASTImporter.cpp
+++ b/clang/lib/AST/ASTImporter.cpp
@@ -949,8 +949,10 @@ ASTNodeImporter::import(const TemplateArgumentLoc &TALoc) {
else
return TSIOrErr.takeError();
} else {
- auto ToTemplateQualifierLocOrErr =
- import(FromInfo.getTemplateQualifierLoc());
+ auto ToTemplateKWLocOrErr = import(FromInfo.getTemplateKwLoc());
+ if (!ToTemplateKWLocOrErr)
+ return ToTemplateKWLocOrErr.takeError();
+ auto ToTemplateQualifierLocOrErr = import(TALoc.getTemplateQualifierLoc());
if (!ToTemplateQualifierLocOrErr)
return ToTemplateQualifierLocOrErr.takeError();
auto ToTemplateNameLocOrErr = import(FromInfo.getTemplateNameLoc());
@@ -961,8 +963,9 @@ ASTNodeImporter::import(const TemplateArgumentLoc &TALoc) {
if (!ToTemplateEllipsisLocOrErr)
return ToTemplateEllipsisLocOrErr.takeError();
ToInfo = TemplateArgumentLocInfo(
- Importer.getToContext(), *ToTemplateQualifierLocOrErr,
- *ToTemplateNameLocOrErr, *ToTemplateEllipsisLocOrErr);
+ Importer.getToContext(), *ToTemplateKWLocOrErr,
+ *ToTemplateQualifierLocOrErr, *ToTemplateNameLocOrErr,
+ *ToTemplateEllipsisLocOrErr);
}
return TemplateArgumentLoc(Arg, ToInfo);
@@ -1597,13 +1600,15 @@ ASTNodeImporter::VisitFunctionProtoType(const FunctionProtoType *T) {
ExpectedType ASTNodeImporter::VisitUnresolvedUsingType(
const UnresolvedUsingType *T) {
Error Err = Error::success();
- auto ToD = importChecked(Err, T->getDecl());
- auto ToPrevD = importChecked(Err, T->getDecl()->getPreviousDecl());
+ auto ToQualifier = importChecked(Err, T->getQualifier());
+ auto *ToD = importChecked(Err, T->getDecl());
if (Err)
return std::move(Err);
- return Importer.getToContext().getTypeDeclType(
- ToD, cast_or_null<TypeDecl>(ToPrevD));
+ if (T->isCanonicalUnqualified())
+ return Importer.getToContext().getCanonicalUnresolvedUsingType(ToD);
+ return Importer.getToContext().getUnresolvedUsingType(T->getKeyword(),
+ ToQualifier, ToD);
}
ExpectedType ASTNodeImporter::VisitParenType(const ParenType *T) {
@@ -1631,15 +1636,17 @@ ExpectedType ASTNodeImporter::VisitTypedefType(const TypedefType *T) {
if (!ToDeclOrErr)
return ToDeclOrErr.takeError();
- TypedefNameDecl *ToDecl = *ToDeclOrErr;
- if (ToDecl->getTypeForDecl())
- return QualType(ToDecl->getTypeForDecl(), 0);
+ auto ToQualifierOrErr = import(T->getQualifier());
+ if (!ToQualifierOrErr)
+ return ToQualifierOrErr.takeError();
- ExpectedType ToUnderlyingTypeOrErr = import(T->desugar());
+ ExpectedType ToUnderlyingTypeOrErr =
+ T->typeMatchesDecl() ? QualType() : import(T->desugar());
if (!ToUnderlyingTypeOrErr)
return ToUnderlyingTypeOrErr.takeError();
- return Importer.getToContext().getTypedefType(ToDecl, *ToUnderlyingTypeOrErr);
+ return Importer.getToContext().getTypedefType(
+ T->getKeyword(), *ToQualifierOrErr, *ToDeclOrErr, *ToUnderlyingTypeOrErr);
}
ExpectedType ASTNodeImporter::VisitTypeOfExprType(const TypeOfExprType *T) {
@@ -1658,14 +1665,14 @@ ExpectedType ASTNodeImporter::VisitTypeOfType(const TypeOfType *T) {
}
ExpectedType ASTNodeImporter::VisitUsingType(const UsingType *T) {
- Expected<UsingShadowDecl *> FoundOrErr = import(T->getFoundDecl());
- if (!FoundOrErr)
- return FoundOrErr.takeError();
- Expected<QualType> UnderlyingOrErr = import(T->getUnderlyingType());
- if (!UnderlyingOrErr)
- return UnderlyingOrErr.takeError();
-
- return Importer.getToContext().getUsingType(*FoundOrErr, *UnderlyingOrErr);
+ Error Err = Error::success();
+ auto ToQualifier = importChecked(Err, T->getQualifier());
+ auto *ToD = importChecked(Err, T->getDecl());
+ QualType ToT = importChecked(Err, T->desugar());
+ if (Err)
+ return std::move(Err);
+ return Importer.getToContext().getUsingType(T->getKeyword(), ToQualifier, ToD,
+ ToT);
}
ExpectedType ASTNodeImporter::VisitDecltypeType(const DecltypeType *T) {
@@ -1728,36 +1735,37 @@ ExpectedType ASTNodeImporter::VisitDeducedTemplateSpecializationType(
return ToDeducedTypeOrErr.takeError();
return Importer.getToContext().getDeducedTemplateSpecializationType(
- *ToTemplateNameOrErr, *ToDeducedTypeOrErr, T->isDependentType());
+ T->getKeyword(), *ToTemplateNameOrErr, *ToDeducedTypeOrErr,
+ T->isDependentType());
}
-ExpectedType ASTNodeImporter::VisitInjectedClassNameType(
- const InjectedClassNameType *T) {
- Expected<CXXRecordDecl *> ToDeclOrErr = import(T->getDecl());
+ExpectedType ASTNodeImporter::VisitTagType(const TagType *T) {
+ Expected<TagDecl *> ToDeclOrErr = import(T->getOriginalDecl());
if (!ToDeclOrErr)
return ToDeclOrErr.takeError();
- // The InjectedClassNameType is created in VisitRecordDecl when the
- // T->getDecl() is imported. Here we can return the existing type.
- const Type *Ty = (*ToDeclOrErr)->getTypeForDecl();
- assert(isa_and_nonnull<InjectedClassNameType>(Ty));
- return QualType(Ty, 0);
-}
+ if (T->isCanonicalUnqualified())
+ return Importer.getToContext().getCanonicalTagType(*ToDeclOrErr);
-ExpectedType ASTNodeImporter::VisitRecordType(const RecordType *T) {
- Expected<RecordDecl *> ToDeclOrErr = import(T->getDecl());
- if (!ToDeclOrErr)
- return ToDeclOrErr.takeError();
+ auto ToQualifierOrErr = import(T->getQualifier());
+ if (!ToQualifierOrErr)
+ return ToQualifierOrErr.takeError();
- return Importer.getToContext().getTagDeclType(*ToDeclOrErr);
+ return Importer.getToContext().getTagType(T->getKeyword(), *ToQualifierOrErr,
+ *ToDeclOrErr, T->isTagOwned());
}
ExpectedType ASTNodeImporter::VisitEnumType(const EnumType *T) {
- Expected<EnumDecl *> ToDeclOrErr = import(T->getDecl());
- if (!ToDeclOrErr)
- return ToDeclOrErr.takeError();
+ return VisitTagType(T);
+}
+
+ExpectedType ASTNodeImporter::VisitRecordType(const RecordType *T) {
+ return VisitTagType(T);
+}
- return Importer.getToContext().getTagDeclType(*ToDeclOrErr);
+ExpectedType
+ASTNodeImporter::VisitInjectedClassNameType(const InjectedClassNameType *T) {
+ return VisitTagType(T);
}
ExpectedType ASTNodeImporter::VisitAttributedType(const AttributedType *T) {
@@ -1850,27 +1858,8 @@ ExpectedType ASTNodeImporter::VisitTemplateSpecializationType(
if (!ToUnderlyingOrErr)
return ToUnderlyingOrErr.takeError();
return Importer.getToContext().getTemplateSpecializationType(
- *ToTemplateOrErr, ToTemplateArgs, {}, *ToUnderlyingOrErr);
-}
-
-ExpectedType ASTNodeImporter::VisitElaboratedType(const ElaboratedType *T) {
- // Note: the qualifier in an ElaboratedType is optional.
- auto ToQualifierOrErr = import(T->getQualifier());
- if (!ToQualifierOrErr)
- return ToQualifierOrErr.takeError();
-
- ExpectedType ToNamedTypeOrErr = import(T->getNamedType());
- if (!ToNamedTypeOrErr)
- return ToNamedTypeOrErr.takeError();
-
- Expected<TagDecl *> ToOwnedTagDeclOrErr = import(T->getOwnedTagDecl());
- if (!ToOwnedTagDeclOrErr)
- return ToOwnedTagDeclOrErr.takeError();
-
- return Importer.getToContext().getElaboratedType(T->getKeyword(),
- *ToQualifierOrErr,
- *ToNamedTypeOrErr,
- *ToOwnedTagDeclOrErr);
+ T->getKeyword(), *ToTemplateOrErr, ToTemplateArgs, {},
+ *ToUnderlyingOrErr);
}
ExpectedType
@@ -2168,7 +2157,7 @@ Error ASTNodeImporter::ImportDeclParts(
const Type *LeafT =
getLeafPointeeType(P->getType().getCanonicalType().getTypePtr());
auto *RT = dyn_cast<RecordType>(LeafT);
- if (RT && RT->getDecl() == D) {
+ if (RT && RT->getOriginalDecl() == D) {
Importer.FromDiag(D->getLocation(), diag::err_unsupported_ast_node)
<< D->getDeclKindName();
return make_error<ASTImportError>(ASTImportError::UnsupportedConstruct);
@@ -2421,8 +2410,8 @@ Error ASTNodeImporter::ImportFieldDeclDefinition(const FieldDecl *From,
const RecordType *RecordTo = ToType->getAs<RecordType>();
if (RecordFrom && RecordTo) {
- FromRecordDecl = RecordFrom->getDecl();
- ToRecordDecl = RecordTo->getDecl();
+ FromRecordDecl = RecordFrom->getOriginalDecl();
+ ToRecordDecl = RecordTo->getOriginalDecl();
}
}
@@ -2643,7 +2632,7 @@ Error ASTNodeImporter::ImportDefinition(
return Err;
ExpectedType ToTypeOrErr =
- import(Importer.getFromContext().getTypeDeclType(From));
+ import(QualType(Importer.getFromContext().getCanonicalTagType(From)));
if (!ToTypeOrErr)
return ToTypeOrErr.takeError();
@@ -3218,7 +3207,7 @@ ExpectedDecl ASTNodeImporter::VisitEnumDecl(EnumDecl *D) {
if (auto *Typedef = dyn_cast<TypedefNameDecl>(FoundDecl)) {
if (const auto *Tag = Typedef->getUnderlyingType()->getAs<TagType>())
- FoundDecl = Tag->getDecl();
+ FoundDecl = Tag->getOriginalDecl();
}
if (auto *FoundEnum = dyn_cast<EnumDecl>(FoundDecl)) {
@@ -3349,7 +3338,7 @@ ExpectedDecl ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
Decl *Found = FoundDecl;
if (auto *Typedef = dyn_cast<TypedefNameDecl>(Found)) {
if (const auto *Tag = Typedef->getUnderlyingType()->getAs<TagType>())
- Found = Tag->getDecl();
+ Found = Tag->getOriginalDecl();
}
if (auto *FoundRecord = dyn_cast<RecordDecl>(Found)) {
@@ -3428,17 +3417,6 @@ ExpectedDecl ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
return CDeclOrErr.takeError();
Numbering.ContextDecl = *CDeclOrErr;
D2CXX->setLambdaNumbering(Numbering);
- } else if (DCXX->isInjectedClassName()) {
- // We have to be careful to do a similar dance to the one in
- // Sema::ActOnStartCXXMemberDeclarations
- const bool DelayTypeCreation = true;
- if (GetImportedOrCreateDecl(
- D2CXX, D, Importer.getToContext(), D->getTagKind(), DC,
- *BeginLocOrErr, Loc, Name.getAsIdentifierInfo(),
- cast_or_null<CXXRecordDecl>(PrevDecl), DelayTypeCreation))
- return D2CXX;
- Importer.getToContext().getTypeDeclType(
- D2CXX, dyn_cast<CXXRecordDecl>(DC));
} else {
if (GetImportedOrCreateDecl(D2CXX, D, Importer.getToContext(),
D->getTagKind(), DC, *BeginLocOrErr, Loc,
@@ -3458,51 +3436,6 @@ ExpectedDecl ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
if (Error Err = importInto(ToDescribed, FromDescribed))
return std::move(Err);
D2CXX->setDescribedClassTemplate(ToDescribed);
- if (!DCXX->isInjectedClassName() && !IsFriendTemplate) {
- // In a record describing a template the type should be an
- // InjectedClassNameType (see Sema::CheckClassTemplate). Update the
- // previously set type to the correct value here (ToDescribed is not
- // available at record create).
- CXXRecordDecl *Injected = nullptr;
- for (NamedDecl *Found : D2CXX->noload_lookup(Name)) {
- auto *Record = dyn_cast<CXXRecordDecl>(Found);
- if (Record && Record->isInjectedClassName()) {
- Injected = Record;
- break;
- }
- }
- // Create an injected type for the whole redecl chain.
- // The chain may contain an already existing injected type at the start,
- // if yes this should be reused. We must ensure that only one type
- // object exists for the injected type (including the injected record
- // declaration), ASTContext does not check it.
- SmallVector<Decl *, 2> Redecls =
- getCanonicalForwardRedeclChain(D2CXX);
- const Type *FrontTy =
- cast<CXXRecordDecl>(Redecls.front())->getTypeForDecl();
- QualType InjSpec;
- if (auto *InjTy = FrontTy->getAs<InjectedClassNameType>())
- InjSpec = InjTy->getInjectedSpecializationType();
- else
- InjSpec = ToDescribed->getInjectedClassNameSpecialization();
- for (auto *R : Redecls) {
- auto *RI = cast<CXXRecordDecl>(R);
- if (R != Redecls.front() ||
- !isa<InjectedClassNameType>(RI->getTypeForDecl()))
- RI->setTypeForDecl(nullptr);
- // This function tries to get the injected type from getTypeForDecl,
- // then from the previous declaration if possible. If not, it creates
- // a new type.
- Importer.getToContext().getInjectedClassNameType(RI, InjSpec);
- }
- // Set the new type for the injected decl too.
- if (Injected) {
- Injected->setTypeForDecl(nullptr);
- // This function will copy the injected type from D2CXX into Injected.
- // The injected decl does not have a previous decl to copy from.
- Importer.getToContext().getTypeDeclType(Injected, D2CXX);
- }
- }
} else if (MemberSpecializationInfo *MemberInfo =
DCXX->getMemberSpecializationInfo()) {
TemplateSpecializationKind SK =
@@ -3826,11 +3759,12 @@ public:
}
std::optional<bool> VisitTagType(const TagType *T) {
- if (auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(T->getDecl()))
+ if (auto *Spec =
+ dyn_cast<ClassTemplateSpecializationDecl>(T->getOriginalDecl()))
for (const auto &Arg : Spec->getTemplateArgs().asArray())
if (checkTemplateArgument(Arg))
return true;
- return isAncestorDeclContextOf(ParentDC, T->getDecl());
+ return isAncestorDeclContextOf(ParentDC, T->getOriginalDecl());
}
std::optional<bool> VisitPointerType(const PointerType *T) {
@@ -3842,17 +3776,11 @@ public:
}
std::optional<bool> VisitTypedefType(const TypedefType *T) {
- const TypedefNameDecl *TD = T->getDecl();
- assert(TD);
- return isAncestorDeclContextOf(ParentDC, TD);
+ return isAncestorDeclContextOf(ParentDC, T->getDecl());
}
std::optional<bool> VisitUsingType(const UsingType *T) {
- if (T->getFoundDecl() &&
- isAncestorDeclContextOf(ParentDC, T->getFoundDecl()))
- return true;
-
- return {};
+ return isAncestorDeclContextOf(ParentDC, T->getDecl());
}
std::optional<bool>
@@ -6521,16 +6449,10 @@ ExpectedDecl ASTNodeImporter::VisitClassTemplateSpecializationDecl(
// Create the specialization.
ClassTemplateSpecializationDecl *D2 = nullptr;
if (PartialSpec) {
- QualType CanonInjType;
- if (Error Err = importInto(
- CanonInjType, PartialSpec->getInjectedSpecializationType()))
- return std::move(Err);
- CanonInjType = CanonInjType.getCanonicalType();
-
if (GetImportedOrCreateDecl<ClassTemplatePartialSpecializationDecl>(
D2, D, Importer.getToContext(), D->getTagKind(), DC, *BeginLocOrErr,
*IdLocOrErr, ToTPList, ClassTemplate, ArrayRef(TemplateArgs),
- CanonInjType,
+ /*CanonInjectedTST=*/CanQualType(),
cast_or_null<ClassTemplatePartialSpecializationDecl>(PrevDecl)))
return D2;
@@ -10050,46 +9972,34 @@ Expected<Stmt *> ASTImporter::Import(Stmt *FromS) {
return ToSOrErr;
}
-Expected<NestedNameSpecifier *>
-ASTImporter::Import(NestedNameSpecifier *FromNNS) {
- if (!FromNNS)
- return nullptr;
-
- NestedNameSpecifier *Prefix = nullptr;
- if (Error Err = importInto(Prefix, FromNNS->getPrefix()))
- return std::move(Err);
-
- switch (FromNNS->getKind()) {
- case NestedNameSpecifier::Identifier:
- assert(FromNNS->getAsIdentifier() && "NNS should contain identifier.");
- return NestedNameSpecifier::Create(ToContext, Prefix,
- Import(FromNNS->getAsIdentifier()));
-
- case NestedNameSpecifier::Namespace:
- if (ExpectedDecl NSOrErr = Import(FromNNS->getAsNamespace())) {
- return NestedNameSpecifier::Create(ToContext, Prefix,
- cast<NamespaceBaseDecl>(*NSOrErr));
- } else
+Expected<NestedNameSpecifier> ASTImporter::Import(NestedNameSpecifier FromNNS) {
+ switch (FromNNS.getKind()) {
+ case NestedNameSpecifier::Kind::Null:
+ case NestedNameSpecifier::Kind::Global:
+ return FromNNS;
+ case NestedNameSpecifier::Kind::Namespace: {
+ auto [Namespace, Prefix] = FromNNS.getAsNamespaceAndPrefix();
+ auto NSOrErr = Import(Namespace);
+ if (!NSOrErr)
return NSOrErr.takeError();
-
- case NestedNameSpecifier::Global:
- return NestedNameSpecifier::GlobalSpecifier(ToContext);
-
- case NestedNameSpecifier::Super:
- if (ExpectedDecl RDOrErr = Import(FromNNS->getAsRecordDecl()))
- return NestedNameSpecifier::SuperSpecifier(ToContext,
- cast<CXXRecordDecl>(*RDOrErr));
+ auto PrefixOrErr = Import(Prefix);
+ if (!PrefixOrErr)
+ return PrefixOrErr.takeError();
+ return NestedNameSpecifier(ToContext, cast<NamespaceBaseDecl>(*NSOrErr),
+ *PrefixOrErr);
+ }
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
+ if (ExpectedDecl RDOrErr = Import(FromNNS.getAsMicrosoftSuper()))
+ return NestedNameSpecifier(cast<CXXRecordDecl>(*RDOrErr));
else
return RDOrErr.takeError();
-
- case NestedNameSpecifier::TypeSpec:
- if (ExpectedTypePtr TyOrErr = Import(FromNNS->getAsType())) {
- return NestedNameSpecifier::Create(ToContext, Prefix, *TyOrErr);
+ case NestedNameSpecifier::Kind::Type:
+ if (ExpectedTypePtr TyOrErr = Import(FromNNS.getAsType())) {
+ return NestedNameSpecifier(*TyOrErr);
} else {
return TyOrErr.takeError();
}
}
-
llvm_unreachable("Invalid nested name specifier kind");
}
@@ -10103,64 +10013,62 @@ ASTImporter::Import(NestedNameSpecifierLoc FromNNS) {
// serialization in reverse order.
while (NNS) {
NestedNames.push_back(NNS);
- NNS = NNS.getPrefix();
+ NNS = NNS.getAsNamespaceAndPrefix().Prefix;
}
NestedNameSpecifierLocBuilder Builder;
while (!NestedNames.empty()) {
NNS = NestedNames.pop_back_val();
- NestedNameSpecifier *Spec = nullptr;
+ NestedNameSpecifier Spec = std::nullopt;
if (Error Err = importInto(Spec, NNS.getNestedNameSpecifier()))
return std::move(Err);
- NestedNameSpecifier::SpecifierKind Kind = Spec->getKind();
+ NestedNameSpecifier::Kind Kind = Spec.getKind();
SourceLocation ToLocalBeginLoc, ToLocalEndLoc;
- if (Kind != NestedNameSpecifier::Super) {
+ if (Kind != NestedNameSpecifier::Kind::MicrosoftSuper) {
if (Error Err = importInto(ToLocalBeginLoc, NNS.getLocalBeginLoc()))
return std::move(Err);
- if (Kind != NestedNameSpecifier::Global)
+ if (Kind != NestedNameSpecifier::Kind::Global)
if (Error Err = importInto(ToLocalEndLoc, NNS.getLocalEndLoc()))
return std::move(Err);
}
switch (Kind) {
- case NestedNameSpecifier::Identifier:
- Builder.Extend(getToContext(), Spec->getAsIdentifier(), ToLocalBeginLoc,
- ToLocalEndLoc);
- break;
-
- case NestedNameSpecifier::Namespace:
- Builder.Extend(getToContext(), Spec->getAsNamespace(), ToLocalBeginLoc,
- ToLocalEndLoc);
+ case NestedNameSpecifier::Kind::Namespace:
+ Builder.Extend(getToContext(), Spec.getAsNamespaceAndPrefix().Namespace,
+ ToLocalBeginLoc, ToLocalEndLoc);
break;
- case NestedNameSpecifier::TypeSpec: {
+ case NestedNameSpecifier::Kind::Type: {
SourceLocation ToTLoc;
- if (Error Err = importInto(ToTLoc, NNS.getTypeLoc().getBeginLoc()))
+ if (Error Err = importInto(ToTLoc, NNS.castAsTypeLoc().getBeginLoc()))
return std::move(Err);
TypeSourceInfo *TSI = getToContext().getTrivialTypeSourceInfo(
- QualType(Spec->getAsType(), 0), ToTLoc);
- Builder.Extend(getToContext(), TSI->getTypeLoc(), ToLocalEndLoc);
+ QualType(Spec.getAsType(), 0), ToTLoc);
+ Builder.Make(getToContext(), TSI->getTypeLoc(), ToLocalEndLoc);
break;
}
- case NestedNameSpecifier::Global:
+ case NestedNameSpecifier::Kind::Global:
Builder.MakeGlobal(getToContext(), ToLocalBeginLoc);
break;
- case NestedNameSpecifier::Super: {
+ case NestedNameSpecifier::Kind::MicrosoftSuper: {
auto ToSourceRangeOrErr = Import(NNS.getSourceRange());
if (!ToSourceRangeOrErr)
return ToSourceRangeOrErr.takeError();
- Builder.MakeSuper(getToContext(), Spec->getAsRecordDecl(),
- ToSourceRangeOrErr->getBegin(),
- ToSourceRangeOrErr->getEnd());
+ Builder.MakeMicrosoftSuper(getToContext(), Spec.getAsMicrosoftSuper(),
+ ToSourceRangeOrErr->getBegin(),
+ ToSourceRangeOrErr->getEnd());
+ break;
+ }
+ case NestedNameSpecifier::Kind::Null:
+ llvm_unreachable("unexpected null nested name specifier");
}
- }
}
return Builder.getWithLocInContext(getToContext());
@@ -10740,7 +10648,7 @@ ASTNodeImporter::ImportAPValue(const APValue &FromValue) {
if (Err)
return std::move(Err);
if (auto *RD = dyn_cast<CXXRecordDecl>(FromDecl))
- FromElemTy = Importer.FromContext.getRecordType(RD);
+ FromElemTy = Importer.FromContext.getCanonicalTagType(RD);
else
FromElemTy = cast<ValueDecl>(FromDecl)->getType();
ToPath[LoopIdx] = APValue::LValuePathEntry(APValue::BaseOrMemberType(
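
Below is a small, hedged usage sketch of the updated ASTImporter entry point, which now takes and returns NestedNameSpecifier by value rather than by pointer; the wrapper function is hypothetical.

// Illustrative sketch only -- the wrapper is hypothetical; the
// Import(NestedNameSpecifier) overload is the one changed above.
#include "clang/AST/ASTImporter.h"
#include "llvm/Support/Error.h"

using namespace clang;

static NestedNameSpecifier importOrNull(ASTImporter &Importer,
                                        NestedNameSpecifier FromNNS) {
  llvm::Expected<NestedNameSpecifier> ToNNS = Importer.Import(FromNNS);
  if (!ToNNS) {
    llvm::consumeError(ToNNS.takeError());
    return std::nullopt; // the empty (Null) specifier
  }
  return *ToNNS;
}
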
diff --git a/clang/lib/AST/ASTImporterLookupTable.cpp b/clang/lib/AST/ASTImporterLookupTable.cpp
index 4ed3198..29c3af1 100644
--- a/clang/lib/AST/ASTImporterLookupTable.cpp
+++ b/clang/lib/AST/ASTImporterLookupTable.cpp
@@ -49,8 +49,6 @@ struct Builder : RecursiveASTVisitor<Builder> {
bool VisitFriendDecl(FriendDecl *D) {
if (D->getFriendType()) {
QualType Ty = D->getFriendType()->getType();
- if (isa<ElaboratedType>(Ty))
- Ty = cast<ElaboratedType>(Ty)->getNamedType();
// A FriendDecl with a dependent type (e.g. ClassTemplateSpecialization)
// always has that decl as child node.
// However, there are non-dependent cases which does not have the
@@ -64,13 +62,15 @@ struct Builder : RecursiveASTVisitor<Builder> {
dyn_cast<SubstTemplateTypeParmType>(Ty)) {
if (SubstTy->getAsCXXRecordDecl())
LT.add(SubstTy->getAsCXXRecordDecl());
- } else if (isa<TypedefType>(Ty)) {
- // We do not put friend typedefs to the lookup table because
- // ASTImporter does not organize typedefs into redecl chains.
- } else if (isa<UsingType>(Ty)) {
- // Similar to TypedefType, not putting into lookup table.
} else {
- llvm_unreachable("Unhandled type of friend class");
+ if (isa<TypedefType>(Ty)) {
+ // We do not put friend typedefs to the lookup table because
+ // ASTImporter does not organize typedefs into redecl chains.
+ } else if (isa<UsingType>(Ty)) {
+ // Similar to TypedefType, not putting into lookup table.
+ } else {
+ llvm_unreachable("Unhandled type of friend class");
+ }
}
}
}
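
As a side note on the getDecl() to getOriginalDecl() migration that runs through these files, a minimal hedged sketch of the new accessor on tag types; the helper is illustrative only.

// Illustrative sketch only -- shows the getOriginalDecl() accessor this
// patch switches RecordType/TagType callers to.
#include "clang/AST/Decl.h"
#include "clang/AST/Type.h"

using namespace clang;

static const RecordDecl *underlyingRecord(QualType T) {
  if (const auto *RT = T->getAs<RecordType>())
    return RT->getOriginalDecl(); // previously RT->getDecl()
  return nullptr;
}
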
diff --git a/clang/lib/AST/ASTStructuralEquivalence.cpp b/clang/lib/AST/ASTStructuralEquivalence.cpp
index 0e2023f..096bc6c 100644
--- a/clang/lib/AST/ASTStructuralEquivalence.cpp
+++ b/clang/lib/AST/ASTStructuralEquivalence.cpp
@@ -110,8 +110,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
const TemplateArgumentLoc &Arg1,
const TemplateArgumentLoc &Arg2);
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
- NestedNameSpecifier *NNS1,
- NestedNameSpecifier *NNS2);
+ NestedNameSpecifier NNS1,
+ NestedNameSpecifier NNS2);
static bool IsStructurallyEquivalent(const IdentifierInfo *Name1,
const IdentifierInfo *Name2);
@@ -579,35 +579,30 @@ static bool IsStructurallyEquivalent(const IdentifierInfo *Name1,
/// Determine whether two nested-name-specifiers are equivalent.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
- NestedNameSpecifier *NNS1,
- NestedNameSpecifier *NNS2) {
- if (NNS1->getKind() != NNS2->getKind())
- return false;
-
- NestedNameSpecifier *Prefix1 = NNS1->getPrefix(),
- *Prefix2 = NNS2->getPrefix();
- if ((bool)Prefix1 != (bool)Prefix2)
- return false;
-
- if (Prefix1)
- if (!IsStructurallyEquivalent(Context, Prefix1, Prefix2))
- return false;
-
- switch (NNS1->getKind()) {
- case NestedNameSpecifier::Identifier:
- return IsStructurallyEquivalent(NNS1->getAsIdentifier(),
- NNS2->getAsIdentifier());
- case NestedNameSpecifier::Namespace:
- return IsStructurallyEquivalent(Context, NNS1->getAsNamespace(),
- NNS2->getAsNamespace());
- case NestedNameSpecifier::TypeSpec:
- return IsStructurallyEquivalent(Context, QualType(NNS1->getAsType(), 0),
- QualType(NNS2->getAsType(), 0));
- case NestedNameSpecifier::Global:
+ NestedNameSpecifier NNS1,
+ NestedNameSpecifier NNS2) {
+ auto Kind = NNS1.getKind();
+ if (Kind != NNS2.getKind())
+ return false;
+ switch (Kind) {
+ case NestedNameSpecifier::Kind::Null:
+ case NestedNameSpecifier::Kind::Global:
return true;
- case NestedNameSpecifier::Super:
- return IsStructurallyEquivalent(Context, NNS1->getAsRecordDecl(),
- NNS2->getAsRecordDecl());
+ case NestedNameSpecifier::Kind::Namespace: {
+ auto [Namespace1, Prefix1] = NNS1.getAsNamespaceAndPrefix();
+ auto [Namespace2, Prefix2] = NNS2.getAsNamespaceAndPrefix();
+ if (!IsStructurallyEquivalent(Context,
+ const_cast<NamespaceBaseDecl *>(Namespace1),
+ const_cast<NamespaceBaseDecl *>(Namespace2)))
+ return false;
+ return IsStructurallyEquivalent(Context, Prefix1, Prefix2);
+ }
+ case NestedNameSpecifier::Kind::Type:
+ return IsStructurallyEquivalent(Context, QualType(NNS1.getAsType(), 0),
+ QualType(NNS2.getAsType(), 0));
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
+ return IsStructurallyEquivalent(Context, NNS1.getAsMicrosoftSuper(),
+ NNS2.getAsMicrosoftSuper());
}
return false;
}
@@ -615,9 +610,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
const DependentTemplateStorage &S1,
const DependentTemplateStorage &S2) {
- if (NestedNameSpecifier *NNS1 = S1.getQualifier(), *NNS2 = S2.getQualifier();
- !NNS1 != !NNS2 ||
- (NNS1 && !IsStructurallyEquivalent(Context, NNS1, NNS2)))
+ if (!IsStructurallyEquivalent(Context, S1.getQualifier(), S2.getQualifier()))
return false;
IdentifierOrOverloadedOperator IO1 = S1.getName(), IO2 = S2.getName();
@@ -885,10 +878,10 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
// Treat the enumeration as its underlying type and use the builtin type
// class comparison.
if (T1->getTypeClass() == Type::Enum) {
- T1 = T1->getAs<EnumType>()->getDecl()->getIntegerType();
+ T1 = T1->getAs<EnumType>()->getOriginalDecl()->getIntegerType();
assert(T2->isBuiltinType() && !T1.isNull()); // Sanity check
} else if (T2->getTypeClass() == Type::Enum) {
- T2 = T2->getAs<EnumType>()->getDecl()->getIntegerType();
+ T2 = T2->getAs<EnumType>()->getOriginalDecl()->getIntegerType();
assert(T1->isBuiltinType() && !T2.isNull()); // Sanity check
}
TC = Type::Builtin;
@@ -1206,23 +1199,35 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return false;
break;
- case Type::Using:
- if (!IsStructurallyEquivalent(Context, cast<UsingType>(T1)->getFoundDecl(),
- cast<UsingType>(T2)->getFoundDecl()))
+ case Type::Using: {
+ auto *U1 = cast<UsingType>(T1), *U2 = cast<UsingType>(T2);
+ if (U1->getKeyword() != U2->getKeyword())
return false;
- if (!IsStructurallyEquivalent(Context,
- cast<UsingType>(T1)->getUnderlyingType(),
- cast<UsingType>(T2)->getUnderlyingType()))
+ if (!IsStructurallyEquivalent(Context, U1->getQualifier(),
+ U2->getQualifier()))
+ return false;
+ if (!IsStructurallyEquivalent(Context, U1->getDecl(), U2->getDecl()))
+ return false;
+ if (!IsStructurallyEquivalent(Context, U1->desugar(), U2->desugar()))
return false;
break;
-
- case Type::Typedef:
- if (!IsStructurallyEquivalent(Context, cast<TypedefType>(T1)->getDecl(),
- cast<TypedefType>(T2)->getDecl()) ||
- !IsStructurallyEquivalent(Context, cast<TypedefType>(T1)->desugar(),
- cast<TypedefType>(T2)->desugar()))
+ }
+ case Type::Typedef: {
+ auto *U1 = cast<TypedefType>(T1), *U2 = cast<TypedefType>(T2);
+ if (U1->getKeyword() != U2->getKeyword())
+ return false;
+ if (!IsStructurallyEquivalent(Context, U1->getQualifier(),
+ U2->getQualifier()))
+ return false;
+ if (!IsStructurallyEquivalent(Context, U1->getDecl(), U2->getDecl()))
+ return false;
+ if (U1->typeMatchesDecl() != U2->typeMatchesDecl())
+ return false;
+ if (!U1->typeMatchesDecl() &&
+ !IsStructurallyEquivalent(Context, U1->desugar(), U2->desugar()))
return false;
break;
+ }
case Type::TypeOfExpr:
if (!IsStructurallyEquivalent(
@@ -1286,10 +1291,20 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
case Type::Record:
case Type::Enum:
- if (!IsStructurallyEquivalent(Context, cast<TagType>(T1)->getDecl(),
- cast<TagType>(T2)->getDecl()))
+ case Type::InjectedClassName: {
+ const auto *TT1 = cast<TagType>(T1), *TT2 = cast<TagType>(T2);
+ if (TT1->getKeyword() != TT2->getKeyword())
+ return false;
+ if (TT1->isTagOwned() != TT2->isTagOwned())
+ return false;
+ if (!IsStructurallyEquivalent(Context, TT1->getQualifier(),
+ TT2->getQualifier()))
+ return false;
+ if (!IsStructurallyEquivalent(Context, TT1->getOriginalDecl(),
+ TT2->getOriginalDecl()))
return false;
break;
+ }
case Type::TemplateTypeParm: {
const auto *Parm1 = cast<TemplateTypeParmType>(T1);
@@ -1348,33 +1363,6 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
break;
}
- case Type::Elaborated: {
- const auto *Elab1 = cast<ElaboratedType>(T1);
- const auto *Elab2 = cast<ElaboratedType>(T2);
- // CHECKME: what if a keyword is ElaboratedTypeKeyword::None or
- // ElaboratedTypeKeyword::Typename
- // ?
- if (Elab1->getKeyword() != Elab2->getKeyword())
- return false;
- if (!IsStructurallyEquivalent(Context, Elab1->getQualifier(),
- Elab2->getQualifier()))
- return false;
- if (!IsStructurallyEquivalent(Context, Elab1->getNamedType(),
- Elab2->getNamedType()))
- return false;
- break;
- }
-
- case Type::InjectedClassName: {
- const auto *Inj1 = cast<InjectedClassNameType>(T1);
- const auto *Inj2 = cast<InjectedClassNameType>(T2);
- if (!IsStructurallyEquivalent(Context,
- Inj1->getInjectedSpecializationType(),
- Inj2->getInjectedSpecializationType()))
- return false;
- break;
- }
-
case Type::DependentName: {
const auto *Typename1 = cast<DependentNameType>(T1);
const auto *Typename2 = cast<DependentNameType>(T2);
@@ -1549,8 +1537,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
// types
if (Field1->isAnonymousStructOrUnion() &&
Field2->isAnonymousStructOrUnion()) {
- RecordDecl *D1 = Field1->getType()->castAs<RecordType>()->getDecl();
- RecordDecl *D2 = Field2->getType()->castAs<RecordType>()->getDecl();
+ RecordDecl *D1 = Field1->getType()->castAs<RecordType>()->getOriginalDecl();
+ RecordDecl *D2 = Field2->getType()->castAs<RecordType>()->getOriginalDecl();
return IsStructurallyEquivalent(Context, D1, D2);
}
@@ -1628,7 +1616,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
FieldDecl *Field1, FieldDecl *Field2) {
const auto *Owner2 = cast<RecordDecl>(Field2->getDeclContext());
return IsStructurallyEquivalent(Context, Field1, Field2,
- Context.ToCtx.getTypeDeclType(Owner2));
+ Context.ToCtx.getCanonicalTagType(Owner2));
}
/// Determine structural equivalence of two methods.
@@ -1801,7 +1789,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
if (Context.Complain) {
Context.Diag2(D2->getLocation(), Context.getApplicableDiagnostic(
diag::err_odr_tag_type_inconsistent))
- << Context.ToCtx.getTypeDeclType(D2)
+ << Context.ToCtx.getCanonicalTagType(D2)
<< (&Context.FromCtx != &Context.ToCtx);
Context.Diag1(D1->getLocation(), diag::note_odr_tag_kind_here)
<< D1->getDeclName() << (unsigned)D1->getTagKind();
@@ -1903,7 +1891,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Context.Diag2(D2->getLocation(),
Context.getApplicableDiagnostic(
diag::err_odr_tag_type_inconsistent))
- << Context.ToCtx.getTypeDeclType(D2)
+ << Context.ToCtx.getCanonicalTagType(D2)
<< (&Context.FromCtx != &Context.ToCtx);
Context.Diag2(D2->getLocation(), diag::note_odr_number_of_bases)
<< D2CXX->getNumBases();
@@ -1924,7 +1912,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Context.Diag2(D2->getLocation(),
Context.getApplicableDiagnostic(
diag::err_odr_tag_type_inconsistent))
- << Context.ToCtx.getTypeDeclType(D2)
+ << Context.ToCtx.getCanonicalTagType(D2)
<< (&Context.FromCtx != &Context.ToCtx);
Context.Diag2(Base2->getBeginLoc(), diag::note_odr_base)
<< Base2->getType() << Base2->getSourceRange();
@@ -1940,7 +1928,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Context.Diag2(D2->getLocation(),
Context.getApplicableDiagnostic(
diag::err_odr_tag_type_inconsistent))
- << Context.ToCtx.getTypeDeclType(D2)
+ << Context.ToCtx.getCanonicalTagType(D2)
<< (&Context.FromCtx != &Context.ToCtx);
Context.Diag2(Base2->getBeginLoc(), diag::note_odr_virtual_base)
<< Base2->isVirtual() << Base2->getSourceRange();
@@ -1962,7 +1950,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Context.Diag2(D2->getLocation(),
Context.getApplicableDiagnostic(
diag::err_odr_tag_type_inconsistent))
- << Context.ToCtx.getTypeDeclType(D2CXX)
+ << Context.ToCtx.getCanonicalTagType(D2CXX)
<< (&Context.FromCtx != &Context.ToCtx);
Context.Diag1((*Friend1)->getFriendLoc(), diag::note_odr_friend);
Context.Diag2(D2->getLocation(), diag::note_odr_missing_friend);
@@ -1975,7 +1963,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Context.Diag2(D2->getLocation(),
Context.getApplicableDiagnostic(
diag::err_odr_tag_type_inconsistent))
- << Context.ToCtx.getTypeDeclType(D2CXX)
+ << Context.ToCtx.getCanonicalTagType(D2CXX)
<< (&Context.FromCtx != &Context.ToCtx);
Context.Diag1((*Friend1)->getFriendLoc(), diag::note_odr_friend);
Context.Diag2((*Friend2)->getFriendLoc(), diag::note_odr_friend);
@@ -1989,7 +1977,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Context.Diag2(D2->getLocation(),
Context.getApplicableDiagnostic(
diag::err_odr_tag_type_inconsistent))
- << Context.ToCtx.getTypeDeclType(D2)
+ << Context.ToCtx.getCanonicalTagType(D2)
<< (&Context.FromCtx != &Context.ToCtx);
Context.Diag2((*Friend2)->getFriendLoc(), diag::note_odr_friend);
Context.Diag1(D1->getLocation(), diag::note_odr_missing_friend);
@@ -2001,7 +1989,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Context.Diag2(D2->getLocation(),
Context.getApplicableDiagnostic(
diag::err_odr_tag_type_inconsistent))
- << Context.ToCtx.getTypeDeclType(D2)
+ << Context.ToCtx.getCanonicalTagType(D2)
<< (&Context.FromCtx != &Context.ToCtx);
const CXXBaseSpecifier *Base1 = D1CXX->bases_begin();
Context.Diag1(Base1->getBeginLoc(), diag::note_odr_base)
@@ -2013,7 +2001,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
}
// Check the fields for consistency.
- QualType D2Type = Context.ToCtx.getTypeDeclType(D2);
+ CanQualType D2Type = Context.ToCtx.getCanonicalTagType(D2);
RecordDecl::field_iterator Field2 = D2->field_begin(),
Field2End = D2->field_end();
for (RecordDecl::field_iterator Field1 = D1->field_begin(),
@@ -2024,7 +2012,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Context.Diag2(D2->getLocation(),
Context.getApplicableDiagnostic(
diag::err_odr_tag_type_inconsistent))
- << Context.ToCtx.getTypeDeclType(D2)
+ << Context.ToCtx.getCanonicalTagType(D2)
<< (&Context.FromCtx != &Context.ToCtx);
Context.Diag1(Field1->getLocation(), diag::note_odr_field)
<< Field1->getDeclName() << Field1->getType();
@@ -2041,7 +2029,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
if (Context.Complain) {
Context.Diag2(D2->getLocation(), Context.getApplicableDiagnostic(
diag::err_odr_tag_type_inconsistent))
- << Context.ToCtx.getTypeDeclType(D2)
+ << Context.ToCtx.getCanonicalTagType(D2)
<< (&Context.FromCtx != &Context.ToCtx);
Context.Diag2(Field2->getLocation(), diag::note_odr_field)
<< Field2->getDeclName() << Field2->getType();
@@ -2101,7 +2089,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Context.Diag2(D2->getLocation(),
Context.getApplicableDiagnostic(
diag::err_odr_tag_type_inconsistent))
- << Context.ToCtx.getTypeDeclType(D2)
+ << Context.ToCtx.getCanonicalTagType(D2)
<< (&Context.FromCtx != &Context.ToCtx);
Context.Diag1(D1->getLocation(),
D1->isFixed()
@@ -2124,7 +2112,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Context.Diag2(D2->getLocation(),
Context.getApplicableDiagnostic(
diag::err_odr_tag_type_inconsistent))
- << Context.ToCtx.getTypeDeclType(D2)
+ << Context.ToCtx.getCanonicalTagType(D2)
<< (&Context.FromCtx != &Context.ToCtx);
Context.Diag2(D2->getLocation(),
diag::note_odr_incompatible_fixed_underlying_type)
@@ -2162,7 +2150,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Context.Diag2(D2->getLocation(),
Context.getApplicableDiagnostic(
diag::err_odr_tag_type_inconsistent))
- << Context.ToCtx.getTypeDeclType(D2)
+ << Context.ToCtx.getCanonicalTagType(D2)
<< (&Context.FromCtx != &Context.ToCtx);
Context.Diag1((*EC1)->getLocation(), diag::note_odr_enumerator)
<< (*EC1)->getDeclName() << toString((*EC1)->getInitVal(), 10);
@@ -2180,7 +2168,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Context.Diag2(D2->getLocation(),
Context.getApplicableDiagnostic(
diag::err_odr_tag_type_inconsistent))
- << Context.ToCtx.getTypeDeclType(D2)
+ << Context.ToCtx.getCanonicalTagType(D2)
<< (&Context.FromCtx != &Context.ToCtx);
Context.Diag2((*EC2)->getLocation(), diag::note_odr_enumerator)
<< (*EC2)->getDeclName() << toString((*EC2)->getInitVal(), 10);
@@ -2198,7 +2186,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
if (Context.Complain) {
Context.Diag2(D2->getLocation(), Context.getApplicableDiagnostic(
diag::err_odr_tag_type_inconsistent))
- << Context.ToCtx.getTypeDeclType(D2)
+ << Context.ToCtx.getCanonicalTagType(D2)
<< (&Context.FromCtx != &Context.ToCtx);
Context.Diag2((*EC2)->getLocation(), diag::note_odr_enumerator)
<< (*EC2)->getDeclName() << toString((*EC2)->getInitVal(), 10);
@@ -2595,7 +2583,7 @@ DiagnosticBuilder StructuralEquivalenceContext::Diag2(SourceLocation Loc,
UnsignedOrNone
StructuralEquivalenceContext::findUntaggedStructOrUnionIndex(RecordDecl *Anon) {
ASTContext &Context = Anon->getASTContext();
- QualType AnonTy = Context.getRecordType(Anon);
+ CanQualType AnonTy = Context.getCanonicalTagType(Anon);
const auto *Owner = dyn_cast<RecordDecl>(Anon->getDeclContext());
if (!Owner)
@@ -2617,12 +2605,8 @@ StructuralEquivalenceContext::findUntaggedStructOrUnionIndex(RecordDecl *Anon) {
// If the field looks like this:
// struct { ... } A;
QualType FieldType = F->getType();
- // In case of nested structs.
- while (const auto *ElabType = dyn_cast<ElaboratedType>(FieldType))
- FieldType = ElabType->getNamedType();
-
if (const auto *RecType = dyn_cast<RecordType>(FieldType)) {
- const RecordDecl *RecDecl = RecType->getDecl();
+ const RecordDecl *RecDecl = RecType->getOriginalDecl();
if (RecDecl->getDeclContext() == Owner && !RecDecl->getIdentifier()) {
if (Context.hasSameType(FieldType, AnonTy))
break;
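
For the structural-equivalence changes above, here is a hedged sketch of walking a qualifier chain with the new getAsNamespaceAndPrefix() accessor; the counting helper is hypothetical.

// Illustrative sketch only -- the helper is hypothetical; the accessors are
// the ones used by the equivalence check above.
#include "clang/AST/NestedNameSpecifier.h"

using namespace clang;

static unsigned countNamespaceComponents(NestedNameSpecifier NNS) {
  unsigned Count = 0;
  while (NNS.getKind() == NestedNameSpecifier::Kind::Namespace) {
    auto [Namespace, Prefix] = NNS.getAsNamespaceAndPrefix();
    (void)Namespace;
    ++Count;
    NNS = Prefix;
  }
  return Count;
}
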
diff --git a/clang/lib/AST/ASTTypeTraits.cpp b/clang/lib/AST/ASTTypeTraits.cpp
index 99916f5..d2f7fdb 100644
--- a/clang/lib/AST/ASTTypeTraits.cpp
+++ b/clang/lib/AST/ASTTypeTraits.cpp
@@ -194,8 +194,8 @@ void DynTypedNode::print(llvm::raw_ostream &OS,
else if (const NestedNameSpecifier *NNS = get<NestedNameSpecifier>())
NNS->print(OS, PP);
else if (const NestedNameSpecifierLoc *NNSL = get<NestedNameSpecifierLoc>()) {
- if (const NestedNameSpecifier *NNS = NNSL->getNestedNameSpecifier())
- NNS->print(OS, PP);
+ if (NestedNameSpecifier NNS = NNSL->getNestedNameSpecifier())
+ NNS.print(OS, PP);
else
OS << "(empty NestedNameSpecifierLoc)";
} else if (const QualType *QT = get<QualType>())
@@ -234,13 +234,39 @@ void DynTypedNode::dump(llvm::raw_ostream &OS,
OS << "Unable to dump values of type " << NodeKind.asStringRef() << "\n";
}
-SourceRange DynTypedNode::getSourceRange() const {
+SourceRange DynTypedNode::getSourceRange(bool IncludeQualifier) const {
if (const CXXCtorInitializer *CCI = get<CXXCtorInitializer>())
return CCI->getSourceRange();
if (const NestedNameSpecifierLoc *NNSL = get<NestedNameSpecifierLoc>())
return NNSL->getSourceRange();
- if (const TypeLoc *TL = get<TypeLoc>())
- return TL->getSourceRange();
+ if (const TypeLoc *TL = get<TypeLoc>()) {
+ if (IncludeQualifier)
+ return TL->getSourceRange();
+ switch (TL->getTypeLocClass()) {
+ case TypeLoc::DependentName:
+ return TL->castAs<DependentNameTypeLoc>().getNameLoc();
+ case TypeLoc::TemplateSpecialization: {
+ auto T = TL->castAs<TemplateSpecializationTypeLoc>();
+ return SourceRange(T.getTemplateNameLoc(), T.getEndLoc());
+ }
+ case TypeLoc::DependentTemplateSpecialization: {
+ auto T = TL->castAs<DependentTemplateSpecializationTypeLoc>();
+ return SourceRange(T.getTemplateNameLoc(), T.getEndLoc());
+ }
+ case TypeLoc::Enum:
+ case TypeLoc::Record:
+ case TypeLoc::InjectedClassName:
+ return TL->castAs<TagTypeLoc>().getNameLoc();
+ case TypeLoc::Typedef:
+ return TL->castAs<TypedefTypeLoc>().getNameLoc();
+ case TypeLoc::UnresolvedUsing:
+ return TL->castAs<UnresolvedUsingTypeLoc>().getNameLoc();
+ case TypeLoc::Using:
+ return TL->castAs<UsingTypeLoc>().getNameLoc();
+ default:
+ return TL->getSourceRange();
+ }
+ }
if (const Decl *D = get<Decl>())
return D->getSourceRange();
if (const Stmt *S = get<Stmt>())
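
The new IncludeQualifier parameter above can be exercised as in this hedged sketch; the helper is hypothetical, and whether the parameter has a default value is not shown in the hunk, so it is passed explicitly.

// Illustrative sketch only -- narrows a matched TypeLoc to its name range
// via the IncludeQualifier flag added above.
#include "clang/AST/ASTTypeTraits.h"
#include "clang/AST/TypeLoc.h"

using namespace clang;

static SourceRange nameOnlyRange(TypeLoc TL) {
  DynTypedNode Node = DynTypedNode::create(TL);
  // For tag, typedef, using, etc. locations this reports just the name,
  // not the leading nested-name-specifier.
  return Node.getSourceRange(/*IncludeQualifier=*/false);
}
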
diff --git a/clang/lib/AST/ByteCode/Compiler.cpp b/clang/lib/AST/ByteCode/Compiler.cpp
index cc99efa..8e651cf 100644
--- a/clang/lib/AST/ByteCode/Compiler.cpp
+++ b/clang/lib/AST/ByteCode/Compiler.cpp
@@ -553,9 +553,10 @@ bool Compiler<Emitter>::VisitCastExpr(const CastExpr *CE) {
// Possibly diagnose casts to enum types if the target type does not
// have a fixed size.
if (Ctx.getLangOpts().CPlusPlus && CE->getType()->isEnumeralType()) {
- if (const auto *ET = CE->getType().getCanonicalType()->castAs<EnumType>();
- !ET->getDecl()->isFixed()) {
- if (!this->emitCheckEnumValue(*FromT, ET->getDecl(), CE))
+ const auto *ET = CE->getType().getCanonicalType()->castAs<EnumType>();
+ const auto *ED = ET->getOriginalDecl()->getDefinitionOrSelf();
+ if (!ED->isFixed()) {
+ if (!this->emitCheckEnumValue(*FromT, ED, CE))
return false;
}
}
@@ -666,8 +667,8 @@ bool Compiler<Emitter>::VisitCastExpr(const CastExpr *CE) {
}
case CK_VectorSplat: {
- assert(!classify(CE->getType()));
- assert(classify(SubExpr->getType()));
+ assert(!canClassify(CE->getType()));
+ assert(canClassify(SubExpr->getType()));
assert(CE->getType()->isVectorType());
if (!Initializing) {
@@ -1931,15 +1932,10 @@ bool Compiler<Emitter>::visitInitList(ArrayRef<const Expr *> Inits,
dyn_cast<EmbedExpr>(Init->IgnoreParenImpCasts())) {
PrimType TargetT = classifyPrim(Init->getType());
- auto Eval = [&](const Expr *Init, unsigned ElemIndex) {
- PrimType InitT = classifyPrim(Init->getType());
- if (!this->visit(Init))
+ auto Eval = [&](const IntegerLiteral *IL, unsigned ElemIndex) {
+ if (!this->emitConst(IL->getValue(), Init))
return false;
- if (InitT != TargetT) {
- if (!this->emitCast(InitT, TargetT, E))
- return false;
- }
- return this->emitInitElem(TargetT, ElemIndex, Init);
+ return this->emitInitElem(TargetT, ElemIndex, IL);
};
if (!EmbedS->doForEachDataElement(Eval, ElementIndex))
return false;
@@ -2061,24 +2057,36 @@ bool Compiler<Emitter>::visitArrayElemInit(unsigned ElemIndex, const Expr *Init,
template <class Emitter>
bool Compiler<Emitter>::visitCallArgs(ArrayRef<const Expr *> Args,
const FunctionDecl *FuncDecl,
- bool Activate) {
+ bool Activate, bool IsOperatorCall) {
assert(VarScope->getKind() == ScopeKind::Call);
- bool HasNonNullAttr = false;
llvm::BitVector NonNullArgs;
- if (FuncDecl && FuncDecl->hasAttr<NonNullAttr>()) {
- HasNonNullAttr = true;
+ if (FuncDecl && FuncDecl->hasAttr<NonNullAttr>())
NonNullArgs = collectNonNullArgs(FuncDecl, Args);
- }
+
+ bool ExplicitMemberFn = false;
+ if (const auto *MD = dyn_cast_if_present<CXXMethodDecl>(FuncDecl))
+ ExplicitMemberFn = MD->isExplicitObjectMemberFunction();
unsigned ArgIndex = 0;
for (const Expr *Arg : Args) {
- if (OptPrimType T = classify(Arg)) {
+ if (canClassify(Arg)) {
if (!this->visit(Arg))
return false;
} else {
- std::optional<unsigned> LocalIndex = allocateLocal(
- Arg, Arg->getType(), /*ExtendingDecl=*/nullptr, ScopeKind::Call);
+ DeclTy Source = Arg;
+ if (FuncDecl) {
+ // Try to use the parameter declaration instead of the argument
+ // expression as a source.
+ unsigned DeclIndex = ArgIndex - IsOperatorCall + ExplicitMemberFn;
+ if (DeclIndex < FuncDecl->getNumParams())
+ Source = FuncDecl->getParamDecl(ArgIndex - IsOperatorCall +
+ ExplicitMemberFn);
+ }
+
+ std::optional<unsigned> LocalIndex =
+ allocateLocal(std::move(Source), Arg->getType(),
+ /*ExtendingDecl=*/nullptr, ScopeKind::Call);
if (!LocalIndex)
return false;
@@ -2094,7 +2102,7 @@ bool Compiler<Emitter>::visitCallArgs(ArrayRef<const Expr *> Args,
return false;
}
- if (HasNonNullAttr && NonNullArgs[ArgIndex]) {
+ if (!NonNullArgs.empty() && NonNullArgs[ArgIndex]) {
PrimType ArgT = classify(Arg).value_or(PT_Ptr);
if (ArgT == PT_Ptr) {
if (!this->emitCheckNonNullArg(ArgT, Arg))
@@ -3157,7 +3165,7 @@ bool Compiler<Emitter>::VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
template <class Emitter>
bool Compiler<Emitter>::VisitCXXConstructExpr(const CXXConstructExpr *E) {
QualType T = E->getType();
- assert(!classify(T));
+ assert(!canClassify(T));
if (T->isRecordType()) {
const CXXConstructorDecl *Ctor = E->getConstructor();
@@ -4152,7 +4160,7 @@ template <class Emitter> bool Compiler<Emitter>::visit(const Expr *E) {
// Create local variable to hold the return value.
if (!E->isGLValue() && !E->getType()->isAnyComplexType() &&
- !classify(E->getType())) {
+ !canClassify(E->getType())) {
std::optional<unsigned> LocalIndex = allocateLocal(E);
if (!LocalIndex)
return false;
@@ -4172,7 +4180,7 @@ template <class Emitter> bool Compiler<Emitter>::visit(const Expr *E) {
template <class Emitter>
bool Compiler<Emitter>::visitInitializer(const Expr *E) {
- assert(!classify(E->getType()));
+ assert(!canClassify(E->getType()));
OptionScope<Emitter> Scope(this, /*NewDiscardResult=*/false,
/*NewInitializing=*/true);
@@ -4379,7 +4387,7 @@ bool Compiler<Emitter>::visitZeroArrayInitializer(QualType T, const Expr *E) {
template <class Emitter>
bool Compiler<Emitter>::visitAssignment(const Expr *LHS, const Expr *RHS,
const Expr *E) {
- if (!classify(E->getType()))
+ if (!canClassify(E->getType()))
return false;
if (!this->visit(RHS))
@@ -4491,14 +4499,6 @@ template <class Emitter>
unsigned Compiler<Emitter>::allocateLocalPrimitive(
DeclTy &&Src, PrimType Ty, bool IsConst, const ValueDecl *ExtendingDecl,
ScopeKind SC, bool IsConstexprUnknown) {
- // Make sure we don't accidentally register the same decl twice.
- if (const auto *VD =
- dyn_cast_if_present<ValueDecl>(Src.dyn_cast<const Decl *>())) {
- assert(!P.getGlobal(VD));
- assert(!Locals.contains(VD));
- (void)VD;
- }
-
// FIXME: There are cases where Src.is<Expr*>() is wrong, e.g.
// (int){12} in C. Consider using Expr::isTemporaryObject() instead
// or isa<MaterializeTemporaryExpr>().
@@ -4520,19 +4520,11 @@ std::optional<unsigned>
Compiler<Emitter>::allocateLocal(DeclTy &&Src, QualType Ty,
const ValueDecl *ExtendingDecl, ScopeKind SC,
bool IsConstexprUnknown) {
- // Make sure we don't accidentally register the same decl twice.
- if ([[maybe_unused]] const auto *VD =
- dyn_cast_if_present<ValueDecl>(Src.dyn_cast<const Decl *>())) {
- assert(!P.getGlobal(VD));
- assert(!Locals.contains(VD));
- }
-
const ValueDecl *Key = nullptr;
const Expr *Init = nullptr;
bool IsTemporary = false;
if (auto *VD = dyn_cast_if_present<ValueDecl>(Src.dyn_cast<const Decl *>())) {
Key = VD;
- Ty = VD->getType();
if (const auto *VarD = dyn_cast<VarDecl>(VD))
Init = VarD->getInit();
@@ -4592,7 +4584,7 @@ const RecordType *Compiler<Emitter>::getRecordTy(QualType Ty) {
template <class Emitter> Record *Compiler<Emitter>::getRecord(QualType Ty) {
if (const auto *RecordTy = getRecordTy(Ty))
- return getRecord(RecordTy->getDecl());
+ return getRecord(RecordTy->getOriginalDecl()->getDefinitionOrSelf());
return nullptr;
}
@@ -5169,7 +5161,8 @@ bool Compiler<Emitter>::VisitCallExpr(const CallExpr *E) {
return false;
}
- if (!this->visitCallArgs(Args, FuncDecl, IsAssignmentOperatorCall))
+ if (!this->visitCallArgs(Args, FuncDecl, IsAssignmentOperatorCall,
+ isa<CXXOperatorCallExpr>(E)))
return false;
// Undo the argument reversal we did earlier.
@@ -7142,10 +7135,6 @@ bool Compiler<Emitter>::emitDestruction(const Descriptor *Desc,
assert(!Desc->isPrimitive());
assert(!Desc->isPrimitiveArray());
- // Can happen if the decl is invalid.
- if (Desc->isDummy())
- return true;
-
// Arrays.
if (Desc->isArray()) {
const Descriptor *ElemDesc = Desc->ElemDesc;
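
A note on the visitCallArgs hunk earlier in this file's diff: when a member operator is invoked through a CXXOperatorCallExpr, the object shows up as argument 0 but has no matching parameter, while an explicit-object ("deducing this") member function declares that parameter explicitly. A minimal standalone sketch of the resulting index arithmetic (hypothetical helper, not clang code) is:

// Hypothetical helper mirroring `ArgIndex - IsOperatorCall + ExplicitMemberFn`
// from the hunk above; an illustration only, under the assumptions stated in
// the lead-in.
#include <cassert>

static int paramIndexFor(int argIndex, bool isOperatorCall, bool explicitObject) {
  // Operator calls pass the object as argument 0 without a parameter;
  // explicit-object member functions declare it as one and add it back.
  return argIndex - (isOperatorCall ? 1 : 0) + (explicitObject ? 1 : 0);
}

int main() {
  // a += b on a member operator+=: argument 1 (b) is parameter 0.
  assert(paramIndexFor(1, /*isOperatorCall=*/true, /*explicitObject=*/false) == 0);
  // Explicit-object operator: the object argument is also parameter 0.
  assert(paramIndexFor(0, /*isOperatorCall=*/true, /*explicitObject=*/true) == 0);
  // Ordinary function call: indices line up one-to-one.
  assert(paramIndexFor(2, /*isOperatorCall=*/false, /*explicitObject=*/false) == 2);
  return 0;
}
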
diff --git a/clang/lib/AST/ByteCode/Compiler.h b/clang/lib/AST/ByteCode/Compiler.h
index ee8327d..901934e 100644
--- a/clang/lib/AST/ByteCode/Compiler.h
+++ b/clang/lib/AST/ByteCode/Compiler.h
@@ -256,6 +256,8 @@ protected:
OptPrimType classify(const Expr *E) const { return Ctx.classify(E); }
OptPrimType classify(QualType Ty) const { return Ctx.classify(Ty); }
+ bool canClassify(const Expr *E) const { return Ctx.canClassify(E); }
+ bool canClassify(QualType T) const { return Ctx.canClassify(T); }
/// Classifies a known primitive type.
PrimType classifyPrim(QualType Ty) const {
@@ -304,7 +306,7 @@ protected:
bool visitArrayElemInit(unsigned ElemIndex, const Expr *Init,
OptPrimType InitT);
bool visitCallArgs(ArrayRef<const Expr *> Args, const FunctionDecl *FuncDecl,
- bool Activate);
+ bool Activate, bool IsOperatorCall);
/// Creates a local primitive value.
unsigned allocateLocalPrimitive(DeclTy &&Decl, PrimType Ty, bool IsConst,
diff --git a/clang/lib/AST/ByteCode/Context.cpp b/clang/lib/AST/ByteCode/Context.cpp
index f7f528c..6343b2a 100644
--- a/clang/lib/AST/ByteCode/Context.cpp
+++ b/clang/lib/AST/ByteCode/Context.cpp
@@ -365,7 +365,7 @@ OptPrimType Context::classify(QualType T) const {
}
if (const auto *ET = T->getAs<EnumType>()) {
- const auto *D = ET->getDecl();
+ const auto *D = ET->getOriginalDecl()->getDefinitionOrSelf();
if (!D->isComplete())
return std::nullopt;
return classify(D->getIntegerType());
@@ -501,7 +501,7 @@ const Function *Context::getOrCreateFunction(const FunctionDecl *FuncDecl) {
// elsewhere in the code.
QualType Ty = FuncDecl->getReturnType();
bool HasRVO = false;
- if (!Ty->isVoidType() && !classify(Ty)) {
+ if (!Ty->isVoidType() && !canClassify(Ty)) {
HasRVO = true;
ParamTypes.push_back(PT_Ptr);
ParamOffsets.push_back(ParamOffset);
diff --git a/clang/lib/AST/ByteCode/Context.h b/clang/lib/AST/ByteCode/Context.h
index 1c084ac..a6d90bb 100644
--- a/clang/lib/AST/ByteCode/Context.h
+++ b/clang/lib/AST/ByteCode/Context.h
@@ -93,6 +93,25 @@ public:
return classify(E->getType());
}
+ bool canClassify(QualType T) {
+ if (const auto *BT = dyn_cast<BuiltinType>(T)) {
+ if (BT->isInteger() || BT->isFloatingPoint())
+ return true;
+ if (BT->getKind() == BuiltinType::Bool)
+ return true;
+ }
+
+ if (T->isArrayType() || T->isRecordType() || T->isAnyComplexType() ||
+ T->isVectorType())
+ return false;
+ return classify(T) != std::nullopt;
+ }
+ bool canClassify(const Expr *E) {
+ if (E->isGLValue())
+ return true;
+ return canClassify(E->getType());
+ }
+
const CXXMethodDecl *
getOverridingFunction(const CXXRecordDecl *DynamicDecl,
const CXXRecordDecl *StaticDecl,
diff --git a/clang/lib/AST/ByteCode/Descriptor.cpp b/clang/lib/AST/ByteCode/Descriptor.cpp
index 629c1ff..234fa2c 100644
--- a/clang/lib/AST/ByteCode/Descriptor.cpp
+++ b/clang/lib/AST/ByteCode/Descriptor.cpp
@@ -367,7 +367,7 @@ Descriptor::Descriptor(const DeclTy &D, const Record *R, MetadataSize MD,
Descriptor::Descriptor(const DeclTy &D, MetadataSize MD)
: Source(D), ElemSize(1), Size(1), MDSize(MD.value_or(0)),
AllocSize(MDSize), ElemRecord(nullptr), IsConst(true), IsMutable(false),
- IsTemporary(false), IsDummy(true) {
+ IsTemporary(false) {
assert(Source && "Missing source");
}
@@ -377,12 +377,14 @@ QualType Descriptor::getType() const {
if (const auto *D = asValueDecl())
return D->getType();
if (const auto *T = dyn_cast_if_present<TypeDecl>(asDecl()))
- return QualType(T->getTypeForDecl(), 0);
+ return T->getASTContext().getTypeDeclType(T);
  // The Source sometimes has a different type than the one
// we really save. Try to consult the Record first.
- if (isRecord())
- return QualType(ElemRecord->getDecl()->getTypeForDecl(), 0);
+ if (isRecord()) {
+ const RecordDecl *RD = ElemRecord->getDecl();
+ return RD->getASTContext().getCanonicalTagType(RD);
+ }
if (const auto *E = asExpr())
return E->getType();
llvm_unreachable("Invalid descriptor type");
@@ -453,7 +455,7 @@ SourceInfo Descriptor::getLoc() const {
}
bool Descriptor::hasTrivialDtor() const {
- if (isPrimitive() || isPrimitiveArray() || isDummy())
+ if (isPrimitive() || isPrimitiveArray())
return true;
if (isRecord()) {
@@ -462,8 +464,9 @@ bool Descriptor::hasTrivialDtor() const {
return !Dtor || Dtor->isTrivial();
}
+ if (!ElemDesc)
+ return true;
// Composite arrays.
- assert(ElemDesc);
return ElemDesc->hasTrivialDtor();
}
diff --git a/clang/lib/AST/ByteCode/Descriptor.h b/clang/lib/AST/ByteCode/Descriptor.h
index cd34e11..4a808c0 100644
--- a/clang/lib/AST/ByteCode/Descriptor.h
+++ b/clang/lib/AST/ByteCode/Descriptor.h
@@ -166,8 +166,6 @@ public:
const bool IsVolatile = false;
/// Flag indicating if the block is an array.
const bool IsArray = false;
- /// Flag indicating if this is a dummy descriptor.
- bool IsDummy = false;
bool IsConstexprUnknown = false;
/// Storage management methods.
@@ -203,9 +201,6 @@ public:
/// Allocates a dummy descriptor.
Descriptor(const DeclTy &D, MetadataSize MD = std::nullopt);
- /// Make this descriptor a dummy descriptor.
- void makeDummy() { IsDummy = true; }
-
QualType getType() const;
QualType getElemQualType() const;
QualType getDataType(const ASTContext &Ctx) const;
@@ -273,8 +268,6 @@ public:
bool isRecord() const { return !IsArray && ElemRecord; }
/// Checks if the descriptor is of a union.
bool isUnion() const;
- /// Checks if this is a dummy descriptor.
- bool isDummy() const { return IsDummy; }
/// Whether variables of this descriptor need their destructor called or not.
bool hasTrivialDtor() const;
diff --git a/clang/lib/AST/ByteCode/Disasm.cpp b/clang/lib/AST/ByteCode/Disasm.cpp
index 5049a65..8eb785d 100644
--- a/clang/lib/AST/ByteCode/Disasm.cpp
+++ b/clang/lib/AST/ByteCode/Disasm.cpp
@@ -338,7 +338,7 @@ LLVM_DUMP_METHOD void Program::dump(llvm::raw_ostream &OS) const {
}
OS << "\n";
- if (GP.isInitialized() && Desc->isPrimitive() && !Desc->isDummy()) {
+ if (GP.isInitialized() && Desc->isPrimitive() && !G->block()->isDummy()) {
OS << " ";
{
ColorScope SC(OS, true, {llvm::raw_ostream::BRIGHT_CYAN, false});
@@ -394,8 +394,6 @@ LLVM_DUMP_METHOD void Descriptor::dump(llvm::raw_ostream &OS) const {
else if (isUnknownSizeArray())
OS << " unknown-size-array";
- if (isDummy())
- OS << " dummy";
if (IsConstexprUnknown)
OS << " constexpr-unknown";
}
@@ -541,11 +539,12 @@ LLVM_DUMP_METHOD void Block::dump(llvm::raw_ostream &OS) const {
else
OS << "-\n";
OS << " Pointers: " << NPointers << "\n";
- OS << " Dead: " << IsDead << "\n";
+ OS << " Dead: " << isDead() << "\n";
OS << " Static: " << IsStatic << "\n";
- OS << " Extern: " << IsExtern << "\n";
+ OS << " Extern: " << isExtern() << "\n";
OS << " Initialized: " << IsInitialized << "\n";
- OS << " Weak: " << IsWeak << "\n";
+ OS << " Weak: " << isWeak() << "\n";
+ OS << " Dummy: " << isDummy() << '\n';
OS << " Dynamic: " << IsDynamic << "\n";
}
diff --git a/clang/lib/AST/ByteCode/DynamicAllocator.cpp b/clang/lib/AST/ByteCode/DynamicAllocator.cpp
index 9b8b664..2d62ce7 100644
--- a/clang/lib/AST/ByteCode/DynamicAllocator.cpp
+++ b/clang/lib/AST/ByteCode/DynamicAllocator.cpp
@@ -13,25 +13,6 @@
using namespace clang;
using namespace clang::interp;
-// FIXME: There is a peculiar problem with the way we track pointers
-// to blocks and the way we allocate dynamic memory.
-//
-// When we have code like this:
-// while (true) {
-// char *buffer = new char[1024];
-// delete[] buffer;
-// }
-//
-// We have a local variable 'buffer' pointing to the heap allocated memory.
-// When deallocating the memory via delete[], that local variable still
-// points to the memory, which means we will create a DeadBlock for it and move
-// it over to that block, essentially duplicating the allocation. Moving
-// the data is also slow.
-//
-// However, when we actually try to access the allocation after it has been
-// freed, we need the block to still exist (alive or dead) so we can tell
-// that it's a dynamic allocation.
-
DynamicAllocator::~DynamicAllocator() { cleanup(); }
void DynamicAllocator::cleanup() {
@@ -42,8 +23,11 @@ void DynamicAllocator::cleanup() {
for (auto &Iter : AllocationSites) {
auto &AllocSite = Iter.second;
for (auto &Alloc : AllocSite.Allocations) {
- Block *B = reinterpret_cast<Block *>(Alloc.Memory.get());
+ Block *B = Alloc.block();
+ assert(!B->isDead());
+ assert(B->isInitialized());
B->invokeDtor();
+
if (B->hasPointers()) {
while (B->Pointers) {
Pointer *Next = B->Pointers->asBlockPointer().Next;
@@ -89,6 +73,12 @@ Block *DynamicAllocator::allocate(const Descriptor *D, unsigned EvalID,
assert(D);
assert(D->asExpr());
+ // Garbage collection. Remove all dead allocations that don't have pointers to
+ // them anymore.
+ llvm::erase_if(DeadAllocations, [](Allocation &Alloc) -> bool {
+ return !Alloc.block()->hasPointers();
+ });
+
auto Memory =
std::make_unique<std::byte[]>(sizeof(Block) + D->getAllocSize());
auto *B = new (Memory.get()) Block(EvalID, D, /*isStatic=*/false);
@@ -128,23 +118,39 @@ bool DynamicAllocator::deallocate(const Expr *Source,
return false;
auto &Site = It->second;
- assert(Site.size() > 0);
+ assert(!Site.empty());
// Find the Block to delete.
- auto AllocIt = llvm::find_if(Site.Allocations, [&](const Allocation &A) {
- const Block *B = reinterpret_cast<const Block *>(A.Memory.get());
- return BlockToDelete == B;
+ auto *AllocIt = llvm::find_if(Site.Allocations, [&](const Allocation &A) {
+ return BlockToDelete == A.block();
});
assert(AllocIt != Site.Allocations.end());
- Block *B = reinterpret_cast<Block *>(AllocIt->Memory.get());
+ Block *B = AllocIt->block();
+ assert(B->isInitialized());
+ assert(!B->isDead());
B->invokeDtor();
- S.deallocate(B);
- Site.Allocations.erase(AllocIt);
+ // Almost all our dynamic allocations have a pointer pointing to them
+ // when we deallocate them, since otherwise we can't call delete() at all.
+ // This means that we would usually need to create DeadBlocks for all of them.
+ // To work around that, we instead mark them as dead without moving the data
+ // over to a DeadBlock and simply keep the block in a separate DeadAllocations
+ // list.
+ if (B->hasPointers()) {
+ B->AccessFlags |= Block::DeadFlag;
+ DeadAllocations.push_back(std::move(*AllocIt));
+ Site.Allocations.erase(AllocIt);
+
+ if (Site.size() == 0)
+ AllocationSites.erase(It);
+ return true;
+ }
- if (Site.size() == 0)
+ // Get rid of the allocation altogether.
+ Site.Allocations.erase(AllocIt);
+ if (Site.empty())
AllocationSites.erase(It);
return true;
diff --git a/clang/lib/AST/ByteCode/DynamicAllocator.h b/clang/lib/AST/ByteCode/DynamicAllocator.h
index cff09bf..31d0e58 100644
--- a/clang/lib/AST/ByteCode/DynamicAllocator.h
+++ b/clang/lib/AST/ByteCode/DynamicAllocator.h
@@ -43,6 +43,7 @@ private:
std::unique_ptr<std::byte[]> Memory;
Allocation(std::unique_ptr<std::byte[]> Memory)
: Memory(std::move(Memory)) {}
+ Block *block() const { return reinterpret_cast<Block *>(Memory.get()); }
};
struct AllocationSite {
@@ -55,6 +56,7 @@ private:
}
size_t size() const { return Allocations.size(); }
+ bool empty() const { return Allocations.empty(); }
};
public:
@@ -65,8 +67,6 @@ public:
void cleanup();
- unsigned getNumAllocations() const { return AllocationSites.size(); }
-
/// Allocate ONE element of the given descriptor.
Block *allocate(const Descriptor *D, unsigned EvalID, Form AllocForm);
/// Allocate \p NumElements primitive elements of the given type.
@@ -96,8 +96,13 @@ public:
return llvm::make_range(AllocationSites.begin(), AllocationSites.end());
}
+ bool hasAllocations() const { return !AllocationSites.empty(); }
+
private:
llvm::DenseMap<const Expr *, AllocationSite> AllocationSites;
+ // Allocations that have already been deallocated but had pointers
+ // to them.
+ llvm::SmallVector<Allocation> DeadAllocations;
using PoolAllocTy = llvm::BumpPtrAllocator;
PoolAllocTy DescAllocator;
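
The DynamicAllocator changes above avoid copying freed heap blocks into DeadBlocks: a block deleted while still referenced is only marked dead and parked in DeadAllocations, and the next allocate() sweeps entries that no longer have pointers. A toy analogue of that bookkeeping (an assumed simplification, not the interpreter's real types) looks like:

#include <cstdio>
#include <vector>

struct Alloc {
  int refCount = 0;   // stand-in for Block::hasPointers()
  bool dead = false;
};

struct Allocator {
  std::vector<Alloc *> live;
  std::vector<Alloc *> deadButReferenced;

  Alloc *allocate() {
    // Sweep: reclaim dead allocations nobody points at anymore.
    std::erase_if(deadButReferenced, [](Alloc *a) {
      if (a->refCount == 0) { delete a; return true; }
      return false;
    });
    Alloc *a = new Alloc();
    live.push_back(a);
    return a;
  }

  void deallocate(Alloc *a) {
    std::erase(live, a);
    if (a->refCount > 0) {
      a->dead = true;                 // keep the storage, just mark it dead
      deadButReferenced.push_back(a);
    } else {
      delete a;                       // nothing points here; free immediately
    }
  }

  ~Allocator() {
    for (Alloc *a : live) delete a;
    for (Alloc *a : deadButReferenced) delete a;
  }
};

int main() {
  Allocator A;
  Alloc *p = A.allocate();
  p->refCount = 1;                    // something still points at p
  A.deallocate(p);                    // parked, not destroyed
  p->refCount = 0;                    // the last pointer goes away
  A.allocate();                       // next allocation sweeps and frees p
  std::printf("parked allocations: %zu\n", A.deadButReferenced.size());
  return 0;
}
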
diff --git a/clang/lib/AST/ByteCode/EvalEmitter.cpp b/clang/lib/AST/ByteCode/EvalEmitter.cpp
index 976b7c0..68f46c1 100644
--- a/clang/lib/AST/ByteCode/EvalEmitter.cpp
+++ b/clang/lib/AST/ByteCode/EvalEmitter.cpp
@@ -98,10 +98,7 @@ bool EvalEmitter::interpretCall(const FunctionDecl *FD, const Expr *E) {
this->Params.insert({PD, {0, false}});
}
- if (!this->visit(E))
- return false;
- PrimType T = Ctx.classify(E).value_or(PT_Ptr);
- return this->emitPop(T, E);
+ return this->visitExpr(E, /*DestroyToplevelScope=*/false);
}
void EvalEmitter::emitLabel(LabelTy Label) { CurrentLabel = Label; }
@@ -292,7 +289,7 @@ bool EvalEmitter::emitGetLocal(uint32_t I, const SourceInfo &Info) {
Block *B = getLocal(I);
- if (!CheckLocalLoad(S, OpPC, Pointer(B)))
+ if (!CheckLocalLoad(S, OpPC, B))
return false;
S.Stk.push<T>(*reinterpret_cast<T *>(B->data()));
diff --git a/clang/lib/AST/ByteCode/Interp.cpp b/clang/lib/AST/ByteCode/Interp.cpp
index eb4e480..931d387 100644
--- a/clang/lib/AST/ByteCode/Interp.cpp
+++ b/clang/lib/AST/ByteCode/Interp.cpp
@@ -211,25 +211,26 @@ static void diagnoseNonConstVariable(InterpState &S, CodePtr OpPC,
S.Note(VD->getLocation(), diag::note_declared_at);
}
-static bool CheckTemporary(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+static bool CheckTemporary(InterpState &S, CodePtr OpPC, const Block *B,
AccessKinds AK) {
- if (auto ID = Ptr.getDeclID()) {
- if (!Ptr.isStaticTemporary())
+ if (B->getDeclID()) {
+ if (!(B->isStatic() && B->isTemporary()))
return true;
const auto *MTE = dyn_cast_if_present<MaterializeTemporaryExpr>(
- Ptr.getDeclDesc()->asExpr());
+ B->getDescriptor()->asExpr());
if (!MTE)
return true;
// FIXME(perf): Since we do this check on every Load from a static
// temporary, it might make sense to cache the value of the
// isUsableInConstantExpressions call.
- if (!MTE->isUsableInConstantExpressions(S.getASTContext()) &&
- Ptr.block()->getEvalID() != S.Ctx.getEvalID()) {
+ if (B->getEvalID() != S.Ctx.getEvalID() &&
+ !MTE->isUsableInConstantExpressions(S.getASTContext())) {
const SourceInfo &E = S.Current->getSource(OpPC);
S.FFDiag(E, diag::note_constexpr_access_static_temporary, 1) << AK;
- S.Note(Ptr.getDeclLoc(), diag::note_constexpr_temporary_here);
+ S.Note(B->getDescriptor()->getLocation(),
+ diag::note_constexpr_temporary_here);
return false;
}
}
@@ -517,7 +518,7 @@ bool CheckNull(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
bool CheckRange(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
AccessKinds AK) {
- if (!Ptr.isOnePastEnd())
+ if (!Ptr.isOnePastEnd() && !Ptr.isZeroSizeArray())
return true;
if (S.getLangOpts().CPlusPlus) {
const SourceInfo &Loc = S.Current->getSource(OpPC);
@@ -658,17 +659,19 @@ static bool CheckVolatile(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
return false;
}
-bool CheckInitialized(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
- AccessKinds AK) {
+bool DiagnoseUninitialized(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+ AccessKinds AK) {
assert(Ptr.isLive());
+ assert(!Ptr.isInitialized());
+ return DiagnoseUninitialized(S, OpPC, Ptr.isExtern(), Ptr.getDeclDesc(), AK);
+}
- if (Ptr.isInitialized())
- return true;
-
- if (Ptr.isExtern() && S.checkingPotentialConstantExpression())
+bool DiagnoseUninitialized(InterpState &S, CodePtr OpPC, bool Extern,
+ const Descriptor *Desc, AccessKinds AK) {
+ if (Extern && S.checkingPotentialConstantExpression())
return false;
- if (const auto *VD = Ptr.getDeclDesc()->asVarDecl();
+ if (const auto *VD = Desc->asVarDecl();
VD && (VD->isConstexpr() || VD->hasGlobalStorage())) {
if (VD == S.EvaluatingDecl &&
@@ -703,9 +706,9 @@ bool CheckInitialized(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
return false;
}
-static bool CheckLifetime(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+static bool CheckLifetime(InterpState &S, CodePtr OpPC, Lifetime LT,
AccessKinds AK) {
- if (Ptr.getLifetime() == Lifetime::Started)
+ if (LT == Lifetime::Started)
return true;
if (!S.checkingPotentialConstantExpression()) {
@@ -715,11 +718,11 @@ static bool CheckLifetime(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
return false;
}
-static bool CheckWeak(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
- if (!Ptr.isWeak())
+static bool CheckWeak(InterpState &S, CodePtr OpPC, const Block *B) {
+ if (!B->isWeak())
return true;
- const auto *VD = Ptr.getDeclDesc()->asVarDecl();
+ const auto *VD = B->getDescriptor()->asVarDecl();
assert(VD);
S.FFDiag(S.Current->getLocation(OpPC), diag::note_constexpr_var_init_weak)
<< VD;
@@ -732,57 +735,100 @@ static bool CheckWeak(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
// ones removed that are impossible on primitive global values.
// For example, since those can't be members of structs, they also can't
// be mutable.
-bool CheckGlobalLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
- if (!CheckExtern(S, OpPC, Ptr))
- return false;
- if (!CheckConstant(S, OpPC, Ptr))
- return false;
- if (!CheckDummy(S, OpPC, Ptr, AK_Read))
- return false;
- if (!CheckInitialized(S, OpPC, Ptr, AK_Read))
- return false;
- if (!CheckTemporary(S, OpPC, Ptr, AK_Read))
+bool CheckGlobalLoad(InterpState &S, CodePtr OpPC, const Block *B) {
+ const auto &Desc =
+ *reinterpret_cast<const GlobalInlineDescriptor *>(B->rawData());
+ if (!B->isAccessible()) {
+ if (!CheckExtern(S, OpPC, Pointer(const_cast<Block *>(B))))
+ return false;
+ if (!CheckDummy(S, OpPC, B, AK_Read))
+ return false;
+ return CheckWeak(S, OpPC, B);
+ }
+
+ if (!CheckConstant(S, OpPC, B->getDescriptor()))
return false;
- if (!CheckWeak(S, OpPC, Ptr))
+ if (Desc.InitState != GlobalInitState::Initialized)
+ return DiagnoseUninitialized(S, OpPC, B->isExtern(), B->getDescriptor(),
+ AK_Read);
+ if (!CheckTemporary(S, OpPC, B, AK_Read))
return false;
- if (!CheckVolatile(S, OpPC, Ptr, AK_Read))
+ if (B->getDescriptor()->IsVolatile) {
+ if (!S.getLangOpts().CPlusPlus)
+ return Invalid(S, OpPC);
+
+ const ValueDecl *D = B->getDescriptor()->asValueDecl();
+ S.FFDiag(S.Current->getLocation(OpPC),
+ diag::note_constexpr_access_volatile_obj, 1)
+ << AK_Read << 1 << D;
+ S.Note(D->getLocation(), diag::note_constexpr_volatile_here) << 1;
return false;
+ }
return true;
}
// Similarly, for local loads.
-bool CheckLocalLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
- if (!CheckLifetime(S, OpPC, Ptr, AK_Read))
- return false;
- if (!CheckInitialized(S, OpPC, Ptr, AK_Read))
- return false;
- if (!CheckVolatile(S, OpPC, Ptr, AK_Read))
+bool CheckLocalLoad(InterpState &S, CodePtr OpPC, const Block *B) {
+ assert(!B->isExtern());
+ const auto &Desc = *reinterpret_cast<const InlineDescriptor *>(B->rawData());
+ if (!CheckLifetime(S, OpPC, Desc.LifeState, AK_Read))
+ return false;
+ if (!Desc.IsInitialized)
+ return DiagnoseUninitialized(S, OpPC, /*Extern=*/false, B->getDescriptor(),
+ AK_Read);
+ if (B->getDescriptor()->IsVolatile) {
+ if (!S.getLangOpts().CPlusPlus)
+ return Invalid(S, OpPC);
+
+ const ValueDecl *D = B->getDescriptor()->asValueDecl();
+ S.FFDiag(S.Current->getLocation(OpPC),
+ diag::note_constexpr_access_volatile_obj, 1)
+ << AK_Read << 1 << D;
+ S.Note(D->getLocation(), diag::note_constexpr_volatile_here) << 1;
return false;
+ }
return true;
}
bool CheckLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
AccessKinds AK) {
- if (!CheckLive(S, OpPC, Ptr, AK))
- return false;
- if (!CheckExtern(S, OpPC, Ptr))
+ if (!Ptr.isBlockPointer()) {
+ if (Ptr.isZero()) {
+ const auto &Src = S.Current->getSource(OpPC);
+
+ if (Ptr.isField())
+ S.FFDiag(Src, diag::note_constexpr_null_subobject) << CSK_Field;
+ else
+ S.FFDiag(Src, diag::note_constexpr_access_null) << AK;
+ }
return false;
+ }
+
+ // Block pointers are the only ones we can actually read from.
+ if (!Ptr.block()->isAccessible()) {
+ if (!CheckLive(S, OpPC, Ptr, AK))
+ return false;
+ if (!CheckExtern(S, OpPC, Ptr))
+ return false;
+ if (!CheckDummy(S, OpPC, Ptr.block(), AK))
+ return false;
+ if (!CheckWeak(S, OpPC, Ptr.block()))
+ return false;
+ }
+
if (!CheckConstant(S, OpPC, Ptr))
return false;
- if (!CheckDummy(S, OpPC, Ptr, AK))
- return false;
if (!CheckRange(S, OpPC, Ptr, AK))
return false;
if (!CheckActive(S, OpPC, Ptr, AK))
return false;
- if (!CheckLifetime(S, OpPC, Ptr, AK))
- return false;
- if (!CheckInitialized(S, OpPC, Ptr, AK))
- return false;
- if (!CheckTemporary(S, OpPC, Ptr, AK))
+ if (!CheckLifetime(S, OpPC, Ptr.getLifetime(), AK))
return false;
- if (!CheckWeak(S, OpPC, Ptr))
+ if (!Ptr.isInitialized())
+ return DiagnoseUninitialized(S, OpPC, Ptr, AK);
+ if (!CheckTemporary(S, OpPC, Ptr.block(), AK))
return false;
+
if (!CheckMutable(S, OpPC, Ptr))
return false;
if (!CheckVolatile(S, OpPC, Ptr, AK))
@@ -793,26 +839,39 @@ bool CheckLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
/// This is not used by any of the opcodes directly. It's used by
/// EvalEmitter to do the final lvalue-to-rvalue conversion.
bool CheckFinalLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
- if (!CheckLive(S, OpPC, Ptr, AK_Read))
+ if (!Ptr.isBlockPointer()) {
+ if (Ptr.isZero()) {
+ const auto &Src = S.Current->getSource(OpPC);
+
+ if (Ptr.isField())
+ S.FFDiag(Src, diag::note_constexpr_null_subobject) << CSK_Field;
+ else
+ S.FFDiag(Src, diag::note_constexpr_access_null) << AK_Read;
+ }
return false;
+ }
+
+ assert(Ptr.isBlockPointer());
+ if (!Ptr.block()->isAccessible()) {
+ if (!CheckLive(S, OpPC, Ptr, AK_Read))
+ return false;
+ if (!CheckExtern(S, OpPC, Ptr))
+ return false;
+ if (!CheckDummy(S, OpPC, Ptr.block(), AK_Read))
+ return false;
+ return CheckWeak(S, OpPC, Ptr.block());
+ }
+
if (!CheckConstant(S, OpPC, Ptr))
return false;
- if (!CheckDummy(S, OpPC, Ptr, AK_Read))
- return false;
- if (!CheckExtern(S, OpPC, Ptr))
- return false;
- if (!CheckRange(S, OpPC, Ptr, AK_Read))
- return false;
if (!CheckActive(S, OpPC, Ptr, AK_Read))
return false;
- if (!CheckLifetime(S, OpPC, Ptr, AK_Read))
+ if (!CheckLifetime(S, OpPC, Ptr.getLifetime(), AK_Read))
return false;
- if (!CheckInitialized(S, OpPC, Ptr, AK_Read))
- return false;
- if (!CheckTemporary(S, OpPC, Ptr, AK_Read))
- return false;
- if (!CheckWeak(S, OpPC, Ptr))
+ if (!Ptr.isInitialized())
+ return DiagnoseUninitialized(S, OpPC, Ptr, AK_Read);
+ if (!CheckTemporary(S, OpPC, Ptr.block(), AK_Read))
return false;
if (!CheckMutable(S, OpPC, Ptr))
return false;
@@ -820,13 +879,17 @@ bool CheckFinalLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
}
bool CheckStore(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
- if (!CheckLive(S, OpPC, Ptr, AK_Assign))
- return false;
- if (!CheckDummy(S, OpPC, Ptr, AK_Assign))
- return false;
- if (!CheckLifetime(S, OpPC, Ptr, AK_Assign))
+ if (!Ptr.isBlockPointer())
return false;
- if (!CheckExtern(S, OpPC, Ptr))
+
+ if (!Ptr.block()->isAccessible()) {
+ if (!CheckLive(S, OpPC, Ptr, AK_Assign))
+ return false;
+ if (!CheckExtern(S, OpPC, Ptr))
+ return false;
+ return CheckDummy(S, OpPC, Ptr.block(), AK_Assign);
+ }
+ if (!CheckLifetime(S, OpPC, Ptr.getLifetime(), AK_Assign))
return false;
if (!CheckRange(S, OpPC, Ptr, AK_Assign))
return false;
@@ -1098,13 +1161,11 @@ bool CheckDeclRef(InterpState &S, CodePtr OpPC, const DeclRefExpr *DR) {
return diagnoseUnknownDecl(S, OpPC, D);
}
-bool CheckDummy(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
- AccessKinds AK) {
- if (!Ptr.isDummy())
+bool CheckDummy(InterpState &S, CodePtr OpPC, const Block *B, AccessKinds AK) {
+ if (!B->isDummy())
return true;
- const Descriptor *Desc = Ptr.getDeclDesc();
- const ValueDecl *D = Desc->asValueDecl();
+ const ValueDecl *D = B->getDescriptor()->asValueDecl();
if (!D)
return false;
@@ -1426,7 +1487,7 @@ static bool checkConstructor(InterpState &S, CodePtr OpPC, const Function *Func,
bool CheckDestructor(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
if (!CheckLive(S, OpPC, Ptr, AK_Destroy))
return false;
- if (!CheckTemporary(S, OpPC, Ptr, AK_Destroy))
+ if (!CheckTemporary(S, OpPC, Ptr.block(), AK_Destroy))
return false;
if (!CheckRange(S, OpPC, Ptr, AK_Destroy))
return false;
@@ -1620,8 +1681,17 @@ bool CallVirt(InterpState &S, CodePtr OpPC, const Function *Func,
const auto *StaticDecl = cast<CXXRecordDecl>(Func->getParentDecl());
const auto *InitialFunction = cast<CXXMethodDecl>(Callee);
- const CXXMethodDecl *Overrider = S.getContext().getOverridingFunction(
- DynamicDecl, StaticDecl, InitialFunction);
+ const CXXMethodDecl *Overrider;
+
+ if (StaticDecl != DynamicDecl) {
+ if (!DynamicDecl->isDerivedFrom(StaticDecl))
+ return false;
+ Overrider = S.getContext().getOverridingFunction(DynamicDecl, StaticDecl,
+ InitialFunction);
+
+ } else {
+ Overrider = InitialFunction;
+ }
if (Overrider != InitialFunction) {
// DR1872: An instantiated virtual constexpr function can't be called in a
@@ -1749,7 +1819,7 @@ static void startLifetimeRecurse(const Pointer &Ptr) {
bool StartLifetime(InterpState &S, CodePtr OpPC) {
const auto &Ptr = S.Stk.peek<Pointer>();
- if (!CheckDummy(S, OpPC, Ptr, AK_Destroy))
+ if (Ptr.isBlockPointer() && !CheckDummy(S, OpPC, Ptr.block(), AK_Destroy))
return false;
startLifetimeRecurse(Ptr.narrow());
return true;
@@ -1780,7 +1850,7 @@ static void endLifetimeRecurse(const Pointer &Ptr) {
/// Ends the lifetime of the peek'd pointer.
bool EndLifetime(InterpState &S, CodePtr OpPC) {
const auto &Ptr = S.Stk.peek<Pointer>();
- if (!CheckDummy(S, OpPC, Ptr, AK_Destroy))
+ if (Ptr.isBlockPointer() && !CheckDummy(S, OpPC, Ptr.block(), AK_Destroy))
return false;
endLifetimeRecurse(Ptr.narrow());
return true;
@@ -1789,7 +1859,7 @@ bool EndLifetime(InterpState &S, CodePtr OpPC) {
/// Ends the lifetime of the pop'd pointer.
bool EndLifetimePop(InterpState &S, CodePtr OpPC) {
const auto &Ptr = S.Stk.pop<Pointer>();
- if (!CheckDummy(S, OpPC, Ptr, AK_Destroy))
+ if (Ptr.isBlockPointer() && !CheckDummy(S, OpPC, Ptr.block(), AK_Destroy))
return false;
endLifetimeRecurse(Ptr.narrow());
return true;
@@ -1802,26 +1872,32 @@ bool CheckNewTypeMismatch(InterpState &S, CodePtr OpPC, const Expr *E,
if (Ptr.inUnion() && Ptr.getBase().getRecord()->isUnion())
Ptr.activate();
+ if (!Ptr.isBlockPointer())
+ return false;
+
// Similar to CheckStore(), but with the additional CheckTemporary() call and
// the AccessKinds are different.
- if (!CheckTemporary(S, OpPC, Ptr, AK_Construct))
- return false;
- if (!CheckLive(S, OpPC, Ptr, AK_Construct))
- return false;
- if (!CheckDummy(S, OpPC, Ptr, AK_Construct))
+
+ if (!Ptr.block()->isAccessible()) {
+ if (!CheckExtern(S, OpPC, Ptr))
+ return false;
+ if (!CheckLive(S, OpPC, Ptr, AK_Construct))
+ return false;
+ return CheckDummy(S, OpPC, Ptr.block(), AK_Construct);
+ }
+ if (!CheckTemporary(S, OpPC, Ptr.block(), AK_Construct))
return false;
// CheckLifetime for this and all base pointers.
for (Pointer P = Ptr;;) {
- if (!CheckLifetime(S, OpPC, P, AK_Construct))
+ if (!CheckLifetime(S, OpPC, P.getLifetime(), AK_Construct))
return false;
if (P.isRoot())
break;
P = P.getBase();
}
- if (!CheckExtern(S, OpPC, Ptr))
- return false;
+
if (!CheckRange(S, OpPC, Ptr, AK_Construct))
return false;
if (!CheckGlobal(S, OpPC, Ptr))
@@ -2011,7 +2087,7 @@ bool GetTypeidPtr(InterpState &S, CodePtr OpPC, const Type *TypeInfoType) {
return false;
// Pick the most-derived type.
- const Type *T = P.getDeclPtr().getType().getTypePtr();
+ CanQualType T = P.getDeclPtr().getType()->getCanonicalTypeUnqualified();
// ... unless we're currently constructing this object.
// FIXME: We have a similar check to this in more places.
if (S.Current->getFunction()) {
@@ -2019,14 +2095,14 @@ bool GetTypeidPtr(InterpState &S, CodePtr OpPC, const Type *TypeInfoType) {
if (const Function *Func = Frame->getFunction();
Func && (Func->isConstructor() || Func->isDestructor()) &&
P.block() == Frame->getThis().block()) {
- T = Func->getParentDecl()->getTypeForDecl();
+ T = S.getContext().getASTContext().getCanonicalTagType(
+ Func->getParentDecl());
break;
}
}
}
- S.Stk.push<Pointer>(T->getCanonicalTypeUnqualified().getTypePtr(),
- TypeInfoType);
+ S.Stk.push<Pointer>(T->getTypePtr(), TypeInfoType);
return true;
}
diff --git a/clang/lib/AST/ByteCode/Interp.h b/clang/lib/AST/ByteCode/Interp.h
index 8a28106..75901e5 100644
--- a/clang/lib/AST/ByteCode/Interp.h
+++ b/clang/lib/AST/ByteCode/Interp.h
@@ -51,8 +51,7 @@ bool CheckLive(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
AccessKinds AK);
/// Checks if a pointer is a dummy pointer.
-bool CheckDummy(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
- AccessKinds AK);
+bool CheckDummy(InterpState &S, CodePtr OpPC, const Block *B, AccessKinds AK);
/// Checks if a pointer is null.
bool CheckNull(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
@@ -89,11 +88,14 @@ bool CheckLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
AccessKinds AK = AK_Read);
bool CheckFinalLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
-bool CheckInitialized(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
- AccessKinds AK);
+bool DiagnoseUninitialized(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+ AccessKinds AK);
+bool DiagnoseUninitialized(InterpState &S, CodePtr OpPC, bool Extern,
+ const Descriptor *Desc, AccessKinds AK);
+
/// Checks a direct load of a primitive value from a global or local variable.
-bool CheckGlobalLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
-bool CheckLocalLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
+bool CheckGlobalLoad(InterpState &S, CodePtr OpPC, const Block *B);
+bool CheckLocalLoad(InterpState &S, CodePtr OpPC, const Block *B);
/// Checks if a value can be stored in a block.
bool CheckStore(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
@@ -1351,10 +1353,10 @@ inline bool ConstFloat(InterpState &S, CodePtr OpPC, const Floating &F) {
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool GetLocal(InterpState &S, CodePtr OpPC, uint32_t I) {
- const Pointer &Ptr = S.Current->getLocalPointer(I);
- if (!CheckLocalLoad(S, OpPC, Ptr))
+ const Block *B = S.Current->getLocalBlock(I);
+ if (!CheckLocalLoad(S, OpPC, B))
return false;
- S.Stk.push<T>(Ptr.deref<T>());
+ S.Stk.push<T>(B->deref<T>());
return true;
}
@@ -1465,22 +1467,26 @@ bool SetThisField(InterpState &S, CodePtr OpPC, uint32_t I) {
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool GetGlobal(InterpState &S, CodePtr OpPC, uint32_t I) {
- const Pointer &Ptr = S.P.getPtrGlobal(I);
+ const Block *B = S.P.getGlobal(I);
- if (!CheckGlobalLoad(S, OpPC, Ptr))
+ if (!CheckGlobalLoad(S, OpPC, B))
return false;
- S.Stk.push<T>(Ptr.deref<T>());
+ S.Stk.push<T>(B->deref<T>());
return true;
}
/// Same as GetGlobal, but without the checks.
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool GetGlobalUnchecked(InterpState &S, CodePtr OpPC, uint32_t I) {
- const Pointer &Ptr = S.P.getPtrGlobal(I);
- if (!CheckInitialized(S, OpPC, Ptr, AK_Read))
- return false;
- S.Stk.push<T>(Ptr.deref<T>());
+ const Block *B = S.P.getGlobal(I);
+ const auto &Desc =
+ *reinterpret_cast<const GlobalInlineDescriptor *>(B->rawData());
+ if (Desc.InitState != GlobalInitState::Initialized)
+ return DiagnoseUninitialized(S, OpPC, B->isExtern(), B->getDescriptor(),
+ AK_Read);
+
+ S.Stk.push<T>(B->deref<T>());
return true;
}
@@ -1764,10 +1770,7 @@ inline bool GetPtrDerivedPop(InterpState &S, CodePtr OpPC, uint32_t Off,
const Record *TargetRecord = Ptr.atFieldSub(Off).getRecord();
assert(TargetRecord);
- if (TargetRecord->getDecl()
- ->getTypeForDecl()
- ->getAsCXXRecordDecl()
- ->getCanonicalDecl() !=
+ if (TargetRecord->getDecl()->getCanonicalDecl() !=
TargetType->getAsCXXRecordDecl()->getCanonicalDecl()) {
QualType MostDerivedType = Ptr.getDeclDesc()->getType();
S.CCEDiag(S.Current->getSource(OpPC), diag::note_constexpr_invalid_downcast)
@@ -2351,8 +2354,8 @@ static inline bool IncDecPtrHelper(InterpState &S, CodePtr OpPC,
static inline bool IncPtr(InterpState &S, CodePtr OpPC) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
- if (!CheckInitialized(S, OpPC, Ptr, AK_Increment))
- return false;
+ if (!Ptr.isInitialized())
+ return DiagnoseUninitialized(S, OpPC, Ptr, AK_Increment);
return IncDecPtrHelper<ArithOp::Add>(S, OpPC, Ptr);
}
@@ -2360,8 +2363,8 @@ static inline bool IncPtr(InterpState &S, CodePtr OpPC) {
static inline bool DecPtr(InterpState &S, CodePtr OpPC) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
- if (!CheckInitialized(S, OpPC, Ptr, AK_Decrement))
- return false;
+ if (!Ptr.isInitialized())
+ return DiagnoseUninitialized(S, OpPC, Ptr, AK_Decrement);
return IncDecPtrHelper<ArithOp::Sub>(S, OpPC, Ptr);
}
@@ -3195,6 +3198,9 @@ inline bool GetMemberPtr(InterpState &S, CodePtr OpPC, const ValueDecl *D) {
inline bool GetMemberPtrBase(InterpState &S, CodePtr OpPC) {
const auto &MP = S.Stk.pop<MemberPointer>();
+ if (!MP.isBaseCastPossible())
+ return false;
+
S.Stk.push<Pointer>(MP.getBase());
return true;
}
diff --git a/clang/lib/AST/ByteCode/InterpBlock.cpp b/clang/lib/AST/ByteCode/InterpBlock.cpp
index 963b54e..69221d8 100644
--- a/clang/lib/AST/ByteCode/InterpBlock.cpp
+++ b/clang/lib/AST/ByteCode/InterpBlock.cpp
@@ -64,7 +64,7 @@ void Block::removePointer(Pointer *P) {
}
void Block::cleanup() {
- if (Pointers == nullptr && IsDead)
+ if (Pointers == nullptr && !IsDynamic && isDead())
(reinterpret_cast<DeadBlock *>(this + 1) - 1)->free();
}
@@ -113,8 +113,8 @@ bool Block::hasPointer(const Pointer *P) const {
#endif
DeadBlock::DeadBlock(DeadBlock *&Root, Block *Blk)
- : Root(Root), B(~0u, Blk->Desc, Blk->IsStatic, Blk->IsExtern, Blk->IsWeak,
- /*isDead=*/true) {
+ : Root(Root), B(~0u, Blk->Desc, Blk->isExtern(), Blk->IsStatic,
+ Blk->isWeak(), Blk->isDummy(), /*IsDead=*/true) {
// Add the block to the chain of dead blocks.
if (Root)
Root->Prev = this;
@@ -133,8 +133,7 @@ DeadBlock::DeadBlock(DeadBlock *&Root, Block *Blk)
}
void DeadBlock::free() {
- if (B.IsInitialized)
- B.invokeDtor();
+ assert(!B.isInitialized());
if (Prev)
Prev->Next = Next;
diff --git a/clang/lib/AST/ByteCode/InterpBlock.h b/clang/lib/AST/ByteCode/InterpBlock.h
index 5162223..7ded1e8 100644
--- a/clang/lib/AST/ByteCode/InterpBlock.h
+++ b/clang/lib/AST/ByteCode/InterpBlock.h
@@ -42,21 +42,32 @@ enum PrimType : unsigned;
/// the data size and the metadata size.
///
class Block final {
+private:
+ static constexpr uint8_t ExternFlag = 1 << 0;
+ static constexpr uint8_t DeadFlag = 1 << 1;
+ static constexpr uint8_t WeakFlag = 1 << 2;
+ static constexpr uint8_t DummyFlag = 1 << 3;
+
public:
/// Creates a new block.
Block(unsigned EvalID, const std::optional<unsigned> &DeclID,
const Descriptor *Desc, bool IsStatic = false, bool IsExtern = false,
- bool IsWeak = false)
- : EvalID(EvalID), DeclID(DeclID), IsStatic(IsStatic), IsExtern(IsExtern),
- IsDynamic(false), IsWeak(IsWeak), Desc(Desc) {
+ bool IsWeak = false, bool IsDummy = false)
+ : Desc(Desc), DeclID(DeclID), EvalID(EvalID), IsStatic(IsStatic) {
assert(Desc);
+ AccessFlags |= (ExternFlag * IsExtern);
+ AccessFlags |= (WeakFlag * IsWeak);
+ AccessFlags |= (DummyFlag * IsDummy);
}
Block(unsigned EvalID, const Descriptor *Desc, bool IsStatic = false,
- bool IsExtern = false, bool IsWeak = false)
- : EvalID(EvalID), DeclID((unsigned)-1), IsStatic(IsStatic),
- IsExtern(IsExtern), IsDynamic(false), IsWeak(IsWeak), Desc(Desc) {
+ bool IsExtern = false, bool IsWeak = false, bool IsDummy = false)
+ : Desc(Desc), DeclID((unsigned)-1), EvalID(EvalID), IsStatic(IsStatic),
+ IsDynamic(false) {
assert(Desc);
+ AccessFlags |= (ExternFlag * IsExtern);
+ AccessFlags |= (WeakFlag * IsWeak);
+ AccessFlags |= (DummyFlag * IsDummy);
}
/// Returns the block's descriptor.
@@ -64,13 +75,15 @@ public:
/// Checks if the block has any live pointers.
bool hasPointers() const { return Pointers; }
/// Checks if the block is extern.
- bool isExtern() const { return IsExtern; }
+ bool isExtern() const { return AccessFlags & ExternFlag; }
/// Checks if the block has static storage duration.
bool isStatic() const { return IsStatic; }
/// Checks if the block is temporary.
bool isTemporary() const { return Desc->IsTemporary; }
- bool isWeak() const { return IsWeak; }
+ bool isWeak() const { return AccessFlags & WeakFlag; }
bool isDynamic() const { return IsDynamic; }
+ bool isDummy() const { return AccessFlags & DummyFlag; }
+ bool isDead() const { return AccessFlags & DeadFlag; }
/// Returns the size of the block.
unsigned getSize() const { return Desc->getAllocSize(); }
/// Returns the declaration ID.
@@ -103,6 +116,10 @@ public:
return reinterpret_cast<const std::byte *>(this) + sizeof(Block);
}
+ template <typename T> T deref() const {
+ return *reinterpret_cast<const T *>(data());
+ }
+
/// Invokes the constructor.
void invokeCtor() {
assert(!IsInitialized);
@@ -126,6 +143,8 @@ public:
void dump() const { dump(llvm::errs()); }
void dump(llvm::raw_ostream &OS) const;
+ bool isAccessible() const { return AccessFlags == 0; }
+
private:
friend class Pointer;
friend class DeadBlock;
@@ -133,10 +152,13 @@ private:
friend class DynamicAllocator;
Block(unsigned EvalID, const Descriptor *Desc, bool IsExtern, bool IsStatic,
- bool IsWeak, bool IsDead)
- : EvalID(EvalID), IsStatic(IsStatic), IsExtern(IsExtern), IsDead(true),
- IsDynamic(false), IsWeak(IsWeak), Desc(Desc) {
+ bool IsWeak, bool IsDummy, bool IsDead)
+ : Desc(Desc), EvalID(EvalID), IsStatic(IsStatic) {
assert(Desc);
+ AccessFlags |= (ExternFlag * IsExtern);
+ AccessFlags |= (DeadFlag * IsDead);
+ AccessFlags |= (WeakFlag * IsWeak);
+ AccessFlags |= (DummyFlag * IsDummy);
}
/// Deletes a dead block at the end of its lifetime.
@@ -150,27 +172,23 @@ private:
bool hasPointer(const Pointer *P) const;
#endif
- const unsigned EvalID = ~0u;
+ /// Pointer to the stack slot descriptor.
+ const Descriptor *Desc;
/// Start of the chain of pointers.
Pointer *Pointers = nullptr;
/// Unique identifier of the declaration.
std::optional<unsigned> DeclID;
+ const unsigned EvalID = ~0u;
/// Flag indicating if the block has static storage duration.
bool IsStatic = false;
- /// Flag indicating if the block is an extern.
- bool IsExtern = false;
- /// Flag indicating if the pointer is dead. This is only ever
- /// set once, when converting the Block to a DeadBlock.
- bool IsDead = false;
/// Flag indicating if the block contents have been initialized
/// via invokeCtor.
bool IsInitialized = false;
/// Flag indicating if this block has been allocated via dynamic
/// memory allocation (e.g. malloc).
bool IsDynamic = false;
- bool IsWeak = false;
- /// Pointer to the stack slot descriptor.
- const Descriptor *Desc;
+ /// AccessFlags containing IsExtern, IsDead, IsWeak, and IsDummy bits.
+ uint8_t AccessFlags = 0;
};
/// Descriptor for a dead block.
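
The Block changes above fold the IsExtern/IsDead/IsWeak/IsDummy booleans into a single byte so that the common case is one `AccessFlags == 0` comparison in isAccessible(). A rough self-contained illustration of the pattern (an assumed simplification, not the real Block layout):

#include <cstdint>
#include <cstdio>

class Flags {
  static constexpr uint8_t Extern = 1 << 0;
  static constexpr uint8_t Dead   = 1 << 1;
  static constexpr uint8_t Weak   = 1 << 2;
  static constexpr uint8_t Dummy  = 1 << 3;
  uint8_t Bits = 0;

public:
  Flags(bool IsExtern, bool IsWeak, bool IsDummy) {
    // Branch-free packing, same shape as `AccessFlags |= (ExternFlag * IsExtern)`.
    Bits |= Extern * IsExtern;
    Bits |= Weak * IsWeak;
    Bits |= Dummy * IsDummy;
  }
  void markDead() { Bits |= Dead; }

  bool isExtern() const { return Bits & Extern; }
  bool isDead() const { return Bits & Dead; }
  bool isWeak() const { return Bits & Weak; }
  bool isDummy() const { return Bits & Dummy; }
  // Hot path: no slow checks needed when no flag is set.
  bool isAccessible() const { return Bits == 0; }
};

int main() {
  Flags F(/*IsExtern=*/false, /*IsWeak=*/false, /*IsDummy=*/false);
  std::printf("accessible: %d\n", F.isAccessible());  // 1
  F.markDead();
  std::printf("accessible: %d\n", F.isAccessible());  // 0
  return 0;
}
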
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index f908d02..14f6929 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -276,7 +276,7 @@ static bool interp__builtin_strlen(InterpState &S, CodePtr OpPC,
if (!CheckLive(S, OpPC, StrPtr, AK_Read))
return false;
- if (!CheckDummy(S, OpPC, StrPtr, AK_Read))
+ if (!CheckDummy(S, OpPC, StrPtr.block(), AK_Read))
return false;
assert(StrPtr.getFieldDesc()->isPrimitiveArray());
@@ -1544,8 +1544,7 @@ static bool interp__builtin_operator_new(InterpState &S, CodePtr OpPC,
// Composite arrays
if (IsArray) {
const Descriptor *Desc =
- S.P.createDescriptor(NewCall, ElemType.getTypePtr(),
- IsArray ? std::nullopt : Descriptor::InlineDescMD);
+ S.P.createDescriptor(NewCall, ElemType.getTypePtr(), std::nullopt);
Block *B =
Allocator.allocate(Desc, NumElems.getZExtValue(), S.Ctx.getEvalID(),
DynamicAllocator::Form::Operator);
@@ -1558,9 +1557,8 @@ static bool interp__builtin_operator_new(InterpState &S, CodePtr OpPC,
QualType AllocType = S.getASTContext().getConstantArrayType(
ElemType, NumElems, nullptr, ArraySizeModifier::Normal, 0);
- const Descriptor *Desc =
- S.P.createDescriptor(NewCall, AllocType.getTypePtr(),
- IsArray ? std::nullopt : Descriptor::InlineDescMD);
+ const Descriptor *Desc = S.P.createDescriptor(NewCall, AllocType.getTypePtr(),
+ Descriptor::InlineDescMD);
Block *B = Allocator.allocate(Desc, S.getContext().getEvalID(),
DynamicAllocator::Form::Operator);
assert(B);
@@ -1785,6 +1783,13 @@ static bool interp__builtin_memcpy(InterpState &S, CodePtr OpPC,
return false;
QualType DestElemType = getElemType(DestPtr);
+ if (DestElemType->isIncompleteType()) {
+ S.FFDiag(S.Current->getSource(OpPC),
+ diag::note_constexpr_ltor_incomplete_type)
+ << DestElemType;
+ return false;
+ }
+
size_t RemainingDestElems;
if (DestPtr.getFieldDesc()->isArray()) {
RemainingDestElems = DestPtr.isUnknownSizeArray()
@@ -2232,7 +2237,7 @@ static bool interp__builtin_is_within_lifetime(InterpState &S, CodePtr OpPC,
return false;
if (!CheckMutable(S, OpPC, Ptr))
return false;
- if (!CheckDummy(S, OpPC, Ptr, AK_Read))
+ if (!CheckDummy(S, OpPC, Ptr.block(), AK_Read))
return false;
}
@@ -2754,7 +2759,7 @@ bool InterpretOffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E,
const RecordType *RT = CurrentType->getAs<RecordType>();
if (!RT)
return false;
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
if (RD->isInvalidDecl())
return false;
const ASTRecordLayout &RL = S.getASTContext().getASTRecordLayout(RD);
@@ -2787,7 +2792,7 @@ bool InterpretOffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E,
const RecordType *RT = CurrentType->getAs<RecordType>();
if (!RT)
return false;
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
if (RD->isInvalidDecl())
return false;
const ASTRecordLayout &RL = S.getASTContext().getASTRecordLayout(RD);
@@ -2799,7 +2804,8 @@ bool InterpretOffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E,
return false;
// Add the offset to the base.
- Result += RL.getBaseClassOffset(cast<CXXRecordDecl>(BaseRT->getDecl()));
+ Result += RL.getBaseClassOffset(cast<CXXRecordDecl>(
+ BaseRT->getOriginalDecl()->getDefinitionOrSelf()));
break;
}
case OffsetOfNode::Identifier:
diff --git a/clang/lib/AST/ByteCode/InterpFrame.cpp b/clang/lib/AST/ByteCode/InterpFrame.cpp
index 9342192..18400b10 100644
--- a/clang/lib/AST/ByteCode/InterpFrame.cpp
+++ b/clang/lib/AST/ByteCode/InterpFrame.cpp
@@ -169,7 +169,7 @@ void InterpFrame::describe(llvm::raw_ostream &OS) const {
} else if (const auto *M = dyn_cast<CXXMethodDecl>(F)) {
print(OS, This, S.getASTContext(),
S.getASTContext().getLValueReferenceType(
- S.getASTContext().getRecordType(M->getParent())));
+ S.getASTContext().getCanonicalTagType(M->getParent())));
OS << ".";
}
}
@@ -231,6 +231,10 @@ Pointer InterpFrame::getLocalPointer(unsigned Offset) const {
return Pointer(localBlock(Offset));
}
+Block *InterpFrame::getLocalBlock(unsigned Offset) const {
+ return localBlock(Offset);
+}
+
Pointer InterpFrame::getParamPointer(unsigned Off) {
// Return the block if it was created previously.
if (auto Pt = Params.find(Off); Pt != Params.end())
diff --git a/clang/lib/AST/ByteCode/InterpFrame.h b/clang/lib/AST/ByteCode/InterpFrame.h
index cfebe93..4be5391 100644
--- a/clang/lib/AST/ByteCode/InterpFrame.h
+++ b/clang/lib/AST/ByteCode/InterpFrame.h
@@ -86,6 +86,7 @@ public:
/// Returns a pointer to a local variables.
Pointer getLocalPointer(unsigned Offset) const;
+ Block *getLocalBlock(unsigned Offset) const;
/// Returns the value of an argument.
template <typename T> const T &getParam(unsigned Offset) const {
diff --git a/clang/lib/AST/ByteCode/InterpState.cpp b/clang/lib/AST/ByteCode/InterpState.cpp
index a06b125..b5f0f9a 100644
--- a/clang/lib/AST/ByteCode/InterpState.cpp
+++ b/clang/lib/AST/ByteCode/InterpState.cpp
@@ -76,8 +76,9 @@ bool InterpState::reportOverflow(const Expr *E, const llvm::APSInt &Value) {
void InterpState::deallocate(Block *B) {
assert(B);
- const Descriptor *Desc = B->getDescriptor();
- assert(Desc);
+ assert(!B->isDynamic());
+ assert(!B->isStatic());
+ assert(!B->isDead());
// The block might have a pointer saved in a field in its data
// that points to the block itself. We call the dtor first,
@@ -87,6 +88,7 @@ void InterpState::deallocate(Block *B) {
if (B->IsInitialized)
B->invokeDtor();
+ assert(!B->isInitialized());
if (B->hasPointers()) {
size_t Size = B->getSize();
// Allocate a new block, transferring over pointers.
@@ -95,24 +97,20 @@ void InterpState::deallocate(Block *B) {
auto *D = new (Memory) DeadBlock(DeadBlocks, B);
// Since the block doesn't hold any actual data anymore, we can just
// memcpy() everything over.
- std::memcpy(D->rawData(), B->rawData(), Desc->getAllocSize());
- D->B.IsInitialized = B->IsInitialized;
-
- // We moved the contents over to the DeadBlock.
- B->IsInitialized = false;
+ std::memcpy(D->rawData(), B->rawData(), Size);
+ D->B.IsInitialized = false;
}
}
bool InterpState::maybeDiagnoseDanglingAllocations() {
- bool NoAllocationsLeft = (Alloc.getNumAllocations() == 0);
+ bool NoAllocationsLeft = !Alloc.hasAllocations();
if (!checkingPotentialConstantExpression()) {
- for (const auto &It : Alloc.allocation_sites()) {
- assert(It.second.size() > 0);
+ for (const auto &[Source, Site] : Alloc.allocation_sites()) {
+ assert(!Site.empty());
- const Expr *Source = It.first;
CCEDiag(Source->getExprLoc(), diag::note_constexpr_memory_leak)
- << (It.second.size() - 1) << Source->getSourceRange();
+ << (Site.size() - 1) << Source->getSourceRange();
}
}
// Keep evaluating before C++20, since the CXXNewExpr wasn't valid there
diff --git a/clang/lib/AST/ByteCode/MemberPointer.h b/clang/lib/AST/ByteCode/MemberPointer.h
index b17ce25..8dd75ca 100644
--- a/clang/lib/AST/ByteCode/MemberPointer.h
+++ b/clang/lib/AST/ByteCode/MemberPointer.h
@@ -51,6 +51,12 @@ public:
FunctionPointer toFunctionPointer(const Context &Ctx) const;
+ bool isBaseCastPossible() const {
+ if (PtrOffset < 0)
+ return true;
+ return static_cast<uint64_t>(PtrOffset) <= Base.getByteOffset();
+ }
+
Pointer getBase() const {
if (PtrOffset < 0)
return Base.atField(-PtrOffset);
diff --git a/clang/lib/AST/ByteCode/Pointer.cpp b/clang/lib/AST/ByteCode/Pointer.cpp
index dec2088..4d70ae5 100644
--- a/clang/lib/AST/ByteCode/Pointer.cpp
+++ b/clang/lib/AST/ByteCode/Pointer.cpp
@@ -689,7 +689,7 @@ std::optional<APValue> Pointer::toRValue(const Context &Ctx,
assert(Record && "Missing record descriptor");
bool Ok = true;
- if (RT->getDecl()->isUnion()) {
+ if (RT->getOriginalDecl()->isUnion()) {
const FieldDecl *ActiveField = nullptr;
APValue Value;
for (const auto &F : Record->fields()) {
@@ -728,14 +728,15 @@ std::optional<APValue> Pointer::toRValue(const Context &Ctx,
for (unsigned I = 0; I < NB; ++I) {
const Record::Base *BD = Record->getBase(I);
- QualType BaseTy = Ctx.getASTContext().getRecordType(BD->Decl);
+ QualType BaseTy = Ctx.getASTContext().getCanonicalTagType(BD->Decl);
const Pointer &BP = Ptr.atField(BD->Offset);
Ok &= Composite(BaseTy, BP, R.getStructBase(I));
}
for (unsigned I = 0; I < NV; ++I) {
const Record::Base *VD = Record->getVirtualBase(I);
- QualType VirtBaseTy = Ctx.getASTContext().getRecordType(VD->Decl);
+ QualType VirtBaseTy =
+ Ctx.getASTContext().getCanonicalTagType(VD->Decl);
const Pointer &VP = Ptr.atField(VD->Offset);
Ok &= Composite(VirtBaseTy, VP, R.getStructBase(NB + I));
}
diff --git a/clang/lib/AST/ByteCode/Pointer.h b/clang/lib/AST/ByteCode/Pointer.h
index 5bafc5b..94c83a0 100644
--- a/clang/lib/AST/ByteCode/Pointer.h
+++ b/clang/lib/AST/ByteCode/Pointer.h
@@ -282,7 +282,7 @@ public:
bool isLive() const {
if (!isBlockPointer())
return true;
- return asBlockPointer().Pointee && !asBlockPointer().Pointee->IsDead;
+ return asBlockPointer().Pointee && !asBlockPointer().Pointee->isDead();
}
/// Checks if the item is a field in an object.
bool isField() const {
@@ -568,10 +568,9 @@ public:
if (!isBlockPointer())
return false;
- if (!asBlockPointer().Pointee)
- return false;
-
- return getDeclDesc()->isDummy();
+ if (const Block *Pointee = asBlockPointer().Pointee)
+ return Pointee->isDummy();
+ return false;
}
/// Checks if an object or a subfield is mutable.
diff --git a/clang/lib/AST/ByteCode/Program.cpp b/clang/lib/AST/ByteCode/Program.cpp
index 4daa4ab..2843b32 100644
--- a/clang/lib/AST/ByteCode/Program.cpp
+++ b/clang/lib/AST/ByteCode/Program.cpp
@@ -180,17 +180,15 @@ unsigned Program::getOrCreateDummy(const DeclTy &D) {
Desc = allocateDescriptor(D);
assert(Desc);
- Desc->makeDummy();
-
- assert(Desc->isDummy());
// Allocate a block for storage.
unsigned I = Globals.size();
auto *G = new (Allocator, Desc->getAllocSize())
Global(Ctx.getEvalID(), getCurrentDecl(), Desc, /*IsStatic=*/true,
- /*IsExtern=*/false, IsWeak);
+ /*IsExtern=*/false, IsWeak, /*IsDummy=*/true);
G->block()->invokeCtor();
+ assert(G->block()->isDummy());
Globals.push_back(G);
DummyVariables[D.getOpaqueValue()] = I;
@@ -325,7 +323,7 @@ Record *Program::getOrCreateRecord(const RecordDecl *RD) {
const auto *RT = Spec.getType()->getAs<RecordType>();
if (!RT)
return nullptr;
- const RecordDecl *BD = RT->getDecl();
+ const RecordDecl *BD = RT->getOriginalDecl()->getDefinitionOrSelf();
const Record *BR = getOrCreateRecord(BD);
const Descriptor *Desc = GetBaseDesc(BD, BR);
@@ -342,7 +340,7 @@ Record *Program::getOrCreateRecord(const RecordDecl *RD) {
if (!RT)
return nullptr;
- const RecordDecl *BD = RT->getDecl();
+ const RecordDecl *BD = RT->getOriginalDecl()->getDefinitionOrSelf();
const Record *BR = getOrCreateRecord(BD);
const Descriptor *Desc = GetBaseDesc(BD, BR);
@@ -399,7 +397,8 @@ Descriptor *Program::createDescriptor(const DeclTy &D, const Type *Ty,
// Classes and structures.
if (const auto *RT = Ty->getAs<RecordType>()) {
- if (const auto *Record = getOrCreateRecord(RT->getDecl()))
+ if (const auto *Record =
+ getOrCreateRecord(RT->getOriginalDecl()->getDefinitionOrSelf()))
return allocateDescriptor(D, Record, MDSize, IsConst, IsTemporary,
IsMutable, IsVolatile);
return allocateDescriptor(D, MDSize);
diff --git a/clang/lib/AST/ByteCode/Record.cpp b/clang/lib/AST/ByteCode/Record.cpp
index 1d4ac71..a7934cc 100644
--- a/clang/lib/AST/ByteCode/Record.cpp
+++ b/clang/lib/AST/ByteCode/Record.cpp
@@ -51,7 +51,7 @@ const Record::Base *Record::getBase(const RecordDecl *FD) const {
const Record::Base *Record::getBase(QualType T) const {
if (auto *RT = T->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
return BaseMap.lookup(RD);
}
return nullptr;
diff --git a/clang/lib/AST/CXXInheritance.cpp b/clang/lib/AST/CXXInheritance.cpp
index f037616..e4b77ed 100644
--- a/clang/lib/AST/CXXInheritance.cpp
+++ b/clang/lib/AST/CXXInheritance.cpp
@@ -132,8 +132,8 @@ bool CXXRecordDecl::forallBases(ForallBasesCallback BaseMatches) const {
if (!Ty)
return false;
- CXXRecordDecl *Base =
- cast_if_present<CXXRecordDecl>(Ty->getDecl()->getDefinition());
+ CXXRecordDecl *Base = cast_if_present<CXXRecordDecl>(
+ Ty->getOriginalDecl()->getDefinition());
if (!Base ||
(Base->isDependentContext() &&
!Base->isCurrentInstantiation(Record))) {
@@ -256,7 +256,8 @@ bool CXXBasePaths::lookupInBases(ASTContext &Context,
BaseSpec.getType()->getAs<TemplateSpecializationType>();
if (!TST) {
if (auto *RT = BaseSpec.getType()->getAs<RecordType>())
- BaseRecord = cast<CXXRecordDecl>(RT->getDecl());
+ BaseRecord = cast<CXXRecordDecl>(RT->getOriginalDecl())
+ ->getDefinitionOrSelf();
} else {
TemplateName TN = TST->getTemplateName();
if (auto *TD =
@@ -336,7 +337,8 @@ bool CXXRecordDecl::lookupInBases(BaseMatchesCallback BaseMatches,
CXXRecordDecl *VBase = nullptr;
if (const RecordType *Record = PE.Base->getType()->getAs<RecordType>())
- VBase = cast<CXXRecordDecl>(Record->getDecl());
+ VBase = cast<CXXRecordDecl>(Record->getOriginalDecl())
+ ->getDefinitionOrSelf();
if (!VBase)
break;
@@ -348,7 +350,8 @@ bool CXXRecordDecl::lookupInBases(BaseMatchesCallback BaseMatches,
CXXRecordDecl *HidingClass = nullptr;
if (const RecordType *Record =
HidingP.back().Base->getType()->getAs<RecordType>())
- HidingClass = cast<CXXRecordDecl>(Record->getDecl());
+ HidingClass = cast<CXXRecordDecl>(Record->getOriginalDecl())
+ ->getDefinitionOrSelf();
if (!HidingClass)
break;
@@ -468,7 +471,8 @@ void FinalOverriderCollector::Collect(const CXXRecordDecl *RD,
for (const auto &Base : RD->bases()) {
if (const RecordType *RT = Base.getType()->getAs<RecordType>()) {
- const CXXRecordDecl *BaseDecl = cast<CXXRecordDecl>(RT->getDecl());
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(RT->getOriginalDecl())->getDefinitionOrSelf();
if (!BaseDecl->isPolymorphic())
continue;
diff --git a/clang/lib/AST/Comment.cpp b/clang/lib/AST/Comment.cpp
index cd73d27..37e21c3 100644
--- a/clang/lib/AST/Comment.cpp
+++ b/clang/lib/AST/Comment.cpp
@@ -147,8 +147,6 @@ static TypeLoc lookThroughTypedefOrTypeAliasLocs(TypeLoc &SrcTL) {
return BlockPointerTL.getPointeeLoc().getUnqualifiedLoc();
if (MemberPointerTypeLoc MemberPointerTL = TL.getAs<MemberPointerTypeLoc>())
return MemberPointerTL.getPointeeLoc().getUnqualifiedLoc();
- if (ElaboratedTypeLoc ETL = TL.getAs<ElaboratedTypeLoc>())
- return ETL.getNamedTypeLoc();
return TL;
}
diff --git a/clang/lib/AST/CommentParser.cpp b/clang/lib/AST/CommentParser.cpp
index e61846d..2e5821a 100644
--- a/clang/lib/AST/CommentParser.cpp
+++ b/clang/lib/AST/CommentParser.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/CommentParser.h"
+#include "clang/AST/Comment.h"
#include "clang/AST/CommentCommandTraits.h"
#include "clang/AST/CommentSema.h"
#include "clang/Basic/CharInfo.h"
@@ -569,6 +570,8 @@ BlockCommandComment *Parser::parseBlockCommand() {
InlineCommandComment *Parser::parseInlineCommand() {
assert(Tok.is(tok::backslash_command) || Tok.is(tok::at_command));
+ CommandMarkerKind CMK =
+ Tok.is(tok::backslash_command) ? CMK_Backslash : CMK_At;
const CommandInfo *Info = Traits.getCommandInfo(Tok.getCommandID());
const Token CommandTok = Tok;
@@ -580,7 +583,7 @@ InlineCommandComment *Parser::parseInlineCommand() {
InlineCommandComment *IC = S.actOnInlineCommand(
CommandTok.getLocation(), CommandTok.getEndLocation(),
- CommandTok.getCommandID(), Args);
+ CommandTok.getCommandID(), CMK, Args);
if (Args.size() < Info->NumArgs) {
Diag(CommandTok.getEndLocation().getLocWithOffset(1),
diff --git a/clang/lib/AST/CommentSema.cpp b/clang/lib/AST/CommentSema.cpp
index 88520d7..649fba9 100644
--- a/clang/lib/AST/CommentSema.cpp
+++ b/clang/lib/AST/CommentSema.cpp
@@ -363,12 +363,13 @@ void Sema::actOnTParamCommandFinish(TParamCommandComment *Command,
InlineCommandComment *
Sema::actOnInlineCommand(SourceLocation CommandLocBegin,
SourceLocation CommandLocEnd, unsigned CommandID,
+ CommandMarkerKind CommandMarker,
ArrayRef<Comment::Argument> Args) {
StringRef CommandName = Traits.getCommandInfo(CommandID)->Name;
- return new (Allocator)
- InlineCommandComment(CommandLocBegin, CommandLocEnd, CommandID,
- getInlineCommandRenderKind(CommandName), Args);
+ return new (Allocator) InlineCommandComment(
+ CommandLocBegin, CommandLocEnd, CommandID,
+ getInlineCommandRenderKind(CommandName), CommandMarker, Args);
}
InlineContentComment *Sema::actOnUnknownCommand(SourceLocation LocBegin,
@@ -905,17 +906,9 @@ bool Sema::isClassOrStructOrTagTypedefDecl() {
if (isClassOrStructDeclImpl(ThisDeclInfo->CurrentDecl))
return true;
- if (auto *ThisTypedefDecl = dyn_cast<TypedefDecl>(ThisDeclInfo->CurrentDecl)) {
- auto UnderlyingType = ThisTypedefDecl->getUnderlyingType();
- if (auto ThisElaboratedType = dyn_cast<ElaboratedType>(UnderlyingType)) {
- auto DesugaredType = ThisElaboratedType->desugar();
- if (auto *DesugaredTypePtr = DesugaredType.getTypePtrOrNull()) {
- if (auto *ThisRecordType = dyn_cast<RecordType>(DesugaredTypePtr)) {
- return isClassOrStructDeclImpl(ThisRecordType->getAsRecordDecl());
- }
- }
- }
- }
+ if (auto *ThisTypedefDecl = dyn_cast<TypedefDecl>(ThisDeclInfo->CurrentDecl))
+ if (auto *D = ThisTypedefDecl->getUnderlyingType()->getAsRecordDecl())
+ return isClassOrStructDeclImpl(D);
return false;
}
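
In the CommentSema hunk just above, the hand-rolled walk through ElaboratedType is gone: Type::getAsRecordDecl() already looks through sugar to the underlying record, if any. A minimal standalone sketch of that check, assuming only the public AST API (typedefNamesRecord is a hypothetical name; the real code routes the result through isClassOrStructDeclImpl):

    #include "clang/AST/Decl.h"
    #include "clang/AST/Type.h"

    // Does a typedef ultimately name a class or struct?
    static bool typedefNamesRecord(const clang::TypedefNameDecl *TD) {
      if (const clang::RecordDecl *RD =
              TD->getUnderlyingType()->getAsRecordDecl())
        return RD->isClass() || RD->isStruct();
      return false;
    }
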
diff --git a/clang/lib/AST/ComparisonCategories.cpp b/clang/lib/AST/ComparisonCategories.cpp
index 2824410..0c7a7f4 100644
--- a/clang/lib/AST/ComparisonCategories.cpp
+++ b/clang/lib/AST/ComparisonCategories.cpp
@@ -166,7 +166,7 @@ const ComparisonCategoryInfo &ComparisonCategories::getInfoForType(QualType Ty)
QualType ComparisonCategoryInfo::getType() const {
assert(Record);
- return QualType(Record->getTypeForDecl(), 0);
+ return Record->getASTContext().getCanonicalTagType(Record);
}
StringRef ComparisonCategories::getCategoryString(ComparisonCategoryType Kind) {
diff --git a/clang/lib/AST/ComputeDependence.cpp b/clang/lib/AST/ComputeDependence.cpp
index 87334d9..e0cf0de 100644
--- a/clang/lib/AST/ComputeDependence.cpp
+++ b/clang/lib/AST/ComputeDependence.cpp
@@ -500,9 +500,8 @@ ExprDependence clang::computeDependence(OMPIteratorExpr *E) {
ExprDependence clang::computeDependence(DeclRefExpr *E, const ASTContext &Ctx) {
auto Deps = ExprDependence::None;
- if (auto *NNS = E->getQualifier())
- Deps |= toExprDependence(NNS->getDependence() &
- ~NestedNameSpecifierDependence::Dependent);
+ Deps |= toExprDependence(E->getQualifier().getDependence() &
+ ~NestedNameSpecifierDependence::Dependent);
if (auto *FirstArg = E->getTemplateArgs()) {
unsigned NumArgs = E->getNumTemplateArgs();
@@ -673,9 +672,8 @@ ExprDependence clang::computeDependence(MemberExpr *E) {
auto D = E->getBase()->getDependence();
D |= getDependenceInExpr(E->getMemberNameInfo());
- if (auto *NNS = E->getQualifier())
- D |= toExprDependence(NNS->getDependence() &
- ~NestedNameSpecifierDependence::Dependent);
+ D |= toExprDependence(E->getQualifier().getDependence() &
+ ~NestedNameSpecifierDependence::Dependent);
for (const auto &A : E->template_arguments())
D |= toExprDependence(A.getArgument().getDependence());
@@ -783,9 +781,8 @@ ExprDependence clang::computeDependence(CXXPseudoDestructorExpr *E) {
if (auto *ST = E->getScopeTypeInfo())
D |= turnTypeToValueDependence(
toExprDependenceAsWritten(ST->getType()->getDependence()));
- if (auto *Q = E->getQualifier())
- D |= toExprDependence(Q->getDependence() &
- ~NestedNameSpecifierDependence::Dependent);
+ D |= toExprDependence(E->getQualifier().getDependence() &
+ ~NestedNameSpecifierDependence::Dependent);
return D;
}
@@ -801,9 +798,8 @@ clang::computeDependence(OverloadExpr *E, bool KnownDependent,
if (KnownContainsUnexpandedParameterPack)
Deps |= ExprDependence::UnexpandedPack;
Deps |= getDependenceInExpr(E->getNameInfo());
- if (auto *Q = E->getQualifier())
- Deps |= toExprDependence(Q->getDependence() &
- ~NestedNameSpecifierDependence::Dependent);
+ Deps |= toExprDependence(E->getQualifier().getDependence() &
+ ~NestedNameSpecifierDependence::Dependent);
for (auto *D : E->decls()) {
if (D->getDeclContext()->isDependentContext() ||
isa<UnresolvedUsingValueDecl>(D) || isa<TemplateTemplateParmDecl>(D))
@@ -820,8 +816,7 @@ clang::computeDependence(OverloadExpr *E, bool KnownDependent,
ExprDependence clang::computeDependence(DependentScopeDeclRefExpr *E) {
auto D = ExprDependence::TypeValue;
D |= getDependenceInExpr(E->getNameInfo());
- if (auto *Q = E->getQualifier())
- D |= toExprDependence(Q->getDependence());
+ D |= toExprDependence(E->getQualifier().getDependence());
for (const auto &A : E->template_arguments())
D |= toExprDependence(A.getArgument().getDependence());
return D;
@@ -872,8 +867,7 @@ ExprDependence clang::computeDependence(CXXDependentScopeMemberExpr *E) {
auto D = ExprDependence::TypeValueInstantiation;
if (!E->isImplicitAccess())
D |= E->getBase()->getDependence();
- if (auto *Q = E->getQualifier())
- D |= toExprDependence(Q->getDependence());
+ D |= toExprDependence(E->getQualifier().getDependence());
D |= getDependenceInExpr(E->getMemberNameInfo());
for (const auto &A : E->template_arguments())
D |= toExprDependence(A.getArgument().getDependence());
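
The ComputeDependence changes all follow from NestedNameSpecifier becoming a value type: getQualifier() now returns an object that may be empty, and getDependence() on an empty specifier is just the empty bit set, so the old "if (auto *NNS = ...)" guards disappear. A minimal sketch, assuming the value-type API from this patch (qualifierDependence is a hypothetical helper; the call sites feed its result through toExprDependence as shown above):

    #include "clang/AST/DependenceFlags.h"
    #include "clang/AST/NestedNameSpecifier.h"

    // Dependence contributed by a qualifier, with the bit that only matters
    // for type dependence masked out, exactly as the rewritten call sites do.
    static clang::NestedNameSpecifierDependence
    qualifierDependence(clang::NestedNameSpecifier NNS) {
      return NNS.getDependence() &
             ~clang::NestedNameSpecifierDependence::Dependent;
    }
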
diff --git a/clang/lib/AST/Decl.cpp b/clang/lib/AST/Decl.cpp
index 5471f31..12fe551 100644
--- a/clang/lib/AST/Decl.cpp
+++ b/clang/lib/AST/Decl.cpp
@@ -1693,9 +1693,9 @@ void NamedDecl::printQualifiedName(raw_ostream &OS,
return;
}
printNestedNameSpecifier(OS, P);
- if (getDeclName())
- OS << *this;
- else {
+ if (getDeclName()) {
+ printName(OS, P);
+ } else {
// Give the printName override a chance to pick a different name before we
// fall back to "(anonymous)".
SmallString<64> NameBuffer;
@@ -1883,18 +1883,13 @@ bool NamedDecl::declarationReplaces(const NamedDecl *OldD,
// Using declarations can be replaced if they import the same name from the
// same context.
- if (const auto *UD = dyn_cast<UsingDecl>(this)) {
- ASTContext &Context = getASTContext();
- return Context.getCanonicalNestedNameSpecifier(UD->getQualifier()) ==
- Context.getCanonicalNestedNameSpecifier(
- cast<UsingDecl>(OldD)->getQualifier());
- }
- if (const auto *UUVD = dyn_cast<UnresolvedUsingValueDecl>(this)) {
- ASTContext &Context = getASTContext();
- return Context.getCanonicalNestedNameSpecifier(UUVD->getQualifier()) ==
- Context.getCanonicalNestedNameSpecifier(
- cast<UnresolvedUsingValueDecl>(OldD)->getQualifier());
- }
+ if (const auto *UD = dyn_cast<UsingDecl>(this))
+ return UD->getQualifier().getCanonical() ==
+ cast<UsingDecl>(OldD)->getQualifier().getCanonical();
+ if (const auto *UUVD = dyn_cast<UnresolvedUsingValueDecl>(this))
+ return UUVD->getQualifier().getCanonical() ==
+ cast<UnresolvedUsingValueDecl>(OldD)->getQualifier().getCanonical();
if (isRedeclarable(getKind())) {
if (getCanonicalDecl() != OldD->getCanonicalDecl())
@@ -2864,7 +2859,8 @@ VarDecl::needsDestruction(const ASTContext &Ctx) const {
bool VarDecl::hasFlexibleArrayInit(const ASTContext &Ctx) const {
assert(hasInit() && "Expect initializer to check for flexible array init");
auto *Ty = getType()->getAs<RecordType>();
- if (!Ty || !Ty->getDecl()->hasFlexibleArrayMember())
+ if (!Ty ||
+ !Ty->getOriginalDecl()->getDefinitionOrSelf()->hasFlexibleArrayMember())
return false;
auto *List = dyn_cast<InitListExpr>(getInit()->IgnoreParens());
if (!List)
@@ -2879,7 +2875,10 @@ bool VarDecl::hasFlexibleArrayInit(const ASTContext &Ctx) const {
CharUnits VarDecl::getFlexibleArrayInitChars(const ASTContext &Ctx) const {
assert(hasInit() && "Expect initializer to check for flexible array init");
auto *Ty = getType()->getAs<RecordType>();
- if (!Ty || !Ty->getDecl()->hasFlexibleArrayMember())
+ if (!Ty)
+ return CharUnits::Zero();
+ const RecordDecl *RD = Ty->getOriginalDecl()->getDefinitionOrSelf();
+  if (!RD->hasFlexibleArrayMember())
return CharUnits::Zero();
auto *List = dyn_cast<InitListExpr>(getInit()->IgnoreParens());
if (!List || List->getNumInits() == 0)
@@ -2889,7 +2888,7 @@ CharUnits VarDecl::getFlexibleArrayInitChars(const ASTContext &Ctx) const {
if (!InitTy)
return CharUnits::Zero();
CharUnits FlexibleArraySize = Ctx.getTypeSizeInChars(InitTy);
- const ASTRecordLayout &RL = Ctx.getASTRecordLayout(Ty->getDecl());
+ const ASTRecordLayout &RL = Ctx.getASTRecordLayout(RD);
CharUnits FlexibleArrayOffset =
Ctx.toCharUnitsFromBits(RL.getFieldOffset(RL.getFieldCount() - 1));
if (FlexibleArrayOffset + FlexibleArraySize < RL.getSize())
@@ -2991,7 +2990,10 @@ bool ParmVarDecl::isDestroyedInCallee() const {
// FIXME: isParamDestroyedInCallee() should probably imply
// isDestructedType()
const auto *RT = getType()->getAs<RecordType>();
- if (RT && RT->getDecl()->isParamDestroyedInCallee() &&
+ if (RT &&
+ RT->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->isParamDestroyedInCallee() &&
getType().isDestructedType())
return true;
@@ -3502,7 +3504,7 @@ bool FunctionDecl::isUsableAsGlobalAllocationFunctionInConstantEvaluation(
while (const auto *TD = T->getAs<TypedefType>())
T = TD->getDecl()->getUnderlyingType();
const IdentifierInfo *II =
- T->castAs<EnumType>()->getDecl()->getIdentifier();
+ T->castAs<EnumType>()->getOriginalDecl()->getIdentifier();
if (II && II->isStr("__hot_cold_t"))
Consume();
}
@@ -4653,7 +4655,7 @@ bool FieldDecl::isAnonymousStructOrUnion() const {
return false;
if (const auto *Record = getType()->getAs<RecordType>())
- return Record->getDecl()->isAnonymousStructOrUnion();
+ return Record->getOriginalDecl()->isAnonymousStructOrUnion();
return false;
}
@@ -4713,7 +4715,7 @@ bool FieldDecl::isZeroSize(const ASTContext &Ctx) const {
const auto *RT = getType()->getAs<RecordType>();
if (!RT)
return false;
- const RecordDecl *RD = RT->getDecl()->getDefinition();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinition();
if (!RD) {
assert(isInvalidDecl() && "valid field has incomplete type");
return false;
@@ -4836,10 +4838,6 @@ TagDecl *TagDecl::getCanonicalDecl() { return getFirstDecl(); }
void TagDecl::setTypedefNameForAnonDecl(TypedefNameDecl *TDD) {
TypedefNameDeclOrQualifier = TDD;
- if (const Type *T = getTypeForDecl()) {
- (void)T;
- assert(T->isLinkageValid());
- }
assert(isLinkageValid());
}
@@ -4867,25 +4865,16 @@ void TagDecl::completeDefinition() {
}
TagDecl *TagDecl::getDefinition() const {
- if (isCompleteDefinition())
+ if (isCompleteDefinition() || isBeingDefined())
return const_cast<TagDecl *>(this);
- // If it's possible for us to have an out-of-date definition, check now.
- if (mayHaveOutOfDateDef()) {
- if (IdentifierInfo *II = getIdentifier()) {
- if (II->isOutOfDate()) {
- updateOutOfDate(*II);
- }
- }
- }
-
if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(this))
return CXXRD->getDefinition();
- for (auto *R : redecls())
- if (R->isCompleteDefinition())
+ for (TagDecl *R :
+ redecl_range(redecl_iterator(getNextRedeclaration()), redecl_iterator()))
+ if (R->isCompleteDefinition() || R->isBeingDefined())
return R;
-
return nullptr;
}
@@ -4919,7 +4908,7 @@ void TagDecl::printName(raw_ostream &OS, const PrintingPolicy &Policy) const {
// is already printed as part of the type.
PrintingPolicy Copy(Policy);
Copy.SuppressScope = true;
- getASTContext().getTagDeclType(this).print(OS, Copy);
+ QualType(getASTContext().getCanonicalTagType(this)).print(OS, Copy);
return;
}
// Otherwise, do the normal printing.
@@ -4963,19 +4952,13 @@ EnumDecl *EnumDecl::Create(ASTContext &C, DeclContext *DC,
IdentifierInfo *Id,
EnumDecl *PrevDecl, bool IsScoped,
bool IsScopedUsingClassTag, bool IsFixed) {
- auto *Enum = new (C, DC) EnumDecl(C, DC, StartLoc, IdLoc, Id, PrevDecl,
- IsScoped, IsScopedUsingClassTag, IsFixed);
- Enum->setMayHaveOutOfDateDef(C.getLangOpts().Modules);
- C.getTypeDeclType(Enum, PrevDecl);
- return Enum;
+ return new (C, DC) EnumDecl(C, DC, StartLoc, IdLoc, Id, PrevDecl, IsScoped,
+ IsScopedUsingClassTag, IsFixed);
}
EnumDecl *EnumDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) {
- EnumDecl *Enum =
- new (C, ID) EnumDecl(C, nullptr, SourceLocation(), SourceLocation(),
- nullptr, nullptr, false, false, false);
- Enum->setMayHaveOutOfDateDef(C.getLangOpts().Modules);
- return Enum;
+ return new (C, ID) EnumDecl(C, nullptr, SourceLocation(), SourceLocation(),
+ nullptr, nullptr, false, false, false);
}
SourceRange EnumDecl::getIntegerTypeRange() const {
@@ -5035,7 +5018,7 @@ EnumDecl *EnumDecl::getTemplateInstantiationPattern() const {
EnumDecl *ED = getInstantiatedFromMemberEnum();
while (auto *NewED = ED->getInstantiatedFromMemberEnum())
ED = NewED;
- return getDefinitionOrSelf(ED);
+ return ::getDefinitionOrSelf(ED);
}
}
@@ -5125,21 +5108,15 @@ RecordDecl::RecordDecl(Kind DK, TagKind TK, const ASTContext &C,
RecordDecl *RecordDecl::Create(const ASTContext &C, TagKind TK, DeclContext *DC,
SourceLocation StartLoc, SourceLocation IdLoc,
IdentifierInfo *Id, RecordDecl* PrevDecl) {
- RecordDecl *R = new (C, DC) RecordDecl(Record, TK, C, DC,
- StartLoc, IdLoc, Id, PrevDecl);
- R->setMayHaveOutOfDateDef(C.getLangOpts().Modules);
-
- C.getTypeDeclType(R, PrevDecl);
- return R;
+ return new (C, DC)
+ RecordDecl(Record, TK, C, DC, StartLoc, IdLoc, Id, PrevDecl);
}
RecordDecl *RecordDecl::CreateDeserialized(const ASTContext &C,
GlobalDeclID ID) {
- RecordDecl *R = new (C, ID)
+ return new (C, ID)
RecordDecl(Record, TagTypeKind::Struct, C, nullptr, SourceLocation(),
SourceLocation(), nullptr, nullptr);
- R->setMayHaveOutOfDateDef(C.getLangOpts().Modules);
- return R;
}
bool RecordDecl::isLambda() const {
@@ -5163,7 +5140,7 @@ bool RecordDecl::isOrContainsUnion() const {
if (const RecordDecl *Def = getDefinition()) {
for (const FieldDecl *FD : Def->fields()) {
const RecordType *RT = FD->getType()->getAs<RecordType>();
- if (RT && RT->getDecl()->isOrContainsUnion())
+ if (RT && RT->getOriginalDecl()->isOrContainsUnion())
return true;
}
}
@@ -5295,8 +5272,9 @@ const FieldDecl *RecordDecl::findFirstNamedDataMember() const {
return I;
if (const auto *RT = I->getType()->getAs<RecordType>())
- if (const FieldDecl *NamedDataMember =
- RT->getDecl()->findFirstNamedDataMember())
+ if (const FieldDecl *NamedDataMember = RT->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->findFirstNamedDataMember())
return NamedDataMember;
}
@@ -5658,14 +5636,14 @@ void TypedefNameDecl::anchor() {}
TagDecl *TypedefNameDecl::getAnonDeclWithTypedefName(bool AnyRedecl) const {
if (auto *TT = getTypeSourceInfo()->getType()->getAs<TagType>()) {
- auto *OwningTypedef = TT->getDecl()->getTypedefNameForAnonDecl();
+ auto *OwningTypedef = TT->getOriginalDecl()->getTypedefNameForAnonDecl();
auto *ThisTypedef = this;
if (AnyRedecl && OwningTypedef) {
OwningTypedef = OwningTypedef->getCanonicalDecl();
ThisTypedef = ThisTypedef->getCanonicalDecl();
}
if (OwningTypedef == ThisTypedef)
- return TT->getDecl();
+ return TT->getOriginalDecl()->getDefinitionOrSelf();
}
return nullptr;
@@ -5674,7 +5652,7 @@ TagDecl *TypedefNameDecl::getAnonDeclWithTypedefName(bool AnyRedecl) const {
bool TypedefNameDecl::isTransparentTagSlow() const {
auto determineIsTransparent = [&]() {
if (auto *TT = getUnderlyingType()->getAs<TagType>()) {
- if (auto *TD = TT->getDecl()) {
+ if (auto *TD = TT->getOriginalDecl()) {
if (TD->getName() != getName())
return false;
SourceLocation TTLoc = getLocation();
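
Across Decl.cpp the self-type of a tag is now obtained with ASTContext::getCanonicalTagType() instead of getTypeDeclType()/getTagDeclType() plus a separate getCanonicalType() round-trip. A minimal sketch of the comparison idiom the rewritten sites use, assuming getCanonicalTagType as introduced by this patch (isSameClassType is a hypothetical helper):

    #include "clang/AST/ASTContext.h"
    #include "clang/AST/Decl.h"

    // Is T (ignoring qualifiers) the type of the given tag declaration?
    static bool isSameClassType(clang::ASTContext &Ctx, clang::QualType T,
                                const clang::TagDecl *TD) {
      clang::CanQualType TagTy = Ctx.getCanonicalTagType(TD);
      return Ctx.hasSameUnqualifiedType(T, TagTy);
    }
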
diff --git a/clang/lib/AST/DeclBase.cpp b/clang/lib/AST/DeclBase.cpp
index 13c46fd..680a4d7 100644
--- a/clang/lib/AST/DeclBase.cpp
+++ b/clang/lib/AST/DeclBase.cpp
@@ -58,10 +58,6 @@ using namespace clang;
#define ABSTRACT_DECL(DECL)
#include "clang/AST/DeclNodes.inc"
-void Decl::updateOutOfDate(IdentifierInfo &II) const {
- getASTContext().getExternalSource()->updateOutOfDateIdentifier(II);
-}
-
#define DECL(DERIVED, BASE) \
static_assert(alignof(Decl) >= alignof(DERIVED##Decl), \
"Alignment sufficient after objects prepended to " #DERIVED);
@@ -489,8 +485,7 @@ bool Decl::isFlexibleArrayMemberLike(
// Look through typedefs.
if (TypedefTypeLoc TTL = TL.getAsAdjusted<TypedefTypeLoc>()) {
- const TypedefNameDecl *TDL = TTL.getTypedefNameDecl();
- TInfo = TDL->getTypeSourceInfo();
+ TInfo = TTL.getDecl()->getTypeSourceInfo();
continue;
}
@@ -1512,30 +1507,19 @@ DeclContext *DeclContext::getPrimaryContext() {
case Decl::ObjCCategoryImpl:
return this;
- default:
- if (getDeclKind() >= Decl::firstTag && getDeclKind() <= Decl::lastTag) {
- // If this is a tag type that has a definition or is currently
- // being defined, that definition is our primary context.
- auto *Tag = cast<TagDecl>(this);
-
- if (TagDecl *Def = Tag->getDefinition())
- return Def;
-
- if (const auto *TagTy = dyn_cast<TagType>(Tag->getTypeForDecl())) {
- // Note, TagType::getDecl returns the (partial) definition one exists.
- TagDecl *PossiblePartialDef = TagTy->getDecl();
- if (PossiblePartialDef->isBeingDefined())
- return PossiblePartialDef;
- } else {
- assert(isa<InjectedClassNameType>(Tag->getTypeForDecl()));
- }
-
- return Tag;
- }
+ // If this is a tag type that has a definition or is currently
+ // being defined, that definition is our primary context.
+ case Decl::ClassTemplatePartialSpecialization:
+ case Decl::ClassTemplateSpecialization:
+ case Decl::CXXRecord:
+ return cast<CXXRecordDecl>(this)->getDefinitionOrSelf();
+ case Decl::Record:
+ case Decl::Enum:
+ return cast<TagDecl>(this)->getDefinitionOrSelf();
+ default:
assert(getDeclKind() >= Decl::firstFunction &&
- getDeclKind() <= Decl::lastFunction &&
- "Unknown DeclContext kind");
+ getDeclKind() <= Decl::lastFunction && "Unknown DeclContext kind");
return this;
}
}
diff --git a/clang/lib/AST/DeclCXX.cpp b/clang/lib/AST/DeclCXX.cpp
index 037a28c4..50b1a1d 100644
--- a/clang/lib/AST/DeclCXX.cpp
+++ b/clang/lib/AST/DeclCXX.cpp
@@ -132,16 +132,9 @@ CXXRecordDecl::CXXRecordDecl(Kind K, TagKind TK, const ASTContext &C,
CXXRecordDecl *CXXRecordDecl::Create(const ASTContext &C, TagKind TK,
DeclContext *DC, SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
- CXXRecordDecl *PrevDecl,
- bool DelayTypeCreation) {
- auto *R = new (C, DC) CXXRecordDecl(CXXRecord, TK, C, DC, StartLoc, IdLoc, Id,
- PrevDecl);
- R->setMayHaveOutOfDateDef(C.getLangOpts().Modules);
-
- // FIXME: DelayTypeCreation seems like such a hack
- if (!DelayTypeCreation)
- C.getTypeDeclType(R, PrevDecl);
- return R;
+ CXXRecordDecl *PrevDecl) {
+ return new (C, DC)
+ CXXRecordDecl(CXXRecord, TK, C, DC, StartLoc, IdLoc, Id, PrevDecl);
}
CXXRecordDecl *
@@ -154,10 +147,7 @@ CXXRecordDecl::CreateLambda(const ASTContext &C, DeclContext *DC,
R->setBeingDefined(true);
R->DefinitionData = new (C) struct LambdaDefinitionData(
R, Info, DependencyKind, IsGeneric, CaptureDefault);
- R->setMayHaveOutOfDateDef(false);
R->setImplicit(true);
-
- C.getTypeDeclType(R, /*PrevDecl=*/nullptr);
return R;
}
@@ -166,7 +156,6 @@ CXXRecordDecl *CXXRecordDecl::CreateDeserialized(const ASTContext &C,
auto *R = new (C, ID)
CXXRecordDecl(CXXRecord, TagTypeKind::Struct, C, nullptr,
SourceLocation(), SourceLocation(), nullptr, nullptr);
- R->setMayHaveOutOfDateDef(false);
return R;
}
@@ -178,7 +167,7 @@ static bool hasRepeatedBaseClass(const CXXRecordDecl *StartRD) {
SmallVector<const CXXRecordDecl*, 8> WorkList = {StartRD};
while (!WorkList.empty()) {
const CXXRecordDecl *RD = WorkList.pop_back_val();
- if (RD->getTypeForDecl()->isDependentType())
+ if (RD->isDependentType())
continue;
for (const CXXBaseSpecifier &BaseSpec : RD->bases()) {
if (const CXXRecordDecl *B = BaseSpec.getType()->getAsCXXRecordDecl()) {
@@ -228,7 +217,8 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
if (BaseType->isDependentType())
continue;
auto *BaseClassDecl =
- cast<CXXRecordDecl>(BaseType->castAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(BaseType->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
// C++2a [class]p7:
// A standard-layout class is a class that:
@@ -1218,7 +1208,7 @@ void CXXRecordDecl::addedMember(Decl *D) {
bool IsZeroSize = Field->isZeroSize(Context);
if (const auto *RecordTy = T->getAs<RecordType>()) {
- auto *FieldRec = cast<CXXRecordDecl>(RecordTy->getDecl());
+ auto *FieldRec = cast<CXXRecordDecl>(RecordTy->getOriginalDecl());
if (FieldRec->getDefinition()) {
addedClassSubobject(FieldRec);
@@ -1925,7 +1915,8 @@ static void CollectVisibleConversions(
= CXXRecordDecl::MergeAccess(Access, I.getAccessSpecifier());
bool BaseInVirtual = InVirtual || I.isVirtual();
- auto *Base = cast<CXXRecordDecl>(RT->getDecl());
+ auto *Base =
+ cast<CXXRecordDecl>(RT->getOriginalDecl())->getDefinitionOrSelf();
CollectVisibleConversions(Context, Base, BaseInVirtual, BaseAccess,
*HiddenTypes, Output, VOutput, HiddenVBaseCs);
}
@@ -1963,9 +1954,11 @@ static void CollectVisibleConversions(ASTContext &Context,
const auto *RT = I.getType()->getAs<RecordType>();
if (!RT) continue;
- CollectVisibleConversions(Context, cast<CXXRecordDecl>(RT->getDecl()),
- I.isVirtual(), I.getAccessSpecifier(),
- HiddenTypes, Output, VBaseCs, HiddenVBaseCs);
+ CollectVisibleConversions(
+ Context,
+ cast<CXXRecordDecl>(RT->getOriginalDecl())->getDefinitionOrSelf(),
+ I.isVirtual(), I.getAccessSpecifier(), HiddenTypes, Output, VBaseCs,
+ HiddenVBaseCs);
}
// Add any unhidden conversions provided by virtual bases.
@@ -2125,11 +2118,10 @@ const CXXRecordDecl *CXXRecordDecl::getTemplateInstantiationPattern() const {
CXXDestructorDecl *CXXRecordDecl::getDestructor() const {
ASTContext &Context = getASTContext();
- QualType ClassType = Context.getTypeDeclType(this);
+ CanQualType ClassType = Context.getCanonicalTagType(this);
- DeclarationName Name
- = Context.DeclarationNames.getCXXDestructorName(
- Context.getCanonicalType(ClassType));
+ DeclarationName Name =
+ Context.DeclarationNames.getCXXDestructorName(ClassType);
DeclContext::lookup_result R = lookup(Name);
@@ -2159,6 +2151,29 @@ bool CXXRecordDecl::isInjectedClassName() const {
return false;
}
+bool CXXRecordDecl::hasInjectedClassType() const {
+ switch (getDeclKind()) {
+ case Decl::ClassTemplatePartialSpecialization:
+ return true;
+ case Decl::ClassTemplateSpecialization:
+ return false;
+ case Decl::CXXRecord:
+ return getDescribedClassTemplate() != nullptr;
+ default:
+ llvm_unreachable("unexpected decl kind");
+ }
+}
+
+CanQualType CXXRecordDecl::getCanonicalTemplateSpecializationType(
+ const ASTContext &Ctx) const {
+ if (auto *RD = dyn_cast<ClassTemplatePartialSpecializationDecl>(this))
+ return RD->getCanonicalInjectedSpecializationType(Ctx);
+ if (const ClassTemplateDecl *TD = getDescribedClassTemplate();
+ TD && !isa<ClassTemplateSpecializationDecl>(this))
+ return TD->getCanonicalInjectedSpecializationType(Ctx);
+ return CanQualType();
+}
+
static bool isDeclContextInNamespace(const DeclContext *DC) {
while (!DC->isTranslationUnit()) {
if (DC->isNamespace())
@@ -2272,7 +2287,7 @@ void CXXRecordDecl::completeDefinition(CXXFinalOverriderMap *FinalOverriders) {
Context.getDiagnostics().Report(
AT->getLocation(),
diag::warn_cxx20_compat_requires_explicit_init_non_aggregate)
- << AT << FD << Context.getRecordType(this);
+ << AT << FD << Context.getCanonicalTagType(this);
}
}
@@ -2284,7 +2299,7 @@ void CXXRecordDecl::completeDefinition(CXXFinalOverriderMap *FinalOverriders) {
if (const auto *AT = FD->getAttr<ExplicitInitAttr>())
Context.getDiagnostics().Report(AT->getLocation(),
diag::warn_attribute_needs_aggregate)
- << AT << Context.getRecordType(this);
+ << AT << Context.getCanonicalTagType(this);
}
setHasUninitializedExplicitInitFields(false);
}
@@ -2296,8 +2311,8 @@ bool CXXRecordDecl::mayBeAbstract() const {
return false;
for (const auto &B : bases()) {
- const auto *BaseDecl =
- cast<CXXRecordDecl>(B.getType()->castAs<RecordType>()->getDecl());
+ const auto *BaseDecl = cast<CXXRecordDecl>(
+ B.getType()->castAs<RecordType>()->getOriginalDecl());
if (BaseDecl->isAbstract())
return true;
}
@@ -2460,7 +2475,8 @@ CXXMethodDecl::getCorrespondingMethodInClass(const CXXRecordDecl *RD,
const RecordType *RT = I.getType()->getAs<RecordType>();
if (!RT)
continue;
- const auto *Base = cast<CXXRecordDecl>(RT->getDecl());
+ const auto *Base =
+ cast<CXXRecordDecl>(RT->getOriginalDecl())->getDefinitionOrSelf();
if (CXXMethodDecl *D = this->getCorrespondingMethodInClass(Base))
AddFinalOverrider(D);
}
@@ -2712,8 +2728,7 @@ bool CXXMethodDecl::isCopyAssignmentOperator() const {
ParamType = Ref->getPointeeType();
ASTContext &Context = getASTContext();
- QualType ClassType
- = Context.getCanonicalType(Context.getTypeDeclType(getParent()));
+ CanQualType ClassType = Context.getCanonicalTagType(getParent());
return Context.hasSameUnqualifiedType(ClassType, ParamType);
}
@@ -2733,8 +2748,7 @@ bool CXXMethodDecl::isMoveAssignmentOperator() const {
ParamType = ParamType->getPointeeType();
ASTContext &Context = getASTContext();
- QualType ClassType
- = Context.getCanonicalType(Context.getTypeDeclType(getParent()));
+ CanQualType ClassType = Context.getCanonicalTagType(getParent());
return Context.hasSameUnqualifiedType(ClassType, ParamType);
}
@@ -2769,7 +2783,7 @@ CXXMethodDecl::overridden_methods() const {
static QualType getThisObjectType(ASTContext &C, const FunctionProtoType *FPT,
const CXXRecordDecl *Decl) {
- QualType ClassTy = C.getTypeDeclType(Decl);
+ CanQualType ClassTy = C.getCanonicalTagType(Decl);
return C.getQualifiedType(ClassTy, FPT->getMethodQuals());
}
@@ -3027,11 +3041,9 @@ bool CXXConstructorDecl::isCopyOrMoveConstructor(unsigned &TypeQuals) const {
// Is it a reference to our class type?
ASTContext &Context = getASTContext();
- CanQualType PointeeType
- = Context.getCanonicalType(ParamRefType->getPointeeType());
- CanQualType ClassTy
- = Context.getCanonicalType(Context.getTagDeclType(getParent()));
- if (PointeeType.getUnqualifiedType() != ClassTy)
+ QualType PointeeType = ParamRefType->getPointeeType();
+ CanQualType ClassTy = Context.getCanonicalTagType(getParent());
+ if (!Context.hasSameUnqualifiedType(PointeeType, ClassTy))
return false;
// FIXME: other qualifiers?
@@ -3066,15 +3078,11 @@ bool CXXConstructorDecl::isSpecializationCopyingObject() const {
const ParmVarDecl *Param = getParamDecl(0);
ASTContext &Context = getASTContext();
- CanQualType ParamType = Context.getCanonicalType(Param->getType());
+ CanQualType ParamType = Param->getType()->getCanonicalTypeUnqualified();
// Is it the same as our class type?
- CanQualType ClassTy
- = Context.getCanonicalType(Context.getTagDeclType(getParent()));
- if (ParamType.getUnqualifiedType() != ClassTy)
- return false;
-
- return true;
+ CanQualType ClassTy = Context.getCanonicalTagType(getParent());
+ return ParamType == ClassTy;
}
void CXXDestructorDecl::anchor() {}
@@ -3371,7 +3379,7 @@ ConstructorUsingShadowDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) {
}
CXXRecordDecl *ConstructorUsingShadowDecl::getNominatedBaseClass() const {
- return getIntroducer()->getQualifier()->getAsRecordDecl();
+ return getIntroducer()->getQualifier().getAsRecordDecl();
}
void BaseUsingDecl::anchor() {}
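
The DeclCXX.cpp hunks use the same canonical tag type as a name-lookup key, e.g. getDestructor() now builds the destructor name directly from getCanonicalTagType(). A minimal sketch of that lookup, assuming the post-patch ASTContext API (findDestructor is a hypothetical helper mirroring the rewritten getDestructor()):

    #include "clang/AST/ASTContext.h"
    #include "clang/AST/DeclCXX.h"

    static clang::CXXDestructorDecl *
    findDestructor(const clang::CXXRecordDecl *RD) {
      using namespace clang;
      ASTContext &Ctx = RD->getASTContext();
      // The lookup name is keyed on the canonical tag type; no separate
      // getCanonicalType() call is needed any more.
      DeclarationName Name =
          Ctx.DeclarationNames.getCXXDestructorName(Ctx.getCanonicalTagType(RD));
      for (NamedDecl *ND : RD->lookup(Name))
        if (auto *DD = dyn_cast<CXXDestructorDecl>(ND))
          return DD;
      return nullptr;
    }
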
diff --git a/clang/lib/AST/DeclPrinter.cpp b/clang/lib/AST/DeclPrinter.cpp
index f4265dd0..196057f 100644
--- a/clang/lib/AST/DeclPrinter.cpp
+++ b/clang/lib/AST/DeclPrinter.cpp
@@ -202,8 +202,7 @@ void Decl::printGroup(Decl** Begin, unsigned NumDecls,
}
Decl** End = Begin + NumDecls;
- TagDecl* TD = dyn_cast<TagDecl>(*Begin);
- if (TD)
+ if (isa<TagDecl>(*Begin))
++Begin;
PrintingPolicy SubPolicy(Policy);
@@ -211,13 +210,9 @@ void Decl::printGroup(Decl** Begin, unsigned NumDecls,
bool isFirst = true;
for ( ; Begin != End; ++Begin) {
if (isFirst) {
- if(TD)
- SubPolicy.IncludeTagDefinition = true;
- SubPolicy.SuppressSpecifiers = false;
isFirst = false;
} else {
- if (!isFirst) Out << ", ";
- SubPolicy.IncludeTagDefinition = false;
+ Out << ", ";
SubPolicy.SuppressSpecifiers = true;
}
@@ -487,10 +482,12 @@ void DeclPrinter::VisitDeclContext(DeclContext *DC, bool Indent) {
QualType CurDeclType = getDeclType(*D);
if (!Decls.empty() && !CurDeclType.isNull()) {
QualType BaseType = GetBaseType(CurDeclType);
- if (!BaseType.isNull() && isa<ElaboratedType>(BaseType) &&
- cast<ElaboratedType>(BaseType)->getOwnedTagDecl() == Decls[0]) {
- Decls.push_back(*D);
- continue;
+ if (const auto *TT = dyn_cast_or_null<TagType>(BaseType);
+ TT && TT->isTagOwned()) {
+ if (TT->getOriginalDecl() == Decls[0]) {
+ Decls.push_back(*D);
+ continue;
+ }
}
}
@@ -662,16 +659,6 @@ static void printExplicitSpecifier(ExplicitSpecifier ES, llvm::raw_ostream &Out,
Out << Proto;
}
-static void MaybePrintTagKeywordIfSupressingScopes(PrintingPolicy &Policy,
- QualType T,
- llvm::raw_ostream &Out) {
- StringRef prefix = T->isClassType() ? "class "
- : T->isStructureType() ? "struct "
- : T->isUnionType() ? "union "
- : "";
- Out << prefix;
-}
-
void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
if (!D->getDescribedFunctionTemplate() &&
!D->isFunctionTemplateSpecialization()) {
@@ -721,11 +708,8 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
Proto += D->getQualifiedNameAsString();
} else {
llvm::raw_string_ostream OS(Proto);
- if (!Policy.SuppressScope) {
- if (const NestedNameSpecifier *NS = D->getQualifier()) {
- NS->print(OS, Policy);
- }
- }
+ if (!Policy.SuppressScope)
+ D->getQualifier().print(OS, Policy);
D->getNameInfo().printName(OS, Policy);
}
@@ -833,10 +817,6 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
Out << Proto << " -> ";
Proto.clear();
}
- if (!Policy.SuppressTagKeyword && Policy.SuppressScope &&
- !Policy.SuppressUnwrittenScope)
- MaybePrintTagKeywordIfSupressingScopes(Policy, AFT->getReturnType(),
- Out);
AFT->getReturnType().print(Out, Policy, Proto);
Proto.clear();
}
@@ -995,10 +975,6 @@ void DeclPrinter::VisitVarDecl(VarDecl *D) {
}
}
- if (!Policy.SuppressTagKeyword && Policy.SuppressScope &&
- !Policy.SuppressUnwrittenScope)
- MaybePrintTagKeywordIfSupressingScopes(Policy, T, Out);
-
printDeclType(T, (isa<ParmVarDecl>(D) && Policy.CleanUglifiedParameters &&
D->getIdentifier())
? D->getIdentifier()->deuglifiedName()
@@ -1028,7 +1004,6 @@ void DeclPrinter::VisitVarDecl(VarDecl *D) {
}
PrintingPolicy SubPolicy(Policy);
SubPolicy.SuppressSpecifiers = false;
- SubPolicy.IncludeTagDefinition = false;
Init->printPretty(Out, nullptr, SubPolicy, Indentation, "\n", &Context);
if ((D->getInitStyle() == VarDecl::CallInit) && !isa<ParenListExpr>(Init))
Out << ")";
@@ -1086,15 +1061,13 @@ void DeclPrinter::VisitNamespaceDecl(NamespaceDecl *D) {
void DeclPrinter::VisitUsingDirectiveDecl(UsingDirectiveDecl *D) {
Out << "using namespace ";
- if (D->getQualifier())
- D->getQualifier()->print(Out, Policy);
+ D->getQualifier().print(Out, Policy);
Out << *D->getNominatedNamespaceAsWritten();
}
void DeclPrinter::VisitNamespaceAliasDecl(NamespaceAliasDecl *D) {
Out << "namespace " << *D << " = ";
- if (D->getQualifier())
- D->getQualifier()->print(Out, Policy);
+ D->getQualifier().print(Out, Policy);
Out << *D->getAliasedNamespace();
}
@@ -1115,8 +1088,7 @@ void DeclPrinter::VisitCXXRecordDecl(CXXRecordDecl *D) {
Out << ' ';
if (D->getIdentifier()) {
- if (auto *NNS = D->getQualifier())
- NNS->print(Out, Policy);
+ D->getQualifier().print(Out, Policy);
Out << *D;
if (auto *S = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
@@ -1746,7 +1718,7 @@ void DeclPrinter::VisitUsingDecl(UsingDecl *D) {
Out << "using ";
if (D->hasTypename())
Out << "typename ";
- D->getQualifier()->print(Out, Policy);
+ D->getQualifier().print(Out, Policy);
// Use the correct record name when the using declaration is used for
// inheriting constructors.
@@ -1768,14 +1740,14 @@ void DeclPrinter::VisitUsingEnumDecl(UsingEnumDecl *D) {
void
DeclPrinter::VisitUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *D) {
Out << "using typename ";
- D->getQualifier()->print(Out, Policy);
+ D->getQualifier().print(Out, Policy);
Out << D->getDeclName();
}
void DeclPrinter::VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D) {
if (!D->isAccessDeclaration())
Out << "using ";
- D->getQualifier()->print(Out, Policy);
+ D->getQualifier().print(Out, Policy);
Out << D->getDeclName();
}
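
The DeclPrinter changes lean on the same value-type qualifier: NestedNameSpecifier::print() on an empty specifier prints nothing, so the "if (D->getQualifier())" guards go away. A minimal sketch, assuming the post-patch API (printQualifiedUsing is a hypothetical helper):

    #include "clang/AST/DeclCXX.h"
    #include "clang/AST/PrettyPrinter.h"
    #include "llvm/Support/raw_ostream.h"

    // Print "Qualifier::Name" for a using declaration; an empty qualifier
    // simply contributes nothing.
    static void printQualifiedUsing(llvm::raw_ostream &OS,
                                    const clang::UsingDecl *D,
                                    const clang::PrintingPolicy &Policy) {
      D->getQualifier().print(OS, Policy);
      OS << D->getDeclName();
    }
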
diff --git a/clang/lib/AST/DeclTemplate.cpp b/clang/lib/AST/DeclTemplate.cpp
index bc4a299..8a6fb33 100644
--- a/clang/lib/AST/DeclTemplate.cpp
+++ b/clang/lib/AST/DeclTemplate.cpp
@@ -632,7 +632,8 @@ ClassTemplateDecl::findPartialSpecialization(QualType T) {
ASTContext &Context = getASTContext();
for (ClassTemplatePartialSpecializationDecl &P :
getPartialSpecializations()) {
- if (Context.hasSameType(P.getInjectedSpecializationType(), T))
+ if (Context.hasSameType(P.getCanonicalInjectedSpecializationType(Context),
+ T))
return P.getMostRecentDecl();
}
@@ -651,28 +652,20 @@ ClassTemplateDecl::findPartialSpecInstantiatedFromMember(
return nullptr;
}
-QualType
-ClassTemplateDecl::getInjectedClassNameSpecialization() {
+CanQualType ClassTemplateDecl::getCanonicalInjectedSpecializationType(
+ const ASTContext &Ctx) const {
Common *CommonPtr = getCommonPtr();
- if (!CommonPtr->InjectedClassNameType.isNull())
- return CommonPtr->InjectedClassNameType;
-
- // C++0x [temp.dep.type]p2:
- // The template argument list of a primary template is a template argument
- // list in which the nth template argument has the value of the nth template
- // parameter of the class template. If the nth template parameter is a
- // template parameter pack (14.5.3), the nth template argument is a pack
- // expansion (14.5.3) whose pattern is the name of the template parameter
- // pack.
- ASTContext &Context = getASTContext();
- TemplateName Name = Context.getQualifiedTemplateName(
- /*NNS=*/nullptr, /*TemplateKeyword=*/false, TemplateName(this));
- auto TemplateArgs = getTemplateParameters()->getInjectedTemplateArgs(Context);
- CommonPtr->InjectedClassNameType =
- Context.getTemplateSpecializationType(Name,
- /*SpecifiedArgs=*/TemplateArgs,
- /*CanonicalArgs=*/{});
- return CommonPtr->InjectedClassNameType;
+
+ if (CommonPtr->CanonInjectedTST.isNull()) {
+ SmallVector<TemplateArgument> CanonicalArgs(
+ getTemplateParameters()->getInjectedTemplateArgs(Ctx));
+ Ctx.canonicalizeTemplateArguments(CanonicalArgs);
+ CommonPtr->CanonInjectedTST =
+ CanQualType::CreateUnsafe(Ctx.getCanonicalTemplateSpecializationType(
+ TemplateName(const_cast<ClassTemplateDecl *>(getCanonicalDecl())),
+ CanonicalArgs));
+ }
+ return CommonPtr->CanonInjectedTST;
}
//===----------------------------------------------------------------------===//
@@ -998,7 +991,6 @@ ClassTemplateSpecializationDecl *ClassTemplateSpecializationDecl::Create(
auto *Result = new (Context, DC) ClassTemplateSpecializationDecl(
Context, ClassTemplateSpecialization, TK, DC, StartLoc, IdLoc,
SpecializedTemplate, Args, StrictPackMatch, PrevDecl);
- Result->setMayHaveOutOfDateDef(false);
// If the template decl is incomplete, copy the external lexical storage from
// the base template. This allows instantiations of incomplete types to
@@ -1008,17 +1000,14 @@ ClassTemplateSpecializationDecl *ClassTemplateSpecializationDecl::Create(
Result->setHasExternalLexicalStorage(
SpecializedTemplate->getTemplatedDecl()->hasExternalLexicalStorage());
- Context.getTypeDeclType(Result, PrevDecl);
return Result;
}
ClassTemplateSpecializationDecl *
ClassTemplateSpecializationDecl::CreateDeserialized(ASTContext &C,
GlobalDeclID ID) {
- auto *Result =
- new (C, ID) ClassTemplateSpecializationDecl(C, ClassTemplateSpecialization);
- Result->setMayHaveOutOfDateDef(false);
- return Result;
+ return new (C, ID)
+ ClassTemplateSpecializationDecl(C, ClassTemplateSpecialization);
}
void ClassTemplateSpecializationDecl::getNameForDiagnostic(
@@ -1180,13 +1169,15 @@ ClassTemplatePartialSpecializationDecl::ClassTemplatePartialSpecializationDecl(
ASTContext &Context, TagKind TK, DeclContext *DC, SourceLocation StartLoc,
SourceLocation IdLoc, TemplateParameterList *Params,
ClassTemplateDecl *SpecializedTemplate, ArrayRef<TemplateArgument> Args,
+ CanQualType CanonInjectedTST,
ClassTemplatePartialSpecializationDecl *PrevDecl)
: ClassTemplateSpecializationDecl(
Context, ClassTemplatePartialSpecialization, TK, DC, StartLoc, IdLoc,
// Tracking StrictPackMatch for Partial
// Specializations is not needed.
SpecializedTemplate, Args, /*StrictPackMatch=*/false, PrevDecl),
- TemplateParams(Params), InstantiatedFromMember(nullptr, false) {
+ TemplateParams(Params), InstantiatedFromMember(nullptr, false),
+ CanonInjectedTST(CanonInjectedTST) {
if (AdoptTemplateParameterList(Params, this))
setInvalidDecl();
}
@@ -1196,24 +1187,31 @@ ClassTemplatePartialSpecializationDecl::Create(
ASTContext &Context, TagKind TK, DeclContext *DC, SourceLocation StartLoc,
SourceLocation IdLoc, TemplateParameterList *Params,
ClassTemplateDecl *SpecializedTemplate, ArrayRef<TemplateArgument> Args,
- QualType CanonInjectedType,
+ CanQualType CanonInjectedTST,
ClassTemplatePartialSpecializationDecl *PrevDecl) {
auto *Result = new (Context, DC) ClassTemplatePartialSpecializationDecl(
Context, TK, DC, StartLoc, IdLoc, Params, SpecializedTemplate, Args,
- PrevDecl);
+ CanonInjectedTST, PrevDecl);
Result->setSpecializationKind(TSK_ExplicitSpecialization);
- Result->setMayHaveOutOfDateDef(false);
-
- Context.getInjectedClassNameType(Result, CanonInjectedType);
return Result;
}
ClassTemplatePartialSpecializationDecl *
ClassTemplatePartialSpecializationDecl::CreateDeserialized(ASTContext &C,
GlobalDeclID ID) {
- auto *Result = new (C, ID) ClassTemplatePartialSpecializationDecl(C);
- Result->setMayHaveOutOfDateDef(false);
- return Result;
+ return new (C, ID) ClassTemplatePartialSpecializationDecl(C);
+}
+
+CanQualType
+ClassTemplatePartialSpecializationDecl::getCanonicalInjectedSpecializationType(
+ const ASTContext &Ctx) const {
+ if (CanonInjectedTST.isNull()) {
+ CanonInjectedTST =
+ CanQualType::CreateUnsafe(Ctx.getCanonicalTemplateSpecializationType(
+ TemplateName(getSpecializedTemplate()->getCanonicalDecl()),
+ getTemplateArgs().asArray()));
+ }
+ return CanonInjectedTST;
}
SourceRange ClassTemplatePartialSpecializationDecl::getSourceRange() const {
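
In DeclTemplate.cpp the cached, mutable InjectedClassNameType is replaced by a lazily computed canonical injected specialization type, and partial specializations are matched against it with hasSameType(). A minimal sketch of the matching side, assuming getCanonicalInjectedSpecializationType() as introduced here (findMatchingPartial is a hypothetical helper):

    #include "clang/AST/ASTContext.h"
    #include "clang/AST/DeclTemplate.h"
    #include "llvm/ADT/SmallVector.h"

    static clang::ClassTemplatePartialSpecializationDecl *
    findMatchingPartial(clang::ClassTemplateDecl *CTD, clang::QualType T) {
      using namespace clang;
      ASTContext &Ctx = CTD->getASTContext();
      SmallVector<ClassTemplatePartialSpecializationDecl *, 4> Partials;
      CTD->getPartialSpecializations(Partials);
      for (auto *P : Partials)
        if (Ctx.hasSameType(P->getCanonicalInjectedSpecializationType(Ctx), T))
          return P;
      return nullptr;
    }
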
diff --git a/clang/lib/AST/DeclarationName.cpp b/clang/lib/AST/DeclarationName.cpp
index ae5fcf6..6c7b995 100644
--- a/clang/lib/AST/DeclarationName.cpp
+++ b/clang/lib/AST/DeclarationName.cpp
@@ -115,12 +115,12 @@ static void printCXXConstructorDestructorName(QualType ClassType,
Policy.adjustForCPlusPlus();
if (const RecordType *ClassRec = ClassType->getAs<RecordType>()) {
- ClassRec->getDecl()->printName(OS, Policy);
+ ClassRec->getOriginalDecl()->printName(OS, Policy);
return;
}
if (Policy.SuppressTemplateArgsInCXXConstructors) {
if (auto *InjTy = ClassType->getAs<InjectedClassNameType>()) {
- InjTy->getDecl()->printName(OS, Policy);
+ InjTy->getOriginalDecl()->printName(OS, Policy);
return;
}
}
@@ -184,7 +184,7 @@ void DeclarationName::print(raw_ostream &OS,
OS << "operator ";
QualType Type = getCXXNameType();
if (const RecordType *Rec = Type->getAs<RecordType>()) {
- OS << *Rec->getDecl();
+ OS << *Rec->getOriginalDecl();
return;
}
// We know we're printing C++ here, ensure we print 'bool' properly.
diff --git a/clang/lib/AST/DynamicRecursiveASTVisitor.cpp b/clang/lib/AST/DynamicRecursiveASTVisitor.cpp
index b478e7a..8821cd3 100644
--- a/clang/lib/AST/DynamicRecursiveASTVisitor.cpp
+++ b/clang/lib/AST/DynamicRecursiveASTVisitor.cpp
@@ -115,8 +115,12 @@ template <bool Const> struct Impl : RecursiveASTVisitor<Impl<Const>> {
bool TraverseAST(ASTContext &AST) { return Visitor.TraverseAST(AST); }
bool TraverseAttr(Attr *At) { return Visitor.TraverseAttr(At); }
bool TraverseDecl(Decl *D) { return Visitor.TraverseDecl(D); }
- bool TraverseType(QualType T) { return Visitor.TraverseType(T); }
- bool TraverseTypeLoc(TypeLoc TL) { return Visitor.TraverseTypeLoc(TL); }
+ bool TraverseType(QualType T, bool TraverseQualifier = true) {
+ return Visitor.TraverseType(T, TraverseQualifier);
+ }
+ bool TraverseTypeLoc(TypeLoc TL, bool TraverseQualifier = true) {
+ return Visitor.TraverseTypeLoc(TL, TraverseQualifier);
+ }
bool TraverseStmt(Stmt *S) { return Visitor.TraverseStmt(S); }
bool TraverseConstructorInitializer(CXXCtorInitializer *Init) {
@@ -172,7 +176,7 @@ template <bool Const> struct Impl : RecursiveASTVisitor<Impl<Const>> {
return Visitor.TraverseLambdaCapture(LE, C, Init);
}
- bool TraverseNestedNameSpecifier(NestedNameSpecifier *NNS) {
+ bool TraverseNestedNameSpecifier(NestedNameSpecifier NNS) {
return Visitor.TraverseNestedNameSpecifier(NNS);
}
@@ -241,8 +245,8 @@ template <bool Const> struct Impl : RecursiveASTVisitor<Impl<Const>> {
// Types.
#define ABSTRACT_TYPE(CLASS, BASE)
#define TYPE(CLASS, BASE) \
- bool Traverse##CLASS##Type(CLASS##Type *T) { \
- return Visitor.Traverse##CLASS##Type(T); \
+ bool Traverse##CLASS##Type(CLASS##Type *T, bool TraverseQualifier) { \
+ return Visitor.Traverse##CLASS##Type(T, TraverseQualifier); \
}
#include "clang/AST/TypeNodes.inc"
@@ -255,8 +259,8 @@ template <bool Const> struct Impl : RecursiveASTVisitor<Impl<Const>> {
// TypeLocs.
#define ABSTRACT_TYPELOC(CLASS, BASE)
#define TYPELOC(CLASS, BASE) \
- bool Traverse##CLASS##TypeLoc(CLASS##TypeLoc TL) { \
- return Visitor.Traverse##CLASS##TypeLoc(TL); \
+ bool Traverse##CLASS##TypeLoc(CLASS##TypeLoc TL, bool TraverseQualifier) { \
+ return Visitor.Traverse##CLASS##TypeLoc(TL, TraverseQualifier); \
}
#include "clang/AST/TypeLocNodes.def"
@@ -297,7 +301,6 @@ FORWARD_TO_BASE(TraverseAttr, Attr, *)
FORWARD_TO_BASE(TraverseConstructorInitializer, CXXCtorInitializer, *)
FORWARD_TO_BASE(TraverseDecl, Decl, *)
FORWARD_TO_BASE(TraverseStmt, Stmt, *)
-FORWARD_TO_BASE(TraverseNestedNameSpecifier, NestedNameSpecifier, *)
FORWARD_TO_BASE(TraverseTemplateInstantiations, ClassTemplateDecl, *)
FORWARD_TO_BASE(TraverseTemplateInstantiations, VarTemplateDecl, *)
FORWARD_TO_BASE(TraverseTemplateInstantiations, FunctionTemplateDecl, *)
@@ -314,8 +317,22 @@ FORWARD_TO_BASE_EXACT(TraverseTemplateArgument, const TemplateArgument &)
FORWARD_TO_BASE_EXACT(TraverseTemplateArguments, ArrayRef<TemplateArgument>)
FORWARD_TO_BASE_EXACT(TraverseTemplateArgumentLoc, const TemplateArgumentLoc &)
FORWARD_TO_BASE_EXACT(TraverseTemplateName, TemplateName)
-FORWARD_TO_BASE_EXACT(TraverseType, QualType)
-FORWARD_TO_BASE_EXACT(TraverseTypeLoc, TypeLoc)
+FORWARD_TO_BASE_EXACT(TraverseNestedNameSpecifier, NestedNameSpecifier)
+
+template <bool Const>
+bool DynamicRecursiveASTVisitorBase<Const>::TraverseType(
+ QualType T, bool TraverseQualifier) {
+ return Impl<Const>(*this).RecursiveASTVisitor<Impl<Const>>::TraverseType(
+ T, TraverseQualifier);
+}
+
+template <bool Const>
+bool DynamicRecursiveASTVisitorBase<Const>::TraverseTypeLoc(
+ TypeLoc TL, bool TraverseQualifier) {
+ return Impl<Const>(*this).RecursiveASTVisitor<Impl<Const>>::TraverseTypeLoc(
+ TL, TraverseQualifier);
+}
+
FORWARD_TO_BASE_EXACT(TraverseTypeConstraint, const TypeConstraint *)
FORWARD_TO_BASE_EXACT(TraverseObjCProtocolLoc, ObjCProtocolLoc)
FORWARD_TO_BASE_EXACT(TraverseNestedNameSpecifierLoc, NestedNameSpecifierLoc)
@@ -354,13 +371,25 @@ bool DynamicRecursiveASTVisitorBase<Const>::dataTraverseNode(
// Declare Traverse*() and friends for all concrete Type classes.
#define ABSTRACT_TYPE(CLASS, BASE)
#define TYPE(CLASS, BASE) \
- FORWARD_TO_BASE(Traverse##CLASS##Type, CLASS##Type, *) \
+ template <bool Const> \
+ bool DynamicRecursiveASTVisitorBase<Const>::Traverse##CLASS##Type( \
+ MaybeConst<CLASS##Type> *T, bool TraverseQualifier) { \
+ return Impl<Const>(*this) \
+ .RecursiveASTVisitor<Impl<Const>>::Traverse##CLASS##Type( \
+ const_cast<CLASS##Type *>(T), TraverseQualifier); \
+ } \
FORWARD_TO_BASE(WalkUpFrom##CLASS##Type, CLASS##Type, *)
#include "clang/AST/TypeNodes.inc"
#define ABSTRACT_TYPELOC(CLASS, BASE)
#define TYPELOC(CLASS, BASE) \
- FORWARD_TO_BASE_EXACT(Traverse##CLASS##TypeLoc, CLASS##TypeLoc)
+ template <bool Const> \
+ bool DynamicRecursiveASTVisitorBase<Const>::Traverse##CLASS##TypeLoc( \
+ CLASS##TypeLoc TL, bool TraverseQualifier) { \
+ return Impl<Const>(*this) \
+ .RecursiveASTVisitor<Impl<Const>>::Traverse##CLASS##TypeLoc( \
+ TL, TraverseQualifier); \
+ }
#include "clang/AST/TypeLocNodes.def"
#define TYPELOC(CLASS, BASE) \
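
The DynamicRecursiveASTVisitor hunk above threads a TraverseQualifier flag through every type traversal hook, so out-of-tree visitors that override these methods must accept and forward it. A minimal sketch, assuming the post-patch virtual signatures (QualifierAwareVisitor is a hypothetical subclass):

    #include "clang/AST/DynamicRecursiveASTVisitor.h"
    #include "clang/AST/TypeLoc.h"

    struct QualifierAwareVisitor : clang::DynamicRecursiveASTVisitor {
      bool TraverseTypeLoc(clang::TypeLoc TL, bool TraverseQualifier) override {
        // Custom inspection of TL could go here before recursing.
        return DynamicRecursiveASTVisitor::TraverseTypeLoc(TL, TraverseQualifier);
      }
    };
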
diff --git a/clang/lib/AST/Expr.cpp b/clang/lib/AST/Expr.cpp
index cd9672d..7cac655 100644
--- a/clang/lib/AST/Expr.cpp
+++ b/clang/lib/AST/Expr.cpp
@@ -75,8 +75,7 @@ const CXXRecordDecl *Expr::getBestDynamicClassType() const {
return nullptr;
const RecordType *Ty = DerivedType->castAs<RecordType>();
- Decl *D = Ty->getDecl();
- return cast<CXXRecordDecl>(D);
+ return cast<CXXRecordDecl>(Ty->getOriginalDecl())->getDefinitionOrSelf();
}
const Expr *Expr::skipRValueSubobjectAdjustments(
@@ -92,7 +91,9 @@ const Expr *Expr::skipRValueSubobjectAdjustments(
E->getType()->isRecordType()) {
E = CE->getSubExpr();
const auto *Derived =
- cast<CXXRecordDecl>(E->getType()->castAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(
+ E->getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
Adjustments.push_back(SubobjectAdjustment(CE, Derived));
continue;
}
@@ -268,7 +269,7 @@ QualType Expr::getEnumCoercedType(const ASTContext &Ctx) const {
if (const auto *ECD = getEnumConstantDecl()) {
const auto *ED = cast<EnumDecl>(ECD->getDeclContext());
if (ED->isCompleteDefinition())
- return Ctx.getTypeDeclType(ED);
+ return Ctx.getCanonicalTagType(ED);
}
return getType();
}
@@ -2031,7 +2032,8 @@ CXXBaseSpecifier **CastExpr::path_buffer() {
const FieldDecl *CastExpr::getTargetFieldForToUnionCast(QualType unionType,
QualType opType) {
- auto RD = unionType->castAs<RecordType>()->getDecl();
+ auto RD =
+ unionType->castAs<RecordType>()->getOriginalDecl()->getDefinitionOrSelf();
return getTargetFieldForToUnionCast(RD, opType);
}
@@ -3221,7 +3223,7 @@ static const Expr *skipTemporaryBindingsNoOpCastsAndParens(const Expr *E) {
/// isTemporaryObject - Determines if this expression produces a
/// temporary of the given class type.
bool Expr::isTemporaryObject(ASTContext &C, const CXXRecordDecl *TempTy) const {
- if (!C.hasSameUnqualifiedType(getType(), C.getTypeDeclType(TempTy)))
+ if (!C.hasSameUnqualifiedType(getType(), C.getCanonicalTagType(TempTy)))
return false;
const Expr *E = skipTemporaryBindingsNoOpCastsAndParens(this);
@@ -3407,7 +3409,10 @@ bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef,
if (ILE->getType()->isRecordType()) {
unsigned ElementNo = 0;
- RecordDecl *RD = ILE->getType()->castAs<RecordType>()->getDecl();
+ RecordDecl *RD = ILE->getType()
+ ->castAs<RecordType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf();
// In C++17, bases were added to the list of members used by aggregate
// initialization.
@@ -4050,8 +4055,10 @@ Expr::isNullPointerConstant(ASTContext &Ctx,
return NPCK_CXX11_nullptr;
if (const RecordType *UT = getType()->getAsUnionType())
- if (!Ctx.getLangOpts().CPlusPlus11 &&
- UT && UT->getDecl()->hasAttr<TransparentUnionAttr>())
+ if (!Ctx.getLangOpts().CPlusPlus11 && UT &&
+ UT->getOriginalDecl()
+ ->getMostRecentDecl()
+ ->hasAttr<TransparentUnionAttr>())
if (const CompoundLiteralExpr *CLE = dyn_cast<CompoundLiteralExpr>(this)){
const Expr *InitExpr = CLE->getInitializer();
if (const InitListExpr *ILE = dyn_cast<InitListExpr>(InitExpr))
diff --git a/clang/lib/AST/ExprCXX.cpp b/clang/lib/AST/ExprCXX.cpp
index a099e97..97ae4a0 100644
--- a/clang/lib/AST/ExprCXX.cpp
+++ b/clang/lib/AST/ExprCXX.cpp
@@ -1319,7 +1319,7 @@ LambdaExpr *LambdaExpr::Create(const ASTContext &Context, CXXRecordDecl *Class,
bool ContainsUnexpandedParameterPack) {
// Determine the type of the expression (i.e., the type of the
// function object we're creating).
- QualType T = Context.getTypeDeclType(Class);
+ CanQualType T = Context.getCanonicalTagType(Class);
unsigned Size = totalSizeToAlloc<Stmt *>(CaptureInits.size() + 1);
void *Mem = Context.Allocate(Size);
@@ -1687,10 +1687,9 @@ CXXRecordDecl *UnresolvedMemberExpr::getNamingClass() {
// It can't be dependent: after all, we were actually able to do the
// lookup.
CXXRecordDecl *Record = nullptr;
- auto *NNS = getQualifier();
- if (NNS && NNS->getKind() != NestedNameSpecifier::Super) {
- const Type *T = getQualifier()->getAsType();
- assert(T && "qualifier in member expression does not name type");
+ if (NestedNameSpecifier Qualifier = getQualifier();
+ Qualifier.getKind() == NestedNameSpecifier::Kind::Type) {
+ const Type *T = getQualifier().getAsType();
Record = T->getAsCXXRecordDecl();
assert(Record && "qualifier in member expression does not name record");
}
diff --git a/clang/lib/AST/ExprConcepts.cpp b/clang/lib/AST/ExprConcepts.cpp
index ac0e566..36f910d 100644
--- a/clang/lib/AST/ExprConcepts.cpp
+++ b/clang/lib/AST/ExprConcepts.cpp
@@ -41,10 +41,10 @@ ConceptSpecializationExpr::ConceptSpecializationExpr(
assert(!Loc->getNestedNameSpecifierLoc() ||
(!Loc->getNestedNameSpecifierLoc()
.getNestedNameSpecifier()
- ->isInstantiationDependent() &&
+ .isInstantiationDependent() &&
!Loc->getNestedNameSpecifierLoc()
.getNestedNameSpecifier()
- ->containsUnexpandedParameterPack()));
+ .containsUnexpandedParameterPack()));
assert((!isValueDependent() || isInstantiationDependent()) &&
"should not be value-dependent");
}
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index 3679327..5cf2b9a 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -401,7 +401,7 @@ namespace {
assert(!Invalid && "invalid designator has no subobject type");
return MostDerivedPathLength == Entries.size()
? MostDerivedType
- : Ctx.getRecordType(getAsBaseClass(Entries.back()));
+ : Ctx.getCanonicalTagType(getAsBaseClass(Entries.back()));
}
/// Update this designator to refer to the first element within this array.
@@ -2623,7 +2623,8 @@ static bool CheckEvaluationResult(CheckEvaluationResultKind CERK,
Value.getUnionValue(), Kind, Value.getUnionField(), CheckedTemps);
}
if (Value.isStruct()) {
- RecordDecl *RD = Type->castAs<RecordType>()->getDecl();
+ RecordDecl *RD =
+ Type->castAs<RecordType>()->getOriginalDecl()->getDefinitionOrSelf();
if (const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD)) {
unsigned BaseIndex = 0;
for (const CXXBaseSpecifier &BS : CD->bases()) {
@@ -4109,7 +4110,7 @@ findSubobject(EvalInfo &Info, const Expr *E, const CompleteObject &Obj,
}
// Next subobject is a class, struct or union field.
- RecordDecl *RD = ObjType->castAs<RecordType>()->getDecl();
+ RecordDecl *RD = ObjType->castAs<RecordType>()->getOriginalDecl();
if (RD->isUnion()) {
const FieldDecl *UnionField = O->getUnionField();
if (!UnionField ||
@@ -4144,7 +4145,7 @@ findSubobject(EvalInfo &Info, const Expr *E, const CompleteObject &Obj,
const CXXRecordDecl *Base = getAsBaseClass(Sub.Entries[I]);
O = &O->getStructBase(getBaseIndex(Derived, Base));
- ObjType = getSubobjectType(ObjType, Info.Ctx.getRecordType(Base));
+ ObjType = getSubobjectType(ObjType, Info.Ctx.getCanonicalTagType(Base));
}
}
}
@@ -6363,7 +6364,7 @@ static bool HandleDynamicCast(EvalInfo &Info, const ExplicitCastExpr *E,
const CXXRecordDecl *C = E->getTypeAsWritten()->getPointeeCXXRecordDecl();
assert(C && "dynamic_cast target is not void pointer nor class");
- CanQualType CQT = Info.Ctx.getCanonicalType(Info.Ctx.getRecordType(C));
+ CanQualType CQT = Info.Ctx.getCanonicalTagType(C);
auto RuntimeCheckFailed = [&] (CXXBasePaths *Paths) {
// C++ [expr.dynamic.cast]p9:
@@ -6389,7 +6390,7 @@ static bool HandleDynamicCast(EvalInfo &Info, const ExplicitCastExpr *E,
}
Info.FFDiag(E, diag::note_constexpr_dynamic_cast_to_reference_failed)
<< DiagKind << Ptr.Designator.getType(Info.Ctx)
- << Info.Ctx.getRecordType(DynType->Type)
+ << Info.Ctx.getCanonicalTagType(DynType->Type)
<< E->getType().getUnqualifiedType();
return false;
};
@@ -6886,8 +6887,8 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This,
// FIXME: This immediately starts the lifetime of all members of
// an anonymous struct. It would be preferable to strictly start
// member lifetime in initialization order.
- Success &=
- handleDefaultInitValue(Info.Ctx.getRecordType(CD), *Value);
+ Success &= handleDefaultInitValue(Info.Ctx.getCanonicalTagType(CD),
+ *Value);
}
// Store Subobject as its parent before updating it for the last element
// in the chain.
@@ -7794,7 +7795,8 @@ class BufferToAPValueConverter {
}
std::optional<APValue> visit(const EnumType *Ty, CharUnits Offset) {
- QualType RepresentationType = Ty->getDecl()->getIntegerType();
+ QualType RepresentationType =
+ Ty->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
assert(!RepresentationType.isNull() &&
"enum forward decl should be caught by Sema");
const auto *AsBuiltin =
@@ -8528,7 +8530,7 @@ public:
if (auto *DD = dyn_cast<CXXDestructorDecl>(FD)) {
assert(This && "no 'this' pointer for destructor call");
return HandleDestruction(Info, E, *This,
- Info.Ctx.getRecordType(DD->getParent())) &&
+ Info.Ctx.getCanonicalTagType(DD->getParent())) &&
CallScope.destroy();
}
@@ -8589,8 +8591,10 @@ public:
const FieldDecl *FD = dyn_cast<FieldDecl>(E->getMemberDecl());
if (!FD) return Error(E);
assert(!FD->getType()->isReferenceType() && "prvalue reference?");
- assert(BaseTy->castAs<RecordType>()->getDecl()->getCanonicalDecl() ==
- FD->getParent()->getCanonicalDecl() && "record / field mismatch");
+ assert(
+ BaseTy->castAs<RecordType>()->getOriginalDecl()->getCanonicalDecl() ==
+ FD->getParent()->getCanonicalDecl() &&
+ "record / field mismatch");
// Note: there is no lvalue base here. But this case should only ever
// happen in C or in C++98, where we cannot be evaluating a constexpr
@@ -8817,8 +8821,10 @@ public:
const ValueDecl *MD = E->getMemberDecl();
if (const FieldDecl *FD = dyn_cast<FieldDecl>(E->getMemberDecl())) {
- assert(BaseTy->castAs<RecordType>()->getDecl()->getCanonicalDecl() ==
- FD->getParent()->getCanonicalDecl() && "record / field mismatch");
+ assert(
+ BaseTy->castAs<RecordType>()->getOriginalDecl()->getCanonicalDecl() ==
+ FD->getParent()->getCanonicalDecl() &&
+ "record / field mismatch");
(void)BaseTy;
if (!HandleLValueMember(this->Info, E, Result, FD))
return false;
@@ -9257,8 +9263,8 @@ bool LValueExprEvaluator::VisitCXXTypeidExpr(const CXXTypeidExpr *E) {
if (!DynType)
return false;
- TypeInfo =
- TypeInfoLValue(Info.Ctx.getRecordType(DynType->Type).getTypePtr());
+ TypeInfo = TypeInfoLValue(
+ Info.Ctx.getCanonicalTagType(DynType->Type).getTypePtr());
}
return Success(APValue::LValueBase::getTypeInfo(TypeInfo, E->getType()));
@@ -10818,7 +10824,8 @@ static bool HandleClassZeroInitialization(EvalInfo &Info, const Expr *E,
}
bool RecordExprEvaluator::ZeroInitialization(const Expr *E, QualType T) {
- const RecordDecl *RD = T->castAs<RecordType>()->getDecl();
+ const RecordDecl *RD =
+ T->castAs<RecordType>()->getOriginalDecl()->getDefinitionOrSelf();
if (RD->isInvalidDecl()) return false;
if (RD->isUnion()) {
// C++11 [dcl.init]p5: If T is a (possibly cv-qualified) union type, the
@@ -10887,8 +10894,10 @@ bool RecordExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
bool RecordExprEvaluator::VisitCXXParenListOrInitListExpr(
const Expr *ExprToVisit, ArrayRef<Expr *> Args) {
- const RecordDecl *RD =
- ExprToVisit->getType()->castAs<RecordType>()->getDecl();
+ const RecordDecl *RD = ExprToVisit->getType()
+ ->castAs<RecordType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf();
if (RD->isInvalidDecl()) return false;
const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
auto *CXXRD = dyn_cast<CXXRecordDecl>(RD);
@@ -11116,7 +11125,10 @@ bool RecordExprEvaluator::VisitCXXStdInitializerListExpr(
Result = APValue(APValue::UninitStruct(), 0, 2);
Array.moveInto(Result.getStructField(0));
- RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
+ RecordDecl *Record = E->getType()
+ ->castAs<RecordType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf();
RecordDecl::field_iterator Field = Record->field_begin();
assert(Field != Record->field_end() &&
Info.Ctx.hasSameType(Field->getType()->getPointeeType(),
@@ -11628,7 +11640,13 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
return Success(APValue(ResultElements.data(), ResultElements.size()), E);
}
case Builtin::BI__builtin_elementwise_add_sat:
- case Builtin::BI__builtin_elementwise_sub_sat: {
+ case Builtin::BI__builtin_elementwise_sub_sat:
+ case clang::X86::BI__builtin_ia32_pmulhuw128:
+ case clang::X86::BI__builtin_ia32_pmulhuw256:
+ case clang::X86::BI__builtin_ia32_pmulhuw512:
+ case clang::X86::BI__builtin_ia32_pmulhw128:
+ case clang::X86::BI__builtin_ia32_pmulhw256:
+ case clang::X86::BI__builtin_ia32_pmulhw512: {
APValue SourceLHS, SourceRHS;
if (!EvaluateAsRValue(Info, E->getArg(0), SourceLHS) ||
!EvaluateAsRValue(Info, E->getArg(1), SourceRHS))
@@ -11653,6 +11671,18 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
APSInt(LHS.isSigned() ? LHS.ssub_sat(RHS) : LHS.usub_sat(RHS),
DestEltTy->isUnsignedIntegerOrEnumerationType())));
break;
+ case clang::X86::BI__builtin_ia32_pmulhuw128:
+ case clang::X86::BI__builtin_ia32_pmulhuw256:
+ case clang::X86::BI__builtin_ia32_pmulhuw512:
+ ResultElements.push_back(APValue(APSInt(llvm::APIntOps::mulhu(LHS, RHS),
+ /*isUnsigned=*/true)));
+ break;
+ case clang::X86::BI__builtin_ia32_pmulhw128:
+ case clang::X86::BI__builtin_ia32_pmulhw256:
+ case clang::X86::BI__builtin_ia32_pmulhw512:
+ ResultElements.push_back(APValue(APSInt(llvm::APIntOps::mulhs(LHS, RHS),
+ /*isUnsigned=*/false)));
+ break;
}
}
@@ -12889,7 +12919,10 @@ static bool convertUnsignedAPIntToCharUnits(const llvm::APInt &Int,
static void addFlexibleArrayMemberInitSize(EvalInfo &Info, const QualType &T,
const LValue &LV, CharUnits &Size) {
if (!T.isNull() && T->isStructureType() &&
- T->getAsStructureType()->getDecl()->hasFlexibleArrayMember())
+ T->getAsStructureType()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->hasFlexibleArrayMember())
if (const auto *V = LV.getLValueBase().dyn_cast<const ValueDecl *>())
if (const auto *VD = dyn_cast<VarDecl>(V))
if (VD->hasInit())
@@ -15136,7 +15169,7 @@ bool IntExprEvaluator::VisitOffsetOfExpr(const OffsetOfExpr *OOE) {
const RecordType *RT = CurrentType->getAs<RecordType>();
if (!RT)
return Error(OOE);
- RecordDecl *RD = RT->getDecl();
+ RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
if (RD->isInvalidDecl()) return false;
const ASTRecordLayout &RL = Info.Ctx.getASTRecordLayout(RD);
unsigned i = MemberDecl->getFieldIndex();
@@ -15158,7 +15191,7 @@ bool IntExprEvaluator::VisitOffsetOfExpr(const OffsetOfExpr *OOE) {
const RecordType *RT = CurrentType->getAs<RecordType>();
if (!RT)
return Error(OOE);
- RecordDecl *RD = RT->getDecl();
+ RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
if (RD->isInvalidDecl()) return false;
const ASTRecordLayout &RL = Info.Ctx.getASTRecordLayout(RD);
@@ -15169,7 +15202,8 @@ bool IntExprEvaluator::VisitOffsetOfExpr(const OffsetOfExpr *OOE) {
return Error(OOE);
// Add the offset to the base.
- Result += RL.getBaseClassOffset(cast<CXXRecordDecl>(BaseRT->getDecl()));
+ Result += RL.getBaseClassOffset(cast<CXXRecordDecl>(
+ BaseRT->getOriginalDecl()->getDefinitionOrSelf()));
break;
}
}
@@ -15348,7 +15382,7 @@ bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) {
if (Info.Ctx.getLangOpts().CPlusPlus && DestType->isEnumeralType()) {
const EnumType *ET = dyn_cast<EnumType>(DestType.getCanonicalType());
- const EnumDecl *ED = ET->getDecl();
+ const EnumDecl *ED = ET->getOriginalDecl()->getDefinitionOrSelf();
// Check that the value is within the range of the enumeration values.
//
      // This corresponds to [expr.static.cast]p10 which says:
@@ -18010,7 +18044,8 @@ bool Expr::isPotentialConstantExpr(const FunctionDecl *FD,
// Fabricate an arbitrary expression on the stack and pretend that it
// is a temporary being used as the 'this' pointer.
LValue This;
- ImplicitValueInitExpr VIE(RD ? Info.Ctx.getRecordType(RD) : Info.Ctx.IntTy);
+ ImplicitValueInitExpr VIE(RD ? Info.Ctx.getCanonicalTagType(RD)
+ : Info.Ctx.IntTy);
This.set({&VIE, Info.CurrentCall->Index});
ArrayRef<const Expr*> Args;
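
The new vector-builtin cases above fold the x86 pmulhw/pmulhuw intrinsics during constant evaluation: each 16-bit lane becomes the high half of the widened 32-bit product, computed via llvm::APIntOps::mulhs (signed) and mulhu (unsigned). The following is a minimal scalar sketch of that per-lane rule; mulh_s16 and mulh_u16 are illustrative names, not Clang or LLVM APIs.

#include <cassert>
#include <cstdint>

// Scalar model of one vector lane: the high 16 bits of the widened
// 32-bit product, matching what APIntOps::mulhs / mulhu produce.
static std::int16_t mulh_s16(std::int16_t a, std::int16_t b) {
  return static_cast<std::int16_t>((std::int32_t(a) * std::int32_t(b)) >> 16);
}

static std::uint16_t mulh_u16(std::uint16_t a, std::uint16_t b) {
  return static_cast<std::uint16_t>(
      (std::uint32_t(a) * std::uint32_t(b)) >> 16);
}

int main() {
  assert(mulh_u16(0xFFFFu, 0xFFFFu) == 0xFFFEu); // 0xFFFE0001 >> 16
  assert(mulh_s16(-1, -1) == 0);                 // product 1, high half 0
  return 0;
}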
diff --git a/clang/lib/AST/FormatString.cpp b/clang/lib/AST/FormatString.cpp
index 112b756d..502a3e6 100644
--- a/clang/lib/AST/FormatString.cpp
+++ b/clang/lib/AST/FormatString.cpp
@@ -417,10 +417,11 @@ ArgType::matchesType(ASTContext &C, QualType argTy) const {
// If the enum is incomplete we know nothing about the underlying type.
// Assume that it's 'int'. Do not use the underlying type for a scoped
// enumeration.
- if (!ETy->getDecl()->isComplete())
+ const EnumDecl *ED = ETy->getOriginalDecl()->getDefinitionOrSelf();
+ if (!ED->isComplete())
return NoMatch;
if (ETy->isUnscopedEnumerationType())
- argTy = ETy->getDecl()->getIntegerType();
+ argTy = ED->getIntegerType();
}
if (const auto *BT = argTy->getAs<BuiltinType>()) {
@@ -466,10 +467,11 @@ ArgType::matchesType(ASTContext &C, QualType argTy) const {
// If the enum is incomplete we know nothing about the underlying type.
// Assume that it's 'int'. Do not use the underlying type for a scoped
// enumeration as that needs an exact match.
- if (!ETy->getDecl()->isComplete())
+ const EnumDecl *ED = ETy->getOriginalDecl()->getDefinitionOrSelf();
+ if (!ED->isComplete())
argTy = C.IntTy;
else if (ETy->isUnscopedEnumerationType())
- argTy = ETy->getDecl()->getIntegerType();
+ argTy = ED->getIntegerType();
}
if (argTy->isSaturatedFixedPointType())
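
The FormatString hunks only rephrase an existing rule: for format-string checking, an unscoped enum argument is compared through its underlying integer type, an incomplete enum is assumed to be 'int', and a scoped enum is never substituted because it needs an exact match. A self-contained illustration of what "underlying type" means here (plain C++, not the checker code):

#include <type_traits>

// Unscoped enumeration with a fixed underlying type: format checking
// compares the argument via this underlying integer type.
enum Color : unsigned char { Red, Green, Blue };

// Scoped enumeration: the checker does not substitute the underlying type.
enum class Mode { A, B };

static_assert(std::is_same_v<std::underlying_type_t<Color>, unsigned char>);
static_assert(std::is_same_v<std::underlying_type_t<Mode>, int>);

int main() { return 0; }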
diff --git a/clang/lib/AST/InheritViz.cpp b/clang/lib/AST/InheritViz.cpp
index 1dafed8..c03492c 100644
--- a/clang/lib/AST/InheritViz.cpp
+++ b/clang/lib/AST/InheritViz.cpp
@@ -89,8 +89,8 @@ void InheritanceHierarchyWriter::WriteNode(QualType Type, bool FromVirtual) {
Out << " \"];\n";
// Display the base classes.
- const auto *Decl =
- static_cast<const CXXRecordDecl *>(Type->castAs<RecordType>()->getDecl());
+ const auto *Decl = static_cast<const CXXRecordDecl *>(
+ Type->castAs<RecordType>()->getOriginalDecl());
for (const auto &Base : Decl->bases()) {
QualType CanonBaseType = Context.getCanonicalType(Base.getType());
@@ -133,7 +133,7 @@ InheritanceHierarchyWriter::WriteNodeReference(QualType Type,
/// viewInheritance - Display the inheritance hierarchy of this C++
/// class using GraphViz.
void CXXRecordDecl::viewInheritance(ASTContext& Context) const {
- QualType Self = Context.getTypeDeclType(this);
+ QualType Self = Context.getCanonicalTagType(this);
int FD;
SmallString<128> Filename;
diff --git a/clang/lib/AST/ItaniumCXXABI.cpp b/clang/lib/AST/ItaniumCXXABI.cpp
index 6ceedd6..43a8bcd 100644
--- a/clang/lib/AST/ItaniumCXXABI.cpp
+++ b/clang/lib/AST/ItaniumCXXABI.cpp
@@ -42,10 +42,10 @@ namespace {
///
/// Returns the name of anonymous union VarDecl or nullptr if it is not found.
static const IdentifierInfo *findAnonymousUnionVarDeclName(const VarDecl& VD) {
- const RecordType *RT = VD.getType()->getAs<RecordType>();
- assert(RT && "type of VarDecl is expected to be RecordType.");
- assert(RT->getDecl()->isUnion() && "RecordType is expected to be a union.");
- if (const FieldDecl *FD = RT->getDecl()->findFirstNamedDataMember()) {
+ const auto *RT = VD.getType()->castAs<RecordType>();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
+ assert(RD->isUnion() && "RecordType is expected to be a union.");
+ if (const FieldDecl *FD = RD->findFirstNamedDataMember()) {
return FD->getIdentifier();
}
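
For context, this is the declaration shape findAnonymousUnionVarDeclName deals with, shown as a stand-alone translation unit (illustrative, not Clang code): an anonymous union is backed by an unnamed VarDecl, and the helper identifies it by the union's first named data member.

#include <cstdio>

namespace demo {
// The anonymous union's backing variable has no name of its own; "x" is
// the first named data member the helper falls back to.
static union {
  int x;
  float y;
};
} // namespace demo

int main() {
  demo::x = 42;
  std::printf("%d\n", demo::x);
  return 0;
}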
diff --git a/clang/lib/AST/ItaniumMangle.cpp b/clang/lib/AST/ItaniumMangle.cpp
index 5233648..a7380a1 100644
--- a/clang/lib/AST/ItaniumMangle.cpp
+++ b/clang/lib/AST/ItaniumMangle.cpp
@@ -463,9 +463,7 @@ public:
void mangleVendorType(StringRef Name);
private:
-
bool mangleSubstitution(const NamedDecl *ND);
- bool mangleSubstitution(NestedNameSpecifier *NNS);
bool mangleSubstitution(QualType T);
bool mangleSubstitution(TemplateName Template);
bool mangleSubstitution(uintptr_t Ptr);
@@ -479,21 +477,15 @@ private:
addSubstitution(reinterpret_cast<uintptr_t>(ND));
}
- void addSubstitution(NestedNameSpecifier *NNS) {
- NNS = Context.getASTContext().getCanonicalNestedNameSpecifier(NNS);
-
- addSubstitution(reinterpret_cast<uintptr_t>(NNS));
- }
void addSubstitution(QualType T);
void addSubstitution(TemplateName Template);
void addSubstitution(uintptr_t Ptr);
// Destructive copy substitutions from other mangler.
void extendSubstitutions(CXXNameMangler* Other);
- void mangleUnresolvedPrefix(NestedNameSpecifier *qualifier,
+ void mangleUnresolvedPrefix(NestedNameSpecifier Qualifier,
bool recursive = false);
- void mangleUnresolvedName(NestedNameSpecifier *qualifier,
- DeclarationName name,
+ void mangleUnresolvedName(NestedNameSpecifier Qualifier, DeclarationName name,
const TemplateArgumentLoc *TemplateArgs,
unsigned NumTemplateArgs,
unsigned KnownArity = UnknownArity);
@@ -542,7 +534,7 @@ private:
void mangleNestedNameWithClosurePrefix(GlobalDecl GD,
const NamedDecl *PrefixND,
const AbiTagList *AdditionalAbiTags);
- void manglePrefix(NestedNameSpecifier *qualifier);
+ void manglePrefix(NestedNameSpecifier Qualifier);
void manglePrefix(const DeclContext *DC, bool NoFunction=false);
void manglePrefix(QualType type);
void mangleTemplatePrefix(GlobalDecl GD, bool NoFunction=false);
@@ -588,12 +580,10 @@ private:
void mangleMemberExprBase(const Expr *base, bool isArrow);
void mangleMemberExpr(const Expr *base, bool isArrow,
- NestedNameSpecifier *qualifier,
- NamedDecl *firstQualifierLookup,
- DeclarationName name,
+ NestedNameSpecifier Qualifier,
+ NamedDecl *firstQualifierLookup, DeclarationName name,
const TemplateArgumentLoc *TemplateArgs,
- unsigned NumTemplateArgs,
- unsigned knownArity);
+ unsigned NumTemplateArgs, unsigned knownArity);
void mangleCastExpression(const Expr *E, StringRef CastEncoding);
void mangleInitListElements(const InitListExpr *InitList);
void mangleRequirement(SourceLocation RequiresExprLoc,
@@ -1334,6 +1324,21 @@ void CXXNameMangler::manglePrefix(QualType type) {
mangleTemplateArgs(Template, DTST->template_arguments());
addSubstitution(QualType(DTST, 0));
}
+ } else if (const auto *DNT = type->getAs<DependentNameType>()) {
+ // Clang 14 and before did not consider this substitutable.
+ bool Clang14Compat = isCompatibleWith(LangOptions::ClangABI::Ver14);
+ if (!Clang14Compat && mangleSubstitution(QualType(DNT, 0)))
+ return;
+
+ // Member expressions can have these without prefixes, but that
+ // should end up in mangleUnresolvedPrefix instead.
+ assert(DNT->getQualifier());
+ manglePrefix(DNT->getQualifier());
+
+ mangleSourceName(DNT->getIdentifier());
+
+ if (!Clang14Compat)
+ addSubstitution(QualType(DNT, 0));
} else {
// We use the QualType mangle type variant here because it handles
// substitutions.
@@ -1345,7 +1350,7 @@ void CXXNameMangler::manglePrefix(QualType type) {
///
/// \param recursive - true if this is being called recursively,
/// i.e. if there is more prefix "to the right".
-void CXXNameMangler::mangleUnresolvedPrefix(NestedNameSpecifier *qualifier,
+void CXXNameMangler::mangleUnresolvedPrefix(NestedNameSpecifier Qualifier,
bool recursive) {
// x, ::x
@@ -1362,8 +1367,11 @@ void CXXNameMangler::mangleUnresolvedPrefix(NestedNameSpecifier *qualifier,
// <unresolved-name> ::= [gs] sr <unresolved-qualifier-level>+ E
// <base-unresolved-name>
- switch (qualifier->getKind()) {
- case NestedNameSpecifier::Global:
+ switch (Qualifier.getKind()) {
+ case NestedNameSpecifier::Kind::Null:
+ llvm_unreachable("unexpected null nested name specifier");
+
+ case NestedNameSpecifier::Kind::Global:
Out << "gs";
// We want an 'sr' unless this is the entire NNS.
@@ -1373,27 +1381,29 @@ void CXXNameMangler::mangleUnresolvedPrefix(NestedNameSpecifier *qualifier,
// We never want an 'E' here.
return;
- case NestedNameSpecifier::Super:
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
llvm_unreachable("Can't mangle __super specifier");
- case NestedNameSpecifier::Namespace:
- if (qualifier->getPrefix())
- mangleUnresolvedPrefix(qualifier->getPrefix(),
+ case NestedNameSpecifier::Kind::Namespace: {
+ auto [Namespace, Prefix] = Qualifier.getAsNamespaceAndPrefix();
+ if (Prefix)
+ mangleUnresolvedPrefix(Prefix,
/*recursive*/ true);
else
Out << "sr";
- mangleSourceNameWithAbiTags(qualifier->getAsNamespace());
+ mangleSourceNameWithAbiTags(Namespace);
break;
+ }
- case NestedNameSpecifier::TypeSpec: {
- const Type *type = qualifier->getAsType();
+ case NestedNameSpecifier::Kind::Type: {
+ const Type *type = Qualifier.getAsType();
// We only want to use an unresolved-type encoding if this is one of:
// - a decltype
// - a template type parameter
// - a template template parameter with arguments
// In all of these cases, we should have no prefix.
- if (NestedNameSpecifier *Prefix = qualifier->getPrefix()) {
+ if (NestedNameSpecifier Prefix = type->getPrefix()) {
mangleUnresolvedPrefix(Prefix,
/*recursive=*/true);
} else {
@@ -1406,18 +1416,6 @@ void CXXNameMangler::mangleUnresolvedPrefix(NestedNameSpecifier *qualifier,
break;
}
-
- case NestedNameSpecifier::Identifier:
- // Member expressions can have these without prefixes.
- if (qualifier->getPrefix())
- mangleUnresolvedPrefix(qualifier->getPrefix(),
- /*recursive*/ true);
- else
- Out << "sr";
-
- mangleSourceName(qualifier->getAsIdentifier());
- // An Identifier has no type information, so we can't emit abi tags for it.
- break;
}
// If this was the innermost part of the NNS, and we fell out to
@@ -1429,10 +1427,11 @@ void CXXNameMangler::mangleUnresolvedPrefix(NestedNameSpecifier *qualifier,
/// Mangle an unresolved-name, which is generally used for names which
/// weren't resolved to specific entities.
void CXXNameMangler::mangleUnresolvedName(
- NestedNameSpecifier *qualifier, DeclarationName name,
+ NestedNameSpecifier Qualifier, DeclarationName name,
const TemplateArgumentLoc *TemplateArgs, unsigned NumTemplateArgs,
unsigned knownArity) {
- if (qualifier) mangleUnresolvedPrefix(qualifier);
+ if (Qualifier)
+ mangleUnresolvedPrefix(Qualifier);
switch (name.getNameKind()) {
// <base-unresolved-name> ::= <simple-id>
case DeclarationName::Identifier:
@@ -1581,7 +1580,10 @@ void CXXNameMangler::mangleUnqualifiedName(
if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
// We must have an anonymous union or struct declaration.
- const RecordDecl *RD = VD->getType()->castAs<RecordType>()->getDecl();
+ const RecordDecl *RD = VD->getType()
+ ->castAs<RecordType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf();
// Itanium C++ ABI 5.1.2:
//
@@ -2167,49 +2169,22 @@ void CXXNameMangler::mangleLambdaSig(const CXXRecordDecl *Lambda) {
Lambda->getLambdaStaticInvoker());
}
-void CXXNameMangler::manglePrefix(NestedNameSpecifier *qualifier) {
- switch (qualifier->getKind()) {
- case NestedNameSpecifier::Global:
+void CXXNameMangler::manglePrefix(NestedNameSpecifier Qualifier) {
+ switch (Qualifier.getKind()) {
+ case NestedNameSpecifier::Kind::Null:
+ case NestedNameSpecifier::Kind::Global:
// nothing
return;
- case NestedNameSpecifier::Super:
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
llvm_unreachable("Can't mangle __super specifier");
- case NestedNameSpecifier::Namespace:
- mangleName(qualifier->getAsNamespace()->getNamespace());
+ case NestedNameSpecifier::Kind::Namespace:
+ mangleName(Qualifier.getAsNamespaceAndPrefix().Namespace->getNamespace());
return;
- case NestedNameSpecifier::TypeSpec:
- if (NestedNameSpecifier *Prefix = qualifier->getPrefix()) {
- const auto *DTST =
- cast<DependentTemplateSpecializationType>(qualifier->getAsType());
- QualType NewT = getASTContext().getDependentTemplateSpecializationType(
- DTST->getKeyword(),
- {Prefix, DTST->getDependentTemplateName().getName(),
- /*HasTemplateKeyword=*/true},
- DTST->template_arguments(), /*IsCanonical=*/true);
- manglePrefix(NewT);
- return;
- }
- manglePrefix(QualType(qualifier->getAsType(), 0));
- return;
-
- case NestedNameSpecifier::Identifier:
- // Clang 14 and before did not consider this substitutable.
- bool Clang14Compat = isCompatibleWith(LangOptions::ClangABI::Ver14);
- if (!Clang14Compat && mangleSubstitution(qualifier))
- return;
-
- // Member expressions can have these without prefixes, but that
- // should end up in mangleUnresolvedPrefix instead.
- assert(qualifier->getPrefix());
- manglePrefix(qualifier->getPrefix());
-
- mangleSourceName(qualifier->getAsIdentifier());
-
- if (!Clang14Compat)
- addSubstitution(qualifier);
+ case NestedNameSpecifier::Kind::Type:
+ manglePrefix(QualType(Qualifier.getAsType(), 0));
return;
}
@@ -2269,8 +2244,7 @@ void CXXNameMangler::mangleTemplatePrefix(TemplateName Template) {
if (!Clang11Compat && mangleSubstitution(Template))
return;
- if (NestedNameSpecifier *Qualifier = Dependent->getQualifier())
- manglePrefix(Qualifier);
+ manglePrefix(Dependent->getQualifier());
if (Clang11Compat && mangleSubstitution(Template))
return;
@@ -2525,7 +2499,8 @@ bool CXXNameMangler::mangleUnresolvedTypeOrSimpleId(QualType Ty,
case Type::Enum:
case Type::Record:
- mangleSourceNameWithAbiTags(cast<TagType>(Ty)->getDecl());
+ mangleSourceNameWithAbiTags(
+ cast<TagType>(Ty)->getOriginalDecl()->getDefinitionOrSelf());
break;
case Type::TemplateSpecialization: {
@@ -2586,8 +2561,9 @@ bool CXXNameMangler::mangleUnresolvedTypeOrSimpleId(QualType Ty,
}
case Type::InjectedClassName:
- mangleSourceNameWithAbiTags(
- cast<InjectedClassNameType>(Ty)->getDecl());
+ mangleSourceNameWithAbiTags(cast<InjectedClassNameType>(Ty)
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf());
break;
case Type::DependentName:
@@ -2608,9 +2584,6 @@ bool CXXNameMangler::mangleUnresolvedTypeOrSimpleId(QualType Ty,
case Type::Using:
return mangleUnresolvedTypeOrSimpleId(cast<UsingType>(Ty)->desugar(),
Prefix);
- case Type::Elaborated:
- return mangleUnresolvedTypeOrSimpleId(
- cast<ElaboratedType>(Ty)->getNamedType(), Prefix);
}
return false;
@@ -3838,7 +3811,7 @@ void CXXNameMangler::mangleType(const RecordType *T) {
mangleType(static_cast<const TagType*>(T));
}
void CXXNameMangler::mangleType(const TagType *T) {
- mangleName(T->getDecl());
+ mangleName(T->getOriginalDecl()->getDefinitionOrSelf());
}
// <type> ::= <array-type>
@@ -3875,16 +3848,10 @@ void CXXNameMangler::mangleType(const IncompleteArrayType *T) {
// <pointer-to-member-type> ::= M <class type> <member type>
void CXXNameMangler::mangleType(const MemberPointerType *T) {
Out << 'M';
- if (auto *RD = T->getMostRecentCXXRecordDecl()) {
+ if (auto *RD = T->getMostRecentCXXRecordDecl())
mangleCXXRecordDecl(RD);
- } else {
- NestedNameSpecifier *NNS = T->getQualifier();
- if (auto *II = NNS->getAsIdentifier())
- mangleType(getASTContext().getDependentNameType(
- ElaboratedTypeKeyword::None, NNS->getPrefix(), II));
- else
- manglePrefix(NNS);
- }
+ else
+ mangleType(QualType(T->getQualifier().getAsType(), 0));
QualType PointeeType = T->getPointeeType();
if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(PointeeType)) {
mangleType(FPT);
@@ -4471,7 +4438,8 @@ void CXXNameMangler::mangleType(const InjectedClassNameType *T) {
// Mangle injected class name types as if the user had written the
// specialization out fully. It may not actually be possible to see
// this mangling, though.
- mangleType(T->getInjectedSpecializationType());
+ mangleType(T->getOriginalDecl()->getCanonicalTemplateSpecializationType(
+ getASTContext()));
}
void CXXNameMangler::mangleType(const TemplateSpecializationType *T) {
@@ -4747,7 +4715,7 @@ void CXXNameMangler::mangleIntegerLiteral(QualType T,
void CXXNameMangler::mangleMemberExprBase(const Expr *Base, bool IsArrow) {
// Ignore member expressions involving anonymous unions.
while (const auto *RT = Base->getType()->getAs<RecordType>()) {
- if (!RT->getDecl()->isAnonymousStructOrUnion())
+ if (!RT->getOriginalDecl()->isAnonymousStructOrUnion())
break;
const auto *ME = dyn_cast<MemberExpr>(Base);
if (!ME)
@@ -4768,9 +4736,8 @@ void CXXNameMangler::mangleMemberExprBase(const Expr *Base, bool IsArrow) {
}
/// Mangles a member expression.
-void CXXNameMangler::mangleMemberExpr(const Expr *base,
- bool isArrow,
- NestedNameSpecifier *qualifier,
+void CXXNameMangler::mangleMemberExpr(const Expr *base, bool isArrow,
+ NestedNameSpecifier Qualifier,
NamedDecl *firstQualifierLookup,
DeclarationName member,
const TemplateArgumentLoc *TemplateArgs,
@@ -4780,7 +4747,7 @@ void CXXNameMangler::mangleMemberExpr(const Expr *base,
// ::= pt <expression> <unresolved-name>
if (base)
mangleMemberExprBase(base, isArrow);
- mangleUnresolvedName(qualifier, member, TemplateArgs, NumTemplateArgs, arity);
+ mangleUnresolvedName(Qualifier, member, TemplateArgs, NumTemplateArgs, arity);
}
/// Look at the callee of the given call expression and determine if
@@ -5230,7 +5197,7 @@ recurse:
const auto *PDE = cast<CXXPseudoDestructorExpr>(E);
if (const Expr *Base = PDE->getBase())
mangleMemberExprBase(Base, PDE->isArrow());
- NestedNameSpecifier *Qualifier = PDE->getQualifier();
+ NestedNameSpecifier Qualifier = PDE->getQualifier();
if (TypeSourceInfo *ScopeInfo = PDE->getScopeTypeInfo()) {
if (Qualifier) {
mangleUnresolvedPrefix(Qualifier,
@@ -5855,7 +5822,8 @@ recurse:
// externally-visible declaration, so there's no standard mangling for
// this, but mangling as a literal of the closure type seems reasonable.
Out << "L";
- mangleType(Context.getASTContext().getRecordType(cast<LambdaExpr>(E)->getLambdaClass()));
+ mangleType(Context.getASTContext().getCanonicalTagType(
+ cast<LambdaExpr>(E)->getLambdaClass()));
Out << "E";
break;
}
@@ -6528,7 +6496,7 @@ static QualType getLValueType(ASTContext &Ctx, const APValue &LV) {
dyn_cast<FieldDecl>(E.getAsBaseOrMember().getPointer()))
T = FD->getType();
else
- T = Ctx.getRecordType(
+ T = Ctx.getCanonicalTagType(
cast<CXXRecordDecl>(E.getAsBaseOrMember().getPointer()));
}
return T;
@@ -6895,7 +6863,7 @@ void CXXNameMangler::mangleValueInTemplateArg(QualType T, const APValue &V,
}
TypeSoFar = FD->getType();
} else {
- TypeSoFar = Ctx.getRecordType(cast<CXXRecordDecl>(D));
+ TypeSoFar = Ctx.getCanonicalTagType(cast<CXXRecordDecl>(D));
}
}
}
@@ -7005,14 +6973,6 @@ bool CXXNameMangler::mangleSubstitution(const NamedDecl *ND) {
return mangleSubstitution(reinterpret_cast<uintptr_t>(ND));
}
-bool CXXNameMangler::mangleSubstitution(NestedNameSpecifier *NNS) {
- assert(NNS->getKind() == NestedNameSpecifier::Identifier &&
- "mangleSubstitution(NestedNameSpecifier *) is only used for "
- "identifier nested name specifiers.");
- NNS = Context.getASTContext().getCanonicalNestedNameSpecifier(NNS);
- return mangleSubstitution(reinterpret_cast<uintptr_t>(NNS));
-}
-
/// Determine whether the given type has any qualifiers that are relevant for
/// substitutions.
static bool hasMangledSubstitutionQualifiers(QualType T) {
@@ -7023,7 +6983,7 @@ static bool hasMangledSubstitutionQualifiers(QualType T) {
bool CXXNameMangler::mangleSubstitution(QualType T) {
if (!hasMangledSubstitutionQualifiers(T)) {
if (const RecordType *RT = T->getAs<RecordType>())
- return mangleSubstitution(RT->getDecl());
+ return mangleSubstitution(RT->getOriginalDecl()->getDefinitionOrSelf());
}
uintptr_t TypePtr = reinterpret_cast<uintptr_t>(T.getAsOpaquePtr());
@@ -7064,7 +7024,7 @@ bool CXXNameMangler::isSpecializedAs(QualType S, llvm::StringRef Name,
return false;
const ClassTemplateSpecializationDecl *SD =
- dyn_cast<ClassTemplateSpecializationDecl>(RT->getDecl());
+ dyn_cast<ClassTemplateSpecializationDecl>(RT->getOriginalDecl());
if (!SD || !SD->getIdentifier()->isStr(Name))
return false;
@@ -7194,7 +7154,7 @@ bool CXXNameMangler::mangleStandardSubstitution(const NamedDecl *ND) {
void CXXNameMangler::addSubstitution(QualType T) {
if (!hasMangledSubstitutionQualifiers(T)) {
if (const RecordType *RT = T->getAs<RecordType>()) {
- addSubstitution(RT->getDecl());
+ addSubstitution(RT->getOriginalDecl()->getDefinitionOrSelf());
return;
}
}
diff --git a/clang/lib/AST/JSONNodeDumper.cpp b/clang/lib/AST/JSONNodeDumper.cpp
index 64ddb1e..b3f12a1 100644
--- a/clang/lib/AST/JSONNodeDumper.cpp
+++ b/clang/lib/AST/JSONNodeDumper.cpp
@@ -395,8 +395,8 @@ llvm::json::Array JSONNodeDumper::createCastPath(const CastExpr *C) {
for (auto I = C->path_begin(), E = C->path_end(); I != E; ++I) {
const CXXBaseSpecifier *Base = *I;
- const auto *RD =
- cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
+ const auto *RD = cast<CXXRecordDecl>(
+ Base->getType()->castAs<RecordType>()->getOriginalDecl());
llvm::json::Object Val{{"name", RD->getName()}};
if (Base->isVirtual())
@@ -606,9 +606,8 @@ void JSONNodeDumper::VisitTypedefType(const TypedefType *TT) {
}
void JSONNodeDumper::VisitUsingType(const UsingType *TT) {
- JOS.attribute("decl", createBareDeclRef(TT->getFoundDecl()));
- if (!TT->typeMatchesDecl())
- JOS.attribute("type", createQualType(TT->desugar()));
+ JOS.attribute("decl", createBareDeclRef(TT->getDecl()));
+ JOS.attribute("type", createQualType(TT->desugar()));
}
void JSONNodeDumper::VisitFunctionType(const FunctionType *T) {
@@ -759,7 +758,15 @@ void JSONNodeDumper::VisitUnaryTransformType(const UnaryTransformType *UTT) {
}
void JSONNodeDumper::VisitTagType(const TagType *TT) {
- JOS.attribute("decl", createBareDeclRef(TT->getDecl()));
+ if (NestedNameSpecifier Qualifier = TT->getQualifier()) {
+ std::string Str;
+ llvm::raw_string_ostream OS(Str);
+ Qualifier.print(OS, PrintPolicy, /*ResolveTemplateArguments=*/true);
+ JOS.attribute("qualifier", Str);
+ }
+ JOS.attribute("decl", createBareDeclRef(TT->getOriginalDecl()));
+ if (TT->isTagOwned())
+ JOS.attribute("isTagOwned", true);
}
void JSONNodeDumper::VisitTemplateTypeParmType(
@@ -809,7 +816,7 @@ void JSONNodeDumper::VisitTemplateSpecializationType(
void JSONNodeDumper::VisitInjectedClassNameType(
const InjectedClassNameType *ICNT) {
- JOS.attribute("decl", createBareDeclRef(ICNT->getDecl()));
+ JOS.attribute("decl", createBareDeclRef(ICNT->getOriginalDecl()));
}
void JSONNodeDumper::VisitObjCInterfaceType(const ObjCInterfaceType *OIT) {
@@ -821,17 +828,6 @@ void JSONNodeDumper::VisitPackExpansionType(const PackExpansionType *PET) {
JOS.attribute("numExpansions", *N);
}
-void JSONNodeDumper::VisitElaboratedType(const ElaboratedType *ET) {
- if (const NestedNameSpecifier *NNS = ET->getQualifier()) {
- std::string Str;
- llvm::raw_string_ostream OS(Str);
- NNS->print(OS, PrintPolicy, /*ResolveTemplateArgs*/ true);
- JOS.attribute("qualifier", Str);
- }
- if (const TagDecl *TD = ET->getOwnedTagDecl())
- JOS.attribute("ownedTagDecl", createBareDeclRef(TD));
-}
-
void JSONNodeDumper::VisitMacroQualifiedType(const MacroQualifiedType *MQT) {
JOS.attribute("macroName", MQT->getMacroIdentifier()->getName());
}
@@ -902,9 +898,9 @@ void JSONNodeDumper::VisitNamespaceAliasDecl(const NamespaceAliasDecl *NAD) {
void JSONNodeDumper::VisitUsingDecl(const UsingDecl *UD) {
std::string Name;
- if (const NestedNameSpecifier *NNS = UD->getQualifier()) {
+ if (NestedNameSpecifier Qualifier = UD->getQualifier()) {
llvm::raw_string_ostream SOS(Name);
- NNS->print(SOS, UD->getASTContext().getPrintingPolicy());
+ Qualifier.print(SOS, UD->getASTContext().getPrintingPolicy());
}
Name += UD->getNameAsString();
JOS.attribute("name", Name);
diff --git a/clang/lib/AST/MicrosoftMangle.cpp b/clang/lib/AST/MicrosoftMangle.cpp
index e6ea0ad..241c7c3 100644
--- a/clang/lib/AST/MicrosoftMangle.cpp
+++ b/clang/lib/AST/MicrosoftMangle.cpp
@@ -1805,17 +1805,16 @@ void MicrosoftCXXNameMangler::mangleTemplateArg(const TemplateDecl *TD,
case TemplateArgument::Declaration: {
const NamedDecl *ND = TA.getAsDecl();
if (isa<FieldDecl>(ND) || isa<IndirectFieldDecl>(ND)) {
- mangleMemberDataPointer(cast<CXXRecordDecl>(ND->getDeclContext())
- ->getMostRecentNonInjectedDecl(),
- cast<ValueDecl>(ND),
- cast<NonTypeTemplateParmDecl>(Parm),
- TA.getParamTypeForDecl());
+ mangleMemberDataPointer(
+ cast<CXXRecordDecl>(ND->getDeclContext())->getMostRecentDecl(),
+ cast<ValueDecl>(ND), cast<NonTypeTemplateParmDecl>(Parm),
+ TA.getParamTypeForDecl());
} else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
if (MD && MD->isInstance()) {
- mangleMemberFunctionPointer(
- MD->getParent()->getMostRecentNonInjectedDecl(), MD,
- cast<NonTypeTemplateParmDecl>(Parm), TA.getParamTypeForDecl());
+ mangleMemberFunctionPointer(MD->getParent()->getMostRecentDecl(), MD,
+ cast<NonTypeTemplateParmDecl>(Parm),
+ TA.getParamTypeForDecl());
} else {
mangleFunctionPointer(FD, cast<NonTypeTemplateParmDecl>(Parm),
TA.getParamTypeForDecl());
@@ -2021,7 +2020,7 @@ void MicrosoftCXXNameMangler::mangleTemplateArgValue(QualType T,
if (RD->isAnonymousStructOrUnion())
continue;
} else {
- ET = getASTContext().getRecordType(cast<CXXRecordDecl>(D));
+ ET = getASTContext().getCanonicalTagType(cast<CXXRecordDecl>(D));
// Bug in MSVC: fully qualified name of base class should be used for
// mangling to prevent collisions e.g. on base classes with same names
// in different namespaces.
@@ -3247,11 +3246,11 @@ void MicrosoftCXXNameMangler::mangleTagTypeKind(TagTypeKind TTK) {
}
void MicrosoftCXXNameMangler::mangleType(const EnumType *T, Qualifiers,
SourceRange) {
- mangleType(cast<TagType>(T)->getDecl());
+ mangleType(cast<TagType>(T)->getOriginalDecl()->getDefinitionOrSelf());
}
void MicrosoftCXXNameMangler::mangleType(const RecordType *T, Qualifiers,
SourceRange) {
- mangleType(cast<TagType>(T)->getDecl());
+ mangleType(cast<TagType>(T)->getOriginalDecl()->getDefinitionOrSelf());
}
void MicrosoftCXXNameMangler::mangleType(const TagDecl *TD) {
mangleTagTypeKind(TD->getTagKind());
diff --git a/clang/lib/AST/NestedNameSpecifier.cpp b/clang/lib/AST/NestedNameSpecifier.cpp
index 56f74b9..c6af91f 100644
--- a/clang/lib/AST/NestedNameSpecifier.cpp
+++ b/clang/lib/AST/NestedNameSpecifier.cpp
@@ -15,7 +15,6 @@
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
-#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DependenceFlags.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/TemplateName.h"
@@ -35,250 +34,67 @@
using namespace clang;
-NestedNameSpecifier *
-NestedNameSpecifier::FindOrInsert(const ASTContext &Context,
- const NestedNameSpecifier &Mockup) {
+const NamespaceAndPrefixStorage *
+NestedNameSpecifier::MakeNamespaceAndPrefixStorage(
+ const ASTContext &Ctx, const NamespaceBaseDecl *Namespace,
+ NestedNameSpecifier Prefix) {
llvm::FoldingSetNodeID ID;
- Mockup.Profile(ID);
+ NamespaceAndPrefixStorage::Profile(ID, Namespace, Prefix);
void *InsertPos = nullptr;
- NestedNameSpecifier *NNS
- = Context.NestedNameSpecifiers.FindNodeOrInsertPos(ID, InsertPos);
- if (!NNS) {
- NNS =
- new (Context, alignof(NestedNameSpecifier)) NestedNameSpecifier(Mockup);
- Context.NestedNameSpecifiers.InsertNode(NNS, InsertPos);
+ NamespaceAndPrefixStorage *S =
+ Ctx.NamespaceAndPrefixStorages.FindNodeOrInsertPos(ID, InsertPos);
+ if (!S) {
+ S = new (Ctx, alignof(NamespaceAndPrefixStorage))
+ NamespaceAndPrefixStorage(Namespace, Prefix);
+ Ctx.NamespaceAndPrefixStorages.InsertNode(S, InsertPos);
}
-
- return NNS;
-}
-
-NestedNameSpecifier *NestedNameSpecifier::Create(const ASTContext &Context,
- NestedNameSpecifier *Prefix,
- const IdentifierInfo *II) {
- assert(II && "Identifier cannot be NULL");
- assert((!Prefix || Prefix->isDependent()) && "Prefix must be dependent");
-
- NestedNameSpecifier Mockup;
- Mockup.Prefix.setPointer(Prefix);
- Mockup.Prefix.setInt(StoredIdentifier);
- Mockup.Specifier = const_cast<IdentifierInfo *>(II);
- return FindOrInsert(Context, Mockup);
-}
-
-NestedNameSpecifier *NestedNameSpecifier::Create(const ASTContext &Context,
- NestedNameSpecifier *Prefix,
- const NamespaceBaseDecl *NS) {
- assert(NS && "Namespace cannot be NULL");
- assert((!Prefix ||
- (Prefix->getAsType() == nullptr &&
- Prefix->getAsIdentifier() == nullptr)) &&
- "Broken nested name specifier");
- NestedNameSpecifier Mockup;
- Mockup.Prefix.setPointer(Prefix);
- Mockup.Prefix.setInt(StoredDecl);
- Mockup.Specifier = const_cast<NamespaceBaseDecl *>(NS);
- return FindOrInsert(Context, Mockup);
-}
-
-NestedNameSpecifier *NestedNameSpecifier::Create(const ASTContext &Context,
- NestedNameSpecifier *Prefix,
- const Type *T) {
- assert(T && "Type cannot be NULL");
- NestedNameSpecifier Mockup;
- Mockup.Prefix.setPointer(Prefix);
- Mockup.Prefix.setInt(StoredTypeSpec);
- Mockup.Specifier = const_cast<Type*>(T);
- return FindOrInsert(Context, Mockup);
-}
-
-NestedNameSpecifier *NestedNameSpecifier::Create(const ASTContext &Context,
- const IdentifierInfo *II) {
- assert(II && "Identifier cannot be NULL");
- NestedNameSpecifier Mockup;
- Mockup.Prefix.setPointer(nullptr);
- Mockup.Prefix.setInt(StoredIdentifier);
- Mockup.Specifier = const_cast<IdentifierInfo *>(II);
- return FindOrInsert(Context, Mockup);
-}
-
-NestedNameSpecifier *
-NestedNameSpecifier::GlobalSpecifier(const ASTContext &Context) {
- if (!Context.GlobalNestedNameSpecifier)
- Context.GlobalNestedNameSpecifier =
- new (Context, alignof(NestedNameSpecifier)) NestedNameSpecifier();
- return Context.GlobalNestedNameSpecifier;
-}
-
-NestedNameSpecifier *
-NestedNameSpecifier::SuperSpecifier(const ASTContext &Context,
- CXXRecordDecl *RD) {
- NestedNameSpecifier Mockup;
- Mockup.Prefix.setPointer(nullptr);
- Mockup.Prefix.setInt(StoredDecl);
- Mockup.Specifier = RD;
- return FindOrInsert(Context, Mockup);
+ return S;
}
-NestedNameSpecifier::SpecifierKind NestedNameSpecifier::getKind() const {
- if (!Specifier)
- return Global;
-
- switch (Prefix.getInt()) {
- case StoredIdentifier:
- return Identifier;
-
- case StoredDecl: {
- NamedDecl *ND = static_cast<NamedDecl *>(Specifier);
- return isa<CXXRecordDecl>(ND) ? Super : Namespace;
- }
-
- case StoredTypeSpec:
- return TypeSpec;
- }
-
- llvm_unreachable("Invalid NNS Kind!");
-}
-
-/// Retrieve the namespace or namespace alias stored in this nested name
-/// specifier.
-NamespaceBaseDecl *NestedNameSpecifier::getAsNamespace() const {
- if (Prefix.getInt() == StoredDecl)
- return dyn_cast<NamespaceBaseDecl>(static_cast<NamedDecl *>(Specifier));
-
- return nullptr;
-}
-
-/// Retrieve the record declaration stored in this nested name specifier.
-CXXRecordDecl *NestedNameSpecifier::getAsRecordDecl() const {
- switch (Prefix.getInt()) {
- case StoredIdentifier:
- return nullptr;
-
- case StoredDecl:
- return dyn_cast<CXXRecordDecl>(static_cast<NamedDecl *>(Specifier));
-
- case StoredTypeSpec:
- return getAsType()->getAsCXXRecordDecl();
+bool NestedNameSpecifier::isFullyQualified() const {
+ switch (getKind()) {
+ case NestedNameSpecifier::Kind::Global:
+ return true;
+ case NestedNameSpecifier::Kind::Null:
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
+ return false;
+ case NestedNameSpecifier::Kind::Namespace:
+ return getAsNamespaceAndPrefix().Prefix.isFullyQualified();
+ case NestedNameSpecifier::Kind::Type:
+ return getAsType()->getPrefix().isFullyQualified();
}
-
llvm_unreachable("Invalid NNS Kind!");
}
NestedNameSpecifierDependence NestedNameSpecifier::getDependence() const {
switch (getKind()) {
- case Identifier: {
- // Identifier specifiers always represent dependent types
- auto F = NestedNameSpecifierDependence::Dependent |
- NestedNameSpecifierDependence::Instantiation;
- // Prefix can contain unexpanded template parameters.
- if (getPrefix())
- return F | getPrefix()->getDependence();
- return F;
- }
-
- case Namespace:
- case Global:
- return NestedNameSpecifierDependence::None;
-
- case Super: {
- CXXRecordDecl *RD = static_cast<CXXRecordDecl *>(Specifier);
- for (const auto &Base : RD->bases())
- if (Base.getType()->isDependentType())
- // FIXME: must also be instantiation-dependent.
- return NestedNameSpecifierDependence::Dependent;
+ case Kind::Null:
+ case Kind::Global:
+ case Kind::Namespace:
return NestedNameSpecifierDependence::None;
+ case Kind::MicrosoftSuper: {
+ CXXRecordDecl *RD = getAsMicrosoftSuper();
+ return RD->isDependentContext()
+ ? NestedNameSpecifierDependence::DependentInstantiation |
+ NestedNameSpecifierDependence::Dependent
+ : NestedNameSpecifierDependence::None;
}
-
- case TypeSpec: {
- NestedNameSpecifierDependence Dep =
- toNestedNameSpecifierDependendence(getAsType()->getDependence());
- if (NestedNameSpecifier *Prefix = getPrefix())
- Dep |=
- Prefix->getDependence() & ~NestedNameSpecifierDependence::Dependent;
- return Dep;
- }
+ case Kind::Type:
+ return toNestedNameSpecifierDependence(getAsType()->getDependence());
}
llvm_unreachable("Invalid NNS Kind!");
}
-bool NestedNameSpecifier::isDependent() const {
- return getDependence() & NestedNameSpecifierDependence::Dependent;
-}
-
-bool NestedNameSpecifier::isInstantiationDependent() const {
- return getDependence() & NestedNameSpecifierDependence::Instantiation;
-}
-
-bool NestedNameSpecifier::containsUnexpandedParameterPack() const {
- return getDependence() & NestedNameSpecifierDependence::UnexpandedPack;
-}
-
-bool NestedNameSpecifier::containsErrors() const {
- return getDependence() & NestedNameSpecifierDependence::Error;
-}
-
-const Type *
-NestedNameSpecifier::translateToType(const ASTContext &Context) const {
- NestedNameSpecifier *Prefix = getPrefix();
- switch (getKind()) {
- case SpecifierKind::Identifier:
- return Context
- .getDependentNameType(ElaboratedTypeKeyword::None, Prefix,
- getAsIdentifier())
- .getTypePtr();
- case SpecifierKind::TypeSpec: {
- const Type *T = getAsType();
- switch (T->getTypeClass()) {
- case Type::DependentTemplateSpecialization: {
- const auto *DT = cast<DependentTemplateSpecializationType>(T);
- const DependentTemplateStorage &DTN = DT->getDependentTemplateName();
- return Context
- .getDependentTemplateSpecializationType(
- ElaboratedTypeKeyword::None,
- {Prefix, DTN.getName(), DTN.hasTemplateKeyword()},
- DT->template_arguments())
- .getTypePtr();
- }
- case Type::Record:
- case Type::TemplateSpecialization:
- case Type::Using:
- case Type::Enum:
- case Type::Typedef:
- case Type::UnresolvedUsing:
- return Context
- .getElaboratedType(ElaboratedTypeKeyword::None, Prefix,
- QualType(T, 0))
- .getTypePtr();
- default:
- assert(Prefix == nullptr && "unexpected type with elaboration");
- return T;
- }
- }
- case SpecifierKind::Global:
- case SpecifierKind::Namespace:
- case SpecifierKind::Super:
- // These are not representable as types.
- return nullptr;
- }
- llvm_unreachable("Unhandled SpecifierKind enum");
-}
-
/// Print this nested name specifier to the given output
/// stream.
void NestedNameSpecifier::print(raw_ostream &OS, const PrintingPolicy &Policy,
bool ResolveTemplateArguments,
bool PrintFinalScopeResOp) const {
- if (getPrefix())
- getPrefix()->print(OS, Policy);
-
switch (getKind()) {
- case Identifier:
- OS << getAsIdentifier()->getName();
- break;
-
- case Namespace: {
- NamespaceBaseDecl *Namespace = getAsNamespace();
+ case Kind::Namespace: {
+ auto [Namespace, Prefix] = getAsNamespaceAndPrefix();
+ Prefix.print(OS, Policy);
if (const auto *NS = dyn_cast<NamespaceDecl>(Namespace)) {
assert(!NS->isAnonymousNamespace());
OS << NS->getName();
@@ -287,134 +103,49 @@ void NestedNameSpecifier::print(raw_ostream &OS, const PrintingPolicy &Policy,
}
break;
}
-
- case Global:
+ case Kind::Global:
OS << "::";
return;
-
- case Super:
+ case Kind::MicrosoftSuper:
OS << "__super";
break;
-
- case TypeSpec: {
+ case Kind::Type: {
PrintingPolicy InnerPolicy(Policy);
- InnerPolicy.SuppressScope = true;
InnerPolicy.SuppressTagKeyword = true;
QualType(getAsType(), 0).print(OS, InnerPolicy);
break;
}
+ case Kind::Null:
+ return;
}
-
if (PrintFinalScopeResOp)
OS << "::";
}
-LLVM_DUMP_METHOD void NestedNameSpecifier::dump(const LangOptions &LO) const {
- dump(llvm::errs(), LO);
+LLVM_DUMP_METHOD void NestedNameSpecifier::dump(llvm::raw_ostream *OS,
+ const LangOptions *LO) const {
+ print(OS ? *OS : llvm::errs(), LO ? *LO : LangOptions());
}
-LLVM_DUMP_METHOD void NestedNameSpecifier::dump() const { dump(llvm::errs()); }
-
+LLVM_DUMP_METHOD void NestedNameSpecifier::dump(const LangOptions &LO) const {
+ dump(/*OS=*/nullptr, &LO);
+}
LLVM_DUMP_METHOD void NestedNameSpecifier::dump(llvm::raw_ostream &OS) const {
- LangOptions LO;
- dump(OS, LO);
+ dump(&OS);
}
-
LLVM_DUMP_METHOD void NestedNameSpecifier::dump(llvm::raw_ostream &OS,
const LangOptions &LO) const {
- print(OS, PrintingPolicy(LO));
-}
-
-unsigned
-NestedNameSpecifierLoc::getLocalDataLength(NestedNameSpecifier *Qualifier) {
- assert(Qualifier && "Expected a non-NULL qualifier");
-
- // Location of the trailing '::'.
- unsigned Length = sizeof(SourceLocation::UIntTy);
-
- switch (Qualifier->getKind()) {
- case NestedNameSpecifier::Global:
- // Nothing more to add.
- break;
-
- case NestedNameSpecifier::Identifier:
- case NestedNameSpecifier::Namespace:
- case NestedNameSpecifier::Super:
- // The location of the identifier or namespace name.
- Length += sizeof(SourceLocation::UIntTy);
- break;
-
- case NestedNameSpecifier::TypeSpec:
- // The "void*" that points at the TypeLoc data.
- // Note: the 'template' keyword is part of the TypeLoc.
- Length += sizeof(void *);
- break;
- }
-
- return Length;
-}
-
-unsigned
-NestedNameSpecifierLoc::getDataLength(NestedNameSpecifier *Qualifier) {
- unsigned Length = 0;
- for (; Qualifier; Qualifier = Qualifier->getPrefix())
- Length += getLocalDataLength(Qualifier);
- return Length;
-}
-
-/// Load a (possibly unaligned) source location from a given address
-/// and offset.
-static SourceLocation LoadSourceLocation(void *Data, unsigned Offset) {
- SourceLocation::UIntTy Raw;
- memcpy(&Raw, static_cast<char *>(Data) + Offset, sizeof(Raw));
- return SourceLocation::getFromRawEncoding(Raw);
+ dump(&OS, &LO);
}
-/// Load a (possibly unaligned) pointer from a given address and
-/// offset.
-static void *LoadPointer(void *Data, unsigned Offset) {
- void *Result;
- memcpy(&Result, static_cast<char *>(Data) + Offset, sizeof(void*));
- return Result;
-}
-
-SourceRange NestedNameSpecifierLoc::getLocalSourceRange() const {
+SourceLocation NestedNameSpecifierLoc::getBeginLoc() const {
if (!Qualifier)
- return SourceRange();
-
- unsigned Offset = getDataLength(Qualifier->getPrefix());
- switch (Qualifier->getKind()) {
- case NestedNameSpecifier::Global:
- return LoadSourceLocation(Data, Offset);
-
- case NestedNameSpecifier::Identifier:
- case NestedNameSpecifier::Namespace:
- case NestedNameSpecifier::Super:
- return SourceRange(
- LoadSourceLocation(Data, Offset),
- LoadSourceLocation(Data, Offset + sizeof(SourceLocation::UIntTy)));
-
- case NestedNameSpecifier::TypeSpec: {
- // The "void*" that points at the TypeLoc data.
- // Note: the 'template' keyword is part of the TypeLoc.
- void *TypeData = LoadPointer(Data, Offset);
- TypeLoc TL(Qualifier->getAsType(), TypeData);
- return SourceRange(TL.getBeginLoc(),
- LoadSourceLocation(Data, Offset + sizeof(void*)));
- }
- }
+ return SourceLocation();
- llvm_unreachable("Invalid NNS Kind!");
-}
-
-TypeLoc NestedNameSpecifierLoc::getTypeLoc() const {
- if (Qualifier->getKind() != NestedNameSpecifier::TypeSpec)
- return TypeLoc();
-
- // The "void*" that points at the TypeLoc data.
- unsigned Offset = getDataLength(Qualifier->getPrefix());
- void *TypeData = LoadPointer(Data, Offset);
- return TypeLoc(Qualifier->getAsType(), TypeData);
+ NestedNameSpecifierLoc First = *this;
+ while (NestedNameSpecifierLoc Prefix = First.getAsNamespaceAndPrefix().Prefix)
+ First = Prefix;
+ return First.getLocalSourceRange().getBegin();
}
static void Append(char *Start, char *End, char *&Buffer, unsigned &BufferSize,
@@ -516,10 +247,10 @@ operator=(const NestedNameSpecifierLocBuilder &Other) {
return *this;
}
-void NestedNameSpecifierLocBuilder::Extend(ASTContext &Context, TypeLoc TL,
- SourceLocation ColonColonLoc) {
- Representation =
- NestedNameSpecifier::Create(Context, Representation, TL.getTypePtr());
+void NestedNameSpecifierLocBuilder::Make(ASTContext &Context, TypeLoc TL,
+ SourceLocation ColonColonLoc) {
+ assert(!Representation);
+ Representation = NestedNameSpecifier(TL.getTypePtr());
// Push source-location info into the buffer.
SavePointer(TL.getOpaqueData(), Buffer, BufferSize, BufferCapacity);
@@ -527,23 +258,10 @@ void NestedNameSpecifierLocBuilder::Extend(ASTContext &Context, TypeLoc TL,
}
void NestedNameSpecifierLocBuilder::Extend(ASTContext &Context,
- IdentifierInfo *Identifier,
- SourceLocation IdentifierLoc,
- SourceLocation ColonColonLoc) {
- Representation = NestedNameSpecifier::Create(Context, Representation,
- Identifier);
-
- // Push source-location info into the buffer.
- SaveSourceLocation(IdentifierLoc, Buffer, BufferSize, BufferCapacity);
- SaveSourceLocation(ColonColonLoc, Buffer, BufferSize, BufferCapacity);
-}
-
-void NestedNameSpecifierLocBuilder::Extend(ASTContext &Context,
- NamespaceBaseDecl *Namespace,
+ const NamespaceBaseDecl *Namespace,
SourceLocation NamespaceLoc,
SourceLocation ColonColonLoc) {
- Representation = NestedNameSpecifier::Create(Context, Representation,
- Namespace);
+ Representation = NestedNameSpecifier(Context, Namespace, Representation);
// Push source-location info into the buffer.
SaveSourceLocation(NamespaceLoc, Buffer, BufferSize, BufferCapacity);
@@ -553,60 +271,48 @@ void NestedNameSpecifierLocBuilder::Extend(ASTContext &Context,
void NestedNameSpecifierLocBuilder::MakeGlobal(ASTContext &Context,
SourceLocation ColonColonLoc) {
assert(!Representation && "Already have a nested-name-specifier!?");
- Representation = NestedNameSpecifier::GlobalSpecifier(Context);
+ Representation = NestedNameSpecifier::getGlobal();
// Push source-location info into the buffer.
SaveSourceLocation(ColonColonLoc, Buffer, BufferSize, BufferCapacity);
}
-void NestedNameSpecifierLocBuilder::MakeSuper(ASTContext &Context,
- CXXRecordDecl *RD,
- SourceLocation SuperLoc,
- SourceLocation ColonColonLoc) {
- Representation = NestedNameSpecifier::SuperSpecifier(Context, RD);
+void NestedNameSpecifierLocBuilder::MakeMicrosoftSuper(
+ ASTContext &Context, CXXRecordDecl *RD, SourceLocation SuperLoc,
+ SourceLocation ColonColonLoc) {
+ Representation = NestedNameSpecifier(RD);
// Push source-location info into the buffer.
SaveSourceLocation(SuperLoc, Buffer, BufferSize, BufferCapacity);
SaveSourceLocation(ColonColonLoc, Buffer, BufferSize, BufferCapacity);
}
-void NestedNameSpecifierLocBuilder::MakeTrivial(ASTContext &Context,
- NestedNameSpecifier *Qualifier,
+void NestedNameSpecifierLocBuilder::PushTrivial(ASTContext &Context,
+ NestedNameSpecifier Qualifier,
SourceRange R) {
- Representation = Qualifier;
-
// Construct bogus (but well-formed) source information for the
// nested-name-specifier.
- BufferSize = 0;
- SmallVector<NestedNameSpecifier *, 4> Stack;
- for (NestedNameSpecifier *NNS = Qualifier; NNS; NNS = NNS->getPrefix())
- Stack.push_back(NNS);
- while (!Stack.empty()) {
- NestedNameSpecifier *NNS = Stack.pop_back_val();
- switch (NNS->getKind()) {
- case NestedNameSpecifier::Identifier:
- case NestedNameSpecifier::Namespace:
- SaveSourceLocation(R.getBegin(), Buffer, BufferSize, BufferCapacity);
- break;
-
- case NestedNameSpecifier::TypeSpec: {
- TypeSourceInfo *TSInfo
- = Context.getTrivialTypeSourceInfo(QualType(NNS->getAsType(), 0),
- R.getBegin());
- SavePointer(TSInfo->getTypeLoc().getOpaqueData(), Buffer, BufferSize,
- BufferCapacity);
- break;
- }
-
- case NestedNameSpecifier::Global:
- case NestedNameSpecifier::Super:
- break;
- }
-
- // Save the location of the '::'.
- SaveSourceLocation(Stack.empty()? R.getEnd() : R.getBegin(),
- Buffer, BufferSize, BufferCapacity);
+ switch (Qualifier.getKind()) {
+ case NestedNameSpecifier::Kind::Null:
+ return;
+ case NestedNameSpecifier::Kind::Namespace: {
+ auto [_1, Prefix] = Qualifier.getAsNamespaceAndPrefix();
+ PushTrivial(Context, Prefix, R.getBegin());
+ SaveSourceLocation(R.getBegin(), Buffer, BufferSize, BufferCapacity);
+ break;
+ }
+ case NestedNameSpecifier::Kind::Type: {
+ TypeSourceInfo *TSInfo = Context.getTrivialTypeSourceInfo(
+ QualType(Qualifier.getAsType(), 0), R.getBegin());
+ SavePointer(TSInfo->getTypeLoc().getOpaqueData(), Buffer, BufferSize,
+ BufferCapacity);
+ break;
+ }
+ case NestedNameSpecifier::Kind::Global:
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
+ break;
}
+ SaveSourceLocation(R.getEnd(), Buffer, BufferSize, BufferCapacity);
}
void NestedNameSpecifierLocBuilder::Adopt(NestedNameSpecifierLoc Other) {
@@ -614,7 +320,7 @@ void NestedNameSpecifierLocBuilder::Adopt(NestedNameSpecifierLoc Other) {
free(Buffer);
if (!Other) {
- Representation = nullptr;
+ Representation = std::nullopt;
BufferSize = 0;
return;
}
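
The NestedNameSpecifier rewrite above replaces the uniqued, pointer-based representation (with separate Identifier and Super forms) by a small value type whose kinds are Null, Global, Namespace-with-prefix, Type, and MicrosoftSuper, dispatched with a kind switch. Below is a simplified, self-contained model of that value-type shape and printing style; the names are hypothetical and the real class additionally interns its namespace+prefix storage in the ASTContext.

#include <iostream>
#include <memory>
#include <string>
#include <utility>

// Toy value-semantics qualifier: a copyable value with a Kind, rather than
// a uniqued pointer type. Only Null/Global/Namespace are modeled here.
class Qualifier {
public:
  enum class Kind { Null, Global, Namespace };

  Qualifier() : K(Kind::Null) {}

  static Qualifier getGlobal() {
    Qualifier Q;
    Q.K = Kind::Global;
    return Q;
  }

  static Qualifier getNamespace(std::string Name, Qualifier Prefix) {
    Qualifier Q;
    Q.K = Kind::Namespace;
    Q.Name = std::move(Name);
    Q.Prefix = std::make_shared<Qualifier>(std::move(Prefix));
    return Q;
  }

  Kind getKind() const { return K; }
  explicit operator bool() const { return K != Kind::Null; }

  // Kind-switch printing in the same style as NestedNameSpecifier::print:
  // print the prefix first, then this level, then '::'.
  void print(std::ostream &OS) const {
    switch (K) {
    case Kind::Null:
      return;
    case Kind::Global:
      OS << "::";
      return;
    case Kind::Namespace:
      Prefix->print(OS);
      OS << Name << "::";
      return;
    }
  }

private:
  Kind K;
  std::string Name;
  std::shared_ptr<const Qualifier> Prefix;
};

int main() {
  // Models the qualifier "std::chrono::".
  Qualifier Q = Qualifier::getNamespace(
      "chrono", Qualifier::getNamespace("std", Qualifier()));
  Q.print(std::cout);
  std::cout << '\n';
  return 0;
}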
diff --git a/clang/lib/AST/ODRHash.cpp b/clang/lib/AST/ODRHash.cpp
index bd87d44..fb95f58 100644
--- a/clang/lib/AST/ODRHash.cpp
+++ b/clang/lib/AST/ODRHash.cpp
@@ -111,34 +111,28 @@ void ODRHash::AddDeclarationNameInfoImpl(DeclarationNameInfo NameInfo) {
}
}
-void ODRHash::AddNestedNameSpecifier(const NestedNameSpecifier *NNS) {
- assert(NNS && "Expecting non-null pointer.");
- const auto *Prefix = NNS->getPrefix();
- AddBoolean(Prefix);
- if (Prefix) {
- AddNestedNameSpecifier(Prefix);
- }
- auto Kind = NNS->getKind();
- ID.AddInteger(Kind);
+void ODRHash::AddNestedNameSpecifier(NestedNameSpecifier NNS) {
+ auto Kind = NNS.getKind();
+ ID.AddInteger(llvm::to_underlying(Kind));
switch (Kind) {
- case NestedNameSpecifier::Identifier:
- AddIdentifierInfo(NNS->getAsIdentifier());
- break;
- case NestedNameSpecifier::Namespace:
- AddDecl(NNS->getAsNamespace());
+ case NestedNameSpecifier::Kind::Namespace: {
+ auto [Namespace, Prefix] = NNS.getAsNamespaceAndPrefix();
+ AddDecl(Namespace);
+ AddNestedNameSpecifier(Prefix);
break;
- case NestedNameSpecifier::TypeSpec:
- AddType(NNS->getAsType());
+ }
+ case NestedNameSpecifier::Kind::Type:
+ AddType(NNS.getAsType());
break;
- case NestedNameSpecifier::Global:
- case NestedNameSpecifier::Super:
+ case NestedNameSpecifier::Kind::Null:
+ case NestedNameSpecifier::Kind::Global:
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
break;
}
}
void ODRHash::AddDependentTemplateName(const DependentTemplateStorage &Name) {
- if (NestedNameSpecifier *NNS = Name.getQualifier())
- AddNestedNameSpecifier(NNS);
+ AddNestedNameSpecifier(Name.getQualifier());
if (IdentifierOrOverloadedOperator IO = Name.getName();
const IdentifierInfo *II = IO.getIdentifier())
AddIdentifierInfo(II);
@@ -156,8 +150,7 @@ void ODRHash::AddTemplateName(TemplateName Name) {
break;
case TemplateName::QualifiedTemplate: {
QualifiedTemplateName *QTN = Name.getAsQualifiedTemplateName();
- if (NestedNameSpecifier *NNS = QTN->getQualifier())
- AddNestedNameSpecifier(NNS);
+ AddNestedNameSpecifier(QTN->getQualifier());
AddBoolean(QTN->hasTemplateKeyword());
AddTemplateName(QTN->getUnderlyingTemplate());
break;
@@ -889,11 +882,8 @@ public:
}
}
- void AddNestedNameSpecifier(const NestedNameSpecifier *NNS) {
- Hash.AddBoolean(NNS);
- if (NNS) {
- Hash.AddNestedNameSpecifier(NNS);
- }
+ void AddNestedNameSpecifier(NestedNameSpecifier NNS) {
+ Hash.AddNestedNameSpecifier(NNS);
}
void AddIdentifierInfo(const IdentifierInfo *II) {
@@ -907,52 +897,33 @@ public:
ID.AddInteger(Quals.getAsOpaqueValue());
}
- // Return the RecordType if the typedef only strips away a keyword.
- // Otherwise, return the original type.
- static const Type *RemoveTypedef(const Type *T) {
+ // Handle typedefs which only strip away a keyword.
+ bool handleTypedef(const Type *T) {
const auto *TypedefT = dyn_cast<TypedefType>(T);
- if (!TypedefT) {
- return T;
- }
-
- const TypedefNameDecl *D = TypedefT->getDecl();
- QualType UnderlyingType = D->getUnderlyingType();
-
- if (UnderlyingType.hasLocalQualifiers()) {
- return T;
- }
-
- const auto *ElaboratedT = dyn_cast<ElaboratedType>(UnderlyingType);
- if (!ElaboratedT) {
- return T;
- }
+ if (!TypedefT)
+ return false;
- if (ElaboratedT->getQualifier() != nullptr) {
- return T;
- }
+ QualType UnderlyingType = TypedefT->desugar();
- QualType NamedType = ElaboratedT->getNamedType();
- if (NamedType.hasLocalQualifiers()) {
- return T;
- }
+ if (UnderlyingType.hasLocalQualifiers())
+ return false;
- const auto *RecordT = dyn_cast<RecordType>(NamedType);
- if (!RecordT) {
- return T;
- }
+ const auto *TagT = dyn_cast<TagType>(UnderlyingType);
+ if (!TagT || TagT->getQualifier())
+ return false;
- const IdentifierInfo *TypedefII = TypedefT->getDecl()->getIdentifier();
- const IdentifierInfo *RecordII = RecordT->getDecl()->getIdentifier();
- if (!TypedefII || !RecordII ||
- TypedefII->getName() != RecordII->getName()) {
- return T;
- }
+ if (TypedefT->getDecl()->getIdentifier() !=
+ TagT->getOriginalDecl()->getIdentifier())
+ return false;
- return RecordT;
+ ID.AddInteger(TagT->getTypeClass());
+ VisitTagType(TagT, /*ElaboratedOverride=*/TypedefT);
+ return true;
}
void Visit(const Type *T) {
- T = RemoveTypedef(T);
+ if (handleTypedef(T))
+ return;
ID.AddInteger(T->getTypeClass());
Inherited::Visit(T);
}
@@ -1088,7 +1059,7 @@ public:
}
void VisitInjectedClassNameType(const InjectedClassNameType *T) {
- AddDecl(T->getDecl());
+ AddDecl(T->getOriginalDecl()->getDefinitionOrSelf());
VisitType(T);
}
@@ -1186,14 +1157,17 @@ public:
VisitType(T);
}
- void VisitTagType(const TagType *T) {
- AddDecl(T->getDecl());
+ void VisitTagType(const TagType *T,
+ const TypedefType *ElaboratedOverride = nullptr) {
+ ID.AddInteger(llvm::to_underlying(
+ ElaboratedOverride ? ElaboratedTypeKeyword::None : T->getKeyword()));
+ AddNestedNameSpecifier(ElaboratedOverride
+ ? ElaboratedOverride->getQualifier()
+ : T->getQualifier());
+ AddDecl(T->getOriginalDecl()->getDefinitionOrSelf());
VisitType(T);
}
- void VisitRecordType(const RecordType *T) { VisitTagType(T); }
- void VisitEnumType(const EnumType *T) { VisitTagType(T); }
-
void VisitTemplateSpecializationType(const TemplateSpecializationType *T) {
ID.AddInteger(T->template_arguments().size());
for (const auto &TA : T->template_arguments()) {
@@ -1211,6 +1185,8 @@ public:
}
void VisitTypedefType(const TypedefType *T) {
+ ID.AddInteger(llvm::to_underlying(T->getKeyword()));
+ AddNestedNameSpecifier(T->getQualifier());
AddDecl(T->getDecl());
VisitType(T);
}
@@ -1247,12 +1223,6 @@ public:
VisitTypeWithKeyword(T);
}
- void VisitElaboratedType(const ElaboratedType *T) {
- AddNestedNameSpecifier(T->getQualifier());
- AddQualType(T->getNamedType());
- VisitTypeWithKeyword(T);
- }
-
void VisitUnaryTransformType(const UnaryTransformType *T) {
AddQualType(T->getUnderlyingType());
AddQualType(T->getBaseType());
@@ -1330,7 +1300,7 @@ void ODRHash::AddStructuralValue(const APValue &Value) {
TypeSoFar = FD->getType();
} else {
TypeSoFar =
- D->getASTContext().getRecordType(cast<CXXRecordDecl>(D));
+ D->getASTContext().getCanonicalTagType(cast<CXXRecordDecl>(D));
}
}
}
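
The new handleTypedef in ODRHash targets the classic C-compatibility pattern where a typedef merely drops the elaboration keyword. A minimal instance of the pattern it detects (illustrative source, not Clang code):

// The typedef below only strips the 'struct' keyword: its name matches the
// tag's name, and the underlying type carries no qualifiers and no
// nested-name qualifier. handleTypedef recognizes this shape and hashes it
// like the plain tag type, so both spellings are intended to hash the same.
struct Point { int x, y; };
typedef struct Point Point;

Point a;         // through the typedef
struct Point b;  // through the elaborated tag type

int main() { return a.x + b.y; }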
diff --git a/clang/lib/AST/OpenMPClause.cpp b/clang/lib/AST/OpenMPClause.cpp
index de8b599..588b0dc 100644
--- a/clang/lib/AST/OpenMPClause.cpp
+++ b/clang/lib/AST/OpenMPClause.cpp
@@ -2350,17 +2350,16 @@ void OMPClausePrinter::VisitOMPReductionClause(OMPReductionClause *Node) {
if (Node->getModifierLoc().isValid())
OS << getOpenMPSimpleClauseTypeName(OMPC_reduction, Node->getModifier())
<< ", ";
- NestedNameSpecifier *QualifierLoc =
+ NestedNameSpecifier Qualifier =
Node->getQualifierLoc().getNestedNameSpecifier();
OverloadedOperatorKind OOK =
Node->getNameInfo().getName().getCXXOverloadedOperator();
- if (QualifierLoc == nullptr && OOK != OO_None) {
+ if (!Qualifier && OOK != OO_None) {
// Print reduction identifier in C format
OS << getOperatorSpelling(OOK);
} else {
// Use C++ format
- if (QualifierLoc != nullptr)
- QualifierLoc->print(OS, Policy);
+ Qualifier.print(OS, Policy);
OS << Node->getNameInfo();
}
OS << ":";
@@ -2373,17 +2372,16 @@ void OMPClausePrinter::VisitOMPTaskReductionClause(
OMPTaskReductionClause *Node) {
if (!Node->varlist_empty()) {
OS << "task_reduction(";
- NestedNameSpecifier *QualifierLoc =
+ NestedNameSpecifier Qualifier =
Node->getQualifierLoc().getNestedNameSpecifier();
OverloadedOperatorKind OOK =
Node->getNameInfo().getName().getCXXOverloadedOperator();
- if (QualifierLoc == nullptr && OOK != OO_None) {
+ if (!Qualifier && OOK != OO_None) {
// Print reduction identifier in C format
OS << getOperatorSpelling(OOK);
} else {
// Use C++ format
- if (QualifierLoc != nullptr)
- QualifierLoc->print(OS, Policy);
+ Qualifier.print(OS, Policy);
OS << Node->getNameInfo();
}
OS << ":";
@@ -2395,17 +2393,16 @@ void OMPClausePrinter::VisitOMPTaskReductionClause(
void OMPClausePrinter::VisitOMPInReductionClause(OMPInReductionClause *Node) {
if (!Node->varlist_empty()) {
OS << "in_reduction(";
- NestedNameSpecifier *QualifierLoc =
+ NestedNameSpecifier Qualifier =
Node->getQualifierLoc().getNestedNameSpecifier();
OverloadedOperatorKind OOK =
Node->getNameInfo().getName().getCXXOverloadedOperator();
- if (QualifierLoc == nullptr && OOK != OO_None) {
+ if (!Qualifier && OOK != OO_None) {
// Print reduction identifier in C format
OS << getOperatorSpelling(OOK);
} else {
// Use C++ format
- if (QualifierLoc != nullptr)
- QualifierLoc->print(OS, Policy);
+ Qualifier.print(OS, Policy);
OS << Node->getNameInfo();
}
OS << ":";
@@ -2508,10 +2505,9 @@ template <typename T>
static void PrintMapper(raw_ostream &OS, T *Node,
const PrintingPolicy &Policy) {
OS << '(';
- NestedNameSpecifier *MapperNNS =
+ NestedNameSpecifier MapperNNS =
Node->getMapperQualifierLoc().getNestedNameSpecifier();
- if (MapperNNS)
- MapperNNS->print(OS, Policy);
+ MapperNNS.print(OS, Policy);
OS << Node->getMapperIdInfo() << ')';
}
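
Editorial aside: the reduction, task_reduction, in_reduction, and mapper printers above all apply the same conversion: NestedNameSpecifier becomes a value type whose print() is a no-op for the empty specifier, so the explicit null checks disappear. A minimal sketch of the resulting calling convention, assuming the value-type API introduced by this patch; the helper name is illustrative, not part of the change.

    #include "clang/AST/DeclarationName.h"
    #include "clang/AST/NestedNameSpecifier.h"
    #include "clang/AST/PrettyPrinter.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace clang;

    // Hypothetical helper: print "qualifier::name" for a reduction
    // identifier. An empty specifier prints nothing, so no null check.
    static void printReductionId(llvm::raw_ostream &OS,
                                 NestedNameSpecifier Qualifier,
                                 const DeclarationNameInfo &NameInfo,
                                 const PrintingPolicy &Policy) {
      Qualifier.print(OS, Policy);
      OS << NameInfo;
    }

The C-style fallback for overloaded operators is unchanged: it still fires only when the qualifier is empty.
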
diff --git a/clang/lib/AST/ParentMapContext.cpp b/clang/lib/AST/ParentMapContext.cpp
index 68dfe4d..acc011c 100644
--- a/clang/lib/AST/ParentMapContext.cpp
+++ b/clang/lib/AST/ParentMapContext.cpp
@@ -438,10 +438,12 @@ private:
DeclNode, DeclNode, [&] { return VisitorBase::TraverseDecl(DeclNode); },
&Map.PointerParents);
}
- bool TraverseTypeLoc(TypeLoc TypeLocNode) {
+ bool TraverseTypeLoc(TypeLoc TypeLocNode, bool TraverseQualifier = true) {
return TraverseNode(
TypeLocNode, DynTypedNode::create(TypeLocNode),
- [&] { return VisitorBase::TraverseTypeLoc(TypeLocNode); },
+ [&] {
+ return VisitorBase::TraverseTypeLoc(TypeLocNode, TraverseQualifier);
+ },
&Map.OtherParents);
}
bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc NNSLocNode) {
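
Editorial aside: the extra TraverseQualifier parameter means any RecursiveASTVisitor subclass that overrides TraverseTypeLoc must accept and forward it, exactly as the parent-map visitor above now does. A sketch of the adapted override, assuming the updated base signature from this patch.

    #include "clang/AST/RecursiveASTVisitor.h"

    using namespace clang;

    class ParentTrackingVisitor
        : public RecursiveASTVisitor<ParentTrackingVisitor> {
    public:
      // Accept the new flag and forward it so qualifier TypeLocs are
      // traversed (or skipped) exactly as the caller requested.
      bool TraverseTypeLoc(TypeLoc TL, bool TraverseQualifier = true) {
        // ... record TL as a parent candidate here ...
        return RecursiveASTVisitor<ParentTrackingVisitor>::TraverseTypeLoc(
            TL, TraverseQualifier);
      }
    };
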
diff --git a/clang/lib/AST/PrintfFormatString.cpp b/clang/lib/AST/PrintfFormatString.cpp
index bcd44f0..687160c 100644
--- a/clang/lib/AST/PrintfFormatString.cpp
+++ b/clang/lib/AST/PrintfFormatString.cpp
@@ -794,7 +794,7 @@ bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
// If it's an enum, get its underlying type.
if (const EnumType *ETy = QT->getAs<EnumType>())
- QT = ETy->getDecl()->getIntegerType();
+ QT = ETy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
const BuiltinType *BT = QT->getAs<BuiltinType>();
if (!BT) {
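
Editorial aside: this is the first of many call sites converted from getDecl() to getOriginalDecl(), with an explicit getDefinitionOrSelf() wherever properties of the definition (here, the enum's underlying integer type) are needed. A small sketch of the pattern, assuming the TagType API this patch introduces; the helper is illustrative only.

    #include "clang/AST/Decl.h"
    #include "clang/AST/Type.h"

    using namespace clang;

    // Hypothetical helper: resolve an enum type to its underlying integer
    // type, returning the input unchanged for incomplete enums and
    // non-enum types.
    static QualType getUnderlyingIntegerType(QualType QT) {
      if (const EnumType *ETy = QT->getAs<EnumType>()) {
        const EnumDecl *ED = ETy->getOriginalDecl()->getDefinitionOrSelf();
        if (ED->isComplete())
          return ED->getIntegerType();
      }
      return QT;
    }
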
diff --git a/clang/lib/AST/QualTypeNames.cpp b/clang/lib/AST/QualTypeNames.cpp
index 9731b3a..ee7fec3 100644
--- a/clang/lib/AST/QualTypeNames.cpp
+++ b/clang/lib/AST/QualTypeNames.cpp
@@ -24,10 +24,9 @@ namespace TypeName {
/// is requested.
/// \param[in] WithGlobalNsPrefix - Indicate whether the global namespace
/// specifier "::" should be prepended or not.
-static NestedNameSpecifier *createNestedNameSpecifier(
- const ASTContext &Ctx,
- const NamespaceDecl *Namesp,
- bool WithGlobalNsPrefix);
+static NestedNameSpecifier
+createNestedNameSpecifier(const ASTContext &Ctx, const NamespaceDecl *Namesp,
+ bool WithGlobalNsPrefix);
/// Create a NestedNameSpecifier for TagDecl and its enclosing
/// scopes.
@@ -39,22 +38,24 @@ static NestedNameSpecifier *createNestedNameSpecifier(
/// qualified names.
/// \param[in] WithGlobalNsPrefix - Indicate whether the global namespace
/// specifier "::" should be prepended or not.
-static NestedNameSpecifier *createNestedNameSpecifier(
- const ASTContext &Ctx, const TypeDecl *TD,
- bool FullyQualify, bool WithGlobalNsPrefix);
+static NestedNameSpecifier createNestedNameSpecifier(const ASTContext &Ctx,
+ const TypeDecl *TD,
+ bool FullyQualify,
+ bool WithGlobalNsPrefix);
-static NestedNameSpecifier *createNestedNameSpecifierForScopeOf(
- const ASTContext &Ctx, const Decl *decl,
- bool FullyQualified, bool WithGlobalNsPrefix);
+static NestedNameSpecifier
+createNestedNameSpecifierForScopeOf(const ASTContext &Ctx, const Decl *decl,
+ bool FullyQualified,
+ bool WithGlobalNsPrefix);
-static NestedNameSpecifier *getFullyQualifiedNestedNameSpecifier(
- const ASTContext &Ctx, NestedNameSpecifier *scope, bool WithGlobalNsPrefix);
+static NestedNameSpecifier getFullyQualifiedNestedNameSpecifier(
+ const ASTContext &Ctx, NestedNameSpecifier NNS, bool WithGlobalNsPrefix);
static bool getFullyQualifiedTemplateName(const ASTContext &Ctx,
TemplateName &TName,
bool WithGlobalNsPrefix) {
bool Changed = false;
- NestedNameSpecifier *NNS = nullptr;
+ NestedNameSpecifier NNS = std::nullopt;
TemplateDecl *ArgTDecl = TName.getAsTemplateDecl();
// ArgTDecl won't be NULL because we asserted that this isn't a
@@ -65,13 +66,13 @@ static bool getFullyQualifiedTemplateName(const ASTContext &Ctx,
if (QTName &&
!QTName->hasTemplateKeyword() &&
(NNS = QTName->getQualifier())) {
- NestedNameSpecifier *QNNS = getFullyQualifiedNestedNameSpecifier(
- Ctx, NNS, WithGlobalNsPrefix);
+ NestedNameSpecifier QNNS =
+ getFullyQualifiedNestedNameSpecifier(Ctx, NNS, WithGlobalNsPrefix);
if (QNNS != NNS) {
Changed = true;
NNS = QNNS;
} else {
- NNS = nullptr;
+ NNS = std::nullopt;
}
} else {
NNS = createNestedNameSpecifierForScopeOf(
@@ -116,76 +117,81 @@ static bool getFullyQualifiedTemplateArgument(const ASTContext &Ctx,
}
static const Type *getFullyQualifiedTemplateType(const ASTContext &Ctx,
- const Type *TypePtr,
+ const TagType *TSTRecord,
+ ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier,
bool WithGlobalNsPrefix) {
- // DependentTemplateTypes exist within template declarations and
- // definitions. Therefore we shouldn't encounter them at the end of
- // a translation unit. If we do, the caller has made an error.
- assert(!isa<DependentTemplateSpecializationType>(TypePtr));
- // In case of template specializations, iterate over the arguments
- // and fully qualify them as well.
- if (const auto *TST = dyn_cast<const TemplateSpecializationType>(TypePtr)) {
- bool MightHaveChanged = false;
- SmallVector<TemplateArgument, 4> FQArgs;
- // Cheap to copy and potentially modified by
- // getFullyQualifedTemplateArgument.
- for (TemplateArgument Arg : TST->template_arguments()) {
- MightHaveChanged |= getFullyQualifiedTemplateArgument(
- Ctx, Arg, WithGlobalNsPrefix);
- FQArgs.push_back(Arg);
- }
+ // We are asked to fully qualify and we have a Record Type,
+ // which can point to a template instantiation with no sugar in any of
+ // its template arguments; however, we still need to fully qualify them.
+
+ const auto *TD = TSTRecord->getOriginalDecl();
+ const auto *TSTDecl = dyn_cast<ClassTemplateSpecializationDecl>(TD);
+ if (!TSTDecl)
+ return Ctx.getTagType(Keyword, Qualifier, TD, /*OwnsTag=*/false)
+ .getTypePtr();
+
+ const TemplateArgumentList &TemplateArgs = TSTDecl->getTemplateArgs();
+
+ bool MightHaveChanged = false;
+ SmallVector<TemplateArgument, 4> FQArgs;
+ for (unsigned int I = 0, E = TemplateArgs.size(); I != E; ++I) {
+ // Cheap to copy and potentially modified by
+ // getFullyQualifiedTemplateArgument.
+ TemplateArgument Arg(TemplateArgs[I]);
+ MightHaveChanged |=
+ getFullyQualifiedTemplateArgument(Ctx, Arg, WithGlobalNsPrefix);
+ FQArgs.push_back(Arg);
+ }
- // If a fully qualified arg is different from the unqualified arg,
- // allocate new type in the AST.
- if (MightHaveChanged) {
- QualType QT = Ctx.getTemplateSpecializationType(
- TST->getTemplateName(), FQArgs,
- /*CanonicalArgs=*/{}, TST->desugar());
- // getTemplateSpecializationType returns a fully qualified
- // version of the specialization itself, so no need to qualify
- // it.
- return QT.getTypePtr();
- }
- } else if (const auto *TSTRecord = dyn_cast<const RecordType>(TypePtr)) {
- // We are asked to fully qualify and we have a Record Type,
- // which can point to a template instantiation with no sugar in any of
- // its template argument, however we still need to fully qualify them.
-
- if (const auto *TSTDecl =
- dyn_cast<ClassTemplateSpecializationDecl>(TSTRecord->getDecl())) {
- const TemplateArgumentList &TemplateArgs = TSTDecl->getTemplateArgs();
-
- bool MightHaveChanged = false;
- SmallVector<TemplateArgument, 4> FQArgs;
- for (unsigned int I = 0, E = TemplateArgs.size(); I != E; ++I) {
- // cheap to copy and potentially modified by
- // getFullyQualifedTemplateArgument
- TemplateArgument Arg(TemplateArgs[I]);
- MightHaveChanged |= getFullyQualifiedTemplateArgument(
- Ctx, Arg, WithGlobalNsPrefix);
- FQArgs.push_back(Arg);
- }
+ if (!MightHaveChanged)
+ return Ctx.getTagType(Keyword, Qualifier, TD, /*OwnsTag=*/false)
+ .getTypePtr();
+ // If a fully qualified arg is different from the unqualified arg,
+ // allocate new type in the AST.
+ TemplateName TN = Ctx.getQualifiedTemplateName(
+ Qualifier, /*TemplateKeyword=*/false,
+ TemplateName(TSTDecl->getSpecializedTemplate()));
+ QualType QT = Ctx.getTemplateSpecializationType(
+ Keyword, TN, FQArgs,
+ /*CanonicalArgs=*/{}, TSTRecord->getCanonicalTypeInternal());
+ // getTemplateSpecializationType returns a fully qualified
+ // version of the specialization itself, so no need to qualify
+ // it.
+ return QT.getTypePtr();
+}
- // If a fully qualified arg is different from the unqualified arg,
- // allocate new type in the AST.
- if (MightHaveChanged) {
- TemplateName TN(TSTDecl->getSpecializedTemplate());
- QualType QT = Ctx.getTemplateSpecializationType(
- TN, FQArgs,
- /*CanonicalArgs=*/{}, TSTRecord->getCanonicalTypeInternal());
- // getTemplateSpecializationType returns a fully qualified
- // version of the specialization itself, so no need to qualify
- // it.
- return QT.getTypePtr();
- }
- }
+static const Type *
+getFullyQualifiedTemplateType(const ASTContext &Ctx,
+ const TemplateSpecializationType *TST,
+ bool WithGlobalNsPrefix) {
+ TemplateName TName = TST->getTemplateName();
+ bool MightHaveChanged =
+ getFullyQualifiedTemplateName(Ctx, TName, WithGlobalNsPrefix);
+ SmallVector<TemplateArgument, 4> FQArgs;
+ // Cheap to copy and potentially modified by
+ // getFullyQualifiedTemplateArgument.
+ for (TemplateArgument Arg : TST->template_arguments()) {
+ MightHaveChanged |=
+ getFullyQualifiedTemplateArgument(Ctx, Arg, WithGlobalNsPrefix);
+ FQArgs.push_back(Arg);
}
- return TypePtr;
+
+ if (!MightHaveChanged)
+ return TST;
+
+ QualType NewQT =
+ Ctx.getTemplateSpecializationType(TST->getKeyword(), TName, FQArgs,
+ /*CanonicalArgs=*/{}, TST->desugar());
+ // getTemplateSpecializationType returns a fully qualified
+ // version of the specialization itself, so no need to qualify
+ // it.
+ return NewQT.getTypePtr();
}
-static NestedNameSpecifier *createOuterNNS(const ASTContext &Ctx, const Decl *D,
- bool FullyQualify,
- bool WithGlobalNsPrefix) {
+static NestedNameSpecifier createOuterNNS(const ASTContext &Ctx, const Decl *D,
+ bool FullyQualify,
+ bool WithGlobalNsPrefix) {
const DeclContext *DC = D->getDeclContext();
if (const auto *NS = dyn_cast<NamespaceDecl>(DC)) {
while (NS && NS->isInline()) {
@@ -195,71 +201,63 @@ static NestedNameSpecifier *createOuterNNS(const ASTContext &Ctx, const Decl *D,
if (NS && NS->getDeclName()) {
return createNestedNameSpecifier(Ctx, NS, WithGlobalNsPrefix);
}
- return nullptr; // no starting '::', no anonymous
- } else if (const auto *TD = dyn_cast<TagDecl>(DC)) {
- return createNestedNameSpecifier(Ctx, TD, FullyQualify, WithGlobalNsPrefix);
- } else if (const auto *TDD = dyn_cast<TypedefNameDecl>(DC)) {
- return createNestedNameSpecifier(
- Ctx, TDD, FullyQualify, WithGlobalNsPrefix);
- } else if (WithGlobalNsPrefix && DC->isTranslationUnit()) {
- return NestedNameSpecifier::GlobalSpecifier(Ctx);
+ return std::nullopt; // no starting '::', no anonymous
}
- return nullptr; // no starting '::' if |WithGlobalNsPrefix| is false
+ if (const auto *TD = dyn_cast<TagDecl>(DC))
+ return createNestedNameSpecifier(Ctx, TD, FullyQualify, WithGlobalNsPrefix);
+ if (const auto *TDD = dyn_cast<TypedefNameDecl>(DC))
+ return createNestedNameSpecifier(Ctx, TDD, FullyQualify,
+ WithGlobalNsPrefix);
+ if (WithGlobalNsPrefix && DC->isTranslationUnit())
+ return NestedNameSpecifier::getGlobal();
+ return std::nullopt; // no starting '::' if |WithGlobalNsPrefix| is false
}
/// Return a fully qualified version of this name specifier.
-static NestedNameSpecifier *getFullyQualifiedNestedNameSpecifier(
- const ASTContext &Ctx, NestedNameSpecifier *Scope,
- bool WithGlobalNsPrefix) {
- switch (Scope->getKind()) {
- case NestedNameSpecifier::Global:
- case NestedNameSpecifier::Super:
- // Already fully qualified
- return Scope;
- case NestedNameSpecifier::Namespace:
- return TypeName::createNestedNameSpecifier(
- Ctx, Scope->getAsNamespace()->getNamespace(), WithGlobalNsPrefix);
- case NestedNameSpecifier::Identifier:
- // A function or some other construct that makes it un-namable
- // at the end of the TU. Skip the current component of the name,
- // but use the name of it's prefix.
- return getFullyQualifiedNestedNameSpecifier(
- Ctx, Scope->getPrefix(), WithGlobalNsPrefix);
- case NestedNameSpecifier::TypeSpec: {
- const Type *Type = Scope->getAsType();
- // Find decl context.
- const TagDecl *TD = nullptr;
- if (const TagType *TagDeclType = Type->getAs<TagType>()) {
- TD = TagDeclType->getDecl();
- } else {
- TD = Type->getAsCXXRecordDecl();
- }
- if (TD) {
- return TypeName::createNestedNameSpecifier(Ctx, TD,
- true /*FullyQualified*/,
- WithGlobalNsPrefix);
- } else if (const auto *TDD = dyn_cast<TypedefType>(Type)) {
- return TypeName::createNestedNameSpecifier(Ctx, TDD->getDecl(),
- true /*FullyQualified*/,
- WithGlobalNsPrefix);
- }
+static NestedNameSpecifier getFullyQualifiedNestedNameSpecifier(
+ const ASTContext &Ctx, NestedNameSpecifier Scope, bool WithGlobalNsPrefix) {
+ switch (Scope.getKind()) {
+ case NestedNameSpecifier::Kind::Null:
+ llvm_unreachable("can't fully qualify the empty nested name specifier");
+ case NestedNameSpecifier::Kind::Global:
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
+ // Already fully qualified
+ return Scope;
+ case NestedNameSpecifier::Kind::Namespace:
+ return TypeName::createNestedNameSpecifier(
+ Ctx, Scope.getAsNamespaceAndPrefix().Namespace->getNamespace(),
+ WithGlobalNsPrefix);
+ case NestedNameSpecifier::Kind::Type: {
+ const Type *Type = Scope.getAsType();
+ // Find decl context.
+ const TypeDecl *TD;
+ if (const TagType *TagDeclType = Type->getAs<TagType>())
+ TD = TagDeclType->getOriginalDecl();
+ else if (const auto *D = dyn_cast<TypedefType>(Type))
+ TD = D->getDecl();
+ else
return Scope;
- }
+ return TypeName::createNestedNameSpecifier(Ctx, TD, /*FullyQualify=*/true,
+ WithGlobalNsPrefix);
+ }
}
llvm_unreachable("bad NNS kind");
}
/// Create a nested name specifier for the declaring context of
/// the type.
-static NestedNameSpecifier *createNestedNameSpecifierForScopeOf(
- const ASTContext &Ctx, const Decl *Decl,
- bool FullyQualified, bool WithGlobalNsPrefix) {
+static NestedNameSpecifier
+createNestedNameSpecifierForScopeOf(const ASTContext &Ctx, const Decl *Decl,
+ bool FullyQualified,
+ bool WithGlobalNsPrefix) {
assert(Decl);
const DeclContext *DC = Decl->getDeclContext()->getRedeclContext();
const auto *Outer = dyn_cast<NamedDecl>(DC);
const auto *OuterNS = dyn_cast<NamespaceDecl>(DC);
- if (Outer && !(OuterNS && OuterNS->isAnonymousNamespace())) {
+ if (OuterNS && OuterNS->isAnonymousNamespace())
+ OuterNS = dyn_cast<NamespaceDecl>(OuterNS->getParent());
+ if (Outer) {
if (const auto *CxxDecl = dyn_cast<CXXRecordDecl>(DC)) {
if (ClassTemplateDecl *ClassTempl =
CxxDecl->getDescribedClassTemplate()) {
@@ -288,76 +286,80 @@ static NestedNameSpecifier *createNestedNameSpecifierForScopeOf(
Ctx, TD, FullyQualified, WithGlobalNsPrefix);
} else if (isa<TranslationUnitDecl>(Outer)) {
// Context is the TU. Nothing needs to be done.
- return nullptr;
+ return std::nullopt;
} else {
// Decl's context was neither the TU, a namespace, nor a
// TagDecl, which means it is a type local to a scope, and not
// accessible at the end of the TU.
- return nullptr;
+ return std::nullopt;
}
} else if (WithGlobalNsPrefix && DC->isTranslationUnit()) {
- return NestedNameSpecifier::GlobalSpecifier(Ctx);
+ return NestedNameSpecifier::getGlobal();
}
- return nullptr;
+ return std::nullopt;
}
/// Create a nested name specifier for the declaring context of
/// the type.
-static NestedNameSpecifier *createNestedNameSpecifierForScopeOf(
- const ASTContext &Ctx, const Type *TypePtr,
- bool FullyQualified, bool WithGlobalNsPrefix) {
- if (!TypePtr) return nullptr;
+static NestedNameSpecifier
+createNestedNameSpecifierForScopeOf(const ASTContext &Ctx, const Type *TypePtr,
+ bool FullyQualified,
+ bool WithGlobalNsPrefix) {
+ if (!TypePtr)
+ return std::nullopt;
Decl *Decl = nullptr;
// There are probably other cases ...
if (const auto *TDT = dyn_cast<TypedefType>(TypePtr)) {
Decl = TDT->getDecl();
} else if (const auto *TagDeclType = dyn_cast<TagType>(TypePtr)) {
- Decl = TagDeclType->getDecl();
+ Decl = TagDeclType->getOriginalDecl();
} else if (const auto *TST = dyn_cast<TemplateSpecializationType>(TypePtr)) {
Decl = TST->getTemplateName().getAsTemplateDecl();
} else {
Decl = TypePtr->getAsCXXRecordDecl();
}
- if (!Decl) return nullptr;
+ if (!Decl)
+ return std::nullopt;
return createNestedNameSpecifierForScopeOf(
Ctx, Decl, FullyQualified, WithGlobalNsPrefix);
}
-NestedNameSpecifier *createNestedNameSpecifier(const ASTContext &Ctx,
- const NamespaceDecl *Namespace,
- bool WithGlobalNsPrefix) {
+static NestedNameSpecifier
+createNestedNameSpecifier(const ASTContext &Ctx, const NamespaceDecl *Namespace,
+ bool WithGlobalNsPrefix) {
while (Namespace && Namespace->isInline()) {
// Ignore inline namespace;
Namespace = dyn_cast<NamespaceDecl>(Namespace->getDeclContext());
}
- if (!Namespace) return nullptr;
+ if (!Namespace)
+ return std::nullopt;
- bool FullyQualified = true; // doesn't matter, DeclContexts are namespaces
- return NestedNameSpecifier::Create(
- Ctx,
- createOuterNNS(Ctx, Namespace, FullyQualified, WithGlobalNsPrefix),
- Namespace);
+ bool FullyQualify = true; // doesn't matter, DeclContexts are namespaces
+ return NestedNameSpecifier(
+ Ctx, Namespace,
+ createOuterNNS(Ctx, Namespace, FullyQualify, WithGlobalNsPrefix));
}
-NestedNameSpecifier *createNestedNameSpecifier(const ASTContext &Ctx,
- const TypeDecl *TD,
- bool FullyQualify,
- bool WithGlobalNsPrefix) {
- const Type *TypePtr = TD->getTypeForDecl();
- if (isa<const TemplateSpecializationType>(TypePtr) ||
- isa<const RecordType>(TypePtr)) {
+NestedNameSpecifier createNestedNameSpecifier(const ASTContext &Ctx,
+ const TypeDecl *TD,
+ bool FullyQualify,
+ bool WithGlobalNsPrefix) {
+ const Type *TypePtr = Ctx.getTypeDeclType(TD).getTypePtr();
+ if (auto *RD = dyn_cast<TagType>(TypePtr)) {
// We are asked to fully qualify and we have a Record Type (which
// may point to a template specialization) or Template
// Specialization Type. We need to fully qualify their arguments.
-
- TypePtr = getFullyQualifiedTemplateType(Ctx, TypePtr, WithGlobalNsPrefix);
+ TypePtr = getFullyQualifiedTemplateType(
+ Ctx, RD, ElaboratedTypeKeyword::None,
+ createOuterNNS(Ctx, TD, FullyQualify, WithGlobalNsPrefix),
+ WithGlobalNsPrefix);
+ } else if (auto *TST = dyn_cast<TemplateSpecializationType>(TypePtr)) {
+ TypePtr = getFullyQualifiedTemplateType(Ctx, TST, WithGlobalNsPrefix);
}
-
- return NestedNameSpecifier::Create(
- Ctx, createOuterNNS(Ctx, TD, FullyQualify, WithGlobalNsPrefix), TypePtr);
+ return NestedNameSpecifier(TypePtr);
}
/// Return the fully qualified type, including fully-qualified
@@ -381,7 +383,7 @@ QualType getFullyQualifiedType(QualType QT, const ASTContext &Ctx,
Qualifiers Quals = QT.getQualifiers();
// Fully qualify the pointee and class types.
QT = getFullyQualifiedType(QT->getPointeeType(), Ctx, WithGlobalNsPrefix);
- NestedNameSpecifier *Qualifier = getFullyQualifiedNestedNameSpecifier(
+ NestedNameSpecifier Qualifier = getFullyQualifiedNestedNameSpecifier(
Ctx, MPT->getQualifier(), WithGlobalNsPrefix);
QT = Ctx.getMemberPointerType(QT, Qualifier,
MPT->getMostRecentCXXRecordDecl());
@@ -434,45 +436,48 @@ QualType getFullyQualifiedType(QualType QT, const ASTContext &Ctx,
QT = Ctx.getQualifiedType(QT, Quals);
}
- NestedNameSpecifier *Prefix = nullptr;
+ if (const auto *TST =
+ dyn_cast<const TemplateSpecializationType>(QT.getTypePtr())) {
+
+ const Type *T = getFullyQualifiedTemplateType(Ctx, TST, WithGlobalNsPrefix);
+ if (T == TST)
+ return QT;
+ return Ctx.getQualifiedType(T, QT.getQualifiers());
+ }
+
// Local qualifiers are attached to the QualType outside of the
// elaborated type. Retrieve them before descending into the
// elaborated type.
Qualifiers PrefixQualifiers = QT.getLocalQualifiers();
QT = QualType(QT.getTypePtr(), 0);
- ElaboratedTypeKeyword Keyword = ElaboratedTypeKeyword::None;
- if (const auto *ETypeInput = dyn_cast<ElaboratedType>(QT.getTypePtr())) {
- QT = ETypeInput->getNamedType();
- assert(!QT.hasLocalQualifiers());
- Keyword = ETypeInput->getKeyword();
- }
// We don't consider the alias introduced by `using a::X` as a new type.
// The qualified name is still a::X.
if (const auto *UT = QT->getAs<UsingType>()) {
- QT = Ctx.getQualifiedType(UT->getUnderlyingType(), PrefixQualifiers);
+ QT = Ctx.getQualifiedType(UT->desugar(), PrefixQualifiers);
return getFullyQualifiedType(QT, Ctx, WithGlobalNsPrefix);
}
// Create a nested name specifier if needed.
- Prefix = createNestedNameSpecifierForScopeOf(Ctx, QT.getTypePtr(),
- true /*FullyQualified*/,
- WithGlobalNsPrefix);
+ NestedNameSpecifier Prefix = createNestedNameSpecifierForScopeOf(
+ Ctx, QT.getTypePtr(), true /*FullyQualified*/, WithGlobalNsPrefix);
// In case of template specializations iterate over the arguments and
// fully qualify them as well.
- if (isa<const TemplateSpecializationType>(QT.getTypePtr()) ||
- isa<const RecordType>(QT.getTypePtr())) {
+ if (const auto *TT = dyn_cast<TagType>(QT.getTypePtr())) {
// We are asked to fully qualify and we have a Record Type (which
// may point to a template specialization) or Template
// Specialization Type. We need to fully qualify their arguments.
const Type *TypePtr = getFullyQualifiedTemplateType(
- Ctx, QT.getTypePtr(), WithGlobalNsPrefix);
+ Ctx, TT, TT->getKeyword(), Prefix, WithGlobalNsPrefix);
QT = QualType(TypePtr, 0);
- }
- if (Prefix || Keyword != ElaboratedTypeKeyword::None) {
- QT = Ctx.getElaboratedType(Keyword, Prefix, QT);
+ } else if (const auto *TT = dyn_cast<TypedefType>(QT.getTypePtr())) {
+ QT = Ctx.getTypedefType(
+ TT->getKeyword(), Prefix, TT->getDecl(),
+ getFullyQualifiedType(TT->desugar(), Ctx, WithGlobalNsPrefix));
+ } else {
+ assert(!Prefix && "Unhandled type node");
}
QT = Ctx.getQualifiedType(QT, PrefixQualifiers);
return QT;
@@ -486,5 +491,12 @@ std::string getFullyQualifiedName(QualType QT,
return FQQT.getAsString(Policy);
}
+NestedNameSpecifier getFullyQualifiedDeclaredContext(const ASTContext &Ctx,
+ const Decl *Decl,
+ bool WithGlobalNsPrefix) {
+ return createNestedNameSpecifierForScopeOf(Ctx, Decl, /*FullyQualified=*/true,
+ WithGlobalNsPrefix);
+}
+
} // end namespace TypeName
} // end namespace clang
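
Editorial aside: the public TypeName entry points keep their shape; only the internal qualifier plumbing switches to the value-type NestedNameSpecifier. A caller-side usage sketch of the unchanged getFullyQualifiedName API; the wrapper function is illustrative.

    #include "clang/AST/ASTContext.h"
    #include "clang/AST/QualTypeNames.h"
    #include <string>

    using namespace clang;

    // Print a type with all namespace/class scopes spelled out, including
    // a leading "::".
    static std::string printFullyQualified(QualType QT,
                                           const ASTContext &Ctx) {
      return TypeName::getFullyQualifiedName(QT, Ctx, Ctx.getPrintingPolicy(),
                                             /*WithGlobalNsPrefix=*/true);
    }
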
diff --git a/clang/lib/AST/RecordLayoutBuilder.cpp b/clang/lib/AST/RecordLayoutBuilder.cpp
index 760b2fc..f1f21f4 100644
--- a/clang/lib/AST/RecordLayoutBuilder.cpp
+++ b/clang/lib/AST/RecordLayoutBuilder.cpp
@@ -2012,8 +2012,7 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D,
} else if (const BuiltinType *BTy = BaseTy->getAs<BuiltinType>()) {
performBuiltinTypeAlignmentUpgrade(BTy);
} else if (const RecordType *RT = BaseTy->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
- assert(RD && "Expected non-null RecordDecl.");
+ const RecordDecl *RD = RT->getOriginalDecl();
const ASTRecordLayout &FieldRecord = Context.getASTRecordLayout(RD);
PreferredAlign = FieldRecord.getPreferredAlignment();
}
@@ -2128,7 +2127,8 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D,
// TODO: Takes no account the alignment of the outer struct
if (FieldOffset % OriginalFieldAlign != 0)
Diag(D->getLocation(), diag::warn_unaligned_access)
- << Context.getTypeDeclType(RD) << D->getName() << D->getType();
+ << Context.getCanonicalTagType(RD) << D->getName()
+ << D->getType();
}
}
@@ -2193,8 +2193,7 @@ void ItaniumRecordLayoutBuilder::FinishLayout(const NamedDecl *D) {
InBits = false;
}
Diag(RD->getLocation(), diag::warn_padded_struct_size)
- << Context.getTypeDeclType(RD)
- << PadSize
+ << Context.getCanonicalTagType(RD) << PadSize
<< (InBits ? 1 : 0); // (byte|bit)
}
@@ -2212,7 +2211,7 @@ void ItaniumRecordLayoutBuilder::FinishLayout(const NamedDecl *D) {
Context.getLangOpts().getClangABICompat() <=
LangOptions::ClangABI::Ver15))
Diag(D->getLocation(), diag::warn_unnecessary_packed)
- << Context.getTypeDeclType(RD);
+ << Context.getCanonicalTagType(RD);
}
}
@@ -2306,7 +2305,7 @@ static void CheckFieldPadding(const ASTContext &Context, bool IsUnion,
Context.getDiagnostics().Report(D->getLocation(),
Diagnostic)
<< getPaddingDiagFromTagKind(D->getParent()->getTagKind())
- << Context.getTypeDeclType(D->getParent()) << PadSize
+ << Context.getCanonicalTagType(D->getParent()) << PadSize
<< (InBits ? 1 : 0) // (byte|bit)
<< D->getIdentifier();
} else {
@@ -2315,7 +2314,7 @@ static void CheckFieldPadding(const ASTContext &Context, bool IsUnion,
Context.getDiagnostics().Report(D->getLocation(),
Diagnostic)
<< getPaddingDiagFromTagKind(D->getParent()->getTagKind())
- << Context.getTypeDeclType(D->getParent()) << PadSize
+ << Context.getCanonicalTagType(D->getParent()) << PadSize
<< (InBits ? 1 : 0); // (byte|bit)
}
}
@@ -2714,7 +2713,7 @@ MicrosoftRecordLayoutBuilder::getAdjustedElementInfo(
else {
if (auto RT =
FD->getType()->getBaseElementTypeUnsafe()->getAs<RecordType>()) {
- auto const &Layout = Context.getASTRecordLayout(RT->getDecl());
+ auto const &Layout = Context.getASTRecordLayout(RT->getOriginalDecl());
EndsWithZeroSizedObject = Layout.endsWithZeroSizedObject();
FieldRequiredAlignment = std::max(FieldRequiredAlignment,
Layout.getRequiredAlignment());
@@ -3273,7 +3272,7 @@ void MicrosoftRecordLayoutBuilder::finalizeLayout(const RecordDecl *RD) {
Context.getDiagnostics().Report(RD->getLocation(),
diag::warn_padded_struct_size)
- << Context.getTypeDeclType(RD) << PadSize
+ << Context.getCanonicalTagType(RD) << PadSize
<< (InBits ? 1 : 0); // (byte|bit)
}
}
@@ -3631,7 +3630,7 @@ static void DumpRecordLayout(raw_ostream &OS, const RecordDecl *RD,
auto CXXRD = dyn_cast<CXXRecordDecl>(RD);
PrintOffset(OS, Offset, IndentLevel);
- OS << C.getTypeDeclType(const_cast<RecordDecl *>(RD));
+ OS << C.getCanonicalTagType(const_cast<RecordDecl *>(RD));
if (Description)
OS << ' ' << Description;
if (CXXRD && CXXRD->isEmpty())
@@ -3697,8 +3696,8 @@ static void DumpRecordLayout(raw_ostream &OS, const RecordDecl *RD,
// Recursively dump fields of record type.
if (auto RT = Field->getType()->getAs<RecordType>()) {
- DumpRecordLayout(OS, RT->getDecl(), C, FieldOffset, IndentLevel,
- Field->getName().data(),
+ DumpRecordLayout(OS, RT->getOriginalDecl()->getDefinitionOrSelf(), C,
+ FieldOffset, IndentLevel, Field->getName().data(),
/*PrintSizeInfo=*/false,
/*IncludeVirtualBases=*/true);
continue;
@@ -3781,7 +3780,7 @@ void ASTContext::DumpRecordLayout(const RecordDecl *RD, raw_ostream &OS,
// in libFrontend.
const ASTRecordLayout &Info = getASTRecordLayout(RD);
- OS << "Type: " << getTypeDeclType(RD) << "\n";
+ OS << "Type: " << getCanonicalTagType(RD) << "\n";
OS << "\nLayout: ";
OS << "<ASTRecordLayout\n";
OS << " Size:" << toBits(Info.getSize()) << "\n";
diff --git a/clang/lib/AST/ScanfFormatString.cpp b/clang/lib/AST/ScanfFormatString.cpp
index 1227edd..31c001d 100644
--- a/clang/lib/AST/ScanfFormatString.cpp
+++ b/clang/lib/AST/ScanfFormatString.cpp
@@ -432,9 +432,10 @@ bool ScanfSpecifier::fixType(QualType QT, QualType RawQT,
// If it's an enum, get its underlying type.
if (const EnumType *ETy = PT->getAs<EnumType>()) {
// Don't try to fix incomplete enums.
- if (!ETy->getDecl()->isComplete())
+ const EnumDecl *ED = ETy->getOriginalDecl()->getDefinitionOrSelf();
+ if (!ED->isComplete())
return false;
- PT = ETy->getDecl()->getIntegerType();
+ PT = ED->getIntegerType();
}
const BuiltinType *BT = PT->getAs<BuiltinType>();
diff --git a/clang/lib/AST/StmtPrinter.cpp b/clang/lib/AST/StmtPrinter.cpp
index 6ba5ec8..afccba8 100644
--- a/clang/lib/AST/StmtPrinter.cpp
+++ b/clang/lib/AST/StmtPrinter.cpp
@@ -454,10 +454,7 @@ void StmtPrinter::VisitMSDependentExistsStmt(MSDependentExistsStmt *Node) {
else
OS << "__if_not_exists (";
- if (NestedNameSpecifier *Qualifier
- = Node->getQualifierLoc().getNestedNameSpecifier())
- Qualifier->print(OS, Policy);
-
+ Node->getQualifierLoc().getNestedNameSpecifier().print(OS, Policy);
OS << Node->getNameInfo() << ") ";
PrintRawCompoundStmt(Node->getSubStmt());
@@ -1309,8 +1306,7 @@ void StmtPrinter::VisitDeclRefExpr(DeclRefExpr *Node) {
TPOD->printAsExpr(OS, Policy);
return;
}
- if (NestedNameSpecifier *Qualifier = Node->getQualifier())
- Qualifier->print(OS, Policy);
+ Node->getQualifier().print(OS, Policy);
if (Node->hasTemplateKeyword())
OS << "template ";
@@ -1359,8 +1355,7 @@ void StmtPrinter::VisitDeclRefExpr(DeclRefExpr *Node) {
void StmtPrinter::VisitDependentScopeDeclRefExpr(
DependentScopeDeclRefExpr *Node) {
- if (NestedNameSpecifier *Qualifier = Node->getQualifier())
- Qualifier->print(OS, Policy);
+ Node->getQualifier().print(OS, Policy);
if (Node->hasTemplateKeyword())
OS << "template ";
OS << Node->getNameInfo();
@@ -1369,8 +1364,7 @@ void StmtPrinter::VisitDependentScopeDeclRefExpr(
}
void StmtPrinter::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *Node) {
- if (Node->getQualifier())
- Node->getQualifier()->print(OS, Policy);
+ Node->getQualifier().print(OS, Policy);
if (Node->hasTemplateKeyword())
OS << "template ";
OS << Node->getNameInfo();
@@ -1778,8 +1772,7 @@ void StmtPrinter::VisitMemberExpr(MemberExpr *Node) {
if (FD->isAnonymousStructOrUnion())
return;
- if (NestedNameSpecifier *Qualifier = Node->getQualifier())
- Qualifier->print(OS, Policy);
+ Node->getQualifier().print(OS, Policy);
if (Node->hasTemplateKeyword())
OS << "template ";
OS << Node->getMemberNameInfo();
@@ -2177,9 +2170,7 @@ void StmtPrinter::VisitMSPropertyRefExpr(MSPropertyRefExpr *Node) {
OS << "->";
else
OS << ".";
- if (NestedNameSpecifier *Qualifier =
- Node->getQualifierLoc().getNestedNameSpecifier())
- Qualifier->print(OS, Policy);
+ Node->getQualifierLoc().getNestedNameSpecifier().print(OS, Policy);
OS << Node->getPropertyDecl()->getDeclName();
}
@@ -2509,8 +2500,7 @@ void StmtPrinter::VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E) {
OS << "->";
else
OS << '.';
- if (E->getQualifier())
- E->getQualifier()->print(OS, Policy);
+ E->getQualifier().print(OS, Policy);
OS << "~";
if (const IdentifierInfo *II = E->getDestroyedTypeIdentifier())
@@ -2572,8 +2562,7 @@ void StmtPrinter::VisitCXXDependentScopeMemberExpr(
PrintExpr(Node->getBase());
OS << (Node->isArrow() ? "->" : ".");
}
- if (NestedNameSpecifier *Qualifier = Node->getQualifier())
- Qualifier->print(OS, Policy);
+ Node->getQualifier().print(OS, Policy);
if (Node->hasTemplateKeyword())
OS << "template ";
OS << Node->getMemberNameInfo();
@@ -2586,8 +2575,7 @@ void StmtPrinter::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *Node) {
PrintExpr(Node->getBase());
OS << (Node->isArrow() ? "->" : ".");
}
- if (NestedNameSpecifier *Qualifier = Node->getQualifier())
- Qualifier->print(OS, Policy);
+ Node->getQualifier().print(OS, Policy);
if (Node->hasTemplateKeyword())
OS << "template ";
OS << Node->getMemberNameInfo();
@@ -2678,8 +2666,7 @@ void StmtPrinter::VisitCXXParenListInitExpr(CXXParenListInitExpr *Node) {
void StmtPrinter::VisitConceptSpecializationExpr(ConceptSpecializationExpr *E) {
NestedNameSpecifierLoc NNS = E->getNestedNameSpecifierLoc();
- if (NNS)
- NNS.getNestedNameSpecifier()->print(OS, Policy);
+ NNS.getNestedNameSpecifier().print(OS, Policy);
if (E->getTemplateKWLoc().isValid())
OS << "template ";
OS << E->getFoundDecl()->getName();
diff --git a/clang/lib/AST/StmtProfile.cpp b/clang/lib/AST/StmtProfile.cpp
index 0297f9c..5fee884 100644
--- a/clang/lib/AST/StmtProfile.cpp
+++ b/clang/lib/AST/StmtProfile.cpp
@@ -65,7 +65,7 @@ namespace {
/// Visit a nested-name-specifier that occurs within an expression
/// or statement.
- virtual void VisitNestedNameSpecifier(NestedNameSpecifier *NNS) = 0;
+ virtual void VisitNestedNameSpecifier(NestedNameSpecifier NNS) = 0;
/// Visit a template name that occurs within an expression or
/// statement.
@@ -167,10 +167,10 @@ namespace {
ID.AddPointer(II);
}
- void VisitNestedNameSpecifier(NestedNameSpecifier *NNS) override {
+ void VisitNestedNameSpecifier(NestedNameSpecifier NNS) override {
if (Canonical)
- NNS = Context.getCanonicalNestedNameSpecifier(NNS);
- ID.AddPointer(NNS);
+ NNS = NNS.getCanonical();
+ NNS.Profile(ID);
}
void VisitTemplateName(TemplateName Name) override {
@@ -226,11 +226,10 @@ namespace {
void VisitTemplateName(TemplateName Name) override {
Hash.AddTemplateName(Name);
}
- void VisitNestedNameSpecifier(NestedNameSpecifier *NNS) override {
- ID.AddBoolean(NNS);
- if (NNS) {
+ void VisitNestedNameSpecifier(NestedNameSpecifier NNS) override {
+ ID.AddBoolean(bool(NNS));
+ if (NNS)
Hash.AddNestedNameSpecifier(NNS);
- }
}
};
}
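
Editorial aside: the statement profilers above now hash the specifier by value (NNS.getCanonical(), NNS.Profile(ID)) instead of by pointer identity. A standalone sketch of the same idea, assuming NestedNameSpecifier::Profile and the boolean conversion used in the hunk; the helper is illustrative.

    #include "clang/AST/NestedNameSpecifier.h"
    #include "llvm/ADT/FoldingSet.h"

    using namespace clang;

    // Mix a possibly-empty specifier into a FoldingSetNodeID, recording
    // its presence first so empty and non-empty cases hash differently.
    static void addNNS(llvm::FoldingSetNodeID &ID, NestedNameSpecifier NNS) {
      ID.AddBoolean(bool(NNS));
      if (NNS)
        NNS.Profile(ID);
    }
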
diff --git a/clang/lib/AST/TemplateBase.cpp b/clang/lib/AST/TemplateBase.cpp
index 7a0f740..76050ce 100644
--- a/clang/lib/AST/TemplateBase.cpp
+++ b/clang/lib/AST/TemplateBase.cpp
@@ -57,7 +57,7 @@ static void printIntegral(const TemplateArgument &TemplArg, raw_ostream &Out,
if (Policy.UseEnumerators) {
if (const EnumType *ET = T->getAs<EnumType>()) {
- for (const EnumConstantDecl *ECD : ET->getDecl()->enumerators()) {
+ for (const EnumConstantDecl *ECD : ET->getOriginalDecl()->enumerators()) {
// In Sema::CheckTemplateArugment, enum template arguments value are
// extended to the size of the integer underlying the enum type. This
// may create a size difference between the enum value and template
@@ -596,6 +596,29 @@ void TemplateArgument::print(const PrintingPolicy &Policy, raw_ostream &Out,
// TemplateArgumentLoc Implementation
//===----------------------------------------------------------------------===//
+TemplateArgumentLoc::TemplateArgumentLoc(ASTContext &Ctx,
+ const TemplateArgument &Argument,
+ SourceLocation TemplateKWLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateNameLoc,
+ SourceLocation EllipsisLoc)
+ : Argument(Argument),
+ LocInfo(Ctx, TemplateKWLoc, QualifierLoc, TemplateNameLoc, EllipsisLoc) {
+ assert(Argument.getKind() == TemplateArgument::Template ||
+ Argument.getKind() == TemplateArgument::TemplateExpansion);
+ assert(QualifierLoc.getNestedNameSpecifier() ==
+ Argument.getAsTemplateOrTemplatePattern().getQualifier());
+}
+
+NestedNameSpecifierLoc TemplateArgumentLoc::getTemplateQualifierLoc() const {
+ if (Argument.getKind() != TemplateArgument::Template &&
+ Argument.getKind() != TemplateArgument::TemplateExpansion)
+ return NestedNameSpecifierLoc();
+ return NestedNameSpecifierLoc(
+ Argument.getAsTemplateOrTemplatePattern().getQualifier(),
+ LocInfo.getTemplate()->QualifierLocData);
+}
+
SourceRange TemplateArgumentLoc::getSourceRange() const {
switch (Argument.getKind()) {
case TemplateArgument::Expression:
@@ -702,10 +725,11 @@ const StreamingDiagnostic &clang::operator<<(const StreamingDiagnostic &DB,
}
clang::TemplateArgumentLocInfo::TemplateArgumentLocInfo(
- ASTContext &Ctx, NestedNameSpecifierLoc QualifierLoc,
- SourceLocation TemplateNameLoc, SourceLocation EllipsisLoc) {
+ ASTContext &Ctx, SourceLocation TemplateKWLoc,
+ NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateNameLoc,
+ SourceLocation EllipsisLoc) {
TemplateTemplateArgLocInfo *Template = new (Ctx) TemplateTemplateArgLocInfo;
- Template->Qualifier = QualifierLoc.getNestedNameSpecifier();
+ Template->TemplateKwLoc = TemplateKWLoc;
Template->QualifierLocData = QualifierLoc.getOpaqueData();
Template->TemplateNameLoc = TemplateNameLoc;
Template->EllipsisLoc = EllipsisLoc;
diff --git a/clang/lib/AST/TemplateName.cpp b/clang/lib/AST/TemplateName.cpp
index 5b7abc4..c171516 100644
--- a/clang/lib/AST/TemplateName.cpp
+++ b/clang/lib/AST/TemplateName.cpp
@@ -289,10 +289,30 @@ QualifiedTemplateName *TemplateName::getAsQualifiedTemplateName() const {
return dyn_cast_if_present<QualifiedTemplateName *>(Storage);
}
+QualifiedTemplateName *
+TemplateName::getAsAdjustedQualifiedTemplateName() const {
+ for (std::optional<TemplateName> Cur = *this; Cur;
+ Cur = Cur->desugar(/*IgnoreDeduced=*/true))
+ if (QualifiedTemplateName *N = Cur->getAsQualifiedTemplateName())
+ return N;
+ return nullptr;
+}
+
DependentTemplateName *TemplateName::getAsDependentTemplateName() const {
return Storage.dyn_cast<DependentTemplateName *>();
}
+NestedNameSpecifier TemplateName::getQualifier() const {
+ for (std::optional<TemplateName> Cur = *this; Cur;
+ Cur = Cur->desugar(/*IgnoreDeduced=*/true)) {
+ if (DependentTemplateName *N = Cur->getAsDependentTemplateName())
+ return N->getQualifier();
+ if (QualifiedTemplateName *N = Cur->getAsQualifiedTemplateName())
+ return N->getQualifier();
+ }
+ return std::nullopt;
+}
+
UsingShadowDecl *TemplateName::getAsUsingShadowDecl() const {
if (Decl *D = Storage.dyn_cast<Decl *>())
if (UsingShadowDecl *USD = dyn_cast<UsingShadowDecl>(D))
@@ -303,24 +323,21 @@ UsingShadowDecl *TemplateName::getAsUsingShadowDecl() const {
}
DependentTemplateStorage::DependentTemplateStorage(
- NestedNameSpecifier *Qualifier, IdentifierOrOverloadedOperator Name,
+ NestedNameSpecifier Qualifier, IdentifierOrOverloadedOperator Name,
bool HasTemplateKeyword)
: Qualifier(Qualifier, HasTemplateKeyword), Name(Name) {
- assert((!Qualifier || Qualifier->isDependent()) &&
+ assert((!Qualifier || Qualifier.isDependent()) &&
"Qualifier must be dependent");
}
TemplateNameDependence DependentTemplateStorage::getDependence() const {
- auto D = TemplateNameDependence::DependentInstantiation;
- if (NestedNameSpecifier *Qualifier = getQualifier())
- D |= toTemplateNameDependence(Qualifier->getDependence());
- return D;
+ return toTemplateNameDependence(getQualifier().getDependence()) |
+ TemplateNameDependence::DependentInstantiation;
}
void DependentTemplateStorage::print(raw_ostream &OS,
const PrintingPolicy &Policy) const {
- if (NestedNameSpecifier *NNS = getQualifier())
- NNS->print(OS, Policy);
+ getQualifier().print(OS, Policy);
if (hasTemplateKeyword())
OS << "template ";
@@ -363,16 +380,13 @@ TemplateNameDependence TemplateName::getDependence() const {
case NameKind::QualifiedTemplate: {
QualifiedTemplateName *S = getAsQualifiedTemplateName();
TemplateNameDependence D = S->getUnderlyingTemplate().getDependence();
- if (NestedNameSpecifier *NNS = S->getQualifier())
- D |= toTemplateNameDependence(NNS->getDependence());
+ D |= toTemplateNameDependence(S->getQualifier().getDependence());
return D;
}
case NameKind::DependentTemplate: {
DependentTemplateName *S = getAsDependentTemplateName();
- auto D = TemplateNameDependence::DependentInstantiation;
- if (NestedNameSpecifier *Qualifier = S->getQualifier())
- D |= toTemplateNameDependence(Qualifier->getDependence());
- return D;
+ return toTemplateNameDependence(S->getQualifier().getDependence()) |
+ TemplateNameDependence::DependentInstantiation;
}
case NameKind::SubstTemplateTemplateParm: {
auto *S = getAsSubstTemplateTemplateParm();
@@ -434,18 +448,20 @@ void TemplateName::print(raw_ostream &OS, const PrintingPolicy &Policy,
Template = cast<TemplateDecl>(Template->getCanonicalDecl());
if (handleAnonymousTTP(Template, OS))
return;
- if (Qual == Qualified::None)
+ if (Qual == Qualified::None || Policy.SuppressScope) {
OS << *Template;
- else
- Template->printQualifiedName(OS, Policy);
+ } else {
+ PrintingPolicy NestedNamePolicy = Policy;
+ NestedNamePolicy.SuppressUnwrittenScope = true;
+ Template->printQualifiedName(OS, NestedNamePolicy);
+ }
} else if (QualifiedTemplateName *QTN = getAsQualifiedTemplateName()) {
if (Policy.PrintAsCanonical) {
QTN->getUnderlyingTemplate().print(OS, Policy, Qual);
return;
}
- if (NestedNameSpecifier *NNS = QTN->getQualifier();
- Qual != Qualified::None && NNS)
- NNS->print(OS, Policy);
+ if (Qual != Qualified::None)
+ QTN->getQualifier().print(OS, Policy);
if (QTN->hasTemplateKeyword())
OS << "template ";
diff --git a/clang/lib/AST/TextNodeDumper.cpp b/clang/lib/AST/TextNodeDumper.cpp
index 6b524cf..0856160 100644
--- a/clang/lib/AST/TextNodeDumper.cpp
+++ b/clang/lib/AST/TextNodeDumper.cpp
@@ -1037,35 +1037,34 @@ void clang::TextNodeDumper::dumpTemplateSpecializationKind(
}
}
-void clang::TextNodeDumper::dumpNestedNameSpecifier(const NestedNameSpecifier *NNS) {
+void clang::TextNodeDumper::dumpNestedNameSpecifier(NestedNameSpecifier NNS) {
if (!NNS)
return;
AddChild([=] {
OS << "NestedNameSpecifier";
- switch (NNS->getKind()) {
- case NestedNameSpecifier::Identifier:
- OS << " Identifier";
- OS << " '" << NNS->getAsIdentifier()->getName() << "'";
- break;
- case NestedNameSpecifier::Namespace:
+ switch (NNS.getKind()) {
+ case NestedNameSpecifier::Kind::Namespace: {
+ auto [Namespace, Prefix] = NNS.getAsNamespaceAndPrefix();
OS << " "; // "Namespace" is printed as the decl kind.
- dumpBareDeclRef(NNS->getAsNamespace());
+ dumpBareDeclRef(Namespace);
+ dumpNestedNameSpecifier(Prefix);
break;
- case NestedNameSpecifier::TypeSpec:
+ }
+ case NestedNameSpecifier::Kind::Type:
OS << " TypeSpec";
- dumpType(QualType(NNS->getAsType(), 0));
+ dumpType(QualType(NNS.getAsType(), 0));
break;
- case NestedNameSpecifier::Global:
+ case NestedNameSpecifier::Kind::Global:
OS << " Global";
break;
- case NestedNameSpecifier::Super:
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
OS << " Super";
break;
+ case NestedNameSpecifier::Kind::Null:
+ llvm_unreachable("unexpected null nested name specifier");
}
-
- dumpNestedNameSpecifier(NNS->getPrefix());
});
}
@@ -1401,8 +1400,8 @@ static void dumpBasePath(raw_ostream &OS, const CastExpr *Node) {
if (!First)
OS << " -> ";
- const auto *RD =
- cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
+ const auto *RD = cast<CXXRecordDecl>(
+ Base->getType()->castAs<RecordType>()->getOriginalDecl());
if (Base->isVirtual())
OS << "virtual ";
@@ -2112,19 +2111,32 @@ void TextNodeDumper::VisitFunctionProtoType(const FunctionProtoType *T) {
}
void TextNodeDumper::VisitUnresolvedUsingType(const UnresolvedUsingType *T) {
+ if (ElaboratedTypeKeyword K = T->getKeyword();
+ K != ElaboratedTypeKeyword::None)
+ OS << ' ' << TypeWithKeyword::getKeywordName(K);
+ dumpNestedNameSpecifier(T->getQualifier());
dumpDeclRef(T->getDecl());
}
void TextNodeDumper::VisitUsingType(const UsingType *T) {
- dumpDeclRef(T->getFoundDecl());
- if (!T->typeMatchesDecl())
- OS << " divergent";
+ if (ElaboratedTypeKeyword K = T->getKeyword();
+ K != ElaboratedTypeKeyword::None)
+ OS << ' ' << TypeWithKeyword::getKeywordName(K);
+ dumpNestedNameSpecifier(T->getQualifier());
+ dumpDeclRef(T->getDecl());
+ dumpType(T->desugar());
}
void TextNodeDumper::VisitTypedefType(const TypedefType *T) {
+ if (ElaboratedTypeKeyword K = T->getKeyword();
+ K != ElaboratedTypeKeyword::None)
+ OS << ' ' << TypeWithKeyword::getKeywordName(K);
+ dumpNestedNameSpecifier(T->getQualifier());
dumpDeclRef(T->getDecl());
- if (!T->typeMatchesDecl())
+ if (!T->typeMatchesDecl()) {
OS << " divergent";
+ dumpType(T->desugar());
+ }
}
void TextNodeDumper::VisitUnaryTransformType(const UnaryTransformType *T) {
@@ -2138,7 +2150,17 @@ void TextNodeDumper::VisitUnaryTransformType(const UnaryTransformType *T) {
}
void TextNodeDumper::VisitTagType(const TagType *T) {
- dumpDeclRef(T->getDecl());
+ if (T->isCanonicalUnqualified())
+ OS << " canonical";
+ if (T->isTagOwned())
+ OS << " owns_tag";
+ if (T->isInjected())
+ OS << " injected";
+ if (ElaboratedTypeKeyword K = T->getKeyword();
+ K != ElaboratedTypeKeyword::None)
+ OS << ' ' << TypeWithKeyword::getKeywordName(K);
+ dumpNestedNameSpecifier(T->getQualifier());
+ dumpDeclRef(T->getOriginalDecl());
}
void TextNodeDumper::VisitTemplateTypeParmType(const TemplateTypeParmType *T) {
@@ -2182,12 +2204,15 @@ void TextNodeDumper::VisitTemplateSpecializationType(
const TemplateSpecializationType *T) {
if (T->isTypeAlias())
OS << " alias";
+ if (ElaboratedTypeKeyword K = T->getKeyword();
+ K != ElaboratedTypeKeyword::None)
+ OS << ' ' << TypeWithKeyword::getKeywordName(K);
dumpTemplateName(T->getTemplateName(), "name");
}
void TextNodeDumper::VisitInjectedClassNameType(
const InjectedClassNameType *T) {
- dumpDeclRef(T->getDecl());
+ dumpDeclRef(T->getOriginalDecl());
}
void TextNodeDumper::VisitObjCInterfaceType(const ObjCInterfaceType *T) {
@@ -2778,8 +2803,7 @@ void TextNodeDumper::VisitTemplateTemplateParmDecl(
void TextNodeDumper::VisitUsingDecl(const UsingDecl *D) {
OS << ' ';
- if (D->getQualifier())
- D->getQualifier()->print(OS, D->getASTContext().getPrintingPolicy());
+ D->getQualifier().print(OS, D->getASTContext().getPrintingPolicy());
OS << D->getDeclName();
dumpNestedNameSpecifier(D->getQualifier());
}
@@ -2792,16 +2816,14 @@ void TextNodeDumper::VisitUsingEnumDecl(const UsingEnumDecl *D) {
void TextNodeDumper::VisitUnresolvedUsingTypenameDecl(
const UnresolvedUsingTypenameDecl *D) {
OS << ' ';
- if (D->getQualifier())
- D->getQualifier()->print(OS, D->getASTContext().getPrintingPolicy());
+ D->getQualifier().print(OS, D->getASTContext().getPrintingPolicy());
OS << D->getDeclName();
}
void TextNodeDumper::VisitUnresolvedUsingValueDecl(
const UnresolvedUsingValueDecl *D) {
OS << ' ';
- if (D->getQualifier())
- D->getQualifier()->print(OS, D->getASTContext().getPrintingPolicy());
+ D->getQualifier().print(OS, D->getASTContext().getPrintingPolicy());
OS << D->getDeclName();
dumpType(D->getType());
}
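
Editorial aside: the dumper changes reflect where the elaboration keyword and qualifier now live: directly on the sugar nodes (TypedefType, UsingType, TagType, and so on) rather than on a wrapping ElaboratedType. A sketch of reading them back off a TypedefType, assuming the getKeyword()/getQualifier() accessors used above; the helper is illustrative.

    #include "clang/AST/NestedNameSpecifier.h"
    #include "clang/AST/PrettyPrinter.h"
    #include "clang/AST/Type.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace clang;

    // Spell a typedef reference roughly as written: optional keyword,
    // optional qualifier, then the name.
    static void printTypedefSpelling(llvm::raw_ostream &OS,
                                     const TypedefType *T,
                                     const PrintingPolicy &Policy) {
      if (ElaboratedTypeKeyword K = T->getKeyword();
          K != ElaboratedTypeKeyword::None)
        OS << TypeWithKeyword::getKeywordName(K) << ' ';
      T->getQualifier().print(OS, Policy);
      OS << T->getDecl()->getName();
    }
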
diff --git a/clang/lib/AST/Type.cpp b/clang/lib/AST/Type.cpp
index 141edc8..c382e58 100644
--- a/clang/lib/AST/Type.cpp
+++ b/clang/lib/AST/Type.cpp
@@ -109,12 +109,14 @@ bool Qualifiers::isTargetAddressSpaceSupersetOf(LangAS A, LangAS B,
const IdentifierInfo *QualType::getBaseTypeIdentifier() const {
const Type *ty = getTypePtr();
NamedDecl *ND = nullptr;
+ if (const auto *DNT = ty->getAs<DependentNameType>())
+ return DNT->getIdentifier();
if (ty->isPointerOrReferenceType())
return ty->getPointeeType().getBaseTypeIdentifier();
- else if (ty->isRecordType())
- ND = ty->castAs<RecordType>()->getDecl();
+ if (ty->isRecordType())
+ ND = ty->castAs<RecordType>()->getOriginalDecl();
else if (ty->isEnumeralType())
- ND = ty->castAs<EnumType>()->getDecl();
+ ND = ty->castAs<EnumType>()->getOriginalDecl();
else if (ty->getTypeClass() == Type::Typedef)
ND = ty->castAs<TypedefType>()->getDecl();
else if (ty->isArrayType())
@@ -671,13 +673,13 @@ const Type *Type::getUnqualifiedDesugaredType() const {
bool Type::isClassType() const {
if (const auto *RT = getAs<RecordType>())
- return RT->getDecl()->isClass();
+ return RT->getOriginalDecl()->isClass();
return false;
}
bool Type::isStructureType() const {
if (const auto *RT = getAs<RecordType>())
- return RT->getDecl()->isStruct();
+ return RT->getOriginalDecl()->isStruct();
return false;
}
@@ -685,7 +687,7 @@ bool Type::isStructureTypeWithFlexibleArrayMember() const {
const auto *RT = getAs<RecordType>();
if (!RT)
return false;
- const auto *Decl = RT->getDecl();
+ const auto *Decl = RT->getOriginalDecl()->getDefinitionOrSelf();
if (!Decl->isStruct())
return false;
return Decl->hasFlexibleArrayMember();
@@ -693,19 +695,21 @@ bool Type::isStructureTypeWithFlexibleArrayMember() const {
bool Type::isObjCBoxableRecordType() const {
if (const auto *RT = getAs<RecordType>())
- return RT->getDecl()->hasAttr<ObjCBoxableAttr>();
+ return RT->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->hasAttr<ObjCBoxableAttr>();
return false;
}
bool Type::isInterfaceType() const {
if (const auto *RT = getAs<RecordType>())
- return RT->getDecl()->isInterface();
+ return RT->getOriginalDecl()->isInterface();
return false;
}
bool Type::isStructureOrClassType() const {
if (const auto *RT = getAs<RecordType>()) {
- RecordDecl *RD = RT->getDecl();
+ RecordDecl *RD = RT->getOriginalDecl();
return RD->isStruct() || RD->isClass() || RD->isInterface();
}
return false;
@@ -719,7 +723,7 @@ bool Type::isVoidPointerType() const {
bool Type::isUnionType() const {
if (const auto *RT = getAs<RecordType>())
- return RT->getDecl()->isUnion();
+ return RT->getOriginalDecl()->isUnion();
return false;
}
@@ -736,7 +740,7 @@ bool Type::isComplexIntegerType() const {
bool Type::isScopedEnumeralType() const {
if (const auto *ET = getAs<EnumType>())
- return ET->getDecl()->isScoped();
+ return ET->getOriginalDecl()->isScoped();
return false;
}
@@ -770,13 +774,13 @@ QualType Type::getPointeeType() const {
const RecordType *Type::getAsStructureType() const {
// If this is directly a structure type, return it.
if (const auto *RT = dyn_cast<RecordType>(this)) {
- if (RT->getDecl()->isStruct())
+ if (RT->getOriginalDecl()->isStruct())
return RT;
}
// If the canonical form of this type isn't the right kind, reject it.
if (const auto *RT = dyn_cast<RecordType>(CanonicalType)) {
- if (!RT->getDecl()->isStruct())
+ if (!RT->getOriginalDecl()->isStruct())
return nullptr;
// If this is a typedef for a structure type, strip the typedef off without
@@ -789,13 +793,13 @@ const RecordType *Type::getAsStructureType() const {
const RecordType *Type::getAsUnionType() const {
// If this is directly a union type, return it.
if (const auto *RT = dyn_cast<RecordType>(this)) {
- if (RT->getDecl()->isUnion())
+ if (RT->getOriginalDecl()->isUnion())
return RT;
}
// If the canonical form of this type isn't the right kind, reject it.
if (const auto *RT = dyn_cast<RecordType>(CanonicalType)) {
- if (!RT->getDecl()->isUnion())
+ if (!RT->getOriginalDecl()->isUnion())
return nullptr;
// If this is a typedef for a union type, strip the typedef off without
@@ -1272,9 +1276,6 @@ public:
TRIVIAL_TYPE_CLASS(Record)
TRIVIAL_TYPE_CLASS(Enum)
- // FIXME: Non-trivial to implement, but important for C++
- SUGARED_TYPE_CLASS(Elaborated)
-
QualType VisitAttributedType(const AttributedType *T) {
QualType modifiedType = recurse(T->getModifiedType());
if (modifiedType.isNull())
@@ -1921,25 +1922,32 @@ const CXXRecordDecl *Type::getPointeeCXXRecordDecl() const {
return nullptr;
if (const auto *RT = PointeeType->getAs<RecordType>())
- return dyn_cast<CXXRecordDecl>(RT->getDecl());
+ return dyn_cast<CXXRecordDecl>(
+ RT->getOriginalDecl()->getDefinitionOrSelf());
return nullptr;
}
CXXRecordDecl *Type::getAsCXXRecordDecl() const {
- return dyn_cast_or_null<CXXRecordDecl>(getAsTagDecl());
+ const auto *TT = dyn_cast<TagType>(CanonicalType);
+ if (!isa_and_present<RecordType, InjectedClassNameType>(TT))
+ return nullptr;
+ auto *TD = TT->getOriginalDecl();
+ if (!isa<InjectedClassNameType>(TT) && !isa<CXXRecordDecl>(TD))
+ return nullptr;
+ return cast<CXXRecordDecl>(TD)->getDefinitionOrSelf();
}
RecordDecl *Type::getAsRecordDecl() const {
- return dyn_cast_or_null<RecordDecl>(getAsTagDecl());
+ const auto *TT = dyn_cast<TagType>(CanonicalType);
+ if (!isa_and_present<RecordType, InjectedClassNameType>(TT))
+ return nullptr;
+ return cast<RecordDecl>(TT->getOriginalDecl())->getDefinitionOrSelf();
}
TagDecl *Type::getAsTagDecl() const {
- if (const auto *TT = getAs<TagType>())
- return TT->getDecl();
- if (const auto *Injected = getAs<InjectedClassNameType>())
- return Injected->getDecl();
-
+ if (const auto *TT = dyn_cast<TagType>(CanonicalType))
+ return TT->getOriginalDecl()->getDefinitionOrSelf();
return nullptr;
}
@@ -1951,6 +1959,35 @@ Type::getAsNonAliasTemplateSpecializationType() const {
return TST;
}
+NestedNameSpecifier Type::getPrefix() const {
+ switch (getTypeClass()) {
+ case Type::DependentName:
+ return cast<DependentNameType>(this)->getQualifier();
+ case Type::TemplateSpecialization: {
+ QualifiedTemplateName *S = cast<TemplateSpecializationType>(this)
+ ->getTemplateName()
+ .getAsAdjustedQualifiedTemplateName();
+ return S ? S->getQualifier() : std::nullopt;
+ }
+ case Type::DependentTemplateSpecialization:
+ return cast<DependentTemplateSpecializationType>(this)
+ ->getDependentTemplateName()
+ .getQualifier();
+ case Type::Enum:
+ case Type::Record:
+ case Type::InjectedClassName:
+ return cast<TagType>(this)->getQualifier();
+ case Type::Typedef:
+ return cast<TypedefType>(this)->getQualifier();
+ case Type::UnresolvedUsing:
+ return cast<UnresolvedUsingType>(this)->getQualifier();
+ case Type::Using:
+ return cast<UsingType>(this)->getQualifier();
+ default:
+ return std::nullopt;
+ }
+}
+
bool Type::hasAttr(attr::Kind AK) const {
const Type *Cur = this;
while (const auto *AT = Cur->getAs<AttributedType>()) {
@@ -1989,10 +2026,6 @@ public:
return Visit(T->getReplacementType());
}
- Type *VisitElaboratedType(const ElaboratedType *T) {
- return Visit(T->getNamedType());
- }
-
Type *VisitPointerType(const PointerType *T) {
return Visit(T->getPointeeType());
}
@@ -2114,7 +2147,7 @@ bool Type::isIntegralType(const ASTContext &Ctx) const {
// Complete enum types are integral in C.
if (!Ctx.getLangOpts().CPlusPlus)
if (const auto *ET = dyn_cast<EnumType>(CanonicalType))
- return ET->getDecl()->isComplete();
+ return IsEnumDeclComplete(ET->getOriginalDecl());
return isBitIntType();
}
@@ -2131,7 +2164,7 @@ bool Type::isIntegralOrUnscopedEnumerationType() const {
bool Type::isUnscopedEnumerationType() const {
if (const auto *ET = dyn_cast<EnumType>(CanonicalType))
- return !ET->getDecl()->isScoped();
+ return !ET->getOriginalDecl()->isScoped();
return false;
}
@@ -2216,8 +2249,10 @@ bool Type::isSignedIntegerType() const {
if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType)) {
// Incomplete enum types are not treated as integer types.
// FIXME: In C++, enum types are never integer types.
- if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped())
- return ET->getDecl()->getIntegerType()->isSignedIntegerType();
+ const auto *ED = ET->getOriginalDecl()->getDefinitionOrSelf();
+ if (!ED->isComplete() || ED->isScoped())
+ return false;
+ return ED->getIntegerType()->isSignedIntegerType();
}
if (const auto *IT = dyn_cast<BitIntType>(CanonicalType))
@@ -2232,9 +2267,12 @@ bool Type::isSignedIntegerOrEnumerationType() const {
if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
return BT->isSignedInteger();
- if (const auto *ET = dyn_cast<EnumType>(CanonicalType);
- ET && ET->getDecl()->isComplete())
- return ET->getDecl()->getIntegerType()->isSignedIntegerType();
+ if (const auto *ET = dyn_cast<EnumType>(CanonicalType)) {
+ const auto *ED = ET->getOriginalDecl()->getDefinitionOrSelf();
+ if (!ED->isComplete())
+ return false;
+ return ED->getIntegerType()->isSignedIntegerType();
+ }
if (const auto *IT = dyn_cast<BitIntType>(CanonicalType))
return IT->isSigned();
@@ -2261,8 +2299,10 @@ bool Type::isUnsignedIntegerType() const {
if (const auto *ET = dyn_cast<EnumType>(CanonicalType)) {
// Incomplete enum types are not treated as integer types.
// FIXME: In C++, enum types are never integer types.
- if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped())
- return ET->getDecl()->getIntegerType()->isUnsignedIntegerType();
+ const auto *ED = ET->getOriginalDecl()->getDefinitionOrSelf();
+ if (!ED->isComplete() || ED->isScoped())
+ return false;
+ return ED->getIntegerType()->isUnsignedIntegerType();
}
if (const auto *IT = dyn_cast<BitIntType>(CanonicalType))
@@ -2277,9 +2317,12 @@ bool Type::isUnsignedIntegerOrEnumerationType() const {
if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
return BT->isUnsignedInteger();
- if (const auto *ET = dyn_cast<EnumType>(CanonicalType);
- ET && ET->getDecl()->isComplete())
- return ET->getDecl()->getIntegerType()->isUnsignedIntegerType();
+ if (const auto *ET = dyn_cast<EnumType>(CanonicalType)) {
+ const auto *ED = ET->getOriginalDecl()->getDefinitionOrSelf();
+ if (!ED->isComplete())
+ return false;
+ return ED->getIntegerType()->isUnsignedIntegerType();
+ }
if (const auto *IT = dyn_cast<BitIntType>(CanonicalType))
return IT->isUnsigned();
@@ -2328,8 +2371,10 @@ bool Type::isRealType() const {
if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
return BT->getKind() >= BuiltinType::Bool &&
BT->getKind() <= BuiltinType::Ibm128;
- if (const auto *ET = dyn_cast<EnumType>(CanonicalType))
- return ET->getDecl()->isComplete() && !ET->getDecl()->isScoped();
+ if (const auto *ET = dyn_cast<EnumType>(CanonicalType)) {
+ const auto *ED = ET->getOriginalDecl();
+ return !ED->isScoped() && ED->getDefinitionOrSelf()->isComplete();
+ }
return isBitIntType();
}
@@ -2337,14 +2382,16 @@ bool Type::isArithmeticType() const {
if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
return BT->getKind() >= BuiltinType::Bool &&
BT->getKind() <= BuiltinType::Ibm128;
- if (const auto *ET = dyn_cast<EnumType>(CanonicalType))
+ if (const auto *ET = dyn_cast<EnumType>(CanonicalType)) {
// GCC allows forward declaration of enum types (forbid by C99 6.7.2.3p2).
// If a body isn't seen by the time we get here, return false.
//
// C++0x: Enumerations are not arithmetic types. For now, just return
// false for scoped enumerations since that will disable any
// unwanted implicit conversions.
- return !ET->getDecl()->isScoped() && ET->getDecl()->isComplete();
+ const auto *ED = ET->getOriginalDecl();
+ return !ED->isScoped() && ED->getDefinitionOrSelf()->isComplete();
+ }
return isa<ComplexType>(CanonicalType) || isBitIntType();
}
@@ -2352,8 +2399,8 @@ bool Type::hasBooleanRepresentation() const {
if (const auto *VT = dyn_cast<VectorType>(CanonicalType))
return VT->getElementType()->isBooleanType();
if (const auto *ET = dyn_cast<EnumType>(CanonicalType)) {
- return ET->getDecl()->isComplete() &&
- ET->getDecl()->getIntegerType()->isBooleanType();
+ const auto *ED = ET->getOriginalDecl()->getDefinitionOrSelf();
+ return ED->isComplete() && ED->getIntegerType()->isBooleanType();
}
if (const auto *IT = dyn_cast<BitIntType>(CanonicalType))
return IT->getNumBits() == 1;
@@ -2385,7 +2432,10 @@ Type::ScalarTypeKind Type::getScalarTypeKind() const {
} else if (isa<MemberPointerType>(T)) {
return STK_MemberPointer;
} else if (isa<EnumType>(T)) {
- assert(cast<EnumType>(T)->getDecl()->isComplete());
+ assert(cast<EnumType>(T)
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->isComplete());
return STK_Integral;
} else if (const auto *CT = dyn_cast<ComplexType>(T)) {
if (CT->getElementType()->isRealFloatingType())
@@ -2409,7 +2459,8 @@ Type::ScalarTypeKind Type::getScalarTypeKind() const {
/// includes union types.
bool Type::isAggregateType() const {
if (const auto *Record = dyn_cast<RecordType>(CanonicalType)) {
- if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(Record->getDecl()))
+ if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(
+ Record->getOriginalDecl()->getDefinitionOrSelf()))
return ClassDecl->isAggregate();
return true;
@@ -2443,7 +2494,8 @@ bool Type::isIncompleteType(NamedDecl **Def) const {
// be completed.
return isVoidType();
case Enum: {
- EnumDecl *EnumD = cast<EnumType>(CanonicalType)->getDecl();
+ EnumDecl *EnumD =
+ cast<EnumType>(CanonicalType)->getOriginalDecl()->getDefinitionOrSelf();
if (Def)
*Def = EnumD;
return !EnumD->isComplete();
@@ -2451,13 +2503,17 @@ bool Type::isIncompleteType(NamedDecl **Def) const {
case Record: {
// A tagged type (struct/union/enum/class) is incomplete if the decl is a
// forward declaration, but not a full definition (C99 6.2.5p22).
- RecordDecl *Rec = cast<RecordType>(CanonicalType)->getDecl();
+ RecordDecl *Rec = cast<RecordType>(CanonicalType)
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf();
if (Def)
*Def = Rec;
return !Rec->isCompleteDefinition();
}
case InjectedClassName: {
- CXXRecordDecl *Rec = cast<InjectedClassNameType>(CanonicalType)->getDecl();
+ CXXRecordDecl *Rec = cast<InjectedClassNameType>(CanonicalType)
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf();
if (!Rec->isBeingDefined())
return false;
if (Def)
@@ -2739,9 +2795,9 @@ bool QualType::isCXX98PODType(const ASTContext &Context) const {
return true;
case Type::Record:
- if (const auto *ClassDecl =
- dyn_cast<CXXRecordDecl>(cast<RecordType>(CanonicalType)->getDecl()))
- return ClassDecl->isPOD();
+ if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(
+ cast<RecordType>(CanonicalType)->getOriginalDecl()))
+ return ClassDecl->getDefinitionOrSelf()->isPOD();
// C struct/union is POD.
return true;
@@ -2782,7 +2838,8 @@ bool QualType::isTrivialType(const ASTContext &Context) const {
if (CanonicalType->isScalarType() || CanonicalType->isVectorType())
return true;
if (const auto *RT = CanonicalType->getAs<RecordType>()) {
- if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ if (const auto *ClassDecl =
+ dyn_cast<CXXRecordDecl>(RT->getOriginalDecl())) {
// C++20 [class]p6:
// A trivial class is a class that is trivially copyable, and
// has one or more eligible default constructors such that each is
@@ -2841,14 +2898,17 @@ static bool isTriviallyCopyableTypeImpl(const QualType &type,
return true;
if (const auto *RT = CanonicalType->getAs<RecordType>()) {
- if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ if (const auto *ClassDecl =
+ dyn_cast<CXXRecordDecl>(RT->getOriginalDecl())) {
if (IsCopyConstructible) {
return ClassDecl->isTriviallyCopyConstructible();
} else {
return ClassDecl->isTriviallyCopyable();
}
}
- return !RT->getDecl()->isNonTrivialToPrimitiveCopy();
+ return !RT->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->isNonTrivialToPrimitiveCopy();
}
// No other types can match.
return false;
@@ -2938,7 +2998,9 @@ QualType::PrimitiveDefaultInitializeKind
QualType::isNonTrivialToPrimitiveDefaultInitialize() const {
if (const auto *RT =
getTypePtr()->getBaseElementTypeUnsafe()->getAs<RecordType>())
- if (RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize())
+ if (RT->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->isNonTrivialToPrimitiveDefaultInitialize())
return PDIK_Struct;
switch (getQualifiers().getObjCLifetime()) {
@@ -2954,7 +3016,9 @@ QualType::isNonTrivialToPrimitiveDefaultInitialize() const {
QualType::PrimitiveCopyKind QualType::isNonTrivialToPrimitiveCopy() const {
if (const auto *RT =
getTypePtr()->getBaseElementTypeUnsafe()->getAs<RecordType>())
- if (RT->getDecl()->isNonTrivialToPrimitiveCopy())
+ if (RT->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->isNonTrivialToPrimitiveCopy())
return PCK_Struct;
Qualifiers Qs = getQualifiers();
@@ -3022,8 +3086,8 @@ bool Type::isLiteralType(const ASTContext &Ctx) const {
// -- all non-static data members and base classes of literal types
//
// We resolve DR1361 by ignoring the second bullet.
- if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
- return ClassDecl->isLiteral();
+ if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getOriginalDecl()))
+ return ClassDecl->getDefinitionOrSelf()->isLiteral();
return true;
}
@@ -3076,8 +3140,8 @@ bool Type::isStandardLayoutType() const {
if (BaseTy->isScalarType() || BaseTy->isVectorType())
return true;
if (const auto *RT = BaseTy->getAs<RecordType>()) {
- if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
- if (!ClassDecl->isStandardLayout())
+ if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getOriginalDecl()))
+ if (!ClassDecl->getDefinitionOrSelf()->isStandardLayout())
return false;
// Default to 'true' for non-C++ class types.
@@ -3119,7 +3183,9 @@ bool QualType::isCXX11PODType(const ASTContext &Context) const {
if (BaseTy->isScalarType() || BaseTy->isVectorType())
return true;
if (const auto *RT = BaseTy->getAs<RecordType>()) {
- if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ if (const auto *ClassDecl =
+ dyn_cast<CXXRecordDecl>(RT->getOriginalDecl())) {
+ ClassDecl = ClassDecl->getDefinitionOrSelf();
// C++11 [class]p10:
// A POD struct is a non-union class that is both a trivial class [...]
if (!ClassDecl->isTrivial())
@@ -3159,8 +3225,9 @@ bool Type::isNothrowT() const {
bool Type::isAlignValT() const {
if (const auto *ET = getAs<EnumType>()) {
- IdentifierInfo *II = ET->getDecl()->getIdentifier();
- if (II && II->isStr("align_val_t") && ET->getDecl()->isInStdNamespace())
+ const auto *ED = ET->getOriginalDecl();
+ IdentifierInfo *II = ED->getIdentifier();
+ if (II && II->isStr("align_val_t") && ED->isInStdNamespace())
return true;
}
return false;
@@ -3168,8 +3235,9 @@ bool Type::isAlignValT() const {
bool Type::isStdByteType() const {
if (const auto *ET = getAs<EnumType>()) {
- IdentifierInfo *II = ET->getDecl()->getIdentifier();
- if (II && II->isStr("byte") && ET->getDecl()->isInStdNamespace())
+ const auto *ED = ET->getOriginalDecl();
+ IdentifierInfo *II = ED->getIdentifier();
+ if (II && II->isStr("byte") && ED->isInStdNamespace())
return true;
}
return false;
@@ -3188,7 +3256,6 @@ bool Type::isSpecifierType() const {
case TemplateTypeParm:
case SubstTemplateTypeParm:
case TemplateSpecialization:
- case Elaborated:
case DependentName:
case DependentTemplateSpecialization:
case ObjCInterface:
@@ -3199,8 +3266,7 @@ bool Type::isSpecifierType() const {
}
}
-ElaboratedTypeKeyword
-TypeWithKeyword::getKeywordForTypeSpec(unsigned TypeSpec) {
+ElaboratedTypeKeyword KeywordHelpers::getKeywordForTypeSpec(unsigned TypeSpec) {
switch (TypeSpec) {
default:
return ElaboratedTypeKeyword::None;
@@ -3219,7 +3285,7 @@ TypeWithKeyword::getKeywordForTypeSpec(unsigned TypeSpec) {
}
}
-TagTypeKind TypeWithKeyword::getTagTypeKindForTypeSpec(unsigned TypeSpec) {
+TagTypeKind KeywordHelpers::getTagTypeKindForTypeSpec(unsigned TypeSpec) {
switch (TypeSpec) {
case TST_class:
return TagTypeKind::Class;
@@ -3237,7 +3303,7 @@ TagTypeKind TypeWithKeyword::getTagTypeKindForTypeSpec(unsigned TypeSpec) {
}
ElaboratedTypeKeyword
-TypeWithKeyword::getKeywordForTagTypeKind(TagTypeKind Kind) {
+KeywordHelpers::getKeywordForTagTypeKind(TagTypeKind Kind) {
switch (Kind) {
case TagTypeKind::Class:
return ElaboratedTypeKeyword::Class;
@@ -3254,7 +3320,7 @@ TypeWithKeyword::getKeywordForTagTypeKind(TagTypeKind Kind) {
}
TagTypeKind
-TypeWithKeyword::getTagTypeKindForKeyword(ElaboratedTypeKeyword Keyword) {
+KeywordHelpers::getTagTypeKindForKeyword(ElaboratedTypeKeyword Keyword) {
switch (Keyword) {
case ElaboratedTypeKeyword::Class:
return TagTypeKind::Class;
@@ -3273,7 +3339,7 @@ TypeWithKeyword::getTagTypeKindForKeyword(ElaboratedTypeKeyword Keyword) {
llvm_unreachable("Unknown elaborated type keyword.");
}
-bool TypeWithKeyword::KeywordIsTagTypeKind(ElaboratedTypeKeyword Keyword) {
+bool KeywordHelpers::KeywordIsTagTypeKind(ElaboratedTypeKeyword Keyword) {
switch (Keyword) {
case ElaboratedTypeKeyword::None:
case ElaboratedTypeKeyword::Typename:
@@ -3288,7 +3354,7 @@ bool TypeWithKeyword::KeywordIsTagTypeKind(ElaboratedTypeKeyword Keyword) {
llvm_unreachable("Unknown elaborated type keyword.");
}
-StringRef TypeWithKeyword::getKeywordName(ElaboratedTypeKeyword Keyword) {
+StringRef KeywordHelpers::getKeywordName(ElaboratedTypeKeyword Keyword) {
switch (Keyword) {
case ElaboratedTypeKeyword::None:
return {};
@@ -3338,13 +3404,21 @@ void DependentTemplateSpecializationType::Profile(
bool Type::isElaboratedTypeSpecifier() const {
ElaboratedTypeKeyword Keyword;
- if (const auto *Elab = dyn_cast<ElaboratedType>(this))
- Keyword = Elab->getKeyword();
+ if (const auto *TST = dyn_cast<TemplateSpecializationType>(this))
+ Keyword = TST->getKeyword();
else if (const auto *DepName = dyn_cast<DependentNameType>(this))
Keyword = DepName->getKeyword();
else if (const auto *DepTST =
dyn_cast<DependentTemplateSpecializationType>(this))
Keyword = DepTST->getKeyword();
+ else if (const auto *T = dyn_cast<TagType>(this))
+ Keyword = T->getKeyword();
+ else if (const auto *T = dyn_cast<TypedefType>(this))
+ Keyword = T->getKeyword();
+ else if (const auto *T = dyn_cast<UnresolvedUsingType>(this))
+ Keyword = T->getKeyword();
+ else if (const auto *T = dyn_cast<UsingType>(this))
+ Keyword = T->getKeyword();
else
return false;
@@ -4011,34 +4085,53 @@ StringRef CountAttributedType::getAttributeName(bool WithMacroPrefix) const {
#undef ENUMERATE_ATTRS
}
-TypedefType::TypedefType(TypeClass tc, const TypedefNameDecl *D,
- QualType UnderlyingType, bool HasTypeDifferentFromDecl)
- : Type(tc, UnderlyingType.getCanonicalType(),
- toSemanticDependence(UnderlyingType->getDependence())),
+TypedefType::TypedefType(TypeClass TC, ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier,
+ const TypedefNameDecl *D, QualType UnderlyingType,
+ bool HasTypeDifferentFromDecl)
+ : TypeWithKeyword(
+ Keyword, TC, UnderlyingType.getCanonicalType(),
+ toSemanticDependence(UnderlyingType->getDependence()) |
+ (Qualifier
+ ? toTypeDependence(Qualifier.getDependence() &
+ ~NestedNameSpecifierDependence::Dependent)
+ : TypeDependence{})),
Decl(const_cast<TypedefNameDecl *>(D)) {
- TypedefBits.hasTypeDifferentFromDecl = HasTypeDifferentFromDecl;
- if (!typeMatchesDecl())
- *getTrailingObjects() = UnderlyingType;
+ if ((TypedefBits.hasQualifier = !!Qualifier))
+ *getTrailingObjects<NestedNameSpecifier>() = Qualifier;
+ if ((TypedefBits.hasTypeDifferentFromDecl = HasTypeDifferentFromDecl))
+ *getTrailingObjects<QualType>() = UnderlyingType;
}
QualType TypedefType::desugar() const {
- return typeMatchesDecl() ? Decl->getUnderlyingType() : *getTrailingObjects();
-}
-
-UsingType::UsingType(const UsingShadowDecl *Found, QualType Underlying,
- QualType Canon)
- : Type(Using, Canon, toSemanticDependence(Canon->getDependence())),
- Found(const_cast<UsingShadowDecl *>(Found)) {
- UsingBits.hasTypeDifferentFromDecl = !Underlying.isNull();
- if (!typeMatchesDecl())
- *getTrailingObjects() = Underlying;
-}
-
-QualType UsingType::getUnderlyingType() const {
- return typeMatchesDecl()
- ? QualType(
- cast<TypeDecl>(Found->getTargetDecl())->getTypeForDecl(), 0)
- : *getTrailingObjects();
+ return typeMatchesDecl() ? Decl->getUnderlyingType()
+ : *getTrailingObjects<QualType>();
+}
+
+UnresolvedUsingType::UnresolvedUsingType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier,
+ const UnresolvedUsingTypenameDecl *D,
+ const Type *CanonicalType)
+ : TypeWithKeyword(
+ Keyword, UnresolvedUsing, QualType(CanonicalType, 0),
+ TypeDependence::DependentInstantiation |
+ (Qualifier
+ ? toTypeDependence(Qualifier.getDependence() &
+ ~NestedNameSpecifierDependence::Dependent)
+ : TypeDependence{})),
+ Decl(const_cast<UnresolvedUsingTypenameDecl *>(D)) {
+ if ((UnresolvedUsingBits.hasQualifier = !!Qualifier))
+ *getTrailingObjects<NestedNameSpecifier>() = Qualifier;
+}
+
+UsingType::UsingType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier, const UsingShadowDecl *D,
+ QualType UnderlyingType)
+ : TypeWithKeyword(Keyword, Using, UnderlyingType.getCanonicalType(),
+ toSemanticDependence(UnderlyingType->getDependence())),
+ D(const_cast<UsingShadowDecl *>(D)), UnderlyingType(UnderlyingType) {
+ if ((UsingBits.hasQualifier = !!Qualifier))
+ *getTrailingObjects() = Qualifier;
}
QualType MacroQualifiedType::desugar() const { return getUnderlyingType(); }
@@ -4212,24 +4305,79 @@ UnaryTransformType::UnaryTransformType(QualType BaseType,
: Type(UnaryTransform, CanonicalType, BaseType->getDependence()),
BaseType(BaseType), UnderlyingType(UnderlyingType), UKind(UKind) {}
-TagType::TagType(TypeClass TC, const TagDecl *D, QualType can)
- : Type(TC, can,
- D->isDependentType() ? TypeDependence::DependentInstantiation
- : TypeDependence::None),
- decl(const_cast<TagDecl *>(D)) {}
-
-static TagDecl *getInterestingTagDecl(TagDecl *decl) {
- for (auto *I : decl->redecls()) {
- if (I->isCompleteDefinition() || I->isBeingDefined())
- return I;
+TagType::TagType(TypeClass TC, ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier, const TagDecl *Tag,
+ bool OwnsTag, bool IsInjected, const Type *CanonicalType)
+ : TypeWithKeyword(
+ Keyword, TC, QualType(CanonicalType, 0),
+ (Tag->isDependentType() ? TypeDependence::DependentInstantiation
+ : TypeDependence::None) |
+ (Qualifier
+ ? toTypeDependence(Qualifier.getDependence() &
+ ~NestedNameSpecifierDependence::Dependent)
+ : TypeDependence{})),
+ decl(const_cast<TagDecl *>(Tag)) {
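+ // Stash the qualifier in the trailing storage and record the tag-ownership
+ // and injected-name flags.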
+ if ((TagTypeBits.HasQualifier = !!Qualifier))
+ getTrailingQualifier() = Qualifier;
+ TagTypeBits.OwnsTag = !!OwnsTag;
+ TagTypeBits.IsInjected = IsInjected;
+}
+
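+// Returns the address just past the most-derived type object; trailing data
+// such as the qualifier is stored there.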
+void *TagType::getTrailingPointer() const {
+ switch (getTypeClass()) {
+ case Type::Enum:
+ return const_cast<EnumType *>(cast<EnumType>(this) + 1);
+ case Type::Record:
+ return const_cast<RecordType *>(cast<RecordType>(this) + 1);
+ case Type::InjectedClassName:
+ return const_cast<InjectedClassNameType *>(
+ cast<InjectedClassNameType>(this) + 1);
+ default:
+ llvm_unreachable("unexpected type class");
}
- // If there's no definition (not even in progress), return what we have.
- return decl;
}
-TagDecl *TagType::getDecl() const { return getInterestingTagDecl(decl); }
+NestedNameSpecifier &TagType::getTrailingQualifier() const {
+ assert(TagTypeBits.HasQualifier);
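+ // Align the raw trailing address to the alignment required for the stored
+ // qualifier.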
+ return *reinterpret_cast<NestedNameSpecifier *>(llvm::alignAddr(
+ getTrailingPointer(), llvm::Align::Of<NestedNameSpecifier *>()));
+}
+
+NestedNameSpecifier TagType::getQualifier() const {
+ return TagTypeBits.HasQualifier ? getTrailingQualifier() : std::nullopt;
+}
+
+ClassTemplateDecl *TagType::getTemplateDecl() const {
+ auto *Decl = dyn_cast<CXXRecordDecl>(decl);
+ if (!Decl)
+ return nullptr;
+ if (auto *RD = dyn_cast<ClassTemplateSpecializationDecl>(Decl))
+ return RD->getSpecializedTemplate();
+ return Decl->getDescribedClassTemplate();
+}
-bool TagType::isBeingDefined() const { return getDecl()->isBeingDefined(); }
+TemplateName TagType::getTemplateName(const ASTContext &Ctx) const {
+ auto *TD = getTemplateDecl();
+ if (!TD)
+ return TemplateName();
+ if (isCanonicalUnqualified())
+ return TemplateName(TD);
+ return Ctx.getQualifiedTemplateName(getQualifier(), /*TemplateKeyword=*/false,
+ TemplateName(TD));
+}
+
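+// Returns the template arguments of a class template specialization, the
+// injected arguments of a described class template, or an empty list
+// otherwise.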
+ArrayRef<TemplateArgument>
+TagType::getTemplateArgs(const ASTContext &Ctx) const {
+ auto *Decl = dyn_cast<CXXRecordDecl>(decl);
+ if (!Decl)
+ return {};
+
+ if (auto *RD = dyn_cast<ClassTemplateSpecializationDecl>(Decl))
+ return RD->getTemplateArgs().asArray();
+ if (ClassTemplateDecl *TD = Decl->getDescribedClassTemplate())
+ return TD->getTemplateParameters()->getInjectedTemplateArgs(Ctx);
+ return {};
+}
bool RecordType::hasConstFields() const {
std::vector<const RecordType *> RecordTypeList;
@@ -4237,8 +4385,10 @@ bool RecordType::hasConstFields() const {
unsigned NextToCheckIndex = 0;
while (RecordTypeList.size() > NextToCheckIndex) {
- for (FieldDecl *FD :
- RecordTypeList[NextToCheckIndex]->getDecl()->fields()) {
+ for (FieldDecl *FD : RecordTypeList[NextToCheckIndex]
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->fields()) {
QualType FieldTy = FD->getType();
if (FieldTy.isConstQualified())
return true;
@@ -4253,6 +4403,13 @@ bool RecordType::hasConstFields() const {
return false;
}
+InjectedClassNameType::InjectedClassNameType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier,
+ const TagDecl *TD, bool IsInjected,
+ const Type *CanonicalType)
+ : TagType(TypeClass::InjectedClassName, Keyword, Qualifier, TD,
+ /*OwnsTag=*/false, IsInjected, CanonicalType) {}
+
AttributedType::AttributedType(QualType canon, const Attr *attr,
QualType modified, QualType equivalent)
: AttributedType(canon, attr->getKind(), attr, modified, equivalent) {}
@@ -4340,10 +4497,6 @@ bool AttributedType::isCallingConv() const {
llvm_unreachable("invalid attr kind");
}
-CXXRecordDecl *InjectedClassNameType::getDecl() const {
- return cast<CXXRecordDecl>(getInterestingTagDecl(Decl));
-}
-
IdentifierInfo *TemplateTypeParmType::getIdentifier() const {
return isCanonicalUnqualified() ? nullptr : getDecl()->getIdentifier();
}
@@ -4467,16 +4620,17 @@ bool TemplateSpecializationType::anyInstantiationDependentTemplateArguments(
}
TemplateSpecializationType::TemplateSpecializationType(
- TemplateName T, bool IsAlias, ArrayRef<TemplateArgument> Args,
- QualType Underlying)
- : Type(TemplateSpecialization,
- Underlying.isNull() ? QualType(this, 0)
- : Underlying.getCanonicalType(),
- (Underlying.isNull()
- ? TypeDependence::DependentInstantiation
- : toSemanticDependence(Underlying->getDependence())) |
- (toTypeDependence(T.getDependence()) &
- TypeDependence::UnexpandedPack)),
+ ElaboratedTypeKeyword Keyword, TemplateName T, bool IsAlias,
+ ArrayRef<TemplateArgument> Args, QualType Underlying)
+ : TypeWithKeyword(
+ Keyword, TemplateSpecialization,
+ Underlying.isNull() ? QualType(this, 0)
+ : Underlying.getCanonicalType(),
+ (Underlying.isNull()
+ ? TypeDependence::DependentInstantiation
+ : toSemanticDependence(Underlying->getDependence())) |
+ (toTypeDependence(T.getDependence()) &
+ TypeDependence::UnexpandedPack)),
Template(T) {
TemplateSpecializationTypeBits.NumArgs = Args.size();
TemplateSpecializationTypeBits.TypeAlias = IsAlias;
@@ -4699,7 +4853,8 @@ static CachedProperties computeCachedProperties(const Type *T) {
case Type::Record:
case Type::Enum: {
- const TagDecl *Tag = cast<TagType>(T)->getDecl();
+ const TagDecl *Tag =
+ cast<TagType>(T)->getOriginalDecl()->getDefinitionOrSelf();
// C++ [basic.link]p8:
// - it is a class or enumeration type that is named (or has a name
@@ -4726,12 +4881,9 @@ static CachedProperties computeCachedProperties(const Type *T) {
case Type::MemberPointer: {
const auto *MPT = cast<MemberPointerType>(T);
CachedProperties Cls = [&] {
- if (auto *RD = MPT->getMostRecentCXXRecordDecl())
- return Cache::get(QualType(RD->getTypeForDecl(), 0));
- if (const Type *T = MPT->getQualifier()->getAsType())
- return Cache::get(T);
- // Treat as a dependent type.
- return CachedProperties(Linkage::External, false);
+ if (MPT->isSugared())
+ MPT = cast<MemberPointerType>(MPT->getCanonicalTypeInternal());
+ return Cache::get(MPT->getQualifier().getAsType());
}();
return merge(Cls, Cache::get(MPT->getPointeeType()));
}
@@ -4811,7 +4963,8 @@ LinkageInfo LinkageComputer::computeTypeLinkageInfo(const Type *T) {
case Type::Record:
case Type::Enum:
- return getDeclLinkageAndVisibility(cast<TagType>(T)->getDecl());
+ return getDeclLinkageAndVisibility(
+ cast<TagType>(T)->getOriginalDecl()->getDefinitionOrSelf());
case Type::Complex:
return computeTypeLinkageInfo(cast<ComplexType>(T)->getElementType());
@@ -4827,8 +4980,8 @@ LinkageInfo LinkageComputer::computeTypeLinkageInfo(const Type *T) {
LinkageInfo LV;
if (auto *D = MPT->getMostRecentCXXRecordDecl()) {
LV.merge(getDeclLinkageAndVisibility(D));
- } else if (auto *Ty = MPT->getQualifier()->getAsType()) {
- LV.merge(computeTypeLinkageInfo(Ty));
+ } else {
+ LV.merge(computeTypeLinkageInfo(MPT->getQualifier().getAsType()));
}
LV.merge(computeTypeLinkageInfo(MPT->getPointeeType()));
return LV;
@@ -5014,7 +5167,7 @@ bool Type::canHaveNullability(bool ResultIfUnknown) const {
llvm_unreachable("unknown builtin type");
case Type::Record: {
- const RecordDecl *RD = cast<RecordType>(type)->getDecl();
+ const RecordDecl *RD = cast<RecordType>(type)->getOriginalDecl();
// For template specializations, look only at primary template attributes.
// This is consistent regardless of whether the instantiation is known.
if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(RD))
@@ -5212,14 +5365,18 @@ bool Type::isCARCBridgableType() const {
/// Check if the specified type is the CUDA device builtin surface type.
bool Type::isCUDADeviceBuiltinSurfaceType() const {
if (const auto *RT = getAs<RecordType>())
- return RT->getDecl()->hasAttr<CUDADeviceBuiltinSurfaceTypeAttr>();
+ return RT->getOriginalDecl()
+ ->getMostRecentDecl()
+ ->hasAttr<CUDADeviceBuiltinSurfaceTypeAttr>();
return false;
}
/// Check if the specified type is the CUDA device builtin texture type.
bool Type::isCUDADeviceBuiltinTextureType() const {
if (const auto *RT = getAs<RecordType>())
- return RT->getDecl()->hasAttr<CUDADeviceBuiltinTextureTypeAttr>();
+ return RT->getOriginalDecl()
+ ->getMostRecentDecl()
+ ->hasAttr<CUDADeviceBuiltinTextureTypeAttr>();
return false;
}
@@ -5246,6 +5403,15 @@ bool Type::isHLSLResourceRecord() const {
return HLSLAttributedResourceType::findHandleTypeOnResource(this) != nullptr;
}
+bool Type::isHLSLResourceRecordArray() const {
+ const Type *Ty = getUnqualifiedDesugaredType();
+ if (!Ty->isArrayType())
+ return false;
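+ // Strip all constant array dimensions before checking the element type.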
+ while (isa<ConstantArrayType>(Ty))
+ Ty = Ty->getArrayElementTypeNoTypeQual();
+ return Ty->isHLSLResourceRecord();
+}
+
bool Type::isHLSLIntangibleType() const {
const Type *Ty = getUnqualifiedDesugaredType();
@@ -5283,7 +5449,7 @@ QualType::DestructionKind QualType::isDestructedTypeImpl(QualType type) {
}
if (const auto *RT = type->getBaseElementTypeUnsafe()->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl();
if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
/// Check if this is a C++ object with a non-trivial destructor.
if (CXXRD->hasDefinition() && !CXXRD->hasTrivialDestructor())
@@ -5291,7 +5457,7 @@ QualType::DestructionKind QualType::isDestructedTypeImpl(QualType type) {
} else {
/// Check if this is a C struct that is non-trivial to destroy or an array
/// that contains such a struct.
- if (RD->isNonTrivialToPrimitiveDestroy())
+ if (RD->getDefinitionOrSelf()->isNonTrivialToPrimitiveDestroy())
return DK_nontrivial_c_struct;
}
}
@@ -5301,16 +5467,16 @@ QualType::DestructionKind QualType::isDestructedTypeImpl(QualType type) {
bool MemberPointerType::isSugared() const {
CXXRecordDecl *D1 = getMostRecentCXXRecordDecl(),
- *D2 = getQualifier()->getAsRecordDecl();
+ *D2 = getQualifier().getAsRecordDecl();
assert(!D1 == !D2);
return D1 != D2 && D1->getCanonicalDecl() != D2->getCanonicalDecl();
}
void MemberPointerType::Profile(llvm::FoldingSetNodeID &ID, QualType Pointee,
- const NestedNameSpecifier *Qualifier,
+ const NestedNameSpecifier Qualifier,
const CXXRecordDecl *Cls) {
ID.AddPointer(Pointee.getAsOpaquePtr());
- ID.AddPointer(Qualifier);
+ Qualifier.Profile(ID);
if (Cls)
ID.AddPointer(Cls->getCanonicalDecl());
}
@@ -5318,14 +5484,14 @@ void MemberPointerType::Profile(llvm::FoldingSetNodeID &ID, QualType Pointee,
CXXRecordDecl *MemberPointerType::getCXXRecordDecl() const {
return dyn_cast<MemberPointerType>(getCanonicalTypeInternal())
->getQualifier()
- ->getAsRecordDecl();
+ .getAsRecordDecl();
}
CXXRecordDecl *MemberPointerType::getMostRecentCXXRecordDecl() const {
auto *RD = getCXXRecordDecl();
if (!RD)
return nullptr;
- return RD->getMostRecentNonInjectedDecl();
+ return RD->getMostRecentDecl();
}
void clang::FixedPointValueToString(SmallVectorImpl<char> &Str,
diff --git a/clang/lib/AST/TypeLoc.cpp b/clang/lib/AST/TypeLoc.cpp
index 5c45c59..fbe8772 100644
--- a/clang/lib/AST/TypeLoc.cpp
+++ b/clang/lib/AST/TypeLoc.cpp
@@ -195,15 +195,6 @@ SourceLocation TypeLoc::getBeginLoc() const {
TypeLoc LeftMost = Cur;
while (true) {
switch (Cur.getTypeLocClass()) {
- case Elaborated:
- if (Cur.getLocalSourceRange().getBegin().isValid()) {
- LeftMost = Cur;
- break;
- }
- Cur = Cur.getNextTypeLoc();
- if (Cur.isNull())
- break;
- continue;
case FunctionProto:
if (Cur.castAs<FunctionProtoTypeLoc>().getTypePtr()
->hasTrailingReturn()) {
@@ -275,7 +266,6 @@ SourceLocation TypeLoc::getEndLoc() const {
Last = Cur;
break;
case Qualified:
- case Elaborated:
break;
}
Cur = Cur.getNextTypeLoc();
@@ -313,9 +303,8 @@ bool TypeSpecTypeLoc::isKind(const TypeLoc &TL) {
}
bool TagTypeLoc::isDefinition() const {
- TagDecl *D = getDecl();
- return D->isCompleteDefinition() &&
- (D->getIdentifier() == nullptr || D->getLocation() == getNameLoc());
+ return getTypePtr()->isTagOwned() &&
+ getOriginalDecl()->isCompleteDefinition();
}
// Reimplemented to account for GNU/C++ extension
@@ -482,6 +471,134 @@ TypeLoc TypeLoc::findExplicitQualifierLoc() const {
return {};
}
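+// Returns the source-location information for the type's qualifier prefix,
+// if it has one.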
+NestedNameSpecifierLoc TypeLoc::getPrefix() const {
+ switch (getTypeLocClass()) {
+ case TypeLoc::DependentName:
+ return castAs<DependentNameTypeLoc>().getQualifierLoc();
+ case TypeLoc::TemplateSpecialization:
+ return castAs<TemplateSpecializationTypeLoc>().getQualifierLoc();
+ case TypeLoc::DependentTemplateSpecialization:
+ return castAs<DependentTemplateSpecializationTypeLoc>().getQualifierLoc();
+ case TypeLoc::DeducedTemplateSpecialization:
+ return castAs<DeducedTemplateSpecializationTypeLoc>().getQualifierLoc();
+ case TypeLoc::Enum:
+ case TypeLoc::Record:
+ case TypeLoc::InjectedClassName:
+ return castAs<TagTypeLoc>().getQualifierLoc();
+ case TypeLoc::Typedef:
+ return castAs<TypedefTypeLoc>().getQualifierLoc();
+ case TypeLoc::UnresolvedUsing:
+ return castAs<UnresolvedUsingTypeLoc>().getQualifierLoc();
+ case TypeLoc::Using:
+ return castAs<UsingTypeLoc>().getQualifierLoc();
+ default:
+ return NestedNameSpecifierLoc();
+ }
+}
+
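+// Returns the location where the type's own name begins, skipping any
+// qualifier prefix.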
+SourceLocation TypeLoc::getNonPrefixBeginLoc() const {
+ switch (getTypeLocClass()) {
+ case TypeLoc::TemplateSpecialization: {
+ auto TL = castAs<TemplateSpecializationTypeLoc>();
+ SourceLocation Loc = TL.getTemplateKeywordLoc();
+ if (!Loc.isValid())
+ Loc = TL.getTemplateNameLoc();
+ return Loc;
+ }
+ case TypeLoc::DependentTemplateSpecialization: {
+ auto TL = castAs<DependentTemplateSpecializationTypeLoc>();
+ SourceLocation Loc = TL.getTemplateKeywordLoc();
+ if (!Loc.isValid())
+ Loc = TL.getTemplateNameLoc();
+ return Loc;
+ }
+ case TypeLoc::DeducedTemplateSpecialization: {
+ auto TL = castAs<DeducedTemplateSpecializationTypeLoc>();
+ SourceLocation Loc = TL.getTemplateKeywordLoc();
+ if (!Loc.isValid())
+ Loc = TL.getTemplateNameLoc();
+ return Loc;
+ }
+ case TypeLoc::DependentName:
+ return castAs<DependentNameTypeLoc>().getNameLoc();
+ case TypeLoc::Enum:
+ case TypeLoc::Record:
+ case TypeLoc::InjectedClassName:
+ return castAs<TagTypeLoc>().getNameLoc();
+ case TypeLoc::Typedef:
+ return castAs<TypedefTypeLoc>().getNameLoc();
+ case TypeLoc::UnresolvedUsing:
+ return castAs<UnresolvedUsingTypeLoc>().getNameLoc();
+ case TypeLoc::Using:
+ return castAs<UsingTypeLoc>().getNameLoc();
+ default:
+ return getBeginLoc();
+ }
+}
+
+SourceLocation TypeLoc::getNonElaboratedBeginLoc() const {
+ // For elaborated types (e.g. `struct a::A`) we want the portion after the
+ // `struct` but including the namespace qualifier, `a::`.
+ switch (getTypeLocClass()) {
+ case TypeLoc::Qualified:
+ return castAs<QualifiedTypeLoc>()
+ .getUnqualifiedLoc()
+ .getNonElaboratedBeginLoc();
+ case TypeLoc::TemplateSpecialization: {
+ auto T = castAs<TemplateSpecializationTypeLoc>();
+ if (NestedNameSpecifierLoc QualifierLoc = T.getQualifierLoc())
+ return QualifierLoc.getBeginLoc();
+ return T.getTemplateNameLoc();
+ }
+ case TypeLoc::DependentTemplateSpecialization: {
+ auto T = castAs<DependentTemplateSpecializationTypeLoc>();
+ if (NestedNameSpecifierLoc QualifierLoc = T.getQualifierLoc())
+ return QualifierLoc.getBeginLoc();
+ return T.getTemplateNameLoc();
+ }
+ case TypeLoc::DeducedTemplateSpecialization: {
+ auto T = castAs<DeducedTemplateSpecializationTypeLoc>();
+ if (NestedNameSpecifierLoc QualifierLoc = T.getQualifierLoc())
+ return QualifierLoc.getBeginLoc();
+ return T.getTemplateNameLoc();
+ }
+ case TypeLoc::DependentName: {
+ auto T = castAs<DependentNameTypeLoc>();
+ if (NestedNameSpecifierLoc QualifierLoc = T.getQualifierLoc())
+ return QualifierLoc.getBeginLoc();
+ return T.getNameLoc();
+ }
+ case TypeLoc::Enum:
+ case TypeLoc::Record:
+ case TypeLoc::InjectedClassName: {
+ auto T = castAs<TagTypeLoc>();
+ if (NestedNameSpecifierLoc QualifierLoc = T.getQualifierLoc())
+ return QualifierLoc.getBeginLoc();
+ return T.getNameLoc();
+ }
+ case TypeLoc::Typedef: {
+ auto T = castAs<TypedefTypeLoc>();
+ if (NestedNameSpecifierLoc QualifierLoc = T.getQualifierLoc())
+ return QualifierLoc.getBeginLoc();
+ return T.getNameLoc();
+ }
+ case TypeLoc::UnresolvedUsing: {
+ auto T = castAs<UnresolvedUsingTypeLoc>();
+ if (NestedNameSpecifierLoc QualifierLoc = T.getQualifierLoc())
+ return QualifierLoc.getBeginLoc();
+ return T.getNameLoc();
+ }
+ case TypeLoc::Using: {
+ auto T = castAs<UsingTypeLoc>();
+ if (NestedNameSpecifierLoc QualifierLoc = T.getQualifierLoc())
+ return QualifierLoc.getBeginLoc();
+ return T.getNameLoc();
+ }
+ default:
+ return getBeginLoc();
+ }
+}
+
void ObjCTypeParamTypeLoc::initializeLocal(ASTContext &Context,
SourceLocation Loc) {
setNameLoc(Loc);
@@ -555,9 +672,9 @@ static void initializeElaboratedKeyword(TL T, SourceLocation Loc) {
: SourceLocation());
}
-static NestedNameSpecifierLoc
-initializeQualifier(ASTContext &Context, NestedNameSpecifier *Qualifier,
- SourceLocation Loc) {
+static NestedNameSpecifierLoc initializeQualifier(ASTContext &Context,
+ NestedNameSpecifier Qualifier,
+ SourceLocation Loc) {
if (!Qualifier)
return NestedNameSpecifierLoc();
NestedNameSpecifierLocBuilder Builder;
@@ -565,15 +682,6 @@ initializeQualifier(ASTContext &Context, NestedNameSpecifier *Qualifier,
return Builder.getWithLocInContext(Context);
}
-void ElaboratedTypeLoc::initializeLocal(ASTContext &Context,
- SourceLocation Loc) {
- if (isEmpty())
- return;
- initializeElaboratedKeyword(*this, Loc);
- setQualifierLoc(
- initializeQualifier(Context, getTypePtr()->getQualifier(), Loc));
-}
-
void DependentNameTypeLoc::initializeLocal(ASTContext &Context,
SourceLocation Loc) {
initializeElaboratedKeyword(*this, Loc);
@@ -596,6 +704,78 @@ DependentTemplateSpecializationTypeLoc::initializeLocal(ASTContext &Context,
Context, getTypePtr()->template_arguments(), getArgInfos(), Loc);
}
+void TemplateSpecializationTypeLoc::set(SourceLocation ElaboratedKeywordLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKeywordLoc,
+ SourceLocation NameLoc,
+ SourceLocation LAngleLoc,
+ SourceLocation RAngleLoc) {
+ TemplateSpecializationLocInfo &Data = *getLocalData();
+
+ Data.ElaboratedKWLoc = ElaboratedKeywordLoc;
+ SourceLocation BeginLoc = ElaboratedKeywordLoc;
+
+ assert(QualifierLoc.getNestedNameSpecifier() ==
+ getTypePtr()->getTemplateName().getQualifier());
+ Data.QualifierData = QualifierLoc ? QualifierLoc.getOpaqueData() : nullptr;
+ if (QualifierLoc && !BeginLoc.isValid())
+ BeginLoc = QualifierLoc.getBeginLoc();
+
+ Data.TemplateKWLoc = TemplateKeywordLoc;
+ if (!BeginLoc.isValid())
+ BeginLoc = TemplateKeywordLoc;
+
+ Data.NameLoc = NameLoc;
+ if (!BeginLoc.isValid())
+ BeginLoc = NameLoc;
+
+ Data.LAngleLoc = LAngleLoc;
+ Data.SR = SourceRange(BeginLoc, RAngleLoc);
+}
+
+void TemplateSpecializationTypeLoc::set(SourceLocation ElaboratedKeywordLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKeywordLoc,
+ SourceLocation NameLoc,
+ const TemplateArgumentListInfo &TAL) {
+ set(ElaboratedKeywordLoc, QualifierLoc, TemplateKeywordLoc, NameLoc,
+ TAL.getLAngleLoc(), TAL.getRAngleLoc());
+ MutableArrayRef<TemplateArgumentLocInfo> ArgInfos = getArgLocInfos();
+ assert(TAL.size() == ArgInfos.size());
+ for (unsigned I = 0, N = TAL.size(); I != N; ++I)
+ ArgInfos[I] = TAL[I].getLocInfo();
+}
+
+void TemplateSpecializationTypeLoc::initializeLocal(ASTContext &Context,
+ SourceLocation Loc) {
+ QualifiedTemplateName *Name =
+ getTypePtr()->getTemplateName().getAsAdjustedQualifiedTemplateName();
+
+ SourceLocation ElaboratedKeywordLoc =
+ getTypePtr()->getKeyword() != ElaboratedTypeKeyword::None
+ ? Loc
+ : SourceLocation();
+
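+ // Build trivial source-location information for any qualifier written on
+ // the template name.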
+ NestedNameSpecifierLoc QualifierLoc;
+ if (NestedNameSpecifier Qualifier =
+ Name ? Name->getQualifier() : std::nullopt) {
+ NestedNameSpecifierLocBuilder Builder;
+ Builder.MakeTrivial(Context, Qualifier, Loc);
+ QualifierLoc = Builder.getWithLocInContext(Context);
+ }
+
+ TemplateArgumentListInfo TAL(Loc, Loc);
+ set(ElaboratedKeywordLoc, QualifierLoc,
+ /*TemplateKeywordLoc=*/Name && Name->hasTemplateKeyword()
+ ? Loc
+ : SourceLocation(),
+ /*NameLoc=*/Loc, /*LAngleLoc=*/Loc, /*RAngleLoc=*/Loc);
+ initializeArgLocs(Context, getTypePtr()->template_arguments(), getArgInfos(),
+ Loc);
+}
+
void TemplateSpecializationTypeLoc::initializeArgLocs(
ASTContext &Context, ArrayRef<TemplateArgument> Args,
TemplateArgumentLocInfo *ArgInfos, SourceLocation Loc) {
@@ -631,7 +811,7 @@ void TemplateSpecializationTypeLoc::initializeArgLocs(
Builder.MakeTrivial(Context, QTN->getQualifier(), Loc);
ArgInfos[i] = TemplateArgumentLocInfo(
- Context, Builder.getWithLocInContext(Context), Loc,
+ Context, Loc, Builder.getWithLocInContext(Context), Loc,
Args[i].getKind() == TemplateArgument::Template ? SourceLocation()
: Loc);
break;
@@ -680,6 +860,14 @@ void AutoTypeLoc::initializeLocal(ASTContext &Context, SourceLocation Loc) {
}
}
+void DeducedTemplateSpecializationTypeLoc::initializeLocal(ASTContext &Context,
+ SourceLocation Loc) {
+ initializeElaboratedKeyword(*this, Loc);
+ setQualifierLoc(initializeQualifier(
+ Context, getTypePtr()->getTemplateName().getQualifier(), Loc));
+ setTemplateNameLoc(Loc);
+}
+
namespace {
class GetContainedAutoTypeLocVisitor :
@@ -693,10 +881,6 @@ namespace {
// Only these types can contain the desired 'auto' type.
- TypeLoc VisitElaboratedTypeLoc(ElaboratedTypeLoc T) {
- return Visit(T.getNamedTypeLoc());
- }
-
TypeLoc VisitQualifiedTypeLoc(QualifiedTypeLoc T) {
return Visit(T.getUnqualifiedLoc());
}
diff --git a/clang/lib/AST/TypePrinter.cpp b/clang/lib/AST/TypePrinter.cpp
index deb453f..ce5870e 100644
--- a/clang/lib/AST/TypePrinter.cpp
+++ b/clang/lib/AST/TypePrinter.cpp
@@ -133,7 +133,7 @@ public:
void printAfter(QualType T, raw_ostream &OS);
void AppendScope(DeclContext *DC, raw_ostream &OS,
DeclarationName NameInScope);
- void printTag(TagDecl *T, raw_ostream &OS);
+ void printTagType(const TagType *T, raw_ostream &OS);
void printFunctionAfter(const FunctionType::ExtInfo &Info, raw_ostream &OS);
#define ABSTRACT_TYPE(CLASS, PARENT)
#define TYPE(CLASS, PARENT) \
@@ -230,7 +230,6 @@ bool TypePrinter::canPrefixQualifiers(const Type *T,
case Type::UnaryTransform:
case Type::Record:
case Type::Enum:
- case Type::Elaborated:
case Type::TemplateTypeParm:
case Type::SubstTemplateTypeParmPack:
case Type::DeducedTemplateSpecialization:
@@ -504,11 +503,7 @@ void TypePrinter::printMemberPointerBefore(const MemberPointerType *T,
// FIXME: this should include vectors, but vectors use attributes I guess.
if (isa<ArrayType>(T->getPointeeType()))
OS << '(';
-
- PrintingPolicy InnerPolicy(Policy);
- InnerPolicy.IncludeTagDefinition = false;
- T->getQualifier()->print(OS, InnerPolicy);
-
+ T->getQualifier().print(OS, Policy);
OS << "*";
}
@@ -1211,29 +1206,50 @@ void TypePrinter::printTypeSpec(NamedDecl *D, raw_ostream &OS) {
void TypePrinter::printUnresolvedUsingBefore(const UnresolvedUsingType *T,
raw_ostream &OS) {
- printTypeSpec(T->getDecl(), OS);
+ OS << TypeWithKeyword::getKeywordName(T->getKeyword());
+ if (T->getKeyword() != ElaboratedTypeKeyword::None)
+ OS << ' ';
+ auto *D = T->getDecl();
+ if (Policy.FullyQualifiedName || T->isCanonicalUnqualified()) {
+ AppendScope(D->getDeclContext(), OS, D->getDeclName());
+ } else {
+ T->getQualifier().print(OS, Policy);
+ }
+ OS << D->getIdentifier()->getName();
+ spaceBeforePlaceHolder(OS);
}
void TypePrinter::printUnresolvedUsingAfter(const UnresolvedUsingType *T,
raw_ostream &OS) {}
void TypePrinter::printUsingBefore(const UsingType *T, raw_ostream &OS) {
- // After `namespace b { using a::X }`, is the type X within B a::X or b::X?
- //
- // - b::X is more formally correct given the UsingType model
- // - b::X makes sense if "re-exporting" a symbol in a new namespace
- // - a::X makes sense if "importing" a symbol for convenience
- //
- // The "importing" use seems much more common, so we print a::X.
- // This could be a policy option, but the right choice seems to rest more
- // with the intent of the code than the caller.
- printTypeSpec(T->getFoundDecl()->getUnderlyingDecl(), OS);
+ OS << TypeWithKeyword::getKeywordName(T->getKeyword());
+ if (T->getKeyword() != ElaboratedTypeKeyword::None)
+ OS << ' ';
+ auto *D = T->getDecl();
+ if (Policy.FullyQualifiedName) {
+ AppendScope(D->getDeclContext(), OS, D->getDeclName());
+ } else {
+ T->getQualifier().print(OS, Policy);
+ }
+ OS << D->getIdentifier()->getName();
+ spaceBeforePlaceHolder(OS);
}
void TypePrinter::printUsingAfter(const UsingType *T, raw_ostream &OS) {}
void TypePrinter::printTypedefBefore(const TypedefType *T, raw_ostream &OS) {
- printTypeSpec(T->getDecl(), OS);
+ OS << TypeWithKeyword::getKeywordName(T->getKeyword());
+ if (T->getKeyword() != ElaboratedTypeKeyword::None)
+ OS << ' ';
+ auto *D = T->getDecl();
+ if (Policy.FullyQualifiedName) {
+ AppendScope(D->getDeclContext(), OS, D->getDeclName());
+ } else {
+ T->getQualifier().print(OS, Policy);
+ }
+ OS << D->getIdentifier()->getName();
+ spaceBeforePlaceHolder(OS);
}
void TypePrinter::printMacroQualifiedBefore(const MacroQualifiedType *T,
@@ -1354,14 +1370,53 @@ void TypePrinter::printAutoAfter(const AutoType *T, raw_ostream &OS) {
void TypePrinter::printDeducedTemplateSpecializationBefore(
const DeducedTemplateSpecializationType *T, raw_ostream &OS) {
- // If the type has been deduced, print the deduced type.
+ if (ElaboratedTypeKeyword Keyword = T->getKeyword();
+ Keyword != ElaboratedTypeKeyword::None)
+ OS << KeywordHelpers::getKeywordName(Keyword) << ' ';
+
+ TemplateName Name = T->getTemplateName();
+
+ // If the type has been deduced, print the template arguments, as if this was
+ // printing the deduced type, but including elaboration and template name
+ // qualification.
+ // FIXME: There should probably be a policy which controls this.
+ // We would probably want to do this on diagnostics, but not on -ast-print.
+ ArrayRef<TemplateArgument> Args;
+ TemplateDecl *DeducedTD = nullptr;
if (!T->getDeducedType().isNull()) {
- printBefore(T->getDeducedType(), OS);
- } else {
+ if (const auto *TST =
+ dyn_cast<TemplateSpecializationType>(T->getDeducedType())) {
+ DeducedTD = TST->getTemplateName().getAsTemplateDecl(
+ /*IgnoreDeduced=*/true);
+ Args = TST->template_arguments();
+ } else {
+ // Should only get here for canonical types.
+ const auto *CD = cast<ClassTemplateSpecializationDecl>(
+ cast<RecordType>(T->getDeducedType())->getOriginalDecl());
+ DeducedTD = CD->getSpecializedTemplate();
+ Args = CD->getTemplateArgs().asArray();
+ }
+
+ // FIXME: Workaround for alias template CTAD not producing guides which
+ // include the alias template specialization type.
+ // Purposefully disregard qualification when building this TemplateName;
+ // any qualification we might have, might not make sense in the
+ // context this was deduced.
+ if (!declaresSameEntity(DeducedTD, Name.getAsTemplateDecl(
+ /*IgnoreDeduced=*/true)))
+ Name = TemplateName(DeducedTD);
+ }
+
+ {
IncludeStrongLifetimeRAII Strong(Policy);
- T->getTemplateName().print(OS, Policy);
- spaceBeforePlaceHolder(OS);
+ Name.print(OS, Policy);
+ }
+ if (DeducedTD) {
+ printTemplateArgumentList(OS, Args, Policy,
+ DeducedTD->getTemplateParameters());
}
+
+ spaceBeforePlaceHolder(OS);
}
void TypePrinter::printDeducedTemplateSpecializationAfter(
@@ -1480,30 +1535,37 @@ void TypePrinter::AppendScope(DeclContext *DC, raw_ostream &OS,
}
}
-void TypePrinter::printTag(TagDecl *D, raw_ostream &OS) {
- if (Policy.IncludeTagDefinition) {
- PrintingPolicy SubPolicy = Policy;
- SubPolicy.IncludeTagDefinition = false;
- D->print(OS, SubPolicy, Indentation);
+void TypePrinter::printTagType(const TagType *T, raw_ostream &OS) {
+ TagDecl *D = T->getOriginalDecl();
+
+ if (Policy.IncludeTagDefinition && T->isTagOwned()) {
+ D->print(OS, Policy, Indentation);
spaceBeforePlaceHolder(OS);
return;
}
bool HasKindDecoration = false;
- // We don't print tags unless this is an elaborated type.
- // In C, we just assume every RecordType is an elaborated type.
- if (!Policy.SuppressTagKeyword && !D->getTypedefNameForAnonDecl()) {
- HasKindDecoration = true;
- OS << D->getKindName();
- OS << ' ';
+ if (T->isCanonicalUnqualified()) {
+ if (!Policy.SuppressTagKeyword && !D->getTypedefNameForAnonDecl()) {
+ HasKindDecoration = true;
+ OS << D->getKindName();
+ OS << ' ';
+ }
+ } else {
+ OS << TypeWithKeyword::getKeywordName(T->getKeyword());
+ if (T->getKeyword() != ElaboratedTypeKeyword::None)
+ OS << ' ';
}
- // Compute the full nested-name-specifier for this type.
- // In C, this will always be empty except when the type
- // being printed is anonymous within other Record.
- if (!Policy.SuppressScope)
+ if (!Policy.FullyQualifiedName && !T->isCanonicalUnqualified()) {
+ T->getQualifier().print(OS, Policy);
+ } else if (!Policy.SuppressScope) {
+ // Compute the full nested-name-specifier for this type.
+ // In C, this will always be empty except when the type
+ // being printed is anonymous within another record.
AppendScope(D->getDeclContext(), OS, D->getDeclName());
+ }
if (const IdentifierInfo *II = D->getIdentifier())
OS << II->getName();
@@ -1578,9 +1640,11 @@ void TypePrinter::printTag(TagDecl *D, raw_ostream &OS) {
void TypePrinter::printRecordBefore(const RecordType *T, raw_ostream &OS) {
// Print the preferred name if we have one for this type.
if (Policy.UsePreferredNames) {
- for (const auto *PNA : T->getDecl()->specific_attrs<PreferredNameAttr>()) {
+ for (const auto *PNA : T->getOriginalDecl()
+ ->getMostRecentDecl()
+ ->specific_attrs<PreferredNameAttr>()) {
if (!declaresSameEntity(PNA->getTypedefType()->getAsCXXRecordDecl(),
- T->getDecl()))
+ T->getOriginalDecl()))
continue;
// Find the outermost typedef or alias template.
QualType T = PNA->getTypedefType();
@@ -1594,17 +1658,44 @@ void TypePrinter::printRecordBefore(const RecordType *T, raw_ostream &OS) {
}
}
- printTag(T->getDecl(), OS);
+ printTagType(T, OS);
}
void TypePrinter::printRecordAfter(const RecordType *T, raw_ostream &OS) {}
void TypePrinter::printEnumBefore(const EnumType *T, raw_ostream &OS) {
- printTag(T->getDecl(), OS);
+ printTagType(T, OS);
}
void TypePrinter::printEnumAfter(const EnumType *T, raw_ostream &OS) {}
+void TypePrinter::printInjectedClassNameBefore(const InjectedClassNameType *T,
+ raw_ostream &OS) {
+ const ASTContext &Ctx = T->getOriginalDecl()->getASTContext();
+ IncludeStrongLifetimeRAII Strong(Policy);
+ T->getTemplateName(Ctx).print(OS, Policy);
+ if (Policy.PrintInjectedClassNameWithArguments) {
+ auto *Decl = T->getOriginalDecl();
+ // FIXME: Use T->getTemplateArgs(Ctx) when that supports as-written
+ // arguments.
+ if (auto *RD = dyn_cast<ClassTemplateSpecializationDecl>(Decl)) {
+ printTemplateArgumentList(OS, RD->getTemplateArgsAsWritten()->arguments(),
+ Policy,
+ T->getTemplateDecl()->getTemplateParameters());
+ } else {
+ ClassTemplateDecl *TD = Decl->getDescribedClassTemplate();
+ assert(TD);
+ printTemplateArgumentList(
+ OS, TD->getTemplateParameters()->getInjectedTemplateArgs(Ctx), Policy,
+ T->getTemplateDecl()->getTemplateParameters());
+ }
+ }
+ spaceBeforePlaceHolder(OS);
+}
+
+void TypePrinter::printInjectedClassNameAfter(const InjectedClassNameType *T,
+ raw_ostream &OS) {}
+
void TypePrinter::printTemplateTypeParmBefore(const TemplateTypeParmType *T,
raw_ostream &OS) {
TemplateTypeParmDecl *D = T->getDecl();
@@ -1671,6 +1762,10 @@ void TypePrinter::printTemplateId(const TemplateSpecializationType *T,
raw_ostream &OS, bool FullyQualify) {
IncludeStrongLifetimeRAII Strong(Policy);
+ if (ElaboratedTypeKeyword K = T->getKeyword();
+ K != ElaboratedTypeKeyword::None)
+ OS << TypeWithKeyword::getKeywordName(K) << ' ';
+
TemplateDecl *TD =
T->getTemplateName().getAsTemplateDecl(/*IgnoreDeduced=*/true);
// FIXME: Null TD never exercised in test suite.
@@ -1680,7 +1775,10 @@ void TypePrinter::printTemplateId(const TemplateSpecializationType *T,
OS << TD->getName();
} else {
- T->getTemplateName().print(OS, Policy, TemplateName::Qualified::None);
+ T->getTemplateName().print(OS, Policy,
+ !Policy.SuppressScope
+ ? TemplateName::Qualified::AsWritten
+ : TemplateName::Qualified::None);
}
DefaultTemplateArgsPolicyRAII TemplateArgs(Policy);
@@ -1699,77 +1797,6 @@ void TypePrinter::printTemplateSpecializationAfter(
const TemplateSpecializationType *T,
raw_ostream &OS) {}
-void TypePrinter::printInjectedClassNameBefore(const InjectedClassNameType *T,
- raw_ostream &OS) {
- if (Policy.PrintInjectedClassNameWithArguments)
- return printTemplateSpecializationBefore(T->getInjectedTST(), OS);
-
- IncludeStrongLifetimeRAII Strong(Policy);
- T->getTemplateName().print(OS, Policy);
- spaceBeforePlaceHolder(OS);
-}
-
-void TypePrinter::printInjectedClassNameAfter(const InjectedClassNameType *T,
- raw_ostream &OS) {}
-
-void TypePrinter::printElaboratedBefore(const ElaboratedType *T,
- raw_ostream &OS) {
- if (Policy.IncludeTagDefinition && T->getOwnedTagDecl()) {
- TagDecl *OwnedTagDecl = T->getOwnedTagDecl();
- assert(OwnedTagDecl->getTypeForDecl() == T->getNamedType().getTypePtr() &&
- "OwnedTagDecl expected to be a declaration for the type");
- PrintingPolicy SubPolicy = Policy;
- SubPolicy.IncludeTagDefinition = false;
- OwnedTagDecl->print(OS, SubPolicy, Indentation);
- spaceBeforePlaceHolder(OS);
- return;
- }
-
- if (Policy.SuppressElaboration) {
- printBefore(T->getNamedType(), OS);
- return;
- }
-
- // The tag definition will take care of these.
- if (!Policy.IncludeTagDefinition)
- {
- OS << TypeWithKeyword::getKeywordName(T->getKeyword());
- if (T->getKeyword() != ElaboratedTypeKeyword::None)
- OS << " ";
- NestedNameSpecifier *Qualifier = T->getQualifier();
- if (!Policy.SuppressTagKeyword && Policy.SuppressScope &&
- !Policy.SuppressUnwrittenScope) {
- bool OldTagKeyword = Policy.SuppressTagKeyword;
- bool OldSupressScope = Policy.SuppressScope;
- Policy.SuppressTagKeyword = true;
- Policy.SuppressScope = false;
- printBefore(T->getNamedType(), OS);
- Policy.SuppressTagKeyword = OldTagKeyword;
- Policy.SuppressScope = OldSupressScope;
- return;
- }
- if (Qualifier)
- Qualifier->print(OS, Policy);
- }
-
- ElaboratedTypePolicyRAII PolicyRAII(Policy);
- printBefore(T->getNamedType(), OS);
-}
-
-void TypePrinter::printElaboratedAfter(const ElaboratedType *T,
- raw_ostream &OS) {
- if (Policy.IncludeTagDefinition && T->getOwnedTagDecl())
- return;
-
- if (Policy.SuppressElaboration) {
- printAfter(T->getNamedType(), OS);
- return;
- }
-
- ElaboratedTypePolicyRAII PolicyRAII(Policy);
- printAfter(T->getNamedType(), OS);
-}
-
void TypePrinter::printParenBefore(const ParenType *T, raw_ostream &OS) {
if (!HasEmptyPlaceHolder && !isa<FunctionType>(T->getInnerType())) {
printBefore(T->getInnerType(), OS);
@@ -1791,9 +1818,7 @@ void TypePrinter::printDependentNameBefore(const DependentNameType *T,
OS << TypeWithKeyword::getKeywordName(T->getKeyword());
if (T->getKeyword() != ElaboratedTypeKeyword::None)
OS << " ";
-
- T->getQualifier()->print(OS, Policy);
-
+ T->getQualifier().print(OS, Policy);
OS << T->getIdentifier()->getName();
spaceBeforePlaceHolder(OS);
}
diff --git a/clang/lib/AST/VTTBuilder.cpp b/clang/lib/AST/VTTBuilder.cpp
index de01184..85101ae 100644
--- a/clang/lib/AST/VTTBuilder.cpp
+++ b/clang/lib/AST/VTTBuilder.cpp
@@ -64,7 +64,9 @@ void VTTBuilder::LayoutSecondaryVTTs(BaseSubobject Base) {
continue;
const auto *BaseDecl =
- cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(
+ I.getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
CharUnits BaseOffset = Base.getBaseOffset() +
@@ -90,7 +92,9 @@ VTTBuilder::LayoutSecondaryVirtualPointers(BaseSubobject Base,
for (const auto &I : RD->bases()) {
const auto *BaseDecl =
- cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(
+ I.getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
// Itanium C++ ABI 2.6.2:
// Secondary virtual pointers are present for all bases with either
@@ -154,7 +158,9 @@ void VTTBuilder::LayoutVirtualVTTs(const CXXRecordDecl *RD,
VisitedVirtualBasesSetTy &VBases) {
for (const auto &I : RD->bases()) {
const auto *BaseDecl =
- cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(
+ I.getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
// Check if this is a virtual base.
if (I.isVirtual()) {
diff --git a/clang/lib/AST/VTableBuilder.cpp b/clang/lib/AST/VTableBuilder.cpp
index 0001745..6cec526 100644
--- a/clang/lib/AST/VTableBuilder.cpp
+++ b/clang/lib/AST/VTableBuilder.cpp
@@ -313,10 +313,12 @@ ComputeReturnAdjustmentBaseOffset(ASTContext &Context,
}
const CXXRecordDecl *DerivedRD =
- cast<CXXRecordDecl>(cast<RecordType>(CanDerivedReturnType)->getDecl());
+ cast<CXXRecordDecl>(
+ cast<RecordType>(CanDerivedReturnType)->getOriginalDecl())
+ ->getDefinitionOrSelf();
- const CXXRecordDecl *BaseRD =
- cast<CXXRecordDecl>(cast<RecordType>(CanBaseReturnType)->getDecl());
+ const CXXRecordDecl *BaseRD = cast<CXXRecordDecl>(
+ cast<RecordType>(CanBaseReturnType)->getOriginalDecl());
return ComputeBaseOffset(Context, BaseRD, DerivedRD);
}
diff --git a/clang/lib/ASTMatchers/ASTMatchFinder.cpp b/clang/lib/ASTMatchers/ASTMatchFinder.cpp
index 6d0ba0b..d43d1aec7 100644
--- a/clang/lib/ASTMatchers/ASTMatchFinder.cpp
+++ b/clang/lib/ASTMatchers/ASTMatchFinder.cpp
@@ -122,15 +122,15 @@ public:
else if (const Stmt *S = DynNode.get<Stmt>())
traverse(*S);
else if (const NestedNameSpecifier *NNS =
- DynNode.get<NestedNameSpecifier>())
+ DynNode.get<NestedNameSpecifier>())
traverse(*NNS);
else if (const NestedNameSpecifierLoc *NNSLoc =
DynNode.get<NestedNameSpecifierLoc>())
traverse(*NNSLoc);
else if (const QualType *Q = DynNode.get<QualType>())
- traverse(*Q);
+ traverse(*Q, /*TraverseQualifier=*/true);
else if (const TypeLoc *T = DynNode.get<TypeLoc>())
- traverse(*T);
+ traverse(*T, /*TraverseQualifier=*/true);
else if (const auto *C = DynNode.get<CXXCtorInitializer>())
traverse(*C);
else if (const TemplateArgumentLoc *TALoc =
@@ -194,7 +194,7 @@ public:
}
// We assume that the QualType and the contained type are on the same
// hierarchy level. Thus, we try to match either of them.
- bool TraverseType(QualType TypeNode) {
+ bool TraverseType(QualType TypeNode, bool TraverseQualifier = true) {
if (TypeNode.isNull())
return true;
ScopedIncrement ScopedDepth(&CurrentDepth);
@@ -202,11 +202,11 @@ public:
if (!match(*TypeNode))
return false;
// The QualType is matched inside traverse.
- return traverse(TypeNode);
+ return traverse(TypeNode, TraverseQualifier);
}
// We assume that the TypeLoc, contained QualType and contained Type all are
// on the same hierarchy level. Thus, we try to match all of them.
- bool TraverseTypeLoc(TypeLoc TypeLocNode) {
+ bool TraverseTypeLoc(TypeLoc TypeLocNode, bool TraverseQualifier = true) {
if (TypeLocNode.isNull())
return true;
ScopedIncrement ScopedDepth(&CurrentDepth);
@@ -217,17 +217,17 @@ public:
if (!match(TypeLocNode.getType()))
return false;
// The TypeLoc is matched inside traverse.
- return traverse(TypeLocNode);
+ return traverse(TypeLocNode, TraverseQualifier);
}
- bool TraverseNestedNameSpecifier(NestedNameSpecifier *NNS) {
+ bool TraverseNestedNameSpecifier(NestedNameSpecifier NNS) {
ScopedIncrement ScopedDepth(&CurrentDepth);
- return (NNS == nullptr) || traverse(*NNS);
+ return !NNS || traverse(NNS);
}
bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS) {
if (!NNS)
return true;
ScopedIncrement ScopedDepth(&CurrentDepth);
- if (!match(*NNS.getNestedNameSpecifier()))
+ if (!match(NNS.getNestedNameSpecifier()))
return false;
return traverse(NNS);
}
@@ -340,15 +340,14 @@ private:
bool baseTraverse(const Stmt &StmtNode) {
return VisitorBase::TraverseStmt(const_cast<Stmt*>(&StmtNode));
}
- bool baseTraverse(QualType TypeNode) {
- return VisitorBase::TraverseType(TypeNode);
+ bool baseTraverse(QualType TypeNode, bool TraverseQualifier) {
+ return VisitorBase::TraverseType(TypeNode, TraverseQualifier);
}
- bool baseTraverse(TypeLoc TypeLocNode) {
- return VisitorBase::TraverseTypeLoc(TypeLocNode);
+ bool baseTraverse(TypeLoc TypeLocNode, bool TraverseQualifier) {
+ return VisitorBase::TraverseTypeLoc(TypeLocNode, TraverseQualifier);
}
- bool baseTraverse(const NestedNameSpecifier &NNS) {
- return VisitorBase::TraverseNestedNameSpecifier(
- const_cast<NestedNameSpecifier*>(&NNS));
+ bool baseTraverse(NestedNameSpecifier NNS) {
+ return VisitorBase::TraverseNestedNameSpecifier(NNS);
}
bool baseTraverse(NestedNameSpecifierLoc NNS) {
return VisitorBase::TraverseNestedNameSpecifierLoc(NNS);
@@ -396,13 +395,13 @@ private:
// Traverses the subtree rooted at 'Node'; returns true if the
// traversal should continue after this function returns.
- template <typename T>
- bool traverse(const T &Node) {
+ template <typename T, class... Args>
+ bool traverse(const T &Node, Args &&...args) {
static_assert(IsBaseType<T>::value,
"traverse can only be instantiated with base type");
if (!match(Node))
return false;
- return baseTraverse(Node);
+ return baseTraverse(Node, std::forward<Args>(args)...);
}
const DynTypedMatcher *const Matcher;
@@ -501,9 +500,9 @@ public:
bool TraverseDecl(Decl *DeclNode);
bool TraverseStmt(Stmt *StmtNode, DataRecursionQueue *Queue = nullptr);
- bool TraverseType(QualType TypeNode);
- bool TraverseTypeLoc(TypeLoc TypeNode);
- bool TraverseNestedNameSpecifier(NestedNameSpecifier *NNS);
+ bool TraverseType(QualType TypeNode, bool TraverseQualifier = true);
+ bool TraverseTypeLoc(TypeLoc TypeNode, bool TraverseQualifier = true);
+ bool TraverseNestedNameSpecifier(NestedNameSpecifier NNS);
bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS);
bool TraverseConstructorInitializer(CXXCtorInitializer *CtorInit);
bool TraverseTemplateArgumentLoc(TemplateArgumentLoc TAL);
@@ -577,13 +576,13 @@ public:
const auto *T = Proto.getTypePtr();
for (const auto &E : T->exceptions())
- TraverseType(E);
+ TraverseType(E, /*TraverseQualifier=*/true);
if (Expr *NE = T->getNoexceptExpr())
TraverseStmt(NE, Queue);
if (LE->hasExplicitResultType())
- TraverseTypeLoc(Proto.getReturnLoc());
+ TraverseTypeLoc(Proto.getReturnLoc(), /*TraverseQualifier=*/true);
TraverseStmt(
const_cast<Expr *>(LE->getTrailingRequiresClause().ConstraintExpr));
}
@@ -1289,33 +1288,42 @@ private:
if (Aliases == TypeAliases.end())
return false;
- if (const auto *ElaboratedTypeNode =
- llvm::dyn_cast<ElaboratedType>(TypeNode)) {
- if (ElaboratedTypeNode->isSugared() && Aliases->second.size() > 1) {
- const auto &DesugaredTypeName =
- ElaboratedTypeNode->desugar().getAsString();
+ auto matches = [&](const TypedefNameDecl *Alias) {
+ BoundNodesTreeBuilder Result(*Builder);
+ if (Matcher.matches(*Alias, this, &Result)) {
+ *Builder = std::move(Result);
+ return true;
+ }
+ return false;
+ };
- for (const TypedefNameDecl *Alias : Aliases->second) {
- if (Alias->getName() != DesugaredTypeName) {
- continue;
- }
+ if (const auto *T = TypeNode->getAs<TypedefType>()) {
+ const auto *TD = T->getDecl()->getCanonicalDecl();
- BoundNodesTreeBuilder Result(*Builder);
- if (Matcher.matches(*Alias, this, &Result)) {
- *Builder = std::move(Result);
- return true;
- }
+ // Prioritize exact matches.
+ SmallVector<const TypedefNameDecl *, 8> NonExactMatches;
+ for (const TypedefNameDecl *Alias : Aliases->second) {
+ if (!declaresSameEntity(TD, Alias)) {
+ NonExactMatches.push_back(Alias);
+ continue;
}
+ if (matches(Alias))
+ return true;
}
- }
- for (const TypedefNameDecl *Alias : Aliases->second) {
- BoundNodesTreeBuilder Result(*Builder);
- if (Matcher.matches(*Alias, this, &Result)) {
- *Builder = std::move(Result);
- return true;
+ for (const TypedefNameDecl *Alias : NonExactMatches) {
+ BoundNodesTreeBuilder Result(*Builder);
+ if (Matcher.matches(*Alias, this, &Result)) {
+ *Builder = std::move(Result);
+ return true;
+ }
}
+ return false;
}
+
+ for (const TypedefNameDecl *Alias : Aliases->second)
+ if (matches(Alias))
+ return true;
return false;
}
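Note: the rewritten alias lookup above first tries aliases that declare the same entity as the matched TypedefType's declaration ("Prioritize exact matches"), and only then falls back to the remaining aliases. A tiny illustrative input, with made-up names, showing why the ordering matters:

    // Both aliases desugar to 'int'; for a matcher applied to the type of 'w',
    // the exact alias 'Width' should be tried before 'Height'.
    typedef int Width;
    typedef int Height;
    Width w = 0;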
@@ -1506,12 +1514,14 @@ bool MatchASTVisitor::TraverseStmt(Stmt *StmtNode, DataRecursionQueue *Queue) {
return RecursiveASTVisitor<MatchASTVisitor>::TraverseStmt(StmtNode, Queue);
}
-bool MatchASTVisitor::TraverseType(QualType TypeNode) {
+bool MatchASTVisitor::TraverseType(QualType TypeNode, bool TraverseQualifier) {
match(TypeNode);
- return RecursiveASTVisitor<MatchASTVisitor>::TraverseType(TypeNode);
+ return RecursiveASTVisitor<MatchASTVisitor>::TraverseType(TypeNode,
+ TraverseQualifier);
}
-bool MatchASTVisitor::TraverseTypeLoc(TypeLoc TypeLocNode) {
+bool MatchASTVisitor::TraverseTypeLoc(TypeLoc TypeLocNode,
+ bool TraverseQualifier) {
// The RecursiveASTVisitor only visits types if they're not within TypeLocs.
// We still want to find those types via matchers, so we match them here. Note
// that the TypeLocs are structurally a shadow-hierarchy to the expressed
@@ -1519,11 +1529,12 @@ bool MatchASTVisitor::TraverseTypeLoc(TypeLoc TypeLocNode) {
// each TypeLoc.
match(TypeLocNode);
match(TypeLocNode.getType());
- return RecursiveASTVisitor<MatchASTVisitor>::TraverseTypeLoc(TypeLocNode);
+ return RecursiveASTVisitor<MatchASTVisitor>::TraverseTypeLoc(
+ TypeLocNode, TraverseQualifier);
}
-bool MatchASTVisitor::TraverseNestedNameSpecifier(NestedNameSpecifier *NNS) {
- match(*NNS);
+bool MatchASTVisitor::TraverseNestedNameSpecifier(NestedNameSpecifier NNS) {
+ match(NNS);
return RecursiveASTVisitor<MatchASTVisitor>::TraverseNestedNameSpecifier(NNS);
}
@@ -1537,7 +1548,7 @@ bool MatchASTVisitor::TraverseNestedNameSpecifierLoc(
// We only match the nested name specifier here (as opposed to traversing it)
// because the traversal is already done in the parallel "Loc"-hierarchy.
if (NNS.hasQualifier())
- match(*NNS.getNestedNameSpecifier());
+ match(NNS.getNestedNameSpecifier());
return
RecursiveASTVisitor<MatchASTVisitor>::TraverseNestedNameSpecifierLoc(NNS);
}
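Note: the hunks above move the matcher visitor onto the updated RecursiveASTVisitor interface, where type traversal carries a TraverseQualifier flag and NestedNameSpecifier is passed by value. A minimal sketch of a client visitor written against the new signatures (the class name is made up):

    #include "clang/AST/RecursiveASTVisitor.h"

    class ExampleVisitor : public clang::RecursiveASTVisitor<ExampleVisitor> {
    public:
      // The flag says whether the type's nested-name qualifier is traversed
      // together with the type itself.
      bool TraverseType(clang::QualType T, bool TraverseQualifier = true) {
        // ... client logic ...
        return RecursiveASTVisitor::TraverseType(T, TraverseQualifier);
      }

      // NestedNameSpecifier is now a value type rather than a pointer.
      bool TraverseNestedNameSpecifier(clang::NestedNameSpecifier NNS) {
        return RecursiveASTVisitor::TraverseNestedNameSpecifier(NNS);
      }
    };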
diff --git a/clang/lib/ASTMatchers/ASTMatchersInternal.cpp b/clang/lib/ASTMatchers/ASTMatchersInternal.cpp
index 80dc88881..653b381 100644
--- a/clang/lib/ASTMatchers/ASTMatchersInternal.cpp
+++ b/clang/lib/ASTMatchers/ASTMatchersInternal.cpp
@@ -755,6 +755,8 @@ const internal::VariadicDynCastAllOfMatcher<Decl, TypedefDecl> typedefDecl;
const internal::VariadicDynCastAllOfMatcher<Decl, TypedefNameDecl>
typedefNameDecl;
const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasDecl> typeAliasDecl;
+const internal::VariadicDynCastAllOfMatcher<Decl, UsingShadowDecl>
+ usingShadowDecl;
const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasTemplateDecl>
typeAliasTemplateDecl;
const internal::VariadicAllOfMatcher<Decl> decl;
@@ -808,8 +810,6 @@ const internal::VariadicDynCastAllOfMatcher<TypeLoc, ReferenceTypeLoc>
const internal::VariadicDynCastAllOfMatcher<TypeLoc,
TemplateSpecializationTypeLoc>
templateSpecializationTypeLoc;
-const internal::VariadicDynCastAllOfMatcher<TypeLoc, ElaboratedTypeLoc>
- elaboratedTypeLoc;
const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryExprOrTypeTraitExpr>
unaryExprOrTypeTraitExpr;
@@ -1103,7 +1103,6 @@ const AstTypeMatcher<TemplateSpecializationType> templateSpecializationType;
const AstTypeMatcher<UnaryTransformType> unaryTransformType;
const AstTypeMatcher<RecordType> recordType;
const AstTypeMatcher<TagType> tagType;
-const AstTypeMatcher<ElaboratedType> elaboratedType;
const AstTypeMatcher<UsingType> usingType;
const AstTypeMatcher<SubstTemplateTypeParmType> substTemplateTypeParmType;
const AstTypeMatcher<TemplateTypeParmType> templateTypeParmType;
diff --git a/clang/lib/ASTMatchers/Dynamic/Registry.cpp b/clang/lib/ASTMatchers/Dynamic/Registry.cpp
index 562df71..7f6a6aa 100644
--- a/clang/lib/ASTMatchers/Dynamic/Registry.cpp
+++ b/clang/lib/ASTMatchers/Dynamic/Registry.cpp
@@ -235,8 +235,6 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(designatorCountIs);
REGISTER_MATCHER(doStmt);
REGISTER_MATCHER(eachOf);
- REGISTER_MATCHER(elaboratedType);
- REGISTER_MATCHER(elaboratedTypeLoc);
REGISTER_MATCHER(usingType);
REGISTER_MATCHER(enumConstantDecl);
REGISTER_MATCHER(enumDecl);
@@ -341,7 +339,6 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(hasMemberName);
REGISTER_MATCHER(hasMethod);
REGISTER_MATCHER(hasName);
- REGISTER_MATCHER(hasNamedTypeLoc);
REGISTER_MATCHER(hasNullSelector);
REGISTER_MATCHER(hasObjectExpression);
REGISTER_MATCHER(hasOperands);
@@ -503,7 +500,6 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(memberHasSameNameAsBoundNode);
REGISTER_MATCHER(memberPointerType);
REGISTER_MATCHER(namedDecl);
- REGISTER_MATCHER(namesType);
REGISTER_MATCHER(namespaceAliasDecl);
REGISTER_MATCHER(namespaceDecl);
REGISTER_MATCHER(nestedNameSpecifier);
@@ -593,6 +589,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(typeLoc);
REGISTER_MATCHER(typedefDecl);
REGISTER_MATCHER(typedefNameDecl);
+ REGISTER_MATCHER(usingShadowDecl);
REGISTER_MATCHER(typedefType);
REGISTER_MATCHER(unaryExprOrTypeTraitExpr);
REGISTER_MATCHER(unaryOperator);
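Note: the matcher-surface changes above drop the elaboratedType/elaboratedTypeLoc matchers and register usingShadowDecl with the dynamic registry. A hedged example of the newly registered matcher in use (the binding name is arbitrary):

    using namespace clang::ast_matchers;
    // Matches each shadow declaration introduced by a using-declaration whose
    // target is a function.
    auto M = usingShadowDecl(hasTargetDecl(functionDecl())).bind("shadow");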
diff --git a/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp b/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp
index 256ea182..f14cb43 100644
--- a/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp
+++ b/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp
@@ -532,9 +532,11 @@ void Environment::initialize() {
} else if (auto *FieldBeingInitialized =
dyn_cast<FieldDecl>(Parent->getLambdaContextDecl())) {
// This is in a field initializer, rather than a method.
+ const RecordDecl *RD = FieldBeingInitialized->getParent();
+ const ASTContext &Ctx = RD->getASTContext();
+ CanQualType T = Ctx.getCanonicalTagType(RD);
setThisPointeeStorageLocation(
- cast<RecordStorageLocation>(createObject(QualType(
- FieldBeingInitialized->getParent()->getTypeForDecl(), 0))));
+ cast<RecordStorageLocation>(createObject(T)));
} else {
assert(false && "Unexpected this-capturing lambda context.");
}
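Note: this is the pattern used repeatedly in the rest of the patch for forming a type from a tag declaration; ASTContext::getCanonicalTagType replaces going through Decl::getTypeForDecl. A minimal sketch (the helper name is illustrative):

    // old: QualType(RD->getTypeForDecl(), /*Quals=*/0)
    // new: ask the ASTContext for the canonical tag type directly.
    clang::CanQualType typeFor(const clang::RecordDecl *RD) {
      return RD->getASTContext().getCanonicalTagType(RD);
    }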
diff --git a/clang/lib/Analysis/ThreadSafety.cpp b/clang/lib/Analysis/ThreadSafety.cpp
index c9fd9cc..026d030 100644
--- a/clang/lib/Analysis/ThreadSafety.cpp
+++ b/clang/lib/Analysis/ThreadSafety.cpp
@@ -1929,7 +1929,9 @@ void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
assert(inserted.second && "Are we visiting the same expression again?");
if (isa<CXXConstructExpr>(Exp))
Self = Placeholder;
- if (TagT->getDecl()->hasAttr<ScopedLockableAttr>())
+ if (TagT->getOriginalDecl()
+ ->getMostRecentDecl()
+ ->hasAttr<ScopedLockableAttr>())
Scp = CapabilityExpr(Placeholder, Exp->getType(), /*Neg=*/false);
}
diff --git a/clang/lib/Analysis/ThreadSafetyCommon.cpp b/clang/lib/Analysis/ThreadSafetyCommon.cpp
index ddbd0a9..f560dd8 100644
--- a/clang/lib/Analysis/ThreadSafetyCommon.cpp
+++ b/clang/lib/Analysis/ThreadSafetyCommon.cpp
@@ -84,8 +84,8 @@ static std::pair<StringRef, bool> classifyCapability(QualType QT) {
// which it is. The type should either be a record or a typedef, or a pointer
// or reference thereof.
if (const auto *RT = QT->getAs<RecordType>()) {
- if (const auto *RD = RT->getDecl())
- return classifyCapability(*RD);
+ if (const auto *RD = RT->getOriginalDecl())
+ return classifyCapability(*RD->getDefinitionOrSelf());
} else if (const auto *TT = QT->getAs<TypedefType>()) {
if (const auto *TD = TT->getDecl())
return classifyCapability(*TD);
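Note: the RecordType/EnumType accessor change above recurs throughout the remainder of this patch: getDecl() becomes getOriginalDecl(), with getDefinitionOrSelf() added where the code needs the definition. A minimal sketch of the new idiom (the helper name is illustrative):

    const clang::RecordDecl *definitionFor(const clang::RecordType *RT) {
      // getOriginalDecl() yields the declaration the type was written against;
      // getDefinitionOrSelf() walks to the definition when one exists.
      return RT->getOriginalDecl()->getDefinitionOrSelf();
    }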
diff --git a/clang/lib/Analysis/UnsafeBufferUsage.cpp b/clang/lib/Analysis/UnsafeBufferUsage.cpp
index 40dff7e..1521ada 100644
--- a/clang/lib/Analysis/UnsafeBufferUsage.cpp
+++ b/clang/lib/Analysis/UnsafeBufferUsage.cpp
@@ -182,18 +182,22 @@ public:
return DynamicRecursiveASTVisitor::TraverseUnaryExprOrTypeTraitExpr(Node);
}
- bool TraverseTypeOfExprTypeLoc(TypeOfExprTypeLoc Node) override {
+ bool TraverseTypeOfExprTypeLoc(TypeOfExprTypeLoc Node,
+ bool TraverseQualifier) override {
// Unevaluated context.
if (ignoreUnevaluatedContext)
return true;
- return DynamicRecursiveASTVisitor::TraverseTypeOfExprTypeLoc(Node);
+ return DynamicRecursiveASTVisitor::TraverseTypeOfExprTypeLoc(
+ Node, TraverseQualifier);
}
- bool TraverseDecltypeTypeLoc(DecltypeTypeLoc Node) override {
+ bool TraverseDecltypeTypeLoc(DecltypeTypeLoc Node,
+ bool TraverseQualifier) override {
// Unevaluated context.
if (ignoreUnevaluatedContext)
return true;
- return DynamicRecursiveASTVisitor::TraverseDecltypeTypeLoc(Node);
+ return DynamicRecursiveASTVisitor::TraverseDecltypeTypeLoc(
+ Node, TraverseQualifier);
}
bool TraverseCXXNoexceptExpr(CXXNoexceptExpr *Node) override {
@@ -1986,6 +1990,14 @@ public:
const auto *FD = dyn_cast<FunctionDecl>(CE->getDirectCallee());
if (!FD)
return false;
+
+ bool IsGlobalAndNotInAnyNamespace =
+ FD->isGlobal() && !FD->getEnclosingNamespaceContext()->isNamespace();
+
+ // A libc function must either be in the std:: namespace or a global
+ // function that is not in any namespace:
+ if (!FD->isInStdNamespace() && !IsGlobalAndNotInAnyNamespace)
+ return false;
auto isSingleStringLiteralArg = false;
if (CE->getNumArgs() == 1) {
isSingleStringLiteralArg =
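Note: the added guard only treats a callee as a libc function if it is in the std:: namespace or is a global function outside any namespace. Purely hypothetical declarations (made-up function name) showing how the three cases fall out:

    #include <cstddef>
    std::size_t copy_bytes(char *dst, const char *src, std::size_t n);   // global, no namespace: accepted
    namespace std {
    std::size_t copy_bytes(char *dst, const char *src, std::size_t n);   // in std::: accepted
    }
    namespace mylib {
    std::size_t copy_bytes(char *dst, const char *src, std::size_t n);   // neither: rejected
    }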
diff --git a/clang/lib/Basic/Targets/AVR.h b/clang/lib/Basic/Targets/AVR.h
index 75c969f..11aa844 100644
--- a/clang/lib/Basic/Targets/AVR.h
+++ b/clang/lib/Basic/Targets/AVR.h
@@ -57,7 +57,8 @@ public:
Int16Type = SignedInt;
Char32Type = UnsignedLong;
SigAtomicType = SignedChar;
- resetDataLayout("e-P1-p:16:8-i8:8-i16:8-i32:8-i64:8-f32:8-f64:8-n8-a:8");
+ resetDataLayout(
+ "e-P1-p:16:8-i8:8-i16:8-i32:8-i64:8-f32:8-f64:8-n8-n16:8-a:8");
}
void getTargetDefines(const LangOptions &Opts,
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index 7767bf4..16fc650 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -129,6 +129,24 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
return RValue::get(nullptr);
}
+ case Builtin::BI__builtin_assume_aligned: {
+ const Expr *ptrExpr = e->getArg(0);
+ mlir::Value ptrValue = emitScalarExpr(ptrExpr);
+ mlir::Value offsetValue =
+ (e->getNumArgs() > 2) ? emitScalarExpr(e->getArg(2)) : nullptr;
+
+ std::optional<llvm::APSInt> alignment =
+ e->getArg(1)->getIntegerConstantExpr(getContext());
+ assert(alignment.has_value() &&
+ "the second argument to __builtin_assume_aligned must be an "
+ "integral constant expression");
+
+ mlir::Value result =
+ emitAlignmentAssumption(ptrValue, ptrExpr, ptrExpr->getExprLoc(),
+ alignment->getSExtValue(), offsetValue);
+ return RValue::get(result);
+ }
+
case Builtin::BI__builtin_complex: {
mlir::Value real = emitScalarExpr(e->getArg(0));
mlir::Value imag = emitScalarExpr(e->getArg(1));
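Note: the new builtin handler emits a CIR alignment assumption for __builtin_assume_aligned. Source-level uses that exercise it (function names are made up):

    #include <cstddef>

    float sum16(const float *p, std::size_t n) {
      // Two-argument form: the second argument must be an integral constant
      // expression; here the caller promises that p is 16-byte aligned.
      const auto *q = static_cast<const float *>(__builtin_assume_aligned(p, 16));
      float s = 0.0f;
      for (std::size_t i = 0; i != n; ++i)
        s += q[i];
      return s;
    }

    const char *past_header(const char *p) {
      // Three-argument form: promises that p minus 4 bytes is 16-byte aligned.
      return static_cast<const char *>(__builtin_assume_aligned(p, 16, 4));
    }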
diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenCXXExpr.cpp
index 67d8988..bc30c7b 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCXXExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCXXExpr.cpp
@@ -75,7 +75,7 @@ static MemberCallInfo commonBuildCXXMemberOrOperatorCall(
RValue CIRGenFunction::emitCXXMemberOrOperatorMemberCallExpr(
const CallExpr *ce, const CXXMethodDecl *md, ReturnValueSlot returnValue,
- bool hasQualifier, NestedNameSpecifier *qualifier, bool isArrow,
+ bool hasQualifier, NestedNameSpecifier qualifier, bool isArrow,
const Expr *base) {
assert(isa<CXXMemberCallExpr>(ce) || isa<CXXOperatorCallExpr>(ce));
@@ -169,7 +169,7 @@ CIRGenFunction::emitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *e,
assert(md->isInstance() &&
"Trying to emit a member call expr on a static method!");
return emitCXXMemberOrOperatorMemberCallExpr(
- e, md, returnValue, /*HasQualifier=*/false, /*Qualifier=*/nullptr,
+ e, md, returnValue, /*HasQualifier=*/false, /*Qualifier=*/std::nullopt,
/*IsArrow=*/false, e->getArg(0));
}
diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp
index fc208ff..adf0ca1 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp
@@ -203,9 +203,9 @@ CIRGenTypes::arrangeCXXStructorDeclaration(GlobalDecl gd) {
/// when calling a method pointer.
CanQualType CIRGenTypes::deriveThisType(const CXXRecordDecl *rd,
const CXXMethodDecl *md) {
- QualType recTy;
+ CanQualType recTy;
if (rd) {
- recTy = getASTContext().getTagDeclType(rd)->getCanonicalTypeInternal();
+ recTy = getASTContext().getCanonicalTagType(rd);
} else {
// This can happen with the MS ABI. It shouldn't need anything more than
// setting recTy to VoidTy here, but we're flagging it for now because we
@@ -215,9 +215,9 @@ CanQualType CIRGenTypes::deriveThisType(const CXXRecordDecl *rd,
}
if (md)
- recTy = getASTContext().getAddrSpaceQualType(
- recTy, md->getMethodQualifiers().getAddressSpace());
- return getASTContext().getPointerType(CanQualType::CreateUnsafe(recTy));
+ recTy = CanQualType::CreateUnsafe(getASTContext().getAddrSpaceQualType(
+ recTy, md->getMethodQualifiers().getAddressSpace()));
+ return getASTContext().getPointerType(recTy);
}
/// Arrange the CIR function layout for a value of the given function type, on
@@ -267,7 +267,10 @@ void CIRGenFunction::emitDelegateCallArg(CallArgList &args,
// Deactivate the cleanup for the callee-destructed param that was pushed.
assert(!cir::MissingFeatures::thunks());
if (type->isRecordType() &&
- type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee() &&
+ type->castAs<RecordType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->isParamDestroyedInCallee() &&
param->needsDestruction(getContext())) {
cgm.errorNYI(param->getSourceRange(),
"emitDelegateCallArg: callee-destructed param");
@@ -667,8 +670,10 @@ void CIRGenFunction::emitCallArg(CallArgList &args, const clang::Expr *e,
// In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
// However, we still have to push an EH-only cleanup in case we unwind before
// we make it to the call.
- if (argType->isRecordType() &&
- argType->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
+ if (argType->isRecordType() && argType->castAs<RecordType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->isParamDestroyedInCallee()) {
assert(!cir::MissingFeatures::msabi());
cgm.errorNYI(e->getSourceRange(), "emitCallArg: msabi is NYI");
}
diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp
index 72b9d17..46cf2a1 100644
--- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp
@@ -86,7 +86,7 @@ static void emitMemberInitializer(CIRGenFunction &cgf,
QualType fieldType = field->getType();
mlir::Value thisPtr = cgf.loadCXXThis();
- QualType recordTy = cgf.getContext().getTypeDeclType(classDecl);
+ CanQualType recordTy = cgf.getContext().getCanonicalTagType(classDecl);
// If a base constructor is being emitted, create an LValue that has the
// non-virtual alignment.
@@ -121,7 +121,8 @@ static void emitMemberInitializer(CIRGenFunction &cgf,
static bool isInitializerOfDynamicClass(const CXXCtorInitializer *baseInit) {
const Type *baseType = baseInit->getBaseClass();
const auto *baseClassDecl =
- cast<CXXRecordDecl>(baseType->castAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(baseType->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
return baseClassDecl->isDynamicClass();
}
@@ -161,7 +162,8 @@ void CIRGenFunction::emitBaseInitializer(mlir::Location loc,
const Type *baseType = baseInit->getBaseClass();
const auto *baseClassDecl =
- cast<CXXRecordDecl>(baseType->castAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(baseType->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
bool isBaseVirtual = baseInit->isBaseVirtual();
@@ -377,7 +379,7 @@ void CIRGenFunction::emitCXXAggrConstructorCall(
//
// Note that these are complete objects and so we don't need to
// use the non-virtual size or alignment.
- QualType type = getContext().getTypeDeclType(ctor->getParent());
+ CanQualType type = getContext().getCanonicalTagType(ctor->getParent());
CharUnits eltAlignment = arrayBase.getAlignment().alignmentOfArrayElement(
getContext().getTypeSizeInChars(type));
@@ -484,7 +486,8 @@ void CIRGenFunction::emitImplicitAssignmentOperatorBody(FunctionArgList &args) {
void CIRGenFunction::destroyCXXObject(CIRGenFunction &cgf, Address addr,
QualType type) {
const RecordType *rtype = type->castAs<RecordType>();
- const CXXRecordDecl *record = cast<CXXRecordDecl>(rtype->getDecl());
+ const CXXRecordDecl *record =
+ cast<CXXRecordDecl>(rtype->getOriginalDecl())->getDefinitionOrSelf();
const CXXDestructorDecl *dtor = record->getDestructor();
// TODO(cir): Unlike traditional codegen, CIRGen should actually emit trivial
// dtors which shall be removed on later CIR passes. However, only remove this
diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
index be21ce9..b8663eb 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
@@ -16,6 +16,7 @@
//
//===----------------------------------------------------------------------===//
+#include "CIRGenCleanup.h"
#include "CIRGenFunction.h"
#include "clang/CIR/MissingFeatures.h"
@@ -33,6 +34,52 @@ using namespace clang::CIRGen;
void EHScopeStack::Cleanup::anchor() {}
+/// Push an entry of the given size onto this protected-scope stack.
+char *EHScopeStack::allocate(size_t size) {
+ size = llvm::alignTo(size, ScopeStackAlignment);
+ if (!startOfBuffer) {
+ unsigned capacity = llvm::PowerOf2Ceil(std::max(size, 1024ul));
+ startOfBuffer = std::make_unique<char[]>(capacity);
+ startOfData = endOfBuffer = startOfBuffer.get() + capacity;
+ } else if (static_cast<size_t>(startOfData - startOfBuffer.get()) < size) {
+ unsigned currentCapacity = endOfBuffer - startOfBuffer.get();
+ unsigned usedCapacity =
+ currentCapacity - (startOfData - startOfBuffer.get());
+ unsigned requiredCapacity = usedCapacity + size;
+ // We know from the 'else if' condition that requiredCapacity is greater
+ // than currentCapacity.
+ unsigned newCapacity = llvm::PowerOf2Ceil(requiredCapacity);
+
+ std::unique_ptr<char[]> newStartOfBuffer =
+ std::make_unique<char[]>(newCapacity);
+ char *newEndOfBuffer = newStartOfBuffer.get() + newCapacity;
+ char *newStartOfData = newEndOfBuffer - usedCapacity;
+ memcpy(newStartOfData, startOfData, usedCapacity);
+ startOfBuffer.swap(newStartOfBuffer);
+ endOfBuffer = newEndOfBuffer;
+ startOfData = newStartOfData;
+ }
+
+ assert(startOfBuffer.get() + size <= startOfData);
+ startOfData -= size;
+ return startOfData;
+}
+
+void EHScopeStack::deallocate(size_t size) {
+ startOfData += llvm::alignTo(size, ScopeStackAlignment);
+}
+
+void *EHScopeStack::pushCleanup(CleanupKind kind, size_t size) {
+ char *buffer = allocate(size);
+
+ // When the full implementation is upstreamed, this will allocate
+ // extra memory for and construct a wrapper object that is used to
+ // manage the cleanup generation.
+ assert(!cir::MissingFeatures::ehCleanupScope());
+
+ return buffer;
+}
+
static mlir::Block *getCurCleanupBlock(CIRGenFunction &cgf) {
mlir::OpBuilder::InsertionGuard guard(cgf.getBuilder());
mlir::Block *cleanup =
@@ -44,26 +91,34 @@ static mlir::Block *getCurCleanupBlock(CIRGenFunction &cgf) {
/// current insertion point is threaded through the cleanup, as are
/// any branch fixups on the cleanup.
void CIRGenFunction::popCleanupBlock() {
- assert(!ehStack.cleanupStack.empty() && "cleanup stack is empty!");
+ assert(!ehStack.empty() && "cleanup stack is empty!");
+
+ // The memory for the cleanup continues to be owned by the EHScopeStack
+ // allocator, so we just destroy the object rather than attempting to
+ // free it.
+ EHScopeStack::Cleanup &cleanup = *ehStack.begin();
+
+ // The eventual implementation here will use the EHCleanupScope helper class.
+ assert(!cir::MissingFeatures::ehCleanupScope());
+
mlir::OpBuilder::InsertionGuard guard(builder);
- std::unique_ptr<EHScopeStack::Cleanup> cleanup =
- ehStack.cleanupStack.pop_back_val();
assert(!cir::MissingFeatures::ehCleanupFlags());
mlir::Block *cleanupEntry = getCurCleanupBlock(*this);
builder.setInsertionPointToEnd(cleanupEntry);
- cleanup->emit(*this);
+ cleanup.emit(*this);
+
+ ehStack.deallocate(cleanup.getSize());
}
/// Pops cleanup blocks until the given savepoint is reached.
-void CIRGenFunction::popCleanupBlocks(size_t oldCleanupStackDepth) {
+void CIRGenFunction::popCleanupBlocks(
+ EHScopeStack::stable_iterator oldCleanupStackDepth) {
assert(!cir::MissingFeatures::ehstackBranches());
- assert(ehStack.getStackDepth() >= oldCleanupStackDepth);
-
// Pop cleanup blocks until we reach the base stack depth for the
// current scope.
- while (ehStack.getStackDepth() > oldCleanupStackDepth) {
+ while (ehStack.stable_begin() != oldCleanupStackDepth) {
popCleanupBlock();
}
}
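Note: with the new allocator, cleanups are placement-constructed directly into the scope-stack buffer, emitted in place, and reclaimed by size rather than deleted. A standalone toy model of that ownership scheme (not the real classes):

    #include <cstddef>
    #include <new>

    struct Cleanup {
      virtual void emit() = 0;
      virtual std::size_t getSize() const = 0;
    };

    struct NoteCleanup final : Cleanup {
      void emit() override {}
      std::size_t getSize() const override { return sizeof(NoteCleanup); }
    };

    int main() {
      alignas(8) char buffer[64];            // stands in for the scope-stack buffer
      Cleanup *c = new (buffer) NoteCleanup; // pushCleanup<T>(...): construct in place
      c->emit();                             // popCleanupBlock(): emit from the buffer
      std::size_t reclaimed = c->getSize();  // then deallocate(reclaimed) bumps the pointer back
      (void)reclaimed;
      return 0;
    }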
diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.h b/clang/lib/CIR/CodeGen/CIRGenCleanup.h
new file mode 100644
index 0000000..7361c8c
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.h
@@ -0,0 +1,43 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes support the generation of CIR for cleanups, initially based
+// on LLVM IR cleanup handling, but ought to change as CIR evolves.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_LIB_CIR_CODEGEN_CIRGENCLEANUP_H
+#define CLANG_LIB_CIR_CODEGEN_CIRGENCLEANUP_H
+
+#include "EHScopeStack.h"
+
+namespace clang::CIRGen {
+
+/// A non-stable pointer into the scope stack.
+class EHScopeStack::iterator {
+ char *ptr = nullptr;
+
+ friend class EHScopeStack;
+ explicit iterator(char *ptr) : ptr(ptr) {}
+
+public:
+ iterator() = default;
+
+ EHScopeStack::Cleanup *get() const {
+ return reinterpret_cast<EHScopeStack::Cleanup *>(ptr);
+ }
+
+ EHScopeStack::Cleanup &operator*() const { return *get(); }
+};
+
+inline EHScopeStack::iterator EHScopeStack::begin() const {
+ return iterator(startOfData);
+}
+
+} // namespace clang::CIRGen
+#endif // CLANG_LIB_CIR_CODEGEN_CIRGENCLEANUP_H
diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
index 78d375c..715d101 100644
--- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
@@ -667,6 +667,12 @@ struct DestroyObject final : EHScopeStack::Cleanup {
void emit(CIRGenFunction &cgf) override {
cgf.emitDestroy(addr, type, destroyer);
}
+
+ // This is a placeholder until EHCleanupScope is implemented.
+ size_t getSize() const override {
+ assert(!cir::MissingFeatures::ehCleanupScope());
+ return sizeof(DestroyObject);
+ }
};
} // namespace
diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index a0ff08e..c437b14 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -1013,7 +1013,9 @@ LValue CIRGenFunction::emitCastLValue(const CastExpr *e) {
case CK_DerivedToBase: {
const auto *derivedClassTy =
e->getSubExpr()->getType()->castAs<clang::RecordType>();
- auto *derivedClassDecl = cast<CXXRecordDecl>(derivedClassTy->getDecl());
+ auto *derivedClassDecl =
+ cast<CXXRecordDecl>(derivedClassTy->getOriginalDecl())
+ ->getDefinitionOrSelf();
LValue lv = emitLValue(e->getSubExpr());
Address thisAddr = lv.getAddress();
@@ -1167,9 +1169,10 @@ static void pushTemporaryCleanup(CIRGenFunction &cgf,
->getBaseElementTypeUnsafe()
->getAs<clang::RecordType>()) {
// Get the destructor for the reference temporary.
- auto *classDecl = cast<CXXRecordDecl>(rt->getDecl());
+ auto *classDecl = cast<CXXRecordDecl>(rt->getOriginalDecl());
if (!classDecl->hasTrivialDestructor())
- referenceTemporaryDtor = classDecl->getDestructor();
+ referenceTemporaryDtor =
+ classDecl->getDefinitionOrSelf()->getDestructor();
}
if (!referenceTemporaryDtor)
@@ -1831,7 +1834,7 @@ RValue CIRGenFunction::emitCXXMemberCallExpr(const CXXMemberCallExpr *ce,
}
bool hasQualifier = me->hasQualifier();
- NestedNameSpecifier *qualifier = hasQualifier ? me->getQualifier() : nullptr;
+ NestedNameSpecifier qualifier = me->getQualifier();
bool isArrow = me->isArrow();
const Expr *base = me->getBase();
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
index 51aab95..1fda848 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
@@ -376,7 +376,10 @@ void AggExprEmitter::visitCXXParenListOrInitListExpr(
// the disadvantage is that the generated code is more difficult for
// the optimizer, especially with bitfields.
unsigned numInitElements = args.size();
- RecordDecl *record = e->getType()->castAs<RecordType>()->getDecl();
+ RecordDecl *record = e->getType()
+ ->castAs<RecordType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf();
// We'll need to enter cleanup scopes in case any of the element
// initializers throws an exception.
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
index c22cf60..cba06a1 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
@@ -171,6 +171,10 @@ public:
mlir::Value VisitBinSubAssign(const CompoundAssignOperator *e) {
return emitCompoundAssign(e, &ComplexExprEmitter::emitBinSub);
}
+
+ mlir::Value VisitBinMulAssign(const CompoundAssignOperator *e) {
+ return emitCompoundAssign(e, &ComplexExprEmitter::emitBinMul);
+ }
};
} // namespace
@@ -776,7 +780,7 @@ getComplexRangeAttr(LangOptions::ComplexRangeKind range) {
case LangOptions::CX_Basic:
return cir::ComplexRangeKind::Basic;
case LangOptions::CX_None:
- // The default value for ComplexRangeKind is Full is no option is selected
+ // The default value for ComplexRangeKind is Full if no option is selected
return cir::ComplexRangeKind::Full;
}
}
@@ -813,7 +817,7 @@ using CompoundFunc =
static CompoundFunc getComplexOp(BinaryOperatorKind op) {
switch (op) {
case BO_MulAssign:
- llvm_unreachable("getComplexOp: BO_MulAssign");
+ return &ComplexExprEmitter::emitBinMul;
case BO_DivAssign:
llvm_unreachable("getComplexOp: BO_DivAssign");
case BO_SubAssign:
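Note: with emitBinMul wired into VisitBinMulAssign and getComplexOp, compound multiply-assignment on complex operands no longer hits llvm_unreachable. A minimal source example (clang's _Complex extension in C++):

    float _Complex scale(float _Complex z, float _Complex w) {
      z *= w;  // now routed through emitCompoundAssign(..., emitBinMul)
      return z;
    }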
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
index 5b3bf85..21e9a8a 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
@@ -591,7 +591,8 @@ mlir::Attribute ConstantEmitter::tryEmitPrivateForVarInit(const VarDecl &d) {
// be a problem for the near future.
if (cd->isTrivial() && cd->isDefaultConstructor()) {
const auto *cxxrd =
- cast<CXXRecordDecl>(ty->getAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(ty->getAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
if (cxxrd->getNumBases() != 0) {
// There may not be anything additional to do here, but this will
// force us to pause and test this path when it is supported.
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
index e93dc0b..03555a5 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
@@ -28,8 +28,6 @@ CIRGenFunction::CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder,
bool suppressNewContext)
: CIRGenTypeCache(cgm), cgm{cgm}, builder(builder) {
ehStack.setCGF(this);
- currentCleanupStackDepth = 0;
- assert(ehStack.getStackDepth() == 0);
}
CIRGenFunction::~CIRGenFunction() {}
@@ -358,7 +356,8 @@ static bool mayDropFunctionReturn(const ASTContext &astContext,
// destructor or a non-trivially copyable type.
if (const RecordType *recordType =
returnType.getCanonicalType()->getAs<RecordType>()) {
- if (const auto *classDecl = dyn_cast<CXXRecordDecl>(recordType->getDecl()))
+ if (const auto *classDecl =
+ dyn_cast<CXXRecordDecl>(recordType->getOriginalDecl()))
return classDecl->hasTrivialDestructor();
}
return returnType.isTriviallyCopyableType(astContext);
@@ -409,6 +408,8 @@ void CIRGenFunction::startFunction(GlobalDecl gd, QualType returnType,
const auto *fd = dyn_cast_or_null<FunctionDecl>(d);
curFuncDecl = d->getNonClosureContext();
+ prologueCleanupDepth = ehStack.stable_begin();
+
mlir::Block *entryBB = &fn.getBlocks().front();
builder.setInsertionPointToStart(entryBB);
@@ -475,11 +476,11 @@ void CIRGenFunction::finishFunction(SourceLocation endLoc) {
// important to do this before we enter the return block or return
// edges will be *really* confused.
// TODO(cir): Use prologueCleanupDepth here.
- bool hasCleanups = ehStack.getStackDepth() != currentCleanupStackDepth;
+ bool hasCleanups = ehStack.stable_begin() != prologueCleanupDepth;
if (hasCleanups) {
assert(!cir::MissingFeatures::generateDebugInfo());
// FIXME(cir): should we clearInsertionPoint? breaks many testcases
- popCleanupBlocks(currentCleanupStackDepth);
+ popCleanupBlocks(prologueCleanupDepth);
}
}
@@ -827,7 +828,9 @@ void CIRGenFunction::emitNullInitialization(mlir::Location loc, Address destPtr,
// Ignore empty classes in C++.
if (getLangOpts().CPlusPlus) {
if (const RecordType *rt = ty->getAs<RecordType>()) {
- if (cast<CXXRecordDecl>(rt->getDecl())->isEmpty())
+ if (cast<CXXRecordDecl>(rt->getOriginalDecl())
+ ->getDefinitionOrSelf()
+ ->isEmpty())
return;
}
}
@@ -930,6 +933,23 @@ CIRGenFunction::emitArrayLength(const clang::ArrayType *origArrayType,
return builder.getConstInt(*currSrcLoc, SizeTy, countFromCLAs);
}
+mlir::Value CIRGenFunction::emitAlignmentAssumption(
+ mlir::Value ptrValue, QualType ty, SourceLocation loc,
+ SourceLocation assumptionLoc, int64_t alignment, mlir::Value offsetValue) {
+ assert(!cir::MissingFeatures::sanitizers());
+ return cir::AssumeAlignedOp::create(builder, getLoc(assumptionLoc), ptrValue,
+ alignment, offsetValue);
+}
+
+mlir::Value CIRGenFunction::emitAlignmentAssumption(
+ mlir::Value ptrValue, const Expr *expr, SourceLocation assumptionLoc,
+ int64_t alignment, mlir::Value offsetValue) {
+ QualType ty = expr->getType();
+ SourceLocation loc = expr->getExprLoc();
+ return emitAlignmentAssumption(ptrValue, ty, loc, assumptionLoc, alignment,
+ offsetValue);
+}
+
// TODO(cir): Most of this function can be shared between CIRGen
// and traditional LLVM codegen
void CIRGenFunction::emitVariablyModifiedType(QualType type) {
@@ -978,10 +998,6 @@ void CIRGenFunction::emitVariablyModifiedType(QualType type) {
case Type::BitInt:
llvm_unreachable("type class is never variably-modified!");
- case Type::Elaborated:
- type = cast<clang::ElaboratedType>(ty)->getNamedType();
- break;
-
case Type::Adjusted:
type = cast<clang::AdjustedType>(ty)->getAdjustedType();
break;
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 2e60cfc..065039e 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -601,9 +601,13 @@ public:
FunctionArgList args, clang::SourceLocation loc,
clang::SourceLocation startLoc);
+ /// The cleanup depth enclosing all the cleanups associated with the
+ /// parameters.
+ EHScopeStack::stable_iterator prologueCleanupDepth;
+
/// Takes the old cleanup stack size and emits the cleanup blocks
/// that have been added.
- void popCleanupBlocks(size_t oldCleanupStackDepth);
+ void popCleanupBlocks(EHScopeStack::stable_iterator oldCleanupStackDepth);
void popCleanupBlock();
/// Push a cleanup to be run at the end of the current full-expression. Safe
@@ -622,7 +626,7 @@ public:
/// Enters a new scope for capturing cleanups, all of which
/// will be executed once the scope is exited.
class RunCleanupsScope {
- size_t cleanupStackDepth, oldCleanupStackDepth;
+ EHScopeStack::stable_iterator cleanupStackDepth, oldCleanupStackDepth;
protected:
bool performCleanup;
@@ -638,7 +642,7 @@ public:
/// Enter a new cleanup scope.
explicit RunCleanupsScope(CIRGenFunction &cgf)
: performCleanup(true), cgf(cgf) {
- cleanupStackDepth = cgf.ehStack.getStackDepth();
+ cleanupStackDepth = cgf.ehStack.stable_begin();
oldCleanupStackDepth = cgf.currentCleanupStackDepth;
cgf.currentCleanupStackDepth = cleanupStackDepth;
}
@@ -663,7 +667,7 @@ public:
};
// Cleanup stack depth of the RunCleanupsScope that was pushed most recently.
- size_t currentCleanupStackDepth;
+ EHScopeStack::stable_iterator currentCleanupStackDepth = ehStack.stable_end();
public:
/// Represents a scope, including function bodies, compound statements, and
@@ -825,6 +829,18 @@ public:
/// ----------------------
/// CIR emit functions
/// ----------------------
+public:
+ mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, QualType ty,
+ SourceLocation loc,
+ SourceLocation assumptionLoc,
+ int64_t alignment,
+ mlir::Value offsetValue = nullptr);
+
+ mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, const Expr *expr,
+ SourceLocation assumptionLoc,
+ int64_t alignment,
+ mlir::Value offsetValue = nullptr);
+
private:
void emitAndUpdateRetAlloca(clang::QualType type, mlir::Location loc,
clang::CharUnits alignment);
@@ -1007,7 +1023,7 @@ public:
RValue emitCXXMemberOrOperatorMemberCallExpr(
const clang::CallExpr *ce, const clang::CXXMethodDecl *md,
ReturnValueSlot returnValue, bool hasQualifier,
- clang::NestedNameSpecifier *qualifier, bool isArrow,
+ clang::NestedNameSpecifier qualifier, bool isArrow,
const clang::Expr *base);
mlir::Value emitCXXNewExpr(const CXXNewExpr *e);
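Note: RunCleanupsScope and the cleanup-depth bookkeeping now record stable_iterator values instead of a size_t depth. A hedged usage sketch of the scope guard (the surrounding helper is hypothetical):

    void emitWithCleanups(clang::CIRGen::CIRGenFunction &cgf) {
      clang::CIRGen::CIRGenFunction::RunCleanupsScope scope(cgf);
      // ... emit code that may push cleanups ...
    } // on scope exit, cleanups pushed since construction are popped and emitted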
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
index 425250d..06b030f 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
@@ -997,7 +997,7 @@ static bool isVarDeclStrongDefinition(const ASTContext &astContext,
return true;
if (const auto *rt = varType->getAs<RecordType>()) {
- const RecordDecl *rd = rt->getDecl();
+ const RecordDecl *rd = rt->getOriginalDecl()->getDefinitionOrSelf();
for (const FieldDecl *fd : rd->fields()) {
if (fd->isBitField())
continue;
@@ -1365,6 +1365,21 @@ void CIRGenModule::emitTopLevelDecl(Decl *decl) {
assert(!cir::MissingFeatures::generateDebugInfo());
assert(!cir::MissingFeatures::cxxRecordStaticMembers());
break;
+
+ case Decl::FileScopeAsm:
+ // File-scope asm is ignored during device-side CUDA compilation.
+ if (langOpts.CUDA && langOpts.CUDAIsDevice)
+ break;
+ // File-scope asm is ignored during device-side OpenMP compilation.
+ if (langOpts.OpenMPIsTargetDevice)
+ break;
+ // File-scope asm is ignored during device-side SYCL compilation.
+ if (langOpts.SYCLIsDevice)
+ break;
+ auto *file_asm = cast<FileScopeAsmDecl>(decl);
+ std::string line = file_asm->getAsmString();
+ globalScopeAsm.push_back(builder.getStringAttr(line));
+ break;
}
}
@@ -1978,6 +1993,9 @@ void CIRGenModule::release() {
emitDeferred();
applyReplacements();
+ theModule->setAttr(cir::CIRDialect::getModuleLevelAsmAttrName(),
+ builder.getArrayAttr(globalScopeAsm));
+
// There's a lot of code that is not implemented yet.
assert(!cir::MissingFeatures::cgmRelease());
}
@@ -2048,8 +2066,10 @@ CharUnits CIRGenModule::computeNonVirtualBaseClassOffset(
// Get the layout.
const ASTRecordLayout &layout = astContext.getASTRecordLayout(rd);
- const auto *baseDecl = cast<CXXRecordDecl>(
- base->getType()->castAs<clang::RecordType>()->getDecl());
+ const auto *baseDecl =
+ cast<CXXRecordDecl>(
+ base->getType()->castAs<clang::RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
// Add the offset.
offset += layout.getBaseClassOffset(baseDecl);
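Note: emitTopLevelDecl now collects file-scope asm into a module-level attribute (and skips it for device-side CUDA, OpenMP, and SYCL compiles). An input that reaches the new case (the symbol name is made up):

    __asm__(".globl example_marker\n"
            "example_marker:");
    int guarded_by_marker = 0;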
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h
index 5d07d38..163a0fc 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.h
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.h
@@ -90,6 +90,8 @@ private:
/// for FunctionDecls's.
CIRGenFunction *curCGF = nullptr;
+ llvm::SmallVector<mlir::Attribute> globalScopeAsm;
+
public:
mlir::ModuleOp getModule() const { return theModule; }
CIRGenBuilderTy &getBuilder() { return builder; }
diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
index 50642e7..332babd 100644
--- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
@@ -412,7 +412,7 @@ mlir::LogicalResult CIRGenFunction::emitReturnStmt(const ReturnStmt &s) {
auto *retBlock = curLexScope->getOrCreateRetBlock(*this, loc);
// This should emit a branch through the cleanup block if one exists.
builder.create<cir::BrOp>(loc, retBlock);
- if (ehStack.getStackDepth() != currentCleanupStackDepth)
+ if (ehStack.stable_begin() != currentCleanupStackDepth)
cgm.errorNYI(s.getSourceRange(), "return with cleanup stack");
builder.createBlock(builder.getBlock()->getParent());
diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp
index 3e07f6d..2084b6d 100644
--- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp
@@ -103,7 +103,8 @@ std::string CIRGenTypes::getRecordTypeName(const clang::RecordDecl *recordDecl,
policy.SuppressTagKeyword = true;
if (recordDecl->getIdentifier())
- astContext.getRecordType(recordDecl).print(outStream, policy);
+ QualType(astContext.getCanonicalTagType(recordDecl))
+ .print(outStream, policy);
else if (auto *typedefNameDecl = recordDecl->getTypedefNameForAnonDecl())
typedefNameDecl->printQualifiedName(outStream, policy);
else
@@ -138,7 +139,7 @@ isSafeToConvert(const RecordDecl *rd, CIRGenTypes &cgt,
if (!alreadyChecked.insert(rd).second)
return true;
- const Type *key = cgt.getASTContext().getTagDeclType(rd).getTypePtr();
+ const Type *key = cgt.getASTContext().getCanonicalTagType(rd).getTypePtr();
// If this type is already laid out, converting it is a noop.
if (cgt.isRecordLayoutComplete(key))
@@ -182,7 +183,8 @@ isSafeToConvert(QualType qt, CIRGenTypes &cgt,
// If this is a record, check it.
if (const auto *rt = qt->getAs<RecordType>())
- return isSafeToConvert(rt->getDecl(), cgt, alreadyChecked);
+ return isSafeToConvert(rt->getOriginalDecl()->getDefinitionOrSelf(), cgt,
+ alreadyChecked);
// If this is an array, check the elements, which are embedded inline.
if (const auto *at = cgt.getASTContext().getAsArrayType(qt))
@@ -210,7 +212,7 @@ static bool isSafeToConvert(const RecordDecl *rd, CIRGenTypes &cgt) {
mlir::Type CIRGenTypes::convertRecordDeclType(const clang::RecordDecl *rd) {
// TagDecl's are not necessarily unique, instead use the (clang) type
// connected to the decl.
- const Type *key = astContext.getTagDeclType(rd).getTypePtr();
+ const Type *key = astContext.getCanonicalTagType(rd).getTypePtr();
cir::RecordType entry = recordDeclTypes[key];
// If we don't have an entry for this record yet, create one.
@@ -242,7 +244,10 @@ mlir::Type CIRGenTypes::convertRecordDeclType(const clang::RecordDecl *rd) {
for (const auto &base : cxxRecordDecl->bases()) {
if (base.isVirtual())
continue;
- convertRecordDeclType(base.getType()->castAs<RecordType>()->getDecl());
+ convertRecordDeclType(base.getType()
+ ->castAs<RecordType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf());
}
}
@@ -275,7 +280,8 @@ mlir::Type CIRGenTypes::convertType(QualType type) {
// Process record types before the type cache lookup.
if (const auto *recordType = dyn_cast<RecordType>(type))
- return convertRecordDeclType(recordType->getDecl());
+ return convertRecordDeclType(
+ recordType->getOriginalDecl()->getDefinitionOrSelf());
// Has the type already been processed?
TypeCacheTy::iterator tci = typeCache.find(ty);
@@ -457,7 +463,8 @@ mlir::Type CIRGenTypes::convertType(QualType type) {
}
case Type::Enum: {
- const EnumDecl *ed = cast<EnumType>(ty)->getDecl();
+ const EnumDecl *ed =
+ cast<EnumType>(ty)->getOriginalDecl()->getDefinitionOrSelf();
if (auto integerType = ed->getIntegerType(); !integerType.isNull())
return convertType(integerType);
// Return a placeholder 'i32' type. This can be changed later when the
@@ -516,7 +523,7 @@ mlir::Type CIRGenTypes::convertTypeForMem(clang::QualType qualType,
/// Return record layout info for the given record decl.
const CIRGenRecordLayout &
CIRGenTypes::getCIRGenRecordLayout(const RecordDecl *rd) {
- const auto *key = astContext.getTagDeclType(rd).getTypePtr();
+ const auto *key = astContext.getCanonicalTagType(rd).getTypePtr();
// If we have already computed the layout, return it.
auto it = cirGenRecordLayouts.find(key);
@@ -548,7 +555,7 @@ bool CIRGenTypes::isZeroInitializable(clang::QualType t) {
}
if (const RecordType *rt = t->getAs<RecordType>()) {
- const RecordDecl *rd = rt->getDecl();
+ const RecordDecl *rd = rt->getOriginalDecl()->getDefinitionOrSelf();
return isZeroInitializable(rd);
}
@@ -623,8 +630,10 @@ void CIRGenTypes::updateCompletedType(const TagDecl *td) {
// declaration of enums, and C doesn't allow an incomplete forward
// declaration with a non-default type.
assert(
- !typeCache.count(ed->getTypeForDecl()) ||
- (convertType(ed->getIntegerType()) == typeCache[ed->getTypeForDecl()]));
+ !typeCache.count(
+ ed->getASTContext().getCanonicalTagType(ed)->getTypePtr()) ||
+ (convertType(ed->getIntegerType()) ==
+ typeCache[ed->getASTContext().getCanonicalTagType(ed)->getTypePtr()]));
// If necessary, provide the full definition of a type only used with a
// declaration so far.
assert(!cir::MissingFeatures::generateDebugInfo());
@@ -639,7 +648,7 @@ void CIRGenTypes::updateCompletedType(const TagDecl *td) {
// Only complete if we converted it already. If we haven't converted it yet,
// we'll just do it lazily.
- if (recordDeclTypes.count(astContext.getTagDeclType(rd).getTypePtr()))
+ if (recordDeclTypes.count(astContext.getCanonicalTagType(rd).getTypePtr()))
convertRecordDeclType(rd);
// If necessary, provide the full definition of a type only used with a
diff --git a/clang/lib/CIR/CodeGen/EHScopeStack.h b/clang/lib/CIR/CodeGen/EHScopeStack.h
index 22750ac..47478f6 100644
--- a/clang/lib/CIR/CodeGen/EHScopeStack.h
+++ b/clang/lib/CIR/CodeGen/EHScopeStack.h
@@ -42,7 +42,47 @@ enum CleanupKind : unsigned {
/// A stack of scopes which respond to exceptions, including cleanups
/// and catch blocks.
class EHScopeStack {
+ friend class CIRGenFunction;
+
public:
+ // TODO(ogcg): Switch to alignof(uint64_t) instead of 8
+ enum { ScopeStackAlignment = 8 };
+
+ /// A saved depth on the scope stack. This is necessary because
+ /// pushing scopes onto the stack invalidates iterators.
+ class stable_iterator {
+ friend class EHScopeStack;
+
+ /// Offset from startOfData to endOfBuffer.
+ ptrdiff_t size = -1;
+
+ explicit stable_iterator(ptrdiff_t size) : size(size) {}
+
+ public:
+ static stable_iterator invalid() { return stable_iterator(-1); }
+ stable_iterator() = default;
+
+ bool isValid() const { return size >= 0; }
+
+ /// Returns true if this scope encloses I.
+ /// Returns false if I is invalid.
+ /// This scope must be valid.
+ bool encloses(stable_iterator other) const { return size <= other.size; }
+
+ /// Returns true if this scope strictly encloses I: that is,
+ /// if it encloses I and is not I.
+ /// Returns false if I is invalid.
+ /// This scope must be valid.
+ bool strictlyEncloses(stable_iterator I) const { return size < I.size; }
+
+ friend bool operator==(stable_iterator A, stable_iterator B) {
+ return A.size == B.size;
+ }
+ friend bool operator!=(stable_iterator A, stable_iterator B) {
+ return A.size != B.size;
+ }
+ };
+
/// Information for lazily generating a cleanup. Subclasses must be
/// POD-like: cleanups will not be destructed, and they will be
/// allocated on the cleanup stack and freely copied and moved
@@ -68,30 +108,75 @@ public:
///
// \param flags cleanup kind.
virtual void emit(CIRGenFunction &cgf) = 0;
- };
- // Classic codegen has a finely tuned custom allocator and a complex stack
- // management scheme. We'll probably eventually want to find a way to share
- // that implementation. For now, we will use a very simplified implementation
- // to get cleanups working.
- llvm::SmallVector<std::unique_ptr<Cleanup>, 8> cleanupStack;
+ // This is a placeholder until EHScope is implemented.
+ virtual size_t getSize() const = 0;
+ };
private:
+ // The implementation for this class is in CIRGenCleanup.h and
+ // CIRGenCleanup.cpp; the definition is here because it's used as a
+ // member of CIRGenFunction.
+
+ /// The start of the scope-stack buffer, i.e. the allocated pointer
+ /// for the buffer. All of these pointers are either simultaneously
+ /// null or simultaneously valid.
+ std::unique_ptr<char[]> startOfBuffer;
+
+ /// The end of the buffer.
+ char *endOfBuffer = nullptr;
+
+ /// The first valid entry in the buffer.
+ char *startOfData = nullptr;
+
/// The CGF this Stack belongs to
CIRGenFunction *cgf = nullptr;
+ // This class uses a custom allocator for maximum efficiency because cleanups
+ // are allocated and freed very frequently. It's basically a bump pointer
+ // allocator, but we can't use LLVM's BumpPtrAllocator because we use offsets
+ // into the buffer as stable iterators.
+ char *allocate(size_t size);
+ void deallocate(size_t size);
+
+ void *pushCleanup(CleanupKind kind, size_t dataSize);
+
public:
EHScopeStack() = default;
~EHScopeStack() = default;
/// Push a lazily-created cleanup on the stack.
template <class T, class... As> void pushCleanup(CleanupKind kind, As... a) {
- cleanupStack.push_back(std::make_unique<T>(a...));
+ static_assert(alignof(T) <= ScopeStackAlignment,
+ "Cleanup's alignment is too large.");
+ void *buffer = pushCleanup(kind, sizeof(T));
+ [[maybe_unused]] Cleanup *obj = new (buffer) T(a...);
}
void setCGF(CIRGenFunction *inCGF) { cgf = inCGF; }
- size_t getStackDepth() const { return cleanupStack.size(); }
+ /// Pops a cleanup scope off the stack. This is private to CIRGenCleanup.cpp.
+ void popCleanup();
+
+ /// Determines whether the exception-scopes stack is empty.
+ bool empty() const { return startOfData == endOfBuffer; }
+
+ /// An unstable reference to a scope-stack depth. Invalidated by
+ /// pushes but not pops.
+ class iterator;
+
+ /// Returns an iterator pointing to the innermost EH scope.
+ iterator begin() const;
+
+ /// Create a stable reference to the top of the EH stack. The
+ /// returned reference is valid until that scope is popped off the
+ /// stack.
+ stable_iterator stable_begin() const {
+ return stable_iterator(endOfBuffer - startOfData);
+ }
+
+ /// Create a stable reference to the bottom of the EH stack.
+ static stable_iterator stable_end() { return stable_iterator(0); }
};
} // namespace clang::CIRGen
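Note: stable_iterator stores the distance from endOfBuffer down to startOfData, so a saved depth stays valid across pushes and buffer reallocations. A standalone model of that arithmetic using plain integers (not the real class):

    #include <cstddef>
    #include <iostream>

    int main() {
      const std::ptrdiff_t bufferSize = 1024;
      std::ptrdiff_t startOfData = bufferSize;  // empty stack: startOfData == endOfBuffer

      auto stableBegin = [&] { return bufferSize - startOfData; };

      std::cout << stableBegin() << '\n'; // 0, i.e. stable_end(): nothing pushed
      startOfData -= 32;                  // allocate(32): push a cleanup
      std::cout << stableBegin() << '\n'; // 32: deeper scopes get larger offsets
      startOfData += 32;                  // deallocate(32): pop it
      std::cout << stableBegin() << '\n'; // 0 again
      return 0;
    }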
diff --git a/clang/lib/CIR/CodeGen/TargetInfo.cpp b/clang/lib/CIR/CodeGen/TargetInfo.cpp
index d2d32bb..1447dba 100644
--- a/clang/lib/CIR/CodeGen/TargetInfo.cpp
+++ b/clang/lib/CIR/CodeGen/TargetInfo.cpp
@@ -10,7 +10,7 @@ bool clang::CIRGen::isEmptyRecordForLayout(const ASTContext &context,
if (!rt)
return false;
- const RecordDecl *rd = rt->getDecl();
+ const RecordDecl *rd = rt->getOriginalDecl()->getDefinitionOrSelf();
// If this is a C++ record, check the bases first.
if (const CXXRecordDecl *cxxrd = dyn_cast<CXXRecordDecl>(rd)) {
diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
index d3fcac1..53ab04e 100644
--- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
+++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
@@ -1444,6 +1444,27 @@ cir::GetGlobalOp::verifySymbolUses(SymbolTableCollection &symbolTable) {
}
//===----------------------------------------------------------------------===//
+// VTableAddrPointOp
+//===----------------------------------------------------------------------===//
+
+LogicalResult
+cir::VTableAddrPointOp::verifySymbolUses(SymbolTableCollection &symbolTable) {
+ StringRef name = getName();
+
+ // Verify that the pointer type underlying the result type matches the type of
+ // the referenced cir.global or cir.func op.
+ auto op = symbolTable.lookupNearestSymbolFrom<GlobalOp>(*this, getNameAttr());
+ if (!op)
+ return emitOpError("'")
+ << name << "' does not reference a valid cir.global";
+ std::optional<mlir::Attribute> init = op.getInitialValue();
+ if (!init)
+ return success();
+ assert(!cir::MissingFeatures::vtableInitializer());
+ return success();
+}
+
+//===----------------------------------------------------------------------===//
// FuncOp
//===----------------------------------------------------------------------===//
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index 7e1c9fb..0a4aabd 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -460,6 +460,29 @@ mlir::LogicalResult CIRToLLVMAssumeOpLowering::matchAndRewrite(
return mlir::success();
}
+mlir::LogicalResult CIRToLLVMAssumeAlignedOpLowering::matchAndRewrite(
+ cir::AssumeAlignedOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ SmallVector<mlir::Value, 3> opBundleArgs{adaptor.getPointer()};
+
+ auto alignment = mlir::LLVM::ConstantOp::create(rewriter, op.getLoc(),
+ adaptor.getAlignmentAttr());
+ opBundleArgs.push_back(alignment);
+
+ if (mlir::Value offset = adaptor.getOffset())
+ opBundleArgs.push_back(offset);
+
+ auto cond = mlir::LLVM::ConstantOp::create(rewriter, op.getLoc(),
+ rewriter.getI1Type(), 1);
+ mlir::LLVM::AssumeOp::create(rewriter, op.getLoc(), cond, "align",
+ opBundleArgs);
+
+ // The llvm.assume operation does not have a result, so we need to replace
+ // all uses of this cir.assume_aligned operation with the input ptr itself.
+ rewriter.replaceOp(op, adaptor.getPointer());
+ return mlir::success();
+}
+
mlir::LogicalResult CIRToLLVMAssumeSepStorageOpLowering::matchAndRewrite(
cir::AssumeSepStorageOp op, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const {
@@ -2143,6 +2166,11 @@ void ConvertCIRToLLVMPass::processCIRAttrs(mlir::ModuleOp module) {
module->getAttr(cir::CIRDialect::getTripleAttrName()))
module->setAttr(mlir::LLVM::LLVMDialect::getTargetTripleAttrName(),
tripleAttr);
+
+ if (mlir::Attribute asmAttr =
+ module->getAttr(cir::CIRDialect::getModuleLevelAsmAttrName()))
+ module->setAttr(mlir::LLVM::LLVMDialect::getModuleLevelAsmAttrName(),
+ asmAttr);
}
void ConvertCIRToLLVMPass::runOnOperation() {
@@ -2168,6 +2196,7 @@ void ConvertCIRToLLVMPass::runOnOperation() {
patterns.add<
// clang-format off
CIRToLLVMAssumeOpLowering,
+ CIRToLLVMAssumeAlignedOpLowering,
CIRToLLVMAssumeSepStorageOpLowering,
CIRToLLVMBaseClassAddrOpLowering,
CIRToLLVMBinOpLowering,
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
index c5106cb..51b191a 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
@@ -44,6 +44,16 @@ public:
mlir::ConversionPatternRewriter &) const override;
};
+class CIRToLLVMAssumeAlignedOpLowering
+ : public mlir::OpConversionPattern<cir::AssumeAlignedOp> {
+public:
+ using mlir::OpConversionPattern<cir::AssumeAlignedOp>::OpConversionPattern;
+
+ mlir::LogicalResult
+ matchAndRewrite(cir::AssumeAlignedOp op, OpAdaptor,
+ mlir::ConversionPatternRewriter &) const override;
+};
+
class CIRToLLVMAssumeSepStorageOpLowering
: public mlir::OpConversionPattern<cir::AssumeSepStorageOp> {
public:
diff --git a/clang/lib/CodeGen/ABIInfo.cpp b/clang/lib/CodeGen/ABIInfo.cpp
index 3ef430e1..1604ad5 100644
--- a/clang/lib/CodeGen/ABIInfo.cpp
+++ b/clang/lib/CodeGen/ABIInfo.cpp
@@ -68,7 +68,7 @@ bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
return false;
Members *= NElements;
} else if (const RecordType *RT = Ty->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
if (RD->hasFlexibleArrayMember())
return false;
diff --git a/clang/lib/CodeGen/ABIInfoImpl.cpp b/clang/lib/CodeGen/ABIInfoImpl.cpp
index 0a612d3..79dbe70 100644
--- a/clang/lib/CodeGen/ABIInfoImpl.cpp
+++ b/clang/lib/CodeGen/ABIInfoImpl.cpp
@@ -29,7 +29,7 @@ ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ Ty = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
ASTContext &Context = getContext();
if (const auto *EIT = Ty->getAs<BitIntType>())
@@ -53,7 +53,7 @@ ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
+ RetTy = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
if (const auto *EIT = RetTy->getAs<BitIntType>())
if (EIT->getNumBits() >
@@ -105,13 +105,12 @@ llvm::Type *CodeGen::getVAListElementType(CodeGenFunction &CGF) {
CGCXXABI::RecordArgABI CodeGen::getRecordArgABI(const RecordType *RT,
CGCXXABI &CXXABI) {
- const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
- if (!RD) {
- if (!RT->getDecl()->canPassInRegisters())
- return CGCXXABI::RAA_Indirect;
- return CGCXXABI::RAA_Default;
- }
- return CXXABI.getRecordArgABI(RD);
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
+ if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+ return CXXABI.getRecordArgABI(CXXRD);
+ if (!RD->canPassInRegisters())
+ return CGCXXABI::RAA_Indirect;
+ return CGCXXABI::RAA_Default;
}
CGCXXABI::RecordArgABI CodeGen::getRecordArgABI(QualType T, CGCXXABI &CXXABI) {
@@ -125,20 +124,21 @@ bool CodeGen::classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI,
const ABIInfo &Info) {
QualType Ty = FI.getReturnType();
- if (const auto *RT = Ty->getAs<RecordType>())
- if (!isa<CXXRecordDecl>(RT->getDecl()) &&
- !RT->getDecl()->canPassInRegisters()) {
+ if (const auto *RT = Ty->getAs<RecordType>()) {
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
+ if (!isa<CXXRecordDecl>(RD) && !RD->canPassInRegisters()) {
FI.getReturnInfo() = Info.getNaturalAlignIndirect(
Ty, Info.getDataLayout().getAllocaAddrSpace());
return true;
}
+ }
return CXXABI.classifyReturnType(FI);
}
QualType CodeGen::useFirstFieldIfTransparentUnion(QualType Ty) {
if (const RecordType *UT = Ty->getAsUnionType()) {
- const RecordDecl *UD = UT->getDecl();
+ const RecordDecl *UD = UT->getOriginalDecl()->getDefinitionOrSelf();
if (UD->hasAttr<TransparentUnionAttr>()) {
assert(!UD->field_empty() && "sema created an empty transparent union");
return UD->field_begin()->getType();
@@ -276,7 +276,7 @@ bool CodeGen::isEmptyField(ASTContext &Context, const FieldDecl *FD,
// according to the Itanium ABI. The exception applies only to records,
// not arrays of records, so we must also check whether we stripped off an
// array type above.
- if (isa<CXXRecordDecl>(RT->getDecl()) &&
+ if (isa<CXXRecordDecl>(RT->getOriginalDecl()) &&
(WasArray || (!AsIfNoUniqueAddr && !FD->hasAttr<NoUniqueAddressAttr>())))
return false;
@@ -288,7 +288,7 @@ bool CodeGen::isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays,
const RecordType *RT = T->getAs<RecordType>();
if (!RT)
return false;
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
if (RD->hasFlexibleArrayMember())
return false;
@@ -320,7 +320,7 @@ bool CodeGen::isEmptyRecordForLayout(const ASTContext &Context, QualType T) {
if (!RT)
return false;
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
// If this is a C++ record, check the bases first.
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
@@ -344,7 +344,7 @@ const Type *CodeGen::isSingleElementStruct(QualType T, ASTContext &Context) {
if (!RT)
return nullptr;
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
if (RD->hasFlexibleArrayMember())
return nullptr;
@@ -463,7 +463,7 @@ bool CodeGen::isRecordWithSIMDVectorType(ASTContext &Context, QualType Ty) {
const RecordType *RT = Ty->getAs<RecordType>();
if (!RT)
return false;
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
// If this is a C++ record, check the bases first.
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
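The hunks above apply one recurring rewrite: a RecordType is no longer queried through getDecl(); it is resolved with getOriginalDecl()->getDefinitionOrSelf() before any ABI or layout property is read, since the original declaration may not be the definition. A minimal sketch of the resulting shape, assuming only the Clang APIs already named above (the function name classifyRecord is a stand-in, not part of the patch):

// Sketch only: resolve to the defining declaration first, then classify.
static CGCXXABI::RecordArgABI classifyRecord(const RecordType *RT,
                                             CGCXXABI &CXXABI) {
  const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
  if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    return CXXABI.getRecordArgABI(CXXRD);
  return RD->canPassInRegisters() ? CGCXXABI::RAA_Default
                                  : CGCXXABI::RAA_Indirect;
}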
diff --git a/clang/lib/CodeGen/CGBlocks.cpp b/clang/lib/CodeGen/CGBlocks.cpp
index 0e80522..cfeba6f 100644
--- a/clang/lib/CodeGen/CGBlocks.cpp
+++ b/clang/lib/CodeGen/CGBlocks.cpp
@@ -425,7 +425,8 @@ static bool isSafeForCXXConstantCapture(QualType type) {
// Only records can be unsafe.
if (!recordType) return true;
- const auto *record = cast<CXXRecordDecl>(recordType->getDecl());
+ const auto *record =
+ cast<CXXRecordDecl>(recordType->getOriginalDecl())->getDefinitionOrSelf();
// Maintain semantics for classes with non-trivial dtors or copy ctors.
if (!record->hasTrivialDestructor()) return false;
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index a648bde..071667a 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -5985,8 +5985,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// Create a temporary array to hold the sizes of local pointer arguments
// for the block. \p First is the position of the first size argument.
- auto CreateArrayForSizeVar = [=](unsigned First)
- -> std::tuple<llvm::Value *, llvm::Value *, llvm::Value *> {
+ auto CreateArrayForSizeVar =
+ [=](unsigned First) -> std::pair<llvm::Value *, llvm::Value *> {
llvm::APInt ArraySize(32, NumArgs - First);
QualType SizeArrayTy = getContext().getConstantArrayType(
getContext().getSizeType(), ArraySize, nullptr,
@@ -5999,9 +5999,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// actually the Alloca ascasted to the default AS, hence the
// stripPointerCasts()
llvm::Value *Alloca = TmpPtr->stripPointerCasts();
- llvm::Value *TmpSize = EmitLifetimeStart(
- CGM.getDataLayout().getTypeAllocSize(Tmp.getElementType()), Alloca);
llvm::Value *ElemPtr;
+ EmitLifetimeStart(Alloca);
// Each of the following arguments specifies the size of the corresponding
// argument passed to the enqueued block.
auto *Zero = llvm::ConstantInt::get(IntTy, 0);
@@ -6018,7 +6017,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
}
// Return the Alloca itself rather than a potential ascast as this is only
// used by the paired EmitLifetimeEnd.
- return {ElemPtr, TmpSize, Alloca};
+ return {ElemPtr, Alloca};
};
// Could have events and/or varargs.
@@ -6030,7 +6029,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm::Value *Kernel =
Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
auto *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
- auto [ElemPtr, TmpSize, TmpPtr] = CreateArrayForSizeVar(4);
+ auto [ElemPtr, TmpPtr] = CreateArrayForSizeVar(4);
// Create a vector of the arguments, as well as a constant value to
// express to the runtime the number of variadic arguments.
@@ -6045,8 +6044,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
auto Call = RValue::get(
EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
- if (TmpSize)
- EmitLifetimeEnd(TmpSize, TmpPtr);
+ EmitLifetimeEnd(TmpPtr);
return Call;
}
// Any calls now have event arguments passed.
@@ -6111,15 +6109,14 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
ArgTys.push_back(Int32Ty);
Name = "__enqueue_kernel_events_varargs";
- auto [ElemPtr, TmpSize, TmpPtr] = CreateArrayForSizeVar(7);
+ auto [ElemPtr, TmpPtr] = CreateArrayForSizeVar(7);
Args.push_back(ElemPtr);
ArgTys.push_back(ElemPtr->getType());
llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
auto Call = RValue::get(
EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
- if (TmpSize)
- EmitLifetimeEnd(TmpSize, TmpPtr);
+ EmitLifetimeEnd(TmpPtr);
return Call;
}
llvm_unreachable("Unexpected enqueue_kernel signature");
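In the enqueue_kernel lowering above, CreateArrayForSizeVar now returns a pair instead of a triple: the size value disappears because the lifetime markers emitted for the temporary size array no longer carry an explicit size. The caller-side shape, as a sketch built only from names visible in the hunks above:

// Sketch only, not part of the patch.
auto [ElemPtr, TmpPtr] = CreateArrayForSizeVar(4); // 4 = index of the first size argument
// ... append ElemPtr to Args/ArgTys and emit the __enqueue_kernel_* runtime call ...
EmitLifetimeEnd(TmpPtr); // unconditional; the helper is a no-op when markers are disabled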
diff --git a/clang/lib/CodeGen/CGCUDANV.cpp b/clang/lib/CodeGen/CGCUDANV.cpp
index dd26be7..c7f4bf8 100644
--- a/clang/lib/CodeGen/CGCUDANV.cpp
+++ b/clang/lib/CodeGen/CGCUDANV.cpp
@@ -1131,7 +1131,8 @@ void CGNVCUDARuntime::handleVarRegistration(const VarDecl *D,
// Builtin surfaces and textures and their template arguments are
// also registered with CUDA runtime.
const auto *TD = cast<ClassTemplateSpecializationDecl>(
- D->getType()->castAs<RecordType>()->getDecl());
+ D->getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
const TemplateArgumentList &Args = TD->getTemplateArgs();
if (TD->hasAttr<CUDADeviceBuiltinSurfaceTypeAttr>()) {
assert(Args.size() == 2 &&
diff --git a/clang/lib/CodeGen/CGCXX.cpp b/clang/lib/CodeGen/CGCXX.cpp
index 78a7b02..f9aff89 100644
--- a/clang/lib/CodeGen/CGCXX.cpp
+++ b/clang/lib/CodeGen/CGCXX.cpp
@@ -83,8 +83,9 @@ bool CodeGenModule::TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) {
if (I.isVirtual()) continue;
// Skip base classes with trivial destructors.
- const auto *Base =
- cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
+ const auto *Base = cast<CXXRecordDecl>(
+ I.getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
if (Base->hasTrivialDestructor()) continue;
// If we've already found a base class with a non-trivial
@@ -277,18 +278,18 @@ static CGCallee BuildAppleKextVirtualCall(CodeGenFunction &CGF,
/// BuildAppleKextVirtualCall - This routine is to support gcc's kext ABI making
/// indirect call to virtual functions. It makes the call through indexing
/// into the vtable.
-CGCallee
-CodeGenFunction::BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
- NestedNameSpecifier *Qual,
- llvm::Type *Ty) {
- assert((Qual->getKind() == NestedNameSpecifier::TypeSpec) &&
+CGCallee CodeGenFunction::BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
+ NestedNameSpecifier Qual,
+ llvm::Type *Ty) {
+ assert(Qual.getKind() == NestedNameSpecifier::Kind::Type &&
"BuildAppleKextVirtualCall - bad Qual kind");
- const Type *QTy = Qual->getAsType();
+ const Type *QTy = Qual.getAsType();
QualType T = QualType(QTy, 0);
const RecordType *RT = T->getAs<RecordType>();
assert(RT && "BuildAppleKextVirtualCall - Qual type must be record");
- const auto *RD = cast<CXXRecordDecl>(RT->getDecl());
+ const auto *RD =
+ cast<CXXRecordDecl>(RT->getOriginalDecl())->getDefinitionOrSelf();
if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD))
return BuildAppleKextVirtualDestructorCall(DD, Dtor_Complete, RD);
diff --git a/clang/lib/CodeGen/CGCXXABI.cpp b/clang/lib/CodeGen/CGCXXABI.cpp
index d42e0bb8..cca6758 100644
--- a/clang/lib/CodeGen/CGCXXABI.cpp
+++ b/clang/lib/CodeGen/CGCXXABI.cpp
@@ -52,7 +52,7 @@ CGCallee CGCXXABI::EmitLoadOfMemberFunctionPointer(
const auto *RD = MPT->getMostRecentCXXRecordDecl();
ThisPtrForCall =
- CGF.getAsNaturalPointerTo(This, CGF.getContext().getRecordType(RD));
+ CGF.getAsNaturalPointerTo(This, CGF.getContext().getCanonicalTagType(RD));
const FunctionProtoType *FPT =
MPT->getPointeeType()->getAs<FunctionProtoType>();
llvm::Constant *FnPtr = llvm::Constant::getNullValue(
@@ -106,7 +106,7 @@ CGCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
llvm::Constant *CGCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
return GetBogusMemberPointer(CGM.getContext().getMemberPointerType(
- MD->getType(), /*Qualifier=*/nullptr, MD->getParent()));
+ MD->getType(), /*Qualifier=*/std::nullopt, MD->getParent()));
}
llvm::Constant *CGCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
index d9bd443..b959982 100644
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -125,16 +125,16 @@ unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
/// calling a method pointer.
CanQualType CodeGenTypes::DeriveThisType(const CXXRecordDecl *RD,
const CXXMethodDecl *MD) {
- QualType RecTy;
+ CanQualType RecTy;
if (RD)
- RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
+ RecTy = Context.getCanonicalTagType(RD);
else
RecTy = Context.VoidTy;
if (MD)
- RecTy = Context.getAddrSpaceQualType(
- RecTy, MD->getMethodQualifiers().getAddressSpace());
- return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
+ RecTy = CanQualType::CreateUnsafe(Context.getAddrSpaceQualType(
+ RecTy, MD->getMethodQualifiers().getAddressSpace()));
+ return Context.getPointerType(RecTy);
}
/// Returns the canonical formal type of the given C++ method.
@@ -1008,7 +1008,7 @@ getTypeExpansion(QualType Ty, const ASTContext &Context) {
if (const RecordType *RT = Ty->getAs<RecordType>()) {
SmallVector<const CXXBaseSpecifier *, 1> Bases;
SmallVector<const FieldDecl *, 1> Fields;
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
assert(!RD->hasFlexibleArrayMember() &&
"Cannot expand structure with flexible array.");
if (RD->isUnion()) {
@@ -1895,7 +1895,7 @@ bool CodeGenModule::MayDropFunctionReturn(const ASTContext &Context,
// complex destructor or a non-trivially copyable type.
if (const RecordType *RT =
ReturnType.getCanonicalType()->getAs<RecordType>()) {
- if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
+ if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getOriginalDecl()))
return ClassDecl->hasTrivialDestructor();
}
return ReturnType.isTriviallyCopyableType(Context);
@@ -2870,7 +2870,8 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
// (e.g., Obj-C ARC-managed structs, MSVC callee-destroyed objects).
if (!ParamType.isDestructedType() || !ParamType->isRecordType() ||
ParamType->castAs<RecordType>()
- ->getDecl()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf()
->isParamDestroyedInCallee())
Attrs.addAttribute(llvm::Attribute::DeadOnReturn);
}
@@ -3828,7 +3829,7 @@ static void setUsedBits(CodeGenModule &CGM, const RecordType *RTy, int Offset,
SmallVectorImpl<uint64_t> &Bits) {
ASTContext &Context = CGM.getContext();
int CharWidth = Context.getCharWidth();
- const RecordDecl *RD = RTy->getDecl()->getDefinition();
+ const RecordDecl *RD = RTy->getOriginalDecl()->getDefinition();
const ASTRecordLayout &ASTLayout = Context.getASTRecordLayout(RD);
const CGRecordLayout &Layout = CGM.getTypes().getCGRecordLayout(RD);
@@ -4289,7 +4290,10 @@ void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
// Deactivate the cleanup for the callee-destructed param that was pushed.
if (type->isRecordType() && !CurFuncIsThunk &&
- type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee() &&
+ type->castAs<RecordType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->isParamDestroyedInCallee() &&
param->needsDestruction(getContext())) {
EHScopeStack::stable_iterator cleanup =
CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));
@@ -4319,10 +4323,7 @@ static void emitWriteback(CodeGenFunction &CGF,
if (writeback.WritebackExpr) {
CGF.EmitIgnoredExpr(writeback.WritebackExpr);
-
- if (writeback.LifetimeSz)
- CGF.EmitLifetimeEnd(writeback.LifetimeSz,
- writeback.Temporary.getBasePointer());
+ CGF.EmitLifetimeEnd(writeback.Temporary.getBasePointer());
return;
}
@@ -4885,8 +4886,10 @@ void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
// In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
// However, we still have to push an EH-only cleanup in case we unwind before
// we make it to the call.
- if (type->isRecordType() &&
- type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
+ if (type->isRecordType() && type->castAs<RecordType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->isParamDestroyedInCallee()) {
// If we're using inalloca, use the argument memory. Otherwise, use a
// temporary.
AggValueSlot Slot = args.isUsingInAlloca()
@@ -5282,7 +5285,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// If the call returns a temporary with struct return, create a temporary
// alloca to hold the result, unless one is given to us.
Address SRetPtr = Address::invalid();
- llvm::Value *UnusedReturnSizePtr = nullptr;
+ bool NeedSRetLifetimeEnd = false;
if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
// For virtual function pointer thunks and musttail calls, we must always
// forward an incoming SRet pointer to the callee, because a local alloca
@@ -5296,11 +5299,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
SRetPtr = ReturnValue.getAddress();
} else {
SRetPtr = CreateMemTempWithoutCast(RetTy, "tmp");
- if (HaveInsertPoint() && ReturnValue.isUnused()) {
- llvm::TypeSize size =
- CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
- UnusedReturnSizePtr = EmitLifetimeStart(size, SRetPtr.getBasePointer());
- }
+ if (HaveInsertPoint() && ReturnValue.isUnused())
+ NeedSRetLifetimeEnd = EmitLifetimeStart(SRetPtr.getBasePointer());
}
if (IRFunctionArgs.hasSRetArg()) {
// A mismatch between the allocated return value's AS and the target's
@@ -5484,15 +5484,10 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
Val = Builder.CreateFreeze(Val);
IRCallArgs[FirstIRArg] = Val;
- // Emit lifetime markers for the temporary alloca.
- llvm::TypeSize ByvalTempElementSize =
- CGM.getDataLayout().getTypeAllocSize(AI.getElementType());
- llvm::Value *LifetimeSize =
- EmitLifetimeStart(ByvalTempElementSize, AI.getPointer());
-
- // Add cleanup code to emit the end lifetime marker after the call.
- if (LifetimeSize) // In case we disabled lifetime markers.
- CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize);
+ // Emit lifetime markers for the temporary alloca and add cleanup code to
+ // emit the end lifetime marker after the call.
+ if (EmitLifetimeStart(AI.getPointer()))
+ CallLifetimeEndAfterCall.emplace_back(AI);
// Generate the copy.
I->copyInto(*this, AI);
@@ -5653,9 +5648,9 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
auto unpaddedCoercionType = ArgInfo.getUnpaddedCoerceAndExpandType();
auto *unpaddedStruct = dyn_cast<llvm::StructType>(unpaddedCoercionType);
- llvm::Value *tempSize = nullptr;
Address addr = Address::invalid();
RawAddress AllocaAddr = RawAddress::invalid();
+ bool NeedLifetimeEnd = false;
if (I->isAggregate()) {
addr = I->hasLValue() ? I->getKnownLValue().getAddress()
: I->getKnownRValue().getAggregateAddress();
@@ -5665,7 +5660,6 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
assert(RV.isScalar()); // complex should always just be direct
llvm::Type *scalarType = RV.getScalarVal()->getType();
- auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
auto scalarAlign = CGM.getDataLayout().getPrefTypeAlign(scalarType);
// Materialize to a temporary.
@@ -5674,7 +5668,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
layout->getAlignment(), scalarAlign)),
"tmp",
/*ArraySize=*/nullptr, &AllocaAddr);
- tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer());
+ NeedLifetimeEnd = EmitLifetimeStart(AllocaAddr.getPointer());
Builder.CreateStore(RV.getScalarVal(), addr);
}
@@ -5699,10 +5693,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
}
assert(IRArgPos == FirstIRArg + NumIRArgs);
- if (tempSize) {
- EmitLifetimeEnd(tempSize, AllocaAddr.getPointer());
- }
-
+ if (NeedLifetimeEnd)
+ EmitLifetimeEnd(AllocaAddr.getPointer());
break;
}
@@ -5871,9 +5863,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// can't depend on being inside of an ExprWithCleanups, so we need to manually
// pop this cleanup later on. Being eager about this is OK, since this
// temporary is 'invisible' outside of the callee.
- if (UnusedReturnSizePtr)
- pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetPtr,
- UnusedReturnSizePtr);
+ if (NeedSRetLifetimeEnd)
+ pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetPtr);
llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
@@ -6007,7 +5998,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// insertion point; this allows the rest of IRGen to discard
// unreachable code.
if (CI->doesNotReturn()) {
- if (UnusedReturnSizePtr)
+ if (NeedSRetLifetimeEnd)
PopCleanupBlock();
// Strip away the noreturn attribute to better diagnose unreachable UB.
@@ -6122,7 +6113,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
case ABIArgInfo::InAlloca:
case ABIArgInfo::Indirect: {
RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
- if (UnusedReturnSizePtr)
+ if (NeedSRetLifetimeEnd)
PopCleanupBlock();
return ret;
}
diff --git a/clang/lib/CodeGen/CGCall.h b/clang/lib/CodeGen/CGCall.h
index 0b4e3f9..3157b7f 100644
--- a/clang/lib/CodeGen/CGCall.h
+++ b/clang/lib/CodeGen/CGCall.h
@@ -289,9 +289,6 @@ public:
/// An Expression (optional) that performs the writeback with any required
/// casting.
const Expr *WritebackExpr;
-
- // Size for optional lifetime end on the temporary.
- llvm::Value *LifetimeSz;
};
struct CallArgCleanup {
@@ -321,9 +318,8 @@ public:
}
void addWriteback(LValue srcLV, Address temporary, llvm::Value *toUse,
- const Expr *writebackExpr = nullptr,
- llvm::Value *lifetimeSz = nullptr) {
- Writeback writeback = {srcLV, temporary, toUse, writebackExpr, lifetimeSz};
+ const Expr *writebackExpr = nullptr) {
+ Writeback writeback = {srcLV, temporary, toUse, writebackExpr};
Writebacks.push_back(writeback);
}
diff --git a/clang/lib/CodeGen/CGClass.cpp b/clang/lib/CodeGen/CGClass.cpp
index 4a465e6..e9a92ae 100644
--- a/clang/lib/CodeGen/CGClass.cpp
+++ b/clang/lib/CodeGen/CGClass.cpp
@@ -181,7 +181,9 @@ CharUnits CodeGenModule::computeNonVirtualBaseClassOffset(
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
const auto *BaseDecl =
- cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(
+ Base->getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
// Add the offset.
Offset += Layout.getBaseClassOffset(BaseDecl);
@@ -301,7 +303,8 @@ Address CodeGenFunction::GetAddressOfBaseClass(
// and hence will not require any further steps.
if ((*Start)->isVirtual()) {
VBase = cast<CXXRecordDecl>(
- (*Start)->getType()->castAs<RecordType>()->getDecl());
+ (*Start)->getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
++Start;
}
@@ -326,7 +329,7 @@ Address CodeGenFunction::GetAddressOfBaseClass(
llvm::Type *PtrTy = llvm::PointerType::get(
CGM.getLLVMContext(), Value.getType()->getPointerAddressSpace());
- QualType DerivedTy = getContext().getRecordType(Derived);
+ CanQualType DerivedTy = getContext().getCanonicalTagType(Derived);
CharUnits DerivedAlign = CGM.getClassPointerAlignment(Derived);
// If the static offset is zero and we don't have a virtual step,
@@ -401,8 +404,7 @@ CodeGenFunction::GetAddressOfDerivedClass(Address BaseAddr,
bool NullCheckValue) {
assert(PathBegin != PathEnd && "Base path should not be empty!");
- QualType DerivedTy =
- getContext().getCanonicalType(getContext().getTagDeclType(Derived));
+ CanQualType DerivedTy = getContext().getCanonicalTagType(Derived);
llvm::Type *DerivedValueTy = ConvertType(DerivedTy);
llvm::Value *NonVirtualOffset =
@@ -559,7 +561,8 @@ static void EmitBaseInitializer(CodeGenFunction &CGF,
const Type *BaseType = BaseInit->getBaseClass();
const auto *BaseClassDecl =
- cast<CXXRecordDecl>(BaseType->castAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(BaseType->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
bool isBaseVirtual = BaseInit->isBaseVirtual();
@@ -638,7 +641,7 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
QualType FieldType = Field->getType();
llvm::Value *ThisPtr = CGF.LoadCXXThis();
- QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
+ CanQualType RecordTy = CGF.getContext().getCanonicalTagType(ClassDecl);
LValue LHS;
// If a base constructor is being emitted, create an LValue that has the
@@ -974,7 +977,7 @@ namespace {
}
CharUnits MemcpySize = getMemcpySize(FirstByteOffset);
- QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
+ CanQualType RecordTy = CGF.getContext().getCanonicalTagType(ClassDecl);
Address ThisPtr = CGF.LoadCXXThisAddress();
LValue DestLV = CGF.MakeAddrLValue(ThisPtr, RecordTy);
LValue Dest = CGF.EmitLValueForFieldInitialization(DestLV, FirstField);
@@ -1122,7 +1125,7 @@ namespace {
void pushEHDestructors() {
Address ThisPtr = CGF.LoadCXXThisAddress();
- QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
+ CanQualType RecordTy = CGF.getContext().getCanonicalTagType(ClassDecl);
LValue LHS = CGF.MakeAddrLValue(ThisPtr, RecordTy);
for (unsigned i = 0; i < AggregatedInits.size(); ++i) {
@@ -1265,7 +1268,8 @@ namespace {
static bool isInitializerOfDynamicClass(const CXXCtorInitializer *BaseInit) {
const Type *BaseType = BaseInit->getBaseClass();
const auto *BaseClassDecl =
- cast<CXXRecordDecl>(BaseType->castAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(BaseType->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
return BaseClassDecl->isDynamicClass();
}
@@ -1374,7 +1378,9 @@ HasTrivialDestructorBody(ASTContext &Context,
continue;
const CXXRecordDecl *NonVirtualBase =
- cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(
+ I.getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
if (!HasTrivialDestructorBody(Context, NonVirtualBase,
MostDerivedClassDecl))
return false;
@@ -1383,8 +1389,10 @@ HasTrivialDestructorBody(ASTContext &Context,
if (BaseClassDecl == MostDerivedClassDecl) {
// Check virtual bases.
for (const auto &I : BaseClassDecl->vbases()) {
- const CXXRecordDecl *VirtualBase =
- cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
+ const auto *VirtualBase =
+ cast<CXXRecordDecl>(
+ I.getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
if (!HasTrivialDestructorBody(Context, VirtualBase,
MostDerivedClassDecl))
return false;
@@ -1404,7 +1412,8 @@ FieldHasTrivialDestructorBody(ASTContext &Context,
if (!RT)
return true;
- CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
+ auto *FieldClassDecl =
+ cast<CXXRecordDecl>(RT->getOriginalDecl())->getDefinitionOrSelf();
// The destructor for an implicit anonymous union member is never invoked.
if (FieldClassDecl->isUnion() && FieldClassDecl->isAnonymousStructOrUnion())
@@ -1588,7 +1597,7 @@ namespace {
const CXXRecordDecl *ClassDecl = Dtor->getParent();
CGF.EmitDeleteCall(Dtor->getOperatorDelete(),
LoadThisForDtorDelete(CGF, Dtor),
- CGF.getContext().getTagDeclType(ClassDecl));
+ CGF.getContext().getCanonicalTagType(ClassDecl));
}
};
@@ -1606,7 +1615,7 @@ namespace {
const CXXRecordDecl *ClassDecl = Dtor->getParent();
CGF.EmitDeleteCall(Dtor->getOperatorDelete(),
LoadThisForDtorDelete(CGF, Dtor),
- CGF.getContext().getTagDeclType(ClassDecl));
+ CGF.getContext().getCanonicalTagType(ClassDecl));
assert(Dtor->getOperatorDelete()->isDestroyingOperatorDelete() ==
ReturnAfterDelete &&
"unexpected value for ReturnAfterDelete");
@@ -1647,7 +1656,8 @@ namespace {
void Emit(CodeGenFunction &CGF, Flags flags) override {
// Find the address of the field.
Address thisValue = CGF.LoadCXXThisAddress();
- QualType RecordTy = CGF.getContext().getTagDeclType(field->getParent());
+ CanQualType RecordTy =
+ CGF.getContext().getCanonicalTagType(field->getParent());
LValue ThisLV = CGF.MakeAddrLValue(thisValue, RecordTy);
LValue LV = CGF.EmitLValueForField(ThisLV, field);
assert(LV.isSimple());
@@ -1870,7 +1880,7 @@ void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
const CXXRecordDecl *ClassDecl = DD->getParent();
EmitDeleteCall(DD->getOperatorDelete(),
LoadThisForDtorDelete(*this, DD),
- getContext().getTagDeclType(ClassDecl));
+ getContext().getCanonicalTagType(ClassDecl));
EmitBranchThroughCleanup(ReturnBlock);
} else {
EHStack.pushCleanup<CallDtorDelete>(NormalAndEHCleanup);
@@ -1898,7 +1908,9 @@ void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
// the reverse order.
for (const auto &Base : ClassDecl->vbases()) {
auto *BaseClassDecl =
- cast<CXXRecordDecl>(Base.getType()->castAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(
+ Base.getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
if (BaseClassDecl->hasTrivialDestructor()) {
// Under SanitizeMemoryUseAfterDtor, poison the trivial base class
@@ -1964,7 +1976,7 @@ void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
// Anonymous union members do not have their destructors called.
const RecordType *RT = type->getAsUnionType();
- if (RT && RT->getDecl()->isAnonymousStructOrUnion())
+ if (RT && RT->getOriginalDecl()->isAnonymousStructOrUnion())
continue;
CleanupKind cleanupKind = getCleanupKind(dtorKind);
@@ -2057,7 +2069,7 @@ void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
//
// Note that these are complete objects and so we don't need to
// use the non-virtual size or alignment.
- QualType type = getContext().getTypeDeclType(ctor->getParent());
+ CanQualType type = getContext().getCanonicalTagType(ctor->getParent());
CharUnits eltAlignment =
arrayBase.getAlignment()
.alignmentOfArrayElement(getContext().getTypeSizeInChars(type));
@@ -2119,7 +2131,8 @@ void CodeGenFunction::destroyCXXObject(CodeGenFunction &CGF,
Address addr,
QualType type) {
const RecordType *rtype = type->castAs<RecordType>();
- const CXXRecordDecl *record = cast<CXXRecordDecl>(rtype->getDecl());
+ const auto *record =
+ cast<CXXRecordDecl>(rtype->getOriginalDecl())->getDefinitionOrSelf();
const CXXDestructorDecl *dtor = record->getDestructor();
assert(!dtor->isTrivial());
CGF.EmitCXXDestructorCall(dtor, Dtor_Complete, /*for vbase*/ false,
@@ -2158,7 +2171,7 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
const Expr *Arg = E->getArg(0);
LValue Src = EmitLValue(Arg);
- QualType DestTy = getContext().getTypeDeclType(D->getParent());
+ CanQualType DestTy = getContext().getCanonicalTagType(D->getParent());
LValue Dest = MakeAddrLValue(This, DestTy);
EmitAggregateCopyCtor(Dest, Src, ThisAVS.mayOverlap());
return;
@@ -2210,7 +2223,8 @@ void CodeGenFunction::EmitCXXConstructorCall(
if (!NewPointerIsChecked)
EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall, Loc, This,
- getContext().getRecordType(ClassDecl), CharUnits::Zero());
+ getContext().getCanonicalTagType(ClassDecl),
+ CharUnits::Zero());
if (D->isTrivial() && D->isDefaultConstructor()) {
assert(Args.size() == 1 && "trivial default ctor with args");
@@ -2226,7 +2240,7 @@ void CodeGenFunction::EmitCXXConstructorCall(
Address Src = makeNaturalAddressForPointer(
Args[1].getRValue(*this).getScalarVal(), SrcTy);
LValue SrcLVal = MakeAddrLValue(Src, SrcTy);
- QualType DestTy = getContext().getTypeDeclType(ClassDecl);
+ CanQualType DestTy = getContext().getCanonicalTagType(ClassDecl);
LValue DestLVal = MakeAddrLValue(This, DestTy);
EmitAggregateCopyCtor(DestLVal, SrcLVal, Overlap);
return;
@@ -2638,8 +2652,9 @@ void CodeGenFunction::getVTablePointers(BaseSubobject Base,
// Traverse bases.
for (const auto &I : RD->bases()) {
- auto *BaseDecl =
- cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
+ auto *BaseDecl = cast<CXXRecordDecl>(
+ I.getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
// Ignore classes without a vtable.
if (!BaseDecl->isDynamicClass())
@@ -2772,7 +2787,7 @@ void CodeGenFunction::EmitTypeMetadataCodeForVCall(const CXXRecordDecl *RD,
// Don't insert type test assumes if we are forcing public
// visibility.
!CGM.AlwaysHasLTOVisibilityPublic(RD)) {
- QualType Ty = QualType(RD->getTypeForDecl(), 0);
+ CanQualType Ty = CGM.getContext().getCanonicalTagType(RD);
llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(Ty);
llvm::Value *TypeId =
llvm::MetadataAsValue::get(CGM.getLLVMContext(), MD);
@@ -2839,7 +2854,8 @@ void CodeGenFunction::EmitVTablePtrCheckForCast(QualType T, Address Derived,
if (!ClassTy)
return;
- const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(ClassTy->getDecl());
+ const auto *ClassDecl =
+ cast<CXXRecordDecl>(ClassTy->getOriginalDecl())->getDefinitionOrSelf();
if (!ClassDecl->isCompleteDefinition() || !ClassDecl->isDynamicClass())
return;
@@ -2896,8 +2912,8 @@ void CodeGenFunction::EmitVTablePtrCheck(const CXXRecordDecl *RD,
EmitSanitizerStatReport(SSK);
- llvm::Metadata *MD =
- CGM.CreateMetadataIdentifierForType(QualType(RD->getTypeForDecl(), 0));
+ CanQualType T = CGM.getContext().getCanonicalTagType(RD);
+ llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(T);
llvm::Value *TypeId = llvm::MetadataAsValue::get(getLLVMContext(), MD);
llvm::Value *TypeTest = Builder.CreateCall(
@@ -2906,7 +2922,7 @@ void CodeGenFunction::EmitVTablePtrCheck(const CXXRecordDecl *RD,
llvm::Constant *StaticData[] = {
llvm::ConstantInt::get(Int8Ty, TCK),
EmitCheckSourceLocation(Loc),
- EmitCheckTypeDescriptor(QualType(RD->getTypeForDecl(), 0)),
+ EmitCheckTypeDescriptor(T),
};
auto CrossDsoTypeId = CGM.CreateCrossDsoCfiTypeId(MD);
@@ -2956,8 +2972,8 @@ llvm::Value *CodeGenFunction::EmitVTableTypeCheckedLoad(
EmitSanitizerStatReport(llvm::SanStat_CFI_VCall);
- llvm::Metadata *MD =
- CGM.CreateMetadataIdentifierForType(QualType(RD->getTypeForDecl(), 0));
+ CanQualType T = CGM.getContext().getCanonicalTagType(RD);
+ llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(T);
llvm::Value *TypeId = llvm::MetadataAsValue::get(CGM.getLLVMContext(), MD);
auto CheckedLoadIntrinsic = CGM.getVTables().useRelativeLayout()
@@ -3039,7 +3055,8 @@ void CodeGenFunction::EmitLambdaBlockInvokeBody() {
// Start building arguments for forwarding call
CallArgList CallArgs;
- QualType ThisType = getContext().getPointerType(getContext().getRecordType(Lambda));
+ CanQualType ThisType =
+ getContext().getPointerType(getContext().getCanonicalTagType(Lambda));
Address ThisPtr = GetAddrOfBlockDecl(variable);
CallArgs.add(RValue::get(getAsNaturalPointerTo(ThisPtr, ThisType)), ThisType);
@@ -3066,8 +3083,8 @@ void CodeGenFunction::EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD) {
// Start building arguments for forwarding call
CallArgList CallArgs;
- QualType LambdaType = getContext().getRecordType(Lambda);
- QualType ThisType = getContext().getPointerType(LambdaType);
+ CanQualType LambdaType = getContext().getCanonicalTagType(Lambda);
+ CanQualType ThisType = getContext().getPointerType(LambdaType);
Address ThisPtr = CreateMemTemp(LambdaType, "unused.capture");
CallArgs.add(RValue::get(ThisPtr.emitRawPointer(*this)), ThisType);
@@ -3118,8 +3135,8 @@ void CodeGenFunction::EmitLambdaInAllocaCallOpBody(const CXXMethodDecl *MD) {
// Forward %this argument.
CallArgList CallArgs;
- QualType LambdaType = getContext().getRecordType(MD->getParent());
- QualType ThisType = getContext().getPointerType(LambdaType);
+ CanQualType LambdaType = getContext().getCanonicalTagType(MD->getParent());
+ CanQualType ThisType = getContext().getPointerType(LambdaType);
llvm::Value *ThisArg = CurFn->getArg(0);
CallArgs.add(RValue::get(ThisArg), ThisType);
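The other rewrite that recurs through this file: record types for a known declaration are built with getCanonicalTagType, which yields a CanQualType directly, rather than by canonicalizing a getRecordType/getTagDeclType result. A brief sketch of the before/after shape, assuming only the ASTContext calls already used above:

// Sketch only, not part of the patch.
// Before:  QualType RecordTy = getContext().getTagDeclType(ClassDecl);
// After:   CanQualType RecordTy = getContext().getCanonicalTagType(ClassDecl);
// CanQualType converts to QualType, so downstream uses such as
// MakeAddrLValue(ThisPtr, RecordTy) do not change.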
diff --git a/clang/lib/CodeGen/CGDebugInfo.cpp b/clang/lib/CodeGen/CGDebugInfo.cpp
index 2b469f2..994bdbd 100644
--- a/clang/lib/CodeGen/CGDebugInfo.cpp
+++ b/clang/lib/CodeGen/CGDebugInfo.cpp
@@ -366,7 +366,7 @@ llvm::DIScope *CGDebugInfo::getContextDescriptor(const Decl *Context,
if (const auto *RDecl = dyn_cast<RecordDecl>(Context))
if (!RDecl->isDependentType())
- return getOrCreateType(CGM.getContext().getTypeDeclType(RDecl),
+ return getOrCreateType(CGM.getContext().getCanonicalTagType(RDecl),
TheCU->getFile());
return Default;
}
@@ -1285,7 +1285,7 @@ static bool needsTypeIdentifier(const TagDecl *TD, CodeGenModule &CGM,
static SmallString<256> getTypeIdentifier(const TagType *Ty, CodeGenModule &CGM,
llvm::DICompileUnit *TheCU) {
SmallString<256> Identifier;
- const TagDecl *TD = Ty->getDecl();
+ const TagDecl *TD = Ty->getOriginalDecl()->getDefinitionOrSelf();
if (!needsTypeIdentifier(TD, CGM, TheCU))
return Identifier;
@@ -1321,8 +1321,8 @@ static llvm::dwarf::Tag getTagForRecord(const RecordDecl *RD) {
llvm::DICompositeType *
CGDebugInfo::getOrCreateRecordFwdDecl(const RecordType *Ty,
llvm::DIScope *Ctx) {
- const RecordDecl *RD = Ty->getDecl();
- if (llvm::DIType *T = getTypeOrNull(CGM.getContext().getRecordType(RD)))
+ const RecordDecl *RD = Ty->getOriginalDecl()->getDefinitionOrSelf();
+ if (llvm::DIType *T = getTypeOrNull(QualType(Ty, 0)))
return cast<llvm::DICompositeType>(T);
llvm::DIFile *DefUnit = getOrCreateFile(RD->getLocation());
const unsigned Line =
@@ -2015,6 +2015,8 @@ void CGDebugInfo::CollectRecordNestedType(
const TypeDecl *TD, SmallVectorImpl<llvm::Metadata *> &elements) {
QualType Ty = CGM.getContext().getTypeDeclType(TD);
// Injected class names are not considered nested records.
+ // FIXME: Is this supposed to be testing for injected class name declarations
+ // instead?
if (isa<InjectedClassNameType>(Ty))
return;
SourceLocation Loc = TD->getLocation();
@@ -2356,7 +2358,9 @@ void CGDebugInfo::CollectCXXBasesAux(
const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
for (const auto &BI : Bases) {
const auto *Base =
- cast<CXXRecordDecl>(BI.getType()->castAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(
+ BI.getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinition();
if (!SeenTypes.insert(Base).second)
continue;
auto *BaseTy = getOrCreateType(BI.getType(), Unit);
@@ -2825,12 +2829,12 @@ void CGDebugInfo::addHeapAllocSiteMetadata(llvm::CallBase *CI,
void CGDebugInfo::completeType(const EnumDecl *ED) {
if (DebugKind <= llvm::codegenoptions::DebugLineTablesOnly)
return;
- QualType Ty = CGM.getContext().getEnumType(ED);
+ CanQualType Ty = CGM.getContext().getCanonicalTagType(ED);
void *TyPtr = Ty.getAsOpaquePtr();
auto I = TypeCache.find(TyPtr);
if (I == TypeCache.end() || !cast<llvm::DIType>(I->second)->isForwardDecl())
return;
- llvm::DIType *Res = CreateTypeDefinition(Ty->castAs<EnumType>());
+ llvm::DIType *Res = CreateTypeDefinition(dyn_cast<EnumType>(Ty));
assert(!Res->isForwardDecl());
TypeCache[TyPtr].reset(Res);
}
@@ -2900,7 +2904,7 @@ void CGDebugInfo::completeClassData(const RecordDecl *RD) {
void CGDebugInfo::completeClass(const RecordDecl *RD) {
if (DebugKind <= llvm::codegenoptions::DebugLineTablesOnly)
return;
- QualType Ty = CGM.getContext().getRecordType(RD);
+ CanQualType Ty = CGM.getContext().getCanonicalTagType(RD);
void *TyPtr = Ty.getAsOpaquePtr();
auto I = TypeCache.find(TyPtr);
if (I != TypeCache.end() && !cast<llvm::DIType>(I->second)->isForwardDecl())
@@ -2909,7 +2913,7 @@ void CGDebugInfo::completeClass(const RecordDecl *RD) {
// We want the canonical definition of the structure to not
// be the typedef. Since that would lead to circular typedef
// metadata.
- auto [Res, PrefRes] = CreateTypeDefinition(Ty->castAs<RecordType>());
+ auto [Res, PrefRes] = CreateTypeDefinition(dyn_cast<RecordType>(Ty));
assert(!Res->isForwardDecl());
TypeCache[TyPtr].reset(Res);
}
@@ -3013,14 +3017,14 @@ void CGDebugInfo::completeRequiredType(const RecordDecl *RD) {
if (shouldOmitDefinition(DebugKind, DebugTypeExtRefs, RD, CGM.getLangOpts()))
return;
- QualType Ty = CGM.getContext().getRecordType(RD);
+ CanQualType Ty = CGM.getContext().getCanonicalTagType(RD);
llvm::DIType *T = getTypeOrNull(Ty);
if (T && T->isForwardDecl())
completeClassData(RD);
}
llvm::DIType *CGDebugInfo::CreateType(const RecordType *Ty) {
- RecordDecl *RD = Ty->getDecl();
+ RecordDecl *RD = Ty->getOriginalDecl()->getDefinitionOrSelf();
llvm::DIType *T = cast_or_null<llvm::DIType>(getTypeOrNull(QualType(Ty, 0)));
if (T || shouldOmitDefinition(DebugKind, DebugTypeExtRefs, RD,
CGM.getLangOpts())) {
@@ -3048,7 +3052,7 @@ llvm::DIType *CGDebugInfo::GetPreferredNameType(const CXXRecordDecl *RD,
std::pair<llvm::DIType *, llvm::DIType *>
CGDebugInfo::CreateTypeDefinition(const RecordType *Ty) {
- RecordDecl *RD = Ty->getDecl();
+ RecordDecl *RD = Ty->getOriginalDecl()->getDefinitionOrSelf();
// Get overall information about the record type for the debug info.
llvm::DIFile *DefUnit = getOrCreateFile(RD->getLocation());
@@ -3070,7 +3074,7 @@ CGDebugInfo::CreateTypeDefinition(const RecordType *Ty) {
// Push the struct on region stack.
LexicalBlockStack.emplace_back(&*FwdDecl);
- RegionMap[Ty->getDecl()].reset(FwdDecl);
+ RegionMap[RD].reset(FwdDecl);
// Convert all the elements.
SmallVector<llvm::Metadata *, 16> EltTys;
@@ -3092,7 +3096,7 @@ CGDebugInfo::CreateTypeDefinition(const RecordType *Ty) {
CollectCXXMemberFunctions(CXXDecl, DefUnit, EltTys, FwdDecl);
LexicalBlockStack.pop_back();
- RegionMap.erase(Ty->getDecl());
+ RegionMap.erase(RD);
llvm::DINodeArray Elements = DBuilder.getOrCreateArray(EltTys);
DBuilder.replaceArrays(FwdDecl, Elements);
@@ -3101,7 +3105,7 @@ CGDebugInfo::CreateTypeDefinition(const RecordType *Ty) {
FwdDecl =
llvm::MDNode::replaceWithPermanent(llvm::TempDICompositeType(FwdDecl));
- RegionMap[Ty->getDecl()].reset(FwdDecl);
+ RegionMap[RD].reset(FwdDecl);
if (CGM.getCodeGenOpts().getDebuggerTuning() == llvm::DebuggerKind::LLDB)
if (auto *PrefDI = GetPreferredNameType(CXXDecl, DefUnit))
@@ -3651,8 +3655,9 @@ llvm::DIType *CGDebugInfo::CreateType(const MemberPointerType *Ty,
}
}
- llvm::DIType *ClassType = getOrCreateType(
- QualType(Ty->getMostRecentCXXRecordDecl()->getTypeForDecl(), 0), U);
+ CanQualType T =
+ CGM.getContext().getCanonicalTagType(Ty->getMostRecentCXXRecordDecl());
+ llvm::DIType *ClassType = getOrCreateType(T, U);
if (Ty->isMemberDataPointerType())
return DBuilder.createMemberPointerType(
getOrCreateType(Ty->getPointeeType(), U), ClassType, Size, /*Align=*/0,
@@ -3687,17 +3692,21 @@ llvm::DIType *CGDebugInfo::CreateType(const HLSLInlineSpirvType *Ty,
return nullptr;
}
-llvm::DIType *CGDebugInfo::CreateEnumType(const EnumType *Ty) {
- const EnumDecl *ED = Ty->getDecl();
+static auto getEnumInfo(CodeGenModule &CGM, llvm::DICompileUnit *TheCU,
+ const EnumType *Ty) {
+ const EnumDecl *ED = Ty->getOriginalDecl()->getDefinitionOrSelf();
uint64_t Size = 0;
uint32_t Align = 0;
- if (!ED->getTypeForDecl()->isIncompleteType()) {
- Size = CGM.getContext().getTypeSize(ED->getTypeForDecl());
+ if (ED->isComplete()) {
+ Size = CGM.getContext().getTypeSize(QualType(Ty, 0));
Align = getDeclAlignIfRequired(ED, CGM.getContext());
}
+ return std::make_tuple(ED, Size, Align, getTypeIdentifier(Ty, CGM, TheCU));
+}
- SmallString<256> Identifier = getTypeIdentifier(Ty, CGM, TheCU);
+llvm::DIType *CGDebugInfo::CreateEnumType(const EnumType *Ty) {
+ auto [ED, Size, Align, Identifier] = getEnumInfo(CGM, TheCU, Ty);
bool isImportedFromModule =
DebugTypeExtRefs && ED->isFromASTFile() && ED->getDefinition();
@@ -3732,15 +3741,7 @@ llvm::DIType *CGDebugInfo::CreateEnumType(const EnumType *Ty) {
}
llvm::DIType *CGDebugInfo::CreateTypeDefinition(const EnumType *Ty) {
- const EnumDecl *ED = Ty->getDecl();
- uint64_t Size = 0;
- uint32_t Align = 0;
- if (!ED->getTypeForDecl()->isIncompleteType()) {
- Size = CGM.getContext().getTypeSize(ED->getTypeForDecl());
- Align = getDeclAlignIfRequired(ED, CGM.getContext());
- }
-
- SmallString<256> Identifier = getTypeIdentifier(Ty, CGM, TheCU);
+ auto [ED, Size, Align, Identifier] = getEnumInfo(CGM, TheCU, Ty);
SmallVector<llvm::Metadata *, 16> Enumerators;
ED = ED->getDefinition();
@@ -3815,6 +3816,11 @@ static QualType UnwrapTypeForDebugInfo(QualType T, const ASTContext &C) {
switch (T->getTypeClass()) {
default:
return C.getQualifiedType(T.getTypePtr(), Quals);
+ case Type::Enum:
+ case Type::Record:
+ case Type::InjectedClassName:
+ return C.getQualifiedType(T->getCanonicalTypeUnqualified().getTypePtr(),
+ Quals);
case Type::TemplateSpecialization: {
const auto *Spec = cast<TemplateSpecializationType>(T);
if (Spec->isTypeAlias())
@@ -3843,11 +3849,8 @@ static QualType UnwrapTypeForDebugInfo(QualType T, const ASTContext &C) {
case Type::CountAttributed:
T = cast<CountAttributedType>(T)->desugar();
break;
- case Type::Elaborated:
- T = cast<ElaboratedType>(T)->getNamedType();
- break;
case Type::Using:
- T = cast<UsingType>(T)->getUnderlyingType();
+ T = cast<UsingType>(T)->desugar();
break;
case Type::Paren:
T = cast<ParenType>(T)->getInnerType();
@@ -3906,7 +3909,8 @@ void CGDebugInfo::completeUnusedClass(const CXXRecordDecl &D) {
completeClassData(&D);
// In case this type has no member function definitions being emitted, ensure
// it is retained
- RetainedTypes.push_back(CGM.getContext().getRecordType(&D).getAsOpaquePtr());
+ RetainedTypes.push_back(
+ CGM.getContext().getCanonicalTagType(&D).getAsOpaquePtr());
}
llvm::DIType *CGDebugInfo::getOrCreateType(QualType Ty, llvm::DIFile *Unit) {
@@ -4051,7 +4055,6 @@ llvm::DIType *CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile *Unit) {
case Type::Adjusted:
case Type::Decayed:
case Type::DeducedTemplateSpecialization:
- case Type::Elaborated:
case Type::Using:
case Type::Paren:
case Type::MacroQualified:
@@ -4094,7 +4097,7 @@ CGDebugInfo::getOrCreateLimitedType(const RecordType *Ty) {
// TODO: Currently used for context chains when limiting debug info.
llvm::DICompositeType *CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
- RecordDecl *RD = Ty->getDecl();
+ RecordDecl *RD = Ty->getOriginalDecl()->getDefinitionOrSelf();
// Get overall information about the record type for the debug info.
StringRef RDName = getClassName(RD);
@@ -4111,7 +4114,7 @@ llvm::DICompositeType *CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
// If we ended up creating the type during the context chain construction,
// just return that.
auto *T = cast_or_null<llvm::DICompositeType>(
- getTypeOrNull(CGM.getContext().getRecordType(RD)));
+ getTypeOrNull(CGM.getContext().getCanonicalTagType(RD)));
if (T && (!T->isForwardDecl() || !RD->getDefinition()))
return T;
@@ -4181,7 +4184,7 @@ llvm::DICompositeType *CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
break;
}
- RegionMap[Ty->getDecl()].reset(RealDecl);
+ RegionMap[RD].reset(RealDecl);
TypeCache[QualType(Ty, 0).getAsOpaquePtr()].reset(RealDecl);
if (const auto *TSpecial = dyn_cast<ClassTemplateSpecializationDecl>(RD))
@@ -4205,8 +4208,8 @@ void CGDebugInfo::CollectContainingType(const CXXRecordDecl *RD,
else
break;
}
- ContainingType = getOrCreateType(QualType(PBase->getTypeForDecl(), 0),
- getOrCreateFile(RD->getLocation()));
+ CanQualType T = CGM.getContext().getCanonicalTagType(PBase);
+ ContainingType = getOrCreateType(T, getOrCreateFile(RD->getLocation()));
} else if (RD->isDynamicClass())
ContainingType = RealDecl;
@@ -4412,9 +4415,10 @@ llvm::DINode *CGDebugInfo::getDeclarationOrDefinition(const Decl *D) {
// we would otherwise do to get a type for a pointee. (forward declarations in
// limited debug info, full definitions (if the type definition is available)
// in unlimited debug info)
- if (const auto *TD = dyn_cast<TypeDecl>(D))
- return getOrCreateType(CGM.getContext().getTypeDeclType(TD),
- getOrCreateFile(TD->getLocation()));
+ if (const auto *TD = dyn_cast<TypeDecl>(D)) {
+ QualType Ty = CGM.getContext().getTypeDeclType(TD);
+ return getOrCreateType(Ty, getOrCreateFile(TD->getLocation()));
+ }
auto I = DeclCache.find(D->getCanonicalDecl());
if (I != DeclCache.end()) {
@@ -5076,7 +5080,7 @@ llvm::DILocalVariable *CGDebugInfo::EmitDeclare(const VarDecl *VD,
} else if (const auto *RT = dyn_cast<RecordType>(VD->getType())) {
// If VD is an anonymous union then Storage represents value for
// all union fields.
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
if (RD->isUnion() && RD->isAnonymousStructOrUnion()) {
// GDB has trouble finding local variables in anonymous unions, so we emit
// artificial local variables for each of the members.
@@ -5536,7 +5540,7 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
cast_or_null<CXXMethodDecl>(blockDecl->getNonClosureContext()))
type = Method->getThisType();
else if (auto *RDecl = dyn_cast<CXXRecordDecl>(blockDecl->getParent()))
- type = QualType(RDecl->getTypeForDecl(), 0);
+ type = CGM.getContext().getCanonicalTagType(RDecl);
else
llvm_unreachable("unexpected block declcontext");
@@ -5626,8 +5630,9 @@ llvm::DIGlobalVariableExpression *CGDebugInfo::CollectAnonRecordDecls(
// Ignore unnamed fields, but recurse into anonymous records.
if (FieldName.empty()) {
if (const auto *RT = dyn_cast<RecordType>(Field->getType()))
- GVE = CollectAnonRecordDecls(RT->getDecl(), Unit, LineNo, LinkageName,
- Var, DContext);
+ GVE =
+ CollectAnonRecordDecls(RT->getOriginalDecl()->getDefinitionOrSelf(),
+ Unit, LineNo, LinkageName, Var, DContext);
continue;
}
// Use VarDecl's Tag, Scope and Line number.
@@ -5646,7 +5651,7 @@ static bool ReferencesAnonymousEntity(RecordType *RT) {
// But so long as it's not one of those, it doesn't matter if some sub-type
// of the record (a template parameter) can't be reconstituted - because the
// un-reconstitutable type itself will carry its own name.
- const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
+ const auto *RD = dyn_cast<CXXRecordDecl>(RT->getOriginalDecl());
if (!RD)
return false;
if (!RD->getIdentifier())
@@ -5705,15 +5710,15 @@ struct ReconstitutableType : public RecursiveASTVisitor<ReconstitutableType> {
}
return true;
}
- bool TraverseEnumType(EnumType *ET) {
+ bool TraverseEnumType(EnumType *ET, bool = false) {
// Unnamed enums can't be reconstituted due to a lack of column info we
// produce in the DWARF, so we can't get Clang's full name back.
- if (const auto *ED = dyn_cast<EnumDecl>(ET->getDecl())) {
+ if (const auto *ED = dyn_cast<EnumDecl>(ET->getOriginalDecl())) {
if (!ED->getIdentifier()) {
Reconstitutable = false;
return false;
}
- if (!ED->isExternallyVisible()) {
+ if (!ED->getDefinitionOrSelf()->isExternallyVisible()) {
Reconstitutable = false;
return false;
}
@@ -5726,7 +5731,7 @@ struct ReconstitutableType : public RecursiveASTVisitor<ReconstitutableType> {
Reconstitutable &= !FT->getNoReturnAttr();
return Reconstitutable;
}
- bool VisitRecordType(RecordType *RT) {
+ bool VisitRecordType(RecordType *RT, bool = false) {
if (ReferencesAnonymousEntity(RT)) {
Reconstitutable = false;
return false;
@@ -5909,7 +5914,8 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
// variable for each member of the anonymous union so that it's possible
// to find the name of any field in the union.
if (T->isUnionType() && DeclName.empty()) {
- const RecordDecl *RD = T->castAs<RecordType>()->getDecl();
+ const RecordDecl *RD =
+ T->castAs<RecordType>()->getOriginalDecl()->getDefinitionOrSelf();
assert(RD->isAnonymousStructOrUnion() &&
"unnamed non-anonymous struct or union?");
GVE = CollectAnonRecordDecls(RD, Unit, LineNo, LinkageName, Var, DContext);
@@ -5956,8 +5962,6 @@ void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD, const APValue &Init) {
if (const auto *ECD = dyn_cast<EnumConstantDecl>(VD)) {
const auto *ED = cast<EnumDecl>(ECD->getDeclContext());
- assert(isa<EnumType>(ED->getTypeForDecl()) && "Enum without EnumType?");
-
if (CGM.getCodeGenOpts().EmitCodeView) {
// If CodeView, emit enums as global variables, unless they are defined
// inside a class. We do this because MSVC doesn't emit S_CONSTANTs for
@@ -5969,10 +5973,9 @@ void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD, const APValue &Init) {
// If not CodeView, emit DW_TAG_enumeration_type if necessary. For
// example: for "enum { ZERO };", a DW_TAG_enumeration_type is created the
// first time `ZERO` is referenced in a function.
- llvm::DIType *EDTy =
- getOrCreateType(QualType(ED->getTypeForDecl(), 0), Unit);
- assert (EDTy->getTag() == llvm::dwarf::DW_TAG_enumeration_type);
- (void)EDTy;
+ CanQualType T = CGM.getContext().getCanonicalTagType(ED);
+ [[maybe_unused]] llvm::DIType *EDTy = getOrCreateType(T, Unit);
+ assert(EDTy->getTag() == llvm::dwarf::DW_TAG_enumeration_type);
return;
}
}
@@ -5991,7 +5994,7 @@ void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD, const APValue &Init) {
// FIXME: This is probably unnecessary, since Ty should reference RD
// through its scope.
RetainedTypes.push_back(
- CGM.getContext().getRecordType(RD).getAsOpaquePtr());
+ CGM.getContext().getCanonicalTagType(RD).getAsOpaquePtr());
return;
}
diff --git a/clang/lib/CodeGen/CGDecl.cpp b/clang/lib/CodeGen/CGDecl.cpp
index ff2dada..9df1220 100644
--- a/clang/lib/CodeGen/CGDecl.cpp
+++ b/clang/lib/CodeGen/CGDecl.cpp
@@ -113,12 +113,14 @@ void CodeGenFunction::EmitDecl(const Decl &D, bool EvaluateConditionDecl) {
case Decl::CXXRecord: // struct/union/class X; [C++]
if (CGDebugInfo *DI = getDebugInfo())
if (cast<RecordDecl>(D).getDefinition())
- DI->EmitAndRetainType(getContext().getRecordType(cast<RecordDecl>(&D)));
+ DI->EmitAndRetainType(
+ getContext().getCanonicalTagType(cast<RecordDecl>(&D)));
return;
case Decl::Enum: // enum X;
if (CGDebugInfo *DI = getDebugInfo())
if (cast<EnumDecl>(D).getDefinition())
- DI->EmitAndRetainType(getContext().getEnumType(cast<EnumDecl>(&D)));
+ DI->EmitAndRetainType(
+ getContext().getCanonicalTagType(cast<EnumDecl>(&D)));
return;
case Decl::Function: // void X();
case Decl::EnumConstant: // enum ? { X = ? }
@@ -1351,30 +1353,27 @@ void CodeGenFunction::EmitAutoVarDecl(const VarDecl &D) {
}
/// Emit a lifetime.begin marker if some criteria are satisfied.
-/// \return a pointer to the temporary size Value if a marker was emitted, null
-/// otherwise
-llvm::Value *CodeGenFunction::EmitLifetimeStart(llvm::TypeSize Size,
- llvm::Value *Addr) {
+/// \return whether the marker was emitted.
+bool CodeGenFunction::EmitLifetimeStart(llvm::Value *Addr) {
if (!ShouldEmitLifetimeMarkers)
- return nullptr;
+ return false;
assert(Addr->getType()->getPointerAddressSpace() ==
CGM.getDataLayout().getAllocaAddrSpace() &&
"Pointer should be in alloca address space");
- llvm::Value *SizeV = llvm::ConstantInt::get(
- Int64Ty, Size.isScalable() ? -1 : Size.getFixedValue());
- llvm::CallInst *C =
- Builder.CreateCall(CGM.getLLVMLifetimeStartFn(), {SizeV, Addr});
+ llvm::CallInst *C = Builder.CreateCall(CGM.getLLVMLifetimeStartFn(), {Addr});
C->setDoesNotThrow();
- return SizeV;
+ return true;
}
-void CodeGenFunction::EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr) {
+void CodeGenFunction::EmitLifetimeEnd(llvm::Value *Addr) {
+ if (!ShouldEmitLifetimeMarkers)
+ return;
+
assert(Addr->getType()->getPointerAddressSpace() ==
CGM.getDataLayout().getAllocaAddrSpace() &&
"Pointer should be in alloca address space");
- llvm::CallInst *C =
- Builder.CreateCall(CGM.getLLVMLifetimeEndFn(), {Size, Addr});
+ llvm::CallInst *C = Builder.CreateCall(CGM.getLLVMLifetimeEndFn(), {Addr});
C->setDoesNotThrow();
}
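The two definitions above set the lifetime-marker interface used throughout the rest of this patch: EmitLifetimeStart takes only the address and reports whether a marker was emitted, and EmitLifetimeEnd likewise takes only the address and bails out itself when markers are disabled. The caller-side pattern, as a sketch of what the CGCall.cpp and CGExpr.cpp hunks above do:

// Sketch only, not part of the patch.
bool NeedLifetimeEnd = EmitLifetimeStart(AllocaAddr.getPointer());
// ... use the temporary (store into it, pass it to the call, ...) ...
if (NeedLifetimeEnd)
  EmitLifetimeEnd(AllocaAddr.getPointer());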
@@ -1571,7 +1570,7 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
;
if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
- const auto *RD = RecordTy->getDecl();
+ const auto *RD = RecordTy->getOriginalDecl()->getDefinitionOrSelf();
const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD);
if ((CXXRD && !CXXRD->hasTrivialDestructor()) ||
RD->isNonTrivialToPrimitiveDestroy()) {
@@ -1632,9 +1631,8 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
// is rare.
if (!Bypasses.IsBypassed(&D) &&
!(!getLangOpts().CPlusPlus && hasLabelBeenSeenInCurrentScope())) {
- llvm::TypeSize Size = CGM.getDataLayout().getTypeAllocSize(allocaTy);
- emission.SizeForLifetimeMarkers =
- EmitLifetimeStart(Size, AllocaAddr.getPointer());
+ emission.UseLifetimeMarkers =
+ EmitLifetimeStart(AllocaAddr.getPointer());
}
} else {
assert(!emission.useLifetimeMarkers());
@@ -1727,9 +1725,8 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
// Make sure we call @llvm.lifetime.end.
if (emission.useLifetimeMarkers())
- EHStack.pushCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker,
- emission.getOriginalAllocatedAddress(),
- emission.getSizeForLifetimeMarkers());
+ EHStack.pushCleanup<CallLifetimeEnd>(
+ NormalEHLifetimeMarker, emission.getOriginalAllocatedAddress());
// Analogous to lifetime markers, we use a 'cleanup' to emit fake.use
// calls for local variables. We are exempting volatile variables and
@@ -2732,7 +2729,10 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
// Don't push a cleanup in a thunk for a method that will also emit a
// cleanup.
if (Ty->isRecordType() && !CurFuncIsThunk &&
- Ty->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
+ Ty->castAs<RecordType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->isParamDestroyedInCallee()) {
if (QualType::DestructionKind DtorKind =
D.needsDestruction(getContext())) {
assert((DtorKind == QualType::DK_cxx_destructor ||
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
index ed35a05..d5df6dd 100644
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -408,9 +408,10 @@ pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
if (const RecordType *RT =
E->getType()->getBaseElementTypeUnsafe()->getAs<RecordType>()) {
// Get the destructor for the reference temporary.
- if (auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl());
+ if (auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getOriginalDecl());
ClassDecl && !ClassDecl->hasTrivialDestructor())
- ReferenceTemporaryDtor = ClassDecl->getDestructor();
+ ReferenceTemporaryDtor =
+ ClassDecl->getDefinitionOrSelf()->getDestructor();
}
if (!ReferenceTemporaryDtor)
@@ -588,11 +589,9 @@ EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
} else {
switch (M->getStorageDuration()) {
case SD_Automatic:
- if (auto *Size = EmitLifetimeStart(
- CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
- Alloca.getPointer())) {
+ if (EmitLifetimeStart(Alloca.getPointer())) {
pushCleanupAfterFullExpr<CallLifetimeEnd>(NormalEHLifetimeMarker,
- Alloca, Size);
+ Alloca);
}
break;
@@ -623,11 +622,8 @@ EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
Block, llvm::BasicBlock::iterator(Block->back())));
}
- if (auto *Size = EmitLifetimeStart(
- CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
- Alloca.getPointer())) {
- pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, Alloca,
- Size);
+ if (EmitLifetimeStart(Alloca.getPointer())) {
+ pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, Alloca);
}
if (OldConditional) {
@@ -1209,9 +1205,10 @@ llvm::Value *CodeGenFunction::GetCountedByFieldExprGEP(
return nullptr;
Indices.push_back(Builder.getInt32(0));
- return Builder.CreateInBoundsGEP(
- ConvertType(QualType(RD->getTypeForDecl(), 0)), Res,
- RecIndicesTy(llvm::reverse(Indices)), "counted_by.gep");
+ CanQualType T = CGM.getContext().getCanonicalTagType(RD);
+ return Builder.CreateInBoundsGEP(ConvertType(T), Res,
+ RecIndicesTy(llvm::reverse(Indices)),
+ "counted_by.gep");
}
/// This method is typically called in contexts where we can't generate
@@ -1759,9 +1756,11 @@ static bool isConstantEmittableObjectType(QualType type) {
// Otherwise, all object types satisfy this except C++ classes with
// mutable subobjects or non-trivial copy/destroy behavior.
if (const auto *RT = dyn_cast<RecordType>(type))
- if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
+ if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getOriginalDecl())) {
+ RD = RD->getDefinitionOrSelf();
if (RD->hasMutableFields() || !RD->isTrivial())
return false;
+ }
return true;
}
@@ -1922,8 +1921,10 @@ static bool getRangeForType(CodeGenFunction &CGF, QualType Ty,
llvm::APInt &Min, llvm::APInt &End,
bool StrictEnums, bool IsBool) {
const EnumType *ET = Ty->getAs<EnumType>();
- bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums &&
- ET && !ET->getDecl()->isFixed();
+ const EnumDecl *ED =
+ ET ? ET->getOriginalDecl()->getDefinitionOrSelf() : nullptr;
+ bool IsRegularCPlusPlusEnum =
+ CGF.getLangOpts().CPlusPlus && StrictEnums && ET && !ED->isFixed();
if (!IsBool && !IsRegularCPlusPlusEnum)
return false;
@@ -1931,7 +1932,6 @@ static bool getRangeForType(CodeGenFunction &CGF, QualType Ty,
Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0);
End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2);
} else {
- const EnumDecl *ED = ET->getDecl();
ED->getValueRange(End, Min);
}
return true;
@@ -4307,7 +4307,9 @@ static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase) {
const auto *PointeeT = PtrT->getPointeeType()
->getUnqualifiedDesugaredType();
if (const auto *RecT = dyn_cast<RecordType>(PointeeT))
- return RecT->getDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
+ return RecT->getOriginalDecl()
+ ->getMostRecentDecl()
+ ->hasAttr<BPFPreserveAccessIndexAttr>();
return false;
}
@@ -5073,10 +5075,12 @@ LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field,
Address Base = GetAddressOfBaseClass(
LambdaLV.getAddress(), ThisTy, BasePathArray.begin(),
BasePathArray.end(), /*NullCheckValue=*/false, SourceLocation());
- LambdaLV = MakeAddrLValue(Base, QualType{LambdaTy->getTypeForDecl(), 0});
+ CanQualType T = getContext().getCanonicalTagType(LambdaTy);
+ LambdaLV = MakeAddrLValue(Base, T);
}
} else {
- QualType LambdaTagType = getContext().getTagDeclType(Field->getParent());
+ CanQualType LambdaTagType =
+ getContext().getCanonicalTagType(Field->getParent());
LambdaLV = MakeNaturalAlignAddrLValue(ThisValue, LambdaTagType);
}
return EmitLValueForField(LambdaLV, Field);
@@ -5201,7 +5205,7 @@ LValue CodeGenFunction::EmitLValueForField(LValue base, const FieldDecl *field,
}
} else {
llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType(
- getContext().getRecordType(rec), rec->getLocation());
+ getContext().getCanonicalTagType(rec), rec->getLocation());
Addr = Builder.CreatePreserveStructAccessIndex(
Addr, Idx, getDebugInfoFIndex(rec, field->getFieldIndex()),
DbgInfo);
@@ -5663,7 +5667,9 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
case CK_DerivedToBase: {
const auto *DerivedClassTy =
E->getSubExpr()->getType()->castAs<RecordType>();
- auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());
+ auto *DerivedClassDecl =
+ cast<CXXRecordDecl>(DerivedClassTy->getOriginalDecl())
+ ->getDefinitionOrSelf();
LValue LV = EmitLValue(E->getSubExpr());
Address This = LV.getAddress();
@@ -5683,7 +5689,9 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
return EmitAggExprToLValue(E);
case CK_BaseToDerived: {
const auto *DerivedClassTy = E->getType()->castAs<RecordType>();
- auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());
+ auto *DerivedClassDecl =
+ cast<CXXRecordDecl>(DerivedClassTy->getOriginalDecl())
+ ->getDefinitionOrSelf();
LValue LV = EmitLValue(E->getSubExpr());
@@ -5784,13 +5792,10 @@ LValue CodeGenFunction::EmitHLSLOutArgExpr(const HLSLOutArgExpr *E,
llvm::Value *Addr = TempLV.getAddress().getBasePointer();
llvm::Type *ElTy = ConvertTypeForMem(TempLV.getType());
- llvm::TypeSize Sz = CGM.getDataLayout().getTypeAllocSize(ElTy);
-
- llvm::Value *LifetimeSize = EmitLifetimeStart(Sz, Addr);
+ EmitLifetimeStart(Addr);
Address TmpAddr(Addr, ElTy, TempLV.getAlignment());
- Args.addWriteback(BaseLV, TmpAddr, nullptr, E->getWritebackCast(),
- LifetimeSize);
+ Args.addWriteback(BaseLV, TmpAddr, nullptr, E->getWritebackCast());
Args.add(RValue::get(TmpAddr, *this), Ty);
return TempLV;
}
@@ -6741,7 +6746,7 @@ void CodeGenFunction::FlattenAccessAndType(
WorkList.emplace_back(CAT->getElementType(), IdxListCopy);
}
} else if (const auto *RT = dyn_cast<RecordType>(T)) {
- const RecordDecl *Record = RT->getDecl();
+ const RecordDecl *Record = RT->getOriginalDecl()->getDefinitionOrSelf();
assert(!Record->isUnion() && "Union types not supported in flat cast.");
const CXXRecordDecl *CXXD = dyn_cast<CXXRecordDecl>(Record);
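(Illustration, not part of the patch.) The dominant rewrite in these CGExpr.cpp hunks, and in most files below, replaces RT->getDecl() with RT->getOriginalDecl()->getDefinitionOrSelf(): first take the declaration the tag type was written against, then resolve it to the defining declaration when one exists. Assuming the post-patch RecordType interface, the recurring lookup amounts to a small helper (resolveRecord is an illustrative name, not something the patch adds):

    // Sketch only. getOriginalDecl() names the declaration the type refers
    // to; getDefinitionOrSelf() upgrades it to the defining declaration when
    // the record is complete, otherwise returns the same declaration.
    static const clang::RecordDecl *resolveRecord(const clang::RecordType *RT) {
      return RT->getOriginalDecl()->getDefinitionOrSelf();
    }

Call sites that only need attribute lookups take getMostRecentDecl() instead, as in the BPFPreserveAccessIndexAttr check above.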
diff --git a/clang/lib/CodeGen/CGExprAgg.cpp b/clang/lib/CodeGen/CGExprAgg.cpp
index cad6731..04e125c 100644
--- a/clang/lib/CodeGen/CGExprAgg.cpp
+++ b/clang/lib/CodeGen/CGExprAgg.cpp
@@ -272,7 +272,7 @@ bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
if (!RecordTy) return false;
// Don't mess with non-trivial C++ types.
- RecordDecl *Record = RecordTy->getDecl();
+ RecordDecl *Record = RecordTy->getOriginalDecl()->getDefinitionOrSelf();
if (isa<CXXRecordDecl>(Record) &&
(cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor() ||
!cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
@@ -300,16 +300,12 @@ void AggExprEmitter::withReturnValueSlot(
Address RetAddr = Address::invalid();
EHScopeStack::stable_iterator LifetimeEndBlock;
- llvm::Value *LifetimeSizePtr = nullptr;
llvm::IntrinsicInst *LifetimeStartInst = nullptr;
if (!UseTemp) {
RetAddr = Dest.getAddress();
} else {
RetAddr = CGF.CreateMemTempWithoutCast(RetTy, "tmp");
- llvm::TypeSize Size =
- CGF.CGM.getDataLayout().getTypeAllocSize(CGF.ConvertTypeForMem(RetTy));
- LifetimeSizePtr = CGF.EmitLifetimeStart(Size, RetAddr.getBasePointer());
- if (LifetimeSizePtr) {
+ if (CGF.EmitLifetimeStart(RetAddr.getBasePointer())) {
LifetimeStartInst =
cast<llvm::IntrinsicInst>(std::prev(Builder.GetInsertPoint()));
assert(LifetimeStartInst->getIntrinsicID() ==
@@ -317,7 +313,7 @@ void AggExprEmitter::withReturnValueSlot(
"Last insertion wasn't a lifetime.start?");
CGF.pushFullExprCleanup<CodeGenFunction::CallLifetimeEnd>(
- NormalEHLifetimeMarker, RetAddr, LifetimeSizePtr);
+ NormalEHLifetimeMarker, RetAddr);
LifetimeEndBlock = CGF.EHStack.stable_begin();
}
}
@@ -338,7 +334,7 @@ void AggExprEmitter::withReturnValueSlot(
// Since we're not guaranteed to be in an ExprWithCleanups, clean up
// eagerly.
CGF.DeactivateCleanupBlock(LifetimeEndBlock, LifetimeStartInst);
- CGF.EmitLifetimeEnd(LifetimeSizePtr, RetAddr.getBasePointer());
+ CGF.EmitLifetimeEnd(RetAddr.getBasePointer());
}
}
@@ -428,7 +424,10 @@ AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
assert(ArrayType && "std::initializer_list constructed from non-array");
- RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
+ RecordDecl *Record = E->getType()
+ ->castAs<RecordType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf();
RecordDecl::field_iterator Field = Record->field_begin();
assert(Field != Record->field_end() &&
Ctx.hasSameType(Field->getType()->getPointeeType(),
@@ -1810,7 +1809,10 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr(
// the disadvantage is that the generated code is more difficult for
// the optimizer, especially with bitfields.
unsigned NumInitElements = InitExprs.size();
- RecordDecl *record = ExprToVisit->getType()->castAs<RecordType>()->getDecl();
+ RecordDecl *record = ExprToVisit->getType()
+ ->castAs<RecordType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf();
// We'll need to enter cleanup scopes in case any of the element
// initializers throws an exception.
@@ -2120,7 +2122,7 @@ static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
// referencee. InitListExprs for unions and arrays can't have references.
if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
if (!RT->isUnionType()) {
- RecordDecl *SD = RT->getDecl();
+ RecordDecl *SD = RT->getOriginalDecl()->getDefinitionOrSelf();
CharUnits NumNonZeroBytes = CharUnits::Zero();
unsigned ILEElement = 0;
@@ -2172,7 +2174,7 @@ static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
if (CGF.getLangOpts().CPlusPlus)
if (const RecordType *RT = CGF.getContext()
.getBaseElementType(E->getType())->getAs<RecordType>()) {
- const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getOriginalDecl());
if (RD->hasUserDeclaredConstructor())
return;
}
@@ -2293,7 +2295,8 @@ void CodeGenFunction::EmitAggregateCopy(LValue Dest, LValue Src, QualType Ty,
if (getLangOpts().CPlusPlus) {
if (const RecordType *RT = Ty->getAs<RecordType>()) {
- CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
+ auto *Record =
+ cast<CXXRecordDecl>(RT->getOriginalDecl())->getDefinitionOrSelf();
assert((Record->hasTrivialCopyConstructor() ||
Record->hasTrivialCopyAssignment() ||
Record->hasTrivialMoveConstructor() ||
@@ -2377,7 +2380,7 @@ void CodeGenFunction::EmitAggregateCopy(LValue Dest, LValue Src, QualType Ty,
if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
// fall through
} else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
- RecordDecl *Record = RecordTy->getDecl();
+ RecordDecl *Record = RecordTy->getOriginalDecl()->getDefinitionOrSelf();
if (Record->hasObjectMember()) {
CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
SizeVal);
@@ -2386,7 +2389,9 @@ void CodeGenFunction::EmitAggregateCopy(LValue Dest, LValue Src, QualType Ty,
} else if (Ty->isArrayType()) {
QualType BaseType = getContext().getBaseElementType(Ty);
if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
- if (RecordTy->getDecl()->hasObjectMember()) {
+ if (RecordTy->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->hasObjectMember()) {
CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
SizeVal);
return;
diff --git a/clang/lib/CodeGen/CGExprCXX.cpp b/clang/lib/CodeGen/CGExprCXX.cpp
index 49d5d8a..57d7eec 100644
--- a/clang/lib/CodeGen/CGExprCXX.cpp
+++ b/clang/lib/CodeGen/CGExprCXX.cpp
@@ -181,7 +181,7 @@ static CXXRecordDecl *getCXXRecord(const Expr *E) {
if (const PointerType *PTy = T->getAs<PointerType>())
T = PTy->getPointeeType();
const RecordType *Ty = T->castAs<RecordType>();
- return cast<CXXRecordDecl>(Ty->getDecl());
+ return cast<CXXRecordDecl>(Ty->getOriginalDecl())->getDefinitionOrSelf();
}
// Note: This function also emit constructor calls to support a MSVC
@@ -206,7 +206,7 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
}
bool HasQualifier = ME->hasQualifier();
- NestedNameSpecifier *Qualifier = HasQualifier ? ME->getQualifier() : nullptr;
+ NestedNameSpecifier Qualifier = ME->getQualifier();
bool IsArrow = ME->isArrow();
const Expr *Base = ME->getBase();
@@ -217,7 +217,7 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue,
- bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow,
+ bool HasQualifier, NestedNameSpecifier Qualifier, bool IsArrow,
const Expr *Base, llvm::CallBase **CallOrInvoke) {
assert(isa<CXXMemberCallExpr>(CE) || isa<CXXOperatorCallExpr>(CE));
@@ -361,7 +361,7 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
if (sanitizePerformTypeCheck())
EmitTypeCheck(CodeGenFunction::TCK_MemberCall, CallLoc,
This.emitRawPointer(*this),
- C.getRecordType(CalleeDecl->getParent()),
+ C.getCanonicalTagType(CalleeDecl->getParent()),
/*Alignment=*/CharUnits::Zero(), SkippedChecks);
// C++ [class.virtual]p12:
@@ -461,9 +461,9 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
else
This = EmitLValue(BaseExpr, KnownNonNull).getAddress();
- EmitTypeCheck(
- TCK_MemberCall, E->getExprLoc(), This.emitRawPointer(*this),
- QualType(MPT->getMostRecentCXXRecordDecl()->getTypeForDecl(), 0));
+ CanQualType ClassType = CGM.getContext().getCanonicalTagType(RD);
+ EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This.emitRawPointer(*this),
+ ClassType);
// Get the member function pointer.
llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);
@@ -476,8 +476,7 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
CallArgList Args;
- QualType ThisType =
- getContext().getPointerType(getContext().getTagDeclType(RD));
+ QualType ThisType = getContext().getPointerType(ClassType);
// Push the this ptr.
Args.add(RValue::get(ThisPtrForCall), ThisType);
@@ -498,7 +497,7 @@ RValue CodeGenFunction::EmitCXXOperatorMemberCallExpr(
assert(MD->isImplicitObjectMemberFunction() &&
"Trying to emit a member call expr on a static method!");
return EmitCXXMemberOrOperatorMemberCallExpr(
- E, MD, ReturnValue, /*HasQualifier=*/false, /*Qualifier=*/nullptr,
+ E, MD, ReturnValue, /*HasQualifier=*/false, /*Qualifier=*/std::nullopt,
/*IsArrow=*/false, E->getArg(0), CallOrInvoke);
}
@@ -1237,11 +1236,12 @@ void CodeGenFunction::EmitNewArrayInitializer(
// usually use memset.
if (auto *ILE = dyn_cast<InitListExpr>(Init)) {
if (const RecordType *RType = ILE->getType()->getAs<RecordType>()) {
- if (RType->getDecl()->isStruct()) {
+ const RecordDecl *RD = RType->getOriginalDecl()->getDefinitionOrSelf();
+ if (RD->isStruct()) {
unsigned NumElements = 0;
- if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RType->getDecl()))
+ if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD))
NumElements = CXXRD->getNumBases();
- for (auto *Field : RType->getDecl()->fields())
+ for (auto *Field : RD->fields())
if (!Field->isUnnamedBitField())
++NumElements;
// FIXME: Recurse into nested InitListExprs.
@@ -1687,9 +1687,11 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
QualType AlignValT = sizeType;
if (allocatorType->getNumParams() > IndexOfAlignArg) {
AlignValT = allocatorType->getParamType(IndexOfAlignArg);
- assert(getContext().hasSameUnqualifiedType(
- AlignValT->castAs<EnumType>()->getDecl()->getIntegerType(),
- sizeType) &&
+ assert(getContext().hasSameUnqualifiedType(AlignValT->castAs<EnumType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->getIntegerType(),
+ sizeType) &&
"wrong type for alignment parameter");
++ParamsToSkip;
} else {
@@ -1972,7 +1974,8 @@ static bool EmitObjectDelete(CodeGenFunction &CGF,
// destructor is virtual, we'll just emit the vcall and return.
const CXXDestructorDecl *Dtor = nullptr;
if (const RecordType *RT = ElementType->getAs<RecordType>()) {
- CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ auto *RD =
+ cast<CXXRecordDecl>(RT->getOriginalDecl())->getDefinitionOrSelf();
if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
Dtor = RD->getDestructor();
diff --git a/clang/lib/CodeGen/CGExprConstant.cpp b/clang/lib/CodeGen/CGExprConstant.cpp
index 715bd39..a96c151 100644
--- a/clang/lib/CodeGen/CGExprConstant.cpp
+++ b/clang/lib/CodeGen/CGExprConstant.cpp
@@ -714,7 +714,10 @@ static bool EmitDesignatedInitUpdater(ConstantEmitter &Emitter,
}
bool ConstStructBuilder::Build(const InitListExpr *ILE, bool AllowOverwrite) {
- RecordDecl *RD = ILE->getType()->castAs<RecordType>()->getDecl();
+ RecordDecl *RD = ILE->getType()
+ ->castAs<RecordType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf();
const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
unsigned FieldNo = -1;
@@ -977,7 +980,8 @@ bool ConstStructBuilder::DoZeroInitPadding(const ASTRecordLayout &Layout,
llvm::Constant *ConstStructBuilder::Finalize(QualType Type) {
Type = Type.getNonReferenceType();
- RecordDecl *RD = Type->castAs<RecordType>()->getDecl();
+ RecordDecl *RD =
+ Type->castAs<RecordType>()->getOriginalDecl()->getDefinitionOrSelf();
llvm::Type *ValTy = CGM.getTypes().ConvertType(Type);
return Builder.build(ValTy, RD->hasFlexibleArrayMember());
}
@@ -1000,7 +1004,8 @@ llvm::Constant *ConstStructBuilder::BuildStruct(ConstantEmitter &Emitter,
ConstantAggregateBuilder Const(Emitter.CGM);
ConstStructBuilder Builder(Emitter, Const, CharUnits::Zero());
- const RecordDecl *RD = ValTy->castAs<RecordType>()->getDecl();
+ const RecordDecl *RD =
+ ValTy->castAs<RecordType>()->getOriginalDecl()->getDefinitionOrSelf();
const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD);
if (!Builder.Build(Val, RD, false, CD, CharUnits::Zero()))
return nullptr;
@@ -1506,7 +1511,9 @@ public:
llvm::Type *ValTy = CGM.getTypes().ConvertType(destType);
bool HasFlexibleArray = false;
if (const auto *RT = destType->getAs<RecordType>())
- HasFlexibleArray = RT->getDecl()->hasFlexibleArrayMember();
+ HasFlexibleArray = RT->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->hasFlexibleArrayMember();
return Const.build(ValTy, HasFlexibleArray);
}
@@ -2640,7 +2647,9 @@ static llvm::Constant *EmitNullConstant(CodeGenModule &CGM,
}
const CXXRecordDecl *base =
- cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(
+ I.getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
// Ignore empty bases.
if (isEmptyRecordForLayout(CGM.getContext(), I.getType()) ||
@@ -2679,8 +2688,10 @@ static llvm::Constant *EmitNullConstant(CodeGenModule &CGM,
// Fill in the virtual bases, if we're working with the complete object.
if (CXXR && asCompleteObject) {
for (const auto &I : CXXR->vbases()) {
- const CXXRecordDecl *base =
- cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
+ const auto *base =
+ cast<CXXRecordDecl>(
+ I.getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
// Ignore empty bases.
if (isEmptyRecordForLayout(CGM.getContext(), I.getType()))
@@ -2746,7 +2757,9 @@ llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
}
if (const RecordType *RT = T->getAs<RecordType>())
- return ::EmitNullConstant(*this, RT->getDecl(), /*complete object*/ true);
+ return ::EmitNullConstant(*this,
+ RT->getOriginalDecl()->getDefinitionOrSelf(),
+ /*complete object*/ true);
assert(T->isMemberDataPointerType() &&
"Should only see pointers to data members here!");
diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp
index 44931d0..155b80d 100644
--- a/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/clang/lib/CodeGen/CGExprScalar.cpp
@@ -3515,7 +3515,9 @@ Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
case OffsetOfNode::Field: {
FieldDecl *MemberDecl = ON.getField();
- RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
+ RecordDecl *RD = CurrentType->castAs<RecordType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf();
const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
// Compute the index of the field in its parent.
@@ -3548,15 +3550,16 @@ Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
continue;
}
- RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
- const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
+ const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(
+ CurrentType->castAs<RecordType>()->getOriginalDecl());
// Save the element type.
CurrentType = ON.getBase()->getType();
// Compute the offset to the base.
auto *BaseRT = CurrentType->castAs<RecordType>();
- auto *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
+ auto *BaseRD =
+ cast<CXXRecordDecl>(BaseRT->getOriginalDecl())->getDefinitionOrSelf();
CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
break;
@@ -4183,9 +4186,8 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
return phi;
}
-/// Emit pointer + index arithmetic.
-static Value *emitPointerArithmetic(CodeGenFunction &CGF,
- const BinOpInfo &op,
+/// This function is used for BO_Add/BO_Sub/BO_AddAssign/BO_SubAssign.
+static Value *emitPointerArithmetic(CodeGenFunction &CGF, const BinOpInfo &op,
bool isSubtraction) {
// Must have binary (not unary) expr here. Unary pointer
// increment/decrement doesn't use this path.
@@ -4202,11 +4204,19 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
std::swap(pointerOperand, indexOperand);
}
+ return CGF.EmitPointerArithmetic(expr, pointerOperand, pointer, indexOperand,
+ index, isSubtraction);
+}
+
+/// Emit pointer + index arithmetic.
+llvm::Value *CodeGenFunction::EmitPointerArithmetic(
+ const BinaryOperator *BO, Expr *pointerOperand, llvm::Value *pointer,
+ Expr *indexOperand, llvm::Value *index, bool isSubtraction) {
bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();
unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
- auto &DL = CGF.CGM.getDataLayout();
- auto PtrTy = cast<llvm::PointerType>(pointer->getType());
+ auto &DL = CGM.getDataLayout();
+ auto *PtrTy = cast<llvm::PointerType>(pointer->getType());
// Some versions of glibc and gcc use idioms (particularly in their malloc
// routines) that add a pointer-sized integer (known to be a pointer value)
@@ -4227,79 +4237,77 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
//
// Note that we do not suppress the pointer overflow check in this case.
if (BinaryOperator::isNullPointerArithmeticExtension(
- CGF.getContext(), op.Opcode, expr->getLHS(), expr->getRHS())) {
- Value *Ptr = CGF.Builder.CreateIntToPtr(index, pointer->getType());
- if (CGF.getLangOpts().PointerOverflowDefined ||
- !CGF.SanOpts.has(SanitizerKind::PointerOverflow) ||
- NullPointerIsDefined(CGF.Builder.GetInsertBlock()->getParent(),
+ getContext(), BO->getOpcode(), pointerOperand, indexOperand)) {
+ llvm::Value *Ptr = Builder.CreateIntToPtr(index, pointer->getType());
+ if (getLangOpts().PointerOverflowDefined ||
+ !SanOpts.has(SanitizerKind::PointerOverflow) ||
+ NullPointerIsDefined(Builder.GetInsertBlock()->getParent(),
PtrTy->getPointerAddressSpace()))
return Ptr;
// The inbounds GEP of null is valid iff the index is zero.
auto CheckOrdinal = SanitizerKind::SO_PointerOverflow;
auto CheckHandler = SanitizerHandler::PointerOverflow;
- SanitizerDebugLocation SanScope(&CGF, {CheckOrdinal}, CheckHandler);
- Value *IsZeroIndex = CGF.Builder.CreateIsNull(index);
- llvm::Constant *StaticArgs[] = {
- CGF.EmitCheckSourceLocation(op.E->getExprLoc())};
+ SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
+ llvm::Value *IsZeroIndex = Builder.CreateIsNull(index);
+ llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(BO->getExprLoc())};
llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);
- Value *IntPtr = llvm::Constant::getNullValue(IntPtrTy);
- Value *ComputedGEP = CGF.Builder.CreateZExtOrTrunc(index, IntPtrTy);
- Value *DynamicArgs[] = {IntPtr, ComputedGEP};
- CGF.EmitCheck({{IsZeroIndex, CheckOrdinal}}, CheckHandler, StaticArgs,
- DynamicArgs);
+ llvm::Value *IntPtr = llvm::Constant::getNullValue(IntPtrTy);
+ llvm::Value *ComputedGEP = Builder.CreateZExtOrTrunc(index, IntPtrTy);
+ llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
+ EmitCheck({{IsZeroIndex, CheckOrdinal}}, CheckHandler, StaticArgs,
+ DynamicArgs);
return Ptr;
}
if (width != DL.getIndexTypeSizeInBits(PtrTy)) {
// Zero-extend or sign-extend the pointer value according to
// whether the index is signed or not.
- index = CGF.Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned,
- "idx.ext");
+ index = Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned,
+ "idx.ext");
}
// If this is subtraction, negate the index.
if (isSubtraction)
- index = CGF.Builder.CreateNeg(index, "idx.neg");
+ index = Builder.CreateNeg(index, "idx.neg");
- if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
- CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(),
- /*Accessed*/ false);
+ if (SanOpts.has(SanitizerKind::ArrayBounds))
+ EmitBoundsCheck(BO, pointerOperand, index, indexOperand->getType(),
+ /*Accessed*/ false);
- const PointerType *pointerType
- = pointerOperand->getType()->getAs<PointerType>();
+ const PointerType *pointerType =
+ pointerOperand->getType()->getAs<PointerType>();
if (!pointerType) {
QualType objectType = pointerOperand->getType()
- ->castAs<ObjCObjectPointerType>()
- ->getPointeeType();
- llvm::Value *objectSize
- = CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType));
+ ->castAs<ObjCObjectPointerType>()
+ ->getPointeeType();
+ llvm::Value *objectSize =
+ CGM.getSize(getContext().getTypeSizeInChars(objectType));
- index = CGF.Builder.CreateMul(index, objectSize);
+ index = Builder.CreateMul(index, objectSize);
- Value *result =
- CGF.Builder.CreateGEP(CGF.Int8Ty, pointer, index, "add.ptr");
- return CGF.Builder.CreateBitCast(result, pointer->getType());
+ llvm::Value *result = Builder.CreateGEP(Int8Ty, pointer, index, "add.ptr");
+ return Builder.CreateBitCast(result, pointer->getType());
}
QualType elementType = pointerType->getPointeeType();
- if (const VariableArrayType *vla
- = CGF.getContext().getAsVariableArrayType(elementType)) {
+ if (const VariableArrayType *vla =
+ getContext().getAsVariableArrayType(elementType)) {
// The element count here is the total number of non-VLA elements.
- llvm::Value *numElements = CGF.getVLASize(vla).NumElts;
+ llvm::Value *numElements = getVLASize(vla).NumElts;
// Effectively, the multiply by the VLA size is part of the GEP.
// GEP indexes are signed, and scaling an index isn't permitted to
// signed-overflow, so we use the same semantics for our explicit
// multiply. We suppress this if overflow is not undefined behavior.
- llvm::Type *elemTy = CGF.ConvertTypeForMem(vla->getElementType());
- if (CGF.getLangOpts().PointerOverflowDefined) {
- index = CGF.Builder.CreateMul(index, numElements, "vla.index");
- pointer = CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
+ llvm::Type *elemTy = ConvertTypeForMem(vla->getElementType());
+ if (getLangOpts().PointerOverflowDefined) {
+ index = Builder.CreateMul(index, numElements, "vla.index");
+ pointer = Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
} else {
- index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index");
- pointer = CGF.EmitCheckedInBoundsGEP(
- elemTy, pointer, index, isSigned, isSubtraction, op.E->getExprLoc(),
- "add.ptr");
+ index = Builder.CreateNSWMul(index, numElements, "vla.index");
+ pointer =
+ EmitCheckedInBoundsGEP(elemTy, pointer, index, isSigned,
+ isSubtraction, BO->getExprLoc(), "add.ptr");
}
return pointer;
}
@@ -4309,16 +4317,15 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
// future proof.
llvm::Type *elemTy;
if (elementType->isVoidType() || elementType->isFunctionType())
- elemTy = CGF.Int8Ty;
+ elemTy = Int8Ty;
else
- elemTy = CGF.ConvertTypeForMem(elementType);
+ elemTy = ConvertTypeForMem(elementType);
- if (CGF.getLangOpts().PointerOverflowDefined)
- return CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
+ if (getLangOpts().PointerOverflowDefined)
+ return Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
- return CGF.EmitCheckedInBoundsGEP(
- elemTy, pointer, index, isSigned, isSubtraction, op.E->getExprLoc(),
- "add.ptr");
+ return EmitCheckedInBoundsGEP(elemTy, pointer, index, isSigned, isSubtraction,
+ BO->getExprLoc(), "add.ptr");
}
// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
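(Illustration, not part of the patch.) The largest structural change in CGExprScalar.cpp is that the static emitPointerArithmetic helper now only unpacks the BinOpInfo operands and forwards to a new CodeGenFunction::EmitPointerArithmetic member, declared in the CodeGenFunction.h hunk further down; the member owns the GEP emission plus the overflow and bounds checks, so it uses Builder, CGM, getLangOpts() and SanOpts directly instead of going through a CGF reference. A minimal sketch of the resulting split, with the shared body elided:

    // Static wrapper: identify pointer vs. index operand, then forward.
    static Value *emitPointerArithmetic(CodeGenFunction &CGF, const BinOpInfo &op,
                                        bool isSubtraction) {
      // ... pick expr/pointerOperand/pointer/indexOperand/index from op ...
      return CGF.EmitPointerArithmetic(expr, pointerOperand, pointer,
                                       indexOperand, index, isSubtraction);
    }

    // Member: emits the (checked) GEP; body as in the hunks above, now
    // reachable from other emitters as well.
    llvm::Value *CodeGenFunction::EmitPointerArithmetic(
        const BinaryOperator *BO, Expr *pointerOperand, llvm::Value *pointer,
        Expr *indexOperand, llvm::Value *index, bool isSubtraction) {
      // ... same logic, using Builder/CGM/SanOpts members directly ...
    }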
diff --git a/clang/lib/CodeGen/CGHLSLRuntime.cpp b/clang/lib/CodeGen/CGHLSLRuntime.cpp
index f64ac20..9b3ef6a 100644
--- a/clang/lib/CodeGen/CGHLSLRuntime.cpp
+++ b/clang/lib/CodeGen/CGHLSLRuntime.cpp
@@ -103,13 +103,6 @@ llvm::Triple::ArchType CGHLSLRuntime::getArch() {
return CGM.getTarget().getTriple().getArch();
}
-// Returns true if the type is an HLSL resource class or an array of them
-static bool isResourceRecordTypeOrArrayOf(const clang::Type *Ty) {
- while (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(Ty))
- Ty = CAT->getArrayElementTypeNoTypeQual();
- return Ty->isHLSLResourceRecord();
-}
-
// Emits constant global variables for buffer constants declarations
// and creates metadata linking the constant globals with the buffer global.
void CGHLSLRuntime::emitBufferGlobalsAndMetadata(const HLSLBufferDecl *BufDecl,
@@ -146,7 +139,7 @@ void CGHLSLRuntime::emitBufferGlobalsAndMetadata(const HLSLBufferDecl *BufDecl,
if (VDTy.getAddressSpace() != LangAS::hlsl_constant) {
if (VD->getStorageClass() == SC_Static ||
VDTy.getAddressSpace() == LangAS::hlsl_groupshared ||
- isResourceRecordTypeOrArrayOf(VDTy.getTypePtr())) {
+ VDTy->isHLSLResourceRecord() || VDTy->isHLSLResourceRecordArray()) {
// Emit static and groupshared variables and resource classes inside
// cbuffer as regular globals
CGM.EmitGlobal(VD);
@@ -186,8 +179,7 @@ static const clang::HLSLAttributedResourceType *
createBufferHandleType(const HLSLBufferDecl *BufDecl) {
ASTContext &AST = BufDecl->getASTContext();
QualType QT = AST.getHLSLAttributedResourceType(
- AST.HLSLResourceTy,
- QualType(BufDecl->getLayoutStruct()->getTypeForDecl(), 0),
+ AST.HLSLResourceTy, AST.getCanonicalTagType(BufDecl->getLayoutStruct()),
HLSLAttributedResourceType::Attributes(ResourceClass::CBuffer));
return cast<HLSLAttributedResourceType>(QT.getTypePtr());
}
diff --git a/clang/lib/CodeGen/CGNonTrivialStruct.cpp b/clang/lib/CodeGen/CGNonTrivialStruct.cpp
index e0983ef..1b941fff 100644
--- a/clang/lib/CodeGen/CGNonTrivialStruct.cpp
+++ b/clang/lib/CodeGen/CGNonTrivialStruct.cpp
@@ -39,7 +39,8 @@ template <class Derived> struct StructVisitor {
template <class... Ts>
void visitStructFields(QualType QT, CharUnits CurStructOffset, Ts... Args) {
- const RecordDecl *RD = QT->castAs<RecordType>()->getDecl();
+ const RecordDecl *RD =
+ QT->castAs<RecordType>()->getOriginalDecl()->getDefinitionOrSelf();
// Iterate over the fields of the struct.
for (const FieldDecl *FD : RD->fields()) {
@@ -464,7 +465,8 @@ template <class Derived> struct GenFuncBase {
if (WrongType) {
std::string FuncName = std::string(F->getName());
- SourceLocation Loc = QT->castAs<RecordType>()->getDecl()->getLocation();
+ SourceLocation Loc =
+ QT->castAs<RecordType>()->getOriginalDecl()->getLocation();
CGM.Error(Loc, "special function " + FuncName +
" for non-trivial C struct has incorrect type");
return nullptr;
@@ -560,7 +562,8 @@ struct GenBinaryFunc : CopyStructVisitor<Derived, IsMove>,
if (FD->isZeroLengthBitField())
return;
- QualType RT = QualType(FD->getParent()->getTypeForDecl(), 0);
+ CanQualType RT =
+ this->CGF->getContext().getCanonicalTagType(FD->getParent());
llvm::Type *Ty = this->CGF->ConvertType(RT);
Address DstAddr = this->getAddrWithOffset(Addrs[DstIdx], Offset);
LValue DstBase =
diff --git a/clang/lib/CodeGen/CGObjC.cpp b/clang/lib/CodeGen/CGObjC.cpp
index 24b6ce7..b5f17b8 100644
--- a/clang/lib/CodeGen/CGObjC.cpp
+++ b/clang/lib/CodeGen/CGObjC.cpp
@@ -1000,7 +1000,9 @@ PropertyImplStrategy::PropertyImplStrategy(CodeGenModule &CGM,
// Compute whether the ivar has strong members.
if (CGM.getLangOpts().getGC())
if (const RecordType *recordType = ivarType->getAs<RecordType>())
- HasStrong = recordType->getDecl()->hasObjectMember();
+ HasStrong = recordType->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->hasObjectMember();
// We can never access structs with object members with a native
// access, because we need to use write barriers. This is what
diff --git a/clang/lib/CodeGen/CGObjCMac.cpp b/clang/lib/CodeGen/CGObjCMac.cpp
index 8c66176..eb49040 100644
--- a/clang/lib/CodeGen/CGObjCMac.cpp
+++ b/clang/lib/CodeGen/CGObjCMac.cpp
@@ -2495,7 +2495,7 @@ void CGObjCCommonMac::BuildRCBlockVarRecordLayout(const RecordType *RT,
CharUnits BytePos,
bool &HasUnion,
bool ByrefLayout) {
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
SmallVector<const FieldDecl *, 16> Fields(RD->fields());
llvm::Type *Ty = CGM.getTypes().ConvertType(QualType(RT, 0));
const llvm::StructLayout *RecLayout =
@@ -3354,7 +3354,8 @@ static bool hasWeakMember(QualType type) {
}
if (auto recType = type->getAs<RecordType>()) {
- for (auto *field : recType->getDecl()->fields()) {
+ for (auto *field :
+ recType->getOriginalDecl()->getDefinitionOrSelf()->fields()) {
if (hasWeakMember(field->getType()))
return true;
}
@@ -5184,7 +5185,7 @@ CGObjCCommonMac::GetIvarLayoutName(IdentifierInfo *Ident,
}
void IvarLayoutBuilder::visitRecord(const RecordType *RT, CharUnits offset) {
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
// If this is a union, remember that we had one, because it might mess
// up the ordering of layout entries.
@@ -5670,7 +5671,7 @@ ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
nullptr, false, ICIS_NoInit));
RD->completeDefinition();
- SuperCTy = Ctx.getTagDeclType(RD);
+ SuperCTy = Ctx.getCanonicalTagType(RD);
SuperPtrCTy = Ctx.getPointerType(SuperCTy);
SuperTy = cast<llvm::StructType>(Types.ConvertType(SuperCTy));
@@ -6016,7 +6017,7 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(
false, ICIS_NoInit));
RD->completeDefinition();
- MessageRefCTy = Ctx.getTagDeclType(RD);
+ MessageRefCTy = Ctx.getCanonicalTagType(RD);
MessageRefCPtrTy = Ctx.getPointerType(MessageRefCTy);
MessageRefTy = cast<llvm::StructType>(Types.ConvertType(MessageRefCTy));
diff --git a/clang/lib/CodeGen/CGObjCRuntime.cpp b/clang/lib/CodeGen/CGObjCRuntime.cpp
index 6e2f320..cbf9953 100644
--- a/clang/lib/CodeGen/CGObjCRuntime.cpp
+++ b/clang/lib/CodeGen/CGObjCRuntime.cpp
@@ -440,7 +440,9 @@ void CGObjCRuntime::destroyCalleeDestroyedArguments(CodeGenFunction &CGF,
} else {
QualType QT = param->getType();
auto *RT = QT->getAs<RecordType>();
- if (RT && RT->getDecl()->isParamDestroyedInCallee()) {
+ if (RT && RT->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->isParamDestroyedInCallee()) {
RValue RV = I->getRValue(CGF);
QualType::DestructionKind DtorKind = QT.isDestructedType();
switch (DtorKind) {
diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
index 91237cf..3eba33f 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -2915,7 +2915,7 @@ createKmpTaskTRecordDecl(CodeGenModule &CGM, OpenMPDirectiveKind Kind,
addFieldToRecordDecl(C, UD, KmpInt32Ty);
addFieldToRecordDecl(C, UD, KmpRoutineEntryPointerQTy);
UD->completeDefinition();
- QualType KmpCmplrdataTy = C.getRecordType(UD);
+ CanQualType KmpCmplrdataTy = C.getCanonicalTagType(UD);
RecordDecl *RD = C.buildImplicitRecord("kmp_task_t");
RD->startDefinition();
addFieldToRecordDecl(C, RD, C.VoidPtrTy);
@@ -2950,7 +2950,7 @@ createKmpTaskTWithPrivatesRecordDecl(CodeGenModule &CGM, QualType KmpTaskTQTy,
RD->startDefinition();
addFieldToRecordDecl(C, RD, KmpTaskTQTy);
if (const RecordDecl *PrivateRD = createPrivatesRecordDecl(CGM, Privates))
- addFieldToRecordDecl(C, RD, C.getRecordType(PrivateRD));
+ addFieldToRecordDecl(C, RD, C.getCanonicalTagType(PrivateRD));
RD->completeDefinition();
return RD;
}
@@ -3582,7 +3582,7 @@ static void getKmpAffinityType(ASTContext &C, QualType &KmpTaskAffinityInfoTy) {
addFieldToRecordDecl(C, KmpAffinityInfoRD, C.getSizeType());
addFieldToRecordDecl(C, KmpAffinityInfoRD, FlagsTy);
KmpAffinityInfoRD->completeDefinition();
- KmpTaskAffinityInfoTy = C.getRecordType(KmpAffinityInfoRD);
+ KmpTaskAffinityInfoTy = C.getCanonicalTagType(KmpAffinityInfoRD);
}
}
@@ -3640,7 +3640,7 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
// Build type kmp_task_t (if not built yet).
if (isOpenMPTaskLoopDirective(D.getDirectiveKind())) {
if (SavedKmpTaskloopTQTy.isNull()) {
- SavedKmpTaskloopTQTy = C.getRecordType(createKmpTaskTRecordDecl(
+ SavedKmpTaskloopTQTy = C.getCanonicalTagType(createKmpTaskTRecordDecl(
CGM, D.getDirectiveKind(), KmpInt32Ty, KmpRoutineEntryPtrQTy));
}
KmpTaskTQTy = SavedKmpTaskloopTQTy;
@@ -3650,7 +3650,7 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
isOpenMPTargetDataManagementDirective(D.getDirectiveKind())) &&
"Expected taskloop, task or target directive");
if (SavedKmpTaskTQTy.isNull()) {
- SavedKmpTaskTQTy = C.getRecordType(createKmpTaskTRecordDecl(
+ SavedKmpTaskTQTy = C.getCanonicalTagType(createKmpTaskTRecordDecl(
CGM, D.getDirectiveKind(), KmpInt32Ty, KmpRoutineEntryPtrQTy));
}
KmpTaskTQTy = SavedKmpTaskTQTy;
@@ -3659,7 +3659,8 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
// Build particular struct kmp_task_t for the given task.
const RecordDecl *KmpTaskTWithPrivatesQTyRD =
createKmpTaskTWithPrivatesRecordDecl(CGM, KmpTaskTQTy, Privates);
- QualType KmpTaskTWithPrivatesQTy = C.getRecordType(KmpTaskTWithPrivatesQTyRD);
+ CanQualType KmpTaskTWithPrivatesQTy =
+ C.getCanonicalTagType(KmpTaskTWithPrivatesQTyRD);
QualType KmpTaskTWithPrivatesPtrQTy =
C.getPointerType(KmpTaskTWithPrivatesQTy);
llvm::Type *KmpTaskTWithPrivatesPtrTy = CGF.Builder.getPtrTy(0);
@@ -3914,7 +3915,10 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
// Fill the data in the resulting kmp_task_t record.
// Copy shareds if there are any.
Address KmpTaskSharedsPtr = Address::invalid();
- if (!SharedsTy->getAsStructureType()->getDecl()->field_empty()) {
+ if (!SharedsTy->getAsStructureType()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->field_empty()) {
KmpTaskSharedsPtr = Address(
CGF.EmitLoadOfScalar(
CGF.EmitLValueForField(
@@ -3944,8 +3948,11 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
enum { Priority = 0, Destructors = 1 };
// Provide pointer to function with destructors for privates.
auto FI = std::next(KmpTaskTQTyRD->field_begin(), Data1);
- const RecordDecl *KmpCmplrdataUD =
- (*FI)->getType()->getAsUnionType()->getDecl();
+ const RecordDecl *KmpCmplrdataUD = (*FI)
+ ->getType()
+ ->getAsUnionType()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf();
if (NeedsCleanup) {
llvm::Value *DestructorFn = emitDestructorsFunction(
CGM, Loc, KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy,
@@ -4015,7 +4022,7 @@ static void getDependTypes(ASTContext &C, QualType &KmpDependInfoTy,
addFieldToRecordDecl(C, KmpDependInfoRD, C.getSizeType());
addFieldToRecordDecl(C, KmpDependInfoRD, FlagsTy);
KmpDependInfoRD->completeDefinition();
- KmpDependInfoTy = C.getRecordType(KmpDependInfoRD);
+ KmpDependInfoTy = C.getCanonicalTagType(KmpDependInfoRD);
}
}
@@ -5714,7 +5721,7 @@ llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
const FieldDecl *FlagsFD = addFieldToRecordDecl(
C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/false));
RD->completeDefinition();
- QualType RDType = C.getRecordType(RD);
+ CanQualType RDType = C.getCanonicalTagType(RD);
unsigned Size = Data.ReductionVars.size();
llvm::APInt ArraySize(/*numBits=*/64, Size);
QualType ArrayRDType =
@@ -10703,7 +10710,7 @@ static unsigned evaluateCDTSize(const FunctionDecl *FD,
unsigned Offset = 0;
if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
if (ParamAttrs[Offset].Kind == Vector)
- CDT = C.getPointerType(C.getRecordType(MD->getParent()));
+ CDT = C.getPointerType(C.getCanonicalTagType(MD->getParent()));
++Offset;
}
if (CDT.isNull()) {
@@ -11285,7 +11292,7 @@ void CGOpenMPRuntime::emitDoacrossInit(CodeGenFunction &CGF,
addFieldToRecordDecl(C, RD, Int64Ty);
addFieldToRecordDecl(C, RD, Int64Ty);
RD->completeDefinition();
- KmpDimTy = C.getRecordType(RD);
+ KmpDimTy = C.getCanonicalTagType(RD);
} else {
RD = cast<RecordDecl>(KmpDimTy->getAsTagDecl());
}
@@ -11781,7 +11788,7 @@ Address CGOpenMPRuntime::emitLastprivateConditionalInit(CodeGenFunction &CGF,
VDField = addFieldToRecordDecl(C, RD, VD->getType().getNonReferenceType());
FiredField = addFieldToRecordDecl(C, RD, C.CharTy);
RD->completeDefinition();
- NewType = C.getRecordType(RD);
+ NewType = C.getCanonicalTagType(RD);
Address Addr = CGF.CreateMemTemp(NewType, C.getDeclAlign(VD), VD->getName());
BaseLVal = CGF.MakeAddrLValue(Addr, NewType, AlignmentSource::Decl);
I->getSecond().try_emplace(VD, NewType, VDField, FiredField, BaseLVal);
diff --git a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
index 04c9192..cff1071 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
@@ -769,7 +769,7 @@ void CGOpenMPRuntimeGPU::emitKernelDeinit(CodeGenFunction &CGF,
"_openmp_teams_reduction_type_$_", RecordDecl::TagKind::Union);
StaticRD->startDefinition();
for (const RecordDecl *TeamReductionRec : TeamsReductions) {
- QualType RecTy = C.getRecordType(TeamReductionRec);
+ CanQualType RecTy = C.getCanonicalTagType(TeamReductionRec);
auto *Field = FieldDecl::Create(
C, StaticRD, SourceLocation(), SourceLocation(), nullptr, RecTy,
C.getTrivialTypeSourceInfo(RecTy, SourceLocation()),
@@ -779,7 +779,7 @@ void CGOpenMPRuntimeGPU::emitKernelDeinit(CodeGenFunction &CGF,
StaticRD->addDecl(Field);
}
StaticRD->completeDefinition();
- QualType StaticTy = C.getRecordType(StaticRD);
+ CanQualType StaticTy = C.getCanonicalTagType(StaticRD);
llvm::Type *LLVMReductionsBufferTy =
CGM.getTypes().ConvertTypeForMem(StaticTy);
const auto &DL = CGM.getModule().getDataLayout();
diff --git a/clang/lib/CodeGen/CGPointerAuth.cpp b/clang/lib/CodeGen/CGPointerAuth.cpp
index dcef01a..1a5ffb5 100644
--- a/clang/lib/CodeGen/CGPointerAuth.cpp
+++ b/clang/lib/CodeGen/CGPointerAuth.cpp
@@ -531,7 +531,7 @@ llvm::Constant *CodeGenModule::getMemberFunctionPointer(llvm::Constant *Pointer,
llvm::Constant *CodeGenModule::getMemberFunctionPointer(const FunctionDecl *FD,
llvm::Type *Ty) {
QualType FT = FD->getType();
- FT = getContext().getMemberPointerType(FT, /*Qualifier=*/nullptr,
+ FT = getContext().getMemberPointerType(FT, /*Qualifier=*/std::nullopt,
cast<CXXMethodDecl>(FD)->getParent());
return getMemberFunctionPointer(getRawFunctionPointer(FD, Ty), FT);
}
diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp
index 1a8c6f0..031ef73 100644
--- a/clang/lib/CodeGen/CGStmt.cpp
+++ b/clang/lib/CodeGen/CGStmt.cpp
@@ -3279,7 +3279,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
const RecordDecl *RD = S.getCapturedRecordDecl();
- QualType RecordTy = getContext().getRecordType(RD);
+ CanQualType RecordTy = getContext().getCanonicalTagType(RD);
// Initialize the captured struct.
LValue SlotLV =
@@ -3359,7 +3359,7 @@ CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
// Initialize variable-length arrays.
LValue Base = MakeNaturalAlignRawAddrLValue(
- CapturedStmtInfo->getContextValue(), Ctx.getTagDeclType(RD));
+ CapturedStmtInfo->getContextValue(), Ctx.getCanonicalTagType(RD));
for (auto *FD : RD->fields()) {
if (FD->hasCapturedVLAType()) {
auto *ExprArg =
diff --git a/clang/lib/CodeGen/CGStmtOpenMP.cpp b/clang/lib/CodeGen/CGStmtOpenMP.cpp
index 5822e0f..f6a0ca5 100644
--- a/clang/lib/CodeGen/CGStmtOpenMP.cpp
+++ b/clang/lib/CodeGen/CGStmtOpenMP.cpp
@@ -5273,7 +5273,8 @@ void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
// Emit outlined function for task construct.
const CapturedStmt *CS = S.getCapturedStmt(OMPD_task);
Address CapturedStruct = GenerateCapturedStmtArgument(*CS);
- QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
+ CanQualType SharedsTy =
+ getContext().getCanonicalTagType(CS->getCapturedRecordDecl());
auto I = CS->getCapturedDecl()->param_begin();
auto PartId = std::next(I);
auto TaskT = std::next(I, 4);
@@ -5507,7 +5508,8 @@ void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
// Emit outlined function for task construct.
const CapturedStmt *CS = S.getCapturedStmt(OMPD_task);
Address CapturedStruct = GenerateCapturedStmtArgument(*CS);
- QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
+ CanQualType SharedsTy =
+ getContext().getCanonicalTagType(CS->getCapturedRecordDecl());
const Expr *IfCond = nullptr;
for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
if (C->getNameModifier() == OMPD_unknown ||
@@ -7890,7 +7892,8 @@ void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) {
OMPLexicalScope Scope(*this, S, OMPD_taskloop, /*EmitPreInitStmt=*/false);
CapturedStruct = GenerateCapturedStmtArgument(*CS);
}
- QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
+ CanQualType SharedsTy =
+ getContext().getCanonicalTagType(CS->getCapturedRecordDecl());
const Expr *IfCond = nullptr;
for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
if (C->getNameModifier() == OMPD_unknown ||
diff --git a/clang/lib/CodeGen/CGVTables.cpp b/clang/lib/CodeGen/CGVTables.cpp
index 0b6e830..e14e883 100644
--- a/clang/lib/CodeGen/CGVTables.cpp
+++ b/clang/lib/CodeGen/CGVTables.cpp
@@ -971,7 +971,7 @@ llvm::GlobalVariable *CodeGenVTables::GenerateConstructionVTable(
VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
llvm::Constant *RTTI = CGM.GetAddrOfRTTIDescriptor(
- CGM.getContext().getTagDeclType(Base.getBase()));
+ CGM.getContext().getCanonicalTagType(Base.getBase()));
// Create and set the initializer.
ConstantInitBuilder builder(CGM);
@@ -1382,8 +1382,8 @@ void CodeGenModule::EmitVTableTypeMetadata(const CXXRecordDecl *RD,
AP.second.AddressPointIndex,
{}};
llvm::raw_string_ostream Stream(N.TypeName);
- getCXXABI().getMangleContext().mangleCanonicalTypeName(
- QualType(N.Base->getTypeForDecl(), 0), Stream);
+ CanQualType T = getContext().getCanonicalTagType(N.Base);
+ getCXXABI().getMangleContext().mangleCanonicalTypeName(T, Stream);
AddressPoints.push_back(std::move(N));
}
@@ -1404,7 +1404,7 @@ void CodeGenModule::EmitVTableTypeMetadata(const CXXRecordDecl *RD,
continue;
llvm::Metadata *MD = CreateMetadataIdentifierForVirtualMemPtrType(
Context.getMemberPointerType(Comps[I].getFunctionDecl()->getType(),
- /*Qualifier=*/nullptr, AP.Base));
+ /*Qualifier=*/std::nullopt, AP.Base));
VTable->addTypeMetadata((ComponentWidth * I).getQuantity(), MD);
}
}
diff --git a/clang/lib/CodeGen/CodeGenFunction.cpp b/clang/lib/CodeGen/CodeGenFunction.cpp
index ab345a5..d077ee5 100644
--- a/clang/lib/CodeGen/CodeGenFunction.cpp
+++ b/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -2223,7 +2223,9 @@ CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
// Ignore empty classes in C++.
if (getLangOpts().CPlusPlus) {
if (const RecordType *RT = Ty->getAs<RecordType>()) {
- if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
+ if (cast<CXXRecordDecl>(RT->getOriginalDecl())
+ ->getDefinitionOrSelf()
+ ->isEmpty())
return;
}
}
@@ -2494,10 +2496,6 @@ void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
case Type::PredefinedSugar:
llvm_unreachable("type class is never variably-modified!");
- case Type::Elaborated:
- type = cast<ElaboratedType>(ty)->getNamedType();
- break;
-
case Type::Adjusted:
type = cast<AdjustedType>(ty)->getAdjustedType();
break;
diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h
index 6c32c98..84be422 100644
--- a/clang/lib/CodeGen/CodeGenFunction.h
+++ b/clang/lib/CodeGen/CodeGenFunction.h
@@ -701,14 +701,12 @@ public:
bool isRedundantBeforeReturn() override { return true; }
llvm::Value *Addr;
- llvm::Value *Size;
public:
- CallLifetimeEnd(RawAddress addr, llvm::Value *size)
- : Addr(addr.getPointer()), Size(size) {}
+ CallLifetimeEnd(RawAddress addr) : Addr(addr.getPointer()) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
- CGF.EmitLifetimeEnd(Size, Addr);
+ CGF.EmitLifetimeEnd(Addr);
}
};
@@ -2974,7 +2972,7 @@ public:
/// member.
bool hasVolatileMember(QualType T) {
if (const RecordType *RT = T->getAs<RecordType>()) {
- const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
return RD->hasVolatileMember();
}
return false;
@@ -3233,8 +3231,8 @@ public:
void EmitSehTryScopeBegin();
void EmitSehTryScopeEnd();
- llvm::Value *EmitLifetimeStart(llvm::TypeSize Size, llvm::Value *Addr);
- void EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr);
+ bool EmitLifetimeStart(llvm::Value *Addr);
+ void EmitLifetimeEnd(llvm::Value *Addr);
llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
void EmitCXXDeleteExpr(const CXXDeleteExpr *E);
@@ -3417,8 +3415,8 @@ public:
/// initializer.
bool IsConstantAggregate;
- /// Non-null if we should use lifetime annotations.
- llvm::Value *SizeForLifetimeMarkers;
+ /// True if lifetime markers should be used.
+ bool UseLifetimeMarkers;
/// Address with original alloca instruction. Invalid if the variable was
/// emitted as a global constant.
@@ -3432,20 +3430,14 @@ public:
AutoVarEmission(const VarDecl &variable)
: Variable(&variable), Addr(Address::invalid()), NRVOFlag(nullptr),
IsEscapingByRef(false), IsConstantAggregate(false),
- SizeForLifetimeMarkers(nullptr), AllocaAddr(RawAddress::invalid()) {}
+ UseLifetimeMarkers(false), AllocaAddr(RawAddress::invalid()) {}
bool wasEmittedAsGlobal() const { return !Addr.isValid(); }
public:
static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); }
- bool useLifetimeMarkers() const {
- return SizeForLifetimeMarkers != nullptr;
- }
- llvm::Value *getSizeForLifetimeMarkers() const {
- assert(useLifetimeMarkers());
- return SizeForLifetimeMarkers;
- }
+ bool useLifetimeMarkers() const { return UseLifetimeMarkers; }
/// Returns the raw, allocated address, which is not necessarily
/// the address of the object itself. It is casted to default
@@ -4560,7 +4552,7 @@ public:
ArrayRef<llvm::Value *> args);
CGCallee BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
- NestedNameSpecifier *Qual, llvm::Type *Ty);
+ NestedNameSpecifier Qual, llvm::Type *Ty);
CGCallee BuildAppleKextVirtualDestructorCall(const CXXDestructorDecl *DD,
CXXDtorType Type,
@@ -4665,7 +4657,7 @@ public:
llvm::CallBase **CallOrInvoke = nullptr);
RValue EmitCXXMemberOrOperatorMemberCallExpr(
const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue,
- bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow,
+ bool HasQualifier, NestedNameSpecifier Qualifier, bool IsArrow,
const Expr *Base, llvm::CallBase **CallOrInvoke);
// Compute the object pointer.
Address EmitCXXMemberDataPointerAddress(
@@ -5220,6 +5212,12 @@ public:
/// operation is a subtraction.
enum { NotSubtraction = false, IsSubtraction = true };
+ /// Emit pointer + index arithmetic.
+ llvm::Value *EmitPointerArithmetic(const BinaryOperator *BO,
+ Expr *pointerOperand, llvm::Value *pointer,
+ Expr *indexOperand, llvm::Value *index,
+ bool isSubtraction);
+
/// Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to
/// detect undefined behavior when the pointer overflow sanitizer is enabled.
/// \p SignedIndices indicates whether any of the GEP indices are signed.
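(Illustration, not part of the patch.) Together with the CGExpr.cpp and CGExprAgg.cpp hunks earlier, these header changes remove the size value from the lifetime-marker plumbing: EmitLifetimeStart now returns a bool, EmitLifetimeEnd and the CallLifetimeEnd cleanup take only the address, and AutoVarEmission keeps a plain UseLifetimeMarkers flag instead of a SizeForLifetimeMarkers value. A caller-side sketch of the before/after shape, using names from the materialize-temporary hunks above:

    // Before: the start marker yielded a size value that had to be threaded
    // into the matching lifetime-end cleanup.
    //   if (llvm::Value *Size = EmitLifetimeStart(
    //           CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
    //           Alloca.getPointer()))
    //     pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, Alloca,
    //                                          Size);

    // After: the marker either was emitted or it wasn't; the cleanup only
    // needs the annotated address.
    if (EmitLifetimeStart(Alloca.getPointer()))
      pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, Alloca);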
diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp
index 834b1c0..2541a44 100644
--- a/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/clang/lib/CodeGen/CodeGenModule.cpp
@@ -2793,7 +2793,7 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
for (const CXXRecordDecl *Base : getMostBaseClasses(MD->getParent())) {
llvm::Metadata *Id =
CreateMetadataIdentifierForType(Context.getMemberPointerType(
- MD->getType(), /*Qualifier=*/nullptr, Base));
+ MD->getType(), /*Qualifier=*/std::nullopt, Base));
F->addTypeMetadata(0, Id);
}
}
@@ -4152,9 +4152,11 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
// Check if T is a class type with a destructor that's not dllimport.
static bool HasNonDllImportDtor(QualType T) {
if (const auto *RT = T->getBaseElementTypeUnsafe()->getAs<RecordType>())
- if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
+ if (auto *RD = dyn_cast<CXXRecordDecl>(RT->getOriginalDecl())) {
+ RD = RD->getDefinitionOrSelf();
if (RD->getDestructor() && !RD->getDestructor()->hasAttr<DLLImportAttr>())
return true;
+ }
return false;
}
@@ -6029,7 +6031,7 @@ static bool isVarDeclStrongDefinition(const ASTContext &Context,
return true;
if (const auto *RT = VarType->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
for (const FieldDecl *FD : RD->fields()) {
if (FD->isBitField())
continue;
@@ -6738,7 +6740,7 @@ QualType CodeGenModule::getObjCFastEnumerationStateType() {
}
D->completeDefinition();
- ObjCFastEnumerationStateType = Context.getTagDeclType(D);
+ ObjCFastEnumerationStateType = Context.getCanonicalTagType(D);
}
return ObjCFastEnumerationStateType;
@@ -7248,7 +7250,8 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
CXXRecordDecl *CRD = cast<CXXRecordDecl>(D);
if (CGDebugInfo *DI = getModuleDebugInfo()) {
if (CRD->hasDefinition())
- DI->EmitAndRetainType(getContext().getRecordType(cast<RecordDecl>(D)));
+ DI->EmitAndRetainType(
+ getContext().getCanonicalTagType(cast<RecordDecl>(D)));
if (auto *ES = D->getASTContext().getExternalSource())
if (ES->hasExternalDefinitions(D) == ExternalASTSource::EK_Never)
DI->completeUnusedClass(*CRD);
@@ -7467,20 +7470,23 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
case Decl::Typedef:
case Decl::TypeAlias: // using foo = bar; [C++11]
if (CGDebugInfo *DI = getModuleDebugInfo())
- DI->EmitAndRetainType(
- getContext().getTypedefType(cast<TypedefNameDecl>(D)));
+ DI->EmitAndRetainType(getContext().getTypedefType(
+ ElaboratedTypeKeyword::None, /*Qualifier=*/std::nullopt,
+ cast<TypedefNameDecl>(D)));
break;
case Decl::Record:
if (CGDebugInfo *DI = getModuleDebugInfo())
if (cast<RecordDecl>(D)->getDefinition())
- DI->EmitAndRetainType(getContext().getRecordType(cast<RecordDecl>(D)));
+ DI->EmitAndRetainType(
+ getContext().getCanonicalTagType(cast<RecordDecl>(D)));
break;
case Decl::Enum:
if (CGDebugInfo *DI = getModuleDebugInfo())
if (cast<EnumDecl>(D)->getDefinition())
- DI->EmitAndRetainType(getContext().getEnumType(cast<EnumDecl>(D)));
+ DI->EmitAndRetainType(
+ getContext().getCanonicalTagType(cast<EnumDecl>(D)));
break;
case Decl::HLSLBuffer:
@@ -7934,8 +7940,8 @@ bool CodeGenModule::NeedAllVtablesTypeId() const {
void CodeGenModule::AddVTableTypeMetadata(llvm::GlobalVariable *VTable,
CharUnits Offset,
const CXXRecordDecl *RD) {
- llvm::Metadata *MD =
- CreateMetadataIdentifierForType(QualType(RD->getTypeForDecl(), 0));
+ CanQualType T = getContext().getCanonicalTagType(RD);
+ llvm::Metadata *MD = CreateMetadataIdentifierForType(T);
VTable->addTypeMetadata(Offset.getQuantity(), MD);
if (CodeGenOpts.SanitizeCfiCrossDso)
diff --git a/clang/lib/CodeGen/CodeGenTBAA.cpp b/clang/lib/CodeGen/CodeGenTBAA.cpp
index 90eafe2..bd2442f 100644
--- a/clang/lib/CodeGen/CodeGenTBAA.cpp
+++ b/clang/lib/CodeGen/CodeGenTBAA.cpp
@@ -143,7 +143,7 @@ static bool TypeHasMayAlias(QualType QTy) {
/// Check if the given type is a valid base type to be used in access tags.
static bool isValidBaseType(QualType QTy) {
if (const RecordType *TTy = QTy->getAs<RecordType>()) {
- const RecordDecl *RD = TTy->getDecl()->getDefinition();
+ const RecordDecl *RD = TTy->getOriginalDecl()->getDefinition();
// Incomplete types are not valid base access types.
if (!RD)
return false;
@@ -311,7 +311,7 @@ llvm::MDNode *CodeGenTBAA::getTypeInfoHelper(const Type *Ty) {
// This also covers anonymous structs and unions, which have a different
// compatibility rule, but it doesn't matter because you can never have a
// pointer to an anonymous struct or union.
- if (!RT->getDecl()->getDeclName())
+ if (!RT->getOriginalDecl()->getDeclName())
return getAnyPtr(PtrDepth);
// For non-builtin types use the mangled name of the canonical type.
@@ -333,14 +333,15 @@ llvm::MDNode *CodeGenTBAA::getTypeInfoHelper(const Type *Ty) {
// Enum types are distinct types. In C++ they have "underlying types",
// however they aren't related for TBAA.
if (const EnumType *ETy = dyn_cast<EnumType>(Ty)) {
+ const EnumDecl *ED = ETy->getOriginalDecl()->getDefinitionOrSelf();
if (!Features.CPlusPlus)
- return getTypeInfo(ETy->getDecl()->getIntegerType());
+ return getTypeInfo(ED->getIntegerType());
// In C++ mode, types have linkage, so we can rely on the ODR and
// on their mangled names, if they're external.
// TODO: Is there a way to get a program-wide unique name for a
// decl with local linkage or no linkage?
- if (!ETy->getDecl()->isExternallyVisible())
+ if (!ED->isExternallyVisible())
return getChar();
SmallString<256> OutName;
@@ -433,7 +434,7 @@ CodeGenTBAA::CollectFields(uint64_t BaseOffset,
llvm::MDBuilder::TBAAStructField(BaseOffset, Size, TBAATag));
return true;
}
- const RecordDecl *RD = TTy->getDecl()->getDefinition();
+ const RecordDecl *RD = TTy->getOriginalDecl()->getDefinition();
if (RD->hasFlexibleArrayMember())
return false;
@@ -514,7 +515,7 @@ CodeGenTBAA::getTBAAStructInfo(QualType QTy) {
llvm::MDNode *CodeGenTBAA::getBaseTypeInfoHelper(const Type *Ty) {
if (auto *TTy = dyn_cast<RecordType>(Ty)) {
- const RecordDecl *RD = TTy->getDecl()->getDefinition();
+ const RecordDecl *RD = TTy->getOriginalDecl()->getDefinition();
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
using TBAAStructField = llvm::MDBuilder::TBAAStructField;
SmallVector<TBAAStructField, 4> Fields;
diff --git a/clang/lib/CodeGen/CodeGenTypes.cpp b/clang/lib/CodeGen/CodeGenTypes.cpp
index c98503e..f2a0a64 100644
--- a/clang/lib/CodeGen/CodeGenTypes.cpp
+++ b/clang/lib/CodeGen/CodeGenTypes.cpp
@@ -229,12 +229,13 @@ bool CodeGenTypes::isFuncTypeConvertible(const FunctionType *FT) {
/// UpdateCompletedType - When we find the full definition for a TagDecl,
/// replace the 'opaque' type we previously made for it if applicable.
void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
+ CanQualType T = CGM.getContext().getCanonicalTagType(TD);
// If this is an enum being completed, then we flush all non-struct types from
// the cache. This allows function types and other things that may be derived
// from the enum to be recomputed.
if (const EnumDecl *ED = dyn_cast<EnumDecl>(TD)) {
// Only flush the cache if we've actually already converted this type.
- if (TypeCache.count(ED->getTypeForDecl())) {
+ if (TypeCache.count(T->getTypePtr())) {
// Okay, we formed some types based on this. We speculated that the enum
// would be lowered to i32, so we only need to flush the cache if this
// didn't happen.
@@ -255,7 +256,7 @@ void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
// Only complete it if we converted it already. If we haven't converted it
// yet, we'll just do it lazily.
- if (RecordDeclTypes.count(Context.getTagDeclType(RD).getTypePtr()))
+ if (RecordDeclTypes.count(T.getTypePtr()))
ConvertRecordDeclType(RD);
// If necessary, provide the full definition of a type only used with a
@@ -265,7 +266,7 @@ void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
}
void CodeGenTypes::RefreshTypeCacheForClass(const CXXRecordDecl *RD) {
- QualType T = Context.getRecordType(RD);
+ CanQualType T = Context.getCanonicalTagType(RD);
T = Context.getCanonicalType(T);
const Type *Ty = T.getTypePtr();
@@ -311,11 +312,11 @@ llvm::Type *CodeGenTypes::ConvertFunctionTypeInternal(QualType QFT) {
// Force conversion of all the relevant record types, to make sure
// we re-convert the FunctionType when appropriate.
if (const RecordType *RT = FT->getReturnType()->getAs<RecordType>())
- ConvertRecordDeclType(RT->getDecl());
+ ConvertRecordDeclType(RT->getOriginalDecl()->getDefinitionOrSelf());
if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++)
if (const RecordType *RT = FPT->getParamType(i)->getAs<RecordType>())
- ConvertRecordDeclType(RT->getDecl());
+ ConvertRecordDeclType(RT->getOriginalDecl()->getDefinitionOrSelf());
SkippedLayout = true;
@@ -373,7 +374,7 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
// RecordTypes are cached and processed specially.
if (const RecordType *RT = dyn_cast<RecordType>(Ty))
- return ConvertRecordDeclType(RT->getDecl());
+ return ConvertRecordDeclType(RT->getOriginalDecl()->getDefinitionOrSelf());
llvm::Type *CachedType = nullptr;
auto TCI = TypeCache.find(Ty);
@@ -699,7 +700,8 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
break;
case Type::Enum: {
- const EnumDecl *ED = cast<EnumType>(Ty)->getDecl();
+ const EnumDecl *ED =
+ cast<EnumType>(Ty)->getOriginalDecl()->getDefinitionOrSelf();
if (ED->isCompleteDefinition() || ED->isFixed())
return ConvertType(ED->getIntegerType());
// Return a placeholder 'i32' type. This can be changed later when the
@@ -725,8 +727,10 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
case Type::MemberPointer: {
auto *MPTy = cast<MemberPointerType>(Ty);
if (!getCXXABI().isMemberPointerConvertible(MPTy)) {
- auto *C = MPTy->getMostRecentCXXRecordDecl()->getTypeForDecl();
- auto Insertion = RecordsWithOpaqueMemberPointers.try_emplace(C);
+ CanQualType T = CGM.getContext().getCanonicalTagType(
+ MPTy->getMostRecentCXXRecordDecl());
+ auto Insertion =
+ RecordsWithOpaqueMemberPointers.try_emplace(T.getTypePtr());
if (Insertion.second)
Insertion.first->second = llvm::StructType::create(getLLVMContext());
ResultType = Insertion.first->second;
@@ -789,7 +793,7 @@ bool CodeGenModule::isPaddedAtomicType(const AtomicType *type) {
llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) {
// TagDecl's are not necessarily unique, instead use the (clang)
// type connected to the decl.
- const Type *Key = Context.getTagDeclType(RD).getTypePtr();
+ const Type *Key = Context.getCanonicalTagType(RD).getTypePtr();
llvm::StructType *&Entry = RecordDeclTypes[Key];
@@ -810,7 +814,10 @@ llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) {
if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
for (const auto &I : CRD->bases()) {
if (I.isVirtual()) continue;
- ConvertRecordDeclType(I.getType()->castAs<RecordType>()->getDecl());
+ ConvertRecordDeclType(I.getType()
+ ->castAs<RecordType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf());
}
}
@@ -830,7 +837,7 @@ llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) {
/// getCGRecordLayout - Return record layout info for the given record decl.
const CGRecordLayout &
CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) {
- const Type *Key = Context.getTagDeclType(RD).getTypePtr();
+ const Type *Key = Context.getCanonicalTagType(RD).getTypePtr();
auto I = CGRecordLayouts.find(Key);
if (I != CGRecordLayouts.end())
@@ -869,7 +876,7 @@ bool CodeGenTypes::isZeroInitializable(QualType T) {
// Records are non-zero-initializable if they contain any
// non-zero-initializable subobjects.
if (const RecordType *RT = T->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
return isZeroInitializable(RD);
}
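A second pattern runs through the CodeGenTypes hunks: cache keys that were previously built from `Context.getTagDeclType()` or `Decl::getTypeForDecl()` now come from `ASTContext::getCanonicalTagType()`, so every redeclaration of a record or enum maps to the same canonical key. A rough sketch, assuming only the accessors visible above:

// Sketch: the per-record caches are keyed by the canonical tag type, so a
// forward declaration and its definition resolve to the same entry.
const Type *Key = Context.getCanonicalTagType(RD).getTypePtr();
llvm::StructType *&Entry = RecordDeclTypes[Key];
auto LayoutIt = CGRecordLayouts.find(Key);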
diff --git a/clang/lib/CodeGen/CoverageMappingGen.cpp b/clang/lib/CodeGen/CoverageMappingGen.cpp
index 38aaceb..05fb137 100644
--- a/clang/lib/CodeGen/CoverageMappingGen.cpp
+++ b/clang/lib/CodeGen/CoverageMappingGen.cpp
@@ -2269,6 +2269,11 @@ struct CounterCoverageMappingBuilder
// Track LHS True/False Decision.
const auto DecisionLHS = MCDCBuilder.pop();
+ if (auto Gap =
+ findGapAreaBetween(getEnd(E->getLHS()), getStart(E->getRHS()))) {
+ fillGapAreaWithCount(Gap->getBegin(), Gap->getEnd(), getRegionCounter(E));
+ }
+
// Counter tracks the right hand side of a logical and operator.
extendRegion(E->getRHS());
propagateCounts(getRegionCounter(E), E->getRHS());
@@ -2330,6 +2335,11 @@ struct CounterCoverageMappingBuilder
// Track LHS True/False Decision.
const auto DecisionLHS = MCDCBuilder.pop();
+ if (auto Gap =
+ findGapAreaBetween(getEnd(E->getLHS()), getStart(E->getRHS()))) {
+ fillGapAreaWithCount(Gap->getBegin(), Gap->getEnd(), getRegionCounter(E));
+ }
+
// Counter tracks the right hand side of a logical or operator.
extendRegion(E->getRHS());
propagateCounts(getRegionCounter(E), E->getRHS());
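Both logical-operator visitors now also fill the gap between their operands: the source span between `getEnd(E->getLHS())` and `getStart(E->getRHS())` is assigned the counter of the whole expression, so the operator token itself carries a count. A hypothetical input illustrating which text the gap region covers (not part of the patch):

// For "a && b", the region between the end of "a" and the start of "b"
// (the "&&" and surrounding whitespace) is filled with getRegionCounter(E).
bool both(bool a, bool b) {
  return a &&   // <- gap area now carries the count of the full expression
         b;
}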
diff --git a/clang/lib/CodeGen/HLSLBufferLayoutBuilder.cpp b/clang/lib/CodeGen/HLSLBufferLayoutBuilder.cpp
index 1ed3389..ac56dda 100644
--- a/clang/lib/CodeGen/HLSLBufferLayoutBuilder.cpp
+++ b/clang/lib/CodeGen/HLSLBufferLayoutBuilder.cpp
@@ -101,7 +101,8 @@ llvm::TargetExtType *HLSLBufferLayoutBuilder::createLayoutType(
const RecordType *RT = RecordTypes.back();
RecordTypes.pop_back();
- for (const auto *FD : RT->getDecl()->fields()) {
+ for (const auto *FD :
+ RT->getOriginalDecl()->getDefinitionOrSelf()->fields()) {
assert((!PackOffsets || Index < PackOffsets->size()) &&
"number of elements in layout struct does not match number of "
"packoffset annotations");
diff --git a/clang/lib/CodeGen/ItaniumCXXABI.cpp b/clang/lib/CodeGen/ItaniumCXXABI.cpp
index 5ffc1ed..4ed3775 100644
--- a/clang/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/clang/lib/CodeGen/ItaniumCXXABI.cpp
@@ -831,7 +831,7 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
for (const CXXRecordDecl *Base : CGM.getMostBaseClasses(RD)) {
llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(
getContext().getMemberPointerType(MPT->getPointeeType(),
- /*Qualifier=*/nullptr,
+ /*Qualifier=*/std::nullopt,
Base->getCanonicalDecl()));
llvm::Value *TypeId =
llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
@@ -1241,7 +1241,7 @@ llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD)) {
llvm::Constant *Src = BuildMemberPointer(MD, ThisAdjustment);
QualType SrcType = getContext().getMemberPointerType(
- MD->getType(), /*Qualifier=*/nullptr, MD->getParent());
+ MD->getType(), /*Qualifier=*/std::nullopt, MD->getParent());
return pointerAuthResignMemberFunctionPointer(Src, MPType, SrcType, CGM);
}
@@ -1397,8 +1397,9 @@ void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
// to pass to the deallocation function.
// Grab the vtable pointer as an intptr_t*.
- auto *ClassDecl =
- cast<CXXRecordDecl>(ElementType->castAs<RecordType>()->getDecl());
+ auto *ClassDecl = cast<CXXRecordDecl>(
+ ElementType->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
llvm::Value *VTable = CGF.GetVTablePtr(Ptr, CGF.UnqualPtrTy, ClassDecl);
// Track back to entry -2 and pull out the offset there.
@@ -1484,7 +1485,8 @@ void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
// trivial destructor (or isn't a record), we just pass null.
llvm::Constant *Dtor = nullptr;
if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
- CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
+ CXXRecordDecl *Record =
+ cast<CXXRecordDecl>(RecordTy->getOriginalDecl())->getDefinitionOrSelf();
if (!Record->hasTrivialDestructor()) {
// __cxa_throw is declared to take its destructor as void (*)(void *). We
// must match that if function pointers can be authenticated with a
@@ -1611,7 +1613,8 @@ llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
Address ThisPtr,
llvm::Type *StdTypeInfoPtrTy) {
auto *ClassDecl =
- cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
llvm::Value *Value = CGF.GetVTablePtr(ThisPtr, CGM.GlobalsInt8PtrTy,
ClassDecl);
@@ -1784,7 +1787,8 @@ llvm::Value *ItaniumCXXABI::emitDynamicCastToVoid(CodeGenFunction &CGF,
Address ThisAddr,
QualType SrcRecordTy) {
auto *ClassDecl =
- cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
llvm::Value *OffsetToTop;
if (CGM.getItaniumVTableContext().isRelativeLayout()) {
// Get the vtable pointer.
@@ -2037,7 +2041,7 @@ void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
llvm::Constant *RTTI =
- CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));
+ CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getCanonicalTagType(RD));
// Create and set the initializer.
ConstantInitBuilder builder(CGM);
@@ -3776,7 +3780,8 @@ static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
if (!Context.getLangOpts().RTTI) return false;
if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
- const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(RecordTy->getOriginalDecl())->getDefinitionOrSelf();
if (!RD->hasDefinition())
return false;
@@ -3810,7 +3815,9 @@ static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
/// IsIncompleteClassType - Returns whether the given record type is incomplete.
static bool IsIncompleteClassType(const RecordType *RecordTy) {
- return !RecordTy->getDecl()->isCompleteDefinition();
+ return !RecordTy->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->isCompleteDefinition();
}
/// ContainsIncompleteClassType - Returns whether the given type contains an
@@ -3836,9 +3843,7 @@ static bool ContainsIncompleteClassType(QualType Ty) {
if (const MemberPointerType *MemberPointerTy =
dyn_cast<MemberPointerType>(Ty)) {
// Check if the class type is incomplete.
- const auto *ClassType = cast<RecordType>(
- MemberPointerTy->getMostRecentCXXRecordDecl()->getTypeForDecl());
- if (IsIncompleteClassType(ClassType))
+ if (!MemberPointerTy->getMostRecentCXXRecordDecl()->hasDefinition())
return true;
return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
@@ -3867,8 +3872,9 @@ static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
return false;
// Check that the class is dynamic iff the base is.
- auto *BaseDecl =
- cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
+ auto *BaseDecl = cast<CXXRecordDecl>(
+ Base->getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
if (!BaseDecl->isEmpty() &&
BaseDecl->isDynamicClass() != RD->isDynamicClass())
return false;
@@ -3947,7 +3953,8 @@ void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty,
case Type::Record: {
const CXXRecordDecl *RD =
- cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
+ cast<CXXRecordDecl>(cast<RecordType>(Ty)->getOriginalDecl())
+ ->getDefinitionOrSelf();
if (!RD->hasDefinition() || !RD->getNumBases()) {
VTableName = ClassTypeInfo;
@@ -4069,7 +4076,8 @@ static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
return llvm::GlobalValue::LinkOnceODRLinkage;
if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
- const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(Record->getOriginalDecl())->getDefinitionOrSelf();
if (RD->hasAttr<WeakAttr>())
return llvm::GlobalValue::WeakODRLinkage;
if (CGM.getTriple().isWindowsItaniumEnvironment())
@@ -4233,7 +4241,8 @@ llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
case Type::Record: {
const CXXRecordDecl *RD =
- cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
+ cast<CXXRecordDecl>(cast<RecordType>(Ty)->getOriginalDecl())
+ ->getDefinitionOrSelf();
if (!RD->hasDefinition() || !RD->getNumBases()) {
// We don't need to emit any fields.
break;
@@ -4280,7 +4289,8 @@ llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
if (CGM.getTarget().hasPS4DLLImportExport() &&
GVDLLStorageClass != llvm::GlobalVariable::DLLExportStorageClass) {
if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
- const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getOriginalDecl())
+ ->getDefinitionOrSelf();
if (RD->hasAttr<DLLExportAttr>() ||
CXXRecordNonInlineHasAttr<DLLExportAttr>(RD))
GVDLLStorageClass = llvm::GlobalVariable::DLLExportStorageClass;
@@ -4384,8 +4394,9 @@ static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
unsigned Flags = 0;
- auto *BaseDecl =
- cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
+ auto *BaseDecl = cast<CXXRecordDecl>(
+ Base->getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
if (Base->isVirtual()) {
// Mark the virtual base as seen.
@@ -4485,7 +4496,9 @@ void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType()));
auto *BaseDecl =
- cast<CXXRecordDecl>(Base.getType()->castAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(
+ Base.getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
int64_t OffsetFlags = 0;
@@ -4575,9 +4588,8 @@ ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
// attributes of the type pointed to.
unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
- const auto *ClassType =
- cast<RecordType>(Ty->getMostRecentCXXRecordDecl()->getTypeForDecl());
- if (IsIncompleteClassType(ClassType))
+ const auto *RD = Ty->getMostRecentCXXRecordDecl();
+ if (!RD->hasDefinition())
Flags |= PTI_ContainingClassIncomplete;
llvm::Type *UnsignedIntLTy =
@@ -4595,8 +4607,8 @@ ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
// __context is a pointer to an abi::__class_type_info corresponding to the
// class type containing the member pointed to
// (e.g., the "A" in "int A::*").
- Fields.push_back(
- ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(QualType(ClassType, 0)));
+ CanQualType T = CGM.getContext().getCanonicalTagType(RD);
+ Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(T));
}
llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) {
@@ -5176,7 +5188,7 @@ ItaniumCXXABI::getSignedVirtualMemberFunctionPointer(const CXXMethodDecl *MD) {
.getDecl());
llvm::Constant *thunk = getOrCreateVirtualFunctionPointerThunk(origMD);
QualType funcType = CGM.getContext().getMemberPointerType(
- MD->getType(), /*Qualifier=*/nullptr, MD->getParent());
+ MD->getType(), /*Qualifier=*/std::nullopt, MD->getParent());
return CGM.getMemberFunctionPointer(thunk, funcType);
}
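Two smaller changes accompany the decl-access rewrite in the Itanium ABI: the qualifier argument of `getMemberPointerType()` is now an empty `std::nullopt` rather than a null pointer (the parameter presumably no longer takes a raw `NestedNameSpecifier *`), and the RTTI paths ask the class declaration directly whether a definition exists instead of detouring through `getTypeForDecl()`. A hedged sketch of the latter, reusing names from the hunks above:

// Sketch: completeness is queried on the most recent declaration itself.
const CXXRecordDecl *RD = Ty->getMostRecentCXXRecordDecl();
if (!RD->hasDefinition())
  Flags |= PTI_ContainingClassIncomplete;  // flag name taken from the hunk above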
diff --git a/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/clang/lib/CodeGen/MicrosoftCXXABI.cpp
index e8d2451..88f0648 100644
--- a/clang/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/clang/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -876,7 +876,8 @@ MicrosoftCXXABI::getRecordArgABI(const CXXRecordDecl *RD) const {
// it indirectly. Prior to MSVC version 19.14, passing overaligned
// arguments was not supported and resulted in a compiler error. In 19.14
// and later versions, such arguments are now passed indirectly.
- TypeInfo Info = getContext().getTypeInfo(RD->getTypeForDecl());
+ TypeInfo Info =
+ getContext().getTypeInfo(getContext().getCanonicalTagType(RD));
if (Info.isAlignRequired() && Info.Align > 4)
return RAA_Indirect;
@@ -2924,15 +2925,15 @@ llvm::Constant *MicrosoftCXXABI::EmitMemberPointer(const APValue &MP,
if (!FD)
FD = cast<FieldDecl>(*cast<IndirectFieldDecl>(MPD)->chain_begin());
const CXXRecordDecl *RD = cast<CXXRecordDecl>(FD->getParent());
- RD = RD->getMostRecentNonInjectedDecl();
+ RD = RD->getMostRecentDecl();
C = EmitMemberDataPointer(RD, FieldOffset);
}
if (!MemberPointerPath.empty()) {
const CXXRecordDecl *SrcRD = cast<CXXRecordDecl>(MPD->getDeclContext());
const MemberPointerType *SrcTy =
- Ctx.getMemberPointerType(DstTy->getPointeeType(), /*Qualifier=*/nullptr,
- SrcRD)
+ Ctx.getMemberPointerType(DstTy->getPointeeType(),
+ /*Qualifier=*/std::nullopt, SrcRD)
->castAs<MemberPointerType>();
bool DerivedMember = MP.isMemberPointerToDerivedMember();
@@ -2969,7 +2970,7 @@ MicrosoftCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
assert(MD->isInstance() && "Member function must not be static!");
CharUnits NonVirtualBaseAdjustment = CharUnits::Zero();
- const CXXRecordDecl *RD = MD->getParent()->getMostRecentNonInjectedDecl();
+ const CXXRecordDecl *RD = MD->getParent()->getMostRecentDecl();
CodeGenTypes &Types = CGM.getTypes();
unsigned VBTableIndex = 0;
@@ -3690,7 +3691,7 @@ struct MSRTTIBuilder {
MSRTTIBuilder(MicrosoftCXXABI &ABI, const CXXRecordDecl *RD)
: CGM(ABI.CGM), Context(CGM.getContext()),
VMContext(CGM.getLLVMContext()), Module(CGM.getModule()), RD(RD),
- Linkage(getLinkageForRTTI(CGM.getContext().getTagDeclType(RD))),
+ Linkage(getLinkageForRTTI(CGM.getContext().getCanonicalTagType(RD))),
ABI(ABI) {}
llvm::GlobalVariable *getBaseClassDescriptor(const MSRTTIClass &Classes);
@@ -3864,7 +3865,7 @@ MSRTTIBuilder::getBaseClassDescriptor(const MSRTTIClass &Class) {
// Initialize the BaseClassDescriptor.
llvm::Constant *Fields[] = {
ABI.getImageRelativeConstant(
- ABI.getAddrOfRTTIDescriptor(Context.getTypeDeclType(Class.RD))),
+ ABI.getAddrOfRTTIDescriptor(Context.getCanonicalTagType(Class.RD))),
llvm::ConstantInt::get(CGM.IntTy, Class.NumBases),
llvm::ConstantInt::get(CGM.IntTy, Class.OffsetInVBase),
llvm::ConstantInt::get(CGM.IntTy, VBPtrOffset),
@@ -3911,7 +3912,7 @@ MSRTTIBuilder::getCompleteObjectLocator(const VPtrInfo &Info) {
llvm::ConstantInt::get(CGM.IntTy, OffsetToTop),
llvm::ConstantInt::get(CGM.IntTy, VFPtrOffset),
ABI.getImageRelativeConstant(
- CGM.GetAddrOfRTTIDescriptor(Context.getTypeDeclType(RD))),
+ CGM.GetAddrOfRTTIDescriptor(Context.getCanonicalTagType(RD))),
ABI.getImageRelativeConstant(getClassHierarchyDescriptor()),
ABI.getImageRelativeConstant(COL),
};
@@ -4082,7 +4083,7 @@ MicrosoftCXXABI::getAddrOfCXXCtorClosure(const CXXConstructorDecl *CD,
const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeMSCtorClosure(CD, CT);
llvm::FunctionType *ThunkTy = CGM.getTypes().GetFunctionType(FnInfo);
const CXXRecordDecl *RD = CD->getParent();
- QualType RecordTy = getContext().getRecordType(RD);
+ CanQualType RecordTy = getContext().getCanonicalTagType(RD);
llvm::Function *ThunkFn = llvm::Function::Create(
ThunkTy, getLinkageForRTTI(RecordTy), ThunkName.str(), &CGM.getModule());
ThunkFn->setCallingConv(static_cast<llvm::CallingConv::ID>(
@@ -4318,7 +4319,7 @@ llvm::GlobalVariable *MicrosoftCXXABI::getCatchableTypeArray(QualType T) {
// Turn our record back into a pointer if the exception object is a
// pointer.
- QualType RTTITy = QualType(Class.RD->getTypeForDecl(), 0);
+ CanQualType RTTITy = Context.getCanonicalTagType(Class.RD);
if (IsPointer)
RTTITy = Context.getPointerType(RTTITy);
CatchableTypes.insert(getCatchableType(RTTITy, Class.OffsetInVBase,
@@ -4469,8 +4470,8 @@ void MicrosoftCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
std::pair<llvm::Value *, const CXXRecordDecl *>
MicrosoftCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This,
const CXXRecordDecl *RD) {
- std::tie(This, std::ignore, RD) =
- performBaseAdjustment(CGF, This, QualType(RD->getTypeForDecl(), 0));
+ CanQualType T = CGF.getContext().getCanonicalTagType(RD);
+ std::tie(This, std::ignore, RD) = performBaseAdjustment(CGF, This, T);
return {CGF.GetVTablePtr(This, CGM.Int8PtrTy, RD), RD};
}
diff --git a/clang/lib/CodeGen/SwiftCallingConv.cpp b/clang/lib/CodeGen/SwiftCallingConv.cpp
index 10f9f20b..de58e0d 100644
--- a/clang/lib/CodeGen/SwiftCallingConv.cpp
+++ b/clang/lib/CodeGen/SwiftCallingConv.cpp
@@ -66,9 +66,9 @@ void SwiftAggLowering::addTypedData(QualType type, CharUnits begin) {
// Record types.
if (auto recType = type->getAs<RecordType>()) {
- addTypedData(recType->getDecl(), begin);
+ addTypedData(recType->getOriginalDecl(), begin);
- // Array types.
+ // Array types.
} else if (type->isArrayType()) {
// Incomplete array types (flexible array members?) don't provide
// data to lay out, and the other cases shouldn't be possible.
@@ -814,7 +814,7 @@ static ABIArgInfo classifyType(CodeGenModule &CGM, CanQualType type,
bool forReturn) {
unsigned IndirectAS = CGM.getDataLayout().getAllocaAddrSpace();
if (auto recordType = dyn_cast<RecordType>(type)) {
- auto record = recordType->getDecl();
+ auto record = recordType->getOriginalDecl();
auto &layout = CGM.getContext().getASTRecordLayout(record);
if (mustPassRecordIndirectly(CGM, record))
@@ -822,7 +822,8 @@ static ABIArgInfo classifyType(CodeGenModule &CGM, CanQualType type,
/*AddrSpace=*/IndirectAS, /*byval=*/false);
SwiftAggLowering lowering(CGM);
- lowering.addTypedData(recordType->getDecl(), CharUnits::Zero(), layout);
+ lowering.addTypedData(recordType->getOriginalDecl(), CharUnits::Zero(),
+ layout);
lowering.finish();
return classifyExpandedType(lowering, forReturn, layout.getAlignment(),
diff --git a/clang/lib/CodeGen/TargetBuiltins/WebAssembly.cpp b/clang/lib/CodeGen/TargetBuiltins/WebAssembly.cpp
index 33a8d8f..1a1889a 100644
--- a/clang/lib/CodeGen/TargetBuiltins/WebAssembly.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/WebAssembly.cpp
@@ -246,35 +246,26 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
llvm::FunctionType *LLVMFuncTy =
cast<llvm::FunctionType>(ConvertType(QualType(FuncTy, 0)));
+ bool VarArg = LLVMFuncTy->isVarArg();
unsigned NParams = LLVMFuncTy->getNumParams();
std::vector<Value *> Args;
- Args.reserve(NParams + 3);
+ Args.reserve(NParams + 3 + VarArg);
// The only real argument is the FuncRef
Args.push_back(FuncRef);
// Add the type information
- auto addType = [this, &Args](llvm::Type *T) {
- if (T->isVoidTy()) {
- // Do nothing
- } else if (T->isFloatingPointTy()) {
- Args.push_back(ConstantFP::get(T, 0));
- } else if (T->isIntegerTy()) {
- Args.push_back(ConstantInt::get(T, 0));
- } else if (T->isPointerTy()) {
- Args.push_back(ConstantPointerNull::get(llvm::PointerType::get(
- getLLVMContext(), T->getPointerAddressSpace())));
- } else {
- // TODO: Handle reference types. For now, we reject them in Sema.
- llvm_unreachable("Unhandled type");
- }
- };
-
- addType(LLVMFuncTy->getReturnType());
+ llvm::Type *RetType = LLVMFuncTy->getReturnType();
+ if (!RetType->isVoidTy()) {
+ Args.push_back(PoisonValue::get(RetType));
+ }
// The token type indicates the boundary between return types and param
// types.
Args.push_back(PoisonValue::get(llvm::Type::getTokenTy(getLLVMContext())));
for (unsigned i = 0; i < NParams; i++) {
- addType(LLVMFuncTy->getParamType(i));
+ Args.push_back(PoisonValue::get(LLVMFuncTy->getParamType(i)));
+ }
+ if (VarArg) {
+ Args.push_back(PoisonValue::get(Builder.getPtrTy()));
}
Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_ref_test_func);
return Builder.CreateCall(Callee, Args);
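The `ref.test_func` lowering no longer synthesizes typed zero constants for the signature operands; since those operands presumably only communicate types to `llvm.wasm.ref.test.func`, poison values suffice, and a trailing poison pointer marks a variadic signature. A compressed sketch of the resulting argument list (illustrative only):

// FuncRef, optional poison return, token separator, one poison per parameter,
// plus a poison pointer when the callee type is variadic.
Args.push_back(FuncRef);
if (!RetType->isVoidTy())
  Args.push_back(PoisonValue::get(RetType));
Args.push_back(PoisonValue::get(llvm::Type::getTokenTy(getLLVMContext())));
for (unsigned I = 0; I != NParams; ++I)
  Args.push_back(PoisonValue::get(LLVMFuncTy->getParamType(I)));
if (LLVMFuncTy->isVarArg())
  Args.push_back(PoisonValue::get(Builder.getPtrTy()));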
diff --git a/clang/lib/CodeGen/TargetBuiltins/X86.cpp b/clang/lib/CodeGen/TargetBuiltins/X86.cpp
index e23d19d..b508709 100644
--- a/clang/lib/CodeGen/TargetBuiltins/X86.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/X86.cpp
@@ -1051,18 +1051,9 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_vfmsubsd3_mask3:
return EmitScalarFMAExpr(*this, E, Ops, Ops[2], /*ZeroMask*/ false, 2,
/*NegAcc*/ true);
- case X86::BI__builtin_ia32_vfmaddph:
- case X86::BI__builtin_ia32_vfmaddps:
- case X86::BI__builtin_ia32_vfmaddpd:
- case X86::BI__builtin_ia32_vfmaddph256:
- case X86::BI__builtin_ia32_vfmaddps256:
- case X86::BI__builtin_ia32_vfmaddpd256:
case X86::BI__builtin_ia32_vfmaddph512_mask:
case X86::BI__builtin_ia32_vfmaddph512_maskz:
case X86::BI__builtin_ia32_vfmaddph512_mask3:
- case X86::BI__builtin_ia32_vfmaddbf16128:
- case X86::BI__builtin_ia32_vfmaddbf16256:
- case X86::BI__builtin_ia32_vfmaddbf16512:
case X86::BI__builtin_ia32_vfmaddps512_mask:
case X86::BI__builtin_ia32_vfmaddps512_maskz:
case X86::BI__builtin_ia32_vfmaddps512_mask3:
diff --git a/clang/lib/CodeGen/Targets/AArch64.cpp b/clang/lib/CodeGen/Targets/AArch64.cpp
index b82c469..289f8a9 100644
--- a/clang/lib/CodeGen/Targets/AArch64.cpp
+++ b/clang/lib/CodeGen/Targets/AArch64.cpp
@@ -375,7 +375,7 @@ ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty, bool IsVariadicFn,
if (!passAsAggregateType(Ty)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ Ty = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
if (const auto *EIT = Ty->getAs<BitIntType>())
if (EIT->getNumBits() > 128)
@@ -496,7 +496,7 @@ ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty, bool IsVariadicFn,
const RecordType *RT = Ty->getAs<RecordType>();
if (!RT)
return false;
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
for (const auto &I : CXXRD->bases())
if (!Self(Self, I.getType()))
@@ -548,7 +548,8 @@ ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy,
if (!passAsAggregateType(RetTy)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
+ RetTy =
+ EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
if (const auto *EIT = RetTy->getAs<BitIntType>())
if (EIT->getNumBits() > 128)
@@ -744,7 +745,7 @@ bool AArch64ABIInfo::passAsPureScalableType(
return false;
// Pure scalable types are never unions and never contain unions.
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
if (RD->isUnion())
return false;
diff --git a/clang/lib/CodeGen/Targets/AMDGPU.cpp b/clang/lib/CodeGen/Targets/AMDGPU.cpp
index 47a552a..41bccbb 100644
--- a/clang/lib/CodeGen/Targets/AMDGPU.cpp
+++ b/clang/lib/CodeGen/Targets/AMDGPU.cpp
@@ -96,7 +96,7 @@ unsigned AMDGPUABIInfo::numRegsForType(QualType Ty) const {
}
if (const RecordType *RT = Ty->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
assert(!RD->hasFlexibleArrayMember());
for (const FieldDecl *Field : RD->fields()) {
@@ -153,7 +153,7 @@ ABIArgInfo AMDGPUABIInfo::classifyReturnType(QualType RetTy) const {
return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
if (const RecordType *RT = RetTy->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
if (RD->hasFlexibleArrayMember())
return DefaultABIInfo::classifyReturnType(RetTy);
}
@@ -246,7 +246,7 @@ ABIArgInfo AMDGPUABIInfo::classifyArgumentType(QualType Ty, bool Variadic,
return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
if (const RecordType *RT = Ty->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
if (RD->hasFlexibleArrayMember())
return DefaultABIInfo::classifyArgumentType(Ty);
}
diff --git a/clang/lib/CodeGen/Targets/ARC.cpp b/clang/lib/CodeGen/Targets/ARC.cpp
index c8db7e8..ace524e 100644
--- a/clang/lib/CodeGen/Targets/ARC.cpp
+++ b/clang/lib/CodeGen/Targets/ARC.cpp
@@ -106,13 +106,14 @@ ABIArgInfo ARCABIInfo::classifyArgumentType(QualType Ty,
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ Ty = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
auto SizeInRegs = llvm::alignTo(getContext().getTypeSize(Ty), 32) / 32;
if (isAggregateTypeForABI(Ty)) {
// Structures with flexible arrays are always indirect.
- if (RT && RT->getDecl()->hasFlexibleArrayMember())
+ if (RT &&
+ RT->getOriginalDecl()->getDefinitionOrSelf()->hasFlexibleArrayMember())
return getIndirectByValue(Ty);
// Ignore empty structs/unions.
diff --git a/clang/lib/CodeGen/Targets/ARM.cpp b/clang/lib/CodeGen/Targets/ARM.cpp
index 68f9e01..532ba4c 100644
--- a/clang/lib/CodeGen/Targets/ARM.cpp
+++ b/clang/lib/CodeGen/Targets/ARM.cpp
@@ -383,7 +383,7 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
if (!isAggregateTypeForABI(Ty)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
- Ty = EnumTy->getDecl()->getIntegerType();
+ Ty = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
}
if (const auto *EIT = Ty->getAs<BitIntType>())
@@ -516,7 +516,7 @@ static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
if (!RT) return false;
// Ignore records with flexible arrays.
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
if (RD->hasFlexibleArrayMember())
return false;
@@ -593,7 +593,8 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, bool isVariadic,
if (!isAggregateTypeForABI(RetTy)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
+ RetTy =
+ EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
if (const auto *EIT = RetTy->getAs<BitIntType>())
if (EIT->getNumBits() > 64)
@@ -718,7 +719,7 @@ bool ARMABIInfo::containsAnyFP16Vectors(QualType Ty) const {
return false;
return containsAnyFP16Vectors(AT->getElementType());
} else if (const RecordType *RT = Ty->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
// If this is a C++ record, check the bases first.
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
diff --git a/clang/lib/CodeGen/Targets/BPF.cpp b/clang/lib/CodeGen/Targets/BPF.cpp
index 880a8910..87d50e6 100644
--- a/clang/lib/CodeGen/Targets/BPF.cpp
+++ b/clang/lib/CodeGen/Targets/BPF.cpp
@@ -48,7 +48,7 @@ public:
}
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ Ty = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
ASTContext &Context = getContext();
if (const auto *EIT = Ty->getAs<BitIntType>())
@@ -70,7 +70,8 @@ public:
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
+ RetTy =
+ EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
ASTContext &Context = getContext();
if (const auto *EIT = RetTy->getAs<BitIntType>())
diff --git a/clang/lib/CodeGen/Targets/CSKY.cpp b/clang/lib/CodeGen/Targets/CSKY.cpp
index ef26d48..7e5a16f 100644
--- a/clang/lib/CodeGen/Targets/CSKY.cpp
+++ b/clang/lib/CodeGen/Targets/CSKY.cpp
@@ -116,7 +116,7 @@ ABIArgInfo CSKYABIInfo::classifyArgumentType(QualType Ty, int &ArgGPRsLeft,
if (!isAggregateTypeForABI(Ty)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ Ty = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
// All integral types are promoted to XLen width, unless passed on the
// stack.
diff --git a/clang/lib/CodeGen/Targets/Hexagon.cpp b/clang/lib/CodeGen/Targets/Hexagon.cpp
index 2976657..0c42342 100644
--- a/clang/lib/CodeGen/Targets/Hexagon.cpp
+++ b/clang/lib/CodeGen/Targets/Hexagon.cpp
@@ -98,7 +98,7 @@ ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty,
if (!isAggregateTypeForABI(Ty)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ Ty = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
uint64_t Size = getContext().getTypeSize(Ty);
if (Size <= 64)
@@ -161,7 +161,8 @@ ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
if (!isAggregateTypeForABI(RetTy)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
+ RetTy =
+ EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
if (Size > 64 && RetTy->isBitIntType())
return getNaturalAlignIndirect(
diff --git a/clang/lib/CodeGen/Targets/Lanai.cpp b/clang/lib/CodeGen/Targets/Lanai.cpp
index 6f75bd5..08cb360 100644
--- a/clang/lib/CodeGen/Targets/Lanai.cpp
+++ b/clang/lib/CodeGen/Targets/Lanai.cpp
@@ -102,7 +102,8 @@ ABIArgInfo LanaiABIInfo::classifyArgumentType(QualType Ty,
if (isAggregateTypeForABI(Ty)) {
// Structures with flexible arrays are always indirect.
- if (RT && RT->getDecl()->hasFlexibleArrayMember())
+ if (RT &&
+ RT->getOriginalDecl()->getDefinitionOrSelf()->hasFlexibleArrayMember())
return getIndirectResult(Ty, /*ByVal=*/true, State);
// Ignore empty structs/unions.
@@ -125,7 +126,7 @@ ABIArgInfo LanaiABIInfo::classifyArgumentType(QualType Ty,
// Treat an enum type as its underlying type.
if (const auto *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ Ty = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
bool InReg = shouldUseInReg(Ty, State);
diff --git a/clang/lib/CodeGen/Targets/LoongArch.cpp b/clang/lib/CodeGen/Targets/LoongArch.cpp
index 7640f37..af863e6 100644
--- a/clang/lib/CodeGen/Targets/LoongArch.cpp
+++ b/clang/lib/CodeGen/Targets/LoongArch.cpp
@@ -150,7 +150,7 @@ bool LoongArchABIInfo::detectFARsEligibleStructHelper(
// Non-zero-length arrays of empty records make the struct ineligible to be
// passed via FARs in C++.
if (const auto *RTy = EltTy->getAs<RecordType>()) {
- if (ArraySize != 0 && isa<CXXRecordDecl>(RTy->getDecl()) &&
+ if (ArraySize != 0 && isa<CXXRecordDecl>(RTy->getOriginalDecl()) &&
isEmptyRecord(getContext(), EltTy, true, true))
return false;
}
@@ -169,7 +169,7 @@ bool LoongArchABIInfo::detectFARsEligibleStructHelper(
// copy constructor are not eligible for the FP calling convention.
if (getRecordArgABI(Ty, CGT.getCXXABI()))
return false;
- const RecordDecl *RD = RTy->getDecl();
+ const RecordDecl *RD = RTy->getOriginalDecl()->getDefinitionOrSelf();
if (isEmptyRecord(getContext(), Ty, true, true) &&
(!RD->isUnion() || !isa<CXXRecordDecl>(RD)))
return true;
@@ -181,7 +181,9 @@ bool LoongArchABIInfo::detectFARsEligibleStructHelper(
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
for (const CXXBaseSpecifier &B : CXXRD->bases()) {
const auto *BDecl =
- cast<CXXRecordDecl>(B.getType()->castAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(
+ B.getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
if (!detectFARsEligibleStructHelper(
B.getType(), CurOff + Layout.getBaseClassOffset(BDecl),
Field1Ty, Field1Off, Field2Ty, Field2Off))
@@ -369,7 +371,7 @@ ABIArgInfo LoongArchABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
if (!isAggregateTypeForABI(Ty) && !Ty->isVectorType()) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ Ty = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
// All integral types are promoted to GRLen width.
if (Size < GRLen && Ty->isIntegralOrEnumerationType())
diff --git a/clang/lib/CodeGen/Targets/Mips.cpp b/clang/lib/CodeGen/Targets/Mips.cpp
index c025f73..e12a34c 100644
--- a/clang/lib/CodeGen/Targets/Mips.cpp
+++ b/clang/lib/CodeGen/Targets/Mips.cpp
@@ -161,7 +161,7 @@ llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
return llvm::StructType::get(getVMContext(), ArgList);
}
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
assert(!(TySize % 8) && "Size of structure must be multiple of 8.");
@@ -242,7 +242,7 @@ MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ Ty = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
// Make sure we pass indirectly things that are too large.
if (const auto *EIT = Ty->getAs<BitIntType>())
@@ -265,7 +265,7 @@ MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
SmallVector<llvm::Type*, 8> RTList;
if (RT && RT->isStructureOrClassType()) {
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
unsigned FieldCnt = Layout.getFieldCount();
@@ -333,7 +333,7 @@ ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
+ RetTy = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
// Make sure we pass indirectly things that are too large.
if (const auto *EIT = RetTy->getAs<BitIntType>())
diff --git a/clang/lib/CodeGen/Targets/NVPTX.cpp b/clang/lib/CodeGen/Targets/NVPTX.cpp
index 82bdfe2..e874617 100644
--- a/clang/lib/CodeGen/Targets/NVPTX.cpp
+++ b/clang/lib/CodeGen/Targets/NVPTX.cpp
@@ -133,7 +133,7 @@ bool NVPTXABIInfo::isUnsupportedType(QualType T) const {
const auto *RT = T->getAs<RecordType>();
if (!RT)
return false;
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
// If this is a C++ record, check the bases first.
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
@@ -174,7 +174,7 @@ ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
+ RetTy = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
: ABIArgInfo::getDirect());
@@ -183,7 +183,7 @@ ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ Ty = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
// Return aggregates type as indirect by value
if (isAggregateTypeForABI(Ty)) {
diff --git a/clang/lib/CodeGen/Targets/PPC.cpp b/clang/lib/CodeGen/Targets/PPC.cpp
index 4df4c9f..38e7639 100644
--- a/clang/lib/CodeGen/Targets/PPC.cpp
+++ b/clang/lib/CodeGen/Targets/PPC.cpp
@@ -154,7 +154,7 @@ public:
bool AIXABIInfo::isPromotableTypeForABI(QualType Ty) const {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ Ty = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
// Promotable integer types are required to be promoted by the ABI.
if (getContext().isPromotableIntegerType(Ty))
@@ -295,7 +295,9 @@ void AIXTargetCodeGenInfo::setTargetAttributes(
unsigned Alignment = Context.toBits(Context.getDeclAlign(D)) / 8;
const auto *Ty = VarD->getType().getTypePtr();
const RecordDecl *RDecl =
- Ty->isRecordType() ? Ty->getAs<RecordType>()->getDecl() : nullptr;
+ Ty->isRecordType()
+ ? Ty->getAs<RecordType>()->getOriginalDecl()->getDefinitionOrSelf()
+ : nullptr;
bool EmitDiagnostic = UserSpecifiedTOC && GV->hasExternalLinkage();
auto reportUnsupportedWarning = [&](bool ShouldEmitWarning, StringRef Msg) {
@@ -707,7 +709,7 @@ bool
PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ Ty = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
// Promotable integer types are required to be promoted by the ABI.
if (isPromotableIntegerTypeForABI(Ty))
diff --git a/clang/lib/CodeGen/Targets/RISCV.cpp b/clang/lib/CodeGen/Targets/RISCV.cpp
index a7f9298..e42bd6c 100644
--- a/clang/lib/CodeGen/Targets/RISCV.cpp
+++ b/clang/lib/CodeGen/Targets/RISCV.cpp
@@ -228,7 +228,7 @@ bool RISCVABIInfo::detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
// Non-zero-length arrays of empty records make the struct ineligible for
// the FP calling convention in C++.
if (const auto *RTy = EltTy->getAs<RecordType>()) {
- if (ArraySize != 0 && isa<CXXRecordDecl>(RTy->getDecl()) &&
+ if (ArraySize != 0 && isa<CXXRecordDecl>(RTy->getOriginalDecl()) &&
isEmptyRecord(getContext(), EltTy, true, true))
return false;
}
@@ -250,7 +250,7 @@ bool RISCVABIInfo::detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
return false;
if (isEmptyRecord(getContext(), Ty, true, true))
return true;
- const RecordDecl *RD = RTy->getDecl();
+ const RecordDecl *RD = RTy->getOriginalDecl()->getDefinitionOrSelf();
// Unions aren't eligible unless they're empty (which is caught above).
if (RD->isUnion())
return false;
@@ -259,7 +259,9 @@ bool RISCVABIInfo::detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
for (const CXXBaseSpecifier &B : CXXRD->bases()) {
const auto *BDecl =
- cast<CXXRecordDecl>(B.getType()->castAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(
+ B.getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
CharUnits BaseOff = Layout.getBaseClassOffset(BDecl);
bool Ret = detectFPCCEligibleStructHelper(B.getType(), CurOff + BaseOff,
Field1Ty, Field1Off, Field2Ty,
@@ -673,7 +675,7 @@ ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
if (!isAggregateTypeForABI(Ty) && !Ty->isVectorType()) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ Ty = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
// All integral types are promoted to XLen width
if (Size < XLen && Ty->isIntegralOrEnumerationType()) {
diff --git a/clang/lib/CodeGen/Targets/SPIR.cpp b/clang/lib/CodeGen/Targets/SPIR.cpp
index d952c6e..237aea7 100644
--- a/clang/lib/CodeGen/Targets/SPIR.cpp
+++ b/clang/lib/CodeGen/Targets/SPIR.cpp
@@ -119,7 +119,7 @@ ABIArgInfo SPIRVABIInfo::classifyReturnType(QualType RetTy) const {
return DefaultABIInfo::classifyReturnType(RetTy);
if (const RecordType *RT = RetTy->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
if (RD->hasFlexibleArrayMember())
return DefaultABIInfo::classifyReturnType(RetTy);
}
@@ -187,7 +187,7 @@ ABIArgInfo SPIRVABIInfo::classifyArgumentType(QualType Ty) const {
RAA == CGCXXABI::RAA_DirectInMemory);
if (const RecordType *RT = Ty->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
if (RD->hasFlexibleArrayMember())
return DefaultABIInfo::classifyArgumentType(Ty);
}
@@ -432,7 +432,7 @@ static llvm::Type *getInlineSpirvType(CodeGenModule &CGM,
case SpirvOperandKind::TypeId: {
QualType TypeOperand = Operand.getResultType();
if (auto *RT = TypeOperand->getAs<RecordType>()) {
- auto *RD = RT->getDecl();
+ auto *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
assert(RD->isCompleteDefinition() &&
"Type completion should have been required in Sema");
diff --git a/clang/lib/CodeGen/Targets/Sparc.cpp b/clang/lib/CodeGen/Targets/Sparc.cpp
index 9642196..1547bed 100644
--- a/clang/lib/CodeGen/Targets/Sparc.cpp
+++ b/clang/lib/CodeGen/Targets/Sparc.cpp
@@ -238,7 +238,7 @@ SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ Ty = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
// Integer types smaller than a register are extended.
if (Size < 64 && Ty->isIntegerType())
diff --git a/clang/lib/CodeGen/Targets/SystemZ.cpp b/clang/lib/CodeGen/Targets/SystemZ.cpp
index 6ea6c7a..38cc4d3 100644
--- a/clang/lib/CodeGen/Targets/SystemZ.cpp
+++ b/clang/lib/CodeGen/Targets/SystemZ.cpp
@@ -146,7 +146,7 @@ public:
bool SystemZABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ Ty = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
// Promotable integer types are required to be promoted by the ABI.
if (ABIInfo::isPromotableIntegerTypeForABI(Ty))
@@ -211,7 +211,7 @@ QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const {
const RecordType *RT = Ty->getAs<RecordType>();
if (RT && RT->isStructureOrClassType()) {
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
QualType Found;
// If this is a C++ record, check the bases first.
@@ -455,7 +455,7 @@ ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
if (const RecordType *RT = Ty->getAs<RecordType>()) {
// Structures with flexible arrays have variable length, so really
// fail the size test above.
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
if (RD->hasFlexibleArrayMember())
return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
/*ByVal=*/false);
@@ -526,7 +526,7 @@ bool SystemZTargetCodeGenInfo::isVectorTypeBased(const Type *Ty,
return true;
if (const auto *RecordTy = Ty->getAs<RecordType>()) {
- const RecordDecl *RD = RecordTy->getDecl();
+ const RecordDecl *RD = RecordTy->getOriginalDecl()->getDefinitionOrSelf();
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
if (CXXRD->hasDefinition())
for (const auto &I : CXXRD->bases())
diff --git a/clang/lib/CodeGen/Targets/WebAssembly.cpp b/clang/lib/CodeGen/Targets/WebAssembly.cpp
index 9217c78..ac8dcd2 100644
--- a/clang/lib/CodeGen/Targets/WebAssembly.cpp
+++ b/clang/lib/CodeGen/Targets/WebAssembly.cpp
@@ -118,7 +118,8 @@ ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const {
const RecordType *RT = Ty->getAs<RecordType>();
assert(RT);
bool HasBitField = false;
- for (auto *Field : RT->getDecl()->fields()) {
+ for (auto *Field :
+ RT->getOriginalDecl()->getDefinitionOrSelf()->fields()) {
if (Field->isBitField()) {
HasBitField = true;
break;
diff --git a/clang/lib/CodeGen/Targets/X86.cpp b/clang/lib/CodeGen/Targets/X86.cpp
index abb9148..d3431aa 100644
--- a/clang/lib/CodeGen/Targets/X86.cpp
+++ b/clang/lib/CodeGen/Targets/X86.cpp
@@ -359,7 +359,8 @@ bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
// Structure types are passed in register if all fields would be
// passed in a register.
- for (const auto *FD : RT->getDecl()->fields()) {
+ for (const auto *FD :
+ RT->getOriginalDecl()->getDefinitionOrSelf()->fields()) {
// Empty fields are ignored.
if (isEmptyField(Context, FD, true))
continue;
@@ -429,9 +430,9 @@ bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const {
const RecordType *RT = Ty->getAs<RecordType>();
if (!RT)
return false;
- const RecordDecl *RD = RT->getDecl();
uint64_t Size = 0;
- if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
+ if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
if (!IsWin32StructABI) {
// On non-Windows, we have to conservatively match our old bitcode
// prototypes in order to be ABI-compatible at the bitcode level.
@@ -509,7 +510,9 @@ ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
if (isAggregateTypeForABI(RetTy)) {
if (const RecordType *RT = RetTy->getAs<RecordType>()) {
// Structures with flexible arrays are always indirect.
- if (RT->getDecl()->hasFlexibleArrayMember())
+ if (RT->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->hasFlexibleArrayMember())
return getIndirectReturnResult(RetTy, State);
}
@@ -554,7 +557,7 @@ ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
+ RetTy = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
if (const auto *EIT = RetTy->getAs<BitIntType>())
if (EIT->getNumBits() > 64)
@@ -796,7 +799,8 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty, CCState &State,
if (isAggregateTypeForABI(Ty)) {
// Structures with flexible arrays are always indirect.
// FIXME: This should not be byval!
- if (RT && RT->getDecl()->hasFlexibleArrayMember())
+ if (RT &&
+ RT->getOriginalDecl()->getDefinitionOrSelf()->hasFlexibleArrayMember())
return getIndirectResult(Ty, true, State);
// Ignore empty structs/unions on non-Windows.
@@ -831,7 +835,7 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty, CCState &State,
unsigned AlignInBits = 0;
if (RT) {
const ASTRecordLayout &Layout =
- getContext().getASTRecordLayout(RT->getDecl());
+ getContext().getASTRecordLayout(RT->getOriginalDecl());
AlignInBits = getContext().toBits(Layout.getRequiredAlignment());
} else if (TI.isAlignRequired()) {
AlignInBits = TI.Align;
@@ -883,7 +887,7 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty, CCState &State,
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ Ty = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
bool InReg = shouldPrimitiveUseInReg(Ty, State);
@@ -1847,7 +1851,8 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, Class &Lo,
if (const EnumType *ET = Ty->getAs<EnumType>()) {
// Classify the underlying integer type.
- classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
+ classify(ET->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType(),
+ OffsetBase, Lo, Hi, isNamedArg);
return;
}
@@ -2053,7 +2058,7 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, Class &Lo,
if (getRecordArgABI(RT, getCXXABI()))
return;
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
// Assume variable sized types are passed in memory.
if (RD->hasFlexibleArrayMember())
@@ -2070,7 +2075,9 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, Class &Lo,
assert(!I.isVirtual() && !I.getType()->isDependentType() &&
"Unexpected base class!");
const auto *Base =
- cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(
+ I.getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
// Classify this field.
//
@@ -2184,7 +2191,7 @@ ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
if (!isAggregateTypeForABI(Ty)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ Ty = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
if (Ty->isBitIntType())
return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace());
@@ -2226,7 +2233,7 @@ ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
!Ty->isBitIntType()) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ Ty = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
: ABIArgInfo::getDirect());
@@ -2347,7 +2354,7 @@ static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
}
if (const RecordType *RT = Ty->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
// If this is a C++ record, check the bases first.
@@ -2356,7 +2363,9 @@ static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
assert(!I.isVirtual() && !I.getType()->isDependentType() &&
"Unexpected base class!");
const auto *Base =
- cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(
+ I.getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
// If the base is after the span we care about, ignore it.
unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
@@ -2637,7 +2646,8 @@ ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy) const {
if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
+ RetTy =
+ EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
if (RetTy->isIntegralOrEnumerationType() &&
isPromotableIntegerTypeForABI(RetTy))
@@ -2787,7 +2797,7 @@ X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned freeIntRegs,
if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ Ty = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
if (Ty->isIntegralOrEnumerationType() &&
isPromotableIntegerTypeForABI(Ty))
@@ -2866,14 +2876,15 @@ ABIArgInfo
X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
unsigned &NeededSSE,
unsigned &MaxVectorWidth) const {
- auto RT = Ty->getAs<RecordType>();
- assert(RT && "classifyRegCallStructType only valid with struct types");
+ auto *RD = cast<RecordType>(Ty.getCanonicalType())
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf();
- if (RT->getDecl()->hasFlexibleArrayMember())
+ if (RD->hasFlexibleArrayMember())
return getIndirectReturnResult(Ty);
// Sum up bases
- if (auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ if (auto CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
if (CXXRD->isDynamicClass()) {
NeededInt = NeededSSE = 0;
return getIndirectReturnResult(Ty);
@@ -2889,7 +2900,7 @@ X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
}
// Sum up members
- for (const auto *FD : RT->getDecl()->fields()) {
+ for (const auto *FD : RD->fields()) {
QualType MTy = FD->getType();
if (MTy->isRecordType() && !MTy->isUnionType()) {
if (classifyRegCallStructTypeImpl(MTy, NeededInt, NeededSSE,
@@ -3313,7 +3324,7 @@ ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
return ABIArgInfo::getIgnore();
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ Ty = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
TypeInfo Info = getContext().getTypeInfo(Ty);
uint64_t Width = Info.Width;
@@ -3327,7 +3338,7 @@ ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
RAA == CGCXXABI::RAA_DirectInMemory);
}
- if (RT->getDecl()->hasFlexibleArrayMember())
+ if (RT->getOriginalDecl()->getDefinitionOrSelf()->hasFlexibleArrayMember())
return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
/*ByVal=*/false);
}
diff --git a/clang/lib/CodeGen/Targets/XCore.cpp b/clang/lib/CodeGen/Targets/XCore.cpp
index b7824bd..aa6947b 100644
--- a/clang/lib/CodeGen/Targets/XCore.cpp
+++ b/clang/lib/CodeGen/Targets/XCore.cpp
@@ -379,7 +379,7 @@ static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT,
// We collect all encoded fields and order as necessary.
bool IsRecursive = false;
- const RecordDecl *RD = RT->getDecl()->getDefinition();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinition();
if (RD && !RD->field_empty()) {
// An incomplete TypeString stub is placed in the cache for this RecordType
// so that recursive calls to this RecordType will use it whilst building a
@@ -428,7 +428,7 @@ static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET,
Enc += "){";
// We collect all encoded enumerations and order them alphanumerically.
- if (const EnumDecl *ED = ET->getDecl()->getDefinition()) {
+ if (const EnumDecl *ED = ET->getOriginalDecl()->getDefinition()) {
SmallVector<FieldEncoding, 16> FE;
for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E;
++I) {
diff --git a/clang/lib/Driver/Compilation.cpp b/clang/lib/Driver/Compilation.cpp
index a39952e..4e30031 100644
--- a/clang/lib/Driver/Compilation.cpp
+++ b/clang/lib/Driver/Compilation.cpp
@@ -232,11 +232,6 @@ static bool ActionFailed(const Action *A,
return false;
}
-static bool InputsOk(const Command &C,
- const FailingCommandList &FailingCommands) {
- return !ActionFailed(&C.getSource(), FailingCommands);
-}
-
void Compilation::ExecuteJobs(const JobList &Jobs,
FailingCommandList &FailingCommands,
bool LogOnly) const {
@@ -245,7 +240,7 @@ void Compilation::ExecuteJobs(const JobList &Jobs,
// In all but CLMode, execute all the jobs unless the necessary inputs for the
  // job are missing due to previous failures.
for (const auto &Job : Jobs) {
- if (!InputsOk(Job, FailingCommands))
+ if (ActionFailed(&Job.getSource(), FailingCommands))
continue;
const Command *FailingCommand = nullptr;
if (int Res = ExecuteCommand(Job, FailingCommand, LogOnly)) {
diff --git a/clang/lib/Driver/ToolChains/Arch/RISCV.cpp b/clang/lib/Driver/ToolChains/Arch/RISCV.cpp
index baa2c8c..76dde0d 100644
--- a/clang/lib/Driver/ToolChains/Arch/RISCV.cpp
+++ b/clang/lib/Driver/ToolChains/Arch/RISCV.cpp
@@ -273,9 +273,12 @@ std::string riscv::getRISCVArch(const llvm::opt::ArgList &Args,
// Clang does not yet support MULTILIB_REUSE, so we use `rv{XLEN}imafdc`
// instead of `rv{XLEN}gc` though they are (currently) equivalent.
- // 1. If `-march=` is specified, use it.
- if (const Arg *A = Args.getLastArg(options::OPT_march_EQ))
- return A->getValue();
+ // 1. If `-march=` is specified, use it unless the value is "unset".
+ if (const Arg *A = Args.getLastArg(options::OPT_march_EQ)) {
+ StringRef MArch = A->getValue();
+ if (MArch != "unset")
+ return MArch.str();
+ }
// 2. Get march (isa string) based on `-mcpu=`
if (const Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) {
@@ -300,7 +303,7 @@ std::string riscv::getRISCVArch(const llvm::opt::ArgList &Args,
StringRef MArch = llvm::RISCV::getMArchFromMcpu(CPU);
// Bypass if target cpu's default march is empty.
- if (MArch != "")
+ if (!MArch.empty())
return MArch.str();
}
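
The RISCV.cpp change above adds an "unset" escape hatch for -march= and replaces the comparison against "" with empty(). A minimal standalone sketch of the resulting selection order, using std::string_view in place of StringRef (pickMArch is an invented helper, not the driver code):

#include <cassert>
#include <optional>
#include <string_view>

static std::optional<std::string_view> pickMArch(std::string_view LastMArch,
                                                 std::string_view CpuDefault) {
  // 1. An explicit -march= value wins, unless it is the "unset" sentinel.
  if (!LastMArch.empty() && LastMArch != "unset")
    return LastMArch;
  // 2. Otherwise fall back to the CPU's default march, skipping empty ones.
  if (!CpuDefault.empty())
    return CpuDefault;
  return std::nullopt; // caller falls back to the rv{XLEN}imafdc default
}

int main() {
  assert(*pickMArch("rv64gc", "rv64imac") == "rv64gc");
  assert(*pickMArch("unset", "rv64imac") == "rv64imac");
  assert(!pickMArch("unset", "").has_value());
  return 0;
}
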
diff --git a/clang/lib/ExtractAPI/DeclarationFragments.cpp b/clang/lib/ExtractAPI/DeclarationFragments.cpp
index 51a6f6b..541af6d 100644
--- a/clang/lib/ExtractAPI/DeclarationFragments.cpp
+++ b/clang/lib/ExtractAPI/DeclarationFragments.cpp
@@ -205,45 +205,39 @@ DeclarationFragments::getStructureTypeFragment(const RecordDecl *Record) {
// Build declaration fragments for NNS recursively so that we have the USR for
// every part in a qualified name, and also leaves the actual underlying type
// cleaner for its own fragment.
-DeclarationFragments
-DeclarationFragmentsBuilder::getFragmentsForNNS(const NestedNameSpecifier *NNS,
- ASTContext &Context,
- DeclarationFragments &After) {
+DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForNNS(
+ NestedNameSpecifier NNS, ASTContext &Context, DeclarationFragments &After) {
DeclarationFragments Fragments;
- if (NNS->getPrefix())
- Fragments.append(getFragmentsForNNS(NNS->getPrefix(), Context, After));
-
- switch (NNS->getKind()) {
- case NestedNameSpecifier::Identifier:
- Fragments.append(NNS->getAsIdentifier()->getName(),
- DeclarationFragments::FragmentKind::Identifier);
- break;
+ switch (NNS.getKind()) {
+ case NestedNameSpecifier::Kind::Null:
+ return Fragments;
- case NestedNameSpecifier::Namespace: {
- const NamespaceBaseDecl *NS = NNS->getAsNamespace();
- if (const auto *Namespace = dyn_cast<NamespaceDecl>(NS);
- Namespace && Namespace->isAnonymousNamespace())
+ case NestedNameSpecifier::Kind::Namespace: {
+ auto [Namespace, Prefix] = NNS.getAsNamespaceAndPrefix();
+ Fragments.append(getFragmentsForNNS(Prefix, Context, After));
+ if (const auto *NS = dyn_cast<NamespaceDecl>(Namespace);
+ NS && NS->isAnonymousNamespace())
return Fragments;
SmallString<128> USR;
- index::generateUSRForDecl(NS, USR);
- Fragments.append(NS->getName(),
- DeclarationFragments::FragmentKind::Identifier, USR, NS);
+ index::generateUSRForDecl(Namespace, USR);
+ Fragments.append(Namespace->getName(),
+ DeclarationFragments::FragmentKind::Identifier, USR,
+ Namespace);
break;
}
- case NestedNameSpecifier::Global:
+ case NestedNameSpecifier::Kind::Global:
// The global specifier `::` at the beginning. No stored value.
break;
- case NestedNameSpecifier::Super:
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
// Microsoft's `__super` specifier.
Fragments.append("__super", DeclarationFragments::FragmentKind::Keyword);
break;
- case NestedNameSpecifier::TypeSpec: {
- const Type *T = NNS->getAsType();
+ case NestedNameSpecifier::Kind::Type: {
// FIXME: Handle C++ template specialization type
- Fragments.append(getFragmentsForType(T, Context, After));
+ Fragments.append(getFragmentsForType(NNS.getAsType(), Context, After));
break;
}
}
@@ -273,26 +267,6 @@ DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForType(
return Fragments;
}
- // An ElaboratedType is a sugar for types that are referred to using an
- // elaborated keyword, e.g., `struct S`, `enum E`, or (in C++) via a
- // qualified name, e.g., `N::M::type`, or both.
- if (const ElaboratedType *ET = dyn_cast<ElaboratedType>(T)) {
- ElaboratedTypeKeyword Keyword = ET->getKeyword();
- if (Keyword != ElaboratedTypeKeyword::None) {
- Fragments
- .append(ElaboratedType::getKeywordName(Keyword),
- DeclarationFragments::FragmentKind::Keyword)
- .appendSpace();
- }
-
- if (const NestedNameSpecifier *NNS = ET->getQualifier())
- Fragments.append(getFragmentsForNNS(NNS, Context, After));
-
- // After handling the elaborated keyword or qualified name, build
- // declaration fragments for the desugared underlying type.
- return Fragments.append(getFragmentsForType(ET->desugar(), Context, After));
- }
-
// If the type is a typedefed type, get the underlying TypedefNameDecl for a
// direct reference to the typedef instead of the wrapped type.
@@ -303,7 +277,18 @@ DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForType(
TypedefUnderlyingTypeResolver TypedefResolver(Context);
std::string USR = TypedefResolver.getUSRForType(QualType(T, 0));
- if (T->isObjCIdType()) {
+ if (ElaboratedTypeKeyword Keyword = TypedefTy->getKeyword();
+ Keyword != ElaboratedTypeKeyword::None) {
+ Fragments
+ .append(KeywordHelpers::getKeywordName(Keyword),
+ DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace();
+ }
+
+ Fragments.append(
+ getFragmentsForNNS(TypedefTy->getQualifier(), Context, After));
+
+ if (TypedefTy->isObjCIdType()) {
return Fragments.append(Decl->getName(),
DeclarationFragments::FragmentKind::Keyword);
}
@@ -396,14 +381,26 @@ DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForType(
if (const TemplateSpecializationType *TemplSpecTy =
dyn_cast<TemplateSpecializationType>(T)) {
- const auto TemplName = TemplSpecTy->getTemplateName();
+ if (ElaboratedTypeKeyword Keyword = TemplSpecTy->getKeyword();
+ Keyword != ElaboratedTypeKeyword::None)
+ Fragments
+ .append(KeywordHelpers::getKeywordName(Keyword),
+ DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace();
+
+ auto TemplName = TemplSpecTy->getTemplateName();
std::string Str;
raw_string_ostream Stream(Str);
TemplName.print(Stream, Context.getPrintingPolicy(),
TemplateName::Qualified::AsWritten);
SmallString<64> USR("");
+ if (const auto *QTN = TemplName.getAsQualifiedTemplateName()) {
+ Fragments.append(getFragmentsForNNS(QTN->getQualifier(), Context, After));
+ TemplName = QTN->getUnderlyingTemplate();
+ }
if (const auto *TemplDecl = TemplName.getAsTemplateDecl())
index::generateUSRForDecl(TemplDecl, USR);
+ // FIXME: Handle other kinds of TemplateNames.
return Fragments
.append(Str, DeclarationFragments::FragmentKind::TypeIdentifier, USR)
@@ -413,14 +410,19 @@ DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForType(
.append(">", DeclarationFragments::FragmentKind::Text);
}
- // Everything we care about has been handled now, reduce to the canonical
- // unqualified base type.
- QualType Base = T->getCanonicalTypeUnqualified();
-
// If the base type is a TagType (struct/interface/union/class/enum), let's
// get the underlying Decl for better names and USRs.
- if (const TagType *TagTy = dyn_cast<TagType>(Base)) {
- const TagDecl *Decl = TagTy->getDecl();
+ if (const TagType *TagTy = dyn_cast<TagType>(T)) {
+ if (ElaboratedTypeKeyword Keyword = TagTy->getKeyword();
+ Keyword != ElaboratedTypeKeyword::None)
+ Fragments
+ .append(KeywordHelpers::getKeywordName(Keyword),
+ DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace();
+
+ Fragments.append(getFragmentsForNNS(TagTy->getQualifier(), Context, After));
+
+ const TagDecl *Decl = TagTy->getOriginalDecl();
// Anonymous decl, skip this fragment.
if (Decl->getName().empty())
return Fragments.append("{ ... }",
@@ -432,6 +434,10 @@ DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForType(
TagUSR, Decl);
}
+ // Everything we care about has been handled now, reduce to the canonical
+ // unqualified base type.
+ QualType Base = T->getCanonicalTypeUnqualified();
+
// If the base type is an ObjCInterfaceType, use the underlying
// ObjCInterfaceDecl for the true USR.
if (const auto *ObjCIT = dyn_cast<ObjCInterfaceType>(Base)) {
diff --git a/clang/lib/ExtractAPI/TypedefUnderlyingTypeResolver.cpp b/clang/lib/ExtractAPI/TypedefUnderlyingTypeResolver.cpp
index 41e4e0c..5adbbc6 100644
--- a/clang/lib/ExtractAPI/TypedefUnderlyingTypeResolver.cpp
+++ b/clang/lib/ExtractAPI/TypedefUnderlyingTypeResolver.cpp
@@ -26,7 +26,7 @@ TypedefUnderlyingTypeResolver::getUnderlyingTypeDecl(QualType Type) const {
if (TypedefTy)
TypeDecl = TypedefTy->getDecl();
if (const TagType *TagTy = Type->getAs<TagType>()) {
- TypeDecl = TagTy->getDecl();
+ TypeDecl = TagTy->getOriginalDecl();
} else if (const ObjCInterfaceType *ObjCITy =
Type->getAs<ObjCInterfaceType>()) {
TypeDecl = ObjCITy->getDecl();
diff --git a/clang/lib/Frontend/ASTConsumers.cpp b/clang/lib/Frontend/ASTConsumers.cpp
index ab8a35a..67c8761 100644
--- a/clang/lib/Frontend/ASTConsumers.cpp
+++ b/clang/lib/Frontend/ASTConsumers.cpp
@@ -97,6 +97,7 @@ namespace {
Out << "Not a DeclContext\n";
} else if (OutputKind == Print) {
PrintingPolicy Policy(D->getASTContext().getLangOpts());
+ Policy.IncludeTagDefinition = true;
D->print(Out, Policy, /*Indentation=*/0, /*PrintInstantiation=*/true);
} else if (OutputKind != None) {
D->dump(Out, OutputKind == DumpFull, OutputFormat);
@@ -112,8 +113,10 @@ namespace {
// FIXME: Support combining -ast-dump-decl-types with -ast-dump-lookups.
if (auto *VD = dyn_cast<ValueDecl>(InnerD))
VD->getType().dump(Out, VD->getASTContext());
- if (auto *TD = dyn_cast<TypeDecl>(InnerD))
- TD->getTypeForDecl()->dump(Out, TD->getASTContext());
+ if (auto *TD = dyn_cast<TypeDecl>(InnerD)) {
+ const ASTContext &Ctx = TD->getASTContext();
+ Ctx.getTypeDeclType(TD)->dump(Out, Ctx);
+ }
}
}
diff --git a/clang/lib/Frontend/ASTUnit.cpp b/clang/lib/Frontend/ASTUnit.cpp
index a407825..03b08cd 100644
--- a/clang/lib/Frontend/ASTUnit.cpp
+++ b/clang/lib/Frontend/ASTUnit.cpp
@@ -396,7 +396,7 @@ void ASTUnit::CacheCodeCompletionResults() {
// Keep track of the type of this completion in an ASTContext-agnostic
// way.
- QualType UsageType = getDeclUsageType(*Ctx, R.Declaration);
+ QualType UsageType = getDeclUsageType(*Ctx, R.Qualifier, R.Declaration);
if (UsageType.isNull()) {
CachedResult.TypeClass = STC_Void;
CachedResult.Type = 0;
diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp
index 9f77e62..2ea3ed7 100644
--- a/clang/lib/Frontend/CompilerInvocation.cpp
+++ b/clang/lib/Frontend/CompilerInvocation.cpp
@@ -3936,47 +3936,18 @@ void CompilerInvocationBase::GenerateLangArgs(const LangOptions &Opts,
GenerateArg(Consumer, OPT_fsanitize_ignorelist_EQ, F);
switch (Opts.getClangABICompat()) {
- case LangOptions::ClangABI::Ver3_8:
- GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "3.8");
+#define ABI_VER_MAJOR_MINOR(Major, Minor) \
+ case LangOptions::ClangABI::Ver##Major##_##Minor: \
+ GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, #Major "." #Minor); \
break;
- case LangOptions::ClangABI::Ver4:
- GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "4.0");
+#define ABI_VER_MAJOR(Major) \
+ case LangOptions::ClangABI::Ver##Major: \
+ GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, #Major ".0"); \
break;
- case LangOptions::ClangABI::Ver6:
- GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "6.0");
- break;
- case LangOptions::ClangABI::Ver7:
- GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "7.0");
- break;
- case LangOptions::ClangABI::Ver9:
- GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "9.0");
- break;
- case LangOptions::ClangABI::Ver11:
- GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "11.0");
- break;
- case LangOptions::ClangABI::Ver12:
- GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "12.0");
- break;
- case LangOptions::ClangABI::Ver14:
- GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "14.0");
- break;
- case LangOptions::ClangABI::Ver15:
- GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "15.0");
- break;
- case LangOptions::ClangABI::Ver17:
- GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "17.0");
- break;
- case LangOptions::ClangABI::Ver18:
- GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "18.0");
- break;
- case LangOptions::ClangABI::Ver19:
- GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "19.0");
- break;
- case LangOptions::ClangABI::Ver20:
- GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "20.0");
- break;
- case LangOptions::ClangABI::Latest:
+#define ABI_VER_LATEST(Latest) \
+ case LangOptions::ClangABI::Latest: \
break;
+#include "clang/Basic/ABIVersions.def"
}
if (Opts.getSignReturnAddressScope() ==
@@ -4470,7 +4441,7 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
StringRef Ver = A->getValue();
std::pair<StringRef, StringRef> VerParts = Ver.split('.');
- unsigned Major, Minor = 0;
+ int Major, Minor = 0;
// Check the version number is valid: either 3.x (0 <= x <= 9) or
// y or y.0 (4 <= y <= current version).
@@ -4482,32 +4453,18 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
!VerParts.second.getAsInteger(10, Minor)
: VerParts.first.size() == Ver.size() || VerParts.second == "0")) {
// Got a valid version number.
- if (Major == 3 && Minor <= 8)
- Opts.setClangABICompat(LangOptions::ClangABI::Ver3_8);
- else if (Major <= 4)
- Opts.setClangABICompat(LangOptions::ClangABI::Ver4);
- else if (Major <= 6)
- Opts.setClangABICompat(LangOptions::ClangABI::Ver6);
- else if (Major <= 7)
- Opts.setClangABICompat(LangOptions::ClangABI::Ver7);
- else if (Major <= 9)
- Opts.setClangABICompat(LangOptions::ClangABI::Ver9);
- else if (Major <= 11)
- Opts.setClangABICompat(LangOptions::ClangABI::Ver11);
- else if (Major <= 12)
- Opts.setClangABICompat(LangOptions::ClangABI::Ver12);
- else if (Major <= 14)
- Opts.setClangABICompat(LangOptions::ClangABI::Ver14);
- else if (Major <= 15)
- Opts.setClangABICompat(LangOptions::ClangABI::Ver15);
- else if (Major <= 17)
- Opts.setClangABICompat(LangOptions::ClangABI::Ver17);
- else if (Major <= 18)
- Opts.setClangABICompat(LangOptions::ClangABI::Ver18);
- else if (Major <= 19)
- Opts.setClangABICompat(LangOptions::ClangABI::Ver19);
- else if (Major <= 20)
- Opts.setClangABICompat(LangOptions::ClangABI::Ver20);
+#define ABI_VER_MAJOR_MINOR(Major_, Minor_) \
+ if (std::tuple(Major, Minor) <= std::tuple(Major_, Minor_)) \
+ Opts.setClangABICompat(LangOptions::ClangABI::Ver##Major_##_##Minor_); \
+ else
+#define ABI_VER_MAJOR(Major_) \
+ if (Major <= Major_) \
+ Opts.setClangABICompat(LangOptions::ClangABI::Ver##Major_); \
+ else
+#define ABI_VER_LATEST(Latest) \
+ { /* Equivalent to latest version - do nothing */ \
+ }
+#include "clang/Basic/ABIVersions.def"
} else if (Ver != "latest") {
Diags.Report(diag::err_drv_invalid_value)
<< A->getAsString(Args) << A->getValue();
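
The CompilerInvocation.cpp hunks above collapse the hand-written ABI version ladders into X-macros expanded from ABIVersions.def, with a std::tuple comparison giving lexicographic (Major, Minor) ordering. A minimal standalone sketch of that scheme (the table contents and the Compat enum are illustrative, not the real ABI version list; needs C++17 for the tuple deduction):

#include <cstdio>
#include <tuple>

enum class Compat { Ver3_8, Ver4, Ver19, Latest };

// Stand-in for the .def file: each use site defines the two X-macros first.
#define VERSION_TABLE       \
  ABI_VER_MAJOR_MINOR(3, 8) \
  ABI_VER_MAJOR(4)          \
  ABI_VER_MAJOR(19)

static Compat pickCompat(int Major, int Minor) {
#define ABI_VER_MAJOR_MINOR(Ma, Mi)                   \
  if (std::tuple(Major, Minor) <= std::tuple(Ma, Mi)) \
    return Compat::Ver##Ma##_##Mi;                    \
  else
#define ABI_VER_MAJOR(Ma)                             \
  if (Major <= Ma)                                    \
    return Compat::Ver##Ma;                           \
  else
  VERSION_TABLE
  return Compat::Latest; // picked up by the trailing 'else'
#undef ABI_VER_MAJOR_MINOR
#undef ABI_VER_MAJOR
}

int main() {
  std::printf("%d\n", static_cast<int>(pickCompat(3, 7)));  // 0 (Ver3_8)
  std::printf("%d\n", static_cast<int>(pickCompat(18, 0))); // 2 (Ver19)
  std::printf("%d\n", static_cast<int>(pickCompat(25, 1))); // 3 (Latest)
  return 0;
}
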
diff --git a/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp b/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp
index 8f27553..2b55820 100644
--- a/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp
+++ b/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp
@@ -852,7 +852,7 @@ RewriteModernObjC::getIvarAccessString(ObjCIvarDecl *D) {
IvarT = GetGroupRecordTypeForObjCIvarBitfield(D);
if (!IvarT->getAs<TypedefType>() && IvarT->isRecordType()) {
- RecordDecl *RD = IvarT->castAs<RecordType>()->getDecl();
+ RecordDecl *RD = IvarT->castAs<RecordType>()->getOriginalDecl();
RD = RD->getDefinition();
if (RD && !RD->getDeclName().getAsIdentifierInfo()) {
// decltype(((Foo_IMPL*)0)->bar) *
@@ -865,7 +865,8 @@ RewriteModernObjC::getIvarAccessString(ObjCIvarDecl *D) {
RecordDecl *RD = RecordDecl::Create(*Context, TagTypeKind::Struct, TUDecl,
SourceLocation(), SourceLocation(),
&Context->Idents.get(RecName));
- QualType PtrStructIMPL = Context->getPointerType(Context->getTagDeclType(RD));
+ QualType PtrStructIMPL =
+ Context->getPointerType(Context->getCanonicalTagType(RD));
unsigned UnsignedIntSize =
static_cast<unsigned>(Context->getTypeSize(Context->UnsignedIntTy));
Expr *Zero = IntegerLiteral::Create(*Context,
@@ -2999,7 +3000,7 @@ QualType RewriteModernObjC::getSuperStructType() {
SuperStructDecl->completeDefinition();
}
- return Context->getTagDeclType(SuperStructDecl);
+ return Context->getCanonicalTagType(SuperStructDecl);
}
QualType RewriteModernObjC::getConstantStringStructType() {
@@ -3032,7 +3033,7 @@ QualType RewriteModernObjC::getConstantStringStructType() {
ConstantStringDecl->completeDefinition();
}
- return Context->getTagDeclType(ConstantStringDecl);
+ return Context->getCanonicalTagType(ConstantStringDecl);
}
/// getFunctionSourceLocation - returns start location of a function
@@ -3637,7 +3638,8 @@ bool RewriteModernObjC::RewriteObjCFieldDeclType(QualType &Type,
return RewriteObjCFieldDeclType(ElemTy, Result);
}
else if (Type->isRecordType()) {
- RecordDecl *RD = Type->castAs<RecordType>()->getDecl();
+ RecordDecl *RD =
+ Type->castAs<RecordType>()->getOriginalDecl()->getDefinitionOrSelf();
if (RD->isCompleteDefinition()) {
if (RD->isStruct())
Result += "\n\tstruct ";
@@ -3660,7 +3662,8 @@ bool RewriteModernObjC::RewriteObjCFieldDeclType(QualType &Type,
}
}
else if (Type->isEnumeralType()) {
- EnumDecl *ED = Type->castAs<EnumType>()->getDecl();
+ EnumDecl *ED =
+ Type->castAs<EnumType>()->getOriginalDecl()->getDefinitionOrSelf();
if (ED->isCompleteDefinition()) {
Result += "\n\tenum ";
Result += ED->getName();
@@ -3732,10 +3735,10 @@ void RewriteModernObjC::RewriteLocallyDefinedNamedAggregates(FieldDecl *fieldDec
TagDecl *TD = nullptr;
if (Type->isRecordType()) {
- TD = Type->castAs<RecordType>()->getDecl();
+ TD = Type->castAs<RecordType>()->getOriginalDecl()->getDefinitionOrSelf();
}
else if (Type->isEnumeralType()) {
- TD = Type->castAs<EnumType>()->getDecl();
+ TD = Type->castAs<EnumType>()->getOriginalDecl()->getDefinitionOrSelf();
}
if (TD) {
@@ -3793,7 +3796,7 @@ QualType RewriteModernObjC::SynthesizeBitfieldGroupStructType(
false, ICIS_NoInit));
}
RD->completeDefinition();
- return Context->getTagDeclType(RD);
+ return Context->getCanonicalTagType(RD);
}
QualType RewriteModernObjC::GetGroupRecordTypeForObjCIvarBitfield(ObjCIvarDecl *IV) {
@@ -4572,7 +4575,7 @@ Stmt *RewriteModernObjC::SynthesizeBlockCall(CallExpr *Exp, const Expr *BlockExp
RecordDecl *RD = RecordDecl::Create(*Context, TagTypeKind::Struct, TUDecl,
SourceLocation(), SourceLocation(),
&Context->Idents.get("__block_impl"));
- QualType PtrBlock = Context->getPointerType(Context->getTagDeclType(RD));
+ QualType PtrBlock = Context->getPointerType(Context->getCanonicalTagType(RD));
// Generate a funky cast.
SmallVector<QualType, 8> ArgTypes;
@@ -5316,7 +5319,8 @@ Stmt *RewriteModernObjC::SynthBlockInitExpr(BlockExpr *Exp,
RecordDecl::Create(*Context, TagTypeKind::Struct, TUDecl,
SourceLocation(), SourceLocation(), II);
assert(RD && "SynthBlockInitExpr(): Can't find RecordDecl");
- QualType castT = Context->getPointerType(Context->getTagDeclType(RD));
+ QualType castT =
+ Context->getPointerType(Context->getCanonicalTagType(RD));
FD = SynthBlockInitFunctionDecl(ND->getName());
Exp = new (Context) DeclRefExpr(*Context, FD, false, FD->getType(),
@@ -5719,7 +5723,10 @@ void RewriteModernObjC::HandleDeclInMainFile(Decl *D) {
}
}
} else if (VD->getType()->isRecordType()) {
- RecordDecl *RD = VD->getType()->castAs<RecordType>()->getDecl();
+ RecordDecl *RD = VD->getType()
+ ->castAs<RecordType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf();
if (RD->isCompleteDefinition())
RewriteRecordBody(RD);
}
@@ -7460,7 +7467,7 @@ Stmt *RewriteModernObjC::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) {
IvarT = GetGroupRecordTypeForObjCIvarBitfield(D);
if (!IvarT->getAs<TypedefType>() && IvarT->isRecordType()) {
- RecordDecl *RD = IvarT->castAs<RecordType>()->getDecl();
+ RecordDecl *RD = IvarT->castAs<RecordType>()->getOriginalDecl();
RD = RD->getDefinition();
if (RD && !RD->getDeclName().getAsIdentifierInfo()) {
// decltype(((Foo_IMPL*)0)->bar) *
@@ -7473,7 +7480,8 @@ Stmt *RewriteModernObjC::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) {
RecordDecl *RD = RecordDecl::Create(
*Context, TagTypeKind::Struct, TUDecl, SourceLocation(),
SourceLocation(), &Context->Idents.get(RecName));
- QualType PtrStructIMPL = Context->getPointerType(Context->getTagDeclType(RD));
+ QualType PtrStructIMPL =
+ Context->getPointerType(Context->getCanonicalTagType(RD));
unsigned UnsignedIntSize =
static_cast<unsigned>(Context->getTypeSize(Context->UnsignedIntTy));
Expr *Zero = IntegerLiteral::Create(*Context,
diff --git a/clang/lib/Frontend/Rewrite/RewriteObjC.cpp b/clang/lib/Frontend/Rewrite/RewriteObjC.cpp
index f49ccf7..0242fc1 100644
--- a/clang/lib/Frontend/Rewrite/RewriteObjC.cpp
+++ b/clang/lib/Frontend/Rewrite/RewriteObjC.cpp
@@ -2358,7 +2358,7 @@ void RewriteObjC::SynthMsgSendSuperFunctionDecl() {
RecordDecl *RD = RecordDecl::Create(*Context, TagTypeKind::Struct, TUDecl,
SourceLocation(), SourceLocation(),
&Context->Idents.get("objc_super"));
- QualType argT = Context->getPointerType(Context->getTagDeclType(RD));
+ QualType argT = Context->getPointerType(Context->getCanonicalTagType(RD));
assert(!argT.isNull() && "Can't build 'struct objc_super *' type");
ArgTys.push_back(argT);
argT = Context->getObjCSelType();
@@ -2401,7 +2401,7 @@ void RewriteObjC::SynthMsgSendSuperStretFunctionDecl() {
RecordDecl *RD = RecordDecl::Create(*Context, TagTypeKind::Struct, TUDecl,
SourceLocation(), SourceLocation(),
&Context->Idents.get("objc_super"));
- QualType argT = Context->getPointerType(Context->getTagDeclType(RD));
+ QualType argT = Context->getPointerType(Context->getCanonicalTagType(RD));
assert(!argT.isNull() && "Can't build 'struct objc_super *' type");
ArgTys.push_back(argT);
argT = Context->getObjCSelType();
@@ -2552,7 +2552,7 @@ QualType RewriteObjC::getSuperStructType() {
SuperStructDecl->completeDefinition();
}
- return Context->getTagDeclType(SuperStructDecl);
+ return Context->getCanonicalTagType(SuperStructDecl);
}
QualType RewriteObjC::getConstantStringStructType() {
@@ -2585,7 +2585,7 @@ QualType RewriteObjC::getConstantStringStructType() {
ConstantStringDecl->completeDefinition();
}
- return Context->getTagDeclType(ConstantStringDecl);
+ return Context->getCanonicalTagType(ConstantStringDecl);
}
CallExpr *RewriteObjC::SynthMsgSendStretCallExpr(FunctionDecl *MsgSendStretFlavor,
@@ -3750,7 +3750,7 @@ Stmt *RewriteObjC::SynthesizeBlockCall(CallExpr *Exp, const Expr *BlockExp) {
RecordDecl *RD = RecordDecl::Create(*Context, TagTypeKind::Struct, TUDecl,
SourceLocation(), SourceLocation(),
&Context->Idents.get("__block_impl"));
- QualType PtrBlock = Context->getPointerType(Context->getTagDeclType(RD));
+ QualType PtrBlock = Context->getPointerType(Context->getCanonicalTagType(RD));
// Generate a funky cast.
SmallVector<QualType, 8> ArgTypes;
@@ -4468,7 +4468,8 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
RecordDecl::Create(*Context, TagTypeKind::Struct, TUDecl,
SourceLocation(), SourceLocation(), II);
assert(RD && "SynthBlockInitExpr(): Can't find RecordDecl");
- QualType castT = Context->getPointerType(Context->getTagDeclType(RD));
+ QualType castT =
+ Context->getPointerType(Context->getCanonicalTagType(RD));
FD = SynthBlockInitFunctionDecl((*I)->getName());
Exp = new (Context) DeclRefExpr(*Context, FD, false, FD->getType(),
@@ -4834,7 +4835,10 @@ void RewriteObjC::HandleDeclInMainFile(Decl *D) {
}
}
} else if (VD->getType()->isRecordType()) {
- RecordDecl *RD = VD->getType()->castAs<RecordType>()->getDecl();
+ RecordDecl *RD = VD->getType()
+ ->castAs<RecordType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf();
if (RD->isCompleteDefinition())
RewriteRecordBody(RD);
}
@@ -5804,7 +5808,8 @@ Stmt *RewriteObjCFragileABI::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) {
RecordDecl::Create(*Context, TagTypeKind::Struct, TUDecl,
SourceLocation(), SourceLocation(), II);
assert(RD && "RewriteObjCIvarRefExpr(): Can't find RecordDecl");
- QualType castT = Context->getPointerType(Context->getTagDeclType(RD));
+ QualType castT =
+ Context->getPointerType(Context->getCanonicalTagType(RD));
CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, castT,
CK_BitCast,
IV->getBase());
@@ -5845,7 +5850,8 @@ Stmt *RewriteObjCFragileABI::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) {
RecordDecl::Create(*Context, TagTypeKind::Struct, TUDecl,
SourceLocation(), SourceLocation(), II);
assert(RD && "RewriteObjCIvarRefExpr(): Can't find RecordDecl");
- QualType castT = Context->getPointerType(Context->getTagDeclType(RD));
+ QualType castT =
+ Context->getPointerType(Context->getCanonicalTagType(RD));
CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, castT,
CK_BitCast,
IV->getBase());
diff --git a/clang/lib/Headers/avx10_2_512bf16intrin.h b/clang/lib/Headers/avx10_2_512bf16intrin.h
index 75290d2..95e9bd7a 100644
--- a/clang/lib/Headers/avx10_2_512bf16intrin.h
+++ b/clang/lib/Headers/avx10_2_512bf16intrin.h
@@ -441,8 +441,8 @@ _mm512_maskz_sqrt_pbh(__mmask32 __U, __m512bh __A) {
static __inline__ __m512bh __DEFAULT_FN_ATTRS512
_mm512_fmadd_pbh(__m512bh __A, __m512bh __B, __m512bh __C) {
- return (__m512bh)__builtin_ia32_vfmaddbf16512((__v32bf)__A, (__v32bf)__B,
- (__v32bf)__C);
+ return (__m512bh)__builtin_elementwise_fma((__v32bf)__A, (__v32bf)__B,
+ (__v32bf)__C);
}
static __inline__ __m512bh __DEFAULT_FN_ATTRS512
@@ -469,8 +469,8 @@ static __inline__ __m512bh __DEFAULT_FN_ATTRS512 _mm512_maskz_fmadd_pbh(
static __inline__ __m512bh __DEFAULT_FN_ATTRS512
_mm512_fmsub_pbh(__m512bh __A, __m512bh __B, __m512bh __C) {
- return (__m512bh)__builtin_ia32_vfmaddbf16512((__v32bf)__A, (__v32bf)__B,
- -(__v32bf)__C);
+ return (__m512bh)__builtin_elementwise_fma((__v32bf)__A, (__v32bf)__B,
+ -(__v32bf)__C);
}
static __inline__ __m512bh __DEFAULT_FN_ATTRS512
@@ -497,8 +497,8 @@ static __inline__ __m512bh __DEFAULT_FN_ATTRS512 _mm512_maskz_fmsub_pbh(
static __inline__ __m512bh __DEFAULT_FN_ATTRS512
_mm512_fnmadd_pbh(__m512bh __A, __m512bh __B, __m512bh __C) {
- return (__m512bh)__builtin_ia32_vfmaddbf16512((__v32bf)__A, -(__v32bf)__B,
- (__v32bf)__C);
+ return (__m512bh)__builtin_elementwise_fma((__v32bf)__A, -(__v32bf)__B,
+ (__v32bf)__C);
}
static __inline__ __m512bh __DEFAULT_FN_ATTRS512 _mm512_mask_fnmadd_pbh(
@@ -527,8 +527,8 @@ static __inline__ __m512bh __DEFAULT_FN_ATTRS512 _mm512_maskz_fnmadd_pbh(
static __inline__ __m512bh __DEFAULT_FN_ATTRS512
_mm512_fnmsub_pbh(__m512bh __A, __m512bh __B, __m512bh __C) {
- return (__m512bh)__builtin_ia32_vfmaddbf16512((__v32bf)__A, -(__v32bf)__B,
- -(__v32bf)__C);
+ return (__m512bh)__builtin_elementwise_fma((__v32bf)__A, -(__v32bf)__B,
+ -(__v32bf)__C);
}
static __inline__ __m512bh __DEFAULT_FN_ATTRS512 _mm512_mask_fnmsub_pbh(
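
Quick sketch of the generic builtin these bf16 wrappers now call: __builtin_elementwise_fma works lane by lane on ordinary Clang vector-extension types, so the same form also covers the negated-operand fmsub/fnmadd/fnmsub variants above. Plain float lanes are used here to keep the sketch target-neutral; it is Clang-specific.

#include <cstdio>

typedef float v4sf __attribute__((ext_vector_type(4)));

int main() {
  v4sf A = {1.0f, 2.0f, 3.0f, 4.0f};
  v4sf B = {2.0f, 2.0f, 2.0f, 2.0f};
  v4sf C = {0.5f, 0.5f, 0.5f, 0.5f};

  v4sf FMAdd = __builtin_elementwise_fma(A, B, C);  // A*B + C per lane
  v4sf FMSub = __builtin_elementwise_fma(A, B, -C); // A*B - C per lane

  std::printf("%g %g\n", FMAdd[0], FMSub[0]);       // 2.5 1.5
  return 0;
}
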
diff --git a/clang/lib/Headers/avx10_2bf16intrin.h b/clang/lib/Headers/avx10_2bf16intrin.h
index 66797ae..0c7f381 100644
--- a/clang/lib/Headers/avx10_2bf16intrin.h
+++ b/clang/lib/Headers/avx10_2bf16intrin.h
@@ -852,8 +852,8 @@ _mm_maskz_sqrt_pbh(__mmask8 __U, __m128bh __A) {
static __inline__ __m256bh __DEFAULT_FN_ATTRS256
_mm256_fmadd_pbh(__m256bh __A, __m256bh __B, __m256bh __C) {
- return (__m256bh)__builtin_ia32_vfmaddbf16256((__v16bf)__A, (__v16bf)__B,
- (__v16bf)__C);
+ return (__m256bh)__builtin_elementwise_fma((__v16bf)__A, (__v16bf)__B,
+ (__v16bf)__C);
}
static __inline__ __m256bh __DEFAULT_FN_ATTRS256
@@ -880,8 +880,8 @@ static __inline__ __m256bh __DEFAULT_FN_ATTRS256 _mm256_maskz_fmadd_pbh(
static __inline__ __m256bh __DEFAULT_FN_ATTRS256
_mm256_fmsub_pbh(__m256bh __A, __m256bh __B, __m256bh __C) {
- return (__m256bh)__builtin_ia32_vfmaddbf16256((__v16bf)__A, (__v16bf)__B,
- -(__v16bf)__C);
+ return (__m256bh)__builtin_elementwise_fma((__v16bf)__A, (__v16bf)__B,
+ -(__v16bf)__C);
}
static __inline__ __m256bh __DEFAULT_FN_ATTRS256
@@ -908,8 +908,8 @@ static __inline__ __m256bh __DEFAULT_FN_ATTRS256 _mm256_maskz_fmsub_pbh(
static __inline__ __m256bh __DEFAULT_FN_ATTRS256
_mm256_fnmadd_pbh(__m256bh __A, __m256bh __B, __m256bh __C) {
- return (__m256bh)__builtin_ia32_vfmaddbf16256((__v16bf)__A, -(__v16bf)__B,
- (__v16bf)__C);
+ return (__m256bh)__builtin_elementwise_fma((__v16bf)__A, -(__v16bf)__B,
+ (__v16bf)__C);
}
static __inline__ __m256bh __DEFAULT_FN_ATTRS256 _mm256_mask_fnmadd_pbh(
@@ -938,8 +938,8 @@ static __inline__ __m256bh __DEFAULT_FN_ATTRS256 _mm256_maskz_fnmadd_pbh(
static __inline__ __m256bh __DEFAULT_FN_ATTRS256
_mm256_fnmsub_pbh(__m256bh __A, __m256bh __B, __m256bh __C) {
- return (__m256bh)__builtin_ia32_vfmaddbf16256((__v16bf)__A, -(__v16bf)__B,
- -(__v16bf)__C);
+ return (__m256bh)__builtin_elementwise_fma((__v16bf)__A, -(__v16bf)__B,
+ -(__v16bf)__C);
}
static __inline__ __m256bh __DEFAULT_FN_ATTRS256 _mm256_mask_fnmsub_pbh(
@@ -969,8 +969,8 @@ static __inline__ __m256bh __DEFAULT_FN_ATTRS256 _mm256_maskz_fnmsub_pbh(
static __inline__ __m128bh __DEFAULT_FN_ATTRS128 _mm_fmadd_pbh(__m128bh __A,
__m128bh __B,
__m128bh __C) {
- return (__m128bh)__builtin_ia32_vfmaddbf16128((__v8bf)__A, (__v8bf)__B,
- (__v8bf)__C);
+ return (__m128bh)__builtin_elementwise_fma((__v8bf)__A, (__v8bf)__B,
+ (__v8bf)__C);
}
static __inline__ __m128bh __DEFAULT_FN_ATTRS128
@@ -997,8 +997,8 @@ _mm_maskz_fmadd_pbh(__mmask8 __U, __m128bh __A, __m128bh __B, __m128bh __C) {
static __inline__ __m128bh __DEFAULT_FN_ATTRS128 _mm_fmsub_pbh(__m128bh __A,
__m128bh __B,
__m128bh __C) {
- return (__m128bh)__builtin_ia32_vfmaddbf16128((__v8bf)__A, (__v8bf)__B,
- -(__v8bf)__C);
+ return (__m128bh)__builtin_elementwise_fma((__v8bf)__A, (__v8bf)__B,
+ -(__v8bf)__C);
}
static __inline__ __m128bh __DEFAULT_FN_ATTRS128
@@ -1025,8 +1025,8 @@ _mm_maskz_fmsub_pbh(__mmask8 __U, __m128bh __A, __m128bh __B, __m128bh __C) {
static __inline__ __m128bh __DEFAULT_FN_ATTRS128 _mm_fnmadd_pbh(__m128bh __A,
__m128bh __B,
__m128bh __C) {
- return (__m128bh)__builtin_ia32_vfmaddbf16128((__v8bf)__A, -(__v8bf)__B,
- (__v8bf)__C);
+ return (__m128bh)__builtin_elementwise_fma((__v8bf)__A, -(__v8bf)__B,
+ (__v8bf)__C);
}
static __inline__ __m128bh __DEFAULT_FN_ATTRS128
@@ -1053,8 +1053,8 @@ _mm_maskz_fnmadd_pbh(__mmask8 __U, __m128bh __A, __m128bh __B, __m128bh __C) {
static __inline__ __m128bh __DEFAULT_FN_ATTRS128 _mm_fnmsub_pbh(__m128bh __A,
__m128bh __B,
__m128bh __C) {
- return (__m128bh)__builtin_ia32_vfmaddbf16128((__v8bf)__A, -(__v8bf)__B,
- -(__v8bf)__C);
+ return (__m128bh)__builtin_elementwise_fma((__v8bf)__A, -(__v8bf)__B,
+ -(__v8bf)__C);
}
static __inline__ __m128bh __DEFAULT_FN_ATTRS128
diff --git a/clang/lib/Headers/avx2intrin.h b/clang/lib/Headers/avx2intrin.h
index dc9fc07..55e7102 100644
--- a/clang/lib/Headers/avx2intrin.h
+++ b/clang/lib/Headers/avx2intrin.h
@@ -31,6 +31,14 @@
__min_vector_width__(128)))
#endif
+#if defined(__cplusplus) && (__cplusplus >= 201103L)
+#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256 constexpr
+#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128 constexpr
+#else
+#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256
+#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128
+#endif
+
/* SSE4 Multiple Packed Sums of Absolute Difference. */
/// Computes sixteen sum of absolute difference (SAD) operations on sets of
/// four unsigned 8-bit integers from the 256-bit integer vectors \a X and
@@ -460,7 +468,7 @@ _mm256_adds_epu16(__m256i __a, __m256i __b)
/// \param __b
/// A 256-bit integer vector.
/// \returns A 256-bit integer vector containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_and_si256(__m256i __a, __m256i __b)
{
return (__m256i)((__v4du)__a & (__v4du)__b);
@@ -478,7 +486,7 @@ _mm256_and_si256(__m256i __a, __m256i __b)
/// \param __b
/// A 256-bit integer vector.
/// \returns A 256-bit integer vector containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_andnot_si256(__m256i __a, __m256i __b)
{
return (__m256i)(~(__v4du)__a & (__v4du)__b);
@@ -1721,10 +1729,10 @@ _mm256_mulhrs_epi16(__m256i __a, __m256i __b)
/// \param __b
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \returns A 256-bit vector of [16 x i16] containing the products.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mulhi_epu16(__m256i __a, __m256i __b)
{
- return (__m256i)__builtin_ia32_pmulhuw256((__v16hi)__a, (__v16hi)__b);
+ return (__m256i)__builtin_ia32_pmulhuw256((__v16hu)__a, (__v16hu)__b);
}
/// Multiplies signed 16-bit integer elements of two 256-bit vectors of
@@ -1740,7 +1748,7 @@ _mm256_mulhi_epu16(__m256i __a, __m256i __b)
/// \param __b
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \returns A 256-bit vector of [16 x i16] containing the products.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mulhi_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pmulhw256((__v16hi)__a, (__v16hi)__b);
@@ -1759,7 +1767,7 @@ _mm256_mulhi_epi16(__m256i __a, __m256i __b)
/// \param __b
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \returns A 256-bit vector of [16 x i16] containing the products.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mullo_epi16(__m256i __a, __m256i __b)
{
return (__m256i)((__v16hu)__a * (__v16hu)__b);
@@ -1822,7 +1830,7 @@ _mm256_mul_epu32(__m256i __a, __m256i __b)
/// \param __b
/// A 256-bit integer vector.
/// \returns A 256-bit integer vector containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_or_si256(__m256i __a, __m256i __b)
{
return (__m256i)((__v4du)__a | (__v4du)__b);
@@ -2974,7 +2982,7 @@ _mm256_unpacklo_epi64(__m256i __a, __m256i __b)
/// \param __b
/// A 256-bit integer vector.
/// \returns A 256-bit integer vector containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_xor_si256(__m256i __a, __m256i __b)
{
return (__m256i)((__v4du)__a ^ (__v4du)__b);
@@ -5289,5 +5297,7 @@ _mm_srlv_epi64(__m128i __X, __m128i __Y)
#undef __DEFAULT_FN_ATTRS256
#undef __DEFAULT_FN_ATTRS128
+#undef __DEFAULT_FN_ATTRS256_CONSTEXPR
+#undef __DEFAULT_FN_ATTRS128_CONSTEXPR
#endif /* __AVX2INTRIN_H */
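
The avx2intrin.h hunks above introduce a pattern repeated across the headers in this patch: the function-attribute macro only gains constexpr when the consumer compiles as C++11 or newer, so the header still parses as C. A minimal standalone sketch of that gating (MY_ATTRS_CONSTEXPR and and_mask are invented names):

#if defined(__cplusplus) && (__cplusplus >= 201103L)
#define MY_ATTRS_CONSTEXPR static inline constexpr
#else
#define MY_ATTRS_CONSTEXPR static inline
#endif

MY_ATTRS_CONSTEXPR int and_mask(int a, int b) { return a & b; }

#if defined(__cplusplus) && (__cplusplus >= 201103L)
// With constexpr in play the wrapper folds at compile time.
static_assert(and_mask(0xC, 0xA) == 0x8, "compile-time bitwise AND");
#endif

int main(void) { return and_mask(0xC, 0xA) == 0x8 ? 0 : 1; }
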
diff --git a/clang/lib/Headers/avx512bitalgintrin.h b/clang/lib/Headers/avx512bitalgintrin.h
index 3c446b3..9a1ff8f3 100644
--- a/clang/lib/Headers/avx512bitalgintrin.h
+++ b/clang/lib/Headers/avx512bitalgintrin.h
@@ -20,7 +20,13 @@
__target__("avx512bitalg,evex512"), \
__min_vector_width__(512)))
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+#if defined(__cplusplus) && (__cplusplus >= 201103L)
+#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS constexpr
+#else
+#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS
+#endif
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS_CONSTEXPR
_mm512_popcnt_epi16(__m512i __A)
{
return (__m512i)__builtin_elementwise_popcount((__v32hu)__A);
@@ -42,7 +48,7 @@ _mm512_maskz_popcnt_epi16(__mmask32 __U, __m512i __B)
__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS_CONSTEXPR
_mm512_popcnt_epi8(__m512i __A)
{
return (__m512i)__builtin_elementwise_popcount((__v64qu)__A);
@@ -80,7 +86,7 @@ _mm512_bitshuffle_epi64_mask(__m512i __A, __m512i __B)
__B);
}
-
#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS_CONSTEXPR
#endif
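
Clang-specific sketch of the builtin the popcnt wrappers above forward to (now under the constexpr-capable attribute macro): __builtin_elementwise_popcount counts set bits per lane of any integer vector-extension type; it needs a recent clang.

#include <cstdio>

typedef unsigned short v8hu __attribute__((ext_vector_type(8)));

int main() {
  v8hu V = {0, 1, 3, 7, 0xF, 0xFF, 0xFFFF, 0x8000};
  v8hu C = __builtin_elementwise_popcount(V);
  for (int i = 0; i != 8; ++i)
    std::printf("%u ", (unsigned)C[i]); // 0 1 2 3 4 8 16 1
  std::printf("\n");
  return 0;
}
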
diff --git a/clang/lib/Headers/avx512bwintrin.h b/clang/lib/Headers/avx512bwintrin.h
index c854720..233d4a6 100644
--- a/clang/lib/Headers/avx512bwintrin.h
+++ b/clang/lib/Headers/avx512bwintrin.h
@@ -25,6 +25,14 @@ typedef unsigned long long __mmask64;
__attribute__((__always_inline__, __nodebug__, \
__target__("avx512bw,no-evex512")))
+#if defined(__cplusplus) && (__cplusplus >= 201103L)
+#define __DEFAULT_FN_ATTRS512_CONSTEXPR __DEFAULT_FN_ATTRS512 constexpr
+#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS constexpr
+#else
+#define __DEFAULT_FN_ATTRS512_CONSTEXPR __DEFAULT_FN_ATTRS512
+#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS
+#endif
+
static __inline __mmask32 __DEFAULT_FN_ATTRS
_knot_mask32(__mmask32 __M)
{
@@ -438,7 +446,7 @@ _mm512_maskz_sub_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
(__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_mullo_epi16 (__m512i __A, __m512i __B) {
return (__m512i) ((__v32hu) __A * (__v32hu) __B);
}
@@ -1082,7 +1090,7 @@ _mm512_maskz_mulhrs_epi16(__mmask32 __U, __m512i __A, __m512i __B)
(__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_mulhi_epi16(__m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_pmulhw512((__v32hi) __A, (__v32hi) __B);
@@ -1105,10 +1113,10 @@ _mm512_maskz_mulhi_epi16(__mmask32 __U, __m512i __A, __m512i __B)
(__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_mulhi_epu16(__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_pmulhuw512((__v32hi) __A, (__v32hi) __B);
+ return (__m512i)__builtin_ia32_pmulhuw512((__v32hu) __A, (__v32hu) __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -2010,5 +2018,7 @@ _mm512_sad_epu8 (__m512i __A, __m512i __B)
#undef __DEFAULT_FN_ATTRS512
#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS512_CONSTEXPR
+#undef __DEFAULT_FN_ATTRS_CONSTEXPR
#endif
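
A note on why the pmulhuw hunks above recast their operands to the unsigned element types (__v16hu/__v32hu): the high 16 bits of a 16x16 multiply depend on whether the lanes are read as signed or unsigned, so modelling the unsigned intrinsic with unsigned lanes keeps the semantics (and any constant evaluation) consistent. A scalar stand-in illustrating the difference (sketch, not the header code):

#include <cstdint>
#include <cstdio>

static uint16_t mulhi_u16(uint16_t a, uint16_t b) {
  return static_cast<uint16_t>((uint32_t(a) * uint32_t(b)) >> 16);
}

static int16_t mulhi_s16(int16_t a, int16_t b) {
  return static_cast<int16_t>((int32_t(a) * int32_t(b)) >> 16);
}

int main() {
  // The same bit pattern 0xFFFF is 65535 unsigned but -1 signed.
  std::printf("%u\n", mulhi_u16(0xFFFF, 2));               // 1
  std::printf("%d\n", mulhi_s16(int16_t(-1), int16_t(2))); // -1
  return 0;
}
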
diff --git a/clang/lib/Headers/avx512dqintrin.h b/clang/lib/Headers/avx512dqintrin.h
index 88b48e3..62325b9 100644
--- a/clang/lib/Headers/avx512dqintrin.h
+++ b/clang/lib/Headers/avx512dqintrin.h
@@ -20,6 +20,14 @@
__attribute__((__always_inline__, __nodebug__, \
__target__("avx512dq,no-evex512")))
+#if defined(__cplusplus) && (__cplusplus >= 201103L)
+#define __DEFAULT_FN_ATTRS512_CONSTEXPR __DEFAULT_FN_ATTRS512 constexpr
+#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS constexpr
+#else
+#define __DEFAULT_FN_ATTRS512_CONSTEXPR __DEFAULT_FN_ATTRS512
+#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS
+#endif
+
static __inline __mmask8 __DEFAULT_FN_ATTRS
_knot_mask8(__mmask8 __M)
{
@@ -167,7 +175,7 @@ _mm512_maskz_mullo_epi64(__mmask8 __U, __m512i __A, __m512i __B) {
(__v8di)_mm512_setzero_si512());
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
+static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_xor_pd(__m512d __A, __m512d __B) {
return (__m512d)((__v8du)__A ^ (__v8du)__B);
}
@@ -186,7 +194,7 @@ _mm512_maskz_xor_pd(__mmask8 __U, __m512d __A, __m512d __B) {
(__v8df)_mm512_setzero_pd());
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
+static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_xor_ps (__m512 __A, __m512 __B) {
return (__m512)((__v16su)__A ^ (__v16su)__B);
}
@@ -205,7 +213,7 @@ _mm512_maskz_xor_ps(__mmask16 __U, __m512 __A, __m512 __B) {
(__v16sf)_mm512_setzero_ps());
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
+static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_or_pd(__m512d __A, __m512d __B) {
return (__m512d)((__v8du)__A | (__v8du)__B);
}
@@ -224,7 +232,7 @@ _mm512_maskz_or_pd(__mmask8 __U, __m512d __A, __m512d __B) {
(__v8df)_mm512_setzero_pd());
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
+static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_or_ps(__m512 __A, __m512 __B) {
return (__m512)((__v16su)__A | (__v16su)__B);
}
@@ -243,7 +251,7 @@ _mm512_maskz_or_ps(__mmask16 __U, __m512 __A, __m512 __B) {
(__v16sf)_mm512_setzero_ps());
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
+static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_and_pd(__m512d __A, __m512d __B) {
return (__m512d)((__v8du)__A & (__v8du)__B);
}
@@ -262,7 +270,7 @@ _mm512_maskz_and_pd(__mmask8 __U, __m512d __A, __m512d __B) {
(__v8df)_mm512_setzero_pd());
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
+static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_and_ps(__m512 __A, __m512 __B) {
return (__m512)((__v16su)__A & (__v16su)__B);
}
@@ -281,7 +289,7 @@ _mm512_maskz_and_ps(__mmask16 __U, __m512 __A, __m512 __B) {
(__v16sf)_mm512_setzero_ps());
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
+static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_andnot_pd(__m512d __A, __m512d __B) {
return (__m512d)(~(__v8du)__A & (__v8du)__B);
}
@@ -300,7 +308,7 @@ _mm512_maskz_andnot_pd(__mmask8 __U, __m512d __A, __m512d __B) {
(__v8df)_mm512_setzero_pd());
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
+static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_andnot_ps(__m512 __A, __m512 __B) {
return (__m512)(~(__v16su)__A & (__v16su)__B);
}
@@ -1375,5 +1383,7 @@ _mm512_maskz_broadcast_i64x2(__mmask8 __M, __m128i __A)
#undef __DEFAULT_FN_ATTRS512
#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS512_CONSTEXPR
+#undef __DEFAULT_FN_ATTRS_CONSTEXPR
#endif
diff --git a/clang/lib/Headers/avx512fintrin.h b/clang/lib/Headers/avx512fintrin.h
index 45e7eeb..95b80cc 100644
--- a/clang/lib/Headers/avx512fintrin.h
+++ b/clang/lib/Headers/avx512fintrin.h
@@ -277,20 +277,20 @@ _mm512_setzero_pd(void) {
return __extension__(__m512d){0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
}
-static __inline __m512 __DEFAULT_FN_ATTRS512
+static __inline __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_set1_ps(float __w)
{
return __extension__ (__m512){ __w, __w, __w, __w, __w, __w, __w, __w,
__w, __w, __w, __w, __w, __w, __w, __w };
}
-static __inline __m512d __DEFAULT_FN_ATTRS512
+static __inline __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_set1_pd(double __w)
{
return __extension__ (__m512d){ __w, __w, __w, __w, __w, __w, __w, __w };
}
-static __inline __m512i __DEFAULT_FN_ATTRS512
+static __inline __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_set1_epi8(char __w)
{
return __extension__ (__m512i)(__v64qi){
@@ -304,7 +304,7 @@ _mm512_set1_epi8(char __w)
__w, __w, __w, __w, __w, __w, __w, __w };
}
-static __inline __m512i __DEFAULT_FN_ATTRS512
+static __inline __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_set1_epi16(short __w)
{
return __extension__ (__m512i)(__v32hi){
@@ -314,7 +314,7 @@ _mm512_set1_epi16(short __w)
__w, __w, __w, __w, __w, __w, __w, __w };
}
-static __inline __m512i __DEFAULT_FN_ATTRS512
+static __inline __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_set1_epi32(int __s)
{
return __extension__ (__m512i)(__v16si){
@@ -330,7 +330,7 @@ _mm512_maskz_set1_epi32(__mmask16 __M, int __A)
(__v16si)_mm512_setzero_si512());
}
-static __inline __m512i __DEFAULT_FN_ATTRS512
+static __inline __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_set1_epi64(long long __d)
{
return __extension__(__m512i)(__v8di){ __d, __d, __d, __d, __d, __d, __d, __d };
@@ -645,7 +645,7 @@ _mm512_zextsi256_si512(__m256i __a)
}
/* Bitwise operators */
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_and_epi32(__m512i __a, __m512i __b)
{
return (__m512i)((__v16su)__a & (__v16su)__b);
@@ -666,7 +666,7 @@ _mm512_maskz_and_epi32(__mmask16 __k, __m512i __a, __m512i __b)
__k, __a, __b);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_and_epi64(__m512i __a, __m512i __b)
{
return (__m512i)((__v8du)__a & (__v8du)__b);
@@ -687,13 +687,13 @@ _mm512_maskz_and_epi64(__mmask8 __k, __m512i __a, __m512i __b)
__k, __a, __b);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_andnot_si512 (__m512i __A, __m512i __B)
{
return (__m512i)(~(__v8du)__A & (__v8du)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_andnot_epi32 (__m512i __A, __m512i __B)
{
return (__m512i)(~(__v16su)__A & (__v16su)__B);
@@ -714,7 +714,7 @@ _mm512_maskz_andnot_epi32(__mmask16 __U, __m512i __A, __m512i __B)
__U, __A, __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_andnot_epi64(__m512i __A, __m512i __B)
{
return (__m512i)(~(__v8du)__A & (__v8du)__B);
@@ -735,7 +735,7 @@ _mm512_maskz_andnot_epi64(__mmask8 __U, __m512i __A, __m512i __B)
__U, __A, __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_or_epi32(__m512i __a, __m512i __b)
{
return (__m512i)((__v16su)__a | (__v16su)__b);
@@ -755,7 +755,7 @@ _mm512_maskz_or_epi32(__mmask16 __k, __m512i __a, __m512i __b)
return (__m512i)_mm512_mask_or_epi32(_mm512_setzero_si512(), __k, __a, __b);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_or_epi64(__m512i __a, __m512i __b)
{
return (__m512i)((__v8du)__a | (__v8du)__b);
@@ -775,7 +775,7 @@ _mm512_maskz_or_epi64(__mmask8 __k, __m512i __a, __m512i __b)
return (__m512i)_mm512_mask_or_epi64(_mm512_setzero_si512(), __k, __a, __b);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_xor_epi32(__m512i __a, __m512i __b)
{
return (__m512i)((__v16su)__a ^ (__v16su)__b);
@@ -795,7 +795,7 @@ _mm512_maskz_xor_epi32(__mmask16 __k, __m512i __a, __m512i __b)
return (__m512i)_mm512_mask_xor_epi32(_mm512_setzero_si512(), __k, __a, __b);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_xor_epi64(__m512i __a, __m512i __b)
{
return (__m512i)((__v8du)__a ^ (__v8du)__b);
@@ -815,19 +815,19 @@ _mm512_maskz_xor_epi64(__mmask8 __k, __m512i __a, __m512i __b)
return (__m512i)_mm512_mask_xor_epi64(_mm512_setzero_si512(), __k, __a, __b);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_and_si512(__m512i __a, __m512i __b)
{
return (__m512i)((__v8du)__a & (__v8du)__b);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_or_si512(__m512i __a, __m512i __b)
{
return (__m512i)((__v8du)__a | (__v8du)__b);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_xor_si512(__m512i __a, __m512i __b)
{
return (__m512i)((__v8du)__a ^ (__v8du)__b);
@@ -835,45 +835,38 @@ _mm512_xor_si512(__m512i __a, __m512i __b)
/* Arithmetic */
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_add_pd(__m512d __a, __m512d __b)
-{
+static __inline __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_add_pd(__m512d __a, __m512d __b) {
return (__m512d)((__v8df)__a + (__v8df)__b);
}
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_add_ps(__m512 __a, __m512 __b)
-{
+static __inline __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_add_ps(__m512 __a, __m512 __b) {
return (__m512)((__v16sf)__a + (__v16sf)__b);
}
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_mul_pd(__m512d __a, __m512d __b)
-{
+static __inline __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_mul_pd(__m512d __a, __m512d __b) {
return (__m512d)((__v8df)__a * (__v8df)__b);
}
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_mul_ps(__m512 __a, __m512 __b)
-{
+static __inline __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_mul_ps(__m512 __a, __m512 __b) {
return (__m512)((__v16sf)__a * (__v16sf)__b);
}
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_sub_pd(__m512d __a, __m512d __b)
-{
+static __inline __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_sub_pd(__m512d __a, __m512d __b) {
return (__m512d)((__v8df)__a - (__v8df)__b);
}
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_sub_ps(__m512 __a, __m512 __b)
-{
+static __inline __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_sub_ps(__m512 __a, __m512 __b) {
return (__m512)((__v16sf)__a - (__v16sf)__b);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_add_epi64 (__m512i __A, __m512i __B)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_add_epi64(__m512i __A, __m512i __B) {
return (__m512i) ((__v8du) __A + (__v8du) __B);
}
@@ -2315,9 +2308,8 @@ _mm_maskz_div_sd(__mmask8 __U,__m128d __A, __m128d __B) {
(__v2df)_mm_setzero_pd(), \
(__mmask8)(U), (int)(R)))
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_div_pd(__m512d __a, __m512d __b)
-{
+static __inline __m512d
+ __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_div_pd(__m512d __a, __m512d __b) {
return (__m512d)((__v8df)__a/(__v8df)__b);
}
@@ -2335,9 +2327,8 @@ _mm512_maskz_div_pd(__mmask8 __U, __m512d __A, __m512d __B) {
(__v8df)_mm512_setzero_pd());
}
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_div_ps(__m512 __a, __m512 __b)
-{
+static __inline __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_div_ps(__m512 __a, __m512 __b) {
return (__m512)((__v16sf)__a/(__v16sf)__b);
}
@@ -4123,9 +4114,8 @@ _mm512_cvtss_f32(__m512 __a)
/* Unpack and Interleave */
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_unpackhi_pd(__m512d __a, __m512d __b)
-{
+static __inline __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_unpackhi_pd(__m512d __a, __m512d __b) {
return (__m512d)__builtin_shufflevector((__v8df)__a, (__v8df)__b,
1, 9, 1+2, 9+2, 1+4, 9+4, 1+6, 9+6);
}
@@ -4146,9 +4136,8 @@ _mm512_maskz_unpackhi_pd(__mmask8 __U, __m512d __A, __m512d __B)
(__v8df)_mm512_setzero_pd());
}
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_unpacklo_pd(__m512d __a, __m512d __b)
-{
+static __inline __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_unpacklo_pd(__m512d __a, __m512d __b) {
return (__m512d)__builtin_shufflevector((__v8df)__a, (__v8df)__b,
0, 8, 0+2, 8+2, 0+4, 8+4, 0+6, 8+6);
}
@@ -4169,9 +4158,8 @@ _mm512_maskz_unpacklo_pd (__mmask8 __U, __m512d __A, __m512d __B)
(__v8df)_mm512_setzero_pd());
}
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_unpackhi_ps(__m512 __a, __m512 __b)
-{
+static __inline __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_unpackhi_ps(__m512 __a, __m512 __b) {
return (__m512)__builtin_shufflevector((__v16sf)__a, (__v16sf)__b,
2, 18, 3, 19,
2+4, 18+4, 3+4, 19+4,
@@ -4195,9 +4183,8 @@ _mm512_maskz_unpackhi_ps (__mmask16 __U, __m512 __A, __m512 __B)
(__v16sf)_mm512_setzero_ps());
}
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_unpacklo_ps(__m512 __a, __m512 __b)
-{
+static __inline __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_unpacklo_ps(__m512 __a, __m512 __b) {
return (__m512)__builtin_shufflevector((__v16sf)__a, (__v16sf)__b,
0, 16, 1, 17,
0+4, 16+4, 1+4, 17+4,
@@ -5303,7 +5290,7 @@ _mm512_mask_store_epi64 (void *__P, __mmask8 __U, __m512i __A)
(__mmask8) __U);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
+static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_movedup_pd (__m512d __A)
{
return (__m512d)__builtin_shufflevector((__v8df)__A, (__v8df)__A,
@@ -8665,7 +8652,7 @@ _mm512_mask_testn_epi64_mask (__mmask8 __U, __m512i __A, __m512i __B)
_mm512_setzero_si512());
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
+static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_movehdup_ps (__m512 __A)
{
return (__m512)__builtin_shufflevector((__v16sf)__A, (__v16sf)__A,
@@ -8688,7 +8675,7 @@ _mm512_maskz_movehdup_ps (__mmask16 __U, __m512 __A)
(__v16sf)_mm512_setzero_ps());
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
+static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_moveldup_ps (__m512 __A)
{
return (__m512)__builtin_shufflevector((__v16sf)__A, (__v16sf)__A,
@@ -9337,19 +9324,23 @@ _mm512_mask_abs_pd(__m512d __W, __mmask8 __K, __m512d __A)
* This takes log2(n) steps where n is the number of elements in the vector.
*/
-static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_add_epi64(__m512i __W) {
+static __inline__ long long __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_reduce_add_epi64(__m512i __W) {
return __builtin_reduce_add((__v8di)__W);
}
-static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_mul_epi64(__m512i __W) {
+static __inline__ long long __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_reduce_mul_epi64(__m512i __W) {
return __builtin_reduce_mul((__v8di)__W);
}
-static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_and_epi64(__m512i __W) {
+static __inline__ long long __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_reduce_and_epi64(__m512i __W) {
return __builtin_reduce_and((__v8di)__W);
}
-static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_or_epi64(__m512i __W) {
+static __inline__ long long __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_reduce_or_epi64(__m512i __W) {
return __builtin_reduce_or((__v8di)__W);
}
@@ -9400,22 +9391,22 @@ _mm512_mask_reduce_mul_pd(__mmask8 __M, __m512d __W) {
return __builtin_ia32_reduce_fmul_pd512(1.0, __W);
}
-static __inline__ int __DEFAULT_FN_ATTRS512
+static __inline__ int __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_reduce_add_epi32(__m512i __W) {
return __builtin_reduce_add((__v16si)__W);
}
-static __inline__ int __DEFAULT_FN_ATTRS512
+static __inline__ int __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_reduce_mul_epi32(__m512i __W) {
return __builtin_reduce_mul((__v16si)__W);
}
-static __inline__ int __DEFAULT_FN_ATTRS512
+static __inline__ int __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_reduce_and_epi32(__m512i __W) {
return __builtin_reduce_and((__v16si)__W);
}
-static __inline__ int __DEFAULT_FN_ATTRS512
+static __inline__ int __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_reduce_or_epi32(__m512i __W) {
return __builtin_reduce_or((__v16si)__W);
}
@@ -9466,22 +9457,22 @@ _mm512_mask_reduce_mul_ps(__mmask16 __M, __m512 __W) {
return __builtin_ia32_reduce_fmul_ps512(1.0f, __W);
}
-static __inline__ long long __DEFAULT_FN_ATTRS512
+static __inline__ long long __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_reduce_max_epi64(__m512i __V) {
return __builtin_reduce_max((__v8di)__V);
}
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_reduce_max_epu64(__m512i __V) {
return __builtin_reduce_max((__v8du)__V);
}
-static __inline__ long long __DEFAULT_FN_ATTRS512
+static __inline__ long long __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_reduce_min_epi64(__m512i __V) {
return __builtin_reduce_min((__v8di)__V);
}
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_reduce_min_epu64(__m512i __V) {
return __builtin_reduce_min((__v8du)__V);
}
@@ -9509,22 +9500,22 @@ _mm512_mask_reduce_min_epu64(__mmask8 __M, __m512i __V) {
__V = _mm512_mask_mov_epi64(_mm512_set1_epi64(-1LL), __M, __V);
return __builtin_reduce_min((__v8du)__V);
}
-static __inline__ int __DEFAULT_FN_ATTRS512
+static __inline__ int __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_reduce_max_epi32(__m512i __V) {
return __builtin_reduce_max((__v16si)__V);
}
-static __inline__ unsigned int __DEFAULT_FN_ATTRS512
+static __inline__ unsigned int __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_reduce_max_epu32(__m512i __V) {
return __builtin_reduce_max((__v16su)__V);
}
-static __inline__ int __DEFAULT_FN_ATTRS512
+static __inline__ int __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_reduce_min_epi32(__m512i __V) {
return __builtin_reduce_min((__v16si)__V);
}
-static __inline__ unsigned int __DEFAULT_FN_ATTRS512
+static __inline__ unsigned int __DEFAULT_FN_ATTRS512_CONSTEXPR
_mm512_reduce_min_epu32(__m512i __V) {
return __builtin_reduce_min((__v16su)__V);
}
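
Sketch of the horizontal-reduction builtins that the _mm512_reduce_* wrappers above forward to; the patch marks those wrappers constexpr, and the underlying builtins already work on any Clang vector-extension type (Clang-specific):

#include <cstdio>

typedef long long v4di __attribute__((ext_vector_type(4)));

int main() {
  v4di V = {1, 2, 3, 4};
  long long Sum = __builtin_reduce_add(V); // 1+2+3+4 = 10
  long long Max = __builtin_reduce_max(V); // 4
  long long Min = __builtin_reduce_min(V); // 1
  std::printf("%lld %lld %lld\n", Sum, Max, Min);
  return 0;
}
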
diff --git a/clang/lib/Headers/avx512vlbitalgintrin.h b/clang/lib/Headers/avx512vlbitalgintrin.h
index 1b01fe0..739e78a 100644
--- a/clang/lib/Headers/avx512vlbitalgintrin.h
+++ b/clang/lib/Headers/avx512vlbitalgintrin.h
@@ -24,7 +24,15 @@
__target__("avx512vl,avx512bitalg,no-evex512"), \
__min_vector_width__(256)))
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+#if defined(__cplusplus) && (__cplusplus >= 201103L)
+#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128 constexpr
+#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256 constexpr
+#else
+#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128
+#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256
+#endif
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_popcnt_epi16(__m256i __A)
{
return (__m256i)__builtin_elementwise_popcount((__v16hu)__A);
@@ -46,7 +54,7 @@ _mm256_maskz_popcnt_epi16(__mmask16 __U, __m256i __B)
__B);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_popcnt_epi16(__m128i __A)
{
return (__m128i)__builtin_elementwise_popcount((__v8hu)__A);
@@ -68,7 +76,7 @@ _mm_maskz_popcnt_epi16(__mmask8 __U, __m128i __B)
__B);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_popcnt_epi8(__m256i __A)
{
return (__m256i)__builtin_elementwise_popcount((__v32qu)__A);
@@ -90,7 +98,7 @@ _mm256_maskz_popcnt_epi8(__mmask32 __U, __m256i __B)
__B);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_popcnt_epi8(__m128i __A)
{
return (__m128i)__builtin_elementwise_popcount((__v16qu)__A);
@@ -147,5 +155,7 @@ _mm_bitshuffle_epi64_mask(__m128i __A, __m128i __B)
#undef __DEFAULT_FN_ATTRS128
#undef __DEFAULT_FN_ATTRS256
+#undef __DEFAULT_FN_ATTRS128_CONSTEXPR
+#undef __DEFAULT_FN_ATTRS256_CONSTEXPR
#endif
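The new `__DEFAULT_FN_ATTRS{128,256}_CONSTEXPR` macros follow the pattern used elsewhere in these headers: `constexpr` is added only under C++11 and later, so C translation units see the unchanged attribute set. A rough sketch of what this enables for the popcount helpers, assuming Clang constant-evaluates __builtin_elementwise_popcount (values are hypothetical):
    // C++ TU compiled with -mavx512vl -mavx512bitalg.
    constexpr __m128i v = {0x00FF, 0x0003};        // 16-bit lanes: FF,0,0,0 | 3,0,0,0
    constexpr __m128i r = _mm_popcnt_epi16(v);
    static_assert(r[0] == 8 && r[1] == 2, "per-lane popcounts packed back into i64 lanes");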
diff --git a/clang/lib/Headers/avx512vlfp16intrin.h b/clang/lib/Headers/avx512vlfp16intrin.h
index a12acb7..1f8cca7 100644
--- a/clang/lib/Headers/avx512vlfp16intrin.h
+++ b/clang/lib/Headers/avx512vlfp16intrin.h
@@ -1419,8 +1419,8 @@ _mm256_maskz_cvtxps_ph(__mmask8 __U, __m256 __A) {
static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmadd_ph(__m128h __A,
__m128h __B,
__m128h __C) {
- return (__m128h)__builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B,
- (__v8hf)__C);
+ return (__m128h)__builtin_elementwise_fma((__v8hf)__A, (__v8hf)__B,
+ (__v8hf)__C);
}
static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_fmadd_ph(__m128h __A,
@@ -1429,7 +1429,7 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_fmadd_ph(__m128h __A,
__m128h __C) {
return (__m128h)__builtin_ia32_selectph_128(
(__mmask8)__U,
- __builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
+ __builtin_elementwise_fma((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
(__v8hf)__A);
}
@@ -1437,7 +1437,7 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_mask3_fmadd_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
return (__m128h)__builtin_ia32_selectph_128(
(__mmask8)__U,
- __builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
+ __builtin_elementwise_fma((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
(__v8hf)__C);
}
@@ -1445,15 +1445,15 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_maskz_fmadd_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
return (__m128h)__builtin_ia32_selectph_128(
(__mmask8)__U,
- __builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
+ __builtin_elementwise_fma((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
(__v8hf)_mm_setzero_ph());
}
static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmsub_ph(__m128h __A,
__m128h __B,
__m128h __C) {
- return (__m128h)__builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B,
- -(__v8hf)__C);
+ return (__m128h)__builtin_elementwise_fma((__v8hf)__A, (__v8hf)__B,
+ -(__v8hf)__C);
}
static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_fmsub_ph(__m128h __A,
@@ -1476,7 +1476,7 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_mask3_fnmadd_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
return (__m128h)__builtin_ia32_selectph_128(
(__mmask8)__U,
- __builtin_ia32_vfmaddph(-(__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
+ __builtin_elementwise_fma(-(__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
(__v8hf)__C);
}
@@ -1484,7 +1484,7 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_maskz_fnmadd_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
return (__m128h)__builtin_ia32_selectph_128(
(__mmask8)__U,
- __builtin_ia32_vfmaddph(-(__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
+ __builtin_elementwise_fma(-(__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
(__v8hf)_mm_setzero_ph());
}
@@ -1492,22 +1492,22 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_maskz_fnmsub_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
return (__m128h)__builtin_ia32_selectph_128(
(__mmask8)__U,
- __builtin_ia32_vfmaddph(-(__v8hf)__A, (__v8hf)__B, -(__v8hf)__C),
+ __builtin_elementwise_fma(-(__v8hf)__A, (__v8hf)__B, -(__v8hf)__C),
(__v8hf)_mm_setzero_ph());
}
static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fmadd_ph(__m256h __A,
__m256h __B,
__m256h __C) {
- return (__m256h)__builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B,
- (__v16hf)__C);
+ return (__m256h)__builtin_elementwise_fma((__v16hf)__A, (__v16hf)__B,
+ (__v16hf)__C);
}
static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_mask_fmadd_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) {
return (__m256h)__builtin_ia32_selectph_256(
(__mmask16)__U,
- __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
+ __builtin_elementwise_fma((__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
(__v16hf)__A);
}
@@ -1515,7 +1515,7 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_mask3_fmadd_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) {
return (__m256h)__builtin_ia32_selectph_256(
(__mmask16)__U,
- __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
+ __builtin_elementwise_fma((__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
(__v16hf)__C);
}
@@ -1523,22 +1523,22 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_maskz_fmadd_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) {
return (__m256h)__builtin_ia32_selectph_256(
(__mmask16)__U,
- __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
+ __builtin_elementwise_fma((__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
(__v16hf)_mm256_setzero_ph());
}
static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fmsub_ph(__m256h __A,
__m256h __B,
__m256h __C) {
- return (__m256h)__builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B,
- -(__v16hf)__C);
+ return (__m256h)__builtin_elementwise_fma((__v16hf)__A, (__v16hf)__B,
+ -(__v16hf)__C);
}
static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_mask_fmsub_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) {
return (__m256h)__builtin_ia32_selectph_256(
(__mmask16)__U,
- __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
+ __builtin_elementwise_fma((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
(__v16hf)__A);
}
@@ -1546,7 +1546,7 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_maskz_fmsub_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) {
return (__m256h)__builtin_ia32_selectph_256(
(__mmask16)__U,
- __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
+ __builtin_elementwise_fma((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
(__v16hf)_mm256_setzero_ph());
}
@@ -1554,7 +1554,7 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_mask3_fnmadd_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) {
return (__m256h)__builtin_ia32_selectph_256(
(__mmask16)__U,
- __builtin_ia32_vfmaddph256(-(__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
+ __builtin_elementwise_fma(-(__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
(__v16hf)__C);
}
@@ -1562,7 +1562,7 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_maskz_fnmadd_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) {
return (__m256h)__builtin_ia32_selectph_256(
(__mmask16)__U,
- __builtin_ia32_vfmaddph256(-(__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
+ __builtin_elementwise_fma(-(__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
(__v16hf)_mm256_setzero_ph());
}
@@ -1570,7 +1570,7 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_maskz_fnmsub_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) {
return (__m256h)__builtin_ia32_selectph_256(
(__mmask16)__U,
- __builtin_ia32_vfmaddph256(-(__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
+ __builtin_elementwise_fma(-(__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
(__v16hf)_mm256_setzero_ph());
}
@@ -1684,7 +1684,7 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_mask3_fmsub_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
return (__m128h)__builtin_ia32_selectph_128(
(__mmask8)__U,
- __builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B, -(__v8hf)__C),
+ __builtin_elementwise_fma((__v8hf)__A, (__v8hf)__B, -(__v8hf)__C),
(__v8hf)__C);
}
@@ -1692,7 +1692,7 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_mask3_fmsub_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) {
return (__m256h)__builtin_ia32_selectph_256(
(__mmask16)__U,
- __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
+ __builtin_elementwise_fma((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
(__v16hf)__C);
}
@@ -1715,45 +1715,45 @@ _mm256_mask3_fmsubadd_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) {
static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fnmadd_ph(__m128h __A,
__m128h __B,
__m128h __C) {
- return (__m128h)__builtin_ia32_vfmaddph((__v8hf)__A, -(__v8hf)__B,
- (__v8hf)__C);
+ return (__m128h)__builtin_elementwise_fma((__v8hf)__A, -(__v8hf)__B,
+ (__v8hf)__C);
}
static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_mask_fnmadd_ph(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
return (__m128h)__builtin_ia32_selectph_128(
(__mmask8)__U,
- __builtin_ia32_vfmaddph((__v8hf)__A, -(__v8hf)__B, (__v8hf)__C),
+ __builtin_elementwise_fma((__v8hf)__A, -(__v8hf)__B, (__v8hf)__C),
(__v8hf)__A);
}
static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fnmadd_ph(__m256h __A,
__m256h __B,
__m256h __C) {
- return (__m256h)__builtin_ia32_vfmaddph256((__v16hf)__A, -(__v16hf)__B,
- (__v16hf)__C);
+ return (__m256h)__builtin_elementwise_fma((__v16hf)__A, -(__v16hf)__B,
+ (__v16hf)__C);
}
static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_mask_fnmadd_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) {
return (__m256h)__builtin_ia32_selectph_256(
(__mmask16)__U,
- __builtin_ia32_vfmaddph256((__v16hf)__A, -(__v16hf)__B, (__v16hf)__C),
+ __builtin_elementwise_fma((__v16hf)__A, -(__v16hf)__B, (__v16hf)__C),
(__v16hf)__A);
}
static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fnmsub_ph(__m128h __A,
__m128h __B,
__m128h __C) {
- return (__m128h)__builtin_ia32_vfmaddph((__v8hf)__A, -(__v8hf)__B,
- -(__v8hf)__C);
+ return (__m128h)__builtin_elementwise_fma((__v8hf)__A, -(__v8hf)__B,
+ -(__v8hf)__C);
}
static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_mask_fnmsub_ph(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
return (__m128h)__builtin_ia32_selectph_128(
(__mmask8)__U,
- __builtin_ia32_vfmaddph((__v8hf)__A, -(__v8hf)__B, -(__v8hf)__C),
+ __builtin_elementwise_fma((__v8hf)__A, -(__v8hf)__B, -(__v8hf)__C),
(__v8hf)__A);
}
@@ -1761,22 +1761,22 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_mask3_fnmsub_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
return (__m128h)__builtin_ia32_selectph_128(
(__mmask8)__U,
- __builtin_ia32_vfmaddph((__v8hf)__A, -(__v8hf)__B, -(__v8hf)__C),
+ __builtin_elementwise_fma((__v8hf)__A, -(__v8hf)__B, -(__v8hf)__C),
(__v8hf)__C);
}
static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fnmsub_ph(__m256h __A,
__m256h __B,
__m256h __C) {
- return (__m256h)__builtin_ia32_vfmaddph256((__v16hf)__A, -(__v16hf)__B,
- -(__v16hf)__C);
+ return (__m256h)__builtin_elementwise_fma((__v16hf)__A, -(__v16hf)__B,
+ -(__v16hf)__C);
}
static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_mask_fnmsub_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) {
return (__m256h)__builtin_ia32_selectph_256(
(__mmask16)__U,
- __builtin_ia32_vfmaddph256((__v16hf)__A, -(__v16hf)__B, -(__v16hf)__C),
+ __builtin_elementwise_fma((__v16hf)__A, -(__v16hf)__B, -(__v16hf)__C),
(__v16hf)__A);
}
@@ -1784,7 +1784,7 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_mask3_fnmsub_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) {
return (__m256h)__builtin_ia32_selectph_256(
(__mmask16)__U,
- __builtin_ia32_vfmaddph256((__v16hf)__A, -(__v16hf)__B, -(__v16hf)__C),
+ __builtin_elementwise_fma((__v16hf)__A, -(__v16hf)__B, -(__v16hf)__C),
(__v16hf)__C);
}
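All of the rewrites above rely on the same identity: the fused negate forms are expressed by negating operands of one plain FMA rather than by separate builtins. A scalar reference for clarity (standard C/C++, not header code; shown for double, the _ph variants apply the same rule per half-precision lane):
    #include <math.h>
    static double ref_fmsub (double a, double b, double c) { return fma( a, b, -c); }
    static double ref_fnmadd(double a, double b, double c) { return fma(-a, b,  c); }
    static double ref_fnmsub(double a, double b, double c) { return fma(-a, b, -c); }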
diff --git a/clang/lib/Headers/avx512vlintrin.h b/clang/lib/Headers/avx512vlintrin.h
index 2a5f7b4..cbad39a 100644
--- a/clang/lib/Headers/avx512vlintrin.h
+++ b/clang/lib/Headers/avx512vlintrin.h
@@ -899,321 +899,289 @@ _mm_maskz_xor_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_fmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
{
- return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
- __builtin_ia32_vfmaddpd ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __C),
- (__v2df) __A);
+ return (__m128d)__builtin_ia32_selectpd_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v2df)__A, (__v2df)__B, (__v2df)__C),
+ (__v2df)__A);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask3_fmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
{
- return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
- __builtin_ia32_vfmaddpd ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __C),
- (__v2df) __C);
+ return (__m128d)__builtin_ia32_selectpd_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v2df)__A, (__v2df)__B, (__v2df)__C),
+ (__v2df)__C);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_fmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
{
- return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
- __builtin_ia32_vfmaddpd ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __C),
- (__v2df)_mm_setzero_pd());
+ return (__m128d)__builtin_ia32_selectpd_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v2df)__A, (__v2df)__B, (__v2df)__C),
+ (__v2df)_mm_setzero_pd());
}
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_fmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
{
- return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
- __builtin_ia32_vfmaddpd ((__v2df) __A,
- (__v2df) __B,
- -(__v2df) __C),
- (__v2df) __A);
+ return (__m128d)__builtin_ia32_selectpd_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v2df)__A, (__v2df)__B, -(__v2df)__C),
+ (__v2df)__A);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_fmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
{
- return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
- __builtin_ia32_vfmaddpd ((__v2df) __A,
- (__v2df) __B,
- -(__v2df) __C),
- (__v2df)_mm_setzero_pd());
+ return (__m128d)__builtin_ia32_selectpd_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v2df)__A, (__v2df)__B, -(__v2df)__C),
+ (__v2df)_mm_setzero_pd());
}
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask3_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
{
- return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
- __builtin_ia32_vfmaddpd (-(__v2df) __A,
- (__v2df) __B,
- (__v2df) __C),
- (__v2df) __C);
+ return (__m128d)__builtin_ia32_selectpd_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma(-(__v2df)__A, (__v2df)__B, (__v2df)__C),
+ (__v2df)__C);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_fnmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
{
- return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
- __builtin_ia32_vfmaddpd (-(__v2df) __A,
- (__v2df) __B,
- (__v2df) __C),
- (__v2df)_mm_setzero_pd());
+ return (__m128d)__builtin_ia32_selectpd_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma(-(__v2df)__A, (__v2df)__B, (__v2df)__C),
+ (__v2df)_mm_setzero_pd());
}
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_fnmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
{
- return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
- __builtin_ia32_vfmaddpd (-(__v2df) __A,
- (__v2df) __B,
- -(__v2df) __C),
- (__v2df)_mm_setzero_pd());
+ return (__m128d)__builtin_ia32_selectpd_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma(-(__v2df)__A, (__v2df)__B, -(__v2df)__C),
+ (__v2df)_mm_setzero_pd());
}
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_fmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
{
- return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
- __builtin_ia32_vfmaddpd256 ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __C),
- (__v4df) __A);
+ return (__m256d)__builtin_ia32_selectpd_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v4df)__A, (__v4df)__B, (__v4df)__C),
+ (__v4df)__A);
}
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask3_fmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
{
- return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
- __builtin_ia32_vfmaddpd256 ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __C),
- (__v4df) __C);
+ return (__m256d)__builtin_ia32_selectpd_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v4df)__A, (__v4df)__B, (__v4df)__C),
+ (__v4df)__C);
}
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_fmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
{
- return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
- __builtin_ia32_vfmaddpd256 ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __C),
- (__v4df)_mm256_setzero_pd());
+ return (__m256d)__builtin_ia32_selectpd_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v4df)__A, (__v4df)__B, (__v4df)__C),
+ (__v4df)_mm256_setzero_pd());
}
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_fmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
{
- return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
- __builtin_ia32_vfmaddpd256 ((__v4df) __A,
- (__v4df) __B,
- -(__v4df) __C),
- (__v4df) __A);
+ return (__m256d)__builtin_ia32_selectpd_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v4df)__A, (__v4df)__B, -(__v4df)__C),
+ (__v4df)__A);
}
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_fmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
{
- return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
- __builtin_ia32_vfmaddpd256 ((__v4df) __A,
- (__v4df) __B,
- -(__v4df) __C),
- (__v4df)_mm256_setzero_pd());
+ return (__m256d)__builtin_ia32_selectpd_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v4df)__A, (__v4df)__B, -(__v4df)__C),
+ (__v4df)_mm256_setzero_pd());
}
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask3_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
{
- return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
- __builtin_ia32_vfmaddpd256 (-(__v4df) __A,
- (__v4df) __B,
- (__v4df) __C),
- (__v4df) __C);
+ return (__m256d)__builtin_ia32_selectpd_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma(-(__v4df)__A, (__v4df)__B, (__v4df)__C),
+ (__v4df)__C);
}
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_fnmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
{
- return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
- __builtin_ia32_vfmaddpd256 (-(__v4df) __A,
- (__v4df) __B,
- (__v4df) __C),
- (__v4df)_mm256_setzero_pd());
+ return (__m256d)__builtin_ia32_selectpd_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma(-(__v4df)__A, (__v4df)__B, (__v4df)__C),
+ (__v4df)_mm256_setzero_pd());
}
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_fnmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
{
- return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
- __builtin_ia32_vfmaddpd256 (-(__v4df) __A,
- (__v4df) __B,
- -(__v4df) __C),
- (__v4df)_mm256_setzero_pd());
+ return (__m256d)__builtin_ia32_selectpd_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma(-(__v4df)__A, (__v4df)__B, -(__v4df)__C),
+ (__v4df)_mm256_setzero_pd());
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_fmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
{
- return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
- __builtin_ia32_vfmaddps ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __C),
- (__v4sf) __A);
+ return (__m128)__builtin_ia32_selectps_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B, (__v4sf)__C),
+ (__v4sf)__A);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask3_fmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
{
- return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
- __builtin_ia32_vfmaddps ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __C),
- (__v4sf) __C);
+ return (__m128)__builtin_ia32_selectps_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B, (__v4sf)__C),
+ (__v4sf)__C);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_fmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
{
- return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
- __builtin_ia32_vfmaddps ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __C),
- (__v4sf)_mm_setzero_ps());
+ return (__m128)__builtin_ia32_selectps_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B, (__v4sf)__C),
+ (__v4sf)_mm_setzero_ps());
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_fmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
{
- return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
- __builtin_ia32_vfmaddps ((__v4sf) __A,
- (__v4sf) __B,
- -(__v4sf) __C),
- (__v4sf) __A);
+ return (__m128)__builtin_ia32_selectps_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C),
+ (__v4sf)__A);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_fmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
{
- return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
- __builtin_ia32_vfmaddps ((__v4sf) __A,
- (__v4sf) __B,
- -(__v4sf) __C),
- (__v4sf)_mm_setzero_ps());
+ return (__m128)__builtin_ia32_selectps_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C),
+ (__v4sf)_mm_setzero_ps());
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask3_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
{
- return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
- __builtin_ia32_vfmaddps (-(__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __C),
- (__v4sf) __C);
+ return (__m128)__builtin_ia32_selectps_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma(-(__v4sf)__A, (__v4sf)__B, (__v4sf)__C),
+ (__v4sf)__C);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_fnmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
{
- return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
- __builtin_ia32_vfmaddps (-(__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __C),
- (__v4sf)_mm_setzero_ps());
+ return (__m128)__builtin_ia32_selectps_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma(-(__v4sf)__A, (__v4sf)__B, (__v4sf)__C),
+ (__v4sf)_mm_setzero_ps());
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_fnmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
{
- return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
- __builtin_ia32_vfmaddps (-(__v4sf) __A,
- (__v4sf) __B,
- -(__v4sf) __C),
- (__v4sf)_mm_setzero_ps());
+ return (__m128)__builtin_ia32_selectps_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma(-(__v4sf)__A, (__v4sf)__B, -(__v4sf)__C),
+ (__v4sf)_mm_setzero_ps());
}
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_fmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
{
- return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
- __builtin_ia32_vfmaddps256 ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __C),
- (__v8sf) __A);
+ return (__m256)__builtin_ia32_selectps_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B, (__v8sf)__C),
+ (__v8sf)__A);
}
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask3_fmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
{
- return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
- __builtin_ia32_vfmaddps256 ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __C),
- (__v8sf) __C);
+ return (__m256)__builtin_ia32_selectps_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B, (__v8sf)__C),
+ (__v8sf)__C);
}
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_fmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
{
- return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
- __builtin_ia32_vfmaddps256 ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __C),
- (__v8sf)_mm256_setzero_ps());
+ return (__m256)__builtin_ia32_selectps_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B, (__v8sf)__C),
+ (__v8sf)_mm256_setzero_ps());
}
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_fmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
{
- return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
- __builtin_ia32_vfmaddps256 ((__v8sf) __A,
- (__v8sf) __B,
- -(__v8sf) __C),
- (__v8sf) __A);
+ return (__m256)__builtin_ia32_selectps_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C),
+ (__v8sf)__A);
}
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_fmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
{
- return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
- __builtin_ia32_vfmaddps256 ((__v8sf) __A,
- (__v8sf) __B,
- -(__v8sf) __C),
- (__v8sf)_mm256_setzero_ps());
+ return (__m256)__builtin_ia32_selectps_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C),
+ (__v8sf)_mm256_setzero_ps());
}
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask3_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
{
- return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
- __builtin_ia32_vfmaddps256 (-(__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __C),
- (__v8sf) __C);
+ return (__m256)__builtin_ia32_selectps_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma(-(__v8sf)__A, (__v8sf)__B, (__v8sf)__C),
+ (__v8sf)__C);
}
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_fnmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
{
- return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
- __builtin_ia32_vfmaddps256 (-(__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __C),
- (__v8sf)_mm256_setzero_ps());
+ return (__m256)__builtin_ia32_selectps_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma(-(__v8sf)__A, (__v8sf)__B, (__v8sf)__C),
+ (__v8sf)_mm256_setzero_ps());
}
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_fnmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
{
- return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
- __builtin_ia32_vfmaddps256 (-(__v8sf) __A,
- (__v8sf) __B,
- -(__v8sf) __C),
- (__v8sf)_mm256_setzero_ps());
+ return (__m256)__builtin_ia32_selectps_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma(-(__v8sf)__A, (__v8sf)__B, -(__v8sf)__C),
+ (__v8sf)_mm256_setzero_ps());
}
static __inline__ __m128d __DEFAULT_FN_ATTRS128
@@ -1420,41 +1388,37 @@ _mm256_maskz_fmsubadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask3_fmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
{
- return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
- __builtin_ia32_vfmaddpd ((__v2df) __A,
- (__v2df) __B,
- -(__v2df) __C),
- (__v2df) __C);
+ return (__m128d)__builtin_ia32_selectpd_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v2df)__A, (__v2df)__B, -(__v2df)__C),
+ (__v2df)__C);
}
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask3_fmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
{
- return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
- __builtin_ia32_vfmaddpd256 ((__v4df) __A,
- (__v4df) __B,
- -(__v4df) __C),
- (__v4df) __C);
+ return (__m256d)__builtin_ia32_selectpd_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v4df)__A, (__v4df)__B, -(__v4df)__C),
+ (__v4df)__C);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask3_fmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
{
- return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
- __builtin_ia32_vfmaddps ((__v4sf) __A,
- (__v4sf) __B,
- -(__v4sf) __C),
- (__v4sf) __C);
+ return (__m128)__builtin_ia32_selectps_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C),
+ (__v4sf)__C);
}
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask3_fmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
{
- return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
- __builtin_ia32_vfmaddps256 ((__v8sf) __A,
- (__v8sf) __B,
- -(__v8sf) __C),
- (__v8sf) __C);
+ return (__m256)__builtin_ia32_selectps_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C),
+ (__v8sf)__C);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS128
@@ -1500,121 +1464,109 @@ _mm256_mask3_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_fnmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
{
- return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
- __builtin_ia32_vfmaddpd ((__v2df) __A,
- -(__v2df) __B,
- (__v2df) __C),
- (__v2df) __A);
+ return (__m128d)__builtin_ia32_selectpd_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v2df)__A, -(__v2df)__B, (__v2df)__C),
+ (__v2df)__A);
}
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_fnmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
{
- return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
- __builtin_ia32_vfmaddpd256 ((__v4df) __A,
- -(__v4df) __B,
- (__v4df) __C),
- (__v4df) __A);
+ return (__m256d)__builtin_ia32_selectpd_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v4df)__A, -(__v4df)__B, (__v4df)__C),
+ (__v4df)__A);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_fnmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
{
- return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
- __builtin_ia32_vfmaddps ((__v4sf) __A,
- -(__v4sf) __B,
- (__v4sf) __C),
- (__v4sf) __A);
+ return (__m128)__builtin_ia32_selectps_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v4sf)__A, -(__v4sf)__B, (__v4sf)__C),
+ (__v4sf)__A);
}
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_fnmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
{
- return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
- __builtin_ia32_vfmaddps256 ((__v8sf) __A,
- -(__v8sf) __B,
- (__v8sf) __C),
- (__v8sf) __A);
+ return (__m256)__builtin_ia32_selectps_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v8sf)__A, -(__v8sf)__B, (__v8sf)__C),
+ (__v8sf)__A);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_fnmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
{
- return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
- __builtin_ia32_vfmaddpd ((__v2df) __A,
- -(__v2df) __B,
- -(__v2df) __C),
- (__v2df) __A);
+ return (__m128d)__builtin_ia32_selectpd_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v2df)__A, -(__v2df)__B, -(__v2df)__C),
+ (__v2df)__A);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask3_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
{
- return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
- __builtin_ia32_vfmaddpd ((__v2df) __A,
- -(__v2df) __B,
- -(__v2df) __C),
- (__v2df) __C);
+ return (__m128d)__builtin_ia32_selectpd_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v2df)__A, -(__v2df)__B, -(__v2df)__C),
+ (__v2df)__C);
}
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_fnmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
{
- return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
- __builtin_ia32_vfmaddpd256 ((__v4df) __A,
- -(__v4df) __B,
- -(__v4df) __C),
- (__v4df) __A);
+ return (__m256d)__builtin_ia32_selectpd_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v4df)__A, -(__v4df)__B, -(__v4df)__C),
+ (__v4df)__A);
}
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask3_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
{
- return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
- __builtin_ia32_vfmaddpd256 ((__v4df) __A,
- -(__v4df) __B,
- -(__v4df) __C),
- (__v4df) __C);
+ return (__m256d)__builtin_ia32_selectpd_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v4df)__A, -(__v4df)__B, -(__v4df)__C),
+ (__v4df)__C);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_fnmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
{
- return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
- __builtin_ia32_vfmaddps ((__v4sf) __A,
- -(__v4sf) __B,
- -(__v4sf) __C),
- (__v4sf) __A);
+ return (__m128)__builtin_ia32_selectps_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v4sf)__A, -(__v4sf)__B, -(__v4sf)__C),
+ (__v4sf)__A);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask3_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
{
- return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
- __builtin_ia32_vfmaddps ((__v4sf) __A,
- -(__v4sf) __B,
- -(__v4sf) __C),
- (__v4sf) __C);
+ return (__m128)__builtin_ia32_selectps_128(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v4sf)__A, -(__v4sf)__B, -(__v4sf)__C),
+ (__v4sf)__C);
}
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_fnmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
{
- return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
- __builtin_ia32_vfmaddps256 ((__v8sf) __A,
- -(__v8sf) __B,
- -(__v8sf) __C),
- (__v8sf) __A);
+ return (__m256)__builtin_ia32_selectps_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v8sf)__A, -(__v8sf)__B, -(__v8sf)__C),
+ (__v8sf)__A);
}
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask3_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
{
- return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
- __builtin_ia32_vfmaddps256 ((__v8sf) __A,
- -(__v8sf) __B,
- -(__v8sf) __C),
- (__v8sf) __C);
+ return (__m256)__builtin_ia32_selectps_256(
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v8sf)__A, -(__v8sf)__B, -(__v8sf)__C),
+ (__v8sf)__C);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS128
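Reformatting aside, the masked forms keep one structure throughout: the FMA result is computed unconditionally and the mask only selects, per lane, between that result and the passthrough operand (or zero in the maskz forms). A scalar sketch of lane i of _mm_mask_fmadd_pd, with illustrative naming that is not part of the header:
    #include <math.h>
    static double mask_fmadd_lane(double a, double b, double c, int mask_bit) {
      return mask_bit ? fma(a, b, c) : a;   /* maskz forms would return 0.0 instead of a */
    }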
diff --git a/clang/lib/Headers/avx512vpopcntdqintrin.h b/clang/lib/Headers/avx512vpopcntdqintrin.h
index e24c2c5..79fc6e1 100644
--- a/clang/lib/Headers/avx512vpopcntdqintrin.h
+++ b/clang/lib/Headers/avx512vpopcntdqintrin.h
@@ -60,5 +60,6 @@ _mm512_maskz_popcnt_epi32(__mmask16 __U, __m512i __A) {
}
#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS_CONSTEXPR
#endif
diff --git a/clang/lib/Headers/avx512vpopcntdqvlintrin.h b/clang/lib/Headers/avx512vpopcntdqvlintrin.h
index b6c819b..d14cb1e 100644
--- a/clang/lib/Headers/avx512vpopcntdqvlintrin.h
+++ b/clang/lib/Headers/avx512vpopcntdqvlintrin.h
@@ -99,5 +99,7 @@ _mm256_maskz_popcnt_epi32(__mmask8 __U, __m256i __A) {
#undef __DEFAULT_FN_ATTRS128
#undef __DEFAULT_FN_ATTRS256
+#undef __DEFAULT_FN_ATTRS128_CONSTEXPR
+#undef __DEFAULT_FN_ATTRS256_CONSTEXPR
#endif
diff --git a/clang/lib/Headers/avxintrin.h b/clang/lib/Headers/avxintrin.h
index b9ca013..5a6d48b 100644
--- a/clang/lib/Headers/avxintrin.h
+++ b/clang/lib/Headers/avxintrin.h
@@ -87,9 +87,8 @@ typedef __bf16 __m256bh __attribute__((__vector_size__(32), __aligned__(32)));
/// A 256-bit vector of [4 x double] containing one of the source operands.
/// \returns A 256-bit vector of [4 x double] containing the sums of both
/// operands.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_add_pd(__m256d __a, __m256d __b)
-{
+static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm256_add_pd(__m256d __a, __m256d __b) {
return (__m256d)((__v4df)__a+(__v4df)__b);
}
@@ -105,9 +104,8 @@ _mm256_add_pd(__m256d __a, __m256d __b)
/// A 256-bit vector of [8 x float] containing one of the source operands.
/// \returns A 256-bit vector of [8 x float] containing the sums of both
/// operands.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_add_ps(__m256 __a, __m256 __b)
-{
+static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_add_ps(__m256 __a,
+ __m256 __b) {
return (__m256)((__v8sf)__a+(__v8sf)__b);
}
@@ -123,9 +121,8 @@ _mm256_add_ps(__m256 __a, __m256 __b)
/// A 256-bit vector of [4 x double] containing the subtrahend.
/// \returns A 256-bit vector of [4 x double] containing the differences between
/// both operands.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_sub_pd(__m256d __a, __m256d __b)
-{
+static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm256_sub_pd(__m256d __a, __m256d __b) {
return (__m256d)((__v4df)__a-(__v4df)__b);
}
@@ -141,9 +138,8 @@ _mm256_sub_pd(__m256d __a, __m256d __b)
/// A 256-bit vector of [8 x float] containing the subtrahend.
/// \returns A 256-bit vector of [8 x float] containing the differences between
/// both operands.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_sub_ps(__m256 __a, __m256 __b)
-{
+static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_sub_ps(__m256 __a,
+ __m256 __b) {
return (__m256)((__v8sf)__a-(__v8sf)__b);
}
@@ -197,9 +193,8 @@ _mm256_addsub_ps(__m256 __a, __m256 __b)
/// A 256-bit vector of [4 x double] containing the divisor.
/// \returns A 256-bit vector of [4 x double] containing the quotients of both
/// operands.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_div_pd(__m256d __a, __m256d __b)
-{
+static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm256_div_pd(__m256d __a, __m256d __b) {
return (__m256d)((__v4df)__a/(__v4df)__b);
}
@@ -215,9 +210,8 @@ _mm256_div_pd(__m256d __a, __m256d __b)
/// A 256-bit vector of [8 x float] containing the divisor.
/// \returns A 256-bit vector of [8 x float] containing the quotients of both
/// operands.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_div_ps(__m256 __a, __m256 __b)
-{
+static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_div_ps(__m256 __a,
+ __m256 __b) {
return (__m256)((__v8sf)__a/(__v8sf)__b);
}
@@ -317,9 +311,8 @@ _mm256_min_ps(__m256 __a, __m256 __b)
/// A 256-bit vector of [4 x double] containing one of the operands.
/// \returns A 256-bit vector of [4 x double] containing the products of both
/// operands.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_mul_pd(__m256d __a, __m256d __b)
-{
+static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm256_mul_pd(__m256d __a, __m256d __b) {
return (__m256d)((__v4df)__a * (__v4df)__b);
}
@@ -335,9 +328,8 @@ _mm256_mul_pd(__m256d __a, __m256d __b)
/// A 256-bit vector of [8 x float] containing one of the operands.
/// \returns A 256-bit vector of [8 x float] containing the products of both
/// operands.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_mul_ps(__m256 __a, __m256 __b)
-{
+static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_mul_ps(__m256 __a,
+ __m256 __b) {
return (__m256)((__v8sf)__a * (__v8sf)__b);
}
@@ -555,7 +547,7 @@ _mm256_rcp_ps(__m256 __a)
/// A 256-bit vector of [4 x double] containing one of the source operands.
/// \returns A 256-bit vector of [4 x double] containing the bitwise AND of the
/// values between both operands.
-static __inline __m256d __DEFAULT_FN_ATTRS
+static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_and_pd(__m256d __a, __m256d __b)
{
return (__m256d)((__v4du)__a & (__v4du)__b);
@@ -573,7 +565,7 @@ _mm256_and_pd(__m256d __a, __m256d __b)
/// A 256-bit vector of [8 x float] containing one of the source operands.
/// \returns A 256-bit vector of [8 x float] containing the bitwise AND of the
/// values between both operands.
-static __inline __m256 __DEFAULT_FN_ATTRS
+static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_and_ps(__m256 __a, __m256 __b)
{
return (__m256)((__v8su)__a & (__v8su)__b);
@@ -594,7 +586,7 @@ _mm256_and_ps(__m256 __a, __m256 __b)
/// \returns A 256-bit vector of [4 x double] containing the bitwise AND of the
/// values of the second operand and the one's complement of the first
/// operand.
-static __inline __m256d __DEFAULT_FN_ATTRS
+static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_andnot_pd(__m256d __a, __m256d __b)
{
return (__m256d)(~(__v4du)__a & (__v4du)__b);
@@ -615,7 +607,7 @@ _mm256_andnot_pd(__m256d __a, __m256d __b)
/// \returns A 256-bit vector of [8 x float] containing the bitwise AND of the
/// values of the second operand and the one's complement of the first
/// operand.
-static __inline __m256 __DEFAULT_FN_ATTRS
+static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_andnot_ps(__m256 __a, __m256 __b)
{
return (__m256)(~(__v8su)__a & (__v8su)__b);
@@ -633,7 +625,7 @@ _mm256_andnot_ps(__m256 __a, __m256 __b)
/// A 256-bit vector of [4 x double] containing one of the source operands.
/// \returns A 256-bit vector of [4 x double] containing the bitwise OR of the
/// values between both operands.
-static __inline __m256d __DEFAULT_FN_ATTRS
+static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_or_pd(__m256d __a, __m256d __b)
{
return (__m256d)((__v4du)__a | (__v4du)__b);
@@ -651,7 +643,7 @@ _mm256_or_pd(__m256d __a, __m256d __b)
/// A 256-bit vector of [8 x float] containing one of the source operands.
/// \returns A 256-bit vector of [8 x float] containing the bitwise OR of the
/// values between both operands.
-static __inline __m256 __DEFAULT_FN_ATTRS
+static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_or_ps(__m256 __a, __m256 __b)
{
return (__m256)((__v8su)__a | (__v8su)__b);
@@ -669,7 +661,7 @@ _mm256_or_ps(__m256 __a, __m256 __b)
/// A 256-bit vector of [4 x double] containing one of the source operands.
/// \returns A 256-bit vector of [4 x double] containing the bitwise XOR of the
/// values between both operands.
-static __inline __m256d __DEFAULT_FN_ATTRS
+static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_xor_pd(__m256d __a, __m256d __b)
{
return (__m256d)((__v4du)__a ^ (__v4du)__b);
@@ -687,7 +679,7 @@ _mm256_xor_pd(__m256d __a, __m256d __b)
/// A 256-bit vector of [8 x float] containing one of the source operands.
/// \returns A 256-bit vector of [8 x float] containing the bitwise XOR of the
/// values between both operands.
-static __inline __m256 __DEFAULT_FN_ATTRS
+static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_xor_ps(__m256 __a, __m256 __b)
{
return (__m256)((__v8su)__a ^ (__v8su)__b);
@@ -2392,7 +2384,7 @@ _mm256_cvtss_f32(__m256 __a)
/// return value.
/// \returns A 256-bit vector of [8 x float] containing the moved and duplicated
/// values.
-static __inline __m256 __DEFAULT_FN_ATTRS
+static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_movehdup_ps(__m256 __a)
{
return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 1, 1, 3, 3, 5, 5, 7, 7);
@@ -2417,7 +2409,7 @@ _mm256_movehdup_ps(__m256 __a)
/// return value.
/// \returns A 256-bit vector of [8 x float] containing the moved and duplicated
/// values.
-static __inline __m256 __DEFAULT_FN_ATTRS
+static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_moveldup_ps(__m256 __a)
{
return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 0, 0, 2, 2, 4, 4, 6, 6);
@@ -2439,7 +2431,7 @@ _mm256_moveldup_ps(__m256 __a)
/// the return value.
/// \returns A 256-bit vector of [4 x double] containing the moved and
/// duplicated values.
-static __inline __m256d __DEFAULT_FN_ATTRS
+static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_movedup_pd(__m256d __a)
{
return __builtin_shufflevector((__v4df)__a, (__v4df)__a, 0, 0, 2, 2);
@@ -2462,9 +2454,8 @@ _mm256_movedup_pd(__m256d __a)
/// Bits [127:64] are written to bits [127:64] of the return value. \n
/// Bits [255:192] are written to bits [255:192] of the return value. \n
/// \returns A 256-bit vector of [4 x double] containing the interleaved values.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_unpackhi_pd(__m256d __a, __m256d __b)
-{
+static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm256_unpackhi_pd(__m256d __a, __m256d __b) {
return __builtin_shufflevector((__v4df)__a, (__v4df)__b, 1, 5, 1+2, 5+2);
}
@@ -2484,9 +2475,8 @@ _mm256_unpackhi_pd(__m256d __a, __m256d __b)
/// Bits [63:0] are written to bits [127:64] of the return value. \n
/// Bits [191:128] are written to bits [255:192] of the return value. \n
/// \returns A 256-bit vector of [4 x double] containing the interleaved values.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_unpacklo_pd(__m256d __a, __m256d __b)
-{
+static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm256_unpacklo_pd(__m256d __a, __m256d __b) {
return __builtin_shufflevector((__v4df)__a, (__v4df)__b, 0, 4, 0+2, 4+2);
}
@@ -2511,9 +2501,8 @@ _mm256_unpacklo_pd(__m256d __a, __m256d __b)
/// Bits [223:192] are written to bits [191:160] of the return value. \n
/// Bits [255:224] are written to bits [255:224] of the return value.
/// \returns A 256-bit vector of [8 x float] containing the interleaved values.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_unpackhi_ps(__m256 __a, __m256 __b)
-{
+static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm256_unpackhi_ps(__m256 __a, __m256 __b) {
return __builtin_shufflevector((__v8sf)__a, (__v8sf)__b, 2, 10, 2+1, 10+1, 6, 14, 6+1, 14+1);
}
@@ -2538,9 +2527,8 @@ _mm256_unpackhi_ps(__m256 __a, __m256 __b)
/// Bits [159:128] are written to bits [191:160] of the return value. \n
/// Bits [191:160] are written to bits [255:224] of the return value.
/// \returns A 256-bit vector of [8 x float] containing the interleaved values.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_unpacklo_ps(__m256 __a, __m256 __b)
-{
+static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm256_unpacklo_ps(__m256 __a, __m256 __b) {
return __builtin_shufflevector((__v8sf)__a, (__v8sf)__b, 0, 8, 0+1, 8+1, 4, 12, 4+1, 12+1);
}
@@ -4379,7 +4367,7 @@ _mm256_setzero_si256(void) {
/// A 256-bit floating-point vector of [4 x double].
/// \returns A 256-bit floating-point vector of [8 x float] containing the same
/// bitwise pattern as the parameter.
-static __inline __m256 __DEFAULT_FN_ATTRS
+static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_castpd_ps(__m256d __a)
{
return (__m256)__a;
@@ -4396,7 +4384,7 @@ _mm256_castpd_ps(__m256d __a)
/// A 256-bit floating-point vector of [4 x double].
/// \returns A 256-bit integer vector containing the same bitwise pattern as the
/// parameter.
-static __inline __m256i __DEFAULT_FN_ATTRS
+static __inline __m256i __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_castpd_si256(__m256d __a)
{
return (__m256i)__a;
@@ -4413,7 +4401,7 @@ _mm256_castpd_si256(__m256d __a)
/// A 256-bit floating-point vector of [8 x float].
/// \returns A 256-bit floating-point vector of [4 x double] containing the same
/// bitwise pattern as the parameter.
-static __inline __m256d __DEFAULT_FN_ATTRS
+static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_castps_pd(__m256 __a)
{
return (__m256d)__a;
@@ -4430,7 +4418,7 @@ _mm256_castps_pd(__m256 __a)
/// A 256-bit floating-point vector of [8 x float].
/// \returns A 256-bit integer vector containing the same bitwise pattern as the
/// parameter.
-static __inline __m256i __DEFAULT_FN_ATTRS
+static __inline __m256i __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_castps_si256(__m256 __a)
{
return (__m256i)__a;
@@ -4447,7 +4435,7 @@ _mm256_castps_si256(__m256 __a)
/// A 256-bit integer vector.
/// \returns A 256-bit floating-point vector of [8 x float] containing the same
/// bitwise pattern as the parameter.
-static __inline __m256 __DEFAULT_FN_ATTRS
+static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_castsi256_ps(__m256i __a)
{
return (__m256)__a;
@@ -4464,7 +4452,7 @@ _mm256_castsi256_ps(__m256i __a)
/// A 256-bit integer vector.
/// \returns A 256-bit floating-point vector of [4 x double] containing the same
/// bitwise pattern as the parameter.
-static __inline __m256d __DEFAULT_FN_ATTRS
+static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_castsi256_pd(__m256i __a)
{
return (__m256d)__a;
@@ -4481,7 +4469,7 @@ _mm256_castsi256_pd(__m256i __a)
/// A 256-bit floating-point vector of [4 x double].
/// \returns A 128-bit floating-point vector of [2 x double] containing the
/// lower 128 bits of the parameter.
-static __inline __m128d __DEFAULT_FN_ATTRS
+static __inline __m128d __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_castpd256_pd128(__m256d __a)
{
return __builtin_shufflevector((__v4df)__a, (__v4df)__a, 0, 1);
@@ -4498,7 +4486,7 @@ _mm256_castpd256_pd128(__m256d __a)
/// A 256-bit floating-point vector of [8 x float].
/// \returns A 128-bit floating-point vector of [4 x float] containing the
/// lower 128 bits of the parameter.
-static __inline __m128 __DEFAULT_FN_ATTRS
+static __inline __m128 __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_castps256_ps128(__m256 __a)
{
return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 0, 1, 2, 3);
@@ -4514,7 +4502,7 @@ _mm256_castps256_ps128(__m256 __a)
/// A 256-bit integer vector.
/// \returns A 128-bit integer vector containing the lower 128 bits of the
/// parameter.
-static __inline __m128i __DEFAULT_FN_ATTRS
+static __inline __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_castsi256_si128(__m256i __a)
{
return __builtin_shufflevector((__v4di)__a, (__v4di)__a, 0, 1);
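Since the arithmetic, bitwise, shuffle, and cast helpers above now carry `__DEFAULT_FN_ATTRS_CONSTEXPR`, simple AVX expressions can appear in constant expressions from C++. A small sketch, assuming a Clang that constant-evaluates vector element access (values are arbitrary):
    // C++ TU compiled with -mavx.
    constexpr __m256d a = {1.0, 2.0, 3.0, 4.0};
    constexpr __m256d b = {4.0, 3.0, 2.0, 1.0};
    constexpr __m256d s = _mm256_add_pd(a, b);
    static_assert(s[0] == 5.0 && s[3] == 5.0, "element-wise add folds at compile time");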
diff --git a/clang/lib/Headers/cpuid.h b/clang/lib/Headers/cpuid.h
index 52addb7..ce8c79e 100644
--- a/clang/lib/Headers/cpuid.h
+++ b/clang/lib/Headers/cpuid.h
@@ -345,10 +345,15 @@ static __inline int __get_cpuid_count (unsigned int __leaf,
// In some configurations, __cpuidex is defined as a builtin (primarily
// -fms-extensions) which will conflict with the __cpuidex definition below.
#if !(__has_builtin(__cpuidex))
+// In some cases, offloading will set the host as the aux triple and define the
+// builtin. Because __has_builtin does not detect builtins on aux triples, we
+// need to check explicitly for some offloading cases.
+#ifndef __NVPTX__
static __inline void __cpuidex(int __cpu_info[4], int __leaf, int __subleaf) {
__cpuid_count(__leaf, __subleaf, __cpu_info[0], __cpu_info[1], __cpu_info[2],
__cpu_info[3]);
}
#endif
+#endif
#endif /* __CPUID_H */
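For reference, the fallback __cpuidex defined above uses the MSVC-style signature, so callers look the same whether the builtin or this header definition is in effect; under NVPTX offloading the host-side definition is simply suppressed. A minimal host-only usage sketch (leaf/subleaf values are just an example):
    #include <cpuid.h>
    void query_leaf7(int info[4]) {
      __cpuidex(info, 7, 0);   /* structured extended feature flags, subleaf 0 */
    }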
diff --git a/clang/lib/Headers/emmintrin.h b/clang/lib/Headers/emmintrin.h
index 78e8a42..60d2000 100644
--- a/clang/lib/Headers/emmintrin.h
+++ b/clang/lib/Headers/emmintrin.h
@@ -2127,8 +2127,9 @@ _mm_add_epi32(__m128i __a, __m128i __b) {
/// \param __b
/// A 64-bit integer.
/// \returns A 64-bit integer containing the sum of both parameters.
-static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_add_si64(__m64 __a, __m64 __b) {
- return (__m64)(((unsigned long long)__a) + ((unsigned long long)__b));
+static __inline__ __m64 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_add_si64(__m64 __a,
+ __m64 __b) {
+ return (__m64)(((__v1du)__a)[0] + ((__v1du)__b)[0]);
}
/// Adds the corresponding elements of two 128-bit vectors of [2 x i64],
@@ -2393,8 +2394,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epu8(__m128i __a,
/// A 128-bit signed [8 x i16] vector.
/// \returns A 128-bit signed [8 x i16] vector containing the upper 16 bits of
/// each of the eight 32-bit products.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mulhi_epi16(__m128i __a,
- __m128i __b) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_mulhi_epi16(__m128i __a, __m128i __b) {
return (__m128i)__builtin_ia32_pmulhw128((__v8hi)__a, (__v8hi)__b);
}
@@ -2412,9 +2413,9 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mulhi_epi16(__m128i __a,
/// A 128-bit unsigned [8 x i16] vector.
/// \returns A 128-bit unsigned [8 x i16] vector containing the upper 16 bits
/// of each of the eight 32-bit products.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mulhi_epu16(__m128i __a,
- __m128i __b) {
- return (__m128i)__builtin_ia32_pmulhuw128((__v8hi)__a, (__v8hi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_mulhi_epu16(__m128i __a, __m128i __b) {
+ return (__m128i)__builtin_ia32_pmulhuw128((__v8hu)__a, (__v8hu)__b);
}
/// Multiplies the corresponding elements of two signed [8 x i16]
@@ -2431,8 +2432,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mulhi_epu16(__m128i __a,
/// A 128-bit signed [8 x i16] vector.
/// \returns A 128-bit signed [8 x i16] vector containing the lower 16 bits of
/// each of the eight 32-bit products.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mullo_epi16(__m128i __a,
- __m128i __b) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_mullo_epi16(__m128i __a, __m128i __b) {
return (__m128i)((__v8hu)__a * (__v8hu)__b);
}
@@ -2557,8 +2558,9 @@ _mm_sub_epi32(__m128i __a, __m128i __b) {
/// A 64-bit integer vector containing the subtrahend.
/// \returns A 64-bit integer vector containing the difference of the values in
/// the operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_sub_si64(__m64 __a, __m64 __b) {
- return (__m64)((unsigned long long)__a - (unsigned long long)__b);
+static __inline__ __m64 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_sub_si64(__m64 __a,
+ __m64 __b) {
+ return (__m64)(((__v1du)__a)[0] - ((__v1du)__b)[0]);
}
/// Subtracts the corresponding elements of two [2 x i64] vectors.
@@ -2676,8 +2678,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epu16(__m128i __a,
/// A 128-bit integer vector containing one of the source operands.
/// \returns A 128-bit integer vector containing the bitwise AND of the values
/// in both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_and_si128(__m128i __a,
- __m128i __b) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_and_si128(__m128i __a, __m128i __b) {
return (__m128i)((__v2du)__a & (__v2du)__b);
}
@@ -2695,8 +2697,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_and_si128(__m128i __a,
/// A 128-bit vector containing the right source operand.
/// \returns A 128-bit integer vector containing the bitwise AND of the one's
/// complement of the first operand and the values in the second operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_andnot_si128(__m128i __a,
- __m128i __b) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_andnot_si128(__m128i __a, __m128i __b) {
return (__m128i)(~(__v2du)__a & (__v2du)__b);
}
/// Performs a bitwise OR of two 128-bit integer vectors.
@@ -2711,8 +2713,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_andnot_si128(__m128i __a,
/// A 128-bit integer vector containing one of the source operands.
/// \returns A 128-bit integer vector containing the bitwise OR of the values
/// in both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_or_si128(__m128i __a,
- __m128i __b) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_or_si128(__m128i __a, __m128i __b) {
return (__m128i)((__v2du)__a | (__v2du)__b);
}
@@ -2728,8 +2730,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_or_si128(__m128i __a,
/// A 128-bit integer vector containing one of the source operands.
/// \returns A 128-bit integer vector containing the bitwise exclusive OR of the
/// values in both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_xor_si128(__m128i __a,
- __m128i __b) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_xor_si128(__m128i __a, __m128i __b) {
return (__m128i)((__v2du)__a ^ (__v2du)__b);
}
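As with the AVX helpers, the SSE2 integer operations converted above become usable in constant expressions from C++; note that _mm_add_si64/_mm_sub_si64 now read the single 64-bit lane through a __v1du element access rather than casting the whole __m64 to a scalar. A brief sketch with illustrative values, assuming constant evaluation is available:
    constexpr __m64 a = {5}, b = {7};
    constexpr __m64 s = _mm_add_si64(a, b);
    static_assert(s[0] == 12, "");
    constexpr __m128i x = {0xF0, 0x0F}, y = {0xFF, 0xFF};
    constexpr __m128i z = _mm_and_si128(x, y);
    static_assert(z[0] == 0xF0 && z[1] == 0x0F, "");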
diff --git a/clang/lib/Headers/fma4intrin.h b/clang/lib/Headers/fma4intrin.h
index 694801b..69977fb 100644
--- a/clang/lib/Headers/fma4intrin.h
+++ b/clang/lib/Headers/fma4intrin.h
@@ -23,13 +23,15 @@
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_macc_ps(__m128 __A, __m128 __B, __m128 __C)
{
- return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
+ return (__m128)__builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B,
+ (__v4sf)__C);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_macc_pd(__m128d __A, __m128d __B, __m128d __C)
{
- return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
+ return (__m128d)__builtin_elementwise_fma((__v2df)__A, (__v2df)__B,
+ (__v2df)__C);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128
@@ -47,13 +49,15 @@ _mm_macc_sd(__m128d __A, __m128d __B, __m128d __C)
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_msub_ps(__m128 __A, __m128 __B, __m128 __C)
{
- return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);
+ return (__m128)__builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B,
+ -(__v4sf)__C);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_msub_pd(__m128d __A, __m128d __B, __m128d __C)
{
- return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, -(__v2df)__C);
+ return (__m128d)__builtin_elementwise_fma((__v2df)__A, (__v2df)__B,
+ -(__v2df)__C);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128
@@ -71,13 +75,15 @@ _mm_msub_sd(__m128d __A, __m128d __B, __m128d __C)
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_nmacc_ps(__m128 __A, __m128 __B, __m128 __C)
{
- return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
+ return (__m128)__builtin_elementwise_fma(-(__v4sf)__A, (__v4sf)__B,
+ (__v4sf)__C);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_nmacc_pd(__m128d __A, __m128d __B, __m128d __C)
{
- return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, (__v2df)__C);
+ return (__m128d)__builtin_elementwise_fma(-(__v2df)__A, (__v2df)__B,
+ (__v2df)__C);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128
@@ -95,13 +101,15 @@ _mm_nmacc_sd(__m128d __A, __m128d __B, __m128d __C)
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_nmsub_ps(__m128 __A, __m128 __B, __m128 __C)
{
- return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);
+ return (__m128)__builtin_elementwise_fma(-(__v4sf)__A, (__v4sf)__B,
+ -(__v4sf)__C);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_nmsub_pd(__m128d __A, __m128d __B, __m128d __C)
{
- return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, -(__v2df)__C);
+ return (__m128d)__builtin_elementwise_fma(-(__v2df)__A, (__v2df)__B,
+ -(__v2df)__C);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128
@@ -143,49 +151,57 @@ _mm_msubadd_pd(__m128d __A, __m128d __B, __m128d __C)
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_macc_ps(__m256 __A, __m256 __B, __m256 __C)
{
- return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
+ return (__m256)__builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B,
+ (__v8sf)__C);
}
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_macc_pd(__m256d __A, __m256d __B, __m256d __C)
{
- return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);
+ return (__m256d)__builtin_elementwise_fma((__v4df)__A, (__v4df)__B,
+ (__v4df)__C);
}
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_msub_ps(__m256 __A, __m256 __B, __m256 __C)
{
- return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C);
+ return (__m256)__builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B,
+ -(__v8sf)__C);
}
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_msub_pd(__m256d __A, __m256d __B, __m256d __C)
{
- return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, -(__v4df)__C);
+ return (__m256d)__builtin_elementwise_fma((__v4df)__A, (__v4df)__B,
+ -(__v4df)__C);
}
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_nmacc_ps(__m256 __A, __m256 __B, __m256 __C)
{
- return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
+ return (__m256)__builtin_elementwise_fma(-(__v8sf)__A, (__v8sf)__B,
+ (__v8sf)__C);
}
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_nmacc_pd(__m256d __A, __m256d __B, __m256d __C)
{
- return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, (__v4df)__C);
+ return (__m256d)__builtin_elementwise_fma(-(__v4df)__A, (__v4df)__B,
+ (__v4df)__C);
}
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_nmsub_ps(__m256 __A, __m256 __B, __m256 __C)
{
- return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, -(__v8sf)__C);
+ return (__m256)__builtin_elementwise_fma(-(__v8sf)__A, (__v8sf)__B,
+ -(__v8sf)__C);
}
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_nmsub_pd(__m256d __A, __m256d __B, __m256d __C)
{
- return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, -(__v4df)__C);
+ return (__m256d)__builtin_elementwise_fma(-(__v4df)__A, (__v4df)__B,
+ -(__v4df)__C);
}
static __inline__ __m256 __DEFAULT_FN_ATTRS256
diff --git a/clang/lib/Headers/fmaintrin.h b/clang/lib/Headers/fmaintrin.h
index 22d1a78..24584a9 100644
--- a/clang/lib/Headers/fmaintrin.h
+++ b/clang/lib/Headers/fmaintrin.h
@@ -35,7 +35,8 @@
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fmadd_ps(__m128 __A, __m128 __B, __m128 __C)
{
- return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
+ return (__m128)__builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B,
+ (__v4sf)__C);
}
/// Computes a multiply-add of 128-bit vectors of [2 x double].
@@ -55,7 +56,8 @@ _mm_fmadd_ps(__m128 __A, __m128 __B, __m128 __C)
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fmadd_pd(__m128d __A, __m128d __B, __m128d __C)
{
- return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
+ return (__m128d)__builtin_elementwise_fma((__v2df)__A, (__v2df)__B,
+ (__v2df)__C);
}
/// Computes a scalar multiply-add of the single-precision values in the
@@ -133,7 +135,8 @@ _mm_fmadd_sd(__m128d __A, __m128d __B, __m128d __C)
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fmsub_ps(__m128 __A, __m128 __B, __m128 __C)
{
- return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);
+ return (__m128)__builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B,
+ -(__v4sf)__C);
}
/// Computes a multiply-subtract of 128-bit vectors of [2 x double].
@@ -153,7 +156,8 @@ _mm_fmsub_ps(__m128 __A, __m128 __B, __m128 __C)
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fmsub_pd(__m128d __A, __m128d __B, __m128d __C)
{
- return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, -(__v2df)__C);
+ return (__m128d)__builtin_elementwise_fma((__v2df)__A, (__v2df)__B,
+ -(__v2df)__C);
}
/// Computes a scalar multiply-subtract of the single-precision values in
@@ -231,7 +235,8 @@ _mm_fmsub_sd(__m128d __A, __m128d __B, __m128d __C)
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C)
{
- return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
+ return (__m128)__builtin_elementwise_fma(-(__v4sf)__A, (__v4sf)__B,
+ (__v4sf)__C);
}
/// Computes a negated multiply-add of 128-bit vectors of [2 x double].
@@ -251,7 +256,8 @@ _mm_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C)
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C)
{
- return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, (__v2df)__C);
+ return (__m128d)__builtin_elementwise_fma(-(__v2df)__A, (__v2df)__B,
+ (__v2df)__C);
}
/// Computes a scalar negated multiply-add of the single-precision values in
@@ -329,7 +335,8 @@ _mm_fnmadd_sd(__m128d __A, __m128d __B, __m128d __C)
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C)
{
- return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);
+ return (__m128)__builtin_elementwise_fma(-(__v4sf)__A, (__v4sf)__B,
+ -(__v4sf)__C);
}
/// Computes a negated multiply-subtract of 128-bit vectors of [2 x double].
@@ -349,7 +356,8 @@ _mm_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C)
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C)
{
- return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, -(__v2df)__C);
+ return (__m128d)__builtin_elementwise_fma(-(__v2df)__A, (__v2df)__B,
+ -(__v2df)__C);
}
/// Computes a scalar negated multiply-subtract of the single-precision
@@ -531,7 +539,8 @@ _mm_fmsubadd_pd(__m128d __A, __m128d __B, __m128d __C)
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_fmadd_ps(__m256 __A, __m256 __B, __m256 __C)
{
- return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
+ return (__m256)__builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B,
+ (__v8sf)__C);
}
/// Computes a multiply-add of 256-bit vectors of [4 x double].
@@ -551,7 +560,8 @@ _mm256_fmadd_ps(__m256 __A, __m256 __B, __m256 __C)
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_fmadd_pd(__m256d __A, __m256d __B, __m256d __C)
{
- return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);
+ return (__m256d)__builtin_elementwise_fma((__v4df)__A, (__v4df)__B,
+ (__v4df)__C);
}
/// Computes a multiply-subtract of 256-bit vectors of [8 x float].
@@ -571,7 +581,8 @@ _mm256_fmadd_pd(__m256d __A, __m256d __B, __m256d __C)
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_fmsub_ps(__m256 __A, __m256 __B, __m256 __C)
{
- return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C);
+ return (__m256)__builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B,
+ -(__v8sf)__C);
}
/// Computes a multiply-subtract of 256-bit vectors of [4 x double].
@@ -591,7 +602,8 @@ _mm256_fmsub_ps(__m256 __A, __m256 __B, __m256 __C)
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_fmsub_pd(__m256d __A, __m256d __B, __m256d __C)
{
- return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, -(__v4df)__C);
+ return (__m256d)__builtin_elementwise_fma((__v4df)__A, (__v4df)__B,
+ -(__v4df)__C);
}
/// Computes a negated multiply-add of 256-bit vectors of [8 x float].
@@ -611,7 +623,8 @@ _mm256_fmsub_pd(__m256d __A, __m256d __B, __m256d __C)
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C)
{
- return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
+ return (__m256)__builtin_elementwise_fma(-(__v8sf)__A, (__v8sf)__B,
+ (__v8sf)__C);
}
/// Computes a negated multiply-add of 256-bit vectors of [4 x double].
@@ -631,7 +644,8 @@ _mm256_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C)
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C)
{
- return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, (__v4df)__C);
+ return (__m256d)__builtin_elementwise_fma(-(__v4df)__A, (__v4df)__B,
+ (__v4df)__C);
}
/// Computes a negated multiply-subtract of 256-bit vectors of [8 x float].
@@ -651,7 +665,8 @@ _mm256_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C)
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C)
{
- return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, -(__v8sf)__C);
+ return (__m256)__builtin_elementwise_fma(-(__v8sf)__A, (__v8sf)__B,
+ -(__v8sf)__C);
}
/// Computes a negated multiply-subtract of 256-bit vectors of [4 x double].
@@ -671,7 +686,8 @@ _mm256_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C)
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C)
{
- return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, -(__v4df)__C);
+ return (__m256d)__builtin_elementwise_fma(-(__v4df)__A, (__v4df)__B,
+ -(__v4df)__C);
}
/// Computes a multiply with alternating add/subtract of 256-bit vectors of
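Illustrative sketch (not part of the patch above): the fma4intrin.h and fmaintrin.h hunks
replace the target-specific __builtin_ia32_vfmadd* builtins with the generic
__builtin_elementwise_fma, with negated operands expressing the *sub and n* variants. The
public semantics are unchanged; the example below assumes an FMA-capable target (compile
with -mfma).

    #include <immintrin.h>
    #include <cmath>
    #include <cstdio>

    int main() {
      __m128 a = _mm_set_ps(1.0f, 2.0f, 3.0f, 4.0f);
      __m128 b = _mm_set_ps(5.0f, 6.0f, 7.0f, 8.0f);
      __m128 c = _mm_set1_ps(0.5f);
      __m128 r = _mm_fmsub_ps(a, b, c); // per lane: a*b - c, i.e. fma(a, b, -c)
      alignas(16) float rv[4], av[4], bv[4], cv[4];
      _mm_store_ps(rv, r);
      _mm_store_ps(av, a); _mm_store_ps(bv, b); _mm_store_ps(cv, c);
      for (int i = 0; i < 4; ++i) // each printed pair should match
        std::printf("%g %g\n", rv[i], std::fma(av[i], bv[i], -cv[i]));
      return 0;
    }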
diff --git a/clang/lib/Headers/mmintrin.h b/clang/lib/Headers/mmintrin.h
index dc0fa5c..3961b79 100644
--- a/clang/lib/Headers/mmintrin.h
+++ b/clang/lib/Headers/mmintrin.h
@@ -57,6 +57,9 @@ typedef char __v16qi __attribute__((__vector_size__(16)));
#define __trunc64(x) \
(__m64) __builtin_shufflevector((__v2di)(x), __extension__(__v2di){}, 0)
+#define __zext128(x) \
+ (__m128i) __builtin_shufflevector((__v2si)(x), __extension__(__v2si){}, 0, \
+ 1, 2, 3)
#define __anyext128(x) \
(__m128i) __builtin_shufflevector((__v2si)(x), __extension__(__v2si){}, 0, \
1, -1, -1)
@@ -85,7 +88,7 @@ _mm_empty(void) {
/// A 32-bit integer value.
/// \returns A 64-bit integer vector. The lower 32 bits contain the value of the
/// parameter. The upper 32 bits are set to 0.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_cvtsi32_si64(int __i)
{
return __extension__ (__m64)(__v2si){__i, 0};
@@ -102,7 +105,7 @@ _mm_cvtsi32_si64(int __i)
/// A 64-bit integer vector.
/// \returns A 32-bit signed integer value containing the lower 32 bits of the
/// parameter.
-static __inline__ int __DEFAULT_FN_ATTRS_SSE2
+static __inline__ int __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_cvtsi64_si32(__m64 __m)
{
return ((__v2si)__m)[0];
@@ -118,10 +121,10 @@ _mm_cvtsi64_si32(__m64 __m)
/// A 64-bit signed integer.
/// \returns A 64-bit integer vector containing the same bitwise pattern as the
/// parameter.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_cvtsi64_m64(long long __i)
{
- return (__m64)__i;
+ return __extension__ (__m64)(__v1di){__i};
}
/// Casts a 64-bit integer vector into a 64-bit signed integer value.
@@ -134,10 +137,10 @@ _mm_cvtsi64_m64(long long __i)
/// A 64-bit integer vector.
/// \returns A 64-bit signed integer containing the same bitwise pattern as the
/// parameter.
-static __inline__ long long __DEFAULT_FN_ATTRS_SSE2
+static __inline__ long long __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_cvtm64_si64(__m64 __m)
{
- return (long long)__m;
+ return ((__v1di)__m)[0];
}
/// Converts, with saturation, 16-bit signed integers from both 64-bit integer
@@ -379,7 +382,7 @@ _mm_unpacklo_pi32(__m64 __m1, __m64 __m2)
/// A 64-bit integer vector of [8 x i8].
/// \returns A 64-bit integer vector of [8 x i8] containing the sums of both
/// parameters.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_add_pi8(__m64 __m1, __m64 __m2)
{
return (__m64)(((__v8qu)__m1) + ((__v8qu)__m2));
@@ -400,7 +403,7 @@ _mm_add_pi8(__m64 __m1, __m64 __m2)
/// A 64-bit integer vector of [4 x i16].
/// \returns A 64-bit integer vector of [4 x i16] containing the sums of both
/// parameters.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_add_pi16(__m64 __m1, __m64 __m2)
{
return (__m64)(((__v4hu)__m1) + ((__v4hu)__m2));
@@ -421,7 +424,7 @@ _mm_add_pi16(__m64 __m1, __m64 __m2)
/// A 64-bit integer vector of [2 x i32].
/// \returns A 64-bit integer vector of [2 x i32] containing the sums of both
/// parameters.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_add_pi32(__m64 __m1, __m64 __m2)
{
return (__m64)(((__v2su)__m1) + ((__v2su)__m2));
@@ -536,7 +539,7 @@ _mm_adds_pu16(__m64 __m1, __m64 __m2)
/// A 64-bit integer vector of [8 x i8] containing the subtrahends.
/// \returns A 64-bit integer vector of [8 x i8] containing the differences of
/// both parameters.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_sub_pi8(__m64 __m1, __m64 __m2)
{
return (__m64)(((__v8qu)__m1) - ((__v8qu)__m2));
@@ -557,7 +560,7 @@ _mm_sub_pi8(__m64 __m1, __m64 __m2)
/// A 64-bit integer vector of [4 x i16] containing the subtrahends.
/// \returns A 64-bit integer vector of [4 x i16] containing the differences of
/// both parameters.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_sub_pi16(__m64 __m1, __m64 __m2)
{
return (__m64)(((__v4hu)__m1) - ((__v4hu)__m2));
@@ -578,7 +581,7 @@ _mm_sub_pi16(__m64 __m1, __m64 __m2)
/// A 64-bit integer vector of [2 x i32] containing the subtrahends.
/// \returns A 64-bit integer vector of [2 x i32] containing the differences of
/// both parameters.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_sub_pi32(__m64 __m1, __m64 __m2)
{
return (__m64)(((__v2su)__m1) - ((__v2su)__m2));
@@ -723,11 +726,11 @@ _mm_madd_pi16(__m64 __m1, __m64 __m2)
/// A 64-bit integer vector of [4 x i16].
/// \returns A 64-bit integer vector of [4 x i16] containing the upper 16 bits
/// of the products of both parameters.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_mulhi_pi16(__m64 __m1, __m64 __m2)
{
- return __trunc64(__builtin_ia32_pmulhw128((__v8hi)__anyext128(__m1),
- (__v8hi)__anyext128(__m2)));
+ return __trunc64(__builtin_ia32_pmulhw128((__v8hi)__zext128(__m1),
+ (__v8hi)__zext128(__m2)));
}
/// Multiplies each 16-bit signed integer element of the first 64-bit
@@ -745,7 +748,7 @@ _mm_mulhi_pi16(__m64 __m1, __m64 __m2)
/// A 64-bit integer vector of [4 x i16].
/// \returns A 64-bit integer vector of [4 x i16] containing the lower 16 bits
/// of the products of both parameters.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_mullo_pi16(__m64 __m1, __m64 __m2)
{
return (__m64)(((__v4hu)__m1) * ((__v4hu)__m2));
@@ -1134,7 +1137,7 @@ _mm_srli_si64(__m64 __m, int __count)
/// A 64-bit integer vector.
/// \returns A 64-bit integer vector containing the bitwise AND of both
/// parameters.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_and_si64(__m64 __m1, __m64 __m2)
{
return (__m64)(((__v1du)__m1) & ((__v1du)__m2));
@@ -1155,7 +1158,7 @@ _mm_and_si64(__m64 __m1, __m64 __m2)
/// A 64-bit integer vector.
/// \returns A 64-bit integer vector containing the bitwise AND of the second
/// parameter and the one's complement of the first parameter.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_andnot_si64(__m64 __m1, __m64 __m2)
{
return (__m64)(~((__v1du)__m1) & ((__v1du)__m2));
@@ -1173,7 +1176,7 @@ _mm_andnot_si64(__m64 __m1, __m64 __m2)
/// A 64-bit integer vector.
/// \returns A 64-bit integer vector containing the bitwise OR of both
/// parameters.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_or_si64(__m64 __m1, __m64 __m2)
{
return (__m64)(((__v1du)__m1) | ((__v1du)__m2));
@@ -1191,7 +1194,7 @@ _mm_or_si64(__m64 __m1, __m64 __m2)
/// A 64-bit integer vector.
/// \returns A 64-bit integer vector containing the bitwise exclusive OR of both
/// parameters.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_xor_si64(__m64 __m1, __m64 __m2)
{
return (__m64)(((__v1du)__m1) ^ ((__v1du)__m2));
@@ -1213,7 +1216,7 @@ _mm_xor_si64(__m64 __m1, __m64 __m2)
/// A 64-bit integer vector of [8 x i8].
/// \returns A 64-bit integer vector of [8 x i8] containing the comparison
/// results.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_cmpeq_pi8(__m64 __m1, __m64 __m2)
{
return (__m64)(((__v8qi)__m1) == ((__v8qi)__m2));
@@ -1235,7 +1238,7 @@ _mm_cmpeq_pi8(__m64 __m1, __m64 __m2)
/// A 64-bit integer vector of [4 x i16].
/// \returns A 64-bit integer vector of [4 x i16] containing the comparison
/// results.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_cmpeq_pi16(__m64 __m1, __m64 __m2)
{
return (__m64)(((__v4hi)__m1) == ((__v4hi)__m2));
@@ -1257,7 +1260,7 @@ _mm_cmpeq_pi16(__m64 __m1, __m64 __m2)
/// A 64-bit integer vector of [2 x i32].
/// \returns A 64-bit integer vector of [2 x i32] containing the comparison
/// results.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_cmpeq_pi32(__m64 __m1, __m64 __m2)
{
return (__m64)(((__v2si)__m1) == ((__v2si)__m2));
@@ -1279,7 +1282,7 @@ _mm_cmpeq_pi32(__m64 __m1, __m64 __m2)
/// A 64-bit integer vector of [8 x i8].
/// \returns A 64-bit integer vector of [8 x i8] containing the comparison
/// results.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_cmpgt_pi8(__m64 __m1, __m64 __m2)
{
/* This function always performs a signed comparison, but __v8qi is a char
@@ -1303,7 +1306,7 @@ _mm_cmpgt_pi8(__m64 __m1, __m64 __m2)
/// A 64-bit integer vector of [4 x i16].
/// \returns A 64-bit integer vector of [4 x i16] containing the comparison
/// results.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_cmpgt_pi16(__m64 __m1, __m64 __m2)
{
return (__m64)((__v4hi)__m1 > (__v4hi)__m2);
@@ -1325,7 +1328,7 @@ _mm_cmpgt_pi16(__m64 __m1, __m64 __m2)
/// A 64-bit integer vector of [2 x i32].
/// \returns A 64-bit integer vector of [2 x i32] containing the comparison
/// results.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_cmpgt_pi32(__m64 __m1, __m64 __m2)
{
return (__m64)((__v2si)__m1 > (__v2si)__m2);
diff --git a/clang/lib/Headers/xmmintrin.h b/clang/lib/Headers/xmmintrin.h
index 6a64369..7bf6b84 100644
--- a/clang/lib/Headers/xmmintrin.h
+++ b/clang/lib/Headers/xmmintrin.h
@@ -24,6 +24,7 @@ typedef float __m128_u __attribute__((__vector_size__(16), __aligned__(1)));
/* Unsigned types */
typedef unsigned int __v4su __attribute__((__vector_size__(16)));
+typedef unsigned short __v8hu __attribute__((__vector_size__(16)));
/* This header should only be included in a hosted environment as it depends on
* a standard library to provide allocation routines. */
@@ -2447,11 +2448,11 @@ _mm_movemask_pi8(__m64 __a)
/// \param __b
/// A 64-bit integer vector containing one of the source operands.
/// \returns A 64-bit integer vector containing the products of both operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
_mm_mulhi_pu16(__m64 __a, __m64 __b)
{
- return __trunc64(__builtin_ia32_pmulhuw128((__v8hi)__anyext128(__a),
- (__v8hi)__anyext128(__b)));
+ return __trunc64(__builtin_ia32_pmulhuw128((__v8hu)__zext128(__a),
+ (__v8hu)__zext128(__b)));
}
/// Shuffles the 4 16-bit integers from a 64-bit integer vector to the
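Illustrative sketch (not part of the patch above): the new __zext128 widens an __m64 into
an __m128i with the upper 64 bits forced to zero, while the existing __anyext128 leaves
them unspecified; _mm_mulhi_pi16 and _mm_mulhi_pu16 now use the zero-extending form. The
helpers below are local stand-ins with hypothetical names, not the header-internal macros.

    typedef int v2si __attribute__((__vector_size__(8)));
    typedef int v4si __attribute__((__vector_size__(16)));

    // Indices 0,1 select the lanes of x; indices 2,3 select lanes of the zero
    // vector, so the upper 64 bits of the result are guaranteed to be zero.
    static v4si zext128(v2si x) {
      return __builtin_shufflevector(x, (v2si){0, 0}, 0, 1, 2, 3);
    }

    // -1 indices mean "don't care": the upper 64 bits are left unspecified.
    static v4si anyext128(v2si x) {
      return __builtin_shufflevector(x, (v2si){0, 0}, 0, 1, -1, -1);
    }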
diff --git a/clang/lib/Index/IndexTypeSourceInfo.cpp b/clang/lib/Index/IndexTypeSourceInfo.cpp
index adc33b3..9a699c3 100644
--- a/clang/lib/Index/IndexTypeSourceInfo.cpp
+++ b/clang/lib/Index/IndexTypeSourceInfo.cpp
@@ -59,7 +59,7 @@ public:
bool VisitTypedefTypeLoc(TypedefTypeLoc TL) {
SourceLocation Loc = TL.getNameLoc();
- TypedefNameDecl *ND = TL.getTypedefNameDecl();
+ TypedefNameDecl *ND = TL.getDecl();
if (ND->isTransparentTag()) {
TagDecl *Underlying = ND->getUnderlyingType()->getAsTagDecl();
return IndexCtx.handleReference(Underlying, Loc, Parent,
@@ -117,7 +117,7 @@ public:
}
bool VisitTagTypeLoc(TagTypeLoc TL) {
- TagDecl *D = TL.getDecl();
+ TagDecl *D = TL.getOriginalDecl();
if (!IndexCtx.shouldIndexFunctionLocalSymbols() &&
D->getParentFunctionOrMethod())
return true;
@@ -172,7 +172,8 @@ public:
return true;
}
- bool TraverseTemplateSpecializationTypeLoc(TemplateSpecializationTypeLoc TL) {
+ bool TraverseTemplateSpecializationTypeLoc(TemplateSpecializationTypeLoc TL,
+ bool TraverseQualifier) {
if (!WalkUpFromTemplateSpecializationTypeLoc(TL))
return false;
if (!TraverseTemplateName(TL.getTypePtr()->getTemplateName()))
@@ -202,11 +203,6 @@ public:
return true;
}
- bool VisitInjectedClassNameTypeLoc(InjectedClassNameTypeLoc TL) {
- return IndexCtx.handleReference(TL.getDecl(), TL.getNameLoc(), Parent,
- ParentDC, SymbolRoleSet(), Relations);
- }
-
bool VisitDependentNameTypeLoc(DependentNameTypeLoc TL) {
std::vector<const NamedDecl *> Symbols =
IndexCtx.getResolver()->resolveDependentNameType(TL.getTypePtr());
@@ -248,32 +244,28 @@ void IndexingContext::indexTypeLoc(TypeLoc TL,
TypeIndexer(*this, Parent, DC, isBase, isIBType).TraverseTypeLoc(TL);
}
-void IndexingContext::indexNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
- const NamedDecl *Parent,
- const DeclContext *DC) {
- if (!NNS)
- return;
-
- if (NestedNameSpecifierLoc Prefix = NNS.getPrefix())
- indexNestedNameSpecifierLoc(Prefix, Parent, DC);
-
+void IndexingContext::indexNestedNameSpecifierLoc(
+ NestedNameSpecifierLoc QualifierLoc, const NamedDecl *Parent,
+ const DeclContext *DC) {
if (!DC)
DC = Parent->getLexicalDeclContext();
- SourceLocation Loc = NNS.getLocalBeginLoc();
-
- switch (NNS.getNestedNameSpecifier()->getKind()) {
- case NestedNameSpecifier::Identifier:
- case NestedNameSpecifier::Global:
- case NestedNameSpecifier::Super:
+ switch (NestedNameSpecifier Qualifier = QualifierLoc.getNestedNameSpecifier();
+ Qualifier.getKind()) {
+ case NestedNameSpecifier::Kind::Null:
+ case NestedNameSpecifier::Kind::Global:
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
break;
- case NestedNameSpecifier::Namespace:
- handleReference(NNS.getNestedNameSpecifier()->getAsNamespace(),
- Loc, Parent, DC, SymbolRoleSet());
+ case NestedNameSpecifier::Kind::Namespace: {
+ auto [Namespace, Prefix] = QualifierLoc.castAsNamespaceAndPrefix();
+ indexNestedNameSpecifierLoc(Prefix, Parent, DC);
+ handleReference(Namespace, QualifierLoc.getLocalBeginLoc(), Parent, DC,
+ SymbolRoleSet());
break;
+ }
- case NestedNameSpecifier::TypeSpec:
- indexTypeLoc(NNS.getTypeLoc(), Parent, DC);
+ case NestedNameSpecifier::Kind::Type:
+ indexTypeLoc(QualifierLoc.castAsTypeLoc(), Parent, DC);
break;
}
}
diff --git a/clang/lib/Index/USRGeneration.cpp b/clang/lib/Index/USRGeneration.cpp
index 6a884f7..6bdddcc6 100644
--- a/clang/lib/Index/USRGeneration.cpp
+++ b/clang/lib/Index/USRGeneration.cpp
@@ -653,14 +653,14 @@ bool USRGenerator::GenLoc(const Decl *D, bool IncludeOffset) {
}
static void printQualifier(llvm::raw_ostream &Out, const LangOptions &LangOpts,
- NestedNameSpecifier *NNS) {
+ NestedNameSpecifier NNS) {
// FIXME: Encode the qualifier, don't just print it.
PrintingPolicy PO(LangOpts);
PO.SuppressTagKeyword = true;
PO.SuppressUnwrittenScope = true;
PO.ConstantArraySizeAsWritten = false;
PO.AnonymousTagLocations = false;
- NNS->print(Out, PO);
+ NNS.print(Out, PO);
}
void USRGenerator::VisitType(QualType T) {
@@ -910,9 +910,14 @@ void USRGenerator::VisitType(QualType T) {
continue;
}
if (const TagType *TT = T->getAs<TagType>()) {
- Out << '$';
- VisitTagDecl(TT->getDecl());
- return;
+ if (const auto *ICNT = dyn_cast<InjectedClassNameType>(TT)) {
+ T = ICNT->getOriginalDecl()->getCanonicalTemplateSpecializationType(
+ Ctx);
+ } else {
+ Out << '$';
+ VisitTagDecl(TT->getOriginalDecl());
+ return;
+ }
}
if (const ObjCInterfaceType *OIT = T->getAs<ObjCInterfaceType>()) {
Out << '$';
@@ -945,10 +950,6 @@ void USRGenerator::VisitType(QualType T) {
Out << ':' << DNT->getIdentifier()->getName();
return;
}
- if (const InjectedClassNameType *InjT = T->getAs<InjectedClassNameType>()) {
- T = InjT->getInjectedSpecializationType();
- continue;
- }
if (const auto *VT = T->getAs<VectorType>()) {
Out << (T->isExtVectorType() ? ']' : '[');
Out << VT->getNumElements();
diff --git a/clang/lib/InstallAPI/Visitor.cpp b/clang/lib/InstallAPI/Visitor.cpp
index 487be2c..f12e040 100644
--- a/clang/lib/InstallAPI/Visitor.cpp
+++ b/clang/lib/InstallAPI/Visitor.cpp
@@ -424,7 +424,7 @@ std::string
InstallAPIVisitor::getMangledCXXRTTIName(const CXXRecordDecl *D) const {
SmallString<256> Name;
raw_svector_ostream NameStream(Name);
- MC->mangleCXXRTTIName(QualType(D->getTypeForDecl(), 0), NameStream);
+ MC->mangleCXXRTTIName(MC->getASTContext().getCanonicalTagType(D), NameStream);
return getBackendMangledName(Name);
}
@@ -432,7 +432,7 @@ InstallAPIVisitor::getMangledCXXRTTIName(const CXXRecordDecl *D) const {
std::string InstallAPIVisitor::getMangledCXXRTTI(const CXXRecordDecl *D) const {
SmallString<256> Name;
raw_svector_ostream NameStream(Name);
- MC->mangleCXXRTTI(QualType(D->getTypeForDecl(), 0), NameStream);
+ MC->mangleCXXRTTI(MC->getASTContext().getCanonicalTagType(D), NameStream);
return getBackendMangledName(Name);
}
@@ -543,8 +543,8 @@ void InstallAPIVisitor::emitVTableSymbols(const CXXRecordDecl *D,
}
for (const auto &It : D->bases()) {
- const CXXRecordDecl *Base =
- cast<CXXRecordDecl>(It.getType()->castAs<RecordType>()->getDecl());
+ const CXXRecordDecl *Base = cast<CXXRecordDecl>(
+ It.getType()->castAs<RecordType>()->getOriginalDecl());
const auto BaseAccess = getAccessForDecl(Base);
if (!BaseAccess)
continue;
diff --git a/clang/lib/Interpreter/InterpreterValuePrinter.cpp b/clang/lib/Interpreter/InterpreterValuePrinter.cpp
index 0ea6274..4f5f43e 100644
--- a/clang/lib/Interpreter/InterpreterValuePrinter.cpp
+++ b/clang/lib/Interpreter/InterpreterValuePrinter.cpp
@@ -66,10 +66,10 @@ static std::string QualTypeToString(ASTContext &Ctx, QualType QT) {
const QualType NonRefTy = QT.getNonReferenceType();
if (const auto *TTy = llvm::dyn_cast<TagType>(NonRefTy))
- return DeclTypeToString(NonRefTy, TTy->getDecl());
+ return DeclTypeToString(NonRefTy, TTy->getOriginalDecl());
if (const auto *TRy = dyn_cast<RecordType>(NonRefTy))
- return DeclTypeToString(NonRefTy, TRy->getDecl());
+ return DeclTypeToString(NonRefTy, TRy->getOriginalDecl());
const QualType Canon = NonRefTy.getCanonicalType();
@@ -105,7 +105,7 @@ static std::string EnumToString(const Value &V) {
const EnumType *EnumTy = DesugaredTy.getNonReferenceType()->getAs<EnumType>();
assert(EnumTy && "Fail to cast to enum type");
- EnumDecl *ED = EnumTy->getDecl();
+ EnumDecl *ED = EnumTy->getOriginalDecl()->getDefinitionOrSelf();
uint64_t Data = V.convertTo<uint64_t>();
bool IsFirst = true;
llvm::APSInt AP = Ctx.MakeIntValue(Data, DesugaredTy);
@@ -666,7 +666,7 @@ __clang_Interpreter_SetValueNoAlloc(void *This, void *OutVal, void *OpaqueType,
VRef.setPtr(va_arg(args, void *));
} else {
if (const auto *ET = QT->getAs<EnumType>())
- QT = ET->getDecl()->getIntegerType();
+ QT = ET->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
switch (QT->castAs<BuiltinType>()->getKind()) {
default:
llvm_unreachable("unknown type kind!");
diff --git a/clang/lib/Interpreter/RemoteJITUtils.cpp b/clang/lib/Interpreter/RemoteJITUtils.cpp
index c0e663b..c100f46 100644
--- a/clang/lib/Interpreter/RemoteJITUtils.cpp
+++ b/clang/lib/Interpreter/RemoteJITUtils.cpp
@@ -33,6 +33,10 @@
using namespace llvm;
using namespace llvm::orc;
+#if LLVM_ON_UNIX
+static std::vector<pid_t> LaunchedExecutorPID;
+#endif
+
Expected<uint64_t> getSlabAllocSize(StringRef SizeString) {
SizeString = SizeString.trim();
@@ -89,9 +93,14 @@ createSharedMemoryManager(SimpleRemoteEPC &SREPC,
SlabSize, SREPC, SAs);
}
+// Launches an out-of-process executor for remote JIT. The calling program can
+// provide a CustomizeFork callback, which runs in the child process between
+// fork and exec. This lets the caller perform custom setup in the
+// out-of-process executor before it begins executing.
Expected<std::unique_ptr<SimpleRemoteEPC>>
launchExecutor(StringRef ExecutablePath, bool UseSharedMemory,
- llvm::StringRef SlabAllocateSizeString) {
+ llvm::StringRef SlabAllocateSizeString,
+ std::function<void()> CustomizeFork) {
#ifndef LLVM_ON_UNIX
// FIXME: Add support for Windows.
return make_error<StringError>("-" + ExecutablePath +
@@ -134,6 +143,9 @@ launchExecutor(StringRef ExecutablePath, bool UseSharedMemory,
close(ToExecutor[WriteEnd]);
close(FromExecutor[ReadEnd]);
+ if (CustomizeFork)
+ CustomizeFork();
+
// Execute the child process.
std::unique_ptr<char[]> ExecutorPath, FDSpecifier;
{
@@ -158,6 +170,8 @@ launchExecutor(StringRef ExecutablePath, bool UseSharedMemory,
}
// else we're the parent...
+ LaunchedExecutorPID.push_back(ChildPID);
+
// Close the child ends of the pipes
close(ToExecutor[ReadEnd]);
close(FromExecutor[WriteEnd]);
@@ -265,3 +279,18 @@ connectTCPSocket(StringRef NetworkAddress, bool UseSharedMemory,
std::move(S), *SockFD, *SockFD);
#endif
}
+
+#if LLVM_ON_UNIX
+
+pid_t getLastLaunchedExecutorPID() {
+ if (!LaunchedExecutorPID.size())
+ return -1;
+ return LaunchedExecutorPID.back();
+}
+
+pid_t getNthLaunchedExecutorPID(int n) {
+ if (n - 1 < 0 || n - 1 >= static_cast<int>(LaunchedExecutorPID.size()))
+ return -1;
+ return LaunchedExecutorPID.at(n - 1);
+}
+#endif
\ No newline at end of file
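Illustrative sketch (not part of the patch above): launchExecutor now accepts a
CustomizeFork callback and records each child PID, which can be queried with
getLastLaunchedExecutorPID / getNthLaunchedExecutorPID (1-based, -1 when out of range,
Unix only). The header path, the callback's default argument, and the environment
variable below are assumptions, not shown in this diff.

    #include "clang/Interpreter/RemoteJITUtils.h" // assumed header location
    #include <cstdlib>

    llvm::Expected<std::unique_ptr<llvm::orc::SimpleRemoteEPC>>
    launchWithSetup(llvm::StringRef ExecutorPath) {
      return launchExecutor(ExecutorPath, /*UseSharedMemory=*/false,
                            /*SlabAllocateSizeString=*/"",
                            /*CustomizeFork=*/[] {
                              // Runs in the child between fork and exec, before
                              // the executor image replaces this process.
                              setenv("MY_EXECUTOR_FLAG", "1", /*overwrite=*/1);
                            });
    }

    // In the parent, after a successful launch:
    //   pid_t Last  = getLastLaunchedExecutorPID();  // most recent child, or -1
    //   pid_t First = getNthLaunchedExecutorPID(1);  // 1-based index, or -1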
diff --git a/clang/lib/Interpreter/Value.cpp b/clang/lib/Interpreter/Value.cpp
index be2ab55..84ba508 100644
--- a/clang/lib/Interpreter/Value.cpp
+++ b/clang/lib/Interpreter/Value.cpp
@@ -102,7 +102,7 @@ static Value::Kind ConvertQualTypeToKind(const ASTContext &Ctx, QualType QT) {
return Value::K_Void;
if (const auto *ET = QT->getAs<EnumType>())
- QT = ET->getDecl()->getIntegerType();
+ QT = ET->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
const auto *BT = QT->getAs<BuiltinType>();
if (!BT || BT->isNullPtrType())
@@ -149,9 +149,9 @@ Value::Value(const Interpreter *In, void *Ty) : Interp(In), OpaqueType(Ty) {
}
if (const auto *RT = DtorTy->getAs<RecordType>()) {
if (CXXRecordDecl *CXXRD =
- llvm::dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ llvm::dyn_cast<CXXRecordDecl>(RT->getOriginalDecl())) {
if (llvm::Expected<llvm::orc::ExecutorAddr> Addr =
- Interp.CompileDtorCall(CXXRD))
+ Interp.CompileDtorCall(CXXRD->getDefinitionOrSelf()))
DtorF = reinterpret_cast<void *>(Addr->getValue());
else
llvm::logAllUnhandledErrors(Addr.takeError(), llvm::errs());
diff --git a/clang/lib/Parse/ParseDeclCXX.cpp b/clang/lib/Parse/ParseDeclCXX.cpp
index bc8841c..3214e6f 100644
--- a/clang/lib/Parse/ParseDeclCXX.cpp
+++ b/clang/lib/Parse/ParseDeclCXX.cpp
@@ -591,7 +591,7 @@ bool Parser::ParseUsingDeclarator(DeclaratorContext Context,
NextToken().isRegularKeywordAttribute() ||
NextToken().is(tok::kw___attribute)) &&
D.SS.isNotEmpty() && LastII == Tok.getIdentifierInfo() &&
- D.SS.getScopeRep()->getKind() != NestedNameSpecifier::Namespace) {
+ D.SS.getScopeRep().getKind() != NestedNameSpecifier::Kind::Namespace) {
SourceLocation IdLoc = ConsumeToken();
ParsedType Type =
Actions.getInheritingConstructorName(D.SS, IdLoc, *LastII);
diff --git a/clang/lib/Parse/ParseExprCXX.cpp b/clang/lib/Parse/ParseExprCXX.cpp
index 8dce226..8605ba2 100644
--- a/clang/lib/Parse/ParseExprCXX.cpp
+++ b/clang/lib/Parse/ParseExprCXX.cpp
@@ -195,9 +195,10 @@ bool Parser::ParseOptionalCXXScopeSpecifier(
if (DS.getTypeSpecType() == DeclSpec::TST_error)
return false;
- QualType Type = Actions.ActOnPackIndexingType(
- DS.getRepAsType().get(), DS.getPackIndexingExpr(), DS.getBeginLoc(),
- DS.getEllipsisLoc());
+ QualType Pattern = Sema::GetTypeFromParser(DS.getRepAsType());
+ QualType Type =
+ Actions.ActOnPackIndexingType(Pattern, DS.getPackIndexingExpr(),
+ DS.getBeginLoc(), DS.getEllipsisLoc());
if (Type.isNull())
return false;
@@ -2355,8 +2356,10 @@ bool Parser::ParseUnqualifiedIdTemplateId(
// Constructor and destructor names.
TypeResult Type = Actions.ActOnTemplateIdType(
- getCurScope(), SS, TemplateKWLoc, Template, Name, NameLoc, LAngleLoc,
- TemplateArgsPtr, RAngleLoc, /*IsCtorOrDtorName=*/true);
+ getCurScope(), ElaboratedTypeKeyword::None,
+ /*ElaboratedKeywordLoc=*/SourceLocation(), SS, TemplateKWLoc, Template,
+ Name, NameLoc, LAngleLoc, TemplateArgsPtr, RAngleLoc,
+ /*IsCtorOrDtorName=*/true);
if (Type.isInvalid())
return true;
diff --git a/clang/lib/Parse/ParseTemplate.cpp b/clang/lib/Parse/ParseTemplate.cpp
index 0277dfb..0c1cd8c 100644
--- a/clang/lib/Parse/ParseTemplate.cpp
+++ b/clang/lib/Parse/ParseTemplate.cpp
@@ -1128,12 +1128,14 @@ bool Parser::AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
// Build the annotation token.
if (TNK == TNK_Type_template && AllowTypeAnnotation) {
- TypeResult Type = ArgsInvalid
- ? TypeError()
- : Actions.ActOnTemplateIdType(
- getCurScope(), SS, TemplateKWLoc, Template,
- TemplateName.Identifier, TemplateNameLoc,
- LAngleLoc, TemplateArgsPtr, RAngleLoc);
+ TypeResult Type =
+ ArgsInvalid
+ ? TypeError()
+ : Actions.ActOnTemplateIdType(
+ getCurScope(), ElaboratedTypeKeyword::None,
+ /*ElaboratedKeywordLoc=*/SourceLocation(), SS, TemplateKWLoc,
+ Template, TemplateName.Identifier, TemplateNameLoc, LAngleLoc,
+ TemplateArgsPtr, RAngleLoc);
Tok.setKind(tok::annot_typename);
setTypeAnnotation(Tok, Type);
@@ -1194,10 +1196,11 @@ void Parser::AnnotateTemplateIdTokenAsType(
TemplateId->isInvalid()
? TypeError()
: Actions.ActOnTemplateIdType(
- getCurScope(), SS, TemplateId->TemplateKWLoc,
- TemplateId->Template, TemplateId->Name,
- TemplateId->TemplateNameLoc, TemplateId->LAngleLoc,
- TemplateArgsPtr, TemplateId->RAngleLoc,
+ getCurScope(), ElaboratedTypeKeyword::None,
+ /*ElaboratedKeywordLoc=*/SourceLocation(), SS,
+ TemplateId->TemplateKWLoc, TemplateId->Template,
+ TemplateId->Name, TemplateId->TemplateNameLoc,
+ TemplateId->LAngleLoc, TemplateArgsPtr, TemplateId->RAngleLoc,
/*IsCtorOrDtorName=*/false, IsClassName, AllowImplicitTypename);
// Create the new "type" annotation token.
Tok.setKind(tok::annot_typename);
@@ -1263,7 +1266,8 @@ ParsedTemplateArgument Parser::ParseTemplateTemplateArgument() {
Actions.ActOnTemplateName(getCurScope(), SS, TemplateKWLoc, Name,
/*ObjectType=*/nullptr,
/*EnteringContext=*/false, Template))
- Result = ParsedTemplateArgument(SS, Template, Name.StartLocation);
+ Result = ParsedTemplateArgument(TemplateKWLoc, SS, Template,
+ Name.StartLocation);
}
} else if (Tok.is(tok::identifier) || Tok.is(tok::annot_template_id) ||
Tok.is(tok::annot_non_type)) {
@@ -1300,7 +1304,8 @@ ParsedTemplateArgument Parser::ParseTemplateTemplateArgument() {
TNK == TNK_Var_template || TNK == TNK_Concept_template) {
// We have an id-expression that refers to a class template or
// (C++0x) alias template.
- Result = ParsedTemplateArgument(SS, Template, Name.StartLocation);
+ Result = ParsedTemplateArgument(/*TemplateKwLoc=*/SourceLocation(), SS,
+ Template, Name.StartLocation);
}
}
}
diff --git a/clang/lib/Parse/ParseTentative.cpp b/clang/lib/Parse/ParseTentative.cpp
index 2a731a1..82f2294 100644
--- a/clang/lib/Parse/ParseTentative.cpp
+++ b/clang/lib/Parse/ParseTentative.cpp
@@ -1328,7 +1328,7 @@ Parser::isCXXDeclarationSpecifier(ImplicitTypenameContext AllowImplicitTypename,
Actions.RestoreNestedNameSpecifierAnnotation(Tok.getAnnotationValue(),
Tok.getAnnotationRange(),
SS);
- if (SS.getScopeRep() && SS.getScopeRep()->isDependent()) {
+ if (SS.getScopeRep().isDependent()) {
RevertingTentativeParsingAction PA(*this);
ConsumeAnnotationToken();
ConsumeToken();
diff --git a/clang/lib/Parse/Parser.cpp b/clang/lib/Parse/Parser.cpp
index a8cfe20..e57a789 100644
--- a/clang/lib/Parse/Parser.cpp
+++ b/clang/lib/Parse/Parser.cpp
@@ -1774,9 +1774,9 @@ Parser::TryAnnotateName(CorrectionCandidateCallback *CCC,
/// An Objective-C object type followed by '<' is a specialization of
/// a parameterized class type or a protocol-qualified type.
ParsedType Ty = Classification.getType();
+ QualType T = Actions.GetTypeFromParser(Ty);
if (getLangOpts().ObjC && NextToken().is(tok::less) &&
- (Ty.get()->isObjCObjectType() ||
- Ty.get()->isObjCObjectPointerType())) {
+ (T->isObjCObjectType() || T->isObjCObjectPointerType())) {
// Consume the name.
SourceLocation IdentifierLoc = ConsumeToken();
SourceLocation NewEndLoc;
@@ -2032,11 +2032,12 @@ bool Parser::TryAnnotateTypeOrScopeTokenAfterScopeSpec(
if (SS.isNotEmpty()) // it was a C++ qualified type name.
BeginLoc = SS.getBeginLoc();
+ QualType T = Actions.GetTypeFromParser(Ty);
+
/// An Objective-C object type followed by '<' is a specialization of
/// a parameterized class type or a protocol-qualified type.
if (getLangOpts().ObjC && NextToken().is(tok::less) &&
- (Ty.get()->isObjCObjectType() ||
- Ty.get()->isObjCObjectPointerType())) {
+ (T->isObjCObjectType() || T->isObjCObjectPointerType())) {
// Consume the name.
SourceLocation IdentifierLoc = ConsumeToken();
SourceLocation NewEndLoc;
diff --git a/clang/lib/Sema/AnalysisBasedWarnings.cpp b/clang/lib/Sema/AnalysisBasedWarnings.cpp
index fdb6302..dd418f7 100644
--- a/clang/lib/Sema/AnalysisBasedWarnings.cpp
+++ b/clang/lib/Sema/AnalysisBasedWarnings.cpp
@@ -227,14 +227,11 @@ static bool hasRecursiveCallInPath(const FunctionDecl *FD, CFGBlock &Block) {
// Skip function calls which are qualified with a templated class.
if (const DeclRefExpr *DRE =
- dyn_cast<DeclRefExpr>(CE->getCallee()->IgnoreParenImpCasts())) {
- if (NestedNameSpecifier *NNS = DRE->getQualifier()) {
- if (NNS->getKind() == NestedNameSpecifier::TypeSpec &&
- isa<TemplateSpecializationType>(NNS->getAsType())) {
+ dyn_cast<DeclRefExpr>(CE->getCallee()->IgnoreParenImpCasts()))
+ if (NestedNameSpecifier NNS = DRE->getQualifier();
+ NNS.getKind() == NestedNameSpecifier::Kind::Type)
+ if (isa_and_nonnull<TemplateSpecializationType>(NNS.getAsType()))
continue;
- }
- }
- }
const CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(CE);
if (!MCE || isa<CXXThisExpr>(MCE->getImplicitObjectArgument()) ||
diff --git a/clang/lib/Sema/DeclSpec.cpp b/clang/lib/Sema/DeclSpec.cpp
index f0f1d66..8756ce5 100644
--- a/clang/lib/Sema/DeclSpec.cpp
+++ b/clang/lib/Sema/DeclSpec.cpp
@@ -48,9 +48,9 @@ void UnqualifiedId::setConstructorTemplateId(TemplateIdAnnotation *TemplateId) {
EndLocation = TemplateId->RAngleLoc;
}
-void CXXScopeSpec::Extend(ASTContext &Context, TypeLoc TL,
- SourceLocation ColonColonLoc) {
- Builder.Extend(Context, TL, ColonColonLoc);
+void CXXScopeSpec::Make(ASTContext &Context, TypeLoc TL,
+ SourceLocation ColonColonLoc) {
+ Builder.Make(Context, TL, ColonColonLoc);
if (Range.getBegin().isInvalid())
Range.setBegin(TL.getBeginLoc());
Range.setEnd(ColonColonLoc);
@@ -59,19 +59,6 @@ void CXXScopeSpec::Extend(ASTContext &Context, TypeLoc TL,
"NestedNameSpecifierLoc range computation incorrect");
}
-void CXXScopeSpec::Extend(ASTContext &Context, IdentifierInfo *Identifier,
- SourceLocation IdentifierLoc,
- SourceLocation ColonColonLoc) {
- Builder.Extend(Context, Identifier, IdentifierLoc, ColonColonLoc);
-
- if (Range.getBegin().isInvalid())
- Range.setBegin(IdentifierLoc);
- Range.setEnd(ColonColonLoc);
-
- assert(Range == Builder.getSourceRange() &&
- "NestedNameSpecifierLoc range computation incorrect");
-}
-
void CXXScopeSpec::Extend(ASTContext &Context, NamespaceBaseDecl *Namespace,
SourceLocation NamespaceLoc,
SourceLocation ColonColonLoc) {
@@ -95,10 +82,10 @@ void CXXScopeSpec::MakeGlobal(ASTContext &Context,
"NestedNameSpecifierLoc range computation incorrect");
}
-void CXXScopeSpec::MakeSuper(ASTContext &Context, CXXRecordDecl *RD,
- SourceLocation SuperLoc,
- SourceLocation ColonColonLoc) {
- Builder.MakeSuper(Context, RD, SuperLoc, ColonColonLoc);
+void CXXScopeSpec::MakeMicrosoftSuper(ASTContext &Context, CXXRecordDecl *RD,
+ SourceLocation SuperLoc,
+ SourceLocation ColonColonLoc) {
+ Builder.MakeMicrosoftSuper(Context, RD, SuperLoc, ColonColonLoc);
Range.setBegin(SuperLoc);
Range.setEnd(ColonColonLoc);
@@ -108,7 +95,7 @@ void CXXScopeSpec::MakeSuper(ASTContext &Context, CXXRecordDecl *RD,
}
void CXXScopeSpec::MakeTrivial(ASTContext &Context,
- NestedNameSpecifier *Qualifier, SourceRange R) {
+ NestedNameSpecifier Qualifier, SourceRange R) {
Builder.MakeTrivial(Context, Qualifier, R);
Range = R;
}
diff --git a/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.cpp b/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.cpp
index 87f9ae0..806800c 100644
--- a/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.cpp
+++ b/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.cpp
@@ -313,9 +313,6 @@ TemplateParameterListBuilder::finalizeTemplateArgs(ConceptDecl *CD) {
Builder.Record->getDeclContext()->addDecl(Builder.Template);
Params.clear();
- QualType T = Builder.Template->getInjectedClassNameSpecialization();
- T = AST.getInjectedClassNameType(Builder.Record, T);
-
return Builder;
}
@@ -351,7 +348,7 @@ BuiltinTypeMethodBuilder::BuiltinTypeMethodBuilder(BuiltinTypeDeclBuilder &DB,
ASTContext &AST = DB.SemaRef.getASTContext();
if (IsCtor) {
Name = AST.DeclarationNames.getCXXConstructorName(
- DB.Record->getTypeForDecl()->getCanonicalTypeUnqualified());
+ AST.getCanonicalTagType(DB.Record));
} else {
const IdentifierInfo &II =
AST.Idents.get(NameStr, tok::TokenKind::identifier);
@@ -553,9 +550,9 @@ BuiltinTypeDeclBuilder::BuiltinTypeDeclBuilder(Sema &SemaRef,
return;
}
- Record = CXXRecordDecl::Create(AST, TagDecl::TagKind::Class, HLSLNamespace,
- SourceLocation(), SourceLocation(), &II,
- PrevDecl, true);
+ Record =
+ CXXRecordDecl::Create(AST, TagDecl::TagKind::Class, HLSLNamespace,
+ SourceLocation(), SourceLocation(), &II, PrevDecl);
Record->setImplicit(true);
Record->setLexicalDeclContext(HLSLNamespace);
Record->setHasExternalLexicalStorage();
@@ -570,18 +567,6 @@ BuiltinTypeDeclBuilder::~BuiltinTypeDeclBuilder() {
HLSLNamespace->addDecl(Record);
}
-CXXRecordDecl *BuiltinTypeDeclBuilder::finalizeForwardDeclaration() {
- // Force the QualType to be generated for the record declaration. In most
- // cases this will happen naturally when something uses the type the
- // QualType gets lazily created. Unfortunately, with our injected types if a
- // type isn't used in a translation unit the QualType may not get
- // automatically generated before a PCH is generated. To resolve this we
- // just force that the QualType is generated after we create a forward
- // declaration.
- (void)Record->getASTContext().getRecordType(Record);
- return Record;
-}
-
BuiltinTypeDeclBuilder &
BuiltinTypeDeclBuilder::addMemberVariable(StringRef Name, QualType Type,
llvm::ArrayRef<Attr *> Attrs,
diff --git a/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.h b/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.h
index 36c4add..098b726 100644
--- a/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.h
+++ b/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.h
@@ -64,7 +64,7 @@ public:
BuiltinTypeDeclBuilder &addSimpleTemplateParams(ArrayRef<StringRef> Names,
ConceptDecl *CD);
- CXXRecordDecl *finalizeForwardDeclaration();
+ CXXRecordDecl *finalizeForwardDeclaration() { return Record; }
BuiltinTypeDeclBuilder &completeDefinition();
BuiltinTypeDeclBuilder &
diff --git a/clang/lib/Sema/HeuristicResolver.cpp b/clang/lib/Sema/HeuristicResolver.cpp
index 6874d30..9c56dd3 100644
--- a/clang/lib/Sema/HeuristicResolver.cpp
+++ b/clang/lib/Sema/HeuristicResolver.cpp
@@ -44,7 +44,7 @@ public:
resolveDependentNameType(const DependentNameType *DNT);
std::vector<const NamedDecl *> resolveTemplateSpecializationType(
const DependentTemplateSpecializationType *DTST);
- QualType resolveNestedNameSpecifierToType(const NestedNameSpecifier *NNS);
+ QualType resolveNestedNameSpecifierToType(NestedNameSpecifier NNS);
QualType getPointeeType(QualType T);
std::vector<const NamedDecl *>
lookupDependentName(CXXRecordDecl *RD, DeclarationName Name,
@@ -101,9 +101,8 @@ QualType resolveDeclsToType(const std::vector<const NamedDecl *> &Decls,
ASTContext &Ctx) {
if (Decls.size() != 1) // Names an overload set -- just bail.
return QualType();
- if (const auto *TD = dyn_cast<TypeDecl>(Decls[0])) {
- return Ctx.getTypeDeclType(TD);
- }
+ if (const auto *TD = dyn_cast<TypeDecl>(Decls[0]))
+ return Ctx.getCanonicalTypeDeclType(TD);
if (const auto *VD = dyn_cast<ValueDecl>(Decls[0])) {
return VD->getType();
}
@@ -139,8 +138,7 @@ TagDecl *HeuristicResolverImpl::resolveTypeToTagDecl(QualType QT) {
T = T->getCanonicalTypeInternal().getTypePtr();
}
- if (auto *TT = T->getAs<TagType>()) {
- TagDecl *TD = TT->getDecl();
+ if (auto *TD = T->getAsTagDecl()) {
// Template might not be instantiated yet, fall back to primary template
// in such cases.
if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(TD)) {
@@ -151,11 +149,6 @@ TagDecl *HeuristicResolverImpl::resolveTypeToTagDecl(QualType QT) {
return TD;
}
- if (const auto *ICNT = T->getAs<InjectedClassNameType>())
- T = ICNT->getInjectedSpecializationType().getTypePtrOrNull();
- if (!T)
- return nullptr;
-
TemplateName TN = getReferencedTemplateName(T);
if (TN.isNull())
return nullptr;
@@ -292,7 +285,7 @@ std::vector<const NamedDecl *> HeuristicResolverImpl::resolveMemberExpr(
// an instance method, it's represented as a CXXDependentScopeMemberExpr
   // with `this` as the base expression and `X` as the qualifier
// (which could be valid if `X` names a base class after instantiation).
- if (NestedNameSpecifier *NNS = ME->getQualifier()) {
+ if (NestedNameSpecifier NNS = ME->getQualifier()) {
if (QualType QualifierType = resolveNestedNameSpecifierToType(NNS);
!QualifierType.isNull()) {
auto Decls =
@@ -348,7 +341,10 @@ HeuristicResolverImpl::resolveCalleeOfCallExpr(const CallExpr *CE) {
std::vector<const NamedDecl *> HeuristicResolverImpl::resolveUsingValueDecl(
const UnresolvedUsingValueDecl *UUVD) {
- return resolveDependentMember(QualType(UUVD->getQualifier()->getAsType(), 0),
+ NestedNameSpecifier Qualifier = UUVD->getQualifier();
+ if (Qualifier.getKind() != NestedNameSpecifier::Kind::Type)
+ return {};
+ return resolveDependentMember(QualType(Qualifier.getAsType(), 0),
UUVD->getNameInfo().getName(), ValueFilter);
}
@@ -399,23 +395,23 @@ QualType HeuristicResolverImpl::resolveExprToType(const Expr *E) {
}
QualType HeuristicResolverImpl::resolveNestedNameSpecifierToType(
- const NestedNameSpecifier *NNS) {
- if (!NNS)
- return QualType();
-
+ NestedNameSpecifier NNS) {
// The purpose of this function is to handle the dependent (Kind ==
// Identifier) case, but we need to recurse on the prefix because
// that may be dependent as well, so for convenience handle
// the TypeSpec cases too.
- switch (NNS->getKind()) {
- case NestedNameSpecifier::TypeSpec:
- return QualType(NNS->getAsType(), 0);
- case NestedNameSpecifier::Identifier: {
- return resolveDeclsToType(
- resolveDependentMember(
- resolveNestedNameSpecifierToType(NNS->getPrefix()),
- NNS->getAsIdentifier(), TypeFilter),
- Ctx);
+ switch (NNS.getKind()) {
+ case NestedNameSpecifier::Kind::Type: {
+ const auto *T = NNS.getAsType();
+ // FIXME: Should this handle the DependentTemplateSpecializationType as
+ // well?
+ if (const auto *DTN = dyn_cast<DependentNameType>(T))
+ return resolveDeclsToType(
+ resolveDependentMember(
+ resolveNestedNameSpecifierToType(DTN->getQualifier()),
+ DTN->getIdentifier(), TypeFilter),
+ Ctx);
+ return QualType(T, 0);
}
default:
break;
@@ -590,7 +586,7 @@ HeuristicResolver::resolveTemplateSpecializationType(
return HeuristicResolverImpl(Ctx).resolveTemplateSpecializationType(DTST);
}
QualType HeuristicResolver::resolveNestedNameSpecifierToType(
- const NestedNameSpecifier *NNS) const {
+ NestedNameSpecifier NNS) const {
return HeuristicResolverImpl(Ctx).resolveNestedNameSpecifierToType(NNS);
}
std::vector<const NamedDecl *> HeuristicResolver::lookupDependentName(
diff --git a/clang/lib/Sema/Sema.cpp b/clang/lib/Sema/Sema.cpp
index cfb2f60..12e5c65 100644
--- a/clang/lib/Sema/Sema.cpp
+++ b/clang/lib/Sema/Sema.cpp
@@ -1883,9 +1883,10 @@ public:
for (const FieldDecl *FD : RD->fields()) {
QualType FT = FD->getType();
if (const auto *RT = FT->getAs<RecordType>())
- if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
- if (ClassDecl->hasDefinition())
- if (CXXDestructorDecl *MemberDtor = ClassDecl->getDestructor())
+ if (const auto *ClassDecl =
+ dyn_cast<CXXRecordDecl>(RT->getOriginalDecl()))
+ if (const auto *Def = ClassDecl->getDefinition())
+ if (CXXDestructorDecl *MemberDtor = Def->getDestructor())
asImpl().visitUsedDecl(MemberDtor->getLocation(), MemberDtor);
}
@@ -1893,9 +1894,10 @@ public:
for (const auto &Base : RD->bases()) {
QualType BaseType = Base.getType();
if (const auto *RT = BaseType->getAs<RecordType>())
- if (const auto *BaseDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
- if (BaseDecl->hasDefinition())
- if (CXXDestructorDecl *BaseDtor = BaseDecl->getDestructor())
+ if (const auto *BaseDecl =
+ dyn_cast<CXXRecordDecl>(RT->getOriginalDecl()))
+ if (const auto *Def = BaseDecl->getDefinition())
+ if (CXXDestructorDecl *BaseDtor = Def->getDestructor())
asImpl().visitUsedDecl(BaseDtor->getLocation(), BaseDtor);
}
}
@@ -1908,9 +1910,10 @@ public:
VD->needsDestruction(S.Context)) {
QualType VT = VD->getType();
if (const auto *RT = VT->getAs<RecordType>())
- if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
- if (ClassDecl->hasDefinition())
- if (CXXDestructorDecl *Dtor = ClassDecl->getDestructor())
+ if (const auto *ClassDecl =
+ dyn_cast<CXXRecordDecl>(RT->getOriginalDecl()))
+ if (const auto *Def = ClassDecl->getDefinition())
+ if (CXXDestructorDecl *Dtor = Def->getDestructor())
asImpl().visitUsedDecl(Dtor->getLocation(), Dtor);
}
diff --git a/clang/lib/Sema/SemaAccess.cpp b/clang/lib/Sema/SemaAccess.cpp
index 83a07a2..ba560d3 100644
--- a/clang/lib/Sema/SemaAccess.cpp
+++ b/clang/lib/Sema/SemaAccess.cpp
@@ -318,11 +318,8 @@ static AccessResult IsDerivedFromInclusive(const CXXRecordDecl *Derived,
const CXXRecordDecl *RD;
QualType T = I.getType();
- if (const RecordType *RT = T->getAs<RecordType>()) {
- RD = cast<CXXRecordDecl>(RT->getDecl());
- } else if (const InjectedClassNameType *IT
- = T->getAs<InjectedClassNameType>()) {
- RD = IT->getDecl();
+ if (CXXRecordDecl *Rec = T->getAsCXXRecordDecl()) {
+ RD = Rec;
} else {
assert(T->isDependentType() && "non-dependent base wasn't a record?");
OnFailure = AR_dependent;
@@ -443,7 +440,9 @@ static AccessResult MatchesFriend(Sema &S,
const EffectiveContext &EC,
CanQualType Friend) {
if (const RecordType *RT = Friend->getAs<RecordType>())
- return MatchesFriend(S, EC, cast<CXXRecordDecl>(RT->getDecl()));
+ return MatchesFriend(
+ S, EC,
+ cast<CXXRecordDecl>(RT->getOriginalDecl())->getDefinitionOrSelf());
// TODO: we can do better than this
if (Friend->isDependentType())
@@ -671,11 +670,8 @@ struct ProtectedFriendContext {
const CXXRecordDecl *RD;
QualType T = I.getType();
- if (const RecordType *RT = T->getAs<RecordType>()) {
- RD = cast<CXXRecordDecl>(RT->getDecl());
- } else if (const InjectedClassNameType *IT
- = T->getAs<InjectedClassNameType>()) {
- RD = IT->getDecl();
+ if (CXXRecordDecl *Rec = T->getAsCXXRecordDecl()) {
+ RD = Rec;
} else {
assert(T->isDependentType() && "non-dependent base wasn't a record?");
EverDependent = true;
@@ -1072,7 +1068,7 @@ static bool TryDiagnoseProtectedAccess(Sema &S, const EffectiveContext &EC,
// TODO: it would be great to have a fixit here, since this is
// such an obvious error.
S.Diag(D->getLocation(), diag::note_access_protected_restricted_noobject)
- << S.Context.getTypeDeclType(ECRecord);
+ << S.Context.getCanonicalTagType(ECRecord);
return true;
}
@@ -1101,7 +1097,7 @@ static bool TryDiagnoseProtectedAccess(Sema &S, const EffectiveContext &EC,
// Otherwise, use the generic diagnostic.
return S.Diag(D->getLocation(),
diag::note_access_protected_restricted_object)
- << S.Context.getTypeDeclType(ECRecord);
+ << S.Context.getCanonicalTagType(ECRecord);
}
return false;
@@ -1129,7 +1125,7 @@ static void diagnoseBadDirectAccess(Sema &S,
else if (TypedefNameDecl *TND = dyn_cast<TypedefNameDecl>(D))
PrevDecl = TND->getPreviousDecl();
else if (TagDecl *TD = dyn_cast<TagDecl>(D)) {
- if (auto *RD = dyn_cast<CXXRecordDecl>(D);
+ if (const auto *RD = dyn_cast<CXXRecordDecl>(TD);
RD && RD->isInjectedClassName())
break;
PrevDecl = TD->getPreviousDecl();
@@ -1284,10 +1280,10 @@ static void DiagnoseBadAccess(Sema &S, SourceLocation Loc,
NamedDecl *D = (Entity.isMemberAccess() ? Entity.getTargetDecl() : nullptr);
S.Diag(Loc, Entity.getDiag())
- << (Entity.getAccess() == AS_protected)
- << (D ? D->getDeclName() : DeclarationName())
- << S.Context.getTypeDeclType(NamingClass)
- << S.Context.getTypeDeclType(DeclaringClass);
+ << (Entity.getAccess() == AS_protected)
+ << (D ? D->getDeclName() : DeclarationName())
+ << S.Context.getCanonicalTagType(NamingClass)
+ << S.Context.getCanonicalTagType(DeclaringClass);
DiagnoseAccessPath(S, EC, Entity);
}
@@ -1640,7 +1636,8 @@ Sema::AccessResult Sema::CheckDestructorAccess(SourceLocation Loc,
return AR_accessible;
CXXRecordDecl *NamingClass = Dtor->getParent();
- if (ObjectTy.isNull()) ObjectTy = Context.getTypeDeclType(NamingClass);
+ if (ObjectTy.isNull())
+ ObjectTy = Context.getCanonicalTagType(NamingClass);
AccessTarget Entity(Context, AccessTarget::Member, NamingClass,
DeclAccessPair::make(Dtor, Access),
@@ -1728,7 +1725,7 @@ Sema::AccessResult Sema::CheckConstructorAccess(SourceLocation UseLoc,
AccessTarget AccessEntity(
Context, AccessTarget::Member, NamingClass,
DeclAccessPair::make(Constructor, Found.getAccess()),
- Context.getTypeDeclType(ObjectClass));
+ Context.getCanonicalTagType(ObjectClass));
AccessEntity.setDiag(PD);
return CheckAccess(*this, UseLoc, AccessEntity);
@@ -1776,7 +1773,7 @@ Sema::CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
return AR_accessible;
AccessTarget Entity(Context, AccessTarget::Member, DecomposedClass, Field,
- Context.getRecordType(DecomposedClass));
+ Context.getCanonicalTagType(DecomposedClass));
Entity.setDiag(diag::err_decomp_decl_inaccessible_field);
return CheckAccess(*this, UseLoc, Entity);
@@ -1790,7 +1787,8 @@ Sema::AccessResult Sema::CheckMemberOperatorAccess(SourceLocation OpLoc,
return AR_accessible;
const RecordType *RT = ObjectExpr->getType()->castAs<RecordType>();
- CXXRecordDecl *NamingClass = cast<CXXRecordDecl>(RT->getDecl());
+ CXXRecordDecl *NamingClass =
+ cast<CXXRecordDecl>(RT->getOriginalDecl())->getDefinitionOrSelf();
AccessTarget Entity(Context, AccessTarget::Member, NamingClass, Found,
ObjectExpr->getType());
diff --git a/clang/lib/Sema/SemaAvailability.cpp b/clang/lib/Sema/SemaAvailability.cpp
index 68a698f..1c48b3c 100644
--- a/clang/lib/Sema/SemaAvailability.cpp
+++ b/clang/lib/Sema/SemaAvailability.cpp
@@ -102,7 +102,7 @@ Sema::ShouldDiagnoseAvailabilityOfDecl(const NamedDecl *D, std::string *Message,
break;
for (const Type *T = TD->getUnderlyingType().getTypePtr(); /**/; /**/) {
if (auto *TT = dyn_cast<TagType>(T)) {
- D = TT->getDecl();
+ D = TT->getOriginalDecl()->getDefinitionOrSelf();
} else if (isa<SubstTemplateTypeParmType>(T)) {
// A Subst* node represents a use through a template.
// Any uses of the underlying declaration happened through it's template
@@ -1017,7 +1017,7 @@ bool DiagnoseUnguardedAvailability::VisitTypeLoc(TypeLoc Ty) {
return true;
if (const auto *TT = dyn_cast<TagType>(TyPtr)) {
- TagDecl *TD = TT->getDecl();
+ TagDecl *TD = TT->getOriginalDecl()->getDefinitionOrSelf();
DiagnoseDeclAvailability(TD, Range);
} else if (const auto *TD = dyn_cast<TypedefType>(TyPtr)) {
diff --git a/clang/lib/Sema/SemaBPF.cpp b/clang/lib/Sema/SemaBPF.cpp
index 7c00084..6428435 100644
--- a/clang/lib/Sema/SemaBPF.cpp
+++ b/clang/lib/Sema/SemaBPF.cpp
@@ -58,10 +58,10 @@ static bool isValidPreserveTypeInfoArg(Expr *Arg) {
// Record type or Enum type.
const Type *Ty = ArgType->getUnqualifiedDesugaredType();
if (const auto *RT = Ty->getAs<RecordType>()) {
- if (!RT->getDecl()->getDeclName().isEmpty())
+ if (!RT->getOriginalDecl()->getDeclName().isEmpty())
return true;
} else if (const auto *ET = Ty->getAs<EnumType>()) {
- if (!ET->getDecl()->getDeclName().isEmpty())
+ if (!ET->getOriginalDecl()->getDeclName().isEmpty())
return true;
}
@@ -105,7 +105,7 @@ static bool isValidPreserveEnumValueArg(Expr *Arg) {
return false;
// The enum value must be supported.
- return llvm::is_contained(ET->getDecl()->enumerators(), Enumerator);
+ return llvm::is_contained(ET->getOriginalDecl()->enumerators(), Enumerator);
}
bool SemaBPF::CheckBPFBuiltinFunctionCall(unsigned BuiltinID,
diff --git a/clang/lib/Sema/SemaCUDA.cpp b/clang/lib/Sema/SemaCUDA.cpp
index 24cb8c3..fbf64d3 100644
--- a/clang/lib/Sema/SemaCUDA.cpp
+++ b/clang/lib/Sema/SemaCUDA.cpp
@@ -426,7 +426,8 @@ bool SemaCUDA::inferTargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
continue;
}
- CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
+ CXXRecordDecl *BaseClassDecl =
+ cast<CXXRecordDecl>(BaseType->getOriginalDecl())->getDefinitionOrSelf();
Sema::SpecialMemberOverloadResult SMOR =
SemaRef.LookupSpecialMember(BaseClassDecl, CSM,
/* ConstArg */ ConstRHS,
@@ -471,7 +472,9 @@ bool SemaCUDA::inferTargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
continue;
}
- CXXRecordDecl *FieldRecDecl = cast<CXXRecordDecl>(FieldType->getDecl());
+ CXXRecordDecl *FieldRecDecl =
+ cast<CXXRecordDecl>(FieldType->getOriginalDecl())
+ ->getDefinitionOrSelf();
Sema::SpecialMemberOverloadResult SMOR =
SemaRef.LookupSpecialMember(FieldRecDecl, CSM,
/* ConstArg */ ConstRHS && !F->isMutable(),
diff --git a/clang/lib/Sema/SemaCXXScopeSpec.cpp b/clang/lib/Sema/SemaCXXScopeSpec.cpp
index 6ac0483..45de8ff 100644
--- a/clang/lib/Sema/SemaCXXScopeSpec.cpp
+++ b/clang/lib/Sema/SemaCXXScopeSpec.cpp
@@ -28,24 +28,21 @@ static CXXRecordDecl *getCurrentInstantiationOf(QualType T,
if (T.isNull())
return nullptr;
- const Type *Ty = T->getCanonicalTypeInternal().getTypePtr();
- if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
- CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
- if (Record->isCurrentInstantiation(CurContext))
- return Record;
-
- return nullptr;
- } else if (isa<InjectedClassNameType>(Ty))
- return cast<InjectedClassNameType>(Ty)->getDecl();
- else
+ const TagType *TagTy = dyn_cast<TagType>(T->getCanonicalTypeInternal());
+ if (!isa_and_present<RecordType, InjectedClassNameType>(TagTy))
return nullptr;
+ auto *RD =
+ cast<CXXRecordDecl>(TagTy->getOriginalDecl())->getDefinitionOrSelf();
+ if (isa<InjectedClassNameType>(TagTy) ||
+ RD->isCurrentInstantiation(CurContext))
+ return RD;
+ return nullptr;
}
DeclContext *Sema::computeDeclContext(QualType T) {
if (!T->isDependentType())
- if (const TagType *Tag = T->getAs<TagType>())
- return Tag->getDecl();
-
+ if (auto *D = T->getAsTagDecl())
+ return D;
return ::getCurrentInstantiationOf(T, CurContext);
}
@@ -54,18 +51,17 @@ DeclContext *Sema::computeDeclContext(const CXXScopeSpec &SS,
if (!SS.isSet() || SS.isInvalid())
return nullptr;
- NestedNameSpecifier *NNS = SS.getScopeRep();
- if (NNS->isDependent()) {
+ NestedNameSpecifier NNS = SS.getScopeRep();
+ if (NNS.isDependent()) {
// If this nested-name-specifier refers to the current
// instantiation, return its DeclContext.
if (CXXRecordDecl *Record = getCurrentInstantiationOf(NNS))
return Record;
if (EnteringContext) {
- const Type *NNSType = NNS->getAsType();
- if (!NNSType) {
+ if (NNS.getKind() != NestedNameSpecifier::Kind::Type)
return nullptr;
- }
+ const Type *NNSType = NNS.getAsType();
// Look through type alias templates, per C++0x [temp.dep.type]p1.
NNSType = Context.getCanonicalType(NNSType);
@@ -118,38 +114,39 @@ DeclContext *Sema::computeDeclContext(const CXXScopeSpec &SS,
// If the type of the nested name specifier is the same as the
// injected class name of the named class template, we're entering
// into that class template definition.
- QualType Injected =
- ClassTemplate->getInjectedClassNameSpecialization();
+ CanQualType Injected =
+ ClassTemplate->getCanonicalInjectedSpecializationType(Context);
if (Context.hasSameType(Injected, QualType(SpecType, 0)))
return ClassTemplate->getTemplatedDecl();
}
- } else if (const RecordType *RecordT = NNSType->getAs<RecordType>()) {
+ } else if (const auto *RecordT = dyn_cast<RecordType>(NNSType)) {
// The nested name specifier refers to a member of a class template.
- return RecordT->getDecl();
+ return RecordT->getOriginalDecl()->getDefinitionOrSelf();
}
}
return nullptr;
}
- switch (NNS->getKind()) {
- case NestedNameSpecifier::Identifier:
- llvm_unreachable("Dependent nested-name-specifier has no DeclContext");
-
- case NestedNameSpecifier::Namespace:
- return NNS->getAsNamespace()->getNamespace();
+ switch (NNS.getKind()) {
+ case NestedNameSpecifier::Kind::Namespace:
+ return const_cast<NamespaceDecl *>(
+ NNS.getAsNamespaceAndPrefix().Namespace->getNamespace());
- case NestedNameSpecifier::TypeSpec: {
- const TagType *Tag = NNS->getAsType()->getAs<TagType>();
- assert(Tag && "Non-tag type in nested-name-specifier");
- return Tag->getDecl();
+ case NestedNameSpecifier::Kind::Type: {
+ auto *TD = NNS.getAsType()->getAsTagDecl();
+ assert(TD && "Non-tag type in nested-name-specifier");
+ return TD;
}
- case NestedNameSpecifier::Global:
+ case NestedNameSpecifier::Kind::Global:
return Context.getTranslationUnitDecl();
- case NestedNameSpecifier::Super:
- return NNS->getAsRecordDecl();
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
+ return NNS.getAsMicrosoftSuper();
+
+ case NestedNameSpecifier::Kind::Null:
+ llvm_unreachable("unexpected null nested name specifier");
}
llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
@@ -159,17 +156,17 @@ bool Sema::isDependentScopeSpecifier(const CXXScopeSpec &SS) {
if (!SS.isSet() || SS.isInvalid())
return false;
- return SS.getScopeRep()->isDependent();
+ return SS.getScopeRep().isDependent();
}
-CXXRecordDecl *Sema::getCurrentInstantiationOf(NestedNameSpecifier *NNS) {
+CXXRecordDecl *Sema::getCurrentInstantiationOf(NestedNameSpecifier NNS) {
assert(getLangOpts().CPlusPlus && "Only callable in C++");
- assert(NNS->isDependent() && "Only dependent nested-name-specifier allowed");
+ assert(NNS.isDependent() && "Only dependent nested-name-specifier allowed");
- if (!NNS->getAsType())
+ if (NNS.getKind() != NestedNameSpecifier::Kind::Type)
return nullptr;
- QualType T = QualType(NNS->getAsType(), 0);
+ QualType T = QualType(NNS.getAsType(), 0);
return ::getCurrentInstantiationOf(T, CurContext);
}
@@ -195,8 +192,7 @@ bool Sema::RequireCompleteDeclContext(CXXScopeSpec &SS,
return false;
// Grab the tag definition, if there is one.
- QualType type = Context.getTypeDeclType(tag);
- tag = type->getAsTagDecl();
+ tag = tag->getDefinitionOrSelf();
// If we're currently defining this type, then lookup into the
// type is okay: don't complain that it isn't complete yet.
@@ -207,7 +203,8 @@ bool Sema::RequireCompleteDeclContext(CXXScopeSpec &SS,
if (loc.isInvalid()) loc = SS.getRange().getBegin();
// The type must be complete.
- if (RequireCompleteType(loc, type, diag::err_incomplete_nested_name_spec,
+ if (RequireCompleteType(loc, Context.getCanonicalTagType(tag),
+ diag::err_incomplete_nested_name_spec,
SS.getRange())) {
SS.SetInvalid(SS.getRange());
return true;
@@ -259,10 +256,10 @@ bool Sema::RequireCompleteEnumDecl(EnumDecl *EnumD, SourceLocation L,
if (SS) {
Diag(L, diag::err_incomplete_nested_name_spec)
- << QualType(EnumD->getTypeForDecl(), 0) << SS->getRange();
+ << Context.getCanonicalTagType(EnumD) << SS->getRange();
SS->SetInvalid(SS->getRange());
} else {
- Diag(L, diag::err_incomplete_enum) << QualType(EnumD->getTypeForDecl(), 0);
+ Diag(L, diag::err_incomplete_enum) << Context.getCanonicalTagType(EnumD);
Diag(EnumD->getLocation(), diag::note_declared_at);
}
@@ -304,7 +301,7 @@ bool Sema::ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
return true;
}
- SS.MakeSuper(Context, RD, SuperLoc, ColonColonLoc);
+ SS.MakeMicrosoftSuper(Context, RD, SuperLoc, ColonColonLoc);
return false;
}
@@ -324,9 +321,6 @@ bool Sema::isAcceptableNestedNameSpecifier(const NamedDecl *SD,
// Determine whether we have a class (or, in C++11, an enum) or
// a typedef thereof. If so, build the nested-name-specifier.
- QualType T = Context.getTypeDeclType(cast<TypeDecl>(SD));
- if (T->isDependentType())
- return true;
if (const TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(SD)) {
if (TD->getUnderlyingType()->isRecordType())
return true;
@@ -344,32 +338,42 @@ bool Sema::isAcceptableNestedNameSpecifier(const NamedDecl *SD,
if (IsExtension)
*IsExtension = true;
}
+ if (auto *TD = dyn_cast<TagDecl>(SD)) {
+ if (TD->isDependentType())
+ return true;
+ } else if (Context.getCanonicalTypeDeclType(cast<TypeDecl>(SD))
+ ->isDependentType()) {
+ return true;
+ }
return false;
}
-NamedDecl *Sema::FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS) {
- if (!S || !NNS)
+NamedDecl *Sema::FindFirstQualifierInScope(Scope *S, NestedNameSpecifier NNS) {
+ if (!S)
return nullptr;
- while (NNS->getPrefix())
- NNS = NNS->getPrefix();
-
- if (NNS->getKind() != NestedNameSpecifier::Identifier)
- return nullptr;
+ while (NNS.getKind() == NestedNameSpecifier::Kind::Type) {
+ const Type *T = NNS.getAsType();
+ if ((NNS = T->getPrefix()))
+ continue;
- LookupResult Found(*this, NNS->getAsIdentifier(), SourceLocation(),
- LookupNestedNameSpecifierName);
- LookupName(Found, S);
- assert(!Found.isAmbiguous() && "Cannot handle ambiguities here yet");
+ const auto *DNT = dyn_cast<DependentNameType>(T);
+ if (!DNT)
+ break;
- if (!Found.isSingleResult())
- return nullptr;
+ LookupResult Found(*this, DNT->getIdentifier(), SourceLocation(),
+ LookupNestedNameSpecifierName);
+ LookupName(Found, S);
+ assert(!Found.isAmbiguous() && "Cannot handle ambiguities here yet");
- NamedDecl *Result = Found.getFoundDecl();
- if (isAcceptableNestedNameSpecifier(Result))
- return Result;
+ if (!Found.isSingleResult())
+ return nullptr;
+ NamedDecl *Result = Found.getFoundDecl();
+ if (isAcceptableNestedNameSpecifier(Result))
+ return Result;
+ }
return nullptr;
}
@@ -493,7 +497,18 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo,
// base object type or prior nested-name-specifier, so this
// nested-name-specifier refers to an unknown specialization. Just build
// a dependent nested-name-specifier.
- SS.Extend(Context, IdInfo.Identifier, IdInfo.IdentifierLoc, IdInfo.CCLoc);
+
+ TypeLocBuilder TLB;
+
+ QualType DTN = Context.getDependentNameType(
+ ElaboratedTypeKeyword::None, SS.getScopeRep(), IdInfo.Identifier);
+ auto DTNL = TLB.push<DependentNameTypeLoc>(DTN);
+ DTNL.setElaboratedKeywordLoc(SourceLocation());
+ DTNL.setNameLoc(IdInfo.IdentifierLoc);
+ DTNL.setQualifierLoc(SS.getWithLocInContext(Context));
+
+ SS.clear();
+ SS.Make(Context, TLB.getTypeLocInContext(Context, DTN), IdInfo.CCLoc);
return false;
}
@@ -599,8 +614,8 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo,
OuterDecl->getCanonicalDecl() != SD->getCanonicalDecl() &&
(!isa<TypeDecl>(OuterDecl) || !isa<TypeDecl>(SD) ||
!Context.hasSameType(
- Context.getTypeDeclType(cast<TypeDecl>(OuterDecl)),
- Context.getTypeDeclType(cast<TypeDecl>(SD))))) {
+ Context.getCanonicalTypeDeclType(cast<TypeDecl>(OuterDecl)),
+ Context.getCanonicalTypeDeclType(cast<TypeDecl>(SD))))) {
if (ErrorRecoveryLookup)
return true;
@@ -613,7 +628,7 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo,
// Fall through so that we'll pick the name we found in the object
// type, since that's probably what the user wanted anyway.
- }
+ }
}
if (auto *TD = dyn_cast_or_null<TypedefNameDecl>(SD))
@@ -637,50 +652,44 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo,
return false;
}
- QualType T =
- Context.getTypeDeclType(cast<TypeDecl>(SD->getUnderlyingDecl()));
-
- if (T->isEnumeralType())
+ const auto *TD = cast<TypeDecl>(SD->getUnderlyingDecl());
+ if (isa<EnumDecl>(TD))
Diag(IdInfo.IdentifierLoc, diag::warn_cxx98_compat_enum_nested_name_spec);
+ QualType T;
TypeLocBuilder TLB;
if (const auto *USD = dyn_cast<UsingShadowDecl>(SD)) {
- T = Context.getUsingType(USD, T);
- TLB.pushTypeSpec(T).setNameLoc(IdInfo.IdentifierLoc);
- } else if (isa<InjectedClassNameType>(T)) {
- InjectedClassNameTypeLoc InjectedTL
- = TLB.push<InjectedClassNameTypeLoc>(T);
- InjectedTL.setNameLoc(IdInfo.IdentifierLoc);
- } else if (isa<RecordType>(T)) {
- RecordTypeLoc RecordTL = TLB.push<RecordTypeLoc>(T);
- RecordTL.setNameLoc(IdInfo.IdentifierLoc);
- } else if (isa<TypedefType>(T)) {
- TypedefTypeLoc TypedefTL = TLB.push<TypedefTypeLoc>(T);
- TypedefTL.setNameLoc(IdInfo.IdentifierLoc);
- } else if (isa<EnumType>(T)) {
- EnumTypeLoc EnumTL = TLB.push<EnumTypeLoc>(T);
- EnumTL.setNameLoc(IdInfo.IdentifierLoc);
- } else if (isa<TemplateTypeParmType>(T)) {
- TemplateTypeParmTypeLoc TemplateTypeTL
- = TLB.push<TemplateTypeParmTypeLoc>(T);
- TemplateTypeTL.setNameLoc(IdInfo.IdentifierLoc);
- } else if (isa<UnresolvedUsingType>(T)) {
- UnresolvedUsingTypeLoc UnresolvedTL
- = TLB.push<UnresolvedUsingTypeLoc>(T);
- UnresolvedTL.setNameLoc(IdInfo.IdentifierLoc);
- } else if (isa<SubstTemplateTypeParmType>(T)) {
- SubstTemplateTypeParmTypeLoc TL
- = TLB.push<SubstTemplateTypeParmTypeLoc>(T);
- TL.setNameLoc(IdInfo.IdentifierLoc);
- } else if (isa<SubstTemplateTypeParmPackType>(T)) {
- SubstTemplateTypeParmPackTypeLoc TL
- = TLB.push<SubstTemplateTypeParmPackTypeLoc>(T);
- TL.setNameLoc(IdInfo.IdentifierLoc);
+ T = Context.getUsingType(ElaboratedTypeKeyword::None, SS.getScopeRep(),
+ USD);
+ TLB.push<UsingTypeLoc>(T).set(/*ElaboratedKeywordLoc=*/SourceLocation(),
+ SS.getWithLocInContext(Context),
+ IdInfo.IdentifierLoc);
+ } else if (const auto *Tag = dyn_cast<TagDecl>(TD)) {
+ T = Context.getTagType(ElaboratedTypeKeyword::None, SS.getScopeRep(), Tag,
+ /*OwnsTag=*/false);
+ auto TTL = TLB.push<TagTypeLoc>(T);
+ TTL.setElaboratedKeywordLoc(SourceLocation());
+ TTL.setQualifierLoc(SS.getWithLocInContext(SemaRef.Context));
+ TTL.setNameLoc(IdInfo.IdentifierLoc);
+ } else if (auto *TN = dyn_cast<TypedefNameDecl>(TD)) {
+ T = Context.getTypedefType(ElaboratedTypeKeyword::None, SS.getScopeRep(),
+ TN);
+ TLB.push<TypedefTypeLoc>(T).set(/*ElaboratedKeywordLoc=*/SourceLocation(),
+ SS.getWithLocInContext(SemaRef.Context),
+ IdInfo.IdentifierLoc);
+ } else if (auto *UD = dyn_cast<UnresolvedUsingTypenameDecl>(TD)) {
+ T = Context.getUnresolvedUsingType(ElaboratedTypeKeyword::None,
+ SS.getScopeRep(), UD);
+ TLB.push<UnresolvedUsingTypeLoc>(T).set(
+ /*ElaboratedKeywordLoc=*/SourceLocation(),
+ SS.getWithLocInContext(SemaRef.Context), IdInfo.IdentifierLoc);
} else {
- llvm_unreachable("Unhandled TypeDecl node in nested-name-specifier");
+ assert(SS.isEmpty());
+ T = Context.getTypeDeclType(TD);
+ TLB.pushTypeSpec(T).setNameLoc(IdInfo.IdentifierLoc);
}
-
- SS.Extend(Context, TLB.getTypeLocInContext(Context, T), IdInfo.CCLoc);
+ SS.clear();
+ SS.Make(Context, TLB.getTypeLocInContext(Context, T), IdInfo.CCLoc);
return false;
}
@@ -722,16 +731,34 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo,
Diag(IdInfo.IdentifierLoc,
diag::ext_undeclared_unqual_id_with_dependent_base)
<< IdInfo.Identifier << ContainingClass;
+
+ TypeLocBuilder TLB;
+
// Fake up a nested-name-specifier that starts with the
// injected-class-name of the enclosing class.
- QualType T = Context.getTypeDeclType(ContainingClass);
- TypeLocBuilder TLB;
- TLB.pushTrivial(Context, T, IdInfo.IdentifierLoc);
- SS.Extend(Context, TLB.getTypeLocInContext(Context, T),
- IdInfo.IdentifierLoc);
- // Add the identifier to form a dependent name.
- SS.Extend(Context, IdInfo.Identifier, IdInfo.IdentifierLoc,
- IdInfo.CCLoc);
+ // FIXME: This should be done as part of an adjustment, so that this
+ // doesn't get confused with something written in source.
+ QualType Result =
+ Context.getTagType(ElaboratedTypeKeyword::None, SS.getScopeRep(),
+ ContainingClass, /*OwnsTag=*/false);
+ auto TTL = TLB.push<TagTypeLoc>(Result);
+ TTL.setElaboratedKeywordLoc(SourceLocation());
+ TTL.setQualifierLoc(SS.getWithLocInContext(Context));
+ TTL.setNameLoc(IdInfo.IdentifierLoc);
+ SS.Make(Context, TLB.getTypeLocInContext(Context, Result),
+ SourceLocation());
+
+ TLB.clear();
+
+ // Form a DependentNameType.
+ QualType DTN = Context.getDependentNameType(
+ ElaboratedTypeKeyword::None, SS.getScopeRep(), IdInfo.Identifier);
+ auto DTNL = TLB.push<DependentNameTypeLoc>(DTN);
+ DTNL.setElaboratedKeywordLoc(SourceLocation());
+ DTNL.setNameLoc(IdInfo.IdentifierLoc);
+ DTNL.setQualifierLoc(SS.getWithLocInContext(Context));
+ SS.clear();
+ SS.Make(Context, TLB.getTypeLocInContext(Context, DTN), IdInfo.CCLoc);
return false;
}
}
@@ -739,8 +766,19 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo,
if (!Found.empty()) {
if (TypeDecl *TD = Found.getAsSingle<TypeDecl>()) {
+ QualType T;
+ if (auto *TN = dyn_cast<TypedefNameDecl>(TD)) {
+ T = Context.getTypedefType(ElaboratedTypeKeyword::None,
+ SS.getScopeRep(), TN);
+ } else {
+ // FIXME: Enumerate the possibilities here.
+ assert(!isa<TagDecl>(TD));
+ assert(SS.isEmpty());
+ T = Context.getTypeDeclType(TD);
+ }
+
Diag(IdInfo.IdentifierLoc, diag::err_expected_class_or_namespace)
- << Context.getTypeDeclType(TD) << getLangOpts().CPlusPlus;
+ << T << getLangOpts().CPlusPlus;
} else if (Found.getAsSingle<TemplateDecl>()) {
ParsedType SuggestedType;
DiagnoseUnknownTypeName(IdInfo.Identifier, IdInfo.IdentifierLoc, S, &SS,
@@ -786,17 +824,19 @@ bool Sema::ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
if (T.isNull())
return true;
- if (!T->isDependentType() && !T->getAs<TagType>()) {
+ if (!T->isDependentType() && !isa<TagType>(T.getCanonicalType())) {
Diag(DS.getTypeSpecTypeLoc(), diag::err_expected_class_or_namespace)
<< T << getLangOpts().CPlusPlus;
return true;
}
+ assert(SS.isEmpty());
+
TypeLocBuilder TLB;
DecltypeTypeLoc DecltypeTL = TLB.push<DecltypeTypeLoc>(T);
DecltypeTL.setDecltypeLoc(DS.getTypeSpecTypeLoc());
DecltypeTL.setRParenLoc(DS.getTypeofParensRange().getEnd());
- SS.Extend(Context, TLB.getTypeLocInContext(Context, T), ColonColonLoc);
+ SS.Make(Context, TLB.getTypeLocInContext(Context, T), ColonColonLoc);
return false;
}
@@ -812,13 +852,15 @@ bool Sema::ActOnCXXNestedNameSpecifierIndexedPack(CXXScopeSpec &SS,
if (Type.isNull())
return true;
+ assert(SS.isEmpty());
+
TypeLocBuilder TLB;
TLB.pushTrivial(getASTContext(),
cast<PackIndexingType>(Type.getTypePtr())->getPattern(),
DS.getBeginLoc());
PackIndexingTypeLoc PIT = TLB.push<PackIndexingTypeLoc>(Type);
PIT.setEllipsisLoc(DS.getEllipsisLoc());
- SS.Extend(Context, TLB.getTypeLocInContext(Context, Type), ColonColonLoc);
+ SS.Make(Context, TLB.getTypeLocInContext(Context, Type), ColonColonLoc);
return false;
}
@@ -858,7 +900,7 @@ bool Sema::ActOnCXXNestedNameSpecifier(Scope *S,
assert(DTN->getQualifier() == SS.getScopeRep());
QualType T = Context.getDependentTemplateSpecializationType(
ElaboratedTypeKeyword::None,
- {/*Qualifier=*/nullptr, DTN->getName().getIdentifier(),
+ {SS.getScopeRep(), DTN->getName().getIdentifier(),
TemplateKWLoc.isValid()},
TemplateArgs.arguments());
@@ -867,7 +909,7 @@ bool Sema::ActOnCXXNestedNameSpecifier(Scope *S,
DependentTemplateSpecializationTypeLoc SpecTL
= Builder.push<DependentTemplateSpecializationTypeLoc>(T);
SpecTL.setElaboratedKeywordLoc(SourceLocation());
- SpecTL.setQualifierLoc(NestedNameSpecifierLoc());
+ SpecTL.setQualifierLoc(SS.getWithLocInContext(Context));
SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
SpecTL.setTemplateNameLoc(TemplateNameLoc);
SpecTL.setLAngleLoc(LAngleLoc);
@@ -875,7 +917,8 @@ bool Sema::ActOnCXXNestedNameSpecifier(Scope *S,
for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
SpecTL.setArgLocInfo(I, TemplateArgs[I].getLocInfo());
- SS.Extend(Context, Builder.getTypeLocInContext(Context, T), CCLoc);
+ SS.clear();
+ SS.Make(Context, Builder.getTypeLocInContext(Context, T), CCLoc);
return false;
}
@@ -900,30 +943,28 @@ bool Sema::ActOnCXXNestedNameSpecifier(Scope *S,
// We were able to resolve the template name to an actual template.
// Build an appropriate nested-name-specifier.
- QualType T = CheckTemplateIdType(Template, TemplateNameLoc, TemplateArgs);
+ QualType T = CheckTemplateIdType(ElaboratedTypeKeyword::None, Template,
+ TemplateNameLoc, TemplateArgs);
if (T.isNull())
return true;
// Alias template specializations can produce types which are not valid
// nested name specifiers.
- if (!T->isDependentType() && !T->getAs<TagType>()) {
+ if (!T->isDependentType() && !isa<TagType>(T.getCanonicalType())) {
Diag(TemplateNameLoc, diag::err_nested_name_spec_non_tag) << T;
NoteAllFoundTemplates(Template);
return true;
}
// Provide source-location information for the template specialization type.
- TypeLocBuilder Builder;
- TemplateSpecializationTypeLoc SpecTL
- = Builder.push<TemplateSpecializationTypeLoc>(T);
- SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
- SpecTL.setTemplateNameLoc(TemplateNameLoc);
- SpecTL.setLAngleLoc(LAngleLoc);
- SpecTL.setRAngleLoc(RAngleLoc);
- for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
- SpecTL.setArgLocInfo(I, TemplateArgs[I].getLocInfo());
-
- SS.Extend(Context, Builder.getTypeLocInContext(Context, T), CCLoc);
+ TypeLocBuilder TLB;
+ TLB.push<TemplateSpecializationTypeLoc>(T).set(
+ /*ElaboratedKeywordLoc=*/SourceLocation(),
+ SS.getWithLocInContext(Context), TemplateKWLoc, TemplateNameLoc,
+ TemplateArgs);
+
+ SS.clear();
+ SS.Make(Context, TLB.getTypeLocInContext(Context, T), CCLoc);
return false;
}
@@ -931,7 +972,7 @@ namespace {
/// A structure that stores a nested-name-specifier annotation,
/// including both the nested-name-specifier
struct NestedNameSpecifierAnnotation {
- NestedNameSpecifier *NNS;
+ NestedNameSpecifier NNS = std::nullopt;
};
}
@@ -970,8 +1011,6 @@ bool Sema::ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS) {
if (isa<ObjCContainerDecl>(CurContext) || isa<ObjCMethodDecl>(CurContext))
return false;
- NestedNameSpecifier *Qualifier = SS.getScopeRep();
-
// There are only two places a well-formed program may qualify a
// declarator: first, when defining a namespace or class member
// out-of-line, and second, when naming an explicitly-qualified
@@ -986,18 +1025,20 @@ bool Sema::ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS) {
// granting friendship.
// i.e. we don't push a scope unless it's a class member.
- switch (Qualifier->getKind()) {
- case NestedNameSpecifier::Global:
- case NestedNameSpecifier::Namespace:
+ switch (SS.getScopeRep().getKind()) {
+ case NestedNameSpecifier::Kind::Global:
+ case NestedNameSpecifier::Kind::Namespace:
// These are always namespace scopes. We never want to enter a
// namespace scope from anything but a file context.
return CurContext->getRedeclContext()->isFileContext();
- case NestedNameSpecifier::Identifier:
- case NestedNameSpecifier::TypeSpec:
- case NestedNameSpecifier::Super:
+ case NestedNameSpecifier::Kind::Type:
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
// These are never namespace scopes.
return true;
+
+ case NestedNameSpecifier::Kind::Null:
+ llvm_unreachable("unexpected null nested name specifier");
}
llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
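
The SemaCXXScopeSpec changes carry the commit's second theme: `NestedNameSpecifier` is handled as a value with a `Kind` enum (`std::nullopt` as the empty state, `MicrosoftSuper` replacing `Super`, no separate `Identifier` kind), and scope specifiers are rebuilt with `SS.clear()` plus `SS.Make(...)` rather than `SS.Extend(...)`. A condensed sketch of the new dispatch, modeled on the rewritten computeDeclContext above (illustrative only; assumes the post-patch NestedNameSpecifier API):

#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/Sema/DeclSpec.h"
#include "llvm/Support/ErrorHandling.h"

using namespace clang;

// Illustrative sketch only -- not part of the commit above.
static DeclContext *scopeToContext(ASTContext &Ctx, const CXXScopeSpec &SS) {
  NestedNameSpecifier NNS = SS.getScopeRep(); // value, not pointer
  switch (NNS.getKind()) {
  case NestedNameSpecifier::Kind::Namespace:
    return const_cast<NamespaceDecl *>(
        NNS.getAsNamespaceAndPrefix().Namespace->getNamespace());
  case NestedNameSpecifier::Kind::Type:
    // Tag types answer directly; the pointer-era code went through
    // getAsType() and then TagType::getDecl().
    return NNS.getAsType()->getAsTagDecl();
  case NestedNameSpecifier::Kind::Global:
    return Ctx.getTranslationUnitDecl();
  case NestedNameSpecifier::Kind::MicrosoftSuper:
    return NNS.getAsMicrosoftSuper();
  case NestedNameSpecifier::Kind::Null:
    return nullptr; // empty specifier; the real code asserts this is unreachable
  }
  llvm_unreachable("covered switch");
}
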
diff --git a/clang/lib/Sema/SemaCast.cpp b/clang/lib/Sema/SemaCast.cpp
index 01252a4..da43848 100644
--- a/clang/lib/Sema/SemaCast.cpp
+++ b/clang/lib/Sema/SemaCast.cpp
@@ -964,7 +964,7 @@ void CastOperation::CheckDynamicCast() {
}
// C++ 5.2.7p6: Otherwise, v shall be [polymorphic].
- const RecordDecl *SrcDecl = SrcRecord->getDecl()->getDefinition();
+ const RecordDecl *SrcDecl = SrcRecord->getOriginalDecl()->getDefinition();
assert(SrcDecl && "Definition missing");
if (!cast<CXXRecordDecl>(SrcDecl)->isPolymorphic()) {
Self.Diag(OpRange.getBegin(), diag::err_bad_dynamic_cast_not_polymorphic)
@@ -1455,7 +1455,7 @@ static TryCastResult TryStaticCast(Sema &Self, ExprResult &SrcExpr,
// converted to an integral type. [...] A value of a scoped enumeration type
// can also be explicitly converted to a floating-point type [...].
if (const EnumType *Enum = SrcType->getAs<EnumType>()) {
- if (Enum->getDecl()->isScoped()) {
+ if (Enum->getOriginalDecl()->isScoped()) {
if (DestType->isBooleanType()) {
Kind = CK_IntegralToBoolean;
return TC_Success;
@@ -1487,8 +1487,8 @@ static TryCastResult TryStaticCast(Sema &Self, ExprResult &SrcExpr,
// [expr.static.cast]p10 If the enumeration type has a fixed underlying
// type, the value is first converted to that type by integral conversion
const EnumType *Enum = DestType->castAs<EnumType>();
- Kind = Enum->getDecl()->isFixed() &&
- Enum->getDecl()->getIntegerType()->isBooleanType()
+ const EnumDecl *ED = Enum->getOriginalDecl()->getDefinitionOrSelf();
+ Kind = ED->isFixed() && ED->getIntegerType()->isBooleanType()
? CK_IntegralToBoolean
: CK_IntegralCast;
return TC_Success;
@@ -1856,7 +1856,7 @@ TryCastResult TryStaticMemberPointerUpcast(Sema &Self, ExprResult &SrcExpr,
FoundOverload)) {
CXXMethodDecl *M = cast<CXXMethodDecl>(Fn);
SrcType = Self.Context.getMemberPointerType(
- Fn->getType(), /*Qualifier=*/nullptr, M->getParent());
+ Fn->getType(), /*Qualifier=*/std::nullopt, M->getParent());
WasOverloadedFunction = true;
}
}
@@ -2102,9 +2102,9 @@ void Sema::CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
return;
}
// or one of the types is a tag type.
- if (SrcTy->getAs<TagType>() || DestTy->getAs<TagType>()) {
+ if (isa<TagType>(SrcTy.getCanonicalType()) ||
+ isa<TagType>(DestTy.getCanonicalType()))
return;
- }
// FIXME: Scoped enums?
if ((SrcTy->isUnsignedIntegerType() && DestTy->isSignedIntegerType()) ||
@@ -3097,27 +3097,26 @@ void CastOperation::CheckCStyleCast() {
if (!DestType->isScalarType() && !DestType->isVectorType() &&
!DestType->isMatrixType()) {
- const RecordType *DestRecordTy = DestType->getAs<RecordType>();
-
- if (DestRecordTy && Self.Context.hasSameUnqualifiedType(DestType, SrcType)){
- // GCC struct/union extension: allow cast to self.
- Self.Diag(OpRange.getBegin(), diag::ext_typecheck_cast_nonscalar)
- << DestType << SrcExpr.get()->getSourceRange();
- Kind = CK_NoOp;
- return;
- }
-
- // GCC's cast to union extension.
- if (DestRecordTy && DestRecordTy->getDecl()->isUnion()) {
- RecordDecl *RD = DestRecordTy->getDecl();
- if (CastExpr::getTargetFieldForToUnionCast(RD, SrcType)) {
- Self.Diag(OpRange.getBegin(), diag::ext_typecheck_cast_to_union)
- << SrcExpr.get()->getSourceRange();
- Kind = CK_ToUnion;
+ if (const RecordType *DestRecordTy = DestType->getAs<RecordType>()) {
+ if (Self.Context.hasSameUnqualifiedType(DestType, SrcType)) {
+ // GCC struct/union extension: allow cast to self.
+ Self.Diag(OpRange.getBegin(), diag::ext_typecheck_cast_nonscalar)
+ << DestType << SrcExpr.get()->getSourceRange();
+ Kind = CK_NoOp;
return;
- } else {
+ }
+
+ // GCC's cast to union extension.
+ if (RecordDecl *RD = DestRecordTy->getOriginalDecl(); RD->isUnion()) {
+ if (CastExpr::getTargetFieldForToUnionCast(RD->getDefinitionOrSelf(),
+ SrcType)) {
+ Self.Diag(OpRange.getBegin(), diag::ext_typecheck_cast_to_union)
+ << SrcExpr.get()->getSourceRange();
+ Kind = CK_ToUnion;
+ return;
+ }
Self.Diag(OpRange.getBegin(), diag::err_typecheck_cast_to_union_no_type)
- << SrcType << SrcExpr.get()->getSourceRange();
+ << SrcType << SrcExpr.get()->getSourceRange();
SrcExpr = ExprError();
return;
}
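
In the static_cast hunks above, the two `Enum->getDecl()` calls collapse into one resolved `EnumDecl`; the cast-kind decision itself is unchanged. Restated as a small sketch (illustrative only, using accessors that appear in the hunks):

#include "clang/AST/Decl.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/Type.h"

using namespace clang;

// Illustrative sketch only -- not part of the commit above: pick the cast
// kind for an integral cast whose destination is an enum type, as in the
// patched TryStaticCast.
static CastKind castKindForEnumDest(const EnumType *Enum) {
  const EnumDecl *ED = Enum->getOriginalDecl()->getDefinitionOrSelf();
  // A fixed 'enum E : bool' converts via bool; everything else is an
  // ordinary integral conversion.
  return ED->isFixed() && ED->getIntegerType()->isBooleanType()
             ? CK_IntegralToBoolean
             : CK_IntegralCast;
}
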
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index 3c4511b..9ecee18 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -604,7 +604,7 @@ struct BuiltinDumpStructGenerator {
bool dumpUnnamedRecord(const RecordDecl *RD, Expr *E, unsigned Depth) {
Expr *IndentLit = getIndentString(Depth);
- Expr *TypeLit = getTypeString(S.Context.getRecordType(RD));
+ Expr *TypeLit = getTypeString(S.Context.getCanonicalTagType(RD));
if (IndentLit ? callPrintFunction("%s%s", {IndentLit, TypeLit})
: callPrintFunction("%s", {TypeLit}))
return true;
@@ -2289,7 +2289,7 @@ static ExprResult BuiltinInvoke(Sema &S, CallExpr *TheCall) {
return ExprError();
}
- const Type *MemPtrClass = MPT->getQualifier()->getAsType();
+ const Type *MemPtrClass = MPT->getQualifier().getAsType();
QualType ObjectT = Args[1]->getType();
if (MPT->isMemberDataPointer() && S.checkArgCount(TheCall, 2))
@@ -3318,7 +3318,9 @@ static bool CheckNonNullExpr(Sema &S, const Expr *Expr) {
// As a special case, transparent unions initialized with zero are
// considered null for the purposes of the nonnull attribute.
if (const RecordType *UT = Expr->getType()->getAsUnionType();
- UT && UT->getDecl()->hasAttr<TransparentUnionAttr>()) {
+ UT && UT->getOriginalDecl()
+ ->getMostRecentDecl()
+ ->hasAttr<TransparentUnionAttr>()) {
if (const auto *CLE = dyn_cast<CompoundLiteralExpr>(Expr))
if (const auto *ILE = dyn_cast<InitListExpr>(CLE->getInitializer()))
Expr = ILE->getInit(0);
@@ -5175,7 +5177,9 @@ bool Sema::BuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) {
return false;
if (!Type->isEnumeralType())
return true;
- const EnumDecl *ED = Type->castAs<EnumType>()->getDecl();
+ const EnumDecl *ED = Type->castAs<EnumType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf();
return !(ED &&
Context.typesAreCompatible(ED->getPromotionType(), Type));
}()) {
@@ -7791,8 +7795,11 @@ CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) {
if (!RT)
return Results;
- const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
- if (!RD || !RD->getDefinition())
+ CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getOriginalDecl());
+ if (!RD)
+ return Results;
+ RD = RD->getDefinition();
+ if (!RD)
return Results;
LookupResult R(S, &S.Context.Idents.get(Name), SourceLocation(),
@@ -7801,7 +7808,7 @@ CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) {
// We just need to include all members of the right kind turned up by the
// filter, at this point.
- if (S.LookupQualifiedName(R, RT->getDecl()))
+ if (S.LookupQualifiedName(R, RD))
for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) {
NamedDecl *decl = (*I)->getUnderlyingDecl();
if (MemberKind *FK = dyn_cast<MemberKind>(decl))
@@ -8328,7 +8335,8 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
bool IsScopedEnum = false;
QualType IntendedTy = ExprTy;
if (auto EnumTy = ExprTy->getAs<EnumType>()) {
- IntendedTy = EnumTy->getDecl()->getIntegerType();
+ IntendedTy =
+ EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
if (EnumTy->isUnscopedEnumerationType()) {
ExprTy = IntendedTy;
// This controls whether we're talking about the underlying type or not,
@@ -8364,7 +8372,9 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
NamedDecl *ND = Result.getFoundDecl();
if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(ND))
if (TD->getUnderlyingType() == IntendedTy)
- IntendedTy = S.Context.getTypedefType(TD);
+ IntendedTy =
+ S.Context.getTypedefType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, TD);
}
}
}
@@ -9622,7 +9632,10 @@ struct SearchNonTrivialToInitializeField
S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1);
}
void visitStruct(QualType FT, SourceLocation SL) {
- for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields())
+ for (const FieldDecl *FD : FT->castAs<RecordType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->fields())
visit(FD->getType(), FD->getLocation());
}
void visitArray(QualType::PrimitiveDefaultInitializeKind PDIK,
@@ -9667,7 +9680,10 @@ struct SearchNonTrivialToCopyField
S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0);
}
void visitStruct(QualType FT, SourceLocation SL) {
- for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields())
+ for (const FieldDecl *FD : FT->castAs<RecordType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->fields())
visit(FD->getType(), FD->getLocation());
}
void visitArray(QualType::PrimitiveCopyKind PCK, const ArrayType *AT,
@@ -9948,8 +9964,9 @@ void Sema::CheckMemaccessArguments(const CallExpr *Call,
getLangOpts().CPlusPlus && !RT->isIncompleteType() &&
!RT->desugar().isTriviallyCopyableType(Context);
+ const auto *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
if ((BId == Builtin::BImemset || BId == Builtin::BIbzero) &&
- RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize()) {
+ RD->isNonTrivialToPrimitiveDefaultInitialize()) {
DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
PDiag(diag::warn_cstruct_memaccess)
<< ArgIdx << FnName << PointeeTy << 0);
@@ -9962,7 +9979,7 @@ void Sema::CheckMemaccessArguments(const CallExpr *Call,
PDiag(diag::warn_cxxstruct_memaccess)
<< FnName << PointeeTy);
} else if ((BId == Builtin::BImemcpy || BId == Builtin::BImemmove) &&
- RT->getDecl()->isNonTrivialToPrimitiveCopy()) {
+ RD->isNonTrivialToPrimitiveCopy()) {
DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
PDiag(diag::warn_cstruct_memaccess)
<< ArgIdx << FnName << PointeeTy << 1);
@@ -10468,10 +10485,14 @@ struct IntRange {
if (!C.getLangOpts().CPlusPlus) {
// For enum types in C code, use the underlying datatype.
if (const auto *ET = dyn_cast<EnumType>(T))
- T = ET->getDecl()->getIntegerType().getDesugaredType(C).getTypePtr();
+ T = ET->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->getIntegerType()
+ .getDesugaredType(C)
+ .getTypePtr();
} else if (const auto *ET = dyn_cast<EnumType>(T)) {
// For enum types in C++, use the known bit width of the enumerators.
- EnumDecl *Enum = ET->getDecl();
+ EnumDecl *Enum = ET->getOriginalDecl()->getDefinitionOrSelf();
// In C++11, enums can have a fixed underlying type. Use this type to
// compute the range.
if (Enum->isFixed()) {
@@ -10513,7 +10534,9 @@ struct IntRange {
if (const AtomicType *AT = dyn_cast<AtomicType>(T))
T = AT->getValueType().getTypePtr();
if (const EnumType *ET = dyn_cast<EnumType>(T))
- T = C.getCanonicalType(ET->getDecl()->getIntegerType()).getTypePtr();
+ T = C.getCanonicalType(
+ ET->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType())
+ .getTypePtr();
if (const auto *EIT = dyn_cast<BitIntType>(T))
return IntRange(EIT->getNumBits(), EIT->isUnsigned());
@@ -11485,7 +11508,9 @@ static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
return false;
if (BitfieldType->isEnumeralType()) {
- EnumDecl *BitfieldEnumDecl = BitfieldType->castAs<EnumType>()->getDecl();
+ EnumDecl *BitfieldEnumDecl = BitfieldType->castAs<EnumType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf();
// If the underlying enum type was not explicitly specified as an unsigned
// type and the enum contain only positive values, MSVC++ will cause an
// inconsistency by storing this as a signed type.
@@ -11522,7 +11547,7 @@ static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
EnumTy = PTAttr->getType()->getAs<EnumType>();
}
if (EnumTy) {
- EnumDecl *ED = EnumTy->getDecl();
+ EnumDecl *ED = EnumTy->getOriginalDecl()->getDefinitionOrSelf();
bool SignedBitfield = BitfieldType->isSignedIntegerOrEnumerationType();
// Enum types are implicitly signed on Windows, so check if there are any
@@ -12588,8 +12613,8 @@ void Sema::CheckImplicitConversion(Expr *E, QualType T, SourceLocation CC,
if (const EnumType *SourceEnum = Source->getAs<EnumType>())
if (const EnumType *TargetEnum = Target->getAs<EnumType>())
- if (SourceEnum->getDecl()->hasNameForLinkage() &&
- TargetEnum->getDecl()->hasNameForLinkage() &&
+ if (SourceEnum->getOriginalDecl()->hasNameForLinkage() &&
+ TargetEnum->getOriginalDecl()->hasNameForLinkage() &&
SourceEnum != TargetEnum) {
if (SourceMgr.isInSystemMacro(CC))
return;
@@ -15113,10 +15138,9 @@ static bool isLayoutCompatible(const ASTContext &C, const EnumDecl *ED1,
static bool isLayoutCompatible(const ASTContext &C, const FieldDecl *Field1,
const FieldDecl *Field2,
bool AreUnionMembers = false) {
- [[maybe_unused]] const Type *Field1Parent =
- Field1->getParent()->getTypeForDecl();
- [[maybe_unused]] const Type *Field2Parent =
- Field2->getParent()->getTypeForDecl();
+#ifndef NDEBUG
+ CanQualType Field1Parent = C.getCanonicalTagType(Field1->getParent());
+ CanQualType Field2Parent = C.getCanonicalTagType(Field2->getParent());
assert(((Field1Parent->isStructureOrClassType() &&
Field2Parent->isStructureOrClassType()) ||
(Field1Parent->isUnionType() && Field2Parent->isUnionType())) &&
@@ -15125,6 +15149,7 @@ static bool isLayoutCompatible(const ASTContext &C, const FieldDecl *Field1,
assert(((!AreUnionMembers && Field1Parent->isStructureOrClassType()) ||
(AreUnionMembers && Field1Parent->isUnionType())) &&
"AreUnionMembers should be 'true' for union fields (only).");
+#endif
if (!isLayoutCompatible(C, Field1->getType(), Field2->getType()))
return false;
@@ -15229,16 +15254,16 @@ static bool isLayoutCompatible(const ASTContext &C, QualType T1, QualType T2) {
return false;
if (TC1 == Type::Enum) {
- return isLayoutCompatible(C,
- cast<EnumType>(T1)->getDecl(),
- cast<EnumType>(T2)->getDecl());
+ return isLayoutCompatible(
+ C, cast<EnumType>(T1)->getOriginalDecl()->getDefinitionOrSelf(),
+ cast<EnumType>(T2)->getOriginalDecl()->getDefinitionOrSelf());
} else if (TC1 == Type::Record) {
if (!T1->isStandardLayoutType() || !T2->isStandardLayoutType())
return false;
- return isLayoutCompatible(C,
- cast<RecordType>(T1)->getDecl(),
- cast<RecordType>(T2)->getDecl());
+ return isLayoutCompatible(
+ C, cast<RecordType>(T1)->getOriginalDecl()->getDefinitionOrSelf(),
+ cast<RecordType>(T2)->getOriginalDecl()->getDefinitionOrSelf());
}
return false;
@@ -15592,7 +15617,9 @@ void Sema::RefersToMemberWithReducedAlignment(
return;
if (ME->isArrow())
BaseType = BaseType->getPointeeType();
- RecordDecl *RD = BaseType->castAs<RecordType>()->getDecl();
+ RecordDecl *RD = BaseType->castAs<RecordType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf();
if (RD->isInvalidDecl())
return;
@@ -15637,7 +15664,7 @@ void Sema::RefersToMemberWithReducedAlignment(
// Compute the CompleteObjectAlignment as the alignment of the whole chain.
CharUnits CompleteObjectAlignment = Context.getTypeAlignInChars(
- ReverseMemberChain.back()->getParent()->getTypeForDecl());
+ Context.getCanonicalTagType(ReverseMemberChain.back()->getParent()));
// The base expression of the innermost MemberExpr may give
// stronger guarantees than the class containing the member.
@@ -15667,9 +15694,9 @@ void Sema::RefersToMemberWithReducedAlignment(
if (FDI->hasAttr<PackedAttr>() ||
FDI->getParent()->hasAttr<PackedAttr>()) {
FD = FDI;
- Alignment = std::min(
- Context.getTypeAlignInChars(FD->getType()),
- Context.getTypeAlignInChars(FD->getParent()->getTypeForDecl()));
+ Alignment = std::min(Context.getTypeAlignInChars(FD->getType()),
+ Context.getTypeAlignInChars(
+ Context.getCanonicalTagType(FD->getParent())));
break;
}
}
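
One SemaChecking detail worth isolating: with the original-decl accessors, attribute checks hop to the most recent redeclaration, since an attribute may only be attached to a later redecl (see the transparent-union hunk above). A minimal sketch of that lookup, assuming the post-patch accessors:

#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/Type.h"

using namespace clang;

// Illustrative sketch only -- not part of the commit above: test whether an
// expression's type is a transparent union, following the patched
// CheckNonNullExpr.
static bool isTransparentUnion(QualType T) {
  const RecordType *UT = T->getAsUnionType();
  return UT && UT->getOriginalDecl()
                   ->getMostRecentDecl()
                   ->hasAttr<TransparentUnionAttr>();
}
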
diff --git a/clang/lib/Sema/SemaCodeComplete.cpp b/clang/lib/Sema/SemaCodeComplete.cpp
index 0de5580..e4f2760 100644
--- a/clang/lib/Sema/SemaCodeComplete.cpp
+++ b/clang/lib/Sema/SemaCodeComplete.cpp
@@ -732,7 +732,7 @@ ResultBuilder::ShadowMapEntry::end() const {
///
/// \returns a nested name specifier that refers into the target context, or
/// NULL if no qualification is needed.
-static NestedNameSpecifier *
+static NestedNameSpecifier
getRequiredQualification(ASTContext &Context, const DeclContext *CurContext,
const DeclContext *TargetContext) {
SmallVector<const DeclContext *, 4> TargetParents;
@@ -747,7 +747,7 @@ getRequiredQualification(ASTContext &Context, const DeclContext *CurContext,
TargetParents.push_back(CommonAncestor);
}
- NestedNameSpecifier *Result = nullptr;
+ NestedNameSpecifier Result = std::nullopt;
while (!TargetParents.empty()) {
const DeclContext *Parent = TargetParents.pop_back_val();
@@ -755,10 +755,12 @@ getRequiredQualification(ASTContext &Context, const DeclContext *CurContext,
if (!Namespace->getIdentifier())
continue;
- Result = NestedNameSpecifier::Create(Context, Result, Namespace);
- } else if (const auto *TD = dyn_cast<TagDecl>(Parent))
- Result = NestedNameSpecifier::Create(
- Context, Result, Context.getTypeDeclType(TD).getTypePtr());
+ Result = NestedNameSpecifier(Context, Namespace, Result);
+ } else if (const auto *TD = dyn_cast<TagDecl>(Parent)) {
+ QualType TT = Context.getTagType(ElaboratedTypeKeyword::None, Result, TD,
+ /*OwnsTag=*/false);
+ Result = NestedNameSpecifier(TT.getTypePtr());
+ }
}
return Result;
}
@@ -937,11 +939,12 @@ SimplifiedTypeClass clang::getSimplifiedTypeClass(CanQualType T) {
/// Get the type that a given expression will have if this declaration
/// is used as an expression in its "typical" code-completion form.
-QualType clang::getDeclUsageType(ASTContext &C, const NamedDecl *ND) {
+QualType clang::getDeclUsageType(ASTContext &C, NestedNameSpecifier Qualifier,
+ const NamedDecl *ND) {
ND = ND->getUnderlyingDecl();
if (const auto *Type = dyn_cast<TypeDecl>(ND))
- return C.getTypeDeclType(Type);
+ return C.getTypeDeclType(ElaboratedTypeKeyword::None, Qualifier, Type);
if (const auto *Iface = dyn_cast<ObjCInterfaceDecl>(ND))
return C.getObjCInterfaceType(Iface);
@@ -951,7 +954,9 @@ QualType clang::getDeclUsageType(ASTContext &C, const NamedDecl *ND) {
else if (const auto *Method = dyn_cast<ObjCMethodDecl>(ND))
T = Method->getSendResultType();
else if (const auto *Enumerator = dyn_cast<EnumConstantDecl>(ND))
- T = C.getTypeDeclType(cast<EnumDecl>(Enumerator->getDeclContext()));
+ T = C.getTagType(ElaboratedTypeKeyword::None, Qualifier,
+ cast<EnumDecl>(Enumerator->getDeclContext()),
+ /*OwnsTag=*/false);
else if (const auto *Property = dyn_cast<ObjCPropertyDecl>(ND))
T = Property->getType();
else if (const auto *Value = dyn_cast<ValueDecl>(ND))
@@ -1053,7 +1058,7 @@ void ResultBuilder::AdjustResultPriorityForDecl(Result &R) {
// If we have a preferred type, adjust the priority for results with exactly-
// matching or nearly-matching types.
if (!PreferredType.isNull()) {
- QualType T = getDeclUsageType(SemaRef.Context, R.Declaration);
+ QualType T = getDeclUsageType(SemaRef.Context, R.Qualifier, R.Declaration);
if (!T.isNull()) {
CanQualType TC = SemaRef.Context.getCanonicalType(T);
// Check for exactly-matching types (modulo qualifiers).
@@ -1070,10 +1075,9 @@ void ResultBuilder::AdjustResultPriorityForDecl(Result &R) {
static DeclContext::lookup_result getConstructors(ASTContext &Context,
const CXXRecordDecl *Record) {
- QualType RecordTy = Context.getTypeDeclType(Record);
+ CanQualType RecordTy = Context.getCanonicalTagType(Record);
DeclarationName ConstructorName =
- Context.DeclarationNames.getCXXConstructorName(
- Context.getCanonicalType(RecordTy));
+ Context.DeclarationNames.getCXXConstructorName(RecordTy);
return Record->lookup(ConstructorName);
}
@@ -1216,11 +1220,13 @@ void ResultBuilder::MaybeAddResult(Result R, DeclContext *CurContext) {
const DeclContext *Ctx = R.Declaration->getDeclContext();
if (const NamespaceDecl *Namespace = dyn_cast<NamespaceDecl>(Ctx))
R.Qualifier =
- NestedNameSpecifier::Create(SemaRef.Context, nullptr, Namespace);
+ NestedNameSpecifier(SemaRef.Context, Namespace, std::nullopt);
else if (const TagDecl *Tag = dyn_cast<TagDecl>(Ctx))
- R.Qualifier = NestedNameSpecifier::Create(
- SemaRef.Context, nullptr,
- SemaRef.Context.getTypeDeclType(Tag).getTypePtr());
+ R.Qualifier = NestedNameSpecifier(
+ SemaRef.Context
+ .getTagType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, Tag, /*OwnsTag=*/false)
+ .getTypePtr());
else
R.QualifierIsInformative = false;
}
@@ -1405,11 +1411,13 @@ void ResultBuilder::AddResult(Result R, DeclContext *CurContext,
const DeclContext *Ctx = R.Declaration->getDeclContext();
if (const auto *Namespace = dyn_cast<NamespaceDecl>(Ctx))
R.Qualifier =
- NestedNameSpecifier::Create(SemaRef.Context, nullptr, Namespace);
+ NestedNameSpecifier(SemaRef.Context, Namespace, std::nullopt);
else if (const auto *Tag = dyn_cast<TagDecl>(Ctx))
- R.Qualifier = NestedNameSpecifier::Create(
- SemaRef.Context, nullptr,
- SemaRef.Context.getTypeDeclType(Tag).getTypePtr());
+ R.Qualifier = NestedNameSpecifier(
+ SemaRef.Context
+ .getTagType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, Tag, /*OwnsTag=*/false)
+ .getTypePtr());
else
R.QualifierIsInformative = false;
}
@@ -1664,7 +1672,8 @@ static bool isObjCReceiverType(ASTContext &C, QualType T) {
}
bool ResultBuilder::IsObjCMessageReceiver(const NamedDecl *ND) const {
- QualType T = getDeclUsageType(SemaRef.Context, ND);
+ QualType T =
+ getDeclUsageType(SemaRef.Context, /*Qualifier=*/std::nullopt, ND);
if (T.isNull())
return false;
@@ -1689,7 +1698,8 @@ bool ResultBuilder::IsObjCCollection(const NamedDecl *ND) const {
(!SemaRef.getLangOpts().CPlusPlus && !IsOrdinaryNonTypeName(ND)))
return false;
- QualType T = getDeclUsageType(SemaRef.Context, ND);
+ QualType T =
+ getDeclUsageType(SemaRef.Context, /*Qualifier=*/std::nullopt, ND);
if (T.isNull())
return false;
@@ -1745,8 +1755,10 @@ public:
void FoundDecl(NamedDecl *ND, NamedDecl *Hiding, DeclContext *Ctx,
bool InBaseClass) override {
- ResultBuilder::Result Result(ND, Results.getBasePriority(ND), nullptr,
- false, IsAccessible(ND, Ctx), FixIts);
+ ResultBuilder::Result Result(ND, Results.getBasePriority(ND),
+ /*Qualifier=*/std::nullopt,
+ /*QualifierIsInformative=*/false,
+ IsAccessible(ND, Ctx), FixIts);
Results.AddResult(Result, InitialLookupCtx, Hiding, InBaseClass, BaseType);
}
@@ -2010,7 +2022,6 @@ static PrintingPolicy getCompletionPrintingPolicy(const ASTContext &Context,
Policy.AnonymousTagLocations = false;
Policy.SuppressStrongLifetime = true;
Policy.SuppressUnwrittenScope = true;
- Policy.SuppressScope = true;
Policy.CleanUglifiedParameters = true;
return Policy;
}
@@ -2035,7 +2046,7 @@ static const char *GetCompletionTypeString(QualType T, ASTContext &Context,
// Anonymous tag types are constant strings.
if (const TagType *TagT = dyn_cast<TagType>(T))
- if (TagDecl *Tag = TagT->getDecl())
+ if (TagDecl *Tag = TagT->getOriginalDecl())
if (!Tag->hasNameForLinkage()) {
switch (Tag->getTagKind()) {
case TagTypeKind::Struct:
@@ -2925,8 +2936,8 @@ static void AddResultTypeChunk(ASTContext &Context,
else
T = Method->getReturnType();
} else if (const auto *Enumerator = dyn_cast<EnumConstantDecl>(ND)) {
- T = Context.getTypeDeclType(cast<TypeDecl>(Enumerator->getDeclContext()));
- T = clang::TypeName::getFullyQualifiedType(T, Context);
+ T = Context.getCanonicalTagType(
+ cast<EnumDecl>(Enumerator->getDeclContext()));
} else if (isa<UnresolvedUsingValueDecl>(ND)) {
/* Do nothing: ignore unresolved using declarations*/
} else if (const auto *Ivar = dyn_cast<ObjCIvarDecl>(ND)) {
@@ -3021,7 +3032,7 @@ static void findTypeLocationForBlockDecl(const TypeSourceInfo *TSInfo,
if (!SuppressBlock) {
if (TypedefTypeLoc TypedefTL = TL.getAsAdjusted<TypedefTypeLoc>()) {
if (TypeSourceInfo *InnerTSInfo =
- TypedefTL.getTypedefNameDecl()->getTypeSourceInfo()) {
+ TypedefTL.getDecl()->getTypeSourceInfo()) {
TL = InnerTSInfo->getTypeLoc().getUnqualifiedLoc();
continue;
}
@@ -3381,7 +3392,7 @@ static void AddTemplateParameterChunks(
/// Add a qualifier to the given code-completion string, if the
/// provided nested-name-specifier is non-NULL.
static void AddQualifierToCompletionString(CodeCompletionBuilder &Result,
- NestedNameSpecifier *Qualifier,
+ NestedNameSpecifier Qualifier,
bool QualifierIsInformative,
ASTContext &Context,
const PrintingPolicy &Policy) {
@@ -3391,7 +3402,7 @@ static void AddQualifierToCompletionString(CodeCompletionBuilder &Result,
std::string PrintedNNS;
{
llvm::raw_string_ostream OS(PrintedNNS);
- Qualifier->print(OS, Policy);
+ Qualifier.print(OS, Policy);
}
if (QualifierIsInformative)
Result.AddInformativeChunk(Result.getAllocator().CopyString(PrintedNNS));
@@ -3520,11 +3531,9 @@ static void AddTypedNameChunk(ASTContext &Context, const PrintingPolicy &Policy,
case DeclarationName::CXXConstructorName: {
CXXRecordDecl *Record = nullptr;
QualType Ty = Name.getCXXNameType();
- if (const auto *RecordTy = Ty->getAs<RecordType>())
- Record = cast<CXXRecordDecl>(RecordTy->getDecl());
- else if (const auto *InjectedTy = Ty->getAs<InjectedClassNameType>())
- Record = InjectedTy->getDecl();
- else {
+ if (auto *RD = Ty->getAsCXXRecordDecl()) {
+ Record = RD;
+ } else {
Result.AddTypedTextChunk(
Result.getAllocator().CopyString(ND->getNameAsString()));
break;
@@ -4506,12 +4515,12 @@ static void MaybeAddOverrideCalls(Sema &S, DeclContext *InContext,
// If we need a nested-name-specifier, add one now.
if (!InContext) {
- NestedNameSpecifier *NNS = getRequiredQualification(
+ NestedNameSpecifier NNS = getRequiredQualification(
S.Context, CurContext, Overridden->getDeclContext());
if (NNS) {
std::string Str;
llvm::raw_string_ostream OS(Str);
- NNS->print(OS, Policy);
+ NNS.print(OS, Policy);
Builder.AddTextChunk(Results.getAllocator().CopyString(Str));
}
} else if (!InContext->Equals(Overridden->getDeclContext()))
@@ -4920,14 +4929,14 @@ namespace {
/// Information that allows to avoid completing redundant enumerators.
struct CoveredEnumerators {
llvm::SmallPtrSet<EnumConstantDecl *, 8> Seen;
- NestedNameSpecifier *SuggestedQualifier = nullptr;
+ NestedNameSpecifier SuggestedQualifier = std::nullopt;
};
} // namespace
static void AddEnumerators(ResultBuilder &Results, ASTContext &Context,
EnumDecl *Enum, DeclContext *CurContext,
const CoveredEnumerators &Enumerators) {
- NestedNameSpecifier *Qualifier = Enumerators.SuggestedQualifier;
+ NestedNameSpecifier Qualifier = Enumerators.SuggestedQualifier;
if (Context.getLangOpts().CPlusPlus && !Qualifier && Enumerators.Seen.empty()) {
// If there are no prior enumerators in C++, check whether we have to
// qualify the names of the enumerators that we suggest, because they
@@ -5059,9 +5068,9 @@ void SemaCodeCompletion::CodeCompleteExpression(
Data.PreferredType->isMemberPointerType() ||
Data.PreferredType->isBlockPointerType();
if (Data.PreferredType->isEnumeralType()) {
- EnumDecl *Enum = Data.PreferredType->castAs<EnumType>()->getDecl();
- if (auto *Def = Enum->getDefinition())
- Enum = Def;
+ EnumDecl *Enum = Data.PreferredType->castAs<EnumType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf();
// FIXME: collect covered enumerators in cases like:
// if (x == my_enum::one) { ... } else if (x == ^) {}
AddEnumerators(Results, getASTContext(), Enum, SemaRef.CurContext,
@@ -5185,7 +5194,8 @@ AddObjCProperties(const CodeCompletionContext &CCContext,
// expressions.
if (!P->getType().getTypePtr()->isBlockPointerType() ||
!IsBaseExprStatement) {
- Result R = Result(P, Results.getBasePriority(P), nullptr);
+ Result R =
+ Result(P, Results.getBasePriority(P), /*Qualifier=*/std::nullopt);
if (!InOriginalClass)
setInBaseClass(R);
Results.MaybeAddResult(R, CurContext);
@@ -5199,7 +5209,8 @@ AddObjCProperties(const CodeCompletionContext &CCContext,
findTypeLocationForBlockDecl(P->getTypeSourceInfo(), BlockLoc,
BlockProtoLoc);
if (!BlockLoc) {
- Result R = Result(P, Results.getBasePriority(P), nullptr);
+ Result R =
+ Result(P, Results.getBasePriority(P), /*Qualifier=*/std::nullopt);
if (!InOriginalClass)
setInBaseClass(R);
Results.MaybeAddResult(R, CurContext);
@@ -5611,15 +5622,18 @@ private:
// In T::foo, `foo` is a static member function/variable.
bool VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E) override {
- if (E->getQualifier() && isApprox(E->getQualifier()->getAsType(), T))
+ NestedNameSpecifier Qualifier = E->getQualifier();
+ if (Qualifier.getKind() == NestedNameSpecifier::Kind::Type &&
+ isApprox(Qualifier.getAsType(), T))
addValue(E, E->getDeclName(), Member::Colons);
return true;
}
// In T::typename foo, `foo` is a type.
bool VisitDependentNameType(DependentNameType *DNT) override {
- const auto *Q = DNT->getQualifier();
- if (Q && isApprox(Q->getAsType(), T))
+ NestedNameSpecifier Q = DNT->getQualifier();
+ if (Q.getKind() == NestedNameSpecifier::Kind::Type &&
+ isApprox(Q.getAsType(), T))
addType(DNT->getIdentifier());
return true;
}
@@ -5628,10 +5642,15 @@ private:
// VisitNNS() doesn't exist, and TraverseNNS isn't always called :-(
bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc NNSL) override {
if (NNSL) {
- NestedNameSpecifier *NNS = NNSL.getNestedNameSpecifier();
- const auto *Q = NNS->getPrefix();
- if (Q && isApprox(Q->getAsType(), T))
- addType(NNS->getAsIdentifier());
+ NestedNameSpecifier NNS = NNSL.getNestedNameSpecifier();
+ if (NNS.getKind() == NestedNameSpecifier::Kind::Type) {
+ const Type *NNST = NNS.getAsType();
+ if (NestedNameSpecifier Q = NNST->getPrefix();
+ Q.getKind() == NestedNameSpecifier::Kind::Type &&
+ isApprox(Q.getAsType(), T))
+ if (const auto *DNT = dyn_cast_or_null<DependentNameType>(NNST))
+ addType(DNT->getIdentifier());
+ }
}
// FIXME: also handle T::foo<X>::bar
return DynamicRecursiveASTVisitor::TraverseNestedNameSpecifierLoc(NNSL);
@@ -5777,7 +5796,7 @@ QualType getApproximateType(const Expr *E, HeuristicResolver &Resolver) {
if (auto Decls = Resolver.resolveDependentNameType(DNT);
Decls.size() == 1) {
if (const auto *TD = dyn_cast<TypeDecl>(Decls[0]))
- return QualType(TD->getTypeForDecl(), 0);
+ return TD->getASTContext().getTypeDeclType(TD);
}
}
// We only resolve DependentTy, or undeduced autos (including auto* etc).
@@ -6184,9 +6203,8 @@ void SemaCodeCompletion::CodeCompleteCase(Scope *S) {
// Code-complete the cases of a switch statement over an enumeration type
// by providing the list of
- EnumDecl *Enum = type->castAs<EnumType>()->getDecl();
- if (EnumDecl *Def = Enum->getDefinition())
- Enum = Def;
+ EnumDecl *Enum =
+ type->castAs<EnumType>()->getOriginalDecl()->getDefinitionOrSelf();
// Determine which enumerators we have already seen in the switch statement.
// FIXME: Ideally, we would also be able to look *past* the code-completion
@@ -6887,8 +6905,8 @@ void SemaCodeCompletion::CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
// Try to instantiate any non-dependent declaration contexts before
// we look in them. Bail out if we fail.
- NestedNameSpecifier *NNS = SS.getScopeRep();
- if (NNS != nullptr && SS.isValid() && !NNS->isDependent()) {
+ NestedNameSpecifier NNS = SS.getScopeRep();
+ if (NNS && !NNS.isDependent()) {
if (Ctx == nullptr || SemaRef.RequireCompleteDeclContext(SS, Ctx))
return;
}
@@ -6902,14 +6920,13 @@ void SemaCodeCompletion::CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
// The "template" keyword can follow "::" in the grammar, but only
// put it into the grammar if the nested-name-specifier is dependent.
// FIXME: results is always empty, this appears to be dead.
- if (!Results.empty() && NNS && NNS->isDependent())
+ if (!Results.empty() && NNS.isDependent())
Results.AddResult("template");
// If the scope is a concept-constrained type parameter, infer nested
// members based on the constraints.
- if (NNS) {
- if (const auto *TTPT =
- dyn_cast_or_null<TemplateTypeParmType>(NNS->getAsType())) {
+ if (NNS.getKind() == NestedNameSpecifier::Kind::Type) {
+ if (const auto *TTPT = dyn_cast<TemplateTypeParmType>(NNS.getAsType())) {
for (const auto &R : ConceptInfo(*TTPT, S).members()) {
if (R.Operator != ConceptInfo::Member::Colons)
continue;
@@ -7034,7 +7051,7 @@ void SemaCodeCompletion::CodeCompleteNamespaceDecl(Scope *S) {
NS != NSEnd; ++NS)
Results.AddResult(
CodeCompletionResult(NS->second, Results.getBasePriority(NS->second),
- nullptr),
+ /*Qualifier=*/std::nullopt),
SemaRef.CurContext, nullptr, false);
Results.ExitScope();
}
@@ -7821,7 +7838,8 @@ static void AddObjCMethods(ObjCContainerDecl *Container,
if (!Selectors.insert(M->getSelector()).second)
continue;
- Result R = Result(M, Results.getBasePriority(M), nullptr);
+ Result R =
+ Result(M, Results.getBasePriority(M), /*Qualifier=*/std::nullopt);
R.StartParameter = SelIdents.size();
R.AllParametersAreInformative = (WantKind != MK_Any);
if (!InOriginalClass)
@@ -8412,7 +8430,8 @@ AddClassMessageCompletions(Sema &SemaRef, Scope *S, ParsedType Receiver,
continue;
Result R(MethList->getMethod(),
- Results.getBasePriority(MethList->getMethod()), nullptr);
+ Results.getBasePriority(MethList->getMethod()),
+ /*Qualifier=*/std::nullopt);
R.StartParameter = SelIdents.size();
R.AllParametersAreInformative = false;
Results.MaybeAddResult(R, SemaRef.CurContext);
@@ -8588,7 +8607,8 @@ void SemaCodeCompletion::CodeCompleteObjCInstanceMessage(
continue;
Result R(MethList->getMethod(),
- Results.getBasePriority(MethList->getMethod()), nullptr);
+ Results.getBasePriority(MethList->getMethod()),
+ /*Qualifier=*/std::nullopt);
R.StartParameter = SelIdents.size();
R.AllParametersAreInformative = false;
Results.MaybeAddResult(R, SemaRef.CurContext);
@@ -8704,9 +8724,9 @@ static void AddProtocolResults(DeclContext *Ctx, DeclContext *CurContext,
// Record any protocols we find.
if (const auto *Proto = dyn_cast<ObjCProtocolDecl>(D))
if (!OnlyForwardDeclarations || !Proto->hasDefinition())
- Results.AddResult(
- Result(Proto, Results.getBasePriority(Proto), nullptr), CurContext,
- nullptr, false);
+ Results.AddResult(Result(Proto, Results.getBasePriority(Proto),
+ /*Qualifier=*/std::nullopt),
+ CurContext, nullptr, false);
}
}
@@ -8772,9 +8792,9 @@ static void AddInterfaceResults(DeclContext *Ctx, DeclContext *CurContext,
if (const auto *Class = dyn_cast<ObjCInterfaceDecl>(D))
if ((!OnlyForwardDeclarations || !Class->hasDefinition()) &&
(!OnlyUnimplemented || !Class->getImplementation()))
- Results.AddResult(
- Result(Class, Results.getBasePriority(Class), nullptr), CurContext,
- nullptr, false);
+ Results.AddResult(Result(Class, Results.getBasePriority(Class),
+ /*Qualifier=*/std::nullopt),
+ CurContext, nullptr, false);
}
}
@@ -8886,9 +8906,9 @@ void SemaCodeCompletion::CodeCompleteObjCInterfaceCategory(
for (const auto *D : TU->decls())
if (const auto *Category = dyn_cast<ObjCCategoryDecl>(D))
if (CategoryNames.insert(Category->getIdentifier()).second)
- Results.AddResult(
- Result(Category, Results.getBasePriority(Category), nullptr),
- SemaRef.CurContext, nullptr, false);
+ Results.AddResult(Result(Category, Results.getBasePriority(Category),
+ /*Qualifier=*/std::nullopt),
+ SemaRef.CurContext, nullptr, false);
Results.ExitScope();
HandleCodeCompleteResults(&SemaRef, CodeCompleter,
@@ -8923,7 +8943,8 @@ void SemaCodeCompletion::CodeCompleteObjCImplementationCategory(
for (const auto *Cat : Class->visible_categories()) {
if ((!IgnoreImplemented || !Cat->getImplementation()) &&
CategoryNames.insert(Cat->getIdentifier()).second)
- Results.AddResult(Result(Cat, Results.getBasePriority(Cat), nullptr),
+ Results.AddResult(Result(Cat, Results.getBasePriority(Cat),
+ /*Qualifier=*/std::nullopt),
SemaRef.CurContext, nullptr, false);
}
@@ -9023,7 +9044,8 @@ void SemaCodeCompletion::CodeCompleteObjCPropertySynthesizeIvar(
for (; Class; Class = Class->getSuperClass()) {
for (ObjCIvarDecl *Ivar = Class->all_declared_ivar_begin(); Ivar;
Ivar = Ivar->getNextIvar()) {
- Results.AddResult(Result(Ivar, Results.getBasePriority(Ivar), nullptr),
+ Results.AddResult(Result(Ivar, Results.getBasePriority(Ivar),
+ /*Qualifier=*/std::nullopt),
SemaRef.CurContext, nullptr, false);
// Determine whether we've seen an ivar with a name similar to the
@@ -10039,7 +10061,8 @@ void SemaCodeCompletion::CodeCompleteObjCMethodDeclSelector(
}
Result R(MethList->getMethod(),
- Results.getBasePriority(MethList->getMethod()), nullptr);
+ Results.getBasePriority(MethList->getMethod()),
+ /*Qualifier=*/std::nullopt);
R.StartParameter = SelIdents.size();
R.AllParametersAreInformative = false;
R.DeclaringEntity = true;
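
The SemaCodeCompletion.cpp hunks above all apply the same two call-site migrations: NestedNameSpecifier is now a value type, so emptiness, dependence, and kind are queried on the object itself, and the optional qualifier argument of a completion result is spelled std::nullopt rather than nullptr. A condensed sketch of the new form, using the names from the hunks themselves (Method stands in for the ObjCMethodDecl* of the surrounding loops) and assuming their Sema/ResultBuilder context rather than standalone compilable code:

    // Dependence is asked of the value directly; an empty specifier is simply
    // non-dependent, so no separate null check is needed.
    if (!Results.empty() && NNS.isDependent())
      Results.AddResult("template");

    // Type qualifiers are identified by kind before the Type* is retrieved.
    if (NNS.getKind() == NestedNameSpecifier::Kind::Type)
      if (const auto *TTPT = dyn_cast<TemplateTypeParmType>(NNS.getAsType()))
        ; // constraint-based member completion, as in the first hunk above

    // "No qualifier" is an empty optional rather than a null pointer.
    Result R(Method, Results.getBasePriority(Method), /*Qualifier=*/std::nullopt);
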
diff --git a/clang/lib/Sema/SemaCoroutine.cpp b/clang/lib/Sema/SemaCoroutine.cpp
index d193a33..cc03616 100644
--- a/clang/lib/Sema/SemaCoroutine.cpp
+++ b/clang/lib/Sema/SemaCoroutine.cpp
@@ -89,8 +89,8 @@ static QualType lookupPromiseType(Sema &S, const FunctionDecl *FD,
AddArg(T);
// Build the template-id.
- QualType CoroTrait =
- S.CheckTemplateIdType(TemplateName(CoroTraits), KwLoc, Args);
+ QualType CoroTrait = S.CheckTemplateIdType(
+ ElaboratedTypeKeyword::None, TemplateName(CoroTraits), KwLoc, Args);
if (CoroTrait.isNull())
return QualType();
if (S.RequireCompleteType(KwLoc, CoroTrait,
@@ -111,23 +111,18 @@ static QualType lookupPromiseType(Sema &S, const FunctionDecl *FD,
<< RD;
return QualType();
}
- // The promise type is required to be a class type.
- QualType PromiseType = S.Context.getTypeDeclType(Promise);
-
- auto buildElaboratedType = [&]() {
- auto *NNS = NestedNameSpecifier::Create(S.Context, nullptr, S.getStdNamespace());
- NNS = NestedNameSpecifier::Create(S.Context, NNS, CoroTrait.getTypePtr());
- return S.Context.getElaboratedType(ElaboratedTypeKeyword::None, NNS,
- PromiseType);
- };
+ NestedNameSpecifier Qualifier(CoroTrait.getTypePtr());
+ QualType PromiseType = S.Context.getTypeDeclType(ElaboratedTypeKeyword::None,
+ Qualifier, Promise);
+ // The promise type is required to be a class type.
if (!PromiseType->getAsCXXRecordDecl()) {
S.Diag(FuncLoc,
diag::err_implied_std_coroutine_traits_promise_type_not_class)
- << buildElaboratedType();
+ << PromiseType;
return QualType();
}
- if (S.RequireCompleteType(FuncLoc, buildElaboratedType(),
+ if (S.RequireCompleteType(FuncLoc, PromiseType,
diag::err_coroutine_promise_type_incomplete))
return QualType();
@@ -167,8 +162,8 @@ static QualType lookupCoroutineHandleType(Sema &S, QualType PromiseType,
S.Context.getTrivialTypeSourceInfo(PromiseType, Loc)));
// Build the template-id.
- QualType CoroHandleType =
- S.CheckTemplateIdType(TemplateName(CoroHandle), Loc, Args);
+ QualType CoroHandleType = S.CheckTemplateIdType(
+ ElaboratedTypeKeyword::None, TemplateName(CoroHandle), Loc, Args);
if (CoroHandleType.isNull())
return QualType();
if (S.RequireCompleteType(Loc, CoroHandleType,
@@ -643,7 +638,9 @@ static void checkNoThrow(Sema &S, const Stmt *E,
QualType::DestructionKind::DK_cxx_destructor) {
const auto *T =
cast<RecordType>(ReturnType.getCanonicalType().getTypePtr());
- checkDeclNoexcept(cast<CXXRecordDecl>(T->getDecl())->getDestructor(),
+ checkDeclNoexcept(cast<CXXRecordDecl>(T->getOriginalDecl())
+ ->getDefinition()
+ ->getDestructor(),
/*IsDtor=*/true);
}
} else
@@ -1083,9 +1080,9 @@ static Expr *buildStdNoThrowDeclRef(Sema &S, SourceLocation Loc) {
static TypeSourceInfo *getTypeSourceInfoForStdAlignValT(Sema &S,
SourceLocation Loc) {
- EnumDecl *StdAlignValT = S.getStdAlignValT();
- QualType StdAlignValDecl = S.Context.getTypeDeclType(StdAlignValT);
- return S.Context.getTrivialTypeSourceInfo(StdAlignValDecl);
+ EnumDecl *StdAlignValDecl = S.getStdAlignValT();
+ CanQualType StdAlignValT = S.Context.getCanonicalTagType(StdAlignValDecl);
+ return S.Context.getTrivialTypeSourceInfo(StdAlignValT);
}
// When searching for custom allocators on the PromiseType we want to
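
The SemaCoroutine.cpp changes follow the new type-construction API: CheckTemplateIdType takes the elaborated-type keyword up front, and the qualified promise type is built directly instead of being wrapped in an ElaboratedType only for diagnostics. A minimal sketch assembled from the hunks above, with their surrounding declarations (S, Promise, CoroTraits, KwLoc, FuncLoc, Args) assumed:

    // Template-ids carry the elaborated-type keyword as the first argument.
    QualType CoroTrait = S.CheckTemplateIdType(
        ElaboratedTypeKeyword::None, TemplateName(CoroTraits), KwLoc, Args);

    // The qualifier is attached when the member type is created, so the same
    // QualType serves both diagnostics and completeness checks.
    NestedNameSpecifier Qualifier(CoroTrait.getTypePtr());
    QualType PromiseType = S.Context.getTypeDeclType(
        ElaboratedTypeKeyword::None, Qualifier, Promise);
    if (S.RequireCompleteType(FuncLoc, PromiseType,
                              diag::err_coroutine_promise_type_incomplete))
      return QualType();
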
diff --git a/clang/lib/Sema/SemaDecl.cpp b/clang/lib/Sema/SemaDecl.cpp
index b5eb825..cb59782 100644
--- a/clang/lib/Sema/SemaDecl.cpp
+++ b/clang/lib/Sema/SemaDecl.cpp
@@ -140,8 +140,8 @@ class TypeNameValidatorCCC final : public CorrectionCandidateCallback {
} // end anonymous namespace
-QualType Sema::getTypeDeclType(DeclContext *LookupCtx, DiagCtorKind DCK,
- TypeDecl *TD, SourceLocation NameLoc) {
+void Sema::checkTypeDeclType(DeclContext *LookupCtx, DiagCtorKind DCK,
+ TypeDecl *TD, SourceLocation NameLoc) {
auto *LookupRD = dyn_cast_or_null<CXXRecordDecl>(LookupCtx);
auto *FoundRD = dyn_cast<CXXRecordDecl>(TD);
if (DCK != DiagCtorKind::None && LookupRD && FoundRD &&
@@ -157,7 +157,6 @@ QualType Sema::getTypeDeclType(DeclContext *LookupCtx, DiagCtorKind DCK,
DiagnoseUseOfDecl(TD, NameLoc);
MarkAnyDeclReferenced(TD->getLocation(), TD, /*OdrUse=*/false);
- return Context.getTypeDeclType(TD);
}
namespace {
@@ -182,13 +181,13 @@ lookupUnqualifiedTypeNameInBase(Sema &S, const IdentifierInfo &II,
UnqualifiedTypeNameLookupResult FoundTypeDecl =
UnqualifiedTypeNameLookupResult::NotFound;
for (const auto &Base : RD->bases()) {
- const CXXRecordDecl *BaseRD = nullptr;
- if (auto *BaseTT = Base.getType()->getAs<TagType>())
- BaseRD = BaseTT->getAsCXXRecordDecl();
- else if (auto *TST = Base.getType()->getAs<TemplateSpecializationType>()) {
+ const CXXRecordDecl *BaseRD = Base.getType()->getAsCXXRecordDecl();
+ if (BaseRD) {
+ } else if (auto *TST = dyn_cast<TemplateSpecializationType>(
+ Base.getType().getCanonicalType())) {
// Look for type decls in dependent base classes that have known primary
// templates.
- if (!TST || !TST->isDependentType())
+ if (!TST->isDependentType())
continue;
auto *TD = TST->getTemplateName().getAsTemplateDecl();
if (!TD)
@@ -253,8 +252,7 @@ static ParsedType recoverFromTypeInKnownDependentBase(Sema &S,
S.Diag(NameLoc, diag::ext_found_in_dependent_base) << &II;
ASTContext &Context = S.Context;
- auto *NNS = NestedNameSpecifier::Create(
- Context, nullptr, cast<Type>(Context.getRecordType(RD)));
+ NestedNameSpecifier NNS(Context.getCanonicalTagType(RD).getTypePtr());
QualType T =
Context.getDependentNameType(ElaboratedTypeKeyword::None, NNS, &II);
@@ -269,45 +267,6 @@ static ParsedType recoverFromTypeInKnownDependentBase(Sema &S,
return S.CreateParsedType(T, Builder.getTypeSourceInfo(Context, T));
}
-/// Build a ParsedType for a simple-type-specifier with a nested-name-specifier.
-static ParsedType buildNamedType(Sema &S, const CXXScopeSpec *SS, QualType T,
- SourceLocation NameLoc,
- bool WantNontrivialTypeSourceInfo = true) {
- switch (T->getTypeClass()) {
- case Type::DeducedTemplateSpecialization:
- case Type::Enum:
- case Type::InjectedClassName:
- case Type::Record:
- case Type::Typedef:
- case Type::UnresolvedUsing:
- case Type::Using:
- break;
- // These can never be qualified so an ElaboratedType node
- // would carry no additional meaning.
- case Type::ObjCInterface:
- case Type::ObjCTypeParam:
- case Type::TemplateTypeParm:
- return ParsedType::make(T);
- default:
- llvm_unreachable("Unexpected Type Class");
- }
-
- if (!SS || SS->isEmpty())
- return ParsedType::make(S.Context.getElaboratedType(
- ElaboratedTypeKeyword::None, nullptr, T, nullptr));
-
- QualType ElTy = S.getElaboratedType(ElaboratedTypeKeyword::None, *SS, T);
- if (!WantNontrivialTypeSourceInfo)
- return ParsedType::make(ElTy);
-
- TypeLocBuilder Builder;
- Builder.pushTypeSpec(T).setNameLoc(NameLoc);
- ElaboratedTypeLoc ElabTL = Builder.push<ElaboratedTypeLoc>(ElTy);
- ElabTL.setElaboratedKeywordLoc(SourceLocation());
- ElabTL.setQualifierLoc(SS->getWithLocInContext(S.Context));
- return S.CreateParsedType(ElTy, Builder.getTypeSourceInfo(S.Context, ElTy));
-}
-
ParsedType Sema::getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS, bool isClassName,
bool HasTrailingDot, ParsedType ObjectTypePtr,
@@ -348,9 +307,10 @@ ParsedType Sema::getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
if (AllowImplicitTypename == ImplicitTypenameContext::No)
return nullptr;
SourceLocation QualifiedLoc = SS->getRange().getBegin();
- auto DB =
- DiagCompat(QualifiedLoc, diag_compat::implicit_typename)
- << NestedNameSpecifier::Create(Context, SS->getScopeRep(), &II);
+ // FIXME: Defer the diagnostic after we build the type and use it.
+ auto DB = DiagCompat(QualifiedLoc, diag_compat::implicit_typename)
+ << Context.getDependentNameType(ElaboratedTypeKeyword::None,
+ SS->getScopeRep(), &II);
if (!getLangOpts().CPlusPlus20)
DB << FixItHint::CreateInsertion(QualifiedLoc, "typename ");
}
@@ -430,7 +390,7 @@ ParsedType Sema::getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
bool MemberOfUnknownSpecialization;
UnqualifiedId TemplateName;
TemplateName.setIdentifier(NewII, NameLoc);
- NestedNameSpecifier *NNS = Correction.getCorrectionSpecifier();
+ NestedNameSpecifier NNS = Correction.getCorrectionSpecifier();
CXXScopeSpec NewSS, *NewSSPtr = SS;
if (SS && NNS) {
NewSS.MakeTrivial(Context, NNS, SourceRange(NameLoc));
@@ -530,20 +490,78 @@ ParsedType Sema::getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
assert(IIDecl && "Didn't find decl");
- QualType T;
+ TypeLocBuilder TLB;
if (TypeDecl *TD = dyn_cast<TypeDecl>(IIDecl)) {
- // C++ [class.qual]p2: A lookup that would find the injected-class-name
- // instead names the constructors of the class, except when naming a class.
- // This is ill-formed when we're not actually forming a ctor or dtor name.
- T = getTypeDeclType(LookupCtx,
- IsImplicitTypename ? DiagCtorKind::Implicit
- : DiagCtorKind::None,
- TD, NameLoc);
- } else if (ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(IIDecl)) {
+ checkTypeDeclType(LookupCtx,
+ IsImplicitTypename ? DiagCtorKind::Implicit
+ : DiagCtorKind::None,
+ TD, NameLoc);
+ QualType T;
+ if (FoundUsingShadow) {
+ T = Context.getUsingType(ElaboratedTypeKeyword::None,
+ SS ? SS->getScopeRep() : std::nullopt,
+ FoundUsingShadow);
+ if (!WantNontrivialTypeSourceInfo)
+ return ParsedType::make(T);
+ TLB.push<UsingTypeLoc>(T).set(/*ElaboratedKeywordLoc=*/SourceLocation(),
+ SS ? SS->getWithLocInContext(Context)
+ : NestedNameSpecifierLoc(),
+ NameLoc);
+ } else if (auto *Tag = dyn_cast<TagDecl>(TD)) {
+ T = Context.getTagType(ElaboratedTypeKeyword::None,
+ SS ? SS->getScopeRep() : std::nullopt, Tag,
+ /*OwnsTag=*/false);
+ if (!WantNontrivialTypeSourceInfo)
+ return ParsedType::make(T);
+ auto TL = TLB.push<TagTypeLoc>(T);
+ TL.setElaboratedKeywordLoc(SourceLocation());
+ TL.setQualifierLoc(SS ? SS->getWithLocInContext(Context)
+ : NestedNameSpecifierLoc());
+ TL.setNameLoc(NameLoc);
+ } else if (auto *TN = dyn_cast<TypedefNameDecl>(TD);
+ TN && !isa<ObjCTypeParamDecl>(TN)) {
+ T = Context.getTypedefType(ElaboratedTypeKeyword::None,
+ SS ? SS->getScopeRep() : std::nullopt, TN);
+ if (!WantNontrivialTypeSourceInfo)
+ return ParsedType::make(T);
+ TLB.push<TypedefTypeLoc>(T).set(
+ /*ElaboratedKeywordLoc=*/SourceLocation(),
+ SS ? SS->getWithLocInContext(Context) : NestedNameSpecifierLoc(),
+ NameLoc);
+ } else if (auto *UD = dyn_cast<UnresolvedUsingTypenameDecl>(TD)) {
+ T = Context.getUnresolvedUsingType(ElaboratedTypeKeyword::None,
+ SS ? SS->getScopeRep() : std::nullopt,
+ UD);
+ if (!WantNontrivialTypeSourceInfo)
+ return ParsedType::make(T);
+ TLB.push<UnresolvedUsingTypeLoc>(T).set(
+ /*ElaboratedKeywordLoc=*/SourceLocation(),
+ SS ? SS->getWithLocInContext(Context) : NestedNameSpecifierLoc(),
+ NameLoc);
+ } else {
+ T = Context.getTypeDeclType(TD);
+ if (!WantNontrivialTypeSourceInfo)
+ return ParsedType::make(T);
+ if (isa<ObjCTypeParamType>(T))
+ TLB.push<ObjCTypeParamTypeLoc>(T).setNameLoc(NameLoc);
+ else
+ TLB.pushTypeSpec(T).setNameLoc(NameLoc);
+ }
+ return CreateParsedType(T, TLB.getTypeSourceInfo(Context, T));
+ }
+ if (ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(IIDecl)) {
(void)DiagnoseUseOfDecl(IDecl, NameLoc);
- if (!HasTrailingDot)
- T = Context.getObjCInterfaceType(IDecl);
- FoundUsingShadow = nullptr; // FIXME: Target must be a TypeDecl.
+ if (!HasTrailingDot) {
+ // FIXME: Support UsingType for this case.
+ QualType T = Context.getObjCInterfaceType(IDecl);
+ if (!WantNontrivialTypeSourceInfo)
+ return ParsedType::make(T);
+ auto TL = TLB.push<ObjCInterfaceTypeLoc>(T);
+ TL.setNameLoc(NameLoc);
+ // FIXME: Pass in this source location.
+ TL.setNameEndLoc(NameLoc);
+ return CreateParsedType(T, TLB.getTypeSourceInfo(Context, T));
+ }
} else if (auto *UD = dyn_cast<UnresolvedUsingIfExistsDecl>(IIDecl)) {
(void)DiagnoseUseOfDecl(UD, NameLoc);
// Recover with 'int'
@@ -551,41 +569,38 @@ ParsedType Sema::getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
} else if (AllowDeducedTemplate) {
if (auto *TD = getAsTypeTemplateDecl(IIDecl)) {
assert(!FoundUsingShadow || FoundUsingShadow->getTargetDecl() == TD);
+ // FIXME: Support UsingType here.
TemplateName Template = Context.getQualifiedTemplateName(
- SS ? SS->getScopeRep() : nullptr, /*TemplateKeyword=*/false,
+ SS ? SS->getScopeRep() : std::nullopt, /*TemplateKeyword=*/false,
FoundUsingShadow ? TemplateName(FoundUsingShadow) : TemplateName(TD));
- T = Context.getDeducedTemplateSpecializationType(Template, QualType(),
- false);
- // Don't wrap in a further UsingType.
- FoundUsingShadow = nullptr;
+ QualType T = Context.getDeducedTemplateSpecializationType(
+ ElaboratedTypeKeyword::None, Template, QualType(), false);
+ auto TL = TLB.push<DeducedTemplateSpecializationTypeLoc>(T);
+ TL.setElaboratedKeywordLoc(SourceLocation());
+ TL.setNameLoc(NameLoc);
+ TL.setQualifierLoc(SS ? SS->getWithLocInContext(Context)
+ : NestedNameSpecifierLoc());
+ return CreateParsedType(T, TLB.getTypeSourceInfo(Context, T));
}
}
- if (T.isNull()) {
- // If it's not plausibly a type, suppress diagnostics.
- Result.suppressDiagnostics();
- return nullptr;
- }
-
- if (FoundUsingShadow)
- T = Context.getUsingType(FoundUsingShadow, T);
-
- return buildNamedType(*this, SS, T, NameLoc, WantNontrivialTypeSourceInfo);
+ // As it's not plausibly a type, suppress diagnostics.
+ Result.suppressDiagnostics();
+ return nullptr;
}
// Builds a fake NNS for the given decl context.
-static NestedNameSpecifier *
+static NestedNameSpecifier
synthesizeCurrentNestedNameSpecifier(ASTContext &Context, DeclContext *DC) {
for (;; DC = DC->getLookupParent()) {
DC = DC->getPrimaryContext();
auto *ND = dyn_cast<NamespaceDecl>(DC);
if (ND && !ND->isInline() && !ND->isAnonymousNamespace())
- return NestedNameSpecifier::Create(Context, nullptr, ND);
+ return NestedNameSpecifier(Context, ND, std::nullopt);
if (auto *RD = dyn_cast<CXXRecordDecl>(DC))
- return NestedNameSpecifier::Create(Context, nullptr,
- RD->getTypeForDecl());
+ return NestedNameSpecifier(Context.getCanonicalTagType(RD)->getTypePtr());
if (isa<TranslationUnitDecl>(DC))
- return NestedNameSpecifier::GlobalSpecifier(Context);
+ return NestedNameSpecifier::getGlobal();
}
llvm_unreachable("something isn't in TU scope?");
}
@@ -610,7 +625,7 @@ ParsedType Sema::ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
bool IsTemplateTypeArg) {
assert(getLangOpts().MSVCCompat && "shouldn't be called in non-MSVC mode");
- NestedNameSpecifier *NNS = nullptr;
+ NestedNameSpecifier NNS = std::nullopt;
if (IsTemplateTypeArg && getCurScope()->isTemplateParamScope()) {
// If we weren't able to parse a default template argument, delay lookup
// until instantiation time by making a non-dependent DependentTypeName. We
@@ -625,7 +640,7 @@ ParsedType Sema::ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
findRecordWithDependentBasesOfEnclosingMethod(CurContext)) {
// Build a DependentNameType that will perform lookup into RD at
// instantiation time.
- NNS = NestedNameSpecifier::Create(Context, nullptr, RD->getTypeForDecl());
+ NNS = NestedNameSpecifier(Context.getCanonicalTagType(RD)->getTypePtr());
// Diagnose that this identifier was undeclared, and retry the lookup during
// template instantiation.
@@ -678,19 +693,22 @@ DeclSpec::TST Sema::isTagName(IdentifierInfo &II, Scope *S) {
}
bool Sema::isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S) {
- if (CurContext->isRecord()) {
- if (SS->getScopeRep()->getKind() == NestedNameSpecifier::Super)
- return true;
-
- const Type *Ty = SS->getScopeRep()->getAsType();
+ if (!CurContext->isRecord())
+ return CurContext->isFunctionOrMethod() || S->isFunctionPrototypeScope();
- CXXRecordDecl *RD = cast<CXXRecordDecl>(CurContext);
- for (const auto &Base : RD->bases())
- if (Ty && Context.hasSameUnqualifiedType(QualType(Ty, 1), Base.getType()))
+ switch (SS->getScopeRep().getKind()) {
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
+ return true;
+ case NestedNameSpecifier::Kind::Type: {
+ QualType T(SS->getScopeRep().getAsType(), 0);
+ for (const auto &Base : cast<CXXRecordDecl>(CurContext)->bases())
+ if (Context.hasSameUnqualifiedType(T, Base.getType()))
return true;
+ [[fallthrough]];
+ }
+ default:
return S->isFunctionPrototypeScope();
}
- return CurContext->isFunctionOrMethod() || S->isFunctionPrototypeScope();
}
void Sema::DiagnoseUnknownTypeName(IdentifierInfo *&II,
@@ -786,7 +804,7 @@ void Sema::DiagnoseUnknownTypeName(IdentifierInfo *&II,
Diag(IILoc, IsTemplateName ? diag::err_no_member_template
: diag::err_typename_nested_not_found)
<< II << DC << SS->getRange();
- else if (SS->isValid() && SS->getScopeRep()->containsErrors()) {
+ else if (SS->isValid() && SS->getScopeRep().containsErrors()) {
SuggestedType =
ActOnTypenameType(S, SourceLocation(), *SS, *II, IILoc).get();
} else if (isDependentScopeSpecifier(*SS)) {
@@ -794,12 +812,13 @@ void Sema::DiagnoseUnknownTypeName(IdentifierInfo *&II,
if (getLangOpts().MSVCCompat && isMicrosoftMissingTypename(SS, S))
DiagID = diag::ext_typename_missing;
+ SuggestedType =
+ ActOnTypenameType(S, SourceLocation(), *SS, *II, IILoc).get();
+
Diag(SS->getRange().getBegin(), DiagID)
- << NestedNameSpecifier::Create(Context, SS->getScopeRep(), II)
+ << GetTypeFromParser(SuggestedType)
<< SourceRange(SS->getRange().getBegin(), IILoc)
<< FixItHint::CreateInsertion(SS->getRange().getBegin(), "typename ");
- SuggestedType = ActOnTypenameType(S, SourceLocation(),
- *SS, *II, IILoc).get();
} else {
assert(SS && SS->isInvalid() &&
"Invalid scope specifier has already been diagnosed");
@@ -1156,10 +1175,34 @@ Corrected:
}
auto BuildTypeFor = [&](TypeDecl *Type, NamedDecl *Found) {
- QualType T = Context.getTypeDeclType(Type);
- if (const auto *USD = dyn_cast<UsingShadowDecl>(Found))
- T = Context.getUsingType(USD, T);
- return buildNamedType(*this, &SS, T, NameLoc);
+ QualType T;
+ TypeLocBuilder TLB;
+ if (const auto *USD = dyn_cast<UsingShadowDecl>(Found)) {
+ T = Context.getUsingType(ElaboratedTypeKeyword::None, SS.getScopeRep(),
+ USD);
+ TLB.push<UsingTypeLoc>(T).set(/*ElaboratedKeywordLoc=*/SourceLocation(),
+ SS.getWithLocInContext(Context), NameLoc);
+ } else {
+ T = Context.getTypeDeclType(ElaboratedTypeKeyword::None, SS.getScopeRep(),
+ Type);
+ if (isa<TagType>(T)) {
+ auto TTL = TLB.push<TagTypeLoc>(T);
+ TTL.setElaboratedKeywordLoc(SourceLocation());
+ TTL.setQualifierLoc(SS.getWithLocInContext(Context));
+ TTL.setNameLoc(NameLoc);
+ } else if (isa<TypedefType>(T)) {
+ TLB.push<TypedefTypeLoc>(T).set(
+ /*ElaboratedKeywordLoc=*/SourceLocation(),
+ SS.getWithLocInContext(Context), NameLoc);
+ } else if (isa<UnresolvedUsingType>(T)) {
+ TLB.push<UnresolvedUsingTypeLoc>(T).set(
+ /*ElaboratedKeywordLoc=*/SourceLocation(),
+ SS.getWithLocInContext(Context), NameLoc);
+ } else {
+ TLB.pushTypeSpec(T).setNameLoc(NameLoc);
+ }
+ }
+ return CreateParsedType(T, TLB.getTypeSourceInfo(Context, T));
};
NamedDecl *FirstDecl = (*Result.begin())->getUnderlyingDecl();
@@ -2009,8 +2052,7 @@ static bool ShouldDiagnoseUnusedDecl(const LangOptions &LangOpts,
// consistent for both scalars and arrays.
Ty = Ty->getBaseElementTypeUnsafe();
- if (const TagType *TT = Ty->getAs<TagType>()) {
- const TagDecl *Tag = TT->getDecl();
+ if (const TagDecl *Tag = Ty->getAsTagDecl()) {
if (Tag->hasAttr<UnusedAttr>())
return false;
@@ -2070,7 +2112,7 @@ void Sema::DiagnoseUnusedNestedTypedefs(const RecordDecl *D) {
void Sema::DiagnoseUnusedNestedTypedefs(const RecordDecl *D,
DiagReceiverTy DiagReceiver) {
- if (D->getTypeForDecl()->isDependentType())
+ if (D->isDependentType())
return;
for (auto *TmpD : D->decls()) {
@@ -2128,8 +2170,7 @@ void Sema::DiagnoseUnusedButSetDecl(const VarDecl *VD,
if (Ty->isReferenceType() || Ty->isDependentType())
return;
- if (const TagType *TT = Ty->getAs<TagType>()) {
- const TagDecl *Tag = TT->getDecl();
+ if (const TagDecl *Tag = Ty->getAsTagDecl()) {
if (Tag->hasAttr<UnusedAttr>())
return;
// In C++, don't warn for record types that don't have WarnUnusedAttr, to
@@ -2508,7 +2549,8 @@ void Sema::MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
}
Context.setObjCIdRedefinitionType(T);
// Install the built-in type for 'id', ignoring the current definition.
- New->setTypeForDecl(Context.getObjCIdType().getTypePtr());
+ New->setModedTypeSourceInfo(New->getTypeSourceInfo(),
+ Context.getObjCIdType());
return;
}
case 5:
@@ -2516,14 +2558,16 @@ void Sema::MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
break;
Context.setObjCClassRedefinitionType(New->getUnderlyingType());
// Install the built-in type for 'Class', ignoring the current definition.
- New->setTypeForDecl(Context.getObjCClassType().getTypePtr());
+ New->setModedTypeSourceInfo(New->getTypeSourceInfo(),
+ Context.getObjCClassType());
return;
case 3:
if (!TypeID->isStr("SEL"))
break;
Context.setObjCSelRedefinitionType(New->getUnderlyingType());
// Install the built-in type for 'SEL', ignoring the current definition.
- New->setTypeForDecl(Context.getObjCSelType().getTypePtr());
+ New->setModedTypeSourceInfo(New->getTypeSourceInfo(),
+ Context.getObjCSelType());
return;
}
// Fall through - the typedef name was not a builtin type.
@@ -2555,7 +2599,6 @@ void Sema::MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
!hasVisibleDefinition(OldTag, &Hidden)) {
// There is a definition of this tag, but it is not visible. Use it
// instead of our tag.
- New->setTypeForDecl(OldTD->getTypeForDecl());
if (OldTD->isModed())
New->setModedTypeSourceInfo(OldTD->getTypeSourceInfo(),
OldTD->getUnderlyingType());
@@ -2742,7 +2785,7 @@ static bool mergeAlignedAttrs(Sema &S, NamedDecl *New, Decl *Old) {
if (ValueDecl *VD = dyn_cast<ValueDecl>(New))
Ty = VD->getType();
else
- Ty = S.Context.getTagDeclType(cast<TagDecl>(New));
+ Ty = S.Context.getCanonicalTagType(cast<TagDecl>(New));
if (OldAlign == 0)
OldAlign = S.Context.getTypeAlign(Ty);
@@ -2913,8 +2956,11 @@ static bool mergeDeclAttribute(Sema &S, NamedDecl *D,
}
static const NamedDecl *getDefinition(const Decl *D) {
- if (const TagDecl *TD = dyn_cast<TagDecl>(D))
- return TD->getDefinition();
+ if (const TagDecl *TD = dyn_cast<TagDecl>(D)) {
+ if (const auto *Def = TD->getDefinition(); Def && !Def->isBeingDefined())
+ return Def;
+ return nullptr;
+ }
if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
const VarDecl *Def = VD->getDefinition();
if (Def)
@@ -5024,7 +5070,7 @@ void Sema::setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
// The type must match the tag exactly; no qualifiers allowed.
if (!Context.hasSameType(NewTD->getUnderlyingType(),
- Context.getTagDeclType(TagFromDeclSpec))) {
+ Context.getCanonicalTagType(TagFromDeclSpec))) {
if (getLangOpts().CPlusPlus)
Context.addTypedefNameForUnnamedTagDecl(TagFromDeclSpec, NewTD);
return;
@@ -5232,9 +5278,9 @@ Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
Record = dyn_cast<RecordDecl>(Tag);
else if (const RecordType *RT =
DS.getRepAsType().get()->getAsStructureType())
- Record = RT->getDecl();
+ Record = RT->getOriginalDecl()->getDefinitionOrSelf();
else if (const RecordType *UT = DS.getRepAsType().get()->getAsUnionType())
- Record = UT->getDecl();
+ Record = UT->getOriginalDecl()->getDefinitionOrSelf();
if (Record && getLangOpts().MicrosoftExt) {
Diag(DS.getBeginLoc(), diag::ext_ms_anonymous_record)
@@ -5762,7 +5808,7 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
if (RecordDecl *OwningClass = dyn_cast<RecordDecl>(Owner)) {
Anon = FieldDecl::Create(
Context, OwningClass, DS.getBeginLoc(), Record->getLocation(),
- /*IdentifierInfo=*/nullptr, Context.getTypeDeclType(Record), TInfo,
+ /*IdentifierInfo=*/nullptr, Context.getCanonicalTagType(Record), TInfo,
/*BitWidth=*/nullptr, /*Mutable=*/false,
/*InitStyle=*/ICIS_NoInit);
Anon->setAccess(AS);
@@ -5782,7 +5828,7 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
Anon = VarDecl::Create(Context, Owner, DS.getBeginLoc(),
Record->getLocation(), /*IdentifierInfo=*/nullptr,
- Context.getTypeDeclType(Record), TInfo, SC);
+ Context.getCanonicalTagType(Record), TInfo, SC);
if (Invalid)
Anon->setInvalidDecl();
@@ -5845,7 +5891,7 @@ Decl *Sema::BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
assert(TInfo && "couldn't build declarator info for anonymous struct");
auto *ParentDecl = cast<RecordDecl>(CurContext);
- QualType RecTy = Context.getTypeDeclType(Record);
+ CanQualType RecTy = Context.getCanonicalTagType(Record);
// Create a declaration for this anonymous struct.
NamedDecl *Anon =
@@ -5964,14 +6010,14 @@ Sema::GetNameFromUnqualifiedId(const UnqualifiedId &Name) {
return DeclarationNameInfo();
// Determine the type of the class being constructed.
- QualType CurClassType = Context.getTypeDeclType(CurClass);
+ CanQualType CurClassType = Context.getCanonicalTagType(CurClass);
// FIXME: Check two things: that the template-id names the same type as
// CurClassType, and that the template-id does not occur when the name
// was qualified.
- NameInfo.setName(Context.DeclarationNames.getCXXConstructorName(
- Context.getCanonicalType(CurClassType)));
+ NameInfo.setName(
+ Context.DeclarationNames.getCXXConstructorName(CurClassType));
// FIXME: should we retrieve TypeSourceInfo?
NameInfo.setNamedTypeInfo(nullptr);
return NameInfo;
@@ -6259,8 +6305,9 @@ bool Sema::diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
// that's the case, then drop this declaration entirely.
if ((Name.getNameKind() == DeclarationName::CXXConstructorName ||
Name.getNameKind() == DeclarationName::CXXDestructorName) &&
- !Context.hasSameType(Name.getCXXNameType(),
- Context.getTypeDeclType(cast<CXXRecordDecl>(Cur))))
+ !Context.hasSameType(
+ Name.getCXXNameType(),
+ Context.getCanonicalTagType(cast<CXXRecordDecl>(Cur))))
return true;
return false;
@@ -6280,36 +6327,48 @@ bool Sema::diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
<< FixItHint::CreateRemoval(TemplateId->TemplateKWLoc);
NestedNameSpecifierLoc SpecLoc(SS.getScopeRep(), SS.location_data());
- do {
- if (TypeLoc TL = SpecLoc.getTypeLoc()) {
- if (SourceLocation TemplateKeywordLoc = TL.getTemplateKeywordLoc();
- TemplateKeywordLoc.isValid())
- Diag(Loc, diag::ext_template_after_declarative_nns)
- << FixItHint::CreateRemoval(TemplateKeywordLoc);
- }
-
- if (const Type *T = SpecLoc.getNestedNameSpecifier()->getAsType()) {
- if (const auto *TST = T->getAsAdjusted<TemplateSpecializationType>()) {
- // C++23 [expr.prim.id.qual]p3:
- // [...] If a nested-name-specifier N is declarative and has a
- // simple-template-id with a template argument list A that involves a
- // template parameter, let T be the template nominated by N without A.
- // T shall be a class template.
- if (TST->isDependentType() && TST->isTypeAlias())
- Diag(Loc, diag::ext_alias_template_in_declarative_nns)
- << SpecLoc.getLocalSourceRange();
- } else if (T->isDecltypeType() || T->getAsAdjusted<PackIndexingType>()) {
- // C++23 [expr.prim.id.qual]p2:
- // [...] A declarative nested-name-specifier shall not have a
- // computed-type-specifier.
- //
- // CWG2858 changed this from 'decltype-specifier' to
- // 'computed-type-specifier'.
- Diag(Loc, diag::err_computed_type_in_declarative_nns)
- << T->isDecltypeType() << SpecLoc.getTypeLoc().getSourceRange();
- }
+ for (TypeLoc TL = SpecLoc.getAsTypeLoc(), NextTL; TL;
+ TL = std::exchange(NextTL, TypeLoc())) {
+ SourceLocation TemplateKeywordLoc;
+ switch (TL.getTypeLocClass()) {
+ case TypeLoc::TemplateSpecialization: {
+ auto TST = TL.castAs<TemplateSpecializationTypeLoc>();
+ TemplateKeywordLoc = TST.getTemplateKeywordLoc();
+ if (auto *T = TST.getTypePtr(); T->isDependentType() && T->isTypeAlias())
+ Diag(Loc, diag::ext_alias_template_in_declarative_nns)
+ << TST.getLocalSourceRange();
+ break;
+ }
+ case TypeLoc::Decltype:
+ case TypeLoc::PackIndexing: {
+ const Type *T = TL.getTypePtr();
+ // C++23 [expr.prim.id.qual]p2:
+ // [...] A declarative nested-name-specifier shall not have a
+ // computed-type-specifier.
+ //
+ // CWG2858 changed this from 'decltype-specifier' to
+ // 'computed-type-specifier'.
+ Diag(Loc, diag::err_computed_type_in_declarative_nns)
+ << T->isDecltypeType() << TL.getSourceRange();
+ break;
}
- } while ((SpecLoc = SpecLoc.getPrefix()));
+ case TypeLoc::DependentName:
+ NextTL =
+ TL.castAs<DependentNameTypeLoc>().getQualifierLoc().getAsTypeLoc();
+ break;
+ case TypeLoc::DependentTemplateSpecialization: {
+ auto TST = TL.castAs<DependentTemplateSpecializationTypeLoc>();
+ TemplateKeywordLoc = TST.getTemplateKeywordLoc();
+ NextTL = TST.getQualifierLoc().getAsTypeLoc();
+ break;
+ }
+ default:
+ break;
+ }
+ if (TemplateKeywordLoc.isValid())
+ Diag(Loc, diag::ext_template_after_declarative_nns)
+ << FixItHint::CreateRemoval(TemplateKeywordLoc);
+ }
return false;
}
@@ -9046,9 +9105,8 @@ bool Sema::AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD) {
if (Name.getNameKind() == DeclarationName::CXXDestructorName) {
// We really want to find the base class destructor here.
- QualType T = Context.getTypeDeclType(BaseRecord);
- CanQualType CT = Context.getCanonicalType(T);
- Name = Context.DeclarationNames.getCXXDestructorName(CT);
+ Name = Context.DeclarationNames.getCXXDestructorName(
+ Context.getCanonicalTagType(BaseRecord));
}
for (NamedDecl *BaseND : BaseRecord->lookup(Name)) {
@@ -9734,7 +9792,8 @@ static void checkIsValidOpenCLKernelParameter(
// At this point we already handled everything except of a RecordType.
assert(PT->isRecordType() && "Unexpected type.");
- const RecordDecl *PD = PT->castAs<RecordType>()->getDecl();
+ const RecordDecl *PD =
+ PT->castAs<RecordType>()->getOriginalDecl()->getDefinitionOrSelf();
VisitStack.push_back(PD);
assert(VisitStack.back() && "First decl null?");
@@ -9762,7 +9821,9 @@ static void checkIsValidOpenCLKernelParameter(
"Unexpected type.");
const Type *FieldRecTy = FieldTy->getPointeeOrArrayElementType();
- RD = FieldRecTy->castAs<RecordType>()->getDecl();
+ RD = FieldRecTy->castAs<RecordType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf();
} else {
RD = cast<RecordDecl>(Next);
}
@@ -10768,7 +10829,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// redeclaration lookup found nothing at all. Diagnose that now;
// nothing will diagnose that error later.
if (isFriend &&
- (D.getCXXScopeSpec().getScopeRep()->isDependent() ||
+ (D.getCXXScopeSpec().getScopeRep().isDependent() ||
(!Previous.empty() && CurContext->isDependentContext()))) {
// ignore these
} else if (NewFD->isCPUDispatchMultiVersion() ||
@@ -12272,11 +12333,11 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
// template struct A<B>;
if (NewFD->getFriendObjectKind() == Decl::FriendObjectKind::FOK_None ||
!Destructor->getFunctionObjectParameterType()->isDependentType()) {
- CXXRecordDecl *Record = Destructor->getParent();
- QualType ClassType = Context.getTypeDeclType(Record);
+ CanQualType ClassType =
+ Context.getCanonicalTagType(Destructor->getParent());
- DeclarationName Name = Context.DeclarationNames.getCXXDestructorName(
- Context.getCanonicalType(ClassType));
+ DeclarationName Name =
+ Context.DeclarationNames.getCXXDestructorName(ClassType);
if (NewFD->getDeclName() != Name) {
Diag(NewFD->getLocation(), diag::err_destructor_name);
NewFD->setInvalidDecl();
@@ -13280,7 +13341,8 @@ struct DiagNonTrivalCUnionDefaultInitializeVisitor
}
void visitStruct(QualType QT, const FieldDecl *FD, bool InNonTrivialUnion) {
- const RecordDecl *RD = QT->castAs<RecordType>()->getDecl();
+ const RecordDecl *RD =
+ QT->castAs<RecordType>()->getOriginalDecl()->getDefinitionOrSelf();
if (RD->isUnion()) {
if (OrigLoc.isValid()) {
bool IsUnion = false;
@@ -13346,7 +13408,8 @@ struct DiagNonTrivalCUnionDestructedTypeVisitor
}
void visitStruct(QualType QT, const FieldDecl *FD, bool InNonTrivialUnion) {
- const RecordDecl *RD = QT->castAs<RecordType>()->getDecl();
+ const RecordDecl *RD =
+ QT->castAs<RecordType>()->getOriginalDecl()->getDefinitionOrSelf();
if (RD->isUnion()) {
if (OrigLoc.isValid()) {
bool IsUnion = false;
@@ -13411,7 +13474,8 @@ struct DiagNonTrivalCUnionCopyVisitor
}
void visitStruct(QualType QT, const FieldDecl *FD, bool InNonTrivialUnion) {
- const RecordDecl *RD = QT->castAs<RecordType>()->getDecl();
+ const RecordDecl *RD =
+ QT->castAs<RecordType>()->getOriginalDecl()->getDefinitionOrSelf();
if (RD->isUnion()) {
if (OrigLoc.isValid()) {
bool IsUnion = false;
@@ -14384,7 +14448,9 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl) {
if (getLangOpts().CPlusPlus && Var->hasLocalStorage()) {
if (const RecordType *Record
= Context.getBaseElementType(Type)->getAs<RecordType>()) {
- CXXRecordDecl *CXXRecord = cast<CXXRecordDecl>(Record->getDecl());
+ CXXRecordDecl *CXXRecord =
+ cast<CXXRecordDecl>(Record->getOriginalDecl())
+ ->getDefinitionOrSelf();
// Mark the function (if we're in one) for further checking even if the
// looser rules of C++11 do not require such checks, so that we can
// diagnose incompatibilities with C++98.
@@ -17483,6 +17549,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// FIXME: Check member specializations more carefully.
bool isMemberSpecialization = false;
+ bool IsInjectedClassName = false;
bool Invalid = false;
// We only need to do this matching if we have template parameters
@@ -17948,8 +18015,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// see http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#407
if (getLangOpts().CPlusPlus) {
if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(PrevDecl)) {
- if (const TagType *TT = TD->getUnderlyingType()->getAs<TagType>()) {
- TagDecl *Tag = TT->getDecl();
+ if (TagDecl *Tag = TD->getUnderlyingType()->getAsTagDecl()) {
if (Tag->getDeclName() == Name &&
Tag->getDeclContext()->getRedeclContext()
->Equals(TD->getDeclContext()->getRedeclContext())) {
@@ -17959,6 +18025,15 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
Previous.resolveKind();
}
}
+ } else if (auto *RD = dyn_cast<CXXRecordDecl>(PrevDecl);
+ RD && RD->isInjectedClassName()) {
+ // If lookup found the injected class name, the previous declaration is
+ // the class being injected into.
+ PrevDecl = cast<TagDecl>(RD->getDeclContext());
+ Previous.clear();
+ Previous.addDecl(PrevDecl);
+ Previous.resolveKind();
+ IsInjectedClassName = true;
}
}
@@ -18082,78 +18157,79 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// Diagnose attempts to redefine a tag.
if (TUK == TagUseKind::Definition) {
- if (NamedDecl *Def = PrevTagDecl->getDefinition()) {
- // If we're defining a specialization and the previous definition
- // is from an implicit instantiation, don't emit an error
- // here; we'll catch this in the general case below.
- bool IsExplicitSpecializationAfterInstantiation = false;
- if (isMemberSpecialization) {
- if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Def))
- IsExplicitSpecializationAfterInstantiation =
- RD->getTemplateSpecializationKind() !=
- TSK_ExplicitSpecialization;
- else if (EnumDecl *ED = dyn_cast<EnumDecl>(Def))
- IsExplicitSpecializationAfterInstantiation =
- ED->getTemplateSpecializationKind() !=
- TSK_ExplicitSpecialization;
- }
-
- // Note that clang allows ODR-like semantics for ObjC/C, i.e., do
- // not keep more that one definition around (merge them). However,
- // ensure the decl passes the structural compatibility check in
- // C11 6.2.7/1 (or 6.1.2.6/1 in C89).
- NamedDecl *Hidden = nullptr;
- if (SkipBody &&
- (!hasVisibleDefinition(Def, &Hidden) || getLangOpts().C23)) {
- // There is a definition of this tag, but it is not visible. We
- // explicitly make use of C++'s one definition rule here, and
- // assume that this definition is identical to the hidden one
- // we already have. Make the existing definition visible and
- // use it in place of this one.
- if (!getLangOpts().CPlusPlus) {
- // Postpone making the old definition visible until after we
- // complete parsing the new one and do the structural
- // comparison.
- SkipBody->CheckSameAsPrevious = true;
- SkipBody->New = createTagFromNewDecl();
- SkipBody->Previous = Def;
-
- ProcessDeclAttributeList(S, SkipBody->New, Attrs);
- return Def;
- } else {
- SkipBody->ShouldSkip = true;
- SkipBody->Previous = Def;
- makeMergedDefinitionVisible(Hidden);
- // Carry on and handle it like a normal definition. We'll
- // skip starting the definition later.
- }
- } else if (!IsExplicitSpecializationAfterInstantiation) {
- // A redeclaration in function prototype scope in C isn't
- // visible elsewhere, so merely issue a warning.
- if (!getLangOpts().CPlusPlus && S->containedInPrototypeScope())
- Diag(NameLoc, diag::warn_redefinition_in_param_list) << Name;
- else
- Diag(NameLoc, diag::err_redefinition) << Name;
- notePreviousDefinition(Def,
- NameLoc.isValid() ? NameLoc : KWLoc);
- // If this is a redefinition, recover by making this
- // struct be anonymous, which will make any later
- // references get the previous definition.
- Name = nullptr;
- Previous.clear();
- Invalid = true;
- }
- } else {
+ if (TagDecl *Def = PrevTagDecl->getDefinition()) {
// If the type is currently being defined, complain
// about a nested redefinition.
- auto *TD = Context.getTagDeclType(PrevTagDecl)->getAsTagDecl();
- if (TD->isBeingDefined()) {
+ if (Def->isBeingDefined()) {
Diag(NameLoc, diag::err_nested_redefinition) << Name;
Diag(PrevTagDecl->getLocation(),
diag::note_previous_definition);
Name = nullptr;
Previous.clear();
Invalid = true;
+ } else {
+ // If we're defining a specialization and the previous
+ // definition is from an implicit instantiation, don't emit an
+ // error here; we'll catch this in the general case below.
+ bool IsExplicitSpecializationAfterInstantiation = false;
+ if (isMemberSpecialization) {
+ if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Def))
+ IsExplicitSpecializationAfterInstantiation =
+ RD->getTemplateSpecializationKind() !=
+ TSK_ExplicitSpecialization;
+ else if (EnumDecl *ED = dyn_cast<EnumDecl>(Def))
+ IsExplicitSpecializationAfterInstantiation =
+ ED->getTemplateSpecializationKind() !=
+ TSK_ExplicitSpecialization;
+ }
+
+ // Note that clang allows ODR-like semantics for ObjC/C, i.e.,
+ // do not keep more that one definition around (merge them).
+ // However, ensure the decl passes the structural compatibility
+ // check in C11 6.2.7/1 (or 6.1.2.6/1 in C89).
+ NamedDecl *Hidden = nullptr;
+ if (SkipBody && (!hasVisibleDefinition(Def, &Hidden) ||
+ getLangOpts().C23)) {
+ // There is a definition of this tag, but it is not visible.
+ // We explicitly make use of C++'s one definition rule here,
+ // and assume that this definition is identical to the hidden
+ // one we already have. Make the existing definition visible
+ // and use it in place of this one.
+ if (!getLangOpts().CPlusPlus) {
+ // Postpone making the old definition visible until after we
+ // complete parsing the new one and do the structural
+ // comparison.
+ SkipBody->CheckSameAsPrevious = true;
+ SkipBody->New = createTagFromNewDecl();
+ SkipBody->Previous = Def;
+
+ ProcessDeclAttributeList(S, SkipBody->New, Attrs);
+ return Def;
+ } else {
+ SkipBody->ShouldSkip = true;
+ SkipBody->Previous = Def;
+ makeMergedDefinitionVisible(Hidden);
+ // Carry on and handle it like a normal definition. We'll
+ // skip starting the definition later.
+ }
+ } else if (!IsExplicitSpecializationAfterInstantiation) {
+ // A redeclaration in function prototype scope in C isn't
+ // visible elsewhere, so merely issue a warning.
+ if (!getLangOpts().CPlusPlus &&
+ S->containedInPrototypeScope())
+ Diag(NameLoc, diag::warn_redefinition_in_param_list)
+ << Name;
+ else
+ Diag(NameLoc, diag::err_redefinition) << Name;
+ notePreviousDefinition(Def,
+ NameLoc.isValid() ? NameLoc : KWLoc);
+ // If this is a redefinition, recover by making this
+ // struct be anonymous, which will make any later
+ // references get the previous definition.
+ Name = nullptr;
+ Previous.clear();
+ Invalid = true;
+ }
}
}
@@ -18324,14 +18400,14 @@ CreateNewDecl:
(IsTypeSpecifier || IsTemplateParamOrArg) &&
TUK == TagUseKind::Definition) {
Diag(New->getLocation(), diag::err_type_defined_in_type_specifier)
- << Context.getTagDeclType(New);
+ << Context.getCanonicalTagType(New);
Invalid = true;
}
if (!Invalid && getLangOpts().CPlusPlus && TUK == TagUseKind::Definition &&
DC->getDeclKind() == Decl::Enum) {
Diag(New->getLocation(), diag::err_type_defined_in_enum)
- << Context.getTagDeclType(New);
+ << Context.getCanonicalTagType(New);
Invalid = true;
}
@@ -18412,7 +18488,8 @@ CreateNewDecl:
// In C23 mode, if the declaration is complete, we do not want to
// diagnose.
if (!getLangOpts().C23 || TUK != TagUseKind::Definition)
- Diag(Loc, diag::warn_decl_in_param_list) << Context.getTagDeclType(New);
+ Diag(Loc, diag::warn_decl_in_param_list)
+ << Context.getCanonicalTagType(New);
}
}
@@ -18444,7 +18521,7 @@ CreateNewDecl:
AddPragmaAttributes(S, New);
// If this has an identifier, add it to the scope stack.
- if (TUK == TagUseKind::Friend) {
+ if (TUK == TagUseKind::Friend || IsInjectedClassName) {
// We might be replacing an existing declaration in the lookup tables;
// if so, borrow its access specifier.
if (PrevDecl)
@@ -18564,14 +18641,12 @@ void Sema::ActOnStartCXXMemberDeclarations(
// as if it were a public member name.
CXXRecordDecl *InjectedClassName = CXXRecordDecl::Create(
Context, Record->getTagKind(), CurContext, Record->getBeginLoc(),
- Record->getLocation(), Record->getIdentifier(),
- /*PrevDecl=*/nullptr,
- /*DelayTypeCreation=*/true);
- Context.getTypeDeclType(InjectedClassName, Record);
+ Record->getLocation(), Record->getIdentifier());
InjectedClassName->setImplicit();
InjectedClassName->setAccess(AS_public);
if (ClassTemplateDecl *Template = Record->getDescribedClassTemplate())
InjectedClassName->setDescribedClassTemplate(Template);
+
PushOnScopeChains(InjectedClassName, S);
assert(InjectedClassName->isInjectedClassName() &&
"Broken injected-class-name");
@@ -19001,7 +19076,7 @@ FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T,
if (!InvalidDecl && getLangOpts().CPlusPlus) {
if (Record->isUnion()) {
if (const RecordType *RT = EltTy->getAs<RecordType>()) {
- CXXRecordDecl* RDecl = cast<CXXRecordDecl>(RT->getDecl());
+ CXXRecordDecl *RDecl = cast<CXXRecordDecl>(RT->getOriginalDecl());
if (RDecl->getDefinition()) {
// C++ [class.union]p1: An object of a class with a non-trivial
// constructor, a non-trivial copy constructor, a non-trivial
@@ -19067,7 +19142,8 @@ bool Sema::CheckNontrivialField(FieldDecl *FD) {
QualType EltTy = Context.getBaseElementType(FD->getType());
if (const RecordType *RT = EltTy->getAs<RecordType>()) {
- CXXRecordDecl *RDecl = cast<CXXRecordDecl>(RT->getDecl());
+ CXXRecordDecl *RDecl =
+ cast<CXXRecordDecl>(RT->getOriginalDecl())->getDefinitionOrSelf();
if (RDecl->getDefinition()) {
// We check for copy constructors before constructors
// because otherwise we'll never get complaints about
@@ -19390,7 +19466,7 @@ bool Sema::EntirelyFunctionPointers(const RecordDecl *Record) {
}
// If a member is a struct entirely of function pointers, that counts too.
if (const RecordType *RT = FieldType->getAs<RecordType>()) {
- const RecordDecl *Record = RT->getDecl();
+ const RecordDecl *Record = RT->getOriginalDecl()->getDefinitionOrSelf();
if (Record->isStruct() && EntirelyFunctionPointers(Record))
return true;
}
@@ -19548,7 +19624,9 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
EnclosingDecl->setInvalidDecl();
continue;
} else if (const RecordType *FDTTy = FDTy->getAs<RecordType>()) {
- if (Record && FDTTy->getDecl()->hasFlexibleArrayMember()) {
+ if (Record && FDTTy->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->hasFlexibleArrayMember()) {
// A type which contains a flexible array member is considered to be a
// flexible array member.
Record->setHasFlexibleArrayMember(true);
@@ -19574,9 +19652,10 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
// Ivars can not have abstract class types
FD->setInvalidDecl();
}
- if (Record && FDTTy->getDecl()->hasObjectMember())
+ const RecordDecl *RD = FDTTy->getOriginalDecl()->getDefinitionOrSelf();
+ if (Record && RD->hasObjectMember())
Record->setHasObjectMember(true);
- if (Record && FDTTy->getDecl()->hasVolatileMember())
+ if (Record && RD->hasVolatileMember())
Record->setHasVolatileMember(true);
} else if (FDTy->isObjCObjectType()) {
/// A field cannot be an Objective-c object
@@ -19607,8 +19686,10 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
Record->setHasObjectMember(true);
else if (Context.getAsArrayType(FD->getType())) {
QualType BaseType = Context.getBaseElementType(FD->getType());
- if (BaseType->isRecordType() &&
- BaseType->castAs<RecordType>()->getDecl()->hasObjectMember())
+ if (BaseType->isRecordType() && BaseType->castAs<RecordType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->hasObjectMember())
Record->setHasObjectMember(true);
else if (BaseType->isObjCObjectPointerType() ||
BaseType.isObjCGCStrong())
@@ -19641,7 +19722,9 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
}
if (const auto *RT = FT->getAs<RecordType>()) {
- if (RT->getDecl()->getArgPassingRestrictions() ==
+ if (RT->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->getArgPassingRestrictions() ==
RecordArgPassingKind::CanNeverPassInRegs)
Record->setArgPassingRestrictions(
RecordArgPassingKind::CanNeverPassInRegs);
@@ -20407,7 +20490,7 @@ void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDeclX, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attrs) {
EnumDecl *Enum = cast<EnumDecl>(EnumDeclX);
- QualType EnumType = Context.getTypeDeclType(Enum);
+ CanQualType EnumType = Context.getCanonicalTagType(Enum);
ProcessDeclAttributeList(S, Enum, Attrs);
ProcessAPINotes(Enum);
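
Two patterns account for most of the SemaDecl.cpp churn above: canonical tag types are requested explicitly through getCanonicalTagType, and callers that previously treated RecordType::getDecl() as a definition must now ask for one. A short sketch of both, taken from the hunks above and assuming the usual Sema member context (Context is the ASTContext):

    // Canonical tag type, e.g. when forming a destructor name.
    CanQualType ClassType = Context.getCanonicalTagType(Destructor->getParent());
    DeclarationName Name =
        Context.DeclarationNames.getCXXDestructorName(ClassType);

    // RecordType hands out the declaration as written; the definition (or the
    // decl itself, if none exists yet) is requested explicitly.
    const RecordDecl *RD =
        QT->castAs<RecordType>()->getOriginalDecl()->getDefinitionOrSelf();
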
diff --git a/clang/lib/Sema/SemaDeclAttr.cpp b/clang/lib/Sema/SemaDeclAttr.cpp
index f5f18b0..7a18510 100644
--- a/clang/lib/Sema/SemaDeclAttr.cpp
+++ b/clang/lib/Sema/SemaDeclAttr.cpp
@@ -177,7 +177,7 @@ static bool threadSafetyCheckIsSmartPointer(Sema &S, const RecordType* RT) {
return !Result.empty();
};
- const RecordDecl *Record = RT->getDecl();
+ const RecordDecl *Record = RT->getOriginalDecl()->getDefinitionOrSelf();
bool foundStarOperator = IsOverloadedOperatorPresent(Record, OO_Star);
bool foundArrowOperator = IsOverloadedOperatorPresent(Record, OO_Arrow);
if (foundStarOperator && foundArrowOperator)
@@ -271,7 +271,8 @@ static bool checkRecordTypeForCapability(Sema &S, QualType Ty) {
if (threadSafetyCheckIsSmartPointer(S, RT))
return true;
- return checkRecordDeclForAttr<CapabilityAttr>(RT->getDecl());
+ return checkRecordDeclForAttr<CapabilityAttr>(
+ RT->getOriginalDecl()->getDefinitionOrSelf());
}
static bool checkRecordTypeForScopedCapability(Sema &S, QualType Ty) {
@@ -284,7 +285,8 @@ static bool checkRecordTypeForScopedCapability(Sema &S, QualType Ty) {
if (RT->isIncompleteType())
return true;
- return checkRecordDeclForAttr<ScopedLockableAttr>(RT->getDecl());
+ return checkRecordDeclForAttr<ScopedLockableAttr>(
+ RT->getOriginalDecl()->getDefinitionOrSelf());
}
static bool checkTypedefTypeForCapability(QualType Ty) {
@@ -1254,8 +1256,8 @@ bool Sema::isValidPointerAttrType(QualType T, bool RefOkay) {
// The nonnull attribute, and other similar attributes, can be applied to a
// transparent union that contains a pointer type.
if (const RecordType *UT = T->getAsUnionType()) {
- if (UT && UT->getDecl()->hasAttr<TransparentUnionAttr>()) {
- RecordDecl *UD = UT->getDecl();
+ RecordDecl *UD = UT->getOriginalDecl()->getDefinitionOrSelf();
+ if (UD->hasAttr<TransparentUnionAttr>()) {
for (const auto *I : UD->fields()) {
QualType QT = I->getType();
if (QT->isAnyPointerType() || QT->isBlockPointerType())
@@ -3560,7 +3562,9 @@ static void handleFormatArgAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
Ty = getFunctionOrMethodResultType(D);
// replace instancetype with the class type
- auto Instancetype = S.Context.getObjCInstanceTypeDecl()->getTypeForDecl();
+ auto *Instancetype = cast<TypedefType>(S.Context.getTypedefType(
+ ElaboratedTypeKeyword::None, /*Qualifier=*/std::nullopt,
+ S.Context.getObjCInstanceTypeDecl()));
if (Ty->getAs<TypedefType>() == Instancetype)
if (auto *OMD = dyn_cast<ObjCMethodDecl>(D))
if (auto *Interface = OMD->getClassInterface())
@@ -4154,7 +4158,10 @@ static void handleTransparentUnionAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
RecordDecl *RD = nullptr;
const auto *TD = dyn_cast<TypedefNameDecl>(D);
if (TD && TD->getUnderlyingType()->isUnionType())
- RD = TD->getUnderlyingType()->getAsUnionType()->getDecl();
+ RD = TD->getUnderlyingType()
+ ->getAsUnionType()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf();
else
RD = dyn_cast<RecordDecl>(D);
@@ -4507,7 +4514,7 @@ void Sema::CheckAlignasUnderalignment(Decl *D) {
if (const auto *VD = dyn_cast<ValueDecl>(D)) {
UnderlyingTy = DiagTy = VD->getType();
} else {
- UnderlyingTy = DiagTy = Context.getTagDeclType(cast<TagDecl>(D));
+ UnderlyingTy = DiagTy = Context.getCanonicalTagType(cast<TagDecl>(D));
if (const auto *ED = dyn_cast<EnumDecl>(D))
UnderlyingTy = ED->getIntegerType();
}
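
The SemaDeclAttr.cpp hunks repeat the getOriginalDecl()->getDefinitionOrSelf() migration and also show the new getTypedefType signature, which takes the elaboration keyword and an optional qualifier. A fragment of the format_arg handling above, reproduced as a sketch with its surrounding Sema state (S, Ty, D) assumed:

    // The canonical 'instancetype' typedef node is requested with explicit
    // (empty) keyword and qualifier values.
    auto *Instancetype = cast<TypedefType>(S.Context.getTypedefType(
        ElaboratedTypeKeyword::None, /*Qualifier=*/std::nullopt,
        S.Context.getObjCInstanceTypeDecl()));
    if (Ty->getAs<TypedefType>() == Instancetype)
      ; // the result type is 'instancetype'; substitute the interface type
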
diff --git a/clang/lib/Sema/SemaDeclCXX.cpp b/clang/lib/Sema/SemaDeclCXX.cpp
index f5b4614..0477d37 100644
--- a/clang/lib/Sema/SemaDeclCXX.cpp
+++ b/clang/lib/Sema/SemaDeclCXX.cpp
@@ -10,6 +10,7 @@
//
//===----------------------------------------------------------------------===//
+#include "TypeLocBuilder.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTMutationListener.h"
@@ -1101,15 +1102,13 @@ static std::string printTemplateArgs(const PrintingPolicy &PrintingPolicy,
return std::string(OS.str());
}
-static bool lookupStdTypeTraitMember(Sema &S, LookupResult &TraitMemberLookup,
- SourceLocation Loc, StringRef Trait,
- TemplateArgumentListInfo &Args,
- unsigned DiagID) {
+static QualType getStdTrait(Sema &S, SourceLocation Loc, StringRef Trait,
+ TemplateArgumentListInfo &Args, unsigned DiagID) {
auto DiagnoseMissing = [&] {
if (DiagID)
S.Diag(Loc, DiagID) << printTemplateArgs(S.Context.getPrintingPolicy(),
Args, /*Params*/ nullptr);
- return true;
+ return QualType();
};
// FIXME: Factor out duplication with lookupPromiseType in SemaCoroutine.
@@ -1122,12 +1121,12 @@ static bool lookupStdTypeTraitMember(Sema &S, LookupResult &TraitMemberLookup,
// missing specialization, because this can only fail if the user has been
// declaring their own names in namespace std or we don't support the
// standard library implementation in use.
- LookupResult Result(S, &S.PP.getIdentifierTable().get(Trait),
- Loc, Sema::LookupOrdinaryName);
+ LookupResult Result(S, &S.PP.getIdentifierTable().get(Trait), Loc,
+ Sema::LookupOrdinaryName);
if (!S.LookupQualifiedName(Result, Std))
return DiagnoseMissing();
if (Result.isAmbiguous())
- return true;
+ return QualType();
ClassTemplateDecl *TraitTD = Result.getAsSingle<ClassTemplateDecl>();
if (!TraitTD) {
@@ -1135,28 +1134,31 @@ static bool lookupStdTypeTraitMember(Sema &S, LookupResult &TraitMemberLookup,
NamedDecl *Found = *Result.begin();
S.Diag(Loc, diag::err_std_type_trait_not_class_template) << Trait;
S.Diag(Found->getLocation(), diag::note_declared_at);
- return true;
+ return QualType();
}
// Build the template-id.
- QualType TraitTy = S.CheckTemplateIdType(TemplateName(TraitTD), Loc, Args);
+ QualType TraitTy = S.CheckTemplateIdType(ElaboratedTypeKeyword::None,
+ TemplateName(TraitTD), Loc, Args);
if (TraitTy.isNull())
- return true;
+ return QualType();
+
if (!S.isCompleteType(Loc, TraitTy)) {
if (DiagID)
S.RequireCompleteType(
Loc, TraitTy, DiagID,
printTemplateArgs(S.Context.getPrintingPolicy(), Args,
TraitTD->getTemplateParameters()));
- return true;
+ return QualType();
}
+ return TraitTy;
+}
- CXXRecordDecl *RD = TraitTy->getAsCXXRecordDecl();
+static bool lookupMember(Sema &S, CXXRecordDecl *RD,
+ LookupResult &MemberLookup) {
assert(RD && "specialization of class template is not a class?");
-
- // Look up the member of the trait type.
- S.LookupQualifiedName(TraitMemberLookup, RD);
- return TraitMemberLookup.isAmbiguous();
+ S.LookupQualifiedName(MemberLookup, RD);
+ return MemberLookup.isAmbiguous();
}
static TemplateArgumentLoc
@@ -1178,17 +1180,20 @@ static IsTupleLike isTupleLike(Sema &S, SourceLocation Loc, QualType T,
EnterExpressionEvaluationContext ContextRAII(
S, Sema::ExpressionEvaluationContext::ConstantEvaluated);
- DeclarationName Value = S.PP.getIdentifierInfo("value");
- LookupResult R(S, Value, Loc, Sema::LookupOrdinaryName);
-
// Form template argument list for tuple_size<T>.
TemplateArgumentListInfo Args(Loc, Loc);
Args.addArgument(getTrivialTypeTemplateArgument(S, Loc, T));
+ QualType TraitTy = getStdTrait(S, Loc, "tuple_size", Args, /*DiagID=*/0);
+ if (TraitTy.isNull())
+ return IsTupleLike::NotTupleLike;
+
+ DeclarationName Value = S.PP.getIdentifierInfo("value");
+ LookupResult R(S, Value, Loc, Sema::LookupOrdinaryName);
+
// If there's no tuple_size specialization or the lookup of 'value' is empty,
// it's not tuple-like.
- if (lookupStdTypeTraitMember(S, R, Loc, "tuple_size", Args, /*DiagID*/ 0) ||
- R.empty())
+ if (lookupMember(S, TraitTy->getAsCXXRecordDecl(), R) || R.empty())
return IsTupleLike::NotTupleLike;
// If we get this far, we've committed to the tuple interpretation, but
@@ -1228,11 +1233,15 @@ static QualType getTupleLikeElementType(Sema &S, SourceLocation Loc,
getTrivialIntegralTemplateArgument(S, Loc, S.Context.getSizeType(), I));
Args.addArgument(getTrivialTypeTemplateArgument(S, Loc, T));
+ QualType TraitTy =
+ getStdTrait(S, Loc, "tuple_element", Args,
+ diag::err_decomp_decl_std_tuple_element_not_specialized);
+ if (TraitTy.isNull())
+ return QualType();
+
DeclarationName TypeDN = S.PP.getIdentifierInfo("type");
LookupResult R(S, TypeDN, Loc, Sema::LookupOrdinaryName);
- if (lookupStdTypeTraitMember(
- S, R, Loc, "tuple_element", Args,
- diag::err_decomp_decl_std_tuple_element_not_specialized))
+ if (lookupMember(S, TraitTy->getAsCXXRecordDecl(), R))
return QualType();
auto *TD = R.getAsSingle<TypeDecl>();
@@ -1246,7 +1255,8 @@ static QualType getTupleLikeElementType(Sema &S, SourceLocation Loc,
return QualType();
}
- return S.Context.getTypeDeclType(TD);
+ NestedNameSpecifier Qualifier(TraitTy.getTypePtr());
+ return S.Context.getTypeDeclType(ElaboratedTypeKeyword::None, Qualifier, TD);
}
namespace {
@@ -1452,7 +1462,7 @@ static DeclAccessPair findDecomposableBaseClass(Sema &S, SourceLocation Loc,
}
// ... [accessible, implied by other rules] base class of E.
- S.CheckBaseClassAccess(Loc, BaseType, S.Context.getRecordType(RD),
+ S.CheckBaseClassAccess(Loc, BaseType, S.Context.getCanonicalTagType(RD),
*BestPath, diag::err_decomp_decl_inaccessible_base);
AS = BestPath->Access;
@@ -1528,8 +1538,8 @@ static bool checkMemberDecomposition(Sema &S, ArrayRef<BindingDecl*> Bindings,
const auto *RD = cast_or_null<CXXRecordDecl>(BasePair.getDecl());
if (!RD)
return true;
- QualType BaseType = S.Context.getQualifiedType(S.Context.getRecordType(RD),
- DecompType.getQualifiers());
+ QualType BaseType = S.Context.getQualifiedType(
+ S.Context.getCanonicalTagType(RD), DecompType.getQualifiers());
auto *DD = cast<DecompositionDecl>(Src);
unsigned NumFields = llvm::count_if(
@@ -2174,7 +2184,10 @@ static bool CheckConstexprCtorInitializer(Sema &SemaRef,
return false;
}
} else if (Field->isAnonymousStructOrUnion()) {
- const RecordDecl *RD = Field->getType()->castAs<RecordType>()->getDecl();
+ const RecordDecl *RD = Field->getType()
+ ->castAs<RecordType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf();
for (auto *I : RD->fields())
// If an anonymous union contains an anonymous struct of which any member
// is initialized, all members must be initialized.
@@ -2666,7 +2679,9 @@ void Sema::DiagnoseImmediateEscalatingReason(FunctionDecl *FD) {
return DynamicRecursiveASTVisitor::TraverseCXXConstructorDecl(Ctr);
}
- bool TraverseType(QualType T) override { return true; }
+ bool TraverseType(QualType T, bool TraverseQualifier) override {
+ return true;
+ }
bool VisitBlockExpr(BlockExpr *T) override { return true; }
} Visitor(*this, FD);
@@ -2969,7 +2984,8 @@ bool Sema::AttachBaseSpecifiers(CXXRecordDecl *Class,
NoteIndirectBases(Context, IndirectBaseTypes, NewBaseType);
if (const RecordType *Record = NewBaseType->getAs<RecordType>()) {
- const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getOriginalDecl())
+ ->getDefinitionOrSelf();
if (Class->isInterface() &&
(!RD->isInterfaceLike() ||
KnownBase->getAccessSpecifier() != AS_public)) {
@@ -3050,7 +3066,7 @@ bool Sema::IsDerivedFrom(SourceLocation Loc, CXXRecordDecl *Derived,
// FIXME: In a modules build, do we need the entire path to be visible for us
// to be able to use the inheritance relationship?
- if (!isCompleteType(Loc, Context.getTypeDeclType(Derived)) &&
+ if (!isCompleteType(Loc, Context.getCanonicalTagType(Derived)) &&
!Derived->isBeingDefined())
return false;
@@ -3205,7 +3221,8 @@ std::string Sema::getAmbiguousPathsDisplayString(CXXBasePaths &Paths) {
// We haven't displayed a path to this particular base
// class subobject yet.
PathDisplayStr += "\n ";
- PathDisplayStr += Context.getTypeDeclType(Paths.getOrigin()).getAsString();
+ PathDisplayStr += QualType(Context.getCanonicalTagType(Paths.getOrigin()))
+ .getAsString();
for (CXXBasePath::const_iterator Element = Path->begin();
Element != Path->end(); ++Element)
PathDisplayStr += " -> " + Element->Base->getType().getAsString();
@@ -4245,7 +4262,7 @@ static bool FindBaseInitializer(Sema &SemaRef,
CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
/*DetectVirtual=*/false);
if (SemaRef.IsDerivedFrom(ClassDecl->getLocation(),
- SemaRef.Context.getTypeDeclType(ClassDecl),
+ SemaRef.Context.getCanonicalTagType(ClassDecl),
BaseType, Paths)) {
for (CXXBasePaths::paths_iterator Path = Paths.begin();
Path != Paths.end(); ++Path) {
@@ -4481,7 +4498,7 @@ Sema::BuildMemInitializer(Decl *ConstructorD,
if (getLangOpts().MSVCCompat && !getLangOpts().CPlusPlus20) {
if (auto UnqualifiedBase = R.getAsSingle<ClassTemplateDecl>()) {
auto *TempSpec = cast<TemplateSpecializationType>(
- UnqualifiedBase->getInjectedClassNameSpecialization());
+ UnqualifiedBase->getCanonicalInjectedSpecializationType(Context));
TemplateName TN = TempSpec->getTemplateName();
for (auto const &Base : ClassDecl->bases()) {
auto BaseTemplate =
@@ -4545,14 +4562,29 @@ Sema::BuildMemInitializer(Decl *ConstructorD,
}
if (BaseType.isNull()) {
- BaseType = getElaboratedType(ElaboratedTypeKeyword::None, SS,
- Context.getTypeDeclType(TyD));
MarkAnyDeclReferenced(TyD->getLocation(), TyD, /*OdrUse=*/false);
- TInfo = Context.CreateTypeSourceInfo(BaseType);
- ElaboratedTypeLoc TL = TInfo->getTypeLoc().castAs<ElaboratedTypeLoc>();
- TL.getNamedTypeLoc().castAs<TypeSpecTypeLoc>().setNameLoc(IdLoc);
- TL.setElaboratedKeywordLoc(SourceLocation());
- TL.setQualifierLoc(SS.getWithLocInContext(Context));
+
+ TypeLocBuilder TLB;
+ if (const auto *TD = dyn_cast<TagDecl>(TyD)) {
+ BaseType = Context.getTagType(ElaboratedTypeKeyword::None,
+ SS.getScopeRep(), TD, /*OwnsTag=*/false);
+ auto TL = TLB.push<TagTypeLoc>(BaseType);
+ TL.setElaboratedKeywordLoc(SourceLocation());
+ TL.setQualifierLoc(SS.getWithLocInContext(Context));
+ TL.setNameLoc(IdLoc);
+ } else if (auto *TN = dyn_cast<TypedefNameDecl>(TyD)) {
+ BaseType = Context.getTypedefType(ElaboratedTypeKeyword::None,
+ SS.getScopeRep(), TN);
+ TLB.push<TypedefTypeLoc>(BaseType).set(
+ /*ElaboratedKeywordLoc=*/SourceLocation(),
+ SS.getWithLocInContext(Context), IdLoc);
+ } else {
+ // FIXME: What else can appear here?
+ assert(SS.isEmpty());
+ BaseType = Context.getTypeDeclType(TyD);
+ TLB.pushTypeSpec(BaseType).setNameLoc(IdLoc);
+ }
+ TInfo = TLB.getTypeSourceInfo(Context, BaseType);
}
}
@@ -4661,10 +4693,12 @@ Sema::BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init,
Args = MultiExprArg(ParenList->getExprs(), ParenList->getNumExprs());
}
+ CanQualType ClassType = Context.getCanonicalTagType(ClassDecl);
+
SourceRange InitRange = Init->getSourceRange();
// Initialize the object.
- InitializedEntity DelegationEntity = InitializedEntity::InitializeDelegation(
- QualType(ClassDecl->getTypeForDecl(), 0));
+ InitializedEntity DelegationEntity =
+ InitializedEntity::InitializeDelegation(ClassType);
InitializationKind Kind =
InitList ? InitializationKind::CreateDirectList(
NameLoc, Init->getBeginLoc(), Init->getEndLoc())
@@ -4686,9 +4720,8 @@ Sema::BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init,
}
if (DelegationInit.isInvalid()) {
- DelegationInit =
- CreateRecoveryExpr(InitRange.getBegin(), InitRange.getEnd(), Args,
- QualType(ClassDecl->getTypeForDecl(), 0));
+ DelegationInit = CreateRecoveryExpr(InitRange.getBegin(),
+ InitRange.getEnd(), Args, ClassType);
if (DelegationInit.isInvalid())
return true;
} else {
@@ -4753,8 +4786,7 @@ Sema::BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo,
const CXXBaseSpecifier *DirectBaseSpec = nullptr;
const CXXBaseSpecifier *VirtualBaseSpec = nullptr;
if (!Dependent) {
- if (Context.hasSameUnqualifiedType(QualType(ClassDecl->getTypeForDecl(),0),
- BaseType))
+ if (declaresSameEntity(ClassDecl, BaseType->getAsCXXRecordDecl()))
return BuildDelegatingInitializer(BaseTInfo, Init, ClassDecl);
FindBaseInitializer(*this, ClassDecl, BaseType, DirectBaseSpec,
@@ -4774,7 +4806,7 @@ Sema::BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo,
Dependent = true;
else
return Diag(BaseLoc, diag::err_not_direct_base_or_virtual)
- << BaseType << Context.getTypeDeclType(ClassDecl)
+ << BaseType << Context.getCanonicalTagType(ClassDecl)
<< BaseTInfo->getTypeLoc().getSourceRange();
}
}
@@ -5078,9 +5110,9 @@ BuildImplicitMemberInitializer(Sema &SemaRef, CXXConstructorDecl *Constructor,
if (FieldBaseElementType->isReferenceType()) {
SemaRef.Diag(Constructor->getLocation(),
diag::err_uninitialized_member_in_ctor)
- << (int)Constructor->isImplicit()
- << SemaRef.Context.getTagDeclType(Constructor->getParent())
- << 0 << Field->getDeclName();
+ << (int)Constructor->isImplicit()
+ << SemaRef.Context.getCanonicalTagType(Constructor->getParent()) << 0
+ << Field->getDeclName();
SemaRef.Diag(Field->getLocation(), diag::note_declared_at);
return true;
}
@@ -5088,9 +5120,9 @@ BuildImplicitMemberInitializer(Sema &SemaRef, CXXConstructorDecl *Constructor,
if (FieldBaseElementType.isConstQualified()) {
SemaRef.Diag(Constructor->getLocation(),
diag::err_uninitialized_member_in_ctor)
- << (int)Constructor->isImplicit()
- << SemaRef.Context.getTagDeclType(Constructor->getParent())
- << 1 << Field->getDeclName();
+ << (int)Constructor->isImplicit()
+ << SemaRef.Context.getCanonicalTagType(Constructor->getParent()) << 1
+ << Field->getDeclName();
SemaRef.Diag(Field->getLocation(), diag::note_declared_at);
return true;
}
@@ -5391,7 +5423,7 @@ static void MarkBaseDestructorsReferenced(Sema &S, SourceLocation Location,
S.CheckDestructorAccess(Base.getBeginLoc(), Dtor,
S.PDiag(diag::err_access_dtor_base)
<< Base.getType() << Base.getSourceRange(),
- S.Context.getTypeDeclType(ClassDecl));
+ S.Context.getCanonicalTagType(ClassDecl));
S.MarkFunctionReferenced(Location, Dtor);
S.DiagnoseUseOfDecl(Dtor, Location);
@@ -5594,9 +5626,9 @@ bool Sema::SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
static void PopulateKeysForFields(FieldDecl *Field, SmallVectorImpl<const void*> &IdealInits) {
if (const RecordType *RT = Field->getType()->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl();
if (RD->isAnonymousStructOrUnion()) {
- for (auto *Field : RD->fields())
+ for (auto *Field : RD->getDefinitionOrSelf()->fields())
PopulateKeysForFields(Field, IdealInits);
return;
}
@@ -5928,16 +5960,14 @@ void Sema::MarkVirtualBaseDestructorsReferenced(
if (!Dtor)
continue;
- if (CheckDestructorAccess(
- ClassDecl->getLocation(), Dtor,
- PDiag(diag::err_access_dtor_vbase)
- << Context.getTypeDeclType(ClassDecl) << VBase.getType(),
- Context.getTypeDeclType(ClassDecl)) ==
- AR_accessible) {
+ CanQualType CT = Context.getCanonicalTagType(ClassDecl);
+ if (CheckDestructorAccess(ClassDecl->getLocation(), Dtor,
+ PDiag(diag::err_access_dtor_vbase)
+ << CT << VBase.getType(),
+ CT) == AR_accessible) {
CheckDerivedToBaseConversion(
- Context.getTypeDeclType(ClassDecl), VBase.getType(),
- diag::err_access_dtor_vbase, 0, ClassDecl->getLocation(),
- SourceRange(), DeclarationName(), nullptr);
+ CT, VBase.getType(), diag::err_access_dtor_vbase, 0,
+ ClassDecl->getLocation(), SourceRange(), DeclarationName(), nullptr);
}
MarkFunctionReferenced(Location, Dtor);
@@ -6052,10 +6082,8 @@ struct AbstractUsageInfo {
bool Invalid;
AbstractUsageInfo(Sema &S, CXXRecordDecl *Record)
- : S(S), Record(Record),
- AbstractType(S.Context.getCanonicalType(
- S.Context.getTypeDeclType(Record))),
- Invalid(false) {}
+ : S(S), Record(Record),
+ AbstractType(S.Context.getCanonicalTagType(Record)), Invalid(false) {}
void DiagnoseAbstractType() {
if (Invalid) return;
@@ -6868,8 +6896,8 @@ static bool canPassInRegisters(Sema &S, CXXRecordDecl *D,
bool isAArch64 = S.Context.getTargetInfo().getTriple().isAArch64();
uint64_t TypeSize = isAArch64 ? 128 : 64;
- if (CopyCtorIsTrivial &&
- S.getASTContext().getTypeSize(D->getTypeForDecl()) <= TypeSize)
+ if (CopyCtorIsTrivial && S.getASTContext().getTypeSize(
+ S.Context.getCanonicalTagType(D)) <= TypeSize)
return true;
return false;
}
@@ -7000,7 +7028,8 @@ void Sema::CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record) {
if ((!dtor || (!dtor->isVirtual() && dtor->getAccess() == AS_public)) &&
!Record->hasAttr<FinalAttr>())
Diag(dtor ? dtor->getLocation() : Record->getLocation(),
- diag::warn_non_virtual_dtor) << Context.getRecordType(Record);
+ diag::warn_non_virtual_dtor)
+ << Context.getCanonicalTagType(Record);
}
if (Record->isAbstract()) {
@@ -7022,7 +7051,7 @@ void Sema::CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record) {
(FA->isSpelledAsSealed() ? " sealed" : " final"));
Diag(Record->getLocation(),
diag::note_final_dtor_non_final_class_silence)
- << Context.getRecordType(Record) << FA->isSpelledAsSealed();
+ << Context.getCanonicalTagType(Record) << FA->isSpelledAsSealed();
}
}
}
@@ -7177,7 +7206,7 @@ void Sema::CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record) {
return true;
};
EffectivelyConstexprDestructor =
- Check(QualType(Record->getTypeForDecl(), 0), Check);
+ Check(Context.getCanonicalTagType(Record), Check);
}
// Define defaulted constexpr virtual functions that override a base class
@@ -7333,7 +7362,7 @@ void Sema::CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record) {
NewDecls.empty() ? NewKind : DeleteKind);
Diag(Record->getLocation(),
diag::err_type_aware_allocator_missing_matching_operator)
- << FoundOperator << Context.getRecordType(Record)
+ << FoundOperator << Context.getCanonicalTagType(Record)
<< MissingOperator;
for (auto MD : NewDecls)
Diag(MD->getLocation(),
@@ -7572,7 +7601,9 @@ static bool defaultedSpecialMemberIsConstexpr(
const RecordType *BaseType = B.getType()->getAs<RecordType>();
if (!BaseType)
continue;
- CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
+ CXXRecordDecl *BaseClassDecl =
+ cast<CXXRecordDecl>(BaseType->getOriginalDecl())
+ ->getDefinitionOrSelf();
if (!specialMemberIsConstexpr(S, BaseClassDecl, CSM, 0, ConstArg,
InheritedCtor, Inherited))
return false;
@@ -7595,7 +7626,9 @@ static bool defaultedSpecialMemberIsConstexpr(
continue;
QualType BaseType = S.Context.getBaseElementType(F->getType());
if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
- CXXRecordDecl *FieldRecDecl = cast<CXXRecordDecl>(RecordTy->getDecl());
+ CXXRecordDecl *FieldRecDecl =
+ cast<CXXRecordDecl>(RecordTy->getOriginalDecl())
+ ->getDefinitionOrSelf();
if (!specialMemberIsConstexpr(S, FieldRecDecl, CSM,
BaseType.getCVRQualifiers(),
ConstArg && !F->isMutable()))
@@ -7782,9 +7815,9 @@ bool Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
ReturnType = Type->getReturnType();
QualType ThisType = MD->getFunctionObjectParameterType();
- QualType DeclType = Context.getTypeDeclType(RD);
- DeclType = Context.getElaboratedType(ElaboratedTypeKeyword::None, nullptr,
- DeclType, nullptr);
+ QualType DeclType =
+ Context.getTagType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, RD, /*OwnsTag=*/false);
DeclType = Context.getAddrSpaceQualType(
DeclType, ThisType.getQualifiers().getAddressSpace());
QualType ExpectedReturnType = Context.getLValueReferenceType(DeclType);
@@ -7819,7 +7852,7 @@ bool Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
if (!ExplicitObjectParameter.isNull() &&
(!ExplicitObjectParameter->isReferenceType() ||
!Context.hasSameType(ExplicitObjectParameter.getNonReferenceType(),
- Context.getRecordType(RD)))) {
+ Context.getCanonicalTagType(RD)))) {
if (DeleteOnTypeMismatch)
ShouldDeleteForTypeMismatch = true;
else {
@@ -8278,7 +8311,7 @@ private:
Best->FoundDecl.getDecl()->isCXXClassMember()) {
QualType ObjectType = Subobj.Kind == Subobject::Member
? Args[0]->getType()
- : S.Context.getRecordType(RD);
+ : S.Context.getCanonicalTagType(RD);
if (!S.isMemberAccessibleForDeletion(
ArgClass, Best->FoundDecl, ObjectType, Subobj.Loc,
Diagnose == ExplainDeleted
@@ -8979,7 +9012,7 @@ bool Sema::CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *FD,
bool Ok = !IsMethod || FD->hasCXXExplicitFunctionObjectParameter();
QualType ExpectedTy;
if (RD)
- ExpectedTy = Context.getRecordType(RD);
+ ExpectedTy = Context.getCanonicalTagType(RD);
if (auto *Ref = CTy->getAs<LValueReferenceType>()) {
CTy = Ref->getPointeeType();
if (RD)
@@ -9002,7 +9035,7 @@ bool Sema::CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *FD,
// corresponding defaulted 'operator<=>' already.
if (!FD->isImplicit()) {
if (RD) {
- QualType PlainTy = Context.getRecordType(RD);
+ CanQualType PlainTy = Context.getCanonicalTagType(RD);
QualType RefTy =
Context.getLValueReferenceType(PlainTy.withConst());
Diag(FD->getLocation(), diag::err_defaulted_comparison_param)
@@ -9033,7 +9066,7 @@ bool Sema::CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *FD,
} else {
// Out of class, require the defaulted comparison to be a friend (of a
// complete type, per CWG2547).
- if (RequireCompleteType(FD->getLocation(), Context.getRecordType(RD),
+ if (RequireCompleteType(FD->getLocation(), Context.getCanonicalTagType(RD),
diag::err_defaulted_comparison_not_friend, int(DCK),
int(1)))
return true;
@@ -9486,15 +9519,15 @@ bool SpecialMemberDeletionInfo::isAccessible(Subobject Subobj,
CXXMethodDecl *target) {
/// If we're operating on a base class, the object type is the
/// type of this special member.
- QualType objectTy;
+ CanQualType objectTy;
AccessSpecifier access = target->getAccess();
if (CXXBaseSpecifier *base = Subobj.dyn_cast<CXXBaseSpecifier*>()) {
- objectTy = S.Context.getTypeDeclType(MD->getParent());
+ objectTy = S.Context.getCanonicalTagType(MD->getParent());
access = CXXRecordDecl::MergeAccess(base->getAccessSpecifier(), access);
// If we're operating on a field, the object type is the type of the field.
} else {
- objectTy = S.Context.getTypeDeclType(target->getParent());
+ objectTy = S.Context.getCanonicalTagType(target->getParent());
}
return S.isMemberAccessibleForDeletion(
@@ -9912,7 +9945,7 @@ bool Sema::ShouldDeleteSpecialMember(CXXMethodDecl *MD,
// results in an ambiguity or in a function that is deleted or inaccessible
if (CSM == CXXSpecialMemberKind::Destructor && MD->isVirtual()) {
FunctionDecl *OperatorDelete = nullptr;
- QualType DeallocType = Context.getRecordType(RD);
+ CanQualType DeallocType = Context.getCanonicalTagType(RD);
DeclarationName Name =
Context.DeclarationNames.getCXXOperatorName(OO_Delete);
ImplicitDeallocationParameters IDP = {
@@ -10245,7 +10278,7 @@ static bool checkTrivialClassMembers(Sema &S, CXXRecordDecl *RD,
void Sema::DiagnoseNontrivial(const CXXRecordDecl *RD,
CXXSpecialMemberKind CSM) {
- QualType Ty = Context.getRecordType(RD);
+ CanQualType Ty = Context.getCanonicalTagType(RD);
bool ConstArg = (CSM == CXXSpecialMemberKind::CopyConstructor ||
CSM == CXXSpecialMemberKind::CopyAssignment);
@@ -10293,9 +10326,9 @@ bool Sema::SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMemberKind CSM,
ClangABICompat14)) {
if (Diagnose)
Diag(Param0->getLocation(), diag::note_nontrivial_param_type)
- << Param0->getSourceRange() << Param0->getType()
- << Context.getLValueReferenceType(
- Context.getRecordType(RD).withConst());
+ << Param0->getSourceRange() << Param0->getType()
+ << Context.getLValueReferenceType(
+ Context.getCanonicalTagType(RD).withConst());
return false;
}
@@ -10312,8 +10345,8 @@ bool Sema::SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMemberKind CSM,
if (!RT || RT->getPointeeType().getCVRQualifiers()) {
if (Diagnose)
Diag(Param0->getLocation(), diag::note_nontrivial_param_type)
- << Param0->getSourceRange() << Param0->getType()
- << Context.getRValueReferenceType(Context.getRecordType(RD));
+ << Param0->getSourceRange() << Param0->getType()
+ << Context.getRValueReferenceType(Context.getCanonicalTagType(RD));
return false;
}
break;
@@ -10432,8 +10465,10 @@ public:
/// method overloads virtual methods in a base class without overriding any,
/// to be used with CXXRecordDecl::lookupInBases().
bool operator()(const CXXBaseSpecifier *Specifier, CXXBasePath &Path) {
- RecordDecl *BaseRecord =
- Specifier->getType()->castAs<RecordType>()->getDecl();
+ RecordDecl *BaseRecord = Specifier->getType()
+ ->castAs<RecordType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf();
DeclarationName Name = Method->getDeclName();
assert(Name.getNameKind() == DeclarationName::Identifier);
@@ -10591,7 +10626,8 @@ void Sema::checkIllFormedTrivialABIStruct(CXXRecordDecl &RD) {
if (const auto *RT = FT->getBaseElementTypeUnsafe()->getAs<RecordType>())
if (!RT->isDependentType() &&
- !cast<CXXRecordDecl>(RT->getDecl())->canPassInRegisters()) {
+ !cast<CXXRecordDecl>(RT->getOriginalDecl()->getDefinitionOrSelf())
+ ->canPassInRegisters()) {
PrintDiagAndRemoveAttr(5);
return;
}
@@ -10626,7 +10662,7 @@ void Sema::checkIllFormedTrivialABIStruct(CXXRecordDecl &RD) {
void Sema::checkIncorrectVTablePointerAuthenticationAttribute(
CXXRecordDecl &RD) {
- if (RequireCompleteType(RD.getLocation(), Context.getRecordType(&RD),
+ if (RequireCompleteType(RD.getLocation(), Context.getCanonicalTagType(&RD),
diag::err_incomplete_type_vtable_pointer_auth))
return;
@@ -11056,9 +11092,10 @@ void Sema::CheckConstructor(CXXConstructorDecl *Constructor) {
if (!Constructor->isInvalidDecl() &&
Constructor->hasOneParamOrDefaultArgs() &&
!Constructor->isFunctionTemplateSpecialization()) {
- QualType ParamType = Constructor->getParamDecl(0)->getType();
- QualType ClassTy = Context.getTagDeclType(ClassDecl);
- if (Context.getCanonicalType(ParamType).getUnqualifiedType() == ClassTy) {
+ CanQualType ParamType =
+ Constructor->getParamDecl(0)->getType()->getCanonicalTypeUnqualified();
+ CanQualType ClassTy = Context.getCanonicalTagType(ClassDecl);
+ if (ParamType == ClassTy) {
SourceLocation ParamLoc = Constructor->getParamDecl(0)->getLocation();
const char *ConstRef
= Constructor->getParamDecl(0)->getIdentifier() ? "const &"
@@ -11429,8 +11466,7 @@ Decl *Sema::ActOnConversionDeclarator(CXXConversionDecl *Conversion) {
// same object type (or a reference to it), to a (possibly
// cv-qualified) base class of that type (or a reference to it),
// or to (possibly cv-qualified) void.
- QualType ClassType
- = Context.getCanonicalType(Context.getTypeDeclType(ClassDecl));
+ CanQualType ClassType = Context.getCanonicalTagType(ClassDecl);
if (const ReferenceType *ConvTypeRef = ConvType->getAs<ReferenceType>())
ConvType = ConvTypeRef->getPointeeType();
if (Conversion->getTemplateSpecializationKind() != TSK_Undeclared &&
@@ -11698,8 +11734,8 @@ bool Sema::CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
const QualifiedTemplateName *Qualifiers =
SpecifiedName.getAsQualifiedTemplateName();
assert(Qualifiers && "expected QualifiedTemplate");
- bool SimplyWritten = !Qualifiers->hasTemplateKeyword() &&
- Qualifiers->getQualifier() == nullptr;
+ bool SimplyWritten =
+ !Qualifiers->hasTemplateKeyword() && !Qualifiers->getQualifier();
if (SimplyWritten && TemplateMatches)
AcceptableReturnType = true;
else {
@@ -12004,10 +12040,11 @@ QualType Sema::CheckComparisonCategoryType(ComparisonCategoryType Kind,
// Use an elaborated type for diagnostics which has a name containing the
// prepended 'std' namespace but not any inline namespace names.
auto TyForDiags = [&](ComparisonCategoryInfo *Info) {
- auto *NNS =
- NestedNameSpecifier::Create(Context, nullptr, getStdNamespace());
- return Context.getElaboratedType(ElaboratedTypeKeyword::None, NNS,
- Info->getType());
+ NestedNameSpecifier Qualifier(Context, getStdNamespace(),
+ /*Prefix=*/std::nullopt);
+ return Context.getTagType(ElaboratedTypeKeyword::None, Qualifier,
+ Info->Record,
+ /*OwnsTag=*/false);
};
// Check if we've already successfully checked the comparison category type
@@ -12143,26 +12180,14 @@ static bool isStdClassTemplate(Sema &S, QualType SugaredType, QualType *TypeArg,
ClassTemplateDecl *Template = nullptr;
ArrayRef<TemplateArgument> Arguments;
- {
- const TemplateSpecializationType *TST =
- SugaredType->getAsNonAliasTemplateSpecializationType();
- if (!TST)
- if (const auto *ICN = SugaredType->getAs<InjectedClassNameType>())
- TST = ICN->getInjectedTST();
- if (TST) {
- Template = dyn_cast_or_null<ClassTemplateDecl>(
- TST->getTemplateName().getAsTemplateDecl());
- Arguments = TST->template_arguments();
- } else if (const RecordType *RT = SugaredType->getAs<RecordType>()) {
- ClassTemplateSpecializationDecl *Specialization =
- dyn_cast<ClassTemplateSpecializationDecl>(RT->getDecl());
- if (!Specialization) {
- ReportMatchingNameAsMalformed(RT->getDecl());
- return false;
- }
- Template = Specialization->getSpecializedTemplate();
- Arguments = Specialization->getTemplateArgs().asArray();
- }
+ if (const TemplateSpecializationType *TST =
+ SugaredType->getAsNonAliasTemplateSpecializationType()) {
+ Template = dyn_cast_or_null<ClassTemplateDecl>(
+ TST->getTemplateName().getAsTemplateDecl());
+ Arguments = TST->template_arguments();
+ } else if (const auto *TT = SugaredType->getAs<TagType>()) {
+ Template = TT->getTemplateDecl();
+ Arguments = TT->getTemplateArgs(S.Context);
}
if (!Template) {
@@ -12288,13 +12313,8 @@ static QualType BuildStdClassTemplate(Sema &S, ClassTemplateDecl *CTD,
auto TSI = S.Context.getTrivialTypeSourceInfo(TypeParam, Loc);
Args.addArgument(TemplateArgumentLoc(TemplateArgument(TypeParam), TSI));
- QualType T = S.CheckTemplateIdType(TemplateName(CTD), Loc, Args);
- if (T.isNull())
- return QualType();
-
- return S.Context.getElaboratedType(
- ElaboratedTypeKeyword::None,
- NestedNameSpecifier::Create(S.Context, nullptr, S.getStdNamespace()), T);
+ return S.CheckTemplateIdType(ElaboratedTypeKeyword::None, TemplateName(CTD),
+ Loc, Args);
}
QualType Sema::BuildStdInitializerList(QualType Element, SourceLocation Loc) {
@@ -12436,9 +12456,7 @@ Decl *Sema::ActOnUsingDirective(Scope *S, SourceLocation UsingLoc,
S = S->getDeclParent();
UsingDirectiveDecl *UDir = nullptr;
- NestedNameSpecifier *Qualifier = nullptr;
- if (SS.isSet())
- Qualifier = SS.getScopeRep();
+ NestedNameSpecifier Qualifier = SS.getScopeRep();
// Lookup namespace name.
LookupResult R(*this, NamespcName, IdentLoc, LookupNamespaceName);
@@ -12450,14 +12468,16 @@ Decl *Sema::ActOnUsingDirective(Scope *S, SourceLocation UsingLoc,
R.clear();
// Allow "using namespace std;" or "using namespace ::std;" even if
// "std" hasn't been defined yet, for GCC compatibility.
- if ((!Qualifier || Qualifier->getKind() == NestedNameSpecifier::Global) &&
+ if ((!Qualifier ||
+ Qualifier.getKind() == NestedNameSpecifier::Kind::Global) &&
NamespcName->isStr("std")) {
Diag(IdentLoc, diag::ext_using_undefined_std);
R.addDecl(getOrCreateStdNamespace());
R.resolveKind();
}
// Otherwise, attempt typo correction.
- else TryNamespaceTypoCorrection(*this, R, S, SS, IdentLoc, NamespcName);
+ else
+ TryNamespaceTypoCorrection(*this, R, S, SS, IdentLoc, NamespcName);
}
if (!R.empty()) {
@@ -12584,7 +12604,7 @@ Decl *Sema::ActOnUsingDeclaration(Scope *S, AccessSpecifier AS,
DiagnoseUnexpandedParameterPack(TargetNameInfo, UPPC_UsingDeclaration))
return nullptr;
} else {
- if (!SS.getScopeRep()->containsUnexpandedParameterPack() &&
+ if (!SS.getScopeRep().containsUnexpandedParameterPack() &&
!TargetNameInfo.containsUnexpandedParameterPack()) {
Diag(EllipsisLoc, diag::err_pack_expansion_without_parameter_packs)
<< SourceRange(SS.getBeginLoc(), TargetNameInfo.getEndLoc());
@@ -12869,7 +12889,7 @@ UsingShadowDecl *Sema::BuildUsingShadowDecl(Scope *S, BaseUsingDecl *BUD,
UsingDecl *Using = cast<UsingDecl>(BUD);
bool IsVirtualBase =
isVirtualDirectBase(cast<CXXRecordDecl>(CurContext),
- Using->getQualifier()->getAsRecordDecl());
+ Using->getQualifier().getAsRecordDecl());
Shadow = ConstructorUsingShadowDecl::Create(
Context, CurContext, Using->getLocation(), Using, Orig, IsVirtualBase);
} else {
@@ -12935,7 +12955,7 @@ namespace {
class UsingValidatorCCC final : public CorrectionCandidateCallback {
public:
UsingValidatorCCC(bool HasTypenameKeyword, bool IsInstantiation,
- NestedNameSpecifier *NNS, CXXRecordDecl *RequireMemberOf)
+ NestedNameSpecifier NNS, CXXRecordDecl *RequireMemberOf)
: HasTypenameKeyword(HasTypenameKeyword),
IsInstantiation(IsInstantiation), OldNNS(NNS),
RequireMemberOf(RequireMemberOf) {}
@@ -12962,24 +12982,23 @@ public:
ASTContext &Ctx = ND->getASTContext();
if (!Ctx.getLangOpts().CPlusPlus11)
return false;
- QualType FoundType = Ctx.getRecordType(FoundRecord);
+ CanQualType FoundType = Ctx.getCanonicalTagType(FoundRecord);
// Check that the injected-class-name is named as a member of its own
// type; we don't want to suggest 'using Derived::Base;', since that
// means something else.
- NestedNameSpecifier *Specifier =
- Candidate.WillReplaceSpecifier()
- ? Candidate.getCorrectionSpecifier()
- : OldNNS;
- if (!Specifier->getAsType() ||
- !Ctx.hasSameType(QualType(Specifier->getAsType(), 0), FoundType))
+ NestedNameSpecifier Specifier = Candidate.WillReplaceSpecifier()
+ ? Candidate.getCorrectionSpecifier()
+ : OldNNS;
+ if (Specifier.getKind() != NestedNameSpecifier::Kind::Type ||
+ !Ctx.hasSameType(QualType(Specifier.getAsType(), 0), FoundType))
return false;
// Check that this inheriting constructor declaration actually names a
// direct base class of the current class.
bool AnyDependentBases = false;
if (!findDirectBaseWithType(RequireMemberOf,
- Ctx.getRecordType(FoundRecord),
+ Ctx.getCanonicalTagType(FoundRecord),
AnyDependentBases) &&
!AnyDependentBases)
return false;
@@ -13009,7 +13028,7 @@ public:
private:
bool HasTypenameKeyword;
bool IsInstantiation;
- NestedNameSpecifier *OldNNS;
+ NestedNameSpecifier OldNNS;
CXXRecordDecl *RequireMemberOf;
};
} // end anonymous namespace
@@ -13051,7 +13070,7 @@ NamedDecl *Sema::BuildUsingDeclaration(
if (UsingName.getName().getNameKind() == DeclarationName::CXXConstructorName)
if (auto *RD = dyn_cast<CXXRecordDecl>(CurContext))
UsingName.setName(Context.DeclarationNames.getCXXConstructorName(
- Context.getCanonicalType(Context.getRecordType(RD))));
+ Context.getCanonicalTagType(RD)));
// Do the redeclaration lookup in the current scope.
LookupResult Previous(*this, UsingName, LookupUsingDeclName,
@@ -13154,7 +13173,7 @@ NamedDecl *Sema::BuildUsingDeclaration(
// equal to that of the current context.
if (CurContext->isRecord()) {
R.setBaseObjectType(
- Context.getTypeDeclType(cast<CXXRecordDecl>(CurContext)));
+ Context.getCanonicalTagType(cast<CXXRecordDecl>(CurContext)));
}
LookupQualifiedName(R, LookupContext);
@@ -13220,7 +13239,7 @@ NamedDecl *Sema::BuildUsingDeclaration(
// constructor.
auto *CurClass = cast<CXXRecordDecl>(CurContext);
UsingName.setName(Context.DeclarationNames.getCXXConstructorName(
- Context.getCanonicalType(Context.getRecordType(CurClass))));
+ Context.getCanonicalTagType(CurClass)));
UsingName.setNamedTypeInfo(nullptr);
for (auto *Ctor : LookupConstructors(RD))
R.addDecl(Ctor);
@@ -13366,20 +13385,16 @@ NamedDecl *Sema::BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
bool Sema::CheckInheritingConstructorUsingDecl(UsingDecl *UD) {
assert(!UD->hasTypename() && "expecting a constructor name");
- const Type *SourceType = UD->getQualifier()->getAsType();
- assert(SourceType &&
- "Using decl naming constructor doesn't have type in scope spec.");
+ QualType SourceType(UD->getQualifier().getAsType(), 0);
CXXRecordDecl *TargetClass = cast<CXXRecordDecl>(CurContext);
// Check whether the named type is a direct base class.
bool AnyDependentBases = false;
- auto *Base = findDirectBaseWithType(TargetClass, QualType(SourceType, 0),
- AnyDependentBases);
+ auto *Base =
+ findDirectBaseWithType(TargetClass, SourceType, AnyDependentBases);
if (!Base && !AnyDependentBases) {
- Diag(UD->getUsingLoc(),
- diag::err_using_decl_constructor_not_in_direct_base)
- << UD->getNameInfo().getSourceRange()
- << QualType(SourceType, 0) << TargetClass;
+ Diag(UD->getUsingLoc(), diag::err_using_decl_constructor_not_in_direct_base)
+ << UD->getNameInfo().getSourceRange() << SourceType << TargetClass;
UD->setInvalidDecl();
return true;
}
@@ -13395,7 +13410,7 @@ bool Sema::CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Prev) {
- NestedNameSpecifier *Qual = SS.getScopeRep();
+ NestedNameSpecifier Qual = SS.getScopeRep();
// C++03 [namespace.udecl]p8:
// C++0x [namespace.udecl]p10:
@@ -13410,7 +13425,7 @@ bool Sema::CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
// declaration in the same scope.
// FIXME: How should we check for dependent type-type conflicts at block
// scope?
- if (Qual->isDependent() && !HasTypenameKeyword) {
+ if (Qual.isDependent() && !HasTypenameKeyword) {
for (auto *D : Prev) {
if (!isa<TypeDecl>(D) && !isa<UsingDecl>(D) && !isa<UsingPackDecl>(D)) {
bool OldCouldBeEnumerator =
@@ -13427,13 +13442,12 @@ bool Sema::CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
return false;
}
- const NestedNameSpecifier *CNNS =
- Context.getCanonicalNestedNameSpecifier(Qual);
+ NestedNameSpecifier CNNS = Qual.getCanonical();
for (LookupResult::iterator I = Prev.begin(), E = Prev.end(); I != E; ++I) {
NamedDecl *D = *I;
bool DTypename;
- NestedNameSpecifier *DQual;
+ NestedNameSpecifier DQual = std::nullopt;
if (UsingDecl *UD = dyn_cast<UsingDecl>(D)) {
DTypename = UD->hasTypename();
DQual = UD->getQualifier();
@@ -13454,7 +13468,7 @@ bool Sema::CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
// using decls differ if they name different scopes (but note that
// template instantiation can cause this check to trigger when it
// didn't before instantiation).
- if (CNNS != Context.getCanonicalNestedNameSpecifier(DQual))
+ if (CNNS != DQual.getCanonical())
continue;
Diag(NameLoc, diag::err_using_decl_redeclaration) << SS.getRange();
@@ -13914,7 +13928,8 @@ bool SpecialMemberExceptionSpecInfo::visitBase(CXXBaseSpecifier *Base) {
if (!RT)
return false;
- auto *BaseClass = cast<CXXRecordDecl>(RT->getDecl());
+ auto *BaseClass =
+ cast<CXXRecordDecl>(RT->getOriginalDecl())->getDefinitionOrSelf();
Sema::SpecialMemberOverloadResult SMOR = lookupInheritedCtor(BaseClass);
if (auto *BaseCtor = SMOR.getMethod()) {
visitSubobjectCall(Base, BaseCtor);
@@ -13940,8 +13955,9 @@ bool SpecialMemberExceptionSpecInfo::visitField(FieldDecl *FD) {
ExceptSpec.CalledExpr(E);
} else if (auto *RT = S.Context.getBaseElementType(FD->getType())
->getAs<RecordType>()) {
- visitClassSubobject(cast<CXXRecordDecl>(RT->getDecl()), FD,
- FD->getType().getCVRQualifiers());
+ visitClassSubobject(
+ cast<CXXRecordDecl>(RT->getOriginalDecl())->getDefinitionOrSelf(), FD,
+ FD->getType().getCVRQualifiers());
}
return false;
}
@@ -14003,7 +14019,7 @@ ComputeDefaultedSpecialMemberExceptionSpec(
// attempting to resolve an exception specification before it's known
// at a higher level.
if (S.RequireCompleteType(MD->getLocation(),
- S.Context.getRecordType(ClassDecl),
+ S.Context.getCanonicalTagType(ClassDecl),
diag::err_exception_spec_incomplete_type))
return Info.ExceptSpec;
@@ -14134,8 +14150,7 @@ CXXConstructorDecl *Sema::DeclareImplicitDefaultConstructor(
*this, ClassDecl, CXXSpecialMemberKind::DefaultConstructor, false);
// Create the actual constructor declaration.
- CanQualType ClassType
- = Context.getCanonicalType(Context.getTypeDeclType(ClassDecl));
+ CanQualType ClassType = Context.getCanonicalTagType(ClassDecl);
SourceLocation ClassLoc = ClassDecl->getLocation();
DeclarationName Name
= Context.DeclarationNames.getCXXConstructorName(ClassType);
@@ -14420,8 +14435,7 @@ CXXDestructorDecl *Sema::DeclareImplicitDestructor(CXXRecordDecl *ClassDecl) {
*this, ClassDecl, CXXSpecialMemberKind::Destructor, false);
// Create the actual destructor declaration.
- CanQualType ClassType
- = Context.getCanonicalType(Context.getTypeDeclType(ClassDecl));
+ CanQualType ClassType = Context.getCanonicalTagType(ClassDecl);
SourceLocation ClassLoc = ClassDecl->getLocation();
DeclarationName Name
= Context.DeclarationNames.getCXXDestructorName(ClassType);
@@ -14759,9 +14773,10 @@ buildMemcpyForAssignmentOp(Sema &S, SourceLocation Loc, QualType T,
VK_PRValue, OK_Ordinary, Loc, false, S.CurFPFeatureOverrides());
const Type *E = T->getBaseElementTypeUnsafe();
- bool NeedsCollectableMemCpy =
- E->isRecordType() &&
- E->castAs<RecordType>()->getDecl()->hasObjectMember();
+ bool NeedsCollectableMemCpy = E->isRecordType() && E->castAs<RecordType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->hasObjectMember();
// Create a reference to the __builtin_objc_memmove_collectable function
StringRef MemCpyName = NeedsCollectableMemCpy ?
@@ -14838,7 +14853,8 @@ buildSingleCopyAssignRecursively(Sema &S, SourceLocation Loc, QualType T,
// ignoring any possible virtual overriding functions in more derived
// classes);
if (const RecordType *RecordTy = T->getAs<RecordType>()) {
- CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(RecordTy->getDecl());
+ CXXRecordDecl *ClassDecl =
+ cast<CXXRecordDecl>(RecordTy->getOriginalDecl())->getDefinitionOrSelf();
// Look for operator=.
DeclarationName Name
@@ -14885,10 +14901,9 @@ buildSingleCopyAssignRecursively(Sema &S, SourceLocation Loc, QualType T,
// reference to operator=; this is required to suppress the virtual
// call mechanism.
CXXScopeSpec SS;
+ // FIXME: Don't canonicalize this.
const Type *CanonicalT = S.Context.getCanonicalType(T.getTypePtr());
- SS.MakeTrivial(S.Context,
- NestedNameSpecifier::Create(S.Context, nullptr, CanonicalT),
- Loc);
+ SS.MakeTrivial(S.Context, NestedNameSpecifier(CanonicalT), Loc);
// Create the reference to operator=.
ExprResult OpEqualRef
@@ -15042,9 +15057,9 @@ CXXMethodDecl *Sema::DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl) {
if (DSM.isAlreadyBeingDeclared())
return nullptr;
- QualType ArgType = Context.getTypeDeclType(ClassDecl);
- ArgType = Context.getElaboratedType(ElaboratedTypeKeyword::None, nullptr,
- ArgType, nullptr);
+ QualType ArgType = Context.getTagType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, ClassDecl,
+ /*OwnsTag=*/false);
LangAS AS = getDefaultCXXMethodAddrSpace();
if (AS != LangAS::Default)
ArgType = Context.getAddrSpaceQualType(ArgType, AS);
@@ -15303,7 +15318,8 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
// Check for members of reference type; we can't copy those.
if (Field->getType()->isReferenceType()) {
Diag(ClassDecl->getLocation(), diag::err_uninitialized_member_for_assign)
- << Context.getTagDeclType(ClassDecl) << 0 << Field->getDeclName();
+ << Context.getCanonicalTagType(ClassDecl) << 0
+ << Field->getDeclName();
Diag(Field->getLocation(), diag::note_declared_at);
Invalid = true;
continue;
@@ -15313,7 +15329,8 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
QualType BaseType = Context.getBaseElementType(Field->getType());
if (!BaseType->getAs<RecordType>() && BaseType.isConstQualified()) {
Diag(ClassDecl->getLocation(), diag::err_uninitialized_member_for_assign)
- << Context.getTagDeclType(ClassDecl) << 1 << Field->getDeclName();
+ << Context.getCanonicalTagType(ClassDecl) << 1
+ << Field->getDeclName();
Diag(Field->getLocation(), diag::note_declared_at);
Invalid = true;
continue;
@@ -15398,9 +15415,9 @@ CXXMethodDecl *Sema::DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl) {
// Note: The following rules are largely analogous to the move
// constructor rules.
- QualType ArgType = Context.getTypeDeclType(ClassDecl);
- ArgType = Context.getElaboratedType(ElaboratedTypeKeyword::None, nullptr,
- ArgType, nullptr);
+ QualType ArgType = Context.getTagType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, ClassDecl,
+ /*OwnsTag=*/false);
LangAS AS = getDefaultCXXMethodAddrSpace();
if (AS != LangAS::Default)
ArgType = Context.getAddrSpaceQualType(ArgType, AS);
@@ -15690,7 +15707,8 @@ void Sema::DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
// Check for members of reference type; we can't move those.
if (Field->getType()->isReferenceType()) {
Diag(ClassDecl->getLocation(), diag::err_uninitialized_member_for_assign)
- << Context.getTagDeclType(ClassDecl) << 0 << Field->getDeclName();
+ << Context.getCanonicalTagType(ClassDecl) << 0
+ << Field->getDeclName();
Diag(Field->getLocation(), diag::note_declared_at);
Invalid = true;
continue;
@@ -15700,7 +15718,8 @@ void Sema::DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
QualType BaseType = Context.getBaseElementType(Field->getType());
if (!BaseType->getAs<RecordType>() && BaseType.isConstQualified()) {
Diag(ClassDecl->getLocation(), diag::err_uninitialized_member_for_assign)
- << Context.getTagDeclType(ClassDecl) << 1 << Field->getDeclName();
+ << Context.getCanonicalTagType(ClassDecl) << 1
+ << Field->getDeclName();
Diag(Field->getLocation(), diag::note_declared_at);
Invalid = true;
continue;
@@ -15791,10 +15810,10 @@ CXXConstructorDecl *Sema::DeclareImplicitCopyConstructor(
if (DSM.isAlreadyBeingDeclared())
return nullptr;
- QualType ClassType = Context.getTypeDeclType(ClassDecl);
+ QualType ClassType = Context.getTagType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, ClassDecl,
+ /*OwnsTag=*/false);
QualType ArgType = ClassType;
- ArgType = Context.getElaboratedType(ElaboratedTypeKeyword::None, nullptr,
- ArgType, nullptr);
bool Const = ClassDecl->implicitCopyConstructorHasConstParam();
if (Const)
ArgType = ArgType.withConst();
@@ -15938,11 +15957,11 @@ CXXConstructorDecl *Sema::DeclareImplicitMoveConstructor(
if (DSM.isAlreadyBeingDeclared())
return nullptr;
- QualType ClassType = Context.getTypeDeclType(ClassDecl);
+ QualType ClassType = Context.getTagType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, ClassDecl,
+ /*OwnsTag=*/false);
QualType ArgType = ClassType;
- ArgType = Context.getElaboratedType(ElaboratedTypeKeyword::None, nullptr,
- ArgType, nullptr);
LangAS AS = getDefaultCXXMethodAddrSpace();
if (AS != LangAS::Default)
ArgType = Context.getAddrSpaceQualType(ClassType, AS);
@@ -16297,7 +16316,8 @@ void Sema::FinalizeVarWithDestructor(VarDecl *VD, const RecordType *Record) {
if (VD->getInit() && VD->getInit()->containsErrors())
return;
- CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(Record->getDecl());
+ CXXRecordDecl *ClassDecl =
+ cast<CXXRecordDecl>(Record->getOriginalDecl())->getDefinitionOrSelf();
if (ClassDecl->isInvalidDecl()) return;
if (ClassDecl->hasIrrelevantDestructor()) return;
if (ClassDecl->isDependentContext()) return;
@@ -16639,10 +16659,10 @@ static inline bool CheckOperatorNewDeleteTypes(
if (CheckType(SizeParameterIndex, SemaRef.Context.getSizeType(), "size_t"))
return true;
- TypeDecl *StdAlignValTDecl = SemaRef.getStdAlignValT();
- QualType StdAlignValT =
- StdAlignValTDecl ? SemaRef.Context.getTypeDeclType(StdAlignValTDecl)
- : QualType();
+ TagDecl *StdAlignValTDecl = SemaRef.getStdAlignValT();
+ CanQualType StdAlignValT =
+ StdAlignValTDecl ? SemaRef.Context.getCanonicalTagType(StdAlignValTDecl)
+ : CanQualType();
if (CheckType(SizeParameterIndex + 1, StdAlignValT, "std::align_val_t"))
return true;
@@ -16682,8 +16702,8 @@ CheckOperatorDeleteDeclaration(Sema &SemaRef, FunctionDecl *FnDecl) {
auto *MD = dyn_cast<CXXMethodDecl>(FnDecl);
auto ConstructDestroyingDeleteAddressType = [&]() {
assert(MD);
- return SemaRef.Context.getCanonicalType(SemaRef.Context.getPointerType(
- SemaRef.Context.getRecordType(MD->getParent())));
+ return SemaRef.Context.getPointerType(
+ SemaRef.Context.getCanonicalTagType(MD->getParent()));
};
// C++ P2719: A destroying operator delete cannot be type aware
@@ -16722,8 +16742,8 @@ CheckOperatorDeleteDeclaration(Sema &SemaRef, FunctionDecl *FnDecl) {
// function shall be of type void *.
CanQualType ExpectedAddressParamType =
MD && IsPotentiallyDestroyingOperatorDelete(SemaRef, MD)
- ? SemaRef.Context.getCanonicalType(SemaRef.Context.getPointerType(
- SemaRef.Context.getRecordType(MD->getParent())))
+ ? SemaRef.Context.getPointerType(
+ SemaRef.Context.getCanonicalTagType(MD->getParent()))
: SemaRef.Context.VoidPtrTy;
// C++ [basic.stc.dynamic.deallocation]p2:
@@ -17954,27 +17974,14 @@ DeclResult Sema::ActOnTemplatedFriendTag(
/*OOK=*/OffsetOfKind::Outside);
}
+ TypeSourceInfo *TSI = nullptr;
ElaboratedTypeKeyword Keyword
= TypeWithKeyword::getKeywordForTagTypeKind(Kind);
- QualType T = CheckTypenameType(Keyword, TagLoc, QualifierLoc,
- *Name, NameLoc);
+ QualType T = CheckTypenameType(Keyword, TagLoc, QualifierLoc, *Name,
+ NameLoc, &TSI, /*DeducedTSTContext=*/true);
if (T.isNull())
return true;
- TypeSourceInfo *TSI = Context.CreateTypeSourceInfo(T);
- if (isa<DependentNameType>(T)) {
- DependentNameTypeLoc TL =
- TSI->getTypeLoc().castAs<DependentNameTypeLoc>();
- TL.setElaboratedKeywordLoc(TagLoc);
- TL.setQualifierLoc(QualifierLoc);
- TL.setNameLoc(NameLoc);
- } else {
- ElaboratedTypeLoc TL = TSI->getTypeLoc().castAs<ElaboratedTypeLoc>();
- TL.setElaboratedKeywordLoc(TagLoc);
- TL.setQualifierLoc(QualifierLoc);
- TL.getNamedTypeLoc().castAs<TypeSpecTypeLoc>().setNameLoc(NameLoc);
- }
-
FriendDecl *Friend =
FriendDecl::Create(Context, CurContext, NameLoc, TSI, FriendLoc,
EllipsisLoc, TempParamLists);
@@ -18286,7 +18293,7 @@ NamedDecl *Sema::ActOnFriendFunctionDecl(Scope *S, Declarator &D,
// - There's a non-dependent scope specifier, in which case we
// compute it and do a previous lookup there for a function
// or function template.
- } else if (!SS.getScopeRep()->isDependent()) {
+ } else if (!SS.getScopeRep().isDependent()) {
DC = computeDeclContext(SS);
if (!DC) return nullptr;
@@ -18793,7 +18800,7 @@ bool Sema::CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
// complete at the point of declaration of D::f or shall be the class
// type D.
if (const RecordType *RT = NewClassTy->getAs<RecordType>()) {
- if (!RT->isBeingDefined() &&
+ if (!RT->getOriginalDecl()->isEntityBeingDefined() &&
RequireCompleteType(New->getLocation(), NewClassTy,
diag::err_covariant_return_incomplete,
New->getDeclName()))
@@ -19148,8 +19155,9 @@ void Sema::MarkVirtualMembersReferenced(SourceLocation Loc,
return;
for (const auto &I : RD->bases()) {
- const auto *Base =
- cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
+ const auto *Base = cast<CXXRecordDecl>(
+ I.getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
if (Base->getNumVBases() == 0)
continue;
MarkVirtualMembersReferenced(Loc, Base);
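The SemaDeclCXX.cpp hunks above repeat one mechanical substitution: a class's canonical type is now obtained directly from its tag declaration, and record declarations are reached via the declaration the type was written against plus an explicit hop to its definition. A minimal sketch of that pattern, using only the signatures visible in the hunks (getCanonicalTagType, getOriginalDecl, getDefinitionOrSelf); the helper names below are illustrative, not part of the patch:

#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Type.h"

// Before: Ctx.getCanonicalType(Ctx.getTypeDeclType(RD)) or Ctx.getRecordType(RD).
// After:  a single call yields the canonical tag type.
inline clang::CanQualType classType(clang::ASTContext &Ctx,
                                    const clang::CXXRecordDecl *RD) {
  return Ctx.getCanonicalTagType(RD);
}

// Before: RT->getDecl() handed back the definition implicitly.
// After:  getOriginalDecl() names the declaration the type was formed from,
//         and getDefinitionOrSelf() walks to the definition when one exists.
inline const clang::RecordDecl *recordDefinition(const clang::RecordType *RT) {
  return RT->getOriginalDecl()->getDefinitionOrSelf();
}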
diff --git a/clang/lib/Sema/SemaDeclObjC.cpp b/clang/lib/Sema/SemaDeclObjC.cpp
index bbd1049..88ed83e 100644
--- a/clang/lib/Sema/SemaDeclObjC.cpp
+++ b/clang/lib/Sema/SemaDeclObjC.cpp
@@ -591,12 +591,13 @@ void SemaObjC::ActOnSuperClassOfClassInterface(
// The previous declaration was not a class decl. Check if we have a
// typedef. If we do, get the underlying class type.
if (const TypedefNameDecl *TDecl =
- dyn_cast_or_null<TypedefNameDecl>(PrevDecl)) {
+ dyn_cast_or_null<TypedefNameDecl>(PrevDecl)) {
QualType T = TDecl->getUnderlyingType();
if (T->isObjCObjectType()) {
if (NamedDecl *IDecl = T->castAs<ObjCObjectType>()->getInterface()) {
SuperClassDecl = dyn_cast<ObjCInterfaceDecl>(IDecl);
- SuperClassType = Context.getTypeDeclType(TDecl);
+ SuperClassType = Context.getTypeDeclType(
+ ElaboratedTypeKeyword::None, /*Qualifier=*/std::nullopt, TDecl);
// This handles the following case:
// @interface NewI @end
@@ -1389,11 +1390,9 @@ class ObjCTypeArgOrProtocolValidatorCCC final
// Make sure the type is something we would accept as a type
// argument.
- auto type = Context.getTypeDeclType(typeDecl);
- if (type->isObjCObjectPointerType() ||
- type->isBlockPointerType() ||
+ if (CanQualType type = Context.getCanonicalTypeDeclType(typeDecl);
type->isDependentType() ||
- type->isObjCObjectType())
+ isa<ObjCObjectPointerType, BlockPointerType, ObjCObjectType>(type))
return true;
return false;
@@ -1589,7 +1588,9 @@ void SemaObjC::actOnObjCTypeArgsOrProtocolQualifiers(
unsigned diagID; // unused
QualType type;
if (auto *actualTypeDecl = dyn_cast<TypeDecl *>(typeDecl))
- type = Context.getTypeDeclType(actualTypeDecl);
+ type =
+ Context.getTypeDeclType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, actualTypeDecl);
else
type = Context.getObjCInterfaceType(cast<ObjCInterfaceDecl *>(typeDecl));
TypeSourceInfo *parsedTSInfo = Context.getTrivialTypeSourceInfo(type, loc);
@@ -3231,8 +3232,10 @@ static bool tryMatchRecordTypes(ASTContext &Context,
assert(lt && rt && lt != rt);
if (!isa<RecordType>(lt) || !isa<RecordType>(rt)) return false;
- RecordDecl *left = cast<RecordType>(lt)->getDecl();
- RecordDecl *right = cast<RecordType>(rt)->getDecl();
+ RecordDecl *left =
+ cast<RecordType>(lt)->getOriginalDecl()->getDefinitionOrSelf();
+ RecordDecl *right =
+ cast<RecordType>(rt)->getOriginalDecl()->getDefinitionOrSelf();
// Require union-hood to match.
if (left->isUnion() != right->isUnion()) return false;
@@ -3846,7 +3849,9 @@ static bool IsVariableSizedType(QualType T) {
if (T->isIncompleteArrayType())
return true;
const auto *RecordTy = T->getAs<RecordType>();
- return (RecordTy && RecordTy->getDecl()->hasFlexibleArrayMember());
+ return (RecordTy && RecordTy->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->hasFlexibleArrayMember());
}
static void DiagnoseVariableSizedIvars(Sema &S, ObjCContainerDecl *OCD) {
@@ -3892,7 +3897,9 @@ static void DiagnoseVariableSizedIvars(Sema &S, ObjCContainerDecl *OCD) {
<< TagTypeKind::Class; // Use "class" for Obj-C.
IsInvalidIvar = true;
} else if (const RecordType *RecordTy = IvarTy->getAs<RecordType>()) {
- if (RecordTy->getDecl()->hasFlexibleArrayMember()) {
+ if (RecordTy->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->hasFlexibleArrayMember()) {
S.Diag(ivar->getLocation(),
diag::err_objc_variable_sized_type_not_at_end)
<< ivar->getDeclName() << IvarTy;
@@ -5537,7 +5544,8 @@ void SemaObjC::SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation) {
if (const RecordType *RecordTy =
Context.getBaseElementType(Field->getType())
->getAs<RecordType>()) {
- CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getOriginalDecl())
+ ->getDefinitionOrSelf();
if (CXXDestructorDecl *Destructor = SemaRef.LookupDestructor(RD)) {
SemaRef.MarkFunctionReferenced(Field->getLocation(), Destructor);
SemaRef.CheckDestructorAccess(
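The SemaDeclObjC.cpp hunks apply the same declaration-resolution step before querying record properties (flexible array members, ivar destructors). Sketched under the same assumptions, with a hypothetical helper name mirroring the DiagnoseVariableSizedIvars change:

#include "clang/AST/Decl.h"
#include "clang/AST/Type.h"

// Before: RecordTy->getDecl()->hasFlexibleArrayMember()
// After:  resolve the written declaration to its definition first.
inline bool hasFlexibleArrayMember(clang::QualType T) {
  const auto *RT = T->getAs<clang::RecordType>();
  return RT &&
         RT->getOriginalDecl()->getDefinitionOrSelf()->hasFlexibleArrayMember();
}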
diff --git a/clang/lib/Sema/SemaExceptionSpec.cpp b/clang/lib/Sema/SemaExceptionSpec.cpp
index 0a6cea8..94413b5 100644
--- a/clang/lib/Sema/SemaExceptionSpec.cpp
+++ b/clang/lib/Sema/SemaExceptionSpec.cpp
@@ -163,8 +163,9 @@ bool Sema::CheckSpecifiedExceptionType(QualType &T, SourceRange Range) {
DiagID = diag::ext_incomplete_in_exception_spec;
ReturnValueOnError = false;
}
- if (!(PointeeT->isRecordType() &&
- PointeeT->castAs<RecordType>()->isBeingDefined()) &&
+ if (!(PointeeT->isRecordType() && PointeeT->castAs<RecordType>()
+ ->getOriginalDecl()
+ ->isEntityBeingDefined()) &&
RequireCompleteType(Range.getBegin(), PointeeT, DiagID, Kind, Range))
return ReturnValueOnError;
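The single SemaExceptionSpec.cpp hunk swaps the type-level isBeingDefined() query for the declaration-level isEntityBeingDefined(); a sketch under the same assumptions:

#include "clang/AST/Decl.h"
#include "clang/AST/Type.h"

// Before: PointeeT->castAs<RecordType>()->isBeingDefined()
// After:  ask the declaration the RecordType refers to.
inline bool recordIsBeingDefined(clang::QualType T) {
  if (!T->isRecordType())
    return false;
  return T->castAs<clang::RecordType>()->getOriginalDecl()->isEntityBeingDefined();
}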
diff --git a/clang/lib/Sema/SemaExpr.cpp b/clang/lib/Sema/SemaExpr.cpp
index 6793d6d..8532039 100644
--- a/clang/lib/Sema/SemaExpr.cpp
+++ b/clang/lib/Sema/SemaExpr.cpp
@@ -1528,8 +1528,8 @@ void Sema::checkEnumArithmeticConversions(Expr *LHS, Expr *RHS,
// are ill-formed.
if (getLangOpts().CPlusPlus26)
DiagID = diag::warn_conv_mixed_enum_types_cxx26;
- else if (!L->castAs<EnumType>()->getDecl()->hasNameForLinkage() ||
- !R->castAs<EnumType>()->getDecl()->hasNameForLinkage()) {
+ else if (!L->castAs<EnumType>()->getOriginalDecl()->hasNameForLinkage() ||
+ !R->castAs<EnumType>()->getOriginalDecl()->hasNameForLinkage()) {
// If either enumeration type is unnamed, it's less likely that the
// user cares about this, but this situation is still deprecated in
// C++2a. Use a different warning group.
@@ -2531,7 +2531,7 @@ bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
if (isa<CXXRecordDecl>(DC)) {
if (ExplicitTemplateArgs) {
if (LookupTemplateName(
- R, S, SS, Context.getRecordType(cast<CXXRecordDecl>(DC)),
+ R, S, SS, Context.getCanonicalTagType(cast<CXXRecordDecl>(DC)),
/*EnteringContext*/ false, TemplateNameIsRequired,
/*RequiredTemplateKind*/ nullptr, /*AllowTypoCorrection*/ true))
return true;
@@ -2607,11 +2607,8 @@ bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
}
R.addDecl(ND);
if (getLangOpts().CPlusPlus && ND->isCXXClassMember()) {
- CXXRecordDecl *Record = nullptr;
- if (Corrected.getCorrectionSpecifier()) {
- const Type *Ty = Corrected.getCorrectionSpecifier()->getAsType();
- Record = Ty->getAsCXXRecordDecl();
- }
+ CXXRecordDecl *Record =
+ Corrected.getCorrectionSpecifier().getAsRecordDecl();
if (!Record)
Record = cast<CXXRecordDecl>(
ND->getDeclContext()->getRedeclContext());
@@ -2707,8 +2704,7 @@ recoverFromMSUnqualifiedLookup(Sema &S, ASTContext &Context,
// Synthesize a fake NNS that points to the derived class. This will
// perform name lookup during template instantiation.
CXXScopeSpec SS;
- auto *NNS =
- NestedNameSpecifier::Create(Context, nullptr, RD->getTypeForDecl());
+ NestedNameSpecifier NNS(Context.getCanonicalTagType(RD)->getTypePtr());
SS.MakeTrivial(Context, NNS, SourceRange(Loc, Loc));
return DependentScopeDeclRefExpr::Create(
Context, SS.getWithLocInContext(Context), TemplateKWLoc, NameInfo,
@@ -2831,8 +2827,7 @@ Sema::ActOnIdExpression(Scope *S, CXXScopeSpec &SS,
// If this name wasn't predeclared and if this is not a function
// call, diagnose the problem.
- DefaultFilterCCC DefaultValidator(II, SS.isValid() ? SS.getScopeRep()
- : nullptr);
+ DefaultFilterCCC DefaultValidator(II, SS.getScopeRep());
DefaultValidator.IsAddressOfOperand = IsAddressOfOperand;
assert((!CCC || CCC->IsAddressOfOperand == IsAddressOfOperand) &&
"Typo correction callback misconfigured");
@@ -2943,8 +2938,28 @@ ExprResult Sema::BuildQualifiedDeclarationNameExpr(
}
if (const TypeDecl *TD = R.getAsSingle<TypeDecl>()) {
- QualType Ty = Context.getTypeDeclType(TD);
- QualType ET = getElaboratedType(ElaboratedTypeKeyword::None, SS, Ty);
+ QualType ET;
+ TypeLocBuilder TLB;
+ if (auto *TagD = dyn_cast<TagDecl>(TD)) {
+ ET = SemaRef.Context.getTagType(ElaboratedTypeKeyword::None,
+ SS.getScopeRep(), TagD,
+ /*OwnsTag=*/false);
+ auto TL = TLB.push<TagTypeLoc>(ET);
+ TL.setElaboratedKeywordLoc(SourceLocation());
+ TL.setQualifierLoc(SS.getWithLocInContext(Context));
+ TL.setNameLoc(NameInfo.getLoc());
+ } else if (auto *TypedefD = dyn_cast<TypedefNameDecl>(TD)) {
+ ET = SemaRef.Context.getTypedefType(ElaboratedTypeKeyword::None,
+ SS.getScopeRep(), TypedefD);
+ TLB.push<TypedefTypeLoc>(ET).set(
+ /*ElaboratedKeywordLoc=*/SourceLocation(),
+ SS.getWithLocInContext(Context), NameInfo.getLoc());
+ } else {
+ // FIXME: What else can appear here?
+ ET = SemaRef.Context.getTypeDeclType(TD);
+ TLB.pushTypeSpec(ET).setNameLoc(NameInfo.getLoc());
+ assert(SS.isEmpty());
+ }
// Diagnose a missing typename if this resolved unambiguously to a type in
// a dependent context. If we can recover with a type, downgrade this to
@@ -2965,13 +2980,6 @@ ExprResult Sema::BuildQualifiedDeclarationNameExpr(
D << FixItHint::CreateInsertion(Loc, "typename ");
// Recover by pretending this was an elaborated type.
- TypeLocBuilder TLB;
- TLB.pushTypeSpec(Ty).setNameLoc(NameInfo.getLoc());
-
- ElaboratedTypeLoc QTL = TLB.push<ElaboratedTypeLoc>(ET);
- QTL.setElaboratedKeywordLoc(SourceLocation());
- QTL.setQualifierLoc(SS.getWithLocInContext(Context));
-
*RecoveryTSI = TLB.getTypeSourceInfo(Context, ET);
return ExprEmpty();
@@ -2987,11 +2995,10 @@ ExprResult Sema::BuildQualifiedDeclarationNameExpr(
return BuildDeclarationNameExpr(SS, R, /*ADL=*/false);
}
-ExprResult
-Sema::PerformObjectMemberConversion(Expr *From,
- NestedNameSpecifier *Qualifier,
- NamedDecl *FoundDecl,
- NamedDecl *Member) {
+ExprResult Sema::PerformObjectMemberConversion(Expr *From,
+ NestedNameSpecifier Qualifier,
+ NamedDecl *FoundDecl,
+ NamedDecl *Member) {
const auto *RD = dyn_cast<CXXRecordDecl>(Member->getDeclContext());
if (!RD)
return From;
@@ -3002,7 +3009,7 @@ Sema::PerformObjectMemberConversion(Expr *From,
QualType FromType = From->getType();
bool PointerConversions = false;
if (isa<FieldDecl>(Member)) {
- DestRecordType = Context.getCanonicalType(Context.getTypeDeclType(RD));
+ DestRecordType = Context.getCanonicalTagType(RD);
auto FromPtrType = FromType->getAs<PointerType>();
DestRecordType = Context.getAddrSpaceQualType(
DestRecordType, FromPtrType
@@ -3080,8 +3087,8 @@ Sema::PerformObjectMemberConversion(Expr *From,
// x = 17; // error: ambiguous base subobjects
// Derived1::x = 17; // okay, pick the Base subobject of Derived1
// }
- if (Qualifier && Qualifier->getAsType()) {
- QualType QType = QualType(Qualifier->getAsType(), 0);
+ if (Qualifier.getKind() == NestedNameSpecifier::Kind::Type) {
+ QualType QType = QualType(Qualifier.getAsType(), 0);
assert(QType->isRecordType() && "lookup done with non-record type");
QualType QRecordType = QualType(QType->castAs<RecordType>(), 0);
@@ -4483,9 +4490,6 @@ static void captureVariablyModifiedType(ASTContext &Context, QualType T,
case Type::BitInt:
case Type::HLSLInlineSpirv:
llvm_unreachable("type class is never variably-modified!");
- case Type::Elaborated:
- T = cast<ElaboratedType>(Ty)->getNamedType();
- break;
case Type::Adjusted:
T = cast<AdjustedType>(Ty)->getOriginalType();
break;
@@ -5877,7 +5881,7 @@ static bool isParenthetizedAndQualifiedAddressOfExpr(Expr *Fn) {
return DRE->hasQualifier();
}
if (auto *OVL = dyn_cast<OverloadExpr>(UO->getSubExpr()->IgnoreParens()))
- return OVL->getQualifier();
+ return bool(OVL->getQualifier());
return false;
}
@@ -7076,7 +7080,7 @@ ExprResult Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
for (unsigned i = 0, e = Args.size(); i != e; i++) {
if (const auto *RT =
dyn_cast<RecordType>(Args[i]->getType().getCanonicalType())) {
- if (RT->getDecl()->isOrContainsUnion())
+ if (RT->getOriginalDecl()->isOrContainsUnion())
Diag(Args[i]->getBeginLoc(), diag::warn_cmse_nonsecure_union)
<< 0 << i;
}
@@ -8605,7 +8609,8 @@ QualType Sema::CheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
// type.
if (const RecordType *LHSRT = LHSTy->getAs<RecordType>()) { // C99 6.5.15p3
if (const RecordType *RHSRT = RHSTy->getAs<RecordType>())
- if (LHSRT->getDecl() == RHSRT->getDecl())
+ if (declaresSameEntity(LHSRT->getOriginalDecl(),
+ RHSRT->getOriginalDecl()))
// "If both the operands have structure or union type, the result has
// that type." This implies that CV qualifiers are dropped.
return Context.getCommonSugaredType(LHSTy.getUnqualifiedType(),
@@ -9674,11 +9679,14 @@ Sema::CheckTransparentUnionArgumentConstraints(QualType ArgType,
// If the ArgType is a Union type, we want to handle a potential
// transparent_union GCC extension.
const RecordType *UT = ArgType->getAsUnionType();
- if (!UT || !UT->getDecl()->hasAttr<TransparentUnionAttr>())
+ if (!UT)
+ return AssignConvertType::Incompatible;
+
+ RecordDecl *UD = UT->getOriginalDecl()->getDefinitionOrSelf();
+ if (!UD->hasAttr<TransparentUnionAttr>())
return AssignConvertType::Incompatible;
// The field to initialize within the transparent union.
- RecordDecl *UD = UT->getDecl();
FieldDecl *InitField = nullptr;
// It's compatible if the expression matches any of the fields.
for (auto *it : UD->fields()) {
@@ -11401,7 +11409,7 @@ QualType Sema::CheckSubtractionOperands(ExprResult &LHS, ExprResult &RHS,
static bool isScopedEnumerationType(QualType T) {
if (const EnumType *ET = T->getAs<EnumType>())
- return ET->getDecl()->isScoped();
+ return ET->getOriginalDecl()->isScoped();
return false;
}
@@ -12290,8 +12298,10 @@ static QualType checkArithmeticOrEnumeralThreeWayCompare(Sema &S,
S.InvalidOperands(Loc, LHS, RHS);
return QualType();
}
- QualType IntType =
- LHSStrippedType->castAs<EnumType>()->getDecl()->getIntegerType();
+ QualType IntType = LHSStrippedType->castAs<EnumType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->getIntegerType();
assert(IntType->isArithmeticType());
// We can't use `CK_IntegralCast` when the underlying type is 'bool', so we
@@ -12457,7 +12467,7 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
auto computeResultTy = [&]() {
if (Opc != BO_Cmp)
- return Context.getLogicalOperationType();
+ return QualType(Context.getLogicalOperationType());
assert(getLangOpts().CPlusPlus);
assert(Context.hasSameType(LHS.get()->getType(), RHS.get()->getType()));
@@ -13694,8 +13704,10 @@ static void DiagnoseRecursiveConstFields(Sema &S, const ValueDecl *VD,
// diagnostics in field nesting order.
while (RecordTypeList.size() > NextToCheckIndex) {
bool IsNested = NextToCheckIndex > 0;
- for (const FieldDecl *Field :
- RecordTypeList[NextToCheckIndex]->getDecl()->fields()) {
+ for (const FieldDecl *Field : RecordTypeList[NextToCheckIndex]
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->fields()) {
// First, check every field for constness.
QualType FieldTy = Field->getType();
if (FieldTy.isConstQualified()) {
@@ -16150,7 +16162,7 @@ ExprResult Sema::BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
if (!RC)
return ExprError(Diag(OC.LocEnd, diag::err_offsetof_record_type)
<< CurrentType);
- RecordDecl *RD = RC->getDecl();
+ RecordDecl *RD = RC->getOriginalDecl()->getDefinitionOrSelf();
// C++ [lib.support.types]p5:
// The macro offsetof accepts a restricted set of type arguments in this
@@ -16211,8 +16223,8 @@ ExprResult Sema::BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
// If the member was found in a base class, introduce OffsetOfNodes for
// the base class indirections.
CXXBasePaths Paths;
- if (IsDerivedFrom(OC.LocStart, CurrentType, Context.getTypeDeclType(Parent),
- Paths)) {
+ if (IsDerivedFrom(OC.LocStart, CurrentType,
+ Context.getCanonicalTagType(Parent), Paths)) {
if (Paths.getDetectedVirtual()) {
Diag(OC.LocEnd, diag::err_offsetof_field_of_virtual_base)
<< MemberDecl->getDeclName()
@@ -16777,7 +16789,8 @@ ExprResult Sema::BuildVAArgExpr(SourceLocation BuiltinLoc,
// that.
QualType UnderlyingType = TInfo->getType();
if (const auto *ET = UnderlyingType->getAs<EnumType>())
- UnderlyingType = ET->getDecl()->getIntegerType();
+ UnderlyingType =
+ ET->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
if (Context.typesAreCompatible(PromoteType, UnderlyingType,
/*CompareUnqualified*/ true))
PromoteType = QualType();
@@ -16923,7 +16936,7 @@ ExprResult Sema::ActOnSourceLocExpr(SourceLocIdentKind Kind,
return ExprError();
}
ResultTy = Context.getPointerType(
- Context.getRecordType(StdSourceLocationImplDecl).withConst());
+ Context.getCanonicalTagType(StdSourceLocationImplDecl).withConst());
break;
}
@@ -18772,7 +18785,9 @@ static bool isVariableCapturable(CapturingScopeInfo *CSI, ValueDecl *Var,
// Prohibit structs with flexible array members too.
// We cannot capture what is in the tail end of the struct.
if (const RecordType *VTTy = Var->getType()->getAs<RecordType>()) {
- if (VTTy->getDecl()->hasFlexibleArrayMember()) {
+ if (VTTy->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->hasFlexibleArrayMember()) {
if (Diagnose) {
if (IsBlock)
S.Diag(Loc, diag::err_ref_flexarray_type);
@@ -21213,7 +21228,7 @@ ExprResult Sema::CheckPlaceholderExpr(Expr *E) {
NamedDecl *Temp = *ULE->decls_begin();
const bool IsTypeAliasTemplateDecl = isa<TypeAliasTemplateDecl>(Temp);
- NestedNameSpecifier *NNS = ULE->getQualifierLoc().getNestedNameSpecifier();
+ NestedNameSpecifier NNS = ULE->getQualifierLoc().getNestedNameSpecifier();
// FIXME: AssumedTemplate is not very appropriate for error recovery here,
// as it models only the unqualified-id case, where this case can clearly be
// qualified. Thus we can't just qualify an assumed template.
@@ -21239,16 +21254,16 @@ ExprResult Sema::CheckPlaceholderExpr(Expr *E) {
QualType TST;
{
SFINAETrap Trap(*this);
- TST = CheckTemplateIdType(TN, NameInfo.getBeginLoc(), TAL);
+ TST = CheckTemplateIdType(ElaboratedTypeKeyword::None, TN,
+ NameInfo.getBeginLoc(), TAL);
}
if (TST.isNull())
TST = Context.getTemplateSpecializationType(
- TN, ULE->template_arguments(), /*CanonicalArgs=*/{},
+ ElaboratedTypeKeyword::None, TN, ULE->template_arguments(),
+ /*CanonicalArgs=*/{},
HasAnyDependentTA ? Context.DependentTy : Context.IntTy);
- QualType ET =
- Context.getElaboratedType(ElaboratedTypeKeyword::None, NNS, TST);
return CreateRecoveryExpr(NameInfo.getBeginLoc(), NameInfo.getEndLoc(), {},
- ET);
+ TST);
}
// Overloaded expressions.
diff --git a/clang/lib/Sema/SemaExprCXX.cpp b/clang/lib/Sema/SemaExprCXX.cpp
index 0edfd60..29c9c47 100644
--- a/clang/lib/Sema/SemaExprCXX.cpp
+++ b/clang/lib/Sema/SemaExprCXX.cpp
@@ -57,11 +57,11 @@ using namespace sema;
ParsedType Sema::getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
const IdentifierInfo &Name) {
- NestedNameSpecifier *NNS = SS.getScopeRep();
- if ([[maybe_unused]] const IdentifierInfo *II = NNS->getAsIdentifier())
- assert(II == &Name && "not a constructor name");
+ NestedNameSpecifier NNS = SS.getScopeRep();
+ QualType Type(NNS.getAsType(), 0);
+ if ([[maybe_unused]] const auto *DNT = dyn_cast<DependentNameType>(Type))
+ assert(DNT->getIdentifier() == &Name && "not a constructor name");
- QualType Type(NNS->translateToType(Context), 0);
// This reference to the type is located entirely at the location of the
// final identifier in the qualified-id.
return CreateParsedType(Type,
@@ -111,10 +111,8 @@ ParsedType Sema::getConstructorName(const IdentifierInfo &II,
return ParsedType();
}
- QualType T = Context.getTypeDeclType(InjectedClassName);
- DiagnoseUseOfDecl(InjectedClassName, NameLoc);
- MarkAnyDeclReferenced(NameLoc, InjectedClassName, /*OdrUse=*/false);
-
+ QualType T = Context.getTagType(ElaboratedTypeKeyword::None, SS.getScopeRep(),
+ InjectedClassName, /*OwnsTag=*/false);
return ParsedType::make(T);
}
@@ -175,7 +173,7 @@ ParsedType Sema::getDestructorName(const IdentifierInfo &II,
if (SearchType.isNull() || SearchType->isDependentType())
return true;
- QualType T = Context.getTypeDeclType(Type);
+ CanQualType T = Context.getCanonicalTypeDeclType(Type);
return Context.hasSameUnqualifiedType(T, SearchType);
};
@@ -207,7 +205,8 @@ ParsedType Sema::getDestructorName(const IdentifierInfo &II,
NamedDecl *D = F.next();
if (auto *TD = dyn_cast<TypeDecl>(D->getUnderlyingDecl()))
Diag(D->getLocation(), diag::note_destructor_type_here)
- << Context.getTypeDeclType(TD);
+ << Context.getTypeDeclType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, TD);
else
Diag(D->getLocation(), diag::note_destructor_nontype_here);
@@ -222,11 +221,11 @@ ParsedType Sema::getDestructorName(const IdentifierInfo &II,
if (TypeDecl *Type = Found.getAsSingle<TypeDecl>()) {
if (IsAcceptableResult(Type)) {
- QualType T = Context.getTypeDeclType(Type);
+ QualType T = Context.getTypeDeclType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, Type);
MarkAnyDeclReferenced(Type->getLocation(), Type, /*OdrUse=*/false);
- return CreateParsedType(
- Context.getElaboratedType(ElaboratedTypeKeyword::None, nullptr, T),
- Context.getTrivialTypeSourceInfo(T, NameLoc));
+ return CreateParsedType(T,
+ Context.getTrivialTypeSourceInfo(T, NameLoc));
}
}
@@ -311,15 +310,23 @@ ParsedType Sema::getDestructorName(const IdentifierInfo &II,
// If both lookups succeed and find a dependent result, which result should
// we retain? (Same question for p->~type-name().)
- if (NestedNameSpecifier *Prefix =
- SS.isSet() ? SS.getScopeRep()->getPrefix() : nullptr) {
+ auto Prefix = [&]() -> NestedNameSpecifierLoc {
+ NestedNameSpecifierLoc NNS = SS.getWithLocInContext(Context);
+ if (!NNS)
+ return NestedNameSpecifierLoc();
+ if (auto TL = NNS.getAsTypeLoc())
+ return TL.getPrefix();
+ return NNS.getAsNamespaceAndPrefix().Prefix;
+ }();
+
+ if (Prefix) {
// This is
//
// nested-name-specifier type-name :: ~ type-name
//
// Look for the second type-name in the nested-name-specifier.
CXXScopeSpec PrefixSS;
- PrefixSS.Adopt(NestedNameSpecifierLoc(Prefix, SS.location_data()));
+ PrefixSS.Adopt(Prefix);
if (ParsedType T = LookupInNestedNameSpec(PrefixSS))
return T;
} else {
@@ -374,7 +381,7 @@ ParsedType Sema::getDestructorName(const IdentifierInfo &II,
//
// also looks for type-name in the scope. Unfortunately, we can't
// reasonably apply this fallback for dependent nested-name-specifiers.
- if (SS.isValid() && SS.getScopeRep()->getPrefix()) {
+ if (Prefix) {
if (ParsedType T = LookupInScope()) {
Diag(SS.getEndLoc(), diag::ext_qualified_dtor_named_in_lexical_scope)
<< FixItHint::CreateRemoval(SS.getRange());
@@ -419,9 +426,10 @@ ParsedType Sema::getDestructorName(const IdentifierInfo &II,
if (auto *TD = dyn_cast<TypeDecl>(FoundDecls[0]->getUnderlyingDecl())) {
assert(!SearchType.isNull() &&
"should only reject a type result if we have a search type");
- QualType T = Context.getTypeDeclType(TD);
Diag(NameLoc, diag::err_destructor_expr_type_mismatch)
- << T << SearchType << MakeFixItHint();
+ << Context.getTypeDeclType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, TD)
+ << SearchType << MakeFixItHint();
} else {
Diag(NameLoc, diag::err_destructor_expr_nontype)
<< &II << MakeFixItHint();
@@ -435,7 +443,8 @@ ParsedType Sema::getDestructorName(const IdentifierInfo &II,
for (NamedDecl *FoundD : FoundDecls) {
if (auto *TD = dyn_cast<TypeDecl>(FoundD->getUnderlyingDecl()))
Diag(FoundD->getLocation(), diag::note_destructor_type_here)
- << Context.getTypeDeclType(TD);
+ << Context.getTypeDeclType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, TD);
else
Diag(FoundD->getLocation(), diag::note_destructor_nontype_here)
<< FoundD;
@@ -501,12 +510,8 @@ bool Sema::checkLiteralOperatorId(const CXXScopeSpec &SS,
<< II << static_cast<int>(Status) << Hint;
}
- if (!SS.isValid())
- return false;
-
- switch (SS.getScopeRep()->getKind()) {
- case NestedNameSpecifier::Identifier:
- case NestedNameSpecifier::TypeSpec:
+ switch (SS.getScopeRep().getKind()) {
+ case NestedNameSpecifier::Kind::Type:
// Per C++11 [over.literal]p2, literal operators can only be declared at
// namespace scope. Therefore, this unqualified-id cannot name anything.
// Reject it early, because we have no AST representation for this in the
@@ -515,9 +520,10 @@ bool Sema::checkLiteralOperatorId(const CXXScopeSpec &SS,
<< SS.getScopeRep();
return true;
- case NestedNameSpecifier::Global:
- case NestedNameSpecifier::Super:
- case NestedNameSpecifier::Namespace:
+ case NestedNameSpecifier::Kind::Null:
+ case NestedNameSpecifier::Kind::Global:
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
+ case NestedNameSpecifier::Kind::Namespace:
return false;
}
@@ -565,7 +571,8 @@ ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
QualType T = E->getType();
if (const RecordType *RecordT = T->getAs<RecordType>()) {
- CXXRecordDecl *RecordD = cast<CXXRecordDecl>(RecordT->getDecl());
+ CXXRecordDecl *RecordD = cast<CXXRecordDecl>(RecordT->getOriginalDecl())
+ ->getDefinitionOrSelf();
// C++ [expr.typeid]p3:
// [...] If the type of the expression is a class type, the class
// shall be completely-defined.
@@ -659,7 +666,7 @@ Sema::ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc,
return ExprError(Diag(OpLoc, diag::err_no_typeid_with_fno_rtti));
}
- QualType TypeInfoType = Context.getTypeDeclType(CXXTypeInfoDecl);
+ CanQualType TypeInfoType = Context.getCanonicalTagType(CXXTypeInfoDecl);
if (isType) {
// The operand is a type; handle it as such.
@@ -1214,7 +1221,7 @@ QualType Sema::getCurrentThisType() {
// This is a lambda call operator that is being instantiated as a default
// initializer. DC must point to the enclosing class type, so we can recover
// the 'this' type from it.
- QualType ClassTy = Context.getTypeDeclType(cast<CXXRecordDecl>(DC));
+ CanQualType ClassTy = Context.getCanonicalTagType(cast<CXXRecordDecl>(DC));
// There are no cv-qualifiers for 'this' within default initializers,
// per [expr.prim.general]p4.
ThisTy = Context.getPointerType(ClassTy);
@@ -1244,7 +1251,7 @@ Sema::CXXThisScopeRAII::CXXThisScopeRAII(Sema &S,
else
Record = cast<CXXRecordDecl>(ContextDecl);
- QualType T = S.Context.getRecordType(Record);
+ QualType T = S.Context.getCanonicalTagType(Record);
T = S.getASTContext().getQualifiedType(T, CXXThisTypeQuals);
S.CXXThisTypeOverride =
@@ -1732,7 +1739,7 @@ static bool isNonPlacementDeallocationFunction(Sema &S, FunctionDecl *FD) {
if (S.getLangOpts().AlignedAllocation && UsualParams < FD->getNumParams() &&
S.Context.hasSameUnqualifiedType(
FD->getParamDecl(UsualParams)->getType(),
- S.Context.getTypeDeclType(S.getStdAlignValT())))
+ S.Context.getCanonicalTagType(S.getStdAlignValT())))
++UsualParams;
return UsualParams == FD->getNumParams();
@@ -1860,8 +1867,8 @@ namespace {
if (FunctionTemplateDecl *Best = S.getMoreSpecializedTemplate(
PrimaryTemplate, OtherPrimaryTemplate, SourceLocation(),
TPOC_Call, ImplicitArgCount,
- DC ? QualType(DC->getTypeForDecl(), 0) : QualType{},
- OtherDC ? QualType(OtherDC->getTypeForDecl(), 0) : QualType{},
+ DC ? S.Context.getCanonicalTagType(DC) : QualType{},
+ OtherDC ? S.Context.getCanonicalTagType(OtherDC) : QualType{},
false)) {
return Best == PrimaryTemplate ? 1 : -1;
}
@@ -1977,7 +1984,7 @@ static bool doesUsualArrayDeleteWantSize(Sema &S, SourceLocation loc,
DeclarationName deleteName =
S.Context.DeclarationNames.getCXXOperatorName(OO_Array_Delete);
LookupResult ops(S, deleteName, loc, Sema::LookupOrdinaryName);
- S.LookupQualifiedName(ops, record->getDecl());
+ S.LookupQualifiedName(ops, record->getOriginalDecl()->getDefinitionOrSelf());
// We're just doing this for information.
ops.suppressDiagnostics();
@@ -2505,7 +2512,7 @@ ExprResult Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
// because there might not be a `std::align_val_t` type.
EnumDecl *StdAlignValT = getStdAlignValT();
QualType AlignValT =
- StdAlignValT ? Context.getTypeDeclType(StdAlignValT) : SizeTy;
+ StdAlignValT ? Context.getCanonicalTagType(StdAlignValT) : SizeTy;
IntegerLiteral AlignmentLiteral(
Context,
llvm::APInt(Context.getTypeSize(SizeTy),
@@ -2966,7 +2973,7 @@ bool Sema::FindAllocationFunctions(
isTypeAwareAllocation(IAP.PassTypeIdentity);
if (IncludeAlignParam) {
DeclareGlobalNewDelete();
- AlignValT = Context.getTypeDeclType(getStdAlignValT());
+ AlignValT = Context.getCanonicalTagType(getStdAlignValT());
}
CXXScalarValueInitExpr Align(AlignValT, nullptr, SourceLocation());
if (IncludeAlignParam)
@@ -3048,8 +3055,9 @@ bool Sema::FindAllocationFunctions(
LookupResult FoundDelete(*this, DeleteName, StartLoc, LookupOrdinaryName);
if (AllocElemType->isRecordType() &&
DeleteScope != AllocationFunctionScope::Global) {
- auto *RD =
- cast<CXXRecordDecl>(AllocElemType->castAs<RecordType>()->getDecl());
+ auto *RD = cast<CXXRecordDecl>(
+ AllocElemType->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
LookupQualifiedName(FoundDelete, RD);
}
if (FoundDelete.isAmbiguous())
@@ -3426,7 +3434,7 @@ void Sema::DeclareGlobalNewDelete() {
for (int Aligned = 0; Aligned < NumAlignVariants; ++Aligned) {
if (Aligned)
- Params.push_back(Context.getTypeDeclType(getStdAlignValT()));
+ Params.push_back(Context.getCanonicalTagType(getStdAlignValT()));
DeclareGlobalAllocationFunction(
Context.DeclarationNames.getCXXOperatorName(Kind), Return, Params);
@@ -3483,7 +3491,7 @@ void Sema::DeclareGlobalAllocationFunction(DeclarationName Name,
bool HasBadAllocExceptionSpec = Name.isAnyOperatorNew();
if (HasBadAllocExceptionSpec) {
if (!getLangOpts().CPlusPlus11) {
- BadAllocType = Context.getTypeDeclType(getStdBadAlloc());
+ BadAllocType = Context.getCanonicalTagType(getStdBadAlloc());
assert(StdBadAlloc && "Must have std::bad_alloc declared");
EPI.ExceptionSpec.Type = EST_Dynamic;
EPI.ExceptionSpec.Exceptions = llvm::ArrayRef(BadAllocType);
@@ -3497,6 +3505,19 @@ void Sema::DeclareGlobalAllocationFunction(DeclarationName Name,
}
auto CreateAllocationFunctionDecl = [&](Attr *ExtraAttr) {
+ // The MSVC STL has explicit cdecl on its (host-side) allocation function
+ // specializations, so in order to prevent a calling-convention clash we
+ // use the host's CC, if available, or CC_C as a fallback, for the
+ // host-side implicit decls, knowing these do not get emitted when
+ // compiling for device.
+ if (getLangOpts().CUDAIsDevice && ExtraAttr &&
+ isa<CUDAHostAttr>(ExtraAttr) &&
+ Context.getTargetInfo().getTriple().isSPIRV()) {
+ if (auto *ATI = Context.getAuxTargetInfo())
+ EPI.ExtInfo = EPI.ExtInfo.withCallingConv(ATI->getDefaultCallingConv());
+ else
+ EPI.ExtInfo = EPI.ExtInfo.withCallingConv(CallingConv::CC_C);
+ }
QualType FnType = Context.getFunctionType(Return, Params, EPI);
FunctionDecl *Alloc = FunctionDecl::Create(
Context, GlobalCtx, SourceLocation(), SourceLocation(), Name, FnType,
@@ -3593,7 +3614,7 @@ FunctionDecl *Sema::FindDeallocationFunctionForDestructor(SourceLocation Loc,
DeclarationName Name = Context.DeclarationNames.getCXXOperatorName(OO_Delete);
FunctionDecl *OperatorDelete = nullptr;
- QualType DeallocType = Context.getRecordType(RD);
+ CanQualType DeallocType = Context.getCanonicalTagType(RD);
ImplicitDeallocationParameters IDP = {
DeallocType, ShouldUseTypeAwareOperatorNewOrDelete(),
AlignedAllocationMode::No, SizedDeallocationMode::No};
@@ -3627,7 +3648,7 @@ bool Sema::FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
Found.suppressDiagnostics();
if (!isAlignedAllocation(IDP.PassAlignment) &&
- hasNewExtendedAlignment(*this, Context.getRecordType(RD)))
+ hasNewExtendedAlignment(*this, Context.getCanonicalTagType(RD)))
IDP.PassAlignment = AlignedAllocationMode::Yes;
// C++17 [expr.delete]p10:
@@ -4050,7 +4071,8 @@ Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
: diag::warn_delete_incomplete,
Ex.get())) {
if (const RecordType *RT = PointeeElem->getAs<RecordType>())
- PointeeRD = cast<CXXRecordDecl>(RT->getDecl());
+ PointeeRD =
+ cast<CXXRecordDecl>(RT->getOriginalDecl())->getDefinitionOrSelf();
}
}
@@ -4574,7 +4596,7 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
// If the user-defined conversion is specified by a conversion function,
// the initial standard conversion sequence converts the source type to
// the implicit object parameter of the conversion function.
- BeforeToType = Context.getTagDeclType(Conv->getParent());
+ BeforeToType = Context.getCanonicalTagType(Conv->getParent());
} else {
const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(FD);
CastKind = CK_ConstructorConversion;
@@ -4818,7 +4840,10 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
if (FromType->isVectorType() || ToType->isVectorType())
StepTy = adjustVectorType(Context, FromType, ToType, &ElTy);
if (ElTy->isBooleanType()) {
- assert(FromType->castAs<EnumType>()->getDecl()->isFixed() &&
+ assert(FromType->castAs<EnumType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->isFixed() &&
SCS.Second == ICK_Integral_Promotion &&
"only enums with fixed underlying type can promote to bool");
From = ImpCastExprToType(From, StepTy, CK_IntegralToBoolean, VK_PRValue,
@@ -5370,16 +5395,18 @@ QualType Sema::CheckPointerToMemberOperands(ExprResult &LHS, ExprResult &RHS,
return QualType();
}
+ // FIXME: use sugared type from member pointer.
+ CanQualType RHSClassType = Context.getCanonicalTagType(RHSClass);
CXXCastPath BasePath;
if (CheckDerivedToBaseConversion(
- LHSType, QualType(RHSClass->getTypeForDecl(), 0), Loc,
+ LHSType, RHSClassType, Loc,
SourceRange(LHS.get()->getBeginLoc(), RHS.get()->getEndLoc()),
&BasePath))
return QualType();
// Cast LHS to type of use.
- QualType UseType = Context.getQualifiedType(RHSClass->getTypeForDecl(),
- LHSType.getQualifiers());
+ QualType UseType =
+ Context.getQualifiedType(RHSClassType, LHSType.getQualifiers());
if (isIndirect)
UseType = Context.getPointerType(UseType);
ExprValueKind VK = isIndirect ? VK_PRValue : LHS.get()->getValueKind();
@@ -6184,7 +6211,7 @@ QualType Sema::FindCompositePointerType(SourceLocation Loc,
case Pointer:
return Ctx.getPointerType(T);
case MemberPointer:
- return Ctx.getMemberPointerType(T, /*Qualifier=*/nullptr,
+ return Ctx.getMemberPointerType(T, /*Qualifier=*/std::nullopt,
ClassOrBound->getAsCXXRecordDecl());
case ObjCPointer:
return Ctx.getObjCObjectPointerType(T);
@@ -6357,7 +6384,7 @@ QualType Sema::FindCompositePointerType(SourceLocation Loc,
return QualType();
Steps.emplace_back(Step::MemberPointer,
- Context.getTypeDeclType(Cls).getTypePtr());
+ Context.getCanonicalTagType(Cls).getTypePtr());
continue;
}
@@ -6636,7 +6663,8 @@ ExprResult Sema::MaybeBindToTemporary(Expr *E) {
// That should be enough to guarantee that this type is complete, if we're
// not processing a decltype expression.
- CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(RT->getOriginalDecl())->getDefinitionOrSelf();
if (RD->isInvalidDecl() || RD->isDependentContext())
return E;
@@ -7243,16 +7271,13 @@ ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
TemplateIdAnnotation *TemplateId = SecondTypeName.TemplateId;
ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
TemplateId->NumArgs);
- TypeResult T = ActOnTemplateIdType(S,
- SS,
- TemplateId->TemplateKWLoc,
- TemplateId->Template,
- TemplateId->Name,
- TemplateId->TemplateNameLoc,
- TemplateId->LAngleLoc,
- TemplateArgsPtr,
- TemplateId->RAngleLoc,
- /*IsCtorOrDtorName*/true);
+ TypeResult T = ActOnTemplateIdType(
+ S, ElaboratedTypeKeyword::None,
+ /*ElaboratedKeywordLoc=*/SourceLocation(), SS,
+ TemplateId->TemplateKWLoc, TemplateId->Template, TemplateId->Name,
+ TemplateId->TemplateNameLoc, TemplateId->LAngleLoc, TemplateArgsPtr,
+ TemplateId->RAngleLoc,
+ /*IsCtorOrDtorName*/ true);
if (T.isInvalid() || !T.get()) {
// Recover by assuming we had the right type all along.
DestructedType = ObjectType;
@@ -7296,16 +7321,13 @@ ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
TemplateIdAnnotation *TemplateId = FirstTypeName.TemplateId;
ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
TemplateId->NumArgs);
- TypeResult T = ActOnTemplateIdType(S,
- SS,
- TemplateId->TemplateKWLoc,
- TemplateId->Template,
- TemplateId->Name,
- TemplateId->TemplateNameLoc,
- TemplateId->LAngleLoc,
- TemplateArgsPtr,
- TemplateId->RAngleLoc,
- /*IsCtorOrDtorName*/true);
+ TypeResult T = ActOnTemplateIdType(
+ S, ElaboratedTypeKeyword::None,
+ /*ElaboratedKeywordLoc=*/SourceLocation(), SS,
+ TemplateId->TemplateKWLoc, TemplateId->Template, TemplateId->Name,
+ TemplateId->TemplateNameLoc, TemplateId->LAngleLoc, TemplateArgsPtr,
+ TemplateId->RAngleLoc,
+ /*IsCtorOrDtorName*/ true);
if (T.isInvalid() || !T.get()) {
// Recover by dropping this type.
ScopeType = QualType();
@@ -7505,7 +7527,7 @@ ExprResult Sema::IgnoredValueConversions(Expr *E) {
// GCC seems to also exclude expressions of incomplete enum type.
if (const EnumType *T = E->getType()->getAs<EnumType>()) {
- if (!T->getDecl()->isComplete()) {
+ if (!T->getOriginalDecl()->getDefinitionOrSelf()->isComplete()) {
// FIXME: stupid workaround for a codegen bug!
E = ImpCastExprToType(E, Context.VoidTy, CK_ToVoid).get();
return E;
diff --git a/clang/lib/Sema/SemaExprMember.cpp b/clang/lib/Sema/SemaExprMember.cpp
index 5dca509..4a31a13 100644
--- a/clang/lib/Sema/SemaExprMember.cpp
+++ b/clang/lib/Sema/SemaExprMember.cpp
@@ -492,13 +492,14 @@ CheckExtVectorComponent(Sema &S, QualType baseType, ExprValueKind &VK,
QualType VT = S.Context.getExtVectorType(vecType->getElementType(), CompSize);
// Now look up the TypeDefDecl from the vector type. Without this,
- // diagostics look bad. We want extended vector types to appear built-in.
+ // diagnostics look bad. We want extended vector types to appear built-in.
for (Sema::ExtVectorDeclsType::iterator
I = S.ExtVectorDecls.begin(S.getExternalSource()),
E = S.ExtVectorDecls.end();
I != E; ++I) {
if ((*I)->getUnderlyingType() == VT)
- return S.Context.getTypedefType(*I);
+ return S.Context.getTypedefType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, *I);
}
return VT; // should never get here (a typedef type should always be found).
@@ -881,7 +882,7 @@ Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType,
// build a CXXDependentScopeMemberExpr.
if (R.wasNotFoundInCurrentInstantiation() ||
(R.getLookupName().getCXXOverloadedOperator() == OO_Equal &&
- (SS.isSet() ? SS.getScopeRep()->isDependent()
+ (SS.isSet() ? SS.getScopeRep().isDependent()
: BaseExprType->isDependentType())))
return ActOnDependentMemberExpr(BaseExpr, BaseExprType, IsArrow, OpLoc, SS,
TemplateKWLoc, FirstQualifierInScope,
diff --git a/clang/lib/Sema/SemaExprObjC.cpp b/clang/lib/Sema/SemaExprObjC.cpp
index e0662d8..03b5c79 100644
--- a/clang/lib/Sema/SemaExprObjC.cpp
+++ b/clang/lib/Sema/SemaExprObjC.cpp
@@ -639,14 +639,14 @@ ExprResult SemaObjC::BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr) {
BoxingMethod = getNSNumberFactoryMethod(*this, Loc, ValueType);
BoxedType = NSNumberPointer;
} else if (const EnumType *ET = ValueType->getAs<EnumType>()) {
- if (!ET->getDecl()->isComplete()) {
+ const EnumDecl *ED = ET->getOriginalDecl()->getDefinitionOrSelf();
+ if (!ED->isComplete()) {
Diag(Loc, diag::err_objc_incomplete_boxed_expression_type)
<< ValueType << ValueExpr->getSourceRange();
return ExprError();
}
- BoxingMethod = getNSNumberFactoryMethod(*this, Loc,
- ET->getDecl()->getIntegerType());
+ BoxingMethod = getNSNumberFactoryMethod(*this, Loc, ED->getIntegerType());
BoxedType = NSNumberPointer;
} else if (ValueType->isObjCBoxableRecordType()) {
// Support for structure types, that marked as objc_boxable
@@ -2337,10 +2337,10 @@ SemaObjC::getObjCMessageKind(Scope *S, IdentifierInfo *Name,
if (ObjCInterfaceDecl *Class = dyn_cast<ObjCInterfaceDecl>(ND))
T = Context.getObjCInterfaceType(Class);
else if (TypeDecl *Type = dyn_cast<TypeDecl>(ND)) {
- T = Context.getTypeDeclType(Type);
SemaRef.DiagnoseUseOfDecl(Type, NameLoc);
- }
- else
+ T = Context.getTypeDeclType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, Type);
+ } else
return ObjCInstanceMessage;
// We have a class message, and T is the type we're
@@ -3847,7 +3847,8 @@ static inline T *getObjCBridgeAttr(const TypedefType *TD) {
if (QT->isPointerType()) {
QT = QT->getPointeeType();
if (const RecordType *RT = QT->getAs<RecordType>()) {
- for (auto *Redecl : RT->getDecl()->getMostRecentDecl()->redecls()) {
+ for (auto *Redecl :
+ RT->getOriginalDecl()->getMostRecentDecl()->redecls()) {
if (auto *attr = Redecl->getAttr<T>())
return attr;
}
diff --git a/clang/lib/Sema/SemaFunctionEffects.cpp b/clang/lib/Sema/SemaFunctionEffects.cpp
index 1592862..8590ee8 100644
--- a/clang/lib/Sema/SemaFunctionEffects.cpp
+++ b/clang/lib/Sema/SemaFunctionEffects.cpp
@@ -1352,11 +1352,15 @@ private:
return true;
}
- bool TraverseTypeOfExprTypeLoc(TypeOfExprTypeLoc Node) override {
+ bool TraverseTypeOfExprTypeLoc(TypeOfExprTypeLoc Node,
+ bool TraverseQualifier) override {
return true;
}
- bool TraverseDecltypeTypeLoc(DecltypeTypeLoc Node) override { return true; }
+ bool TraverseDecltypeTypeLoc(DecltypeTypeLoc Node,
+ bool TraverseQualifier) override {
+ return true;
+ }
bool TraverseCXXNoexceptExpr(CXXNoexceptExpr *Node) override {
return true;
diff --git a/clang/lib/Sema/SemaHLSL.cpp b/clang/lib/Sema/SemaHLSL.cpp
index 8536e04..b4bbe02 100644
--- a/clang/lib/Sema/SemaHLSL.cpp
+++ b/clang/lib/Sema/SemaHLSL.cpp
@@ -234,7 +234,7 @@ static unsigned calculateLegacyCbufferSize(const ASTContext &Context,
constexpr unsigned CBufferAlign = 16;
if (const RecordType *RT = T->getAs<RecordType>()) {
unsigned Size = 0;
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
for (const FieldDecl *Field : RD->fields()) {
QualType Ty = Field->getType();
unsigned FieldSize = calculateLegacyCbufferSize(Context, Ty);
@@ -337,16 +337,9 @@ static bool isZeroSizedArray(const ConstantArrayType *CAT) {
return CAT != nullptr;
}
-// Returns true if the record type is an HLSL resource class or an array of
-// resource classes
-static bool isResourceRecordTypeOrArrayOf(const Type *Ty) {
- while (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(Ty))
- Ty = CAT->getArrayElementTypeNoTypeQual();
- return HLSLAttributedResourceType::findHandleTypeOnResource(Ty) != nullptr;
-}
-
static bool isResourceRecordTypeOrArrayOf(VarDecl *VD) {
- return isResourceRecordTypeOrArrayOf(VD->getType().getTypePtr());
+ const Type *Ty = VD->getType().getTypePtr();
+ return Ty->isHLSLResourceRecord() || Ty->isHLSLResourceRecordArray();
}
// Returns true if the type is a leaf element type that is not valid to be
@@ -355,7 +348,7 @@ static bool isResourceRecordTypeOrArrayOf(VarDecl *VD) {
// type or if it is a record type that needs to be inspected further.
static bool isInvalidConstantBufferLeafElementType(const Type *Ty) {
Ty = Ty->getUnqualifiedDesugaredType();
- if (isResourceRecordTypeOrArrayOf(Ty))
+ if (Ty->isHLSLResourceRecord() || Ty->isHLSLResourceRecordArray())
return true;
if (Ty->isRecordType())
return Ty->getAsCXXRecordDecl()->isEmpty();
@@ -373,7 +366,7 @@ static bool isInvalidConstantBufferLeafElementType(const Type *Ty) {
// needs to be created for HLSL Buffer use that will exclude these unwanted
// declarations (see createHostLayoutStruct function).
static bool requiresImplicitBufferLayoutStructure(const CXXRecordDecl *RD) {
- if (RD->getTypeForDecl()->isHLSLIntangibleType() || RD->isEmpty())
+ if (RD->isHLSLIntangible() || RD->isEmpty())
return true;
// check fields
for (const FieldDecl *Field : RD->fields()) {
@@ -458,7 +451,7 @@ static FieldDecl *createFieldForHostLayoutStruct(Sema &S, const Type *Ty,
RD = createHostLayoutStruct(S, RD);
if (!RD)
return nullptr;
- Ty = RD->getTypeForDecl();
+ Ty = S.Context.getCanonicalTagType(RD)->getTypePtr();
}
}
@@ -508,8 +501,8 @@ static CXXRecordDecl *createHostLayoutStruct(Sema &S,
if (requiresImplicitBufferLayoutStructure(BaseDecl)) {
BaseDecl = createHostLayoutStruct(S, BaseDecl);
if (BaseDecl) {
- TypeSourceInfo *TSI = AST.getTrivialTypeSourceInfo(
- QualType(BaseDecl->getTypeForDecl(), 0));
+ TypeSourceInfo *TSI =
+ AST.getTrivialTypeSourceInfo(AST.getCanonicalTagType(BaseDecl));
Base = CXXBaseSpecifier(SourceRange(), false, StructDecl->isClass(),
AS_none, TSI, SourceLocation());
}
@@ -1843,7 +1836,7 @@ SemaHLSL::TakeLocForHLSLAttribute(const HLSLAttributedResourceType *RT) {
// requirements and adds them to Bindings
void SemaHLSL::collectResourceBindingsOnUserRecordDecl(const VarDecl *VD,
const RecordType *RT) {
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
for (FieldDecl *FD : RD->fields()) {
const Type *Ty = FD->getType()->getUnqualifiedDesugaredType();
@@ -3396,7 +3389,7 @@ bool SemaHLSL::ContainsBitField(QualType BaseTy) {
continue;
}
if (const auto *RT = dyn_cast<RecordType>(T)) {
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
if (RD->isUnion())
continue;
@@ -3597,7 +3590,7 @@ void SemaHLSL::deduceAddressSpace(VarDecl *Decl) {
return;
// Resource handles.
- if (isResourceRecordTypeOrArrayOf(Type->getUnqualifiedDesugaredType()))
+ if (Type->isHLSLResourceRecord() || Type->isHLSLResourceRecordArray())
return;
// Only static globals belong to the Private address space.
@@ -3637,10 +3630,7 @@ void SemaHLSL::ActOnVariableDeclarator(VarDecl *VD) {
if (VD->getType()->isHLSLIntangibleType())
collectResourceBindingsOnVarDecl(VD);
- const Type *VarType = VD->getType().getTypePtr();
- while (VarType->isArrayType())
- VarType = VarType->getArrayElementTypeNoTypeQual();
- if (VarType->isHLSLResourceRecord() ||
+ if (isResourceRecordTypeOrArrayOf(VD) ||
VD->hasAttr<HLSLVkConstantIdAttr>()) {
// Make the variable for resources static. The global externally visible
// storage is accessed through the handle, which is a member. The variable
@@ -3919,7 +3909,8 @@ class InitListTransformer {
}
while (!RecordTypes.empty()) {
const RecordType *RT = RecordTypes.pop_back_val();
- for (auto *FD : RT->getDecl()->fields()) {
+ for (auto *FD :
+ RT->getOriginalDecl()->getDefinitionOrSelf()->fields()) {
DeclAccessPair Found = DeclAccessPair::make(FD, FD->getAccess());
DeclarationNameInfo NameInfo(FD->getDeclName(), E->getBeginLoc());
ExprResult Res = S.BuildFieldReferenceExpr(
@@ -3967,7 +3958,8 @@ class InitListTransformer {
}
while (!RecordTypes.empty()) {
const RecordType *RT = RecordTypes.pop_back_val();
- for (auto *FD : RT->getDecl()->fields()) {
+ for (auto *FD :
+ RT->getOriginalDecl()->getDefinitionOrSelf()->fields()) {
Inits.push_back(generateInitListsImpl(FD->getType()));
}
}
diff --git a/clang/lib/Sema/SemaInit.cpp b/clang/lib/Sema/SemaInit.cpp
index 1dd38c0..d7cca4b 100644
--- a/clang/lib/Sema/SemaInit.cpp
+++ b/clang/lib/Sema/SemaInit.cpp
@@ -446,7 +446,6 @@ class InitListChecker {
unsigned ExpectedNumInits);
int numArrayElements(QualType DeclType);
int numStructUnionElements(QualType DeclType);
- static RecordDecl *getRecordDecl(QualType DeclType);
ExprResult PerformEmptyInit(SourceLocation Loc,
const InitializedEntity &Entity);
@@ -776,7 +775,7 @@ void InitListChecker::FillInEmptyInitForField(unsigned Init, FieldDecl *Field,
if (Init >= NumInits || !ILE->getInit(Init)) {
if (const RecordType *RType = ILE->getType()->getAs<RecordType>())
- if (!RType->getDecl()->isUnion())
+ if (!RType->getOriginalDecl()->isUnion())
assert((Init < NumInits || VerifyOnly) &&
"This ILE should have been expanded");
@@ -923,7 +922,7 @@ InitListChecker::FillInEmptyInitializations(const InitializedEntity &Entity,
return;
if (const RecordType *RType = ILE->getType()->getAs<RecordType>()) {
- const RecordDecl *RDecl = RType->getDecl();
+ const RecordDecl *RDecl = RType->getOriginalDecl()->getDefinitionOrSelf();
if (RDecl->isUnion() && ILE->getInitializedFieldInUnion()) {
FillInEmptyInitForField(0, ILE->getInitializedFieldInUnion(), Entity, ILE,
RequiresSecondPass, FillWithNoInit);
@@ -1127,7 +1126,8 @@ int InitListChecker::numArrayElements(QualType DeclType) {
}
int InitListChecker::numStructUnionElements(QualType DeclType) {
- RecordDecl *structDecl = DeclType->castAs<RecordType>()->getDecl();
+ RecordDecl *structDecl =
+ DeclType->castAs<RecordType>()->getOriginalDecl()->getDefinitionOrSelf();
int InitializableMembers = 0;
if (auto *CXXRD = dyn_cast<CXXRecordDecl>(structDecl))
InitializableMembers += CXXRD->getNumBases();
@@ -1140,14 +1140,6 @@ int InitListChecker::numStructUnionElements(QualType DeclType) {
return InitializableMembers - structDecl->hasFlexibleArrayMember();
}
-RecordDecl *InitListChecker::getRecordDecl(QualType DeclType) {
- if (const auto *RT = DeclType->getAs<RecordType>())
- return RT->getDecl();
- if (const auto *Inject = DeclType->getAs<InjectedClassNameType>())
- return Inject->getDecl();
- return nullptr;
-}
-
/// Determine whether Entity is an entity for which it is idiomatic to elide
/// the braces in aggregate initialization.
static bool isIdiomaticBraceElisionEntity(const InitializedEntity &Entity) {
@@ -1164,16 +1156,22 @@ static bool isIdiomaticBraceElisionEntity(const InitializedEntity &Entity) {
// Allows elide brace initialization for aggregates with empty base.
if (Entity.getKind() == InitializedEntity::EK_Base) {
- auto *ParentRD =
- Entity.getParent()->getType()->castAs<RecordType>()->getDecl();
+ auto *ParentRD = Entity.getParent()
+ ->getType()
+ ->castAs<RecordType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf();
CXXRecordDecl *CXXRD = cast<CXXRecordDecl>(ParentRD);
return CXXRD->getNumBases() == 1 && CXXRD->field_empty();
}
// Allow brace elision if the only subobject is a field.
if (Entity.getKind() == InitializedEntity::EK_Member) {
- auto *ParentRD =
- Entity.getParent()->getType()->castAs<RecordType>()->getDecl();
+ auto *ParentRD = Entity.getParent()
+ ->getType()
+ ->castAs<RecordType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf();
if (CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(ParentRD)) {
if (CXXRD->getNumBases()) {
return false;
@@ -1442,7 +1440,7 @@ void InitListChecker::CheckListElementTypes(const InitializedEntity &Entity,
} else if (DeclType->isVectorType()) {
CheckVectorType(Entity, IList, DeclType, Index,
StructuredList, StructuredIndex);
- } else if (const RecordDecl *RD = getRecordDecl(DeclType)) {
+ } else if (const RecordDecl *RD = DeclType->getAsRecordDecl()) {
auto Bases =
CXXRecordDecl::base_class_const_range(CXXRecordDecl::base_class_const_iterator(),
CXXRecordDecl::base_class_const_iterator());
@@ -2320,7 +2318,7 @@ void InitListChecker::CheckStructUnionTypes(
bool SubobjectIsDesignatorContext, unsigned &Index,
InitListExpr *StructuredList, unsigned &StructuredIndex,
bool TopLevelObject) {
- const RecordDecl *RD = getRecordDecl(DeclType);
+ const RecordDecl *RD = DeclType->getAsRecordDecl();
// If the record is invalid, some of it's members are invalid. To avoid
// confusion, we forgo checking the initializer for the entire record.
@@ -2889,7 +2887,7 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
// then the current object (defined below) shall have
// structure or union type and the identifier shall be the
// name of a member of that type.
- RecordDecl *RD = getRecordDecl(CurrentObjectType);
+ RecordDecl *RD = CurrentObjectType->getAsRecordDecl();
if (!RD) {
SourceLocation Loc = D->getDotLoc();
if (Loc.isInvalid())
@@ -4337,8 +4335,8 @@ static bool hasCopyOrMoveCtorParam(ASTContext &Ctx,
QualType ParmT =
Info.Constructor->getParamDecl(0)->getType().getNonReferenceType();
- QualType ClassT =
- Ctx.getRecordType(cast<CXXRecordDecl>(Info.FoundDecl->getDeclContext()));
+ CanQualType ClassT = Ctx.getCanonicalTagType(
+ cast<CXXRecordDecl>(Info.FoundDecl->getDeclContext()));
return Ctx.hasSameUnqualifiedType(ParmT, ClassT);
}
@@ -4537,8 +4535,8 @@ static void TryConstructorInitialization(Sema &S,
const RecordType *DestRecordType = DestType->getAs<RecordType>();
assert(DestRecordType && "Constructor initialization requires record type");
- CXXRecordDecl *DestRecordDecl
- = cast<CXXRecordDecl>(DestRecordType->getDecl());
+ auto *DestRecordDecl = cast<CXXRecordDecl>(DestRecordType->getOriginalDecl())
+ ->getDefinitionOrSelf();
// Build the candidate set directly in the initialization sequence
// structure, so that it will persist if we fail.
@@ -5059,8 +5057,8 @@ static void TryListInitialization(Sema &S,
// the underlying type of T, the program is ill-formed.
auto *ET = DestType->getAs<EnumType>();
if (S.getLangOpts().CPlusPlus17 &&
- Kind.getKind() == InitializationKind::IK_DirectList &&
- ET && ET->getDecl()->isFixed() &&
+ Kind.getKind() == InitializationKind::IK_DirectList && ET &&
+ ET->getOriginalDecl()->getDefinitionOrSelf()->isFixed() &&
!S.Context.hasSameUnqualifiedType(E->getType(), DestType) &&
(E->getType()->isIntegralOrUnscopedEnumerationType() ||
E->getType()->isFloatingType())) {
@@ -5170,7 +5168,8 @@ static OverloadingResult TryRefInitWithConversionFunction(
S.isCompleteType(Kind.getLocation(), T1)) {
// The type we're converting to is a class type. Enumerate its constructors
// to see if there is a suitable conversion.
- CXXRecordDecl *T1RecordDecl = cast<CXXRecordDecl>(T1RecordType->getDecl());
+ auto *T1RecordDecl = cast<CXXRecordDecl>(T1RecordType->getOriginalDecl())
+ ->getDefinitionOrSelf();
for (NamedDecl *D : S.LookupConstructors(T1RecordDecl)) {
auto Info = getConstructorInfo(D);
@@ -5193,7 +5192,8 @@ static OverloadingResult TryRefInitWithConversionFunction(
}
}
}
- if (T1RecordType && T1RecordType->getDecl()->isInvalidDecl())
+ if (T1RecordType &&
+ T1RecordType->getOriginalDecl()->getDefinitionOrSelf()->isInvalidDecl())
return OR_No_Viable_Function;
const RecordType *T2RecordType = nullptr;
@@ -5201,7 +5201,8 @@ static OverloadingResult TryRefInitWithConversionFunction(
S.isCompleteType(Kind.getLocation(), T2)) {
// The type we're converting from is a class type, enumerate its conversion
// functions.
- CXXRecordDecl *T2RecordDecl = cast<CXXRecordDecl>(T2RecordType->getDecl());
+ auto *T2RecordDecl = cast<CXXRecordDecl>(T2RecordType->getOriginalDecl())
+ ->getDefinitionOrSelf();
const auto &Conversions = T2RecordDecl->getVisibleConversionFunctions();
for (auto I = Conversions.begin(), E = Conversions.end(); I != E; ++I) {
@@ -5237,7 +5238,8 @@ static OverloadingResult TryRefInitWithConversionFunction(
}
}
}
- if (T2RecordType && T2RecordType->getDecl()->isInvalidDecl())
+ if (T2RecordType &&
+ T2RecordType->getOriginalDecl()->getDefinitionOrSelf()->isInvalidDecl())
return OR_No_Viable_Function;
SourceLocation DeclLoc = Initializer->getBeginLoc();
@@ -5715,7 +5717,9 @@ static void TryValueInitialization(Sema &S,
T = S.Context.getBaseElementType(T);
if (const RecordType *RT = T->getAs<RecordType>()) {
- if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ if (CXXRecordDecl *ClassDecl =
+ dyn_cast<CXXRecordDecl>(RT->getOriginalDecl())) {
+ ClassDecl = ClassDecl->getDefinitionOrSelf();
bool NeedZeroInitialization = true;
// C++98:
// -- if T is a class type (clause 9) with a user-declared constructor
@@ -5913,7 +5917,8 @@ static void TryOrBuildParenListInitialization(
}
} else if (auto *RT = Entity.getType()->getAs<RecordType>()) {
bool IsUnion = RT->isUnionType();
- const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ const auto *RD =
+ cast<CXXRecordDecl>(RT->getOriginalDecl())->getDefinitionOrSelf();
if (RD->isInvalidDecl()) {
// Exit early to avoid confusion when processing members.
// We do the same for braced list initialization in
@@ -6102,8 +6107,9 @@ static void TryUserDefinedConversion(Sema &S,
if (const RecordType *DestRecordType = DestType->getAs<RecordType>()) {
// The type we're converting to is a class type. Enumerate its constructors
// to see if there is a suitable conversion.
- CXXRecordDecl *DestRecordDecl
- = cast<CXXRecordDecl>(DestRecordType->getDecl());
+ auto *DestRecordDecl =
+ cast<CXXRecordDecl>(DestRecordType->getOriginalDecl())
+ ->getDefinitionOrSelf();
// Try to complete the type we're converting to.
if (S.isCompleteType(Kind.getLocation(), DestType)) {
@@ -6139,8 +6145,9 @@ static void TryUserDefinedConversion(Sema &S,
// We can only enumerate the conversion functions for a complete type; if
// the type isn't complete, simply skip this step.
if (S.isCompleteType(DeclLoc, SourceType)) {
- CXXRecordDecl *SourceRecordDecl
- = cast<CXXRecordDecl>(SourceRecordType->getDecl());
+ auto *SourceRecordDecl =
+ cast<CXXRecordDecl>(SourceRecordType->getOriginalDecl())
+ ->getDefinitionOrSelf();
const auto &Conversions =
SourceRecordDecl->getVisibleConversionFunctions();
@@ -7173,7 +7180,8 @@ static ExprResult CopyObject(Sema &S,
Expr *CurInitExpr = (Expr *)CurInit.get();
CXXRecordDecl *Class = nullptr;
if (const RecordType *Record = T->getAs<RecordType>())
- Class = cast<CXXRecordDecl>(Record->getDecl());
+ Class =
+ cast<CXXRecordDecl>(Record->getOriginalDecl())->getDefinitionOrSelf();
if (!Class)
return CurInit;
@@ -7328,8 +7336,8 @@ static void CheckCXX98CompatAccessibleCopy(Sema &S,
// Find constructors which would have been considered.
OverloadCandidateSet CandidateSet(Loc, OverloadCandidateSet::CSK_Normal);
- DeclContext::lookup_result Ctors =
- S.LookupConstructors(cast<CXXRecordDecl>(Record->getDecl()));
+ DeclContext::lookup_result Ctors = S.LookupConstructors(
+ cast<CXXRecordDecl>(Record->getOriginalDecl())->getDefinitionOrSelf());
// Perform overload resolution.
OverloadCandidateSet::iterator Best;
@@ -8160,8 +8168,9 @@ ExprResult InitializationSequence::Perform(Sema &S,
// regardless of how we initialized the entity.
QualType T = CurInit.get()->getType();
if (const RecordType *Record = T->getAs<RecordType>()) {
- CXXDestructorDecl *Destructor
- = S.LookupDestructor(cast<CXXRecordDecl>(Record->getDecl()));
+ CXXDestructorDecl *Destructor =
+ S.LookupDestructor(cast<CXXRecordDecl>(Record->getOriginalDecl())
+ ->getDefinitionOrSelf());
S.CheckDestructorAccess(CurInit.get()->getBeginLoc(), Destructor,
S.PDiag(diag::err_access_dtor_temp) << T);
S.MarkFunctionReferenced(CurInit.get()->getBeginLoc(), Destructor);
@@ -8808,8 +8817,8 @@ static void emitBadConversionNotes(Sema &S, const InitializedEntity &entity,
destPointeeType.getQualifiers().compatiblyIncludes(
fromPointeeType.getQualifiers(), S.getASTContext()))
S.Diag(fromDecl->getLocation(), diag::note_forward_class_conversion)
- << S.getASTContext().getTagDeclType(fromDecl)
- << S.getASTContext().getTagDeclType(destDecl);
+ << S.getASTContext().getCanonicalTagType(fromDecl)
+ << S.getASTContext().getCanonicalTagType(destDecl);
}
static void diagnoseListInit(Sema &S, const InitializedEntity &Entity,
@@ -9208,32 +9217,34 @@ bool InitializationSequence::Diagnose(Sema &S,
InheritedFrom = Inherited.getShadowDecl()->getNominatedBaseClass();
if (Entity.getKind() == InitializedEntity::EK_Base) {
S.Diag(Kind.getLocation(), diag::err_missing_default_ctor)
- << (InheritedFrom ? 2 : Constructor->isImplicit() ? 1 : 0)
- << S.Context.getTypeDeclType(Constructor->getParent())
- << /*base=*/0
- << Entity.getType()
- << InheritedFrom;
-
- RecordDecl *BaseDecl
- = Entity.getBaseSpecifier()->getType()->castAs<RecordType>()
- ->getDecl();
+ << (InheritedFrom ? 2
+ : Constructor->isImplicit() ? 1
+ : 0)
+ << S.Context.getCanonicalTagType(Constructor->getParent())
+ << /*base=*/0 << Entity.getType() << InheritedFrom;
+
+ RecordDecl *BaseDecl = Entity.getBaseSpecifier()
+ ->getType()
+ ->castAs<RecordType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf();
S.Diag(BaseDecl->getLocation(), diag::note_previous_decl)
- << S.Context.getTagDeclType(BaseDecl);
+ << S.Context.getCanonicalTagType(BaseDecl);
} else {
S.Diag(Kind.getLocation(), diag::err_missing_default_ctor)
- << (InheritedFrom ? 2 : Constructor->isImplicit() ? 1 : 0)
- << S.Context.getTypeDeclType(Constructor->getParent())
- << /*member=*/1
- << Entity.getName()
- << InheritedFrom;
+ << (InheritedFrom ? 2
+ : Constructor->isImplicit() ? 1
+ : 0)
+ << S.Context.getCanonicalTagType(Constructor->getParent())
+ << /*member=*/1 << Entity.getName() << InheritedFrom;
S.Diag(Entity.getDecl()->getLocation(),
diag::note_member_declared_at);
if (const RecordType *Record
= Entity.getType()->getAs<RecordType>())
- S.Diag(Record->getDecl()->getLocation(),
+ S.Diag(Record->getOriginalDecl()->getLocation(),
diag::note_previous_decl)
- << S.Context.getTagDeclType(Record->getDecl());
+ << S.Context.getCanonicalTagType(Record->getOriginalDecl());
}
break;
}
@@ -9300,11 +9311,11 @@ bool InitializationSequence::Diagnose(Sema &S,
// initialized.
CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(S.CurContext);
S.Diag(Kind.getLocation(), diag::err_uninitialized_member_in_ctor)
- << (Constructor->getInheritedConstructor() ? 2 :
- Constructor->isImplicit() ? 1 : 0)
- << S.Context.getTypeDeclType(Constructor->getParent())
- << /*const=*/1
- << Entity.getName();
+ << (Constructor->getInheritedConstructor() ? 2
+ : Constructor->isImplicit() ? 1
+ : 0)
+ << S.Context.getCanonicalTagType(Constructor->getParent())
+ << /*const=*/1 << Entity.getName();
S.Diag(Entity.getDecl()->getLocation(), diag::note_previous_decl)
<< Entity.getName();
} else if (const auto *VD = dyn_cast_if_present<VarDecl>(Entity.getDecl());
@@ -10157,7 +10168,7 @@ QualType Sema::DeduceTemplateSpecializationFromInitializer(
auto *RD = cast<CXXRecordDecl>(Pattern->getTemplatedDecl());
if (!(RD->getDefinition() && RD->isAggregate()))
return;
- QualType Ty = Context.getRecordType(RD);
+ QualType Ty = Context.getCanonicalTagType(RD);
SmallVector<QualType, 8> ElementTypes;
InitListChecker CheckInitList(*this, Entity, ListInit, Ty, ElementTypes);
@@ -10297,8 +10308,8 @@ QualType Sema::DeduceTemplateSpecializationFromInitializer(
case OR_No_Viable_Function: {
CXXRecordDecl *Primary =
cast<ClassTemplateDecl>(Template)->getTemplatedDecl();
- bool Complete =
- isCompleteType(Kind.getLocation(), Context.getTypeDeclType(Primary));
+ bool Complete = isCompleteType(Kind.getLocation(),
+ Context.getCanonicalTagType(Primary));
Candidates.NoteCandidates(
PartialDiagnosticAt(
Kind.getLocation(),
diff --git a/clang/lib/Sema/SemaLambda.cpp b/clang/lib/Sema/SemaLambda.cpp
index bc3c4b0..0d891fc 100644
--- a/clang/lib/Sema/SemaLambda.cpp
+++ b/clang/lib/Sema/SemaLambda.cpp
@@ -425,7 +425,7 @@ bool Sema::DiagnoseInvalidExplicitObjectParameterInLambda(
.getNonReferenceType()
.getUnqualifiedType()
.getDesugaredType(getASTContext());
- QualType LambdaType = getASTContext().getRecordType(RD);
+ CanQualType LambdaType = getASTContext().getCanonicalTagType(RD);
if (LambdaType == ExplicitObjectParameterType)
return false;
@@ -457,7 +457,7 @@ bool Sema::DiagnoseInvalidExplicitObjectParameterInLambda(
return true;
}
- if (Paths.isAmbiguous(LambdaType->getCanonicalTypeUnqualified())) {
+ if (Paths.isAmbiguous(LambdaType)) {
std::string PathsDisplay = getAmbiguousPathsDisplayString(Paths);
Diag(CallLoc, diag::err_explicit_object_lambda_ambiguous_base)
<< LambdaType << PathsDisplay;
@@ -642,7 +642,7 @@ static EnumDecl *findEnumForBlockReturn(Expr *E) {
// - it is an expression of that formal enum type.
if (const EnumType *ET = E->getType()->getAs<EnumType>()) {
- return ET->getDecl();
+ return ET->getOriginalDecl()->getDefinitionOrSelf();
}
// Otherwise, nope.
@@ -759,7 +759,7 @@ void Sema::deduceClosureReturnType(CapturingScopeInfo &CSI) {
assert(isa<BlockScopeInfo>(CSI));
const EnumDecl *ED = findCommonEnumForBlockReturns(CSI.Returns);
if (ED) {
- CSI.ReturnType = Context.getTypeDeclType(ED);
+ CSI.ReturnType = Context.getCanonicalTagType(ED);
adjustBlockReturnsToEnum(*this, CSI.Returns, CSI.ReturnType);
return;
}
diff --git a/clang/lib/Sema/SemaLookup.cpp b/clang/lib/Sema/SemaLookup.cpp
index dc73ded..e28492b 100644
--- a/clang/lib/Sema/SemaLookup.cpp
+++ b/clang/lib/Sema/SemaLookup.cpp
@@ -563,9 +563,8 @@ void LookupResult::resolveKind() {
// no ambiguity if they all refer to the same type, so unique based on the
// canonical type.
if (const auto *TD = dyn_cast<TypeDecl>(D)) {
- QualType T = getSema().Context.getTypeDeclType(TD);
auto UniqueResult = UniqueTypes.insert(
- std::make_pair(getSema().Context.getCanonicalType(T), I));
+ std::make_pair(getSema().Context.getCanonicalTypeDeclType(TD), I));
if (!UniqueResult.second) {
// The type is not unique.
ExistingI = UniqueResult.first->second;
@@ -717,7 +716,7 @@ static QualType getOpenCLEnumType(Sema &S, llvm::StringRef Name) {
EnumDecl *Decl = Result.getAsSingle<EnumDecl>();
if (!Decl)
return diagOpenCLBuiltinTypeError(S, "enum", Name);
- return S.Context.getEnumType(Decl);
+ return S.Context.getCanonicalTagType(Decl);
}
/// Lookup an OpenCL typedef type.
@@ -730,7 +729,8 @@ static QualType getOpenCLTypedefType(Sema &S, llvm::StringRef Name) {
TypedefNameDecl *Decl = Result.getAsSingle<TypedefNameDecl>();
if (!Decl)
return diagOpenCLBuiltinTypeError(S, "typedef", Name);
- return S.Context.getTypedefType(Decl);
+ return S.Context.getTypedefType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, Decl);
}
/// Get the QualType instances of the return type and arguments for an OpenCL
@@ -1001,7 +1001,7 @@ static void LookupPredefedObjCSuperType(Sema &Sema, Scope *S) {
Sema.LookupName(Result, S);
if (Result.getResultKind() == LookupResultKind::Found)
if (const TagDecl *TD = Result.getAsSingle<TagDecl>())
- Context.setObjCSuperType(Context.getTagDeclType(TD));
+ Context.setObjCSuperType(Context.getCanonicalTagType(TD));
}
void Sema::LookupNecessaryTypesForBuiltin(Scope *S, unsigned ID) {
@@ -2435,12 +2435,12 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
if (!R.getLookupName())
return false;
+#ifndef NDEBUG
// Make sure that the declaration context is complete.
- assert((!isa<TagDecl>(LookupCtx) ||
- LookupCtx->isDependentContext() ||
- cast<TagDecl>(LookupCtx)->isCompleteDefinition() ||
- cast<TagDecl>(LookupCtx)->isBeingDefined()) &&
- "Declaration context must already be complete!");
+ if (const auto *TD = dyn_cast<TagDecl>(LookupCtx);
+ TD && !TD->isDependentType() && TD->getDefinition() == nullptr)
+ llvm_unreachable("Declaration context must already be complete!");
+#endif
struct QualifiedLookupInScope {
bool oldVal;
@@ -2596,10 +2596,8 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
// C++ [class.member.lookup]p3:
// type declarations (including injected-class-names) are replaced by
// the types they designate
- if (const TypeDecl *TD = dyn_cast<TypeDecl>(ND->getUnderlyingDecl())) {
- QualType T = Context.getTypeDeclType(TD);
- return T.getCanonicalType().getAsOpaquePtr();
- }
+ if (const TypeDecl *TD = dyn_cast<TypeDecl>(ND->getUnderlyingDecl()))
+ return Context.getCanonicalTypeDeclType(TD).getAsOpaquePtr();
return ND->getUnderlyingDecl()->getCanonicalDecl();
}
@@ -2704,12 +2702,10 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS) {
- auto *NNS = SS.getScopeRep();
- if (NNS && NNS->getKind() == NestedNameSpecifier::Super)
- return LookupInSuper(R, NNS->getAsRecordDecl());
- else
-
- return LookupQualifiedName(R, LookupCtx);
+ NestedNameSpecifier Qualifier = SS.getScopeRep();
+ if (Qualifier.getKind() == NestedNameSpecifier::Kind::MicrosoftSuper)
+ return LookupInSuper(R, Qualifier.getAsMicrosoftSuper());
+ return LookupQualifiedName(R, LookupCtx);
}
bool Sema::LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
@@ -2731,7 +2727,9 @@ bool Sema::LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
IsDependent = !DC && ObjectType->isDependentType();
assert(((!DC && ObjectType->isDependentType()) ||
!ObjectType->isIncompleteType() || !ObjectType->getAs<TagType>() ||
- ObjectType->castAs<TagType>()->isBeingDefined()) &&
+ ObjectType->castAs<TagType>()
+ ->getOriginalDecl()
+ ->isEntityBeingDefined()) &&
"Caller should have completed object type");
} else if (SS && SS->isNotEmpty()) {
// This nested-name-specifier occurs after another nested-name-specifier,
@@ -2744,9 +2742,9 @@ bool Sema::LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
// FIXME: '__super' lookup semantics could be implemented by a
// LookupResult::isSuperLookup flag which skips the initial search of
// the lookup context in LookupQualified.
- if (NestedNameSpecifier *NNS = SS->getScopeRep();
- NNS->getKind() == NestedNameSpecifier::Super)
- return LookupInSuper(R, NNS->getAsRecordDecl());
+ if (NestedNameSpecifier Qualifier = SS->getScopeRep();
+ Qualifier.getKind() == NestedNameSpecifier::Kind::MicrosoftSuper)
+ return LookupInSuper(R, Qualifier.getAsMicrosoftSuper());
}
IsDependent = !DC && isDependentScopeSpecifier(*SS);
} else {
@@ -2772,10 +2770,12 @@ bool Sema::LookupInSuper(LookupResult &R, CXXRecordDecl *Class) {
// members of Class itself. That is, the naming class is Class, and the
// access includes the access of the base.
for (const auto &BaseSpec : Class->bases()) {
- CXXRecordDecl *RD = cast<CXXRecordDecl>(
- BaseSpec.getType()->castAs<RecordType>()->getDecl());
+ CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(
+ BaseSpec.getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
LookupResult Result(*this, R.getLookupNameInfo(), R.getLookupKind());
- Result.setBaseObjectType(Context.getRecordType(Class));
+ Result.setBaseObjectType(Context.getCanonicalTagType(Class));
LookupQualifiedName(Result, RD);
// Copy the lookup results into the target, merging the base's access into
@@ -3101,7 +3101,7 @@ addAssociatedClassesAndNamespaces(AssociatedLookup &Result,
// Only recurse into base classes for complete types.
if (!Result.S.isCompleteType(Result.InstantiationLoc,
- Result.S.Context.getRecordType(Class)))
+ Result.S.Context.getCanonicalTagType(Class)))
return;
// Add direct and indirect base classes along with their associated
@@ -3123,7 +3123,8 @@ addAssociatedClassesAndNamespaces(AssociatedLookup &Result,
// the classes and namespaces of known non-dependent arguments.
if (!BaseType)
continue;
- CXXRecordDecl *BaseDecl = cast<CXXRecordDecl>(BaseType->getDecl());
+ CXXRecordDecl *BaseDecl = cast<CXXRecordDecl>(BaseType->getOriginalDecl())
+ ->getDefinitionOrSelf();
if (Result.addClassTransitive(BaseDecl)) {
// Find the associated namespace for this base class.
DeclContext *BaseCtx = BaseDecl->getDeclContext();
@@ -3194,8 +3195,10 @@ addAssociatedClassesAndNamespaces(AssociatedLookup &Result, QualType Ty) {
// Its associated namespaces are the innermost enclosing
// namespaces of its associated classes.
case Type::Record: {
+ // FIXME: This should use the original decl.
CXXRecordDecl *Class =
- cast<CXXRecordDecl>(cast<RecordType>(T)->getDecl());
+ cast<CXXRecordDecl>(cast<RecordType>(T)->getOriginalDecl())
+ ->getDefinitionOrSelf();
addAssociatedClassesAndNamespaces(Result, Class);
break;
}
@@ -3205,7 +3208,9 @@ addAssociatedClassesAndNamespaces(AssociatedLookup &Result, QualType Ty) {
// If it is a class member, its associated class is the
// member’s class; else it has no associated class.
case Type::Enum: {
- EnumDecl *Enum = cast<EnumType>(T)->getDecl();
+ // FIXME: This should use the original decl.
+ EnumDecl *Enum =
+ cast<EnumType>(T)->getOriginalDecl()->getDefinitionOrSelf();
DeclContext *Ctx = Enum->getDeclContext();
if (CXXRecordDecl *EnclosingClass = dyn_cast<CXXRecordDecl>(Ctx))
@@ -3438,7 +3443,7 @@ Sema::LookupSpecialMember(CXXRecordDecl *RD, CXXSpecialMemberKind SM,
// Prepare for overload resolution. Here we construct a synthetic argument
// if necessary and make sure that implicit functions are declared.
- CanQualType CanTy = Context.getCanonicalType(Context.getTagDeclType(RD));
+ CanQualType CanTy = Context.getCanonicalTagType(RD);
DeclarationName Name;
Expr *Arg = nullptr;
unsigned NumArgs;
@@ -3645,7 +3650,7 @@ DeclContext::lookup_result Sema::LookupConstructors(CXXRecordDecl *Class) {
});
}
- CanQualType T = Context.getCanonicalType(Context.getTypeDeclType(Class));
+ CanQualType T = Context.getCanonicalTagType(Class);
DeclarationName Name = Context.DeclarationNames.getCXXConstructorName(T);
return Class->lookup(Name);
}
@@ -4260,7 +4265,7 @@ private:
const auto *Record = BaseType->getAs<RecordType>();
if (!Record)
continue;
- RD = Record->getDecl();
+ RD = Record->getOriginalDecl()->getDefinitionOrSelf();
}
// FIXME: It would be nice to be able to determine whether referencing
@@ -4546,40 +4551,101 @@ static void checkCorrectionVisibility(Sema &SemaRef, TypoCorrection &TC) {
// the given NestedNameSpecifier (i.e. given a NestedNameSpecifier "foo::bar::",
// fill the vector with the IdentifierInfo pointers for "foo" and "bar").
static void getNestedNameSpecifierIdentifiers(
- NestedNameSpecifier *NNS,
- SmallVectorImpl<const IdentifierInfo*> &Identifiers) {
- if (NestedNameSpecifier *Prefix = NNS->getPrefix())
- getNestedNameSpecifierIdentifiers(Prefix, Identifiers);
- else
+ NestedNameSpecifier NNS,
+ SmallVectorImpl<const IdentifierInfo *> &Identifiers) {
+ switch (NNS.getKind()) {
+ case NestedNameSpecifier::Kind::Null:
Identifiers.clear();
+ return;
- const IdentifierInfo *II = nullptr;
-
- switch (NNS->getKind()) {
- case NestedNameSpecifier::Identifier:
- II = NNS->getAsIdentifier();
- break;
-
- case NestedNameSpecifier::Namespace: {
- const NamespaceBaseDecl *Namespace = NNS->getAsNamespace();
+ case NestedNameSpecifier::Kind::Namespace: {
+ auto [Namespace, Prefix] = NNS.getAsNamespaceAndPrefix();
+ getNestedNameSpecifierIdentifiers(Prefix, Identifiers);
if (const auto *NS = dyn_cast<NamespaceDecl>(Namespace);
NS && NS->isAnonymousNamespace())
return;
- II = Namespace->getIdentifier();
- break;
+ Identifiers.push_back(Namespace->getIdentifier());
+ return;
}
- case NestedNameSpecifier::TypeSpec:
- II = QualType(NNS->getAsType(), 0).getBaseTypeIdentifier();
+ case NestedNameSpecifier::Kind::Type: {
+ for (const Type *T = NNS.getAsType(); /**/; /**/) {
+ switch (T->getTypeClass()) {
+ case Type::DependentName: {
+ auto *DT = cast<DependentNameType>(T);
+ getNestedNameSpecifierIdentifiers(DT->getQualifier(), Identifiers);
+ Identifiers.push_back(DT->getIdentifier());
+ return;
+ }
+ case Type::TemplateSpecialization: {
+ TemplateName Name =
+ cast<TemplateSpecializationType>(T)->getTemplateName();
+ if (const QualifiedTemplateName *QTN =
+ Name.getAsAdjustedQualifiedTemplateName()) {
+ getNestedNameSpecifierIdentifiers(QTN->getQualifier(), Identifiers);
+ Name = QTN->getUnderlyingTemplate();
+ }
+ if (const auto *TD = Name.getAsTemplateDecl(/*IgnoreDeduced=*/true))
+ Identifiers.push_back(TD->getIdentifier());
+ return;
+ }
+ case Type::DependentTemplateSpecialization: {
+ const DependentTemplateStorage &S =
+ cast<DependentTemplateSpecializationType>(T)
+ ->getDependentTemplateName();
+ getNestedNameSpecifierIdentifiers(S.getQualifier(), Identifiers);
+ // FIXME: Should this dig into the Name as well?
+ // Identifiers.push_back(S.getName().getIdentifier());
+ return;
+ }
+ case Type::SubstTemplateTypeParm:
+ T = cast<SubstTemplateTypeParmType>(T)
+ ->getReplacementType()
+ .getTypePtr();
+ continue;
+ case Type::TemplateTypeParm:
+ Identifiers.push_back(cast<TemplateTypeParmType>(T)->getIdentifier());
+ return;
+ case Type::Decltype:
+ return;
+ case Type::Enum:
+ case Type::Record:
+ case Type::InjectedClassName: {
+ auto *TT = cast<TagType>(T);
+ getNestedNameSpecifierIdentifiers(TT->getQualifier(), Identifiers);
+ Identifiers.push_back(TT->getOriginalDecl()->getIdentifier());
+ return;
+ }
+ case Type::Typedef: {
+ auto *TT = cast<TypedefType>(T);
+ getNestedNameSpecifierIdentifiers(TT->getQualifier(), Identifiers);
+ Identifiers.push_back(TT->getDecl()->getIdentifier());
+ return;
+ }
+ case Type::Using: {
+ auto *TT = cast<UsingType>(T);
+ getNestedNameSpecifierIdentifiers(TT->getQualifier(), Identifiers);
+ Identifiers.push_back(TT->getDecl()->getIdentifier());
+ return;
+ }
+ case Type::UnresolvedUsing: {
+ auto *TT = cast<UnresolvedUsingType>(T);
+ getNestedNameSpecifierIdentifiers(TT->getQualifier(), Identifiers);
+ Identifiers.push_back(TT->getDecl()->getIdentifier());
+ return;
+ }
+ default:
+ Identifiers.push_back(QualType(T, 0).getBaseTypeIdentifier());
+ return;
+ }
+ }
break;
+ }
- case NestedNameSpecifier::Global:
- case NestedNameSpecifier::Super:
+ case NestedNameSpecifier::Kind::Global:
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
return;
}
-
- if (II)
- Identifiers.push_back(II);
}
void TypoCorrectionConsumer::FoundDecl(NamedDecl *ND, NamedDecl *Hiding,
@@ -4612,11 +4678,11 @@ void TypoCorrectionConsumer::FoundName(StringRef Name) {
void TypoCorrectionConsumer::addKeywordResult(StringRef Keyword) {
// Compute the edit distance between the typo and this keyword,
// and add the keyword to the list of results.
- addName(Keyword, nullptr, nullptr, true);
+ addName(Keyword, /*ND=*/nullptr, /*NNS=*/std::nullopt, /*isKeyword=*/true);
}
void TypoCorrectionConsumer::addName(StringRef Name, NamedDecl *ND,
- NestedNameSpecifier *NNS, bool isKeyword) {
+ NestedNameSpecifier NNS, bool isKeyword) {
// Use a simple length-based heuristic to determine the minimum possible
// edit distance. If the minimum isn't good enough, bail out early.
StringRef TypoStr = Typo->getName();
@@ -4708,10 +4774,10 @@ void TypoCorrectionConsumer::addNamespaces(
Namespaces.addNameSpecifier(KNPair.first);
bool SSIsTemplate = false;
- if (NestedNameSpecifier *NNS =
- (SS && SS->isValid()) ? SS->getScopeRep() : nullptr) {
- if (const Type *T = NNS->getAsType())
- SSIsTemplate = T->getTypeClass() == Type::TemplateSpecialization;
+ if (NestedNameSpecifier NNS = (SS ? SS->getScopeRep() : std::nullopt)) {
+ if (NNS.getKind() == NestedNameSpecifier::Kind::Type)
+ SSIsTemplate =
+ NNS.getAsType()->getTypeClass() == Type::TemplateSpecialization;
}
// Do not transform this into an iterator-based loop. The loop body can
// trigger the creation of further types (through lazy deserialization) and
@@ -4813,17 +4879,15 @@ void TypoCorrectionConsumer::performQualifiedLookups() {
for (const TypoCorrection &QR : QualifiedResults) {
for (const auto &NSI : Namespaces) {
DeclContext *Ctx = NSI.DeclCtx;
- const Type *NSType = NSI.NameSpecifier->getAsType();
+ CXXRecordDecl *NamingClass = NSI.NameSpecifier.getAsRecordDecl();
// If the current NestedNameSpecifier refers to a class and the
// current correction candidate is the name of that class, then skip
// it as it is unlikely a qualified version of the class' constructor
// is an appropriate correction.
- if (CXXRecordDecl *NSDecl = NSType ? NSType->getAsCXXRecordDecl() :
- nullptr) {
- if (NSDecl->getIdentifier() == QR.getCorrectionAsIdentifierInfo())
- continue;
- }
+ if (NamingClass &&
+ NamingClass->getIdentifier() == QR.getCorrectionAsIdentifierInfo())
+ continue;
TypoCorrection TC(QR);
TC.ClearCorrectionDecls();
@@ -4853,7 +4917,7 @@ void TypoCorrectionConsumer::performQualifiedLookups() {
std::string NewQualified = TC.getAsString(SemaRef.getLangOpts());
std::string OldQualified;
llvm::raw_string_ostream OldOStream(OldQualified);
- SS->getScopeRep()->print(OldOStream, SemaRef.getPrintingPolicy());
+ SS->getScopeRep().print(OldOStream, SemaRef.getPrintingPolicy());
OldOStream << Typo->getName();
// If correction candidate would be an identical written qualified
// identifier, then the existing CXXScopeSpec probably included a
@@ -4864,8 +4928,7 @@ void TypoCorrectionConsumer::performQualifiedLookups() {
for (LookupResult::iterator TRD = Result.begin(), TRDEnd = Result.end();
TRD != TRDEnd; ++TRD) {
if (SemaRef.CheckMemberAccess(TC.getCorrectionRange().getBegin(),
- NSType ? NSType->getAsCXXRecordDecl()
- : nullptr,
+ NamingClass,
TRD.getPair()) == Sema::AR_accessible)
TC.addCorrectionDecl(*TRD);
}
@@ -4889,10 +4952,10 @@ void TypoCorrectionConsumer::performQualifiedLookups() {
TypoCorrectionConsumer::NamespaceSpecifierSet::NamespaceSpecifierSet(
ASTContext &Context, DeclContext *CurContext, CXXScopeSpec *CurScopeSpec)
: Context(Context), CurContextChain(buildContextChain(CurContext)) {
- if (NestedNameSpecifier *NNS =
- CurScopeSpec ? CurScopeSpec->getScopeRep() : nullptr) {
+ if (NestedNameSpecifier NNS =
+ CurScopeSpec ? CurScopeSpec->getScopeRep() : std::nullopt) {
llvm::raw_string_ostream SpecifierOStream(CurNameSpecifier);
- NNS->print(SpecifierOStream, Context.getPrintingPolicy());
+ NNS.print(SpecifierOStream, Context.getPrintingPolicy());
getNestedNameSpecifierIdentifiers(NNS, CurNameSpecifierIdentifiers);
}
@@ -4906,7 +4969,7 @@ TypoCorrectionConsumer::NamespaceSpecifierSet::NamespaceSpecifierSet(
// Add the global context as a NestedNameSpecifier
SpecifierInfo SI = {cast<DeclContext>(Context.getTranslationUnitDecl()),
- NestedNameSpecifier::GlobalSpecifier(Context), 1};
+ NestedNameSpecifier::getGlobal(), 1};
DistanceMap[1].push_back(SI);
}
@@ -4926,14 +4989,16 @@ auto TypoCorrectionConsumer::NamespaceSpecifierSet::buildContextChain(
unsigned
TypoCorrectionConsumer::NamespaceSpecifierSet::buildNestedNameSpecifier(
- DeclContextList &DeclChain, NestedNameSpecifier *&NNS) {
+ DeclContextList &DeclChain, NestedNameSpecifier &NNS) {
unsigned NumSpecifiers = 0;
for (DeclContext *C : llvm::reverse(DeclChain)) {
if (auto *ND = dyn_cast_or_null<NamespaceDecl>(C)) {
- NNS = NestedNameSpecifier::Create(Context, NNS, ND);
+ NNS = NestedNameSpecifier(Context, ND, NNS);
++NumSpecifiers;
} else if (auto *RD = dyn_cast_or_null<RecordDecl>(C)) {
- NNS = NestedNameSpecifier::Create(Context, NNS, RD->getTypeForDecl());
+ QualType T = Context.getTagType(ElaboratedTypeKeyword::None, NNS, RD,
+ /*OwnsTag=*/false);
+ NNS = NestedNameSpecifier(T.getTypePtr());
++NumSpecifiers;
}
}
@@ -4942,7 +5007,7 @@ TypoCorrectionConsumer::NamespaceSpecifierSet::buildNestedNameSpecifier(
void TypoCorrectionConsumer::NamespaceSpecifierSet::addNameSpecifier(
DeclContext *Ctx) {
- NestedNameSpecifier *NNS = nullptr;
+ NestedNameSpecifier NNS = std::nullopt;
unsigned NumSpecifiers = 0;
DeclContextList NamespaceDeclChain(buildContextChain(Ctx));
DeclContextList FullNamespaceDeclChain(NamespaceDeclChain);
@@ -4960,7 +5025,7 @@ void TypoCorrectionConsumer::NamespaceSpecifierSet::addNameSpecifier(
// Add an explicit leading '::' specifier if needed.
if (NamespaceDeclChain.empty()) {
// Rebuild the NestedNameSpecifier as a globally-qualified specifier.
- NNS = NestedNameSpecifier::GlobalSpecifier(Context);
+ NNS = NestedNameSpecifier::getGlobal();
NumSpecifiers =
buildNestedNameSpecifier(FullNamespaceDeclChain, NNS);
} else if (NamedDecl *ND =
@@ -4972,12 +5037,12 @@ void TypoCorrectionConsumer::NamespaceSpecifierSet::addNameSpecifier(
llvm::raw_string_ostream SpecifierOStream(NewNameSpecifier);
SmallVector<const IdentifierInfo *, 4> NewNameSpecifierIdentifiers;
getNestedNameSpecifierIdentifiers(NNS, NewNameSpecifierIdentifiers);
- NNS->print(SpecifierOStream, Context.getPrintingPolicy());
+ NNS.print(SpecifierOStream, Context.getPrintingPolicy());
SameNameSpecifier = NewNameSpecifier == CurNameSpecifier;
}
if (SameNameSpecifier || llvm::is_contained(CurContextIdentifiers, Name)) {
// Rebuild the NestedNameSpecifier as a globally-qualified specifier.
- NNS = NestedNameSpecifier::GlobalSpecifier(Context);
+ NNS = NestedNameSpecifier::getGlobal();
NumSpecifiers =
buildNestedNameSpecifier(FullNamespaceDeclChain, NNS);
}
@@ -5463,7 +5528,7 @@ std::string TypoCorrection::getAsString(const LangOptions &LO) const {
if (CorrectionNameSpec) {
std::string tmpBuffer;
llvm::raw_string_ostream PrefixOStream(tmpBuffer);
- CorrectionNameSpec->print(PrefixOStream, PrintingPolicy(LO));
+ CorrectionNameSpec.print(PrefixOStream, PrintingPolicy(LO));
PrefixOStream << CorrectionName;
return PrefixOStream.str();
}
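Before moving on to SemaObjC.cpp, the recurring shape of the SemaLookup.cpp changes is worth spelling out: NestedNameSpecifier is now passed and stored by value, an empty qualifier is spelled std::nullopt instead of a null pointer, and consumers branch on getKind() and unpack the payload with the getAs* accessors. The following is a minimal consumer-side sketch, not part of the patch, using only accessors that appear in the hunks above; the helper name and parameters are illustrative.

#include "clang/AST/NestedNameSpecifier.h"
#include "clang/Basic/IdentifierTable.h"
#include "llvm/ADT/SmallVector.h"
using namespace clang;

// Illustrative sketch only; condenses the rewritten
// getNestedNameSpecifierIdentifiers above to the namespace case, assuming the
// value-semantic NestedNameSpecifier API (getKind, getAsNamespaceAndPrefix).
static void collectNamespaceIdentifiers(
    NestedNameSpecifier NNS,
    llvm::SmallVectorImpl<const IdentifierInfo *> &Identifiers) {
  if (NNS.getKind() != NestedNameSpecifier::Kind::Namespace)
    return; // Null, Global, Type and MicrosoftSuper contribute nothing here.
  auto [Namespace, Prefix] = NNS.getAsNamespaceAndPrefix();
  // The prefix now travels with the payload instead of via getPrefix().
  collectNamespaceIdentifiers(Prefix, Identifiers);
  Identifiers.push_back(Namespace->getIdentifier());
}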
diff --git a/clang/lib/Sema/SemaObjC.cpp b/clang/lib/Sema/SemaObjC.cpp
index 0f39a98..8d8d5e8 100644
--- a/clang/lib/Sema/SemaObjC.cpp
+++ b/clang/lib/Sema/SemaObjC.cpp
@@ -691,7 +691,7 @@ static QualType applyObjCTypeArgs(Sema &S, SourceLocation loc, QualType type,
if (!anyPackExpansions && finalTypeArgs.size() != numTypeParams) {
S.Diag(loc, diag::err_objc_type_args_wrong_arity)
<< (typeArgs.size() < typeParams->size()) << objcClass->getDeclName()
- << (unsigned)finalTypeArgs.size() << (unsigned)numTypeParams;
+ << (unsigned)finalTypeArgs.size() << numTypeParams;
S.Diag(objcClass->getLocation(), diag::note_previous_decl) << objcClass;
if (failOnError)
@@ -1407,7 +1407,8 @@ SemaObjC::ObjCSubscriptKind SemaObjC::CheckSubscriptingKind(Expr *FromE) {
int NoIntegrals = 0, NoObjCIdPointers = 0;
SmallVector<CXXConversionDecl *, 4> ConversionDecls;
- for (NamedDecl *D : cast<CXXRecordDecl>(RecordTy->getDecl())
+ for (NamedDecl *D : cast<CXXRecordDecl>(RecordTy->getOriginalDecl())
+ ->getDefinitionOrSelf()
->getVisibleConversionFunctions()) {
if (CXXConversionDecl *Conversion =
dyn_cast<CXXConversionDecl>(D->getUnderlyingDecl())) {
@@ -1510,7 +1511,7 @@ bool SemaObjC::isCFStringType(QualType T) {
if (!RT)
return false;
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl();
if (RD->getTagKind() != TagTypeKind::Struct)
return false;
diff --git a/clang/lib/Sema/SemaObjCProperty.cpp b/clang/lib/Sema/SemaObjCProperty.cpp
index 9dbb1d2..bf6c364 100644
--- a/clang/lib/Sema/SemaObjCProperty.cpp
+++ b/clang/lib/Sema/SemaObjCProperty.cpp
@@ -1321,7 +1321,9 @@ Decl *SemaObjC::ActOnPropertyImplDecl(
}
if (!CompleteTypeErr) {
const RecordType *RecordTy = PropertyIvarType->getAs<RecordType>();
- if (RecordTy && RecordTy->getDecl()->hasFlexibleArrayMember()) {
+ if (RecordTy && RecordTy->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->hasFlexibleArrayMember()) {
Diag(PropertyIvarLoc, diag::err_synthesize_variable_sized_ivar)
<< PropertyIvarType;
CompleteTypeErr = true; // suppress later diagnostics about the ivar
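The Objective-C changes above follow the same mechanical edit as most of this patch: the old TagType::getDecl() accessor becomes getOriginalDecl(), with getDefinitionOrSelf() chained wherever the caller needs the completed entity. A minimal sketch of that pattern as it appears in the property-ivar hunk; the function and parameter names are illustrative, not part of the change.

#include "clang/AST/Decl.h"
#include "clang/AST/Type.h"
using namespace clang;

// Illustrative sketch only; assumes the TagType accessors this patch migrates
// to: getOriginalDecl() replaces getDecl(), and callers needing the definition
// chain getDefinitionOrSelf().
static bool ivarHasFlexibleArrayMember(QualType PropertyIvarType) {
  const RecordType *RecordTy = PropertyIvarType->getAs<RecordType>();
  if (!RecordTy)
    return false;
  // Previously spelled: RecordTy->getDecl()->hasFlexibleArrayMember()
  return RecordTy->getOriginalDecl()
      ->getDefinitionOrSelf()
      ->hasFlexibleArrayMember();
}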
diff --git a/clang/lib/Sema/SemaOpenMP.cpp b/clang/lib/Sema/SemaOpenMP.cpp
index 2c5d97c..7d800c4 100644
--- a/clang/lib/Sema/SemaOpenMP.cpp
+++ b/clang/lib/Sema/SemaOpenMP.cpp
@@ -7358,7 +7358,9 @@ SemaOpenMP::checkOpenMPDeclareVariantFunction(SemaOpenMP::DeclGroupPtrTy DG,
Diag(SR.getBegin(), diag::err_omp_interop_type_not_found) << SR;
return std::nullopt;
}
- QualType InteropType = Context.getTypeDeclType(TD);
+ QualType InteropType =
+ Context.getTypeDeclType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, TD);
if (PTy->isVariadic()) {
Diag(FD->getLocation(), diag::err_omp_append_args_with_varargs) << SR;
return std::nullopt;
@@ -7378,7 +7380,7 @@ SemaOpenMP::checkOpenMPDeclareVariantFunction(SemaOpenMP::DeclGroupPtrTy DG,
auto *Method = dyn_cast<CXXMethodDecl>(FD);
if (Method && !Method->isStatic()) {
FnPtrType = Context.getMemberPointerType(
- AdjustedFnType, /*Qualifier=*/nullptr, Method->getParent());
+ AdjustedFnType, /*Qualifier=*/std::nullopt, Method->getParent());
ExprResult ER;
{
// Build addr_of unary op to correctly handle type checks for member
@@ -18629,10 +18631,11 @@ buildDeclareReductionRef(Sema &SemaRef, SourceLocation Loc, SourceRange Range,
if (const auto *TyRec = Ty->getAs<RecordType>()) {
// Complete the type if it can be completed.
// If the type is neither complete nor being defined, bail out now.
- if (SemaRef.isCompleteType(Loc, Ty) || TyRec->isBeingDefined() ||
- TyRec->getDecl()->getDefinition()) {
+ bool IsComplete = SemaRef.isCompleteType(Loc, Ty);
+ RecordDecl *RD = TyRec->getOriginalDecl()->getDefinition();
+ if (IsComplete || RD) {
Lookup.clear();
- SemaRef.LookupQualifiedName(Lookup, TyRec->getDecl());
+ SemaRef.LookupQualifiedName(Lookup, RD);
if (Lookup.empty()) {
Lookups.emplace_back();
Lookups.back().append(Lookup.begin(), Lookup.end());
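One more recurring spelling from the OpenMP hunk above: where a plain TypeDecl has to be turned into a type, the patch calls the getTypeDeclType overload that takes the elaboration keyword and qualifier explicitly, passing None and std::nullopt to preserve the old unqualified meaning. A hedged sketch of that call shape; the wrapper name is illustrative and the overload signature is assumed from the hunk.

#include "clang/AST/ASTContext.h"
using namespace clang;

// Illustrative sketch only; reproduces the call used for the OpenMP interop
// type above, assuming the overload taking an explicit ElaboratedTypeKeyword
// and qualifier.
static QualType typeForDecl(ASTContext &Context, TypeDecl *TD) {
  return Context.getTypeDeclType(ElaboratedTypeKeyword::None,
                                 /*Qualifier=*/std::nullopt, TD);
}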
diff --git a/clang/lib/Sema/SemaOverload.cpp b/clang/lib/Sema/SemaOverload.cpp
index 76e189d..d593d1d 100644
--- a/clang/lib/Sema/SemaOverload.cpp
+++ b/clang/lib/Sema/SemaOverload.cpp
@@ -370,7 +370,7 @@ NarrowingKind StandardConversionSequence::getNarrowingKind(
// the underlying type is narrowing. This only arises for expressions of
// the form 'Enum{init}'.
if (auto *ET = ToType->getAs<EnumType>())
- ToType = ET->getDecl()->getIntegerType();
+ ToType = ET->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
switch (Second) {
// 'bool' is an integral type; dispatch to the right place to handle it.
@@ -1063,7 +1063,8 @@ static bool shouldAddReversedEqEq(Sema &S, SourceLocation OpLoc,
return true;
LookupResult Members(S, NotEqOp, OpLoc,
Sema::LookupNameKind::LookupMemberName);
- S.LookupQualifiedName(Members, RHSRec->getDecl());
+ S.LookupQualifiedName(Members,
+ RHSRec->getOriginalDecl()->getDefinitionOrSelf());
Members.suppressAccessDiagnostics();
for (NamedDecl *Op : Members)
if (FunctionsCorrespond(S.Context, EqFD, Op->getAsFunction()))
@@ -1268,7 +1269,7 @@ OverloadKind Sema::CheckOverload(Scope *S, FunctionDecl *New,
//
// Exception: if the scope is dependent and this is not a class
// member, the using declaration can only introduce an enumerator.
- if (UUD->getQualifier()->isDependent() && !UUD->isCXXClassMember()) {
+ if (UUD->getQualifier().isDependent() && !UUD->isCXXClassMember()) {
Match = *I;
return OverloadKind::NonFunction;
}
@@ -1471,9 +1472,8 @@ static bool IsOverloadOrOverrideImpl(Sema &SemaRef, FunctionDecl *New,
if (OldMethod->isImplicitObjectMemberFunction() &&
OldMethod->getParent() != NewMethod->getParent()) {
- QualType ParentType =
- SemaRef.Context.getTypeDeclType(OldMethod->getParent())
- .getCanonicalType();
+ CanQualType ParentType =
+ SemaRef.Context.getCanonicalTagType(OldMethod->getParent());
if (ParentType.getTypePtr() != BS.Ty)
return false;
BS.Ty = DS.Ty;
@@ -2293,7 +2293,7 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
== UO_AddrOf &&
"Non-address-of operator on non-static member address");
FromType = S.Context.getMemberPointerType(
- FromType, /*Qualifier=*/nullptr, Method->getParent());
+ FromType, /*Qualifier=*/std::nullopt, Method->getParent());
} else if (isa<UnaryOperator>(From->IgnoreParens())) {
assert(cast<UnaryOperator>(From->IgnoreParens())->getOpcode() ==
UO_AddrOf &&
@@ -2602,10 +2602,12 @@ IsTransparentUnionStandardConversion(Sema &S, Expr* From,
bool CStyle) {
const RecordType *UT = ToType->getAsUnionType();
- if (!UT || !UT->getDecl()->hasAttr<TransparentUnionAttr>())
+ if (!UT)
return false;
// The field to initialize within the transparent union.
- RecordDecl *UD = UT->getDecl();
+ const RecordDecl *UD = UT->getOriginalDecl()->getDefinitionOrSelf();
+ if (!UD->hasAttr<TransparentUnionAttr>())
+ return false;
// It's compatible if the expression matches any of the fields.
for (const auto *it : UD->fields()) {
if (IsStandardConversion(S, From, it->getType(), InOverloadResolution, SCS,
@@ -2663,15 +2665,17 @@ bool Sema::IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType) {
if (const EnumType *FromEnumType = FromType->getAs<EnumType>()) {
// C++0x 7.2p9: Note that this implicit enum to int conversion is not
// provided for a scoped enumeration.
- if (FromEnumType->getDecl()->isScoped())
+ const EnumDecl *FromED =
+ FromEnumType->getOriginalDecl()->getDefinitionOrSelf();
+ if (FromED->isScoped())
return false;
// We can perform an integral promotion to the underlying type of the enum,
// even if that's not the promoted type. Note that the check for promoting
// the underlying type is based on the type alone, and does not consider
// the bitfield-ness of the actual source expression.
- if (FromEnumType->getDecl()->isFixed()) {
- QualType Underlying = FromEnumType->getDecl()->getIntegerType();
+ if (FromED->isFixed()) {
+ QualType Underlying = FromED->getIntegerType();
return Context.hasSameUnqualifiedType(Underlying, ToType) ||
IsIntegralPromotion(nullptr, Underlying, ToType);
}
@@ -2679,8 +2683,7 @@ bool Sema::IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType) {
// We have already pre-calculated the promotion type, so this is trivial.
if (ToType->isIntegerType() &&
isCompleteType(From->getBeginLoc(), FromType))
- return Context.hasSameUnqualifiedType(
- ToType, FromEnumType->getDecl()->getPromotionType());
+ return Context.hasSameUnqualifiedType(ToType, FromED->getPromotionType());
// C++ [conv.prom]p5:
// If the bit-field has an enumerated type, it is treated as any other
@@ -3347,12 +3350,12 @@ void Sema::HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
ToMember->getMostRecentCXXRecordDecl())) {
PDiag << ft_different_class;
if (ToMember->isSugared())
- PDiag << Context.getTypeDeclType(
+ PDiag << Context.getCanonicalTagType(
ToMember->getMostRecentCXXRecordDecl());
else
PDiag << ToMember->getQualifier();
if (FromMember->isSugared())
- PDiag << Context.getTypeDeclType(
+ PDiag << Context.getCanonicalTagType(
FromMember->getMostRecentCXXRecordDecl());
else
PDiag << FromMember->getQualifier();
@@ -3636,12 +3639,12 @@ Sema::MemberPointerConversionResult Sema::CheckMemberPointerConversion(
CXXRecordDecl *FromClass = FromPtrType->getMostRecentCXXRecordDecl(),
*ToClass = ToPtrType->getMostRecentCXXRecordDecl();
- auto DiagCls = [](PartialDiagnostic &PD, NestedNameSpecifier *Qual,
- const CXXRecordDecl *Cls) {
- if (declaresSameEntity(Qual->getAsRecordDecl(), Cls))
+ auto DiagCls = [&](PartialDiagnostic &PD, NestedNameSpecifier Qual,
+ const CXXRecordDecl *Cls) {
+ if (declaresSameEntity(Qual.getAsRecordDecl(), Cls))
PD << Qual;
else
- PD << QualType(Cls->getTypeForDecl(), 0);
+ PD << Context.getCanonicalTagType(Cls);
};
auto DiagFromTo = [&](PartialDiagnostic &PD) -> PartialDiagnostic & {
DiagCls(PD, FromPtrType->getQualifier(), FromClass);
@@ -3658,8 +3661,7 @@ Sema::MemberPointerConversionResult Sema::CheckMemberPointerConversion(
if (!IsDerivedFrom(OpRange.getBegin(), Derived, Base, Paths))
return MemberPointerConversionResult::NotDerived;
- if (Paths.isAmbiguous(
- Base->getTypeForDecl()->getCanonicalTypeUnqualified())) {
+ if (Paths.isAmbiguous(Context.getCanonicalTagType(Base))) {
PartialDiagnostic PD = PDiag(diag::err_ambiguous_memptr_conv);
PD << int(Direction);
DiagFromTo(PD) << getAmbiguousPathsDisplayString(Paths) << OpRange;
@@ -3687,8 +3689,8 @@ Sema::MemberPointerConversionResult Sema::CheckMemberPointerConversion(
? diag::err_upcast_to_inaccessible_base
: diag::err_downcast_from_inaccessible_base,
[&](PartialDiagnostic &PD) {
- NestedNameSpecifier *BaseQual = FromPtrType->getQualifier(),
- *DerivedQual = ToPtrType->getQualifier();
+ NestedNameSpecifier BaseQual = FromPtrType->getQualifier(),
+ DerivedQual = ToPtrType->getQualifier();
if (Direction == MemberPointerConversionDirection::Upcast)
std::swap(BaseQual, DerivedQual);
DiagCls(PD, DerivedQual, Derived);
@@ -3983,8 +3985,9 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
if (!S.isCompleteType(From->getExprLoc(), ToType)) {
// We're not going to find any constructors.
- } else if (CXXRecordDecl *ToRecordDecl
- = dyn_cast<CXXRecordDecl>(ToRecordType->getDecl())) {
+ } else if (auto *ToRecordDecl =
+ dyn_cast<CXXRecordDecl>(ToRecordType->getOriginalDecl())) {
+ ToRecordDecl = ToRecordDecl->getDefinitionOrSelf();
Expr **Args = &From;
unsigned NumArgs = 1;
@@ -4057,8 +4060,9 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
// No conversion functions from incomplete types.
} else if (const RecordType *FromRecordType =
From->getType()->getAs<RecordType>()) {
- if (CXXRecordDecl *FromRecordDecl
- = dyn_cast<CXXRecordDecl>(FromRecordType->getDecl())) {
+ if (auto *FromRecordDecl =
+ dyn_cast<CXXRecordDecl>(FromRecordType->getOriginalDecl())) {
+ FromRecordDecl = FromRecordDecl->getDefinitionOrSelf();
// Add all of the conversion functions as candidates.
const auto &Conversions = FromRecordDecl->getVisibleConversionFunctions();
for (auto I = Conversions.begin(), E = Conversions.end(); I != E; ++I) {
@@ -4509,7 +4513,8 @@ getFixedEnumPromtion(Sema &S, const StandardConversionSequence &SCS) {
if (!FromType->isEnumeralType())
return FixedEnumPromotion::None;
- EnumDecl *Enum = FromType->castAs<EnumType>()->getDecl();
+ EnumDecl *Enum =
+ FromType->castAs<EnumType>()->getOriginalDecl()->getDefinitionOrSelf();
if (!Enum->isFixed())
return FixedEnumPromotion::None;
@@ -5145,7 +5150,9 @@ FindConversionForRefInit(Sema &S, ImplicitConversionSequence &ICS,
Expr *Init, QualType T2, bool AllowRvalues,
bool AllowExplicit) {
assert(T2->isRecordType() && "Can only find conversions of record types.");
- auto *T2RecordDecl = cast<CXXRecordDecl>(T2->castAs<RecordType>()->getDecl());
+ auto *T2RecordDecl =
+ cast<CXXRecordDecl>(T2->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
OverloadCandidateSet CandidateSet(
DeclLoc, OverloadCandidateSet::CSK_InitByUserDefinedConversion);
@@ -5934,7 +5941,7 @@ static ImplicitConversionSequence TryObjectArgumentInitialization(
assert(FromType->isRecordType());
- QualType ClassType = S.Context.getTypeDeclType(ActingContext);
+ CanQualType ClassType = S.Context.getCanonicalTagType(ActingContext);
// C++98 [class.dtor]p2:
// A destructor can be invoked for a const, volatile or const volatile
// object.
@@ -6055,7 +6062,7 @@ static ImplicitConversionSequence TryObjectArgumentInitialization(
/// the implicit object parameter for the given Method with the given
/// expression.
ExprResult Sema::PerformImplicitObjectArgumentInitialization(
- Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl,
+ Expr *From, NestedNameSpecifier Qualifier, NamedDecl *FoundDecl,
CXXMethodDecl *Method) {
QualType FromRecordType, DestType;
QualType ImplicitParamRecordType = Method->getFunctionObjectParameterType();
@@ -6840,8 +6847,9 @@ ExprResult Sema::PerformContextualImplicitConversion(
UnresolvedSet<4>
ViableConversions; // These are *potentially* viable in C++1y.
UnresolvedSet<4> ExplicitConversions;
- const auto &Conversions =
- cast<CXXRecordDecl>(RecordTy->getDecl())->getVisibleConversionFunctions();
+ const auto &Conversions = cast<CXXRecordDecl>(RecordTy->getOriginalDecl())
+ ->getDefinitionOrSelf()
+ ->getVisibleConversionFunctions();
bool HadMultipleCandidates =
(std::distance(Conversions.begin(), Conversions.end()) > 1);
@@ -7160,7 +7168,8 @@ void Sema::AddOverloadCandidate(
// C++ [class.copy]p3:
// A member function template is never instantiated to perform the copy
// of a class object to an object of its class type.
- QualType ClassType = Context.getTypeDeclType(Constructor->getParent());
+ CanQualType ClassType =
+ Context.getCanonicalTagType(Constructor->getParent());
if (Args.size() == 1 && Constructor->isSpecializationCopyingObject() &&
(Context.hasSameUnqualifiedType(ClassType, Args[0]->getType()) ||
IsDerivedFrom(Args[0]->getBeginLoc(), Args[0]->getType(),
@@ -7181,8 +7190,8 @@ void Sema::AddOverloadCandidate(
if (Shadow && Args.size() == 1 && Constructor->getNumParams() >= 1 &&
Constructor->getParamDecl(0)->getType()->isReferenceType()) {
QualType P = Constructor->getParamDecl(0)->getType()->getPointeeType();
- QualType C = Context.getRecordType(Constructor->getParent());
- QualType D = Context.getRecordType(Shadow->getParent());
+ CanQualType C = Context.getCanonicalTagType(Constructor->getParent());
+ CanQualType D = Context.getCanonicalTagType(Shadow->getParent());
SourceLocation Loc = Args.front()->getExprLoc();
if ((Context.hasSameUnqualifiedType(P, C) || IsDerivedFrom(Loc, P, C)) &&
(Context.hasSameUnqualifiedType(D, P) || IsDerivedFrom(Loc, D, P))) {
@@ -7403,7 +7412,7 @@ static bool convertArgsForAvailabilityChecks(
"Shouldn't have `this` for ctors!");
assert(!Method->isStatic() && "Shouldn't have `this` for static methods!");
ExprResult R = S.PerformImplicitObjectArgumentInitialization(
- ThisArg, /*Qualifier=*/nullptr, Method, Method);
+ ThisArg, /*Qualifier=*/std::nullopt, Method, Method);
if (R.isInvalid())
return false;
ConvertedThis = R.get();
@@ -8159,29 +8168,24 @@ bool Sema::CheckNonDependentConversions(
ArgType = ArgType->getPointeeType();
}
- if (auto *RT = ParamType->getAs<RecordType>())
- if (auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
- RD && RD->hasDefinition()) {
- if (llvm::any_of(LookupConstructors(RD), [](NamedDecl *ND) {
- auto Info = getConstructorInfo(ND);
- if (!Info)
- return false;
- CXXConstructorDecl *Ctor = Info.Constructor;
- /// isConvertingConstructor takes copy/move constructors into
- /// account!
- return !Ctor->isCopyOrMoveConstructor() &&
- Ctor->isConvertingConstructor(
- /*AllowExplicit=*/true);
- }))
- return true;
- }
-
- if (auto *RT = ArgType->getAs<RecordType>())
- if (auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
- RD && RD->hasDefinition() &&
- !RD->getVisibleConversionFunctions().empty()) {
- return true;
- }
+ if (auto *RD = ParamType->getAsCXXRecordDecl();
+ RD && RD->hasDefinition() &&
+ llvm::any_of(LookupConstructors(RD), [](NamedDecl *ND) {
+ auto Info = getConstructorInfo(ND);
+ if (!Info)
+ return false;
+ CXXConstructorDecl *Ctor = Info.Constructor;
+ /// isConvertingConstructor takes copy/move constructors into
+ /// account!
+ return !Ctor->isCopyOrMoveConstructor() &&
+ Ctor->isConvertingConstructor(
+ /*AllowExplicit=*/true);
+ }))
+ return true;
+ if (auto *RD = ArgType->getAsCXXRecordDecl();
+ RD && RD->hasDefinition() &&
+ !RD->getVisibleConversionFunctions().empty())
+ return true;
return false;
};
@@ -8336,7 +8340,8 @@ void Sema::AddConversionCandidate(
if (const auto *FromPtrType = ObjectType->getAs<PointerType>())
ObjectType = FromPtrType->getPointeeType();
const auto *ConversionContext =
- cast<CXXRecordDecl>(ObjectType->castAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(ObjectType->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
// C++23 [over.best.ics.general]
// However, if the target is [...]
@@ -8736,15 +8741,16 @@ void Sema::AddMemberOperatorCandidates(OverloadedOperatorKind Op,
// qualified lookup of T1::operator@ (13.3.1.1.1); otherwise,
// the set of member candidates is empty.
if (const RecordType *T1Rec = T1->getAs<RecordType>()) {
+ bool IsComplete = isCompleteType(OpLoc, T1);
+ CXXRecordDecl *T1RD =
+ cast<CXXRecordDecl>(T1Rec->getOriginalDecl())->getDefinition();
// Complete the type if it can be completed.
- if (!isCompleteType(OpLoc, T1) && !T1Rec->isBeingDefined())
- return;
// If the type is neither complete nor being defined, bail out now.
- if (!T1Rec->getDecl()->getDefinition())
+ if (!T1RD || (!IsComplete && !T1RD->isBeingDefined()))
return;
LookupResult Operators(*this, OpName, OpLoc, LookupOrdinaryName);
- LookupQualifiedName(Operators, T1Rec->getDecl());
+ LookupQualifiedName(Operators, T1RD);
Operators.suppressAccessDiagnostics();
for (LookupResult::iterator Oper = Operators.begin(),
@@ -9009,8 +9015,8 @@ BuiltinCandidateTypeSet::AddMemberPointerWithMoreQualifiedTypeVariants(
if ((CVR | BaseCVR) != CVR) continue;
QualType QPointeeTy = Context.getCVRQualifiedType(PointeeTy, CVR);
- MemberPointerTypes.insert(
- Context.getMemberPointerType(QPointeeTy, /*Qualifier=*/nullptr, Cls));
+ MemberPointerTypes.insert(Context.getMemberPointerType(
+ QPointeeTy, /*Qualifier=*/std::nullopt, Cls));
}
return true;
@@ -9087,7 +9093,8 @@ BuiltinCandidateTypeSet::AddTypesConvertedFrom(QualType Ty,
if (!SemaRef.isCompleteType(Loc, Ty))
return;
- CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(TyRec->getDecl());
+ auto *ClassDecl =
+ cast<CXXRecordDecl>(TyRec->getOriginalDecl())->getDefinitionOrSelf();
for (NamedDecl *D : ClassDecl->getVisibleConversionFunctions()) {
if (isa<UsingShadowDecl>(D))
D = cast<UsingShadowDecl>(D)->getTargetDecl();
@@ -10199,7 +10206,7 @@ public:
if (S.getLangOpts().CPlusPlus11) {
for (QualType EnumTy : CandidateTypes[ArgIdx].enumeration_types()) {
- if (!EnumTy->castAs<EnumType>()->getDecl()->isScoped())
+ if (!EnumTy->castAs<EnumType>()->getOriginalDecl()->isScoped())
continue;
if (!AddedTypes.insert(S.Context.getCanonicalType(EnumTy)).second)
@@ -10955,9 +10962,9 @@ bool clang::isBetterOverloadCandidate(
isa<CXXConversionDecl>(Cand1.Function) ? TPOC_Conversion
: TPOC_Call,
Cand1.ExplicitCallArguments,
- Obj1Context ? QualType(Obj1Context->getTypeForDecl(), 0)
+ Obj1Context ? S.Context.getCanonicalTagType(Obj1Context)
: QualType{},
- Obj2Context ? QualType(Obj2Context->getTypeForDecl(), 0)
+ Obj2Context ? S.Context.getCanonicalTagType(Obj2Context)
: QualType{},
Cand1.isReversed() ^ Cand2.isReversed(), PartialOverloading)) {
return BetterTemplate == Cand1.Function->getPrimaryTemplate();
@@ -14863,8 +14870,8 @@ ExprResult Sema::BuildCXXMemberCallExpr(Expr *E, NamedDecl *FoundDecl,
if (Method->isExplicitObjectMemberFunction())
Exp = InitializeExplicitObjectArgument(*this, E, Method);
else
- Exp = PerformImplicitObjectArgumentInitialization(E, /*Qualifier=*/nullptr,
- FoundDecl, Method);
+ Exp = PerformImplicitObjectArgumentInitialization(
+ E, /*Qualifier=*/std::nullopt, FoundDecl, Method);
if (Exp.isInvalid())
return true;
@@ -15025,7 +15032,7 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
InputInit = InitializeExplicitObjectArgument(*this, Input, Method);
else
InputInit = PerformImplicitObjectArgumentInitialization(
- Input, /*Qualifier=*/nullptr, Best->FoundDecl, Method);
+ Input, /*Qualifier=*/std::nullopt, Best->FoundDecl, Method);
if (InputInit.isInvalid())
return ExprError();
Base = Input = InputInit.get();
@@ -15408,7 +15415,7 @@ ExprResult Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
ParamIdx = 1;
} else {
Arg0 = PerformImplicitObjectArgumentInitialization(
- Args[0], /*Qualifier=*/nullptr, Best->FoundDecl, Method);
+ Args[0], /*Qualifier=*/std::nullopt, Best->FoundDecl, Method);
}
Arg1 = PerformCopyInitialization(
InitializedEntity::InitializeParameter(
@@ -15878,7 +15885,7 @@ ExprResult Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
ArgExpr = Args;
} else {
ExprResult Arg0 = PerformImplicitObjectArgumentInitialization(
- Args[0], /*Qualifier=*/nullptr, Best->FoundDecl, Method);
+ Args[0], /*Qualifier=*/std::nullopt, Best->FoundDecl, Method);
if (Arg0.isInvalid())
return ExprError();
@@ -16074,7 +16081,7 @@ ExprResult Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
CXXMethodDecl *Method = nullptr;
bool HadMultipleCandidates = false;
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_public);
- NestedNameSpecifier *Qualifier = nullptr;
+ NestedNameSpecifier Qualifier = std::nullopt;
if (isa<MemberExpr>(NakedMemExpr)) {
MemExpr = cast<MemberExpr>(NakedMemExpr);
Method = cast<CXXMethodDecl>(MemExpr->getMemberDecl());
@@ -16346,7 +16353,7 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
const auto *Record = Object.get()->getType()->castAs<RecordType>();
LookupResult R(*this, OpName, LParenLoc, LookupOrdinaryName);
- LookupQualifiedName(R, Record->getDecl());
+ LookupQualifiedName(R, Record->getOriginalDecl()->getDefinitionOrSelf());
R.suppressAccessDiagnostics();
for (LookupResult::iterator Oper = R.begin(), OperEnd = R.end();
@@ -16390,8 +16397,9 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
// functions for each conversion function declared in an
// accessible base class provided the function is not hidden
// within T by another intervening declaration.
- const auto &Conversions =
- cast<CXXRecordDecl>(Record->getDecl())->getVisibleConversionFunctions();
+ const auto &Conversions = cast<CXXRecordDecl>(Record->getOriginalDecl())
+ ->getDefinitionOrSelf()
+ ->getVisibleConversionFunctions();
for (auto I = Conversions.begin(), E = Conversions.end();
!IgnoreSurrogateFunctions && I != E; ++I) {
NamedDecl *D = *I;
@@ -16541,7 +16549,7 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
IsError |= PrepareExplicitObjectArgument(*this, Method, Obj, Args, NewArgs);
} else {
ExprResult ObjRes = PerformImplicitObjectArgumentInitialization(
- Object.get(), /*Qualifier=*/nullptr, Best->FoundDecl, Method);
+ Object.get(), /*Qualifier=*/std::nullopt, Best->FoundDecl, Method);
if (ObjRes.isInvalid())
IsError = true;
else
@@ -16612,7 +16620,10 @@ ExprResult Sema::BuildOverloadedArrowExpr(Scope *S, Expr *Base,
return ExprError();
LookupResult R(*this, OpName, OpLoc, LookupOrdinaryName);
- LookupQualifiedName(R, Base->getType()->castAs<RecordType>()->getDecl());
+ LookupQualifiedName(R, Base->getType()
+ ->castAs<RecordType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf());
R.suppressAccessDiagnostics();
for (LookupResult::iterator Oper = R.begin(), OperEnd = R.end();
@@ -16686,7 +16697,7 @@ ExprResult Sema::BuildOverloadedArrowExpr(Scope *S, Expr *Base,
Base = R.get();
} else {
ExprResult BaseResult = PerformImplicitObjectArgumentInitialization(
- Base, /*Qualifier=*/nullptr, Best->FoundDecl, Method);
+ Base, /*Qualifier=*/std::nullopt, Best->FoundDecl, Method);
if (BaseResult.isInvalid())
return ExprError();
Base = BaseResult.get();
@@ -16940,7 +16951,7 @@ ExprResult Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
assert(isa<DeclRefExpr>(SubExpr.get()) &&
"fixed to something other than a decl ref");
- NestedNameSpecifier *Qualifier =
+ NestedNameSpecifier Qualifier =
cast<DeclRefExpr>(SubExpr.get())->getQualifier();
assert(Qualifier &&
"fixed to a member ref with no nested name qualifier");
diff --git a/clang/lib/Sema/SemaPPC.cpp b/clang/lib/Sema/SemaPPC.cpp
index d5c83ae..7c82e54 100644
--- a/clang/lib/Sema/SemaPPC.cpp
+++ b/clang/lib/Sema/SemaPPC.cpp
@@ -41,8 +41,10 @@ void SemaPPC::checkAIXMemberAlignment(SourceLocation Loc, const Expr *Arg) {
return;
QualType ArgType = Arg->getType();
- for (const FieldDecl *FD :
- ArgType->castAs<RecordType>()->getDecl()->fields()) {
+ for (const FieldDecl *FD : ArgType->castAs<RecordType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->fields()) {
if (const auto *AA = FD->getAttr<AlignedAttr>()) {
CharUnits Alignment = getASTContext().toCharUnitsFromBits(
AA->getAlignment(getASTContext()));
diff --git a/clang/lib/Sema/SemaSYCL.cpp b/clang/lib/Sema/SemaSYCL.cpp
index 4683c81..dd25336 100644
--- a/clang/lib/Sema/SemaSYCL.cpp
+++ b/clang/lib/Sema/SemaSYCL.cpp
@@ -221,7 +221,7 @@ static SourceLocation SourceLocationForUserDeclaredType(QualType QT) {
SourceLocation Loc;
const Type *T = QT->getUnqualifiedDesugaredType();
if (const TagType *TT = dyn_cast<TagType>(T))
- Loc = TT->getDecl()->getLocation();
+ Loc = TT->getOriginalDecl()->getLocation();
else if (const ObjCInterfaceType *ObjCIT = dyn_cast<ObjCInterfaceType>(T))
Loc = ObjCIT->getDecl()->getLocation();
return Loc;
diff --git a/clang/lib/Sema/SemaStmt.cpp b/clang/lib/Sema/SemaStmt.cpp
index a5f9202..efc0b35 100644
--- a/clang/lib/Sema/SemaStmt.cpp
+++ b/clang/lib/Sema/SemaStmt.cpp
@@ -798,7 +798,7 @@ bool Sema::checkMustTailAttr(const Stmt *St, const Attr &MTA) {
const auto *MPT =
CalleeBinOp->getRHS()->getType()->castAs<MemberPointerType>();
CalleeType.This =
- Context.getTypeDeclType(MPT->getMostRecentCXXRecordDecl());
+ Context.getCanonicalTagType(MPT->getMostRecentCXXRecordDecl());
CalleeType.Func = MPT->getPointeeType()->castAs<FunctionProtoType>();
CalleeType.MemberType = FuncType::ft_pointer_to_member;
} else if (isa<CXXPseudoDestructorExpr>(CalleeExpr)) {
@@ -1254,7 +1254,7 @@ static bool ShouldDiagnoseSwitchCaseNotInEnum(const Sema &S,
dyn_cast<DeclRefExpr>(CaseExpr->IgnoreParenImpCasts())) {
if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
QualType VarType = VD->getType();
- QualType EnumType = S.Context.getTypeDeclType(ED);
+ CanQualType EnumType = S.Context.getCanonicalTagType(ED);
if (VD->hasGlobalStorage() && VarType.isConstQualified() &&
S.Context.hasSameUnqualifiedType(EnumType, VarType))
return false;
@@ -1284,11 +1284,11 @@ static void checkEnumTypesInSwitchStmt(Sema &S, const Expr *Cond,
return;
// Ignore anonymous enums.
- if (!CondEnumType->getDecl()->getIdentifier() &&
- !CondEnumType->getDecl()->getTypedefNameForAnonDecl())
+ if (!CondEnumType->getOriginalDecl()->getIdentifier() &&
+ !CondEnumType->getOriginalDecl()->getTypedefNameForAnonDecl())
return;
- if (!CaseEnumType->getDecl()->getIdentifier() &&
- !CaseEnumType->getDecl()->getTypedefNameForAnonDecl())
+ if (!CaseEnumType->getOriginalDecl()->getIdentifier() &&
+ !CaseEnumType->getOriginalDecl()->getTypedefNameForAnonDecl())
return;
if (S.Context.hasSameUnqualifiedType(CondType, CaseType))
@@ -1601,9 +1601,11 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
// If switch has default case, then ignore it.
if (!CaseListIsErroneous && !CaseListIsIncomplete && !HasConstantCond &&
- ET && ET->getDecl()->isCompleteDefinition() &&
- !ET->getDecl()->enumerators().empty()) {
- const EnumDecl *ED = ET->getDecl();
+ ET) {
+ const EnumDecl *ED = ET->getOriginalDecl()->getDefinitionOrSelf();
+ if (!ED->isCompleteDefinition() || ED->enumerators().empty())
+ goto enum_out;
+
EnumValsTy EnumVals;
// Gather all enum values, set their type and sort them,
@@ -1716,6 +1718,7 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
if (!hasCasesNotInSwitch)
SS->setAllEnumCasesCovered();
}
+ enum_out:;
}
if (BodyStmt)
@@ -1745,7 +1748,7 @@ Sema::DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
if (SrcExpr->isTypeDependent() || SrcExpr->isValueDependent())
return;
- const EnumDecl *ED = ET->getDecl();
+ const EnumDecl *ED = ET->getOriginalDecl()->getDefinitionOrSelf();
if (!ED->isClosed())
return;
@@ -3710,7 +3713,7 @@ private:
Sema &S;
};
bool LocalTypedefNameReferencer::VisitRecordType(RecordType *RT) {
- auto *R = dyn_cast<CXXRecordDecl>(RT->getDecl());
+ auto *R = dyn_cast<CXXRecordDecl>(RT->getOriginalDecl());
if (!R || !R->isLocalClass() || !R->isLocalClass()->isExternallyVisible() ||
R->isDependentType())
return true;
@@ -3929,7 +3932,7 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
<< RetValExp->getSourceRange();
if (FD->hasAttr<CmseNSEntryAttr>() && RetValExp) {
if (const auto *RT = dyn_cast<RecordType>(FnRetType.getCanonicalType())) {
- if (RT->getDecl()->isOrContainsUnion())
+ if (RT->getOriginalDecl()->isOrContainsUnion())
Diag(RetValExp->getBeginLoc(), diag::warn_cmse_nonsecure_union) << 1;
}
}
@@ -4620,7 +4623,8 @@ void Sema::ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
// Build the context parameter
DeclContext *DC = CapturedDecl::castToDeclContext(CD);
IdentifierInfo *ParamName = &Context.Idents.get("__context");
- QualType ParamType = Context.getPointerType(Context.getTagDeclType(RD));
+ CanQualType ParamType =
+ Context.getPointerType(Context.getCanonicalTagType(RD));
auto *Param =
ImplicitParamDecl::Create(Context, DC, Loc, ParamName, ParamType,
ImplicitParamKind::CapturedContext);
@@ -4662,9 +4666,10 @@ void Sema::ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
assert(!ContextIsFound &&
"null type has been found already for '__context' parameter");
IdentifierInfo *ParamName = &Context.Idents.get("__context");
- QualType ParamType = Context.getPointerType(Context.getTagDeclType(RD))
- .withConst()
- .withRestrict();
+ QualType ParamType =
+ Context.getPointerType(Context.getCanonicalTagType(RD))
+ .withConst()
+ .withRestrict();
auto *Param =
ImplicitParamDecl::Create(Context, DC, Loc, ParamName, ParamType,
ImplicitParamKind::CapturedContext);
@@ -4684,7 +4689,8 @@ void Sema::ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
if (!ContextIsFound) {
// Add __context implicitly if it is not specified.
IdentifierInfo *ParamName = &Context.Idents.get("__context");
- QualType ParamType = Context.getPointerType(Context.getTagDeclType(RD));
+ CanQualType ParamType =
+ Context.getPointerType(Context.getCanonicalTagType(RD));
auto *Param =
ImplicitParamDecl::Create(Context, DC, Loc, ParamName, ParamType,
ImplicitParamKind::CapturedContext);
diff --git a/clang/lib/Sema/SemaStmtAsm.cpp b/clang/lib/Sema/SemaStmtAsm.cpp
index 4507a21..cd8b98c 100644
--- a/clang/lib/Sema/SemaStmtAsm.cpp
+++ b/clang/lib/Sema/SemaStmtAsm.cpp
@@ -894,7 +894,7 @@ bool Sema::LookupInlineAsmField(StringRef Base, StringRef Member,
QT = PT->getPointeeType();
RT = QT->getAs<RecordType>();
} else if (TypeDecl *TD = dyn_cast<TypeDecl>(FoundDecl))
- RT = TD->getTypeForDecl()->getAs<RecordType>();
+ RT = Context.getTypeDeclType(TD)->getAs<RecordType>();
else if (FieldDecl *TD = dyn_cast<FieldDecl>(FoundDecl))
RT = TD->getType()->getAs<RecordType>();
if (!RT)
@@ -907,7 +907,8 @@ bool Sema::LookupInlineAsmField(StringRef Base, StringRef Member,
LookupResult FieldResult(*this, &Context.Idents.get(NextMember),
SourceLocation(), LookupMemberName);
- if (!LookupQualifiedName(FieldResult, RT->getDecl()))
+ RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
+ if (!LookupQualifiedName(FieldResult, RD))
return true;
if (!FieldResult.isSingleResult())
@@ -919,7 +920,7 @@ bool Sema::LookupInlineAsmField(StringRef Base, StringRef Member,
if (!FD)
return true;
- const ASTRecordLayout &RL = Context.getASTRecordLayout(RT->getDecl());
+ const ASTRecordLayout &RL = Context.getASTRecordLayout(RD);
unsigned i = FD->getFieldIndex();
CharUnits Result = Context.toCharUnitsFromBits(RL.getFieldOffset(i));
Offset += (unsigned)Result.getQuantity();
@@ -951,7 +952,8 @@ Sema::LookupInlineAsmVarDeclField(Expr *E, StringRef Member,
LookupResult FieldResult(*this, &Context.Idents.get(Member), AsmLoc,
LookupMemberName);
- if (!LookupQualifiedName(FieldResult, RT->getDecl()))
+ if (!LookupQualifiedName(FieldResult,
+ RT->getOriginalDecl()->getDefinitionOrSelf()))
return ExprResult();
// Only normal and indirect field results will work.
diff --git a/clang/lib/Sema/SemaSwift.cpp b/clang/lib/Sema/SemaSwift.cpp
index 4000bef..a99222c 100644
--- a/clang/lib/Sema/SemaSwift.cpp
+++ b/clang/lib/Sema/SemaSwift.cpp
@@ -130,7 +130,7 @@ static bool isErrorParameter(Sema &S, QualType QT) {
// Check for CFError**.
if (const auto *PT = Pointee->getAs<PointerType>())
if (const auto *RT = PT->getPointeeType()->getAs<RecordType>())
- if (S.ObjC().isCFError(RT->getDecl()))
+ if (S.ObjC().isCFError(RT->getOriginalDecl()->getDefinitionOrSelf()))
return true;
return false;
@@ -272,7 +272,8 @@ static void checkSwiftAsyncErrorBlock(Sema &S, Decl *D,
// Check for CFError *.
if (const auto *PtrTy = Param->getAs<PointerType>()) {
if (const auto *RT = PtrTy->getPointeeType()->getAs<RecordType>()) {
- if (S.ObjC().isCFError(RT->getDecl())) {
+ if (S.ObjC().isCFError(
+ RT->getOriginalDecl()->getDefinitionOrSelf())) {
AnyErrorParams = true;
break;
}
diff --git a/clang/lib/Sema/SemaTemplate.cpp b/clang/lib/Sema/SemaTemplate.cpp
index b6b8932..72d98ca 100644
--- a/clang/lib/Sema/SemaTemplate.cpp
+++ b/clang/lib/Sema/SemaTemplate.cpp
@@ -293,7 +293,7 @@ TemplateNameKind Sema::isTemplateName(Scope *S,
FoundUsingShadow ? TemplateName(FoundUsingShadow) : TemplateName(TD);
assert(!FoundUsingShadow || FoundUsingShadow->getTargetDecl() == TD);
if (!SS.isInvalid()) {
- NestedNameSpecifier *Qualifier = SS.getScopeRep();
+ NestedNameSpecifier Qualifier = SS.getScopeRep();
Template = Context.getQualifiedTemplateName(Qualifier, hasTemplateKeyword,
Template);
}
@@ -367,9 +367,8 @@ bool Sema::DiagnoseUnknownTemplateName(const IdentifierInfo &II,
// The code is missing a 'template' keyword prior to the dependent template
// name.
- NestedNameSpecifier *Qualifier = (NestedNameSpecifier *)SS->getScopeRep();
SuggestedTemplate = TemplateTy::make(Context.getDependentTemplateName(
- {Qualifier, &II, /*HasTemplateKeyword=*/false}));
+ {SS->getScopeRep(), &II, /*HasTemplateKeyword=*/false}));
Diag(IILoc, diag::err_template_kw_missing)
<< SuggestedTemplate.get()
<< FixItHint::CreateInsertion(IILoc, "template ");
@@ -401,7 +400,9 @@ bool Sema::LookupTemplateName(LookupResult &Found, Scope *S, CXXScopeSpec &SS,
IsDependent = !LookupCtx && ObjectType->isDependentType();
assert((IsDependent || !ObjectType->isIncompleteType() ||
!ObjectType->getAs<TagType>() ||
- ObjectType->castAs<TagType>()->isBeingDefined()) &&
+ ObjectType->castAs<TagType>()
+ ->getOriginalDecl()
+ ->isEntityBeingDefined()) &&
"Caller should have completed object type");
// Template names cannot appear inside an Objective-C class or object type
@@ -801,9 +802,9 @@ bool Sema::DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
if (!Complain || (PatternDef && PatternDef->isInvalidDecl()))
return true;
- QualType InstantiationTy;
+ CanQualType InstantiationTy;
if (TagDecl *TD = dyn_cast<TagDecl>(Instantiation))
- InstantiationTy = Context.getTypeDeclType(TD);
+ InstantiationTy = Context.getCanonicalTagType(TD);
if (PatternDef) {
Diag(PointOfInstantiation,
diag::err_template_instantiate_within_definition)
@@ -911,7 +912,7 @@ static TemplateArgumentLoc translateTemplateArgument(Sema &SemaRef,
TypeSourceInfo *DI;
QualType T = SemaRef.GetTypeFromParser(Arg.getAsType(), &DI);
if (!DI)
- DI = SemaRef.Context.getTrivialTypeSourceInfo(T, Arg.getLocation());
+ DI = SemaRef.Context.getTrivialTypeSourceInfo(T, Arg.getNameLoc());
return TemplateArgumentLoc(TemplateArgument(T), DI);
}
@@ -928,9 +929,9 @@ static TemplateArgumentLoc translateTemplateArgument(Sema &SemaRef,
else
TArg = Template;
return TemplateArgumentLoc(
- SemaRef.Context, TArg,
+ SemaRef.Context, TArg, Arg.getTemplateKwLoc(),
Arg.getScopeSpec().getWithLocInContext(SemaRef.Context),
- Arg.getLocation(), Arg.getEllipsisLoc());
+ Arg.getNameLoc(), Arg.getEllipsisLoc());
}
}
@@ -971,15 +972,12 @@ ParsedTemplateArgument Sema::ActOnTemplateTypeArgument(TypeResult ParsedType) {
TL = PET.getPatternLoc();
}
- CXXScopeSpec SS;
- if (auto ET = TL.getAs<ElaboratedTypeLoc>()) {
- SS.Adopt(ET.getQualifierLoc());
- TL = ET.getNamedTypeLoc();
- }
-
if (auto DTST = TL.getAs<DeducedTemplateSpecializationTypeLoc>()) {
TemplateName Name = DTST.getTypePtr()->getTemplateName();
- ParsedTemplateArgument Result(SS, TemplateTy::make(Name),
+ CXXScopeSpec SS;
+ SS.Adopt(DTST.getQualifierLoc());
+ ParsedTemplateArgument Result(/*TemplateKwLoc=*/SourceLocation(), SS,
+ TemplateTy::make(Name),
DTST.getTemplateNameLoc());
if (EllipsisLoc.isValid())
Result = Result.getTemplatePackExpansion(EllipsisLoc);
@@ -2145,11 +2143,11 @@ DeclResult Sema::CheckClassTemplate(
bool ShouldAddRedecl =
!(TUK == TagUseKind::Friend && CurContext->isDependentContext());
- CXXRecordDecl *NewClass =
- CXXRecordDecl::Create(Context, Kind, SemanticContext, KWLoc, NameLoc, Name,
- PrevClassTemplate && ShouldAddRedecl ?
- PrevClassTemplate->getTemplatedDecl() : nullptr,
- /*DelayTypeCreation=*/true);
+ CXXRecordDecl *NewClass = CXXRecordDecl::Create(
+ Context, Kind, SemanticContext, KWLoc, NameLoc, Name,
+ PrevClassTemplate && ShouldAddRedecl
+ ? PrevClassTemplate->getTemplatedDecl()
+ : nullptr);
SetNestedNameSpecifier(*this, NewClass, SS);
if (NumOuterTemplateParamLists > 0)
NewClass->setTemplateParameterListsInfo(
@@ -2178,12 +2176,6 @@ DeclResult Sema::CheckClassTemplate(
if (ModulePrivateLoc.isValid())
NewTemplate->setModulePrivate();
- // Build the type for the class template declaration now.
- QualType T = NewTemplate->getInjectedClassNameSpecialization();
- T = Context.getInjectedClassNameType(NewClass, T);
- assert(T->isDependentType() && "Class template type is not dependent?");
- (void)T;
-
// If we are providing an explicit specialization of a member that is a
// class template, make a note of that.
if (PrevClassTemplate &&
@@ -2674,11 +2666,11 @@ struct DependencyChecker : DynamicRecursiveASTVisitor {
return DynamicRecursiveASTVisitor::TraverseStmt(S);
}
- bool TraverseTypeLoc(TypeLoc TL) override {
+ bool TraverseTypeLoc(TypeLoc TL, bool TraverseQualifier = true) override {
if (IgnoreNonTypeDependent && !TL.isNull() &&
!TL.getType()->isDependentType())
return true;
- return DynamicRecursiveASTVisitor::TraverseTypeLoc(TL);
+ return DynamicRecursiveASTVisitor::TraverseTypeLoc(TL, TraverseQualifier);
}
bool VisitTemplateTypeParmTypeLoc(TemplateTypeParmTypeLoc TL) override {
@@ -2727,8 +2719,12 @@ struct DependencyChecker : DynamicRecursiveASTVisitor {
return TraverseTemplateArgument(T->getArgumentPack());
}
- bool TraverseInjectedClassNameType(InjectedClassNameType *T) override {
- return TraverseType(T->getInjectedSpecializationType());
+ bool TraverseInjectedClassNameType(InjectedClassNameType *T,
+ bool TraverseQualifier) override {
+ // An InjectedClassNameType will never have a dependent template name,
+ // so no need to traverse it.
+ return TraverseTemplateArguments(
+ T->getTemplateArgs(T->getOriginalDecl()->getASTContext()));
}
};
} // end anonymous namespace
@@ -2751,14 +2747,14 @@ static SourceRange getRangeOfTypeInNestedNameSpecifier(ASTContext &Context,
QualType T,
const CXXScopeSpec &SS) {
NestedNameSpecifierLoc NNSLoc(SS.getScopeRep(), SS.location_data());
- while (NestedNameSpecifier *NNS = NNSLoc.getNestedNameSpecifier()) {
- if (const Type *CurType = NNS->getAsType()) {
- if (Context.hasSameUnqualifiedType(T, QualType(CurType, 0)))
- return NNSLoc.getTypeLoc().getSourceRange();
- } else
+ for (;;) {
+ NestedNameSpecifier NNS = NNSLoc.getNestedNameSpecifier();
+ if (NNS.getKind() != NestedNameSpecifier::Kind::Type)
break;
-
- NNSLoc = NNSLoc.getPrefix();
+ if (Context.hasSameUnqualifiedType(T, QualType(NNS.getAsType(), 0)))
+ return NNSLoc.castAsTypeLoc().getSourceRange();
+ // FIXME: This will always be empty.
+ NNSLoc = NNSLoc.getAsNamespaceAndPrefix().Prefix;
}
return SourceRange();
@@ -2777,12 +2773,13 @@ TemplateParameterList *Sema::MatchTemplateParametersToScopeSpecifier(
// by the nested-name-specifier and walking out until we run out of types.
SmallVector<QualType, 4> NestedTypes;
QualType T;
- if (SS.getScopeRep()) {
- if (CXXRecordDecl *Record
- = dyn_cast_or_null<CXXRecordDecl>(computeDeclContext(SS, true)))
- T = Context.getTypeDeclType(Record);
+ if (NestedNameSpecifier Qualifier = SS.getScopeRep();
+ Qualifier.getKind() == NestedNameSpecifier::Kind::Type) {
+ if (CXXRecordDecl *Record =
+ dyn_cast_or_null<CXXRecordDecl>(computeDeclContext(SS, true)))
+ T = Context.getCanonicalTagType(Record);
else
- T = QualType(SS.getScopeRep()->getAsType(), 0);
+ T = QualType(Qualifier.getAsType(), 0);
}
// If we found an explicit specialization that prevents us from needing
@@ -2830,9 +2827,10 @@ TemplateParameterList *Sema::MatchTemplateParametersToScopeSpecifier(
// Look one step prior in a dependent template specialization type.
if (const DependentTemplateSpecializationType *DependentTST
= T->getAs<DependentTemplateSpecializationType>()) {
- if (NestedNameSpecifier *NNS =
- DependentTST->getDependentTemplateName().getQualifier())
- T = QualType(NNS->getAsType(), 0);
+ if (NestedNameSpecifier NNS =
+ DependentTST->getDependentTemplateName().getQualifier();
+ NNS.getKind() == NestedNameSpecifier::Kind::Type)
+ T = QualType(NNS.getAsType(), 0);
else
T = QualType();
continue;
@@ -2840,8 +2838,9 @@ TemplateParameterList *Sema::MatchTemplateParametersToScopeSpecifier(
// Look one step prior in a dependent name type.
if (const DependentNameType *DependentName = T->getAs<DependentNameType>()){
- if (NestedNameSpecifier *NNS = DependentName->getQualifier())
- T = QualType(NNS->getAsType(), 0);
+ if (NestedNameSpecifier NNS = DependentName->getQualifier();
+ NNS.getKind() == NestedNameSpecifier::Kind::Type)
+ T = QualType(NNS.getAsType(), 0);
else
T = QualType();
continue;
@@ -2851,7 +2850,7 @@ TemplateParameterList *Sema::MatchTemplateParametersToScopeSpecifier(
if (const EnumType *EnumT = T->getAs<EnumType>()) {
// FIXME: Forward-declared enums require a TSK_ExplicitSpecialization
// check here.
- EnumDecl *Enum = EnumT->getDecl();
+ EnumDecl *Enum = EnumT->getOriginalDecl();
// Get to the parent type.
if (TypeDecl *Parent = dyn_cast<TypeDecl>(Enum->getParent()))
@@ -3151,7 +3150,8 @@ void Sema::NoteAllFoundTemplates(TemplateName Name) {
}
}
-static QualType builtinCommonTypeImpl(Sema &S, TemplateName BaseTemplate,
+static QualType builtinCommonTypeImpl(Sema &S, ElaboratedTypeKeyword Keyword,
+ TemplateName BaseTemplate,
SourceLocation TemplateLoc,
ArrayRef<TemplateArgument> Ts) {
auto lookUpCommonType = [&](TemplateArgument T1,
@@ -3159,7 +3159,8 @@ static QualType builtinCommonTypeImpl(Sema &S, TemplateName BaseTemplate,
// Don't bother looking for other specializations if both types are
// builtins - users aren't allowed to specialize for them
if (T1.getAsType()->isBuiltinType() && T2.getAsType()->isBuiltinType())
- return builtinCommonTypeImpl(S, BaseTemplate, TemplateLoc, {T1, T2});
+ return builtinCommonTypeImpl(S, Keyword, BaseTemplate, TemplateLoc,
+ {T1, T2});
TemplateArgumentListInfo Args;
Args.addArgument(TemplateArgumentLoc(
@@ -3173,7 +3174,7 @@ static QualType builtinCommonTypeImpl(Sema &S, TemplateName BaseTemplate,
Sema::ContextRAII TUContext(S, S.Context.getTranslationUnitDecl());
QualType BaseTemplateInst =
- S.CheckTemplateIdType(BaseTemplate, TemplateLoc, Args);
+ S.CheckTemplateIdType(Keyword, BaseTemplate, TemplateLoc, Args);
if (SFINAE.hasErrorOccurred())
return QualType();
@@ -3286,7 +3287,7 @@ static QualType builtinCommonTypeImpl(Sema &S, TemplateName BaseTemplate,
}
static bool isInVkNamespace(const RecordType *RT) {
- DeclContext *DC = RT->getDecl()->getDeclContext();
+ DeclContext *DC = RT->getOriginalDecl()->getDeclContext();
if (!DC)
return false;
@@ -3303,8 +3304,9 @@ static SpirvOperand checkHLSLSpirvTypeOperand(Sema &SemaRef,
if (auto *RT = OperandArg->getAs<RecordType>()) {
bool Literal = false;
SourceLocation LiteralLoc;
- if (isInVkNamespace(RT) && RT->getDecl()->getName() == "Literal") {
- auto SpecDecl = dyn_cast<ClassTemplateSpecializationDecl>(RT->getDecl());
+ if (isInVkNamespace(RT) && RT->getOriginalDecl()->getName() == "Literal") {
+ auto SpecDecl =
+ dyn_cast<ClassTemplateSpecializationDecl>(RT->getOriginalDecl());
assert(SpecDecl);
const TemplateArgumentList &LiteralArgs = SpecDecl->getTemplateArgs();
@@ -3315,8 +3317,9 @@ static SpirvOperand checkHLSLSpirvTypeOperand(Sema &SemaRef,
}
if (RT && isInVkNamespace(RT) &&
- RT->getDecl()->getName() == "integral_constant") {
- auto SpecDecl = dyn_cast<ClassTemplateSpecializationDecl>(RT->getDecl());
+ RT->getOriginalDecl()->getName() == "integral_constant") {
+ auto SpecDecl =
+ dyn_cast<ClassTemplateSpecializationDecl>(RT->getOriginalDecl());
assert(SpecDecl);
const TemplateArgumentList &ConstantArgs = SpecDecl->getTemplateArgs();
@@ -3338,11 +3341,10 @@ static SpirvOperand checkHLSLSpirvTypeOperand(Sema &SemaRef,
return SpirvOperand::createType(OperandArg);
}
-static QualType
-checkBuiltinTemplateIdType(Sema &SemaRef, BuiltinTemplateDecl *BTD,
- ArrayRef<TemplateArgument> Converted,
- SourceLocation TemplateLoc,
- TemplateArgumentListInfo &TemplateArgs) {
+static QualType checkBuiltinTemplateIdType(
+ Sema &SemaRef, ElaboratedTypeKeyword Keyword, BuiltinTemplateDecl *BTD,
+ ArrayRef<TemplateArgument> Converted, SourceLocation TemplateLoc,
+ TemplateArgumentListInfo &TemplateArgs) {
ASTContext &Context = SemaRef.getASTContext();
switch (BTD->getBuiltinTemplateKind()) {
@@ -3389,7 +3391,7 @@ checkBuiltinTemplateIdType(Sema &SemaRef, BuiltinTemplateDecl *BTD,
// The first template argument will be reused as the template decl that
// our synthetic template arguments will be applied to.
- return SemaRef.CheckTemplateIdType(Converted[0].getAsTemplate(),
+ return SemaRef.CheckTemplateIdType(Keyword, Converted[0].getAsTemplate(),
TemplateLoc, SyntheticTemplateArgs);
}
@@ -3426,14 +3428,16 @@ checkBuiltinTemplateIdType(Sema &SemaRef, BuiltinTemplateDecl *BTD,
TemplateName BaseTemplate = Converted[0].getAsTemplate();
ArrayRef<TemplateArgument> Ts = Converted[3].getPackAsArray();
- if (auto CT = builtinCommonTypeImpl(SemaRef, BaseTemplate, TemplateLoc, Ts);
+ if (auto CT = builtinCommonTypeImpl(SemaRef, Keyword, BaseTemplate,
+ TemplateLoc, Ts);
!CT.isNull()) {
TemplateArgumentListInfo TAs;
TAs.addArgument(TemplateArgumentLoc(
TemplateArgument(CT), SemaRef.Context.getTrivialTypeSourceInfo(
CT, TemplateArgs[1].getLocation())));
TemplateName HasTypeMember = Converted[1].getAsTemplate();
- return SemaRef.CheckTemplateIdType(HasTypeMember, TemplateLoc, TAs);
+ return SemaRef.CheckTemplateIdType(Keyword, HasTypeMember, TemplateLoc,
+ TAs);
}
QualType HasNoTypeMember = Converted[2].getAsType();
return HasNoTypeMember;
@@ -3545,7 +3549,7 @@ public:
if (DR && DR->getQualifier()) {
// If this is a qualified name, expand the template arguments in nested
// qualifiers.
- DR->getQualifier()->print(OS, Policy, true);
+ DR->getQualifier().print(OS, Policy, true);
// Then print the decl itself.
const ValueDecl *VD = DR->getDecl();
OS << VD->getName();
@@ -3610,7 +3614,8 @@ Sema::findFailedBooleanCondition(Expr *Cond) {
return { FailedCond, Description };
}
-QualType Sema::CheckTemplateIdType(TemplateName Name,
+QualType Sema::CheckTemplateIdType(ElaboratedTypeKeyword Keyword,
+ TemplateName Name,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs) {
// FIXME: 'getUnderlying' loses SubstTemplateTemplateParm nodes from alias
@@ -3661,6 +3666,18 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
} else if (TypeAliasTemplateDecl *AliasTemplate =
dyn_cast<TypeAliasTemplateDecl>(Template)) {
+ // C++0x [dcl.type.elab]p2:
+ // If the identifier resolves to a typedef-name or the simple-template-id
+ // resolves to an alias template specialization, the
+ // elaborated-type-specifier is ill-formed.
+ if (Keyword != ElaboratedTypeKeyword::None &&
+ Keyword != ElaboratedTypeKeyword::Typename) {
+ SemaRef.Diag(TemplateLoc, diag::err_tag_reference_non_tag)
+ << AliasTemplate << NonTagKind::TypeAliasTemplate
+ << KeywordHelpers::getTagTypeKindForKeyword(Keyword);
+ SemaRef.Diag(AliasTemplate->getLocation(), diag::note_declared_at);
+ }
+
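A minimal standalone sketch of the kind of code the new check above diagnoses, per the quoted [dcl.type.elab]p2 wording (illustrative names, not taken from the patch):

```cpp
template <class T> struct S {};
template <class T> using A = S<T>;

// Ill-formed: the simple-template-id 'A<int>' resolves to an alias template
// specialization, so it cannot appear in an elaborated-type-specifier.
struct A<int> x;
```

The `typename` keyword is exempted by the `Keyword != ElaboratedTypeKeyword::Typename` condition, so only tag keywords (`struct`, `class`, `union`, `enum`) trigger the diagnostic.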
// Find the canonical type for this type alias template specialization.
TypeAliasDecl *Pattern = AliasTemplate->getTemplatedDecl();
if (Pattern->isInvalidDecl())
@@ -3728,8 +3745,8 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
return QualType();
}
} else if (auto *BTD = dyn_cast<BuiltinTemplateDecl>(Template)) {
- CanonType = checkBuiltinTemplateIdType(*this, BTD, CTAI.SugaredConverted,
- TemplateLoc, TemplateArgs);
+ CanonType = checkBuiltinTemplateIdType(
+ *this, Keyword, BTD, CTAI.SugaredConverted, TemplateLoc, TemplateArgs);
} else if (Name.isDependent() ||
TemplateSpecializationType::anyDependentTemplateArguments(
TemplateArgs, CTAI.CanonicalConverted)) {
@@ -3768,16 +3785,15 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
// Fetch the injected class name type and check whether its
// injected type is equal to the type we just built.
- QualType ICNT = Context.getTypeDeclType(Record);
- QualType Injected = cast<InjectedClassNameType>(ICNT)
- ->getInjectedSpecializationType();
+ CanQualType ICNT = Context.getCanonicalTagType(Record);
+ CanQualType Injected =
+ Record->getCanonicalTemplateSpecializationType(Context);
- if (CanonType != Injected->getCanonicalTypeInternal())
+ if (CanonType != Injected)
continue;
// If so, the canonical type of this TST is the injected
// class name type of the record we just found.
- assert(ICNT.isCanonical());
CanonType = ICNT;
break;
}
@@ -3819,7 +3835,7 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
// Diagnose uses of this specialization.
(void)DiagnoseUseOfDecl(Decl, TemplateLoc);
- CanonType = Context.getTypeDeclType(Decl);
+ CanonType = Context.getCanonicalTagType(Decl);
assert(isa<RecordType>(CanonType) &&
"type of non-dependent specialization is not a RecordType");
} else {
@@ -3830,7 +3846,8 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
// specialization, which refers back to the class template
// specialization we created or found.
return Context.getTemplateSpecializationType(
- Name, TemplateArgs.arguments(), CTAI.CanonicalConverted, CanonType);
+ Keyword, Name, TemplateArgs.arguments(), CTAI.CanonicalConverted,
+ CanonType);
}
void Sema::ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &ParsedName,
@@ -3878,7 +3895,7 @@ bool Sema::resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
diagnoseTypo(Corrected, PDiag(diag::err_no_template_suggest)
<< ATN->getDeclName());
Name = Context.getQualifiedTemplateName(
- /*NNS=*/nullptr, /*TemplateKeyword=*/false,
+ /*Qualifier=*/std::nullopt, /*TemplateKeyword=*/false,
TemplateName(Corrected.getCorrectionDeclAs<TemplateDecl>()));
return false;
}
@@ -3889,11 +3906,12 @@ bool Sema::resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
}
TypeResult Sema::ActOnTemplateIdType(
- Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
- TemplateTy TemplateD, const IdentifierInfo *TemplateII,
- SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
- ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc,
- bool IsCtorOrDtorName, bool IsClassName,
+ Scope *S, ElaboratedTypeKeyword ElaboratedKeyword,
+ SourceLocation ElaboratedKeywordLoc, CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc, TemplateTy TemplateD,
+ const IdentifierInfo *TemplateII, SourceLocation TemplateIILoc,
+ SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn,
+ SourceLocation RAngleLoc, bool IsCtorOrDtorName, bool IsClassName,
ImplicitTypenameContext AllowImplicitTypename) {
if (SS.isInvalid())
return true;
@@ -3909,8 +3927,9 @@ TypeResult Sema::ActOnTemplateIdType(
// elaborated-type-specifier (7.1.5.3).
if (!LookupCtx && isDependentScopeSpecifier(SS)) {
// C++2a relaxes some of those restrictions in [temp.res]p5.
- NestedNameSpecifier *NNS =
- NestedNameSpecifier::Create(Context, SS.getScopeRep(), TemplateII);
+ QualType DNT = Context.getDependentNameType(ElaboratedTypeKeyword::None,
+ SS.getScopeRep(), TemplateII);
+ NestedNameSpecifier NNS(DNT.getTypePtr());
if (AllowImplicitTypename == ImplicitTypenameContext::Yes) {
auto DB = DiagCompat(SS.getBeginLoc(), diag_compat::implicit_typename)
<< NNS;
@@ -3954,12 +3973,12 @@ TypeResult Sema::ActOnTemplateIdType(
if (DependentTemplateName *DTN = Template.getAsDependentTemplateName()) {
assert(SS.getScopeRep() == DTN->getQualifier());
QualType T = Context.getDependentTemplateSpecializationType(
- ElaboratedTypeKeyword::None, *DTN, TemplateArgs.arguments());
+ ElaboratedKeyword, *DTN, TemplateArgs.arguments());
// Build type-source information.
TypeLocBuilder TLB;
DependentTemplateSpecializationTypeLoc SpecTL
= TLB.push<DependentTemplateSpecializationTypeLoc>(T);
- SpecTL.setElaboratedKeywordLoc(SourceLocation());
+ SpecTL.setElaboratedKeywordLoc(ElaboratedKeywordLoc);
SpecTL.setQualifierLoc(SS.getWithLocInContext(Context));
SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
SpecTL.setTemplateNameLoc(TemplateIILoc);
@@ -3970,30 +3989,17 @@ TypeResult Sema::ActOnTemplateIdType(
return CreateParsedType(T, TLB.getTypeSourceInfo(Context, T));
}
- QualType SpecTy = CheckTemplateIdType(Template, TemplateIILoc, TemplateArgs);
+ QualType SpecTy = CheckTemplateIdType(ElaboratedKeyword, Template,
+ TemplateIILoc, TemplateArgs);
if (SpecTy.isNull())
return true;
// Build type-source information.
TypeLocBuilder TLB;
- TemplateSpecializationTypeLoc SpecTL =
- TLB.push<TemplateSpecializationTypeLoc>(SpecTy);
- SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
- SpecTL.setTemplateNameLoc(TemplateIILoc);
- SpecTL.setLAngleLoc(LAngleLoc);
- SpecTL.setRAngleLoc(RAngleLoc);
- for (unsigned i = 0, e = SpecTL.getNumArgs(); i != e; ++i)
- SpecTL.setArgLocInfo(i, TemplateArgs[i].getLocInfo());
-
- // Create an elaborated-type-specifier containing the nested-name-specifier.
- QualType ElTy =
- getElaboratedType(ElaboratedTypeKeyword::None,
- !IsCtorOrDtorName ? SS : CXXScopeSpec(), SpecTy);
- ElaboratedTypeLoc ElabTL = TLB.push<ElaboratedTypeLoc>(ElTy);
- ElabTL.setElaboratedKeywordLoc(SourceLocation());
- if (!ElabTL.isEmpty())
- ElabTL.setQualifierLoc(SS.getWithLocInContext(Context));
- return CreateParsedType(ElTy, TLB.getTypeSourceInfo(Context, ElTy));
+ TLB.push<TemplateSpecializationTypeLoc>(SpecTy).set(
+ ElaboratedKeywordLoc, SS.getWithLocInContext(Context), TemplateKWLoc,
+ TemplateIILoc, TemplateArgs);
+ return CreateParsedType(SpecTy, TLB.getTypeSourceInfo(Context, SpecTy));
}
TypeResult Sema::ActOnTagTemplateIdType(TagUseKind TUK,
@@ -4040,24 +4046,14 @@ TypeResult Sema::ActOnTagTemplateIdType(TagUseKind TUK,
return CreateParsedType(T, TLB.getTypeSourceInfo(Context, T));
}
- if (TypeAliasTemplateDecl *TAT =
- dyn_cast_or_null<TypeAliasTemplateDecl>(Template.getAsTemplateDecl())) {
- // C++0x [dcl.type.elab]p2:
- // If the identifier resolves to a typedef-name or the simple-template-id
- // resolves to an alias template specialization, the
- // elaborated-type-specifier is ill-formed.
- Diag(TemplateLoc, diag::err_tag_reference_non_tag)
- << TAT << NonTagKind::TypeAliasTemplate << TagKind;
- Diag(TAT->getLocation(), diag::note_declared_at);
- }
-
- QualType Result = CheckTemplateIdType(Template, TemplateLoc, TemplateArgs);
+ QualType Result =
+ CheckTemplateIdType(Keyword, Template, TemplateLoc, TemplateArgs);
if (Result.isNull())
return TypeResult(true);
// Check the tag kind
if (const RecordType *RT = Result->getAs<RecordType>()) {
- RecordDecl *D = RT->getDecl();
+ RecordDecl *D = RT->getOriginalDecl()->getDefinitionOrSelf();
IdentifierInfo *Id = D->getIdentifier();
assert(Id && "templated class must have an identifier");
@@ -4073,21 +4069,9 @@ TypeResult Sema::ActOnTagTemplateIdType(TagUseKind TUK,
// Provide source-location information for the template specialization.
TypeLocBuilder TLB;
- TemplateSpecializationTypeLoc SpecTL
- = TLB.push<TemplateSpecializationTypeLoc>(Result);
- SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
- SpecTL.setTemplateNameLoc(TemplateLoc);
- SpecTL.setLAngleLoc(LAngleLoc);
- SpecTL.setRAngleLoc(RAngleLoc);
- for (unsigned i = 0, e = SpecTL.getNumArgs(); i != e; ++i)
- SpecTL.setArgLocInfo(i, TemplateArgs[i].getLocInfo());
-
- // Construct an elaborated type containing the nested-name-specifier (if any)
- // and tag keyword.
- Result = Context.getElaboratedType(Keyword, SS.getScopeRep(), Result);
- ElaboratedTypeLoc ElabTL = TLB.push<ElaboratedTypeLoc>(Result);
- ElabTL.setElaboratedKeywordLoc(TagLoc);
- ElabTL.setQualifierLoc(SS.getWithLocInContext(Context));
+ TLB.push<TemplateSpecializationTypeLoc>(Result).set(
+ TagLoc, SS.getWithLocInContext(Context), TemplateKWLoc, TemplateLoc,
+ TemplateArgs);
return CreateParsedType(Result, TLB.getTypeSourceInfo(Context, Result));
}
@@ -4099,7 +4083,6 @@ static bool CheckTemplateSpecializationScope(Sema &S, NamedDecl *Specialized,
static TemplateSpecializationKind getTemplateSpecializationKind(Decl *D);
static bool isTemplateArgumentTemplateParameter(const TemplateArgument &Arg,
- NamedDecl *Param,
unsigned Depth,
unsigned Index) {
switch (Arg.getKind()) {
@@ -4139,8 +4122,9 @@ static bool isTemplateArgumentTemplateParameter(const TemplateArgument &Arg,
}
static bool isSameAsPrimaryTemplate(TemplateParameterList *Params,
+ TemplateParameterList *SpecParams,
ArrayRef<TemplateArgument> Args) {
- if (Params->size() != Args.size())
+ if (Params->size() != Args.size() || Params->size() != SpecParams->size())
return false;
unsigned Depth = Params->getDepth();
@@ -4157,9 +4141,19 @@ static bool isSameAsPrimaryTemplate(TemplateParameterList *Params,
Arg = Arg.pack_begin()->getPackExpansionPattern();
}
- if (!isTemplateArgumentTemplateParameter(Arg, Params->getParam(I), Depth,
- I))
+ if (!isTemplateArgumentTemplateParameter(Arg, Depth, I))
return false;
+
+ // For NTTPs further specialization is allowed via deduced types, so
+ // we need to make sure to only reject here if primary template and
+ // specialization use the same type for the NTTP.
+ if (auto *SpecNTTP =
+ dyn_cast<NonTypeTemplateParmDecl>(SpecParams->getParam(I))) {
+ auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Params->getParam(I));
+ if (!NTTP || NTTP->getType().getCanonicalType() !=
+ SpecNTTP->getType().getCanonicalType())
+ return false;
+ }
}
return true;
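The NTTP comparison added to isSameAsPrimaryTemplate above keeps a specialization that only changes a deduced NTTP type from being rejected as a redeclaration of the primary template. A sketch of the case this is presumably meant to accept, inferred from the comment rather than taken from a test in the patch:

```cpp
template <auto N> int v = 0;      // primary: NTTP has the deduced type 'auto'

// The argument list <N> looks identical to the primary's implicit argument
// list, but the NTTP type differs ('int' vs. 'auto'), so this is a genuine
// partial specialization rather than a redeclaration of the primary.
template <int N> int v<N> = 1;
```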
@@ -4357,7 +4351,7 @@ DeclResult Sema::ActOnVarTemplateSpecialization(
}
if (isSameAsPrimaryTemplate(VarTemplate->getTemplateParameters(),
- CTAI.CanonicalConverted) &&
+ TemplateParams, CTAI.CanonicalConverted) &&
(!Context.getLangOpts().CPlusPlus20 ||
!TemplateParams->hasAssociatedConstraints())) {
// C++ [temp.class.spec]p9b3:
@@ -5055,7 +5049,7 @@ TemplateNameKind Sema::ActOnTemplateName(Scope *S,
return TNK_Non_template;
}
- NestedNameSpecifier *Qualifier = SS.getScopeRep();
+ NestedNameSpecifier Qualifier = SS.getScopeRep();
switch (Name.getKind()) {
case UnqualifiedIdKind::IK_Identifier:
@@ -5343,8 +5337,9 @@ static bool SubstDefaultTemplateArgument(
///
/// \returns the substituted template argument, or NULL if an error occurred.
static TemplateName SubstDefaultTemplateArgument(
- Sema &SemaRef, TemplateDecl *Template, SourceLocation TemplateLoc,
- SourceLocation RAngleLoc, TemplateTemplateParmDecl *Param,
+ Sema &SemaRef, TemplateDecl *Template, SourceLocation TemplateKWLoc,
+ SourceLocation TemplateLoc, SourceLocation RAngleLoc,
+ TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> SugaredConverted,
ArrayRef<TemplateArgument> CanonicalConverted,
NestedNameSpecifierLoc &QualifierLoc) {
@@ -5361,25 +5356,17 @@ static TemplateName SubstDefaultTemplateArgument(
TemplateArgLists.addOuterTemplateArguments(std::nullopt);
Sema::ContextRAII SavedContext(SemaRef, Template->getDeclContext());
- // Substitute into the nested-name-specifier first,
- QualifierLoc = Param->getDefaultArgument().getTemplateQualifierLoc();
- if (QualifierLoc) {
- QualifierLoc =
- SemaRef.SubstNestedNameSpecifierLoc(QualifierLoc, TemplateArgLists);
- if (!QualifierLoc)
- return TemplateName();
- }
- return SemaRef.SubstTemplateName(
- QualifierLoc,
- Param->getDefaultArgument().getArgument().getAsTemplate(),
- Param->getDefaultArgument().getTemplateNameLoc(),
- TemplateArgLists);
+ const TemplateArgumentLoc &A = Param->getDefaultArgument();
+ QualifierLoc = A.getTemplateQualifierLoc();
+ return SemaRef.SubstTemplateName(TemplateKWLoc, QualifierLoc,
+ A.getArgument().getAsTemplate(),
+ A.getTemplateNameLoc(), TemplateArgLists);
}
TemplateArgumentLoc Sema::SubstDefaultTemplateArgumentIfAvailable(
- TemplateDecl *Template, SourceLocation TemplateLoc,
- SourceLocation RAngleLoc, Decl *Param,
+ TemplateDecl *Template, SourceLocation TemplateKWLoc,
+ SourceLocation TemplateNameLoc, SourceLocation RAngleLoc, Decl *Param,
ArrayRef<TemplateArgument> SugaredConverted,
ArrayRef<TemplateArgument> CanonicalConverted, bool &HasDefaultArg) {
HasDefaultArg = false;
@@ -5390,8 +5377,8 @@ TemplateArgumentLoc Sema::SubstDefaultTemplateArgumentIfAvailable(
HasDefaultArg = true;
TemplateArgumentLoc Output;
- if (SubstDefaultTemplateArgument(*this, Template, TemplateLoc, RAngleLoc,
- TypeParm, SugaredConverted,
+ if (SubstDefaultTemplateArgument(*this, Template, TemplateNameLoc,
+ RAngleLoc, TypeParm, SugaredConverted,
CanonicalConverted, Output))
return TemplateArgumentLoc();
return Output;
@@ -5404,8 +5391,8 @@ TemplateArgumentLoc Sema::SubstDefaultTemplateArgumentIfAvailable(
HasDefaultArg = true;
TemplateArgumentLoc Output;
- if (SubstDefaultTemplateArgument(*this, Template, TemplateLoc, RAngleLoc,
- NonTypeParm, SugaredConverted,
+ if (SubstDefaultTemplateArgument(*this, Template, TemplateNameLoc,
+ RAngleLoc, NonTypeParm, SugaredConverted,
CanonicalConverted, Output))
return TemplateArgumentLoc();
return Output;
@@ -5417,17 +5404,16 @@ TemplateArgumentLoc Sema::SubstDefaultTemplateArgumentIfAvailable(
return TemplateArgumentLoc();
HasDefaultArg = true;
+ const TemplateArgumentLoc &A = TempTempParm->getDefaultArgument();
NestedNameSpecifierLoc QualifierLoc;
TemplateName TName = SubstDefaultTemplateArgument(
- *this, Template, TemplateLoc, RAngleLoc, TempTempParm, SugaredConverted,
- CanonicalConverted, QualifierLoc);
+ *this, Template, TemplateKWLoc, TemplateNameLoc, RAngleLoc, TempTempParm,
+ SugaredConverted, CanonicalConverted, QualifierLoc);
if (TName.isNull())
return TemplateArgumentLoc();
- return TemplateArgumentLoc(
- Context, TemplateArgument(TName),
- TempTempParm->getDefaultArgument().getTemplateQualifierLoc(),
- TempTempParm->getDefaultArgument().getTemplateNameLoc());
+ return TemplateArgumentLoc(Context, TemplateArgument(TName), TemplateKWLoc,
+ QualifierLoc, A.getTemplateNameLoc());
}
/// Convert a template-argument that we parsed as a type into a template, if
@@ -5435,33 +5421,24 @@ TemplateArgumentLoc Sema::SubstDefaultTemplateArgumentIfAvailable(
/// template template arguments and as template type arguments.
static TemplateArgumentLoc
convertTypeTemplateArgumentToTemplate(ASTContext &Context, TypeLoc TLoc) {
- // Extract and step over any surrounding nested-name-specifier.
- NestedNameSpecifierLoc QualLoc;
- if (auto ETLoc = TLoc.getAs<ElaboratedTypeLoc>()) {
- if (ETLoc.getTypePtr()->getKeyword() != ElaboratedTypeKeyword::None)
- return TemplateArgumentLoc();
+ auto TagLoc = TLoc.getAs<TagTypeLoc>();
+ if (!TagLoc)
+ return TemplateArgumentLoc();
- QualLoc = ETLoc.getQualifierLoc();
- TLoc = ETLoc.getNamedTypeLoc();
- }
// If this type was written as an injected-class-name, it can be used as a
// template template argument.
- if (auto InjLoc = TLoc.getAs<InjectedClassNameTypeLoc>())
- return TemplateArgumentLoc(Context, InjLoc.getTypePtr()->getTemplateName(),
- QualLoc, InjLoc.getNameLoc());
-
// If this type was written as an injected-class-name, it may have been
// converted to a RecordType during instantiation. If the RecordType is
// *not* wrapped in a TemplateSpecializationType and denotes a class
// template specialization, it must have come from an injected-class-name.
- if (auto RecLoc = TLoc.getAs<RecordTypeLoc>())
- if (auto *CTSD =
- dyn_cast<ClassTemplateSpecializationDecl>(RecLoc.getDecl()))
- return TemplateArgumentLoc(Context,
- TemplateName(CTSD->getSpecializedTemplate()),
- QualLoc, RecLoc.getNameLoc());
- return TemplateArgumentLoc();
+ TemplateName Name = TagLoc.getTypePtr()->getTemplateName(Context);
+ if (Name.isNull())
+ return TemplateArgumentLoc();
+
+ return TemplateArgumentLoc(Context, Name,
+ /*TemplateKWLoc=*/SourceLocation(),
+ TagLoc.getQualifierLoc(), TagLoc.getNameLoc());
}
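For reference, the injected-class-name case this conversion handles looks like the following (illustrative names only):

```cpp
template <template <class> class TT> struct Holder {};

template <class T> struct List {
  // Within the class, 'List' names the injected-class-name; when it is used
  // where a template template argument is expected, it denotes the List
  // template itself.
  using H = Holder<List>;
};
```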
bool Sema::CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &ArgLoc,
@@ -6007,8 +5984,8 @@ bool Sema::CheckTemplateArgumentList(
// (when the template parameter was part of a nested template) into
// the default argument.
TemplateArgumentLoc Arg = SubstDefaultTemplateArgumentIfAvailable(
- Template, TemplateLoc, RAngleLoc, *Param, CTAI.SugaredConverted,
- CTAI.CanonicalConverted, HasDefaultArg);
+ Template, /*TemplateKWLoc=*/SourceLocation(), TemplateLoc, RAngleLoc,
+ *Param, CTAI.SugaredConverted, CTAI.CanonicalConverted, HasDefaultArg);
if (Arg.getArgument().isNull()) {
if (!HasDefaultArg) {
@@ -6152,7 +6129,7 @@ namespace {
#include "clang/AST/TypeNodes.inc"
bool VisitTagDecl(const TagDecl *Tag);
- bool VisitNestedNameSpecifier(NestedNameSpecifier *NNS);
+ bool VisitNestedNameSpecifier(NestedNameSpecifier NNS);
};
} // end anonymous namespace
@@ -6297,11 +6274,11 @@ bool UnnamedLocalNoLinkageFinder::VisitDeducedTemplateSpecializationType(
}
bool UnnamedLocalNoLinkageFinder::VisitRecordType(const RecordType* T) {
- return VisitTagDecl(T->getDecl());
+ return VisitTagDecl(T->getOriginalDecl()->getDefinitionOrSelf());
}
bool UnnamedLocalNoLinkageFinder::VisitEnumType(const EnumType* T) {
- return VisitTagDecl(T->getDecl());
+ return VisitTagDecl(T->getOriginalDecl()->getDefinitionOrSelf());
}
bool UnnamedLocalNoLinkageFinder::VisitTemplateTypeParmType(
@@ -6321,7 +6298,7 @@ bool UnnamedLocalNoLinkageFinder::VisitTemplateSpecializationType(
bool UnnamedLocalNoLinkageFinder::VisitInjectedClassNameType(
const InjectedClassNameType* T) {
- return VisitTagDecl(T->getDecl());
+ return VisitTagDecl(T->getOriginalDecl()->getDefinitionOrSelf());
}
bool UnnamedLocalNoLinkageFinder::VisitDependentNameType(
@@ -6331,10 +6308,7 @@ bool UnnamedLocalNoLinkageFinder::VisitDependentNameType(
bool UnnamedLocalNoLinkageFinder::VisitDependentTemplateSpecializationType(
const DependentTemplateSpecializationType* T) {
- if (auto *Q = T->getDependentTemplateName().getQualifier())
- return VisitNestedNameSpecifier(Q);
-
- return false;
+ return VisitNestedNameSpecifier(T->getDependentTemplateName().getQualifier());
}
bool UnnamedLocalNoLinkageFinder::VisitPackExpansionType(
@@ -6380,11 +6354,10 @@ bool UnnamedLocalNoLinkageFinder::VisitDependentBitIntType(
bool UnnamedLocalNoLinkageFinder::VisitTagDecl(const TagDecl *Tag) {
if (Tag->getDeclContext()->isFunctionOrMethod()) {
- S.Diag(SR.getBegin(),
- S.getLangOpts().CPlusPlus11 ?
- diag::warn_cxx98_compat_template_arg_local_type :
- diag::ext_template_arg_local_type)
- << S.Context.getTypeDeclType(Tag) << SR;
+ S.Diag(SR.getBegin(), S.getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_template_arg_local_type
+ : diag::ext_template_arg_local_type)
+ << S.Context.getCanonicalTagType(Tag) << SR;
return true;
}
@@ -6401,20 +6374,15 @@ bool UnnamedLocalNoLinkageFinder::VisitTagDecl(const TagDecl *Tag) {
}
bool UnnamedLocalNoLinkageFinder::VisitNestedNameSpecifier(
- NestedNameSpecifier *NNS) {
- assert(NNS);
- if (NNS->getPrefix() && VisitNestedNameSpecifier(NNS->getPrefix()))
- return true;
-
- switch (NNS->getKind()) {
- case NestedNameSpecifier::Identifier:
- case NestedNameSpecifier::Namespace:
- case NestedNameSpecifier::Global:
- case NestedNameSpecifier::Super:
+ NestedNameSpecifier NNS) {
+ switch (NNS.getKind()) {
+ case NestedNameSpecifier::Kind::Null:
+ case NestedNameSpecifier::Kind::Namespace:
+ case NestedNameSpecifier::Kind::Global:
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
return false;
-
- case NestedNameSpecifier::TypeSpec:
- return Visit(QualType(NNS->getAsType(), 0));
+ case NestedNameSpecifier::Kind::Type:
+ return Visit(QualType(NNS.getAsType(), 0));
}
llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
}
@@ -7382,7 +7350,8 @@ ExprResult Sema::CheckTemplateArgument(NamedDecl *Param, QualType ParamType,
// that case, this may extend the argument from 1 bit to 8 bits.
QualType IntegerType = ParamType;
if (const EnumType *Enum = IntegerType->getAs<EnumType>())
- IntegerType = Enum->getDecl()->getIntegerType();
+ IntegerType =
+ Enum->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
Value = Value.extOrTrunc(IntegerType->isBitIntType()
? Context.getIntWidth(IntegerType)
: Context.getTypeSize(IntegerType));
@@ -7480,7 +7449,8 @@ ExprResult Sema::CheckTemplateArgument(NamedDecl *Param, QualType ParamType,
QualType IntegerType = ParamType;
if (const EnumType *Enum = IntegerType->getAs<EnumType>()) {
- IntegerType = Enum->getDecl()->getIntegerType();
+ IntegerType =
+ Enum->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
}
if (ParamType->isBooleanType()) {
@@ -7903,10 +7873,9 @@ ExprResult Sema::BuildExpressionFromDeclTemplateArgument(
assert(VD->getDeclContext()->isRecord() &&
(isa<CXXMethodDecl>(VD) || isa<FieldDecl>(VD) ||
isa<IndirectFieldDecl>(VD)));
- QualType ClassType
- = Context.getTypeDeclType(cast<RecordDecl>(VD->getDeclContext()));
- NestedNameSpecifier *Qualifier =
- NestedNameSpecifier::Create(Context, nullptr, ClassType.getTypePtr());
+ CanQualType ClassType =
+ Context.getCanonicalTagType(cast<RecordDecl>(VD->getDeclContext()));
+ NestedNameSpecifier Qualifier(ClassType.getTypePtr());
SS.MakeTrivial(Context, Qualifier, Loc);
}
@@ -7998,7 +7967,7 @@ static Expr *BuildExpressionFromIntegralTemplateArgumentValue(
// type of literal for it.
QualType T = OrigT;
if (const EnumType *ET = OrigT->getAs<EnumType>())
- T = ET->getDecl()->getIntegerType();
+ T = ET->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
Expr *E;
if (T->isAnyCharacterType()) {
@@ -8881,7 +8850,6 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
isPartialSpecialization))
return true;
- QualType CanonType;
if (!isPartialSpecialization) {
// Create a new class template specialization declaration node for
// this explicit specialization or friend declaration.
@@ -8897,18 +8865,14 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
if (!PrevDecl)
ClassTemplate->AddSpecialization(Specialization, InsertPos);
-
- if (!CurContext->isDependentContext())
- CanonType = Context.getTypeDeclType(Specialization);
- }
-
- TypeSourceInfo *WrittenTy = Context.getTemplateSpecializationTypeInfo(
- Name, TemplateNameLoc, TemplateArgs, CTAI.CanonicalConverted, CanonType);
-
- if (isPartialSpecialization) {
+ } else {
+ CanQualType CanonType = CanQualType::CreateUnsafe(
+ Context.getCanonicalTemplateSpecializationType(
+ TemplateName(ClassTemplate->getCanonicalDecl()),
+ CTAI.CanonicalConverted));
if (Context.hasSameType(
- WrittenTy->getType(),
- ClassTemplate->getInjectedClassNameSpecialization()) &&
+ CanonType,
+ ClassTemplate->getCanonicalInjectedSpecializationType(Context)) &&
(!Context.getLangOpts().CPlusPlus20 ||
!TemplateParams->hasAssociatedConstraints())) {
// C++ [temp.class.spec]p9b3:
@@ -8930,13 +8894,12 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
}
// Create a new class template partial specialization declaration node.
- ClassTemplatePartialSpecializationDecl *PrevPartial
- = cast_or_null<ClassTemplatePartialSpecializationDecl>(PrevDecl);
+ ClassTemplatePartialSpecializationDecl *PrevPartial =
+ cast_or_null<ClassTemplatePartialSpecializationDecl>(PrevDecl);
ClassTemplatePartialSpecializationDecl *Partial =
ClassTemplatePartialSpecializationDecl::Create(
Context, Kind, DC, KWLoc, TemplateNameLoc, TemplateParams,
- ClassTemplate, CTAI.CanonicalConverted, WrittenTy->getType(),
- PrevPartial);
+ ClassTemplate, CTAI.CanonicalConverted, CanonType, PrevPartial);
Partial->setTemplateArgsAsWritten(TemplateArgs);
SetNestedNameSpecifier(*this, Partial, SS);
if (TemplateParameterLists.size() > 1 && SS.isSet()) {
@@ -8975,7 +8938,7 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
if (!Okay) {
SourceRange Range(TemplateNameLoc, RAngleLoc);
Diag(TemplateNameLoc, diag::err_specialization_after_instantiation)
- << Context.getTypeDeclType(Specialization) << Range;
+ << Context.getCanonicalTagType(Specialization) << Range;
Diag(PrevDecl->getPointOfInstantiation(),
diag::note_instantiation_required_here)
@@ -9038,6 +9001,13 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
Specialization->startDefinition();
if (TUK == TagUseKind::Friend) {
+ CanQualType CanonType = Context.getCanonicalTagType(Specialization);
+ TypeSourceInfo *WrittenTy = Context.getTemplateSpecializationTypeInfo(
+ ElaboratedTypeKeyword::None, /*ElaboratedKeywordLoc=*/SourceLocation(),
+ SS.getWithLocInContext(Context),
+ /*TemplateKeywordLoc=*/SourceLocation(), Name, TemplateNameLoc,
+ TemplateArgs, CTAI.CanonicalConverted, CanonType);
+
// Build the fully-sugared type for this class template
// specialization as the user wrote in the specialization
// itself. This means that we'll pretty-print the type retrieved
@@ -10078,9 +10048,6 @@ static bool CheckExplicitInstantiation(Sema &S, NamedDecl *D,
/// Determine whether the given scope specifier has a template-id in it.
static bool ScopeSpecifierHasTemplateId(const CXXScopeSpec &SS) {
- if (!SS.isSet())
- return false;
-
// C++11 [temp.explicit]p3:
// If the explicit instantiation is for a member function, a member class
// or a static data member of a class template specialization, the name of
@@ -10088,12 +10055,14 @@ static bool ScopeSpecifierHasTemplateId(const CXXScopeSpec &SS) {
// name shall be a simple-template-id.
//
// C++98 has the same restriction, just worded differently.
- for (NestedNameSpecifier *NNS = SS.getScopeRep(); NNS;
- NNS = NNS->getPrefix())
- if (const Type *T = NNS->getAsType())
- if (isa<TemplateSpecializationType>(T))
- return true;
-
+ for (NestedNameSpecifier NNS = SS.getScopeRep();
+ NNS.getKind() == NestedNameSpecifier::Kind::Type;
+ /**/) {
+ const Type *T = NNS.getAsType();
+ if (isa<TemplateSpecializationType>(T))
+ return true;
+ NNS = T->getPrefix();
+ }
return false;
}
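The rewritten loop above walks the nested-name-specifier looking for a TemplateSpecializationType, enforcing the quoted [temp.explicit]p3 restriction. A minimal example of the qualified names it matches (illustrative):

```cpp
template <class T> struct A { void f() {} };

// The nested-name-specifier 'A<int>::' contains a simple-template-id, which
// is what ScopeSpecifierHasTemplateId looks for when checking an explicit
// instantiation such as:
template void A<int>::f();
```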
@@ -10448,7 +10417,7 @@ Sema::ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
CXXRecordDecl *Pattern = Record->getInstantiatedFromMemberClass();
if (!Pattern) {
Diag(TemplateLoc, diag::err_explicit_instantiation_nontemplate_type)
- << Context.getTypeDeclType(Record);
+ << Context.getCanonicalTagType(Record);
Diag(Record->getLocation(), diag::note_nontemplate_decl_here);
return true;
}
@@ -10995,7 +10964,7 @@ TypeResult Sema::ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
// This has to hold, because SS is expected to be defined.
assert(Name && "Expected a name in a dependent tag");
- NestedNameSpecifier *NNS = SS.getScopeRep();
+ NestedNameSpecifier NNS = SS.getScopeRep();
if (!NNS)
return true;
@@ -11107,7 +11076,10 @@ Sema::ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
return CreateParsedType(T, Builder.getTypeSourceInfo(Context, T));
}
- QualType T = CheckTemplateIdType(Template, TemplateIILoc, TemplateArgs);
+ QualType T = CheckTemplateIdType(TypenameLoc.isValid()
+ ? ElaboratedTypeKeyword::Typename
+ : ElaboratedTypeKeyword::None,
+ Template, TemplateIILoc, TemplateArgs);
if (T.isNull())
return true;
@@ -11115,18 +11087,8 @@ Sema::ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
TypeLocBuilder Builder;
TemplateSpecializationTypeLoc SpecTL
= Builder.push<TemplateSpecializationTypeLoc>(T);
- SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
- SpecTL.setTemplateNameLoc(TemplateIILoc);
- SpecTL.setLAngleLoc(LAngleLoc);
- SpecTL.setRAngleLoc(RAngleLoc);
- for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
- SpecTL.setArgLocInfo(I, TemplateArgs[I].getLocInfo());
-
- T = Context.getElaboratedType(Keyword, SS.getScopeRep(), T);
- ElaboratedTypeLoc TL = Builder.push<ElaboratedTypeLoc>(T);
- TL.setElaboratedKeywordLoc(TypenameLoc);
- TL.setQualifierLoc(SS.getWithLocInContext(Context));
-
+ SpecTL.set(TypenameLoc, SS.getWithLocInContext(Context), TemplateKWLoc,
+ TemplateIILoc, TemplateArgs);
TypeSourceInfo *TSI = Builder.getTypeSourceInfo(Context, T);
return CreateParsedType(T, TSI);
}
@@ -11140,11 +11102,12 @@ static bool isEnableIf(NestedNameSpecifierLoc NNS, const IdentifierInfo &II,
return false;
// ... within an explicitly-written template specialization...
- if (!NNS || !NNS.getNestedNameSpecifier()->getAsType())
+ if (NNS.getNestedNameSpecifier().getKind() != NestedNameSpecifier::Kind::Type)
return false;
- TypeLoc EnableIfTy = NNS.getTypeLoc();
- TemplateSpecializationTypeLoc EnableIfTSTLoc =
- EnableIfTy.getAs<TemplateSpecializationTypeLoc>();
+
+ // FIXME: Look through sugar.
+ auto EnableIfTSTLoc =
+ NNS.castAsTypeLoc().getAs<TemplateSpecializationTypeLoc>();
if (!EnableIfTSTLoc || EnableIfTSTLoc.getNumArgs() == 0)
return false;
const TemplateSpecializationType *EnableIfTST = EnableIfTSTLoc.getTypePtr();
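For context, isEnableIf examines the written nested-name-specifier of a declaration like the following, looking for an enable_if specialization; this is an illustrative sketch of the usage it targets, not code from the patch:

```cpp
#include <type_traits>

// The qualifier 'std::enable_if<std::is_integral<T>::value, int>::' is the
// TemplateSpecializationTypeLoc that isEnableIf extracts and inspects.
template <class T>
typename std::enable_if<std::is_integral<T>::value, int>::type f(T);
```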
@@ -11192,19 +11155,33 @@ Sema::CheckTypenameType(ElaboratedTypeKeyword Keyword,
if (T.isNull())
return QualType();
- *TSI = Context.CreateTypeSourceInfo(T);
+ TypeLocBuilder TLB;
if (isa<DependentNameType>(T)) {
- DependentNameTypeLoc TL =
- (*TSI)->getTypeLoc().castAs<DependentNameTypeLoc>();
+ auto TL = TLB.push<DependentNameTypeLoc>(T);
TL.setElaboratedKeywordLoc(KeywordLoc);
TL.setQualifierLoc(QualifierLoc);
TL.setNameLoc(IILoc);
- } else {
- ElaboratedTypeLoc TL = (*TSI)->getTypeLoc().castAs<ElaboratedTypeLoc>();
+ } else if (isa<DeducedTemplateSpecializationType>(T)) {
+ auto TL = TLB.push<DeducedTemplateSpecializationTypeLoc>(T);
+ TL.setElaboratedKeywordLoc(KeywordLoc);
+ TL.setQualifierLoc(QualifierLoc);
+ TL.setNameLoc(IILoc);
+ } else if (isa<TemplateTypeParmType>(T)) {
+ // FIXME: There might be a 'typename' keyword here, but we just drop it
+ // as it can't be represented.
+ assert(!QualifierLoc);
+ TLB.pushTypeSpec(T).setNameLoc(IILoc);
+ } else if (isa<TagType>(T)) {
+ auto TL = TLB.push<TagTypeLoc>(T);
TL.setElaboratedKeywordLoc(KeywordLoc);
TL.setQualifierLoc(QualifierLoc);
- TL.getNamedTypeLoc().castAs<TypeSpecTypeLoc>().setNameLoc(IILoc);
+ TL.setNameLoc(IILoc);
+ } else if (isa<TypedefType>(T)) {
+ TLB.push<TypedefTypeLoc>(T).set(KeywordLoc, QualifierLoc, IILoc);
+ } else {
+ TLB.push<UnresolvedUsingTypeLoc>(T).set(KeywordLoc, QualifierLoc, IILoc);
}
+ *TSI = TLB.getTypeSourceInfo(Context, T);
return T;
}
@@ -11227,7 +11204,7 @@ Sema::CheckTypenameType(ElaboratedTypeKeyword Keyword,
if (!Ctx) {
// If the nested-name-specifier is dependent and couldn't be
// resolved to a type, build a typename type.
- assert(QualifierLoc.getNestedNameSpecifier()->isDependent());
+ assert(QualifierLoc.getNestedNameSpecifier().isDependent());
return Context.getDependentNameType(Keyword,
QualifierLoc.getNestedNameSpecifier(),
&II);
@@ -11309,6 +11286,7 @@ Sema::CheckTypenameType(ElaboratedTypeKeyword Keyword,
&II);
case LookupResultKind::Found:
+ // FIXME: Missing support for UsingShadowDecl on this path?

if (TypeDecl *Type = dyn_cast<TypeDecl>(Result.getFoundDecl())) {
// C++ [class.qual]p2:
// In a lookup in which function names are not ignored and the
@@ -11324,15 +11302,20 @@ Sema::CheckTypenameType(ElaboratedTypeKeyword Keyword,
//
// FIXME: That's not strictly true: mem-initializer-id lookup does not
// ignore functions, but that appears to be an oversight.
- QualType T = getTypeDeclType(Ctx,
- Keyword == ElaboratedTypeKeyword::Typename
- ? DiagCtorKind::Typename
- : DiagCtorKind::None,
- Type, IILoc);
- // We found a type. Build an ElaboratedType, since the
- // typename-specifier was just sugar.
- return Context.getElaboratedType(
- Keyword, QualifierLoc.getNestedNameSpecifier(), T);
+ checkTypeDeclType(Ctx,
+ Keyword == ElaboratedTypeKeyword::Typename
+ ? DiagCtorKind::Typename
+ : DiagCtorKind::None,
+ Type, IILoc);
+ // FIXME: This appears to be the only case where a template type parameter
+ // can have an elaborated keyword. We should preserve it somehow.
+ if (isa<TemplateTypeParmDecl>(Type)) {
+ assert(Keyword == ElaboratedTypeKeyword::Typename);
+ assert(!QualifierLoc);
+ Keyword = ElaboratedTypeKeyword::None;
+ }
+ return Context.getTypeDeclType(
+ Keyword, QualifierLoc.getNestedNameSpecifier(), Type);
}
// C++ [dcl.type.simple]p2:
@@ -11342,22 +11325,22 @@ Sema::CheckTypenameType(ElaboratedTypeKeyword Keyword,
if (getLangOpts().CPlusPlus17) {
if (auto *TD = getAsTypeTemplateDecl(Result.getFoundDecl())) {
if (!DeducedTSTContext) {
- QualType T(QualifierLoc
- ? QualifierLoc.getNestedNameSpecifier()->getAsType()
- : nullptr, 0);
- if (!T.isNull())
+ NestedNameSpecifier Qualifier = QualifierLoc.getNestedNameSpecifier();
+ if (Qualifier.getKind() == NestedNameSpecifier::Kind::Type)
Diag(IILoc, diag::err_dependent_deduced_tst)
- << (int)getTemplateNameKindForDiagnostics(TemplateName(TD)) << T;
+ << (int)getTemplateNameKindForDiagnostics(TemplateName(TD))
+ << QualType(Qualifier.getAsType(), 0);
else
Diag(IILoc, diag::err_deduced_tst)
<< (int)getTemplateNameKindForDiagnostics(TemplateName(TD));
NoteTemplateLocation(*TD);
return QualType();
}
- return Context.getElaboratedType(
- Keyword, QualifierLoc.getNestedNameSpecifier(),
- Context.getDeducedTemplateSpecializationType(TemplateName(TD),
- QualType(), false));
+ TemplateName Name = Context.getQualifiedTemplateName(
+ QualifierLoc.getNestedNameSpecifier(), /*TemplateKeyword=*/false,
+ TemplateName(TD));
+ return Context.getDeducedTemplateSpecializationType(
+ Keyword, Name, /*DeducedType=*/QualType(), /*IsDependent=*/false);
}
}
diff --git a/clang/lib/Sema/SemaTemplateDeduction.cpp b/clang/lib/Sema/SemaTemplateDeduction.cpp
index 0d70321..3aa808e 100644
--- a/clang/lib/Sema/SemaTemplateDeduction.cpp
+++ b/clang/lib/Sema/SemaTemplateDeduction.cpp
@@ -691,38 +691,29 @@ DeduceTemplateSpecArguments(Sema &S, TemplateParameterList *TemplateParams,
TemplateDeductionInfo &Info, bool PartialOrdering,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
bool *HasDeducedAnyParam) {
- QualType UP = P;
- if (const auto *IP = P->getAs<InjectedClassNameType>())
- UP = IP->getInjectedSpecializationType();
-
- assert(isa<TemplateSpecializationType>(UP.getCanonicalType()));
- const TemplateSpecializationType *TP = ::getLastTemplateSpecType(UP);
- TemplateName TNP = TP->getTemplateName();
+ TemplateName TNP;
+ ArrayRef<TemplateArgument> PResolved;
+ if (isa<TemplateSpecializationType>(P.getCanonicalType())) {
+ const TemplateSpecializationType *TP = ::getLastTemplateSpecType(P);
+ TNP = TP->getTemplateName();
+ // FIXME: To preserve sugar, the TST needs to carry sugared resolved
+ // arguments.
+ PResolved = TP->getCanonicalTypeInternal()
+ ->castAs<TemplateSpecializationType>()
+ ->template_arguments();
+ } else {
+ const auto *TT = P->castAs<InjectedClassNameType>();
+ TNP = TT->getTemplateName(S.Context);
+ PResolved = TT->getTemplateArgs(S.Context);
+ }
// If the parameter is an alias template, there is nothing to deduce.
if (const auto *TD = TNP.getAsTemplateDecl(); TD && TD->isTypeAlias())
return TemplateDeductionResult::Success;
- // FIXME: To preserve sugar, the TST needs to carry sugared resolved
- // arguments.
- ArrayRef<TemplateArgument> PResolved =
- TP->getCanonicalTypeInternal()
- ->castAs<TemplateSpecializationType>()
- ->template_arguments();
-
- QualType UA = A;
- std::optional<NestedNameSpecifier *> NNS;
- // Treat an injected-class-name as its underlying template-id.
- if (const auto *Elaborated = A->getAs<ElaboratedType>()) {
- NNS = Elaborated->getQualifier();
- } else if (const auto *Injected = A->getAs<InjectedClassNameType>()) {
- UA = Injected->getInjectedSpecializationType();
- NNS = nullptr;
- }
-
// Check whether the template argument is a dependent template-id.
- if (isa<TemplateSpecializationType>(UA.getCanonicalType())) {
- const TemplateSpecializationType *SA = ::getLastTemplateSpecType(UA);
+ if (isa<TemplateSpecializationType>(A.getCanonicalType())) {
+ const TemplateSpecializationType *SA = ::getLastTemplateSpecType(A);
TemplateName TNA = SA->getTemplateName();
// If the argument is an alias template, there is nothing to deduce.
@@ -756,34 +747,36 @@ DeduceTemplateSpecArguments(Sema &S, TemplateParameterList *TemplateParams,
// If the argument type is a class template specialization, we
// perform template argument deduction using its template
// arguments.
- const auto *RA = UA->getAs<RecordType>();
- const auto *SA =
- RA ? dyn_cast<ClassTemplateSpecializationDecl>(RA->getDecl()) : nullptr;
- if (!SA) {
+ const auto *TA = A->getAs<TagType>();
+ TemplateName TNA;
+ if (TA) {
+ // FIXME: Can't use the template arguments from this TST, as they are not
+ // resolved.
+ if (const auto *TST = A->getAsNonAliasTemplateSpecializationType())
+ TNA = TST->getTemplateName();
+ else
+ TNA = TA->getTemplateName(S.Context);
+ }
+ if (TNA.isNull()) {
Info.FirstArg = TemplateArgument(P);
Info.SecondArg = TemplateArgument(A);
return TemplateDeductionResult::NonDeducedMismatch;
}
- TemplateName TNA = TemplateName(SA->getSpecializedTemplate());
- if (NNS)
- TNA = S.Context.getQualifiedTemplateName(
- *NNS, false, TemplateName(SA->getSpecializedTemplate()));
-
+ ArrayRef<TemplateArgument> AResolved = TA->getTemplateArgs(S.Context);
// Perform template argument deduction for the template name.
- if (auto Result = DeduceTemplateArguments(
- S, TemplateParams, TNP, TNA, Info,
- /*DefaultArguments=*/SA->getTemplateArgs().asArray(), PartialOrdering,
- Deduced, HasDeducedAnyParam);
+ if (auto Result =
+ DeduceTemplateArguments(S, TemplateParams, TNP, TNA, Info,
+ /*DefaultArguments=*/AResolved,
+ PartialOrdering, Deduced, HasDeducedAnyParam);
Result != TemplateDeductionResult::Success)
return Result;
// Perform template argument deduction for the template arguments.
- return DeduceTemplateArguments(S, TemplateParams, PResolved,
- SA->getTemplateArgs().asArray(), Info, Deduced,
- /*NumberOfArgumentsMustMatch=*/true,
- PartialOrdering, PackFold::ParameterToArgument,
- HasDeducedAnyParam);
+ return DeduceTemplateArguments(
+ S, TemplateParams, PResolved, AResolved, Info, Deduced,
+ /*NumberOfArgumentsMustMatch=*/true, PartialOrdering,
+ PackFold::ParameterToArgument, HasDeducedAnyParam);
}
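The surrounding context applies when the argument is a concrete class template specialization; deduction then runs over the specialization's resolved arguments. A minimal sketch of that situation (illustrative names):

```cpp
template <class T> struct S {};

template <class U> void f(S<U>);  // parameter P = S<U>

void g() {
  S<int> s;
  f(s);  // argument A = S<int>: deduction compares S<U> against S<int>'s
         // resolved template arguments and deduces U = int.
}
```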
static bool IsPossiblyOpaquelyQualifiedTypeInternal(const Type *T) {
@@ -2188,26 +2181,19 @@ static TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
Result != TemplateDeductionResult::Success)
return Result;
- QualType TP;
- if (MPP->isSugared()) {
- TP = S.Context.getTypeDeclType(MPP->getMostRecentCXXRecordDecl());
- } else {
- NestedNameSpecifier *QP = MPP->getQualifier();
- if (QP->getKind() == NestedNameSpecifier::Identifier)
- // Skip translation if it's a non-deduced context anyway.
- return TemplateDeductionResult::Success;
- TP = QualType(QP->translateToType(S.Context), 0);
- }
+ QualType TP =
+ MPP->isSugared()
+ ? S.Context.getCanonicalTagType(MPP->getMostRecentCXXRecordDecl())
+ : QualType(MPP->getQualifier().getAsType(), 0);
assert(!TP.isNull() && "member pointer with non-type class");
- QualType TA;
- if (MPA->isSugared()) {
- TA = S.Context.getTypeDeclType(MPA->getMostRecentCXXRecordDecl());
- } else {
- NestedNameSpecifier *QA = MPA->getQualifier();
- TA = QualType(QA->translateToType(S.Context), 0).getUnqualifiedType();
- }
+ QualType TA =
+ MPA->isSugared()
+ ? S.Context.getCanonicalTagType(MPA->getMostRecentCXXRecordDecl())
+ : QualType(MPA->getQualifier().getAsType(), 0)
+ .getUnqualifiedType();
assert(!TA.isNull() && "member pointer with non-type class");
+
return DeduceTemplateArgumentsByTypeMatch(
S, TemplateParams, TP, TA, Info, Deduced, SubTDF,
degradeCallPartialOrderingKind(POK),
@@ -2930,13 +2916,11 @@ Sema::getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
else if (QualifiedTemplateName *QTN =
Template.getAsQualifiedTemplateName())
Builder.MakeTrivial(Context, QTN->getQualifier(), Loc);
-
- if (Arg.getKind() == TemplateArgument::Template)
- return TemplateArgumentLoc(Context, Arg,
- Builder.getWithLocInContext(Context), Loc);
-
return TemplateArgumentLoc(
- Context, Arg, Builder.getWithLocInContext(Context), Loc, Loc);
+ Context, Arg, Loc, Builder.getWithLocInContext(Context), Loc,
+ /*EllipsisLoc=*/Arg.getKind() == TemplateArgument::TemplateExpansion
+ ? Loc
+ : SourceLocation());
}
case TemplateArgument::Expression:
@@ -3154,8 +3138,9 @@ static TemplateDeductionResult ConvertDeducedTemplateArguments(
S.getLangOpts().CPlusPlus17);
DefArg = S.SubstDefaultTemplateArgumentIfAvailable(
- TD, TD->getLocation(), TD->getSourceRange().getEnd(), Param,
- CTAI.SugaredConverted, CTAI.CanonicalConverted, HasDefaultArg);
+ TD, /*TemplateKWLoc=*/SourceLocation(), TD->getLocation(),
+ TD->getSourceRange().getEnd(), Param, CTAI.SugaredConverted,
+ CTAI.CanonicalConverted, HasDefaultArg);
}
// If there was no default argument, deduction is incomplete.
@@ -3512,7 +3497,7 @@ Sema::DeduceTemplateArgumentsFromType(TemplateDecl *TD, QualType FromType,
QualType PType;
if (const auto *CTD = dyn_cast<ClassTemplateDecl>(TD)) {
// Use the InjectedClassNameType.
- PType = Context.getTypeDeclType(CTD->getTemplatedDecl());
+ PType = Context.getCanonicalTagType(CTD->getTemplatedDecl());
} else if (const auto *AliasTemplate = dyn_cast<TypeAliasTemplateDecl>(TD)) {
PType = AliasTemplate->getTemplatedDecl()->getUnderlyingType();
} else {
@@ -4182,7 +4167,7 @@ static QualType GetTypeOfFunction(Sema &S, const OverloadExpr::FindResult &R,
return {};
return S.Context.getMemberPointerType(
- Fn->getType(), /*Qualifier=*/nullptr, Method->getParent());
+ Fn->getType(), /*Qualifier=*/std::nullopt, Method->getParent());
}
if (!R.IsAddressOfOperand) return Fn->getType();
@@ -5138,10 +5123,12 @@ namespace {
return TransformDesugared(TLB, TL);
QualType Result = SemaRef.Context.getDeducedTemplateSpecializationType(
- TL.getTypePtr()->getTemplateName(),
+ TL.getTypePtr()->getKeyword(), TL.getTypePtr()->getTemplateName(),
Replacement, Replacement.isNull());
auto NewTL = TLB.push<DeducedTemplateSpecializationTypeLoc>(Result);
+ NewTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc());
NewTL.setNameLoc(TL.getNameLoc());
+ NewTL.setQualifierLoc(TL.getQualifierLoc());
return Result;
}
@@ -5607,7 +5594,8 @@ static TemplateDeductionResult CheckDeductionConsistency(
bool IsDeductionGuide = isa<CXXDeductionGuideDecl>(FTD->getTemplatedDecl());
if (IsDeductionGuide) {
if (auto *Injected = P->getAs<InjectedClassNameType>())
- P = Injected->getInjectedSpecializationType();
+ P = Injected->getOriginalDecl()->getCanonicalTemplateSpecializationType(
+ S.Context);
}
QualType InstP = S.SubstType(P.getCanonicalType(), MLTAL, FTD->getLocation(),
FTD->getDeclName(), &IsIncompleteSubstitution);
@@ -5627,9 +5615,11 @@ static TemplateDeductionResult CheckDeductionConsistency(
auto T2 = S.Context.getUnqualifiedArrayType(A.getNonReferenceType());
if (IsDeductionGuide) {
if (auto *Injected = T1->getAs<InjectedClassNameType>())
- T1 = Injected->getInjectedSpecializationType();
+ T1 = Injected->getOriginalDecl()->getCanonicalTemplateSpecializationType(
+ S.Context);
if (auto *Injected = T2->getAs<InjectedClassNameType>())
- T2 = Injected->getInjectedSpecializationType();
+ T2 = Injected->getOriginalDecl()->getCanonicalTemplateSpecializationType(
+ S.Context);
}
if (!S.Context.hasSameType(T1, T2))
return TemplateDeductionResult::NonDeducedMismatch;
@@ -6471,8 +6461,8 @@ Sema::getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc) {
- QualType PT1 = PS1->getInjectedSpecializationType().getCanonicalType();
- QualType PT2 = PS2->getInjectedSpecializationType().getCanonicalType();
+ QualType PT1 = PS1->getCanonicalInjectedSpecializationType(Context);
+ QualType PT2 = PS2->getCanonicalInjectedSpecializationType(Context);
TemplateDeductionInfo Info(Loc);
return getMoreSpecialized(*this, PT1, PT2, PS1, PS2, Info);
@@ -6481,9 +6471,8 @@ Sema::getMoreSpecializedPartialSpecialization(
bool Sema::isMoreSpecializedThanPrimary(
ClassTemplatePartialSpecializationDecl *Spec, TemplateDeductionInfo &Info) {
ClassTemplateDecl *Primary = Spec->getSpecializedTemplate();
- QualType PrimaryT =
- Primary->getInjectedClassNameSpecialization().getCanonicalType();
- QualType PartialT = Spec->getInjectedSpecializationType().getCanonicalType();
+ QualType PrimaryT = Primary->getCanonicalInjectedSpecializationType(Context);
+ QualType PartialT = Spec->getCanonicalInjectedSpecializationType(Context);
ClassTemplatePartialSpecializationDecl *MaybeSpec =
getMoreSpecialized(*this, PartialT, PrimaryT, Spec, Primary, Info);
@@ -6790,19 +6779,13 @@ MarkUsedTemplateParameters(ASTContext &Ctx,
/// Mark the template parameters that are used by the given
/// nested name specifier.
-static void
-MarkUsedTemplateParameters(ASTContext &Ctx,
- NestedNameSpecifier *NNS,
- bool OnlyDeduced,
- unsigned Depth,
- llvm::SmallBitVector &Used) {
- if (!NNS)
+static void MarkUsedTemplateParameters(ASTContext &Ctx, NestedNameSpecifier NNS,
+ bool OnlyDeduced, unsigned Depth,
+ llvm::SmallBitVector &Used) {
+ if (NNS.getKind() != NestedNameSpecifier::Kind::Type)
return;
-
- MarkUsedTemplateParameters(Ctx, NNS->getPrefix(), OnlyDeduced, Depth,
- Used);
- MarkUsedTemplateParameters(Ctx, QualType(NNS->getAsType(), 0),
- OnlyDeduced, Depth, Used);
+ MarkUsedTemplateParameters(Ctx, QualType(NNS.getAsType(), 0), OnlyDeduced,
+ Depth, Used);
}
/// Mark the template parameters that are used by the given
@@ -6876,7 +6859,7 @@ MarkUsedTemplateParameters(ASTContext &Ctx, QualType T,
MarkUsedTemplateParameters(Ctx, MemPtr->getPointeeType(), OnlyDeduced,
Depth, Used);
MarkUsedTemplateParameters(Ctx,
- QualType(MemPtr->getQualifier()->getAsType(), 0),
+ QualType(MemPtr->getQualifier().getAsType(), 0),
OnlyDeduced, Depth, Used);
break;
}
@@ -6994,7 +6977,9 @@ MarkUsedTemplateParameters(ASTContext &Ctx, QualType T,
}
case Type::InjectedClassName:
- T = cast<InjectedClassNameType>(T)->getInjectedSpecializationType();
+ T = cast<InjectedClassNameType>(T)
+ ->getOriginalDecl()
+ ->getCanonicalTemplateSpecializationType(Ctx);
[[fallthrough]];
case Type::TemplateSpecialization: {
diff --git a/clang/lib/Sema/SemaTemplateDeductionGuide.cpp b/clang/lib/Sema/SemaTemplateDeductionGuide.cpp
index 9be1c9c..6045914 100644
--- a/clang/lib/Sema/SemaTemplateDeductionGuide.cpp
+++ b/clang/lib/Sema/SemaTemplateDeductionGuide.cpp
@@ -105,22 +105,21 @@ public:
return false;
}
- QualType
- RebuildTemplateSpecializationType(TemplateName Template,
- SourceLocation TemplateNameLoc,
- TemplateArgumentListInfo &TemplateArgs) {
+ QualType RebuildTemplateSpecializationType(
+ ElaboratedTypeKeyword Keyword, TemplateName Template,
+ SourceLocation TemplateNameLoc, TemplateArgumentListInfo &TemplateArgs) {
if (!OuterInstantiationArgs ||
!isa_and_present<TypeAliasTemplateDecl>(Template.getAsTemplateDecl()))
- return Base::RebuildTemplateSpecializationType(Template, TemplateNameLoc,
- TemplateArgs);
+ return Base::RebuildTemplateSpecializationType(
+ Keyword, Template, TemplateNameLoc, TemplateArgs);
auto *TATD = cast<TypeAliasTemplateDecl>(Template.getAsTemplateDecl());
auto *Pattern = TATD;
while (Pattern->getInstantiatedFromMemberTemplate())
Pattern = Pattern->getInstantiatedFromMemberTemplate();
if (!mightReferToOuterTemplateParameters(Pattern->getTemplatedDecl()))
- return Base::RebuildTemplateSpecializationType(Template, TemplateNameLoc,
- TemplateArgs);
+ return Base::RebuildTemplateSpecializationType(
+ Keyword, Template, TemplateNameLoc, TemplateArgs);
Decl *NewD =
TypedefNameInstantiator->InstantiateTypeAliasTemplateDecl(TATD);
@@ -131,13 +130,14 @@ public:
MaterializedTypedefs.push_back(NewTATD->getTemplatedDecl());
return Base::RebuildTemplateSpecializationType(
- TemplateName(NewTATD), TemplateNameLoc, TemplateArgs);
+ Keyword, TemplateName(NewTATD), TemplateNameLoc, TemplateArgs);
}
QualType TransformTypedefType(TypeLocBuilder &TLB, TypedefTypeLoc TL) {
ASTContext &Context = SemaRef.getASTContext();
- TypedefNameDecl *OrigDecl = TL.getTypedefNameDecl();
+ TypedefNameDecl *OrigDecl = TL.getDecl();
TypedefNameDecl *Decl = OrigDecl;
+ const TypedefType *T = TL.getTypePtr();
// Transform the underlying type of the typedef and clone the Decl only if
// the typedef has a dependent context.
bool InDependentContext = OrigDecl->getDeclContext()->isDependentContext();
@@ -155,7 +155,7 @@ public:
// };
// };
if (OuterInstantiationArgs && InDependentContext &&
- TL.getTypePtr()->isInstantiationDependentType()) {
+ T->isInstantiationDependentType()) {
Decl = cast_if_present<TypedefNameDecl>(
TypedefNameInstantiator->InstantiateTypedefNameDecl(
OrigDecl, /*IsTypeAlias=*/isa<TypeAliasDecl>(OrigDecl)));
@@ -180,10 +180,17 @@ public:
MaterializedTypedefs.push_back(Decl);
}
- QualType TDTy = Context.getTypedefType(Decl);
- TypedefTypeLoc TypedefTL = TLB.push<TypedefTypeLoc>(TDTy);
- TypedefTL.setNameLoc(TL.getNameLoc());
+ NestedNameSpecifierLoc QualifierLoc = TL.getQualifierLoc();
+ if (QualifierLoc) {
+ QualifierLoc = getDerived().TransformNestedNameSpecifierLoc(QualifierLoc);
+ if (!QualifierLoc)
+ return QualType();
+ }
+ QualType TDTy = Context.getTypedefType(
+ T->getKeyword(), QualifierLoc.getNestedNameSpecifier(), Decl);
+ TLB.push<TypedefTypeLoc>(TDTy).set(TL.getElaboratedKeywordLoc(),
+ QualifierLoc, TL.getNameLoc());
return TDTy;
}
};
@@ -327,7 +334,7 @@ struct ConvertConstructorToDeductionGuideTransform {
DeclarationName DeductionGuideName =
SemaRef.Context.DeclarationNames.getCXXDeductionGuideName(Template);
- QualType DeducedType = SemaRef.Context.getTypeDeclType(Primary);
+ QualType DeducedType = SemaRef.Context.getCanonicalTagType(Primary);
// Index adjustment to apply to convert depth-1 template parameters into
// depth-0 template parameters.
@@ -593,7 +600,10 @@ private:
// context of the template), so implicit deduction guides can never collide
// with explicit ones.
QualType ReturnType = DeducedType;
- TLB.pushTypeSpec(ReturnType).setNameLoc(Primary->getLocation());
+ auto TTL = TLB.push<TagTypeLoc>(ReturnType);
+ TTL.setElaboratedKeywordLoc(SourceLocation());
+ TTL.setQualifierLoc(NestedNameSpecifierLoc());
+ TTL.setNameLoc(Primary->getLocation());
// Resolving a wording defect, we also inherit the variadicness of the
// constructor.
@@ -954,7 +964,8 @@ Expr *buildIsDeducibleConstraint(Sema &SemaRef,
SmallVector<TypeSourceInfo *> IsDeducibleTypeTraitArgs = {
Context.getTrivialTypeSourceInfo(
Context.getDeducedTemplateSpecializationType(
- TemplateName(AliasTemplate), /*DeducedType=*/QualType(),
+ ElaboratedTypeKeyword::None, TemplateName(AliasTemplate),
+ /*DeducedType=*/QualType(),
/*IsDependent=*/true),
AliasTemplate->getLocation()), // template specialization type whose
// arguments will be deduced.
@@ -970,10 +981,7 @@ Expr *buildIsDeducibleConstraint(Sema &SemaRef,
std::pair<TemplateDecl *, llvm::ArrayRef<TemplateArgument>>
getRHSTemplateDeclAndArgs(Sema &SemaRef, TypeAliasTemplateDecl *AliasTemplate) {
- // Unwrap the sugared ElaboratedType.
- auto RhsType = AliasTemplate->getTemplatedDecl()
- ->getUnderlyingType()
- .getSingleStepDesugaredType(SemaRef.Context);
+ auto RhsType = AliasTemplate->getTemplatedDecl()->getUnderlyingType();
TemplateDecl *Template = nullptr;
llvm::ArrayRef<TemplateArgument> AliasRhsTemplateArgs;
if (const auto *TST = RhsType->getAs<TemplateSpecializationType>()) {
@@ -1048,12 +1056,11 @@ BuildDeductionGuideForTypeAlias(Sema &SemaRef,
// The (trailing) return type of the deduction guide.
const TemplateSpecializationType *FReturnType =
RType->getAs<TemplateSpecializationType>();
- if (const auto *InjectedCNT = RType->getAs<InjectedClassNameType>())
+ if (const auto *ICNT = RType->getAs<InjectedClassNameType>())
// implicitly-generated deduction guide.
- FReturnType = InjectedCNT->getInjectedTST();
- else if (const auto *ET = RType->getAs<ElaboratedType>())
- // explicit deduction guide.
- FReturnType = ET->getNamedType()->getAsNonAliasTemplateSpecializationType();
+ FReturnType = cast<TemplateSpecializationType>(
+ ICNT->getOriginalDecl()->getCanonicalTemplateSpecializationType(
+ SemaRef.Context));
assert(FReturnType && "expected to see a return type");
// Deduce template arguments of the deduction guide f from the RHS of
// the alias.
diff --git a/clang/lib/Sema/SemaTemplateInstantiate.cpp b/clang/lib/Sema/SemaTemplateInstantiate.cpp
index 0d96d18..5e8dfd1 100644
--- a/clang/lib/Sema/SemaTemplateInstantiate.cpp
+++ b/clang/lib/Sema/SemaTemplateInstantiate.cpp
@@ -353,45 +353,50 @@ Response HandleFunctionTemplateDecl(Sema &SemaRef,
SemaRef.Context),
/*Final=*/false);
- NestedNameSpecifier *NNS = FTD->getTemplatedDecl()->getQualifier();
-
- while (const Type *Ty = NNS ? NNS->getAsType() : nullptr) {
- if (NNS->isInstantiationDependent()) {
- if (const auto *TSTy = Ty->getAs<TemplateSpecializationType>()) {
- ArrayRef<TemplateArgument> Arguments = TSTy->template_arguments();
- // Prefer template arguments from the injected-class-type if possible.
- // For example,
- // ```cpp
- // template <class... Pack> struct S {
- // template <class T> void foo();
- // };
- // template <class... Pack> template <class T>
- // ^^^^^^^^^^^^^ InjectedTemplateArgs
- // They're of kind TemplateArgument::Pack, not of
- // TemplateArgument::Type.
- // void S<Pack...>::foo() {}
- // ^^^^^^^
- // TSTy->template_arguments() (which are of PackExpansionType)
- // ```
- // This meets the contract in
- // TreeTransform::TryExpandParameterPacks that the template arguments
- // for unexpanded parameters should be of a Pack kind.
- if (TSTy->isCurrentInstantiation()) {
- auto *RD = TSTy->getCanonicalTypeInternal()->getAsCXXRecordDecl();
- if (ClassTemplateDecl *CTD = RD->getDescribedClassTemplate())
- Arguments = CTD->getInjectedTemplateArgs(SemaRef.Context);
- else if (auto *Specialization =
- dyn_cast<ClassTemplateSpecializationDecl>(RD))
- Arguments =
- Specialization->getTemplateInstantiationArgs().asArray();
- }
- Result.addOuterTemplateArguments(
- TSTy->getTemplateName().getAsTemplateDecl(), Arguments,
- /*Final=*/false);
- }
- }
+ NestedNameSpecifier NNS = FTD->getTemplatedDecl()->getQualifier();
+
+ for (const Type *Ty = NNS.getKind() == NestedNameSpecifier::Kind::Type
+ ? NNS.getAsType()
+ : nullptr,
+ *NextTy = nullptr;
+ Ty && Ty->isInstantiationDependentType();
+ Ty = std::exchange(NextTy, nullptr)) {
+ if (NestedNameSpecifier P = Ty->getPrefix();
+ P.getKind() == NestedNameSpecifier::Kind::Type)
+ NextTy = P.getAsType();
+ const auto *TSTy = dyn_cast<TemplateSpecializationType>(Ty);
+ if (!TSTy)
+ continue;
- NNS = NNS->getPrefix();
+ ArrayRef<TemplateArgument> Arguments = TSTy->template_arguments();
+ // Prefer template arguments from the injected-class-type if possible.
+ // For example,
+ // ```cpp
+ // template <class... Pack> struct S {
+ // template <class T> void foo();
+ // };
+ // template <class... Pack> template <class T>
+ // ^^^^^^^^^^^^^ InjectedTemplateArgs
+ // They're of kind TemplateArgument::Pack, not of
+ // TemplateArgument::Type.
+ // void S<Pack...>::foo() {}
+ // ^^^^^^^
+ // TSTy->template_arguments() (which are of PackExpansionType)
+ // ```
+ // This meets the contract in
+ // TreeTransform::TryExpandParameterPacks that the template arguments
+ // for unexpanded parameters should be of a Pack kind.
+ if (TSTy->isCurrentInstantiation()) {
+ auto *RD = TSTy->getCanonicalTypeInternal()->getAsCXXRecordDecl();
+ if (ClassTemplateDecl *CTD = RD->getDescribedClassTemplate())
+ Arguments = CTD->getInjectedTemplateArgs(SemaRef.Context);
+ else if (auto *Specialization =
+ dyn_cast<ClassTemplateSpecializationDecl>(RD))
+ Arguments = Specialization->getTemplateInstantiationArgs().asArray();
+ }
+ Result.addOuterTemplateArguments(
+ TSTy->getTemplateName().getAsTemplateDecl(), Arguments,
+ /*Final=*/false);
}
}
@@ -1165,7 +1170,7 @@ void Sema::PrintInstantiationStack(InstantiationContextDiagFuncRef DiagFunc) {
DiagFunc(Active->PointOfInstantiation,
PDiag(diag::note_member_synthesized_at)
<< MD->isExplicitlyDefaulted() << DFK.asSpecialMember()
- << Context.getTagDeclType(MD->getParent()));
+ << Context.getCanonicalTagType(MD->getParent()));
} else if (DFK.isComparison()) {
QualType RecordType = FD->getParamDecl(0)
->getType()
@@ -1595,15 +1600,9 @@ namespace {
VarDecl *RebuildObjCExceptionDecl(VarDecl *ExceptionDecl,
TypeSourceInfo *TSInfo, QualType T);
- /// Check for tag mismatches when instantiating an
- /// elaborated type.
- QualType RebuildElaboratedType(SourceLocation KeywordLoc,
- ElaboratedTypeKeyword Keyword,
- NestedNameSpecifierLoc QualifierLoc,
- QualType T);
-
TemplateName
- TransformTemplateName(CXXScopeSpec &SS, TemplateName Name,
+ TransformTemplateName(NestedNameSpecifierLoc &QualifierLoc,
+ SourceLocation TemplateKWLoc, TemplateName Name,
SourceLocation NameLoc,
QualType ObjectType = QualType(),
NamedDecl *FirstQualifierInScope = nullptr,
@@ -1649,22 +1648,20 @@ namespace {
return inherited::TransformFunctionProtoType(TLB, TL);
}
- QualType TransformInjectedClassNameType(TypeLocBuilder &TLB,
- InjectedClassNameTypeLoc TL) {
- auto Type = inherited::TransformInjectedClassNameType(TLB, TL);
+ QualType TransformTagType(TypeLocBuilder &TLB, TagTypeLoc TL) {
+ auto Type = inherited::TransformTagType(TLB, TL);
+ if (!Type.isNull())
+ return Type;
// Special case for transforming a deduction guide, we return a
// transformed TemplateSpecializationType.
- if (Type.isNull() &&
- SemaRef.CodeSynthesisContexts.back().Kind ==
- Sema::CodeSynthesisContext::BuildingDeductionGuides) {
- // Return a TemplateSpecializationType for transforming a deduction
- // guide.
- if (auto *ICT = TL.getType()->getAs<InjectedClassNameType>()) {
- auto Type =
- inherited::TransformType(ICT->getInjectedSpecializationType());
- TLB.pushTrivial(SemaRef.Context, Type, TL.getNameLoc());
- return Type;
- }
+ // FIXME: Why is this hack necessary?
+ if (const auto *ICNT = dyn_cast<InjectedClassNameType>(TL.getTypePtr());
+ ICNT && SemaRef.CodeSynthesisContexts.back().Kind ==
+ Sema::CodeSynthesisContext::BuildingDeductionGuides) {
+ Type = inherited::TransformType(
+ ICNT->getOriginalDecl()->getCanonicalTemplateSpecializationType(
+ SemaRef.Context));
+ TLB.pushTrivial(SemaRef.Context, Type, TL.getNameLoc());
}
return Type;
}
@@ -2049,7 +2046,7 @@ TemplateInstantiator::TransformFirstQualifierInScope(NamedDecl *D,
return cast_or_null<NamedDecl>(TransformDecl(Loc, D));
if (const TagType *Tag = T->getAs<TagType>())
- return Tag->getDecl();
+ return Tag->getOriginalDecl();
// The resulting type is not a tag; complain.
getSema().Diag(Loc, diag::err_nested_name_spec_non_tag) << T;
@@ -2082,41 +2079,10 @@ VarDecl *TemplateInstantiator::RebuildObjCExceptionDecl(VarDecl *ExceptionDecl,
return Var;
}
-QualType
-TemplateInstantiator::RebuildElaboratedType(SourceLocation KeywordLoc,
- ElaboratedTypeKeyword Keyword,
- NestedNameSpecifierLoc QualifierLoc,
- QualType T) {
- if (const TagType *TT = T->getAs<TagType>()) {
- TagDecl* TD = TT->getDecl();
-
- SourceLocation TagLocation = KeywordLoc;
-
- IdentifierInfo *Id = TD->getIdentifier();
-
- // TODO: should we even warn on struct/class mismatches for this? Seems
- // like it's likely to produce a lot of spurious errors.
- if (Id && Keyword != ElaboratedTypeKeyword::None &&
- Keyword != ElaboratedTypeKeyword::Typename) {
- TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForKeyword(Keyword);
- if (!SemaRef.isAcceptableTagRedeclaration(TD, Kind, /*isDefinition*/false,
- TagLocation, Id)) {
- SemaRef.Diag(TagLocation, diag::err_use_with_wrong_tag)
- << Id
- << FixItHint::CreateReplacement(SourceRange(TagLocation),
- TD->getKindName());
- SemaRef.Diag(TD->getLocation(), diag::note_previous_use);
- }
- }
- }
-
- return inherited::RebuildElaboratedType(KeywordLoc, Keyword, QualifierLoc, T);
-}
-
TemplateName TemplateInstantiator::TransformTemplateName(
- CXXScopeSpec &SS, TemplateName Name, SourceLocation NameLoc,
- QualType ObjectType, NamedDecl *FirstQualifierInScope,
- bool AllowInjectedClassName) {
+ NestedNameSpecifierLoc &QualifierLoc, SourceLocation TemplateKWLoc,
+ TemplateName Name, SourceLocation NameLoc, QualType ObjectType,
+ NamedDecl *FirstQualifierInScope, bool AllowInjectedClassName) {
if (TemplateTemplateParmDecl *TTP
= dyn_cast_or_null<TemplateTemplateParmDecl>(Name.getAsTemplateDecl())) {
if (TTP->getDepth() < TemplateArgs.getNumLevels()) {
@@ -2163,6 +2129,12 @@ TemplateName TemplateInstantiator::TransformTemplateName(
TemplateName Template = Arg.getAsTemplate();
assert(!Template.isNull() && "Null template template argument");
+ if (NestedNameSpecifier Qualifier = Template.getQualifier()) {
+ NestedNameSpecifierLocBuilder Builder;
+ Builder.MakeTrivial(SemaRef.Context, Qualifier, NameLoc);
+ QualifierLoc = Builder.getWithLocInContext(SemaRef.Context);
+ }
+
return getSema().Context.getSubstTemplateTemplateParm(
Template, AssociatedDecl, TTP->getIndex(), PackIndex, Final);
}
@@ -2181,9 +2153,9 @@ TemplateName TemplateInstantiator::TransformTemplateName(
getPackIndex(Pack), SubstPack->getFinal());
}
- return inherited::TransformTemplateName(SS, Name, NameLoc, ObjectType,
- FirstQualifierInScope,
- AllowInjectedClassName);
+ return inherited::TransformTemplateName(
+ QualifierLoc, TemplateKWLoc, Name, NameLoc, ObjectType,
+ FirstQualifierInScope, AllowInjectedClassName);
}
ExprResult
@@ -3159,10 +3131,6 @@ namespace {
// Only these types can contain 'auto' types, and subsequently be replaced
// by references to invented parameters.
- TemplateTypeParmDecl *VisitElaboratedType(const ElaboratedType *T) {
- return Visit(T->getNamedType());
- }
-
TemplateTypeParmDecl *VisitPointerType(const PointerType *T) {
return Visit(T->getPointeeType());
}
@@ -4557,14 +4525,14 @@ Sema::SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
}
TemplateName
-Sema::SubstTemplateName(NestedNameSpecifierLoc QualifierLoc,
- TemplateName Name, SourceLocation Loc,
+Sema::SubstTemplateName(SourceLocation TemplateKWLoc,
+ NestedNameSpecifierLoc &QualifierLoc, TemplateName Name,
+ SourceLocation NameLoc,
const MultiLevelTemplateArgumentList &TemplateArgs) {
- TemplateInstantiator Instantiator(*this, TemplateArgs, Loc,
+ TemplateInstantiator Instantiator(*this, TemplateArgs, NameLoc,
DeclarationName());
- CXXScopeSpec SS;
- SS.Adopt(QualifierLoc);
- return Instantiator.TransformTemplateName(SS, Name, Loc);
+ return Instantiator.TransformTemplateName(QualifierLoc, TemplateKWLoc, Name,
+ NameLoc);
}
static const Decl *getCanonicalParmVarDecl(const Decl *D) {
diff --git a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
index 87ec4f7..f02a295 100644
--- a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
+++ b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
@@ -1483,9 +1483,9 @@ Decl *TemplateDeclInstantiator::InstantiateTypedefNameDecl(TypedefNameDecl *D,
// If the old typedef was the name for linkage purposes of an anonymous
// tag decl, re-establish that relationship for the new typedef.
if (const TagType *oldTagType = D->getUnderlyingType()->getAs<TagType>()) {
- TagDecl *oldTag = oldTagType->getDecl();
+ TagDecl *oldTag = oldTagType->getOriginalDecl();
if (oldTag->getTypedefNameForAnonDecl() == D && !Invalid) {
- TagDecl *newTag = DI->getType()->castAs<TagType>()->getDecl();
+ TagDecl *newTag = DI->getType()->castAs<TagType>()->getOriginalDecl();
assert(!newTag->hasNameForLinkage());
newTag->setTypedefNameForAnonDecl(Typedef);
}
@@ -2250,8 +2250,7 @@ Decl *TemplateDeclInstantiator::VisitClassTemplateDecl(ClassTemplateDecl *D) {
CXXRecordDecl *RecordInst = CXXRecordDecl::Create(
SemaRef.Context, Pattern->getTagKind(), DC, Pattern->getBeginLoc(),
- Pattern->getLocation(), Pattern->getIdentifier(), PrevDecl,
- /*DelayTypeCreation=*/true);
+ Pattern->getLocation(), Pattern->getIdentifier(), PrevDecl);
if (QualifierLoc)
RecordInst->setQualifierInfo(QualifierLoc);
@@ -2271,8 +2270,6 @@ Decl *TemplateDeclInstantiator::VisitClassTemplateDecl(ClassTemplateDecl *D) {
if (PrevClassTemplate) {
Inst->setCommonPtr(PrevClassTemplate->getCommonPtr());
- RecordInst->setTypeForDecl(
- PrevClassTemplate->getTemplatedDecl()->getTypeForDecl());
const ClassTemplateDecl *MostRecentPrevCT =
PrevClassTemplate->getMostRecentDecl();
TemplateParameterList *PrevParams =
@@ -2306,10 +2303,6 @@ Decl *TemplateDeclInstantiator::VisitClassTemplateDecl(ClassTemplateDecl *D) {
Inst->setPreviousDecl(PrevClassTemplate);
- // Trigger creation of the type for the instantiation.
- SemaRef.Context.getInjectedClassNameType(
- RecordInst, Inst->getInjectedClassNameSpecialization());
-
// Finish handling of friends.
if (isFriend) {
DC->makeDeclVisibleInContext(Inst);
@@ -2515,11 +2508,9 @@ Decl *TemplateDeclInstantiator::VisitCXXRecordDecl(CXXRecordDecl *D) {
else
Record = CXXRecordDecl::Create(SemaRef.Context, D->getTagKind(), Owner,
D->getBeginLoc(), D->getLocation(),
- D->getIdentifier(), PrevDecl,
- /*DelayTypeCreation=*/IsInjectedClassName);
- // Link the type of the injected-class-name to that of the outer class.
- if (IsInjectedClassName)
- (void)SemaRef.Context.getTypeDeclType(Record, cast<CXXRecordDecl>(Owner));
+ D->getIdentifier(), PrevDecl);
+
+ Record->setImplicit(D->isImplicit());
// Substitute the nested name specifier, if any.
if (SubstQualifier(D, Record))
@@ -2528,7 +2519,6 @@ Decl *TemplateDeclInstantiator::VisitCXXRecordDecl(CXXRecordDecl *D) {
SemaRef.InstantiateAttrsForDecl(TemplateArgs, D, Record, LateAttrs,
StartingScope);
- Record->setImplicit(D->isImplicit());
// FIXME: Check against AS_none is an ugly hack to work around the issue that
// the tag decls introduced by friend class declarations don't have an access
// specifier. Remove once this area of the code gets sorted out.
@@ -3164,8 +3154,8 @@ Decl *TemplateDeclInstantiator::VisitCXXMethodDecl(
Method->setIneligibleOrNotSelected(true);
Method->setRangeEnd(Destructor->getEndLoc());
Method->setDeclName(SemaRef.Context.DeclarationNames.getCXXDestructorName(
- SemaRef.Context.getCanonicalType(
- SemaRef.Context.getTypeDeclType(Record))));
+
+ SemaRef.Context.getCanonicalTagType(Record)));
} else if (CXXConversionDecl *Conversion = dyn_cast<CXXConversionDecl>(D)) {
Method = CXXConversionDecl::Create(
SemaRef.Context, Record, StartLoc, NameInfo, T, TInfo,
@@ -3793,19 +3783,18 @@ TemplateDeclInstantiator::VisitTemplateTemplateParmDecl(
D->getPosition(), D->isParameterPack(), D->getIdentifier(),
D->templateParameterKind(), D->wasDeclaredWithTypename(), InstParams);
if (D->hasDefaultArgument() && !D->defaultArgumentWasInherited()) {
- NestedNameSpecifierLoc QualifierLoc =
- D->getDefaultArgument().getTemplateQualifierLoc();
- QualifierLoc =
- SemaRef.SubstNestedNameSpecifierLoc(QualifierLoc, TemplateArgs);
+ const TemplateArgumentLoc &A = D->getDefaultArgument();
+ NestedNameSpecifierLoc QualifierLoc = A.getTemplateQualifierLoc();
+ // FIXME: Pass in the template keyword location.
TemplateName TName = SemaRef.SubstTemplateName(
- QualifierLoc, D->getDefaultArgument().getArgument().getAsTemplate(),
- D->getDefaultArgument().getTemplateNameLoc(), TemplateArgs);
+ A.getTemplateKWLoc(), QualifierLoc, A.getArgument().getAsTemplate(),
+ A.getTemplateNameLoc(), TemplateArgs);
if (!TName.isNull())
Param->setDefaultArgument(
SemaRef.Context,
TemplateArgumentLoc(SemaRef.Context, TemplateArgument(TName),
- D->getDefaultArgument().getTemplateQualifierLoc(),
- D->getDefaultArgument().getTemplateNameLoc()));
+ A.getTemplateKWLoc(), QualifierLoc,
+ A.getTemplateNameLoc()));
}
Param->setAccess(AS_public);
Param->setImplicit(D->isImplicit());
@@ -3907,7 +3896,7 @@ Decl *TemplateDeclInstantiator::VisitUsingDecl(UsingDecl *D) {
if (NameInfo.getName().getNameKind() == DeclarationName::CXXConstructorName)
if (auto *RD = dyn_cast<CXXRecordDecl>(SemaRef.CurContext))
NameInfo.setName(SemaRef.Context.DeclarationNames.getCXXConstructorName(
- SemaRef.Context.getCanonicalType(SemaRef.Context.getRecordType(RD))));
+ SemaRef.Context.getCanonicalTagType(RD)));
// We only need to do redeclaration lookups if we're in a class scope (in
// fact, it's not really even possible in non-class scopes).
@@ -4819,18 +4808,13 @@ TemplateDeclInstantiator::InstantiateClassTemplatePartialSpecialization(
ClassTemplate->findPartialSpecialization(CTAI.CanonicalConverted,
InstParams, InsertPos);
- // Build the type that describes the converted template arguments of the class
- // template partial specialization.
- TypeSourceInfo *WrittenTy = SemaRef.Context.getTemplateSpecializationTypeInfo(
- TemplateName(ClassTemplate), TemplArgInfo->getLAngleLoc(),
- InstTemplateArgs, CTAI.CanonicalConverted);
-
// Create the class template partial specialization declaration.
ClassTemplatePartialSpecializationDecl *InstPartialSpec =
ClassTemplatePartialSpecializationDecl::Create(
SemaRef.Context, PartialSpec->getTagKind(), Owner,
PartialSpec->getBeginLoc(), PartialSpec->getLocation(), InstParams,
- ClassTemplate, CTAI.CanonicalConverted, WrittenTy->getType(),
+ ClassTemplate, CTAI.CanonicalConverted,
+ /*CanonInjectedTST=*/CanQualType(),
/*PrevDecl=*/nullptr);
InstPartialSpec->setTemplateArgsAsWritten(InstTemplateArgs);
@@ -4861,7 +4845,7 @@ TemplateDeclInstantiator::InstantiateClassTemplatePartialSpecialization(
diag::err_partial_spec_redeclared)
<< InstPartialSpec;
SemaRef.Diag(PrevDecl->getLocation(), diag::note_prev_partial_spec_here)
- << SemaRef.Context.getTypeDeclType(PrevDecl);
+ << SemaRef.Context.getCanonicalTagType(PrevDecl);
return nullptr;
}
@@ -5750,15 +5734,19 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
QualType TransformRecordType(TypeLocBuilder &TLB, RecordTypeLoc TL) {
const RecordType *T = TL.getTypePtr();
RecordDecl *Record = cast_or_null<RecordDecl>(
- getDerived().TransformDecl(TL.getNameLoc(), T->getDecl()));
+ getDerived().TransformDecl(TL.getNameLoc(), T->getOriginalDecl()));
if (Record != OldDecl)
return Base::TransformRecordType(TLB, TL);
- QualType Result = getDerived().RebuildRecordType(NewDecl);
+ // FIXME: transform the rest of the record type.
+ QualType Result = getDerived().RebuildTagType(
+ ElaboratedTypeKeyword::None, /*Qualifier=*/std::nullopt, NewDecl);
if (Result.isNull())
return QualType();
- RecordTypeLoc NewTL = TLB.push<RecordTypeLoc>(Result);
+ TagTypeLoc NewTL = TLB.push<RecordTypeLoc>(Result);
+ NewTL.setElaboratedKeywordLoc(SourceLocation());
+ NewTL.setQualifierLoc(NestedNameSpecifierLoc());
NewTL.setNameLoc(TL.getNameLoc());
return Result;
}
@@ -6922,7 +6910,8 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
Args.addArgument(
getTrivialTemplateArgumentLoc(UnpackedArg, QualType(), Loc));
}
- QualType T = CheckTemplateIdType(TemplateName(TD), Loc, Args);
+ QualType T = CheckTemplateIdType(ElaboratedTypeKeyword::None,
+ TemplateName(TD), Loc, Args);
// We may get a non-null type with errors, in which case
// `getAsCXXRecordDecl` will return `nullptr`. For instance, this
// happens when one of the template arguments is an invalid
@@ -6988,16 +6977,17 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
bool IsBeingInstantiated = false;
if (CXXRecordDecl *Spec = dyn_cast<CXXRecordDecl>(ParentDC)) {
if (!Spec->isDependentContext()) {
- QualType T = Context.getTypeDeclType(Spec);
+ CanQualType T = Context.getCanonicalTagType(Spec);
const RecordType *Tag = T->getAs<RecordType>();
assert(Tag && "type of non-dependent record is not a RecordType");
- if (Tag->isBeingDefined())
+ auto *TagDecl =
+ cast<CXXRecordDecl>(Tag->getOriginalDecl())->getDefinitionOrSelf();
+ if (TagDecl->isBeingDefined())
IsBeingInstantiated = true;
- if (!Tag->isBeingDefined() &&
- RequireCompleteType(Loc, T, diag::err_incomplete_type))
+ else if (RequireCompleteType(Loc, T, diag::err_incomplete_type))
return nullptr;
- ParentDC = Tag->getDecl();
+ ParentDC = TagDecl;
}
}
@@ -7043,8 +7033,8 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
// of member classes, and introduces ordering dependencies via
// template instantiation.
Diag(Loc, diag::err_member_not_yet_instantiated)
- << D->getDeclName()
- << Context.getTypeDeclType(cast<CXXRecordDecl>(ParentDC));
+ << D->getDeclName()
+ << Context.getCanonicalTagType(cast<CXXRecordDecl>(ParentDC));
Diag(D->getLocation(), diag::note_non_instantiated_member_here);
} else if (EnumConstantDecl *ED = dyn_cast<EnumConstantDecl>(D)) {
// This enumeration constant was found when the template was defined,
@@ -7059,7 +7049,7 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
<< D->getDeclName()
<< Context.getTypeDeclType(cast<TypeDecl>(Spec->getDeclContext()));
Diag(Spec->getLocation(), diag::note_enum_specialized_here)
- << Context.getTypeDeclType(Spec);
+ << Context.getCanonicalTagType(Spec);
} else {
// We should have found something, but didn't.
llvm_unreachable("Unable to find instantiation of declaration!");
diff --git a/clang/lib/Sema/SemaTemplateVariadic.cpp b/clang/lib/Sema/SemaTemplateVariadic.cpp
index d2baa2e..377f2ed 100644
--- a/clang/lib/Sema/SemaTemplateVariadic.cpp
+++ b/clang/lib/Sema/SemaTemplateVariadic.cpp
@@ -155,21 +155,22 @@ class CollectUnexpandedParameterPacksVisitor
/// Suppress traversal into types that do not contain
/// unexpanded parameter packs.
- bool TraverseType(QualType T) override {
+ bool TraverseType(QualType T, bool TraverseQualifier = true) override {
if ((!T.isNull() && T->containsUnexpandedParameterPack()) ||
InLambdaOrBlock)
- return DynamicRecursiveASTVisitor::TraverseType(T);
+ return DynamicRecursiveASTVisitor::TraverseType(T, TraverseQualifier);
return true;
}
/// Suppress traversal into types with location information
/// that do not contain unexpanded parameter packs.
- bool TraverseTypeLoc(TypeLoc TL) override {
+ bool TraverseTypeLoc(TypeLoc TL, bool TraverseQualifier = true) override {
if ((!TL.getType().isNull() &&
TL.getType()->containsUnexpandedParameterPack()) ||
InLambdaOrBlock)
- return DynamicRecursiveASTVisitor::TraverseTypeLoc(TL);
+ return DynamicRecursiveASTVisitor::TraverseTypeLoc(TL,
+ TraverseQualifier);
return true;
}
@@ -195,10 +196,12 @@ class CollectUnexpandedParameterPacksVisitor
/// Suppress traversal of pack expansion expressions and types.
///@{
- bool TraversePackExpansionType(PackExpansionType *T) override {
+ bool TraversePackExpansionType(PackExpansionType *T,
+ bool TraverseQualifier) override {
return true;
}
- bool TraversePackExpansionTypeLoc(PackExpansionTypeLoc TL) override {
+ bool TraversePackExpansionTypeLoc(PackExpansionTypeLoc TL,
+ bool TraverseQualifier) override {
return true;
}
bool TraversePackExpansionExpr(PackExpansionExpr *E) override {
@@ -208,10 +211,12 @@ class CollectUnexpandedParameterPacksVisitor
bool TraversePackIndexingExpr(PackIndexingExpr *E) override {
return DynamicRecursiveASTVisitor::TraverseStmt(E->getIndexExpr());
}
- bool TraversePackIndexingType(PackIndexingType *E) override {
+ bool TraversePackIndexingType(PackIndexingType *E,
+ bool TraverseQualifier) override {
return DynamicRecursiveASTVisitor::TraverseStmt(E->getIndexExpr());
}
- bool TraversePackIndexingTypeLoc(PackIndexingTypeLoc TL) override {
+ bool TraversePackIndexingTypeLoc(PackIndexingTypeLoc TL,
+ bool TraverseQualifier) override {
return DynamicRecursiveASTVisitor::TraverseStmt(TL.getIndexExpr());
}
@@ -525,8 +530,7 @@ bool Sema::DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
// C++0x [temp.variadic]p5:
// An appearance of a name of a parameter pack that is not expanded is
// ill-formed.
- if (!SS.getScopeRep() ||
- !SS.getScopeRep()->containsUnexpandedParameterPack())
+ if (!SS.getScopeRep().containsUnexpandedParameterPack())
return false;
SmallVector<UnexpandedParameterPack, 2> Unexpanded;
@@ -654,7 +658,7 @@ Sema::ActOnPackExpansion(const ParsedTemplateArgument &Arg,
return ParsedTemplateArgument();
return ParsedTemplateArgument(Arg.getKind(), Result.get().getAsOpaquePtr(),
- Arg.getLocation());
+ Arg.getNameLoc());
}
case ParsedTemplateArgument::NonType: {
@@ -663,12 +667,12 @@ Sema::ActOnPackExpansion(const ParsedTemplateArgument &Arg,
return ParsedTemplateArgument();
return ParsedTemplateArgument(Arg.getKind(), Result.get(),
- Arg.getLocation());
+ Arg.getNameLoc());
}
case ParsedTemplateArgument::Template:
if (!Arg.getAsTemplate().get().containsUnexpandedParameterPack()) {
- SourceRange R(Arg.getLocation());
+ SourceRange R(Arg.getNameLoc());
if (Arg.getScopeSpec().isValid())
R.setBegin(Arg.getScopeSpec().getBeginLoc());
Diag(EllipsisLoc, diag::err_pack_expansion_without_parameter_packs)
@@ -1115,8 +1119,7 @@ bool Sema::containsUnexpandedParameterPacks(Declarator &D) {
break;
case DeclaratorChunk::MemberPointer:
- if (Chunk.Mem.Scope().getScopeRep() &&
- Chunk.Mem.Scope().getScopeRep()->containsUnexpandedParameterPack())
+ if (Chunk.Mem.Scope().getScopeRep().containsUnexpandedParameterPack())
return true;
break;
}
@@ -1300,9 +1303,9 @@ TemplateArgumentLoc Sema::getTemplateArgumentPackExpansionPattern(
case TemplateArgument::TemplateExpansion:
Ellipsis = OrigLoc.getTemplateEllipsisLoc();
NumExpansions = Argument.getNumTemplateExpansions();
- return TemplateArgumentLoc(Context, Argument.getPackExpansionPattern(),
- OrigLoc.getTemplateQualifierLoc(),
- OrigLoc.getTemplateNameLoc());
+ return TemplateArgumentLoc(
+ Context, Argument.getPackExpansionPattern(), OrigLoc.getTemplateKWLoc(),
+ OrigLoc.getTemplateQualifierLoc(), OrigLoc.getTemplateNameLoc());
case TemplateArgument::Declaration:
case TemplateArgument::NullPtr:
diff --git a/clang/lib/Sema/SemaType.cpp b/clang/lib/Sema/SemaType.cpp
index 1289bed..0985b5b 100644
--- a/clang/lib/Sema/SemaType.cpp
+++ b/clang/lib/Sema/SemaType.cpp
@@ -1211,14 +1211,11 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
DS.getTypeSpecSign() == TypeSpecifierSign::Unspecified &&
"No qualifiers on tag names!");
+ ElaboratedTypeKeyword Keyword =
+ KeywordHelpers::getKeywordForTypeSpec(DS.getTypeSpecType());
// TypeQuals handled by caller.
- Result = Context.getTypeDeclType(D);
-
- // In both C and C++, make an ElaboratedType.
- ElaboratedTypeKeyword Keyword
- = ElaboratedType::getKeywordForTypeSpec(DS.getTypeSpecType());
- Result = S.getElaboratedType(Keyword, DS.getTypeSpecScope(), Result,
- DS.isTypeSpecOwned() ? D : nullptr);
+ Result = Context.getTagType(Keyword, DS.getTypeSpecScope().getScopeRep(), D,
+ DS.isTypeSpecOwned());
break;
}
case DeclSpec::TST_typename: {
@@ -1241,7 +1238,7 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
assert(!Result.isNull() && "Didn't get a type for typeof?");
if (!Result->isDependentType())
if (const TagType *TT = Result->getAs<TagType>())
- S.DiagnoseUseOfDecl(TT->getDecl(), DS.getTypeSpecTypeLoc());
+ S.DiagnoseUseOfDecl(TT->getOriginalDecl(), DS.getTypeSpecTypeLoc());
// TypeQuals handled by caller.
Result = Context.getTypeOfType(
Result, DS.getTypeSpecType() == DeclSpec::TST_typeof_unqualType
@@ -2085,7 +2082,7 @@ QualType Sema::BuildArrayType(QualType T, ArraySizeModifier ASM,
// an inheritance model, even if it's inside an unused typedef.
if (Context.getTargetInfo().getCXXABI().isMicrosoft())
if (const MemberPointerType *MPTy = T->getAs<MemberPointerType>())
- if (!MPTy->getQualifier()->isDependent())
+ if (!MPTy->getQualifier().isDependent())
(void)isCompleteType(Loc, T);
} else {
@@ -2120,7 +2117,9 @@ QualType Sema::BuildArrayType(QualType T, ArraySizeModifier ASM,
if (const RecordType *EltTy = T->getAs<RecordType>()) {
// If the element type is a struct or union that contains a variadic
// array, accept it as a GNU extension: C99 6.7.2.1p2.
- if (EltTy->getDecl()->hasFlexibleArrayMember())
+ if (EltTy->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->hasFlexibleArrayMember())
Diag(Loc, diag::ext_flexible_array_in_array) << T;
} else if (T->isObjCObjectType()) {
Diag(Loc, diag::err_objc_array_of_interfaces) << T;
@@ -3460,7 +3459,7 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
if (DiagID != 0) {
SemaRef.Diag(OwnedTagDecl->getLocation(), DiagID)
- << SemaRef.Context.getTypeDeclType(OwnedTagDecl);
+ << SemaRef.Context.getCanonicalTagType(OwnedTagDecl);
D.setInvalidType(true);
}
}
@@ -3653,11 +3652,22 @@ static void warnAboutRedundantParens(Sema &S, Declarator &D, QualType T) {
// here: even (e.g.) "int ::x" is visually ambiguous even though it's
// formally unambiguous.
if (StartsWithDeclaratorId && D.getCXXScopeSpec().isValid()) {
- for (NestedNameSpecifier *NNS = D.getCXXScopeSpec().getScopeRep(); NNS;
- NNS = NNS->getPrefix()) {
- if (NNS->getKind() == NestedNameSpecifier::Global)
+ NestedNameSpecifier NNS = D.getCXXScopeSpec().getScopeRep();
+ for (;;) {
+ switch (NNS.getKind()) {
+ case NestedNameSpecifier::Kind::Global:
return;
+ case NestedNameSpecifier::Kind::Type:
+ NNS = NNS.getAsType()->getPrefix();
+ continue;
+ case NestedNameSpecifier::Kind::Namespace:
+ NNS = NNS.getAsNamespaceAndPrefix().Prefix;
+ continue;
+ default:
+ goto out;
+ }
}
+ out:;
}
S.Diag(Paren.Loc, diag::warn_redundant_parens_around_declarator)
@@ -3965,7 +3975,8 @@ classifyPointerDeclarator(Sema &S, QualType type, Declarator &declarator,
return PointerDeclaratorKind::NonPointer;
if (auto recordType = type->getAs<RecordType>()) {
- RecordDecl *recordDecl = recordType->getDecl();
+ RecordDecl *recordDecl =
+ recordType->getOriginalDecl()->getDefinitionOrSelf();
// If this is CFErrorRef*, report it as such.
if (numNormalPointers == 2 && numTypeSpecifierPointers < 2 &&
@@ -5101,7 +5112,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// Types shall not be defined in return or parameter types.
TagDecl *Tag = cast<TagDecl>(D.getDeclSpec().getRepAsDecl());
S.Diag(Tag->getLocation(), diag::err_type_defined_in_result_type)
- << Context.getTypeDeclType(Tag);
+ << Context.getCanonicalTagType(Tag);
}
// Exception specs are not allowed in typedefs. Complain, but add it
@@ -5319,7 +5330,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
state.getDeclarator()
.getCXXScopeSpec()
.getScopeRep()
- ->getKind() == NestedNameSpecifier::TypeSpec) ||
+ .getKind() == NestedNameSpecifier::Kind::Type) ||
state.getDeclarator().getContext() ==
DeclaratorContext::Member ||
state.getDeclarator().getContext() ==
@@ -5898,7 +5909,49 @@ namespace {
// int __attr * __attr * __attr *p;
void VisitPointerTypeLoc(PointerTypeLoc TL) { Visit(TL.getNextTypeLoc()); }
void VisitTypedefTypeLoc(TypedefTypeLoc TL) {
- TL.setNameLoc(DS.getTypeSpecTypeLoc());
+ if (DS.getTypeSpecType() == TST_typename) {
+ TypeSourceInfo *TInfo = nullptr;
+ Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
+ if (TInfo) {
+ TL.copy(TInfo->getTypeLoc().castAs<TypedefTypeLoc>());
+ return;
+ }
+ }
+ TL.set(TL.getTypePtr()->getKeyword() != ElaboratedTypeKeyword::None
+ ? DS.getTypeSpecTypeLoc()
+ : SourceLocation(),
+ DS.getTypeSpecScope().getWithLocInContext(Context),
+ DS.getTypeSpecTypeNameLoc());
+ }
+ void VisitUnresolvedUsingTypeLoc(UnresolvedUsingTypeLoc TL) {
+ if (DS.getTypeSpecType() == TST_typename) {
+ TypeSourceInfo *TInfo = nullptr;
+ Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
+ if (TInfo) {
+ TL.copy(TInfo->getTypeLoc().castAs<UnresolvedUsingTypeLoc>());
+ return;
+ }
+ }
+ TL.set(TL.getTypePtr()->getKeyword() != ElaboratedTypeKeyword::None
+ ? DS.getTypeSpecTypeLoc()
+ : SourceLocation(),
+ DS.getTypeSpecScope().getWithLocInContext(Context),
+ DS.getTypeSpecTypeNameLoc());
+ }
+ void VisitUsingTypeLoc(UsingTypeLoc TL) {
+ if (DS.getTypeSpecType() == TST_typename) {
+ TypeSourceInfo *TInfo = nullptr;
+ Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
+ if (TInfo) {
+ TL.copy(TInfo->getTypeLoc().castAs<UsingTypeLoc>());
+ return;
+ }
+ }
+ TL.set(TL.getTypePtr()->getKeyword() != ElaboratedTypeKeyword::None
+ ? DS.getTypeSpecTypeLoc()
+ : SourceLocation(),
+ DS.getTypeSpecScope().getWithLocInContext(Context),
+ DS.getTypeSpecTypeNameLoc());
}
void VisitObjCInterfaceTypeLoc(ObjCInterfaceTypeLoc TL) {
TL.setNameLoc(DS.getTypeSpecTypeLoc());
@@ -5929,16 +5982,9 @@ namespace {
}
TypeLoc OldTL = TInfo->getTypeLoc();
- if (TInfo->getType()->getAs<ElaboratedType>()) {
- ElaboratedTypeLoc ElabTL = OldTL.castAs<ElaboratedTypeLoc>();
- TemplateSpecializationTypeLoc NamedTL = ElabTL.getNamedTypeLoc()
- .castAs<TemplateSpecializationTypeLoc>();
- TL.copy(NamedTL);
- } else {
- TL.copy(OldTL.castAs<TemplateSpecializationTypeLoc>());
- assert(TL.getRAngleLoc() == OldTL.castAs<TemplateSpecializationTypeLoc>().getRAngleLoc());
- }
-
+ TL.copy(OldTL.castAs<TemplateSpecializationTypeLoc>());
+ assert(TL.getRAngleLoc() ==
+ OldTL.castAs<TemplateSpecializationTypeLoc>().getRAngleLoc());
}
void VisitTypeOfExprTypeLoc(TypeOfExprTypeLoc TL) {
assert(DS.getTypeSpecType() == DeclSpec::TST_typeofExpr ||
@@ -5987,24 +6033,6 @@ namespace {
TL.expandBuiltinRange(DS.getTypeSpecWidthRange());
}
}
- void VisitElaboratedTypeLoc(ElaboratedTypeLoc TL) {
- if (DS.getTypeSpecType() == TST_typename) {
- TypeSourceInfo *TInfo = nullptr;
- Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
- if (TInfo)
- if (auto ETL = TInfo->getTypeLoc().getAs<ElaboratedTypeLoc>()) {
- TL.copy(ETL);
- return;
- }
- }
- const ElaboratedType *T = TL.getTypePtr();
- TL.setElaboratedKeywordLoc(T->getKeyword() != ElaboratedTypeKeyword::None
- ? DS.getTypeSpecTypeLoc()
- : SourceLocation());
- const CXXScopeSpec& SS = DS.getTypeSpecScope();
- TL.setQualifierLoc(SS.getWithLocInContext(Context));
- Visit(TL.getNextTypeLoc().getUnqualifiedLoc());
- }
void VisitDependentNameTypeLoc(DependentNameTypeLoc TL) {
assert(DS.getTypeSpecType() == TST_typename);
TypeSourceInfo *TInfo = nullptr;
@@ -6063,7 +6091,29 @@ namespace {
ASTTemplateArgumentListInfo::Create(Context, TemplateArgsInfo));
TL.setConceptReference(CR);
}
+ void VisitDeducedTemplateSpecializationTypeLoc(
+ DeducedTemplateSpecializationTypeLoc TL) {
+ assert(DS.getTypeSpecType() == TST_typename);
+ TypeSourceInfo *TInfo = nullptr;
+ Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
+ assert(TInfo);
+ TL.copy(
+ TInfo->getTypeLoc().castAs<DeducedTemplateSpecializationTypeLoc>());
+ }
void VisitTagTypeLoc(TagTypeLoc TL) {
+ if (DS.getTypeSpecType() == TST_typename) {
+ TypeSourceInfo *TInfo = nullptr;
+ Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
+ if (TInfo) {
+ TL.copy(TInfo->getTypeLoc().castAs<TagTypeLoc>());
+ return;
+ }
+ }
+ TL.setElaboratedKeywordLoc(TL.getTypePtr()->getKeyword() !=
+ ElaboratedTypeKeyword::None
+ ? DS.getTypeSpecTypeLoc()
+ : SourceLocation());
+ TL.setQualifierLoc(DS.getTypeSpecScope().getWithLocInContext(Context));
TL.setNameLoc(DS.getTypeSpecTypeNameLoc());
}
void VisitAtomicTypeLoc(AtomicTypeLoc TL) {
@@ -7029,9 +7079,6 @@ static bool handleMSPointerTypeQualifierAttr(TypeProcessingState &State,
if (const TypedefType *TT = dyn_cast<TypedefType>(Desugared)) {
Desugared = TT->desugar();
continue;
- } else if (const ElaboratedType *ET = dyn_cast<ElaboratedType>(Desugared)) {
- Desugared = ET->desugar();
- continue;
}
const AttributedType *AT = dyn_cast<AttributedType>(Desugared);
if (!AT)
@@ -9170,11 +9217,9 @@ bool Sema::RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser) {
if (RequireCompleteTypeImpl(Loc, T, Kind, &Diagnoser))
return true;
- if (const TagType *Tag = T->getAs<TagType>()) {
- if (!Tag->getDecl()->isCompleteDefinitionRequired()) {
- Tag->getDecl()->setCompleteDefinitionRequired();
- Consumer.HandleTagDeclRequiredDefinition(Tag->getDecl());
- }
+ if (auto *TD = T->getAsTagDecl(); TD && !TD->isCompleteDefinitionRequired()) {
+ TD->setCompleteDefinitionRequired();
+ Consumer.HandleTagDeclRequiredDefinition(TD);
}
return false;
}
@@ -9312,7 +9357,7 @@ bool Sema::hasReachableDefinition(NamedDecl *D, NamedDecl **Suggested,
/// Locks in the inheritance model for the given class and all of its bases.
static void assignInheritanceModel(Sema &S, CXXRecordDecl *RD) {
- RD = RD->getMostRecentNonInjectedDecl();
+ RD = RD->getMostRecentDecl();
if (!RD->hasAttr<MSInheritanceAttr>()) {
MSInheritanceModel IM;
bool BestCase = false;
@@ -9352,10 +9397,10 @@ bool Sema::RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
// assert(!T->isDependentType() &&
// "Can't ask whether a dependent type is complete");
- if (const MemberPointerType *MPTy = T->getAs<MemberPointerType>()) {
+ if (const auto *MPTy = dyn_cast<MemberPointerType>(T.getCanonicalType())) {
if (CXXRecordDecl *RD = MPTy->getMostRecentCXXRecordDecl();
RD && !RD->isDependentType()) {
- QualType T = Context.getTypeDeclType(RD);
+ CanQualType T = Context.getCanonicalTagType(RD);
if (getLangOpts().CompleteMemberPointers && !RD->isBeingDefined() &&
RequireCompleteType(Loc, T, Kind, diag::err_memptr_incomplete))
return true;
@@ -9492,10 +9537,10 @@ bool Sema::RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
// If the type was a forward declaration of a class/struct/union
// type, produce a note.
if (Tag && !Tag->isInvalidDecl() && !Tag->getLocation().isInvalid())
- Diag(Tag->getLocation(),
- Tag->isBeingDefined() ? diag::note_type_being_defined
- : diag::note_forward_declaration)
- << Context.getTagDeclType(Tag);
+ Diag(Tag->getLocation(), Tag->isBeingDefined()
+ ? diag::note_type_being_defined
+ : diag::note_forward_declaration)
+ << Context.getCanonicalTagType(Tag);
// If the Objective-C class was a forward declaration, produce a note.
if (IFace && !IFace->isInvalidDecl() && !IFace->getLocation().isInvalid())
@@ -9550,7 +9595,8 @@ bool Sema::RequireLiteralType(SourceLocation Loc, QualType T,
if (!RT)
return true;
- const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(RT->getOriginalDecl())->getDefinitionOrSelf();
// A partially-defined class type can't be a literal type, because a literal
// class type must have a trivial destructor (which can't be checked until
@@ -9628,15 +9674,6 @@ bool Sema::RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID) {
return RequireLiteralType(Loc, T, Diagnoser);
}
-QualType Sema::getElaboratedType(ElaboratedTypeKeyword Keyword,
- const CXXScopeSpec &SS, QualType T,
- TagDecl *OwnedTagDecl) {
- if (T.isNull())
- return T;
- return Context.getElaboratedType(
- Keyword, SS.isValid() ? SS.getScopeRep() : nullptr, T, OwnedTagDecl);
-}
-
QualType Sema::BuildTypeofExprType(Expr *E, TypeOfKind Kind) {
assert(!E->hasPlaceholderType() && "unexpected placeholder");
@@ -9647,7 +9684,7 @@ QualType Sema::BuildTypeofExprType(Expr *E, TypeOfKind Kind) {
if (!E->isTypeDependent()) {
QualType T = E->getType();
if (const TagType *TT = T->getAs<TagType>())
- DiagnoseUseOfDecl(TT->getDecl(), E->getExprLoc());
+ DiagnoseUseOfDecl(TT->getOriginalDecl(), E->getExprLoc());
}
return Context.getTypeOfExprType(E, Kind);
}
@@ -9813,8 +9850,7 @@ QualType Sema::BuildPackIndexingType(QualType Pattern, Expr *IndexExpr,
static QualType GetEnumUnderlyingType(Sema &S, QualType BaseType,
SourceLocation Loc) {
assert(BaseType->isEnumeralType());
- EnumDecl *ED = BaseType->castAs<EnumType>()->getDecl();
- assert(ED && "EnumType has no EnumDecl");
+ EnumDecl *ED = BaseType->castAs<EnumType>()->getOriginalDecl();
S.DiagnoseUseOfDecl(ED, Loc);
diff --git a/clang/lib/Sema/SemaTypeTraits.cpp b/clang/lib/Sema/SemaTypeTraits.cpp
index 1d8687e..9b9dd17 100644
--- a/clang/lib/Sema/SemaTypeTraits.cpp
+++ b/clang/lib/Sema/SemaTypeTraits.cpp
@@ -32,8 +32,7 @@ static CXXMethodDecl *LookupSpecialMemberFromXValue(Sema &SemaRef,
RD = RD->getDefinition();
SourceLocation LookupLoc = RD->getLocation();
- CanQualType CanTy = SemaRef.getASTContext().getCanonicalType(
- SemaRef.getASTContext().getTagDeclType(RD));
+ CanQualType CanTy = SemaRef.getASTContext().getCanonicalTagType(RD);
DeclarationName Name;
Expr *Arg = nullptr;
unsigned NumArgs;
@@ -562,7 +561,8 @@ static bool HasNoThrowOperator(const RecordType *RT, OverloadedOperatorKind Op,
bool (CXXRecordDecl::*HasTrivial)() const,
bool (CXXRecordDecl::*HasNonTrivial)() const,
bool (CXXMethodDecl::*IsDesiredOp)() const) {
- CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(RT->getOriginalDecl())->getDefinitionOrSelf();
if ((RD->*HasTrivial)() && !(RD->*HasNonTrivial)())
return true;
@@ -599,6 +599,7 @@ static bool HasNonDeletedDefaultedEqualityComparison(Sema &S,
if (Decl->isLambda())
return Decl->isCapturelessLambda();
+ CanQualType T = S.Context.getCanonicalTagType(Decl);
{
EnterExpressionEvaluationContext UnevaluatedContext(
S, Sema::ExpressionEvaluationContext::Unevaluated);
@@ -606,10 +607,7 @@ static bool HasNonDeletedDefaultedEqualityComparison(Sema &S,
Sema::ContextRAII TUContext(S, S.Context.getTranslationUnitDecl());
// const ClassT& obj;
- OpaqueValueExpr Operand(
- KeyLoc,
- Decl->getTypeForDecl()->getCanonicalTypeUnqualified().withConst(),
- ExprValueKind::VK_LValue);
+ OpaqueValueExpr Operand(KeyLoc, T.withConst(), ExprValueKind::VK_LValue);
UnresolvedSet<16> Functions;
// obj == obj;
S.LookupBinOp(S.TUScope, {}, BinaryOperatorKind::BO_EQ, Functions);
@@ -628,8 +626,7 @@ static bool HasNonDeletedDefaultedEqualityComparison(Sema &S,
return false;
if (!ParamT->isReferenceType() && !Decl->isTriviallyCopyable())
return false;
- if (ParamT.getNonReferenceType()->getUnqualifiedDesugaredType() !=
- Decl->getTypeForDecl())
+ if (!S.Context.hasSameUnqualifiedType(ParamT.getNonReferenceType(), T))
return false;
}
@@ -1613,9 +1610,9 @@ bool Sema::BuiltinIsBaseOf(SourceLocation RhsTLoc, QualType LhsT,
// Unions are never base classes, and never have base classes.
// It doesn't matter if they are complete or not. See PR#41843
- if (lhsRecord && lhsRecord->getDecl()->isUnion())
+ if (lhsRecord && lhsRecord->getOriginalDecl()->isUnion())
return false;
- if (rhsRecord && rhsRecord->getDecl()->isUnion())
+ if (rhsRecord && rhsRecord->getOriginalDecl()->isUnion())
return false;
if (lhsRecord == rhsRecord)
@@ -1629,8 +1626,8 @@ bool Sema::BuiltinIsBaseOf(SourceLocation RhsTLoc, QualType LhsT,
diag::err_incomplete_type_used_in_type_trait_expr))
return false;
- return cast<CXXRecordDecl>(rhsRecord->getDecl())
- ->isDerivedFrom(cast<CXXRecordDecl>(lhsRecord->getDecl()));
+ return cast<CXXRecordDecl>(rhsRecord->getOriginalDecl())
+ ->isDerivedFrom(cast<CXXRecordDecl>(lhsRecord->getOriginalDecl()));
}
static bool EvaluateBinaryTypeTrait(Sema &Self, TypeTrait BTT,
@@ -1670,8 +1667,9 @@ static bool EvaluateBinaryTypeTrait(Sema &Self, TypeTrait BTT,
diag::err_incomplete_type))
return false;
- return cast<CXXRecordDecl>(DerivedRecord->getDecl())
- ->isVirtuallyDerivedFrom(cast<CXXRecordDecl>(BaseRecord->getDecl()));
+ return cast<CXXRecordDecl>(DerivedRecord->getOriginalDecl())
+ ->isVirtuallyDerivedFrom(
+ cast<CXXRecordDecl>(BaseRecord->getOriginalDecl()));
}
case BTT_IsSame:
return Self.Context.hasSameType(LhsT, RhsT);
@@ -2018,11 +2016,10 @@ static ExtractedTypeTraitInfo ExtractTypeTraitFromExpression(const Expr *E) {
// std::is_xxx<>::value
if (const auto *VD = dyn_cast<VarDecl>(Ref->getDecl());
Ref->hasQualifier() && VD && VD->getIdentifier()->isStr("value")) {
- const Type *T = Ref->getQualifier()->getAsType();
- if (!T)
+ NestedNameSpecifier Qualifier = Ref->getQualifier();
+ if (Qualifier.getKind() != NestedNameSpecifier::Kind::Type)
return std::nullopt;
- const TemplateSpecializationType *Ts =
- T->getAs<TemplateSpecializationType>();
+ const auto *Ts = Qualifier.getAsType()->getAs<TemplateSpecializationType>();
if (!Ts)
return std::nullopt;
const TemplateDecl *D = Ts->getTemplateName().getAsTemplateDecl();
diff --git a/clang/lib/Sema/SemaWasm.cpp b/clang/lib/Sema/SemaWasm.cpp
index 8998492..e773113 100644
--- a/clang/lib/Sema/SemaWasm.cpp
+++ b/clang/lib/Sema/SemaWasm.cpp
@@ -17,6 +17,7 @@
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/TargetBuiltins.h"
+#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/Attr.h"
#include "clang/Sema/Sema.h"
@@ -227,7 +228,8 @@ bool SemaWasm::BuiltinWasmTableCopy(CallExpr *TheCall) {
return false;
}
-bool SemaWasm::BuiltinWasmTestFunctionPointerSignature(CallExpr *TheCall) {
+bool SemaWasm::BuiltinWasmTestFunctionPointerSignature(const TargetInfo &TI,
+ CallExpr *TheCall) {
if (SemaRef.checkArgCount(TheCall, 1))
return true;
@@ -250,27 +252,31 @@ bool SemaWasm::BuiltinWasmTestFunctionPointerSignature(CallExpr *TheCall) {
<< ArgType << FuncPtrArg->getSourceRange();
}
- // Check that the function pointer doesn't use reference types
- if (FuncTy->getReturnType().isWebAssemblyReferenceType()) {
- return Diag(
- FuncPtrArg->getBeginLoc(),
- diag::err_wasm_builtin_test_fp_sig_cannot_include_reference_type)
- << 0 << FuncTy->getReturnType() << FuncPtrArg->getSourceRange();
- }
- auto NParams = FuncTy->getNumParams();
- for (unsigned I = 0; I < NParams; I++) {
- if (FuncTy->getParamType(I).isWebAssemblyReferenceType()) {
+ if (TI.getABI() == "experimental-mv") {
+ auto isStructOrUnion = [](QualType T) {
+ return T->isUnionType() || T->isStructureType();
+ };
+ if (isStructOrUnion(FuncTy->getReturnType())) {
return Diag(
FuncPtrArg->getBeginLoc(),
diag::
- err_wasm_builtin_test_fp_sig_cannot_include_reference_type)
- << 1 << FuncPtrArg->getSourceRange();
+ err_wasm_builtin_test_fp_sig_cannot_include_struct_or_union)
+ << 0 << FuncTy->getReturnType() << FuncPtrArg->getSourceRange();
+ }
+ auto NParams = FuncTy->getNumParams();
+ for (unsigned I = 0; I < NParams; I++) {
+ if (isStructOrUnion(FuncTy->getParamType(I))) {
+ return Diag(
+ FuncPtrArg->getBeginLoc(),
+ diag::
+ err_wasm_builtin_test_fp_sig_cannot_include_struct_or_union)
+ << 1 << FuncPtrArg->getSourceRange();
+ }
}
}
// Set return type to int (the result of the test)
TheCall->setType(getASTContext().IntTy);
-
return false;
}
@@ -297,7 +303,7 @@ bool SemaWasm::CheckWebAssemblyBuiltinFunctionCall(const TargetInfo &TI,
case WebAssembly::BI__builtin_wasm_table_copy:
return BuiltinWasmTableCopy(TheCall);
case WebAssembly::BI__builtin_wasm_test_function_pointer_signature:
- return BuiltinWasmTestFunctionPointerSignature(TheCall);
+ return BuiltinWasmTestFunctionPointerSignature(TI, TheCall);
}
return false;
diff --git a/clang/lib/Sema/TreeTransform.h b/clang/lib/Sema/TreeTransform.h
index 0030946..1863e7f 100644
--- a/clang/lib/Sema/TreeTransform.h
+++ b/clang/lib/Sema/TreeTransform.h
@@ -591,12 +591,12 @@ public:
/// By default, transforms the template name by transforming the declarations
/// and nested-name-specifiers that occur within the template name.
/// Subclasses may override this function to provide alternate behavior.
- TemplateName
- TransformTemplateName(CXXScopeSpec &SS, TemplateName Name,
- SourceLocation NameLoc,
- QualType ObjectType = QualType(),
- NamedDecl *FirstQualifierInScope = nullptr,
- bool AllowInjectedClassName = false);
+ TemplateName TransformTemplateName(NestedNameSpecifierLoc &QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ TemplateName Name, SourceLocation NameLoc,
+ QualType ObjectType = QualType(),
+ NamedDecl *FirstQualifierInScope = nullptr,
+ bool AllowInjectedClassName = false);
/// Transform the given template argument.
///
@@ -610,9 +610,9 @@ public:
TemplateArgumentLoc &Output,
bool Uneval = false);
- TemplateArgument
- TransformNamedTemplateTemplateArgument(CXXScopeSpec &SS, TemplateName Name,
- SourceLocation NameLoc);
+ TemplateArgument TransformNamedTemplateTemplateArgument(
+ NestedNameSpecifierLoc &QualifierLoc, SourceLocation TemplateKeywordLoc,
+ TemplateName Name, SourceLocation NameLoc);
/// Transform the given set of template arguments.
///
@@ -697,20 +697,17 @@ public:
StmtResult TransformSEHHandler(Stmt *Handler);
- QualType
- TransformTemplateSpecializationType(TypeLocBuilder &TLB,
- TemplateSpecializationTypeLoc TL,
- TemplateName Template);
-
- QualType
- TransformDependentTemplateSpecializationType(TypeLocBuilder &TLB,
- DependentTemplateSpecializationTypeLoc TL,
+ QualType TransformTemplateSpecializationType(TypeLocBuilder &TLB,
+ TemplateSpecializationTypeLoc TL,
TemplateName Template,
CXXScopeSpec &SS);
QualType TransformDependentTemplateSpecializationType(
TypeLocBuilder &TLB, DependentTemplateSpecializationTypeLoc TL,
- CXXScopeSpec &SS);
+ QualType ObjectType, NamedDecl *UnqualLookup,
+ bool AllowInjectedClassName);
+
+ QualType TransformTagType(TypeLocBuilder &TLB, TagTypeLoc TL);
/// Transforms the parameters of a function type into the
/// given vectors.
@@ -1020,16 +1017,22 @@ public:
/// Rebuild an unresolved typename type, given the decl that
/// the UnresolvedUsingTypenameDecl was transformed to.
- QualType RebuildUnresolvedUsingType(SourceLocation NameLoc, Decl *D);
+ QualType RebuildUnresolvedUsingType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier,
+ SourceLocation NameLoc, Decl *D);
/// Build a new type found via an alias.
- QualType RebuildUsingType(UsingShadowDecl *Found, QualType Underlying) {
- return SemaRef.Context.getUsingType(Found, Underlying);
+ QualType RebuildUsingType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier, UsingShadowDecl *D,
+ QualType UnderlyingType) {
+ return SemaRef.Context.getUsingType(Keyword, Qualifier, D, UnderlyingType);
}
/// Build a new typedef type.
- QualType RebuildTypedefType(TypedefNameDecl *Typedef) {
- return SemaRef.Context.getTypeDeclType(Typedef);
+ QualType RebuildTypedefType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier,
+ TypedefNameDecl *Typedef) {
+ return SemaRef.Context.getTypedefType(Keyword, Qualifier, Typedef);
}
/// Build a new MacroDefined type.
@@ -1038,14 +1041,14 @@ public:
return SemaRef.Context.getMacroQualifiedType(T, MacroII);
}
- /// Build a new class/struct/union type.
- QualType RebuildRecordType(RecordDecl *Record) {
- return SemaRef.Context.getTypeDeclType(Record);
+ /// Build a new class/struct/union/enum type.
+ QualType RebuildTagType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier Qualifier, TagDecl *Tag) {
+ return SemaRef.Context.getTagType(Keyword, Qualifier, Tag,
+ /*OwnsTag=*/false);
}
-
- /// Build a new Enum type.
- QualType RebuildEnumType(EnumDecl *Enum) {
- return SemaRef.Context.getTypeDeclType(Enum);
+ QualType RebuildCanonicalTagType(TagDecl *Tag) {
+ return SemaRef.Context.getCanonicalTagType(Tag);
}
/// Build a new typeof(expr) type.
@@ -1094,10 +1097,10 @@ public:
/// By default, builds a new DeducedTemplateSpecializationType with the given
/// deduced type.
- QualType RebuildDeducedTemplateSpecializationType(TemplateName Template,
- QualType Deduced) {
+ QualType RebuildDeducedTemplateSpecializationType(
+ ElaboratedTypeKeyword Keyword, TemplateName Template, QualType Deduced) {
return SemaRef.Context.getDeducedTemplateSpecializationType(
- Template, Deduced, /*IsDependent*/ false);
+ Keyword, Template, Deduced, /*IsDependent*/ false);
}
/// Build a new template specialization type.
@@ -1105,7 +1108,8 @@ public:
/// By default, performs semantic analysis when building the template
/// specialization type. Subclasses may override this routine to provide
/// different behavior.
- QualType RebuildTemplateSpecializationType(TemplateName Template,
+ QualType RebuildTemplateSpecializationType(ElaboratedTypeKeyword Keyword,
+ TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &Args);
@@ -1117,41 +1121,22 @@ public:
return SemaRef.BuildParenType(InnerType);
}
- /// Build a new qualified name type.
- ///
- /// By default, builds a new ElaboratedType type from the keyword,
- /// the nested-name-specifier and the named type.
- /// Subclasses may override this routine to provide different behavior.
- QualType RebuildElaboratedType(SourceLocation KeywordLoc,
- ElaboratedTypeKeyword Keyword,
- NestedNameSpecifierLoc QualifierLoc,
- QualType Named) {
- return SemaRef.Context.getElaboratedType(Keyword,
- QualifierLoc.getNestedNameSpecifier(),
- Named);
- }
-
/// Build a new typename type that refers to a template-id.
///
/// By default, builds a new DependentNameType type from the
/// nested-name-specifier and the given type. Subclasses may override
/// this routine to provide different behavior.
QualType RebuildDependentTemplateSpecializationType(
- ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS,
- SourceLocation TemplateKWLoc, TemplateName Name, SourceLocation NameLoc,
- TemplateArgumentListInfo &Args, bool AllowInjectedClassName) {
+ ElaboratedTypeKeyword Keyword, SourceLocation TemplateKWLoc,
+ TemplateName Name, SourceLocation NameLoc, TemplateArgumentListInfo &Args,
+ bool AllowInjectedClassName) {
// If it's still dependent, make a dependent specialization.
if (const DependentTemplateStorage *S = Name.getAsDependentTemplateName())
return SemaRef.Context.getDependentTemplateSpecializationType(
Keyword, *S, Args.arguments());
- // Otherwise, make an elaborated type wrapping a non-dependent
- // specialization.
- QualType T =
- getDerived().RebuildTemplateSpecializationType(Name, NameLoc, Args);
- if (T.isNull())
- return QualType();
- return SemaRef.Context.getElaboratedType(Keyword, NNS, T);
+ return getDerived().RebuildTemplateSpecializationType(Keyword, Name,
+ NameLoc, Args);
}
/// Build a new typename type that refers to an identifier.
@@ -1168,7 +1153,7 @@ public:
CXXScopeSpec SS;
SS.Adopt(QualifierLoc);
- if (QualifierLoc.getNestedNameSpecifier()->isDependent()) {
+ if (QualifierLoc.getNestedNameSpecifier().isDependent()) {
// If the name is still dependent, just build a new dependent name type.
if (!SemaRef.computeDeclContext(SS))
return SemaRef.Context.getDependentNameType(Keyword,
@@ -1238,19 +1223,14 @@ public:
}
return QualType();
}
-
if (!SemaRef.isAcceptableTagRedeclaration(Tag, Kind, /*isDefinition*/false,
IdLoc, Id)) {
SemaRef.Diag(KeywordLoc, diag::err_use_with_wrong_tag) << Id;
SemaRef.Diag(Tag->getLocation(), diag::note_previous_use);
return QualType();
}
-
- // Build the elaborated-type-specifier type.
- QualType T = SemaRef.Context.getTypeDeclType(Tag);
- return SemaRef.Context.getElaboratedType(Keyword,
- QualifierLoc.getNestedNameSpecifier(),
- T);
+ return getDerived().RebuildTagType(
+ Keyword, QualifierLoc.getNestedNameSpecifier(), Tag);
}
/// Build a new pack expansion type.
@@ -1303,7 +1283,6 @@ public:
SourceLocation TemplateKWLoc,
const IdentifierInfo &Name,
SourceLocation NameLoc, QualType ObjectType,
- NamedDecl *FirstQualifierInScope,
bool AllowInjectedClassName);
/// Build a new template name given a nested name specifier and the
@@ -1323,7 +1302,6 @@ public:
SourceLocation TemplateKWLoc,
IdentifierOrOverloadedOperator IO,
SourceLocation NameLoc, QualType ObjectType,
- NamedDecl *FirstQualifierInScope,
bool AllowInjectedClassName);
/// Build a new template name given a template template parameter pack
@@ -4012,8 +3990,8 @@ public:
SemaRef.Context,
TemplateArgument(Pattern.getArgument().getAsTemplate(),
NumExpansions),
- Pattern.getTemplateQualifierLoc(), Pattern.getTemplateNameLoc(),
- EllipsisLoc);
+ Pattern.getTemplateKWLoc(), Pattern.getTemplateQualifierLoc(),
+ Pattern.getTemplateNameLoc(), EllipsisLoc);
case TemplateArgument::Null:
case TemplateArgument::Integral:
@@ -4258,23 +4236,29 @@ public:
}
private:
- TypeLoc TransformTypeInObjectScope(TypeLoc TL,
- QualType ObjectType,
- NamedDecl *FirstQualifierInScope,
- CXXScopeSpec &SS);
+ QualType TransformTypeInObjectScope(TypeLocBuilder &TLB, TypeLoc TL,
+ QualType ObjectType,
+ NamedDecl *FirstQualifierInScope);
TypeSourceInfo *TransformTypeInObjectScope(TypeSourceInfo *TSInfo,
QualType ObjectType,
- NamedDecl *FirstQualifierInScope,
- CXXScopeSpec &SS);
+ NamedDecl *FirstQualifierInScope) {
+ if (getDerived().AlreadyTransformed(TSInfo->getType()))
+ return TSInfo;
- TypeSourceInfo *TransformTSIInObjectScope(TypeLoc TL, QualType ObjectType,
- NamedDecl *FirstQualifierInScope,
- CXXScopeSpec &SS);
+ TypeLocBuilder TLB;
+ QualType T = TransformTypeInObjectScope(TLB, TSInfo->getTypeLoc(),
+ ObjectType, FirstQualifierInScope);
+ if (T.isNull())
+ return nullptr;
+ return TLB.getTypeSourceInfo(SemaRef.Context, T);
+ }
QualType TransformDependentNameType(TypeLocBuilder &TLB,
DependentNameTypeLoc TL,
- bool DeducibleTSTContext);
+ bool DeducibleTSTContext,
+ QualType ObjectType = QualType(),
+ NamedDecl *UnqualLookup = nullptr);
llvm::SmallVector<OpenACCClause *>
TransformOpenACCClauseList(OpenACCDirectiveKind DirKind,
@@ -4603,7 +4587,7 @@ NestedNameSpecifierLoc TreeTransform<Derived>::TransformNestedNameSpecifierLoc(
auto insertNNS = [&Qualifiers](NestedNameSpecifierLoc NNS) {
for (NestedNameSpecifierLoc Qualifier = NNS; Qualifier;
- Qualifier = Qualifier.getPrefix())
+ Qualifier = Qualifier.getAsNamespaceAndPrefix().Prefix)
Qualifiers.push_back(Qualifier);
};
insertNNS(NNS);
@@ -4611,76 +4595,87 @@ NestedNameSpecifierLoc TreeTransform<Derived>::TransformNestedNameSpecifierLoc(
CXXScopeSpec SS;
while (!Qualifiers.empty()) {
NestedNameSpecifierLoc Q = Qualifiers.pop_back_val();
- NestedNameSpecifier *QNNS = Q.getNestedNameSpecifier();
-
- switch (QNNS->getKind()) {
- case NestedNameSpecifier::Identifier: {
- Sema::NestedNameSpecInfo IdInfo(QNNS->getAsIdentifier(),
- Q.getLocalBeginLoc(), Q.getLocalEndLoc(),
- ObjectType);
- if (SemaRef.BuildCXXNestedNameSpecifier(/*Scope=*/nullptr, IdInfo, false,
- SS, FirstQualifierInScope, false))
- return NestedNameSpecifierLoc();
- break;
- }
+ NestedNameSpecifier QNNS = Q.getNestedNameSpecifier();
+
+ switch (QNNS.getKind()) {
+ case NestedNameSpecifier::Kind::Null:
+ llvm_unreachable("unexpected null nested name specifier");
- case NestedNameSpecifier::Namespace: {
+ case NestedNameSpecifier::Kind::Namespace: {
auto *NS = cast<NamespaceBaseDecl>(getDerived().TransformDecl(
- Q.getLocalBeginLoc(), QNNS->getAsNamespace()));
+ Q.getLocalBeginLoc(), const_cast<NamespaceBaseDecl *>(
+ QNNS.getAsNamespaceAndPrefix().Namespace)));
SS.Extend(SemaRef.Context, NS, Q.getLocalBeginLoc(), Q.getLocalEndLoc());
break;
}
- case NestedNameSpecifier::Global:
+ case NestedNameSpecifier::Kind::Global:
// There is no meaningful transformation that one could perform on the
// global scope.
SS.MakeGlobal(SemaRef.Context, Q.getBeginLoc());
break;
- case NestedNameSpecifier::Super: {
- CXXRecordDecl *RD =
- cast_or_null<CXXRecordDecl>(getDerived().TransformDecl(
- SourceLocation(), QNNS->getAsRecordDecl()));
- SS.MakeSuper(SemaRef.Context, RD, Q.getBeginLoc(), Q.getEndLoc());
+ case NestedNameSpecifier::Kind::MicrosoftSuper: {
+ CXXRecordDecl *RD = cast_or_null<CXXRecordDecl>(
+ getDerived().TransformDecl(SourceLocation(), QNNS.getAsRecordDecl()));
+ SS.MakeMicrosoftSuper(SemaRef.Context, RD, Q.getBeginLoc(),
+ Q.getEndLoc());
break;
}
- case NestedNameSpecifier::TypeSpec: {
- TypeLoc TL = TransformTypeInObjectScope(Q.getTypeLoc(), ObjectType,
- FirstQualifierInScope, SS);
-
- if (!TL)
- return NestedNameSpecifierLoc();
+ case NestedNameSpecifier::Kind::Type: {
+ assert(SS.isEmpty());
+ TypeLoc TL = Q.castAsTypeLoc();
+
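+ // A dependent-name component: transform its prefix qualifier first, then
+ // let Sema resolve the identifier into the rebuilt specifier.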
+ if (auto DNT = TL.getAs<DependentNameTypeLoc>()) {
+ NestedNameSpecifierLoc QualifierLoc = DNT.getQualifierLoc();
+ if (QualifierLoc) {
+ QualifierLoc = getDerived().TransformNestedNameSpecifierLoc(
+ QualifierLoc, ObjectType, FirstQualifierInScope);
+ if (!QualifierLoc)
+ return NestedNameSpecifierLoc();
+ ObjectType = QualType();
+ FirstQualifierInScope = nullptr;
+ }
+ SS.Adopt(QualifierLoc);
+ Sema::NestedNameSpecInfo IdInfo(
+ const_cast<IdentifierInfo *>(DNT.getTypePtr()->getIdentifier()),
+ DNT.getNameLoc(), Q.getLocalEndLoc(), ObjectType);
+ if (SemaRef.BuildCXXNestedNameSpecifier(/*Scope=*/nullptr, IdInfo,
+ false, SS,
+ FirstQualifierInScope, false))
+ return NestedNameSpecifierLoc();
+ return SS.getWithLocInContext(SemaRef.Context);
+ }
QualType T = TL.getType();
+ TypeLocBuilder TLB;
+ if (!getDerived().AlreadyTransformed(T)) {
+ T = TransformTypeInObjectScope(TLB, TL, ObjectType,
+ FirstQualifierInScope);
+ if (T.isNull())
+ return NestedNameSpecifierLoc();
+ TL = TLB.getTypeLocInContext(SemaRef.Context, T);
+ }
+
if (T->isDependentType() || T->isRecordType() ||
(SemaRef.getLangOpts().CPlusPlus11 && T->isEnumeralType())) {
if (T->isEnumeralType())
SemaRef.Diag(TL.getBeginLoc(),
diag::warn_cxx98_compat_enum_nested_name_spec);
-
- if (const auto ETL = TL.getAs<ElaboratedTypeLoc>()) {
- SS.Adopt(ETL.getQualifierLoc());
- TL = ETL.getNamedTypeLoc();
- }
-
- SS.Extend(SemaRef.Context, TL, Q.getLocalEndLoc());
+ SS.Make(SemaRef.Context, TL, Q.getLocalEndLoc());
break;
}
// If the nested-name-specifier is an invalid type def, don't emit an
// error because a previous error should have already been emitted.
TypedefTypeLoc TTL = TL.getAsAdjusted<TypedefTypeLoc>();
- if (!TTL || !TTL.getTypedefNameDecl()->isInvalidDecl()) {
+ if (!TTL || !TTL.getDecl()->isInvalidDecl()) {
SemaRef.Diag(TL.getBeginLoc(), diag::err_nested_name_spec_non_tag)
<< T << SS.getRange();
}
return NestedNameSpecifierLoc();
}
}
-
- // The qualifier-in-scope and object type only apply to the leftmost entity.
- FirstQualifierInScope = nullptr;
- ObjectType = QualType();
}
// Don't rebuild the nested-name-specifier if we don't have to.
@@ -4766,30 +4761,32 @@ template <typename Derived>
TemplateName TreeTransform<Derived>::RebuildTemplateName(
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
IdentifierOrOverloadedOperator IO, SourceLocation NameLoc,
- QualType ObjectType, NamedDecl *FirstQualifierInScope,
- bool AllowInjectedClassName) {
- if (const IdentifierInfo *II = IO.getIdentifier()) {
+ QualType ObjectType, bool AllowInjectedClassName) {
+ if (const IdentifierInfo *II = IO.getIdentifier())
return getDerived().RebuildTemplateName(SS, TemplateKWLoc, *II, NameLoc,
- ObjectType, FirstQualifierInScope,
- AllowInjectedClassName);
- }
+ ObjectType, AllowInjectedClassName);
return getDerived().RebuildTemplateName(SS, TemplateKWLoc, IO.getOperator(),
NameLoc, ObjectType,
AllowInjectedClassName);
}
-template<typename Derived>
-TemplateName
-TreeTransform<Derived>::TransformTemplateName(CXXScopeSpec &SS,
- TemplateName Name,
- SourceLocation NameLoc,
- QualType ObjectType,
- NamedDecl *FirstQualifierInScope,
- bool AllowInjectedClassName) {
+template <typename Derived>
+TemplateName TreeTransform<Derived>::TransformTemplateName(
+ NestedNameSpecifierLoc &QualifierLoc, SourceLocation TemplateKWLoc,
+ TemplateName Name, SourceLocation NameLoc, QualType ObjectType,
+ NamedDecl *FirstQualifierInScope, bool AllowInjectedClassName) {
if (QualifiedTemplateName *QTN = Name.getAsQualifiedTemplateName()) {
+ // FIXME: Preserve UsingTemplateName.
TemplateDecl *Template = QTN->getUnderlyingTemplate().getAsTemplateDecl();
assert(Template && "qualified template name must refer to a template");
+ if (QualifierLoc) {
+ QualifierLoc = getDerived().TransformNestedNameSpecifierLoc(
+ QualifierLoc, ObjectType, FirstQualifierInScope);
+ if (!QualifierLoc)
+ return TemplateName();
+ }
+
TemplateDecl *TransTemplate
= cast_or_null<TemplateDecl>(getDerived().TransformDecl(NameLoc,
Template));
@@ -4797,41 +4794,67 @@ TreeTransform<Derived>::TransformTemplateName(CXXScopeSpec &SS,
return TemplateName();
if (!getDerived().AlwaysRebuild() &&
- SS.getScopeRep() == QTN->getQualifier() &&
+ QualifierLoc.getNestedNameSpecifier() == QTN->getQualifier() &&
TransTemplate == Template)
return Name;
-
+ CXXScopeSpec SS;
+ SS.Adopt(QualifierLoc);
return getDerived().RebuildTemplateName(SS, QTN->hasTemplateKeyword(),
TransTemplate);
}
if (DependentTemplateName *DTN = Name.getAsDependentTemplateName()) {
- if (SS.getScopeRep()) {
- // These apply to the scope specifier, not the template.
+ if (QualifierLoc) {
+ QualifierLoc = getDerived().TransformNestedNameSpecifierLoc(
+ QualifierLoc, ObjectType, FirstQualifierInScope);
+ if (!QualifierLoc)
+ return TemplateName();
+ // The qualifier-in-scope and object type only apply to the leftmost
+ // entity.
ObjectType = QualType();
- FirstQualifierInScope = nullptr;
}
if (!getDerived().AlwaysRebuild() &&
- SS.getScopeRep() == DTN->getQualifier() &&
+ QualifierLoc.getNestedNameSpecifier() == DTN->getQualifier() &&
ObjectType.isNull())
return Name;
- // FIXME: Preserve the location of the "template" keyword.
- SourceLocation TemplateKWLoc = NameLoc;
- return getDerived().RebuildTemplateName(
- SS, TemplateKWLoc, DTN->getName(), NameLoc, ObjectType,
+ CXXScopeSpec SS;
+ SS.Adopt(QualifierLoc);
+ return getDerived().RebuildTemplateName(SS, TemplateKWLoc, DTN->getName(),
+ NameLoc, ObjectType,
+ AllowInjectedClassName);
+ }
+
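+ // A substituted template template parameter wraps a replacement name;
+ // transform the replacement and its associated declaration, then re-wrap.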
+ if (SubstTemplateTemplateParmStorage *S =
+ Name.getAsSubstTemplateTemplateParm()) {
+ TemplateName NewName = getDerived().TransformTemplateName(
+ QualifierLoc, TemplateKWLoc, S->getReplacement(), NameLoc, ObjectType,
FirstQualifierInScope, AllowInjectedClassName);
+ if (NewName.isNull())
+ return TemplateName();
+ Decl *AssociatedDecl =
+ getDerived().TransformDecl(NameLoc, S->getAssociatedDecl());
+ if (!getDerived().AlwaysRebuild() && NewName == S->getReplacement() &&
+ AssociatedDecl == S->getAssociatedDecl())
+ return Name;
+ return SemaRef.Context.getSubstTemplateTemplateParm(
+ NewName, AssociatedDecl, S->getIndex(), S->getPackIndex(),
+ S->getFinal());
}
- // FIXME: Try to preserve more of the TemplateName.
+ assert(!Name.getAsDeducedTemplateName() &&
+ "DeducedTemplateName should not escape partial ordering");
+
if (TemplateDecl *Template = Name.getAsTemplateDecl()) {
+ assert(!QualifierLoc && "missed a Qualified Template");
TemplateDecl *TransTemplate
= cast_or_null<TemplateDecl>(getDerived().TransformDecl(NameLoc,
Template));
if (!TransTemplate)
return TemplateName();
+ CXXScopeSpec SS;
return getDerived().RebuildTemplateName(SS, /*TemplateKeyword=*/false,
TransTemplate);
}
@@ -4849,8 +4872,10 @@ TreeTransform<Derived>::TransformTemplateName(CXXScopeSpec &SS,
template <typename Derived>
TemplateArgument TreeTransform<Derived>::TransformNamedTemplateTemplateArgument(
- CXXScopeSpec &SS, TemplateName Name, SourceLocation NameLoc) {
- TemplateName TN = getDerived().TransformTemplateName(SS, Name, NameLoc);
+ NestedNameSpecifierLoc &QualifierLoc, SourceLocation TemplateKeywordLoc,
+ TemplateName Name, SourceLocation NameLoc) {
+ TemplateName TN = getDerived().TransformTemplateName(
+ QualifierLoc, TemplateKeywordLoc, Name, NameLoc);
if (TN.isNull())
return TemplateArgument();
return TemplateArgument(TN);
@@ -4932,21 +4957,14 @@ bool TreeTransform<Derived>::TransformTemplateArgument(
case TemplateArgument::Template: {
NestedNameSpecifierLoc QualifierLoc = Input.getTemplateQualifierLoc();
- if (QualifierLoc) {
- QualifierLoc = getDerived().TransformNestedNameSpecifierLoc(QualifierLoc);
- if (!QualifierLoc)
- return true;
- }
-
- CXXScopeSpec SS;
- SS.Adopt(QualifierLoc);
TemplateArgument Out = getDerived().TransformNamedTemplateTemplateArgument(
- SS, Arg.getAsTemplate(), Input.getTemplateNameLoc());
+ QualifierLoc, Input.getTemplateKWLoc(), Arg.getAsTemplate(),
+ Input.getTemplateNameLoc());
if (Out.isNull())
return true;
- Output = TemplateArgumentLoc(SemaRef.Context, Out, QualifierLoc,
- Input.getTemplateNameLoc());
+ Output = TemplateArgumentLoc(SemaRef.Context, Out, Input.getTemplateKWLoc(),
+ QualifierLoc, Input.getTemplateNameLoc());
return false;
}
@@ -5380,85 +5398,37 @@ QualType TreeTransform<Derived>::RebuildQualifiedType(QualType T,
return SemaRef.BuildQualifiedType(T, Loc, Quals);
}
-template<typename Derived>
-TypeLoc
-TreeTransform<Derived>::TransformTypeInObjectScope(TypeLoc TL,
- QualType ObjectType,
- NamedDecl *UnqualLookup,
- CXXScopeSpec &SS) {
- if (getDerived().AlreadyTransformed(TL.getType()))
- return TL;
-
- TypeSourceInfo *TSI =
- TransformTSIInObjectScope(TL, ObjectType, UnqualLookup, SS);
- if (TSI)
- return TSI->getTypeLoc();
- return TypeLoc();
-}
-
-template<typename Derived>
-TypeSourceInfo *
-TreeTransform<Derived>::TransformTypeInObjectScope(TypeSourceInfo *TSInfo,
- QualType ObjectType,
- NamedDecl *UnqualLookup,
- CXXScopeSpec &SS) {
- if (getDerived().AlreadyTransformed(TSInfo->getType()))
- return TSInfo;
-
- return TransformTSIInObjectScope(TSInfo->getTypeLoc(), ObjectType,
- UnqualLookup, SS);
-}
-
template <typename Derived>
-TypeSourceInfo *TreeTransform<Derived>::TransformTSIInObjectScope(
- TypeLoc TL, QualType ObjectType, NamedDecl *UnqualLookup,
- CXXScopeSpec &SS) {
- QualType T = TL.getType();
- assert(!getDerived().AlreadyTransformed(T));
-
- TypeLocBuilder TLB;
- QualType Result;
-
- if (isa<TemplateSpecializationType>(T)) {
- TemplateSpecializationTypeLoc SpecTL =
- TL.castAs<TemplateSpecializationTypeLoc>();
-
- TemplateName Template = getDerived().TransformTemplateName(
- SS, SpecTL.getTypePtr()->getTemplateName(), SpecTL.getTemplateNameLoc(),
- ObjectType, UnqualLookup, /*AllowInjectedClassName*/true);
- if (Template.isNull())
- return nullptr;
+QualType TreeTransform<Derived>::TransformTypeInObjectScope(
+ TypeLocBuilder &TLB, TypeLoc TL, QualType ObjectType,
+ NamedDecl *UnqualLookup) {
+ assert(!getDerived().AlreadyTransformed(TL.getType()));
- Result = getDerived().TransformTemplateSpecializationType(TLB, SpecTL,
- Template);
- } else if (isa<DependentTemplateSpecializationType>(T)) {
- DependentTemplateSpecializationTypeLoc SpecTL =
- TL.castAs<DependentTemplateSpecializationTypeLoc>();
-
- const IdentifierInfo *II = SpecTL.getTypePtr()
- ->getDependentTemplateName()
- .getName()
- .getIdentifier();
- TemplateName Template = getDerived().RebuildTemplateName(
- SS, SpecTL.getTemplateKeywordLoc(), *II, SpecTL.getTemplateNameLoc(),
- ObjectType, UnqualLookup,
- /*AllowInjectedClassName*/ true);
- if (Template.isNull())
- return nullptr;
-
- Result = getDerived().TransformDependentTemplateSpecializationType(TLB,
- SpecTL,
- Template,
- SS);
- } else {
- // Nothing special needs to be done for these.
- Result = getDerived().TransformType(TLB, TL);
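+ // Only the type classes that can appear in this position are handled;
+ // anything else hits the unreachable default below.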
+ switch (TL.getTypeLocClass()) {
+ case TypeLoc::DependentTemplateSpecialization:
+ return getDerived().TransformDependentTemplateSpecializationType(
+ TLB, TL.castAs<DependentTemplateSpecializationTypeLoc>(), ObjectType,
+ UnqualLookup, /*AllowInjectedClassName=*/true);
+ case TypeLoc::DependentName: {
+ return getDerived().TransformDependentNameType(
+ TLB, TL.castAs<DependentNameTypeLoc>(), /*DeducedTSTContext=*/false,
+ ObjectType, UnqualLookup);
+ }
+ case TypeLoc::Typedef:
+ case TypeLoc::TemplateSpecialization:
+ case TypeLoc::SubstTemplateTypeParm:
+ case TypeLoc::PackIndexing:
+ case TypeLoc::Enum:
+ case TypeLoc::Record:
+ case TypeLoc::InjectedClassName:
+ case TypeLoc::TemplateTypeParm:
+ case TypeLoc::Decltype:
+ case TypeLoc::UnresolvedUsing:
+ case TypeLoc::Using:
+ return getDerived().TransformType(TLB, TL);
+ default:
+ llvm_unreachable("unexpected type class");
}
-
- if (Result.isNull())
- return nullptr;
-
- return TLB.getTypeSourceInfo(SemaRef.Context, Result);
}
template <class TyLoc> static inline
@@ -6694,23 +6664,38 @@ QualType TreeTransform<Derived>::TransformFunctionNoProtoType(
template <typename Derived>
QualType TreeTransform<Derived>::TransformUnresolvedUsingType(
TypeLocBuilder &TLB, UnresolvedUsingTypeLoc TL) {
const UnresolvedUsingType *T = TL.getTypePtr();
- Decl *D = getDerived().TransformDecl(TL.getNameLoc(), T->getDecl());
+ bool Changed = false;
+
+ NestedNameSpecifierLoc QualifierLoc = TL.getQualifierLoc();
+ if (NestedNameSpecifierLoc OldQualifierLoc = QualifierLoc) {
+ QualifierLoc = getDerived().TransformNestedNameSpecifierLoc(QualifierLoc);
+ if (!QualifierLoc)
+ return QualType();
+ Changed |= QualifierLoc != OldQualifierLoc;
+ }
+
+ auto *D = getDerived().TransformDecl(TL.getNameLoc(), T->getDecl());
if (!D)
return QualType();
+ Changed |= D != T->getDecl();
QualType Result = TL.getType();
- if (getDerived().AlwaysRebuild() || D != T->getDecl()) {
- Result = getDerived().RebuildUnresolvedUsingType(TL.getNameLoc(), D);
+ if (getDerived().AlwaysRebuild() || Changed) {
+ Result = getDerived().RebuildUnresolvedUsingType(
+ T->getKeyword(), QualifierLoc.getNestedNameSpecifier(), TL.getNameLoc(),
+ D);
if (Result.isNull())
return QualType();
}
- // We might get an arbitrary type spec type back. We should at
- // least always get a type spec type, though.
- TypeSpecTypeLoc NewTL = TLB.pushTypeSpec(Result);
- NewTL.setNameLoc(TL.getNameLoc());
-
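+ // Rebuilding may resolve this to a concrete UsingType; push whichever
+ // type location matches the result.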
+ if (isa<UsingType>(Result))
+ TLB.push<UsingTypeLoc>(Result).set(TL.getElaboratedKeywordLoc(),
+ QualifierLoc, TL.getNameLoc());
+ else
+ TLB.push<UnresolvedUsingTypeLoc>(Result).set(TL.getElaboratedKeywordLoc(),
+ QualifierLoc, TL.getNameLoc());
return Result;
}
@@ -6718,25 +6703,37 @@ template <typename Derived>
QualType TreeTransform<Derived>::TransformUsingType(TypeLocBuilder &TLB,
UsingTypeLoc TL) {
const UsingType *T = TL.getTypePtr();
+ bool Changed = false;
+
+ NestedNameSpecifierLoc QualifierLoc = TL.getQualifierLoc();
+ if (NestedNameSpecifierLoc OldQualifierLoc = QualifierLoc) {
+ QualifierLoc = getDerived().TransformNestedNameSpecifierLoc(QualifierLoc);
+ if (!QualifierLoc)
+ return QualType();
+ Changed |= QualifierLoc != OldQualifierLoc;
+ }
- auto *Found = cast_or_null<UsingShadowDecl>(getDerived().TransformDecl(
- TL.getLocalSourceRange().getBegin(), T->getFoundDecl()));
- if (!Found)
+ auto *D = cast_or_null<UsingShadowDecl>(
+ getDerived().TransformDecl(TL.getNameLoc(), T->getDecl()));
+ if (!D)
return QualType();
+ Changed |= D != T->getDecl();
- QualType Underlying = getDerived().TransformType(T->desugar());
- if (Underlying.isNull())
+ QualType UnderlyingType = getDerived().TransformType(T->desugar());
+ if (UnderlyingType.isNull())
return QualType();
+ Changed |= UnderlyingType != T->desugar();
QualType Result = TL.getType();
- if (getDerived().AlwaysRebuild() || Found != T->getFoundDecl() ||
- Underlying != T->getUnderlyingType()) {
- Result = getDerived().RebuildUsingType(Found, Underlying);
+ if (getDerived().AlwaysRebuild() || Changed) {
+ Result = getDerived().RebuildUsingType(
+ T->getKeyword(), QualifierLoc.getNestedNameSpecifier(), D,
+ UnderlyingType);
if (Result.isNull())
return QualType();
}
-
- TLB.pushTypeSpec(Result).setNameLoc(TL.getNameLoc());
+ TLB.push<UsingTypeLoc>(Result).set(TL.getElaboratedKeywordLoc(), QualifierLoc,
+ TL.getNameLoc());
return Result;
}
@@ -6744,23 +6741,34 @@ template<typename Derived>
QualType TreeTransform<Derived>::TransformTypedefType(TypeLocBuilder &TLB,
TypedefTypeLoc TL) {
const TypedefType *T = TL.getTypePtr();
- TypedefNameDecl *Typedef
- = cast_or_null<TypedefNameDecl>(getDerived().TransformDecl(TL.getNameLoc(),
- T->getDecl()));
+ bool Changed = false;
+
+ NestedNameSpecifierLoc QualifierLoc = TL.getQualifierLoc();
+ if (NestedNameSpecifierLoc OldQualifierLoc = QualifierLoc) {
+ QualifierLoc = getDerived().TransformNestedNameSpecifierLoc(QualifierLoc);
+ if (!QualifierLoc)
+ return QualType();
+ Changed |= QualifierLoc != OldQualifierLoc;
+ }
+
+ auto *Typedef = cast_or_null<TypedefNameDecl>(
+ getDerived().TransformDecl(TL.getNameLoc(), T->getDecl()));
if (!Typedef)
return QualType();
+ Changed |= Typedef != T->getDecl();
+
+ // FIXME: Transform the UnderlyingType if different from decl.
QualType Result = TL.getType();
- if (getDerived().AlwaysRebuild() ||
- Typedef != T->getDecl()) {
- Result = getDerived().RebuildTypedefType(Typedef);
+ if (getDerived().AlwaysRebuild() || Changed) {
+ Result = getDerived().RebuildTypedefType(
+ T->getKeyword(), QualifierLoc.getNestedNameSpecifier(), Typedef);
if (Result.isNull())
return QualType();
}
- TypedefTypeLoc NewTL = TLB.push<TypedefTypeLoc>(Result);
- NewTL.setNameLoc(TL.getNameLoc());
-
+ TLB.push<TypedefTypeLoc>(Result).set(TL.getElaboratedKeywordLoc(),
+ QualifierLoc, TL.getNameLoc());
return Result;
}
@@ -6996,9 +7004,10 @@ QualType TreeTransform<Derived>::TransformDeducedTemplateSpecializationType(
TypeLocBuilder &TLB, DeducedTemplateSpecializationTypeLoc TL) {
const DeducedTemplateSpecializationType *T = TL.getTypePtr();
- CXXScopeSpec SS;
+ NestedNameSpecifierLoc QualifierLoc = TL.getQualifierLoc();
TemplateName TemplateName = getDerived().TransformTemplateName(
- SS, T->getTemplateName(), TL.getTemplateNameLoc());
+ QualifierLoc, /*TemplateKWLoc=*/SourceLocation(), T->getTemplateName(),
+ TL.getTemplateNameLoc());
if (TemplateName.isNull())
return QualType();
@@ -7011,76 +7020,71 @@ QualType TreeTransform<Derived>::TransformDeducedTemplateSpecializationType(
}
QualType Result = getDerived().RebuildDeducedTemplateSpecializationType(
- TemplateName, NewDeduced);
+ T->getKeyword(), TemplateName, NewDeduced);
if (Result.isNull())
return QualType();
- DeducedTemplateSpecializationTypeLoc NewTL =
- TLB.push<DeducedTemplateSpecializationTypeLoc>(Result);
+ auto NewTL = TLB.push<DeducedTemplateSpecializationTypeLoc>(Result);
+ NewTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc());
NewTL.setTemplateNameLoc(TL.getTemplateNameLoc());
-
+ NewTL.setQualifierLoc(QualifierLoc);
return Result;
}
-template<typename Derived>
-QualType TreeTransform<Derived>::TransformRecordType(TypeLocBuilder &TLB,
- RecordTypeLoc TL) {
- const RecordType *T = TL.getTypePtr();
- RecordDecl *Record
- = cast_or_null<RecordDecl>(getDerived().TransformDecl(TL.getNameLoc(),
- T->getDecl()));
- if (!Record)
+template <typename Derived>
+QualType TreeTransform<Derived>::TransformTagType(TypeLocBuilder &TLB,
+ TagTypeLoc TL) {
+ const TagType *T = TL.getTypePtr();
+
+ NestedNameSpecifierLoc QualifierLoc = TL.getQualifierLoc();
+ if (QualifierLoc) {
+ QualifierLoc = getDerived().TransformNestedNameSpecifierLoc(QualifierLoc);
+ if (!QualifierLoc)
+ return QualType();
+ }
+
+ auto *TD = cast_or_null<TagDecl>(
+ getDerived().TransformDecl(TL.getNameLoc(), T->getOriginalDecl()));
+ if (!TD)
return QualType();
QualType Result = TL.getType();
- if (getDerived().AlwaysRebuild() ||
- Record != T->getDecl()) {
- Result = getDerived().RebuildRecordType(Record);
+ if (getDerived().AlwaysRebuild() || QualifierLoc != TL.getQualifierLoc() ||
+ TD != T->getOriginalDecl()) {
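+ // A canonical tag type carries no keyword or qualifier sugar, so rebuild
+ // it through the canonical path.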
+ if (T->isCanonicalUnqualified())
+ Result = getDerived().RebuildCanonicalTagType(TD);
+ else
+ Result = getDerived().RebuildTagType(
+ T->getKeyword(), QualifierLoc.getNestedNameSpecifier(), TD);
if (Result.isNull())
return QualType();
}
- RecordTypeLoc NewTL = TLB.push<RecordTypeLoc>(Result);
+ TagTypeLoc NewTL = TLB.push<TagTypeLoc>(Result);
+ NewTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc());
+ NewTL.setQualifierLoc(QualifierLoc);
NewTL.setNameLoc(TL.getNameLoc());
return Result;
}
-template<typename Derived>
+template <typename Derived>
QualType TreeTransform<Derived>::TransformEnumType(TypeLocBuilder &TLB,
EnumTypeLoc TL) {
- const EnumType *T = TL.getTypePtr();
- EnumDecl *Enum
- = cast_or_null<EnumDecl>(getDerived().TransformDecl(TL.getNameLoc(),
- T->getDecl()));
- if (!Enum)
- return QualType();
-
- QualType Result = TL.getType();
- if (getDerived().AlwaysRebuild() ||
- Enum != T->getDecl()) {
- Result = getDerived().RebuildEnumType(Enum);
- if (Result.isNull())
- return QualType();
- }
-
- EnumTypeLoc NewTL = TLB.push<EnumTypeLoc>(Result);
- NewTL.setNameLoc(TL.getNameLoc());
+ return getDerived().TransformTagType(TLB, TL);
+}
- return Result;
+template <typename Derived>
+QualType TreeTransform<Derived>::TransformRecordType(TypeLocBuilder &TLB,
+ RecordTypeLoc TL) {
+ return getDerived().TransformTagType(TLB, TL);
}
template<typename Derived>
QualType TreeTransform<Derived>::TransformInjectedClassNameType(
TypeLocBuilder &TLB,
InjectedClassNameTypeLoc TL) {
- Decl *D = getDerived().TransformDecl(TL.getNameLoc(),
- TL.getTypePtr()->getDecl());
- if (!D) return QualType();
-
- QualType T = SemaRef.Context.getTypeDeclType(cast<TypeDecl>(D));
- TLB.pushTypeSpec(T).setNameLoc(TL.getNameLoc());
- return T;
+ return getDerived().TransformTagType(TLB, TL);
}
template<typename Derived>
@@ -7142,24 +7146,6 @@ QualType TreeTransform<Derived>::TransformSubstTemplateTypeParmPackType(
}
template<typename Derived>
-QualType TreeTransform<Derived>::TransformTemplateSpecializationType(
- TypeLocBuilder &TLB,
- TemplateSpecializationTypeLoc TL) {
- const TemplateSpecializationType *T = TL.getTypePtr();
-
- // The nested-name-specifier never matters in a TemplateSpecializationType,
- // because we can't have a dependent nested-name-specifier anyway.
- CXXScopeSpec SS;
- TemplateName Template
- = getDerived().TransformTemplateName(SS, T->getTemplateName(),
- TL.getTemplateNameLoc());
- if (Template.isNull())
- return QualType();
-
- return getDerived().TransformTemplateSpecializationType(TLB, TL, Template);
-}
-
-template<typename Derived>
QualType TreeTransform<Derived>::TransformAtomicType(TypeLocBuilder &TLB,
AtomicTypeLoc TL) {
QualType ValueType = getDerived().TransformType(TLB, TL.getValueLoc());
@@ -7398,9 +7384,16 @@ QualType TreeTransform<Derived>::TransformAutoType(TypeLocBuilder &TLB,
template <typename Derived>
QualType TreeTransform<Derived>::TransformTemplateSpecializationType(
- TypeLocBuilder &TLB,
- TemplateSpecializationTypeLoc TL,
- TemplateName Template) {
+ TypeLocBuilder &TLB, TemplateSpecializationTypeLoc TL) {
+ const TemplateSpecializationType *T = TL.getTypePtr();
+
+ NestedNameSpecifierLoc QualifierLoc = TL.getQualifierLoc();
+ TemplateName Template = getDerived().TransformTemplateName(
+ QualifierLoc, TL.getTemplateKeywordLoc(), T->getTemplateName(),
+ TL.getTemplateNameLoc());
+ if (Template.isNull())
+ return QualType();
+
TemplateArgumentListInfo NewTemplateArgs;
NewTemplateArgs.setLAngleLoc(TL.getLAngleLoc());
NewTemplateArgs.setRAngleLoc(TL.getRAngleLoc());
@@ -7415,10 +7408,9 @@ QualType TreeTransform<Derived>::TransformTemplateSpecializationType(
// original template changed. If the template changed, and even if the
// arguments didn't change, these arguments might not correspond to their
// respective parameters, therefore needing conversions.
- QualType Result =
- getDerived().RebuildTemplateSpecializationType(Template,
- TL.getTemplateNameLoc(),
- NewTemplateArgs);
+ QualType Result = getDerived().RebuildTemplateSpecializationType(
+ TL.getTypePtr()->getKeyword(), Template, TL.getTemplateNameLoc(),
+ NewTemplateArgs);
if (!Result.isNull()) {
// Specializations of template template parameters are represented as
@@ -7428,8 +7420,8 @@ QualType TreeTransform<Derived>::TransformTemplateSpecializationType(
if (isa<DependentTemplateSpecializationType>(Result)) {
DependentTemplateSpecializationTypeLoc NewTL
= TLB.push<DependentTemplateSpecializationTypeLoc>(Result);
- NewTL.setElaboratedKeywordLoc(SourceLocation());
- NewTL.setQualifierLoc(NestedNameSpecifierLoc());
+ NewTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc());
+ NewTL.setQualifierLoc(QualifierLoc);
NewTL.setTemplateKeywordLoc(TL.getTemplateKeywordLoc());
NewTL.setTemplateNameLoc(TL.getTemplateNameLoc());
NewTL.setLAngleLoc(TL.getLAngleLoc());
@@ -7438,129 +7430,11 @@ QualType TreeTransform<Derived>::TransformTemplateSpecializationType(
NewTL.setArgLocInfo(i, NewTemplateArgs[i].getLocInfo());
return Result;
}
-
- TemplateSpecializationTypeLoc NewTL
- = TLB.push<TemplateSpecializationTypeLoc>(Result);
- NewTL.setTemplateKeywordLoc(TL.getTemplateKeywordLoc());
- NewTL.setTemplateNameLoc(TL.getTemplateNameLoc());
- NewTL.setLAngleLoc(TL.getLAngleLoc());
- NewTL.setRAngleLoc(TL.getRAngleLoc());
- for (unsigned i = 0, e = NewTemplateArgs.size(); i != e; ++i)
- NewTL.setArgLocInfo(i, NewTemplateArgs[i].getLocInfo());
- }
-
- return Result;
-}
-
-template <typename Derived>
-QualType TreeTransform<Derived>::TransformDependentTemplateSpecializationType(
- TypeLocBuilder &TLB,
- DependentTemplateSpecializationTypeLoc TL,
- TemplateName Template,
- CXXScopeSpec &SS) {
- TemplateArgumentListInfo NewTemplateArgs;
- NewTemplateArgs.setLAngleLoc(TL.getLAngleLoc());
- NewTemplateArgs.setRAngleLoc(TL.getRAngleLoc());
- typedef TemplateArgumentLocContainerIterator<
- DependentTemplateSpecializationTypeLoc> ArgIterator;
- if (getDerived().TransformTemplateArguments(ArgIterator(TL, 0),
- ArgIterator(TL, TL.getNumArgs()),
- NewTemplateArgs))
- return QualType();
-
- // FIXME: maybe don't rebuild if all the template arguments are the same.
-
- if (DependentTemplateName *DTN = Template.getAsDependentTemplateName()) {
- assert(DTN->getQualifier() == SS.getScopeRep());
- QualType Result = getSema().Context.getDependentTemplateSpecializationType(
- TL.getTypePtr()->getKeyword(), *DTN, NewTemplateArgs.arguments());
-
- DependentTemplateSpecializationTypeLoc NewTL
- = TLB.push<DependentTemplateSpecializationTypeLoc>(Result);
- NewTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc());
- NewTL.setQualifierLoc(SS.getWithLocInContext(SemaRef.Context));
- NewTL.setTemplateKeywordLoc(TL.getTemplateKeywordLoc());
- NewTL.setTemplateNameLoc(TL.getTemplateNameLoc());
- NewTL.setLAngleLoc(TL.getLAngleLoc());
- NewTL.setRAngleLoc(TL.getRAngleLoc());
- for (unsigned i = 0, e = NewTemplateArgs.size(); i != e; ++i)
- NewTL.setArgLocInfo(i, NewTemplateArgs[i].getLocInfo());
- return Result;
- }
-
- QualType Result
- = getDerived().RebuildTemplateSpecializationType(Template,
- TL.getTemplateNameLoc(),
- NewTemplateArgs);
-
- if (!Result.isNull()) {
- /// FIXME: Wrap this in an elaborated-type-specifier?
- TemplateSpecializationTypeLoc NewTL
- = TLB.push<TemplateSpecializationTypeLoc>(Result);
- NewTL.setTemplateKeywordLoc(TL.getTemplateKeywordLoc());
- NewTL.setTemplateNameLoc(TL.getTemplateNameLoc());
- NewTL.setLAngleLoc(TL.getLAngleLoc());
- NewTL.setRAngleLoc(TL.getRAngleLoc());
- for (unsigned i = 0, e = NewTemplateArgs.size(); i != e; ++i)
- NewTL.setArgLocInfo(i, NewTemplateArgs[i].getLocInfo());
- }
-
- return Result;
-}
-
-template<typename Derived>
-QualType
-TreeTransform<Derived>::TransformElaboratedType(TypeLocBuilder &TLB,
- ElaboratedTypeLoc TL) {
- const ElaboratedType *T = TL.getTypePtr();
-
- NestedNameSpecifierLoc QualifierLoc;
- // NOTE: the qualifier in an ElaboratedType is optional.
- if (TL.getQualifierLoc()) {
- QualifierLoc
- = getDerived().TransformNestedNameSpecifierLoc(TL.getQualifierLoc());
- if (!QualifierLoc)
- return QualType();
- }
-
- QualType NamedT = getDerived().TransformType(TLB, TL.getNamedTypeLoc());
- if (NamedT.isNull())
- return QualType();
-
- // C++0x [dcl.type.elab]p2:
- // If the identifier resolves to a typedef-name or the simple-template-id
- // resolves to an alias template specialization, the
- // elaborated-type-specifier is ill-formed.
- if (T->getKeyword() != ElaboratedTypeKeyword::None &&
- T->getKeyword() != ElaboratedTypeKeyword::Typename) {
- if (const TemplateSpecializationType *TST =
- NamedT->getAs<TemplateSpecializationType>()) {
- TemplateName Template = TST->getTemplateName();
- if (TypeAliasTemplateDecl *TAT = dyn_cast_or_null<TypeAliasTemplateDecl>(
- Template.getAsTemplateDecl())) {
- SemaRef.Diag(TL.getNamedTypeLoc().getBeginLoc(),
- diag::err_tag_reference_non_tag)
- << TAT << NonTagKind::TypeAliasTemplate
- << ElaboratedType::getTagTypeKindForKeyword(T->getKeyword());
- SemaRef.Diag(TAT->getLocation(), diag::note_declared_at);
- }
- }
- }
-
- QualType Result = TL.getType();
- if (getDerived().AlwaysRebuild() ||
- QualifierLoc != TL.getQualifierLoc() ||
- NamedT != T->getNamedType()) {
- Result = getDerived().RebuildElaboratedType(TL.getElaboratedKeywordLoc(),
- T->getKeyword(),
- QualifierLoc, NamedT);
- if (Result.isNull())
- return QualType();
+ TLB.push<TemplateSpecializationTypeLoc>(Result).set(
+ TL.getElaboratedKeywordLoc(), QualifierLoc, TL.getTemplateKeywordLoc(),
+ TL.getTemplateNameLoc(), NewTemplateArgs);
}
- ElaboratedTypeLoc NewTL = TLB.push<ElaboratedTypeLoc>(Result);
- NewTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc());
- NewTL.setQualifierLoc(QualifierLoc);
return Result;
}
@@ -7754,15 +7628,22 @@ QualType TreeTransform<Derived>::TransformDependentNameType(
return TransformDependentNameType(TLB, TL, false);
}
-template<typename Derived>
+template <typename Derived>
QualType TreeTransform<Derived>::TransformDependentNameType(
- TypeLocBuilder &TLB, DependentNameTypeLoc TL, bool DeducedTSTContext) {
+ TypeLocBuilder &TLB, DependentNameTypeLoc TL, bool DeducedTSTContext,
+ QualType ObjectType, NamedDecl *UnqualLookup) {
const DependentNameType *T = TL.getTypePtr();
- NestedNameSpecifierLoc QualifierLoc
- = getDerived().TransformNestedNameSpecifierLoc(TL.getQualifierLoc());
- if (!QualifierLoc)
- return QualType();
+ NestedNameSpecifierLoc QualifierLoc = TL.getQualifierLoc();
+ if (QualifierLoc) {
+ QualifierLoc = getDerived().TransformNestedNameSpecifierLoc(
+ QualifierLoc, ObjectType, UnqualLookup);
+ if (!QualifierLoc)
+ return QualType();
+ } else {
+ assert((ObjectType.isNull() && !UnqualLookup) &&
+ "must be transformed by TransformNestedNameSpecifierLoc");
+ }
QualType Result
= getDerived().RebuildDependentNameType(T->getKeyword(),
@@ -7774,13 +7655,19 @@ QualType TreeTransform<Derived>::TransformDependentNameType(
if (Result.isNull())
return QualType();
- if (const ElaboratedType* ElabT = Result->getAs<ElaboratedType>()) {
- QualType NamedT = ElabT->getNamedType();
- TLB.pushTypeSpec(NamedT).setNameLoc(TL.getNameLoc());
-
- ElaboratedTypeLoc NewTL = TLB.push<ElaboratedTypeLoc>(Result);
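+ // The rebuilt type may resolve to a tag, a deduced template specialization,
+ // or a typedef; otherwise it stays a dependent name. Push the matching
+ // type location.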
+ if (isa<TagType>(Result)) {
+ auto NewTL = TLB.push<TagTypeLoc>(Result);
NewTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc());
NewTL.setQualifierLoc(QualifierLoc);
+ NewTL.setNameLoc(TL.getNameLoc());
+ } else if (isa<DeducedTemplateSpecializationType>(Result)) {
+ auto NewTL = TLB.push<DeducedTemplateSpecializationTypeLoc>(Result);
+ NewTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc());
+ NewTL.setTemplateNameLoc(TL.getNameLoc());
+ NewTL.setQualifierLoc(QualifierLoc);
+ } else if (isa<TypedefType>(Result)) {
+ TLB.push<TypedefTypeLoc>(Result).set(TL.getElaboratedKeywordLoc(),
+ QualifierLoc, TL.getNameLoc());
} else {
DependentNameTypeLoc NewTL = TLB.push<DependentNameTypeLoc>(Result);
NewTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc());
@@ -7790,33 +7677,34 @@ QualType TreeTransform<Derived>::TransformDependentNameType(
return Result;
}
-template<typename Derived>
-QualType TreeTransform<Derived>::
- TransformDependentTemplateSpecializationType(TypeLocBuilder &TLB,
- DependentTemplateSpecializationTypeLoc TL) {
- NestedNameSpecifierLoc QualifierLoc;
- if (TL.getQualifierLoc()) {
- QualifierLoc
- = getDerived().TransformNestedNameSpecifierLoc(TL.getQualifierLoc());
- if (!QualifierLoc)
- return QualType();
- }
-
- CXXScopeSpec SS;
- SS.Adopt(QualifierLoc);
- return getDerived().TransformDependentTemplateSpecializationType(TLB, TL, SS);
+template <typename Derived>
+QualType TreeTransform<Derived>::TransformDependentTemplateSpecializationType(
+ TypeLocBuilder &TLB, DependentTemplateSpecializationTypeLoc TL) {
+ return getDerived().TransformDependentTemplateSpecializationType(
+ TLB, TL, QualType(), nullptr, false);
}
template <typename Derived>
QualType TreeTransform<Derived>::TransformDependentTemplateSpecializationType(
TypeLocBuilder &TLB, DependentTemplateSpecializationTypeLoc TL,
- CXXScopeSpec &SS) {
+ QualType ObjectType, NamedDecl *UnqualLookup, bool AllowInjectedClassName) {
const DependentTemplateSpecializationType *T = TL.getTypePtr();
- TemplateArgumentListInfo NewTemplateArgs;
- NewTemplateArgs.setLAngleLoc(TL.getLAngleLoc());
- NewTemplateArgs.setRAngleLoc(TL.getRAngleLoc());
+ NestedNameSpecifierLoc QualifierLoc = TL.getQualifierLoc();
+ if (QualifierLoc) {
+ QualifierLoc = getDerived().TransformNestedNameSpecifierLoc(
+ QualifierLoc, ObjectType, UnqualLookup);
+ if (!QualifierLoc)
+ return QualType();
+ // These only apply to the leftmost prefix.
+ ObjectType = QualType();
+ UnqualLookup = nullptr;
+ }
+ CXXScopeSpec SS;
+ SS.Adopt(QualifierLoc);
+ TemplateArgumentListInfo NewTemplateArgs(TL.getLAngleLoc(),
+ TL.getRAngleLoc());
auto ArgsRange = llvm::make_range<TemplateArgumentLocContainerIterator<
DependentTemplateSpecializationTypeLoc>>({TL, 0}, {TL, TL.getNumArgs()});
@@ -7833,43 +7721,27 @@ QualType TreeTransform<Derived>::TransformDependentTemplateSpecializationType(
QualType Result = TL.getType();
if (getDerived().AlwaysRebuild() || SS.getScopeRep() != DTN.getQualifier() ||
- TemplateArgumentsChanged) {
+ TemplateArgumentsChanged || !ObjectType.isNull()) {
TemplateName Name = getDerived().RebuildTemplateName(
SS, TL.getTemplateKeywordLoc(), DTN.getName(), TL.getTemplateNameLoc(),
- /*ObjectType=*/QualType(), /*FirstQualifierInScope=*/nullptr,
- /*AllowInjectedClassName=*/false);
+ ObjectType, AllowInjectedClassName);
if (Name.isNull())
return QualType();
Result = getDerived().RebuildDependentTemplateSpecializationType(
- T->getKeyword(), SS.getScopeRep(), TL.getTemplateKeywordLoc(), Name,
+ T->getKeyword(), TL.getTemplateKeywordLoc(), Name,
TL.getTemplateNameLoc(), NewTemplateArgs,
/*AllowInjectedClassName=*/false);
if (Result.isNull())
return QualType();
}
- NestedNameSpecifierLoc QualifierLoc = SS.getWithLocInContext(SemaRef.Context);
- if (const ElaboratedType *ElabT = dyn_cast<ElaboratedType>(Result)) {
- QualType NamedT = ElabT->getNamedType();
-
- // Copy information relevant to the template specialization.
- TemplateSpecializationTypeLoc NamedTL
- = TLB.push<TemplateSpecializationTypeLoc>(NamedT);
- NamedTL.setTemplateKeywordLoc(TL.getTemplateKeywordLoc());
- NamedTL.setTemplateNameLoc(TL.getTemplateNameLoc());
- NamedTL.setLAngleLoc(TL.getLAngleLoc());
- NamedTL.setRAngleLoc(TL.getRAngleLoc());
- for (unsigned I = 0, E = NewTemplateArgs.size(); I != E; ++I)
- NamedTL.setArgLocInfo(I, NewTemplateArgs[I].getLocInfo());
-
- // Copy information relevant to the elaborated type.
- ElaboratedTypeLoc NewTL = TLB.push<ElaboratedTypeLoc>(Result);
- NewTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc());
- NewTL.setQualifierLoc(QualifierLoc);
+ QualifierLoc = SS.getWithLocInContext(SemaRef.Context);
+ if (isa<TemplateSpecializationType>(Result)) {
+ TLB.push<TemplateSpecializationTypeLoc>(Result).set(
+ TL.getElaboratedKeywordLoc(), QualifierLoc, TL.getTemplateKeywordLoc(),
+ TL.getTemplateNameLoc(), NewTemplateArgs);
} else {
- assert(isa<DependentTemplateSpecializationType>(Result));
- DependentTemplateSpecializationTypeLoc SpecTL
- = TLB.push<DependentTemplateSpecializationTypeLoc>(Result);
+ auto SpecTL = TLB.push<DependentTemplateSpecializationTypeLoc>(Result);
SpecTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc());
SpecTL.setQualifierLoc(QualifierLoc);
SpecTL.setTemplateKeywordLoc(TL.getTemplateKeywordLoc());
@@ -8590,14 +8462,18 @@ TreeTransform<Derived>::TransformDeclStmt(DeclStmt *S) {
DeclChanged = true;
if (LSI) {
- if (auto *TD = dyn_cast<TypeDecl>(Transformed))
- LSI->ContainsUnexpandedParameterPack |=
- getSema()
- .getASTContext()
- .getTypeDeclType(TD)
- .getSingleStepDesugaredType(getSema().getASTContext())
- ->containsUnexpandedParameterPack();
-
+ if (auto *TD = dyn_cast<TypeDecl>(Transformed)) {
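+ // For a typedef, the unexpanded-pack check must look at the underlying
+ // type; for other TypeDecls the declared type itself suffices.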
+ if (auto *TN = dyn_cast<TypedefNameDecl>(TD)) {
+ LSI->ContainsUnexpandedParameterPack |=
+ TN->getUnderlyingType()->containsUnexpandedParameterPack();
+ } else {
+ LSI->ContainsUnexpandedParameterPack |=
+ getSema()
+ .getASTContext()
+ .getTypeDeclType(TD)
+ ->containsUnexpandedParameterPack();
+ }
+ }
if (auto *VD = dyn_cast<VarDecl>(Transformed))
LSI->ContainsUnexpandedParameterPack |=
VD->getType()->containsUnexpandedParameterPack();
@@ -14353,7 +14229,7 @@ TreeTransform<Derived>::TransformCXXTypeidExpr(CXXTypeidExpr *E) {
auto EvalCtx = Sema::ExpressionEvaluationContext::Unevaluated;
if (E->isGLValue())
if (auto *RecordT = Op->getType()->getAs<RecordType>())
- if (cast<CXXRecordDecl>(RecordT->getDecl())->isPolymorphic())
+ if (cast<CXXRecordDecl>(RecordT->getOriginalDecl())->isPolymorphic())
EvalCtx = SemaRef.ExprEvalContexts.back().Context;
EnterExpressionEvaluationContext Unevaluated(SemaRef, EvalCtx,
@@ -14593,7 +14469,8 @@ TreeTransform<Derived>::TransformCXXNewExpr(CXXNewExpr *E) {
QualType ElementType
= SemaRef.Context.getBaseElementType(E->getAllocatedType());
if (const RecordType *RecordT = ElementType->getAs<RecordType>()) {
- CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordT->getDecl());
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordT->getOriginalDecl())
+ ->getDefinitionOrSelf();
if (CXXDestructorDecl *Destructor = SemaRef.LookupDestructor(Record)) {
SemaRef.MarkFunctionReferenced(E->getBeginLoc(), Destructor);
}
@@ -14663,7 +14540,9 @@ TreeTransform<Derived>::TransformCXXDeleteExpr(CXXDeleteExpr *E) {
QualType Destroyed = SemaRef.Context.getBaseElementType(
E->getDestroyedType());
if (const RecordType *DestroyedRec = Destroyed->getAs<RecordType>()) {
- CXXRecordDecl *Record = cast<CXXRecordDecl>(DestroyedRec->getDecl());
+ CXXRecordDecl *Record =
+ cast<CXXRecordDecl>(DestroyedRec->getOriginalDecl())
+ ->getDefinitionOrSelf();
SemaRef.MarkFunctionReferenced(E->getBeginLoc(),
SemaRef.LookupDestructor(Record));
}
@@ -14707,9 +14586,9 @@ TreeTransform<Derived>::TransformCXXPseudoDestructorExpr(
PseudoDestructorTypeStorage Destroyed;
if (E->getDestroyedTypeInfo()) {
- TypeSourceInfo *DestroyedTypeInfo
- = getDerived().TransformTypeInObjectScope(E->getDestroyedTypeInfo(),
- ObjectType, nullptr, SS);
+ TypeSourceInfo *DestroyedTypeInfo = getDerived().TransformTypeInObjectScope(
+ E->getDestroyedTypeInfo(), ObjectType,
+ /*FirstQualifierInScope=*/nullptr);
if (!DestroyedTypeInfo)
return ExprError();
Destroyed = DestroyedTypeInfo;
@@ -14733,9 +14612,8 @@ TreeTransform<Derived>::TransformCXXPseudoDestructorExpr(
TypeSourceInfo *ScopeTypeInfo = nullptr;
if (E->getScopeTypeInfo()) {
- CXXScopeSpec EmptySS;
ScopeTypeInfo = getDerived().TransformTypeInObjectScope(
- E->getScopeTypeInfo(), ObjectType, nullptr, EmptySS);
+ E->getScopeTypeInfo(), ObjectType, nullptr);
if (!ScopeTypeInfo)
return ExprError();
}
@@ -15856,10 +15734,6 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
// parameters are dependent.
DependencyKind = getDerived().ComputeLambdaDependency(&LSICopy);
Class->setLambdaDependencyKind(DependencyKind);
- // Clean up the type cache created previously. Then, we re-create a type for
- // such Decl with the new DependencyKind.
- Class->setTypeForDecl(nullptr);
- getSema().Context.getTypeDeclType(Class);
return getDerived().RebuildLambdaExpr(E->getBeginLoc(),
Body.get()->getEndLoc(), &LSICopy);
@@ -17362,9 +17236,10 @@ QualType TreeTransform<Derived>::RebuildFunctionNoProtoType(QualType T) {
return SemaRef.Context.getFunctionNoProtoType(T);
}
-template<typename Derived>
-QualType TreeTransform<Derived>::RebuildUnresolvedUsingType(SourceLocation Loc,
- Decl *D) {
+template <typename Derived>
+QualType TreeTransform<Derived>::RebuildUnresolvedUsingType(
+ ElaboratedTypeKeyword Keyword, NestedNameSpecifier Qualifier,
+ SourceLocation NameLoc, Decl *D) {
assert(D && "no decl found");
if (D->isInvalidDecl()) return QualType();
@@ -17374,7 +17249,7 @@ QualType TreeTransform<Derived>::RebuildUnresolvedUsingType(SourceLocation Loc,
// UsingDecls, but they must each have exactly one type, and it must be
// the same type in every case. But we must have at least one expansion!
if (UPD->expansions().empty()) {
- getSema().Diag(Loc, diag::err_using_pack_expansion_empty)
+ getSema().Diag(NameLoc, diag::err_using_pack_expansion_empty)
<< UPD->isCXXClassMember() << UPD;
return QualType();
}
@@ -17385,10 +17260,11 @@ QualType TreeTransform<Derived>::RebuildUnresolvedUsingType(SourceLocation Loc,
QualType FallbackT;
QualType T;
for (auto *E : UPD->expansions()) {
- QualType ThisT = RebuildUnresolvedUsingType(Loc, E);
+ QualType ThisT =
+ RebuildUnresolvedUsingType(Keyword, Qualifier, NameLoc, E);
if (ThisT.isNull())
continue;
- else if (ThisT->getAs<UnresolvedUsingType>())
+ if (ThisT->getAs<UnresolvedUsingType>())
FallbackT = ThisT;
else if (T.isNull())
T = ThisT;
@@ -17397,7 +17273,8 @@ QualType TreeTransform<Derived>::RebuildUnresolvedUsingType(SourceLocation Loc,
"mismatched resolved types in using pack expansion");
}
return T.isNull() ? FallbackT : T;
- } else if (auto *Using = dyn_cast<UsingDecl>(D)) {
+ }
+ if (auto *Using = dyn_cast<UsingDecl>(D)) {
assert(Using->hasTypename() &&
"UnresolvedUsingTypenameDecl transformed to non-typename using");
@@ -17405,17 +17282,14 @@ QualType TreeTransform<Derived>::RebuildUnresolvedUsingType(SourceLocation Loc,
assert(++Using->shadow_begin() == Using->shadow_end());
UsingShadowDecl *Shadow = *Using->shadow_begin();
- if (SemaRef.DiagnoseUseOfDecl(Shadow->getTargetDecl(), Loc))
+ if (SemaRef.DiagnoseUseOfDecl(Shadow->getTargetDecl(), NameLoc))
return QualType();
- return SemaRef.Context.getUsingType(
- Shadow, SemaRef.Context.getTypeDeclType(
- cast<TypeDecl>(Shadow->getTargetDecl())));
- } else {
- assert(isa<UnresolvedUsingTypenameDecl>(D) &&
- "UnresolvedUsingTypenameDecl transformed to non-using decl");
- return SemaRef.Context.getTypeDeclType(
- cast<UnresolvedUsingTypenameDecl>(D));
+ return SemaRef.Context.getUsingType(Keyword, Qualifier, Shadow);
}
+ assert(isa<UnresolvedUsingTypenameDecl>(D) &&
+ "UnresolvedUsingTypenameDecl transformed to non-using decl");
+ return SemaRef.Context.getUnresolvedUsingType(
+ Keyword, Qualifier, cast<UnresolvedUsingTypenameDecl>(D));
}
template <typename Derived>
@@ -17451,12 +17325,12 @@ QualType TreeTransform<Derived>::RebuildUnaryTransformType(QualType BaseType,
return SemaRef.BuildUnaryTransformType(BaseType, UKind, Loc);
}
-template<typename Derived>
+template <typename Derived>
QualType TreeTransform<Derived>::RebuildTemplateSpecializationType(
- TemplateName Template,
- SourceLocation TemplateNameLoc,
- TemplateArgumentListInfo &TemplateArgs) {
- return SemaRef.CheckTemplateIdType(Template, TemplateNameLoc, TemplateArgs);
+ ElaboratedTypeKeyword Keyword, TemplateName Template,
+ SourceLocation TemplateNameLoc, TemplateArgumentListInfo &TemplateArgs) {
+ return SemaRef.CheckTemplateIdType(Keyword, Template, TemplateNameLoc,
+ TemplateArgs);
}
template<typename Derived>
@@ -17499,15 +17373,10 @@ TreeTransform<Derived>::RebuildTemplateName(CXXScopeSpec &SS,
TemplateName(Template));
}
-template<typename Derived>
-TemplateName
-TreeTransform<Derived>::RebuildTemplateName(CXXScopeSpec &SS,
- SourceLocation TemplateKWLoc,
- const IdentifierInfo &Name,
- SourceLocation NameLoc,
- QualType ObjectType,
- NamedDecl *FirstQualifierInScope,
- bool AllowInjectedClassName) {
+template <typename Derived>
+TemplateName TreeTransform<Derived>::RebuildTemplateName(
+ CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const IdentifierInfo &Name,
+ SourceLocation NameLoc, QualType ObjectType, bool AllowInjectedClassName) {
UnqualifiedId TemplateName;
TemplateName.setIdentifier(&Name, NameLoc);
Sema::TemplateTy Template;
@@ -17649,15 +17518,16 @@ TreeTransform<Derived>::RebuildCXXPseudoDestructorExpr(Expr *Base,
NameInfo.setNamedTypeInfo(DestroyedType);
// The scope type is now known to be a valid nested name specifier
- // component. Tack it on to the end of the nested name specifier.
+ // component. Tack it on to the nested name specifier.
if (ScopeType) {
- if (!ScopeType->getType()->getAs<TagType>()) {
+ if (!isa<TagType>(ScopeType->getType().getCanonicalType())) {
getSema().Diag(ScopeType->getTypeLoc().getBeginLoc(),
diag::err_expected_class_or_namespace)
<< ScopeType->getType() << getSema().getLangOpts().CPlusPlus;
return ExprError();
}
- SS.Extend(SemaRef.Context, ScopeType->getTypeLoc(), CCLoc);
+ SS.clear();
+ SS.Make(SemaRef.Context, ScopeType->getTypeLoc(), CCLoc);
}
SourceLocation TemplateKWLoc; // FIXME: retrieve it from caller.
diff --git a/clang/lib/Sema/UsedDeclVisitor.h b/clang/lib/Sema/UsedDeclVisitor.h
index 580d702..ad475ab 100644
--- a/clang/lib/Sema/UsedDeclVisitor.h
+++ b/clang/lib/Sema/UsedDeclVisitor.h
@@ -71,9 +71,10 @@ public:
if (!DestroyedOrNull.isNull()) {
QualType Destroyed = S.Context.getBaseElementType(DestroyedOrNull);
if (const RecordType *DestroyedRec = Destroyed->getAs<RecordType>()) {
- CXXRecordDecl *Record = cast<CXXRecordDecl>(DestroyedRec->getDecl());
- if (Record->getDefinition())
- asImpl().visitUsedDecl(E->getBeginLoc(), S.LookupDestructor(Record));
+ CXXRecordDecl *Record =
+ cast<CXXRecordDecl>(DestroyedRec->getOriginalDecl());
+ if (auto *Def = Record->getDefinition())
+ asImpl().visitUsedDecl(E->getBeginLoc(), S.LookupDestructor(Def));
}
}
diff --git a/clang/lib/Serialization/ASTReader.cpp b/clang/lib/Serialization/ASTReader.cpp
index ed0ec9e..e8dddda 100644
--- a/clang/lib/Serialization/ASTReader.cpp
+++ b/clang/lib/Serialization/ASTReader.cpp
@@ -5500,7 +5500,7 @@ void ASTReader::InitializeContext() {
Error("Invalid FILE type in AST file");
return;
}
- Context.setFILEDecl(Tag->getDecl());
+ Context.setFILEDecl(Tag->getOriginalDecl());
}
}
}
@@ -5521,7 +5521,7 @@ void ASTReader::InitializeContext() {
Error("Invalid jmp_buf type in AST file");
return;
}
- Context.setjmp_bufDecl(Tag->getDecl());
+ Context.setjmp_bufDecl(Tag->getOriginalDecl());
}
}
}
@@ -5539,7 +5539,7 @@ void ASTReader::InitializeContext() {
else {
const TagType *Tag = Sigjmp_bufType->getAs<TagType>();
assert(Tag && "Invalid sigjmp_buf type in AST file");
- Context.setsigjmp_bufDecl(Tag->getDecl());
+ Context.setsigjmp_bufDecl(Tag->getOriginalDecl());
}
}
}
@@ -5574,7 +5574,7 @@ void ASTReader::InitializeContext() {
else {
const TagType *Tag = Ucontext_tType->getAs<TagType>();
assert(Tag && "Invalid ucontext_t type in AST file");
- Context.setucontext_tDecl(Tag->getDecl());
+ Context.setucontext_tDecl(Tag->getOriginalDecl());
}
}
}
@@ -7226,6 +7226,7 @@ public:
void VisitFunctionTypeLoc(FunctionTypeLoc);
void VisitArrayTypeLoc(ArrayTypeLoc);
+ void VisitTagTypeLoc(TagTypeLoc TL);
};
} // namespace clang
@@ -7372,15 +7373,24 @@ void TypeLocReader::VisitFunctionNoProtoTypeLoc(FunctionNoProtoTypeLoc TL) {
}
void TypeLocReader::VisitUnresolvedUsingTypeLoc(UnresolvedUsingTypeLoc TL) {
- TL.setNameLoc(readSourceLocation());
+ SourceLocation ElaboratedKeywordLoc = readSourceLocation();
+ NestedNameSpecifierLoc QualifierLoc = ReadNestedNameSpecifierLoc();
+ SourceLocation NameLoc = readSourceLocation();
+ TL.set(ElaboratedKeywordLoc, QualifierLoc, NameLoc);
}
void TypeLocReader::VisitUsingTypeLoc(UsingTypeLoc TL) {
- TL.setNameLoc(readSourceLocation());
+ SourceLocation ElaboratedKeywordLoc = readSourceLocation();
+ NestedNameSpecifierLoc QualifierLoc = ReadNestedNameSpecifierLoc();
+ SourceLocation NameLoc = readSourceLocation();
+ TL.set(ElaboratedKeywordLoc, QualifierLoc, NameLoc);
}
void TypeLocReader::VisitTypedefTypeLoc(TypedefTypeLoc TL) {
- TL.setNameLoc(readSourceLocation());
+ SourceLocation ElaboratedKeywordLoc = readSourceLocation();
+ NestedNameSpecifierLoc QualifierLoc = ReadNestedNameSpecifierLoc();
+ SourceLocation NameLoc = readSourceLocation();
+ TL.set(ElaboratedKeywordLoc, QualifierLoc, NameLoc);
}
void TypeLocReader::VisitTypeOfExprTypeLoc(TypeOfExprTypeLoc TL) {
@@ -7434,17 +7444,27 @@ void TypeLocReader::VisitAutoTypeLoc(AutoTypeLoc TL) {
void TypeLocReader::VisitDeducedTemplateSpecializationTypeLoc(
DeducedTemplateSpecializationTypeLoc TL) {
+ TL.setElaboratedKeywordLoc(readSourceLocation());
+ TL.setQualifierLoc(ReadNestedNameSpecifierLoc());
TL.setTemplateNameLoc(readSourceLocation());
}
-void TypeLocReader::VisitRecordTypeLoc(RecordTypeLoc TL) {
+void TypeLocReader::VisitTagTypeLoc(TagTypeLoc TL) {
+ TL.setElaboratedKeywordLoc(readSourceLocation());
+ TL.setQualifierLoc(ReadNestedNameSpecifierLoc());
TL.setNameLoc(readSourceLocation());
}
-void TypeLocReader::VisitEnumTypeLoc(EnumTypeLoc TL) {
- TL.setNameLoc(readSourceLocation());
+void TypeLocReader::VisitRecordTypeLoc(RecordTypeLoc TL) {
+ VisitTagTypeLoc(TL);
}
+void TypeLocReader::VisitInjectedClassNameTypeLoc(InjectedClassNameTypeLoc TL) {
+ VisitTagTypeLoc(TL);
+}
+
+void TypeLocReader::VisitEnumTypeLoc(EnumTypeLoc TL) { VisitTagTypeLoc(TL); }
+
void TypeLocReader::VisitAttributedTypeLoc(AttributedTypeLoc TL) {
TL.setAttr(ReadAttr());
}
@@ -7482,14 +7502,18 @@ void TypeLocReader::VisitSubstTemplateTypeParmPackTypeLoc(
void TypeLocReader::VisitTemplateSpecializationTypeLoc(
TemplateSpecializationTypeLoc TL) {
- TL.setTemplateKeywordLoc(readSourceLocation());
- TL.setTemplateNameLoc(readSourceLocation());
- TL.setLAngleLoc(readSourceLocation());
- TL.setRAngleLoc(readSourceLocation());
- for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
- TL.setArgLocInfo(i,
- Reader.readTemplateArgumentLocInfo(
- TL.getTypePtr()->template_arguments()[i].getKind()));
+ SourceLocation ElaboratedKeywordLoc = readSourceLocation();
+ NestedNameSpecifierLoc QualifierLoc = ReadNestedNameSpecifierLoc();
+ SourceLocation TemplateKeywordLoc = readSourceLocation();
+ SourceLocation NameLoc = readSourceLocation();
+ SourceLocation LAngleLoc = readSourceLocation();
+ SourceLocation RAngleLoc = readSourceLocation();
+ TL.set(ElaboratedKeywordLoc, QualifierLoc, TemplateKeywordLoc, NameLoc,
+ LAngleLoc, RAngleLoc);
+ MutableArrayRef<TemplateArgumentLocInfo> Args = TL.getArgLocInfos();
+ for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I)
+ Args[I] = Reader.readTemplateArgumentLocInfo(
+ TL.getTypePtr()->template_arguments()[I].getKind());
}
void TypeLocReader::VisitParenTypeLoc(ParenTypeLoc TL) {
@@ -7497,15 +7521,6 @@ void TypeLocReader::VisitParenTypeLoc(ParenTypeLoc TL) {
TL.setRParenLoc(readSourceLocation());
}
-void TypeLocReader::VisitElaboratedTypeLoc(ElaboratedTypeLoc TL) {
- TL.setElaboratedKeywordLoc(readSourceLocation());
- TL.setQualifierLoc(ReadNestedNameSpecifierLoc());
-}
-
-void TypeLocReader::VisitInjectedClassNameTypeLoc(InjectedClassNameTypeLoc TL) {
- TL.setNameLoc(readSourceLocation());
-}
-
void TypeLocReader::VisitDependentNameTypeLoc(DependentNameTypeLoc TL) {
TL.setElaboratedKeywordLoc(readSourceLocation());
TL.setQualifierLoc(ReadNestedNameSpecifierLoc());
@@ -7962,18 +7977,15 @@ ASTRecordReader::readTemplateArgumentLocInfo(TemplateArgument::ArgKind Kind) {
return readExpr();
case TemplateArgument::Type:
return readTypeSourceInfo();
- case TemplateArgument::Template: {
- NestedNameSpecifierLoc QualifierLoc =
- readNestedNameSpecifierLoc();
- SourceLocation TemplateNameLoc = readSourceLocation();
- return TemplateArgumentLocInfo(getASTContext(), QualifierLoc,
- TemplateNameLoc, SourceLocation());
- }
+ case TemplateArgument::Template:
case TemplateArgument::TemplateExpansion: {
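+ // Template and TemplateExpansion arguments share this layout; only the
+ // expansion form carries an ellipsis location.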
+ SourceLocation TemplateKWLoc = readSourceLocation();
NestedNameSpecifierLoc QualifierLoc = readNestedNameSpecifierLoc();
SourceLocation TemplateNameLoc = readSourceLocation();
- SourceLocation EllipsisLoc = readSourceLocation();
- return TemplateArgumentLocInfo(getASTContext(), QualifierLoc,
+ SourceLocation EllipsisLoc = Kind == TemplateArgument::TemplateExpansion
+ ? readSourceLocation()
+ : SourceLocation();
+ return TemplateArgumentLocInfo(getASTContext(), TemplateKWLoc, QualifierLoc,
TemplateNameLoc, EllipsisLoc);
}
case TemplateArgument::Null:
@@ -9586,12 +9598,6 @@ void ASTReader::AssignedLambdaNumbering(CXXRecordDecl *Lambda) {
CXXRecordDecl *Previous =
cast<CXXRecordDecl>(Iter->second)->getMostRecentDecl();
Lambda->setPreviousDecl(Previous);
- // FIXME: It will be best to use the Previous type when we creating the
- // lambda directly. But that requires us to get the lambda context decl and
- // lambda index before creating the lambda, which needs a drastic change in
- // the parser.
- const_cast<QualType &>(Lambda->TypeForDecl->CanonicalType) =
- Previous->TypeForDecl->CanonicalType;
return;
}
@@ -10107,41 +10113,37 @@ ASTRecordReader::readNestedNameSpecifierLoc() {
for (unsigned I = 0; I != N; ++I) {
auto Kind = readNestedNameSpecifierKind();
switch (Kind) {
- case NestedNameSpecifier::Identifier: {
- IdentifierInfo *II = readIdentifier();
- SourceRange Range = readSourceRange();
- Builder.Extend(Context, II, Range.getBegin(), Range.getEnd());
- break;
- }
-
- case NestedNameSpecifier::Namespace: {
+ case NestedNameSpecifier::Kind::Namespace: {
auto *NS = readDeclAs<NamespaceBaseDecl>();
SourceRange Range = readSourceRange();
Builder.Extend(Context, NS, Range.getBegin(), Range.getEnd());
break;
}
- case NestedNameSpecifier::TypeSpec: {
+ case NestedNameSpecifier::Kind::Type: {
TypeSourceInfo *T = readTypeSourceInfo();
if (!T)
return NestedNameSpecifierLoc();
SourceLocation ColonColonLoc = readSourceLocation();
- Builder.Extend(Context, T->getTypeLoc(), ColonColonLoc);
+ Builder.Make(Context, T->getTypeLoc(), ColonColonLoc);
break;
}
- case NestedNameSpecifier::Global: {
+ case NestedNameSpecifier::Kind::Global: {
SourceLocation ColonColonLoc = readSourceLocation();
Builder.MakeGlobal(Context, ColonColonLoc);
break;
}
- case NestedNameSpecifier::Super: {
+ case NestedNameSpecifier::Kind::MicrosoftSuper: {
CXXRecordDecl *RD = readDeclAs<CXXRecordDecl>();
SourceRange Range = readSourceRange();
- Builder.MakeSuper(Context, RD, Range.getBegin(), Range.getEnd());
+ Builder.MakeMicrosoftSuper(Context, RD, Range.getBegin(), Range.getEnd());
break;
}
+
+ case NestedNameSpecifier::Kind::Null:
+ llvm_unreachable("unexpected null nested name specifier");
}
}
@@ -10537,12 +10539,7 @@ void ASTReader::finishPendingActions() {
// happen now, after the redeclaration chains have been fully wired.
for (Decl *D : PendingDefinitions) {
if (TagDecl *TD = dyn_cast<TagDecl>(D)) {
- if (const TagType *TagT = dyn_cast<TagType>(TD->getTypeForDecl())) {
- // Make sure that the TagType points at the definition.
- const_cast<TagType*>(TagT)->decl = TD;
- }
-
- if (auto RD = dyn_cast<CXXRecordDecl>(D)) {
+ if (auto *RD = dyn_cast<CXXRecordDecl>(TD)) {
for (auto *R = getMostRecentExistingDecl(RD); R;
R = R->getPreviousDecl()) {
assert((R == D) ==
diff --git a/clang/lib/Serialization/ASTReaderDecl.cpp b/clang/lib/Serialization/ASTReaderDecl.cpp
index 2c7beb4..6b35b20 100644
--- a/clang/lib/Serialization/ASTReaderDecl.cpp
+++ b/clang/lib/Serialization/ASTReaderDecl.cpp
@@ -538,7 +538,11 @@ void ASTDeclReader::Visit(Decl *D) {
if (auto *TD = dyn_cast<TypeDecl>(D)) {
// We have a fully initialized TypeDecl. Read its type now.
- TD->setTypeForDecl(Reader.GetType(DeferredTypeID).getTypePtrOrNull());
+ if (isa<TagDecl, TypedefDecl, TypeAliasDecl>(TD))
+ assert(DeferredTypeID == 0 &&
+ "Deferred type not used for TagDecls and Typedefs");
+ else
+ TD->setTypeForDecl(Reader.GetType(DeferredTypeID).getTypePtrOrNull());
// If this is a tag declaration with a typedef name for linkage, it's safe
// to load that typedef now.
@@ -695,7 +699,8 @@ void ASTDeclReader::VisitTypeDecl(TypeDecl *TD) {
VisitNamedDecl(TD);
TD->setLocStart(readSourceLocation());
// Delay type reading until after we have fully initialized the decl.
- DeferredTypeID = Record.getGlobalTypeID(Record.readInt());
+ if (!isa<TagDecl, TypedefDecl, TypeAliasDecl>(TD))
+ DeferredTypeID = Record.getGlobalTypeID(Record.readInt());
}
RedeclarableResult ASTDeclReader::VisitTypedefNameDecl(TypedefNameDecl *TD) {
@@ -2237,15 +2242,6 @@ RedeclarableResult ASTDeclReader::VisitCXXRecordDeclImpl(CXXRecordDecl *D) {
// Merged when we merge the template.
auto *Template = readDeclAs<ClassTemplateDecl>();
D->TemplateOrInstantiation = Template;
- if (!Template->getTemplatedDecl()) {
- // We've not actually loaded the ClassTemplateDecl yet, because we're
- // currently being loaded as its pattern. Rely on it to set up our
- // TypeForDecl (see VisitClassTemplateDecl).
- //
- // Beware: we do not yet know our canonical declaration, and may still
- // get merged once the surrounding class template has got off the ground.
- DeferredTypeID = 0;
- }
break;
}
case CXXRecMemberSpecialization: {
@@ -2479,14 +2475,6 @@ void ASTDeclReader::VisitClassTemplateDecl(ClassTemplateDecl *D) {
ReadSpecializations(*Loc.F, D, Loc.F->DeclsCursor, /*IsPartial=*/false);
ReadSpecializations(*Loc.F, D, Loc.F->DeclsCursor, /*IsPartial=*/true);
}
-
- if (D->getTemplatedDecl()->TemplateOrInstantiation) {
- // We were loaded before our templated declaration was. We've not set up
- // its corresponding type yet (see VisitCXXRecordDeclImpl), so reconstruct
- // it now.
- Reader.getContext().getInjectedClassNameType(
- D->getTemplatedDecl(), D->getInjectedClassNameSpecialization());
- }
}
void ASTDeclReader::VisitBuiltinTemplateDecl(BuiltinTemplateDecl *D) {
diff --git a/clang/lib/Serialization/ASTWriter.cpp b/clang/lib/Serialization/ASTWriter.cpp
index c072acd..9faf107 100644
--- a/clang/lib/Serialization/ASTWriter.cpp
+++ b/clang/lib/Serialization/ASTWriter.cpp
@@ -345,6 +345,7 @@ public:
void VisitArrayTypeLoc(ArrayTypeLoc TyLoc);
void VisitFunctionTypeLoc(FunctionTypeLoc TyLoc);
+ void VisitTagTypeLoc(TagTypeLoc TL);
};
} // namespace
@@ -490,14 +491,20 @@ void TypeLocWriter::VisitFunctionNoProtoTypeLoc(FunctionNoProtoTypeLoc TL) {
}
void TypeLocWriter::VisitUnresolvedUsingTypeLoc(UnresolvedUsingTypeLoc TL) {
+ addSourceLocation(TL.getElaboratedKeywordLoc());
+ Record.AddNestedNameSpecifierLoc(TL.getQualifierLoc());
addSourceLocation(TL.getNameLoc());
}
void TypeLocWriter::VisitUsingTypeLoc(UsingTypeLoc TL) {
+ addSourceLocation(TL.getElaboratedKeywordLoc());
+ Record.AddNestedNameSpecifierLoc(TL.getQualifierLoc());
addSourceLocation(TL.getNameLoc());
}
void TypeLocWriter::VisitTypedefTypeLoc(TypedefTypeLoc TL) {
+ addSourceLocation(TL.getElaboratedKeywordLoc());
+ Record.AddNestedNameSpecifierLoc(TL.getQualifierLoc());
addSourceLocation(TL.getNameLoc());
}
@@ -564,17 +571,27 @@ void TypeLocWriter::VisitAutoTypeLoc(AutoTypeLoc TL) {
void TypeLocWriter::VisitDeducedTemplateSpecializationTypeLoc(
DeducedTemplateSpecializationTypeLoc TL) {
+ addSourceLocation(TL.getElaboratedKeywordLoc());
+ Record.AddNestedNameSpecifierLoc(TL.getQualifierLoc());
addSourceLocation(TL.getTemplateNameLoc());
}
-void TypeLocWriter::VisitRecordTypeLoc(RecordTypeLoc TL) {
+void TypeLocWriter::VisitTagTypeLoc(TagTypeLoc TL) {
+ addSourceLocation(TL.getElaboratedKeywordLoc());
+ Record.AddNestedNameSpecifierLoc(TL.getQualifierLoc());
addSourceLocation(TL.getNameLoc());
}
-void TypeLocWriter::VisitEnumTypeLoc(EnumTypeLoc TL) {
- addSourceLocation(TL.getNameLoc());
+void TypeLocWriter::VisitRecordTypeLoc(RecordTypeLoc TL) {
+ VisitTagTypeLoc(TL);
+}
+
+void TypeLocWriter::VisitInjectedClassNameTypeLoc(InjectedClassNameTypeLoc TL) {
+ VisitTagTypeLoc(TL);
}
+void TypeLocWriter::VisitEnumTypeLoc(EnumTypeLoc TL) { VisitTagTypeLoc(TL); }
+
void TypeLocWriter::VisitAttributedTypeLoc(AttributedTypeLoc TL) {
Record.AddAttr(TL.getAttr());
}
@@ -612,13 +629,14 @@ void TypeLocWriter::VisitSubstTemplateTypeParmPackTypeLoc(
void TypeLocWriter::VisitTemplateSpecializationTypeLoc(
TemplateSpecializationTypeLoc TL) {
+ addSourceLocation(TL.getElaboratedKeywordLoc());
+ Record.AddNestedNameSpecifierLoc(TL.getQualifierLoc());
addSourceLocation(TL.getTemplateKeywordLoc());
addSourceLocation(TL.getTemplateNameLoc());
addSourceLocation(TL.getLAngleLoc());
addSourceLocation(TL.getRAngleLoc());
for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
- Record.AddTemplateArgumentLocInfo(TL.getArgLoc(i).getArgument().getKind(),
- TL.getArgLoc(i).getLocInfo());
+ Record.AddTemplateArgumentLocInfo(TL.getArgLoc(i));
}
void TypeLocWriter::VisitParenTypeLoc(ParenTypeLoc TL) {
@@ -630,15 +648,6 @@ void TypeLocWriter::VisitMacroQualifiedTypeLoc(MacroQualifiedTypeLoc TL) {
addSourceLocation(TL.getExpansionLoc());
}
-void TypeLocWriter::VisitElaboratedTypeLoc(ElaboratedTypeLoc TL) {
- addSourceLocation(TL.getElaboratedKeywordLoc());
- Record.AddNestedNameSpecifierLoc(TL.getQualifierLoc());
-}
-
-void TypeLocWriter::VisitInjectedClassNameTypeLoc(InjectedClassNameTypeLoc TL) {
- addSourceLocation(TL.getNameLoc());
-}
-
void TypeLocWriter::VisitDependentNameTypeLoc(DependentNameTypeLoc TL) {
addSourceLocation(TL.getElaboratedKeywordLoc());
Record.AddNestedNameSpecifierLoc(TL.getQualifierLoc());
@@ -654,8 +663,7 @@ void TypeLocWriter::VisitDependentTemplateSpecializationTypeLoc(
addSourceLocation(TL.getLAngleLoc());
addSourceLocation(TL.getRAngleLoc());
for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I)
- Record.AddTemplateArgumentLocInfo(TL.getArgLoc(I).getArgument().getKind(),
- TL.getArgLoc(I).getLocInfo());
+ Record.AddTemplateArgumentLocInfo(TL.getArgLoc(I));
}
void TypeLocWriter::VisitPackExpansionTypeLoc(PackExpansionTypeLoc TL) {
@@ -1038,7 +1046,6 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(TYPE_OBJC_INTERFACE);
RECORD(TYPE_OBJC_OBJECT_POINTER);
RECORD(TYPE_DECLTYPE);
- RECORD(TYPE_ELABORATED);
RECORD(TYPE_SUBST_TEMPLATE_TYPE_PARM);
RECORD(TYPE_UNRESOLVED_USING);
RECORD(TYPE_INJECTED_CLASS_NAME);
@@ -6769,22 +6776,22 @@ void ASTRecordWriter::AddCXXTemporary(const CXXTemporary *Temp) {
}
void ASTRecordWriter::AddTemplateArgumentLocInfo(
- TemplateArgument::ArgKind Kind, const TemplateArgumentLocInfo &Arg) {
- switch (Kind) {
+ const TemplateArgumentLoc &Arg) {
+ const TemplateArgumentLocInfo &Info = Arg.getLocInfo();
+ switch (auto K = Arg.getArgument().getKind()) {
case TemplateArgument::Expression:
- AddStmt(Arg.getAsExpr());
+ AddStmt(Info.getAsExpr());
break;
case TemplateArgument::Type:
- AddTypeSourceInfo(Arg.getAsTypeSourceInfo());
+ AddTypeSourceInfo(Info.getAsTypeSourceInfo());
break;
case TemplateArgument::Template:
- AddNestedNameSpecifierLoc(Arg.getTemplateQualifierLoc());
- AddSourceLocation(Arg.getTemplateNameLoc());
- break;
case TemplateArgument::TemplateExpansion:
+ AddSourceLocation(Arg.getTemplateKWLoc());
AddNestedNameSpecifierLoc(Arg.getTemplateQualifierLoc());
AddSourceLocation(Arg.getTemplateNameLoc());
- AddSourceLocation(Arg.getTemplateEllipsisLoc());
+ if (K == TemplateArgument::TemplateExpansion)
+ AddSourceLocation(Arg.getTemplateEllipsisLoc());
break;
case TemplateArgument::Null:
case TemplateArgument::Integral:
@@ -6807,7 +6814,7 @@ void ASTRecordWriter::AddTemplateArgumentLoc(const TemplateArgumentLoc &Arg) {
if (InfoHasSameExpr)
return; // Avoid storing the same expr twice.
}
- AddTemplateArgumentLocInfo(Arg.getArgument().getKind(), Arg.getLocInfo());
+ AddTemplateArgumentLocInfo(Arg);
}
void ASTRecordWriter::AddTypeSourceInfo(TypeSourceInfo *TInfo) {
@@ -7065,49 +7072,50 @@ void ASTRecordWriter::AddQualifierInfo(const QualifierInfo &Info) {
AddTemplateParameterList(Info.TemplParamLists[i]);
}
-void ASTRecordWriter::AddNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS) {
+void ASTRecordWriter::AddNestedNameSpecifierLoc(
+ NestedNameSpecifierLoc QualifierLoc) {
// Nested name specifiers usually aren't too long. I think that 8 would
// typically accommodate the vast majority.
SmallVector<NestedNameSpecifierLoc , 8> NestedNames;
// Push each of the nested-name-specifiers's onto a stack for
// serialization in reverse order.
- while (NNS) {
- NestedNames.push_back(NNS);
- NNS = NNS.getPrefix();
+ while (QualifierLoc) {
+ NestedNames.push_back(QualifierLoc);
+ QualifierLoc = QualifierLoc.getAsNamespaceAndPrefix().Prefix;
}
Record->push_back(NestedNames.size());
while(!NestedNames.empty()) {
- NNS = NestedNames.pop_back_val();
- NestedNameSpecifier::SpecifierKind Kind
- = NNS.getNestedNameSpecifier()->getKind();
- Record->push_back(Kind);
+ QualifierLoc = NestedNames.pop_back_val();
+ NestedNameSpecifier Qualifier = QualifierLoc.getNestedNameSpecifier();
+ NestedNameSpecifier::Kind Kind = Qualifier.getKind();
+ Record->push_back(llvm::to_underlying(Kind));
switch (Kind) {
- case NestedNameSpecifier::Identifier:
- AddIdentifierRef(NNS.getNestedNameSpecifier()->getAsIdentifier());
- AddSourceRange(NNS.getLocalSourceRange());
+ case NestedNameSpecifier::Kind::Namespace:
+ AddDeclRef(Qualifier.getAsNamespaceAndPrefix().Namespace);
+ AddSourceRange(QualifierLoc.getLocalSourceRange());
break;
- case NestedNameSpecifier::Namespace:
- AddDeclRef(NNS.getNestedNameSpecifier()->getAsNamespace());
- AddSourceRange(NNS.getLocalSourceRange());
+ case NestedNameSpecifier::Kind::Type: {
+ TypeLoc TL = QualifierLoc.castAsTypeLoc();
+ AddTypeRef(TL.getType());
+ AddTypeLoc(TL);
+ AddSourceLocation(QualifierLoc.getLocalSourceRange().getEnd());
break;
+ }
- case NestedNameSpecifier::TypeSpec:
- AddTypeRef(NNS.getTypeLoc().getType());
- AddTypeLoc(NNS.getTypeLoc());
- AddSourceLocation(NNS.getLocalSourceRange().getEnd());
+ case NestedNameSpecifier::Kind::Global:
+ AddSourceLocation(QualifierLoc.getLocalSourceRange().getEnd());
break;
- case NestedNameSpecifier::Global:
- AddSourceLocation(NNS.getLocalSourceRange().getEnd());
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
+ AddDeclRef(Qualifier.getAsMicrosoftSuper());
+ AddSourceRange(QualifierLoc.getLocalSourceRange());
break;
- case NestedNameSpecifier::Super:
- AddDeclRef(NNS.getNestedNameSpecifier()->getAsRecordDecl());
- AddSourceRange(NNS.getLocalSourceRange());
- break;
+ case NestedNameSpecifier::Kind::Null:
+ llvm_unreachable("unexpected null nested name specifier");
}
}
}
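// --- Illustrative sketch (not part of this patch): the outer-to-inner
// --- emission order used by AddNestedNameSpecifierLoc above, shown on a
// --- hypothetical generic prefix chain. The writer walks inner-to-outer,
// --- stacks the nodes, then pops them so the reader can rebuild the chain
// --- front to back. All names below are made up for illustration.
#include <vector>

struct QualifierNode {
  const char *Name;
  const QualifierNode *Prefix; // innermost node points toward the outermost
};

std::vector<const QualifierNode *> emissionOrder(const QualifierNode *Inner) {
  std::vector<const QualifierNode *> Stack;
  for (const QualifierNode *N = Inner; N; N = N->Prefix)
    Stack.push_back(N);                  // collected inner -> outer
  return {Stack.rbegin(), Stack.rend()}; // emitted outer -> inner
}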
diff --git a/clang/lib/Serialization/ASTWriterDecl.cpp b/clang/lib/Serialization/ASTWriterDecl.cpp
index 6fd25b9..ec3dda1 100644
--- a/clang/lib/Serialization/ASTWriterDecl.cpp
+++ b/clang/lib/Serialization/ASTWriterDecl.cpp
@@ -524,7 +524,7 @@ void ASTDeclWriter::VisitDecl(Decl *D) {
// bits actually. However, if we changed the order to be 0x0f, then we can
// store it as 0b001111, which takes 6 bits only now.
DeclBits.addBits((uint64_t)D->getModuleOwnershipKind(), /*BitWidth=*/3);
- DeclBits.addBit(D->isReferenced());
+ DeclBits.addBit(D->isThisDeclarationReferenced());
DeclBits.addBit(D->isUsed(false));
DeclBits.addBits(D->getAccess(), /*BitWidth=*/2);
DeclBits.addBit(D->isImplicit());
@@ -601,7 +601,8 @@ void ASTDeclWriter::VisitNamedDecl(NamedDecl *D) {
void ASTDeclWriter::VisitTypeDecl(TypeDecl *D) {
VisitNamedDecl(D);
Record.AddSourceLocation(D->getBeginLoc());
- Record.AddTypeRef(QualType(D->getTypeForDecl(), 0));
+ if (!isa<TagDecl, TypedefDecl, TypeAliasDecl>(D))
+ Record.AddTypeRef(QualType(D->getTypeForDecl(), 0));
}
void ASTDeclWriter::VisitTypedefNameDecl(TypedefNameDecl *D) {
@@ -2561,7 +2562,6 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(0)); // AnonDeclNumber
// TypeDecl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Source Location
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type Ref
// TagDecl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // IdentifierNamespace
Abv->Add(BitCodeAbbrevOp(
@@ -2607,7 +2607,6 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(0)); // AnonDeclNumber
// TypeDecl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Source Location
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type Ref
// TagDecl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // IdentifierNamespace
Abv->Add(BitCodeAbbrevOp(
diff --git a/clang/lib/Serialization/TemplateArgumentHasher.cpp b/clang/lib/Serialization/TemplateArgumentHasher.cpp
index c56138e..3e8ffea 100644
--- a/clang/lib/Serialization/TemplateArgumentHasher.cpp
+++ b/clang/lib/Serialization/TemplateArgumentHasher.cpp
@@ -320,7 +320,7 @@ public:
void VisitMemberPointerType(const MemberPointerType *T) {
AddQualType(T->getPointeeType());
- AddType(T->getQualifier()->getAsType());
+ AddType(T->getQualifier().getAsType());
if (auto *RD = T->getMostRecentCXXRecordDecl())
AddDecl(RD->getCanonicalDecl());
}
@@ -358,7 +358,7 @@ public:
AddQualType(T->getReplacementType());
}
- void VisitTagType(const TagType *T) { AddDecl(T->getDecl()); }
+ void VisitTagType(const TagType *T) { AddDecl(T->getOriginalDecl()); }
void VisitRecordType(const RecordType *T) { VisitTagType(T); }
void VisitEnumType(const EnumType *T) { VisitTagType(T); }
@@ -379,10 +379,6 @@ public:
void VisitTypedefType(const TypedefType *T) { AddDecl(T->getDecl()); }
- void VisitElaboratedType(const ElaboratedType *T) {
- AddQualType(T->getNamedType());
- }
-
void VisitUnaryTransformType(const UnaryTransformType *T) {
AddQualType(T->getUnderlyingType());
AddQualType(T->getBaseType());
diff --git a/clang/lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp
index 3b3def7..e64153d 100644
--- a/clang/lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp
@@ -183,7 +183,8 @@ public:
llvm::errs() << "NewAllocator\n";
}
- void checkBind(SVal Loc, SVal Val, const Stmt *S, CheckerContext &C) const {
+ void checkBind(SVal Loc, SVal Val, const Stmt *S, bool AtDeclInit,
+ CheckerContext &C) const {
if (isCallbackEnabled(C, "Bind"))
llvm::errs() << "Bind\n";
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
index 837cbbc..921114a 100644
--- a/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
@@ -29,7 +29,8 @@ class BoolAssignmentChecker : public Checker<check::Bind> {
bool IsTainted = false) const;
public:
- void checkBind(SVal Loc, SVal Val, const Stmt *S, CheckerContext &C) const;
+ void checkBind(SVal Loc, SVal Val, const Stmt *S, bool AtDeclInit,
+ CheckerContext &C) const;
};
} // end anonymous namespace
@@ -55,6 +56,7 @@ static bool isBooleanType(QualType Ty) {
}
void BoolAssignmentChecker::checkBind(SVal Loc, SVal Val, const Stmt *S,
+ bool AtDeclInit,
CheckerContext &C) const {
// We are only interested in stores into Booleans.
diff --git a/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
index 2393564..731e506 100644
--- a/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
@@ -250,7 +250,7 @@ public:
bool Find(const TypedValueRegion *R) {
QualType T = R->getValueType();
if (const RecordType *RT = T->getAsStructureType()) {
- const RecordDecl *RD = RT->getDecl()->getDefinition();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinition();
assert(RD && "Referred record has no definition");
for (const auto *I : RD->fields()) {
if (I->isUnnamedBitField())
diff --git a/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
index 0b52c9b..90c6537 100644
--- a/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
@@ -53,7 +53,7 @@ static bool evenFlexibleArraySize(ASTContext &Ctx, CharUnits RegionSize,
if (!RT)
return false;
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
RecordDecl::field_iterator Iter(RD->field_begin());
RecordDecl::field_iterator End(RD->field_end());
const FieldDecl *Last = nullptr;
diff --git a/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp b/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
index 350db4b..392c7ee 100644
--- a/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
@@ -175,9 +175,12 @@ public:
/// \param Loc The value of the location (pointer).
/// \param Val The value which will be stored at the location Loc.
/// \param S The bind is performed while processing the statement S.
+ /// \param AtDeclInit Whether the bind is performed during declaration
+ /// initialization.
///
/// check::Bind
- void checkBind(SVal Loc, SVal Val, const Stmt *S, CheckerContext &) const {}
+ void checkBind(SVal Loc, SVal Val, const Stmt *S, bool AtDeclInit,
+ CheckerContext &) const {}
/// Called after a CFG edge is taken within a function.
///
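// --- Illustrative sketch (not part of this patch): a hypothetical minimal
// --- checker written against the new checkBind signature documented above.
// --- The AtDeclInit flag tells the callback whether the bind comes from a
// --- declaration's initializer; the class name and bug type are made up.
namespace {
class AssignmentOnlyChecker : public Checker<check::Bind> {
  const BugType BT{this, "Suspicious assignment"};

public:
  void checkBind(SVal Loc, SVal Val, const Stmt *S, bool AtDeclInit,
                 CheckerContext &C) const {
    // Declaration initializations ('int x = f();') now arrive with
    // AtDeclInit == true and can be skipped without AST heuristics.
    if (AtDeclInit)
      return;
    // ... inspect Loc/Val for plain assignments only ...
  }
};
} // namespace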
diff --git a/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
index 152129e..395d724 100644
--- a/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
@@ -48,7 +48,8 @@ class DereferenceChecker
public:
void checkLocation(SVal location, bool isLoad, const Stmt* S,
CheckerContext &C) const;
- void checkBind(SVal L, SVal V, const Stmt *S, CheckerContext &C) const;
+ void checkBind(SVal L, SVal V, const Stmt *S, bool AtDeclInit,
+ CheckerContext &C) const;
static void AddDerefSource(raw_ostream &os,
SmallVectorImpl<SourceRange> &Ranges,
@@ -309,7 +310,7 @@ void DereferenceChecker::checkLocation(SVal l, bool isLoad, const Stmt* S,
}
void DereferenceChecker::checkBind(SVal L, SVal V, const Stmt *S,
- CheckerContext &C) const {
+ bool AtDeclInit, CheckerContext &C) const {
// If we're binding to a reference, check if the value is known to be null.
if (V.isUndef())
return;
diff --git a/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp b/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
index 4982cd59..cee744a 100644
--- a/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
@@ -245,7 +245,7 @@ static void recordFixedType(const MemRegion *Region, const CXXMethodDecl *MD,
assert(MD);
ASTContext &Ctx = C.getASTContext();
- QualType Ty = Ctx.getPointerType(Ctx.getRecordType(MD->getParent()));
+ CanQualType Ty = Ctx.getPointerType(Ctx.getCanonicalTagType(MD->getParent()));
ProgramStateRef State = C.getState();
State = setDynamicTypeInfo(State, Region, Ty, /*CanBeSubClassed=*/false);
diff --git a/clang/lib/StaticAnalyzer/Checkers/EnumCastOutOfRangeChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/EnumCastOutOfRangeChecker.cpp
index 355e82e..054b2e9 100644
--- a/clang/lib/StaticAnalyzer/Checkers/EnumCastOutOfRangeChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/EnumCastOutOfRangeChecker.cpp
@@ -148,7 +148,8 @@ void EnumCastOutOfRangeChecker::checkPreStmt(const CastExpr *CE,
// If the isEnumeralType() returned true, then the declaration must exist
// even if it is a stub declaration. It is up to the getDeclValuesForEnum()
// function to handle this.
- const EnumDecl *ED = T->castAs<EnumType>()->getDecl();
+ const EnumDecl *ED =
+ T->castAs<EnumType>()->getOriginalDecl()->getDefinitionOrSelf();
// [[clang::flag_enum]] annotated enums are by definition should be ignored.
if (ED->hasAttr<FlagEnumAttr>())
diff --git a/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp b/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp
index 7ad54c0..7eb9a1d 100644
--- a/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp
@@ -150,7 +150,8 @@ public:
IteratorModeling() = default;
void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
- void checkBind(SVal Loc, SVal Val, const Stmt *S, CheckerContext &C) const;
+ void checkBind(SVal Loc, SVal Val, const Stmt *S, bool AtDeclInit,
+ CheckerContext &C) const;
void checkPostStmt(const UnaryOperator *UO, CheckerContext &C) const;
void checkPostStmt(const BinaryOperator *BO, CheckerContext &C) const;
void checkPostStmt(const MaterializeTemporaryExpr *MTE,
@@ -234,7 +235,7 @@ void IteratorModeling::checkPostCall(const CallEvent &Call,
}
void IteratorModeling::checkBind(SVal Loc, SVal Val, const Stmt *S,
- CheckerContext &C) const {
+ bool AtDeclInit, CheckerContext &C) const {
auto State = C.getState();
const auto *Pos = getIteratorPosition(State, Val);
if (Pos) {
diff --git a/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
index 1cb3848..828b6f9 100644
--- a/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
@@ -47,9 +47,6 @@ static bool InNamespace(const Decl *D, StringRef NS) {
}
static bool IsStdString(QualType T) {
- if (const ElaboratedType *QT = T->getAs<ElaboratedType>())
- T = QT->getNamedType();
-
const TypedefType *TT = T->getAs<TypedefType>();
if (!TT)
return false;
@@ -201,7 +198,7 @@ static bool IsPartOfAST(const CXXRecordDecl *R) {
for (const auto &BS : R->bases()) {
QualType T = BS.getType();
if (const RecordType *baseT = T->getAs<RecordType>()) {
- CXXRecordDecl *baseD = cast<CXXRecordDecl>(baseT->getDecl());
+ CXXRecordDecl *baseD = cast<CXXRecordDecl>(baseT->getOriginalDecl());
if (IsPartOfAST(baseD))
return true;
}
@@ -247,7 +244,7 @@ void ASTFieldVisitor::Visit(FieldDecl *D) {
ReportError(T);
if (const RecordType *RT = T->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl()->getDefinition();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinition();
for (auto *I : RD->fields())
Visit(I);
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
index 369d619..efb9809 100644
--- a/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
@@ -3156,7 +3156,7 @@ void MallocChecker::checkPreCall(const CallEvent &Call,
for (unsigned I = 0, E = Call.getNumArgs(); I != E; ++I) {
SVal ArgSVal = Call.getArgSVal(I);
if (isa<Loc>(ArgSVal)) {
- SymbolRef Sym = ArgSVal.getAsSymbol();
+ SymbolRef Sym = ArgSVal.getAsSymbol(/*IncludeBaseRegions=*/true);
if (!Sym)
continue;
if (checkUseAfterFree(Sym, C, Call.getArgExpr(I)))
diff --git a/clang/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp
index 0aea981..b1a7cd7 100644
--- a/clang/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp
@@ -148,7 +148,9 @@ void NonNullParamChecker::checkPreCall(const CallEvent &Call,
QualType T = ArgE->getType();
const RecordType *UT = T->getAsUnionType();
- if (!UT || !UT->getDecl()->hasAttr<TransparentUnionAttr>())
+ if (!UT || !UT->getOriginalDecl()
+ ->getMostRecentDecl()
+ ->hasAttr<TransparentUnionAttr>())
continue;
auto CSV = DV->getAs<nonloc::CompoundVal>();
diff --git a/clang/lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp
index 7927967..b5e32495 100644
--- a/clang/lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp
@@ -114,10 +114,7 @@ bool NonnullGlobalConstantsChecker::isGlobalConstString(SVal V) const {
if (AT->getAttrKind() == attr::TypeNonNull)
return true;
Ty = AT->getModifiedType();
- } else if (const auto *ET = dyn_cast<ElaboratedType>(T)) {
- const auto *TT = dyn_cast<TypedefType>(ET->getNamedType());
- if (!TT)
- return false;
+ } else if (const auto *TT = dyn_cast<TypedefType>(T)) {
Ty = TT->getDecl()->getUnderlyingType();
// It is sufficient for any intermediate typedef
// to be classified const.
diff --git a/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
index 9744d1a..eeb6b72 100644
--- a/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
@@ -97,7 +97,8 @@ public:
// libraries.
bool NoDiagnoseCallsToSystemHeaders = false;
- void checkBind(SVal L, SVal V, const Stmt *S, CheckerContext &C) const;
+ void checkBind(SVal L, SVal V, const Stmt *S, bool AtDeclInit,
+ CheckerContext &C) const;
void checkPostStmt(const ExplicitCastExpr *CE, CheckerContext &C) const;
void checkPreStmt(const ReturnStmt *S, CheckerContext &C) const;
void checkPostObjCMessage(const ObjCMethodCall &M, CheckerContext &C) const;
@@ -1250,7 +1251,7 @@ static bool isARCNilInitializedLocal(CheckerContext &C, const Stmt *S) {
/// Propagate the nullability information through binds and warn when nullable
/// pointer or null symbol is assigned to a pointer with a nonnull type.
void NullabilityChecker::checkBind(SVal L, SVal V, const Stmt *S,
- CheckerContext &C) const {
+ bool AtDeclInit, CheckerContext &C) const {
const TypedValueRegion *TVR =
dyn_cast_or_null<TypedValueRegion>(L.getAsRegion());
if (!TVR)
diff --git a/clang/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp
index f217520..68ab22a 100644
--- a/clang/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp
@@ -197,9 +197,9 @@ void NumberObjectConversionChecker::checkASTCodeBody(const Decl *D,
BugReporter &BR) const {
// Currently this matches CoreFoundation opaque pointer typedefs.
auto CSuspiciousNumberObjectExprM = expr(ignoringParenImpCasts(
- expr(hasType(elaboratedType(namesType(typedefType(
+ expr(hasType(typedefType(
hasDeclaration(anyOf(typedefDecl(hasName("CFNumberRef")),
- typedefDecl(hasName("CFBooleanRef")))))))))
+ typedefDecl(hasName("CFBooleanRef")))))))
.bind("c_object")));
// Currently this matches XNU kernel number-object pointers.
@@ -238,8 +238,7 @@ void NumberObjectConversionChecker::checkASTCodeBody(const Decl *D,
// The .bind here is in order to compose the error message more accurately.
auto ObjCSuspiciousScalarBooleanTypeM =
- qualType(elaboratedType(namesType(
- typedefType(hasDeclaration(typedefDecl(hasName("BOOL")))))))
+ qualType(typedefType(hasDeclaration(typedefDecl(hasName("BOOL")))))
.bind("objc_bool_type");
// The .bind here is in order to compose the error message more accurately.
@@ -252,8 +251,8 @@ void NumberObjectConversionChecker::checkASTCodeBody(const Decl *D,
// for storing pointers.
auto SuspiciousScalarNumberTypeM =
qualType(hasCanonicalType(isInteger()),
- unless(elaboratedType(namesType(typedefType(hasDeclaration(
- typedefDecl(matchesName("^::u?intptr_t$"))))))))
+ unless(typedefType(
+ hasDeclaration(typedefDecl(matchesName("^::u?intptr_t$"))))))
.bind("int_type");
auto SuspiciousScalarTypeM =
diff --git a/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
index ace3426..e40b4f8 100644
--- a/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
@@ -73,7 +73,8 @@ public:
void checkPreStmt(const ReturnStmt *S, CheckerContext &C) const;
void checkLocation(SVal location, bool isLoad, const Stmt *S,
CheckerContext &C) const;
- void checkBind(SVal loc, SVal val, const Stmt *S, CheckerContext &C) const;
+ void checkBind(SVal loc, SVal val, const Stmt *S, bool AtDeclInit,
+ CheckerContext &C) const;
void checkPreCall(const CallEvent &CE, CheckerContext &C) const;
void checkPostCall(const CallEvent &CE, CheckerContext &C) const;
@@ -311,9 +312,8 @@ void ObjCSelfInitChecker::checkLocation(SVal location, bool isLoad,
C);
}
-
void ObjCSelfInitChecker::checkBind(SVal loc, SVal val, const Stmt *S,
- CheckerContext &C) const {
+ bool AtDeclInit, CheckerContext &C) const {
// Allow assignment of anything to self. Self is a local variable in the
// initializer, so it is legal to assign anything to it, like results of
// static functions/method calls. After self is assigned something we cannot
diff --git a/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
index d4efbdd..7ef6595 100644
--- a/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
@@ -104,7 +104,7 @@ public:
// There is not enough excess padding to trigger a warning.
return;
}
- reportRecord(RD, BaselinePad, OptimalPad, OptimalFieldsOrder);
+ reportRecord(ASTContext, RD, BaselinePad, OptimalPad, OptimalFieldsOrder);
}
/// Look for arrays of overly padded types. If the padding of the
@@ -123,7 +123,7 @@ public:
return;
// TODO: Recurse into the fields to see if they have excess padding.
- visitRecord(RT->getDecl(), Elts);
+ visitRecord(RT->getOriginalDecl()->getDefinitionOrSelf(), Elts);
}
bool shouldSkipDecl(const RecordDecl *RD) const {
@@ -159,9 +159,7 @@ public:
return true;
// Can't layout a template, so skip it. We do still layout the
// instantiations though.
- if (CXXRD->getTypeForDecl()->isDependentType())
- return true;
- if (CXXRD->getTypeForDecl()->isInstantiationDependentType())
+ if (CXXRD->isDependentType())
return true;
}
// How do you reorder fields if you haven't got any?
@@ -306,14 +304,14 @@ public:
}
void reportRecord(
- const RecordDecl *RD, CharUnits BaselinePad, CharUnits OptimalPad,
+ const ASTContext &Ctx, const RecordDecl *RD, CharUnits BaselinePad,
+ CharUnits OptimalPad,
const SmallVector<const FieldDecl *, 20> &OptimalFieldsOrder) const {
SmallString<100> Buf;
llvm::raw_svector_ostream Os(Buf);
Os << "Excessive padding in '";
- Os << QualType::getAsString(RD->getTypeForDecl(), Qualifiers(),
- LangOptions())
- << "'";
+ QualType(Ctx.getCanonicalTagType(RD)).print(Os, LangOptions());
+ Os << "'";
if (auto *TSD = dyn_cast<ClassTemplateSpecializationDecl>(RD)) {
// TODO: make this show up better in the console output and in
diff --git a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp
index 62bc321..1762505 100644
--- a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp
@@ -840,20 +840,27 @@ ProgramStateRef RetainCountChecker::updateSymbol(ProgramStateRef state,
const RefCountBug &
RetainCountChecker::errorKindToBugKind(RefVal::Kind ErrorKind,
SymbolRef Sym) const {
+ const RefCountFrontend &FE = getPreferredFrontend();
+
switch (ErrorKind) {
case RefVal::ErrorUseAfterRelease:
- return *UseAfterRelease;
+ return FE.UseAfterRelease;
case RefVal::ErrorReleaseNotOwned:
- return *ReleaseNotOwned;
+ return FE.ReleaseNotOwned;
case RefVal::ErrorDeallocNotOwned:
if (Sym->getType()->getPointeeCXXRecordDecl())
- return *FreeNotOwned;
- return *DeallocNotOwned;
+ return FE.FreeNotOwned;
+ return FE.DeallocNotOwned;
default:
llvm_unreachable("Unhandled error.");
}
}
+bool RetainCountChecker::isReleaseUnownedError(RefVal::Kind ErrorKind) const {
+ return ErrorKind == RefVal::ErrorReleaseNotOwned ||
+ ErrorKind == RefVal::ErrorDeallocNotOwned;
+}
+
void RetainCountChecker::processNonLeakError(ProgramStateRef St,
SourceRange ErrorRange,
RefVal::Kind ErrorKind,
@@ -874,8 +881,8 @@ void RetainCountChecker::processNonLeakError(ProgramStateRef St,
return;
auto report = std::make_unique<RefCountReport>(
- errorKindToBugKind(ErrorKind, Sym),
- C.getASTContext().getLangOpts(), N, Sym);
+ errorKindToBugKind(ErrorKind, Sym), C.getASTContext().getLangOpts(), N,
+ Sym, /*isLeak=*/false, isReleaseUnownedError(ErrorKind));
report->addRange(ErrorRange);
C.emitReport(std::move(report));
}
@@ -1090,8 +1097,8 @@ ExplodedNode * RetainCountChecker::checkReturnWithRetEffect(const ReturnStmt *S,
ExplodedNode *N = C.addTransition(state, Pred);
if (N) {
const LangOptions &LOpts = C.getASTContext().getLangOpts();
- auto R =
- std::make_unique<RefLeakReport>(*LeakAtReturn, LOpts, N, Sym, C);
+ auto R = std::make_unique<RefLeakReport>(
+ getPreferredFrontend().LeakAtReturn, LOpts, N, Sym, C);
C.emitReport(std::move(R));
}
return N;
@@ -1113,7 +1120,8 @@ ExplodedNode * RetainCountChecker::checkReturnWithRetEffect(const ReturnStmt *S,
ExplodedNode *N = C.addTransition(state, Pred);
if (N) {
auto R = std::make_unique<RefCountReport>(
- *ReturnNotOwnedForOwned, C.getASTContext().getLangOpts(), N, Sym);
+ getPreferredFrontend().ReturnNotOwnedForOwned,
+ C.getASTContext().getLangOpts(), N, Sym);
C.emitReport(std::move(R));
}
return N;
@@ -1128,7 +1136,7 @@ ExplodedNode * RetainCountChecker::checkReturnWithRetEffect(const ReturnStmt *S,
//===----------------------------------------------------------------------===//
void RetainCountChecker::checkBind(SVal loc, SVal val, const Stmt *S,
- CheckerContext &C) const {
+ bool AtDeclInit, CheckerContext &C) const {
ProgramStateRef state = C.getState();
const MemRegion *MR = loc.getAsRegion();
@@ -1261,8 +1269,8 @@ ProgramStateRef RetainCountChecker::handleAutoreleaseCounts(
os << "has a +" << V.getCount() << " retain count";
const LangOptions &LOpts = Ctx.getASTContext().getLangOpts();
- auto R = std::make_unique<RefCountReport>(*OverAutorelease, LOpts, N, Sym,
- os.str());
+ auto R = std::make_unique<RefCountReport>(
+ getPreferredFrontend().OverAutorelease, LOpts, N, Sym, os.str());
Ctx.emitReport(std::move(R));
}
@@ -1307,8 +1315,10 @@ RetainCountChecker::processLeaks(ProgramStateRef state,
const LangOptions &LOpts = Ctx.getASTContext().getLangOpts();
if (N) {
+ const RefCountFrontend &FE = getPreferredFrontend();
+ const RefCountBug &BT = Pred ? FE.LeakWithinFunction : FE.LeakAtReturn;
+
for (SymbolRef L : Leaked) {
- const RefCountBug &BT = Pred ? *LeakWithinFunction : *LeakAtReturn;
Ctx.emitReport(std::make_unique<RefLeakReport>(BT, LOpts, N, L, Ctx));
}
}
@@ -1463,44 +1473,31 @@ std::unique_ptr<SimpleProgramPointTag> RetainCountChecker::DeallocSentTag;
std::unique_ptr<SimpleProgramPointTag> RetainCountChecker::CastFailTag;
void ento::registerRetainCountBase(CheckerManager &Mgr) {
- auto *Chk = Mgr.registerChecker<RetainCountChecker>();
+ auto *Chk = Mgr.getChecker<RetainCountChecker>();
Chk->DeallocSentTag = std::make_unique<SimpleProgramPointTag>(
"RetainCountChecker", "DeallocSent");
Chk->CastFailTag = std::make_unique<SimpleProgramPointTag>(
"RetainCountChecker", "DynamicCastFail");
}
-bool ento::shouldRegisterRetainCountBase(const CheckerManager &mgr) {
+bool ento::shouldRegisterRetainCountBase(const CheckerManager &) {
return true;
}
+
void ento::registerRetainCountChecker(CheckerManager &Mgr) {
auto *Chk = Mgr.getChecker<RetainCountChecker>();
- Chk->TrackObjCAndCFObjects = true;
+ Chk->RetainCount.enable(Mgr);
Chk->TrackNSCFStartParam = Mgr.getAnalyzerOptions().getCheckerBooleanOption(
Mgr.getCurrentCheckerName(), "TrackNSCFStartParam");
-
-#define INIT_BUGTYPE(KIND) \
- Chk->KIND = std::make_unique<RefCountBug>(Mgr.getCurrentCheckerName(), \
- RefCountBug::KIND);
- // TODO: Ideally, we should have a checker for each of these bug types.
- INIT_BUGTYPE(UseAfterRelease)
- INIT_BUGTYPE(ReleaseNotOwned)
- INIT_BUGTYPE(DeallocNotOwned)
- INIT_BUGTYPE(FreeNotOwned)
- INIT_BUGTYPE(OverAutorelease)
- INIT_BUGTYPE(ReturnNotOwnedForOwned)
- INIT_BUGTYPE(LeakWithinFunction)
- INIT_BUGTYPE(LeakAtReturn)
-#undef INIT_BUGTYPE
}
-bool ento::shouldRegisterRetainCountChecker(const CheckerManager &mgr) {
+bool ento::shouldRegisterRetainCountChecker(const CheckerManager &) {
return true;
}
void ento::registerOSObjectRetainCountChecker(CheckerManager &Mgr) {
auto *Chk = Mgr.getChecker<RetainCountChecker>();
- Chk->TrackOSObjects = true;
+ Chk->OSObjectRetainCount.enable(Mgr);
// FIXME: We want bug reports to always have the same checker name associated
// with them, yet here, if RetainCountChecker is disabled but
@@ -1511,21 +1508,8 @@ void ento::registerOSObjectRetainCountChecker(CheckerManager &Mgr) {
// diagnostics, and **hidden checker options** with the fine-tuning of
// modeling. Following this logic, OSObjectRetainCountChecker should be the
// latter, but we can't just remove it for backward compatibility reasons.
-#define LAZY_INIT_BUGTYPE(KIND) \
- if (!Chk->KIND) \
- Chk->KIND = std::make_unique<RefCountBug>(Mgr.getCurrentCheckerName(), \
- RefCountBug::KIND);
- LAZY_INIT_BUGTYPE(UseAfterRelease)
- LAZY_INIT_BUGTYPE(ReleaseNotOwned)
- LAZY_INIT_BUGTYPE(DeallocNotOwned)
- LAZY_INIT_BUGTYPE(FreeNotOwned)
- LAZY_INIT_BUGTYPE(OverAutorelease)
- LAZY_INIT_BUGTYPE(ReturnNotOwnedForOwned)
- LAZY_INIT_BUGTYPE(LeakWithinFunction)
- LAZY_INIT_BUGTYPE(LeakAtReturn)
-#undef LAZY_INIT_BUGTYPE
}
-bool ento::shouldRegisterOSObjectRetainCountChecker(const CheckerManager &mgr) {
+bool ento::shouldRegisterOSObjectRetainCountChecker(const CheckerManager &) {
return true;
}
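// --- Illustrative sketch (not part of this patch): the registration flow for
// --- the two frontends of the reworked checker family, pieced together from
// --- the register functions above. The wrapper function name is made up; the
// --- enable()/isEnabled() calls and the RetainSummaryManager arguments mirror
// --- the ones in this file and in RetainCountChecker.h.
static void enableBothRetainCountFrontends(CheckerManager &Mgr) {
  auto *Chk = Mgr.getChecker<RetainCountChecker>();
  Chk->RetainCount.enable(Mgr);         // osx.cocoa.RetainCount
  Chk->OSObjectRetainCount.enable(Mgr); // osx.OSObjectRetainCount
  // The summary manager is then built lazily from whichever frontends are on:
  //   RetainSummaryManager(Ctx, RetainCount.isEnabled(),
  //                        OSObjectRetainCount.isEnabled());
}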
diff --git a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.h b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.h
index 0e81143..dc8bad6 100644
--- a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.h
+++ b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.h
@@ -235,51 +235,32 @@ public:
};
class RetainCountChecker
- : public Checker< check::Bind,
- check::DeadSymbols,
- check::BeginFunction,
- check::EndFunction,
- check::PostStmt<BlockExpr>,
- check::PostStmt<CastExpr>,
- check::PostStmt<ObjCArrayLiteral>,
- check::PostStmt<ObjCDictionaryLiteral>,
- check::PostStmt<ObjCBoxedExpr>,
- check::PostStmt<ObjCIvarRefExpr>,
- check::PostCall,
- check::RegionChanges,
- eval::Assume,
- eval::Call > {
+ : public CheckerFamily<
+ check::Bind, check::DeadSymbols, check::BeginFunction,
+ check::EndFunction, check::PostStmt<BlockExpr>,
+ check::PostStmt<CastExpr>, check::PostStmt<ObjCArrayLiteral>,
+ check::PostStmt<ObjCDictionaryLiteral>,
+ check::PostStmt<ObjCBoxedExpr>, check::PostStmt<ObjCIvarRefExpr>,
+ check::PostCall, check::RegionChanges, eval::Assume, eval::Call> {
public:
- std::unique_ptr<RefCountBug> UseAfterRelease;
- std::unique_ptr<RefCountBug> ReleaseNotOwned;
- std::unique_ptr<RefCountBug> DeallocNotOwned;
- std::unique_ptr<RefCountBug> FreeNotOwned;
- std::unique_ptr<RefCountBug> OverAutorelease;
- std::unique_ptr<RefCountBug> ReturnNotOwnedForOwned;
- std::unique_ptr<RefCountBug> LeakWithinFunction;
- std::unique_ptr<RefCountBug> LeakAtReturn;
+ RefCountFrontend RetainCount;
+ RefCountFrontend OSObjectRetainCount;
mutable std::unique_ptr<RetainSummaryManager> Summaries;
static std::unique_ptr<SimpleProgramPointTag> DeallocSentTag;
static std::unique_ptr<SimpleProgramPointTag> CastFailTag;
- /// Track Objective-C and CoreFoundation objects.
- bool TrackObjCAndCFObjects = false;
-
- /// Track sublcasses of OSObject.
- bool TrackOSObjects = false;
-
/// Track initial parameters (for the entry point) for NS/CF objects.
bool TrackNSCFStartParam = false;
- RetainCountChecker() {};
+ StringRef getDebugTag() const override { return "RetainCountChecker"; }
RetainSummaryManager &getSummaryManager(ASTContext &Ctx) const {
if (!Summaries)
- Summaries.reset(
- new RetainSummaryManager(Ctx, TrackObjCAndCFObjects, TrackOSObjects));
+ Summaries = std::make_unique<RetainSummaryManager>(
+ Ctx, RetainCount.isEnabled(), OSObjectRetainCount.isEnabled());
return *Summaries;
}
@@ -287,10 +268,20 @@ public:
return getSummaryManager(C.getASTContext());
}
+ const RefCountFrontend &getPreferredFrontend() const {
+ // FIXME: The two frontends of this checker family are in an unusual
+ // relationship: if they are both enabled, then all bug reports are
+ // reported by RetainCount (i.e. `osx.cocoa.RetainCount`), even the bugs
+ // that "belong to" OSObjectRetainCount (i.e. `osx.OSObjectRetainCount`).
+ // This is counter-intuitive and should be fixed to avoid confusion.
+ return RetainCount.isEnabled() ? RetainCount : OSObjectRetainCount;
+ }
+
void printState(raw_ostream &Out, ProgramStateRef State,
const char *NL, const char *Sep) const override;
- void checkBind(SVal loc, SVal val, const Stmt *S, CheckerContext &C) const;
+ void checkBind(SVal loc, SVal val, const Stmt *S, bool AtDeclInit,
+ CheckerContext &C) const;
void checkPostStmt(const BlockExpr *BE, CheckerContext &C) const;
void checkPostStmt(const CastExpr *CE, CheckerContext &C) const;
@@ -337,6 +328,8 @@ public:
const RefCountBug &errorKindToBugKind(RefVal::Kind ErrorKind,
SymbolRef Sym) const;
+ bool isReleaseUnownedError(RefVal::Kind ErrorKind) const;
+
void processNonLeakError(ProgramStateRef St, SourceRange ErrorRange,
RefVal::Kind ErrorKind, SymbolRef Sym,
CheckerContext &C) const;
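// --- Illustrative sketch (not part of this patch): emitting a report against
// --- one of the frontend-owned bug types, mirroring the call sites in
// --- RetainCountChecker.cpp above. The free function and its name are made
// --- up; the RefCountReport constructor arguments follow the signature in
// --- RetainCountDiagnostics.h.
static void reportBadRelease(const RetainCountChecker &Chk,
                             const LangOptions &LOpts, ExplodedNode *N,
                             SymbolRef Sym, CheckerContext &C) {
  const RefCountFrontend &FE = Chk.getPreferredFrontend();
  auto R = std::make_unique<RefCountReport>(
      FE.ReleaseNotOwned, LOpts, N, Sym,
      /*isLeak=*/false, /*IsReleaseUnowned=*/true);
  C.emitReport(std::move(R));
}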
diff --git a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp
index c9f5dc9..cad2c72 100644
--- a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp
@@ -21,57 +21,6 @@ using namespace clang;
using namespace ento;
using namespace retaincountchecker;
-StringRef RefCountBug::bugTypeToName(RefCountBug::RefCountBugKind BT) {
- switch (BT) {
- case UseAfterRelease:
- return "Use-after-release";
- case ReleaseNotOwned:
- return "Bad release";
- case DeallocNotOwned:
- return "-dealloc sent to non-exclusively owned object";
- case FreeNotOwned:
- return "freeing non-exclusively owned object";
- case OverAutorelease:
- return "Object autoreleased too many times";
- case ReturnNotOwnedForOwned:
- return "Method should return an owned object";
- case LeakWithinFunction:
- return "Leak";
- case LeakAtReturn:
- return "Leak of returned object";
- }
- llvm_unreachable("Unknown RefCountBugKind");
-}
-
-StringRef RefCountBug::getDescription() const {
- switch (BT) {
- case UseAfterRelease:
- return "Reference-counted object is used after it is released";
- case ReleaseNotOwned:
- return "Incorrect decrement of the reference count of an object that is "
- "not owned at this point by the caller";
- case DeallocNotOwned:
- return "-dealloc sent to object that may be referenced elsewhere";
- case FreeNotOwned:
- return "'free' called on an object that may be referenced elsewhere";
- case OverAutorelease:
- return "Object autoreleased too many times";
- case ReturnNotOwnedForOwned:
- return "Object with a +0 retain count returned to caller where a +1 "
- "(owning) retain count is expected";
- case LeakWithinFunction:
- case LeakAtReturn:
- return "";
- }
- llvm_unreachable("Unknown RefCountBugKind");
-}
-
-RefCountBug::RefCountBug(CheckerNameRef Checker, RefCountBugKind BT)
- : BugType(Checker, bugTypeToName(BT), categories::MemoryRefCount,
- /*SuppressOnSink=*/BT == LeakWithinFunction ||
- BT == LeakAtReturn),
- BT(BT) {}
-
static bool isNumericLiteralExpression(const Expr *E) {
// FIXME: This set of cases was copied from SemaExprObjC.
return isa<IntegerLiteral, CharacterLiteral, FloatingLiteral,
@@ -312,9 +261,11 @@ namespace retaincountchecker {
class RefCountReportVisitor : public BugReporterVisitor {
protected:
SymbolRef Sym;
+ bool IsReleaseUnowned;
public:
- RefCountReportVisitor(SymbolRef sym) : Sym(sym) {}
+ RefCountReportVisitor(SymbolRef S, bool IRU)
+ : Sym(S), IsReleaseUnowned(IRU) {}
void Profile(llvm::FoldingSetNodeID &ID) const override {
static int x = 0;
@@ -334,7 +285,8 @@ public:
class RefLeakReportVisitor : public RefCountReportVisitor {
public:
RefLeakReportVisitor(SymbolRef Sym, const MemRegion *LastBinding)
- : RefCountReportVisitor(Sym), LastBinding(LastBinding) {}
+ : RefCountReportVisitor(Sym, /*IsReleaseUnowned=*/false),
+ LastBinding(LastBinding) {}
PathDiagnosticPieceRef getEndPath(BugReporterContext &BRC,
const ExplodedNode *N,
@@ -452,12 +404,6 @@ annotateStartParameter(const ExplodedNode *N, SymbolRef Sym,
PathDiagnosticPieceRef
RefCountReportVisitor::VisitNode(const ExplodedNode *N, BugReporterContext &BRC,
PathSensitiveBugReport &BR) {
-
- const auto &BT = static_cast<const RefCountBug&>(BR.getBugType());
-
- bool IsFreeUnowned = BT.getBugType() == RefCountBug::FreeNotOwned ||
- BT.getBugType() == RefCountBug::DeallocNotOwned;
-
const SourceManager &SM = BRC.getSourceManager();
CallEventManager &CEMgr = BRC.getStateManager().getCallEventManager();
if (auto CE = N->getLocationAs<CallExitBegin>())
@@ -490,7 +436,7 @@ RefCountReportVisitor::VisitNode(const ExplodedNode *N, BugReporterContext &BRC,
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
- if (PrevT && IsFreeUnowned && CurrV.isNotOwned() && PrevT->isOwned()) {
+ if (PrevT && IsReleaseUnowned && CurrV.isNotOwned() && PrevT->isOwned()) {
os << "Object is now not exclusively owned";
auto Pos = PathDiagnosticLocation::create(N->getLocation(), SM);
return std::make_shared<PathDiagnosticEventPiece>(Pos, sbuf);
@@ -815,10 +761,8 @@ RefLeakReportVisitor::getEndPath(BugReporterContext &BRC,
if (K == ObjKind::ObjC || K == ObjKind::CF) {
os << "whose name ('" << *FD
<< "') does not contain 'Copy' or 'Create'. This violates the "
- "naming"
- " convention rules given in the Memory Management Guide for "
- "Core"
- " Foundation";
+ "naming convention rules given in the Memory Management Guide "
+ "for Core Foundation";
} else if (RV->getObjKind() == ObjKind::OS) {
std::string FuncName = FD->getNameAsString();
os << "whose name ('" << FuncName << "') starts with '"
@@ -836,19 +780,20 @@ RefLeakReportVisitor::getEndPath(BugReporterContext &BRC,
}
RefCountReport::RefCountReport(const RefCountBug &D, const LangOptions &LOpts,
- ExplodedNode *n, SymbolRef sym, bool isLeak)
- : PathSensitiveBugReport(D, D.getDescription(), n), Sym(sym),
+ ExplodedNode *n, SymbolRef sym, bool isLeak,
+ bool IsReleaseUnowned)
+ : PathSensitiveBugReport(D, D.getReportMessage(), n), Sym(sym),
isLeak(isLeak) {
if (!isLeak)
- addVisitor<RefCountReportVisitor>(sym);
+ addVisitor<RefCountReportVisitor>(sym, IsReleaseUnowned);
}
RefCountReport::RefCountReport(const RefCountBug &D, const LangOptions &LOpts,
ExplodedNode *n, SymbolRef sym,
StringRef endText)
- : PathSensitiveBugReport(D, D.getDescription(), endText, n) {
+ : PathSensitiveBugReport(D, D.getReportMessage(), endText, n) {
- addVisitor<RefCountReportVisitor>(sym);
+ addVisitor<RefCountReportVisitor>(sym, /*IsReleaseUnowned=*/false);
}
void RefLeakReport::deriveParamLocation(CheckerContext &Ctx) {
diff --git a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.h b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.h
index d059008..6ceb86f 100644
--- a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.h
+++ b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.h
@@ -25,25 +25,44 @@ namespace ento {
namespace retaincountchecker {
class RefCountBug : public BugType {
+ StringRef ReportMessage;
+
public:
- enum RefCountBugKind {
- UseAfterRelease,
- ReleaseNotOwned,
- DeallocNotOwned,
- FreeNotOwned,
- OverAutorelease,
- ReturnNotOwnedForOwned,
- LeakWithinFunction,
- LeakAtReturn,
- };
- RefCountBug(CheckerNameRef Checker, RefCountBugKind BT);
- StringRef getDescription() const;
-
- RefCountBugKind getBugType() const { return BT; }
-
-private:
- RefCountBugKind BT;
- static StringRef bugTypeToName(RefCountBugKind BT);
+ RefCountBug(const CheckerFrontend *CF, StringRef Desc, StringRef ReportMsg,
+ bool SuppressOnSink = false)
+ : BugType(CF, Desc, categories::MemoryRefCount, SuppressOnSink),
+ ReportMessage(ReportMsg) {}
+ StringRef getReportMessage() const { return ReportMessage; }
+};
+
+class RefCountFrontend : public CheckerFrontend {
+public:
+ const RefCountBug UseAfterRelease{
+ this, "Use-after-release",
+ "Reference-counted object is used after it is released"};
+ const RefCountBug ReleaseNotOwned{
+ this, "Bad release",
+ "Incorrect decrement of the reference count of an object that is not "
+ "owned at this point by the caller"};
+ const RefCountBug DeallocNotOwned{
+ this, "-dealloc sent to non-exclusively owned object",
+ "-dealloc sent to object that may be referenced elsewhere"};
+ const RefCountBug FreeNotOwned{
+ this, "freeing non-exclusively owned object",
+ "'free' called on an object that may be referenced elsewhere"};
+ const RefCountBug OverAutorelease{this, "Object autoreleased too many times",
+ "Object autoreleased too many times"};
+ const RefCountBug ReturnNotOwnedForOwned{
+ this, "Method should return an owned object",
+ "Object with a +0 retain count returned to caller where a +1 (owning) "
+ "retain count is expected"};
+ // For these two bug types the report message will be generated dynamically
+ // by `RefLeakReport::createDescription` so the empty string taken from the
+ // BugType will be ignored (overwritten).
+ const RefCountBug LeakWithinFunction{this, "Leak", /*ReportMsg=*/"",
+ /*SuppressOnSink=*/true};
+ const RefCountBug LeakAtReturn{this, "Leak of returned object",
+ /*ReportMsg=*/"", /*SuppressOnSink=*/true};
};
class RefCountReport : public PathSensitiveBugReport {
@@ -53,8 +72,8 @@ protected:
public:
RefCountReport(const RefCountBug &D, const LangOptions &LOpts,
- ExplodedNode *n, SymbolRef sym,
- bool isLeak=false);
+ ExplodedNode *n, SymbolRef sym, bool isLeak = false,
+ bool IsReleaseUnowned = false);
RefCountReport(const RefCountBug &D, const LangOptions &LOpts,
ExplodedNode *n, SymbolRef sym,
diff --git a/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
index 52b3d1e..844447f 100644
--- a/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
@@ -1589,7 +1589,7 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
// and we have a TypedefDecl with the name 'FILE'.
for (Decl *D : LookupRes)
if (auto *TD = dyn_cast<TypedefNameDecl>(D))
- return ACtx.getTypeDeclType(TD).getCanonicalType();
+ return ACtx.getCanonicalTypeDeclType(TD);
// Find the first TypeDecl.
// There maybe cases when a function has the same name as a struct.
@@ -1597,7 +1597,7 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
// int stat(const char *restrict path, struct stat *restrict buf);
for (Decl *D : LookupRes)
if (auto *TD = dyn_cast<TypeDecl>(D))
- return ACtx.getTypeDeclType(TD).getCanonicalType();
+ return ACtx.getCanonicalTypeDeclType(TD);
return std::nullopt;
}
} lookupTy(ACtx);
diff --git a/clang/lib/StaticAnalyzer/Checkers/StoreToImmutableChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/StoreToImmutableChecker.cpp
index afad419..2bb3917 100644
--- a/clang/lib/StaticAnalyzer/Checkers/StoreToImmutableChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/StoreToImmutableChecker.cpp
@@ -26,53 +26,11 @@ class StoreToImmutableChecker : public Checker<check::Bind> {
const BugType BT{this, "Write to immutable memory", "CERT Environment (ENV)"};
public:
- void checkBind(SVal Loc, SVal Val, const Stmt *S, CheckerContext &C) const;
+ void checkBind(SVal Loc, SVal Val, const Stmt *S, bool AtDeclInit,
+ CheckerContext &C) const;
};
} // end anonymous namespace
-static bool isInitializationContext(const Stmt *S, CheckerContext &C) {
- // Check if this is a DeclStmt (variable declaration)
- if (isa<DeclStmt>(S))
- return true;
-
- // This part is specific for initialization of const lambdas pre-C++17.
- // Lets look at the AST of the statement:
- // ```
- // const auto lambda = [](){};
- // ```
- //
- // The relevant part of the AST for this case prior to C++17 is:
- // ...
- // `-DeclStmt
- // `-VarDecl
- // `-ExprWithCleanups
- // `-CXXConstructExpr
- // ...
- // In C++17 and later, the AST is different:
- // ...
- // `-DeclStmt
- // `-VarDecl
- // `-ImplicitCastExpr
- // `-LambdaExpr
- // |-CXXRecordDecl
- // `-CXXConstructExpr
- // ...
- // And even beside this, the statement `S` that is given to the checkBind
- // callback is the VarDecl in C++17 and later, and the CXXConstructExpr in
- // C++14 and before. So in order to support the C++14 we need the following
- // ugly hack to detect whether this construction is used to initialize a
- // variable.
- //
- // FIXME: This should be eliminated by improving the API of checkBind to
- // ensure that it consistently passes the `VarDecl` (instead of the
- // `CXXConstructExpr`) when the constructor call denotes the initialization
- // of a variable with a lambda, or maybe less preferably, try the more
- // invasive approach of passing the information forward to the checkers
- // whether the current bind is an initialization or an assignment.
- const auto *ConstructExp = dyn_cast<CXXConstructExpr>(S);
- return ConstructExp && ConstructExp->isElidable();
-}
-
static bool isEffectivelyConstRegion(const MemRegion *MR, CheckerContext &C) {
if (isa<GlobalImmutableSpaceRegion>(MR))
return true;
@@ -128,6 +86,7 @@ getInnermostEnclosingConstDeclRegion(const MemRegion *MR, CheckerContext &C) {
}
void StoreToImmutableChecker::checkBind(SVal Loc, SVal Val, const Stmt *S,
+ bool AtDeclInit,
CheckerContext &C) const {
// We are only interested in stores to memory regions
const MemRegion *MR = Loc.getAsRegion();
@@ -136,9 +95,7 @@ void StoreToImmutableChecker::checkBind(SVal Loc, SVal Val, const Stmt *S,
// Skip variable declarations and initializations - we only want to catch
// actual writes
- // FIXME: If the API of checkBind would allow to distinguish between
- // initialization and assignment, we could use that instead.
- if (isInitializationContext(S, C))
+ if (AtDeclInit)
return;
// Check if the region is in the global immutable space
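// --- Illustrative sketch (not part of this patch): the situations the removed
// --- isInitializationContext() heuristic had to tell apart, now separated by
// --- the AtDeclInit flag that checkBind receives. The function below is a
// --- made-up example of client code, not analyzer code.
void atDeclInitExamples() {
  const auto lambda = [] {}; // bind from the declaration's initializer:
                             // AtDeclInit == true (pre-C++17 this reached the
                             // callback as an elidable CXXConstructExpr)
  int x = 42;                // also AtDeclInit == true
  x = 43;                    // later assignment: AtDeclInit == false
}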
diff --git a/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
index e98de33..7f8923c 100644
--- a/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
@@ -26,13 +26,13 @@ class UndefinedAssignmentChecker
const BugType BT{this, "Assigned value is uninitialized"};
public:
- void checkBind(SVal location, SVal val, const Stmt *S,
+ void checkBind(SVal location, SVal val, const Stmt *S, bool AtDeclInit,
CheckerContext &C) const;
};
}
void UndefinedAssignmentChecker::checkBind(SVal location, SVal val,
- const Stmt *StoreE,
+ const Stmt *StoreE, bool AtDeclInit,
CheckerContext &C) const {
if (!val.isUndef())
return;
diff --git a/clang/lib/StaticAnalyzer/Checkers/VforkChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/VforkChecker.cpp
index cb73ac6..116dd93 100644
--- a/clang/lib/StaticAnalyzer/Checkers/VforkChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/VforkChecker.cpp
@@ -62,7 +62,8 @@ public:
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
- void checkBind(SVal L, SVal V, const Stmt *S, CheckerContext &C) const;
+ void checkBind(SVal L, SVal V, const Stmt *S, bool AtDeclInit,
+ CheckerContext &C) const;
void checkPreStmt(const ReturnStmt *RS, CheckerContext &C) const;
};
@@ -188,7 +189,7 @@ void VforkChecker::checkPreCall(const CallEvent &Call,
}
// Prohibit writes in child process (except for vfork's lhs).
-void VforkChecker::checkBind(SVal L, SVal V, const Stmt *S,
+void VforkChecker::checkBind(SVal L, SVal V, const Stmt *S, bool AtDeclInit,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
if (!isChildProcess(State))
diff --git a/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp b/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp
index 72199af..36c1258 100644
--- a/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp
@@ -181,10 +181,6 @@ template <typename Predicate>
static bool isPtrOfType(const clang::QualType T, Predicate Pred) {
QualType type = T;
while (!type.isNull()) {
- if (auto *elaboratedT = type->getAs<ElaboratedType>()) {
- type = elaboratedT->desugar();
- continue;
- }
if (auto *SpecialT = type->getAs<TemplateSpecializationType>()) {
auto *Decl = SpecialT->getTemplateName().getAsTemplateDecl();
return Decl && Pred(Decl->getNameAsString());
@@ -248,13 +244,15 @@ void RetainTypeChecker::visitTypedef(const TypedefDecl *TD) {
const RecordType *RT = PointeeQT->getAs<RecordType>();
if (!RT) {
if (TD->hasAttr<ObjCBridgeAttr>() || TD->hasAttr<ObjCBridgeMutableAttr>()) {
- if (auto *Type = TD->getTypeForDecl())
- RecordlessTypes.insert(Type);
+ RecordlessTypes.insert(TD->getASTContext()
+ .getTypedefType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, TD)
+ .getTypePtr());
}
return;
}
- for (auto *Redecl : RT->getDecl()->getMostRecentDecl()->redecls()) {
+ for (auto *Redecl : RT->getOriginalDecl()->getMostRecentDecl()->redecls()) {
if (Redecl->getAttr<ObjCBridgeAttr>() ||
Redecl->getAttr<ObjCBridgeMutableAttr>()) {
CFPointees.insert(RT);
@@ -266,21 +264,10 @@ void RetainTypeChecker::visitTypedef(const TypedefDecl *TD) {
bool RetainTypeChecker::isUnretained(const QualType QT, bool ignoreARC) {
if (ento::cocoa::isCocoaObjectRef(QT) && (!IsARCEnabled || ignoreARC))
return true;
- auto CanonicalType = QT.getCanonicalType();
- auto PointeeType = CanonicalType->getPointeeType();
- auto *RT = dyn_cast_or_null<RecordType>(PointeeType.getTypePtrOrNull());
- if (!RT) {
- auto *Type = QT.getTypePtrOrNull();
- while (Type) {
- if (RecordlessTypes.contains(Type))
- return true;
- auto *ET = dyn_cast_or_null<ElaboratedType>(Type);
- if (!ET)
- break;
- Type = ET->desugar().getTypePtrOrNull();
- }
- }
- return RT && CFPointees.contains(RT);
+ if (auto *RT = dyn_cast_or_null<RecordType>(
+ QT.getCanonicalType()->getPointeeType().getTypePtrOrNull()))
+ return CFPointees.contains(RT);
+ return RecordlessTypes.contains(QT.getTypePtr());
}
std::optional<bool> isUnretained(const QualType T, bool IsARCEnabled) {
@@ -306,7 +293,7 @@ std::optional<bool> isUnretained(const QualType T, bool IsARCEnabled) {
auto *Record = PointeeType->getAsStructureType();
if (!Record)
return false;
- auto *Decl = Record->getDecl();
+ auto *Decl = Record->getOriginalDecl();
if (!Decl)
return false;
auto TypeName = Decl->getName();
diff --git a/clang/lib/StaticAnalyzer/Checkers/WebKit/RefCntblBaseVirtualDtorChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/WebKit/RefCntblBaseVirtualDtorChecker.cpp
index 98c587d..6f3a280 100644
--- a/clang/lib/StaticAnalyzer/Checkers/WebKit/RefCntblBaseVirtualDtorChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/WebKit/RefCntblBaseVirtualDtorChecker.cpp
@@ -113,10 +113,6 @@ public:
auto CastType = Cast->getType();
if (auto *PtrType = dyn_cast<PointerType>(CastType)) {
auto PointeeType = PtrType->getPointeeType();
- while (auto *ET = dyn_cast<ElaboratedType>(PointeeType)) {
- if (ET->isSugared())
- PointeeType = ET->desugar();
- }
if (auto *ParmType = dyn_cast<TemplateTypeParmType>(PointeeType)) {
if (ArgList) {
auto ParmIndex = ParmType->getIndex();
@@ -125,13 +121,13 @@ public:
return true;
}
} else if (auto *RD = dyn_cast<RecordType>(PointeeType)) {
- if (RD->getDecl() == ClassDecl)
+ if (declaresSameEntity(RD->getOriginalDecl(), ClassDecl))
return true;
} else if (auto *ST =
dyn_cast<SubstTemplateTypeParmType>(PointeeType)) {
auto Type = ST->getReplacementType();
if (auto *RD = dyn_cast<RecordType>(Type)) {
- if (RD->getDecl() == ClassDecl)
+ if (declaresSameEntity(RD->getOriginalDecl(), ClassDecl))
return true;
}
}
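A hedged sketch of the decl-comparison pattern this hunk switches to (the helper name is illustrative; getOriginalDecl() is the accessor introduced by this series):

#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Type.h"

// Compare a RecordType's declaration against a known class via
// declaresSameEntity() instead of raw Decl pointer equality, so any
// redeclaration of the same class compares equal.
static bool pointsToClass(const clang::RecordType *RT,
                          const clang::CXXRecordDecl *ClassDecl) {
  return clang::declaresSameEntity(RT->getOriginalDecl(), ClassDecl);
}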
diff --git a/clang/lib/StaticAnalyzer/Core/CallEvent.cpp b/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
index 34fcb9b..180056c 100644
--- a/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
+++ b/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
@@ -89,7 +89,7 @@ static bool isCallback(QualType T) {
T = T->getPointeeType();
if (const RecordType *RT = T->getAsStructureType()) {
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
for (const auto *I : RD->fields()) {
QualType FieldT = I->getType();
if (FieldT->isBlockPointerType() || FieldT->isFunctionPointerType())
@@ -391,7 +391,9 @@ bool CallEvent::isVariadic(const Decl *D) {
static bool isTransparentUnion(QualType T) {
const RecordType *UT = T->getAsUnionType();
- return UT && UT->getDecl()->hasAttr<TransparentUnionAttr>();
+ return UT && UT->getOriginalDecl()
+ ->getMostRecentDecl()
+ ->hasAttr<TransparentUnionAttr>();
}
// In some cases, symbolic cases should be transformed before we associate
@@ -843,7 +845,7 @@ void CXXInstanceCall::getInitialStackFrameContents(
if (MD->getCanonicalDecl() != getDecl()->getCanonicalDecl()) {
ASTContext &Ctx = SVB.getContext();
const CXXRecordDecl *Class = MD->getParent();
- QualType Ty = Ctx.getPointerType(Ctx.getRecordType(Class));
+ CanQualType Ty = Ctx.getPointerType(Ctx.getCanonicalTagType(Class));
// FIXME: CallEvent maybe shouldn't be directly accessing StoreManager.
std::optional<SVal> V =
@@ -854,7 +856,8 @@ void CXXInstanceCall::getInitialStackFrameContents(
// Fall back to a generic pointer cast for this-value.
const CXXMethodDecl *StaticMD = cast<CXXMethodDecl>(getDecl());
const CXXRecordDecl *StaticClass = StaticMD->getParent();
- QualType StaticTy = Ctx.getPointerType(Ctx.getRecordType(StaticClass));
+ CanQualType StaticTy =
+ Ctx.getPointerType(Ctx.getCanonicalTagType(StaticClass));
ThisVal = SVB.evalCast(ThisVal, Ty, StaticTy);
} else
ThisVal = *V;
diff --git a/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp b/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
index 0fe677e..44c6f9f 100644
--- a/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
+++ b/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
@@ -376,11 +376,13 @@ namespace {
const Stmt *S;
ExprEngine &Eng;
const ProgramPoint &PP;
+ bool AtDeclInit;
- CheckBindContext(const CheckersTy &checkers,
- SVal loc, SVal val, const Stmt *s, ExprEngine &eng,
+ CheckBindContext(const CheckersTy &checkers, SVal loc, SVal val,
+ const Stmt *s, bool AtDeclInit, ExprEngine &eng,
const ProgramPoint &pp)
- : Checkers(checkers), Loc(loc), Val(val), S(s), Eng(eng), PP(pp) {}
+ : Checkers(checkers), Loc(loc), Val(val), S(s), Eng(eng), PP(pp),
+ AtDeclInit(AtDeclInit) {}
CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
@@ -391,7 +393,7 @@ namespace {
const ProgramPoint &L = PP.withTag(checkFn.Checker);
CheckerContext C(Bldr, Eng, Pred, L);
- checkFn(Loc, Val, S, C);
+ checkFn(Loc, Val, S, AtDeclInit, C);
}
};
@@ -408,10 +410,10 @@ namespace {
/// Run checkers for binding of a value to a location.
void CheckerManager::runCheckersForBind(ExplodedNodeSet &Dst,
const ExplodedNodeSet &Src,
- SVal location, SVal val,
- const Stmt *S, ExprEngine &Eng,
+ SVal location, SVal val, const Stmt *S,
+ bool AtDeclInit, ExprEngine &Eng,
const ProgramPoint &PP) {
- CheckBindContext C(BindCheckers, location, val, S, Eng, PP);
+ CheckBindContext C(BindCheckers, location, val, S, AtDeclInit, Eng, PP);
llvm::TimeTraceScope TimeScope{
"CheckerManager::runCheckersForBind",
[&val]() { return getTimeTraceBindMetadata(val); }};
diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
index d874844..c853c00 100644
--- a/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
+++ b/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
@@ -3714,9 +3714,8 @@ ExprEngine::notifyCheckersOfPointerEscape(ProgramStateRef State,
/// evalBind - Handle the semantics of binding a value to a specific location.
/// This method is used by evalStore and (soon) VisitDeclStmt, and others.
void ExprEngine::evalBind(ExplodedNodeSet &Dst, const Stmt *StoreE,
- ExplodedNode *Pred,
- SVal location, SVal Val,
- bool atDeclInit, const ProgramPoint *PP) {
+ ExplodedNode *Pred, SVal location, SVal Val,
+ bool AtDeclInit, const ProgramPoint *PP) {
const LocationContext *LC = Pred->getLocationContext();
PostStmt PS(StoreE, LC);
if (!PP)
@@ -3725,7 +3724,7 @@ void ExprEngine::evalBind(ExplodedNodeSet &Dst, const Stmt *StoreE,
// Do a previsit of the bind.
ExplodedNodeSet CheckedSet;
getCheckerManager().runCheckersForBind(CheckedSet, Pred, location, Val,
- StoreE, *this, *PP);
+ StoreE, AtDeclInit, *this, *PP);
StmtNodeBuilder Bldr(CheckedSet, Dst, *currBldrCtx);
@@ -3748,8 +3747,8 @@ void ExprEngine::evalBind(ExplodedNodeSet &Dst, const Stmt *StoreE,
// When binding the value, pass on the hint that this is an initialization.
// For initializations, we do not need to inform clients of region
// changes.
- state = state->bindLoc(location.castAs<Loc>(),
- Val, LC, /* notifyChanges = */ !atDeclInit);
+ state = state->bindLoc(location.castAs<Loc>(), Val, LC,
+ /* notifyChanges = */ !AtDeclInit);
const MemRegion *LocReg = nullptr;
if (std::optional<loc::MemRegionVal> LocRegVal =
diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
index fe70558..c0b28d2 100644
--- a/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
+++ b/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
@@ -85,7 +85,7 @@ void ExprEngine::performTrivialCopy(NodeBuilder &Bldr, ExplodedNode *Pred,
evalLocation(Tmp, CallExpr, VExpr, Pred, Pred->getState(), V,
/*isLoad=*/true);
for (ExplodedNode *N : Tmp)
- evalBind(Dst, CallExpr, N, ThisVal, V, true);
+ evalBind(Dst, CallExpr, N, ThisVal, V, !AlwaysReturnsLValue);
PostStmt PS(CallExpr, LCtx);
for (ExplodedNode *N : Dst) {
diff --git a/clang/lib/StaticAnalyzer/Core/MemRegion.cpp b/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
index 0058a0d..5f27196 100644
--- a/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
+++ b/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
@@ -192,11 +192,11 @@ QualType ObjCIvarRegion::getValueType() const {
}
QualType CXXBaseObjectRegion::getValueType() const {
- return QualType(getDecl()->getTypeForDecl(), 0);
+ return getContext().getCanonicalTagType(getDecl());
}
QualType CXXDerivedObjectRegion::getValueType() const {
- return QualType(getDecl()->getTypeForDecl(), 0);
+ return getContext().getCanonicalTagType(getDecl());
}
QualType ParamVarRegion::getValueType() const {
diff --git a/clang/lib/StaticAnalyzer/Core/RegionStore.cpp b/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
index 388034b..02375b0c 100644
--- a/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
+++ b/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
@@ -2453,7 +2453,8 @@ NonLoc RegionStoreManager::createLazyBinding(RegionBindingsConstRef B,
SVal RegionStoreManager::getBindingForStruct(RegionBindingsConstRef B,
const TypedValueRegion *R) {
- const RecordDecl *RD = R->getValueType()->castAs<RecordType>()->getDecl();
+ const RecordDecl *RD =
+ R->getValueType()->castAs<RecordType>()->getOriginalDecl();
if (!RD->getDefinition())
return UnknownVal();
@@ -2844,7 +2845,7 @@ RegionStoreManager::bindStruct(LimitedRegionBindingsConstRef B,
assert(T->isStructureOrClassType());
const RecordType* RT = T->castAs<RecordType>();
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
if (!RD->isCompleteDefinition())
return B;
diff --git a/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp b/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
index 2276c45..a6f4463 100644
--- a/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
+++ b/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
@@ -320,8 +320,8 @@ loc::MemRegionVal SValBuilder::getCXXThis(const CXXMethodDecl *D,
/// Return a memory region for the 'this' object reference.
loc::MemRegionVal SValBuilder::getCXXThis(const CXXRecordDecl *D,
const StackFrameContext *SFC) {
- const Type *T = D->getTypeForDecl();
- QualType PT = getContext().getPointerType(QualType(T, 0));
+ CanQualType PT =
+ getContext().getPointerType(getContext().getCanonicalTagType(D));
return loc::MemRegionVal(getRegionManager().getCXXThisRegion(PT, SFC));
}
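The MemRegion and SValBuilder hunks above follow the same mechanical pattern; a small before/after sketch (assuming the ASTContext API this patch relies on):

#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"

// Old: QualType PT = Ctx.getPointerType(QualType(RD->getTypeForDecl(), 0));
// New: ask the context for the canonical tag type directly.
static clang::CanQualType thisPointerType(clang::ASTContext &Ctx,
                                          const clang::CXXRecordDecl *RD) {
  return Ctx.getPointerType(Ctx.getCanonicalTagType(RD));
}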
diff --git a/clang/lib/Tooling/ASTDiff/ASTDiff.cpp b/clang/lib/Tooling/ASTDiff/ASTDiff.cpp
index 5f7153c..d70a679 100644
--- a/clang/lib/Tooling/ASTDiff/ASTDiff.cpp
+++ b/clang/lib/Tooling/ASTDiff/ASTDiff.cpp
@@ -246,7 +246,7 @@ struct PreorderVisitor : public RecursiveASTVisitor<PreorderVisitor> {
PostTraverse(SavedState);
return true;
}
- bool TraverseType(QualType T) { return true; }
+ bool TraverseType(QualType T, bool TraverseQualifier = true) { return true; }
bool TraverseConstructorInitializer(CXXCtorInitializer *Init) {
if (isNodeExcluded(Tree.AST.getSourceManager(), Init))
return true;
@@ -428,11 +428,12 @@ std::string SyntaxTree::Impl::getDeclValue(const Decl *D) const {
Value += getRelativeName(N) + ";";
if (auto *T = dyn_cast<TypedefNameDecl>(D))
return Value + T->getUnderlyingType().getAsString(TypePP) + ";";
- if (auto *T = dyn_cast<TypeDecl>(D))
- if (T->getTypeForDecl())
- Value +=
- T->getTypeForDecl()->getCanonicalTypeInternal().getAsString(TypePP) +
- ";";
+ if (auto *T = dyn_cast<TypeDecl>(D)) {
+ const ASTContext &Ctx = T->getASTContext();
+ Value +=
+ Ctx.getTypeDeclType(T)->getCanonicalTypeInternal().getAsString(TypePP) +
+ ";";
+ }
if (auto *U = dyn_cast<UsingDirectiveDecl>(D))
return std::string(U->getNominatedNamespace()->getName());
if (auto *A = dyn_cast<AccessSpecDecl>(D)) {
diff --git a/clang/lib/Tooling/Refactoring/Lookup.cpp b/clang/lib/Tooling/Refactoring/Lookup.cpp
index 757fba0..dedde86 100644
--- a/clang/lib/Tooling/Refactoring/Lookup.cpp
+++ b/clang/lib/Tooling/Refactoring/Lookup.cpp
@@ -108,16 +108,6 @@ static StringRef getBestNamespaceSubstr(const DeclContext *DeclA,
}
}
-/// Check if the name specifier begins with a written "::".
-static bool isFullyQualified(const NestedNameSpecifier *NNS) {
- while (NNS) {
- if (NNS->getKind() == NestedNameSpecifier::Global)
- return true;
- NNS = NNS->getPrefix();
- }
- return false;
-}
-
// Adds more scope specifier to the spelled name until the spelling is not
// ambiguous. A spelling is ambiguous if the resolution of the symbol is
// ambiguous. For example, if QName is "::y::bar", the spelling is "y::bar", and
@@ -182,7 +172,7 @@ static std::string disambiguateSpellingInScope(StringRef Spelling,
return Disambiguated;
}
-std::string tooling::replaceNestedName(const NestedNameSpecifier *Use,
+std::string tooling::replaceNestedName(NestedNameSpecifier Use,
SourceLocation UseLoc,
const DeclContext *UseContext,
const NamedDecl *FromDecl,
@@ -217,7 +207,7 @@ std::string tooling::replaceNestedName(const NestedNameSpecifier *Use,
// We work backwards (from most specific possible namespace to least
// specific).
StringRef Suggested = getBestNamespaceSubstr(UseContext, ReplacementString,
- isFullyQualified(Use));
+ Use.isFullyQualified());
return disambiguateSpellingInScope(Suggested, ReplacementString, *UseContext,
UseLoc);
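The removed isFullyQualified() helper is subsumed by the value-semantics NestedNameSpecifier API; a thin sketch (hypothetical wrapper) of the equivalent check:

#include "clang/AST/NestedNameSpecifier.h"

// Returns true when the spelled name begins with a leading `::`.
// NestedNameSpecifier is now passed by value, so no null check or
// prefix walk is needed.
static bool startsWithGlobalScope(clang::NestedNameSpecifier NNS) {
  return NNS.isFullyQualified();
}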
diff --git a/clang/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp b/clang/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp
index 8eff778..d944411 100644
--- a/clang/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp
+++ b/clang/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp
@@ -107,45 +107,83 @@ private:
};
SourceLocation StartLocationForType(TypeLoc TL) {
+ if (auto QTL = TL.getAs<QualifiedTypeLoc>())
+ TL = QTL.getUnqualifiedLoc();
+
// For elaborated types (e.g. `struct a::A`) we want the portion after the
- // `struct` but including the namespace qualifier, `a::`.
- if (auto ElaboratedTypeLoc = TL.getAs<clang::ElaboratedTypeLoc>()) {
- NestedNameSpecifierLoc NestedNameSpecifier =
- ElaboratedTypeLoc.getQualifierLoc();
- if (NestedNameSpecifier.getNestedNameSpecifier())
- return NestedNameSpecifier.getBeginLoc();
- TL = TL.getNextTypeLoc();
+ // `struct`, including the namespace qualifier, `a::`.
+ switch (TL.getTypeLocClass()) {
+ case TypeLoc::Record:
+ case TypeLoc::InjectedClassName:
+ case TypeLoc::Enum: {
+ auto TTL = TL.castAs<TagTypeLoc>();
+ if (NestedNameSpecifierLoc QualifierLoc = TTL.getQualifierLoc())
+ return QualifierLoc.getBeginLoc();
+ return TTL.getNameLoc();
+ }
+ case TypeLoc::Typedef: {
+ auto TTL = TL.castAs<TypedefTypeLoc>();
+ if (NestedNameSpecifierLoc QualifierLoc = TTL.getQualifierLoc())
+ return QualifierLoc.getBeginLoc();
+ return TTL.getNameLoc();
+ }
+ case TypeLoc::UnresolvedUsing: {
+ auto TTL = TL.castAs<UnresolvedUsingTypeLoc>();
+ if (NestedNameSpecifierLoc QualifierLoc = TTL.getQualifierLoc())
+ return QualifierLoc.getBeginLoc();
+ return TTL.getNameLoc();
+ }
+ case TypeLoc::Using: {
+ auto TTL = TL.castAs<UsingTypeLoc>();
+ if (NestedNameSpecifierLoc QualifierLoc = TTL.getQualifierLoc())
+ return QualifierLoc.getBeginLoc();
+ return TTL.getNameLoc();
+ }
+ case TypeLoc::TemplateSpecialization: {
+ auto TTL = TL.castAs<TemplateSpecializationTypeLoc>();
+ if (NestedNameSpecifierLoc QualifierLoc = TTL.getQualifierLoc())
+ return QualifierLoc.getBeginLoc();
+ return TTL.getTemplateNameLoc();
+ }
+ case TypeLoc::DeducedTemplateSpecialization: {
+ auto DTL = TL.castAs<clang::DeducedTemplateSpecializationTypeLoc>();
+ if (NestedNameSpecifierLoc QualifierLoc = DTL.getQualifierLoc())
+ return QualifierLoc.getBeginLoc();
+ return DTL.getTemplateNameLoc();
+ }
+ case TypeLoc::DependentName: {
+ auto TTL = TL.castAs<DependentNameTypeLoc>();
+ if (NestedNameSpecifierLoc QualifierLoc = TTL.getQualifierLoc())
+ return QualifierLoc.getBeginLoc();
+ return TTL.getNameLoc();
+ }
+ case TypeLoc::DependentTemplateSpecialization: {
+ auto TTL = TL.castAs<DependentTemplateSpecializationTypeLoc>();
+ if (NestedNameSpecifierLoc QualifierLoc = TTL.getQualifierLoc())
+ return QualifierLoc.getBeginLoc();
+ return TTL.getTemplateNameLoc();
+ }
+ default:
+ llvm_unreachable("unhandled TypeLoc class");
}
- return TL.getBeginLoc();
}
SourceLocation EndLocationForType(TypeLoc TL) {
- // Dig past any namespace or keyword qualifications.
- while (TL.getTypeLocClass() == TypeLoc::Elaborated ||
- TL.getTypeLocClass() == TypeLoc::Qualified)
- TL = TL.getNextTypeLoc();
+ if (auto QTL = TL.getAs<QualifiedTypeLoc>())
+ TL = QTL.getUnqualifiedLoc();
// The location for template specializations (e.g. Foo<int>) includes the
// templated types in its location range. We want to restrict this to just
// before the `<` character.
- if (TL.getTypeLocClass() == TypeLoc::TemplateSpecialization) {
- return TL.castAs<TemplateSpecializationTypeLoc>()
- .getLAngleLoc()
- .getLocWithOffset(-1);
- }
+ if (auto TTL = TL.getAs<TemplateSpecializationTypeLoc>())
+ return TTL.getLAngleLoc().getLocWithOffset(-1);
return TL.getEndLoc();
}
-NestedNameSpecifier *GetNestedNameForType(TypeLoc TL) {
- // Dig past any keyword qualifications.
- while (TL.getTypeLocClass() == TypeLoc::Qualified)
- TL = TL.getNextTypeLoc();
-
- // For elaborated types (e.g. `struct a::A`) we want the portion after the
- // `struct` but including the namespace qualifier, `a::`.
- if (auto ElaboratedTypeLoc = TL.getAs<clang::ElaboratedTypeLoc>())
- return ElaboratedTypeLoc.getQualifierLoc().getNestedNameSpecifier();
- return nullptr;
+NestedNameSpecifier GetNestedNameForType(TypeLoc TL) {
+ if (auto QTL = TL.getAs<QualifiedTypeLoc>())
+ TL = QTL.getUnqualifiedLoc();
+ return TL.getPrefix().getNestedNameSpecifier();
}
// Find all locations identified by the given USRs for rename.
@@ -168,14 +206,14 @@ public:
const NamedDecl *FromDecl;
// The declaration in which the nested name is contained (can be nullptr).
const Decl *Context;
- // The nested name being replaced (can be nullptr).
- const NestedNameSpecifier *Specifier;
+ // The nested name being replaced.
+ NestedNameSpecifier Specifier;
// Determine whether the prefix qualifiers of the NewName should be ignored.
// Normally, we set it to true for the symbol declaration and definition to
// avoid adding prefix qualifiers.
// For example, if it is true and NewName is "a::b::foo", then the symbol
// occurrence which the RenameInfo points to will be renamed to "foo".
- bool IgnorePrefixQualifers;
+ bool IgnorePrefixQualifiers;
};
bool VisitNamedDecl(const NamedDecl *Decl) {
@@ -203,8 +241,8 @@ public:
EndLoc,
/*FromDecl=*/nullptr,
/*Context=*/nullptr,
- /*Specifier=*/nullptr,
- /*IgnorePrefixQualifers=*/true};
+ /*Specifier=*/std::nullopt,
+ /*IgnorePrefixQualifiers=*/true};
RenameInfos.push_back(Info);
}
}
@@ -217,10 +255,10 @@ public:
auto EndLoc = Expr->getMemberLoc();
if (isInUSRSet(Decl)) {
RenameInfos.push_back({StartLoc, EndLoc,
- /*FromDecl=*/nullptr,
- /*Context=*/nullptr,
- /*Specifier=*/nullptr,
- /*IgnorePrefixQualifiers=*/true});
+ /*FromDecl=*/nullptr,
+ /*Context=*/nullptr,
+ /*Specifier=*/std::nullopt,
+ /*IgnorePrefixQualifiers=*/true});
}
return true;
}
@@ -235,7 +273,7 @@ public:
RenameInfos.push_back({StartLoc, EndLoc,
/*FromDecl=*/nullptr,
/*Context=*/nullptr,
- /*Specifier=*/nullptr,
+ /*Specifier=*/std::nullopt,
/*IgnorePrefixQualifiers=*/true});
}
}
@@ -257,7 +295,7 @@ public:
RenameInfos.push_back({Loc, Loc,
/*FromDecl=*/nullptr,
/*Context=*/nullptr,
- /*Specifier=*/nullptr,
+ /*Specifier=*/std::nullopt,
/*IgnorePrefixQualifiers=*/true});
}
}
@@ -288,7 +326,7 @@ public:
RenameInfos.push_back({EndLoc, EndLoc,
/*FromDecl=*/nullptr,
/*Context=*/nullptr,
- /*Specifier=*/nullptr,
+ /*Specifier=*/std::nullopt,
/*IgnorePrefixQualifiers=*/true});
return true;
}
@@ -332,7 +370,7 @@ public:
Decl,
getClosestAncestorDecl(*Expr),
Expr->getQualifier(),
- /*IgnorePrefixQualifers=*/false};
+ /*IgnorePrefixQualifiers=*/false};
RenameInfos.push_back(Info);
}
@@ -350,18 +388,18 @@ public:
}
bool VisitNestedNameSpecifierLocations(NestedNameSpecifierLoc NestedLoc) {
- if (!NestedLoc.getNestedNameSpecifier()->getAsType())
+ TypeLoc TL = NestedLoc.getAsTypeLoc();
+ if (!TL)
return true;
- if (const auto *TargetDecl =
- getSupportedDeclFromTypeLoc(NestedLoc.getTypeLoc())) {
+ if (const auto *TargetDecl = getSupportedDeclFromTypeLoc(TL)) {
if (isInUSRSet(TargetDecl)) {
RenameInfo Info = {NestedLoc.getBeginLoc(),
- EndLocationForType(NestedLoc.getTypeLoc()),
+ EndLocationForType(TL),
TargetDecl,
getClosestAncestorDecl(NestedLoc),
- NestedLoc.getNestedNameSpecifier()->getPrefix(),
- /*IgnorePrefixQualifers=*/false};
+ /*Specifier=*/std::nullopt,
+ /*IgnorePrefixQualifiers=*/false};
RenameInfos.push_back(Info);
}
}
@@ -411,7 +449,7 @@ public:
TargetDecl,
getClosestAncestorDecl(Loc),
GetNestedNameForType(Loc),
- /*IgnorePrefixQualifers=*/false};
+ /*IgnorePrefixQualifiers=*/false};
RenameInfos.push_back(Info);
}
return true;
@@ -421,33 +459,17 @@ public:
// Handle specific template class specialization cases.
if (const auto *TemplateSpecType =
dyn_cast<TemplateSpecializationType>(Loc.getType())) {
- TypeLoc TargetLoc = Loc;
- if (!ParentTypeLoc.isNull()) {
- if (llvm::isa<ElaboratedType>(ParentTypeLoc.getType()))
- TargetLoc = ParentTypeLoc;
- }
-
if (isInUSRSet(TemplateSpecType->getTemplateName().getAsTemplateDecl())) {
- TypeLoc TargetLoc = Loc;
- // FIXME: Find a better way to handle this case.
- // For the qualified template class specification type like
- // "ns::Foo<int>" in "ns::Foo<int>& f();", we want the parent typeLoc
- // (ElaboratedType) of the TemplateSpecializationType in order to
- // catch the prefix qualifiers "ns::".
- if (!ParentTypeLoc.isNull() &&
- llvm::isa<ElaboratedType>(ParentTypeLoc.getType()))
- TargetLoc = ParentTypeLoc;
-
- auto StartLoc = StartLocationForType(TargetLoc);
- auto EndLoc = EndLocationForType(TargetLoc);
+ auto StartLoc = StartLocationForType(Loc);
+ auto EndLoc = EndLocationForType(Loc);
if (IsValidEditLoc(Context.getSourceManager(), StartLoc)) {
RenameInfo Info = {
StartLoc,
EndLoc,
TemplateSpecType->getTemplateName().getAsTemplateDecl(),
- getClosestAncestorDecl(DynTypedNode::create(TargetLoc)),
- GetNestedNameForType(TargetLoc),
- /*IgnorePrefixQualifers=*/false};
+ getClosestAncestorDecl(DynTypedNode::create(Loc)),
+ GetNestedNameForType(Loc),
+ /*IgnorePrefixQualifiers=*/false};
RenameInfos.push_back(Info);
}
}
@@ -469,12 +491,7 @@ private:
const NamedDecl *getSupportedDeclFromTypeLoc(TypeLoc Loc) {
if (const auto* TT = Loc.getType()->getAs<clang::TypedefType>())
return TT->getDecl();
- if (const auto *RD = Loc.getType()->getAsCXXRecordDecl())
- return RD;
- if (const auto *ED =
- llvm::dyn_cast_or_null<EnumDecl>(Loc.getType()->getAsTagDecl()))
- return ED;
- return nullptr;
+ return Loc.getType()->getAsTagDecl();
}
// Get the closest ancestor which is a declaration of a given AST node.
@@ -549,7 +566,7 @@ createRenameAtomicChanges(llvm::ArrayRef<std::string> USRs,
for (const auto &RenameInfo : Finder.getRenameInfos()) {
std::string ReplacedName = NewName.str();
- if (RenameInfo.IgnorePrefixQualifers) {
+ if (RenameInfo.IgnorePrefixQualifiers) {
// Get the name without prefix qualifiers from NewName.
size_t LastColonPos = NewName.find_last_of(':');
if (LastColonPos != std::string::npos)
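To make the ranges concrete, an illustrative input (not taken from the patch) showing what StartLocationForType/EndLocationForType now select per TypeLoc class:

namespace a {
  struct A {};
  template <class T> struct B {};
}

struct a::A x;   // rename range starts at the qualifier `a::`, after `struct`
a::B<int>  y;    // range covers `a::B`, ending just before the `<`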
diff --git a/clang/lib/Tooling/Syntax/BuildTree.cpp b/clang/lib/Tooling/Syntax/BuildTree.cpp
index eb9fa7a..546161c 100644
--- a/clang/lib/Tooling/Syntax/BuildTree.cpp
+++ b/clang/lib/Tooling/Syntax/BuildTree.cpp
@@ -918,97 +918,91 @@ public:
return true;
}
- // FIXME: Fix `NestedNameSpecifierLoc::getLocalSourceRange` for the
- // `DependentTemplateSpecializationType` case.
- /// Given a nested-name-specifier return the range for the last name
- /// specifier.
- ///
- /// e.g. `std::T::template X<U>::` => `template X<U>::`
- SourceRange getLocalSourceRange(const NestedNameSpecifierLoc &NNSLoc) {
- auto SR = NNSLoc.getLocalSourceRange();
-
- // The method `NestedNameSpecifierLoc::getLocalSourceRange` *should*
- // return the desired `SourceRange`, but there is a corner case. For a
- // `DependentTemplateSpecializationType` this method returns its
- // qualifiers as well, in other words in the example above this method
- // returns `T::template X<U>::` instead of only `template X<U>::`
- if (auto TL = NNSLoc.getTypeLoc()) {
- if (auto DependentTL =
- TL.getAs<DependentTemplateSpecializationTypeLoc>()) {
- // The 'template' keyword is always present in dependent template
- // specializations. Except in the case of incorrect code
- // TODO: Treat the case of incorrect code.
- SR.setBegin(DependentTL.getTemplateKeywordLoc());
- }
- }
-
- return SR;
- }
-
- syntax::NodeKind getNameSpecifierKind(const NestedNameSpecifier &NNS) {
- switch (NNS.getKind()) {
- case NestedNameSpecifier::Global:
- return syntax::NodeKind::GlobalNameSpecifier;
- case NestedNameSpecifier::Namespace:
- case NestedNameSpecifier::Identifier:
- return syntax::NodeKind::IdentifierNameSpecifier;
- case NestedNameSpecifier::TypeSpec: {
- const auto *NNSType = NNS.getAsType();
- assert(NNSType);
- if (isa<DecltypeType>(NNSType))
- return syntax::NodeKind::DecltypeNameSpecifier;
- if (isa<TemplateSpecializationType, DependentTemplateSpecializationType>(
- NNSType))
- return syntax::NodeKind::SimpleTemplateNameSpecifier;
- return syntax::NodeKind::IdentifierNameSpecifier;
- }
- default:
- // FIXME: Support Microsoft's __super
- llvm::report_fatal_error("We don't yet support the __super specifier",
- true);
- }
+ syntax::NameSpecifier *buildIdentifier(SourceRange SR,
+ bool DropBack = false) {
+ auto NameSpecifierTokens = Builder.getRange(SR).drop_back(DropBack);
+ assert(NameSpecifierTokens.size() == 1);
+ Builder.markChildToken(NameSpecifierTokens.begin(),
+ syntax::NodeRole::Unknown);
+ auto *NS = new (allocator()) syntax::IdentifierNameSpecifier;
+ Builder.foldNode(NameSpecifierTokens, NS, nullptr);
+ return NS;
+ }
+
+ syntax::NameSpecifier *buildSimpleTemplateName(SourceRange SR) {
+ auto NameSpecifierTokens = Builder.getRange(SR);
+ // TODO: Build `SimpleTemplateNameSpecifier` children and implement
+ // accessors to them.
+ // Be aware, we cannot do that simply by calling `TraverseTypeLoc`,
+ // some `TypeLoc`s have inside them the previous name specifier and
+ // we want to treat them independently.
+ auto *NS = new (allocator()) syntax::SimpleTemplateNameSpecifier;
+ Builder.foldNode(NameSpecifierTokens, NS, nullptr);
+ return NS;
}
syntax::NameSpecifier *
buildNameSpecifier(const NestedNameSpecifierLoc &NNSLoc) {
assert(NNSLoc.hasQualifier());
- auto NameSpecifierTokens =
- Builder.getRange(getLocalSourceRange(NNSLoc)).drop_back();
- switch (getNameSpecifierKind(*NNSLoc.getNestedNameSpecifier())) {
- case syntax::NodeKind::GlobalNameSpecifier:
+ switch (NNSLoc.getNestedNameSpecifier().getKind()) {
+ case NestedNameSpecifier::Kind::Global:
return new (allocator()) syntax::GlobalNameSpecifier;
- case syntax::NodeKind::IdentifierNameSpecifier: {
- assert(NameSpecifierTokens.size() == 1);
- Builder.markChildToken(NameSpecifierTokens.begin(),
- syntax::NodeRole::Unknown);
- auto *NS = new (allocator()) syntax::IdentifierNameSpecifier;
- Builder.foldNode(NameSpecifierTokens, NS, nullptr);
- return NS;
- }
- case syntax::NodeKind::SimpleTemplateNameSpecifier: {
- // TODO: Build `SimpleTemplateNameSpecifier` children and implement
- // accessors to them.
- // Be aware, we cannot do that simply by calling `TraverseTypeLoc`,
- // some `TypeLoc`s have inside them the previous name specifier and
- // we want to treat them independently.
- auto *NS = new (allocator()) syntax::SimpleTemplateNameSpecifier;
- Builder.foldNode(NameSpecifierTokens, NS, nullptr);
- return NS;
- }
- case syntax::NodeKind::DecltypeNameSpecifier: {
- const auto TL = NNSLoc.getTypeLoc().castAs<DecltypeTypeLoc>();
- if (!RecursiveASTVisitor::TraverseDecltypeTypeLoc(TL))
- return nullptr;
- auto *NS = new (allocator()) syntax::DecltypeNameSpecifier;
- // TODO: Implement accessor to `DecltypeNameSpecifier` inner
- // `DecltypeTypeLoc`.
- // For that add mapping from `TypeLoc` to `syntax::Node*` then:
- // Builder.markChild(TypeLoc, syntax::NodeRole);
- Builder.foldNode(NameSpecifierTokens, NS, nullptr);
- return NS;
+
+ case NestedNameSpecifier::Kind::Namespace:
+ return buildIdentifier(NNSLoc.getLocalSourceRange(), /*DropBack=*/true);
+
+ case NestedNameSpecifier::Kind::Type: {
+ TypeLoc TL = NNSLoc.castAsTypeLoc();
+ switch (TL.getTypeLocClass()) {
+ case TypeLoc::Record:
+ case TypeLoc::InjectedClassName:
+ case TypeLoc::Enum:
+ return buildIdentifier(TL.castAs<TagTypeLoc>().getNameLoc());
+ case TypeLoc::Typedef:
+ return buildIdentifier(TL.castAs<TypedefTypeLoc>().getNameLoc());
+ case TypeLoc::UnresolvedUsing:
+ return buildIdentifier(
+ TL.castAs<UnresolvedUsingTypeLoc>().getNameLoc());
+ case TypeLoc::Using:
+ return buildIdentifier(TL.castAs<UsingTypeLoc>().getNameLoc());
+ case TypeLoc::DependentName:
+ return buildIdentifier(TL.castAs<DependentNameTypeLoc>().getNameLoc());
+ case TypeLoc::TemplateSpecialization: {
+ auto TST = TL.castAs<TemplateSpecializationTypeLoc>();
+ SourceLocation BeginLoc = TST.getTemplateKeywordLoc();
+ if (BeginLoc.isInvalid())
+ BeginLoc = TST.getTemplateNameLoc();
+ return buildSimpleTemplateName({BeginLoc, TST.getEndLoc()});
+ }
+ case TypeLoc::DependentTemplateSpecialization: {
+ auto DT = TL.castAs<DependentTemplateSpecializationTypeLoc>();
+ SourceLocation BeginLoc = DT.getTemplateKeywordLoc();
+ if (BeginLoc.isInvalid())
+ BeginLoc = DT.getTemplateNameLoc();
+ return buildSimpleTemplateName({BeginLoc, DT.getEndLoc()});
+ }
+ case TypeLoc::Decltype: {
+ const auto DTL = TL.castAs<DecltypeTypeLoc>();
+ if (!RecursiveASTVisitor::TraverseDecltypeTypeLoc(
+ DTL, /*TraverseQualifier=*/true))
+ return nullptr;
+ auto *NS = new (allocator()) syntax::DecltypeNameSpecifier;
+ // TODO: Implement accessor to `DecltypeNameSpecifier` inner
+ // `DecltypeTypeLoc`.
+ // For that add mapping from `TypeLoc` to `syntax::Node*` then:
+ // Builder.markChild(TypeLoc, syntax::NodeRole);
+ Builder.foldNode(Builder.getRange(DTL.getLocalSourceRange()), NS,
+ nullptr);
+ return NS;
+ }
+ default:
+ return buildIdentifier(TL.getLocalSourceRange());
+ }
}
default:
- llvm_unreachable("getChildKind() does not return this value");
+ // FIXME: Support Microsoft's __super
+ llvm::report_fatal_error("We don't yet support the __super specifier",
+ true);
}
}
@@ -1019,12 +1013,16 @@ public:
bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc QualifierLoc) {
if (!QualifierLoc)
return true;
- for (auto It = QualifierLoc; It; It = It.getPrefix()) {
+ for (auto It = QualifierLoc; It; /**/) {
auto *NS = buildNameSpecifier(It);
if (!NS)
return false;
Builder.markChild(NS, syntax::NodeRole::ListElement);
Builder.markChildToken(It.getEndLoc(), syntax::NodeRole::ListDelimiter);
+ if (TypeLoc TL = It.getAsTypeLoc())
+ It = TL.getPrefix();
+ else
+ It = It.getAsNamespaceAndPrefix().Prefix;
}
Builder.foldNode(Builder.getRange(QualifierLoc.getSourceRange()),
new (allocator()) syntax::NestedNameSpecifier,
@@ -1328,7 +1326,7 @@ public:
// FIXME: Deleting the `TraverseParenTypeLoc` override doesn't change test
// results. Find test coverage or remove it.
- bool TraverseParenTypeLoc(ParenTypeLoc L) {
+ bool TraverseParenTypeLoc(ParenTypeLoc L, bool TraverseQualifier) {
// We reverse order of traversal to get the proper syntax structure.
if (!WalkUpFromParenTypeLoc(L))
return false;
@@ -1391,7 +1389,8 @@ public:
return WalkUpFromFunctionTypeLoc(L);
}
- bool TraverseMemberPointerTypeLoc(MemberPointerTypeLoc L) {
+ bool TraverseMemberPointerTypeLoc(MemberPointerTypeLoc L,
+ bool TraverseQualifier) {
// In the source code "void (Y::*mp)()" `MemberPointerTypeLoc` corresponds
// to "Y::*" but it points to a `ParenTypeLoc` that corresponds to
// "(Y::*mp)" We thus reverse the order of traversal to get the proper
diff --git a/clang/lib/Tooling/Transformer/RangeSelector.cpp b/clang/lib/Tooling/Transformer/RangeSelector.cpp
index 00f4611..171c786 100644
--- a/clang/lib/Tooling/Transformer/RangeSelector.cpp
+++ b/clang/lib/Tooling/Transformer/RangeSelector.cpp
@@ -222,14 +222,10 @@ RangeSelector transformer::name(std::string ID) {
return CharSourceRange::getTokenRange(L, L);
}
if (const auto *T = Node.get<TypeLoc>()) {
- TypeLoc Loc = *T;
- auto ET = Loc.getAs<ElaboratedTypeLoc>();
- if (!ET.isNull())
- Loc = ET.getNamedTypeLoc();
- if (auto SpecLoc = Loc.getAs<TemplateSpecializationTypeLoc>();
+ if (auto SpecLoc = T->getAs<TemplateSpecializationTypeLoc>();
!SpecLoc.isNull())
return CharSourceRange::getTokenRange(SpecLoc.getTemplateNameLoc());
- return CharSourceRange::getTokenRange(Loc.getSourceRange());
+ return CharSourceRange::getTokenRange(T->getSourceRange());
}
return typeError(ID, Node.getNodeKind(),
"DeclRefExpr, NamedDecl, CXXCtorInitializer, TypeLoc");
diff --git a/clang/test/AST/ByteCode/arrays.cpp b/clang/test/AST/ByteCode/arrays.cpp
index 2dd51c2..8ef5e4d 100644
--- a/clang/test/AST/ByteCode/arrays.cpp
+++ b/clang/test/AST/ByteCode/arrays.cpp
@@ -779,3 +779,20 @@ namespace DiscardedSubScriptExpr {
return true;
}
}
+
+namespace ZeroSizeArrayRead {
+ constexpr char str[0] = {};
+ constexpr unsigned checksum(const char *s) {
+ unsigned result = 0;
+ for (const char *p = s; *p != '\0'; ++p) { // both-note {{read of dereferenced one-past-the-end pointer}}
+ result += *p;
+ }
+ return result;
+ }
+ constexpr unsigned C = checksum(str); // both-error {{must be initialized by a constant expression}} \
+ // both-note {{in call to}}
+
+ constexpr const char *p1 = &str[0];
+ constexpr const char *p2 = &str[1]; // both-error {{must be initialized by a constant expression}} \
+ // both-note {{cannot refer to element 1 of array of 0 elements in a constant expression}}
+}
diff --git a/clang/test/AST/ByteCode/cxx11.cpp b/clang/test/AST/ByteCode/cxx11.cpp
index 7aecf23b..08caca0 100644
--- a/clang/test/AST/ByteCode/cxx11.cpp
+++ b/clang/test/AST/ByteCode/cxx11.cpp
@@ -330,3 +330,33 @@ namespace ReadMutableInCopyCtor {
// both-note {{read of mutable member 'u'}} \
// both-note {{in call to 'G(g1)'}}
}
+
+namespace GH150709 {
+ struct C { };
+ struct D : C {
+ constexpr int f() const { return 1; };
+ };
+ struct E : C { };
+ struct F : D { };
+ struct G : E { };
+
+ constexpr C c1, c2[2];
+ constexpr D d1, d2[2];
+ constexpr E e1, e2[2];
+ constexpr F f;
+ constexpr G g;
+
+ constexpr auto mp = static_cast<int (C::*)() const>(&D::f);
+
+ // sanity checks for fix of GH150709 (unchanged behavior)
+ static_assert((c1.*mp)() == 1, ""); // both-error {{constant expression}}
+ static_assert((d1.*mp)() == 1, "");
+ static_assert((f.*mp)() == 1, "");
+ static_assert((c2[0].*mp)() == 1, ""); // ref-error {{constant expression}}
+ static_assert((d2[0].*mp)() == 1, "");
+
+ // incorrectly undiagnosed before fix of GH150709
+ static_assert((e1.*mp)() == 1, ""); // ref-error {{constant expression}}
+ static_assert((e2[0].*mp)() == 1, ""); // ref-error {{constant expression}}
+ static_assert((g.*mp)() == 1, ""); // ref-error {{constant expression}}
+}
diff --git a/clang/test/AST/ByteCode/cxx23.cpp b/clang/test/AST/ByteCode/cxx23.cpp
index 45dd4f5..2182d7c 100644
--- a/clang/test/AST/ByteCode/cxx23.cpp
+++ b/clang/test/AST/ByteCode/cxx23.cpp
@@ -309,15 +309,14 @@ namespace NonLiteralDtorInParam {
~NonLiteral() {} // all23-note {{declared here}}
};
constexpr int F2(NonLiteral N) { // all20-error {{constexpr function's 1st parameter type 'NonLiteral' is not a literal type}} \
- // ref23-note {{non-constexpr function '~NonLiteral' cannot be used in a constant expression}}
+ // all23-note {{non-constexpr function '~NonLiteral' cannot be used in a constant expression}}
return 8;
}
void test() {
NonLiteral L;
- constexpr auto D = F2(L); // all23-error {{must be initialized by a constant expression}} \
- // expected23-note {{non-constexpr function '~NonLiteral' cannot be used in a constant expression}}
+ constexpr auto D = F2(L); // all23-error {{must be initialized by a constant expression}}
}
}
diff --git a/clang/test/AST/ByteCode/cxx2a.cpp b/clang/test/AST/ByteCode/cxx2a.cpp
index ac2f988..744c99e 100644
--- a/clang/test/AST/ByteCode/cxx2a.cpp
+++ b/clang/test/AST/ByteCode/cxx2a.cpp
@@ -225,3 +225,17 @@ namespace Dtor {
static_assert(pseudo(true, false)); // both-error {{constant expression}} both-note {{in call}}
static_assert(pseudo(false, true));
}
+
+namespace GH150705 {
+ struct A { };
+ struct B : A { };
+ struct C : A {
+ constexpr virtual int foo() const { return 0; }
+ };
+
+ constexpr auto p = &C::foo;
+ constexpr auto q = static_cast<int (A::*)() const>(p);
+ constexpr B b;
+ constexpr const A& a = b;
+ constexpr auto x = (a.*q)(); // both-error {{constant expression}}
+}
diff --git a/clang/test/AST/ByteCode/functions.cpp b/clang/test/AST/ByteCode/functions.cpp
index 36e7bb3..3c00de0 100644
--- a/clang/test/AST/ByteCode/functions.cpp
+++ b/clang/test/AST/ByteCode/functions.cpp
@@ -708,3 +708,8 @@ namespace NoDiags {
return true;
}
}
+
+namespace EnableIfWithTemporary {
+ struct A { ~A(); };
+ int &h() __attribute__((enable_if((A(), true), ""))); // both-warning {{clang extension}}
+}
diff --git a/clang/test/AST/ByteCode/lifetimes.cpp b/clang/test/AST/ByteCode/lifetimes.cpp
index 5c8d562..c8bf02c 100644
--- a/clang/test/AST/ByteCode/lifetimes.cpp
+++ b/clang/test/AST/ByteCode/lifetimes.cpp
@@ -94,14 +94,13 @@ namespace CallScope {
int n = 0;
constexpr int f() const { return 0; }
};
- constexpr Q *out_of_lifetime(Q q) { return &q; } // both-warning {{address of stack}}
+ constexpr Q *out_of_lifetime(Q q) { return &q; } // both-warning {{address of stack}} \
+ // expected-note 2{{declared here}}
constexpr int k3 = out_of_lifetime({})->n; // both-error {{must be initialized by a constant expression}} \
- // expected-note {{read of temporary whose lifetime has ended}} \
- // expected-note {{temporary created here}} \
+ // expected-note {{read of variable whose lifetime has ended}} \
// ref-note {{read of object outside its lifetime}}
constexpr int k4 = out_of_lifetime({})->f(); // both-error {{must be initialized by a constant expression}} \
- // expected-note {{member call on temporary whose lifetime has ended}} \
- // expected-note {{temporary created here}} \
+ // expected-note {{member call on variable whose lifetime has ended}} \
// ref-note {{member call on object outside its lifetime}}
}
diff --git a/clang/test/AST/HLSL/StructuredBuffers-AST.hlsl b/clang/test/AST/HLSL/StructuredBuffers-AST.hlsl
index 1c8b9c1..76de39c 100644
--- a/clang/test/AST/HLSL/StructuredBuffers-AST.hlsl
+++ b/clang/test/AST/HLSL/StructuredBuffers-AST.hlsl
@@ -91,21 +91,21 @@ RESOURCE<float> Buffer;
// Default constructor
-// CHECK: CXXConstructorDecl {{.*}} [[RESOURCE]]<element_type> 'void ()' inline
+// CHECK: CXXConstructorDecl {{.*}} hlsl::[[RESOURCE]]<element_type> 'void ()' inline
// CHECK-NEXT: CompoundStmt
// CHECK-NEXT: BinaryOperator {{.*}} '='
// CHECK-NEXT: MemberExpr {{.*}} lvalue .__handle
-// CHECK-NEXT: CXXThisExpr {{.*}} '[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-NEXT: CallExpr {{.*}} '__hlsl_resource_t
// CHECK-NEXT: ImplicitCastExpr {{.*}} <BuiltinFnToFnPtr>
// CHECK-NEXT: DeclRefExpr {{.*}} '<builtin fn type>' Function {{.*}} '__builtin_hlsl_resource_uninitializedhandle'
// CHECK-NEXT: MemberExpr {{.*}} lvalue .__handle
-// CHECK-NEXT: CXXThisExpr {{.*}} '[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-NEXT: AlwaysInlineAttr
// Constructor from binding
-// CHECK: CXXConstructorDecl {{.*}} [[RESOURCE]]<element_type> 'void (unsigned int, unsigned int, int, unsigned int, const char *)' inline
+// CHECK: CXXConstructorDecl {{.*}} hlsl::[[RESOURCE]]<element_type> 'void (unsigned int, unsigned int, int, unsigned int, const char *)' inline
// CHECK-NEXT: ParmVarDecl {{.*}} registerNo 'unsigned int'
// CHECK-NEXT: ParmVarDecl {{.*}} spaceNo 'unsigned int'
// CHECK-NEXT: ParmVarDecl {{.*}} range 'int'
@@ -114,12 +114,12 @@ RESOURCE<float> Buffer;
// CHECK-NEXT: CompoundStmt {{.*}}
// CHECK-NEXT: BinaryOperator {{.*}} '='
// CHECK-NEXT: MemberExpr {{.*}} lvalue .__handle
-// CHECK-NEXT: CXXThisExpr {{.*}} '[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-NEXT: CallExpr {{.*}} '__hlsl_resource_t
// CHECK-NEXT: ImplicitCastExpr {{.*}} <BuiltinFnToFnPtr>
// CHECK-NEXT: DeclRefExpr {{.*}} '<builtin fn type>' Function {{.*}} '__builtin_hlsl_resource_handlefrombinding'
// CHECK-NEXT: MemberExpr {{.*}} lvalue .__handle
-// CHECK-NEXT: CXXThisExpr {{.*}} '[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-NEXT: DeclRefExpr {{.*}} 'unsigned int' ParmVar {{.*}} 'registerNo' 'unsigned int'
// CHECK-NEXT: DeclRefExpr {{.*}} 'unsigned int' ParmVar {{.*}} 'spaceNo' 'unsigned int'
// CHECK-NEXT: DeclRefExpr {{.*}} 'int' ParmVar {{.*}} 'range' 'int'
@@ -129,7 +129,7 @@ RESOURCE<float> Buffer;
// Constructor from implicit binding
-// CHECK: CXXConstructorDecl {{.*}} [[RESOURCE]]<element_type> 'void (unsigned int, int, unsigned int, unsigned int, const char *)' inline
+// CHECK: CXXConstructorDecl {{.*}} hlsl::[[RESOURCE]]<element_type> 'void (unsigned int, int, unsigned int, unsigned int, const char *)' inline
// CHECK-NEXT: ParmVarDecl {{.*}} spaceNo 'unsigned int'
// CHECK-NEXT: ParmVarDecl {{.*}} range 'int'
// CHECK-NEXT: ParmVarDecl {{.*}} index 'unsigned int'
@@ -138,12 +138,12 @@ RESOURCE<float> Buffer;
// CHECK-NEXT: CompoundStmt {{.*}}
// CHECK-NEXT: BinaryOperator {{.*}} '='
// CHECK-NEXT: MemberExpr {{.*}} lvalue .__handle
-// CHECK-NEXT: CXXThisExpr {{.*}} '[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-NEXT: CallExpr {{.*}} '__hlsl_resource_t
// CHECK-NEXT: ImplicitCastExpr {{.*}} <BuiltinFnToFnPtr>
// CHECK-NEXT: DeclRefExpr {{.*}} '<builtin fn type>' Function {{.*}} '__builtin_hlsl_resource_handlefromimplicitbinding'
// CHECK-NEXT: MemberExpr {{.*}} lvalue .__handle
-// CHECK-NEXT: CXXThisExpr {{.*}} '[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-NEXT: DeclRefExpr {{.*}} 'unsigned int' ParmVar {{.*}} 'spaceNo' 'unsigned int'
// CHECK-NEXT: DeclRefExpr {{.*}} 'int' ParmVar {{.*}} 'range' 'int'
// CHECK-NEXT: DeclRefExpr {{.*}} 'unsigned int' ParmVar {{.*}} 'index' 'unsigned int'
@@ -166,7 +166,7 @@ RESOURCE<float> Buffer;
// CHECK-SUBSCRIPT-SAME{LITERAL}: [[hlsl::raw_buffer]]
// CHECK-SUBSCRIPT-SAME{LITERAL}: [[hlsl::contained_type(element_type)]]
// CHECK-SUBSCRIPT-SAME: ' lvalue .__handle {{.*}}
-// CHECK-SUBSCRIPT-NEXT: CXXThisExpr {{.*}} 'const [[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-SUBSCRIPT-NEXT: CXXThisExpr {{.*}} 'const hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-SUBSCRIPT-NEXT: DeclRefExpr {{.*}} 'unsigned int' ParmVar {{.*}} 'Index' 'unsigned int'
// CHECK-SUBSCRIPT-NEXT: AlwaysInlineAttr {{.*}} Implicit always_inline
@@ -183,7 +183,7 @@ RESOURCE<float> Buffer;
// CHECK-SUBSCRIPT-UAV-SAME{LITERAL}: [[hlsl::raw_buffer]]
// CHECK-SUBSCRIPT-UAV-SAME{LITERAL}: [[hlsl::contained_type(element_type)]]
// CHECK-SUBSCRIPT-UAV-SAME: ' lvalue .__handle {{.*}}
-// CHECK-SUBSCRIPT-UAV-NEXT: CXXThisExpr {{.*}} '[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-SUBSCRIPT-UAV-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-SUBSCRIPT-UAV-NEXT: DeclRefExpr {{.*}} 'unsigned int' ParmVar {{.*}} 'Index' 'unsigned int'
// CHECK-SUBSCRIPT-UAV-NEXT: AlwaysInlineAttr {{.*}} Implicit always_inline
@@ -204,7 +204,7 @@ RESOURCE<float> Buffer;
// CHECK-LOAD-SAME{LITERAL}: [[hlsl::resource_class(
// CHECK-LOAD-SAME{LITERAL}: [[hlsl::contained_type(element_type)]]
// CHECK-LOAD-SAME: ' lvalue .__handle {{.*}}
-// CHECK-LOAD-NEXT: CXXThisExpr {{.*}} '[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-LOAD-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-LOAD-NEXT: DeclRefExpr {{.*}} 'unsigned int' ParmVar {{.*}} 'Index' 'unsigned int'
// CHECK-LOAD-NEXT: AlwaysInlineAttr {{.*}} Implicit always_inline
@@ -220,7 +220,7 @@ RESOURCE<float> Buffer;
// CHECK-COUNTER-SAME{LITERAL}: [[hlsl::resource_class(UAV)]]
// CHECK-COUNTER-SAME{LITERAL}: [[hlsl::raw_buffer]]
// CHECK-COUNTER-SAME{LITERAL}: [[hlsl::contained_type(element_type)]]' lvalue .__handle
-// CHECK-COUNTER-NEXT: CXXThisExpr {{.*}} 'RWStructuredBuffer<element_type>' lvalue implicit this
+// CHECK-COUNTER-NEXT: CXXThisExpr {{.*}} 'hlsl::RWStructuredBuffer<element_type>' lvalue implicit this
// CHECK-COUNTER-NEXT: IntegerLiteral {{.*}} 'int' 1
// CHECK-COUNTER-NEXT: AlwaysInlineAttr {{.*}} Implicit always_inline
@@ -236,7 +236,7 @@ RESOURCE<float> Buffer;
// CHECK-COUNTER-SAME{LITERAL}: [[hlsl::resource_class(UAV)]]
// CHECK-COUNTER-SAME{LITERAL}: [[hlsl::raw_buffer]]
// CHECK-COUNTER-SAME{LITERAL}: [[hlsl::contained_type(element_type)]]' lvalue .__handle
-// CHECK-COUNTER-NEXT: CXXThisExpr {{.*}} 'RWStructuredBuffer<element_type>' lvalue implicit this
+// CHECK-COUNTER-NEXT: CXXThisExpr {{.*}} 'hlsl::RWStructuredBuffer<element_type>' lvalue implicit this
// CHECK-COUNTER-NEXT: IntegerLiteral {{.*}} 'int' -1
// CHECK-COUNTER-NEXT: AlwaysInlineAttr {{.*}} Implicit always_inline
@@ -254,7 +254,7 @@ RESOURCE<float> Buffer;
// CHECK-APPEND-SAME{LITERAL}: [[hlsl::resource_class(UAV)]]
// CHECK-APPEND-SAME{LITERAL}: [[hlsl::raw_buffer]]
// CHECK-APPEND-SAME{LITERAL}: [[hlsl::contained_type(element_type)]]' lvalue .__handle
-// CHECK-APPEND-NEXT: CXXThisExpr {{.*}} '[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-APPEND-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-APPEND-NEXT: CallExpr {{.*}} 'unsigned int'
// CHECK-APPEND-NEXT: ImplicitCastExpr {{.*}} <BuiltinFnToFnPtr>
// CHECK-APPEND-NEXT: DeclRefExpr {{.*}} '<builtin fn type>' Function {{.*}} '__builtin_hlsl_buffer_update_counter' 'unsigned int (...) noexcept'
@@ -262,7 +262,7 @@ RESOURCE<float> Buffer;
// CHECK-APPEND-SAME{LITERAL}: [[hlsl::resource_class(UAV)]]
// CHECK-APPEND-SAME{LITERAL}: [[hlsl::raw_buffer]]
// CHECK-APPEND-SAME{LITERAL}: [[hlsl::contained_type(element_type)]]' lvalue .__handle
-// CHECK-APPEND-NEXT: CXXThisExpr {{.*}} '[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-APPEND-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-APPEND-NEXT: IntegerLiteral {{.*}} 'int' 1
// CHECK-APPEND-NEXT: DeclRefExpr {{.*}} 'element_type' ParmVar {{.*}} 'value' 'element_type'
@@ -279,7 +279,7 @@ RESOURCE<float> Buffer;
// CHECK-CONSUME-SAME{LITERAL}: [[hlsl::resource_class(UAV)]]
// CHECK-CONSUME-SAME{LITERAL}: [[hlsl::raw_buffer]]
// CHECK-CONSUME-SAME{LITERAL}: [[hlsl::contained_type(element_type)]]' lvalue .__handle
-// CHECK-CONSUME-NEXT: CXXThisExpr {{.*}} '[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-CONSUME-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-CONSUME-NEXT: CallExpr {{.*}} 'unsigned int'
// CHECK-CONSUME-NEXT: ImplicitCastExpr {{.*}} <BuiltinFnToFnPtr>
// CHECK-CONSUME-NEXT: DeclRefExpr {{.*}} '<builtin fn type>' Function {{.*}} '__builtin_hlsl_buffer_update_counter' 'unsigned int (...) noexcept'
@@ -287,7 +287,7 @@ RESOURCE<float> Buffer;
// CHECK-CONSUME-SAME{LITERAL}: [[hlsl::resource_class(UAV)]]
// CHECK-CONSUME-SAME{LITERAL}: [[hlsl::raw_buffer]]
// CHECK-CONSUME-SAME{LITERAL}: [[hlsl::contained_type(element_type)]]' lvalue .__handle
-// CHECK-CONSUME-NEXT: CXXThisExpr {{.*}} '[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-CONSUME-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-CONSUME-NEXT: IntegerLiteral {{.*}} 'int' -1
// CHECK: ClassTemplateSpecializationDecl {{.*}} class [[RESOURCE]] definition
diff --git a/clang/test/AST/HLSL/TypedBuffers-AST.hlsl b/clang/test/AST/HLSL/TypedBuffers-AST.hlsl
index d6b88e2..936a060 100644
--- a/clang/test/AST/HLSL/TypedBuffers-AST.hlsl
+++ b/clang/test/AST/HLSL/TypedBuffers-AST.hlsl
@@ -66,21 +66,21 @@ RESOURCE<float> Buffer;
// Default constructor
-// CHECK: CXXConstructorDecl {{.*}} [[RESOURCE]]<element_type> 'void ()' inline
+// CHECK: CXXConstructorDecl {{.*}} hlsl::[[RESOURCE]]<element_type> 'void ()' inline
// CHECK-NEXT: CompoundStmt
// CHECK-NEXT: BinaryOperator {{.*}} '='
// CHECK-NEXT: MemberExpr {{.*}} lvalue .__handle
-// CHECK-NEXT: CXXThisExpr {{.*}} '[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-NEXT: CallExpr {{.*}} '__hlsl_resource_t
// CHECK-NEXT: ImplicitCastExpr {{.*}} <BuiltinFnToFnPtr>
// CHECK-NEXT: DeclRefExpr {{.*}} '<builtin fn type>' Function {{.*}} '__builtin_hlsl_resource_uninitializedhandle'
// CHECK-NEXT: MemberExpr {{.*}} lvalue .__handle
-// CHECK-NEXT: CXXThisExpr {{.*}} '[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-NEXT: AlwaysInlineAttr
// Constructor from binding
-// CHECK: CXXConstructorDecl {{.*}} [[RESOURCE]]<element_type> 'void (unsigned int, unsigned int, int, unsigned int, const char *)' inline
+// CHECK: CXXConstructorDecl {{.*}} hlsl::[[RESOURCE]]<element_type> 'void (unsigned int, unsigned int, int, unsigned int, const char *)' inline
// CHECK-NEXT: ParmVarDecl {{.*}} registerNo 'unsigned int'
// CHECK-NEXT: ParmVarDecl {{.*}} spaceNo 'unsigned int'
// CHECK-NEXT: ParmVarDecl {{.*}} range 'int'
@@ -89,12 +89,12 @@ RESOURCE<float> Buffer;
// CHECK-NEXT: CompoundStmt {{.*}}
// CHECK-NEXT: BinaryOperator {{.*}} '='
// CHECK-NEXT: MemberExpr {{.*}} lvalue .__handle
-// CHECK-NEXT: CXXThisExpr {{.*}} '[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-NEXT: CallExpr {{.*}} '__hlsl_resource_t
// CHECK-NEXT: ImplicitCastExpr {{.*}} <BuiltinFnToFnPtr>
// CHECK-NEXT: DeclRefExpr {{.*}} '<builtin fn type>' Function {{.*}} '__builtin_hlsl_resource_handlefrombinding'
// CHECK-NEXT: MemberExpr {{.*}} lvalue .__handle
-// CHECK-NEXT: CXXThisExpr {{.*}} '[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-NEXT: DeclRefExpr {{.*}} 'unsigned int' ParmVar {{.*}} 'registerNo' 'unsigned int'
// CHECK-NEXT: DeclRefExpr {{.*}} 'unsigned int' ParmVar {{.*}} 'spaceNo' 'unsigned int'
// CHECK-NEXT: DeclRefExpr {{.*}} 'int' ParmVar {{.*}} 'range' 'int'
@@ -104,7 +104,7 @@ RESOURCE<float> Buffer;
// Constructor from implicit binding
-// CHECK: CXXConstructorDecl {{.*}} [[RESOURCE]]<element_type> 'void (unsigned int, int, unsigned int, unsigned int, const char *)' inline
+// CHECK: CXXConstructorDecl {{.*}} hlsl::[[RESOURCE]]<element_type> 'void (unsigned int, int, unsigned int, unsigned int, const char *)' inline
// CHECK-NEXT: ParmVarDecl {{.*}} spaceNo 'unsigned int'
// CHECK-NEXT: ParmVarDecl {{.*}} range 'int'
// CHECK-NEXT: ParmVarDecl {{.*}} index 'unsigned int'
@@ -113,12 +113,12 @@ RESOURCE<float> Buffer;
// CHECK-NEXT: CompoundStmt {{.*}}
// CHECK-NEXT: BinaryOperator {{.*}} '='
// CHECK-NEXT: MemberExpr {{.*}} lvalue .__handle
-// CHECK-NEXT: CXXThisExpr {{.*}} '[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-NEXT: CallExpr {{.*}} '__hlsl_resource_t
// CHECK-NEXT: ImplicitCastExpr {{.*}} <BuiltinFnToFnPtr>
// CHECK-NEXT: DeclRefExpr {{.*}} '<builtin fn type>' Function {{.*}} '__builtin_hlsl_resource_handlefromimplicitbinding'
// CHECK-NEXT: MemberExpr {{.*}} lvalue .__handle
-// CHECK-NEXT: CXXThisExpr {{.*}} '[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-NEXT: DeclRefExpr {{.*}} 'unsigned int' ParmVar {{.*}} 'spaceNo' 'unsigned int'
// CHECK-NEXT: DeclRefExpr {{.*}} 'int' ParmVar {{.*}} 'range' 'int'
// CHECK-NEXT: DeclRefExpr {{.*}} 'unsigned int' ParmVar {{.*}} 'index' 'unsigned int'
@@ -141,7 +141,7 @@ RESOURCE<float> Buffer;
// CHECK-SRV-SAME{LITERAL}: [[hlsl::resource_class(SRV)]]
// CHECK-SAME{LITERAL}: [[hlsl::contained_type(element_type)]]
// CHECK-SAME: ' lvalue .__handle {{.*}}
-// CHECK-NEXT: CXXThisExpr {{.*}} 'const [[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-NEXT: CXXThisExpr {{.*}} 'const hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-NEXT: DeclRefExpr {{.*}} 'unsigned int' ParmVar {{.*}} 'Index' 'unsigned int'
// CHECK-NEXT: AlwaysInlineAttr {{.*}} Implicit always_inline
@@ -157,7 +157,7 @@ RESOURCE<float> Buffer;
// CHECK-UAV-SAME{LITERAL}: [[hlsl::resource_class(UAV)]]
// CHECK-UAV-SAME{LITERAL}: [[hlsl::contained_type(element_type)]]
// CHECK-UAV-SAME: ' lvalue .__handle {{.*}}
-// CHECK-UAV-NEXT: CXXThisExpr {{.*}} '[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-UAV-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-UAV-NEXT: DeclRefExpr {{.*}} 'unsigned int' ParmVar {{.*}} 'Index' 'unsigned int'
// CHECK-UAV-NEXT: AlwaysInlineAttr {{.*}} Implicit always_inline
@@ -176,7 +176,7 @@ RESOURCE<float> Buffer;
// CHECK-SRV-SAME{LITERAL}: [[hlsl::resource_class(SRV)]]
// CHECK-SAME{LITERAL}: [[hlsl::contained_type(element_type)]]
// CHECK-SAME: ' lvalue .__handle {{.*}}
-// CHECK-NEXT: CXXThisExpr {{.*}} '[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-NEXT: DeclRefExpr {{.*}} 'unsigned int' ParmVar {{.*}} 'Index' 'unsigned int'
// CHECK-NEXT: AlwaysInlineAttr {{.*}} Implicit always_inline
diff --git a/clang/test/AST/HLSL/cbuffer_and_namespaces.hlsl b/clang/test/AST/HLSL/cbuffer_and_namespaces.hlsl
index 2f08531..1c5e067 100644
--- a/clang/test/AST/HLSL/cbuffer_and_namespaces.hlsl
+++ b/clang/test/AST/HLSL/cbuffer_and_namespaces.hlsl
@@ -63,9 +63,9 @@ namespace NS2 {
// CHECK: HLSLBufferDecl {{.*}} line:[[# @LINE + 2]]:11 cbuffer CB2
// CHECK: HLSLResourceClassAttr {{.*}} Implicit CBuffer
cbuffer CB2 {
- // CHECK: VarDecl {{.*}} foo0 'hlsl_constant ::Foo':'hlsl_constant Foo'
+ // CHECK: VarDecl {{.*}} foo0 'hlsl_constant ::Foo'
::Foo foo0;
- // CHECK: VarDecl {{.*}} foo1 'hlsl_constant Foo':'hlsl_constant NS2::Foo'
+ // CHECK: VarDecl {{.*}} foo1 'hlsl_constant Foo'
Foo foo1;
// CHECK: VarDecl {{.*}} foo2 'hlsl_constant NS1::Foo'
NS1::Foo foo2;
diff --git a/clang/test/AST/HLSL/vector-constructors.hlsl b/clang/test/AST/HLSL/vector-constructors.hlsl
index 31d8dd0..fd43a7d 100644
--- a/clang/test/AST/HLSL/vector-constructors.hlsl
+++ b/clang/test/AST/HLSL/vector-constructors.hlsl
@@ -88,10 +88,10 @@ void entry() {
// CHECK-NEXT: InitListExpr
// CHECK-NEXT: ImplicitCastExpr {{.*}} 'float' <LValueToRValue>
// CHECK-NEXT: MemberExpr {{.*}} 'float' lvalue .f {{.*}}
-// CHECK-NEXT: DeclRefExpr {{.*}} 'struct S':'S' lvalue Var {{.*}} 's' 'struct S':'S'
+// CHECK-NEXT: DeclRefExpr {{.*}} 'struct S' lvalue Var {{.*}} 's' 'struct S'
// CHECK-NEXT: ImplicitCastExpr {{.*}} 'float' <LValueToRValue>
// CHECK-NEXT: MemberExpr {{.*}} 'float' lvalue .f {{.*}}
-// CHECK-NEXT: DeclRefExpr {{.*}} 'struct S':'S' lvalue Var {{.*}} 's' 'struct S':'S'
+// CHECK-NEXT: DeclRefExpr {{.*}} 'struct S' lvalue Var {{.*}} 's' 'struct S'
struct T {
operator float() const { return 1.0f; }
@@ -105,12 +105,12 @@ void entry() {
// CHECK-NEXT: CXXMemberCallExpr {{.*}} 'float'
// CHECK-NEXT: MemberExpr {{.*}} '<bound member function type>' .operator float {{.*}}
// CHECK-NEXT: ImplicitCastExpr {{.*}} 'const T' lvalue <NoOp>
-// CHECK-NEXT: DeclRefExpr {{.*}} 'struct T':'T' lvalue Var {{.*}} 't' 'struct T':'T'
+// CHECK-NEXT: DeclRefExpr {{.*}} 'struct T' lvalue Var {{.*}} 't' 'struct T'
// CHECK-NEXT: ImplicitCastExpr {{.*}} 'float' <UserDefinedConversion>
// CHECK-NEXT: CXXMemberCallExpr {{.*}} 'float'
// CHECK-NEXT: MemberExpr {{.*}} '<bound member function type>' .operator float {{.*}}
// CHECK-NEXT: ImplicitCastExpr {{.*}} 'const T' lvalue <NoOp>
-// CHECK-NEXT: DeclRefExpr {{.*}} 'struct T':'T' lvalue Var {{.*}} 't' 'struct T':'T'
+// CHECK-NEXT: DeclRefExpr {{.*}} 'struct T' lvalue Var {{.*}} 't' 'struct T'
typedef float2 second_level_of_typedefs;
second_level_of_typedefs foo6 = float2(1.0f, 2.0f);
diff --git a/clang/test/AST/arm-mfp8.cpp b/clang/test/AST/arm-mfp8.cpp
index 195c734..b1fa04a 100644
--- a/clang/test/AST/arm-mfp8.cpp
+++ b/clang/test/AST/arm-mfp8.cpp
@@ -49,7 +49,7 @@ public:
}
};
-//CHECK: | |-CXXRecordDecl {{.*}} referenced class C1
+//CHECK: | |-CXXRecordDecl {{.*}} class C1
//CHECK-NEXT: | |-FieldDecl {{.*}} f1c '__mfp8'
//CHECK-NEXT: | |-VarDecl {{.*}} f2c 'const __mfp8' static
//CHECK-NEXT: | |-FieldDecl {{.*}} f3c 'volatile __mfp8'
diff --git a/clang/test/AST/ast-dump-color.cpp b/clang/test/AST/ast-dump-color.cpp
index 87797f6..2e60e76 100644
--- a/clang/test/AST/ast-dump-color.cpp
+++ b/clang/test/AST/ast-dump-color.cpp
@@ -82,15 +82,15 @@ struct Invalid {
//CHECK: {{^}}[[Blue]]| | `-[[RESET]][[GREEN]]ParmVarDecl[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:33[[RESET]]> [[Yellow]]col:33[[RESET]] [[Green]]'const Mutex &'[[RESET]]{{$}}
//CHECK: {{^}}[[Blue]]| `-[[RESET]][[GREEN]]CXXConstructorDecl[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:33[[RESET]]> [[Yellow]]col:33[[RESET]] implicit constexpr[[CYAN]] Mutex[[RESET]] [[Green]]'void (Mutex &&)'[[RESET]] inline{{ .*$}}
//CHECK: {{^}}[[Blue]]| `-[[RESET]][[GREEN]]ParmVarDecl[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:33[[RESET]]> [[Yellow]]col:33[[RESET]] [[Green]]'Mutex &&'[[RESET]]{{$}}
-//CHECK: {{^}}[[Blue]]|-[[RESET]][[GREEN]]VarDecl[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:1[[RESET]], [[Yellow]]line:25:3[[RESET]]> [[Yellow]]col:3[[RESET]] referenced[[CYAN]] mu1[[RESET]] [[Green]]'class Mutex':'Mutex'[[RESET]]
-//CHECK: {{^}}[[Blue]]| `-[[RESET]][[MAGENTA]]CXXConstructExpr[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:3[[RESET]]> [[Green]]'class Mutex':'Mutex'[[RESET]][[Cyan]][[RESET]][[Cyan]][[RESET]] [[Green]]'void () noexcept'[[RESET]]{{$}}
-//CHECK: {{^}}[[Blue]]|-[[RESET]][[GREEN]]VarDecl[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]line:18:1[[RESET]], [[Yellow]]line:25:8[[RESET]]> [[Yellow]]col:8[[RESET]][[CYAN]] mu2[[RESET]] [[Green]]'class Mutex':'Mutex'[[RESET]]
-//CHECK: {{^}}[[Blue]]| `-[[RESET]][[MAGENTA]]CXXConstructExpr[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:8[[RESET]]> [[Green]]'class Mutex':'Mutex'[[RESET]][[Cyan]][[RESET]][[Cyan]][[RESET]] [[Green]]'void () noexcept'[[RESET]]{{$}}
+//CHECK: {{^}}[[Blue]]|-[[RESET]][[GREEN]]VarDecl[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:1[[RESET]], [[Yellow]]line:25:3[[RESET]]> [[Yellow]]col:3[[RESET]] referenced[[CYAN]] mu1[[RESET]] [[Green]]'class Mutex'[[RESET]]
+//CHECK: {{^}}[[Blue]]| `-[[RESET]][[MAGENTA]]CXXConstructExpr[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:3[[RESET]]> [[Green]]'class Mutex'[[RESET]][[Cyan]][[RESET]][[Cyan]][[RESET]] [[Green]]'void () noexcept'[[RESET]]{{$}}
+//CHECK: {{^}}[[Blue]]|-[[RESET]][[GREEN]]VarDecl[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]line:18:1[[RESET]], [[Yellow]]line:25:8[[RESET]]> [[Yellow]]col:8[[RESET]][[CYAN]] mu2[[RESET]] [[Green]]'class Mutex'[[RESET]]
+//CHECK: {{^}}[[Blue]]| `-[[RESET]][[MAGENTA]]CXXConstructExpr[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:8[[RESET]]> [[Green]]'class Mutex'[[RESET]][[Cyan]][[RESET]][[Cyan]][[RESET]] [[Green]]'void () noexcept'[[RESET]]{{$}}
//CHECK: {{^}}[[Blue]]|-[[RESET]][[GREEN]]VarDecl[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]line:26:1[[RESET]], [[Yellow]]col:5[[RESET]]> [[Yellow]]col:5[[RESET]][[CYAN]] TestExpr[[RESET]] [[Green]]'int'[[RESET]]
//CHECK: {{^}}[[Blue]]| `-[[RESET]][[BLUE]]GuardedByAttr[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:29[[RESET]], [[Yellow]]col:43[[RESET]]>{{$}}
-//CHECK: {{^}}[[Blue]]| `-[[RESET]][[MAGENTA]]DeclRefExpr[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:40[[RESET]]> [[Green]]'class Mutex':'Mutex'[[RESET]][[Cyan]] lvalue[[RESET]][[Cyan]][[RESET]] [[GREEN]]Var[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]][[CYAN]] 'mu1'[[RESET]] [[Green]]'class Mutex':'Mutex'[[RESET]] non_odr_use_unevaluated{{$}}
+//CHECK: {{^}}[[Blue]]| `-[[RESET]][[MAGENTA]]DeclRefExpr[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:40[[RESET]]> [[Green]]'class Mutex'[[RESET]][[Cyan]] lvalue[[RESET]][[Cyan]][[RESET]] [[GREEN]]Var[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]][[CYAN]] 'mu1'[[RESET]] [[Green]]'class Mutex'[[RESET]] non_odr_use_unevaluated{{$}}
//CHECK: {{^}}[[Blue]]|-[[RESET]][[GREEN]]CXXRecordDecl[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]line:28:1[[RESET]], [[Yellow]]line:30:1[[RESET]]> [[Yellow]]line:28:8[[RESET]] struct[[CYAN]] Invalid[[RESET]] definition
-//CHECK: {{^}}[[Blue]]| |-[[RESET]][[GREEN]]CXXRecordDecl[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:1[[RESET]], [[Yellow]]col:8[[RESET]]> [[Yellow]]col:8[[RESET]] implicit referenced struct[[CYAN]] Invalid[[RESET]]
+//CHECK: {{^}}[[Blue]]| |-[[RESET]][[GREEN]]CXXRecordDecl[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:1[[RESET]], [[Yellow]]col:8[[RESET]]> [[Yellow]]col:8[[RESET]] implicit struct[[CYAN]] Invalid[[RESET]]
//CHECK: {{^}}[[Blue]]| |-[[RESET]][[GREEN]]CXXConstructorDecl[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]line:29:3[[RESET]], [[Yellow]]col:42[[RESET]]> [[Yellow]]col:29[[RESET]] invalid[[CYAN]] Invalid[[RESET]] [[Green]]'void (int)'[[RESET]]
//CHECK: {{^}}[[Blue]]| | |-[[RESET]][[GREEN]]ParmVarDecl[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:37[[RESET]], [[Yellow]]<invalid sloc>[[RESET]]> [[Yellow]]col:42[[RESET]] invalid [[Green]]'int'[[RESET]]
//CHECK: {{^}}[[Blue]]| | `-[[RESET]][[BLUE]]NoInlineAttr[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:18[[RESET]]>
@@ -100,5 +100,5 @@ struct Invalid {
//CHECK: {{^}}[[Blue]]| | `-[[RESET]][[GREEN]]ParmVarDecl[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:8[[RESET]]> [[Yellow]]col:8[[RESET]] [[Green]]'const Invalid &'[[RESET]]
//CHECK: {{^}}[[Blue]]| `-[[RESET]][[GREEN]]CXXConstructorDecl[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:8[[RESET]]> [[Yellow]]col:8[[RESET]] implicit constexpr[[CYAN]] Invalid[[RESET]] [[Green]]'void (Invalid &&)'[[RESET]] inline default trivial noexcept-unevaluated 0x{{[0-9a-fA-F]*}}
//CHECK: {{^}}[[Blue]]| `-[[RESET]][[GREEN]]ParmVarDecl[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:8[[RESET]]> [[Yellow]]col:8[[RESET]] [[Green]]'Invalid &&'[[RESET]]
-//CHECK: {{^}}[[Blue]]`-[[RESET]][[GREEN]]VarDecl[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:1[[RESET]], [[Yellow]]line:30:3[[RESET]]> [[Yellow]]col:3[[RESET]][[CYAN]] Invalid[[RESET]] [[Green]]'struct Invalid':'Invalid'[[RESET]]
-//CHECK: {{^}}[[Blue]] `-[[RESET]][[MAGENTA]]CXXConstructExpr[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:3[[RESET]]> [[Green]]'struct Invalid':'Invalid'[[RESET]][[Cyan]][[RESET]][[Cyan]][[RESET]] [[Green]]'void () noexcept'[[RESET]]
+//CHECK: {{^}}[[Blue]]`-[[RESET]][[GREEN]]VarDecl[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:1[[RESET]], [[Yellow]]line:30:3[[RESET]]> [[Yellow]]col:3[[RESET]][[CYAN]] Invalid[[RESET]] [[Green]]'struct Invalid'[[RESET]]
+//CHECK: {{^}}[[Blue]] `-[[RESET]][[MAGENTA]]CXXConstructExpr[[RESET]][[Yellow]] 0x{{[0-9a-fA-F]*}}[[RESET]] <[[Yellow]]col:3[[RESET]]> [[Green]]'struct Invalid'[[RESET]][[Cyan]][[RESET]][[Cyan]][[RESET]] [[Green]]'void () noexcept'[[RESET]]
diff --git a/clang/test/AST/ast-dump-ctad-alias.cpp b/clang/test/AST/ast-dump-ctad-alias.cpp
index f39a4ce..781fb9f 100644
--- a/clang/test/AST/ast-dump-ctad-alias.cpp
+++ b/clang/test/AST/ast-dump-ctad-alias.cpp
@@ -39,15 +39,14 @@ Out2<double>::AInner t(1.0);
// CHECK-NEXT: | | |-DeducedTemplateSpecializationType {{.*}} 'Out2<double>::AInner' dependent
// CHECK-NEXT: | | | `-name: 'Out2<double>::AInner'
// CHECK-NEXT: | | | `-TypeAliasTemplateDecl {{.+}} AInner{{$}}
-// CHECK-NEXT: | | `-ElaboratedType {{.*}} 'Inner<Y>' sugar dependent
-// CHECK-NEXT: | | `-TemplateSpecializationType {{.*}} 'Inner<Y>' dependent
-// CHECK-NEXT: | | |-name: 'Inner':'Out<int>::Inner' qualified
-// CHECK-NEXT: | | | `-ClassTemplateDecl {{.+}} Inner{{$}}
-// CHECK-NEXT: | | `-TemplateArgument type 'Y'
-// CHECK-NEXT: | | `-SubstTemplateTypeParmType {{.*}} 'Y'
-// CHECK-NEXT: | | |-FunctionTemplate {{.*}} '<deduction guide for Inner>'
-// CHECK-NEXT: | | `-TemplateTypeParmType {{.*}} 'Y' dependent depth 1 index 0
-// CHECK-NEXT: | | `-TemplateTypeParm {{.*}} 'Y'
+// CHECK-NEXT: | | `-TemplateSpecializationType {{.*}} 'Inner<Y>' dependent
+// CHECK-NEXT: | | |-name: 'Inner':'Out<int>::Inner' qualified
+// CHECK-NEXT: | | | `-ClassTemplateDecl {{.+}} Inner{{$}}
+// CHECK-NEXT: | | `-TemplateArgument type 'Y'
+// CHECK-NEXT: | | `-SubstTemplateTypeParmType {{.*}} 'Y'
+// CHECK-NEXT: | | |-FunctionTemplate {{.*}} '<deduction guide for Inner>'
+// CHECK-NEXT: | | `-TemplateTypeParmType {{.*}} 'Y' dependent depth 1 index 0
+// CHECK-NEXT: | | `-TemplateTypeParm {{.*}} 'Y'
// CHECK-NEXT: | |-CXXDeductionGuideDecl {{.*}} <deduction guide for AInner> 'auto (Y) -> Inner<Y>'
// CHECK-NEXT: | | `-ParmVarDecl {{.*}} 'Y'
// CHECK-NEXT: | `-CXXDeductionGuideDecl {{.*}} used <deduction guide for AInner> 'auto (double) -> Inner<double>' implicit_instantiation
@@ -188,9 +187,9 @@ void foo() {
// CHECK-NEXT: | | | |-ImplicitConceptSpecializationDecl {{.*}}
// CHECK-NEXT: | | | | |-TemplateArgument type 'type-parameter-0-2'
// CHECK-NEXT: | | | | | `-TemplateTypeParmType {{.*}} 'type-parameter-0-2' dependent depth 0 index 2
-// CHECK-NEXT: | | | | `-TemplateArgument pack '<Packs<type-parameter-0-1...>>'
-// CHECK-NEXT: | | | | `-TemplateArgument type 'Packs<type-parameter-0-1...>'
-// CHECK-NEXT: | | | | `-TemplateSpecializationType {{.*}} 'Packs<type-parameter-0-1...>' dependent
+// CHECK-NEXT: | | | | `-TemplateArgument pack '<GH124715::Packs<type-parameter-0-1...>>'
+// CHECK-NEXT: | | | | `-TemplateArgument type 'GH124715::Packs<type-parameter-0-1...>'
+// CHECK-NEXT: | | | | `-TemplateSpecializationType {{.*}} 'GH124715::Packs<type-parameter-0-1...>' dependent
// CHECK-NEXT: | | | | |-name: 'GH124715::Packs'
// CHECK-NEXT: | | | | | `-ClassTemplateDecl {{.*}} Packs
// CHECK-NEXT: | | | | `-TemplateArgument pack '<type-parameter-0-1...>'
diff --git a/clang/test/AST/ast-dump-cxx2b-deducing-this.cpp b/clang/test/AST/ast-dump-cxx2b-deducing-this.cpp
index fc86aeb..09a274a 100644
--- a/clang/test/AST/ast-dump-cxx2b-deducing-this.cpp
+++ b/clang/test/AST/ast-dump-cxx2b-deducing-this.cpp
@@ -33,5 +33,5 @@ struct B {
operator A(this B);
};
A a = A(B{});
-// CHECK: CallExpr 0x{{[^ ]*}} <col:9, col:11> 'A':'GH130272::A'
+// CHECK: CallExpr 0x{{[^ ]*}} <col:9, col:11> 'A'
}
diff --git a/clang/test/AST/ast-dump-decl-json.c b/clang/test/AST/ast-dump-decl-json.c
index ec2d75b..b84ddf9 100644
--- a/clang/test/AST/ast-dump-decl-json.c
+++ b/clang/test/AST/ast-dump-decl-json.c
@@ -585,7 +585,6 @@ void testParmVarDecl(int TestParmVarDecl);
// CHECK-NEXT: },
// CHECK-NEXT: "name": "e",
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "desugaredQualType": "enum TestEnumDeclAnon::(unnamed at {{.*}}:31:3)",
// CHECK-NEXT: "qualType": "enum (unnamed enum at {{.*}}:31:3)"
// CHECK-NEXT: }
// CHECK-NEXT: }
@@ -777,7 +776,6 @@ void testParmVarDecl(int TestParmVarDecl);
// CHECK-NEXT: },
// CHECK-NEXT: "name": "testRecordDeclAnon1",
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "desugaredQualType": "struct TestRecordDeclAnon1::(unnamed at {{.*}}:46:3)",
// CHECK-NEXT: "qualType": "struct (unnamed struct at {{.*}}:46:3)"
// CHECK-NEXT: }
// CHECK-NEXT: }
@@ -1204,7 +1202,6 @@ void testParmVarDecl(int TestParmVarDecl);
// CHECK-NEXT: },
// CHECK-NEXT: "name": "y",
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "desugaredQualType": "enum (unnamed at {{.*}}:69:29)",
// CHECK-NEXT: "qualType": "enum (unnamed enum at {{.*}}:69:29)"
// CHECK-NEXT: }
// CHECK-NEXT: },
diff --git a/clang/test/AST/ast-dump-decl.cpp b/clang/test/AST/ast-dump-decl.cpp
index 08d8fba..dca0e9b 100644
--- a/clang/test/AST/ast-dump-decl.cpp
+++ b/clang/test/AST/ast-dump-decl.cpp
@@ -35,9 +35,9 @@ namespace testVarDeclNRVO {
// CHECK: FunctionDecl{{.*}} TestFuncNRVO 'A ()'
// CHECK-NEXT: `-CompoundStmt
// CHECK-NEXT: |-DeclStmt
-// CHECK-NEXT: | `-VarDecl{{.*}} TestVarDeclNRVO 'A':'testVarDeclNRVO::A' nrvo callinit
+// CHECK-NEXT: | `-VarDecl{{.*}} TestVarDeclNRVO 'A' nrvo callinit
// CHECK-NEXT: | `-CXXConstructExpr
-// CHECK-NEXT: `-ReturnStmt{{.*}} nrvo_candidate(Var {{.*}} 'TestVarDeclNRVO' 'A':'testVarDeclNRVO::A')
+// CHECK-NEXT: `-ReturnStmt{{.*}} nrvo_candidate(Var {{.*}} 'TestVarDeclNRVO' 'A')
void testParmVarDeclInit(int TestParmVarDeclInit = 0);
// CHECK: ParmVarDecl{{.*}} TestParmVarDeclInit 'int'
@@ -131,8 +131,8 @@ namespace testCXXRecordDecl {
// CHECK-NEXT: CopyAssignment simple non_trivial has_const_param
// CHECK-NEXT: MoveAssignment exists simple non_trivial
// CHECK-NEXT: Destructor simple irrelevant trivial
-// CHECK-NEXT: virtual private 'A':'testCXXRecordDecl::A'
-// CHECK-NEXT: public 'B':'testCXXRecordDecl::B'
+// CHECK-NEXT: virtual private 'A'
+// CHECK-NEXT: public 'B'
// CHECK-NEXT: CXXRecordDecl{{.*}} class TestCXXRecordDecl
// CHECK-NEXT: FieldDecl
@@ -269,7 +269,7 @@ namespace testFunctionTemplateDecl {
// CHECK-NEXT: |-TemplateArgument type 'testFunctionTemplateDecl::B'
// CHECK-NEXT: | `-RecordType 0{{.+}} 'testFunctionTemplateDecl::B'
// CHECK-NEXT: | `-CXXRecord 0x{{.+}} 'B'
- // CHECK-NEXT: `-ParmVarDecl 0x{{.+}} <col:40> col:41 'B':'testFunctionTemplateDecl::B'
+ // CHECK-NEXT: `-ParmVarDecl 0x{{.+}} <col:40> col:41 'B'
namespace testClassTemplateDecl {
@@ -330,8 +330,8 @@ namespace testClassTemplateDecl {
// CHECK-NEXT: | | `-Destructor irrelevant non_trivial user_declared{{$}}
// CHECK-NEXT: | |-CXXRecordDecl 0x{{.+}} <col:24, col:30> col:30 implicit referenced class TestClassTemplate{{$}}
// CHECK-NEXT: | |-AccessSpecDecl 0x{{.+}} <line:[[@LINE-50]]:3, col:9> col:3 public{{$}}
-// CHECK-NEXT: | |-CXXConstructorDecl 0x[[#%x,TEMPLATE_CONSTRUCTOR_DECL:]] <line:[[@LINE-50]]:5, col:23> col:5 TestClassTemplate<T> 'void ()'{{$}}
-// CHECK-NEXT: | |-CXXDestructorDecl 0x[[#%x,TEMPLATE_DESTRUCTOR_DECL:]] <line:[[@LINE-50]]:5, col:24> col:5 ~TestClassTemplate<T> 'void ()' not_selected{{$}}
+// CHECK-NEXT: | |-CXXConstructorDecl 0x[[#%x,TEMPLATE_CONSTRUCTOR_DECL:]] <line:[[@LINE-50]]:5, col:23> col:5 testClassTemplateDecl::TestClassTemplate<T> 'void ()'{{$}}
+// CHECK-NEXT: | |-CXXDestructorDecl 0x[[#%x,TEMPLATE_DESTRUCTOR_DECL:]] <line:[[@LINE-50]]:5, col:24> col:5 ~testClassTemplateDecl::TestClassTemplate<T> 'void ()' not_selected{{$}}
// CHECK-NEXT: | |-CXXMethodDecl 0x[[#%x,TEMPLATE_METHOD_DECL:]] <line:[[@LINE-50]]:5, col:11> col:9 j 'int ()'{{$}}
// CHECK-NEXT: | `-FieldDecl 0x{{.+}} <line:[[@LINE-50]]:5, col:9> col:9 i 'int'{{$}}
// CHECK-NEXT: |-ClassTemplateSpecializationDecl 0x{{.+}} <line:[[@LINE-56]]:3, line:[[@LINE-50]]:3> line:[[@LINE-56]]:30 class TestClassTemplate definition implicit_instantiation{{$}}
@@ -343,7 +343,7 @@ namespace testClassTemplateDecl {
// CHECK-NEXT: | | |-MoveAssignment{{$}}
// CHECK-NEXT: | | `-Destructor non_trivial user_declared{{$}}
// CHECK-NEXT: | |-TemplateArgument type 'testClassTemplateDecl::A'{{$}}
-// CHECK-NEXT: | | `-RecordType 0{{.+}} 'testClassTemplateDecl::A'{{$}}
+// CHECK-NEXT: | | `-RecordType 0{{.+}} 'testClassTemplateDecl::A' canonical{{$}}
// CHECK-NEXT: | | `-CXXRecord 0x{{.+}} 'A'{{$}}
// CHECK-NEXT: | |-CXXRecordDecl 0x{{.+}} <col:24, col:30> col:30 implicit class TestClassTemplate{{$}}
// CHECK-NEXT: | |-AccessSpecDecl 0x{{.+}} <line:[[@LINE-67]]:3, col:9> col:3 public{{$}}
@@ -366,7 +366,7 @@ namespace testClassTemplateDecl {
// CHECK-NEXT: | |-MoveAssignment exists simple trivial needs_implicit{{$}}
// CHECK-NEXT: | `-Destructor simple irrelevant trivial needs_implicit{{$}}
// CHECK-NEXT: |-TemplateArgument type 'testClassTemplateDecl::B'{{$}}
-// CHECK-NEXT: | `-RecordType 0{{.+}} 'testClassTemplateDecl::B'{{$}}
+// CHECK-NEXT: | `-RecordType 0{{.+}} 'testClassTemplateDecl::B' canonical{{$}}
// CHECK-NEXT: | `-CXXRecord 0x{{.+}} 'B'{{$}}
// CHECK-NEXT: |-CXXRecordDecl 0x{{.+}} <col:14, col:20> col:20 implicit class TestClassTemplate{{$}}
// CHECK-NEXT: `-FieldDecl 0x{{.+}} <line:[[@LINE-78]]:5, col:9> col:9 j 'int'{{$}}
@@ -380,7 +380,7 @@ namespace testClassTemplateDecl {
// CHECK-NEXT: | |-MoveAssignment{{$}}
// CHECK-NEXT: | `-Destructor non_trivial user_declared{{$}}
// CHECK-NEXT: |-TemplateArgument type 'testClassTemplateDecl::C'{{$}}
-// CHECK-NEXT: | `-RecordType 0{{.+}} 'testClassTemplateDecl::C'{{$}}
+// CHECK-NEXT: | `-RecordType 0{{.+}} 'testClassTemplateDecl::C' canonical{{$}}
// CHECK-NEXT: | `-CXXRecord 0x{{.+}} 'C'{{$}}
// CHECK-NEXT: |-CXXRecordDecl 0x{{.+}} <line:[[@LINE-104]]:24, col:30> col:30 implicit class TestClassTemplate{{$}}
// CHECK-NEXT: |-AccessSpecDecl 0x{{.+}} <line:[[@LINE-104]]:3, col:9> col:3 public{{$}}
@@ -398,7 +398,7 @@ namespace testClassTemplateDecl {
// CHECK-NEXT: | |-MoveAssignment{{$}}
// CHECK-NEXT: | `-Destructor non_trivial user_declared{{$}}
// CHECK-NEXT: |-TemplateArgument type 'testClassTemplateDecl::D'{{$}}
-// CHECK-NEXT: | `-RecordType 0{{.+}} 'testClassTemplateDecl::D'{{$}}
+// CHECK-NEXT: | `-RecordType 0{{.+}} 'testClassTemplateDecl::D' canonical{{$}}
// CHECK-NEXT: | `-CXXRecord 0x{{.+}} 'D'{{$}}
// CHECK-NEXT: |-CXXRecordDecl 0x{{.+}} <line:[[@LINE-122]]:24, col:30> col:30 implicit class TestClassTemplate{{$}}
// CHECK-NEXT: |-AccessSpecDecl 0x{{.+}} <line:[[@LINE-122]]:3, col:9> col:3 public{{$}}
@@ -432,7 +432,7 @@ namespace testClassTemplateDecl {
// CHECK-NEXT: |-TemplateArgument type 'type-parameter-0-0'{{$}}
// CHECK-NEXT: | `-TemplateTypeParmType 0x{{.+}} 'type-parameter-0-0' dependent depth 0 index 0{{$}}
// CHECK-NEXT: |-TemplateArgument type 'testClassTemplateDecl::A'{{$}}
-// CHECK-NEXT: | `-RecordType 0x{{.+}} 'testClassTemplateDecl::A'{{$}}
+// CHECK-NEXT: | `-RecordType 0x{{.+}} 'testClassTemplateDecl::A' canonical{{$}}
// CHECK-NEXT: | `-CXXRecord 0x{{.+}} 'A'{{$}}
// CHECK-NEXT: |-TemplateTypeParmDecl 0x{{.+}} <col:12, col:21> col:21 referenced typename depth 0 index 0 T1{{$}}
// CHECK-NEXT: |-CXXRecordDecl 0x{{.+}} <col:25, col:31> col:31 implicit class TestClassTemplatePartial{{$}}
@@ -605,7 +605,7 @@ namespace testCanonicalTemplate {
// CHECK-NEXT: | `-ParmVarDecl 0x{{.*}} <col:50> col:51 'T'{{$}}
// CHECK-NEXT: `-FunctionDecl 0x{{.*}} <line:[[@LINE-6]]:24, col:51> col:29 used TestFunctionTemplate 'void (testCanonicalTemplate::A)' implicit_instantiation{{$}}
// CHECK-NEXT: |-TemplateArgument type 'testCanonicalTemplate::A'{{$}}
- // CHECK-NEXT: | `-RecordType 0x{{.+}} 'testCanonicalTemplate::A'{{$}}
+ // CHECK-NEXT: | `-RecordType 0x{{.+}} 'testCanonicalTemplate::A' canonical{{$}}
// CHECK-NEXT: | `-CXXRecord 0x{{.+}} 'A'{{$}}
// CHECK-NEXT: `-ParmVarDecl 0x{{.*}} <col:50> col:51 'testCanonicalTemplate::A'{{$}}
@@ -644,7 +644,7 @@ namespace testCanonicalTemplate {
// CHECK-NEXT: | |-MoveAssignment exists simple trivial needs_implicit{{$}}
// CHECK-NEXT: | `-Destructor simple irrelevant trivial needs_implicit{{$}}
// CHECK-NEXT: |-TemplateArgument type 'testCanonicalTemplate::A'{{$}}
- // CHECK-NEXT: | `-RecordType 0x{{.+}} 'testCanonicalTemplate::A'{{$}}
+ // CHECK-NEXT: | `-RecordType 0x{{.+}} 'testCanonicalTemplate::A' canonical{{$}}
// CHECK-NEXT: | `-CXXRecord 0x{{.+}} 'A'{{$}}
// CHECK-NEXT: |-CXXRecordDecl 0x{{.+}} <col:25, col:31> col:31 implicit class TestClassTemplate{{$}}
// CHECK-NEXT: |-FriendDecl 0x{{.+}} <line:[[@LINE-30]]:5, col:40> col:40{{$}}
@@ -677,7 +677,7 @@ namespace testCanonicalTemplate {
// CHECK-NEXT: | |-MoveAssignment exists simple trivial needs_implicit{{$}}
// CHECK-NEXT: | `-Destructor simple irrelevant trivial needs_implicit{{$}}
// CHECK-NEXT: |-TemplateArgument type 'testCanonicalTemplate::A'{{$}}
- // CHECK-NEXT: | `-RecordType 0x{{.+}} 'testCanonicalTemplate::A'{{$}}
+ // CHECK-NEXT: | `-RecordType 0x{{.+}} 'testCanonicalTemplate::A' canonical{{$}}
// CHECK-NEXT: | `-CXXRecord 0x{{.+}} 'A'{{$}}
// CHECK-NEXT: |-CXXRecordDecl 0x{{.+}} <col:25, col:31> col:31 implicit class TestClassTemplate2{{$}}
// CHECK-NEXT: |-CXXConstructorDecl 0x{{.+}} <col:31> col:31 implicit used constexpr TestClassTemplate2 'void () noexcept' inline default trivial{{$}}
@@ -721,7 +721,7 @@ namespace testCanonicalTemplate {
// CHECK-NEXT: |-TemplateTypeParmDecl 0x{{.+}} <col:16, col:25> col:25 referenced typename depth 0 index 0 T{{$}}
// CHECK-NEXT: |-VarDecl 0x{{.+}} <col:28, col:43> col:43 TestVarTemplate 'const T' static{{$}}
// CHECK-NEXT: |-VarTemplateSpecializationDecl 0x{{.+}} parent 0x{{.+}} prev 0x{{.+}} <line:[[@LINE-12]]:3, line:[[@LINE-11]]:34> col:14 referenced TestVarTemplate 'const int' implicit_instantiation cinit{{$}}
- // CHECK-NEXT: | |-NestedNameSpecifier TypeSpec 'testCanonicalTemplate::S'{{$}}
+ // CHECK-NEXT: | |-NestedNameSpecifier TypeSpec 'S'{{$}}
// CHECK-NEXT: | |-TemplateArgument type 'int'{{$}}
// CHECK-NEXT: | | `-BuiltinType 0x{{.+}} 'int'{{$}}
// CHECK-NEXT: | `-InitListExpr 0x{{.+}} <col:32, col:34> 'int'{{$}}
@@ -735,13 +735,13 @@ namespace testCanonicalTemplate {
// CHECK: VarTemplateDecl 0x{{.+}} parent 0x{{.+}} prev 0x{{.+}} <{{.+}}:[[@LINE-24]]:3, line:[[@LINE-23]]:34> col:14 TestVarTemplate{{$}}
// CHECK-NEXT: |-TemplateTypeParmDecl 0x{{.+}} <line:[[@LINE-25]]:12, col:21> col:21 referenced typename depth 0 index 0 T{{$}}
// CHECK-NEXT: |-VarDecl 0x{{.+}} parent 0x{{.+}} prev 0x{{.+}} <line:[[@LINE-25]]:3, col:34> col:14 TestVarTemplate 'const T' cinit{{$}}
- // CHECK-NEXT: | |-NestedNameSpecifier TypeSpec 'testCanonicalTemplate::S'{{$}}
+ // CHECK-NEXT: | |-NestedNameSpecifier TypeSpec 'S'{{$}}
// CHECK-NEXT: | `-InitListExpr 0x{{.+}} <col:32, col:34> 'void'{{$}}
// CHECK-NEXT: |-VarTemplateSpecialization 0x{{.+}} 'TestVarTemplate' 'const int'{{$}}
// CHECK-NEXT: `-VarTemplateSpecialization 0x{{.+}} 'TestVarTemplate' 'const int'{{$}}
// CHECK: VarTemplateSpecializationDecl 0x{{.+}} parent 0x{{.+}} prev 0x{{.+}} <{{.+}}:[[@LINE-32]]:3, line:[[@LINE-31]]:34> col:14 referenced TestVarTemplate 'const int' implicit_instantiation cinit{{$}}
- // CHECK-NEXT: |-NestedNameSpecifier TypeSpec 'testCanonicalTemplate::S'{{$}}
+ // CHECK-NEXT: |-NestedNameSpecifier TypeSpec 'S'{{$}}
// CHECK-NEXT: |-TemplateArgument type 'int'{{$}}
// CHECK-NEXT: | `-BuiltinType 0x{{.+}} 'int'{{$}}
// CHECK-NEXT: `-InitListExpr 0x{{.+}} <col:32, col:34> 'int'{{$}}
@@ -901,7 +901,7 @@ template<typename T> class TestFriendDecl {
// CHECK: CXXRecord{{.*}} TestFriendDecl
// CHECK-NEXT: FriendDecl
// CHECK-NEXT: FunctionDecl{{.*}} foo
-// CHECK-NEXT: FriendDecl{{.*}} 'class A':'A'
+// CHECK-NEXT: FriendDecl{{.*}} 'class A'
// CHECK-NEXT: CXXRecordDecl{{.*}} class A
// CHECK-NEXT: FriendDecl{{.*}} 'T'
diff --git a/clang/test/AST/ast-dump-expr-json.cpp b/clang/test/AST/ast-dump-expr-json.cpp
index 11026c9..6293f8c 100644
--- a/clang/test/AST/ast-dump-expr-json.cpp
+++ b/clang/test/AST/ast-dump-expr-json.cpp
@@ -7962,7 +7962,6 @@ void TestNonADLCall3() {
// CHECK-NEXT: }
// CHECK-NEXT: },
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "desugaredQualType": "NS::X",
// CHECK-NEXT: "qualType": "X"
// CHECK-NEXT: },
// CHECK-NEXT: "valueCategory": "prvalue",
@@ -7988,7 +7987,6 @@ void TestNonADLCall3() {
// CHECK-NEXT: }
// CHECK-NEXT: },
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "desugaredQualType": "const NS::X",
// CHECK-NEXT: "qualType": "const X"
// CHECK-NEXT: },
// CHECK-NEXT: "valueCategory": "lvalue",
@@ -8142,7 +8140,6 @@ void TestNonADLCall3() {
// CHECK-NEXT: }
// CHECK-NEXT: },
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "desugaredQualType": "const NS::X",
// CHECK-NEXT: "qualType": "const X"
// CHECK-NEXT: },
// CHECK-NEXT: "valueCategory": "lvalue",
@@ -8395,7 +8392,6 @@ void TestNonADLCall3() {
// CHECK-NEXT: }
// CHECK-NEXT: },
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "desugaredQualType": "NS::X",
// CHECK-NEXT: "qualType": "X"
// CHECK-NEXT: },
// CHECK-NEXT: "valueCategory": "prvalue",
@@ -8421,7 +8417,6 @@ void TestNonADLCall3() {
// CHECK-NEXT: }
// CHECK-NEXT: },
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "desugaredQualType": "const NS::X",
// CHECK-NEXT: "qualType": "const X"
// CHECK-NEXT: },
// CHECK-NEXT: "valueCategory": "lvalue",
@@ -8720,7 +8715,6 @@ void TestNonADLCall3() {
// CHECK-NEXT: }
// CHECK-NEXT: },
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "desugaredQualType": "NS::X",
// CHECK-NEXT: "qualType": "X"
// CHECK-NEXT: },
// CHECK-NEXT: "valueCategory": "prvalue",
@@ -8746,7 +8740,6 @@ void TestNonADLCall3() {
// CHECK-NEXT: }
// CHECK-NEXT: },
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "desugaredQualType": "const NS::X",
// CHECK-NEXT: "qualType": "const X"
// CHECK-NEXT: },
// CHECK-NEXT: "valueCategory": "lvalue",
@@ -8900,7 +8893,6 @@ void TestNonADLCall3() {
// CHECK-NEXT: }
// CHECK-NEXT: },
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "desugaredQualType": "const NS::X",
// CHECK-NEXT: "qualType": "const X"
// CHECK-NEXT: },
// CHECK-NEXT: "valueCategory": "lvalue",
@@ -9032,7 +9024,6 @@ void TestNonADLCall3() {
// CHECK-NEXT: "isUsed": true,
// CHECK-NEXT: "name": "x",
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "desugaredQualType": "NS::X",
// CHECK-NEXT: "qualType": "X"
// CHECK-NEXT: },
// CHECK-NEXT: "init": "call",
@@ -9053,7 +9044,6 @@ void TestNonADLCall3() {
// CHECK-NEXT: }
// CHECK-NEXT: },
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "desugaredQualType": "NS::X",
// CHECK-NEXT: "qualType": "X"
// CHECK-NEXT: },
// CHECK-NEXT: "valueCategory": "prvalue",
@@ -9155,7 +9145,6 @@ void TestNonADLCall3() {
// CHECK-NEXT: }
// CHECK-NEXT: },
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "desugaredQualType": "NS::X",
// CHECK-NEXT: "qualType": "X"
// CHECK-NEXT: },
// CHECK-NEXT: "valueCategory": "prvalue",
@@ -9181,7 +9170,6 @@ void TestNonADLCall3() {
// CHECK-NEXT: }
// CHECK-NEXT: },
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "desugaredQualType": "const NS::X",
// CHECK-NEXT: "qualType": "const X"
// CHECK-NEXT: },
// CHECK-NEXT: "valueCategory": "lvalue",
@@ -9203,7 +9191,6 @@ void TestNonADLCall3() {
// CHECK-NEXT: }
// CHECK-NEXT: },
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "desugaredQualType": "NS::X",
// CHECK-NEXT: "qualType": "X"
// CHECK-NEXT: },
// CHECK-NEXT: "valueCategory": "lvalue",
@@ -9212,7 +9199,6 @@ void TestNonADLCall3() {
// CHECK-NEXT: "kind": "VarDecl",
// CHECK-NEXT: "name": "x",
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "desugaredQualType": "NS::X",
// CHECK-NEXT: "qualType": "X"
// CHECK-NEXT: }
// CHECK-NEXT: }
diff --git a/clang/test/AST/ast-dump-expr.cpp b/clang/test/AST/ast-dump-expr.cpp
index 6fd429d..7a686b2 100644
--- a/clang/test/AST/ast-dump-expr.cpp
+++ b/clang/test/AST/ast-dump-expr.cpp
@@ -219,13 +219,10 @@ void PostfixExpressions(S a, S *p, U<int> *r) {
// CHECK-NEXT: MemberExpr 0x{{[^ ]*}} <col:3, col:5> '<bound member function type>' .~S 0x{{[^ ]*}}
// CHECK-NEXT: DeclRefExpr 0x{{[^ ]*}} <col:3> 'S' lvalue ParmVar 0x{{[^ ]*}} 'a' 'S'
- // FIXME: similarly, there is no way to distinguish the construct below from
- // the p->~S() case.
p->::S::~S();
// CHECK: CXXMemberCallExpr 0x{{[^ ]*}} <line:[[@LINE-1]]:3, col:14> 'void'
// CHECK-NEXT: MemberExpr 0x{{[^ ]*}} <col:3, col:12> '<bound member function type>' ->~S 0x{{[^ ]*}}
- // CHECK-NEXT: NestedNameSpecifier TypeSpec 'S'
- // CHECK-NEXT: NestedNameSpecifier Global
+ // CHECK-NEXT: NestedNameSpecifier TypeSpec '::S'
// CHECK-NEXT: ImplicitCastExpr
// CHECK-NEXT: DeclRefExpr 0x{{[^ ]*}} <col:3> 'S *' lvalue ParmVar 0x{{[^ ]*}} 'p' 'S *'
@@ -597,5 +594,5 @@ struct S {
void f() {
S(S(0, 1));
}
-// CHECK: CXXTemporaryObjectExpr {{.*}} <col:5, col:11> 'S':'GH143711::S' 'void (int, int)'
+// CHECK: CXXTemporaryObjectExpr {{.*}} <col:5, col:11> 'S' 'void (int, int)'
}
diff --git a/clang/test/AST/ast-dump-for-range-lifetime.cpp b/clang/test/AST/ast-dump-for-range-lifetime.cpp
index ee046be..c330342 100644
--- a/clang/test/AST/ast-dump-for-range-lifetime.cpp
+++ b/clang/test/AST/ast-dump-for-range-lifetime.cpp
@@ -24,14 +24,14 @@ void test1() {
// CHECK-NEXT: | |-<<<NULL>>>
// CHECK-NEXT: | |-DeclStmt {{.*}}
// CHECK-NEXT: | | `-VarDecl {{.*}} implicit used __range1 'const A &' cinit
- // CHECK-NEXT: | | `-ExprWithCleanups {{.*}} 'const A':'const P2718R0::A' lvalue
- // CHECK-NEXT: | | `-CallExpr {{.*}} 'const A':'const P2718R0::A' lvalue
+ // CHECK-NEXT: | | `-ExprWithCleanups {{.*}} 'const A' lvalue
+ // CHECK-NEXT: | | `-CallExpr {{.*}} 'const A' lvalue
// CHECK-NEXT: | | |-ImplicitCastExpr {{.*}} 'const A &(*)(const A &)' <FunctionToPointerDecay>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} 'const A &(const A &)' lvalue Function {{.*}} 'f1' 'const A &(const A &)'
- // CHECK-NEXT: | | `-MaterializeTemporaryExpr {{.*}} 'const A':'const P2718R0::A' lvalue extended by Var {{.*}} '__range1' 'const A &'
- // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} 'const A':'const P2718R0::A' <NoOp>
- // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'A':'P2718R0::A' (CXXTemporary {{.*}})
- // CHECK-NEXT: | | `-CallExpr {{.*}} 'A':'P2718R0::A'
+ // CHECK-NEXT: | | `-MaterializeTemporaryExpr {{.*}} 'const A' lvalue extended by Var {{.*}} '__range1' 'const A &'
+ // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} 'const A' <NoOp>
+ // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'A' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | | `-CallExpr {{.*}} 'A'
// CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} 'A (*)()' <FunctionToPointerDecay>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} 'A ()' lvalue Function {{.*}} 'g' 'A ()'
for (auto e : f1(g()))
@@ -56,11 +56,11 @@ void test2() {
// CHECK-NEXT: | | `-CallExpr {{.*}} 'const A *'
// CHECK-NEXT: | | |-ImplicitCastExpr {{.*}} 'const A *(*)(const A &)' <FunctionToPointerDecay>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} 'const A *(const A &)' lvalue Function {{.*}} 'g' 'const A *(const A &)'
- // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} 'const A':'const P2718R0::A' lvalue <DerivedToBase (A)>
- // CHECK-NEXT: | | `-MaterializeTemporaryExpr {{.*}} 'const B':'const P2718R0::B' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
- // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} 'const B':'const P2718R0::B' <NoOp>
- // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'B':'P2718R0::B' (CXXTemporary {{.*}})
- // CHECK-NEXT: | | `-CXXTemporaryObjectExpr {{.*}} 'B':'P2718R0::B' 'void () noexcept(false)' zeroing
+ // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} 'const A' lvalue <DerivedToBase (A)>
+ // CHECK-NEXT: | | `-MaterializeTemporaryExpr {{.*}} 'const B' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
+ // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} 'const B' <NoOp>
+ // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'B' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | | `-CXXTemporaryObjectExpr {{.*}} 'B' 'void () noexcept(false)' zeroing
for (auto e : f(g(B())))
bar(e);
}
@@ -82,9 +82,9 @@ void test3() {
// CHECK-NEXT: | `-ExprWithCleanups {{.*}} 'int[3]' lvalue
// CHECK-NEXT: | `-BinaryOperator {{.*}} 'int[3]' lvalue ','
// CHECK-NEXT: | |-CXXStaticCastExpr {{.*}} 'void' static_cast<void> <ToVoid>
- // CHECK-NEXT: | | `-MaterializeTemporaryExpr {{.*}} 'LockGuard':'P2718R0::LockGuard' xvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
- // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'LockGuard':'P2718R0::LockGuard' (CXXTemporary {{.*}})
- // CHECK-NEXT: | | `-CXXTemporaryObjectExpr {{.*}} 'LockGuard':'P2718R0::LockGuard' 'void ()'
+ // CHECK-NEXT: | | `-MaterializeTemporaryExpr {{.*}} 'LockGuard' xvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
+ // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'LockGuard' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | | `-CXXTemporaryObjectExpr {{.*}} 'LockGuard' 'void ()'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} 'int[3]' lvalue Var {{.*}} 'v' 'int[3]'
for ([[maybe_unused]] int x : static_cast<void>(LockGuard()), v)
LockGuard guard;
@@ -96,9 +96,9 @@ void test3() {
// CHECK-NEXT: | `-ExprWithCleanups {{.*}} 'int[3]' lvalue
// CHECK-NEXT: | `-BinaryOperator {{.*}} 'int[3]' lvalue ','
// CHECK-NEXT: | |-CStyleCastExpr {{.*}} 'void' <ToVoid>
- // CHECK-NEXT: | | `-MaterializeTemporaryExpr {{.*}} 'LockGuard':'P2718R0::LockGuard' xvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
- // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'LockGuard':'P2718R0::LockGuard' (CXXTemporary {{.*}})
- // CHECK-NEXT: | | `-CXXTemporaryObjectExpr {{.*}} 'LockGuard':'P2718R0::LockGuard' 'void ()'
+ // CHECK-NEXT: | | `-MaterializeTemporaryExpr {{.*}} 'LockGuard' xvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
+ // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'LockGuard' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | | `-CXXTemporaryObjectExpr {{.*}} 'LockGuard' 'void ()'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} 'int[3]' lvalue Var {{.*}} 'v' 'int[3]'
for ([[maybe_unused]] int x : (void)LockGuard(), v)
LockGuard guard;
@@ -109,9 +109,9 @@ void test3() {
// CHECK-NEXT: | `-VarDecl {{.*}} implicit used __range1 'int (&)[3]' cinit
// CHECK-NEXT: | `-ExprWithCleanups {{.*}} 'int[3]' lvalue
// CHECK-NEXT: | `-BinaryOperator {{.*}} 'int[3]' lvalue ','
- // CHECK-NEXT: | |-MaterializeTemporaryExpr {{.*}} 'LockGuard':'P2718R0::LockGuard' xvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
- // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'LockGuard':'P2718R0::LockGuard' (CXXTemporary {{.*}})
- // CHECK-NEXT: | | `-CXXTemporaryObjectExpr {{.*}} 'LockGuard':'P2718R0::LockGuard' 'void ()'
+ // CHECK-NEXT: | |-MaterializeTemporaryExpr {{.*}} 'LockGuard' xvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
+ // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'LockGuard' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | | `-CXXTemporaryObjectExpr {{.*}} 'LockGuard' 'void ()'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} 'int[3]' lvalue Var {{.*}} 'v' 'int[3]'
for ([[maybe_unused]] int x : LockGuard(), v)
LockGuard guard;
@@ -130,12 +130,12 @@ void test4() {
// CHECK-NEXT: | `-CallExpr {{.*}} 'int[3]' lvalue
// CHECK-NEXT: | |-ImplicitCastExpr {{.*}} 'int (&(*)(const A &))[3]' <FunctionToPointerDecay>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} 'int (&(const A &))[3]' lvalue Function {{.*}} 'default_arg_fn' 'int (&(const A &))[3]'
- // CHECK-NEXT: | `-CXXDefaultArgExpr {{.*}} <<invalid sloc>> 'const A':'const P2718R0::A' lvalue has rewritten init
- // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'const A':'const P2718R0::A' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
- // CHECK-NEXT: | `-ImplicitCastExpr {{.*}} 'const A':'const P2718R0::A' <NoOp>
- // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A':'P2718R0::A' (CXXTemporary {{.*}})
- // CHECK-NEXT: | `-CXXTemporaryObjectExpr {{.*}} 'A':'P2718R0::A' 'void ()'
- for (auto e : default_arg_fn())
+ // CHECK-NEXT: | `-CXXDefaultArgExpr {{.*}} <<invalid sloc>> 'const A' lvalue has rewritten init
+ // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'const A' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
+ // CHECK-NEXT: | `-ImplicitCastExpr {{.*}} 'const A' <NoOp>
+ // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | `-CXXTemporaryObjectExpr {{.*}} 'A' 'void ()'
+ for (auto e : default_arg_fn())
bar(e);
}
@@ -158,43 +158,43 @@ void test5() {
// CHECK-NEXT: | `-CallExpr {{.*}} 'int[3]' lvalue
// CHECK-NEXT: | |-ImplicitCastExpr {{.*}} 'int (&(*)(const A &))[3]' <FunctionToPointerDecay>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} 'int (&(const A &))[3]' lvalue Function {{.*}} 'default_arg_fn' 'int (&(const A &))[3]'
- // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'const A':'const P2718R0::A' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
- // CHECK-NEXT: | `-ImplicitCastExpr {{.*}} 'const A':'const P2718R0::A' <NoOp>
- // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A':'P2718R0::A'
- // CHECK-NEXT: | `-CallExpr {{.*}} 'A':'P2718R0::A'
+ // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'const A' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
+ // CHECK-NEXT: | `-ImplicitCastExpr {{.*}} 'const A' <NoOp>
+ // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A'
+ // CHECK-NEXT: | `-CallExpr {{.*}} 'A'
// CHECK-NEXT: | |-ImplicitCastExpr {{.*}} 'A (*)(const A &, const DefaultA &)' <FunctionToPointerDecay>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} 'A (const A &, const DefaultA &)' lvalue Function {{.*}} 'foo' 'A (const A &, const DefaultA &)'
- // CHECK-NEXT: | |-MaterializeTemporaryExpr {{.*}} 'const A':'const P2718R0::A' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
- // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} 'const A':'const P2718R0::A' <NoOp>
- // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'A':'P2718R0::A'
- // CHECK-NEXT: | | `-CallExpr {{.*}} 'A':'P2718R0::A'
+ // CHECK-NEXT: | |-MaterializeTemporaryExpr {{.*}} 'const A' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
+ // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} 'const A' <NoOp>
+ // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'A'
+ // CHECK-NEXT: | | `-CallExpr {{.*}} 'A'
// CHECK-NEXT: | | |-ImplicitCastExpr {{.*}} 'A (*)(const A &, const DefaultA &)' <FunctionToPointerDecay>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} 'A (const A &, const DefaultA &)' lvalue Function {{.*}} 'foo' 'A (const A &, const DefaultA &)'
- // CHECK-NEXT: | | |-MaterializeTemporaryExpr {{.*}} 'const A':'const P2718R0::A' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
- // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} 'const A':'const P2718R0::A' <NoOp>
- // CHECK-NEXT: | | | `-CXXBindTemporaryExpr {{.*}} 'A':'P2718R0::A'
- // CHECK-NEXT: | | | `-CallExpr {{.*}} 'A':'P2718R0::A'
+ // CHECK-NEXT: | | |-MaterializeTemporaryExpr {{.*}} 'const A' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
+ // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} 'const A' <NoOp>
+ // CHECK-NEXT: | | | `-CXXBindTemporaryExpr {{.*}} 'A'
+ // CHECK-NEXT: | | | `-CallExpr {{.*}} 'A'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} 'A (*)(const A &, const DefaultA &)' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} 'A (const A &, const DefaultA &)' lvalue Function {{.*}} 'foo' 'A (const A &, const DefaultA &)'
- // CHECK-NEXT: | | | |-MaterializeTemporaryExpr {{.*}} 'const A':'const P2718R0::A' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
- // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} 'const A':'const P2718R0::A' <NoOp>
- // CHECK-NEXT: | | | | `-CXXBindTemporaryExpr {{.*}} 'A':'P2718R0::A'
- // CHECK-NEXT: | | | | `-CXXTemporaryObjectExpr {{.*}} 'A':'P2718R0::A' 'void ()'
- // CHECK-NEXT: | | | `-CXXDefaultArgExpr {{.*}} <<invalid sloc>> 'const DefaultA':'const P2718R0::DefaultA' lvalue has rewritten init
- // CHECK-NEXT: | | | `-MaterializeTemporaryExpr {{.*}} 'const DefaultA':'const P2718R0::DefaultA' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
- // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} 'const DefaultA':'const P2718R0::DefaultA' <NoOp>
- // CHECK-NEXT: | | | `-CXXBindTemporaryExpr {{.*}} 'DefaultA':'P2718R0::DefaultA' (CXXTemporary {{.*}})
- // CHECK-NEXT: | | | `-CXXTemporaryObjectExpr {{.*}} 'DefaultA':'P2718R0::DefaultA' 'void ()'
- // CHECK-NEXT: | | `-CXXDefaultArgExpr {{.*}} <<invalid sloc>> 'const DefaultA':'const P2718R0::DefaultA' lvalue has rewritten init
- // CHECK-NEXT: | | `-MaterializeTemporaryExpr {{.*}} 'const DefaultA':'const P2718R0::DefaultA' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
- // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} 'const DefaultA':'const P2718R0::DefaultA' <NoOp>
- // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'DefaultA':'P2718R0::DefaultA' (CXXTemporary {{.*}})
- // CHECK-NEXT: | | `-CXXTemporaryObjectExpr {{.*}} 'DefaultA':'P2718R0::DefaultA' 'void ()'
- // CHECK-NEXT: | `-CXXDefaultArgExpr {{.*}} <<invalid sloc>> 'const DefaultA':'const P2718R0::DefaultA' lvalue has rewritten init
- // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'const DefaultA':'const P2718R0::DefaultA' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
- // CHECK-NEXT: | `-ImplicitCastExpr {{.*}} 'const DefaultA':'const P2718R0::DefaultA' <NoOp>
- // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'DefaultA':'P2718R0::DefaultA' (CXXTemporary {{.*}})
- // CHECK-NEXT: | `-CXXTemporaryObjectExpr {{.*}} 'DefaultA':'P2718R0::DefaultA' 'void ()'
+ // CHECK-NEXT: | | | |-MaterializeTemporaryExpr {{.*}} 'const A' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
+ // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} 'const A' <NoOp>
+ // CHECK-NEXT: | | | | `-CXXBindTemporaryExpr {{.*}} 'A'
+ // CHECK-NEXT: | | | | `-CXXTemporaryObjectExpr {{.*}} 'A' 'void ()'
+ // CHECK-NEXT: | | | `-CXXDefaultArgExpr {{.*}} <<invalid sloc>> 'const DefaultA' lvalue has rewritten init
+ // CHECK-NEXT: | | | `-MaterializeTemporaryExpr {{.*}} 'const DefaultA' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
+ // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} 'const DefaultA' <NoOp>
+ // CHECK-NEXT: | | | `-CXXBindTemporaryExpr {{.*}} 'DefaultA' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | | | `-CXXTemporaryObjectExpr {{.*}} 'DefaultA' 'void ()'
+ // CHECK-NEXT: | | `-CXXDefaultArgExpr {{.*}} <<invalid sloc>> 'const DefaultA' lvalue has rewritten init
+ // CHECK-NEXT: | | `-MaterializeTemporaryExpr {{.*}} 'const DefaultA' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
+ // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} 'const DefaultA' <NoOp>
+ // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'DefaultA' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | | `-CXXTemporaryObjectExpr {{.*}} 'DefaultA' 'void ()'
+ // CHECK-NEXT: | `-CXXDefaultArgExpr {{.*}} <<invalid sloc>> 'const DefaultA' lvalue has rewritten init
+ // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'const DefaultA' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
+ // CHECK-NEXT: | `-ImplicitCastExpr {{.*}} 'const DefaultA' <NoOp>
+ // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'DefaultA' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | `-CXXTemporaryObjectExpr {{.*}} 'DefaultA' 'void ()'
for (auto e : default_arg_fn(foo(foo(foo(A())))))
bar(e);
}
@@ -210,40 +210,40 @@ void test6() {
// CHECK-NEXT: |-<<<NULL>>>
// CHECK-NEXT: |-DeclStmt {{.*}}
// CHECK-NEXT: | `-VarDecl {{.*}} col:17 implicit used __range1 'C &&' cinit
- // CHECK-NEXT: | `-ExprWithCleanups {{.*}} 'C':'P2718R0::C' xvalue
- // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'C':'P2718R0::C' xvalue extended by Var {{.*}} '__range1' 'C &&'
- // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'C':'P2718R0::C'
- // CHECK-NEXT: | `-CXXTemporaryObjectExpr {{.*}} 'C':'P2718R0::C' 'void (int, const C &, const DefaultA &)'
+ // CHECK-NEXT: | `-ExprWithCleanups {{.*}} 'C' xvalue
+ // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'C' xvalue extended by Var {{.*}} '__range1' 'C &&'
+ // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'C'
+ // CHECK-NEXT: | `-CXXTemporaryObjectExpr {{.*}} 'C' 'void (int, const C &, const DefaultA &)'
// CHECK-NEXT: | |-IntegerLiteral {{.*}}'int' 0
- // CHECK-NEXT: | |-MaterializeTemporaryExpr {{.*}} 'const C':'const P2718R0::C' lvalue extended by Var {{.*}} '__range1' 'C &&'
- // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} 'const C':'const P2718R0::C' <NoOp>
- // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'C':'P2718R0::C'
- // CHECK-NEXT: | | `-CXXTemporaryObjectExpr {{.*}} 'C':'P2718R0::C' 'void (int, const C &, const DefaultA &)'
+ // CHECK-NEXT: | |-MaterializeTemporaryExpr {{.*}} 'const C' lvalue extended by Var {{.*}} '__range1' 'C &&'
+ // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} 'const C' <NoOp>
+ // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'C'
+ // CHECK-NEXT: | | `-CXXTemporaryObjectExpr {{.*}} 'C' 'void (int, const C &, const DefaultA &)'
// CHECK-NEXT: | | |-IntegerLiteral {{.*}} 'int' 0
- // CHECK-NEXT: | | |-MaterializeTemporaryExpr {{.*}} 'const C':'const P2718R0::C' lvalue extended by Var {{.*}} '__range1' 'C &&'
- // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} 'const C':'const P2718R0::C' <NoOp>
- // CHECK-NEXT: | | | `-CXXBindTemporaryExpr {{.*}} 'C':'P2718R0::C'
- // CHECK-NEXT: | | | `-CXXTemporaryObjectExpr {{.*}} 'C':'P2718R0::C' 'void (int, const C &, const DefaultA &)'
+ // CHECK-NEXT: | | |-MaterializeTemporaryExpr {{.*}} 'const C' lvalue extended by Var {{.*}} '__range1' 'C &&'
+ // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} 'const C' <NoOp>
+ // CHECK-NEXT: | | | `-CXXBindTemporaryExpr {{.*}} 'C'
+ // CHECK-NEXT: | | | `-CXXTemporaryObjectExpr {{.*}} 'C' 'void (int, const C &, const DefaultA &)'
// CHECK-NEXT: | | | |-IntegerLiteral {{.*}} 'int' 0
- // CHECK-NEXT: | | | |-MaterializeTemporaryExpr {{.*}} 'const C':'const P2718R0::C' lvalue extended by Var {{.*}} '__range1' 'C &&'
- // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} 'const C':'const P2718R0::C' <NoOp>
- // CHECK-NEXT: | | | | `-CXXBindTemporaryExpr {{.*}} 'C':'P2718R0::C'
- // CHECK-NEXT: | | | | `-CXXTemporaryObjectExpr {{.*}} 'C':'P2718R0::C' 'void ()'
- // CHECK-NEXT: | | | `-CXXDefaultArgExpr {{.*}} <<invalid sloc>> 'const DefaultA':'const P2718R0::DefaultA' lvalue has rewritten init
- // CHECK-NEXT: | | | `-MaterializeTemporaryExpr {{.*}} 'const DefaultA':'const P2718R0::DefaultA' lvalue extended by Var {{.*}} '__range1' 'C &&'
- // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} 'const DefaultA':'const P2718R0::DefaultA' <NoOp>
- // CHECK-NEXT: | | | `-CXXBindTemporaryExpr {{.*}} 'DefaultA':'P2718R0::DefaultA' (CXXTemporary {{.*}})
- // CHECK-NEXT: | | | `-CXXTemporaryObjectExpr {{.*}} 'DefaultA':'P2718R0::DefaultA' 'void ()'
- // CHECK-NEXT: | | `-CXXDefaultArgExpr {{.*}} <<invalid sloc>> 'const DefaultA':'const P2718R0::DefaultA' lvalue has rewritten init
- // CHECK-NEXT: | | `-MaterializeTemporaryExpr {{.*}} 'const DefaultA':'const P2718R0::DefaultA' lvalue extended by Var {{.*}} '__range1' 'C &&'
- // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} 'const DefaultA':'const P2718R0::DefaultA' <NoOp>
- // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'DefaultA':'P2718R0::DefaultA' (CXXTemporary {{.*}})
- // CHECK-NEXT: | | `-CXXTemporaryObjectExpr {{.*}} 'DefaultA':'P2718R0::DefaultA' 'void ()'
- // CHECK-NEXT: | `-CXXDefaultArgExpr {{.*}} <<invalid sloc>> 'const DefaultA':'const P2718R0::DefaultA' lvalue has rewritten init
- // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'const DefaultA':'const P2718R0::DefaultA' lvalue extended by Var {{.*}} '__range1' 'C &&'
- // CHECK-NEXT: | `-ImplicitCastExpr {{.*}} 'const DefaultA':'const P2718R0::DefaultA' <NoOp>
- // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'DefaultA':'P2718R0::DefaultA' (CXXTemporary {{.*}})
- // CHECK-NEXT: | `-CXXTemporaryObjectExpr {{.*}} 'DefaultA':'P2718R0::DefaultA' 'void ()'
+ // CHECK-NEXT: | | | |-MaterializeTemporaryExpr {{.*}} 'const C' lvalue extended by Var {{.*}} '__range1' 'C &&'
+ // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} 'const C' <NoOp>
+ // CHECK-NEXT: | | | | `-CXXBindTemporaryExpr {{.*}} 'C'
+ // CHECK-NEXT: | | | | `-CXXTemporaryObjectExpr {{.*}} 'C' 'void ()'
+ // CHECK-NEXT: | | | `-CXXDefaultArgExpr {{.*}} <<invalid sloc>> 'const DefaultA' lvalue has rewritten init
+ // CHECK-NEXT: | | | `-MaterializeTemporaryExpr {{.*}} 'const DefaultA' lvalue extended by Var {{.*}} '__range1' 'C &&'
+ // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} 'const DefaultA' <NoOp>
+ // CHECK-NEXT: | | | `-CXXBindTemporaryExpr {{.*}} 'DefaultA' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | | | `-CXXTemporaryObjectExpr {{.*}} 'DefaultA' 'void ()'
+ // CHECK-NEXT: | | `-CXXDefaultArgExpr {{.*}} <<invalid sloc>> 'const DefaultA' lvalue has rewritten init
+ // CHECK-NEXT: | | `-MaterializeTemporaryExpr {{.*}} 'const DefaultA' lvalue extended by Var {{.*}} '__range1' 'C &&'
+ // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} 'const DefaultA' <NoOp>
+ // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'DefaultA' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | | `-CXXTemporaryObjectExpr {{.*}} 'DefaultA' 'void ()'
+ // CHECK-NEXT: | `-CXXDefaultArgExpr {{.*}} <<invalid sloc>> 'const DefaultA' lvalue has rewritten init
+ // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'const DefaultA' lvalue extended by Var {{.*}} '__range1' 'C &&'
+ // CHECK-NEXT: | `-ImplicitCastExpr {{.*}} 'const DefaultA' <NoOp>
+ // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'DefaultA' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | `-CXXTemporaryObjectExpr {{.*}} 'DefaultA' 'void ()'
for (auto e : C(0, C(0, C(0, C()))))
bar(e);
}
@@ -255,28 +255,28 @@ void test7() {
// CHECK-NEXT: |-<<<NULL>>>
// CHECK-NEXT: |-DeclStmt {{.*}}
// CHECK-NEXT: | `-VarDecl {{.*}} implicit used __range1 'A &&' cinit
- // CHECK-NEXT: | `-ExprWithCleanups {{.*}} 'A':'P2718R0::A' xvalue
- // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'A':'P2718R0::A' xvalue extended by Var {{.*}} '__range1' 'A &&'
- // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A':'P2718R0::A' (CXXTemporary {{.*}})
- // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A':'P2718R0::A'
+ // CHECK-NEXT: | `-ExprWithCleanups {{.*}} 'A' xvalue
+ // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'A' xvalue extended by Var {{.*}} '__range1' 'A &&'
+ // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A'
// CHECK-NEXT: | `-MemberExpr {{.*}} '<bound member function type>' .g {{.*}}
- // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A':'P2718R0::A' lvalue
+ // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A' lvalue
// CHECK-NEXT: | `-MemberExpr {{.*}} '<bound member function type>' .r {{.*}}
- // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'A':'P2718R0::A' xvalue extended by Var {{.*}} '__range1' 'A &&'
- // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A':'P2718R0::A' (CXXTemporary {{.*}})
- // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A':'P2718R0::A'
+ // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'A' xvalue extended by Var {{.*}} '__range1' 'A &&'
+ // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A'
// CHECK-NEXT: | `-MemberExpr {{.*}} '<bound member function type>' .g {{.*}}
- // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A':'P2718R0::A' lvalue
+ // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A' lvalue
// CHECK-NEXT: | `-MemberExpr {{.*}} '<bound member function type>' .r {{.*}}
- // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'A':'P2718R0::A' xvalue extended by Var {{.*}} '__range1' 'A &&'
- // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A':'P2718R0::A' (CXXTemporary {{.*}})
- // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A':'P2718R0::A'
+ // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'A' xvalue extended by Var {{.*}} '__range1' 'A &&'
+ // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A'
// CHECK-NEXT: | `-MemberExpr {{.*}} '<bound member function type>' .g {{.*}}
- // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A':'P2718R0::A' lvalue
+ // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A' lvalue
// CHECK-NEXT: | `-MemberExpr {{.*}} '<bound member function type>' .r {{.*}}
- // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'A':'P2718R0::A' xvalue extended by Var {{.*}} '__range1' 'A &&'
- // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A':'P2718R0::A' (CXXTemporary {{.*}})
- // CHECK-NEXT: | `-CallExpr {{.*}} 'A':'P2718R0::A'
+ // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'A' xvalue extended by Var {{.*}} '__range1' 'A &&'
+ // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | `-CallExpr {{.*}} 'A'
// CHECK-NEXT: | `-ImplicitCastExpr {{.*}} 'A (*)()' <FunctionToPointerDecay>
// CHECK-NEXT: | `-DeclRefExpr {{.*}} 'A ()' lvalue Function {{.*}} 'g' 'A ()'
for (auto e : g().r().g().r().g().r().g())
@@ -324,11 +324,11 @@ void test9() {
// CHECK-NEXT: | `-CallExpr {{.*}} 'const A *'
// CHECK-NEXT: | |-ImplicitCastExpr {{.*}} 'const A *(*)(const A &)' <FunctionToPointerDecay>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} 'const A *(const A &)' lvalue Function {{.*}} 'dg2' 'const A *(const A &)'
- // CHECK-NEXT: | `-ImplicitCastExpr {{.*}} 'const A':'const P2718R0::A' lvalue <DerivedToBase (A)>
- // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'const B':'const P2718R0::B' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
- // CHECK-NEXT: | `-ImplicitCastExpr {{.*}} 'const B':'const P2718R0::B' <NoOp>
- // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'B':'P2718R0::B' (CXXTemporary {{.*}})
- // CHECK-NEXT: | `-CXXTemporaryObjectExpr {{.*}} 'B':'P2718R0::B' 'void () noexcept(false)' zeroing
+ // CHECK-NEXT: | `-ImplicitCastExpr {{.*}} 'const A' lvalue <DerivedToBase (A)>
+ // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'const B' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
+ // CHECK-NEXT: | `-ImplicitCastExpr {{.*}} 'const B' <NoOp>
+ // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'B' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | `-CXXTemporaryObjectExpr {{.*}} 'B' 'void () noexcept(false)' zeroing
for (auto e : df2(dg2(B())))
bar(e);
}
@@ -348,10 +348,10 @@ void test10() {
// CHECK-NEXT: | | `-CallExpr {{.*}} 'const P2718R0::LockGuard' lvalue
// CHECK-NEXT: | | |-ImplicitCastExpr {{.*}} 'const P2718R0::LockGuard &(*)(const P2718R0::LockGuard &)' <FunctionToPointerDecay>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} 'const P2718R0::LockGuard &(const P2718R0::LockGuard &)' lvalue Function {{.*}} 'df1' 'const P2718R0::LockGuard &(const P2718R0::LockGuard &)' (FunctionTemplate {{.*}} 'df1')
- // CHECK-NEXT: | | `-MaterializeTemporaryExpr {{.*}} 'const LockGuard':'const P2718R0::LockGuard' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
- // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} 'const LockGuard':'const P2718R0::LockGuard' <NoOp>
- // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'LockGuard':'P2718R0::LockGuard' (CXXTemporary {{.*}})
- // CHECK-NEXT: | | `-CXXTemporaryObjectExpr {{.*}} 'LockGuard':'P2718R0::LockGuard' 'void ()'
+ // CHECK-NEXT: | | `-MaterializeTemporaryExpr {{.*}} 'const LockGuard' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
+ // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} 'const LockGuard' <NoOp>
+ // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'LockGuard' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | | `-CXXTemporaryObjectExpr {{.*}} 'LockGuard' 'void ()'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} 'int[3]' lvalue Var {{.*}} 'v' 'int[3]'
for ([[maybe_unused]] int x : static_cast<void>(df1(LockGuard())), v)
LockGuard guard;
@@ -366,10 +366,10 @@ void test10() {
// CHECK-NEXT: | | `-CallExpr {{.*}} 'const P2718R0::LockGuard' lvalue
// CHECK-NEXT: | | |-ImplicitCastExpr {{.*}} 'const P2718R0::LockGuard &(*)(const P2718R0::LockGuard &)' <FunctionToPointerDecay>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} 'const P2718R0::LockGuard &(const P2718R0::LockGuard &)' lvalue Function {{.*}} 'df1' 'const P2718R0::LockGuard &(const P2718R0::LockGuard &)' (FunctionTemplate {{.*}} 'df1')
- // CHECK-NEXT: | | `-MaterializeTemporaryExpr {{.*}} 'const LockGuard':'const P2718R0::LockGuard' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
- // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} 'const LockGuard':'const P2718R0::LockGuard' <NoOp>
- // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'LockGuard':'P2718R0::LockGuard' (CXXTemporary {{.*}})
- // CHECK-NEXT: | | `-CXXTemporaryObjectExpr {{.*}} 'LockGuard':'P2718R0::LockGuard' 'void ()'
+ // CHECK-NEXT: | | `-MaterializeTemporaryExpr {{.*}} 'const LockGuard' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
+ // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} 'const LockGuard' <NoOp>
+ // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'LockGuard' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | | `-CXXTemporaryObjectExpr {{.*}} 'LockGuard' 'void ()'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} 'int[3]' lvalue Var {{.*}} 'v' 'int[3]'
for ([[maybe_unused]] int x : (void)df1(LockGuard()), v)
LockGuard guard;
@@ -384,17 +384,17 @@ void test10() {
// CHECK-NEXT: | | |-CallExpr {{.*}} 'const P2718R0::LockGuard' lvalue
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} 'const P2718R0::LockGuard &(*)(const P2718R0::LockGuard &)' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} 'const P2718R0::LockGuard &(const P2718R0::LockGuard &)' lvalue Function {{.*}} 'df1' 'const P2718R0::LockGuard &(const P2718R0::LockGuard &)' (FunctionTemplate {{.*}} 'df1')
- // CHECK-NEXT: | | | `-MaterializeTemporaryExpr {{.*}} 'const LockGuard':'const P2718R0::LockGuard' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
- // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} 'const LockGuard':'const P2718R0::LockGuard' <NoOp>
- // CHECK-NEXT: | | | `-CXXBindTemporaryExpr {{.*}} 'LockGuard':'P2718R0::LockGuard' (CXXTemporary {{.*}})
- // CHECK-NEXT: | | | `-CXXTemporaryObjectExpr {{.*}} 'LockGuard':'P2718R0::LockGuard' 'void ()'
+ // CHECK-NEXT: | | | `-MaterializeTemporaryExpr {{.*}} 'const LockGuard' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
+ // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} 'const LockGuard' <NoOp>
+ // CHECK-NEXT: | | | `-CXXBindTemporaryExpr {{.*}} 'LockGuard' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | | | `-CXXTemporaryObjectExpr {{.*}} 'LockGuard' 'void ()'
// CHECK-NEXT: | | `-CallExpr {{.*}} 'const P2718R0::LockGuard' lvalue
// CHECK-NEXT: | | |-ImplicitCastExpr {{.*}} 'const P2718R0::LockGuard &(*)(const P2718R0::LockGuard &)' <FunctionToPointerDecay>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} 'const P2718R0::LockGuard &(const P2718R0::LockGuard &)' lvalue Function {{.*}} 'df1' 'const P2718R0::LockGuard &(const P2718R0::LockGuard &)' (FunctionTemplate {{.*}} 'df1')
- // CHECK-NEXT: | | `-MaterializeTemporaryExpr {{.*}} 'const LockGuard':'const P2718R0::LockGuard' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
- // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} 'const LockGuard':'const P2718R0::LockGuard' <NoOp>
- // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'LockGuard':'P2718R0::LockGuard' (CXXTemporary {{.*}})
- // CHECK-NEXT: | | `-CXXTemporaryObjectExpr {{.*}} 'LockGuard':'P2718R0::LockGuard' 'void ()'
+ // CHECK-NEXT: | | `-MaterializeTemporaryExpr {{.*}} 'const LockGuard' lvalue extended by Var {{.*}} '__range1' 'int (&)[3]'
+ // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} 'const LockGuard' <NoOp>
+ // CHECK-NEXT: | | `-CXXBindTemporaryExpr {{.*}} 'LockGuard' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | | `-CXXTemporaryObjectExpr {{.*}} 'LockGuard' 'void ()'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} 'int[3]' lvalue Var {{.*}} 'v' 'int[3]'
for ([[maybe_unused]] int x : df1(LockGuard()), df1(LockGuard()), v)
LockGuard guard;
@@ -403,7 +403,7 @@ void test10() {
// Test default argument && dependent context
template <typename T> int (&default_arg_fn2(const T & = T()))[3];
void test11() {
- for (auto e : default_arg_fn2<A>())
+ for (auto e : default_arg_fn2<A>())
bar(e);
}
@@ -422,24 +422,24 @@ void test13() {
// CHECK-NEXT: |-<<<NULL>>>
// CHECK-NEXT: |-DeclStmt {{.*}}
// CHECK-NEXT: | `-VarDecl {{.*}} implicit used __range1 'A &&' cinit
- // CHECK-NEXT: | `-ExprWithCleanups {{.*}} 'A':'P2718R0::A' xvalue
- // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'A':'P2718R0::A' xvalue extended by Var {{.*}} '__range1' 'A &&'
- // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A':'P2718R0::A' (CXXTemporary {{.*}})
- // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A':'P2718R0::A'
+ // CHECK-NEXT: | `-ExprWithCleanups {{.*}} 'A' xvalue
+ // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'A' xvalue extended by Var {{.*}} '__range1' 'A &&'
+ // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A'
// CHECK-NEXT: | `-MemberExpr {{.*}} '<bound member function type>' .g {{.*}}
- // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A':'P2718R0::A' lvalue
+ // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A' lvalue
// CHECK-NEXT: | `-MemberExpr {{.*}} '<bound member function type>' .r {{.*}}
- // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'A':'P2718R0::A' xvalue extended by Var {{.*}} '__range1' 'A &&'
- // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A':'P2718R0::A' (CXXTemporary {{.*}})
- // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A':'P2718R0::A'
+ // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'A' xvalue extended by Var {{.*}} '__range1' 'A &&'
+ // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A'
// CHECK-NEXT: | `-MemberExpr {{.*}} '<bound member function type>' .g {{.*}}
- // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A':'P2718R0::A' lvalue
+ // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A' lvalue
// CHECK-NEXT: | `-MemberExpr {{.*}} '<bound member function type>' .r {{.*}}
- // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'A':'P2718R0::A' xvalue extended by Var {{.*}} '__range1' 'A &&'
- // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A':'P2718R0::A' (CXXTemporary {{.*}})
- // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A':'P2718R0::A'
+ // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'A' xvalue extended by Var {{.*}} '__range1' 'A &&'
+ // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A'
// CHECK-NEXT: | `-MemberExpr {{.*}} '<bound member function type>' .g {{.*}}
- // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A':'P2718R0::A' lvalue
+ // CHECK-NEXT: | `-CXXMemberCallExpr {{.*}} 'A' lvalue
// CHECK-NEXT: | `-MemberExpr {{.*}} '<bound member function type>' .r {{.*}}
// CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'P2718R0::A' xvalue extended by Var {{.*}} '__range1' 'A &&'
// CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'P2718R0::A' (CXXTemporary {{.*}})
@@ -474,17 +474,17 @@ void test14() {
// CHECK-NEXT: | `-VarDecl {{.*}} implicit used __range1 'const int (&)[1]' cinit
// CHECK-NEXT: | `-ExprWithCleanups {{.*}} 'const int[1]' lvalue
// CHECK-NEXT: | `-MemberExpr {{.*}} 'const int[1]' lvalue .arr {{.*}}
- // CHECK-NEXT: | `-MemberExpr {{.*}} 'const A14':'const P2718R0::A14' lvalue .a {{.*}}
- // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'B14':'P2718R0::B14' xvalue extended by Var {{.*}} '__range1' 'const int (&)[1]'
- // CHECK-NEXT: | `-CXXFunctionalCastExpr {{.*}} 'B14':'P2718R0::B14' functional cast to B14 <NoOp>
- // CHECK-NEXT: | `-InitListExpr {{.*}} 'B14':'P2718R0::B14'
+ // CHECK-NEXT: | `-MemberExpr {{.*}} 'const A14' lvalue .a {{.*}}
+ // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'B14' xvalue extended by Var {{.*}} '__range1' 'const int (&)[1]'
+ // CHECK-NEXT: | `-CXXFunctionalCastExpr {{.*}} 'B14' functional cast to B14 <NoOp>
+ // CHECK-NEXT: | `-InitListExpr {{.*}} 'B14'
// CHECK-NEXT: | |-IntegerLiteral {{.*}} 'int' 0
- // CHECK-NEXT: | `-CXXDefaultInitExpr {{.*}} 'const A14':'const P2718R0::A14' lvalue has rewritten init
- // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'const A14':'const P2718R0::A14' lvalue extended by Var {{.*}} '__range1' 'const int (&)[1]'
- // CHECK-NEXT: | `-ImplicitCastExpr {{.*}} 'const A14':'const P2718R0::A14' <NoOp>
- // CHECK-NEXT: | `-CXXFunctionalCastExpr {{.*}} 'A14':'P2718R0::A14' functional cast to A14 <NoOp>
- // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A14':'P2718R0::A14' (CXXTemporary {{.*}})
- // CHECK-NEXT: | `-InitListExpr {{.*}} 'A14':'P2718R0::A14'
+ // CHECK-NEXT: | `-CXXDefaultInitExpr {{.*}} 'const A14' lvalue has rewritten init
+ // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'const A14' lvalue extended by Var {{.*}} '__range1' 'const int (&)[1]'
+ // CHECK-NEXT: | `-ImplicitCastExpr {{.*}} 'const A14' <NoOp>
+ // CHECK-NEXT: | `-CXXFunctionalCastExpr {{.*}} 'A14' functional cast to A14 <NoOp>
+ // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A14' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | `-InitListExpr {{.*}} 'A14'
// CHECK-NEXT: | `-InitListExpr {{.*}} 'int[1]'
// CHECK-NEXT: | `-IntegerLiteral {{.*}} 'int' 0
for (auto &&x : B14{0}.a.arr) { exit(0); }
@@ -493,17 +493,17 @@ void test14() {
// CHECK-NEXT: |-<<<NULL>>>
// CHECK-NEXT: |-DeclStmt {{.*}}
// CHECK-NEXT: | `-VarDecl {{.*}} col:19 implicit used __range1 'B14 &&' cinit
- // CHECK-NEXT: | `-ExprWithCleanups {{.*}} 'B14':'P2718R0::B14' xvalue
- // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'B14':'P2718R0::B14' xvalue extended by Var {{.*}} '__range1' 'B14 &&'
- // CHECK-NEXT: | `-CXXFunctionalCastExpr {{.*}} 'B14':'P2718R0::B14' functional cast to B14 <NoOp>
- // CHECK-NEXT: | `-InitListExpr {{.*}} 'B14':'P2718R0::B14'
+ // CHECK-NEXT: | `-ExprWithCleanups {{.*}} 'B14' xvalue
+ // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'B14' xvalue extended by Var {{.*}} '__range1' 'B14 &&'
+ // CHECK-NEXT: | `-CXXFunctionalCastExpr {{.*}} 'B14' functional cast to B14 <NoOp>
+ // CHECK-NEXT: | `-InitListExpr {{.*}} 'B14'
// CHECK-NEXT: | |-IntegerLiteral {{.*}} 'int' 0
- // CHECK-NEXT: | `-CXXDefaultInitExpr {{.*}} 'const A14':'const P2718R0::A14' lvalue has rewritten init
- // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'const A14':'const P2718R0::A14' lvalue extended by Var {{.*}} '__range1' 'B14 &&'
- // CHECK-NEXT: | `-ImplicitCastExpr {{.*}} 'const A14':'const P2718R0::A14' <NoOp>
- // CHECK-NEXT: | `-CXXFunctionalCastExpr {{.*}} 'A14':'P2718R0::A14' functional cast to A14 <NoOp>
- // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A14':'P2718R0::A14' (CXXTemporary {{.*}})
- // CHECK-NEXT: | `-InitListExpr {{.*}} 'A14':'P2718R0::A14'
+ // CHECK-NEXT: | `-CXXDefaultInitExpr {{.*}} 'const A14' lvalue has rewritten init
+ // CHECK-NEXT: | `-MaterializeTemporaryExpr {{.*}} 'const A14' lvalue extended by Var {{.*}} '__range1' 'B14 &&'
+ // CHECK-NEXT: | `-ImplicitCastExpr {{.*}} 'const A14' <NoOp>
+ // CHECK-NEXT: | `-CXXFunctionalCastExpr {{.*}} 'A14' functional cast to A14 <NoOp>
+ // CHECK-NEXT: | `-CXXBindTemporaryExpr {{.*}} 'A14' (CXXTemporary {{.*}})
+ // CHECK-NEXT: | `-InitListExpr {{.*}} 'A14'
// CHECK-NEXT: | `-InitListExpr {{.*}} 'int[1]'
// CHECK-NEXT: | `-IntegerLiteral {{.*}} 'int' 0
for (auto &&x : B14{0}) { exit(0); }
diff --git a/clang/test/AST/ast-dump-openmp-begin-declare-variant_reference.cpp b/clang/test/AST/ast-dump-openmp-begin-declare-variant_reference.cpp
index 1937a5d..9584e77 100644
--- a/clang/test/AST/ast-dump-openmp-begin-declare-variant_reference.cpp
+++ b/clang/test/AST/ast-dump-openmp-begin-declare-variant_reference.cpp
@@ -192,12 +192,12 @@ int test(float &&f, short &&s) {
// CHECK-NEXT: | | `-CompoundStmt [[ADDR_44:0x[a-z0-9]*]] <col:17, line:14:1>
// CHECK-NEXT: | | |-DeclStmt [[ADDR_45:0x[a-z0-9]*]] <line:12:3, col:51>
// CHECK-NEXT: | | | `-TypedefDecl [[ADDR_46:0x[a-z0-9]*]] <col:3, col:48> col:48 referenced _Up 'typename remove_reference<float &>::type':'float'
-// CHECK-NEXT: | | | `-ElaboratedType [[ADDR_47:0x[a-z0-9]*]] 'typename remove_reference<float &>::type' sugar
-// CHECK-NEXT: | | | `-TypedefType [[ADDR_48:0x[a-z0-9]*]] 'remove_reference<float &>::type' sugar
-// CHECK-NEXT: | | | |-Typedef [[ADDR_10]] 'type'
-// CHECK-NEXT: | | | `-SubstTemplateTypeParmType [[ADDR_11]] 'float' sugar class depth 0 index 0 _Tp
-// CHECK-NEXT: | | | |-ClassTemplateSpecialization [[ADDR_6]] 'remove_reference'
-// CHECK-NEXT: | | | `-BuiltinType [[ADDR_8]] 'float'
+// CHECK-NEXT: | | | `-TypedefType [[ADDR_48:0x[a-z0-9]*]] 'typename remove_reference<float &>::type' sugar typename
+// CHECK-NEXT: | | | |-NestedNameSpecifier TypeSpec 'remove_reference<float &>'
+// CHECK-NEXT: | | | |-Typedef [[ADDR_10]] 'type'
+// CHECK-NEXT: | | | `-SubstTemplateTypeParmType [[ADDR_11]] 'float' sugar class depth 0 index 0 _Tp
+// CHECK-NEXT: | | | |-ClassTemplateSpecialization [[ADDR_6]] 'remove_reference'
+// CHECK-NEXT: | | | `-BuiltinType [[ADDR_8]] 'float'
// CHECK-NEXT: | | `-ReturnStmt [[ADDR_49:0x[a-z0-9]*]] <line:13:3, col:33>
// CHECK-NEXT: | | `-CXXStaticCastExpr [[ADDR_50:0x[a-z0-9]*]] <col:10, col:33> '_Up':'float' xvalue static_cast<_Up &&> <NoOp>
// CHECK-NEXT: | | `-DeclRefExpr [[ADDR_51:0x[a-z0-9]*]] <col:30> 'float' {{.*}}ParmVar [[ADDR_43]] '__t' 'float &'
@@ -209,12 +209,12 @@ int test(float &&f, short &&s) {
// CHECK-NEXT: | `-CompoundStmt [[ADDR_54:0x[a-z0-9]*]] <col:17, line:14:1>
// CHECK-NEXT: | |-DeclStmt [[ADDR_55:0x[a-z0-9]*]] <line:12:3, col:51>
// CHECK-NEXT: | | `-TypedefDecl [[ADDR_56:0x[a-z0-9]*]] <col:3, col:48> col:48 referenced _Up 'typename remove_reference<short &>::type':'short'
-// CHECK-NEXT: | | `-ElaboratedType [[ADDR_57:0x[a-z0-9]*]] 'typename remove_reference<short &>::type' sugar
-// CHECK-NEXT: | | `-TypedefType [[ADDR_58:0x[a-z0-9]*]] 'remove_reference<short &>::type' sugar
-// CHECK-NEXT: | | |-Typedef [[ADDR_18]] 'type'
-// CHECK-NEXT: | | `-SubstTemplateTypeParmType [[ADDR_19]] 'short' sugar class depth 0 index 0 _Tp
-// CHECK-NEXT: | | |-ClassTemplateSpecialization [[ADDR_14]] 'remove_reference'
-// CHECK-NEXT: | | `-BuiltinType [[ADDR_16]] 'short'
+// CHECK-NEXT: | | `-TypedefType [[ADDR_58:0x[a-z0-9]*]] 'typename remove_reference<short &>::type' sugar typename
+// CHECK-NEXT: | | |-NestedNameSpecifier TypeSpec 'remove_reference<short &>'
+// CHECK-NEXT: | | |-Typedef [[ADDR_18]] 'type'
+// CHECK-NEXT: | | `-SubstTemplateTypeParmType [[ADDR_19]] 'short' sugar class depth 0 index 0 _Tp
+// CHECK-NEXT: | | |-ClassTemplateSpecialization [[ADDR_14]] 'remove_reference'
+// CHECK-NEXT: | | `-BuiltinType [[ADDR_16]] 'short'
// CHECK-NEXT: | `-ReturnStmt [[ADDR_59:0x[a-z0-9]*]] <line:13:3, col:33>
// CHECK-NEXT: | `-CXXStaticCastExpr [[ADDR_60:0x[a-z0-9]*]] <col:10, col:33> '_Up':'short' xvalue static_cast<_Up &&> <NoOp>
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_61:0x[a-z0-9]*]] <col:30> 'short' {{.*}}ParmVar [[ADDR_53]] '__t' 'short &'
diff --git a/clang/test/AST/ast-dump-openmp-begin-declare-variant_template_3.cpp b/clang/test/AST/ast-dump-openmp-begin-declare-variant_template_3.cpp
index ad26950..44d1cb4 100644
--- a/clang/test/AST/ast-dump-openmp-begin-declare-variant_template_3.cpp
+++ b/clang/test/AST/ast-dump-openmp-begin-declare-variant_template_3.cpp
@@ -54,7 +54,7 @@ int test() {
// CHECK-NEXT: | | | |-CopyAssignment simple trivial has_const_param needs_implicit implicit_has_const_param
// CHECK-NEXT: | | | |-MoveAssignment exists simple trivial needs_implicit
// CHECK-NEXT: | | | `-Destructor simple irrelevant trivial {{(constexpr )?}}needs_implicit
-// CHECK-NEXT: | | |-CXXRecordDecl [[ADDR_3:0x[a-z0-9]*]] <col:23, col:30> col:30 implicit referenced struct S
+// CHECK-NEXT: | | |-CXXRecordDecl [[ADDR_3:0x[a-z0-9]*]] <col:23, col:30> col:30 implicit struct S
// CHECK-NEXT: | | `-CXXConstructorDecl [[ADDR_4:0x[a-z0-9]*]] <line:6:3, col:16> col:3 S<T> 'void (int, T *)'
// CHECK-NEXT: | | |-ParmVarDecl [[ADDR_5:0x[a-z0-9]*]] <col:5> col:8 'int'
// CHECK-NEXT: | | |-ParmVarDecl [[ADDR_6:0x[a-z0-9]*]] <col:10, col:12> col:13 'T *'
diff --git a/clang/test/AST/ast-dump-record-definition-data-json.cpp b/clang/test/AST/ast-dump-record-definition-data-json.cpp
index c119089..e35bec7 100644
--- a/clang/test/AST/ast-dump-record-definition-data-json.cpp
+++ b/clang/test/AST/ast-dump-record-definition-data-json.cpp
@@ -2516,7 +2516,6 @@ struct DoesNotAllowConstDefaultInit {
// CHECK-NEXT: }
// CHECK-NEXT: },
// CHECK-NEXT: "isImplicit": true,
-// CHECK-NEXT: "isReferenced": true,
// CHECK-NEXT: "name": "IsTrivial",
// CHECK-NEXT: "tagUsed": "struct"
// CHECK-NEXT: },
@@ -2646,7 +2645,6 @@ struct DoesNotAllowConstDefaultInit {
// CHECK-NEXT: }
// CHECK-NEXT: },
// CHECK-NEXT: "isImplicit": true,
-// CHECK-NEXT: "isReferenced": true,
// CHECK-NEXT: "name": "IsNotTrivial",
// CHECK-NEXT: "tagUsed": "struct"
// CHECK-NEXT: },
@@ -3980,7 +3978,6 @@ struct DoesNotAllowConstDefaultInit {
// CHECK-NEXT: }
// CHECK-NEXT: },
// CHECK-NEXT: "isImplicit": true,
-// CHECK-NEXT: "isReferenced": true,
// CHECK-NEXT: "name": "HasUserDeclaredConstructor",
// CHECK-NEXT: "tagUsed": "struct"
// CHECK-NEXT: },
@@ -4234,7 +4231,6 @@ struct DoesNotAllowConstDefaultInit {
// CHECK-NEXT: }
// CHECK-NEXT: },
// CHECK-NEXT: "isImplicit": true,
-// CHECK-NEXT: "isReferenced": true,
// CHECK-NEXT: "name": "HasConstexprNonCopyMoveConstructor",
// CHECK-NEXT: "tagUsed": "struct"
// CHECK-NEXT: },
@@ -4381,7 +4377,6 @@ struct DoesNotAllowConstDefaultInit {
// CHECK-NEXT: }
// CHECK-NEXT: },
// CHECK-NEXT: "isImplicit": true,
-// CHECK-NEXT: "isReferenced": true,
// CHECK-NEXT: "name": "HasNoConstexprNonCopyMoveConstructor",
// CHECK-NEXT: "tagUsed": "struct"
// CHECK-NEXT: },
diff --git a/clang/test/AST/ast-dump-records-json.cpp b/clang/test/AST/ast-dump-records-json.cpp
index 7efdcb0..941c6a6 100644
--- a/clang/test/AST/ast-dump-records-json.cpp
+++ b/clang/test/AST/ast-dump-records-json.cpp
@@ -795,7 +795,6 @@ struct Derived6 : virtual public Bases... {
// CHECK-NEXT: },
// CHECK-NEXT: "name": "b",
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "desugaredQualType": "C::(unnamed struct at {{.*}}:16:3)",
// CHECK-NEXT: "qualType": "struct (unnamed struct at {{.*}}:16:3)"
// CHECK-NEXT: }
// CHECK-NEXT: },
@@ -2072,7 +2071,6 @@ struct Derived6 : virtual public Bases... {
// CHECK-NEXT: },
// CHECK-NEXT: "name": "b",
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "desugaredQualType": "G::(unnamed struct at {{.*}}:50:3)",
// CHECK-NEXT: "qualType": "struct (unnamed struct at {{.*}}:50:3)"
// CHECK-NEXT: }
// CHECK-NEXT: },
diff --git a/clang/test/AST/ast-dump-records.c b/clang/test/AST/ast-dump-records.c
index f4a540f..1dc175c 100644
--- a/clang/test/AST/ast-dump-records.c
+++ b/clang/test/AST/ast-dump-records.c
@@ -47,7 +47,7 @@ struct C {
int a;
// CHECK-NEXT: FieldDecl 0x{{[^ ]*}} <line:[[@LINE-1]]:5, col:9> col:9 a 'int'
} b;
- // CHECK-NEXT: FieldDecl 0x{{[^ ]*}} <line:[[@LINE-5]]:3, line:[[@LINE-1]]:5> col:5 b 'struct (unnamed struct at {{.*}}:[[@LINE-5]]:3)':'struct C::(unnamed at {{.*}}:[[@LINE-5]]:3)'
+ // CHECK-NEXT: FieldDecl 0x{{[^ ]*}} <line:[[@LINE-5]]:3, line:[[@LINE-1]]:5> col:5 b 'struct (unnamed struct at {{.*}}:[[@LINE-5]]:3)'
union {
// CHECK-NEXT: RecordDecl 0x{{[^ ]*}} <line:[[@LINE-1]]:3, line:[[@LINE+5]]:3> line:[[@LINE-1]]:3 union definition
@@ -122,15 +122,13 @@ union E {
};
union G {
- // CHECK: RecordDecl 0x{{[^ ]*}} <line:[[@LINE-1]]:1, line:[[@LINE+38]]:1> line:[[@LINE-1]]:7 union G definition
+ // CHECK: RecordDecl 0x{{[^ ]*}} <line:[[@LINE-1]]:1, line:[[@LINE+36]]:1> line:[[@LINE-1]]:7 union G definition
struct {
// CHECK-NEXT: RecordDecl 0x{{[^ ]*}} <line:[[@LINE-1]]:3, line:[[@LINE+3]]:3> line:[[@LINE-1]]:3 struct definition
int a;
// CHECK-NEXT: FieldDecl 0x{{[^ ]*}} <line:[[@LINE-1]]:5, col:9> col:9 a 'int'
} b;
- // FIXME: note that it talks about 'struct G' below; the same happens in
- // other cases with union G as well.
- // CHECK-NEXT: FieldDecl 0x{{[^ ]*}} <line:[[@LINE-7]]:3, line:[[@LINE-3]]:5> col:5 b 'struct (unnamed struct at {{.*}}:[[@LINE-7]]:3)':'struct G::(unnamed at {{.*}}:[[@LINE-7]]:3)'
+ // CHECK-NEXT: FieldDecl 0x{{[^ ]*}} <line:[[@LINE-5]]:3, line:[[@LINE-1]]:5> col:5 b 'struct (unnamed struct at {{.*}}:[[@LINE-5]]:3)'
union {
// CHECK-NEXT: RecordDecl 0x{{[^ ]*}} <line:[[@LINE-1]]:3, line:[[@LINE+5]]:3> line:[[@LINE-1]]:3 union definition
diff --git a/clang/test/AST/ast-dump-records.cpp b/clang/test/AST/ast-dump-records.cpp
index e9b37b7..edd13ba 100644
--- a/clang/test/AST/ast-dump-records.cpp
+++ b/clang/test/AST/ast-dump-records.cpp
@@ -72,7 +72,7 @@ struct C {
int a;
// CHECK-NEXT: FieldDecl 0x{{[^ ]*}} <line:[[@LINE-1]]:5, col:9> col:9 a 'int'
} b;
- // CHECK-NEXT: FieldDecl 0x{{[^ ]*}} <line:[[@LINE-12]]:3, line:[[@LINE-1]]:5> col:5 b 'struct (unnamed struct at {{.*}}:[[@LINE-12]]:3)':'C::(unnamed struct at {{.*}}:[[@LINE-12]]:3)'
+ // CHECK-NEXT: FieldDecl 0x{{[^ ]*}} <line:[[@LINE-12]]:3, line:[[@LINE-1]]:5> col:5 b 'struct (unnamed struct at {{.*}}:[[@LINE-12]]:3)'
union {
// CHECK-NEXT: CXXRecordDecl 0x{{[^ ]*}} <line:[[@LINE-1]]:3, line:[[@LINE+12]]:3> line:[[@LINE-1]]:3 union definition
@@ -179,7 +179,7 @@ union E {
};
union G {
- // CHECK: CXXRecordDecl 0x{{[^ ]*}} <line:[[@LINE-1]]:1, line:[[@LINE+71]]:1> line:[[@LINE-1]]:7 union G definition
+ // CHECK: CXXRecordDecl 0x{{[^ ]*}} <line:[[@LINE-1]]:1, line:[[@LINE+69]]:1> line:[[@LINE-1]]:7 union G definition
// CHECK-NEXT: DefinitionData pass_in_registers aggregate standard_layout trivially_copyable pod trivial literal
// CHECK-NEXT: DefaultConstructor exists trivial needs_implicit
// CHECK-NEXT: CopyConstructor simple trivial has_const_param needs_implicit implicit_has_const_param
@@ -202,9 +202,7 @@ union G {
int a;
// CHECK-NEXT: FieldDecl 0x{{[^ ]*}} <line:[[@LINE-1]]:5, col:9> col:9 a 'int'
} b;
- // FIXME: note that it talks about 'struct G' below; the same happens in
- // other cases with union G as well.
- // CHECK: FieldDecl 0x{{[^ ]*}} <line:[[@LINE-15]]:3, line:[[@LINE-3]]:5> col:5 b 'struct (unnamed struct at {{.*}}:[[@LINE-15]]:3)':'G::(unnamed struct at {{.*}}:[[@LINE-15]]:3)'
+ // CHECK: FieldDecl 0x{{[^ ]*}} <line:[[@LINE-13]]:3, line:[[@LINE-1]]:5> col:5 b 'struct (unnamed struct at {{.*}}:[[@LINE-13]]:3)'
union {
// CHECK-NEXT: CXXRecordDecl 0x{{[^ ]*}} <line:[[@LINE-1]]:3, line:[[@LINE+13]]:3> line:[[@LINE-1]]:3 union definition
diff --git a/clang/test/AST/ast-dump-recovery.cpp b/clang/test/AST/ast-dump-recovery.cpp
index a8e30f1..060ae3b 100644
--- a/clang/test/AST/ast-dump-recovery.cpp
+++ b/clang/test/AST/ast-dump-recovery.cpp
@@ -128,7 +128,7 @@ void test2(Foo2 f) {
// CHECK-NEXT: | `-DeclRefExpr {{.*}} 'f'
// CHECK-NEXT: `-IntegerLiteral {{.*}} 'int' 1
f.func(1);
- // CHECK: RecoveryExpr {{.*}} 'ForwardClass':'Foo2::ForwardClass'
+ // CHECK: RecoveryExpr {{.*}} 'ForwardClass'
// CHECK-NEXT: `-MemberExpr {{.*}} '<bound member function type>' .createFwd
// CHECK-NEXT: `-DeclRefExpr {{.*}} 'f'
f.createFwd();
@@ -292,8 +292,8 @@ union U {
// CHECK: FunctionDecl {{.*}} foo 'void ()'
// CHECK-NEXT: `-CompoundStmt {{.*}}
// CHECK-NEXT: `-DeclStmt {{.*}}
-// CHECK-NEXT: `-VarDecl {{.*}} g 'U':'GH112560::U' listinit
-// CHECK-NEXT: `-InitListExpr {{.*}} 'U':'GH112560::U' contains-errors field Field {{.*}} 'f' 'int'
+// CHECK-NEXT: `-VarDecl {{.*}} g 'U' listinit
+// CHECK-NEXT: `-InitListExpr {{.*}} 'U' contains-errors field Field {{.*}} 'f' 'int'
// CHECK-NEXT: `-CXXDefaultInitExpr {{.*}} 'int' contains-errors has rewritten init
// CHECK-NEXT: `-RecoveryExpr {{.*}} 'int' contains-errors
// DISABLED-NOT: -RecoveryExpr {{.*}} contains-errors
diff --git a/clang/test/AST/ast-dump-stmt-json.cpp b/clang/test/AST/ast-dump-stmt-json.cpp
index a8f113c..ee99ab8 100644
--- a/clang/test/AST/ast-dump-stmt-json.cpp
+++ b/clang/test/AST/ast-dump-stmt-json.cpp
@@ -224,7 +224,55 @@ void TestDependentGenericSelectionExpr(Ty T) {
// CHECK-NEXT: "type": {
// CHECK-NEXT: "qualType": "void ()"
// CHECK-NEXT: }
-// CHECK-NEXT: }
+// CHECK-NEXT: },
+// CHECK-NEXT: "inner": [
+// CHECK-NEXT: {
+// CHECK-NEXT: "id": "0x{{.*}}",
+// CHECK-NEXT: "kind": "FunctionDecl",
+// CHECK-NEXT: "loc": {
+// CHECK-NEXT: "offset": 125,
+// CHECK-NEXT: "line": 4,
+// CHECK-NEXT: "col": 6,
+// CHECK-NEXT: "tokLen": 8
+// CHECK-NEXT: },
+// CHECK-NEXT: "range": {
+// CHECK-NEXT: "begin": {
+// CHECK-NEXT: "offset": 120,
+// CHECK-NEXT: "col": 1,
+// CHECK-NEXT: "tokLen": 4
+// CHECK-NEXT: },
+// CHECK-NEXT: "end": {
+// CHECK-NEXT: "offset": 137,
+// CHECK-NEXT: "col": 18,
+// CHECK-NEXT: "tokLen": 1
+// CHECK-NEXT: }
+// CHECK-NEXT: },
+// CHECK-NEXT: "isUsed": true,
+// CHECK-NEXT: "name": "function",
+// CHECK-NEXT: "mangledName": "_ZN1n8functionEv",
+// CHECK-NEXT: "type": {
+// CHECK-NEXT: "qualType": "void ()"
+// CHECK-NEXT: },
+// CHECK-NEXT: "inner": [
+// CHECK-NEXT: {
+// CHECK-NEXT: "id": "0x{{.*}}",
+// CHECK-NEXT: "kind": "CompoundStmt",
+// CHECK-NEXT: "range": {
+// CHECK-NEXT: "begin": {
+// CHECK-NEXT: "offset": 136,
+// CHECK-NEXT: "col": 17,
+// CHECK-NEXT: "tokLen": 1
+// CHECK-NEXT: },
+// CHECK-NEXT: "end": {
+// CHECK-NEXT: "offset": 137,
+// CHECK-NEXT: "col": 18,
+// CHECK-NEXT: "tokLen": 1
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: ]
+// CHECK-NEXT: }
+// CHECK-NEXT: ]
// CHECK-NEXT: }
@@ -279,7 +327,37 @@ void TestDependentGenericSelectionExpr(Ty T) {
// CHECK-NEXT: "type": {
// CHECK-NEXT: "qualType": "int"
// CHECK-NEXT: }
-// CHECK-NEXT: }
+// CHECK-NEXT: },
+// CHECK-NEXT: "inner": [
+// CHECK-NEXT: {
+// CHECK-NEXT: "id": "0x{{.*}}",
+// CHECK-NEXT: "kind": "VarDecl",
+// CHECK-NEXT: "loc": {
+// CHECK-NEXT: "offset": 143,
+// CHECK-NEXT: "line": 5,
+// CHECK-NEXT: "col": 5,
+// CHECK-NEXT: "tokLen": 8
+// CHECK-NEXT: },
+// CHECK-NEXT: "range": {
+// CHECK-NEXT: "begin": {
+// CHECK-NEXT: "offset": 139,
+// CHECK-NEXT: "col": 1,
+// CHECK-NEXT: "tokLen": 3
+// CHECK-NEXT: },
+// CHECK-NEXT: "end": {
+// CHECK-NEXT: "offset": 143,
+// CHECK-NEXT: "col": 5,
+// CHECK-NEXT: "tokLen": 8
+// CHECK-NEXT: }
+// CHECK-NEXT: },
+// CHECK-NEXT: "isUsed": true,
+// CHECK-NEXT: "name": "Variable",
+// CHECK-NEXT: "mangledName": "_ZN1n8VariableE",
+// CHECK-NEXT: "type": {
+// CHECK-NEXT: "qualType": "int"
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: ]
// CHECK-NEXT: }
@@ -1869,6 +1947,7 @@ void TestDependentGenericSelectionExpr(Ty T) {
// CHECK-NEXT: ]
// CHECK-NEXT: }
+
// CHECK-NOT: {{^}}Dumping
// CHECK: "kind": "FunctionDecl",
// CHECK-NEXT: "loc": {},
@@ -1936,6 +2015,7 @@ void TestDependentGenericSelectionExpr(Ty T) {
// CHECK-NEXT: ]
// CHECK-NEXT: }
+
// CHECK-NOT: {{^}}Dumping
// CHECK: "kind": "FunctionDecl",
// CHECK-NEXT: "loc": {},
@@ -2086,6 +2166,7 @@ void TestDependentGenericSelectionExpr(Ty T) {
// CHECK-NEXT: ]
// CHECK-NEXT: }
+
// CHECK-NOT: {{^}}Dumping
// CHECK: "kind": "FunctionDecl",
// CHECK-NEXT: "loc": {},
@@ -2153,6 +2234,7 @@ void TestDependentGenericSelectionExpr(Ty T) {
// CHECK-NEXT: ]
// CHECK-NEXT: }
+
// CHECK-NOT: {{^}}Dumping
// CHECK: "kind": "FunctionTemplateDecl",
// CHECK-NEXT: "loc": {
diff --git a/clang/test/AST/ast-dump-stmt.m b/clang/test/AST/ast-dump-stmt.m
index e0fc16b..b68f5b6 100644
--- a/clang/test/AST/ast-dump-stmt.m
+++ b/clang/test/AST/ast-dump-stmt.m
@@ -55,4 +55,4 @@ id TestCompoundLiteral(id a) {
// CHECK: FunctionDecl{{.*}}TestCompoundLiteral
// CHECK: ExprWithCleanups
// CHECK-NEXT: cleanup CompoundLiteralExpr
-// CHECK: CompoundLiteralExpr{{.*}}'S' lvalue
+ // CHECK: CompoundLiteralExpr{{.*}}'S':'struct S' lvalue
diff --git a/clang/test/AST/ast-dump-template-decls.cpp b/clang/test/AST/ast-dump-template-decls.cpp
index d5228d4..ba3e405 100644
--- a/clang/test/AST/ast-dump-template-decls.cpp
+++ b/clang/test/AST/ast-dump-template-decls.cpp
@@ -115,8 +115,7 @@ template <class T> struct C {
};
using type2 = typename C<int>::type1<void>;
// CHECK: TypeAliasDecl 0x{{[^ ]*}} <line:[[@LINE-1]]:1, col:42> col:7 type2 'typename C<int>::type1<void>':'void (int)'
-// CHECK-NEXT: ElaboratedType 0x{{[^ ]*}} 'typename C<int>::type1<void>' sugar
-// CHECK-NEXT: TemplateSpecializationType 0x{{[^ ]*}} 'type1<void>' sugar alias
+// CHECK-NEXT: TemplateSpecializationType 0x{{[^ ]*}} 'typename C<int>::type1<void>' sugar alias
// CHECK-NEXT: name: 'C<int>::type1':'PR55886::C<int>::type1' qualified
// CHECK-NEXT: NestedNameSpecifier TypeSpec 'C<int>':'PR55886::C<int>'
// CHECK-NEXT: TypeAliasTemplateDecl {{.+}} type1
diff --git a/clang/test/AST/ast-dump-template-json-win32-mangler-crash.cpp b/clang/test/AST/ast-dump-template-json-win32-mangler-crash.cpp
index cc9a82c..3e0877f 100644
--- a/clang/test/AST/ast-dump-template-json-win32-mangler-crash.cpp
+++ b/clang/test/AST/ast-dump-template-json-win32-mangler-crash.cpp
@@ -537,28 +537,17 @@ int main()
// CHECK-NEXT: "inner": [
// CHECK-NEXT: {
// CHECK-NEXT: "id": "0x{{.*}}",
-// CHECK-NEXT: "kind": "ElaboratedType",
+// CHECK-NEXT: "kind": "InjectedClassNameType",
// CHECK-NEXT: "type": {
// CHECK-NEXT: "qualType": "integral_constant<_Ty, _Val>"
// CHECK-NEXT: },
// CHECK-NEXT: "isDependent": true,
// CHECK-NEXT: "isInstantiationDependent": true,
-// CHECK-NEXT: "inner": [
-// CHECK-NEXT: {
-// CHECK-NEXT: "id": "0x{{.*}}",
-// CHECK-NEXT: "kind": "InjectedClassNameType",
-// CHECK-NEXT: "type": {
-// CHECK-NEXT: "qualType": "integral_constant<_Ty, _Val>"
-// CHECK-NEXT: },
-// CHECK-NEXT: "isDependent": true,
-// CHECK-NEXT: "isInstantiationDependent": true,
-// CHECK-NEXT: "decl": {
-// CHECK-NEXT: "id": "0x{{.*}}",
-// CHECK-NEXT: "kind": "CXXRecordDecl",
-// CHECK-NEXT: "name": "integral_constant"
-// CHECK-NEXT: }
-// CHECK-NEXT: }
-// CHECK-NEXT: ]
+// CHECK-NEXT: "decl": {
+// CHECK-NEXT: "id": "0x{{.*}}",
+// CHECK-NEXT: "kind": "CXXRecordDecl",
+// CHECK-NEXT: "name": "integral_constant"
+// CHECK-NEXT: }
// CHECK-NEXT: }
// CHECK-NEXT: ]
// CHECK-NEXT: },
@@ -885,71 +874,60 @@ int main()
// CHECK-NEXT: "inner": [
// CHECK-NEXT: {
// CHECK-NEXT: "id": "0x{{.*}}",
-// CHECK-NEXT: "kind": "ElaboratedType",
+// CHECK-NEXT: "kind": "TemplateSpecializationType",
// CHECK-NEXT: "type": {
// CHECK-NEXT: "qualType": "integral_constant<bool, _Val>"
// CHECK-NEXT: },
// CHECK-NEXT: "isDependent": true,
// CHECK-NEXT: "isInstantiationDependent": true,
+// CHECK-NEXT: "templateName": "integral_constant",
// CHECK-NEXT: "inner": [
// CHECK-NEXT: {
-// CHECK-NEXT: "id": "0x{{.*}}",
-// CHECK-NEXT: "kind": "TemplateSpecializationType",
+// CHECK-NEXT: "kind": "TemplateArgument",
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "qualType": "integral_constant<bool, _Val>"
+// CHECK-NEXT: "qualType": "bool"
// CHECK-NEXT: },
-// CHECK-NEXT: "isDependent": true,
-// CHECK-NEXT: "isInstantiationDependent": true,
-// CHECK-NEXT: "templateName": "integral_constant",
// CHECK-NEXT: "inner": [
// CHECK-NEXT: {
-// CHECK-NEXT: "kind": "TemplateArgument",
+// CHECK-NEXT: "id": "0x{{.*}}",
+// CHECK-NEXT: "kind": "BuiltinType",
// CHECK-NEXT: "type": {
// CHECK-NEXT: "qualType": "bool"
-// CHECK-NEXT: },
-// CHECK-NEXT: "inner": [
-// CHECK-NEXT: {
-// CHECK-NEXT: "id": "0x{{.*}}",
-// CHECK-NEXT: "kind": "BuiltinType",
-// CHECK-NEXT: "type": {
-// CHECK-NEXT: "qualType": "bool"
-// CHECK-NEXT: }
-// CHECK-NEXT: }
-// CHECK-NEXT: ]
-// CHECK-NEXT: },
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: ]
+// CHECK-NEXT: },
+// CHECK-NEXT: {
+// CHECK-NEXT: "kind": "TemplateArgument",
+// CHECK-NEXT: "isExpr": true,
+// CHECK-NEXT: "inner": [
// CHECK-NEXT: {
-// CHECK-NEXT: "kind": "TemplateArgument",
-// CHECK-NEXT: "isExpr": true,
-// CHECK-NEXT: "inner": [
-// CHECK-NEXT: {
-// CHECK-NEXT: "id": "0x{{.*}}",
-// CHECK-NEXT: "kind": "DeclRefExpr",
-// CHECK-NEXT: "range": {
-// CHECK-NEXT: "begin": {
-// CHECK-NEXT: "offset": 554,
-// CHECK-NEXT: "col": 47,
-// CHECK-NEXT: "tokLen": 4
-// CHECK-NEXT: },
-// CHECK-NEXT: "end": {
-// CHECK-NEXT: "offset": 554,
-// CHECK-NEXT: "col": 47,
-// CHECK-NEXT: "tokLen": 4
-// CHECK-NEXT: }
-// CHECK-NEXT: },
-// CHECK-NEXT: "type": {
-// CHECK-NEXT: "qualType": "bool"
-// CHECK-NEXT: },
-// CHECK-NEXT: "valueCategory": "prvalue",
-// CHECK-NEXT: "referencedDecl": {
-// CHECK-NEXT: "id": "0x{{.*}}",
-// CHECK-NEXT: "kind": "NonTypeTemplateParmDecl",
-// CHECK-NEXT: "name": "_Val",
-// CHECK-NEXT: "type": {
-// CHECK-NEXT: "qualType": "bool"
-// CHECK-NEXT: }
-// CHECK-NEXT: }
+// CHECK-NEXT: "id": "0x{{.*}}",
+// CHECK-NEXT: "kind": "DeclRefExpr",
+// CHECK-NEXT: "range": {
+// CHECK-NEXT: "begin": {
+// CHECK-NEXT: "offset": 554,
+// CHECK-NEXT: "col": 47,
+// CHECK-NEXT: "tokLen": 4
+// CHECK-NEXT: },
+// CHECK-NEXT: "end": {
+// CHECK-NEXT: "offset": 554,
+// CHECK-NEXT: "col": 47,
+// CHECK-NEXT: "tokLen": 4
// CHECK-NEXT: }
-// CHECK-NEXT: ]
+// CHECK-NEXT: },
+// CHECK-NEXT: "type": {
+// CHECK-NEXT: "qualType": "bool"
+// CHECK-NEXT: },
+// CHECK-NEXT: "valueCategory": "prvalue",
+// CHECK-NEXT: "referencedDecl": {
+// CHECK-NEXT: "id": "0x{{.*}}",
+// CHECK-NEXT: "kind": "NonTypeTemplateParmDecl",
+// CHECK-NEXT: "name": "_Val",
+// CHECK-NEXT: "type": {
+// CHECK-NEXT: "qualType": "bool"
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: }
// CHECK-NEXT: ]
// CHECK-NEXT: }
diff --git a/clang/test/AST/ast-dump-template-name.cpp b/clang/test/AST/ast-dump-template-name.cpp
index acacdac..7f08508 100644
--- a/clang/test/AST/ast-dump-template-name.cpp
+++ b/clang/test/AST/ast-dump-template-name.cpp
@@ -11,13 +11,12 @@ namespace qualified {
// CHECK: Dumping qualified::TestQualified:
// CHECK-NEXT: TypeAliasDecl
-// CHECK-NEXT: `-ElaboratedType
-// CHECK-NEXT: `-TemplateSpecializationType
-// CHECK-NEXT: |-name: 'N' qualified
-// CHECK-NEXT: | `-TypeAliasTemplateDecl {{.+}} N{{$}}
-// CHECK-NEXT: |-TemplateArgument template 'foo::A':'qualified::foo::A' qualified{{$}}
-// CHECK-NEXT: | |-NestedNameSpecifier Namespace 0x{{.+}} 'foo'{{$}}
-// CHECK-NEXT: | `-ClassTemplateDecl {{.+}} A{{$}}
+// CHECK-NEXT: `-TemplateSpecializationType
+// CHECK-NEXT: |-name: 'N' qualified
+// CHECK-NEXT: | `-TypeAliasTemplateDecl {{.+}} N{{$}}
+// CHECK-NEXT: |-TemplateArgument template 'foo::A':'qualified::foo::A' qualified{{$}}
+// CHECK-NEXT: | |-NestedNameSpecifier Namespace 0x{{.+}} 'foo'{{$}}
+// CHECK-NEXT: | `-ClassTemplateDecl {{.+}} A{{$}}
namespace dependent {
template <class T> struct B {
@@ -27,12 +26,11 @@ namespace dependent {
// CHECK: Dumping dependent::B::TestDependent:
// CHECK-NEXT: TypeAliasDecl
-// CHECK-NEXT: `-ElaboratedType
-// CHECK-NEXT: `-TemplateSpecializationType
-// CHECK-NEXT: |-name: 'N' qualified
-// CHECK-NEXT: | `-TypeAliasTemplateDecl
-// CHECK-NEXT: |-TemplateArgument template 'T::template X':'type-parameter-0-0::template X' dependent{{$}}
-// CHECK-NEXT: | `-NestedNameSpecifier TypeSpec 'T'{{$}}
+// CHECK-NEXT: `-TemplateSpecializationType
+// CHECK-NEXT: |-name: 'N' qualified
+// CHECK-NEXT: | `-TypeAliasTemplateDecl
+// CHECK-NEXT: |-TemplateArgument template 'T::template X':'type-parameter-0-0::template X' dependent{{$}}
+// CHECK-NEXT: | `-NestedNameSpecifier TypeSpec 'T'{{$}}
namespace subst {
template <class> struct A;
@@ -46,15 +44,14 @@ namespace subst {
// CHECK: Dumping subst::TestSubst:
// CHECK-NEXT: TypeAliasDecl
-// CHECK-NEXT: `-ElaboratedType
-// CHECK-NEXT: `-TypedefType
-// CHECK-NEXT: |-TypeAlias
-// CHECK-NEXT: `-ElaboratedType
-// CHECK-NEXT: `-TemplateSpecializationType
-// CHECK-NEXT: |-name: 'C':'subst::B<subst::A>::C' qualified
-// CHECK-NEXT: | `-ClassTemplateDecl {{.+}} C
-// CHECK-NEXT: |-TemplateArgument template 'subst::A' subst index 0
-// CHECK-NEXT: | |-parameter: TemplateTemplateParmDecl {{.+}} depth 0 index 0 TT{{$}}
-// CHECK-NEXT: | |-associated ClassTemplateSpecialization {{.+}} 'B'{{$}}
-// CHECK-NEXT: | `-replacement:
-// CHECK-NEXT: | `-ClassTemplateDecl {{.+}} A{{$}}
+// CHECK-NEXT: `-TypedefType
+// CHECK-NEXT: |-NestedNameSpecifier TypeSpec 'B<A>':'subst::B<subst::A>'
+// CHECK-NEXT: |-TypeAlias
+// CHECK-NEXT: `-TemplateSpecializationType
+// CHECK-NEXT: |-name: 'C':'subst::B<subst::A>::C' qualified
+// CHECK-NEXT: | `-ClassTemplateDecl {{.+}} C
+// CHECK-NEXT: |-TemplateArgument template 'subst::A' subst index 0
+// CHECK-NEXT: | |-parameter: TemplateTemplateParmDecl {{.+}} depth 0 index 0 TT{{$}}
+// CHECK-NEXT: | |-associated ClassTemplateSpecialization {{.+}} 'B'{{$}}
+// CHECK-NEXT: | `-replacement:
+// CHECK-NEXT: | `-ClassTemplateDecl {{.+}} A{{$}}
diff --git a/clang/test/AST/ast-dump-templates.cpp b/clang/test/AST/ast-dump-templates.cpp
index b504acc..b42aa62 100644
--- a/clang/test/AST/ast-dump-templates.cpp
+++ b/clang/test/AST/ast-dump-templates.cpp
@@ -171,10 +171,11 @@ namespace TestDependentMemberPointer {
// DUMP-NEXT: | `-BuiltinType {{.+}} 'int'
// DUMP-NEXT: |-TypeAliasDecl {{.+}} Y 'int U::test::*'{{$}}
// DUMP-NEXT: | `-MemberPointerType {{.+}} 'int U::test::*' dependent
+// DUMP-NEXT: | |-DependentNameType {{.+}} 'U::test' dependent
// DUMP-NEXT: | `-BuiltinType {{.+}} 'int'
// DUMP-NEXT: `-TypeAliasDecl {{.+}} Z 'int U::template V<int>::*'{{$}}
// DUMP-NEXT: `-MemberPointerType {{.+}} 'int U::template V<int>::*' dependent
-// DUMP-NEXT: |-DependentTemplateSpecializationType {{.+}} 'template V<int>' dependent
+// DUMP-NEXT: |-DependentTemplateSpecializationType {{.+}} 'U::template V<int>' dependent
// DUMP-NEXT: `-BuiltinType {{.+}} 'int'
} // namespace TestDependentMemberPointer
@@ -186,14 +187,14 @@ namespace TestPartialSpecNTTP {
template <class U1, bool U2, bool U3>
struct Template2<Template1<U1, U2>, U3> {};
// DUMP: ClassTemplatePartialSpecializationDecl {{.+}} struct Template2
-// DUMP: |-TemplateArgument type 'Template1<type-parameter-0-0, value-parameter-0-1>'
-// DUMP-NEXT: | `-TemplateSpecializationType {{.+}} 'Template1<type-parameter-0-0, value-parameter-0-1>' dependent
+// DUMP: |-TemplateArgument type 'TestPartialSpecNTTP::Template1<type-parameter-0-0, value-parameter-0-1>'
+// DUMP-NEXT: | `-TemplateSpecializationType {{.+}} 'TestPartialSpecNTTP::Template1<type-parameter-0-0, value-parameter-0-1>' dependent
// DUMP-NEXT: | |-name: 'TestPartialSpecNTTP::Template1'
// DUMP-NEXT: | | `-ClassTemplateDecl {{.+}} Template1
// DUMP-NEXT: | |-TemplateArgument type 'type-parameter-0-0'
// DUMP-NEXT: | | `-TemplateTypeParmType {{.+}} 'type-parameter-0-0' dependent depth 0 index 0
// DUMP-NEXT: | `-TemplateArgument expr canonical 'value-parameter-0-1'
-// DUMP-NEXT: | `-DeclRefExpr {{.+}} 'bool' NonTypeTemplateParm {{.+}} 'TA2' 'bool'
+// DUMP-NEXT: | `-DeclRefExpr {{.+}} 'bool' NonTypeTemplateParm {{.+}} 'U2' 'bool'
// DUMP-NEXT: |-TemplateArgument expr canonical 'value-parameter-0-2'
// DUMP-NEXT: | `-DeclRefExpr {{.+}} 'bool' NonTypeTemplateParm {{.+}} 'U3' 'bool'
// DUMP-NEXT: |-TemplateTypeParmDecl {{.+}} referenced class depth 0 index 0 U1
@@ -204,8 +205,8 @@ namespace TestPartialSpecNTTP {
template <typename U1, bool U3, bool U2>
struct Template2<Template1<U1, U2>, U3> {};
// DUMP: ClassTemplatePartialSpecializationDecl {{.+}} struct Template2 definition explicit_specialization
-// DUMP: |-TemplateArgument type 'Template1<type-parameter-0-0, value-parameter-0-2>'
-// DUMP-NEXT: | `-TemplateSpecializationType {{.+}} 'Template1<type-parameter-0-0, value-parameter-0-2>' dependent
+// DUMP: |-TemplateArgument type 'TestPartialSpecNTTP::Template1<type-parameter-0-0, value-parameter-0-2>'
+// DUMP-NEXT: | `-TemplateSpecializationType {{.+}} 'TestPartialSpecNTTP::Template1<type-parameter-0-0, value-parameter-0-2>' dependent
// DUMP-NEXT: | |-name: 'TestPartialSpecNTTP::Template1'
// DUMP-NEXT: | | `-ClassTemplateDecl {{.+}} Template1
// DUMP-NEXT: | |-TemplateArgument type 'type-parameter-0-0'
@@ -621,7 +622,6 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: }
// JSON-NEXT: },
// JSON-NEXT: "isImplicit": true,
-// JSON-NEXT: "isReferenced": true,
// JSON-NEXT: "name": "foo",
// JSON-NEXT: "tagUsed": "struct"
// JSON-NEXT: },
@@ -4109,7 +4109,7 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "isImplicit": true,
// JSON-NEXT: "name": "<deduction guide for A>",
// JSON-NEXT: "type": {
-// JSON-NEXT: "qualType": "auto () -> A<T>"
+// JSON-NEXT: "qualType": "auto () -> test3::A<T>"
// JSON-NEXT: }
// JSON-NEXT: }
// JSON-NEXT: ]
@@ -4185,7 +4185,7 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "isImplicit": true,
// JSON-NEXT: "name": "<deduction guide for A>",
// JSON-NEXT: "type": {
-// JSON-NEXT: "qualType": "auto (A<T>) -> A<T>"
+// JSON-NEXT: "qualType": "auto (test3::A<T>) -> test3::A<T>"
// JSON-NEXT: },
// JSON-NEXT: "inner": [
// JSON-NEXT: {
@@ -4209,7 +4209,7 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: }
// JSON-NEXT: },
// JSON-NEXT: "type": {
-// JSON-NEXT: "qualType": "A<T>"
+// JSON-NEXT: "qualType": "test3::A<T>"
// JSON-NEXT: }
// JSON-NEXT: }
// JSON-NEXT: ]
@@ -6630,8 +6630,8 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "tokLen": 9
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 6356,
-// JSON-NEXT: "line": 179,
+// JSON-NEXT: "offset": 6425,
+// JSON-NEXT: "line": 180,
// JSON-NEXT: "col": 1,
// JSON-NEXT: "tokLen": 1
// JSON-NEXT: }
@@ -6889,6 +6889,15 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "inner": [
// JSON-NEXT: {
// JSON-NEXT: "id": "0x{{.*}}",
+// JSON-NEXT: "kind": "DependentNameType",
+// JSON-NEXT: "type": {
+// JSON-NEXT: "qualType": "U::test"
+// JSON-NEXT: },
+// JSON-NEXT: "isDependent": true,
+// JSON-NEXT: "isInstantiationDependent": true
+// JSON-NEXT: },
+// JSON-NEXT: {
+// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "BuiltinType",
// JSON-NEXT: "type": {
// JSON-NEXT: "qualType": "int"
@@ -6938,7 +6947,7 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "DependentTemplateSpecializationType",
// JSON-NEXT: "type": {
-// JSON-NEXT: "qualType": "template V<int>"
+// JSON-NEXT: "qualType": "U::template V<int>"
// JSON-NEXT: },
// JSON-NEXT: "isDependent": true,
// JSON-NEXT: "isInstantiationDependent": true
@@ -6964,20 +6973,20 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "NamespaceDecl",
// JSON-NEXT: "loc": {
-// JSON-NEXT: "offset": 6409,
-// JSON-NEXT: "line": 181,
+// JSON-NEXT: "offset": 6478,
+// JSON-NEXT: "line": 182,
// JSON-NEXT: "col": 11,
// JSON-NEXT: "tokLen": 19
// JSON-NEXT: },
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 6399,
+// JSON-NEXT: "offset": 6468,
// JSON-NEXT: "col": 1,
// JSON-NEXT: "tokLen": 9
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 9184,
-// JSON-NEXT: "line": 221,
+// JSON-NEXT: "offset": 9336,
+// JSON-NEXT: "line": 222,
// JSON-NEXT: "col": 1,
// JSON-NEXT: "tokLen": 1
// JSON-NEXT: }
@@ -6988,19 +6997,19 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "ClassTemplateDecl",
// JSON-NEXT: "loc": {
-// JSON-NEXT: "offset": 6532,
-// JSON-NEXT: "line": 183,
+// JSON-NEXT: "offset": 6601,
+// JSON-NEXT: "line": 184,
// JSON-NEXT: "col": 41,
// JSON-NEXT: "tokLen": 9
// JSON-NEXT: },
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 6494,
+// JSON-NEXT: "offset": 6563,
// JSON-NEXT: "col": 3,
// JSON-NEXT: "tokLen": 8
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 6543,
+// JSON-NEXT: "offset": 6612,
// JSON-NEXT: "col": 52,
// JSON-NEXT: "tokLen": 1
// JSON-NEXT: }
@@ -7011,18 +7020,18 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "TemplateTypeParmDecl",
// JSON-NEXT: "loc": {
-// JSON-NEXT: "offset": 6510,
+// JSON-NEXT: "offset": 6579,
// JSON-NEXT: "col": 19,
// JSON-NEXT: "tokLen": 3
// JSON-NEXT: },
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 6504,
+// JSON-NEXT: "offset": 6573,
// JSON-NEXT: "col": 13,
// JSON-NEXT: "tokLen": 5
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 6510,
+// JSON-NEXT: "offset": 6579,
// JSON-NEXT: "col": 19,
// JSON-NEXT: "tokLen": 3
// JSON-NEXT: }
@@ -7036,18 +7045,18 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "NonTypeTemplateParmDecl",
// JSON-NEXT: "loc": {
-// JSON-NEXT: "offset": 6520,
+// JSON-NEXT: "offset": 6589,
// JSON-NEXT: "col": 29,
// JSON-NEXT: "tokLen": 3
// JSON-NEXT: },
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 6515,
+// JSON-NEXT: "offset": 6584,
// JSON-NEXT: "col": 24,
// JSON-NEXT: "tokLen": 4
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 6520,
+// JSON-NEXT: "offset": 6589,
// JSON-NEXT: "col": 29,
// JSON-NEXT: "tokLen": 3
// JSON-NEXT: }
@@ -7063,18 +7072,18 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "CXXRecordDecl",
// JSON-NEXT: "loc": {
-// JSON-NEXT: "offset": 6532,
+// JSON-NEXT: "offset": 6601,
// JSON-NEXT: "col": 41,
// JSON-NEXT: "tokLen": 9
// JSON-NEXT: },
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 6525,
+// JSON-NEXT: "offset": 6594,
// JSON-NEXT: "col": 34,
// JSON-NEXT: "tokLen": 6
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 6543,
+// JSON-NEXT: "offset": 6612,
// JSON-NEXT: "col": 52,
// JSON-NEXT: "tokLen": 1
// JSON-NEXT: }
@@ -7137,18 +7146,18 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "CXXRecordDecl",
// JSON-NEXT: "loc": {
-// JSON-NEXT: "offset": 6532,
+// JSON-NEXT: "offset": 6601,
// JSON-NEXT: "col": 41,
// JSON-NEXT: "tokLen": 9
// JSON-NEXT: },
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 6525,
+// JSON-NEXT: "offset": 6594,
// JSON-NEXT: "col": 34,
// JSON-NEXT: "tokLen": 6
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 6532,
+// JSON-NEXT: "offset": 6601,
// JSON-NEXT: "col": 41,
// JSON-NEXT: "tokLen": 9
// JSON-NEXT: }
@@ -7165,19 +7174,19 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "ClassTemplateDecl",
// JSON-NEXT: "loc": {
-// JSON-NEXT: "offset": 6586,
-// JSON-NEXT: "line": 184,
+// JSON-NEXT: "offset": 6655,
+// JSON-NEXT: "line": 185,
// JSON-NEXT: "col": 41,
// JSON-NEXT: "tokLen": 9
// JSON-NEXT: },
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 6548,
+// JSON-NEXT: "offset": 6617,
// JSON-NEXT: "col": 3,
// JSON-NEXT: "tokLen": 8
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 6597,
+// JSON-NEXT: "offset": 6666,
// JSON-NEXT: "col": 52,
// JSON-NEXT: "tokLen": 1
// JSON-NEXT: }
@@ -7188,18 +7197,18 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "TemplateTypeParmDecl",
// JSON-NEXT: "loc": {
-// JSON-NEXT: "offset": 6564,
+// JSON-NEXT: "offset": 6633,
// JSON-NEXT: "col": 19,
// JSON-NEXT: "tokLen": 3
// JSON-NEXT: },
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 6558,
+// JSON-NEXT: "offset": 6627,
// JSON-NEXT: "col": 13,
// JSON-NEXT: "tokLen": 5
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 6564,
+// JSON-NEXT: "offset": 6633,
// JSON-NEXT: "col": 19,
// JSON-NEXT: "tokLen": 3
// JSON-NEXT: }
@@ -7213,18 +7222,18 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "NonTypeTemplateParmDecl",
// JSON-NEXT: "loc": {
-// JSON-NEXT: "offset": 6574,
+// JSON-NEXT: "offset": 6643,
// JSON-NEXT: "col": 29,
// JSON-NEXT: "tokLen": 3
// JSON-NEXT: },
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 6569,
+// JSON-NEXT: "offset": 6638,
// JSON-NEXT: "col": 24,
// JSON-NEXT: "tokLen": 4
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 6574,
+// JSON-NEXT: "offset": 6643,
// JSON-NEXT: "col": 29,
// JSON-NEXT: "tokLen": 3
// JSON-NEXT: }
@@ -7240,18 +7249,18 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "CXXRecordDecl",
// JSON-NEXT: "loc": {
-// JSON-NEXT: "offset": 6586,
+// JSON-NEXT: "offset": 6655,
// JSON-NEXT: "col": 41,
// JSON-NEXT: "tokLen": 9
// JSON-NEXT: },
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 6579,
+// JSON-NEXT: "offset": 6648,
// JSON-NEXT: "col": 34,
// JSON-NEXT: "tokLen": 6
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 6597,
+// JSON-NEXT: "offset": 6666,
// JSON-NEXT: "col": 52,
// JSON-NEXT: "tokLen": 1
// JSON-NEXT: }
@@ -7314,18 +7323,18 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "CXXRecordDecl",
// JSON-NEXT: "loc": {
-// JSON-NEXT: "offset": 6586,
+// JSON-NEXT: "offset": 6655,
// JSON-NEXT: "col": 41,
// JSON-NEXT: "tokLen": 9
// JSON-NEXT: },
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 6579,
+// JSON-NEXT: "offset": 6648,
// JSON-NEXT: "col": 34,
// JSON-NEXT: "tokLen": 6
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 6586,
+// JSON-NEXT: "offset": 6655,
// JSON-NEXT: "col": 41,
// JSON-NEXT: "tokLen": 9
// JSON-NEXT: }
@@ -7342,21 +7351,21 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "ClassTemplatePartialSpecializationDecl",
// JSON-NEXT: "loc": {
-// JSON-NEXT: "offset": 6650,
-// JSON-NEXT: "line": 187,
+// JSON-NEXT: "offset": 6719,
+// JSON-NEXT: "line": 188,
// JSON-NEXT: "col": 10,
// JSON-NEXT: "tokLen": 9
// JSON-NEXT: },
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 6603,
-// JSON-NEXT: "line": 186,
+// JSON-NEXT: "offset": 6672,
+// JSON-NEXT: "line": 187,
// JSON-NEXT: "col": 3,
// JSON-NEXT: "tokLen": 8
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 6684,
-// JSON-NEXT: "line": 187,
+// JSON-NEXT: "offset": 6753,
+// JSON-NEXT: "line": 188,
// JSON-NEXT: "col": 44,
// JSON-NEXT: "tokLen": 1
// JSON-NEXT: }
@@ -7418,14 +7427,14 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: {
// JSON-NEXT: "kind": "TemplateArgument",
// JSON-NEXT: "type": {
-// JSON-NEXT: "qualType": "Template1<type-parameter-0-0, value-parameter-0-1>"
+// JSON-NEXT: "qualType": "TestPartialSpecNTTP::Template1<type-parameter-0-0, value-parameter-0-1>"
// JSON-NEXT: },
// JSON-NEXT: "inner": [
// JSON-NEXT: {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "TemplateSpecializationType",
// JSON-NEXT: "type": {
-// JSON-NEXT: "qualType": "Template1<type-parameter-0-0, value-parameter-0-1>"
+// JSON-NEXT: "qualType": "TestPartialSpecNTTP::Template1<type-parameter-0-0, value-parameter-0-1>"
// JSON-NEXT: },
// JSON-NEXT: "isDependent": true,
// JSON-NEXT: "isInstantiationDependent": true,
@@ -7463,15 +7472,14 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "kind": "DeclRefExpr",
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 6520,
-// JSON-NEXT: "line": 183,
-// JSON-NEXT: "col": 29,
-// JSON-NEXT: "tokLen": 3
+// JSON-NEXT: "offset": 6743,
+// JSON-NEXT: "col": 34,
+// JSON-NEXT: "tokLen": 2
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 6520,
-// JSON-NEXT: "col": 29,
-// JSON-NEXT: "tokLen": 3
+// JSON-NEXT: "offset": 6743,
+// JSON-NEXT: "col": 34,
+// JSON-NEXT: "tokLen": 2
// JSON-NEXT: }
// JSON-NEXT: },
// JSON-NEXT: "type": {
@@ -7481,7 +7489,7 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "referencedDecl": {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "NonTypeTemplateParmDecl",
-// JSON-NEXT: "name": "TA2",
+// JSON-NEXT: "name": "U2",
// JSON-NEXT: "type": {
// JSON-NEXT: "qualType": "bool"
// JSON-NEXT: }
@@ -7503,13 +7511,12 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "kind": "DeclRefExpr",
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 6679,
-// JSON-NEXT: "line": 187,
+// JSON-NEXT: "offset": 6748,
// JSON-NEXT: "col": 39,
// JSON-NEXT: "tokLen": 2
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 6679,
+// JSON-NEXT: "offset": 6748,
// JSON-NEXT: "col": 39,
// JSON-NEXT: "tokLen": 2
// JSON-NEXT: }
@@ -7533,19 +7540,19 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "TemplateTypeParmDecl",
// JSON-NEXT: "loc": {
-// JSON-NEXT: "offset": 6619,
-// JSON-NEXT: "line": 186,
+// JSON-NEXT: "offset": 6688,
+// JSON-NEXT: "line": 187,
// JSON-NEXT: "col": 19,
// JSON-NEXT: "tokLen": 2
// JSON-NEXT: },
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 6613,
+// JSON-NEXT: "offset": 6682,
// JSON-NEXT: "col": 13,
// JSON-NEXT: "tokLen": 5
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 6619,
+// JSON-NEXT: "offset": 6688,
// JSON-NEXT: "col": 19,
// JSON-NEXT: "tokLen": 2
// JSON-NEXT: }
@@ -7560,18 +7567,18 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "NonTypeTemplateParmDecl",
// JSON-NEXT: "loc": {
-// JSON-NEXT: "offset": 6628,
+// JSON-NEXT: "offset": 6697,
// JSON-NEXT: "col": 28,
// JSON-NEXT: "tokLen": 2
// JSON-NEXT: },
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 6623,
+// JSON-NEXT: "offset": 6692,
// JSON-NEXT: "col": 23,
// JSON-NEXT: "tokLen": 4
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 6628,
+// JSON-NEXT: "offset": 6697,
// JSON-NEXT: "col": 28,
// JSON-NEXT: "tokLen": 2
// JSON-NEXT: }
@@ -7588,18 +7595,18 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "NonTypeTemplateParmDecl",
// JSON-NEXT: "loc": {
-// JSON-NEXT: "offset": 6637,
+// JSON-NEXT: "offset": 6706,
// JSON-NEXT: "col": 37,
// JSON-NEXT: "tokLen": 2
// JSON-NEXT: },
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 6632,
+// JSON-NEXT: "offset": 6701,
// JSON-NEXT: "col": 32,
// JSON-NEXT: "tokLen": 4
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 6637,
+// JSON-NEXT: "offset": 6706,
// JSON-NEXT: "col": 37,
// JSON-NEXT: "tokLen": 2
// JSON-NEXT: }
@@ -7616,19 +7623,19 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "CXXRecordDecl",
// JSON-NEXT: "loc": {
-// JSON-NEXT: "offset": 6650,
-// JSON-NEXT: "line": 187,
+// JSON-NEXT: "offset": 6719,
+// JSON-NEXT: "line": 188,
// JSON-NEXT: "col": 10,
// JSON-NEXT: "tokLen": 9
// JSON-NEXT: },
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 6643,
+// JSON-NEXT: "offset": 6712,
// JSON-NEXT: "col": 3,
// JSON-NEXT: "tokLen": 6
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 6650,
+// JSON-NEXT: "offset": 6719,
// JSON-NEXT: "col": 10,
// JSON-NEXT: "tokLen": 9
// JSON-NEXT: }
@@ -7643,21 +7650,21 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "ClassTemplatePartialSpecializationDecl",
// JSON-NEXT: "loc": {
-// JSON-NEXT: "offset": 7925,
-// JSON-NEXT: "line": 205,
+// JSON-NEXT: "offset": 8035,
+// JSON-NEXT: "line": 206,
// JSON-NEXT: "col": 10,
// JSON-NEXT: "tokLen": 9
// JSON-NEXT: },
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 7875,
-// JSON-NEXT: "line": 204,
+// JSON-NEXT: "offset": 7985,
+// JSON-NEXT: "line": 205,
// JSON-NEXT: "col": 3,
// JSON-NEXT: "tokLen": 8
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 7959,
-// JSON-NEXT: "line": 205,
+// JSON-NEXT: "offset": 8069,
+// JSON-NEXT: "line": 206,
// JSON-NEXT: "col": 44,
// JSON-NEXT: "tokLen": 1
// JSON-NEXT: }
@@ -7719,14 +7726,14 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: {
// JSON-NEXT: "kind": "TemplateArgument",
// JSON-NEXT: "type": {
-// JSON-NEXT: "qualType": "Template1<type-parameter-0-0, value-parameter-0-2>"
+// JSON-NEXT: "qualType": "TestPartialSpecNTTP::Template1<type-parameter-0-0, value-parameter-0-2>"
// JSON-NEXT: },
// JSON-NEXT: "inner": [
// JSON-NEXT: {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "TemplateSpecializationType",
// JSON-NEXT: "type": {
-// JSON-NEXT: "qualType": "Template1<type-parameter-0-0, value-parameter-0-2>"
+// JSON-NEXT: "qualType": "TestPartialSpecNTTP::Template1<type-parameter-0-0, value-parameter-0-2>"
// JSON-NEXT: },
// JSON-NEXT: "isDependent": true,
// JSON-NEXT: "isInstantiationDependent": true,
@@ -7764,12 +7771,12 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "kind": "DeclRefExpr",
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 7949,
+// JSON-NEXT: "offset": 8059,
// JSON-NEXT: "col": 34,
// JSON-NEXT: "tokLen": 2
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 7949,
+// JSON-NEXT: "offset": 8059,
// JSON-NEXT: "col": 34,
// JSON-NEXT: "tokLen": 2
// JSON-NEXT: }
@@ -7803,12 +7810,12 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "kind": "DeclRefExpr",
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 7954,
+// JSON-NEXT: "offset": 8064,
// JSON-NEXT: "col": 39,
// JSON-NEXT: "tokLen": 2
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 7954,
+// JSON-NEXT: "offset": 8064,
// JSON-NEXT: "col": 39,
// JSON-NEXT: "tokLen": 2
// JSON-NEXT: }
@@ -7832,19 +7839,19 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "TemplateTypeParmDecl",
// JSON-NEXT: "loc": {
-// JSON-NEXT: "offset": 7894,
-// JSON-NEXT: "line": 204,
+// JSON-NEXT: "offset": 8004,
+// JSON-NEXT: "line": 205,
// JSON-NEXT: "col": 22,
// JSON-NEXT: "tokLen": 2
// JSON-NEXT: },
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 7885,
+// JSON-NEXT: "offset": 7995,
// JSON-NEXT: "col": 13,
// JSON-NEXT: "tokLen": 8
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 7894,
+// JSON-NEXT: "offset": 8004,
// JSON-NEXT: "col": 22,
// JSON-NEXT: "tokLen": 2
// JSON-NEXT: }
@@ -7859,18 +7866,18 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "NonTypeTemplateParmDecl",
// JSON-NEXT: "loc": {
-// JSON-NEXT: "offset": 7903,
+// JSON-NEXT: "offset": 8013,
// JSON-NEXT: "col": 31,
// JSON-NEXT: "tokLen": 2
// JSON-NEXT: },
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 7898,
+// JSON-NEXT: "offset": 8008,
// JSON-NEXT: "col": 26,
// JSON-NEXT: "tokLen": 4
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 7903,
+// JSON-NEXT: "offset": 8013,
// JSON-NEXT: "col": 31,
// JSON-NEXT: "tokLen": 2
// JSON-NEXT: }
@@ -7887,18 +7894,18 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "NonTypeTemplateParmDecl",
// JSON-NEXT: "loc": {
-// JSON-NEXT: "offset": 7912,
+// JSON-NEXT: "offset": 8022,
// JSON-NEXT: "col": 40,
// JSON-NEXT: "tokLen": 2
// JSON-NEXT: },
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 7907,
+// JSON-NEXT: "offset": 8017,
// JSON-NEXT: "col": 35,
// JSON-NEXT: "tokLen": 4
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 7912,
+// JSON-NEXT: "offset": 8022,
// JSON-NEXT: "col": 40,
// JSON-NEXT: "tokLen": 2
// JSON-NEXT: }
@@ -7915,19 +7922,19 @@ namespace TestPartialSpecNTTP {
// JSON-NEXT: "id": "0x{{.*}}",
// JSON-NEXT: "kind": "CXXRecordDecl",
// JSON-NEXT: "loc": {
-// JSON-NEXT: "offset": 7925,
-// JSON-NEXT: "line": 205,
+// JSON-NEXT: "offset": 8035,
+// JSON-NEXT: "line": 206,
// JSON-NEXT: "col": 10,
// JSON-NEXT: "tokLen": 9
// JSON-NEXT: },
// JSON-NEXT: "range": {
// JSON-NEXT: "begin": {
-// JSON-NEXT: "offset": 7918,
+// JSON-NEXT: "offset": 8028,
// JSON-NEXT: "col": 3,
// JSON-NEXT: "tokLen": 6
// JSON-NEXT: },
// JSON-NEXT: "end": {
-// JSON-NEXT: "offset": 7925,
+// JSON-NEXT: "offset": 8035,
// JSON-NEXT: "col": 10,
// JSON-NEXT: "tokLen": 9
// JSON-NEXT: }
diff --git a/clang/test/AST/ast-dump-traits.cpp b/clang/test/AST/ast-dump-traits.cpp
index 72d2a2a..b844fd6 100644
--- a/clang/test/AST/ast-dump-traits.cpp
+++ b/clang/test/AST/ast-dump-traits.cpp
@@ -40,9 +40,8 @@ void test_unary_expr_or_type_trait() {
// CHECK-NEXT: | | `-EnumDecl {{.*}} <col:3, col:11> col:8{{( imported)?}} referenced E
// CHECK-NEXT: | |-CStyleCastExpr {{.*}} <line:13:3, col:21> 'void' <ToVoid>
// CHECK-NEXT: | | `-TypeTraitExpr {{.*}} <col:10, col:21> 'bool' __is_enum
-// CHECK-NEXT: | | `-ElaboratedType {{.*}} 'E' sugar
-// CHECK-NEXT: | | `-EnumType {{.*}} 'E'
-// CHECK-NEXT: | | `-Enum {{.*}} 'E'
+// CHECK-NEXT: | | `-EnumType {{.*}} 'E'
+// CHECK-NEXT: | | `-Enum {{.*}} 'E'
// CHECK-NEXT: | |-CStyleCastExpr {{.*}} <line:15:3, col:30> 'void' <ToVoid>
// CHECK-NEXT: | | `-TypeTraitExpr {{.*}} <col:10, col:30> 'bool' __is_same
// CHECK-NEXT: | | |-BuiltinType {{.*}} 'int'
diff --git a/clang/test/AST/ast-dump-types-json.cpp b/clang/test/AST/ast-dump-types-json.cpp
index cc4d4d9..aac6027 100644
--- a/clang/test/AST/ast-dump-types-json.cpp
+++ b/clang/test/AST/ast-dump-types-json.cpp
@@ -51,30 +51,20 @@ using ::TestUsingShadowDeclType;
// CHECK-NEXT: },
// CHECK-NEXT: "name": "TestElaboratedType1",
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "desugaredQualType": "T",
// CHECK-NEXT: "qualType": "struct T"
// CHECK-NEXT: },
// CHECK-NEXT: "inner": [
// CHECK-NEXT: {
// CHECK-NEXT: "id": "0x{{.*}}",
-// CHECK-NEXT: "kind": "ElaboratedType",
+// CHECK-NEXT: "kind": "RecordType",
// CHECK-NEXT: "type": {
// CHECK-NEXT: "qualType": "struct T"
// CHECK-NEXT: },
-// CHECK-NEXT: "inner": [
-// CHECK-NEXT: {
-// CHECK-NEXT: "id": "0x{{.*}}",
-// CHECK-NEXT: "kind": "RecordType",
-// CHECK-NEXT: "type": {
-// CHECK-NEXT: "qualType": "T"
-// CHECK-NEXT: },
-// CHECK-NEXT: "decl": {
-// CHECK-NEXT: "id": "0x{{.*}}",
-// CHECK-NEXT: "kind": "CXXRecordDecl",
-// CHECK-NEXT: "name": "T"
-// CHECK-NEXT: }
-// CHECK-NEXT: }
-// CHECK-NEXT: ]
+// CHECK-NEXT: "decl": {
+// CHECK-NEXT: "id": "0x{{.*}}",
+// CHECK-NEXT: "kind": "CXXRecordDecl",
+// CHECK-NEXT: "name": "T"
+// CHECK-NEXT: }
// CHECK-NEXT: }
// CHECK-NEXT: ]
// CHECK-NEXT: }
@@ -108,25 +98,16 @@ using ::TestUsingShadowDeclType;
// CHECK-NEXT: "inner": [
// CHECK-NEXT: {
// CHECK-NEXT: "id": "0x{{.*}}",
-// CHECK-NEXT: "kind": "ElaboratedType",
+// CHECK-NEXT: "kind": "RecordType",
// CHECK-NEXT: "type": {
// CHECK-NEXT: "qualType": "NS::S"
// CHECK-NEXT: },
// CHECK-NEXT: "qualifier": "NS::",
-// CHECK-NEXT: "inner": [
-// CHECK-NEXT: {
-// CHECK-NEXT: "id": "0x{{.*}}",
-// CHECK-NEXT: "kind": "RecordType",
-// CHECK-NEXT: "type": {
-// CHECK-NEXT: "qualType": "NS::S"
-// CHECK-NEXT: },
-// CHECK-NEXT: "decl": {
-// CHECK-NEXT: "id": "0x{{.*}}",
-// CHECK-NEXT: "kind": "CXXRecordDecl",
-// CHECK-NEXT: "name": "S"
-// CHECK-NEXT: }
-// CHECK-NEXT: }
-// CHECK-NEXT: ]
+// CHECK-NEXT: "decl": {
+// CHECK-NEXT: "id": "0x{{.*}}",
+// CHECK-NEXT: "kind": "CXXRecordDecl",
+// CHECK-NEXT: "name": "S"
+// CHECK-NEXT: }
// CHECK-NEXT: }
// CHECK-NEXT: ]
// CHECK-NEXT: }
@@ -538,7 +519,39 @@ using ::TestUsingShadowDeclType;
// CHECK-NEXT: },
// CHECK-NEXT: "inner": [
// CHECK-NEXT: {
-// CHECK-NEXT: "id": "0x0"
+// CHECK-NEXT: "id": "0x{{.*}}",
+// CHECK-NEXT: "kind": "TypedefDecl",
+// CHECK-NEXT: "loc": {
+// CHECK-NEXT: "offset": 506,
+// CHECK-NEXT: "line": 23,
+// CHECK-NEXT: "col": 13,
+// CHECK-NEXT: "tokLen": 23
+// CHECK-NEXT: },
+// CHECK-NEXT: "range": {
+// CHECK-NEXT: "begin": {
+// CHECK-NEXT: "offset": 494,
+// CHECK-NEXT: "col": 1,
+// CHECK-NEXT: "tokLen": 7
+// CHECK-NEXT: },
+// CHECK-NEXT: "end": {
+// CHECK-NEXT: "offset": 506,
+// CHECK-NEXT: "col": 13,
+// CHECK-NEXT: "tokLen": 23
+// CHECK-NEXT: }
+// CHECK-NEXT: },
+// CHECK-NEXT: "name": "TestUsingShadowDeclType",
+// CHECK-NEXT: "type": {
+// CHECK-NEXT: "qualType": "int"
+// CHECK-NEXT: },
+// CHECK-NEXT: "inner": [
+// CHECK-NEXT: {
+// CHECK-NEXT: "id": "0x{{.*}}",
+// CHECK-NEXT: "kind": "BuiltinType",
+// CHECK-NEXT: "type": {
+// CHECK-NEXT: "qualType": "int"
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: ]
// CHECK-NEXT: }
// CHECK-NEXT: ]
// CHECK-NEXT: }
diff --git a/clang/test/AST/ast-dump-using-template.cpp b/clang/test/AST/ast-dump-using-template.cpp
index 2c95849..a5a0e4d 100644
--- a/clang/test/AST/ast-dump-using-template.cpp
+++ b/clang/test/AST/ast-dump-using-template.cpp
@@ -19,40 +19,36 @@ using ns::S2;
template<typename T>
using A = S<T>;
// CHECK: TypeAliasDecl
-// CHECK-NEXT: `-ElaboratedType {{.*}} 'S<T>' sugar dependent
-// CHECK-NEXT: `-TemplateSpecializationType {{.*}} 'S<T>' dependent
-// CHECK-NEXT: |-name: 'S':'ns::S' qualified
-// CHECK-NEXT: | |-UsingShadowDecl {{.+}} ClassTemplate {{.+}} 'S'
+// CHECK-NEXT: `-TemplateSpecializationType {{.*}} 'S<T>' dependent
+// CHECK-NEXT: |-name: 'S':'ns::S' qualified
+// CHECK-NEXT:  |   |-UsingShadowDecl {{.+}} ClassTemplate {{.+}} 'S'
// TemplateName in TemplateArgument.
template <template <typename> class T> class X {};
using B = X<S>;
// CHECK: TypeAliasDecl
-// CHECK-NEXT: `-ElaboratedType {{.*}} 'X<S>' sugar
-// CHECK-NEXT: `-TemplateSpecializationType {{.*}} 'X<S>' sugar
-// CHECK-NEXT: |-name: 'X' qualified
-// CHECK-NEXT: | `-ClassTemplateDecl {{.+}} X
-// CHECK-NEXT: |-TemplateArgument template 'S':'ns::S' qualified
-// CHECK-NEXT: | |-UsingShadowDecl {{.*}} implicit ClassTemplate {{.*}} 'S'
-// CHECK-NEXT: | `-target: ClassTemplateDecl {{.*}} S
-// CHECK-NEXT: `-RecordType {{.*}} 'X<ns::S>'
-// CHECK-NEXT: `-ClassTemplateSpecialization {{.*}} 'X'
+// CHECK-NEXT: `-TemplateSpecializationType {{.*}} 'X<S>' sugar
+// CHECK-NEXT: |-name: 'X' qualified
+// CHECK-NEXT: | `-ClassTemplateDecl {{.+}} X
+// CHECK-NEXT: |-TemplateArgument template 'S':'ns::S' qualified
+// CHECK-NEXT: | |-UsingShadowDecl {{.*}} implicit ClassTemplate {{.*}} 'S'
+// CHECK-NEXT: | `-target: ClassTemplateDecl {{.*}} S
+// CHECK-NEXT: `-RecordType {{.*}} 'X<ns::S>'
+// CHECK-NEXT: `-ClassTemplateSpecialization {{.*}} 'X'
// TemplateName in DeducedTemplateSpecializationType.
S DeducedTemplateSpecializationT(123);
using C = decltype(DeducedTemplateSpecializationT);
// CHECK: DecltypeType {{.*}}
// CHECK-NEXT: |-DeclRefExpr {{.*}}
-// CHECK-NEXT: `-ElaboratedType {{.*}} 'S<int>' sugar
-// CHECK-NEXT: `-DeducedTemplateSpecializationType {{.*}} 'ns::S<int>' sugar
-// CHECK-NEXT: |-name: 'S':'ns::S' qualified
-// CHECK-NEXT: | |-UsingShadowDecl {{.+}} 'S'
+// CHECK-NEXT: `-DeducedTemplateSpecializationType {{.*}} 'S<int>' sugar
+// CHECK-NEXT: |-name: 'S':'ns::S' qualified
+// CHECK-NEXT: | |-UsingShadowDecl {{.+}} 'S'
S2 DeducedTemplateSpecializationT2(123);
using D = decltype(DeducedTemplateSpecializationT2);
// CHECK: DecltypeType {{.*}}
// CHECK-NEXT: |-DeclRefExpr {{.*}}
-// CHECK-NEXT: `-ElaboratedType {{.*}} 'S2<int>' sugar
-// CHECK-NEXT: `-DeducedTemplateSpecializationType {{.*}} 'S2<int>' sugar
-// CHECK-NEXT: |-name: 'S2':'ns::S2' qualified
-// CHECK-NEXT: | |-UsingShadowDecl {{.+}} ClassTemplate {{.+}} 'S2'
+// CHECK-NEXT: `-DeducedTemplateSpecializationType {{.*}} 'S2<int>' sugar
+// CHECK-NEXT: |-name: 'S2':'ns::S2' qualified
+// CHECK-NEXT:   |   |-UsingShadowDecl {{.+}} ClassTemplate {{.+}} 'S2'
diff --git a/clang/test/AST/ast-dump-using.cpp b/clang/test/AST/ast-dump-using.cpp
index 8e5c60d..0c8405b 100644
--- a/clang/test/AST/ast-dump-using.cpp
+++ b/clang/test/AST/ast-dump-using.cpp
@@ -9,19 +9,17 @@ using a::S;
// CHECK: UsingDecl {{.*}} a::S
// CHECK-NEXT: | `-NestedNameSpecifier Namespace {{.*}} 'a'
// CHECK-NEXT: UsingShadowDecl {{.*}} implicit CXXRecord {{.*}} 'S'
-// CHECK-NEXT: `-RecordType {{.*}} 'a::S'
+// CHECK-NEXT: `-CXXRecordDecl {{.*}} referenced struct S
typedef S f; // to dump the introduced type
// CHECK: TypedefDecl
-// CHECK-NEXT: `-ElaboratedType {{.*}} 'S' sugar
-// CHECK-NEXT: `-UsingType [[TYPE_ADDR:.*]] 'a::S' sugar
-// CHECK-NEXT: |-UsingShadow [[SHADOW_ADDR:.*]] 'S'
-// CHECK-NEXT: `-RecordType {{.*}} 'a::S'
+// CHECK-NEXT: `-UsingType [[TYPE_ADDR:.*]] 'S' sugar 'a::S'
+// CHECK-NEXT: |-UsingShadow [[SHADOW_ADDR:.*]] 'S'
+// CHECK-NEXT: `-RecordType {{.*}} 'a::S'
typedef S e; // check the same UsingType is reused.
// CHECK: TypedefDecl
-// CHECK-NEXT: `-ElaboratedType {{.*}} 'S' sugar
-// CHECK-NEXT: `-UsingType [[TYPE_ADDR]] 'a::S' sugar
-// CHECK-NEXT: |-UsingShadow [[SHADOW_ADDR]] 'S'
-// CHECK-NEXT: `-RecordType {{.*}} 'a::S'
+// CHECK-NEXT: `-UsingType [[TYPE_ADDR]] 'S' sugar 'a::S'
+// CHECK-NEXT: |-UsingShadow [[SHADOW_ADDR]] 'S'
+// CHECK-NEXT: `-RecordType {{.*}} 'a::S'
using a::x;
void foo() {
diff --git a/clang/test/AST/attr-swift_attr.m b/clang/test/AST/attr-swift_attr.m
index 6888745..766da93 100644
--- a/clang/test/AST/attr-swift_attr.m
+++ b/clang/test/AST/attr-swift_attr.m
@@ -43,7 +43,7 @@ typedef struct {
void *ptr;
} SendableStruct;
-// CHECK-LABEL: TypedefDecl {{.*}} SendableStruct 'struct SendableStruct':'SendableStruct'
+// CHECK-LABEL: TypedefDecl {{.*}} SendableStruct 'struct SendableStruct'
// CHECK: SwiftAttrAttr {{.*}} "@Sendable"
@interface TestAttrPlacementInBlock1
diff --git a/clang/test/AST/coroutine-locals-cleanup.cpp b/clang/test/AST/coroutine-locals-cleanup.cpp
index a7f524b0..b0577f9 100644
--- a/clang/test/AST/coroutine-locals-cleanup.cpp
+++ b/clang/test/AST/coroutine-locals-cleanup.cpp
@@ -86,7 +86,7 @@ Task bar() {
// CHECK: ExprWithCleanups {{.*}} 'void'
// CHECK-NEXT: CoawaitExpr
// CHECK-NEXT: CXXBindTemporaryExpr {{.*}} 'Task' (CXXTemporary {{.*}})
-// CHECK: MaterializeTemporaryExpr {{.*}} 'Awaiter':'Task::Awaiter'
+// CHECK: MaterializeTemporaryExpr {{.*}} 'Awaiter'
// CHECK: ExprWithCleanups {{.*}} 'bool'
// CHECK-NEXT: CXXMemberCallExpr {{.*}} 'bool'
// CHECK-NEXT: MemberExpr {{.*}} .await_ready
@@ -96,7 +96,7 @@ Task bar() {
// CHECK: ExprWithCleanups {{.*}} 'void'
// CHECK-NEXT: CoawaitExpr
// CHECK-NEXT: CXXBindTemporaryExpr {{.*}} 'Task' (CXXTemporary {{.*}})
-// CHECK: MaterializeTemporaryExpr {{.*}} 'Awaiter':'Task::Awaiter'
+// CHECK: MaterializeTemporaryExpr {{.*}} 'Awaiter'
// CHECK: ExprWithCleanups {{.*}} 'bool'
// CHECK-NEXT: CXXMemberCallExpr {{.*}} 'bool'
// CHECK-NEXT: MemberExpr {{.*}} .await_ready
diff --git a/clang/test/AST/cxx2c-variadic-friends.cpp b/clang/test/AST/cxx2c-variadic-friends.cpp
index fc84e73..6147d7f 100644
--- a/clang/test/AST/cxx2c-variadic-friends.cpp
+++ b/clang/test/AST/cxx2c-variadic-friends.cpp
@@ -70,11 +70,11 @@ template <typename ...Pack> struct Variadic {
// CHECK-LABEL: ClassTemplateDecl {{.*}} S2
// PRINT-LABEL: template <class ...Ts> struct S2 {
template<class ...Ts> struct S2 {
- // CHECK: FriendDecl {{.*}} 'class C<Ts>':'C<Ts>'...
+ // CHECK: FriendDecl {{.*}} 'class C<Ts>'...
// PRINT-NEXT: friend class C<Ts>...;
friend class C<Ts>...;
- // CHECK-NEXT: FriendDecl {{.*}} 'class N::C<Ts>':'C<Ts>'...
+ // CHECK-NEXT: FriendDecl {{.*}} 'class N::C<Ts>'...
// PRINT-NEXT: friend class N::C<Ts>...
friend class N::C<Ts>...;
};
diff --git a/clang/test/AST/deduction-guides.cpp b/clang/test/AST/deduction-guides.cpp
index d96c7e6..5ecb276 100644
--- a/clang/test/AST/deduction-guides.cpp
+++ b/clang/test/AST/deduction-guides.cpp
@@ -30,11 +30,11 @@ struct HasDeductionGuideTypeAlias {
HasDeductionGuideTypeAlias()->HasDeductionGuideTypeAlias<int>;
// The parameter to this one shouldn't be an elaborated type.
-// CHECK: CXXDeductionGuideDecl {{.*}} implicit <deduction guide for HasDeductionGuide> 'auto (typename STy::Child) -> HasDeductionGuide<T>'
-// CHECK: CXXDeductionGuideDecl {{.*}} implicit <deduction guide for HasDeductionGuide> 'auto (HasDeductionGuide<T>) -> HasDeductionGuide<T>'
+// CHECK: CXXDeductionGuideDecl {{.*}} implicit <deduction guide for HasDeductionGuide> 'auto (typename STy::Child) -> PR46111::HasDeductionGuide<T>'
+// CHECK: CXXDeductionGuideDecl {{.*}} implicit <deduction guide for HasDeductionGuide> 'auto (PR46111::HasDeductionGuide<T>) -> PR46111::HasDeductionGuide<T>'
// CHECK: CXXDeductionGuideDecl {{.*}} <deduction guide for HasDeductionGuide> 'auto () -> HasDeductionGuide<int>'
-// CHECK: CXXDeductionGuideDecl {{.*}} implicit <deduction guide for HasDeductionGuideTypeAlias> 'auto (typename STy::Child) -> HasDeductionGuideTypeAlias<T>'
-// CHECK: CXXDeductionGuideDecl {{.*}} implicit <deduction guide for HasDeductionGuideTypeAlias> 'auto (HasDeductionGuideTypeAlias<T>) -> HasDeductionGuideTypeAlias<T>'
+// CHECK: CXXDeductionGuideDecl {{.*}} implicit <deduction guide for HasDeductionGuideTypeAlias> 'auto (typename STy::Child) -> PR46111::HasDeductionGuideTypeAlias<T>'
+// CHECK: CXXDeductionGuideDecl {{.*}} implicit <deduction guide for HasDeductionGuideTypeAlias> 'auto (PR46111::HasDeductionGuideTypeAlias<T>) -> PR46111::HasDeductionGuideTypeAlias<T>'
// CHECK: CXXDeductionGuideDecl {{.*}} <deduction guide for HasDeductionGuideTypeAlias> 'auto () -> HasDeductionGuideTypeAlias<int>'
} // namespace PR46111
@@ -64,15 +64,15 @@ namespace PR48177 {
// CHECK: CXXRecordDecl {{.*}} struct Derived
// CHECK: TypeAliasDecl {{.*}} type_alias 'typename Derived<int, 1, int>::type_alias':'int'
-// CHECK-NEXT: ElaboratedType {{.*}} 'typename Derived<int, 1, int>::type_alias' sugar
-// CHECK-NEXT: TypedefType {{.*}} 'PR48177::Base<int>::type_alias' sugar
+// CHECK-NEXT: TypedefType {{.*}} 'typename Derived<int, 1, int>::type_alias' sugar
+// CHECK-NEXT: NestedNameSpecifier TypeSpec 'Derived<int, 1, int>'
// CHECK-NEXT: TypeAlias {{.*}} 'type_alias'
// CHECK-NEXT: SubstTemplateTypeParmType {{.*}} 'int' sugar class depth 0 index 0 A
// CHECK-NEXT: ClassTemplateSpecialization {{.*}} 'Base'
// CHECK-NEXT: BuiltinType {{.*}} 'int'
-// CHECK: CXXDeductionGuideDecl {{.*}} implicit <deduction guide for Derived> 'auto (Derived<T, S, A> &&, const typename Derived<T, S, A>::type_alias &) -> Derived<T, S, A>'
-// CHECK: CXXDeductionGuideDecl {{.*}} implicit <deduction guide for Derived> 'auto (T) -> Derived<T, S, A>'
-// CHECK: CXXDeductionGuideDecl {{.*}} implicit <deduction guide for Derived> 'auto (Derived<T, S, A>) -> Derived<T, S, A>'
+// CHECK: CXXDeductionGuideDecl {{.*}} implicit <deduction guide for Derived> 'auto (Derived<T, S, A> &&, const typename Derived<T, S, A>::type_alias &) -> PR48177::Derived<T, S, A>'
+// CHECK: CXXDeductionGuideDecl {{.*}} implicit <deduction guide for Derived> 'auto (T) -> PR48177::Derived<T, S, A>'
+// CHECK: CXXDeductionGuideDecl {{.*}} implicit <deduction guide for Derived> 'auto (PR48177::Derived<T, S, A>) -> PR48177::Derived<T, S, A>'
// CHECK: CXXDeductionGuideDecl {{.*}} <deduction guide for Derived> 'auto (T, A) -> Derived<T, 1, A>'
// CHECK: CXXDeductionGuideDecl {{.*}} <deduction guide for Derived> 'auto (int, int) -> Derived<int, 1, int>'
diff --git a/clang/test/AST/float16.cpp b/clang/test/AST/float16.cpp
index a9e1144..643d37c 100644
--- a/clang/test/AST/float16.cpp
+++ b/clang/test/AST/float16.cpp
@@ -126,7 +126,7 @@ public:
};
//CHECK: |-CXXRecordDecl {{.*}} referenced class C1 definition
-//CHECK: | |-CXXRecordDecl {{.*}} implicit referenced class C1
+//CHECK: | |-CXXRecordDecl {{.*}} implicit class C1
//CHECK-NEXT: | |-FieldDecl {{.*}} referenced f1c '_Float16'
//CHECK-NEXT: | |-VarDecl {{.*}} used f2c 'const _Float16' static
//CHECK-NEXT: | |-FieldDecl {{.*}} f3c 'volatile _Float16'
diff --git a/clang/test/AST/sourceranges.cpp b/clang/test/AST/sourceranges.cpp
index f78d34c..598a28b 100644
--- a/clang/test/AST/sourceranges.cpp
+++ b/clang/test/AST/sourceranges.cpp
@@ -47,7 +47,7 @@ struct D {
void construct() {
using namespace foo;
A a = A(12);
- // CHECK: CXXConstructExpr {{0x[0-9a-fA-F]+}} <col:9, col:13> 'A':'foo::A' 'void (int){{( __attribute__\(\(thiscall\)\))?}}'
+ // CHECK: CXXConstructExpr {{0x[0-9a-fA-F]+}} <col:9, col:13> 'A' 'void (int){{( __attribute__\(\(thiscall\)\))?}}'
D d = D(12);
// CHECK: CXXConstructExpr {{0x[0-9a-fA-F]+}} <col:9, col:13> 'D' 'void (int){{( __attribute__\(\(thiscall\)\))?}}'
}
@@ -174,7 +174,7 @@ namespace in_class_init {
// CHECK-1Z: CXXRecordDecl {{.*}} struct B definition
struct B {
- // CHECK-1Z: FieldDecl {{.*}} a 'A':'in_class_init::A'
+ // CHECK-1Z: FieldDecl {{.*}} a 'A'
// CHECK-1Z-NEXT: InitListExpr {{.*}} <col:11, col:12
A a = {};
};
@@ -192,7 +192,7 @@ namespace delegating_constructor_init {
// CHECK-1Z: CXXRecordDecl {{.*}} struct C definition
struct C : B {
// CHECK-1Z: CXXConstructorDecl {{.*}} C
- // CHECK-1Z-NEXT: CXXCtorInitializer 'B':'delegating_constructor_init::B'
+ // CHECK-1Z-NEXT: CXXCtorInitializer 'B'
// CHECK-1Z-NEXT: CXXConstructExpr {{.*}} <col:11, col:15
// CHECK-1Z-NEXT: InitListExpr {{.*}} <col:13, col:14
C() : B({}) {};
diff --git a/clang/test/ASTSYCL/ast-dump-sycl-kernel-call-stmt.cpp b/clang/test/ASTSYCL/ast-dump-sycl-kernel-call-stmt.cpp
index 27604e2..8e8e03c 100644
--- a/clang/test/ASTSYCL/ast-dump-sycl-kernel-call-stmt.cpp
+++ b/clang/test/ASTSYCL/ast-dump-sycl-kernel-call-stmt.cpp
@@ -64,10 +64,10 @@ void skep2<KN<2>>(K<2>);
// CHECK-NEXT: | `-FunctionDecl {{.*}} skep2 'void (K<2>)' explicit_instantiation_definition
// CHECK-NEXT: | |-TemplateArgument type 'KN<2>'
-// CHECK-NEXT: | | `-RecordType {{.*}} 'KN<2>'
+// CHECK-NEXT: | | `-RecordType {{.*}} 'KN<2>' canonical
// CHECK-NEXT: | | `-ClassTemplateSpecialization {{.*}} 'KN'
// CHECK-NEXT: | |-TemplateArgument type 'K<2>'
-// CHECK-NEXT: | | `-RecordType {{.*}} 'K<2>'
+// CHECK-NEXT: | | `-RecordType {{.*}} 'K<2>' canonical
// CHECK-NEXT: | | `-ClassTemplateSpecialization {{.*}} 'K'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} k 'K<2>'
// CHECK-NEXT: | |-SYCLKernelCallStmt {{.*}}
@@ -110,10 +110,10 @@ void skep3<KN<3>>(K<3> k) {
// CHECK-NEXT: | `-Function {{.*}} 'skep3' 'void (K<3>)'
// CHECK-NEXT: |-FunctionDecl {{.*}} skep3 'void (K<3>)' explicit_specialization
// CHECK-NEXT: | |-TemplateArgument type 'KN<3>'
-// CHECK-NEXT: | | `-RecordType {{.*}} 'KN<3>'
+// CHECK-NEXT: | | `-RecordType {{.*}} 'KN<3>' canonical
// CHECK-NEXT: | | `-ClassTemplateSpecialization {{.*}} 'KN'
// CHECK-NEXT: | |-TemplateArgument type 'K<3>'
-// CHECK-NEXT: | | `-RecordType {{.*}} 'K<3>'
+// CHECK-NEXT: | | `-RecordType {{.*}} 'K<3>' canonical
// CHECK-NEXT: | | `-ClassTemplateSpecialization {{.*}} 'K'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} k 'K<3>'
// CHECK-NEXT: | |-SYCLKernelCallStmt {{.*}}
diff --git a/clang/test/Analysis/anonymous-decls.cpp b/clang/test/Analysis/anonymous-decls.cpp
index 85449ca..3f972a3 100644
--- a/clang/test/Analysis/anonymous-decls.cpp
+++ b/clang/test/Analysis/anonymous-decls.cpp
@@ -78,12 +78,12 @@ int main() {
// CHECK-NEXT: 8: decomposition-a-b
// CHECK-NEXT: 9: [B3.7]([B3.8])
// CHECK-NEXT: 10: [B3.9]
-// CHECK-NEXT: 11: std::tuple_element<0, std::pair<int, int>>::type a = get<0UL>(decomposition-a-b);
+// CHECK-NEXT: 11: std::tuple_element<0UL, std::pair<int, int>>::type a = get<0UL>(decomposition-a-b);
// CHECK-NEXT: 12: get<1UL>
// CHECK-NEXT: 13: [B3.12] (ImplicitCastExpr, FunctionToPointerDecay, tuple_element<1L, pair<int, int> >::type (*)(pair<int, int> &))
// CHECK-NEXT: 14: decomposition-a-b
// CHECK-NEXT: 15: [B3.13]([B3.14])
// CHECK-NEXT: 16: [B3.15]
-// CHECK-NEXT: 17: std::tuple_element<1, std::pair<int, int>>::type b = get<1UL>(decomposition-a-b);
+// CHECK-NEXT: 17: std::tuple_element<1UL, std::pair<int, int>>::type b = get<1UL>(decomposition-a-b);
// CHECK-NEXT: Preds (1): B1
// CHECK-NEXT: Succs (1): B2
diff --git a/clang/test/Analysis/malloc-checker-arg-uaf.c b/clang/test/Analysis/malloc-checker-arg-uaf.c
new file mode 100644
index 0000000..d6aa856
--- /dev/null
+++ b/clang/test/Analysis/malloc-checker-arg-uaf.c
@@ -0,0 +1,44 @@
+// RUN: %clang_analyze_cc1 -analyzer-checker=core,unix.Malloc -verify %s
+
+#include "Inputs/system-header-simulator-for-malloc.h"
+
+struct Obj {
+ int field;
+};
+
+void use(void *ptr);
+
+void test_direct_param_uaf() {
+ int *p = (int *)malloc(sizeof(int));
+ free(p);
+ use(p); // expected-warning{{Use of memory after it is released}}
+}
+
+void test_struct_field_uaf() {
+ struct Obj *o = (struct Obj *)malloc(sizeof(struct Obj));
+ free(o);
+ use(&o->field); // expected-warning{{Use of memory after it is released}}
+}
+
+void test_no_warning_const_int() {
+ use((void *)0x1234); // no-warning
+}
+
+void test_no_warning_stack() {
+ int x = 42;
+ use(&x); // no-warning
+}
+
+void test_nested_alloc() {
+ struct Obj *o = (struct Obj *)malloc(sizeof(struct Obj));
+ use(o); // no-warning
+ free(o);
+ use(o); // expected-warning{{Use of memory after it is released}}
+}
+
+void test_nested_field() {
+ struct Obj *o = (struct Obj *)malloc(sizeof(struct Obj));
+ int *f = &o->field;
+ free(o);
+ use(f); // expected-warning{{Use of memory after it is released}}
+}
diff --git a/clang/test/C/C11/n1285_1.c b/clang/test/C/C11/n1285_1.c
index 5010004..25b68e3 100644
--- a/clang/test/C/C11/n1285_1.c
+++ b/clang/test/C/C11/n1285_1.c
@@ -26,16 +26,16 @@ struct X f(void);
// C11-O2-NEXT: [[ENTRY:.*:]]
// C11-O2-NEXT: [[P:%.*]] = alloca ptr, align 8
// C11-O2-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_X:%.*]], align 4
-// C11-O2-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[P]]) #[[ATTR5:[0-9]+]]
-// C11-O2-NEXT: call void @llvm.lifetime.start.p0(i64 20, ptr [[REF_TMP]]) #[[ATTR5]]
+// C11-O2-NEXT: call void @llvm.lifetime.start.p0(ptr [[P]]) #[[ATTR5:[0-9]+]]
+// C11-O2-NEXT: call void @llvm.lifetime.start.p0(ptr [[REF_TMP]]) #[[ATTR5]]
// C11-O2-NEXT: call void @f(ptr dead_on_unwind writable sret([[STRUCT_X]]) align 4 [[REF_TMP]])
// C11-O2-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_X]], ptr [[REF_TMP]], i32 0, i32 0
// C11-O2-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [5 x i32], ptr [[A]], i64 0, i64 0
-// C11-O2-NEXT: call void @llvm.lifetime.end.p0(i64 20, ptr [[REF_TMP]]) #[[ATTR5]]
+// C11-O2-NEXT: call void @llvm.lifetime.end.p0(ptr [[REF_TMP]]) #[[ATTR5]]
// C11-O2-NEXT: store ptr [[ARRAYDECAY]], ptr [[P]], align 8, !tbaa [[TBAA2:![0-9]+]]
// C11-O2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[P]], align 8, !tbaa [[TBAA2]]
// C11-O2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !tbaa [[TBAA7:![0-9]+]]
-// C11-O2-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[P]]) #[[ATTR5]]
+// C11-O2-NEXT: call void @llvm.lifetime.end.p0(ptr [[P]]) #[[ATTR5]]
// C11-O2-NEXT: ret i32 [[TMP1]]
//
int func_return(void) {
@@ -79,7 +79,7 @@ int func_return(void) {
// C11-O2-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_X:%.*]], align 4
// C11-O2-NEXT: [[Q:%.*]] = alloca ptr, align 8
// C11-O2-NEXT: [[DOTCOMPOUNDLITERAL:%.*]] = alloca [[STRUCT_X]], align 4
-// C11-O2-NEXT: call void @llvm.lifetime.start.p0(i64 20, ptr [[REF_TMP]]) #[[ATTR5]]
+// C11-O2-NEXT: call void @llvm.lifetime.start.p0(ptr [[REF_TMP]]) #[[ATTR5]]
// C11-O2-NEXT: br i1 true, label %[[COND_TRUE:.*]], label %[[COND_FALSE:.*]]
// C11-O2: [[COND_TRUE]]:
// C11-O2-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[REF_TMP]], i8 0, i64 20, i1 false)
@@ -92,8 +92,8 @@ int func_return(void) {
// C11-O2-NEXT: [[A1:%.*]] = getelementptr inbounds nuw [[STRUCT_X]], ptr [[REF_TMP]], i32 0, i32 0
// C11-O2-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [5 x i32], ptr [[A1]], i64 0, i64 0
// C11-O2-NEXT: store ptr [[ARRAYDECAY]], ptr @p, align 8, !tbaa [[TBAA2]]
-// C11-O2-NEXT: call void @llvm.lifetime.end.p0(i64 20, ptr [[REF_TMP]]) #[[ATTR5]]
-// C11-O2-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[Q]]) #[[ATTR5]]
+// C11-O2-NEXT: call void @llvm.lifetime.end.p0(ptr [[REF_TMP]]) #[[ATTR5]]
+// C11-O2-NEXT: call void @llvm.lifetime.start.p0(ptr [[Q]]) #[[ATTR5]]
// C11-O2-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[DOTCOMPOUNDLITERAL]], i8 0, i64 20, i1 false)
// C11-O2-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_X]], ptr [[DOTCOMPOUNDLITERAL]], i32 0, i32 0
// C11-O2-NEXT: [[A3:%.*]] = getelementptr inbounds nuw [[STRUCT_X]], ptr [[DOTCOMPOUNDLITERAL]], i32 0, i32 0
@@ -104,7 +104,7 @@ int func_return(void) {
// C11-O2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[Q]], align 8, !tbaa [[TBAA2]]
// C11-O2-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !tbaa [[TBAA7]]
// C11-O2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[TMP3]]
-// C11-O2-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[Q]]) #[[ATTR5]]
+// C11-O2-NEXT: call void @llvm.lifetime.end.p0(ptr [[Q]]) #[[ATTR5]]
// C11-O2-NEXT: ret i32 [[ADD]]
//
int ternary(void) {
@@ -133,16 +133,16 @@ int ternary(void) {
// C11-O2-NEXT: [[ENTRY:.*:]]
// C11-O2-NEXT: [[X:%.*]] = alloca [[STRUCT_X:%.*]], align 4
// C11-O2-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_X]], align 4
-// C11-O2-NEXT: call void @llvm.lifetime.start.p0(i64 20, ptr [[X]]) #[[ATTR5]]
-// C11-O2-NEXT: call void @llvm.lifetime.start.p0(i64 20, ptr [[REF_TMP]]) #[[ATTR5]]
+// C11-O2-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]]) #[[ATTR5]]
+// C11-O2-NEXT: call void @llvm.lifetime.start.p0(ptr [[REF_TMP]]) #[[ATTR5]]
// C11-O2-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[REF_TMP]], ptr align 4 [[X]], i64 20, i1 false), !tbaa.struct [[TBAA_STRUCT9:![0-9]+]]
// C11-O2-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_X]], ptr [[REF_TMP]], i32 0, i32 0
// C11-O2-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [5 x i32], ptr [[A]], i64 0, i64 0
// C11-O2-NEXT: store ptr [[ARRAYDECAY]], ptr @p, align 8, !tbaa [[TBAA2]]
-// C11-O2-NEXT: call void @llvm.lifetime.end.p0(i64 20, ptr [[REF_TMP]]) #[[ATTR5]]
+// C11-O2-NEXT: call void @llvm.lifetime.end.p0(ptr [[REF_TMP]]) #[[ATTR5]]
// C11-O2-NEXT: [[TMP0:%.*]] = load ptr, ptr @p, align 8, !tbaa [[TBAA2]]
// C11-O2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !tbaa [[TBAA7]]
-// C11-O2-NEXT: call void @llvm.lifetime.end.p0(i64 20, ptr [[X]]) #[[ATTR5]]
+// C11-O2-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]]) #[[ATTR5]]
// C11-O2-NEXT: ret i32 [[TMP1]]
//
int comma(void) {
@@ -170,16 +170,16 @@ int comma(void) {
// C11-O2-NEXT: [[ENTRY:.*:]]
// C11-O2-NEXT: [[X:%.*]] = alloca [[STRUCT_X:%.*]], align 4
// C11-O2-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_X]], align 4
-// C11-O2-NEXT: call void @llvm.lifetime.start.p0(i64 20, ptr [[X]]) #[[ATTR5]]
-// C11-O2-NEXT: call void @llvm.lifetime.start.p0(i64 20, ptr [[REF_TMP]]) #[[ATTR5]]
+// C11-O2-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]]) #[[ATTR5]]
+// C11-O2-NEXT: call void @llvm.lifetime.start.p0(ptr [[REF_TMP]]) #[[ATTR5]]
// C11-O2-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[REF_TMP]], ptr align 4 [[X]], i64 20, i1 false), !tbaa.struct [[TBAA_STRUCT9]]
// C11-O2-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_X]], ptr [[REF_TMP]], i32 0, i32 0
// C11-O2-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [5 x i32], ptr [[A]], i64 0, i64 0
// C11-O2-NEXT: store ptr [[ARRAYDECAY]], ptr @p, align 8, !tbaa [[TBAA2]]
-// C11-O2-NEXT: call void @llvm.lifetime.end.p0(i64 20, ptr [[REF_TMP]]) #[[ATTR5]]
+// C11-O2-NEXT: call void @llvm.lifetime.end.p0(ptr [[REF_TMP]]) #[[ATTR5]]
// C11-O2-NEXT: [[TMP0:%.*]] = load ptr, ptr @p, align 8, !tbaa [[TBAA2]]
// C11-O2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !tbaa [[TBAA7]]
-// C11-O2-NEXT: call void @llvm.lifetime.end.p0(i64 20, ptr [[X]]) #[[ATTR5]]
+// C11-O2-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]]) #[[ATTR5]]
// C11-O2-NEXT: ret i32 [[TMP1]]
//
int cast(void) {
@@ -210,19 +210,19 @@ int cast(void) {
// C11-O2-NEXT: [[X:%.*]] = alloca [[STRUCT_X:%.*]], align 4
// C11-O2-NEXT: [[S:%.*]] = alloca [[STRUCT_X]], align 4
// C11-O2-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_X]], align 4
-// C11-O2-NEXT: call void @llvm.lifetime.start.p0(i64 20, ptr [[X]]) #[[ATTR5]]
-// C11-O2-NEXT: call void @llvm.lifetime.start.p0(i64 20, ptr [[S]]) #[[ATTR5]]
-// C11-O2-NEXT: call void @llvm.lifetime.start.p0(i64 20, ptr [[REF_TMP]]) #[[ATTR5]]
+// C11-O2-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]]) #[[ATTR5]]
+// C11-O2-NEXT: call void @llvm.lifetime.start.p0(ptr [[S]]) #[[ATTR5]]
+// C11-O2-NEXT: call void @llvm.lifetime.start.p0(ptr [[REF_TMP]]) #[[ATTR5]]
// C11-O2-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[X]], ptr align 4 [[S]], i64 20, i1 false), !tbaa.struct [[TBAA_STRUCT9]]
// C11-O2-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[REF_TMP]], ptr align 4 [[X]], i64 20, i1 false), !tbaa.struct [[TBAA_STRUCT9]]
// C11-O2-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_X]], ptr [[REF_TMP]], i32 0, i32 0
// C11-O2-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [5 x i32], ptr [[A]], i64 0, i64 0
// C11-O2-NEXT: store ptr [[ARRAYDECAY]], ptr @p, align 8, !tbaa [[TBAA2]]
-// C11-O2-NEXT: call void @llvm.lifetime.end.p0(i64 20, ptr [[REF_TMP]]) #[[ATTR5]]
+// C11-O2-NEXT: call void @llvm.lifetime.end.p0(ptr [[REF_TMP]]) #[[ATTR5]]
// C11-O2-NEXT: [[TMP0:%.*]] = load ptr, ptr @p, align 8, !tbaa [[TBAA2]]
// C11-O2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !tbaa [[TBAA7]]
-// C11-O2-NEXT: call void @llvm.lifetime.end.p0(i64 20, ptr [[S]]) #[[ATTR5]]
-// C11-O2-NEXT: call void @llvm.lifetime.end.p0(i64 20, ptr [[X]]) #[[ATTR5]]
+// C11-O2-NEXT: call void @llvm.lifetime.end.p0(ptr [[S]]) #[[ATTR5]]
+// C11-O2-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]]) #[[ATTR5]]
// C11-O2-NEXT: ret i32 [[TMP1]]
//
int assign(void) {
diff --git a/clang/test/CIR/CodeGen/builtin_call.cpp b/clang/test/CIR/CodeGen/builtin_call.cpp
index c266f1a..09be793 100644
--- a/clang/test/CIR/CodeGen/builtin_call.cpp
+++ b/clang/test/CIR/CodeGen/builtin_call.cpp
@@ -111,6 +111,38 @@ void assume(bool arg) {
// OGCG: call void @llvm.assume(i1 %{{.+}})
// OGCG: }
+void *assume_aligned(void *ptr) {
+ return __builtin_assume_aligned(ptr, 16);
+}
+
+// CIR: @_Z14assume_alignedPv
+// CIR: %{{.+}} = cir.assume_aligned %{{.+}} alignment 16 : !cir.ptr<!void>
+// CIR: }
+
+// LLVM: @_Z14assume_alignedPv
+// LLVM: call void @llvm.assume(i1 true) [ "align"(ptr %{{.+}}, i64 16) ]
+// LLVM: }
+
+// OGCG: @_Z14assume_alignedPv
+// OGCG: call void @llvm.assume(i1 true) [ "align"(ptr %{{.+}}, i64 16) ]
+// OGCG: }
+
+void *assume_aligned_misalignment(void *ptr, unsigned misalignment) {
+ return __builtin_assume_aligned(ptr, 16, misalignment);
+}
+
+// CIR: @_Z27assume_aligned_misalignmentPvj
+// CIR: %{{.+}} = cir.assume_aligned %{{.+}} alignment 16[offset %{{.+}} : !u64i] : !cir.ptr<!void>
+// CIR: }
+
+// LLVM: @_Z27assume_aligned_misalignmentPvj
+// LLVM: call void @llvm.assume(i1 true) [ "align"(ptr %{{.+}}, i64 16, i64 %{{.+}}) ]
+// LLVM: }
+
+// OGCG: @_Z27assume_aligned_misalignmentPvj
+// OGCG: call void @llvm.assume(i1 true) [ "align"(ptr %{{.+}}, i64 16, i64 %{{.+}}) ]
+// OGCG: }
+
void assume_separate_storage(void *p1, void *p2) {
__builtin_assume_separate_storage(p1, p2);
}
diff --git a/clang/test/CIR/CodeGen/complex-compound-assignment.cpp b/clang/test/CIR/CodeGen/complex-compound-assignment.cpp
index 35a8aa6..8245025 100644
--- a/clang/test/CIR/CodeGen/complex-compound-assignment.cpp
+++ b/clang/test/CIR/CodeGen/complex-compound-assignment.cpp
@@ -286,3 +286,211 @@ void foo4() {
// CXX_OGCG: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[C_ADDR]], i32 0, i32 1
// CXX_OGCG: store i32 %[[B_REAL]], ptr %[[C_REAL_PTR]], align 4
// CXX_OGCG: store i32 %[[B_IMAG]], ptr %[[C_IMAG_PTR]], align 4
+
+void foo5() {
+ float _Complex a;
+ float b;
+ a += b;
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["b"]
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.float>, !cir.float
+// CIR: %[[CONST_ZERO:.*]] = cir.const #cir.fp<0.000000e+00> : !cir.float
+// CIR: %[[COMPLEX_B:.*]] = cir.complex.create %[[TMP_B]], %[[CONST_ZERO]] : !cir.float -> !cir.complex<!cir.float>
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[RESULT:.*]] = cir.complex.add %[[TMP_A]], %[[COMPLEX_B]] : !cir.complex<!cir.float>
+// CIR: cir.store{{.*}} %[[RESULT]], %[[A_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca float, i64 1, align 4
+// LLVM: %[[TMP_B:.*]] = load float, ptr %[[B_ADDR]], align 4
+// LLVM: %[[TMP_COMPLEX_B:.*]] = insertvalue { float, float } {{.*}}, float %[[TMP_B]], 0
+// LLVM: %[[COMPLEX_B:.*]] = insertvalue { float, float } %[[TMP_COMPLEX_B]], float 0.000000e+00, 1
+// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
+// LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
+// LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
+// LLVM: %[[B_REAL:.*]] = extractvalue { float, float } %[[COMPLEX_B]], 0
+// LLVM: %[[B_IMAG:.*]] = extractvalue { float, float } %[[COMPLEX_B]], 1
+// LLVM: %[[ADD_REAL:.*]] = fadd float %[[A_REAL]], %[[B_REAL]]
+// LLVM: %[[ADD_IMAG:.*]] = fadd float %[[A_IMAG]], %[[B_IMAG]]
+// LLVM: %[[TMP_RESULT:.*]] = insertvalue { float, float } poison, float %[[ADD_REAL]], 0
+// LLVM: %[[RESULT:.*]] = insertvalue { float, float } %[[TMP_RESULT]], float %[[ADD_IMAG]], 1
+// LLVM: store { float, float } %[[RESULT]], ptr %[[A_ADDR]], align 4
+
+// OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca float, align 4
+// OGCG: %[[TMP_B:.*]] = load float, ptr %[[B_ADDR]], align 4
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
+// OGCG: %[[ADD_REAL:.*]] = fadd float %[[A_REAL]], %[[TMP_B]]
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: store float %[[ADD_REAL]], ptr %[[A_REAL_PTR]], align 4
+// OGCG: store float %[[A_IMAG]], ptr %[[A_IMAG_PTR]], align 4
+
+void foo6() {
+ int _Complex a;
+ int _Complex b;
+ b *= a;
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["b"]
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CIR: %[[B_REAL:.*]] = cir.complex.real %[[TMP_B]] : !cir.complex<!s32i> -> !s32i
+// CIR: %[[B_IMAG:.*]] = cir.complex.imag %[[TMP_B]] : !cir.complex<!s32i> -> !s32i
+// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!s32i> -> !s32i
+// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!s32i> -> !s32i
+// CIR: %[[MUL_BR_AR:.*]] = cir.binop(mul, %[[B_REAL]], %[[A_REAL]]) : !s32i
+// CIR: %[[MUL_BI_AI:.*]] = cir.binop(mul, %[[B_IMAG]], %[[A_IMAG]]) : !s32i
+// CIR: %[[MUL_BR_AI:.*]] = cir.binop(mul, %[[B_REAL]], %[[A_IMAG]]) : !s32i
+// CIR: %[[MUL_BI_AR:.*]] = cir.binop(mul, %[[B_IMAG]], %[[A_REAL]]) : !s32i
+// CIR: %[[RESULT_REAL:.*]] = cir.binop(sub, %[[MUL_BR_AR]], %[[MUL_BI_AI]]) : !s32i
+// CIR: %[[RESULT_IMAG:.*]] = cir.binop(add, %[[MUL_BR_AI]], %[[MUL_BI_AR]]) : !s32i
+// CIR: %[[RESULT:.*]] = cir.complex.create %[[RESULT_REAL]], %[[RESULT_IMAG]] : !s32i -> !cir.complex<!s32i>
+// CIR: cir.store{{.*}} %[[RESULT]], %[[B_ADDR]] : !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM: %[[TMP_A:.*]] = load { i32, i32 }, ptr %[[A_ADDR]], align 4
+// LLVM: %[[TMP_B:.*]] = load { i32, i32 }, ptr %[[B_ADDR]], align 4
+// LLVM: %[[B_REAL:.*]] = extractvalue { i32, i32 } %[[TMP_B]], 0
+// LLVM: %[[B_IMAG:.*]] = extractvalue { i32, i32 } %[[TMP_B]], 1
+// LLVM: %[[A_REAL:.*]] = extractvalue { i32, i32 } %[[TMP_A]], 0
+// LLVM: %[[A_IMAG:.*]] = extractvalue { i32, i32 } %[[TMP_A]], 1
+// LLVM: %[[MUL_BR_AR:.*]] = mul i32 %[[B_REAL]], %[[A_REAL]]
+// LLVM: %[[MUL_BI_AI:.*]] = mul i32 %[[B_IMAG]], %[[A_IMAG]]
+// LLVM: %[[MUL_BR_AI:.*]] = mul i32 %[[B_REAL]], %[[A_IMAG]]
+// LLVM: %[[MUL_BI_AR:.*]] = mul i32 %[[B_IMAG]], %[[A_REAL]]
+// LLVM: %[[RESULT_REAL:.*]] = sub i32 %[[MUL_BR_AR]], %[[MUL_BI_AI]]
+// LLVM: %[[RESULT_IMAG:.*]] = add i32 %[[MUL_BR_AI]], %[[MUL_BI_AR]]
+// LLVM: %[[MUL_A_B:.*]] = insertvalue { i32, i32 } {{.*}}, i32 %[[RESULT_REAL]], 0
+// LLVM: %[[RESULT:.*]] = insertvalue { i32, i32 } %[[MUL_A_B]], i32 %[[RESULT_IMAG]], 1
+// LLVM: store { i32, i32 } %[[RESULT]], ptr %[[B_ADDR]], align 4
+
+// OGCG: %[[A_ADDR:.*]] = alloca { i32, i32 }, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca { i32, i32 }, align 4
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load i32, ptr %[[A_REAL_PTR]], align 4
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load i32, ptr %[[A_IMAG_PTR]], align 4
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG: %[[B_REAL:.*]] = load i32, ptr %[[B_REAL_PTR]], align 4
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG: %[[B_IMAG:.*]] = load i32, ptr %[[B_IMAG_PTR]], align 4
+// OGCG: %[[MUL_BR_AR:.*]] = mul i32 %[[B_REAL]], %[[A_REAL]]
+// OGCG: %[[MUL_BI_AI:.*]] = mul i32 %[[B_IMAG]], %[[A_IMAG]]
+// OGCG: %[[RESULT_REAL:.*]] = sub i32 %[[MUL_BR_AR]], %[[MUL_BI_AI]]
+// OGCG: %[[MUL_BI_AR:.*]] = mul i32 %[[B_IMAG]], %[[A_REAL]]
+// OGCG: %[[MUL_BR_AI:.*]] = mul i32 %[[B_REAL]], %[[A_IMAG]]
+// OGCG: %[[RESULT_IMAG:.*]] = add i32 %[[MUL_BI_AR]], %[[MUL_BR_AI]]
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG: store i32 %[[RESULT_REAL]], ptr %[[B_REAL_PTR]], align 4
+// OGCG: store i32 %[[RESULT_IMAG]], ptr %[[B_IMAG_PTR]], align 4
+
+void foo7() {
+ float _Complex a;
+ float _Complex b;
+ b *= a;
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b"]
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[B_REAL:.*]] = cir.complex.real %[[TMP_B]] : !cir.complex<!cir.float> -> !cir.float
+// CIR: %[[B_IMAG:.*]] = cir.complex.imag %[[TMP_B]] : !cir.complex<!cir.float> -> !cir.float
+// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
+// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
+// CIR: %[[MUL_BR_AR:.*]] = cir.binop(mul, %[[B_REAL]], %[[A_REAL]]) : !cir.float
+// CIR: %[[MUL_BI_AI:.*]] = cir.binop(mul, %[[B_IMAG]], %[[A_IMAG]]) : !cir.float
+// CIR: %[[MUL_BR_AI:.*]] = cir.binop(mul, %[[B_REAL]], %[[A_IMAG]]) : !cir.float
+// CIR: %[[MUL_BI_AR:.*]] = cir.binop(mul, %[[B_IMAG]], %[[A_REAL]]) : !cir.float
+// CIR: %[[C_REAL:.*]] = cir.binop(sub, %[[MUL_BR_AR]], %[[MUL_BI_AI]]) : !cir.float
+// CIR: %[[C_IMAG:.*]] = cir.binop(add, %[[MUL_BR_AI]], %[[MUL_BI_AR]]) : !cir.float
+// CIR: %[[COMPLEX:.*]] = cir.complex.create %[[C_REAL]], %[[C_IMAG]] : !cir.float -> !cir.complex<!cir.float>
+// CIR: %[[IS_C_REAL_NAN:.*]] = cir.cmp(ne, %[[C_REAL]], %[[C_REAL]]) : !cir.float, !cir.bool
+// CIR: %[[IS_C_IMAG_NAN:.*]] = cir.cmp(ne, %[[C_IMAG]], %[[C_IMAG]]) : !cir.float, !cir.bool
+// CIR: %[[CONST_FALSE:.*]] = cir.const #false
+// CIR: %[[SELECT_CONDITION:.*]] = cir.select if %[[IS_C_REAL_NAN]] then %[[IS_C_IMAG_NAN]] else %[[CONST_FALSE]] : (!cir.bool, !cir.bool, !cir.bool) -> !cir.bool
+// CIR: %[[RESULT:.*]] = cir.ternary(%[[SELECT_CONDITION]], true {
+// CIR: %[[LIBC_COMPLEX:.*]] = cir.call @__mulsc3(%[[B_REAL]], %[[B_IMAG]], %[[A_REAL]], %[[A_IMAG]]) : (!cir.float, !cir.float, !cir.float, !cir.float) -> !cir.complex<!cir.float>
+// CIR: cir.yield %[[LIBC_COMPLEX]] : !cir.complex<!cir.float>
+// CIR: }, false {
+// CIR: cir.yield %[[COMPLEX]] : !cir.complex<!cir.float>
+// CIR: }) : (!cir.bool) -> !cir.complex<!cir.float>
+// CIR: cir.store{{.*}} %[[RESULT]], %[[B_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
+// LLVM: %[[TMP_B:.*]] = load { float, float }, ptr %[[B_ADDR]], align 4
+// LLVM: %[[B_REAL:.*]] = extractvalue { float, float } %[[TMP_B]], 0
+// LLVM: %[[B_IMAG:.*]] = extractvalue { float, float } %[[TMP_B]], 1
+// LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
+// LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
+// LLVM: %[[MUL_BR_AR:.*]] = fmul float %[[B_REAL]], %[[A_REAL]]
+// LLVM: %[[MUL_BI_AI:.*]] = fmul float %[[B_IMAG]], %[[A_IMAG]]
+// LLVM: %[[MUL_BR_AI:.*]] = fmul float %[[B_REAL]], %[[A_IMAG]]
+// LLVM: %[[MUL_BI_AR:.*]] = fmul float %[[B_IMAG]], %[[A_REAL]]
+// LLVM: %[[C_REAL:.*]] = fsub float %[[MUL_BR_AR]], %[[MUL_BI_AI]]
+// LLVM: %[[C_IMAG:.*]] = fadd float %[[MUL_BR_AI]], %[[MUL_BI_AR]]
+// LLVM: %[[MUL_A_B:.*]] = insertvalue { float, float } {{.*}}, float %[[C_REAL]], 0
+// LLVM: %[[COMPLEX:.*]] = insertvalue { float, float } %[[MUL_A_B]], float %[[C_IMAG]], 1
+// LLVM: %[[IS_C_REAL_NAN:.*]] = fcmp une float %[[C_REAL]], %[[C_REAL]]
+// LLVM: %[[IS_C_IMAG_NAN:.*]] = fcmp une float %[[C_IMAG]], %[[C_IMAG]]
+// LLVM: %[[SELECT_CONDITION:.*]] = and i1 %[[IS_C_REAL_NAN]], %[[IS_C_IMAG_NAN]]
+// LLVM: br i1 %[[SELECT_CONDITION]], label %[[THEN_LABEL:.*]], label %[[ELSE_LABEL:.*]]
+// LLVM: [[THEN_LABEL]]:
+// LLVM: %[[LIBC_COMPLEX:.*]] = call { float, float } @__mulsc3(float %[[B_REAL]], float %[[B_IMAG]], float %[[A_REAL]], float %[[A_IMAG]])
+// LLVM: br label %[[PHI_BRANCH:.*]]
+// LLVM: [[ELSE_LABEL]]:
+// LLVM: br label %[[PHI_BRANCH:]]
+// LLVM: [[PHI_BRANCH:]]:
+// LLVM: %[[RESULT:.*]] = phi { float, float } [ %[[COMPLEX]], %[[ELSE_LABEL]] ], [ %[[LIBC_COMPLEX]], %[[THEN_LABEL]] ]
+// LLVM: br label %[[END_LABEL:.*]]
+// LLVM: [[END_LABEL]]:
+// LLVM: store { float, float } %[[RESULT]], ptr %[[B_ADDR]], align 4
+
+// OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[COMPLEX_CALL_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG: %[[B_REAL:.*]] = load float, ptr %[[B_REAL_PTR]], align 4
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG: %[[B_IMAG:.*]] = load float, ptr %[[B_IMAG_PTR]], align 4
+// OGCG: %[[MUL_BR_AR:.*]] = fmul float %[[B_REAL]], %[[A_REAL]]
+// OGCG: %[[MUL_BI_AI:.*]] = fmul float %[[B_IMAG]], %[[A_IMAG]]
+// OGCG: %[[MUL_BR_AI:.*]] = fmul float %[[B_REAL]], %[[A_IMAG]]
+// OGCG: %[[MUL_BI_AR:.*]] = fmul float %[[B_IMAG]], %[[A_REAL]]
+// OGCG: %[[C_REAL:.*]] = fsub float %[[MUL_BR_AR]], %[[MUL_BI_AI]]
+// OGCG: %[[C_IMAG:.*]] = fadd float %[[MUL_BR_AI]], %[[MUL_BI_AR]]
+// OGCG: %[[IS_C_REAL_NAN:.*]] = fcmp uno float %[[C_REAL]], %[[C_REAL]]
+// OGCG: br i1 %[[IS_C_REAL_NAN]], label %[[COMPLEX_IS_IMAG_NAN:.*]], label %[[END_LABEL:.*]], !prof !2
+// OGCG: [[COMPLEX_IS_IMAG_NAN]]:
+// OGCG: %[[IS_C_IMAG_NAN:.*]] = fcmp uno float %[[C_IMAG]], %[[C_IMAG]]
+// OGCG: br i1 %[[IS_C_IMAG_NAN]], label %[[COMPLEX_LIB_CALL:.*]], label %[[END_LABEL]], !prof !2
+// OGCG: [[COMPLEX_LIB_CALL]]:
+// OGCG: %[[CALL_RESULT:.*]] = call{{.*}} <2 x float> @__mulsc3(float noundef %[[B_REAL]], float noundef %[[B_IMAG]], float noundef %[[A_REAL]], float noundef %[[A_IMAG]])
+// OGCG: store <2 x float> %[[CALL_RESULT]], ptr %[[COMPLEX_CALL_ADDR]], align 4
+// OGCG: %[[COMPLEX_CALL_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_CALL_ADDR]], i32 0, i32 0
+// OGCG: %[[COMPLEX_CALL_REAL:.*]] = load float, ptr %[[COMPLEX_CALL_REAL_PTR]], align 4
+// OGCG: %[[COMPLEX_CALL_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_CALL_ADDR]], i32 0, i32 1
+// OGCG: %[[COMPLEX_CALL_IMAG:.*]] = load float, ptr %[[COMPLEX_CALL_IMAG_PTR]], align 4
+// OGCG: br label %[[END_LABEL]]
+// OGCG: [[END_LABEL]]:
+// OGCG: %[[FINAL_REAL:.*]] = phi float [ %[[C_REAL]], %[[ENTRY:.*]] ], [ %[[C_REAL]], %[[COMPLEX_IS_IMAG_NAN]] ], [ %[[COMPLEX_CALL_REAL]], %[[COMPLEX_LIB_CALL]] ]
+// OGCG: %[[FINAL_IMAG:.*]] = phi float [ %[[C_IMAG]], %[[ENTRY]] ], [ %[[C_IMAG]], %[[COMPLEX_IS_IMAG_NAN]] ], [ %[[COMPLEX_CALL_IMAG]], %[[COMPLEX_LIB_CALL]] ]
+// OGCG: %[[C_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG: store float %[[FINAL_REAL]], ptr %[[C_REAL_PTR]], align 4
+// OGCG: store float %[[FINAL_IMAG]], ptr %[[C_IMAG_PTR]], align 4
diff --git a/clang/test/CIR/CodeGen/module-asm.c b/clang/test/CIR/CodeGen/module-asm.c
new file mode 100644
index 0000000..e6cec5e
--- /dev/null
+++ b/clang/test/CIR/CodeGen/module-asm.c
@@ -0,0 +1,6 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s
+
+// CHECK: cir.module_asm = [".globl bar", ".globl foo"]
+__asm (".globl bar");
+__asm (".globl foo");
diff --git a/clang/test/CIR/IR/invalid-vtable.cir b/clang/test/CIR/IR/invalid-vtable.cir
new file mode 100644
index 0000000..b3afb581
--- /dev/null
+++ b/clang/test/CIR/IR/invalid-vtable.cir
@@ -0,0 +1,9 @@
+// RUN: cir-opt %s -verify-diagnostics
+
+!s8i = !cir.int<s, 8>
+!u32i = !cir.int<u, 32>
+cir.func @reference_unknown_vtable() {
+ // expected-error @below {{'cir.vtable.address_point' op 'some_vtable' does not reference a valid cir.global}}
+ %0 = cir.vtable.address_point(@some_vtable, address_point = <index = 0, offset = 2>) : !cir.vptr
+ cir.return
+}
diff --git a/clang/test/CIR/IR/vtable-addrpt.cir b/clang/test/CIR/IR/vtable-addrpt.cir
new file mode 100644
index 0000000..0b809cc
--- /dev/null
+++ b/clang/test/CIR/IR/vtable-addrpt.cir
@@ -0,0 +1,23 @@
+// RUN: cir-opt %s | FileCheck %s
+
+// Test the parsing and printing of a constructor that uses a vtable address_point op.
+
+!u32i = !cir.int<u, 32>
+!u8i = !cir.int<u, 8>
+!rec_anon_struct = !cir.record<struct {!cir.array<!cir.ptr<!u8i> x 4>}>
+!rec_S = !cir.record<struct "S" {!cir.vptr}>
+
+module {
+ cir.global "private" external @_ZTV1S : !rec_anon_struct {alignment = 8 : i64}
+ cir.func @_ZN1SC2Ev(%arg0: !cir.ptr<!rec_S>) {
+ %0 = cir.alloca !cir.ptr<!rec_S>, !cir.ptr<!cir.ptr<!rec_S>>, ["this", init] {alignment = 8 : i64}
+ cir.store %arg0, %0 : !cir.ptr<!rec_S>, !cir.ptr<!cir.ptr<!rec_S>>
+ %1 = cir.load %0 : !cir.ptr<!cir.ptr<!rec_S>>, !cir.ptr<!rec_S>
+ %2 = cir.vtable.address_point(@_ZTV1S, address_point = <index = 0, offset = 2>) : !cir.vptr
+ %3 = cir.cast(bitcast, %1 : !cir.ptr<!rec_S>), !cir.ptr<!cir.vptr>
+ cir.store align(8) %2, %3 : !cir.vptr, !cir.ptr<!cir.vptr>
+ cir.return
+ }
+}
+
+// CHECK: cir.vtable.address_point(@_ZTV1S, address_point = <index = 0, offset = 2>) : !cir.vptr
diff --git a/clang/test/CIR/Lowering/module-asm.cir b/clang/test/CIR/Lowering/module-asm.cir
new file mode 100644
index 0000000..b802cda
--- /dev/null
+++ b/clang/test/CIR/Lowering/module-asm.cir
@@ -0,0 +1,11 @@
+// RUN: cir-opt %s -cir-to-llvm -o %t.cir
+// RUN: FileCheck %s --input-file=%t.cir
+
+// RUN: cir-translate -cir-to-llvmir --disable-cc-lowering -o %t.ll %s
+// RUN: FileCheck -check-prefix=LLVM --input-file=%t.ll %s
+
+// CHECK: llvm.module_asm = [".globl bar", ".globl foo"]
+// LLVM: module asm ".globl bar"
+// LLVM: module asm ".globl foo"
+module attributes {cir.module_asm = [".globl bar", ".globl foo"]} {
+}
diff --git a/clang/test/CMakeLists.txt b/clang/test/CMakeLists.txt
index 286c9d4..e9f4f83 100644
--- a/clang/test/CMakeLists.txt
+++ b/clang/test/CMakeLists.txt
@@ -26,7 +26,6 @@ llvm_canonicalize_cmake_booleans(
PPC_LINUX_DEFAULT_IEEELONGDOUBLE
LLVM_TOOL_LLVM_DRIVER_BUILD
LLVM_INCLUDE_SPIRV_TOOLS_TESTS
- LLVM_EXPERIMENTAL_KEY_INSTRUCTIONS
)
# Run tests requiring Z3 headers only if LLVM was built with Z3
diff --git a/clang/test/CXX/basic/basic.lookup/basic.lookup.qual/class.qual/p2.cpp b/clang/test/CXX/basic/basic.lookup/basic.lookup.qual/class.qual/p2.cpp
index 5c281ac..76e2afb 100644
--- a/clang/test/CXX/basic/basic.lookup/basic.lookup.qual/class.qual/p2.cpp
+++ b/clang/test/CXX/basic/basic.lookup/basic.lookup.qual/class.qual/p2.cpp
@@ -197,7 +197,7 @@ namespace InhCtor {
// FIXME: Consider reusing the same diagnostic between dependent and non-dependent contexts
typedef int I;
struct UsingInt {
- using I::I; // expected-error {{'InhCtor::I' (aka 'int') is not a class, namespace, or enumeration}}
+ using I::I; // expected-error {{'I' (aka 'int') is not a class, namespace, or enumeration}}
};
template<typename T> struct UsingIntTemplate {
using T::T; // expected-error {{type 'int' cannot be used prior to '::' because it has no members}}
diff --git a/clang/test/CXX/class.access/p6.cpp b/clang/test/CXX/class.access/p6.cpp
index 15f2644..9a8aebe 100644
--- a/clang/test/CXX/class.access/p6.cpp
+++ b/clang/test/CXX/class.access/p6.cpp
@@ -176,7 +176,7 @@ namespace test8 {
};
void test(A &a) {
- if (a) return; // expected-error-re {{'operator void *(A::*)(){{( __attribute__\(\(thiscall\)\))?}} const' is a private member of 'test8::A'}}
+ if (a) return; // expected-error-re {{'operator void *(test8::A::*)(){{( __attribute__\(\(thiscall\)\))?}} const' is a private member of 'test8::A'}}
}
}
diff --git a/clang/test/CXX/class.derived/class.derived.general/p2.cpp b/clang/test/CXX/class.derived/class.derived.general/p2.cpp
index 1423eea..c700e8a 100644
--- a/clang/test/CXX/class.derived/class.derived.general/p2.cpp
+++ b/clang/test/CXX/class.derived/class.derived.general/p2.cpp
@@ -2,7 +2,7 @@
namespace CurrentInstantiation {
template<typename T>
- struct A0 { // expected-note 6{{definition of 'A0<T>' is not complete until the closing '}'}}
+ struct A0 { // expected-note 6{{definition of 'CurrentInstantiation::A0<T>' is not complete until the closing '}'}}
struct B0 : A0 { }; // expected-error {{base class has incomplete type}}
template<typename U>
@@ -26,7 +26,7 @@ namespace CurrentInstantiation {
};
template<typename U>
- struct B5 { // expected-note 2{{definition of 'B5<U>' is not complete until the closing '}'}}
+ struct B5 { // expected-note 2{{definition of 'CurrentInstantiation::A0::B5<U>' is not complete until the closing '}'}}
struct C0 : A0, B5 { }; // expected-error 2{{base class has incomplete type}}
template<typename V>
@@ -63,7 +63,7 @@ namespace CurrentInstantiation {
struct A0<T>::B5<U>::C3 : A0, B5 { };
template<typename T>
- struct A0<T*> { // expected-note 2{{definition of 'A0<T *>' is not complete until the closing '}'}}
+ struct A0<T*> { // expected-note 2{{definition of 'CurrentInstantiation::A0<T *>' is not complete until the closing '}'}}
struct B0 : A0 { }; // expected-error {{base class has incomplete type}}
template<typename U>
@@ -91,7 +91,7 @@ namespace MemberOfCurrentInstantiation {
template<typename U>
struct C : C<U> { }; // expected-error {{base class has incomplete type}}
- // expected-note@-1 {{definition of 'C<U>' is not complete until the closing '}'}}
+ // expected-note@-1 {{definition of 'MemberOfCurrentInstantiation::A0::C<U>' is not complete until the closing '}'}}
};
template<typename T>
diff --git a/clang/test/CXX/class/class.mem/class.mem.general/p8.cpp b/clang/test/CXX/class/class.mem/class.mem.general/p8.cpp
index 8cc9b41..c4aca32 100644
--- a/clang/test/CXX/class/class.mem/class.mem.general/p8.cpp
+++ b/clang/test/CXX/class/class.mem/class.mem.general/p8.cpp
@@ -45,7 +45,7 @@ namespace N1 {
void g0() noexcept(y); // expected-error {{use of undeclared identifier 'y'}}
void f1() noexcept(A::x);
- void g1() noexcept(A::y); // expected-error {{no member named 'y' in 'A<T>'}}
+ void g1() noexcept(A::y); // expected-error {{no member named 'y' in 'N1::A<T>'}}
template<typename U>
void f2() noexcept(x);
@@ -55,13 +55,13 @@ namespace N1 {
template<typename U>
void f3() noexcept(A::x);
template<typename U>
- void g3() noexcept(A::y); // expected-error {{no member named 'y' in 'A<T>'}}
+ void g3() noexcept(A::y); // expected-error {{no member named 'y' in 'N1::A<T>'}}
friend void f4() noexcept(x);
friend void g4() noexcept(y); // expected-error {{use of undeclared identifier 'y'}}
friend void f5() noexcept(A::x);
- friend void g5() noexcept(A::y); // expected-error {{no member named 'y' in 'A<T>'}}
+ friend void g5() noexcept(A::y); // expected-error {{no member named 'y' in 'N1::A<T>'}}
template<typename U>
friend void f6() noexcept(x);
@@ -71,7 +71,7 @@ namespace N1 {
template<typename U>
friend void f7() noexcept(A::x);
template<typename U>
- friend void g7() noexcept(A::y); // expected-error {{no member named 'y' in 'A<T>'}}
+ friend void g7() noexcept(A::y); // expected-error {{no member named 'y' in 'N1::A<T>'}}
static constexpr bool x = true;
};
diff --git a/clang/test/CXX/dcl.dcl/dcl.spec/dcl.type/dcl.type.elab/p3.cpp b/clang/test/CXX/dcl.dcl/dcl.spec/dcl.type/dcl.type.elab/p3.cpp
index 8bdd4905..c7b331e 100644
--- a/clang/test/CXX/dcl.dcl/dcl.spec/dcl.type/dcl.type.elab/p3.cpp
+++ b/clang/test/CXX/dcl.dcl/dcl.spec/dcl.type/dcl.type.elab/p3.cpp
@@ -38,7 +38,7 @@ template <> struct B<A> {
void b1(struct B<float>);
void b2(class B<float>);
-void b3(union B<float>); // expected-error {{use of 'B<float>' with tag type that does not match previous declaration}}
+void b3(union B<float>); // expected-error {{use of 'union B<float>' with tag type that does not match previous declaration}}
//void b4(enum B<float>); // this just doesn't parse; you can't template an enum directly
void c1(struct B<float>::Member);
diff --git a/clang/test/CXX/dcl.decl/dcl.init/dcl.init.ref/p5-examples.cpp b/clang/test/CXX/dcl.decl/dcl.init/dcl.init.ref/p5-examples.cpp
index 77aef99..ad3b014 100644
--- a/clang/test/CXX/dcl.decl/dcl.init/dcl.init.ref/p5-examples.cpp
+++ b/clang/test/CXX/dcl.decl/dcl.init/dcl.init.ref/p5-examples.cpp
@@ -22,7 +22,7 @@ void example1() {
// CHECK: VarDecl{{.*}}rca 'const A &'
// CHECK: ImplicitCastExpr{{.*}}'const A' lvalue <DerivedToBase (A)>
// CHECK-NOT: MaterializeTemporaryExpr
- // CHECK: ImplicitCastExpr{{.*}}'const B' lvalue <NoOp>
+ // CHECK: ImplicitCastExpr{{.*}}'const struct B' lvalue <NoOp>
const A& rca = b;
}
diff --git a/clang/test/CXX/dcl.decl/dcl.meaning/dcl.mptr/p2.cpp b/clang/test/CXX/dcl.decl/dcl.meaning/dcl.mptr/p2.cpp
index a06b107..bb1eb73 100644
--- a/clang/test/CXX/dcl.decl/dcl.meaning/dcl.mptr/p2.cpp
+++ b/clang/test/CXX/dcl.decl/dcl.meaning/dcl.mptr/p2.cpp
@@ -59,6 +59,6 @@ namespace TypoCorrection {
int A<T>::template typo<int>::* f();
template<typename T>
- int A<T>::typo<int>::* g(); // expected-error {{no template named 'typo' in 'A<T>'; did you mean 'Typo'?}}
+ int A<T>::typo<int>::* g(); // expected-error {{no template named 'typo' in 'TypoCorrection::A<T>'; did you mean 'Typo'?}}
// expected-error@-1 {{expected unqualified-id}}
}
diff --git a/clang/test/CXX/dcl/dcl.fct/p17.cpp b/clang/test/CXX/dcl/dcl.fct/p17.cpp
index 4a81875..431a8fb 100644
--- a/clang/test/CXX/dcl/dcl.fct/p17.cpp
+++ b/clang/test/CXX/dcl/dcl.fct/p17.cpp
@@ -109,7 +109,7 @@ namespace unconstrained {
template<typename T>
template<typename U>
constexpr auto S<T>::f2(auto x, U u, T t) -> decltype(x + u + t) { return x + u + t; }
- // expected-error@-1 {{out-of-line definition of 'f2' does not match any declaration in 'S<T>'}}
+ // expected-error@-1 {{out-of-line definition of 'f2' does not match any declaration in 'unconstrained::S<T>'}}
// expected-note@#defined-here {{S defined here}}
template<typename T>
diff --git a/clang/test/CXX/drs/cwg0xx.cpp b/clang/test/CXX/drs/cwg0xx.cpp
index 4d4e2f6..9aeee7a 100644
--- a/clang/test/CXX/drs/cwg0xx.cpp
+++ b/clang/test/CXX/drs/cwg0xx.cpp
@@ -859,7 +859,7 @@ namespace cwg54 { // cwg54: 2.8
// expected-error@-1 {{cannot cast 'struct B' to its private base class 'A'}}
// expected-note@#cwg54-B {{declared private here}}
int A::*smab = static_cast<int A::*>(&B::b);
- // expected-error@-1 {{cannot cast 'B' to its private base class 'A'}}
+ // expected-error@-1 {{cannot cast 'cwg54::B' to its private base class 'A'}}
// expected-note@#cwg54-B {{declared private here}}
B &sba = static_cast<B&>(a);
// expected-error@-1 {{cannot cast private base class 'cwg54::A' to 'cwg54::B'}}
@@ -874,7 +874,7 @@ namespace cwg54 { // cwg54: 2.8
V &svb = static_cast<V&>(b);
V *spvb = static_cast<V*>(&b);
int V::*smvb = static_cast<int V::*>(&B::b);
- // expected-error@-1 {{conversion from pointer to member of class 'B' to pointer to member of class 'V' via virtual base 'cwg54::V' is not allowed}}
+ // expected-error@-1 {{conversion from pointer to member of class 'cwg54::B' to pointer to member of class 'V' via virtual base 'cwg54::V' is not allowed}}
B &sbv = static_cast<B&>(v);
// expected-error@-1 {{cannot cast 'struct V' to 'B &' via virtual base 'cwg54::V'}}
B *spbv = static_cast<B*>(&v);
@@ -892,7 +892,7 @@ namespace cwg54 { // cwg54: 2.8
V &cvb = (V&)(b);
V *cpvb = (V*)(&b);
int V::*cmvb = (int V::*)(&B::b);
- // expected-error@-1 {{conversion from pointer to member of class 'B' to pointer to member of class 'V' via virtual base 'cwg54::V' is not allowed}}
+ // expected-error@-1 {{conversion from pointer to member of class 'cwg54::B' to pointer to member of class 'V' via virtual base 'cwg54::V' is not allowed}}
B &cbv = (B&)(v);
// expected-error@-1 {{cannot cast 'struct V' to 'B &' via virtual base 'cwg54::V'}}
B *cpbv = (B*)(&v);
diff --git a/clang/test/CXX/drs/cwg12xx.cpp b/clang/test/CXX/drs/cwg12xx.cpp
index e02a7e1..03a9228 100644
--- a/clang/test/CXX/drs/cwg12xx.cpp
+++ b/clang/test/CXX/drs/cwg12xx.cpp
@@ -81,7 +81,7 @@ void g() {
A b(auto ()->C);
static_assert(sizeof(B ()->C[1] == sizeof(int)), "");
sizeof(auto () -> C[1]);
- // since-cxx11-error@-1 {{function cannot return array type 'C[1]' (aka 'cwg1223::BB[1]')}}
+ // since-cxx11-error@-1 {{function cannot return array type 'C[1]' (aka 'struct BB[1]')}}
}
#endif
} // namespace cwg1223
diff --git a/clang/test/CXX/drs/cwg13xx.cpp b/clang/test/CXX/drs/cwg13xx.cpp
index c4acafd..ad6ee01 100644
--- a/clang/test/CXX/drs/cwg13xx.cpp
+++ b/clang/test/CXX/drs/cwg13xx.cpp
@@ -252,7 +252,7 @@ namespace cwg1330 { // cwg1330: 4 c++11
void (B<P>::*bpf3)() = &B<P>::f;
void (B<P>::*bpf4)() throw() = &B<P>::f;
// cxx98-14-error@-1 {{target exception specification is not superset of source}}
- // since-cxx17-error@-2 {{cannot initialize a variable of type 'void (B<P>::*)() throw()' with an rvalue of type 'void (B<P>::*)() throw(T, typename P::type)': different exception specifications}}
+ // since-cxx17-error@-2 {{cannot initialize a variable of type 'void (B<P>::*)() throw()' with an rvalue of type 'void (B<P>::*)() throw(T, typename cwg1330::P::type)': different exception specifications}}
#if __cplusplus >= 201103L
static_assert(noexcept(B<P>().g()), "");
diff --git a/clang/test/CXX/drs/cwg2149.cpp b/clang/test/CXX/drs/cwg2149.cpp
index 416c895..6b54223 100644
--- a/clang/test/CXX/drs/cwg2149.cpp
+++ b/clang/test/CXX/drs/cwg2149.cpp
@@ -56,22 +56,22 @@ void f() {
// CXX98: VarDecl {{.+}} a 'X[2]'
// CXX98-NEXT: `-InitListExpr {{.+}} 'X[2]'
-// CXX98-NEXT: |-InitListExpr {{.+}} 'X':'cwg2149::X'
+// CXX98-NEXT: |-InitListExpr {{.+}} 'X'{{$}}
// CXX98-NEXT: | |-IntegerLiteral {{.+}} 'int' 1
// CXX98-NEXT: | |-IntegerLiteral {{.+}} 'int' 2
// CXX98-NEXT: | `-IntegerLiteral {{.+}} 'int' 3
-// CXX98-NEXT: `-InitListExpr {{.+}} 'X':'cwg2149::X'
+// CXX98-NEXT: `-InitListExpr {{.+}} 'X'{{$}}
// CXX98-NEXT: |-IntegerLiteral {{.+}} 'int' 4
// CXX98-NEXT: |-IntegerLiteral {{.+}} 'int' 5
// CXX98-NEXT: `-IntegerLiteral {{.+}} 'int' 6
// CXX98: VarDecl {{.+}} b 'X[2]'
// CXX98-NEXT: `-InitListExpr {{.+}} 'X[2]'
-// CXX98-NEXT: |-InitListExpr {{.+}} 'X':'cwg2149::X'
+// CXX98-NEXT: |-InitListExpr {{.+}} 'X'{{$}}
// CXX98-NEXT: | |-IntegerLiteral {{.+}} 'int' 1
// CXX98-NEXT: | |-IntegerLiteral {{.+}} 'int' 2
// CXX98-NEXT: | `-IntegerLiteral {{.+}} 'int' 3
-// CXX98-NEXT: `-InitListExpr {{.+}} 'X':'cwg2149::X'
+// CXX98-NEXT: `-InitListExpr {{.+}} 'X'{{$}}
// CXX98-NEXT: |-IntegerLiteral {{.+}} 'int' 4
// CXX98-NEXT: |-IntegerLiteral {{.+}} 'int' 5
// CXX98-NEXT: `-IntegerLiteral {{.+}} 'int' 6
diff --git a/clang/test/CXX/drs/cwg26xx.cpp b/clang/test/CXX/drs/cwg26xx.cpp
index 426fc6c..bceef64 100644
--- a/clang/test/CXX/drs/cwg26xx.cpp
+++ b/clang/test/CXX/drs/cwg26xx.cpp
@@ -291,12 +291,12 @@ static_assert(__is_same(decltype(i), I<char, 4>));
J j = { "ghi" };
// since-cxx20-error@-1 {{no viable constructor or deduction guide}}
-// since-cxx20-note@#cwg2681-J {{candidate template ignored: could not match 'J<N>' against 'const char *'}}
-// since-cxx20-note@#cwg2681-J {{implicit deduction guide declared as 'template <size_t N> J(J<N>) -> J<N>'}}
+// since-cxx20-note@#cwg2681-J {{candidate template ignored: could not match 'cwg2681::J<N>' against 'const char *'}}
+// since-cxx20-note@#cwg2681-J {{implicit deduction guide declared as 'template <size_t N> J(cwg2681::J<N>) -> cwg2681::J<N>'}}
// since-cxx20-note@#cwg2681-J {{candidate template ignored: could not match 'const unsigned char' against 'const char'}}
-// since-cxx20-note@#cwg2681-J {{implicit deduction guide declared as 'template <size_t N> J(const unsigned char (&)[N]) -> J<N>'}}
+// since-cxx20-note@#cwg2681-J {{implicit deduction guide declared as 'template <size_t N> J(const unsigned char (&)[N]) -> cwg2681::J<N>'}}
// since-cxx20-note@#cwg2681-J {{candidate function template not viable: requires 0 arguments, but 1 was provided}}
-// since-cxx20-note@#cwg2681-J {{implicit deduction guide declared as 'template <size_t N> J() -> J<N>'}}
+// since-cxx20-note@#cwg2681-J {{implicit deduction guide declared as 'template <size_t N> J() -> cwg2681::J<N>'}}
#endif
} // namespace cwg2681
diff --git a/clang/test/CXX/drs/cwg28xx.cpp b/clang/test/CXX/drs/cwg28xx.cpp
index b32e649..a6b2b99 100644
--- a/clang/test/CXX/drs/cwg28xx.cpp
+++ b/clang/test/CXX/drs/cwg28xx.cpp
@@ -227,12 +227,12 @@ void f() {
auto L2 = [&](this auto&& self) { (void) &x; };
O1<decltype(L1)>{L1, L1}();
/* since-cxx23-error-re@-1 {{inaccessible due to ambiguity:
- struct cwg2881::O1<class (lambda at {{.+}})> -> A<(lambda at {{.+}})> -> class (lambda at {{.+}})
- struct cwg2881::O1<class (lambda at {{.+}})> -> B<(lambda at {{.+}})> -> class (lambda at {{.+}})}}*/
+ struct cwg2881::O1<class (lambda at {{.+}})> -> A<class (lambda at {{.+}})> -> class (lambda at {{.+}})
+ struct cwg2881::O1<class (lambda at {{.+}})> -> B<class (lambda at {{.+}})> -> class (lambda at {{.+}})}}*/
O1<decltype(L2)>{L2, L2}();
/* since-cxx23-error-re@-1 {{inaccessible due to ambiguity:
- struct cwg2881::O1<class (lambda at {{.+}})> -> A<(lambda at {{.+}})> -> class (lambda at {{.+}})
- struct cwg2881::O1<class (lambda at {{.+}})> -> B<(lambda at {{.+}})> -> class (lambda at {{.+}})}}*/
+ struct cwg2881::O1<class (lambda at {{.+}})> -> A<class (lambda at {{.+}})> -> class (lambda at {{.+}})
+ struct cwg2881::O1<class (lambda at {{.+}})> -> B<class (lambda at {{.+}})> -> class (lambda at {{.+}})}}*/
O2{L1}();
// since-cxx23-error-re@-1 {{invalid explicit object parameter type 'cwg2881::O2<(lambda at {{.+}})>' in lambda with capture; the type must derive publicly from the lambda}}
// since-cxx23-note@#cwg2881-O2 {{declared protected here}}
@@ -277,7 +277,7 @@ struct Indirect : T {
template<typename T>
struct Ambiguous : Indirect<T>, T {
/* since-cxx23-warning-re@-1 {{direct base '(lambda at {{.+}})' is inaccessible due to ambiguity:
- struct cwg2881::Ambiguous<class (lambda at {{.+}})> -> Indirect<(lambda at {{.+}})> -> class (lambda at {{.+}})
+ struct cwg2881::Ambiguous<class (lambda at {{.+}})> -> Indirect<class (lambda at {{.+}})> -> class (lambda at {{.+}})
struct cwg2881::Ambiguous<class (lambda at {{.+}})> -> class (lambda at {{.+}})}}*/
// since-cxx23-note-re@#cwg2881-f4 {{in instantiation of template class 'cwg2881::Ambiguous<(lambda at {{.+}})>' requested here}}
// since-cxx34-note-re@#cwg2881-f4-call {{while substituting deduced template arguments into function template 'f4' [with L = (lambda at {{.+}})]}}
@@ -303,7 +303,7 @@ void g() {
// expected-error@-1 {{no matching function for call to 'f4'}}
// expected-note-re@-2 {{while substituting deduced template arguments into function template 'f4' [with L = (lambda at {{.+}})]}}
/* expected-note-re@#cwg2881-f4 {{candidate template ignored: substitution failure [with L = (lambda at {{.+}})]: lambda '(lambda at {{.+}})' is inaccessible due to ambiguity:
- struct cwg2881::Ambiguous<class (lambda at {{.+}})> -> Indirect<(lambda at {{.+}})> -> class (lambda at {{.+}})
+ struct cwg2881::Ambiguous<class (lambda at {{.+}})> -> Indirect<class (lambda at {{.+}})> -> class (lambda at {{.+}})
struct cwg2881::Ambiguous<class (lambda at {{.+}})> -> class (lambda at {{.+}})}}*/
static_assert(!is_callable<Private<decltype(lambda)>>);
static_assert(!is_callable<Ambiguous<decltype(lambda)>>);
diff --git a/clang/test/CXX/drs/cwg2xx.cpp b/clang/test/CXX/drs/cwg2xx.cpp
index 556407a..37186e3 100644
--- a/clang/test/CXX/drs/cwg2xx.cpp
+++ b/clang/test/CXX/drs/cwg2xx.cpp
@@ -98,8 +98,8 @@ public:
void foo() { Templ<Derived> x(&Derived::func); }
// expected-error@-1 {{no matching constructor for initialization of 'Templ<Derived>'}}
-// expected-note@#cwg203-ex3-Templ {{candidate constructor (the implicit copy constructor) not viable: no known conversion from 'int (Derived::*)() const' (aka 'int (Base::*)() const') to 'const Templ<cwg203::ex3::Derived>' for 1st argument}}
-// since-cxx11-note@#cwg203-ex3-Templ {{candidate constructor (the implicit move constructor) not viable: no known conversion from 'int (Derived::*)() const' (aka 'int (Base::*)() const') to 'Templ<cwg203::ex3::Derived>' for 1st argument}}
+// expected-note@#cwg203-ex3-Templ {{candidate constructor (the implicit copy constructor) not viable: no known conversion from 'int (Derived::*)() const' (aka 'int (cwg203::ex3::Base::*)() const') to 'const Templ<cwg203::ex3::Derived>' for 1st argument}}
+// since-cxx11-note@#cwg203-ex3-Templ {{candidate constructor (the implicit move constructor) not viable: no known conversion from 'int (Derived::*)() const' (aka 'int (cwg203::ex3::Base::*)() const') to 'Templ<cwg203::ex3::Derived>' for 1st argument}}
// expected-note@#cwg203-ex3-Templ-ctor {{candidate template ignored: could not match 'cwg203::ex3::Derived' against 'cwg203::ex3::Base'}}
} // namespace ex3
@@ -690,8 +690,8 @@ namespace cwg244 { // cwg244: 11
void f() {
D_object.~B();
- // expected-error@-1 {{destructor type 'cwg244::B' in object destruction expression does not match the type 'D' of the object being destroyed}}
- // expected-note@#cwg244-B {{type 'cwg244::B' found by destructor name lookup}}
+ // expected-error@-1 {{destructor type 'B' in object destruction expression does not match the type 'D' of the object being destroyed}}
+ // expected-note@#cwg244-B {{type 'B' found by destructor name lookup}}
D_object.B::~B();
D_object.D::~B(); // FIXME: Missing diagnostic for this.
B_ptr->~B();
@@ -1400,7 +1400,7 @@ namespace cwg298 { // cwg298: 3.1
// expected-error@-1 {{a type specifier is required for all declarations}}
B::A() {} // ok
C::~C() {}
- // expected-error@-1 {{destructor cannot be declared using a typedef 'C' (aka 'const cwg298::A') of the class name}}
+ // expected-error@-1 {{destructor cannot be declared using a typedef 'C' (aka 'const A') of the class name}}
typedef struct D E; // #cwg298-E
struct E {};
diff --git a/clang/test/CXX/drs/cwg3xx.cpp b/clang/test/CXX/drs/cwg3xx.cpp
index 8b035cf..5d09697 100644
--- a/clang/test/CXX/drs/cwg3xx.cpp
+++ b/clang/test/CXX/drs/cwg3xx.cpp
@@ -143,7 +143,7 @@ namespace cwg305 { // cwg305: no
void h(B *b) {
struct B {}; // #cwg305-h-B
b->~B();
- // expected-error@-1 {{destructor type 'B' in object destruction expression does not match the type 'B' (aka 'cwg305::A') of the object being destroyed}}
+ // expected-error@-1 {{destructor type 'B' in object destruction expression does not match the type 'B' (aka 'A') of the object being destroyed}}
// expected-note@#cwg305-h-B {{type 'B' found by destructor name lookup}}
}
@@ -1027,7 +1027,7 @@ namespace cwg357 { // cwg357: 2.7
void f() const; // #cwg357-f
};
template<typename T> void A<T>::f() {}
- // expected-error@-1 {{out-of-line definition of 'f' does not match any declaration in 'A<T>'}}
+ // expected-error@-1 {{out-of-line definition of 'f' does not match any declaration in 'cwg357::A<T>'}}
// expected-note@#cwg357-A {{defined here}}
// expected-note@#cwg357-f {{member declaration does not match because it is const qualified}}
@@ -1800,8 +1800,8 @@ namespace cwg399 { // cwg399: 11
void f() {
D_object.~B();
- // expected-error@-1 {{destructor type 'cwg399::B' in object destruction expression does not match the type 'D' of the object being destroyed}}
- // expected-note@#cwg399-B {{type 'cwg399::B' found by destructor name lookup}}
+ // expected-error@-1 {{destructor type 'B' in object destruction expression does not match the type 'D' of the object being destroyed}}
+ // expected-note@#cwg399-B {{type 'B' found by destructor name lookup}}
D_object.B::~B();
D_object.D::~B(); // FIXME: Missing diagnostic for this.
B_ptr->~B();
diff --git a/clang/test/CXX/drs/cwg4xx.cpp b/clang/test/CXX/drs/cwg4xx.cpp
index dc53ae7..8497f97 100644
--- a/clang/test/CXX/drs/cwg4xx.cpp
+++ b/clang/test/CXX/drs/cwg4xx.cpp
@@ -1185,7 +1185,7 @@ namespace cwg480 { // cwg480: 2.7
extern int D::*c;
int A::*d = static_cast<int A::*>(c);
- // expected-error@-1 {{conversion from pointer to member of class 'D' to pointer to member of class 'A' via virtual base 'cwg480::B' is not allowed}}
+ // expected-error@-1 {{conversion from pointer to member of class 'cwg480::D' to pointer to member of class 'A' via virtual base 'cwg480::B' is not allowed}}
D *e;
A *f = e;
diff --git a/clang/test/CXX/drs/cwg6xx.cpp b/clang/test/CXX/drs/cwg6xx.cpp
index e2eb009..659fa49 100644
--- a/clang/test/CXX/drs/cwg6xx.cpp
+++ b/clang/test/CXX/drs/cwg6xx.cpp
@@ -383,7 +383,7 @@ namespace cwg635 { // cwg635: 2.7
template<typename T> template<typename U> D<T>::D() {}
template<typename T> D<T>::D<T>() {} // #cwg635-D-T
// expected-error@#cwg635-D-T {{out-of-line constructor for 'D' cannot have template arguments}}
- // expected-error@#cwg635-D-T {{redefinition of 'D<T>'}}
+ // expected-error@#cwg635-D-T {{redefinition of 'cwg635::D<T>'}}
// expected-note@#cwg635-D {{previous definition is here}}
} // namespace cwg635
@@ -895,12 +895,12 @@ namespace cwg666 { // cwg666: 2.8
template<int> int f();
template<typename T> int f() {
T::type *p = 0;
- // expected-error@-1 {{missing 'typename' prior to dependent type name 'Y::type'}}
+ // expected-error@-1 {{missing 'typename' prior to dependent type name 'cwg666::Y::type'}}
// expected-note@#cwg666-f-Y {{in instantiation of function template specialization 'cwg666::f<cwg666::Y>' requested here}}
int a(T::type);
- // expected-error@-1 {{missing 'typename' prior to dependent type name 'Y::type'}}
+ // expected-error@-1 {{missing 'typename' prior to dependent type name 'cwg666::Y::type'}}
return f<T::type>();
- // expected-error@-1 {{missing 'typename' prior to dependent type name 'Y::type'}}
+ // expected-error@-1 {{missing 'typename' prior to dependent type name 'cwg666::Y::type'}}
}
struct X { static const int type = 0; };
struct Y { typedef int type; };
diff --git a/clang/test/CXX/expr/expr.prim/expr.prim.req/compound-requirement.cpp b/clang/test/CXX/expr/expr.prim/expr.prim.req/compound-requirement.cpp
index dc0e842..31587a9 100644
--- a/clang/test/CXX/expr/expr.prim/expr.prim.req/compound-requirement.cpp
+++ b/clang/test/CXX/expr/expr.prim/expr.prim.req/compound-requirement.cpp
@@ -160,7 +160,7 @@ namespace std_example {
template<typename T> concept C2 =
requires(T x) {
{*x} -> same_as<typename T::inner>;
- // expected-note@-1{{because type constraint 'same_as<int, typename T2::inner>' was not satisfied:}}
+ // expected-note@-1{{because type constraint 'same_as<int, typename std_example::T2::inner>' was not satisfied:}}
// expected-note@-2{{because '*x' would be invalid: indirection requires pointer operand ('int' invalid)}}
};
diff --git a/clang/test/CXX/over/over.match/over.match.funcs/over.match.class.deduct/p2.cpp b/clang/test/CXX/over/over.match/over.match.funcs/over.match.class.deduct/p2.cpp
index d192070..a32295e 100644
--- a/clang/test/CXX/over/over.match/over.match.funcs/over.match.class.deduct/p2.cpp
+++ b/clang/test/CXX/over/over.match/over.match.funcs/over.match.class.deduct/p2.cpp
@@ -37,7 +37,7 @@ namespace std {
namespace p0702r1 {
template<typename T> struct X { // expected-note {{candidate}} expected-note {{implicit deduction guide}}
X(std::initializer_list<T>); // expected-note {{candidate template ignored: could not match 'std::initializer_list<T>' against 'Z'}} \
- // expected-note {{implicit deduction guide declared as 'template <typename T> X(std::initializer_list<T>) -> X<T>'}}
+ // expected-note {{implicit deduction guide declared as 'template <typename T> X(std::initializer_list<T>) -> p0702r1::X<T>'}}
};
X xi = {0};
diff --git a/clang/test/CXX/temp/temp.arg/temp.arg.nontype/p5.cpp b/clang/test/CXX/temp/temp.arg/temp.arg.nontype/p5.cpp
index 01ce33b..4b93d86 100644
--- a/clang/test/CXX/temp/temp.arg/temp.arg.nontype/p5.cpp
+++ b/clang/test/CXX/temp/temp.arg/temp.arg.nontype/p5.cpp
@@ -238,9 +238,9 @@ namespace pointer_to_member_data {
X0<&Y::y> x0a;
X0<&Y::x> x0b;
#if __cplusplus <= 201402L
- // expected-error@-2 {{non-type template argument of type 'int Y::*' (aka 'int X::*') cannot be converted to a value of type 'int Y::*'}}
+ // expected-error@-2 {{non-type template argument of type 'int Y::*' (aka 'int pointer_to_member_data::X::*') cannot be converted to a value of type 'int Y::*'}}
#else
- // expected-error@-4 {{conversion from 'int Y::*' (aka 'int X::*') to 'int Y::*' is not allowed in a converted constant expression}}
+ // expected-error@-4 {{conversion from 'int Y::*' (aka 'int pointer_to_member_data::X::*') to 'int Y::*' (aka 'int pointer_to_member_data::Y::*') is not allowed in a converted constant expression}}
#endif
// Test qualification conversions
diff --git a/clang/test/CXX/temp/temp.constr/temp.constr.decl/func-template-decl.cpp b/clang/test/CXX/temp/temp.constr/temp.constr.decl/func-template-decl.cpp
index 9a8148b..499e6ab 100644
--- a/clang/test/CXX/temp/temp.constr/temp.constr.decl/func-template-decl.cpp
+++ b/clang/test/CXX/temp/temp.constr/temp.constr.decl/func-template-decl.cpp
@@ -54,7 +54,7 @@ struct TA { // #defined-here
template <unsigned N>
template <template <unsigned> class TT> int TA<N>::A() { return sizeof(TT<N>); }
-// expected-error@-1{{out-of-line definition of 'A' does not match any declaration in 'TA<N>'}}
+// expected-error@-1{{out-of-line definition of 'A' does not match any declaration in 'diag::TA<N>'}}
// expected-note@#defined-here{{defined here}}
} // end namespace diag
diff --git a/clang/test/CXX/temp/temp.decls/temp.class.spec/p6.cpp b/clang/test/CXX/temp/temp.decls/temp.class.spec/p6.cpp
index 2da0382..5345b08 100644
--- a/clang/test/CXX/temp/temp.decls/temp.class.spec/p6.cpp
+++ b/clang/test/CXX/temp/temp.decls/temp.class.spec/p6.cpp
@@ -38,7 +38,7 @@ A<short>::C::B<int*> absip;
template<typename T, typename U>
struct Outer {
template<typename X, typename Y> struct Inner;
- template<typename Y> struct Inner<T, Y> {}; // expected-note{{previous declaration of class template partial specialization 'Inner<int, Y>' is here}}
+ template<typename Y> struct Inner<T, Y> {}; // expected-note{{previous declaration of class template partial specialization 'Outer<int, int>::Inner<int, Y>' is here}}
template<typename Y> struct Inner<U, Y> {}; // expected-error{{cannot be redeclared}}
};
@@ -80,7 +80,7 @@ namespace print_dependent_TemplateSpecializationType {
template <class T, class U> struct Foo {
template <unsigned long, class X, class Y> struct Bar;
template <class Y> struct Bar<0, T, Y> {};
- // expected-note-re@-1 {{previous declaration {{.*}} 'Bar<0, int, Y>' is here}}
+ // expected-note-re@-1 {{previous declaration {{.*}} 'print_dependent_TemplateSpecializationType::Foo<int, int>::Bar<0, int, Y>' is here}}
template <class Y> struct Bar<0, U, Y> {};
// expected-error@-1 {{partial specialization 'Bar<0, int, Y>' cannot be redeclared}}
};
diff --git a/clang/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.call/p3-0x.cpp b/clang/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.call/p3-0x.cpp
index 7d5be01..1eb35ab 100644
--- a/clang/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.call/p3-0x.cpp
+++ b/clang/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.call/p3-0x.cpp
@@ -78,9 +78,9 @@ namespace std_example {
template<class T> struct A { // expected-note {{candidate}} expected-note {{implicit deduction guide}}
template<class U>
A(T &&, U &&, int *); // expected-note {{[with T = int, U = int] not viable: expects an rvalue}} \
- // expected-note {{implicit deduction guide declared as 'template <class T, class U> A(T &&, U &&, int *) -> A<T>'}}
+ // expected-note {{implicit deduction guide declared as 'template <class T, class U> A(T &&, U &&, int *) -> std_example::A<T>'}}
A(T &&, int *); // expected-note {{requires 2}} \
- // expected-note {{implicit deduction guide declared as 'template <class T> A(T &&, int *) -> A<T>'}}
+ // expected-note {{implicit deduction guide declared as 'template <class T> A(T &&, int *) -> std_example::A<T>'}}
};
template<class T> A(T &&, int *) -> A<T>; // expected-note {{requires 2}}
diff --git a/clang/test/CXX/temp/temp.param/p15-cxx0x.cpp b/clang/test/CXX/temp/temp.param/p15-cxx0x.cpp
index 83144a4..238490c 100644
--- a/clang/test/CXX/temp/temp.param/p15-cxx0x.cpp
+++ b/clang/test/CXX/temp/temp.param/p15-cxx0x.cpp
@@ -97,14 +97,14 @@ template<unsigned N, typename...Ts> struct drop {
using T1 = take<3, int, char, double, long>::type; // expected-note {{previous}}
// FIXME: Desguar the types on the RHS in this diagnostic.
// desired-error {{'types<void, void, void, void>' vs 'types<int, char, double, (no argument)>'}}
-using T1 = types<void, void, void, void>; // expected-error {{'types<void, void, void, void>' vs 'types<typename inner<_>::type, typename inner<_>::type, typename inner<_>::type, (no argument)>'}}
+using T1 = types<void, void, void, void>; // expected-error {{'types<void, void, void, void>' vs 'types<typename ParameterPackExpansions::wrap<int>::inner<_>::type, typename ParameterPackExpansions::wrap<char>::inner<_>::type, typename ParameterPackExpansions::wrap<double>::inner<_>::type, (no argument)>'}}
using D1 = drop<3, int, char, double, long>::type;
using D1 = types<long>;
using T2 = take<4, int, char, double, long>::type; // expected-note {{previous}}
// FIXME: Desguar the types on the RHS in this diagnostic.
// desired-error {{'types<void, void, void, void>' vs 'types<int, char, double, long>'}}
-using T2 = types<void, void, void, void>; // expected-error {{'types<void, void, void, void>' vs 'types<typename inner<_>::type, typename inner<_>::type, typename inner<_>::type, typename inner<_>::type>'}}
+using T2 = types<void, void, void, void>; // expected-error {{'types<void, void, void, void>' vs 'types<typename ParameterPackExpansions::wrap<int>::inner<_>::type, typename ParameterPackExpansions::wrap<char>::inner<_>::type, typename ParameterPackExpansions::wrap<double>::inner<_>::type, typename ParameterPackExpansions::wrap<long>::inner<_>::type>'}}
using T2 = types<int, char, double, long>;
using D2 = drop<4, int, char, double, long>::type;
using D2 = types<>;
diff --git a/clang/test/CXX/temp/temp.res/temp.dep/temp.dep.type/p4.cpp b/clang/test/CXX/temp/temp.res/temp.dep/temp.dep.type/p4.cpp
index f32f49e..75805a7 100644
--- a/clang/test/CXX/temp/temp.res/temp.dep/temp.dep.type/p4.cpp
+++ b/clang/test/CXX/temp/temp.res/temp.dep/temp.dep.type/p4.cpp
@@ -326,52 +326,52 @@ namespace N0 {
// None of the following should be found in the current instantiation.
new M4; // expected-error{{unknown type name 'M4'}}
- new B::M4; // expected-error{{no type named 'M4' in 'B<T>'}}
+ new B::M4; // expected-error{{no type named 'M4' in 'N0::B<T>'}}
new A::M4; // expected-error{{no type named 'M4' in 'N0::A'}}
new B::A::M4; // expected-error{{no type named 'M4' in 'N0::A'}}
x4; // expected-error{{use of undeclared identifier 'x4'}}
- B::x4; // expected-error{{no member named 'x4' in 'B<T>'}}
+ B::x4; // expected-error{{no member named 'x4' in 'N0::B<T>'}}
A::x4; // expected-error{{no member named 'x4' in 'N0::A'}}
B::A::x4; // expected-error{{no member named 'x4' in 'N0::A'}}
f4(); // expected-error{{use of undeclared identifier 'f4'}}
- B::f4(); // expected-error{{no member named 'f4' in 'B<T>'}}
+ B::f4(); // expected-error{{no member named 'f4' in 'N0::B<T>'}}
A::f4(); // expected-error{{no member named 'f4' in 'N0::A'}}
B::A::f4(); // expected-error{{no member named 'f4' in 'N0::A'}}
- this->x4; // expected-error{{no member named 'x4' in 'B<T>'}}
- this->B::x4; // expected-error{{no member named 'x4' in 'B<T>'}}
+ this->x4; // expected-error{{no member named 'x4' in 'N0::B<T>'}}
+ this->B::x4; // expected-error{{no member named 'x4' in 'N0::B<T>'}}
this->A::x4; // expected-error{{no member named 'x4' in 'N0::A'}}
this->B::A::x4; // expected-error{{no member named 'x4' in 'N0::A'}}
- this->f4(); // expected-error{{no member named 'f4' in 'B<T>'}}
- this->B::f4(); // expected-error{{no member named 'f4' in 'B<T>'}}
+ this->f4(); // expected-error{{no member named 'f4' in 'N0::B<T>'}}
+ this->B::f4(); // expected-error{{no member named 'f4' in 'N0::B<T>'}}
this->A::f4(); // expected-error{{no member named 'f4' in 'N0::A'}}
this->B::A::f4(); // expected-error{{no member named 'f4' in 'N0::A'}}
- a->x4; // expected-error{{no member named 'x4' in 'B<T>'}}
- a->B::x4; // expected-error{{no member named 'x4' in 'B<T>'}}
+ a->x4; // expected-error{{no member named 'x4' in 'N0::B<T>'}}
+ a->B::x4; // expected-error{{no member named 'x4' in 'N0::B<T>'}}
a->A::x4; // expected-error{{no member named 'x4' in 'N0::A'}}
a->B::A::x4; // expected-error{{no member named 'x4' in 'N0::A'}}
- a->f4(); // expected-error{{no member named 'f4' in 'B<T>'}}
- a->B::f4(); // expected-error{{no member named 'f4' in 'B<T>'}}
+ a->f4(); // expected-error{{no member named 'f4' in 'N0::B<T>'}}
+ a->B::f4(); // expected-error{{no member named 'f4' in 'N0::B<T>'}}
a->A::f4(); // expected-error{{no member named 'f4' in 'N0::A'}}
a->B::A::f4(); // expected-error{{no member named 'f4' in 'N0::A'}}
- (*this).x4; // expected-error{{no member named 'x4' in 'B<T>'}}
- (*this).B::x4; // expected-error{{no member named 'x4' in 'B<T>'}}
+ (*this).x4; // expected-error{{no member named 'x4' in 'N0::B<T>'}}
+ (*this).B::x4; // expected-error{{no member named 'x4' in 'N0::B<T>'}}
(*this).A::x4; // expected-error{{no member named 'x4' in 'N0::A'}}
(*this).B::A::x4; // expected-error{{no member named 'x4' in 'N0::A'}}
- (*this).f4(); // expected-error{{no member named 'f4' in 'B<T>'}}
- (*this).B::f4(); // expected-error{{no member named 'f4' in 'B<T>'}}
+ (*this).f4(); // expected-error{{no member named 'f4' in 'N0::B<T>'}}
+ (*this).B::f4(); // expected-error{{no member named 'f4' in 'N0::B<T>'}}
(*this).A::f4(); // expected-error{{no member named 'f4' in 'N0::A'}}
(*this).B::A::f4(); // expected-error{{no member named 'f4' in 'N0::A'}}
- b.x4; // expected-error{{no member named 'x4' in 'B<T>'}}
- b.B::x4; // expected-error{{no member named 'x4' in 'B<T>'}}
+ b.x4; // expected-error{{no member named 'x4' in 'N0::B<T>'}}
+ b.B::x4; // expected-error{{no member named 'x4' in 'N0::B<T>'}}
b.A::x4; // expected-error{{no member named 'x4' in 'N0::A'}}
b.B::A::x4; // expected-error{{no member named 'x4' in 'N0::A'}}
- b.f4(); // expected-error{{no member named 'f4' in 'B<T>'}}
- b.B::f4(); // expected-error{{no member named 'f4' in 'B<T>'}}
+ b.f4(); // expected-error{{no member named 'f4' in 'N0::B<T>'}}
+ b.B::f4(); // expected-error{{no member named 'f4' in 'N0::B<T>'}}
b.A::f4(); // expected-error{{no member named 'f4' in 'N0::A'}}
b.B::A::f4(); // expected-error{{no member named 'f4' in 'N0::A'}}
}
@@ -424,7 +424,7 @@ namespace N2 {
void not_instantiated(A *a, B *b) {
b->x; // expected-error{{no member named 'x' in 'N2::A::B'}}
b->B::x; // expected-error{{no member named 'x' in 'N2::A::B'}}
- a->B::C::x; // expected-error{{no member named 'x' in 'A<T>'}}
+ a->B::C::x; // expected-error{{no member named 'x' in 'N2::A<T>'}}
}
};
diff --git a/clang/test/CodeGen/2007-06-18-SextAttrAggregate.c b/clang/test/CodeGen/2007-06-18-SextAttrAggregate.c
index daed3baf..ccfdc1a 100644
--- a/clang/test/CodeGen/2007-06-18-SextAttrAggregate.c
+++ b/clang/test/CodeGen/2007-06-18-SextAttrAggregate.c
@@ -1,5 +1,5 @@
// RUN: %clang_cc1 -no-enable-noundef-analysis %s -o - -emit-llvm | FileCheck %s
-// XFAIL: target={{(aarch64|arm64).*}}, target=x86_64-pc-windows-msvc, target=x86_64-{{(pc|w64)}}-windows-gnu
+// XFAIL: target={{(aarch64|arm64).*}}, target=x86_64-pc-windows-msvc, target=x86_64-{{(pc|w64)}}-windows-gnu, target=x86_64-pc-windows-cygnus
// PR1513
diff --git a/clang/test/CodeGen/AArch64/ABI-align-packed.c b/clang/test/CodeGen/AArch64/ABI-align-packed.c
index 0349ebc..09f9180 100644
--- a/clang/test/CodeGen/AArch64/ABI-align-packed.c
+++ b/clang/test/CodeGen/AArch64/ABI-align-packed.c
@@ -72,9 +72,9 @@ __attribute__((noinline)) void named_arg_non_packed_struct(double d0, double d1,
// CHECK-SAME: (double [[D0:%.*]], double [[D1:%.*]], double [[D2:%.*]], double [[D3:%.*]], double [[D4:%.*]], double [[D5:%.*]], double [[D6:%.*]], double [[D7:%.*]], double [[D8:%.*]], ...) local_unnamed_addr #[[ATTR1:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VL:%.*]] = alloca [[STRUCT___VA_LIST:%.*]], align 8
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[VL]]) #[[ATTR6:[0-9]+]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[VL]]) #[[ATTR6:[0-9]+]]
// CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[VL]])
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[VL]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[VL]]) #[[ATTR6]]
// CHECK-NEXT: ret void
void variadic_non_packed_struct(double d0, double d1, double d2, double d3,
double d4, double d5, double d6, double d7,
@@ -89,7 +89,7 @@ void variadic_non_packed_struct(double d0, double d1, double d2, double d3,
// CHECK-SAME: () local_unnamed_addr #[[ATTR4:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[S_NON_PACKED_STRUCT:%.*]] = alloca [[STRUCT_NON_PACKED_STRUCT:%.*]], align 16
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[S_NON_PACKED_STRUCT]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[S_NON_PACKED_STRUCT]]) #[[ATTR6]]
// CHECK-NEXT: call void (i32, ...) @init(i32 noundef 1, ptr noundef nonnull [[S_NON_PACKED_STRUCT]]) #[[ATTR6]]
// CHECK-NEXT: [[DOTFCA_0_LOAD:%.*]] = load <8 x i16>, ptr [[S_NON_PACKED_STRUCT]], align 16
// CHECK-NEXT: [[DOTFCA_0_INSERT:%.*]] = insertvalue [1 x <8 x i16>] poison, <8 x i16> [[DOTFCA_0_LOAD]], 0
@@ -97,7 +97,7 @@ void variadic_non_packed_struct(double d0, double d1, double d2, double d3,
// CHECK-NEXT: [[DOTFCA_0_LOAD3:%.*]] = load <8 x i16>, ptr [[S_NON_PACKED_STRUCT]], align 16
// CHECK-NEXT: [[DOTFCA_0_INSERT4:%.*]] = insertvalue [1 x <8 x i16>] poison, <8 x i16> [[DOTFCA_0_LOAD3]], 0
// CHECK-NEXT: call void (double, double, double, double, double, double, double, double, double, ...) @variadic_non_packed_struct(double poison, double poison, double poison, double poison, double poison, double poison, double poison, double poison, double poison, [1 x <8 x i16>] alignstack(16) [[DOTFCA_0_INSERT4]])
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[S_NON_PACKED_STRUCT]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[S_NON_PACKED_STRUCT]]) #[[ATTR6]]
// CHECK-NEXT: ret void
void test_non_packed_struct() {
struct non_packed_struct s_non_packed_struct;
@@ -127,9 +127,9 @@ __attribute__((noinline)) void named_arg_packed_struct(double d0, double d1, dou
// CHECK-SAME: (double [[D0:%.*]], double [[D1:%.*]], double [[D2:%.*]], double [[D3:%.*]], double [[D4:%.*]], double [[D5:%.*]], double [[D6:%.*]], double [[D7:%.*]], double [[D8:%.*]], ...) local_unnamed_addr #[[ATTR1]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VL:%.*]] = alloca [[STRUCT___VA_LIST:%.*]], align 8
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[VL]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[VL]]) #[[ATTR6]]
// CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[VL]])
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[VL]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[VL]]) #[[ATTR6]]
// CHECK-NEXT: ret void
void variadic_packed_struct(double d0, double d1, double d2, double d3,
double d4, double d5, double d6, double d7,
@@ -144,7 +144,7 @@ void variadic_packed_struct(double d0, double d1, double d2, double d3,
// CHECK-SAME: () local_unnamed_addr #[[ATTR4]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[S_PACKED_STRUCT:%.*]] = alloca [[STRUCT_PACKED_STRUCT:%.*]], align 16
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[S_PACKED_STRUCT]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[S_PACKED_STRUCT]]) #[[ATTR6]]
// CHECK-NEXT: call void (i32, ...) @init(i32 noundef 1, ptr noundef nonnull [[S_PACKED_STRUCT]]) #[[ATTR6]]
// CHECK-NEXT: [[DOTFCA_0_LOAD:%.*]] = load <8 x i16>, ptr [[S_PACKED_STRUCT]], align 16
// CHECK-NEXT: [[DOTFCA_0_INSERT:%.*]] = insertvalue [1 x <8 x i16>] poison, <8 x i16> [[DOTFCA_0_LOAD]], 0
@@ -152,7 +152,7 @@ void variadic_packed_struct(double d0, double d1, double d2, double d3,
// CHECK-NEXT: [[DOTFCA_0_LOAD3:%.*]] = load <8 x i16>, ptr [[S_PACKED_STRUCT]], align 16
// CHECK-NEXT: [[DOTFCA_0_INSERT4:%.*]] = insertvalue [1 x <8 x i16>] poison, <8 x i16> [[DOTFCA_0_LOAD3]], 0
// CHECK-NEXT: call void (double, double, double, double, double, double, double, double, double, ...) @variadic_packed_struct(double poison, double poison, double poison, double poison, double poison, double poison, double poison, double poison, double poison, [1 x <8 x i16>] alignstack(8) [[DOTFCA_0_INSERT4]])
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[S_PACKED_STRUCT]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[S_PACKED_STRUCT]]) #[[ATTR6]]
// CHECK-NEXT: ret void
void test_packed_struct() {
struct packed_struct s_packed_struct;
@@ -182,9 +182,9 @@ __attribute__((noinline)) void named_arg_packed_member(double d0, double d1, dou
// CHECK-SAME: (double [[D0:%.*]], double [[D1:%.*]], double [[D2:%.*]], double [[D3:%.*]], double [[D4:%.*]], double [[D5:%.*]], double [[D6:%.*]], double [[D7:%.*]], double [[D8:%.*]], ...) local_unnamed_addr #[[ATTR1]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VL:%.*]] = alloca [[STRUCT___VA_LIST:%.*]], align 8
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[VL]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[VL]]) #[[ATTR6]]
// CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[VL]])
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[VL]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[VL]]) #[[ATTR6]]
// CHECK-NEXT: ret void
void variadic_packed_member(double d0, double d1, double d2, double d3,
double d4, double d5, double d6, double d7,
@@ -199,7 +199,7 @@ void variadic_packed_member(double d0, double d1, double d2, double d3,
// CHECK-SAME: () local_unnamed_addr #[[ATTR4]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[S_PACKED_MEMBER:%.*]] = alloca [[STRUCT_PACKED_MEMBER:%.*]], align 16
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[S_PACKED_MEMBER]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[S_PACKED_MEMBER]]) #[[ATTR6]]
// CHECK-NEXT: call void (i32, ...) @init(i32 noundef 1, ptr noundef nonnull [[S_PACKED_MEMBER]]) #[[ATTR6]]
// CHECK-NEXT: [[DOTFCA_0_LOAD:%.*]] = load <8 x i16>, ptr [[S_PACKED_MEMBER]], align 16
// CHECK-NEXT: [[DOTFCA_0_INSERT:%.*]] = insertvalue [1 x <8 x i16>] poison, <8 x i16> [[DOTFCA_0_LOAD]], 0
@@ -207,7 +207,7 @@ void variadic_packed_member(double d0, double d1, double d2, double d3,
// CHECK-NEXT: [[DOTFCA_0_LOAD3:%.*]] = load <8 x i16>, ptr [[S_PACKED_MEMBER]], align 16
// CHECK-NEXT: [[DOTFCA_0_INSERT4:%.*]] = insertvalue [1 x <8 x i16>] poison, <8 x i16> [[DOTFCA_0_LOAD3]], 0
// CHECK-NEXT: call void (double, double, double, double, double, double, double, double, double, ...) @variadic_packed_member(double poison, double poison, double poison, double poison, double poison, double poison, double poison, double poison, double poison, [1 x <8 x i16>] alignstack(8) [[DOTFCA_0_INSERT4]])
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[S_PACKED_MEMBER]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[S_PACKED_MEMBER]]) #[[ATTR6]]
// CHECK-NEXT: ret void
void test_packed_member() {
struct packed_member s_packed_member;
@@ -237,9 +237,9 @@ __attribute__((noinline)) void named_arg_aligned_struct_8(double d0, double d1,
// CHECK-SAME: (double [[D0:%.*]], double [[D1:%.*]], double [[D2:%.*]], double [[D3:%.*]], double [[D4:%.*]], double [[D5:%.*]], double [[D6:%.*]], double [[D7:%.*]], double [[D8:%.*]], ...) local_unnamed_addr #[[ATTR1]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VL:%.*]] = alloca [[STRUCT___VA_LIST:%.*]], align 8
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[VL]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[VL]]) #[[ATTR6]]
// CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[VL]])
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[VL]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[VL]]) #[[ATTR6]]
// CHECK-NEXT: ret void
void variadic_aligned_struct_8(double d0, double d1, double d2, double d3,
double d4, double d5, double d6, double d7,
@@ -254,7 +254,7 @@ void variadic_aligned_struct_8(double d0, double d1, double d2, double d3,
// CHECK-SAME: () local_unnamed_addr #[[ATTR4]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[S_ALIGNED_STRUCT_8:%.*]] = alloca [[STRUCT_ALIGNED_STRUCT_8:%.*]], align 16
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[S_ALIGNED_STRUCT_8]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[S_ALIGNED_STRUCT_8]]) #[[ATTR6]]
// CHECK-NEXT: call void (i32, ...) @init(i32 noundef 1, ptr noundef nonnull [[S_ALIGNED_STRUCT_8]]) #[[ATTR6]]
// CHECK-NEXT: [[DOTFCA_0_LOAD:%.*]] = load <8 x i16>, ptr [[S_ALIGNED_STRUCT_8]], align 16
// CHECK-NEXT: [[DOTFCA_0_INSERT:%.*]] = insertvalue [1 x <8 x i16>] poison, <8 x i16> [[DOTFCA_0_LOAD]], 0
@@ -262,7 +262,7 @@ void variadic_aligned_struct_8(double d0, double d1, double d2, double d3,
// CHECK-NEXT: [[DOTFCA_0_LOAD3:%.*]] = load <8 x i16>, ptr [[S_ALIGNED_STRUCT_8]], align 16
// CHECK-NEXT: [[DOTFCA_0_INSERT4:%.*]] = insertvalue [1 x <8 x i16>] poison, <8 x i16> [[DOTFCA_0_LOAD3]], 0
// CHECK-NEXT: call void (double, double, double, double, double, double, double, double, double, ...) @variadic_aligned_struct_8(double poison, double poison, double poison, double poison, double poison, double poison, double poison, double poison, double poison, [1 x <8 x i16>] alignstack(16) [[DOTFCA_0_INSERT4]])
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[S_ALIGNED_STRUCT_8]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[S_ALIGNED_STRUCT_8]]) #[[ATTR6]]
// CHECK-NEXT: ret void
void test_aligned_struct_8() {
struct aligned_struct_8 s_aligned_struct_8;
@@ -292,9 +292,9 @@ __attribute__((noinline)) void named_arg_aligned_member_8(double d0, double d1,
// CHECK-SAME: (double [[D0:%.*]], double [[D1:%.*]], double [[D2:%.*]], double [[D3:%.*]], double [[D4:%.*]], double [[D5:%.*]], double [[D6:%.*]], double [[D7:%.*]], double [[D8:%.*]], ...) local_unnamed_addr #[[ATTR1]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VL:%.*]] = alloca [[STRUCT___VA_LIST:%.*]], align 8
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[VL]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[VL]]) #[[ATTR6]]
// CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[VL]])
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[VL]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[VL]]) #[[ATTR6]]
// CHECK-NEXT: ret void
void variadic_aligned_member_8(double d0, double d1, double d2, double d3,
double d4, double d5, double d6, double d7,
@@ -309,7 +309,7 @@ void variadic_aligned_member_8(double d0, double d1, double d2, double d3,
// CHECK-SAME: () local_unnamed_addr #[[ATTR4]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[S_ALIGNED_MEMBER_8:%.*]] = alloca [[STRUCT_ALIGNED_MEMBER_8:%.*]], align 16
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[S_ALIGNED_MEMBER_8]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[S_ALIGNED_MEMBER_8]]) #[[ATTR6]]
// CHECK-NEXT: call void (i32, ...) @init(i32 noundef 1, ptr noundef nonnull [[S_ALIGNED_MEMBER_8]]) #[[ATTR6]]
// CHECK-NEXT: [[DOTFCA_0_LOAD:%.*]] = load <8 x i16>, ptr [[S_ALIGNED_MEMBER_8]], align 16
// CHECK-NEXT: [[DOTFCA_0_INSERT:%.*]] = insertvalue [1 x <8 x i16>] poison, <8 x i16> [[DOTFCA_0_LOAD]], 0
@@ -317,7 +317,7 @@ void variadic_aligned_member_8(double d0, double d1, double d2, double d3,
// CHECK-NEXT: [[DOTFCA_0_LOAD3:%.*]] = load <8 x i16>, ptr [[S_ALIGNED_MEMBER_8]], align 16
// CHECK-NEXT: [[DOTFCA_0_INSERT4:%.*]] = insertvalue [1 x <8 x i16>] poison, <8 x i16> [[DOTFCA_0_LOAD3]], 0
// CHECK-NEXT: call void (double, double, double, double, double, double, double, double, double, ...) @variadic_aligned_member_8(double poison, double poison, double poison, double poison, double poison, double poison, double poison, double poison, double poison, [1 x <8 x i16>] alignstack(16) [[DOTFCA_0_INSERT4]])
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[S_ALIGNED_MEMBER_8]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[S_ALIGNED_MEMBER_8]]) #[[ATTR6]]
// CHECK-NEXT: ret void
void test_aligned_member_8() {
struct aligned_member_8 s_aligned_member_8;
@@ -347,9 +347,9 @@ __attribute__((noinline)) void named_arg_pragma_packed_struct_8(double d0, doubl
// CHECK-SAME: (double [[D0:%.*]], double [[D1:%.*]], double [[D2:%.*]], double [[D3:%.*]], double [[D4:%.*]], double [[D5:%.*]], double [[D6:%.*]], double [[D7:%.*]], double [[D8:%.*]], ...) local_unnamed_addr #[[ATTR1]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VL:%.*]] = alloca [[STRUCT___VA_LIST:%.*]], align 8
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[VL]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[VL]]) #[[ATTR6]]
// CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[VL]])
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[VL]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[VL]]) #[[ATTR6]]
// CHECK-NEXT: ret void
void variadic_pragma_packed_struct_8(double d0, double d1, double d2, double d3,
double d4, double d5, double d6, double d7,
@@ -364,7 +364,7 @@ void variadic_pragma_packed_struct_8(double d0, double d1, double d2, double d3,
// CHECK-SAME: () local_unnamed_addr #[[ATTR4]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[S_PRAGMA_PACKED_STRUCT_8:%.*]] = alloca [[STRUCT_PRAGMA_PACKED_STRUCT_8:%.*]], align 16
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[S_PRAGMA_PACKED_STRUCT_8]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[S_PRAGMA_PACKED_STRUCT_8]]) #[[ATTR6]]
// CHECK-NEXT: call void (i32, ...) @init(i32 noundef 1, ptr noundef nonnull [[S_PRAGMA_PACKED_STRUCT_8]]) #[[ATTR6]]
// CHECK-NEXT: [[DOTFCA_0_LOAD:%.*]] = load <8 x i16>, ptr [[S_PRAGMA_PACKED_STRUCT_8]], align 16
// CHECK-NEXT: [[DOTFCA_0_INSERT:%.*]] = insertvalue [1 x <8 x i16>] poison, <8 x i16> [[DOTFCA_0_LOAD]], 0
@@ -372,7 +372,7 @@ void variadic_pragma_packed_struct_8(double d0, double d1, double d2, double d3,
// CHECK-NEXT: [[DOTFCA_0_LOAD3:%.*]] = load <8 x i16>, ptr [[S_PRAGMA_PACKED_STRUCT_8]], align 16
// CHECK-NEXT: [[DOTFCA_0_INSERT4:%.*]] = insertvalue [1 x <8 x i16>] poison, <8 x i16> [[DOTFCA_0_LOAD3]], 0
// CHECK-NEXT: call void (double, double, double, double, double, double, double, double, double, ...) @variadic_pragma_packed_struct_8(double poison, double poison, double poison, double poison, double poison, double poison, double poison, double poison, double poison, [1 x <8 x i16>] alignstack(8) [[DOTFCA_0_INSERT4]])
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[S_PRAGMA_PACKED_STRUCT_8]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[S_PRAGMA_PACKED_STRUCT_8]]) #[[ATTR6]]
// CHECK-NEXT: ret void
void test_pragma_packed_struct_8() {
struct pragma_packed_struct_8 s_pragma_packed_struct_8;
@@ -402,9 +402,9 @@ __attribute__((noinline)) void named_arg_pragma_packed_struct_4(double d0, doubl
// CHECK-SAME: (double [[D0:%.*]], double [[D1:%.*]], double [[D2:%.*]], double [[D3:%.*]], double [[D4:%.*]], double [[D5:%.*]], double [[D6:%.*]], double [[D7:%.*]], double [[D8:%.*]], ...) local_unnamed_addr #[[ATTR1]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VL:%.*]] = alloca [[STRUCT___VA_LIST:%.*]], align 8
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[VL]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[VL]]) #[[ATTR6]]
// CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[VL]])
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[VL]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[VL]]) #[[ATTR6]]
// CHECK-NEXT: ret void
void variadic_pragma_packed_struct_4(double d0, double d1, double d2, double d3,
double d4, double d5, double d6, double d7,
@@ -419,7 +419,7 @@ void variadic_pragma_packed_struct_4(double d0, double d1, double d2, double d3,
// CHECK-SAME: () local_unnamed_addr #[[ATTR4]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[S_PRAGMA_PACKED_STRUCT_4:%.*]] = alloca [[STRUCT_PRAGMA_PACKED_STRUCT_4:%.*]], align 16
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[S_PRAGMA_PACKED_STRUCT_4]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[S_PRAGMA_PACKED_STRUCT_4]]) #[[ATTR6]]
// CHECK-NEXT: call void (i32, ...) @init(i32 noundef 1, ptr noundef nonnull [[S_PRAGMA_PACKED_STRUCT_4]]) #[[ATTR6]]
// CHECK-NEXT: [[DOTFCA_0_LOAD:%.*]] = load <8 x i16>, ptr [[S_PRAGMA_PACKED_STRUCT_4]], align 16
// CHECK-NEXT: [[DOTFCA_0_INSERT:%.*]] = insertvalue [1 x <8 x i16>] poison, <8 x i16> [[DOTFCA_0_LOAD]], 0
@@ -427,7 +427,7 @@ void variadic_pragma_packed_struct_4(double d0, double d1, double d2, double d3,
// CHECK-NEXT: [[DOTFCA_0_LOAD3:%.*]] = load <8 x i16>, ptr [[S_PRAGMA_PACKED_STRUCT_4]], align 16
// CHECK-NEXT: [[DOTFCA_0_INSERT4:%.*]] = insertvalue [1 x <8 x i16>] poison, <8 x i16> [[DOTFCA_0_LOAD3]], 0
// CHECK-NEXT: call void (double, double, double, double, double, double, double, double, double, ...) @variadic_pragma_packed_struct_4(double poison, double poison, double poison, double poison, double poison, double poison, double poison, double poison, double poison, [1 x <8 x i16>] alignstack(8) [[DOTFCA_0_INSERT4]])
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[S_PRAGMA_PACKED_STRUCT_4]]) #[[ATTR6]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[S_PRAGMA_PACKED_STRUCT_4]]) #[[ATTR6]]
// CHECK-NEXT: ret void
void test_pragma_packed_struct_4() {
struct pragma_packed_struct_4 s_pragma_packed_struct_4;
diff --git a/clang/test/CodeGen/AArch64/byval-temp.c b/clang/test/CodeGen/AArch64/byval-temp.c
index 5033b6c..353bfa7 100644
--- a/clang/test/CodeGen/AArch64/byval-temp.c
+++ b/clang/test/CodeGen/AArch64/byval-temp.c
@@ -47,13 +47,13 @@ void example(void) {
// CHECK-O3-NEXT: %[[byvaltemp1:[0-9A-Za-z-]+]] = alloca %struct.large, align 8
//
// Mark the start of the lifetime for `l`
-// CHECK-O3-NEXT: call void @llvm.lifetime.start.p0(i64 64, ptr %[[l]])
+// CHECK-O3-NEXT: call void @llvm.lifetime.start.p0(ptr %[[l]])
//
// First, memset `l` to 0.
// CHECK-O3-NEXT: call void @llvm.memset.p0.i64(ptr align 8 %[[l]], i8 0, i64 64, i1 false)
//
// Lifetime of the first temporary starts here and ends right after the call.
-// CHECK-O3-NEXT: call void @llvm.lifetime.start.p0(i64 64, ptr %[[byvaltemp]])
+// CHECK-O3-NEXT: call void @llvm.lifetime.start.p0(ptr %[[byvaltemp]])
//
// Then, memcpy `l` to the temporary stack space.
// CHECK-O3-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 %[[byvaltemp]], ptr align 8 %[[l]], i64 64, i1 false)
@@ -61,16 +61,16 @@ void example(void) {
// CHECK-O3-NEXT: call void @pass_large(ptr dead_on_return noundef %[[byvaltemp]])
//
// The lifetime of the temporary used to pass a pointer to the struct ends here.
-// CHECK-O3-NEXT: call void @llvm.lifetime.end.p0(i64 64, ptr %[[byvaltemp]])
+// CHECK-O3-NEXT: call void @llvm.lifetime.end.p0(ptr %[[byvaltemp]])
//
// Now, do the same for the second call, using the second temporary alloca.
-// CHECK-O3-NEXT: call void @llvm.lifetime.start.p0(i64 64, ptr %[[byvaltemp1]])
+// CHECK-O3-NEXT: call void @llvm.lifetime.start.p0(ptr %[[byvaltemp1]])
// CHECK-O3-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 %[[byvaltemp1]], ptr align 8 %[[l]], i64 64, i1 false)
// CHECK-O3-NEXT: call void @pass_large(ptr dead_on_return noundef %[[byvaltemp1]])
-// CHECK-O3-NEXT: call void @llvm.lifetime.end.p0(i64 64, ptr %[[byvaltemp1]])
+// CHECK-O3-NEXT: call void @llvm.lifetime.end.p0(ptr %[[byvaltemp1]])
//
// Mark the end of the lifetime of `l`.
-// CHECK-O3-NEXT: call void @llvm.lifetime.end.p0(i64 64, ptr %l)
+// CHECK-O3-NEXT: call void @llvm.lifetime.end.p0(ptr %l)
// CHECK-O3-NEXT: ret void
void example_BitInt(void) {
@@ -101,20 +101,20 @@ void example_BitInt(void) {
// CHECK-O3-NEXT: [[L:%.*]] = alloca i256, align 16
// CHECK-O3-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca i256, align 16
// CHECK-O3-NEXT: [[INDIRECT_ARG_TEMP1:%.*]] = alloca i256, align 16
-// CHECK-O3-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[L]])
+// CHECK-O3-NEXT: call void @llvm.lifetime.start.p0(ptr [[L]])
// CHECK-O3-NEXT: store i256 0, ptr [[L]], align 16, !tbaa [[TBAA6:![0-9]+]]
// CHECK-O3-NEXT: [[TMP0:%.*]] = load i256, ptr [[L]], align 16, !tbaa [[TBAA6]]
// CHECK-O3-NEXT: [[LOADEDV:%.*]] = trunc i256 [[TMP0]] to i129
-// CHECK-O3-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[INDIRECT_ARG_TEMP]])
+// CHECK-O3-NEXT: call void @llvm.lifetime.start.p0(ptr [[INDIRECT_ARG_TEMP]])
// CHECK-O3-NEXT: [[STOREDV:%.*]] = sext i129 [[LOADEDV]] to i256
// CHECK-O3-NEXT: store i256 [[STOREDV]], ptr [[INDIRECT_ARG_TEMP]], align 16, !tbaa [[TBAA6]]
// CHECK-O3-NEXT: call void @pass_large_BitInt(ptr dead_on_return noundef [[INDIRECT_ARG_TEMP]])
-// CHECK-O3-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[INDIRECT_ARG_TEMP]])
+// CHECK-O3-NEXT: call void @llvm.lifetime.end.p0(ptr [[INDIRECT_ARG_TEMP]])
// CHECK-O3-NEXT: [[TMP1:%.*]] = load i256, ptr [[L]], align 16, !tbaa [[TBAA6]]
// CHECK-O3-NEXT: [[LOADEDV1:%.*]] = trunc i256 [[TMP1]] to i129
-// CHECK-O3-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[INDIRECT_ARG_TEMP1]])
+// CHECK-O3-NEXT: call void @llvm.lifetime.start.p0(ptr [[INDIRECT_ARG_TEMP1]])
// CHECK-O3-NEXT: [[STOREDV1:%.*]] = sext i129 [[LOADEDV1]] to i256
// CHECK-O3-NEXT: store i256 [[STOREDV1]], ptr [[INDIRECT_ARG_TEMP1]], align 16, !tbaa [[TBAA6]]
// CHECK-O3-NEXT: call void @pass_large_BitInt(ptr dead_on_return noundef [[INDIRECT_ARG_TEMP1]])
-// CHECK-O3-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[INDIRECT_ARG_TEMP1]])
-// CHECK-O3-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[L]])
+// CHECK-O3-NEXT: call void @llvm.lifetime.end.p0(ptr [[INDIRECT_ARG_TEMP1]])
+// CHECK-O3-NEXT: call void @llvm.lifetime.end.p0(ptr [[L]])
diff --git a/clang/test/CodeGen/AArch64/pure-scalable-args.c b/clang/test/CodeGen/AArch64/pure-scalable-args.c
index 48988f7a..d34c7f9 100644
--- a/clang/test/CodeGen/AArch64/pure-scalable-args.c
+++ b/clang/test/CodeGen/AArch64/pure-scalable-args.c
@@ -329,7 +329,7 @@ void test_pass_variadic(PST *p, PST *q) {
// CHECK-AAPCS: call void (<vscale x 16 x i1>, <vscale x 2 x double>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 16 x i8>, <vscale x 16 x i1>, ...) @pass_variadic_callee(<vscale x 16 x i1> %1, <vscale x 2 x double> %cast.scalable1, <vscale x 4 x float> %cast.scalable2, <vscale x 4 x float> %cast.scalable3, <vscale x 16 x i8> %cast.scalable4, <vscale x 16 x i1> %12, ptr dead_on_return noundef nonnull %byval-temp)
// CHECK-DARWIN: call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 16 dereferenceable(96) %byval-temp, ptr noundef nonnull align 16 dereferenceable(96) %p, i64 96, i1 false)
-// CHECK-DARWIN: call void @llvm.lifetime.start.p0(i64 96, ptr nonnull %byval-temp1)
+// CHECK-DARWIN: call void @llvm.lifetime.start.p0(ptr nonnull %byval-temp1)
// CHECK-DARWIN: call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 16 dereferenceable(96) %byval-temp1, ptr noundef nonnull align 16 dereferenceable(96) %q, i64 96, i1 false)
// CHECK-DARWIN: call void (ptr, ...) @pass_variadic_callee(ptr dead_on_return noundef nonnull %byval-temp, ptr dead_on_return noundef nonnull %byval-temp1)
@@ -392,7 +392,7 @@ void test_va_arg(int n, ...) {
// CHECK-AAPCS: define dso_local void @test_va_arg(i32 noundef %n, ...)
// CHECK-AAPCS-NEXT: entry:
// CHECK-AAPCS-NEXT: %ap = alloca %struct.__va_list, align 8
-// CHECK-AAPCS-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %ap)
+// CHECK-AAPCS-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull %ap)
// CHECK-AAPCS-NEXT: call void @llvm.va_start.p0(ptr nonnull %ap)
// CHECK-AAPCS-NEXT: %gr_offs_p = getelementptr inbounds nuw i8, ptr %ap, i64 24
// CHECK-AAPCS-NEXT: %gr_offs = load i32, ptr %gr_offs_p, align 8
@@ -435,14 +435,14 @@ void test_va_arg(int n, ...) {
// CHECK-AAPCS-NEXT: %3 = bitcast <vscale x 2 x i8> %cast.scalable to <vscale x 16 x i1>
// CHECK-AAPCS-NEXT: %cast.scalable2 = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> poison, <4 x float> %v.sroa.43.0.copyload, i64 0)
// CHECK-AAPCS-NEXT: call void @use1(<vscale x 16 x i1> noundef %3, <vscale x 4 x float> noundef %cast.scalable2)
-// CHECK-AAPCS-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %ap)
+// CHECK-AAPCS-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull %ap)
// CHECK-AAPCS-NEXT: ret void
// CHECK-AAPCS-NEXT: }
// CHECK-DARWIN: define void @test_va_arg(i32 noundef %n, ...)
// CHECK-DARWIN-NEXT: entry:
// CHECK-DARWIN-NEXT: %ap = alloca ptr, align 8
-// CHECK-DARWIN-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %ap)
+// CHECK-DARWIN-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull %ap)
// CHECK-DARWIN-NEXT: call void @llvm.va_start.p0(ptr nonnull %ap)
// CHECK-DARWIN-NEXT: %argp.cur = load ptr, ptr %ap, align 8
// CHECK-DARWIN-NEXT: %argp.next = getelementptr inbounds nuw i8, ptr %argp.cur, i64 8
@@ -456,7 +456,7 @@ void test_va_arg(int n, ...) {
// CHECK-DARWIN-NEXT: %1 = bitcast <vscale x 2 x i8> %cast.scalable to <vscale x 16 x i1>
// CHECK-DARWIN-NEXT: %cast.scalable2 = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> poison, <4 x float> %v.sroa.43.0.copyload, i64 0)
// CHECK-DARWIN-NEXT: call void @use1(<vscale x 16 x i1> noundef %1, <vscale x 4 x float> noundef %cast.scalable2)
-// CHECK-DARWIN-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %ap)
+// CHECK-DARWIN-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull %ap)
// CHECK-DARWIN-NEXT: ret void
// CHECK-DARWIN-NEXT: }
diff --git a/clang/test/CodeGen/AArch64/sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.c b/clang/test/CodeGen/AArch64/sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.c
index d244a8b..4a1185d0 100644
--- a/clang/test/CodeGen/AArch64/sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.c
+++ b/clang/test/CodeGen/AArch64/sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.c
@@ -86,10 +86,10 @@ typedef svint8_t vec2 __attribute__((arm_sve_vector_bits(N)));
// CHECK-NEXT: entry:
// CHECK-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca <[[#div(VBITS,8)]] x i8>, align 16
// CHECK-NEXT: [[X:%.*]] = tail call <[[#div(VBITS,8)]] x i8> @llvm.vector.extract.v[[#div(VBITS,8)]]i8.nxv16i8(<vscale x 16 x i8> [[X_COERCE:%.*]], i64 0)
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 [[SIZE:[0-9]+]], ptr nonnull [[INDIRECT_ARG_TEMP]]) #[[ATTR6:[0-9]+]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[INDIRECT_ARG_TEMP]]) #[[ATTR6:[0-9]+]]
// CHECK-NEXT: store <[[#div(VBITS,8)]] x i8> [[X]], ptr [[INDIRECT_ARG_TEMP]], align 16, [[TBAA6]]
// CHECK-NEXT: call void @f3(ptr dead_on_return noundef nonnull [[INDIRECT_ARG_TEMP]]) [[ATTR5:#.*]]
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 [[SIZE]], ptr nonnull [[INDIRECT_ARG_TEMP]]) #[[ATTR6:[0-9]+]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[INDIRECT_ARG_TEMP]]) #[[ATTR6:[0-9]+]]
// CHECK-NEXT: ret void
// CHECK128-LABEL: declare void @f3(<16 x i8> noundef)
diff --git a/clang/test/CodeGen/AArch64/sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp b/clang/test/CodeGen/AArch64/sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp
index d42ecb6..6211b60 100644
--- a/clang/test/CodeGen/AArch64/sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp
+++ b/clang/test/CodeGen/AArch64/sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp
@@ -73,10 +73,10 @@ typedef svint16_t vec2 __attribute__((arm_sve_vector_bits(N)));
// CHECK128-NEXT: ret void
// CHECKWIDE-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca <[[#div(VBITS, 16)]] x i16>, align 16
// CHECKWIDE-NEXT: [[X:%.*]] = tail call <[[#div(VBITS, 16)]] x i16> @llvm.vector.extract.v[[#div(VBITS, 16)]]i16.nxv8i16(<vscale x 8 x i16> [[X_COERCE:%.*]], i64 0)
-// CHECKWIDE-NEXT: call void @llvm.lifetime.start.p0(i64 [[SIZE:[0-9]+]], ptr nonnull [[INDIRECT_ARG_TEMP]]) #[[ATTR6:[0-9]+]]
+// CHECKWIDE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[INDIRECT_ARG_TEMP]]) #[[ATTR6:[0-9]+]]
// CHECKWIDE-NEXT: store <[[#div(VBITS, 16)]] x i16> [[X]], ptr [[INDIRECT_ARG_TEMP]], align 16, [[TBAA6:!tbaa !.*]]
// CHECKWIDE-NEXT: call void @_Z1fDv[[#div(VBITS, 16)]]_s(ptr dead_on_return noundef nonnull [[INDIRECT_ARG_TEMP]]) [[ATTR5:#.*]]
-// CHECKWIDE-NEXT: call void @llvm.lifetime.end.p0(i64 [[SIZE]], ptr nonnull [[INDIRECT_ARG_TEMP]]) #[[ATTR6:[0-9]+]]
+// CHECKWIDE-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[INDIRECT_ARG_TEMP]]) #[[ATTR6:[0-9]+]]
// CHECKWIDE-NEXT: ret void
void g(vec2 x) { f(x); } // OK
#endif
diff --git a/clang/test/CodeGen/RISCV/riscv-v-lifetime.cpp b/clang/test/CodeGen/RISCV/riscv-v-lifetime.cpp
index 4c91fef..c5b863b 100644
--- a/clang/test/CodeGen/RISCV/riscv-v-lifetime.cpp
+++ b/clang/test/CodeGen/RISCV/riscv-v-lifetime.cpp
@@ -10,10 +10,10 @@ vint32m1_t Baz();
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[REF_TMP:%.*]] = alloca <vscale x 2 x i32>, align 4
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[A]]) #[[ATTR3:[0-9]+]]
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[REF_TMP]]) #[[ATTR3]]
-// CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr [[REF_TMP]]) #[[ATTR3]]
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[A]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]]) #[[ATTR3:[0-9]+]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[REF_TMP]]) #[[ATTR3]]
+// CHECK: call void @llvm.lifetime.end.p0(ptr [[REF_TMP]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]]) #[[ATTR3]]
//
vint32m1_t Test() {
const vint32m1_t &a = Baz();
diff --git a/clang/test/CodeGen/WebAssembly/builtins-test-fp-sig.c b/clang/test/CodeGen/WebAssembly/builtins-test-fp-sig.c
new file mode 100644
index 0000000..88447f7
--- /dev/null
+++ b/clang/test/CodeGen/WebAssembly/builtins-test-fp-sig.c
@@ -0,0 +1,70 @@
+// RUN: %clang_cc1 -triple wasm32-unknown-unknown -target-feature +gc -O3 -emit-llvm -DSINGLE_VALUE -o - %s | FileCheck %s -check-prefixes WEBASSEMBLY,WEBASSEMBLY-SV
+// RUN: %clang_cc1 -triple wasm64-unknown-unknown -target-feature +gc -O3 -emit-llvm -DSINGLE_VALUE -o - %s | FileCheck %s -check-prefixes WEBASSEMBLY,WEBASSEMBLY-SV
+// RUN: %clang_cc1 -triple wasm64-unknown-unknown -target-feature +gc -target-abi experimental-mv -O3 -emit-llvm -o - %s 2>&1 | FileCheck %s -check-prefixes WEBASSEMBLY
+// RUN: not %clang_cc1 -triple wasm64-unknown-unknown -O3 -emit-llvm -o - %s 2>&1 | FileCheck %s -check-prefixes MISSING-GC
+
+void use(int);
+
+typedef void (*Fvoid)(void);
+void test_function_pointer_signature_void(Fvoid func) {
+ // MISSING-GC: error: '__builtin_wasm_test_function_pointer_signature' needs target feature gc
+ // WEBASSEMBLY: %0 = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, token poison)
+ use(__builtin_wasm_test_function_pointer_signature(func));
+}
+
+typedef float (*Ffloats)(float, double, int);
+void test_function_pointer_signature_floats(Ffloats func) {
+ // WEBASSEMBLY: %0 = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, float poison, token poison, float poison, double poison, i32 poison)
+ use(__builtin_wasm_test_function_pointer_signature(func));
+}
+
+typedef void (*Fpointers)(Fvoid, Ffloats, void*, int*, int***, char[5]);
+void test_function_pointer_signature_pointers(Fpointers func) {
+ // WEBASSEMBLY: %0 = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, token poison, ptr poison, ptr poison, ptr poison, ptr poison, ptr poison, ptr poison)
+ use(__builtin_wasm_test_function_pointer_signature(func));
+}
+
+typedef void (*FVarArgs)(int, ...);
+void test_function_pointer_signature_varargs(FVarArgs func) {
+ // WEBASSEMBLY: %0 = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, token poison, i32 poison, ptr poison)
+ use(__builtin_wasm_test_function_pointer_signature(func));
+}
+
+typedef __externref_t (*FExternRef)(__externref_t, __externref_t);
+void test_function_pointer_externref(FExternRef func) {
+ // WEBASSEMBLY: %0 = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, ptr addrspace(10) poison, token poison, ptr addrspace(10) poison, ptr addrspace(10) poison)
+ use(__builtin_wasm_test_function_pointer_signature(func));
+}
+
+typedef __funcref Fpointers (*FFuncRef)(__funcref Fvoid, __funcref Ffloats);
+void test_function_pointer_funcref(FFuncRef func) {
+ // WEBASSEMBLY: %0 = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, ptr addrspace(20) poison, token poison, ptr addrspace(20) poison, ptr addrspace(20) poison)
+ use(__builtin_wasm_test_function_pointer_signature(func));
+}
+
+#ifdef SINGLE_VALUE
+// Some tests to check that we get struct ABIs correct. There is no special
+// code in __builtin_wasm_test_function_pointer_signature for this; it is
+// handled by the normal type lowering code.
+// Single-element structs are unboxed; multi-element structs are passed on
+// the stack.
+typedef struct {double x;} (*Fstructs1)(struct {double x;}, struct {float x;}, struct {double x; float y;});
+void test_function_pointer_structs1(Fstructs1 func) {
+ // WEBASSEMBLY-SV: %0 = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, double poison, token poison, double poison, float poison, ptr poison)
+ use(__builtin_wasm_test_function_pointer_signature(func));
+}
+
+// Two-element return struct ==> return ptr on stack
+typedef struct {double x; double y;} (*Fstructs2)(void);
+void test_function_pointer_structs2(Fstructs2 func) {
+ // WEBASSEMBLY-SV: %0 = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, token poison, ptr poison)
+ use(__builtin_wasm_test_function_pointer_signature(func));
+}
+
+// Return union ==> return ptr on stack; one-element union ==> unboxed
+typedef union {double x; float y;} (*FUnions)(union {double x; float y;}, union {double x;});
+void test_function_pointer_unions(FUnions func) {
+ // WEBASSEMBLY-SV: %0 = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, token poison, ptr poison, ptr poison, double poison)
+ use(__builtin_wasm_test_function_pointer_signature(func));
+}
+#endif
diff --git a/clang/test/CodeGen/X86/avx-builtins.c b/clang/test/CodeGen/X86/avx-builtins.c
index a6e70aae..28cad00 100644
--- a/clang/test/CodeGen/X86/avx-builtins.c
+++ b/clang/test/CodeGen/X86/avx-builtins.c
@@ -20,12 +20,14 @@ __m256d test_mm256_add_pd(__m256d A, __m256d B) {
// CHECK: fadd <4 x double>
return _mm256_add_pd(A, B);
}
+TEST_CONSTEXPR(match_m256d( _mm256_add_pd((__m256d){-4.0, -5.0, +6.0, +7.0}, (__m256d){-4.0, -5.0, +6.0, +7.0}), -8.0, -10.0, +12.0, +14.0));
__m256 test_mm256_add_ps(__m256 A, __m256 B) {
// CHECK-LABEL: test_mm256_add_ps
// CHECK: fadd <8 x float>
return _mm256_add_ps(A, B);
}
+TEST_CONSTEXPR(match_m256(_mm256_add_ps((__m256){-4.0f, -5.0f, +6.0f, +7.0f, +7.0f, +6.0f, -5.0f, -4.0f}, (__m256){-4.0f, -5.0f, +6.0f, +7.0f, +7.0f, +6.0f, -5.0f, -4.0f}), -8.0f, -10.0f, +12.0f, +14.0f, +14.0f, +12.0f, -10.0f, -8.0f));
__m256d test_mm256_addsub_pd(__m256d A, __m256d B) {
// CHECK-LABEL: test_mm256_addsub_pd
@@ -44,12 +46,14 @@ __m256d test_mm256_and_pd(__m256d A, __m256d B) {
// CHECK: and <4 x i64>
return _mm256_and_pd(A, B);
}
+TEST_CONSTEXPR(match_m256d(_mm256_and_pd((__m256d){-4.0, -5.0, +6.0, +7.0}, (__m256d){+0.0, -0.0, -0.0, +7.0}), -0.0, -0.0, +0.0, +7.0));
__m256 test_mm256_and_ps(__m256 A, __m256 B) {
// CHECK-LABEL: test_mm256_and_ps
// CHECK: and <8 x i32>
return _mm256_and_ps(A, B);
}
+TEST_CONSTEXPR(match_m256(_mm256_and_ps((__m256){-4.0f, -5.0f, +6.0f, +7.0f, +7.0f, +6.0f, -5.0f, -4.0f}, (__m256){+0.0f, -0.0f, -0.0f, +7.0f, +7.0f, -0.0f, -0.0f, +0.0f}), -0.0f, -0.0f, +0.0f, +7.0f, +7.0f, +0.0f, -0.0f, -0.0f));
__m256d test_mm256_andnot_pd(__m256d A, __m256d B) {
// CHECK-LABEL: test_mm256_andnot_pd
@@ -57,6 +61,7 @@ __m256d test_mm256_andnot_pd(__m256d A, __m256d B) {
// CHECK: and <4 x i64>
return _mm256_andnot_pd(A, B);
}
+TEST_CONSTEXPR(match_m256d(_mm256_andnot_pd((__m256d){-4.0, -5.0, +6.0, +7.0}, (__m256d){+0.0, -0.0, -0.0, +7.0}), +0.0, +0.0, +0.0, +0.0));
__m256 test_mm256_andnot_ps(__m256 A, __m256 B) {
// CHECK-LABEL: test_mm256_andnot_ps
@@ -64,6 +69,7 @@ __m256 test_mm256_andnot_ps(__m256 A, __m256 B) {
// CHECK: and <8 x i32>
return _mm256_andnot_ps(A, B);
}
+TEST_CONSTEXPR(match_m256(_mm256_andnot_ps((__m256){-4.0f, -5.0f, +6.0f, +7.0f, +7.0f, +6.0f, -5.0f, -4.0f}, (__m256){+0.0f, -0.0f, -0.0f, +7.0f, +7.0f, -0.0f, -0.0f, +0.0f}), +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f));
__m256d test_mm256_blend_pd(__m256d A, __m256d B) {
// CHECK-LABEL: test_mm256_blend_pd
@@ -141,11 +147,13 @@ __m256 test_mm256_castpd_ps(__m256d A) {
// CHECK-LABEL: test_mm256_castpd_ps
return _mm256_castpd_ps(A);
}
+TEST_CONSTEXPR(match_m256(_mm256_castpd_ps((__m256d){-1.0, +2.0, +4.0, -6.0}), +0.0f, -1.875f, +0.0f, +2.0f, +0.0f, +2.25f, 0.0f, -2.375f));
__m256i test_mm256_castpd_si256(__m256d A) {
// CHECK-LABEL: test_mm256_castpd_si256
return _mm256_castpd_si256(A);
}
+TEST_CONSTEXPR(match_m256i(_mm256_castpd_si256((__m256d){-1.0, +2.0, -3.0, +4.0}), 0xBFF0000000000000ULL, 0x4000000000000000ULL, 0xC008000000000000ULL, 0x4010000000000000ULL));
__m256d test_mm256_castpd128_pd256(__m128d A) {
// CHECK-LABEL: test_mm256_castpd128_pd256
@@ -159,16 +167,19 @@ __m128d test_mm256_castpd256_pd128(__m256d A) {
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <2 x i32> <i32 0, i32 1>
return _mm256_castpd256_pd128(A);
}
+TEST_CONSTEXPR(match_m128d(_mm256_castpd256_pd128((__m256d){-1.0, +2.0, -3.0, +4.0}), -1.0, +2.0));
__m256d test_mm256_castps_pd(__m256 A) {
// CHECK-LABEL: test_mm256_castps_pd
return _mm256_castps_pd(A);
}
+TEST_CONSTEXPR(match_m256d(_mm256_castps_pd((__m256){0.0f, -1.0f, 0.0f, 4.0f, 0.0f, -2.0f, 0.0f, 6.0f}), -0.0078125, 512.0, -2.0, +8192.0));
__m256i test_mm256_castps_si256(__m256 A) {
// CHECK-LABEL: test_mm256_castps_si256
return _mm256_castps_si256(A);
}
+TEST_CONSTEXPR(match_m256i(_mm256_castps_si256((__m256){1.0f, -2.0f, -4.0f, 8.0f, -16.0f, +16.0f, +32.0f, -32.0f}), 0xC00000003F800000ULL, 0x41000000c0800000ULL, 0x41800000C1800000ULL, 0xC200000042000000ULL));
__m256 test_mm256_castps128_ps256(__m128 A) {
// CHECK-LABEL: test_mm256_castps128_ps256
@@ -182,6 +193,7 @@ __m128 test_mm256_castps256_ps128(__m256 A) {
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
return _mm256_castps256_ps128(A);
}
+TEST_CONSTEXPR(match_m128(_mm256_castps256_ps128((__m256){1.0f, -2.0f, -4.0f, 8.0f, -16.0f, +16.0f, +32.0f, -32.0f}), 1.0f, -2.0f, -4.0f, 8.0f));
__m256i test_mm256_castsi128_si256(__m128i A) {
// CHECK-LABEL: test_mm256_castsi128_si256
@@ -194,17 +206,20 @@ __m256d test_mm256_castsi256_pd(__m256i A) {
// CHECK-LABEL: test_mm256_castsi256_pd
return _mm256_castsi256_pd(A);
}
+TEST_CONSTEXPR(match_m256d(_mm256_castsi256_pd((__m256i)(__v4du){0x4070000000000000ULL, 0xC000000000000000ULL, 0xBFF0000000000000ULL, 0xC008000000000000ULL}), 256.0, -2.0, -1.0, -3.0));
__m256 test_mm256_castsi256_ps(__m256i A) {
// CHECK-LABEL: test_mm256_castsi256_ps
return _mm256_castsi256_ps(A);
}
+TEST_CONSTEXPR(match_m256(_mm256_castsi256_ps((__m256i)(__v4du){0x42000000c1800000ULL, 0x43000000c2800000ULL, 0x41000000c0800000ULL, 0xC00000003F800000ULL}), -16.0f, 32.0f, -64.0f, 128.0f, -4.0f, 8.0f, 1.0f, -2.0f));
__m128i test_mm256_castsi256_si128(__m256i A) {
// CHECK-LABEL: test_mm256_castsi256_si128
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <2 x i32> <i32 0, i32 1>
return _mm256_castsi256_si128(A);
}
+TEST_CONSTEXPR(match_m128i(_mm256_castsi256_si128((__m256i)(__v4du){0xBFF0000000000000ULL, 0x4070000000000000ULL, 0xC000000000000000ULL, 0xC008000000000000ULL}), 0xBFF0000000000000ULL, 0x4070000000000000ULL));
__m256d test_mm256_ceil_pd(__m256d x) {
// CHECK-LABEL: test_mm256_ceil_pd
@@ -973,12 +988,14 @@ __m256d test_mm256_div_pd(__m256d A, __m256d B) {
// CHECK: fdiv <4 x double>
return _mm256_div_pd(A, B);
}
+TEST_CONSTEXPR(match_m256d( _mm256_div_pd((__m256d){-4.0, -5.0, +6.0, +7.0}, (__m256d){-1.0, +1.0, -1.0, +1.0}), +4.0, -5.0, -6.0, +7.0));
__m256 test_mm256_div_ps(__m256 A, __m256 B) {
// CHECK-LABEL: test_mm256_div_ps
// CHECK: fdiv <8 x float>
return _mm256_div_ps(A, B);
}
+TEST_CONSTEXPR(match_m256( _mm256_div_ps((__m256){-4.0f, -5.0f, +6.0f, +7.0f, +7.0f, +6.0f, -5.0f, -4.0f}, (__m256){-1.0f, +1.0f, -1.0f, +1.0f, +1.0f, -1.0f, +1.0f, -1.0f}), +4.0f, -5.0f, -6.0f, +7.0f, +7.0f, -6.0f, -5.0f, +4.0f));
__m256 test_mm256_dp_ps(__m256 A, __m256 B) {
// CHECK-LABEL: test_mm256_dp_ps
@@ -1258,18 +1275,21 @@ __m256d test_mm256_movedup_pd(__m256d A) {
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
return _mm256_movedup_pd(A);
}
+TEST_CONSTEXPR(match_m256d(_mm256_movedup_pd((__m256d){+7.0, -7.0, -42.0, +42.0}), +7.0, +7.0, -42.0, -42.0));
__m256 test_mm256_movehdup_ps(__m256 A) {
// CHECK-LABEL: test_mm256_movehdup_ps
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
return _mm256_movehdup_ps(A);
}
+TEST_CONSTEXPR(match_m256(_mm256_movehdup_ps((__m256){+1.0f,-1.0f,+2.0f,+4.0f,+8.0f,-8.0f,-3.0f,+3.0f}), -1.0f, -1.0f, +4.0f, +4.0f, -8.0f, -8.0f, +3.0f, +3.0f));
__m256 test_mm256_moveldup_ps(__m256 A) {
// CHECK-LABEL: test_mm256_moveldup_ps
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
return _mm256_moveldup_ps(A);
}
+TEST_CONSTEXPR(match_m256(_mm256_moveldup_ps((__m256){+1.0f,-1.0f,+2.0f,+4.0f,+8.0f,-8.0f,-3.0f,+3.0f}), +1.0f, +1.0f, +2.0f, +2.0f, +8.0f, +8.0f, -3.0f, -3.0f));
int test_mm256_movemask_pd(__m256d A) {
// CHECK-LABEL: test_mm256_movemask_pd
@@ -1288,24 +1308,28 @@ __m256d test_mm256_mul_pd(__m256d A, __m256d B) {
// CHECK: fmul <4 x double>
return _mm256_mul_pd(A, B);
}
+TEST_CONSTEXPR(match_m256d( _mm256_mul_pd((__m256d){-4.0, -5.0, +6.0, +7.0}, (__m256d){-4.0, -5.0, +6.0, +7.0}), +16.0, +25.0, +36.0, +49.0));
__m256 test_mm256_mul_ps(__m256 A, __m256 B) {
// CHECK-LABEL: test_mm256_mul_ps
// CHECK: fmul <8 x float>
return _mm256_mul_ps(A, B);
}
+TEST_CONSTEXPR(match_m256( _mm256_mul_ps((__m256){-4.0f, -5.0f, +6.0f, +7.0f, +7.0f, +6.0f, -5.0f, -4.0f}, (__m256){-4.0f, -5.0f, +6.0f, +7.0f, +7.0f, +6.0f, -5.0f, -4.0f}), +16.0f, +25.0f, +36.0f, +49.0f, +49.0f, +36.0f, +25.0f, +16.0f));
__m256d test_mm256_or_pd(__m256d A, __m256d B) {
// CHECK-LABEL: test_mm256_or_pd
// CHECK: or <4 x i64>
return _mm256_or_pd(A, B);
}
+TEST_CONSTEXPR(match_m256d(_mm256_or_pd((__m256d){-4.0, -5.0, +6.0, +7.0}, (__m256d){+0.0, -0.0, -0.0, +7.0}), -4.0, -5.0, -6.0, +7.0));
__m256 test_mm256_or_ps(__m256 A, __m256 B) {
// CHECK-LABEL: test_mm256_or_ps
// CHECK: or <8 x i32>
return _mm256_or_ps(A, B);
}
+TEST_CONSTEXPR(match_m256(_mm256_or_ps((__m256){-4.0f, -5.0f, +6.0f, +7.0f, +7.0f, +6.0f, -5.0f, -4.0f}, (__m256){+0.0f, -0.0f, -0.0f, +7.0f, +7.0f, -0.0f, -0.0f, +0.0f}), -4.0f, -5.0f, -6.0f, +7.0f, +7.0f, -6.0f, -5.0f, -4.0f));
__m128d test_mm_permute_pd(__m128d A) {
// CHECK-LABEL: test_mm_permute_pd
@@ -1924,12 +1948,14 @@ __m256d test_mm256_sub_pd(__m256d A, __m256d B) {
// CHECK: fsub <4 x double>
return _mm256_sub_pd(A, B);
}
+TEST_CONSTEXPR(match_m256d( _mm256_sub_pd((__m256d){-4.0, -5.0, +6.0, +7.0}, (__m256d){-0.0, +0.0, +2.0, -1.0}), -4.0, -5.0, 4.0, 8.0));
__m256 test_mm256_sub_ps(__m256 A, __m256 B) {
// CHECK-LABEL: test_mm256_sub_ps
// CHECK: fsub <8 x float>
return _mm256_sub_ps(A, B);
}
+TEST_CONSTEXPR(match_m256( _mm256_sub_ps((__m256){-4.0f, -5.0f, +6.0f, +7.0f, +7.0f, +6.0f, -5.0f, -4.0f}, (__m256){-0.0f, +0.0f, +2.0f, -1.0f, -1.0f, +2.0f, +0.0f, -0.0f}), -4.0f, -5.0f, 4.0f, 8.0f, 8.0f, 4.0f, -5.0f, -4.0f));
int test_mm_testc_pd(__m128d A, __m128d B) {
// CHECK-LABEL: test_mm_testc_pd
@@ -2053,36 +2079,42 @@ __m256d test_mm256_unpackhi_pd(__m256d A, __m256d B) {
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
return _mm256_unpackhi_pd(A, B);
}
+TEST_CONSTEXPR(match_m256d(_mm256_unpackhi_pd((__m256d){+1.0, +2.0, +3.0, +4.0}, (__m256d){+5.0, +6.0, +7.0, +8.0}), +2.0, +6.0, +4.0, +8.0));
__m256 test_mm256_unpackhi_ps(__m256 A, __m256 B) {
// CHECK-LABEL: test_mm256_unpackhi_ps
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
return _mm256_unpackhi_ps(A, B);
}
+TEST_CONSTEXPR(match_m256(_mm256_unpackhi_ps((__m256){+0.0f, +1.0f, +2.0f, +3.0f, +4.0f, +5.0f, +6.0f, +7.0f}, (__m256){+10.0f, +11.0f, +12.0f, +13.0f, +14.0f, +15.0f, +16.0f, +17.0f}), +2.0f, +12.0f, +3.0f, +13.0f, +6.0f, +16.0f, +7.0f, +17.0f));
__m256d test_mm256_unpacklo_pd(__m256d A, __m256d B) {
// CHECK-LABEL: test_mm256_unpacklo_pd
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
return _mm256_unpacklo_pd(A, B);
}
+TEST_CONSTEXPR(match_m256d(_mm256_unpacklo_pd((__m256d){+1.0, +2.0, +3.0, +4.0}, (__m256d){+5.0, +6.0, +7.0, +8.0}), +1.0, +5.0, +3.0, +7.0));
__m256 test_mm256_unpacklo_ps(__m256 A, __m256 B) {
// CHECK-LABEL: test_mm256_unpacklo_ps
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
return _mm256_unpacklo_ps(A, B);
}
+TEST_CONSTEXPR(match_m256(_mm256_unpacklo_ps((__m256){+0.0f, +1.0f, +2.0f, +3.0f, +4.0f, +5.0f, +6.0f, +7.0f}, (__m256){+10.0f, +11.0f, +12.0f, +13.0f, +14.0f, +15.0f, +16.0f, +17.0f}), +0.0f, +10.0f, +1.0f, +11.0f, +4.0f, +14.0f, +5.0f, +15.0f));
__m256d test_mm256_xor_pd(__m256d A, __m256d B) {
// CHECK-LABEL: test_mm256_xor_pd
// CHECK: xor <4 x i64>
return _mm256_xor_pd(A, B);
}
+TEST_CONSTEXPR(match_m256d(_mm256_xor_pd((__m256d){-4.0, -5.0, +6.0, +7.0}, (__m256d){+0.0, -0.0, -0.0, +7.0}), -4.0, +5.0, -6.0, +0.0));
__m256 test_mm256_xor_ps(__m256 A, __m256 B) {
// CHECK-LABEL: test_mm256_xor_ps
// CHECK: xor <8 x i32>
return _mm256_xor_ps(A, B);
}
+TEST_CONSTEXPR(match_m256(_mm256_xor_ps((__m256){-4.0f, -5.0f, +6.0f, +7.0f, +7.0f, +6.0f, -5.0f, -4.0f}, (__m256){+0.0f, -0.0f, -0.0f, +7.0f, +7.0f, -0.0f, -0.0f, +0.0f}), -4.0f, +5.0f, -6.0f, +0.0f, +0.0f, -6.0f, +5.0f, -4.0f));
void test_mm256_zeroall(void) {
// CHECK-LABEL: test_mm256_zeroall
diff --git a/clang/test/CodeGen/X86/avx2-builtins.c b/clang/test/CodeGen/X86/avx2-builtins.c
index 27da56f..adbb854 100644
--- a/clang/test/CodeGen/X86/avx2-builtins.c
+++ b/clang/test/CodeGen/X86/avx2-builtins.c
@@ -9,6 +9,7 @@
#include <immintrin.h>
+#include "builtin_test_helpers.h"
// NOTE: This should match the tests in llvm/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll
@@ -97,6 +98,7 @@ __m256i test_mm256_and_si256(__m256i a, __m256i b) {
// CHECK: and <4 x i64>
return _mm256_and_si256(a, b);
}
+TEST_CONSTEXPR(match_v4di(_mm256_and_si256((__m256i)(__v4di){0, -1, 0, -1}, (__m256i)(__v4di){0, 0, -1, -1}), 0, 0, 0, -1));
__m256i test_mm256_andnot_si256(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_andnot_si256
@@ -104,6 +106,7 @@ __m256i test_mm256_andnot_si256(__m256i a, __m256i b) {
// CHECK: and <4 x i64>
return _mm256_andnot_si256(a, b);
}
+TEST_CONSTEXPR(match_v4di(_mm256_andnot_si256((__m256i)(__v4di){0, -1, 0, -1}, (__m256i)(__v4di){0, 0, -1, -1}), 0, 0, -1, 0));
__m256i test_mm256_avg_epu8(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_avg_epu8
@@ -890,12 +893,14 @@ __m256i test_mm256_mulhi_epu16(__m256i a, __m256i b) {
// CHECK: call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> %{{.*}}, <16 x i16> %{{.*}})
return _mm256_mulhi_epu16(a, b);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_mulhi_epu16((__m256i)(__v16hi){+1, -2, +3, -4, +5, -6, +7, -8, +9, -10, +11, -12, +13, -14, +15, -16}, (__m256i)(__v16hi){-32, -30, +28, +26, -24, -22, +20, +18, -16, -14, +12, +10, -8, +6, -4, +2}), 0, -32, 0, 25, 4, -28, 0, 17, 8, -24, 0, 9, 12, 5, 14, 1));
__m256i test_mm256_mulhi_epi16(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_mulhi_epi16
// CHECK: call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> %{{.*}}, <16 x i16> %{{.*}})
return _mm256_mulhi_epi16(a, b);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_mulhi_epi16((__m256i)(__v16hi){+1, -2, +3, -4, +5, -6, +7, -8, +9, -10, +11, -12, +13, -14, +15, -16}, (__m256i)(__v16hi){-32, -30, +28, +26, -24, -22, +20, +18, -16, -14, +12, +10, -8, +6, -4, +2}), -1, 0, 0, -1, -1, 0, 0, -1, -1, 0, 0, -1, -1, -1, -1, -1));
__m256i test_mm256_mulhrs_epi16(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_mulhrs_epi16
@@ -908,6 +913,7 @@ __m256i test_mm256_mullo_epi16(__m256i a, __m256i b) {
// CHECK: mul <16 x i16>
return _mm256_mullo_epi16(a, b);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_mullo_epi16((__m256i)(__v16hi){+1, -2, +3, -4, +5, -6, +7, -8, +9, -10, +11, -12, +13, -14, +15, -16}, (__m256i)(__v16hi){-32, -30, +28, +26, -24, -22, +20, +18, -16, -14, +12, +10, -8, +6, -4, +2}), -32, 60, 84, -104, -120, 132, 140, -144, -144, 140, 132, -120, -104, -84, -60, -32));
__m256i test_mm256_mullo_epi32(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_mullo_epi32
@@ -920,6 +926,7 @@ __m256i test_mm256_or_si256(__m256i a, __m256i b) {
// CHECK: or <4 x i64>
return _mm256_or_si256(a, b);
}
+TEST_CONSTEXPR(match_v4di(_mm256_or_si256((__m256i)(__v4di){0, -1, 0, -1}, (__m256i)(__v4di){0, 0, -1, -1}), 0, -1, -1, -1));
__m256i test_mm256_packs_epi16(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_packs_epi16
@@ -1336,3 +1343,4 @@ __m256i test_mm256_xor_si256(__m256i a, __m256i b) {
// CHECK: xor <4 x i64>
return _mm256_xor_si256(a, b);
}
+TEST_CONSTEXPR(match_v4di(_mm256_xor_si256((__m256i)(__v4di){0, -1, 0, -1}, (__m256i)(__v4di){0, 0, -1, -1}), 0, -1, -1, 0));
diff --git a/clang/test/CodeGen/X86/avx512-reduceIntrin.c b/clang/test/CodeGen/X86/avx512-reduceIntrin.c
index 2ceac3a..4069b46 100644
--- a/clang/test/CodeGen/X86/avx512-reduceIntrin.c
+++ b/clang/test/CodeGen/X86/avx512-reduceIntrin.c
@@ -1,162 +1,174 @@
-// RUN: %clang_cc1 -ffreestanding %s -O0 -triple=x86_64-apple-darwin -target-cpu skylake-avx512 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -ffreestanding %s -O0 -triple=x86_64-apple-darwin -target-cpu skylake-avx512 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -ffreestanding %s -O0 -triple=i386-apple-darwin -target-cpu skylake-avx512 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -ffreestanding %s -O0 -triple=x86_64-apple-darwin -target-cpu skylake-avx512 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -ffreestanding %s -O0 -triple=i386-apple-darwin -target-cpu skylake-avx512 -emit-llvm -o - -Wall -Werror | FileCheck %s
#include <immintrin.h>
+#include "builtin_test_helpers.h"
long long test_mm512_reduce_add_epi64(__m512i __W){
-// CHECK-LABEL: @test_mm512_reduce_add_epi64(
-// CHECK: call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %{{.*}})
+// CHECK-LABEL: test_mm512_reduce_add_epi64
+// CHECK: call {{.*}}i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %{{.*}})
return _mm512_reduce_add_epi64(__W);
}
+TEST_CONSTEXPR(_mm512_reduce_add_epi64((__m512i)(__v8di){-4, -3, -2, -1, 0, 1, 2, 3}) == -4);
long long test_mm512_reduce_mul_epi64(__m512i __W){
-// CHECK-LABEL: @test_mm512_reduce_mul_epi64(
-// CHECK: call i64 @llvm.vector.reduce.mul.v8i64(<8 x i64> %{{.*}})
+// CHECK-LABEL: test_mm512_reduce_mul_epi64
+// CHECK: call {{.*}}i64 @llvm.vector.reduce.mul.v8i64(<8 x i64> %{{.*}})
return _mm512_reduce_mul_epi64(__W);
}
+TEST_CONSTEXPR(_mm512_reduce_mul_epi64((__m512i)(__v8di){1, 2, 3, 4, 5, 6, 7, 8}) == 40320);
long long test_mm512_reduce_or_epi64(__m512i __W){
-// CHECK-LABEL: @test_mm512_reduce_or_epi64(
-// CHECK: call i64 @llvm.vector.reduce.or.v8i64(<8 x i64> %{{.*}})
+// CHECK-LABEL: test_mm512_reduce_or_epi64
+// CHECK: call {{.*}}i64 @llvm.vector.reduce.or.v8i64(<8 x i64> %{{.*}})
return _mm512_reduce_or_epi64(__W);
}
+TEST_CONSTEXPR(_mm512_reduce_or_epi64((__m512i)(__v8di){0x100, 0x200, 0x400, 0x800, 0, 0, 0, 0}) == 0xF00);
long long test_mm512_reduce_and_epi64(__m512i __W){
-// CHECK-LABEL: @test_mm512_reduce_and_epi64(
-// CHECK: call i64 @llvm.vector.reduce.and.v8i64(<8 x i64> %{{.*}})
+// CHECK-LABEL: test_mm512_reduce_and_epi64
+// CHECK: call {{.*}}i64 @llvm.vector.reduce.and.v8i64(<8 x i64> %{{.*}})
return _mm512_reduce_and_epi64(__W);
}
+TEST_CONSTEXPR(_mm512_reduce_and_epi64((__m512i)(__v8di){0xFFFF, 0xFF00, 0x00FF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFF00, 0x00FF}) == 0x0000);
long long test_mm512_mask_reduce_add_epi64(__mmask8 __M, __m512i __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_add_epi64(
+// CHECK-LABEL: test_mm512_mask_reduce_add_epi64
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
-// CHECK: call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %{{.*}})
+// CHECK: call {{.*}}i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %{{.*}})
return _mm512_mask_reduce_add_epi64(__M, __W);
}
long long test_mm512_mask_reduce_mul_epi64(__mmask8 __M, __m512i __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_mul_epi64(
+// CHECK-LABEL: test_mm512_mask_reduce_mul_epi64
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
-// CHECK: call i64 @llvm.vector.reduce.mul.v8i64(<8 x i64> %{{.*}})
+// CHECK: call {{.*}}i64 @llvm.vector.reduce.mul.v8i64(<8 x i64> %{{.*}})
return _mm512_mask_reduce_mul_epi64(__M, __W);
}
long long test_mm512_mask_reduce_and_epi64(__mmask8 __M, __m512i __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_and_epi64(
+// CHECK-LABEL: test_mm512_mask_reduce_and_epi64
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
-// CHECK: call i64 @llvm.vector.reduce.and.v8i64(<8 x i64> %{{.*}})
+// CHECK: call {{.*}}i64 @llvm.vector.reduce.and.v8i64(<8 x i64> %{{.*}})
return _mm512_mask_reduce_and_epi64(__M, __W);
}
long long test_mm512_mask_reduce_or_epi64(__mmask8 __M, __m512i __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_or_epi64(
+// CHECK-LABEL: test_mm512_mask_reduce_or_epi64
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
-// CHECK: call i64 @llvm.vector.reduce.or.v8i64(<8 x i64> %{{.*}})
+// CHECK: call {{.*}}i64 @llvm.vector.reduce.or.v8i64(<8 x i64> %{{.*}})
return _mm512_mask_reduce_or_epi64(__M, __W);
}
int test_mm512_reduce_add_epi32(__m512i __W){
-// CHECK-LABEL: @test_mm512_reduce_add_epi32(
-// CHECK: call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %{{.*}})
+// CHECK-LABEL: test_mm512_reduce_add_epi32
+// CHECK: call {{.*}}i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %{{.*}})
return _mm512_reduce_add_epi32(__W);
}
+TEST_CONSTEXPR(_mm512_reduce_add_epi32((__m512i)(__v16si){-8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7}) == -8);
int test_mm512_reduce_mul_epi32(__m512i __W){
-// CHECK-LABEL: @test_mm512_reduce_mul_epi32(
-// CHECK: call i32 @llvm.vector.reduce.mul.v16i32(<16 x i32> %{{.*}})
+// CHECK-LABEL: test_mm512_reduce_mul_epi32
+// CHECK: call {{.*}}i32 @llvm.vector.reduce.mul.v16i32(<16 x i32> %{{.*}})
return _mm512_reduce_mul_epi32(__W);
}
+TEST_CONSTEXPR(_mm512_reduce_mul_epi32((__m512i)(__v16si){1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 1, 1, -3, 1, 1}) == -36);
int test_mm512_reduce_or_epi32(__m512i __W){
-// CHECK: call i32 @llvm.vector.reduce.or.v16i32(<16 x i32> %{{.*}})
+// CHECK: call {{.*}}i32 @llvm.vector.reduce.or.v16i32(<16 x i32> %{{.*}})
return _mm512_reduce_or_epi32(__W);
}
+TEST_CONSTEXPR(_mm512_reduce_or_epi32((__m512i)(__v16si){0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0, 0, 0, 0, 0, 0, 0, 0}) == 0xFF);
int test_mm512_reduce_and_epi32(__m512i __W){
-// CHECK-LABEL: @test_mm512_reduce_and_epi32(
-// CHECK: call i32 @llvm.vector.reduce.and.v16i32(<16 x i32> %{{.*}})
+// CHECK-LABEL: test_mm512_reduce_and_epi32
+// CHECK: call {{.*}}i32 @llvm.vector.reduce.and.v16i32(<16 x i32> %{{.*}})
return _mm512_reduce_and_epi32(__W);
}
+TEST_CONSTEXPR(_mm512_reduce_and_epi32((__m512i)(__v16si){0xFF, 0xF0, 0x0F, 0xFF, 0xFF, 0xFF, 0xF0, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xF0, 0xF0, 0x0F, 0x0F}) == 0x00);
int test_mm512_mask_reduce_add_epi32(__mmask16 __M, __m512i __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_add_epi32(
+// CHECK-LABEL: test_mm512_mask_reduce_add_epi32
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
-// CHECK: call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %{{.*}})
+// CHECK: call {{.*}}i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %{{.*}})
return _mm512_mask_reduce_add_epi32(__M, __W);
}
int test_mm512_mask_reduce_mul_epi32(__mmask16 __M, __m512i __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_mul_epi32(
+// CHECK-LABEL: test_mm512_mask_reduce_mul_epi32
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
-// CHECK: call i32 @llvm.vector.reduce.mul.v16i32(<16 x i32> %{{.*}})
+// CHECK: call {{.*}}i32 @llvm.vector.reduce.mul.v16i32(<16 x i32> %{{.*}})
return _mm512_mask_reduce_mul_epi32(__M, __W);
}
int test_mm512_mask_reduce_and_epi32(__mmask16 __M, __m512i __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_and_epi32(
+// CHECK-LABEL: test_mm512_mask_reduce_and_epi32
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
-// CHECK: call i32 @llvm.vector.reduce.and.v16i32(<16 x i32> %{{.*}})
+// CHECK: call {{.*}}i32 @llvm.vector.reduce.and.v16i32(<16 x i32> %{{.*}})
return _mm512_mask_reduce_and_epi32(__M, __W);
}
int test_mm512_mask_reduce_or_epi32(__mmask16 __M, __m512i __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_or_epi32(
+// CHECK-LABEL: test_mm512_mask_reduce_or_epi32
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
-// CHECK: call i32 @llvm.vector.reduce.or.v16i32(<16 x i32> %{{.*}})
+// CHECK: call {{.*}}i32 @llvm.vector.reduce.or.v16i32(<16 x i32> %{{.*}})
return _mm512_mask_reduce_or_epi32(__M, __W);
}
double test_mm512_reduce_add_pd(__m512d __W, double ExtraAddOp){
-// CHECK-LABEL: @test_mm512_reduce_add_pd(
+// CHECK-LABEL: test_mm512_reduce_add_pd
// CHECK-NOT: reassoc
-// CHECK: call reassoc double @llvm.vector.reduce.fadd.v8f64(double -0.000000e+00, <8 x double> %{{.*}})
+// CHECK: call reassoc {{.*}}double @llvm.vector.reduce.fadd.v8f64(double -0.000000e+00, <8 x double> %{{.*}})
// CHECK-NOT: reassoc
return _mm512_reduce_add_pd(__W) + ExtraAddOp;
}
double test_mm512_reduce_mul_pd(__m512d __W, double ExtraMulOp){
-// CHECK-LABEL: @test_mm512_reduce_mul_pd(
+// CHECK-LABEL: test_mm512_reduce_mul_pd
// CHECK-NOT: reassoc
-// CHECK: call reassoc double @llvm.vector.reduce.fmul.v8f64(double 1.000000e+00, <8 x double> %{{.*}})
+// CHECK: call reassoc {{.*}}double @llvm.vector.reduce.fmul.v8f64(double 1.000000e+00, <8 x double> %{{.*}})
// CHECK-NOT: reassoc
return _mm512_reduce_mul_pd(__W) * ExtraMulOp;
}
float test_mm512_reduce_add_ps(__m512 __W){
-// CHECK-LABEL: @test_mm512_reduce_add_ps(
-// CHECK: call reassoc float @llvm.vector.reduce.fadd.v16f32(float -0.000000e+00, <16 x float> %{{.*}})
+// CHECK-LABEL: test_mm512_reduce_add_ps
+// CHECK: call reassoc {{.*}}float @llvm.vector.reduce.fadd.v16f32(float -0.000000e+00, <16 x float> %{{.*}})
return _mm512_reduce_add_ps(__W);
}
float test_mm512_reduce_mul_ps(__m512 __W){
-// CHECK-LABEL: @test_mm512_reduce_mul_ps(
-// CHECK: call reassoc float @llvm.vector.reduce.fmul.v16f32(float 1.000000e+00, <16 x float> %{{.*}})
+// CHECK-LABEL: test_mm512_reduce_mul_ps
+// CHECK: call reassoc {{.*}}float @llvm.vector.reduce.fmul.v16f32(float 1.000000e+00, <16 x float> %{{.*}})
return _mm512_reduce_mul_ps(__W);
}
double test_mm512_mask_reduce_add_pd(__mmask8 __M, __m512d __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_add_pd(
+// CHECK-LABEL: test_mm512_mask_reduce_add_pd
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
-// CHECK: call reassoc double @llvm.vector.reduce.fadd.v8f64(double -0.000000e+00, <8 x double> %{{.*}})
+// CHECK: call reassoc {{.*}}double @llvm.vector.reduce.fadd.v8f64(double -0.000000e+00, <8 x double> %{{.*}})
return _mm512_mask_reduce_add_pd(__M, __W);
}
double test_mm512_mask_reduce_mul_pd(__mmask8 __M, __m512d __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_mul_pd(
+// CHECK-LABEL: test_mm512_mask_reduce_mul_pd
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
-// CHECK: call reassoc double @llvm.vector.reduce.fmul.v8f64(double 1.000000e+00, <8 x double> %{{.*}})
+// CHECK: call reassoc {{.*}}double @llvm.vector.reduce.fmul.v8f64(double 1.000000e+00, <8 x double> %{{.*}})
return _mm512_mask_reduce_mul_pd(__M, __W);
}
float test_mm512_mask_reduce_add_ps(__mmask16 __M, __m512 __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_add_ps(
+// CHECK-LABEL: test_mm512_mask_reduce_add_ps
// CHECK: select <16 x i1> %{{.*}}, <16 x float> {{.*}}, <16 x float> {{.*}}
-// CHECK: call reassoc float @llvm.vector.reduce.fadd.v16f32(float -0.000000e+00, <16 x float> %{{.*}})
+// CHECK: call reassoc {{.*}}float @llvm.vector.reduce.fadd.v16f32(float -0.000000e+00, <16 x float> %{{.*}})
return _mm512_mask_reduce_add_ps(__M, __W);
}
float test_mm512_mask_reduce_mul_ps(__mmask16 __M, __m512 __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_mul_ps(
+// CHECK-LABEL: test_mm512_mask_reduce_mul_ps
// CHECK: select <16 x i1> %{{.*}}, <16 x float> {{.*}}, <16 x float> %{{.*}}
-// CHECK: call reassoc float @llvm.vector.reduce.fmul.v16f32(float 1.000000e+00, <16 x float> %{{.*}})
+// CHECK: call reassoc {{.*}}float @llvm.vector.reduce.fmul.v16f32(float 1.000000e+00, <16 x float> %{{.*}})
return _mm512_mask_reduce_mul_ps(__M, __W);
}
diff --git a/clang/test/CodeGen/X86/avx512-reduceMinMaxIntrin.c b/clang/test/CodeGen/X86/avx512-reduceMinMaxIntrin.c
index 3e33ec5..0110079 100644
--- a/clang/test/CodeGen/X86/avx512-reduceMinMaxIntrin.c
+++ b/clang/test/CodeGen/X86/avx512-reduceMinMaxIntrin.c
@@ -1,164 +1,175 @@
-// RUN: %clang_cc1 -ffreestanding %s -O0 -triple=x86_64-apple-darwin -target-cpu skylake-avx512 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -ffreestanding %s -O0 -triple=x86_64-apple-darwin -target-cpu skylake-avx512 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -ffreestanding %s -O0 -triple=i386-apple-darwin -target-cpu skylake-avx512 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -ffreestanding %s -O0 -triple=x86_64-apple-darwin -target-cpu skylake-avx512 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -ffreestanding %s -O0 -triple=i386-apple-darwin -target-cpu skylake-avx512 -emit-llvm -o - -Wall -Werror | FileCheck %s
#include <immintrin.h>
+#include "builtin_test_helpers.h"
long long test_mm512_reduce_max_epi64(__m512i __W){
-// CHECK-LABEL: @test_mm512_reduce_max_epi64(
-// CHECK: call i64 @llvm.vector.reduce.smax.v8i64(<8 x i64> %{{.*}})
+// CHECK-LABEL: test_mm512_reduce_max_epi64
+// CHECK: call {{.*}}i64 @llvm.vector.reduce.smax.v8i64(<8 x i64> %{{.*}})
return _mm512_reduce_max_epi64(__W);
}
+TEST_CONSTEXPR(_mm512_reduce_max_epi64((__m512i)(__v8di){-4, -3, -2, -1, 0, 1, 2, 3}) == 3);
unsigned long long test_mm512_reduce_max_epu64(__m512i __W){
-// CHECK-LABEL: @test_mm512_reduce_max_epu64(
-// CHECK: call i64 @llvm.vector.reduce.umax.v8i64(<8 x i64> %{{.*}})
+// CHECK-LABEL: test_mm512_reduce_max_epu64
+// CHECK: call {{.*}}i64 @llvm.vector.reduce.umax.v8i64(<8 x i64> %{{.*}})
return _mm512_reduce_max_epu64(__W);
}
+TEST_CONSTEXPR(_mm512_reduce_max_epu64((__m512i)(__v8du){0, 1, 2, 3, 4, 5, 6, 7}) == 7);
double test_mm512_reduce_max_pd(__m512d __W, double ExtraAddOp){
-// CHECK-LABEL: @test_mm512_reduce_max_pd(
+// CHECK-LABEL: test_mm512_reduce_max_pd
// CHECK-NOT: nnan
-// CHECK: call nnan double @llvm.vector.reduce.fmax.v8f64(<8 x double> %{{.*}})
+// CHECK: call nnan {{.*}}double @llvm.vector.reduce.fmax.v8f64(<8 x double> %{{.*}})
// CHECK-NOT: nnan
return _mm512_reduce_max_pd(__W) + ExtraAddOp;
}
long long test_mm512_reduce_min_epi64(__m512i __W){
-// CHECK-LABEL: @test_mm512_reduce_min_epi64(
-// CHECK: call i64 @llvm.vector.reduce.smin.v8i64(<8 x i64> %{{.*}})
+// CHECK-LABEL: test_mm512_reduce_min_epi64
+// CHECK: call {{.*}}i64 @llvm.vector.reduce.smin.v8i64(<8 x i64> %{{.*}})
return _mm512_reduce_min_epi64(__W);
}
+TEST_CONSTEXPR(_mm512_reduce_min_epi64((__m512i)(__v8di){-4, -3, -2, -1, 0, 1, 2, 3}) == -4);
unsigned long long test_mm512_reduce_min_epu64(__m512i __W){
-// CHECK-LABEL: @test_mm512_reduce_min_epu64(
-// CHECK: call i64 @llvm.vector.reduce.umin.v8i64(<8 x i64> %{{.*}})
+// CHECK-LABEL: test_mm512_reduce_min_epu64
+// CHECK: call {{.*}}i64 @llvm.vector.reduce.umin.v8i64(<8 x i64> %{{.*}})
return _mm512_reduce_min_epu64(__W);
}
+TEST_CONSTEXPR(_mm512_reduce_min_epu64((__m512i)(__v8du){0, 1, 2, 3, 4, 5, 6, 7}) == 0);
double test_mm512_reduce_min_pd(__m512d __W, double ExtraMulOp){
-// CHECK-LABEL: @test_mm512_reduce_min_pd(
+// CHECK-LABEL: test_mm512_reduce_min_pd
// CHECK-NOT: nnan
-// CHECK: call nnan double @llvm.vector.reduce.fmin.v8f64(<8 x double> %{{.*}})
+// CHECK: call nnan {{.*}}double @llvm.vector.reduce.fmin.v8f64(<8 x double> %{{.*}})
// CHECK-NOT: nnan
return _mm512_reduce_min_pd(__W) * ExtraMulOp;
}
long long test_mm512_mask_reduce_max_epi64(__mmask8 __M, __m512i __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_max_epi64(
+// CHECK-LABEL: test_mm512_mask_reduce_max_epi64
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
-// CHECK: call i64 @llvm.vector.reduce.smax.v8i64(<8 x i64> %{{.*}})
+// CHECK: call {{.*}}i64 @llvm.vector.reduce.smax.v8i64(<8 x i64> %{{.*}})
return _mm512_mask_reduce_max_epi64(__M, __W);
}
unsigned long test_mm512_mask_reduce_max_epu64(__mmask8 __M, __m512i __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_max_epu64(
+// CHECK-LABEL: test_mm512_mask_reduce_max_epu64
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
-// CHECK: call i64 @llvm.vector.reduce.umax.v8i64(<8 x i64> %{{.*}})
+// CHECK: call {{.*}}i64 @llvm.vector.reduce.umax.v8i64(<8 x i64> %{{.*}})
return _mm512_mask_reduce_max_epu64(__M, __W);
}
double test_mm512_mask_reduce_max_pd(__mmask8 __M, __m512d __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_max_pd(
+// CHECK-LABEL: test_mm512_mask_reduce_max_pd
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
-// CHECK: call nnan double @llvm.vector.reduce.fmax.v8f64(<8 x double> %{{.*}})
+// CHECK: call nnan {{.*}}double @llvm.vector.reduce.fmax.v8f64(<8 x double> %{{.*}})
return _mm512_mask_reduce_max_pd(__M, __W);
}
long long test_mm512_mask_reduce_min_epi64(__mmask8 __M, __m512i __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_min_epi64(
+// CHECK-LABEL: test_mm512_mask_reduce_min_epi64
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
-// CHECK: call i64 @llvm.vector.reduce.smin.v8i64(<8 x i64> %{{.*}})
+// CHECK: call {{.*}}i64 @llvm.vector.reduce.smin.v8i64(<8 x i64> %{{.*}})
return _mm512_mask_reduce_min_epi64(__M, __W);
}
unsigned long long test_mm512_mask_reduce_min_epu64(__mmask8 __M, __m512i __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_min_epu64(
+// CHECK-LABEL: test_mm512_mask_reduce_min_epu64
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
-// CHECK: call i64 @llvm.vector.reduce.umin.v8i64(<8 x i64> %{{.*}})
+// CHECK: call {{.*}}i64 @llvm.vector.reduce.umin.v8i64(<8 x i64> %{{.*}})
return _mm512_mask_reduce_min_epu64(__M, __W);
}
double test_mm512_mask_reduce_min_pd(__mmask8 __M, __m512d __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_min_pd(
+// CHECK-LABEL: test_mm512_mask_reduce_min_pd
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
-// CHECK: call nnan double @llvm.vector.reduce.fmin.v8f64(<8 x double> %{{.*}})
+// CHECK: call nnan {{.*}}double @llvm.vector.reduce.fmin.v8f64(<8 x double> %{{.*}})
return _mm512_mask_reduce_min_pd(__M, __W);
}
int test_mm512_reduce_max_epi32(__m512i __W){
-// CHECK-LABEL: @test_mm512_reduce_max_epi32(
-// CHECK: call i32 @llvm.vector.reduce.smax.v16i32(<16 x i32> %{{.*}})
+// CHECK-LABEL: test_mm512_reduce_max_epi32
+// CHECK: call {{.*}}i32 @llvm.vector.reduce.smax.v16i32(<16 x i32> %{{.*}})
return _mm512_reduce_max_epi32(__W);
}
+TEST_CONSTEXPR(_mm512_reduce_max_epi32((__m512i)(__v16si){-8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7}) == 7);
unsigned int test_mm512_reduce_max_epu32(__m512i __W){
-// CHECK-LABEL: @test_mm512_reduce_max_epu32(
-// CHECK: call i32 @llvm.vector.reduce.umax.v16i32(<16 x i32> %{{.*}})
+// CHECK-LABEL: test_mm512_reduce_max_epu32
+// CHECK: call {{.*}}i32 @llvm.vector.reduce.umax.v16i32(<16 x i32> %{{.*}})
return _mm512_reduce_max_epu32(__W);
}
+TEST_CONSTEXPR(_mm512_reduce_max_epu32((__m512i)(__v16su){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}) == 15);
float test_mm512_reduce_max_ps(__m512 __W){
-// CHECK-LABEL: @test_mm512_reduce_max_ps(
-// CHECK: call nnan float @llvm.vector.reduce.fmax.v16f32(<16 x float> %{{.*}})
+// CHECK-LABEL: test_mm512_reduce_max_ps
+// CHECK: call nnan {{.*}}float @llvm.vector.reduce.fmax.v16f32(<16 x float> %{{.*}})
return _mm512_reduce_max_ps(__W);
}
int test_mm512_reduce_min_epi32(__m512i __W){
-// CHECK-LABEL: @test_mm512_reduce_min_epi32(
-// CHECK: call i32 @llvm.vector.reduce.smin.v16i32(<16 x i32> %{{.*}})
+// CHECK-LABEL: test_mm512_reduce_min_epi32
+// CHECK: call {{.*}}i32 @llvm.vector.reduce.smin.v16i32(<16 x i32> %{{.*}})
return _mm512_reduce_min_epi32(__W);
}
+TEST_CONSTEXPR(_mm512_reduce_min_epi32((__m512i)(__v16si){-8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7}) == -8);
unsigned int test_mm512_reduce_min_epu32(__m512i __W){
-// CHECK-LABEL: @test_mm512_reduce_min_epu32(
-// CHECK: call i32 @llvm.vector.reduce.umin.v16i32(<16 x i32> %{{.*}})
+// CHECK-LABEL: test_mm512_reduce_min_epu32
+// CHECK: call {{.*}}i32 @llvm.vector.reduce.umin.v16i32(<16 x i32> %{{.*}})
return _mm512_reduce_min_epu32(__W);
}
+TEST_CONSTEXPR(_mm512_reduce_min_epu32((__m512i)(__v16su){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}) == 0);
float test_mm512_reduce_min_ps(__m512 __W){
-// CHECK-LABEL: @test_mm512_reduce_min_ps(
-// CHECK: call nnan float @llvm.vector.reduce.fmin.v16f32(<16 x float> %{{.*}})
+// CHECK-LABEL: test_mm512_reduce_min_ps
+// CHECK: call nnan {{.*}}float @llvm.vector.reduce.fmin.v16f32(<16 x float> %{{.*}})
return _mm512_reduce_min_ps(__W);
}
int test_mm512_mask_reduce_max_epi32(__mmask16 __M, __m512i __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_max_epi32(
+// CHECK-LABEL: test_mm512_mask_reduce_max_epi32
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
-// CHECK: call i32 @llvm.vector.reduce.smax.v16i32(<16 x i32> %{{.*}})
+// CHECK: call {{.*}}i32 @llvm.vector.reduce.smax.v16i32(<16 x i32> %{{.*}})
return _mm512_mask_reduce_max_epi32(__M, __W);
}
unsigned int test_mm512_mask_reduce_max_epu32(__mmask16 __M, __m512i __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_max_epu32(
+// CHECK-LABEL: test_mm512_mask_reduce_max_epu32
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
-// CHECK: call i32 @llvm.vector.reduce.umax.v16i32(<16 x i32> %{{.*}})
+// CHECK: call {{.*}}i32 @llvm.vector.reduce.umax.v16i32(<16 x i32> %{{.*}})
return _mm512_mask_reduce_max_epu32(__M, __W);
}
float test_mm512_mask_reduce_max_ps(__mmask16 __M, __m512 __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_max_ps(
+// CHECK-LABEL: test_mm512_mask_reduce_max_ps
// CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
-// CHECK: call nnan float @llvm.vector.reduce.fmax.v16f32(<16 x float> %{{.*}})
+// CHECK: call nnan {{.*}}float @llvm.vector.reduce.fmax.v16f32(<16 x float> %{{.*}})
return _mm512_mask_reduce_max_ps(__M, __W);
}
int test_mm512_mask_reduce_min_epi32(__mmask16 __M, __m512i __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_min_epi32(
+// CHECK-LABEL: test_mm512_mask_reduce_min_epi32
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
-// CHECK: call i32 @llvm.vector.reduce.smin.v16i32(<16 x i32> %{{.*}})
+// CHECK: call {{.*}}i32 @llvm.vector.reduce.smin.v16i32(<16 x i32> %{{.*}})
return _mm512_mask_reduce_min_epi32(__M, __W);
}
unsigned int test_mm512_mask_reduce_min_epu32(__mmask16 __M, __m512i __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_min_epu32(
+// CHECK-LABEL: test_mm512_mask_reduce_min_epu32
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
-// CHECK: call i32 @llvm.vector.reduce.umin.v16i32(<16 x i32> %{{.*}})
+// CHECK: call {{.*}}i32 @llvm.vector.reduce.umin.v16i32(<16 x i32> %{{.*}})
return _mm512_mask_reduce_min_epu32(__M, __W);
}
float test_mm512_mask_reduce_min_ps(__mmask16 __M, __m512 __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_min_ps(
+// CHECK-LABEL: test_mm512_mask_reduce_min_ps
// CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
-// CHECK: call nnan float @llvm.vector.reduce.fmin.v16f32(<16 x float> %{{.*}})
+// CHECK: call nnan {{.*}}float @llvm.vector.reduce.fmin.v16f32(<16 x float> %{{.*}})
return _mm512_mask_reduce_min_ps(__M, __W);
}
-
diff --git a/clang/test/CodeGen/X86/avx512bitalg-builtins.c b/clang/test/CodeGen/X86/avx512bitalg-builtins.c
index c80fb5e..30d364a2 100644
--- a/clang/test/CodeGen/X86/avx512bitalg-builtins.c
+++ b/clang/test/CodeGen/X86/avx512bitalg-builtins.c
@@ -1,54 +1,60 @@
-// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bitalg -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bitalg -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512bitalg -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bitalg -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512bitalg -emit-llvm -o - -Wall -Werror | FileCheck %s
#include <immintrin.h>
+#include "builtin_test_helpers.h"
__m512i test_mm512_popcnt_epi16(__m512i __A) {
- // CHECK-LABEL: @test_mm512_popcnt_epi16
+ // CHECK-LABEL: test_mm512_popcnt_epi16
// CHECK: @llvm.ctpop.v32i16
return _mm512_popcnt_epi16(__A);
}
+TEST_CONSTEXPR(match_v32hi(_mm512_popcnt_epi16((__m512i)(__v32hi){+5, -3, -10, +8, 0, -256, +256, -128, +3, +9, +15, +33, +63, +129, +511, +1025, +5, -3, -10, +8, 0, -256, +256, -128, +3, +9, +15, +33, +63, +129, +511, +1025}), 2, 15, 14, 1, 0, 8, 1, 9, 2, 2, 4, 2, 6, 2, 9, 2, 2, 15, 14, 1, 0, 8, 1, 9, 2, 2, 4, 2, 6, 2, 9, 2));
__m512i test_mm512_mask_popcnt_epi16(__m512i __A, __mmask32 __U, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_popcnt_epi16
+ // CHECK-LABEL: test_mm512_mask_popcnt_epi16
// CHECK: @llvm.ctpop.v32i16
// CHECK: select <32 x i1> %{{[0-9]+}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_popcnt_epi16(__A, __U, __B);
}
__m512i test_mm512_maskz_popcnt_epi16(__mmask32 __U, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_popcnt_epi16
+ // CHECK-LABEL: test_mm512_maskz_popcnt_epi16
// CHECK: @llvm.ctpop.v32i16
// CHECK: select <32 x i1> %{{[0-9]+}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_popcnt_epi16(__U, __B);
}
__m512i test_mm512_popcnt_epi8(__m512i __A) {
- // CHECK-LABEL: @test_mm512_popcnt_epi8
+ // CHECK-LABEL: test_mm512_popcnt_epi8
// CHECK: @llvm.ctpop.v64i8
return _mm512_popcnt_epi8(__A);
}
+TEST_CONSTEXPR(match_v64qi(_mm512_popcnt_epi8((__m512i)(__v64qi){+5, -3, -10, +8, 0, -16, +16, -16, +3, +9, +15, +33, +63, +33, +53, +73, +5, -3, -10, +8, 0, -16, +16, -16, +3, +9, +15, +33, +63, +33, +53, +73, +5, -3, -10, +8, 0, -16, +16, -16, +3, +9, +15, +33, +63, +33, +53, +73, +5, -3, -10, +8, 0, -16, +16, -16, +3, +9, +15, +33, +63, +33, +53, +73}), 2, 7, 6, 1, 0, 4, 1, 4, 2, 2, 4, 2, 6, 2, 4, 3, 2, 7, 6, 1, 0, 4, 1, 4, 2, 2, 4, 2, 6, 2, 4, 3, 2, 7, 6, 1, 0, 4, 1, 4, 2, 2, 4, 2, 6, 2, 4, 3, 2, 7, 6, 1, 0, 4, 1, 4, 2, 2, 4, 2, 6, 2, 4, 3));
__m512i test_mm512_mask_popcnt_epi8(__m512i __A, __mmask64 __U, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_popcnt_epi8
+ // CHECK-LABEL: test_mm512_mask_popcnt_epi8
// CHECK: @llvm.ctpop.v64i8
// CHECK: select <64 x i1> %{{[0-9]+}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_mask_popcnt_epi8(__A, __U, __B);
}
__m512i test_mm512_maskz_popcnt_epi8(__mmask64 __U, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_popcnt_epi8
+ // CHECK-LABEL: test_mm512_maskz_popcnt_epi8
// CHECK: @llvm.ctpop.v64i8
// CHECK: select <64 x i1> %{{[0-9]+}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_maskz_popcnt_epi8(__U, __B);
}
__mmask64 test_mm512_mask_bitshuffle_epi64_mask(__mmask64 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_bitshuffle_epi64_mask
+ // CHECK-LABEL: test_mm512_mask_bitshuffle_epi64_mask
// CHECK: @llvm.x86.avx512.vpshufbitqmb.512
// CHECK: and <64 x i1> %{{.*}}, %{{.*}}
return _mm512_mask_bitshuffle_epi64_mask(__U, __A, __B);
}
__mmask64 test_mm512_bitshuffle_epi64_mask(__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_bitshuffle_epi64_mask
+ // CHECK-LABEL: test_mm512_bitshuffle_epi64_mask
// CHECK: @llvm.x86.avx512.vpshufbitqmb.512
return _mm512_bitshuffle_epi64_mask(__A, __B);
}
diff --git a/clang/test/CodeGen/X86/avx512bw-builtins.c b/clang/test/CodeGen/X86/avx512bw-builtins.c
index 1d18ca8..37765eb 100644
--- a/clang/test/CodeGen/X86/avx512bw-builtins.c
+++ b/clang/test/CodeGen/X86/avx512bw-builtins.c
@@ -3,6 +3,7 @@
#include <immintrin.h>
+#include "builtin_test_helpers.h"
__mmask32 test_knot_mask32(__mmask32 a) {
// CHECK-LABEL: @test_knot_mask32
@@ -823,6 +824,7 @@ __m512i test_mm512_mullo_epi16 (__m512i __A, __m512i __B) {
//CHECK: mul <32 x i16>
return _mm512_mullo_epi16(__A, __B);
}
+TEST_CONSTEXPR(match_v32hi(_mm512_mullo_epi16((__m512i)(__v32hi){+1, -2, +3, -4, +5, -6, +7, -8, +9, -10, +11, -12, +13, -14, +15, -16, +17, -18, +19, -20, +21, -22, +23, -24, +25, -26, +27, -28, +29, -30, +31, -32}, (__m512i)(__v32hi){-64, -62, +60, +58, -56, -54, +52, +50, -48, -46, +44, +42, -40, -38, +36, +34, -32, -30, +28, +26, -24, -22, +20, +18, -16, -14, +12, +10, -8, +6, -4, +2}), -64, 124, 180, -232, -280, 324, 364, -400, -432, 460, 484, -504, -520, 532, 540, -544, -544, 540, 532, -520, -504, 484, 460, -432, -400, 364, 324, -280, -232, -180, -124, -64));
__m512i test_mm512_mask_mullo_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
//CHECK-LABEL: @test_mm512_mask_mullo_epi16
@@ -1331,29 +1333,36 @@ __m512i test_mm512_mulhi_epi16(__m512i __A, __m512i __B) {
// CHECK: @llvm.x86.avx512.pmulh.w.512
return _mm512_mulhi_epi16(__A,__B);
}
+TEST_CONSTEXPR(match_v32hi(_mm512_mulhi_epi16((__m512i)(__v32hi){+1, -2, +3, -4, +5, -6, +7, -8, +9, -10, +11, -12, +13, -14, +15, -16, +17, -18, +19, -20, +21, -22, +23, -24, +25, -26, +27, -28, +29, -30, +31, -32}, (__m512i)(__v32hi){-64, -62, +60, +58, -56, -54, +52, +50, -48, -46, +44, +42, -40, -38, +36, +34, -32, -30, +28, +26, -24, -22, +20, +18, -16, -14, +12, +10, -8, +6, -4, +2}), -1, 0, 0, -1, -1, 0, 0, -1, -1, 0, 0, -1, -1, 0, 0, -1, -1, 0, 0, -1, -1, 0, 0, -1, -1, 0, 0, -1, -1, -1, -1, -1));
+
__m512i test_mm512_mask_mulhi_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_mask_mulhi_epi16
// CHECK: @llvm.x86.avx512.pmulh.w.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_mulhi_epi16(__W,__U,__A,__B);
}
+
__m512i test_mm512_maskz_mulhi_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_maskz_mulhi_epi16
// CHECK: @llvm.x86.avx512.pmulh.w.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_mulhi_epi16(__U,__A,__B);
}
+
__m512i test_mm512_mulhi_epu16(__m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_mulhi_epu16
// CHECK: @llvm.x86.avx512.pmulhu.w.512
return _mm512_mulhi_epu16(__A,__B);
}
+TEST_CONSTEXPR(match_v32hi(_mm512_mulhi_epu16((__m512i)(__v32hi){+1, -2, +3, -4, +5, -6, +7, -8, +9, -10, +11, -12, +13, -14, +15, -16, +17, -18, +19, -20, +21, -22, +23, -24, +25, -26, +27, -28, +29, -30, +31, -32}, (__m512i)(__v32hi){-64, -62, +60, +58, -56, -54, +52, +50, -48, -46, +44, +42, -40, -38, +36, +34, -32, -30, +28, +26, -24, -22, +20, +18, -16, -14, +12, +10, -8, +6, -4, +2}), 0, -64, 0, 57, 4, -60, 0, 49, 8, -56, 0, 41, 12, -52, 0, 33, 16, -48, 0, 25, 20, -44, 0, 17, 24, -40, 0, 9, 28, 5, 30, 1));
+
__m512i test_mm512_mask_mulhi_epu16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_mask_mulhi_epu16
// CHECK: @llvm.x86.avx512.pmulhu.w.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_mulhi_epu16(__W,__U,__A,__B);
}
+
__m512i test_mm512_maskz_mulhi_epu16(__mmask32 __U, __m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_maskz_mulhi_epu16
// CHECK: @llvm.x86.avx512.pmulhu.w.512
diff --git a/clang/test/CodeGen/X86/avx512dq-builtins.c b/clang/test/CodeGen/X86/avx512dq-builtins.c
index 1ebd369..1c01695 100644
--- a/clang/test/CodeGen/X86/avx512dq-builtins.c
+++ b/clang/test/CodeGen/X86/avx512dq-builtins.c
@@ -1,17 +1,21 @@
-// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512dq -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512dq -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512dq -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512dq -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512dq -emit-llvm -o - -Wall -Werror | FileCheck %s
#include <immintrin.h>
+#include "builtin_test_helpers.h"
__mmask8 test_knot_mask8(__mmask8 a) {
- // CHECK-LABEL: @test_knot_mask8
+ // CHECK-LABEL: test_knot_mask8
// CHECK: [[IN:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[NOT:%.*]] = xor <8 x i1> [[IN]], splat (i1 true)
return _knot_mask8(a);
}
__mmask8 test_kand_mask8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
- // CHECK-LABEL: @test_kand_mask8
+ // CHECK-LABEL: test_kand_mask8
// CHECK: [[LHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[RHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[RES:%.*]] = and <8 x i1> [[LHS]], [[RHS]]
@@ -21,7 +25,7 @@ __mmask8 test_kand_mask8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m
}
__mmask8 test_kandn_mask8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
- // CHECK-LABEL: @test_kandn_mask8
+ // CHECK-LABEL: test_kandn_mask8
// CHECK: [[LHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[RHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[NOT:%.*]] = xor <8 x i1> [[LHS]], splat (i1 true)
@@ -32,7 +36,7 @@ __mmask8 test_kandn_mask8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __
}
__mmask8 test_kor_mask8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
- // CHECK-LABEL: @test_kor_mask8
+ // CHECK-LABEL: test_kor_mask8
// CHECK: [[LHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[RHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[RES:%.*]] = or <8 x i1> [[LHS]], [[RHS]]
@@ -42,7 +46,7 @@ __mmask8 test_kor_mask8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m5
}
__mmask8 test_kxnor_mask8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
- // CHECK-LABEL: @test_kxnor_mask8
+ // CHECK-LABEL: test_kxnor_mask8
// CHECK: [[LHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[RHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[NOT:%.*]] = xor <8 x i1> [[LHS]], splat (i1 true)
@@ -53,7 +57,7 @@ __mmask8 test_kxnor_mask8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __
}
__mmask8 test_kxor_mask8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
- // CHECK-LABEL: @test_kxor_mask8
+ // CHECK-LABEL: test_kxor_mask8
// CHECK: [[LHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[RHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[RES:%.*]] = xor <8 x i1> [[LHS]], [[RHS]]
@@ -63,7 +67,7 @@ __mmask8 test_kxor_mask8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m
}
unsigned char test_kortestz_mask8_u8(__m512i __A, __m512i __B, __m512i __C, __m512i __D) {
- // CHECK-LABEL: @test_kortestz_mask8_u8
+ // CHECK-LABEL: test_kortestz_mask8_u8
// CHECK: [[LHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[RHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[OR:%.*]] = or <8 x i1> [[LHS]], [[RHS]]
@@ -76,7 +80,7 @@ unsigned char test_kortestz_mask8_u8(__m512i __A, __m512i __B, __m512i __C, __m5
}
unsigned char test_kortestc_mask8_u8(__m512i __A, __m512i __B, __m512i __C, __m512i __D) {
- // CHECK-LABEL: @test_kortestc_mask8_u8
+ // CHECK-LABEL: test_kortestc_mask8_u8
// CHECK: [[LHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[RHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[OR:%.*]] = or <8 x i1> [[LHS]], [[RHS]]
@@ -89,7 +93,7 @@ unsigned char test_kortestc_mask8_u8(__m512i __A, __m512i __B, __m512i __C, __m5
}
unsigned char test_kortest_mask8_u8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, unsigned char *CF) {
- // CHECK-LABEL: @test_kortest_mask8_u8
+ // CHECK-LABEL: test_kortest_mask8_u8
// CHECK: [[LHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[RHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[OR:%.*]] = or <8 x i1> [[LHS]], [[RHS]]
@@ -109,7 +113,7 @@ unsigned char test_kortest_mask8_u8(__m512i __A, __m512i __B, __m512i __C, __m51
}
unsigned char test_ktestz_mask8_u8(__m512i __A, __m512i __B, __m512i __C, __m512i __D) {
- // CHECK-LABEL: @test_ktestz_mask8_u8
+ // CHECK-LABEL: test_ktestz_mask8_u8
// CHECK: [[LHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[RHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[RES:%.*]] = call i32 @llvm.x86.avx512.ktestz.b(<8 x i1> [[LHS]], <8 x i1> [[RHS]])
@@ -119,7 +123,7 @@ unsigned char test_ktestz_mask8_u8(__m512i __A, __m512i __B, __m512i __C, __m512
}
unsigned char test_ktestc_mask8_u8(__m512i __A, __m512i __B, __m512i __C, __m512i __D) {
- // CHECK-LABEL: @test_ktestc_mask8_u8
+ // CHECK-LABEL: test_ktestc_mask8_u8
// CHECK: [[LHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[RHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[RES:%.*]] = call i32 @llvm.x86.avx512.ktestc.b(<8 x i1> [[LHS]], <8 x i1> [[RHS]])
@@ -129,7 +133,7 @@ unsigned char test_ktestc_mask8_u8(__m512i __A, __m512i __B, __m512i __C, __m512
}
unsigned char test_ktest_mask8_u8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, unsigned char *CF) {
- // CHECK-LABEL: @test_ktest_mask8_u8
+ // CHECK-LABEL: test_ktest_mask8_u8
// CHECK: [[LHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[RHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[RES:%.*]] = call i32 @llvm.x86.avx512.ktestc.b(<8 x i1> [[LHS]], <8 x i1> [[RHS]])
@@ -143,7 +147,7 @@ unsigned char test_ktest_mask8_u8(__m512i __A, __m512i __B, __m512i __C, __m512i
}
unsigned char test_ktestz_mask16_u8(__m512i __A, __m512i __B, __m512i __C, __m512i __D) {
- // CHECK-LABEL: @test_ktestz_mask16_u8
+ // CHECK-LABEL: test_ktestz_mask16_u8
// CHECK: [[LHS:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
// CHECK: [[RHS:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
// CHECK: [[RES:%.*]] = call i32 @llvm.x86.avx512.ktestz.w(<16 x i1> [[LHS]], <16 x i1> [[RHS]])
@@ -153,7 +157,7 @@ unsigned char test_ktestz_mask16_u8(__m512i __A, __m512i __B, __m512i __C, __m51
}
unsigned char test_ktestc_mask16_u8(__m512i __A, __m512i __B, __m512i __C, __m512i __D) {
- // CHECK-LABEL: @test_ktestc_mask16_u8
+ // CHECK-LABEL: test_ktestc_mask16_u8
// CHECK: [[LHS:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
// CHECK: [[RHS:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
// CHECK: [[RES:%.*]] = call i32 @llvm.x86.avx512.ktestc.w(<16 x i1> [[LHS]], <16 x i1> [[RHS]])
@@ -163,7 +167,7 @@ unsigned char test_ktestc_mask16_u8(__m512i __A, __m512i __B, __m512i __C, __m51
}
unsigned char test_ktest_mask16_u8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, unsigned char *CF) {
- // CHECK-LABEL: @test_ktest_mask16_u8
+ // CHECK-LABEL: test_ktest_mask16_u8
// CHECK: [[LHS:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
// CHECK: [[RHS:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
// CHECK: [[RES:%.*]] = call i32 @llvm.x86.avx512.ktestc.w(<16 x i1> [[LHS]], <16 x i1> [[RHS]])
@@ -177,7 +181,7 @@ unsigned char test_ktest_mask16_u8(__m512i __A, __m512i __B, __m512i __C, __m512
}
__mmask8 test_kadd_mask8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
- // CHECK-LABEL: @test_kadd_mask8
+ // CHECK-LABEL: test_kadd_mask8
// CHECK: [[LHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[RHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[RES:%.*]] = call <8 x i1> @llvm.x86.avx512.kadd.b(<8 x i1> [[LHS]], <8 x i1> [[RHS]])
@@ -187,7 +191,7 @@ __mmask8 test_kadd_mask8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m
}
__mmask16 test_kadd_mask16(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
- // CHECK-LABEL: @test_kadd_mask16
+ // CHECK-LABEL: test_kadd_mask16
// CHECK: [[LHS:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
// CHECK: [[RHS:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
// CHECK: [[RES:%.*]] = call <16 x i1> @llvm.x86.avx512.kadd.w(<16 x i1> [[LHS]], <16 x i1> [[RHS]])
@@ -197,79 +201,80 @@ __mmask16 test_kadd_mask16(__m512i __A, __m512i __B, __m512i __C, __m512i __D, _
}
__mmask8 test_kshiftli_mask8(__m512i A, __m512i B, __m512i C, __m512i D) {
- // CHECK-LABEL: @test_kshiftli_mask8
+ // CHECK-LABEL: test_kshiftli_mask8
// CHECK: [[VAL:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[RES:%.*]] = shufflevector <8 x i1> zeroinitializer, <8 x i1> [[VAL]], <8 x i32> <i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13>
return _mm512_mask_cmpneq_epu64_mask(_kshiftli_mask8(_mm512_cmpneq_epu64_mask(A, B), 2), C, D);
}
__mmask8 test_kshiftri_mask8(__m512i A, __m512i B, __m512i C, __m512i D) {
- // CHECK-LABEL: @test_kshiftri_mask8
+ // CHECK-LABEL: test_kshiftri_mask8
// CHECK: [[VAL:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: [[RES:%.*]] = shufflevector <8 x i1> [[VAL]], <8 x i1> zeroinitializer, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9>
return _mm512_mask_cmpneq_epu64_mask(_kshiftri_mask8(_mm512_cmpneq_epu64_mask(A, B), 2), C, D);
}
unsigned int test_cvtmask8_u32(__m512i A, __m512i B) {
- // CHECK-LABEL: @test_cvtmask8_u32
+ // CHECK-LABEL: test_cvtmask8_u32
// CHECK: zext i8 %{{.*}} to i32
return _cvtmask8_u32(_mm512_cmpneq_epu64_mask(A, B));
}
__mmask8 test_cvtu32_mask8(__m512i A, __m512i B, unsigned int C) {
- // CHECK-LABEL: @test_cvtu32_mask8
+ // CHECK-LABEL: test_cvtu32_mask8
// CHECK: trunc i32 %{{.*}} to i8
return _mm512_mask_cmpneq_epu64_mask(_cvtu32_mask8(C), A, B);
}
__mmask8 test_load_mask8(__mmask8 *A, __m512i B, __m512i C) {
- // CHECK-LABEL: @test_load_mask8
+ // CHECK-LABEL: test_load_mask8
// CHECK: [[LOAD:%.*]] = load i8, ptr %{{.*}}
return _mm512_mask_cmpneq_epu64_mask(_load_mask8(A), B, C);
}
void test_store_mask8(__mmask8 *A, __m512i B, __m512i C) {
- // CHECK-LABEL: @test_store_mask8
+ // CHECK-LABEL: test_store_mask8
// CHECK: store i8 %{{.*}}, ptr %{{.*}}
_store_mask8(A, _mm512_cmpneq_epu64_mask(B, C));
}
__m512i test_mm512_mullo_epi64 (__m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mullo_epi64
+ // CHECK-LABEL: test_mm512_mullo_epi64
// CHECK: mul <8 x i64>
return (__m512i) _mm512_mullo_epi64(__A, __B);
}
__m512i test_mm512_mask_mullo_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_mask_mullo_epi64
+ // CHECK-LABEL: test_mm512_mask_mullo_epi64
// CHECK: mul <8 x i64> %{{.*}}, %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return (__m512i) _mm512_mask_mullo_epi64(__W, __U, __A, __B);
}
__m512i test_mm512_maskz_mullo_epi64 (__mmask8 __U, __m512i __A, __m512i __B) {
- // CHECK-LABEL: @test_mm512_maskz_mullo_epi64
+ // CHECK-LABEL: test_mm512_maskz_mullo_epi64
// CHECK: mul <8 x i64> %{{.*}}, %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return (__m512i) _mm512_maskz_mullo_epi64(__U, __A, __B);
}
__m512d test_mm512_xor_pd (__m512d __A, __m512d __B) {
- // CHECK-LABEL: @test_mm512_xor_pd
+ // CHECK-LABEL: test_mm512_xor_pd
// CHECK: xor <8 x i64>
return (__m512d) _mm512_xor_pd(__A, __B);
}
__m512d test_mm512_mask_xor_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
- // CHECK-LABEL: @test_mm512_mask_xor_pd
+ // CHECK-LABEL: test_mm512_mask_xor_pd
// CHECK: xor <8 x i64>
// CHECK: %[[MASK:.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: select <8 x i1> %[[MASK]], <8 x double> %{{.*}}, <8 x double> %{{.*}}
return (__m512d) _mm512_mask_xor_pd(__W, __U, __A, __B);
}
+TEST_CONSTEXPR(match_m512d(_mm512_xor_pd((__m512d){-4.0, -5.0, +6.0, +7.0, +7.0, +6.0, -5.0, -4.0}, (__m512d){+0.0, -0.0, -0.0, +7.0, +7.0, -0.0, -0.0, +0.0}), -4.0, +5.0, -6.0, +0.0, +0.0, -6.0, +5.0, -4.0));
__m512d test_mm512_maskz_xor_pd (__mmask8 __U, __m512d __A, __m512d __B) {
- // CHECK-LABEL: @test_mm512_maskz_xor_pd
+ // CHECK-LABEL: test_mm512_maskz_xor_pd
// CHECK: xor <8 x i64>
// CHECK: %[[MASK:.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: select <8 x i1> %[[MASK]], <8 x double> %{{.*}}, <8 x double> %{{.*}}
@@ -277,13 +282,14 @@ __m512d test_mm512_maskz_xor_pd (__mmask8 __U, __m512d __A, __m512d __B) {
}
__m512 test_mm512_xor_ps (__m512 __A, __m512 __B) {
- // CHECK-LABEL: @test_mm512_xor_ps
+ // CHECK-LABEL: test_mm512_xor_ps
// CHECK: xor <16 x i32>
return (__m512) _mm512_xor_ps(__A, __B);
}
+TEST_CONSTEXPR(match_m512(_mm512_xor_ps((__m512){-4.0f, -5.0f, +6.0f, +7.0f, +7.0f, +6.0f, -5.0f, -4.0f, -4.0f, -5.0f, +6.0f, +7.0f, +7.0f, +6.0f, -5.0f, -4.0f}, (__m512){+0.0f, -0.0f, -0.0f, +7.0f, +7.0f, -0.0f, -0.0f, +0.0f, +0.0f, -0.0f, -0.0f, +7.0f, +7.0f, -0.0f, -0.0f, +0.0f}), -4.0f, +5.0f, -6.0f, +0.0f, +0.0f, -6.0f, +5.0f, -4.0f, -4.0f, +5.0f, -6.0f, +0.0f, +0.0f, -6.0f, +5.0f, -4.0f));
__m512 test_mm512_mask_xor_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
- // CHECK-LABEL: @test_mm512_mask_xor_ps
+ // CHECK-LABEL: test_mm512_mask_xor_ps
// CHECK: xor <16 x i32>
// CHECK: %[[MASK:.*]] = bitcast i16 %{{.*}} to <16 x i1>
// CHECK: select <16 x i1> %[[MASK]], <16 x float> %{{.*}}, <16 x float> %{{.*}}
@@ -291,7 +297,7 @@ __m512 test_mm512_mask_xor_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B
}
__m512 test_mm512_maskz_xor_ps (__mmask16 __U, __m512 __A, __m512 __B) {
- // CHECK-LABEL: @test_mm512_maskz_xor_ps
+ // CHECK-LABEL: test_mm512_maskz_xor_ps
// CHECK: xor <16 x i32>
// CHECK: %[[MASK:.*]] = bitcast i16 %{{.*}} to <16 x i1>
// CHECK: select <16 x i1> %[[MASK]], <16 x float> %{{.*}}, <16 x float> %{{.*}}
@@ -299,13 +305,14 @@ __m512 test_mm512_maskz_xor_ps (__mmask16 __U, __m512 __A, __m512 __B) {
}
__m512d test_mm512_or_pd (__m512d __A, __m512d __B) {
- // CHECK-LABEL: @test_mm512_or_pd
+ // CHECK-LABEL: test_mm512_or_pd
// CHECK: or <8 x i64>
return (__m512d) _mm512_or_pd(__A, __B);
}
+TEST_CONSTEXPR(match_m512d(_mm512_or_pd((__m512d){-4.0, -5.0, +6.0, +7.0, +7.0, +6.0, -5.0, -4.0}, (__m512d){+0.0, -0.0, -0.0, +7.0, +7.0, -0.0, -0.0, +0.0}), -4.0, -5.0, -6.0, +7.0, +7.0, -6.0, -5.0, -4.0));
__m512d test_mm512_mask_or_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
- // CHECK-LABEL: @test_mm512_mask_or_pd
+ // CHECK-LABEL: test_mm512_mask_or_pd
// CHECK: or <8 x i64>
// CHECK: %[[MASK:.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: select <8 x i1> %[[MASK]], <8 x double> %{{.*}}, <8 x double> %{{.*}}
@@ -313,7 +320,7 @@ __m512d test_mm512_mask_or_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d _
}
__m512d test_mm512_maskz_or_pd (__mmask8 __U, __m512d __A, __m512d __B) {
- // CHECK-LABEL: @test_mm512_maskz_or_pd
+ // CHECK-LABEL: test_mm512_maskz_or_pd
// CHECK: or <8 x i64>
// CHECK: %[[MASK:.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: select <8 x i1> %[[MASK]], <8 x double> %{{.*}}, <8 x double> %{{.*}}
@@ -321,13 +328,14 @@ __m512d test_mm512_maskz_or_pd (__mmask8 __U, __m512d __A, __m512d __B) {
}
__m512 test_mm512_or_ps (__m512 __A, __m512 __B) {
- // CHECK-LABEL: @test_mm512_or_ps
+ // CHECK-LABEL: test_mm512_or_ps
// CHECK: or <16 x i32>
return (__m512) _mm512_or_ps(__A, __B);
}
+TEST_CONSTEXPR(match_m512(_mm512_or_ps((__m512){-4.0f, -5.0f, +6.0f, +7.0f, +7.0f, +6.0f, -5.0f, -4.0f, -4.0f, -5.0f, +6.0f, +7.0f, +7.0f, +6.0f, -5.0f, -4.0f}, (__m512){+0.0f, -0.0f, -0.0f, +7.0f, +7.0f, -0.0f, -0.0f, +0.0f, +0.0f, -0.0f, -0.0f, +7.0f, +7.0f, -0.0f, -0.0f, +0.0f}), -4.0f, -5.0f, -6.0f, +7.0f, +7.0f, -6.0f, -5.0f, -4.0f, -4.0f, -5.0f, -6.0f, +7.0f, +7.0f, -6.0f, -5.0f, -4.0f));
__m512 test_mm512_mask_or_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
- // CHECK-LABEL: @test_mm512_mask_or_ps
+ // CHECK-LABEL: test_mm512_mask_or_ps
// CHECK: or <16 x i32>
// CHECK: %[[MASK:.*]] = bitcast i16 %{{.*}} to <16 x i1>
// CHECK: select <16 x i1> %[[MASK]], <16 x float> %{{.*}}, <16 x float> %{{.*}}
@@ -335,7 +343,7 @@ __m512 test_mm512_mask_or_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
}
__m512 test_mm512_maskz_or_ps (__mmask16 __U, __m512 __A, __m512 __B) {
- // CHECK-LABEL: @test_mm512_maskz_or_ps
+ // CHECK-LABEL: test_mm512_maskz_or_ps
// CHECK: or <16 x i32>
// CHECK: %[[MASK:.*]] = bitcast i16 %{{.*}} to <16 x i1>
// CHECK: select <16 x i1> %[[MASK]], <16 x float> %{{.*}}, <16 x float> %{{.*}}
@@ -343,13 +351,14 @@ __m512 test_mm512_maskz_or_ps (__mmask16 __U, __m512 __A, __m512 __B) {
}
__m512d test_mm512_and_pd (__m512d __A, __m512d __B) {
- // CHECK-LABEL: @test_mm512_and_pd
+ // CHECK-LABEL: test_mm512_and_pd
// CHECK: and <8 x i64>
return (__m512d) _mm512_and_pd(__A, __B);
}
+TEST_CONSTEXPR(match_m512d(_mm512_and_pd((__m512d){-4.0, -5.0, +6.0, +7.0, +7.0, +6.0, -5.0, -4.0}, (__m512d){+0.0, -0.0, -0.0, +7.0, +7.0, -0.0, -0.0, +0.0}), -0.0, -0.0, +0.0, +7.0, +7.0, +0.0, -0.0, -0.0));
__m512d test_mm512_mask_and_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
- // CHECK-LABEL: @test_mm512_mask_and_pd
+ // CHECK-LABEL: test_mm512_mask_and_pd
// CHECK: and <8 x i64>
// CHECK: %[[MASK:.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: select <8 x i1> %[[MASK]], <8 x double> %{{.*}}, <8 x double> %{{.*}}
@@ -357,7 +366,7 @@ __m512d test_mm512_mask_and_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d
}
__m512d test_mm512_maskz_and_pd (__mmask8 __U, __m512d __A, __m512d __B) {
- // CHECK-LABEL: @test_mm512_maskz_and_pd
+ // CHECK-LABEL: test_mm512_maskz_and_pd
// CHECK: and <8 x i64>
// CHECK: %[[MASK:.*]] = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: select <8 x i1> %[[MASK]], <8 x double> %{{.*}}, <8 x double> %{{.*}}
@@ -365,13 +374,14 @@ __m512d test_mm512_maskz_and_pd (__mmask8 __U, __m512d __A, __m512d __B) {
}
__m512 test_mm512_and_ps (__m512 __A, __m512 __B) {
- // CHECK-LABEL: @test_mm512_and_ps
+ // CHECK-LABEL: test_mm512_and_ps
// CHECK: and <16 x i32>
return (__m512) _mm512_and_ps(__A, __B);
}
+TEST_CONSTEXPR(match_m512(_mm512_and_ps((__m512){-4.0f, -5.0f, +6.0f, +7.0f, +7.0f, +6.0f, -5.0f, -4.0f, -4.0f, -5.0f, +6.0f, +7.0f, +7.0f, +6.0f, -5.0f, -4.0f}, (__m512){+0.0f, -0.0f, -0.0f, +7.0f, +7.0f, -0.0f, -0.0f, +0.0f, +0.0f, -0.0f, -0.0f, +7.0f, +7.0f, -0.0f, -0.0f, +0.0f}), -0.0f, -0.0f, +0.0f, +7.0f, +7.0f, +0.0f, -0.0f, -0.0f, -0.0f, -0.0f, +0.0f, +7.0f, +7.0f, +0.0f, -0.0f, -0.0f));
__m512 test_mm512_mask_and_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
- // CHECK-LABEL: @test_mm512_mask_and_ps
+ // CHECK-LABEL: test_mm512_mask_and_ps
// CHECK: and <16 x i32>
// CHECK: %[[MASK:.*]] = bitcast i16 %{{.*}} to <16 x i1>
// CHECK: select <16 x i1> %[[MASK]], <16 x float> %{{.*}}, <16 x float> %{{.*}}
@@ -379,7 +389,7 @@ __m512 test_mm512_mask_and_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B
}
__m512 test_mm512_maskz_and_ps (__mmask16 __U, __m512 __A, __m512 __B) {
- // CHECK-LABEL: @test_mm512_maskz_and_ps
+ // CHECK-LABEL: test_mm512_maskz_and_ps
// CHECK: and <16 x i32>
// CHECK: %[[MASK:.*]] = bitcast i16 %{{.*}} to <16 x i1>
// CHECK: select <16 x i1> %[[MASK]], <16 x float> %{{.*}}, <16 x float> %{{.*}}
@@ -387,14 +397,15 @@ __m512 test_mm512_maskz_and_ps (__mmask16 __U, __m512 __A, __m512 __B) {
}
__m512d test_mm512_andnot_pd (__m512d __A, __m512d __B) {
- // CHECK-LABEL: @test_mm512_andnot_pd
+ // CHECK-LABEL: test_mm512_andnot_pd
// CHECK: xor <8 x i64> %{{.*}}, splat (i64 -1)
// CHECK: and <8 x i64>
return (__m512d) _mm512_andnot_pd(__A, __B);
}
+TEST_CONSTEXPR(match_m512d(_mm512_andnot_pd((__m512d){-4.0, -5.0, +6.0, +7.0, +7.0, +6.0, -5.0, -4.0}, (__m512d){+0.0, -0.0, -0.0, +7.0, +7.0, -0.0, -0.0, +0.0}), +0.0, +0.0, +0.0, +0.0, +0.0, +0.0, +0.0, +0.0));
__m512d test_mm512_mask_andnot_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
- // CHECK-LABEL: @test_mm512_mask_andnot_pd
+ // CHECK-LABEL: test_mm512_mask_andnot_pd
// CHECK: xor <8 x i64> %{{.*}}, splat (i64 -1)
// CHECK: and <8 x i64> %{{.*}}, %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
@@ -402,7 +413,7 @@ __m512d test_mm512_mask_andnot_pd (__m512d __W, __mmask8 __U, __m512d __A, __m51
}
__m512d test_mm512_maskz_andnot_pd (__mmask8 __U, __m512d __A, __m512d __B) {
- // CHECK-LABEL: @test_mm512_maskz_andnot_pd
+ // CHECK-LABEL: test_mm512_maskz_andnot_pd
// CHECK: xor <8 x i64> %{{.*}}, splat (i64 -1)
// CHECK: and <8 x i64> %{{.*}}, %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
@@ -410,14 +421,15 @@ __m512d test_mm512_maskz_andnot_pd (__mmask8 __U, __m512d __A, __m512d __B) {
}
__m512 test_mm512_andnot_ps (__m512 __A, __m512 __B) {
- // CHECK-LABEL: @test_mm512_andnot_ps
+ // CHECK-LABEL: test_mm512_andnot_ps
// CHECK: xor <16 x i32> %{{.*}}, splat (i32 -1)
// CHECK: and <16 x i32>
return (__m512) _mm512_andnot_ps(__A, __B);
}
+TEST_CONSTEXPR(match_m512(_mm512_andnot_ps((__m512){-4.0f, -5.0f, +6.0f, +7.0f, +7.0f, +6.0f, -5.0f, -4.0f, -4.0f, -5.0f, +6.0f, +7.0f, +7.0f, +6.0f, -5.0f, -4.0f}, (__m512){+0.0f, -0.0f, -0.0f, +7.0f, +7.0f, -0.0f, -0.0f, +0.0f, +0.0f, -0.0f, -0.0f, +7.0f, +7.0f, -0.0f, -0.0f, +0.0f}), +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f));
__m512 test_mm512_mask_andnot_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
- // CHECK-LABEL: @test_mm512_mask_andnot_ps
+ // CHECK-LABEL: test_mm512_mask_andnot_ps
// CHECK: xor <16 x i32> %{{.*}}, splat (i32 -1)
// CHECK: and <16 x i32> %{{.*}}, %{{.*}}
// CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
@@ -425,7 +437,7 @@ __m512 test_mm512_mask_andnot_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512
}
__m512 test_mm512_maskz_andnot_ps (__mmask16 __U, __m512 __A, __m512 __B) {
- // CHECK-LABEL: @test_mm512_maskz_andnot_ps
+ // CHECK-LABEL: test_mm512_maskz_andnot_ps
// CHECK: xor <16 x i32> %{{.*}}, splat (i32 -1)
// CHECK: and <16 x i32> %{{.*}}, %{{.*}}
// CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
@@ -433,491 +445,491 @@ __m512 test_mm512_maskz_andnot_ps (__mmask16 __U, __m512 __A, __m512 __B) {
}
__m512i test_mm512_cvtpd_epi64(__m512d __A) {
- // CHECK-LABEL: @test_mm512_cvtpd_epi64
+ // CHECK-LABEL: test_mm512_cvtpd_epi64
// CHECK: @llvm.x86.avx512.mask.cvtpd2qq.512
return _mm512_cvtpd_epi64(__A);
}
__m512i test_mm512_mask_cvtpd_epi64(__m512i __W, __mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_mask_cvtpd_epi64
+ // CHECK-LABEL: test_mm512_mask_cvtpd_epi64
// CHECK: @llvm.x86.avx512.mask.cvtpd2qq.512
return _mm512_mask_cvtpd_epi64(__W, __U, __A);
}
__m512i test_mm512_maskz_cvtpd_epi64(__mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvtpd_epi64
+ // CHECK-LABEL: test_mm512_maskz_cvtpd_epi64
// CHECK: @llvm.x86.avx512.mask.cvtpd2qq.512
return _mm512_maskz_cvtpd_epi64(__U, __A);
}
__m512i test_mm512_cvt_roundpd_epi64(__m512d __A) {
- // CHECK-LABEL: @test_mm512_cvt_roundpd_epi64
+ // CHECK-LABEL: test_mm512_cvt_roundpd_epi64
// CHECK: @llvm.x86.avx512.mask.cvtpd2qq.512
return _mm512_cvt_roundpd_epi64(__A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512i test_mm512_mask_cvt_roundpd_epi64(__m512i __W, __mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_mask_cvt_roundpd_epi64
+ // CHECK-LABEL: test_mm512_mask_cvt_roundpd_epi64
// CHECK: @llvm.x86.avx512.mask.cvtpd2qq.512
return _mm512_mask_cvt_roundpd_epi64(__W, __U, __A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512i test_mm512_maskz_cvt_roundpd_epi64(__mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvt_roundpd_epi64
+ // CHECK-LABEL: test_mm512_maskz_cvt_roundpd_epi64
// CHECK: @llvm.x86.avx512.mask.cvtpd2qq.512
return _mm512_maskz_cvt_roundpd_epi64(__U, __A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512i test_mm512_cvtpd_epu64(__m512d __A) {
- // CHECK-LABEL: @test_mm512_cvtpd_epu64
+ // CHECK-LABEL: test_mm512_cvtpd_epu64
// CHECK: @llvm.x86.avx512.mask.cvtpd2uqq.512
return _mm512_cvtpd_epu64(__A);
}
__m512i test_mm512_mask_cvtpd_epu64(__m512i __W, __mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_mask_cvtpd_epu64
+ // CHECK-LABEL: test_mm512_mask_cvtpd_epu64
// CHECK: @llvm.x86.avx512.mask.cvtpd2uqq.512
return _mm512_mask_cvtpd_epu64(__W, __U, __A);
}
__m512i test_mm512_maskz_cvtpd_epu64(__mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvtpd_epu64
+ // CHECK-LABEL: test_mm512_maskz_cvtpd_epu64
// CHECK: @llvm.x86.avx512.mask.cvtpd2uqq.512
return _mm512_maskz_cvtpd_epu64(__U, __A);
}
__m512i test_mm512_cvt_roundpd_epu64(__m512d __A) {
- // CHECK-LABEL: @test_mm512_cvt_roundpd_epu64
+ // CHECK-LABEL: test_mm512_cvt_roundpd_epu64
// CHECK: @llvm.x86.avx512.mask.cvtpd2uqq.512
return _mm512_cvt_roundpd_epu64(__A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512i test_mm512_mask_cvt_roundpd_epu64(__m512i __W, __mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_mask_cvt_roundpd_epu64
+ // CHECK-LABEL: test_mm512_mask_cvt_roundpd_epu64
// CHECK: @llvm.x86.avx512.mask.cvtpd2uqq.512
return _mm512_mask_cvt_roundpd_epu64(__W, __U, __A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512i test_mm512_maskz_cvt_roundpd_epu64(__mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvt_roundpd_epu64
+ // CHECK-LABEL: test_mm512_maskz_cvt_roundpd_epu64
// CHECK: @llvm.x86.avx512.mask.cvtpd2uqq.512
return _mm512_maskz_cvt_roundpd_epu64(__U, __A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512i test_mm512_cvtps_epi64(__m256 __A) {
- // CHECK-LABEL: @test_mm512_cvtps_epi64
+ // CHECK-LABEL: test_mm512_cvtps_epi64
// CHECK: @llvm.x86.avx512.mask.cvtps2qq.512
return _mm512_cvtps_epi64(__A);
}
__m512i test_mm512_mask_cvtps_epi64(__m512i __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm512_mask_cvtps_epi64
+ // CHECK-LABEL: test_mm512_mask_cvtps_epi64
// CHECK: @llvm.x86.avx512.mask.cvtps2qq.512
return _mm512_mask_cvtps_epi64(__W, __U, __A);
}
__m512i test_mm512_maskz_cvtps_epi64(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvtps_epi64
+ // CHECK-LABEL: test_mm512_maskz_cvtps_epi64
// CHECK: @llvm.x86.avx512.mask.cvtps2qq.512
return _mm512_maskz_cvtps_epi64(__U, __A);
}
__m512i test_mm512_cvt_roundps_epi64(__m256 __A) {
- // CHECK-LABEL: @test_mm512_cvt_roundps_epi64
+ // CHECK-LABEL: test_mm512_cvt_roundps_epi64
// CHECK: @llvm.x86.avx512.mask.cvtps2qq.512
return _mm512_cvt_roundps_epi64(__A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512i test_mm512_mask_cvt_roundps_epi64(__m512i __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm512_mask_cvt_roundps_epi64
+ // CHECK-LABEL: test_mm512_mask_cvt_roundps_epi64
// CHECK: @llvm.x86.avx512.mask.cvtps2qq.512
return _mm512_mask_cvt_roundps_epi64(__W, __U, __A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512i test_mm512_maskz_cvt_roundps_epi64(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvt_roundps_epi64
+ // CHECK-LABEL: test_mm512_maskz_cvt_roundps_epi64
// CHECK: @llvm.x86.avx512.mask.cvtps2qq.512
return _mm512_maskz_cvt_roundps_epi64(__U, __A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512i test_mm512_cvtps_epu64(__m256 __A) {
- // CHECK-LABEL: @test_mm512_cvtps_epu64
+ // CHECK-LABEL: test_mm512_cvtps_epu64
// CHECK: @llvm.x86.avx512.mask.cvtps2uqq.512
return _mm512_cvtps_epu64(__A);
}
__m512i test_mm512_mask_cvtps_epu64(__m512i __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm512_mask_cvtps_epu64
+ // CHECK-LABEL: test_mm512_mask_cvtps_epu64
// CHECK: @llvm.x86.avx512.mask.cvtps2uqq.512
return _mm512_mask_cvtps_epu64(__W, __U, __A);
}
__m512i test_mm512_maskz_cvtps_epu64(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvtps_epu64
+ // CHECK-LABEL: test_mm512_maskz_cvtps_epu64
// CHECK: @llvm.x86.avx512.mask.cvtps2uqq.512
return _mm512_maskz_cvtps_epu64(__U, __A);
}
__m512i test_mm512_cvt_roundps_epu64(__m256 __A) {
- // CHECK-LABEL: @test_mm512_cvt_roundps_epu64
+ // CHECK-LABEL: test_mm512_cvt_roundps_epu64
// CHECK: @llvm.x86.avx512.mask.cvtps2uqq.512
return _mm512_cvt_roundps_epu64(__A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512i test_mm512_mask_cvt_roundps_epu64(__m512i __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm512_mask_cvt_roundps_epu64
+ // CHECK-LABEL: test_mm512_mask_cvt_roundps_epu64
// CHECK: @llvm.x86.avx512.mask.cvtps2uqq.512
return _mm512_mask_cvt_roundps_epu64(__W, __U, __A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512i test_mm512_maskz_cvt_roundps_epu64(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvt_roundps_epu64
+ // CHECK-LABEL: test_mm512_maskz_cvt_roundps_epu64
// CHECK: @llvm.x86.avx512.mask.cvtps2uqq.512
return _mm512_maskz_cvt_roundps_epu64(__U, __A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512d test_mm512_cvtepi64_pd(__m512i __A) {
- // CHECK-LABEL: @test_mm512_cvtepi64_pd
+ // CHECK-LABEL: test_mm512_cvtepi64_pd
// CHECK: sitofp <8 x i64> %{{.*}} to <8 x double>
return _mm512_cvtepi64_pd(__A);
}
__m512d test_mm512_mask_cvtepi64_pd(__m512d __W, __mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_mask_cvtepi64_pd
+ // CHECK-LABEL: test_mm512_mask_cvtepi64_pd
// CHECK: sitofp <8 x i64> %{{.*}} to <8 x double>
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_mask_cvtepi64_pd(__W, __U, __A);
}
__m512d test_mm512_maskz_cvtepi64_pd(__mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvtepi64_pd
+ // CHECK-LABEL: test_mm512_maskz_cvtepi64_pd
// CHECK: sitofp <8 x i64> %{{.*}} to <8 x double>
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_maskz_cvtepi64_pd(__U, __A);
}
__m512d test_mm512_cvt_roundepi64_pd(__m512i __A) {
- // CHECK-LABEL: @test_mm512_cvt_roundepi64_pd
+ // CHECK-LABEL: test_mm512_cvt_roundepi64_pd
// CHECK: @llvm.x86.avx512.sitofp.round.v8f64.v8i64
return _mm512_cvt_roundepi64_pd(__A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512d test_mm512_mask_cvt_roundepi64_pd(__m512d __W, __mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_mask_cvt_roundepi64_pd
+ // CHECK-LABEL: test_mm512_mask_cvt_roundepi64_pd
// CHECK: @llvm.x86.avx512.sitofp.round.v8f64.v8i64
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_mask_cvt_roundepi64_pd(__W, __U, __A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512d test_mm512_maskz_cvt_roundepi64_pd(__mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvt_roundepi64_pd
+ // CHECK-LABEL: test_mm512_maskz_cvt_roundepi64_pd
// CHECK: @llvm.x86.avx512.sitofp.round.v8f64.v8i64
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_maskz_cvt_roundepi64_pd(__U, __A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m256 test_mm512_cvtepi64_ps(__m512i __A) {
- // CHECK-LABEL: @test_mm512_cvtepi64_ps
+ // CHECK-LABEL: test_mm512_cvtepi64_ps
// CHECK: sitofp <8 x i64> %{{.*}} to <8 x float>
return _mm512_cvtepi64_ps(__A);
}
__m256 test_mm512_mask_cvtepi64_ps(__m256 __W, __mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_mask_cvtepi64_ps
+ // CHECK-LABEL: test_mm512_mask_cvtepi64_ps
// CHECK: sitofp <8 x i64> %{{.*}} to <8 x float>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm512_mask_cvtepi64_ps(__W, __U, __A);
}
__m256 test_mm512_maskz_cvtepi64_ps(__mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvtepi64_ps
+ // CHECK-LABEL: test_mm512_maskz_cvtepi64_ps
// CHECK: sitofp <8 x i64> %{{.*}} to <8 x float>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm512_maskz_cvtepi64_ps(__U, __A);
}
__m256 test_mm512_cvt_roundepi64_ps(__m512i __A) {
- // CHECK-LABEL: @test_mm512_cvt_roundepi64_ps
+ // CHECK-LABEL: test_mm512_cvt_roundepi64_ps
// CHECK: @llvm.x86.avx512.sitofp.round.v8f32.v8i64
return _mm512_cvt_roundepi64_ps(__A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m256 test_mm512_mask_cvt_roundepi64_ps(__m256 __W, __mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_mask_cvt_roundepi64_ps
+ // CHECK-LABEL: test_mm512_mask_cvt_roundepi64_ps
// CHECK: @llvm.x86.avx512.sitofp.round.v8f32.v8i64
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm512_mask_cvt_roundepi64_ps(__W, __U, __A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m256 test_mm512_maskz_cvt_roundepi64_ps(__mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvt_roundepi64_ps
+ // CHECK-LABEL: test_mm512_maskz_cvt_roundepi64_ps
// CHECK: @llvm.x86.avx512.sitofp.round.v8f32.v8i64
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm512_maskz_cvt_roundepi64_ps(__U, __A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512i test_mm512_cvttpd_epi64(__m512d __A) {
- // CHECK-LABEL: @test_mm512_cvttpd_epi64
+ // CHECK-LABEL: test_mm512_cvttpd_epi64
// CHECK: @llvm.x86.avx512.mask.cvttpd2qq.512
return _mm512_cvttpd_epi64(__A);
}
__m512i test_mm512_mask_cvttpd_epi64(__m512i __W, __mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_mask_cvttpd_epi64
+ // CHECK-LABEL: test_mm512_mask_cvttpd_epi64
// CHECK: @llvm.x86.avx512.mask.cvttpd2qq.512
return _mm512_mask_cvttpd_epi64(__W, __U, __A);
}
__m512i test_mm512_maskz_cvttpd_epi64(__mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvttpd_epi64
+ // CHECK-LABEL: test_mm512_maskz_cvttpd_epi64
// CHECK: @llvm.x86.avx512.mask.cvttpd2qq.512
return _mm512_maskz_cvttpd_epi64(__U, __A);
}
__m512i test_mm512_cvtt_roundpd_epi64(__m512d __A) {
- // CHECK-LABEL: @test_mm512_cvtt_roundpd_epi64
+ // CHECK-LABEL: test_mm512_cvtt_roundpd_epi64
// CHECK: @llvm.x86.avx512.mask.cvttpd2qq.512
return _mm512_cvtt_roundpd_epi64(__A, _MM_FROUND_NO_EXC);
}
__m512i test_mm512_mask_cvtt_roundpd_epi64(__m512i __W, __mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_mask_cvtt_roundpd_epi64
+ // CHECK-LABEL: test_mm512_mask_cvtt_roundpd_epi64
// CHECK: @llvm.x86.avx512.mask.cvttpd2qq.512
return _mm512_mask_cvtt_roundpd_epi64(__W, __U, __A, _MM_FROUND_NO_EXC);
}
__m512i test_mm512_maskz_cvtt_roundpd_epi64(__mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvtt_roundpd_epi64
+ // CHECK-LABEL: test_mm512_maskz_cvtt_roundpd_epi64
// CHECK: @llvm.x86.avx512.mask.cvttpd2qq.512
return _mm512_maskz_cvtt_roundpd_epi64(__U, __A, _MM_FROUND_NO_EXC);
}
__m512i test_mm512_cvttpd_epu64(__m512d __A) {
- // CHECK-LABEL: @test_mm512_cvttpd_epu64
+ // CHECK-LABEL: test_mm512_cvttpd_epu64
// CHECK: @llvm.x86.avx512.mask.cvttpd2uqq.512
return _mm512_cvttpd_epu64(__A);
}
__m512i test_mm512_mask_cvttpd_epu64(__m512i __W, __mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_mask_cvttpd_epu64
+ // CHECK-LABEL: test_mm512_mask_cvttpd_epu64
// CHECK: @llvm.x86.avx512.mask.cvttpd2uqq.512
return _mm512_mask_cvttpd_epu64(__W, __U, __A);
}
__m512i test_mm512_maskz_cvttpd_epu64(__mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvttpd_epu64
+ // CHECK-LABEL: test_mm512_maskz_cvttpd_epu64
// CHECK: @llvm.x86.avx512.mask.cvttpd2uqq.512
return _mm512_maskz_cvttpd_epu64(__U, __A);
}
__m512i test_mm512_cvtt_roundpd_epu64(__m512d __A) {
- // CHECK-LABEL: @test_mm512_cvtt_roundpd_epu64
+ // CHECK-LABEL: test_mm512_cvtt_roundpd_epu64
// CHECK: @llvm.x86.avx512.mask.cvttpd2uqq.512
return _mm512_cvtt_roundpd_epu64(__A, _MM_FROUND_NO_EXC);
}
__m512i test_mm512_mask_cvtt_roundpd_epu64(__m512i __W, __mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_mask_cvtt_roundpd_epu64
+ // CHECK-LABEL: test_mm512_mask_cvtt_roundpd_epu64
// CHECK: @llvm.x86.avx512.mask.cvttpd2uqq.512
return _mm512_mask_cvtt_roundpd_epu64(__W, __U, __A, _MM_FROUND_NO_EXC);
}
__m512i test_mm512_maskz_cvtt_roundpd_epu64(__mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvtt_roundpd_epu64
+ // CHECK-LABEL: test_mm512_maskz_cvtt_roundpd_epu64
// CHECK: @llvm.x86.avx512.mask.cvttpd2uqq.512
return _mm512_maskz_cvtt_roundpd_epu64(__U, __A, _MM_FROUND_NO_EXC);
}
__m512i test_mm512_cvttps_epi64(__m256 __A) {
- // CHECK-LABEL: @test_mm512_cvttps_epi64
+ // CHECK-LABEL: test_mm512_cvttps_epi64
// CHECK: @llvm.x86.avx512.mask.cvttps2qq.512
return _mm512_cvttps_epi64(__A);
}
__m512i test_mm512_mask_cvttps_epi64(__m512i __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm512_mask_cvttps_epi64
+ // CHECK-LABEL: test_mm512_mask_cvttps_epi64
// CHECK: @llvm.x86.avx512.mask.cvttps2qq.512
return _mm512_mask_cvttps_epi64(__W, __U, __A);
}
__m512i test_mm512_maskz_cvttps_epi64(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvttps_epi64
+ // CHECK-LABEL: test_mm512_maskz_cvttps_epi64
// CHECK: @llvm.x86.avx512.mask.cvttps2qq.512
return _mm512_maskz_cvttps_epi64(__U, __A);
}
__m512i test_mm512_cvtt_roundps_epi64(__m256 __A) {
- // CHECK-LABEL: @test_mm512_cvtt_roundps_epi64
+ // CHECK-LABEL: test_mm512_cvtt_roundps_epi64
// CHECK: @llvm.x86.avx512.mask.cvttps2qq.512
return _mm512_cvtt_roundps_epi64(__A, _MM_FROUND_NO_EXC);
}
__m512i test_mm512_mask_cvtt_roundps_epi64(__m512i __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm512_mask_cvtt_roundps_epi64
+ // CHECK-LABEL: test_mm512_mask_cvtt_roundps_epi64
// CHECK: @llvm.x86.avx512.mask.cvttps2qq.512
return _mm512_mask_cvtt_roundps_epi64(__W, __U, __A, _MM_FROUND_NO_EXC);
}
__m512i test_mm512_maskz_cvtt_roundps_epi64(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvtt_roundps_epi64
+ // CHECK-LABEL: test_mm512_maskz_cvtt_roundps_epi64
// CHECK: @llvm.x86.avx512.mask.cvttps2qq.512
return _mm512_maskz_cvtt_roundps_epi64(__U, __A, _MM_FROUND_NO_EXC);
}
__m512i test_mm512_cvttps_epu64(__m256 __A) {
- // CHECK-LABEL: @test_mm512_cvttps_epu64
+ // CHECK-LABEL: test_mm512_cvttps_epu64
// CHECK: @llvm.x86.avx512.mask.cvttps2uqq.512
return _mm512_cvttps_epu64(__A);
}
__m512i test_mm512_mask_cvttps_epu64(__m512i __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm512_mask_cvttps_epu64
+ // CHECK-LABEL: test_mm512_mask_cvttps_epu64
// CHECK: @llvm.x86.avx512.mask.cvttps2uqq.512
return _mm512_mask_cvttps_epu64(__W, __U, __A);
}
__m512i test_mm512_maskz_cvttps_epu64(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvttps_epu64
+ // CHECK-LABEL: test_mm512_maskz_cvttps_epu64
// CHECK: @llvm.x86.avx512.mask.cvttps2uqq.512
return _mm512_maskz_cvttps_epu64(__U, __A);
}
__m512i test_mm512_cvtt_roundps_epu64(__m256 __A) {
- // CHECK-LABEL: @test_mm512_cvtt_roundps_epu64
+ // CHECK-LABEL: test_mm512_cvtt_roundps_epu64
// CHECK: @llvm.x86.avx512.mask.cvttps2uqq.512
return _mm512_cvtt_roundps_epu64(__A, _MM_FROUND_NO_EXC);
}
__m512i test_mm512_mask_cvtt_roundps_epu64(__m512i __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm512_mask_cvtt_roundps_epu64
+ // CHECK-LABEL: test_mm512_mask_cvtt_roundps_epu64
// CHECK: @llvm.x86.avx512.mask.cvttps2uqq.512
return _mm512_mask_cvtt_roundps_epu64(__W, __U, __A, _MM_FROUND_NO_EXC);
}
__m512i test_mm512_maskz_cvtt_roundps_epu64(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvtt_roundps_epu64
+ // CHECK-LABEL: test_mm512_maskz_cvtt_roundps_epu64
// CHECK: @llvm.x86.avx512.mask.cvttps2uqq.512
return _mm512_maskz_cvtt_roundps_epu64(__U, __A, _MM_FROUND_NO_EXC);
}
__m512d test_mm512_cvtepu64_pd(__m512i __A) {
- // CHECK-LABEL: @test_mm512_cvtepu64_pd
+ // CHECK-LABEL: test_mm512_cvtepu64_pd
// CHECK: uitofp <8 x i64> %{{.*}} to <8 x double>
return _mm512_cvtepu64_pd(__A);
}
__m512d test_mm512_mask_cvtepu64_pd(__m512d __W, __mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_mask_cvtepu64_pd
+ // CHECK-LABEL: test_mm512_mask_cvtepu64_pd
// CHECK: uitofp <8 x i64> %{{.*}} to <8 x double>
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_mask_cvtepu64_pd(__W, __U, __A);
}
__m512d test_mm512_maskz_cvtepu64_pd(__mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvtepu64_pd
+ // CHECK-LABEL: test_mm512_maskz_cvtepu64_pd
// CHECK: uitofp <8 x i64> %{{.*}} to <8 x double>
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_maskz_cvtepu64_pd(__U, __A);
}
__m512d test_mm512_cvt_roundepu64_pd(__m512i __A) {
- // CHECK-LABEL: @test_mm512_cvt_roundepu64_pd
+ // CHECK-LABEL: test_mm512_cvt_roundepu64_pd
// CHECK: @llvm.x86.avx512.uitofp.round.v8f64.v8i64
return _mm512_cvt_roundepu64_pd(__A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512d test_mm512_mask_cvt_roundepu64_pd(__m512d __W, __mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_mask_cvt_roundepu64_pd
+ // CHECK-LABEL: test_mm512_mask_cvt_roundepu64_pd
// CHECK: @llvm.x86.avx512.uitofp.round.v8f64.v8i64
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_mask_cvt_roundepu64_pd(__W, __U, __A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512d test_mm512_maskz_cvt_roundepu64_pd(__mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvt_roundepu64_pd
+ // CHECK-LABEL: test_mm512_maskz_cvt_roundepu64_pd
// CHECK: @llvm.x86.avx512.uitofp.round.v8f64.v8i64
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_maskz_cvt_roundepu64_pd(__U, __A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m256 test_mm512_cvtepu64_ps(__m512i __A) {
- // CHECK-LABEL: @test_mm512_cvtepu64_ps
+ // CHECK-LABEL: test_mm512_cvtepu64_ps
// CHECK: uitofp <8 x i64> %{{.*}} to <8 x float>
return _mm512_cvtepu64_ps(__A);
}
__m256 test_mm512_mask_cvtepu64_ps(__m256 __W, __mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_mask_cvtepu64_ps
+ // CHECK-LABEL: test_mm512_mask_cvtepu64_ps
// CHECK: uitofp <8 x i64> %{{.*}} to <8 x float>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm512_mask_cvtepu64_ps(__W, __U, __A);
}
__m256 test_mm512_maskz_cvtepu64_ps(__mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvtepu64_ps
+ // CHECK-LABEL: test_mm512_maskz_cvtepu64_ps
// CHECK: uitofp <8 x i64> %{{.*}} to <8 x float>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm512_maskz_cvtepu64_ps(__U, __A);
}
__m256 test_mm512_cvt_roundepu64_ps(__m512i __A) {
- // CHECK-LABEL: @test_mm512_cvt_roundepu64_ps
+ // CHECK-LABEL: test_mm512_cvt_roundepu64_ps
// CHECK: @llvm.x86.avx512.uitofp.round.v8f32.v8i64
return _mm512_cvt_roundepu64_ps(__A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m256 test_mm512_mask_cvt_roundepu64_ps(__m256 __W, __mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_mask_cvt_roundepu64_ps
+ // CHECK-LABEL: test_mm512_mask_cvt_roundepu64_ps
// CHECK: @llvm.x86.avx512.uitofp.round.v8f32.v8i64
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm512_mask_cvt_roundepu64_ps(__W, __U, __A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m256 test_mm512_maskz_cvt_roundepu64_ps(__mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_maskz_cvt_roundepu64_ps
+ // CHECK-LABEL: test_mm512_maskz_cvt_roundepu64_ps
// CHECK: @llvm.x86.avx512.uitofp.round.v8f32.v8i64
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm512_maskz_cvt_roundepu64_ps(__U, __A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512d test_mm512_range_pd(__m512d __A, __m512d __B) {
- // CHECK-LABEL: @test_mm512_range_pd
+ // CHECK-LABEL: test_mm512_range_pd
// CHECK: @llvm.x86.avx512.mask.range.pd.512
return _mm512_range_pd(__A, __B, 4);
}
__m512d test_mm512_mask_range_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
- // CHECK-LABEL: @test_mm512_mask_range_pd
+ // CHECK-LABEL: test_mm512_mask_range_pd
// CHECK: @llvm.x86.avx512.mask.range.pd.512
return _mm512_mask_range_pd(__W, __U, __A, __B, 4);
}
__m512d test_mm512_maskz_range_pd(__mmask8 __U, __m512d __A, __m512d __B) {
- // CHECK-LABEL: @test_mm512_maskz_range_pd
+ // CHECK-LABEL: test_mm512_maskz_range_pd
// CHECK: @llvm.x86.avx512.mask.range.pd.512
return _mm512_maskz_range_pd(__U, __A, __B, 4);
}
__m512d test_mm512_range_round_pd(__m512d __A, __m512d __B) {
- // CHECK-LABEL: @test_mm512_range_round_pd
+ // CHECK-LABEL: test_mm512_range_round_pd
// CHECK: @llvm.x86.avx512.mask.range.pd.512
return _mm512_range_round_pd(__A, __B, 4, 8);
}
__m512d test_mm512_mask_range_round_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
- // CHECK-LABEL: @test_mm512_mask_range_round_pd
+ // CHECK-LABEL: test_mm512_mask_range_round_pd
// CHECK: @llvm.x86.avx512.mask.range.pd.512
return _mm512_mask_range_round_pd(__W, __U, __A, __B, 4, 8);
}
__m512d test_mm512_maskz_range_round_pd(__mmask8 __U, __m512d __A, __m512d __B) {
- // CHECK-LABEL: @test_mm512_maskz_range_round_pd
+ // CHECK-LABEL: test_mm512_maskz_range_round_pd
// CHECK: @llvm.x86.avx512.mask.range.pd.512
return _mm512_maskz_range_round_pd(__U, __A, __B, 4, 8);
}
__m128d test_mm512_range_round_sd(__m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm512_range_round_sd
+ // CHECK-LABEL: test_mm512_range_round_sd
// CHECK: @llvm.x86.avx512.mask.range.sd
return _mm_range_round_sd(__A, __B, 4, 8);
}
@@ -929,31 +941,31 @@ __m128d test_mm512_mask_range_round_sd(__m128d __W, __mmask8 __U, __m128d __A, _
}
__m128d test_mm512_maskz_range_round_sd(__mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm512_maskz_range_round_sd
+ // CHECK-LABEL: test_mm512_maskz_range_round_sd
// CHECK: @llvm.x86.avx512.mask.range.sd
return _mm_maskz_range_round_sd(__U, __A, __B, 4, 8);
}
__m128 test_mm512_range_round_ss(__m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm512_range_round_ss
+ // CHECK-LABEL: test_mm512_range_round_ss
// CHECK: @llvm.x86.avx512.mask.range.ss
return _mm_range_round_ss(__A, __B, 4, 8);
}
__m128 test_mm512_mask_range_round_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm512_mask_range_round_ss
+ // CHECK-LABEL: test_mm512_mask_range_round_ss
// CHECK: @llvm.x86.avx512.mask.range.ss
return _mm_mask_range_round_ss(__W, __U, __A, __B, 4, 8);
}
__m128 test_mm512_maskz_range_round_ss(__mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm512_maskz_range_round_ss
+ // CHECK-LABEL: test_mm512_maskz_range_round_ss
// CHECK: @llvm.x86.avx512.mask.range.ss
return _mm_maskz_range_round_ss(__U, __A, __B, 4, 8);
}
__m128d test_mm_range_sd(__m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_range_sd
+ // CHECK-LABEL: test_mm_range_sd
// CHECK: @llvm.x86.avx512.mask.range.sd
return _mm_range_sd(__A, __B, 4);
}
@@ -965,558 +977,558 @@ __m128d test_mm_mask_range_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __
}
__m128d test_mm_maskz_range_sd(__mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_maskz_range_sd
+ // CHECK-LABEL: test_mm_maskz_range_sd
// CHECK: @llvm.x86.avx512.mask.range.sd
return _mm_maskz_range_sd(__U, __A, __B, 4);
}
__m128 test_mm_range_ss(__m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_range_ss
+ // CHECK-LABEL: test_mm_range_ss
// CHECK: @llvm.x86.avx512.mask.range.ss
return _mm_range_ss(__A, __B, 4);
}
__m128 test_mm_mask_range_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_mask_range_ss
+ // CHECK-LABEL: test_mm_mask_range_ss
// CHECK: @llvm.x86.avx512.mask.range.ss
return _mm_mask_range_ss(__W, __U, __A, __B, 4);
}
__m128 test_mm_maskz_range_ss(__mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_maskz_range_ss
+ // CHECK-LABEL: test_mm_maskz_range_ss
// CHECK: @llvm.x86.avx512.mask.range.ss
return _mm_maskz_range_ss(__U, __A, __B, 4);
}
__m512 test_mm512_range_ps(__m512 __A, __m512 __B) {
- // CHECK-LABEL: @test_mm512_range_ps
+ // CHECK-LABEL: test_mm512_range_ps
// CHECK: @llvm.x86.avx512.mask.range.ps.512
return _mm512_range_ps(__A, __B, 4);
}
__m512 test_mm512_mask_range_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
- // CHECK-LABEL: @test_mm512_mask_range_ps
+ // CHECK-LABEL: test_mm512_mask_range_ps
// CHECK: @llvm.x86.avx512.mask.range.ps.512
return _mm512_mask_range_ps(__W, __U, __A, __B, 4);
}
__m512 test_mm512_maskz_range_ps(__mmask16 __U, __m512 __A, __m512 __B) {
- // CHECK-LABEL: @test_mm512_maskz_range_ps
+ // CHECK-LABEL: test_mm512_maskz_range_ps
// CHECK: @llvm.x86.avx512.mask.range.ps.512
return _mm512_maskz_range_ps(__U, __A, __B, 4);
}
__m512 test_mm512_range_round_ps(__m512 __A, __m512 __B) {
- // CHECK-LABEL: @test_mm512_range_round_ps
+ // CHECK-LABEL: test_mm512_range_round_ps
// CHECK: @llvm.x86.avx512.mask.range.ps.512
return _mm512_range_round_ps(__A, __B, 4, 8);
}
__m512 test_mm512_mask_range_round_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
- // CHECK-LABEL: @test_mm512_mask_range_round_ps
+ // CHECK-LABEL: test_mm512_mask_range_round_ps
// CHECK: @llvm.x86.avx512.mask.range.ps.512
return _mm512_mask_range_round_ps(__W, __U, __A, __B, 4, 8);
}
__m512 test_mm512_maskz_range_round_ps(__mmask16 __U, __m512 __A, __m512 __B) {
- // CHECK-LABEL: @test_mm512_maskz_range_round_ps
+ // CHECK-LABEL: test_mm512_maskz_range_round_ps
// CHECK: @llvm.x86.avx512.mask.range.ps.512
return _mm512_maskz_range_round_ps(__U, __A, __B, 4, 8);
}
__m512d test_mm512_reduce_pd(__m512d __A) {
- // CHECK-LABEL: @test_mm512_reduce_pd
+ // CHECK-LABEL: test_mm512_reduce_pd
// CHECK: @llvm.x86.avx512.mask.reduce.pd.512
return _mm512_reduce_pd(__A, 4);
}
__m512d test_mm512_mask_reduce_pd(__m512d __W, __mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_mask_reduce_pd
+ // CHECK-LABEL: test_mm512_mask_reduce_pd
// CHECK: @llvm.x86.avx512.mask.reduce.pd.512
return _mm512_mask_reduce_pd(__W, __U, __A, 4);
}
__m512d test_mm512_maskz_reduce_pd(__mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_maskz_reduce_pd
+ // CHECK-LABEL: test_mm512_maskz_reduce_pd
// CHECK: @llvm.x86.avx512.mask.reduce.pd.512
return _mm512_maskz_reduce_pd(__U, __A, 4);
}
__m512 test_mm512_reduce_ps(__m512 __A) {
- // CHECK-LABEL: @test_mm512_reduce_ps
+ // CHECK-LABEL: test_mm512_reduce_ps
// CHECK: @llvm.x86.avx512.mask.reduce.ps.512
return _mm512_reduce_ps(__A, 4);
}
__m512 test_mm512_mask_reduce_ps(__m512 __W, __mmask16 __U, __m512 __A) {
- // CHECK-LABEL: @test_mm512_mask_reduce_ps
+ // CHECK-LABEL: test_mm512_mask_reduce_ps
// CHECK: @llvm.x86.avx512.mask.reduce.ps.512
return _mm512_mask_reduce_ps(__W, __U, __A, 4);
}
__m512 test_mm512_maskz_reduce_ps(__mmask16 __U, __m512 __A) {
- // CHECK-LABEL: @test_mm512_maskz_reduce_ps
+ // CHECK-LABEL: test_mm512_maskz_reduce_ps
// CHECK: @llvm.x86.avx512.mask.reduce.ps.512
return _mm512_maskz_reduce_ps(__U, __A, 4);
}
__m512d test_mm512_reduce_round_pd(__m512d __A) {
- // CHECK-LABEL: @test_mm512_reduce_round_pd
+ // CHECK-LABEL: test_mm512_reduce_round_pd
// CHECK: @llvm.x86.avx512.mask.reduce.pd.512
return _mm512_reduce_round_pd(__A, 4, 8);
}
__m512d test_mm512_mask_reduce_round_pd(__m512d __W, __mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_mask_reduce_round_pd
+ // CHECK-LABEL: test_mm512_mask_reduce_round_pd
// CHECK: @llvm.x86.avx512.mask.reduce.pd.512
return _mm512_mask_reduce_round_pd(__W, __U, __A, 4, 8);
}
__m512d test_mm512_maskz_reduce_round_pd(__mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_maskz_reduce_round_pd
+ // CHECK-LABEL: test_mm512_maskz_reduce_round_pd
// CHECK: @llvm.x86.avx512.mask.reduce.pd.512
return _mm512_maskz_reduce_round_pd(__U, __A, 4, 8);
}
__m512 test_mm512_reduce_round_ps(__m512 __A) {
- // CHECK-LABEL: @test_mm512_reduce_round_ps
+ // CHECK-LABEL: test_mm512_reduce_round_ps
// CHECK: @llvm.x86.avx512.mask.reduce.ps.512
return _mm512_reduce_round_ps(__A, 4, 8);
}
__m512 test_mm512_mask_reduce_round_ps(__m512 __W, __mmask16 __U, __m512 __A) {
- // CHECK-LABEL: @test_mm512_mask_reduce_round_ps
+ // CHECK-LABEL: test_mm512_mask_reduce_round_ps
// CHECK: @llvm.x86.avx512.mask.reduce.ps.512
return _mm512_mask_reduce_round_ps(__W, __U, __A, 4, 8);
}
__m512 test_mm512_maskz_reduce_round_ps(__mmask16 __U, __m512 __A) {
- // CHECK-LABEL: @test_mm512_maskz_reduce_round_ps
+ // CHECK-LABEL: test_mm512_maskz_reduce_round_ps
// CHECK: @llvm.x86.avx512.mask.reduce.ps.512
return _mm512_maskz_reduce_round_ps(__U, __A, 4, 8);
}
__m128 test_mm_reduce_ss(__m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_reduce_ss
+ // CHECK-LABEL: test_mm_reduce_ss
// CHECK: @llvm.x86.avx512.mask.reduce.ss
return _mm_reduce_ss(__A, __B, 4);
}
__m128 test_mm_mask_reduce_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_mask_reduce_ss
+ // CHECK-LABEL: test_mm_mask_reduce_ss
// CHECK: @llvm.x86.avx512.mask.reduce.ss
return _mm_mask_reduce_ss(__W, __U, __A, __B, 4);
}
__m128 test_mm_maskz_reduce_ss(__mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_maskz_reduce_ss
+ // CHECK-LABEL: test_mm_maskz_reduce_ss
// CHECK: @llvm.x86.avx512.mask.reduce.ss
return _mm_maskz_reduce_ss(__U, __A, __B, 4);
}
__m128 test_mm_reduce_round_ss(__m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_reduce_round_ss
+ // CHECK-LABEL: test_mm_reduce_round_ss
// CHECK: @llvm.x86.avx512.mask.reduce.ss
return _mm_reduce_round_ss(__A, __B, 4, 8);
}
__m128 test_mm_mask_reduce_round_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_mask_reduce_round_ss
+ // CHECK-LABEL: test_mm_mask_reduce_round_ss
// CHECK: @llvm.x86.avx512.mask.reduce.ss
return _mm_mask_reduce_round_ss(__W, __U, __A, __B, 4, 8);
}
__m128 test_mm_maskz_reduce_round_ss(__mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_maskz_reduce_round_ss
+ // CHECK-LABEL: test_mm_maskz_reduce_round_ss
// CHECK: @llvm.x86.avx512.mask.reduce.ss
return _mm_maskz_reduce_round_ss(__U, __A, __B, 4, 8);
}
__m128d test_mm_reduce_sd(__m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_reduce_sd
+ // CHECK-LABEL: test_mm_reduce_sd
// CHECK: @llvm.x86.avx512.mask.reduce.sd
return _mm_reduce_sd(__A, __B, 4);
}
__m128d test_mm_mask_reduce_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_mask_reduce_sd
+ // CHECK-LABEL: test_mm_mask_reduce_sd
// CHECK: @llvm.x86.avx512.mask.reduce.sd
return _mm_mask_reduce_sd(__W, __U, __A, __B, 4);
}
__m128d test_mm_maskz_reduce_sd(__mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_maskz_reduce_sd
+ // CHECK-LABEL: test_mm_maskz_reduce_sd
// CHECK: @llvm.x86.avx512.mask.reduce.sd
return _mm_maskz_reduce_sd(__U, __A, __B, 4);
}
__m128d test_mm_reduce_round_sd(__m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_reduce_round_sd
+ // CHECK-LABEL: test_mm_reduce_round_sd
// CHECK: @llvm.x86.avx512.mask.reduce.sd
return _mm_reduce_round_sd(__A, __B, 4, 8);
}
__m128d test_mm_mask_reduce_round_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_mask_reduce_round_sd
+ // CHECK-LABEL: test_mm_mask_reduce_round_sd
// CHECK: @llvm.x86.avx512.mask.reduce.sd
return _mm_mask_reduce_round_sd(__W, __U, __A, __B, 4, 8);
}
__m128d test_mm_maskz_reduce_round_sd(__mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_maskz_reduce_round_sd
+ // CHECK-LABEL: test_mm_maskz_reduce_round_sd
// CHECK: @llvm.x86.avx512.mask.reduce.sd
return _mm_maskz_reduce_round_sd(__U, __A, __B, 4, 8);
}
__mmask16 test_mm512_movepi32_mask(__m512i __A) {
- // CHECK-LABEL: @test_mm512_movepi32_mask
+ // CHECK-LABEL: test_mm512_movepi32_mask
// CHECK: [[CMP:%.*]] = icmp slt <16 x i32> %{{.*}}, zeroinitializer
return _mm512_movepi32_mask(__A);
}
__m512i test_mm512_movm_epi32(__mmask16 __A) {
- // CHECK-LABEL: @test_mm512_movm_epi32
+ // CHECK-LABEL: test_mm512_movm_epi32
// CHECK: %{{.*}} = bitcast i16 %{{.*}} to <16 x i1>
// CHECK: %vpmovm2.i = sext <16 x i1> %{{.*}} to <16 x i32>
return _mm512_movm_epi32(__A);
}
__m512i test_mm512_movm_epi64(__mmask8 __A) {
- // CHECK-LABEL: @test_mm512_movm_epi64
+ // CHECK-LABEL: test_mm512_movm_epi64
// CHECK: %{{.*}} = bitcast i8 %{{.*}} to <8 x i1>
// CHECK: %vpmovm2.i = sext <8 x i1> %{{.*}} to <8 x i64>
return _mm512_movm_epi64(__A);
}
__mmask8 test_mm512_movepi64_mask(__m512i __A) {
- // CHECK-LABEL: @test_mm512_movepi64_mask
+ // CHECK-LABEL: test_mm512_movepi64_mask
// CHECK: [[CMP:%.*]] = icmp slt <8 x i64> %{{.*}}, zeroinitializer
return _mm512_movepi64_mask(__A);
}
__m512 test_mm512_broadcast_f32x2(__m128 __A) {
- // CHECK-LABEL: @test_mm512_broadcast_f32x2
+ // CHECK-LABEL: test_mm512_broadcast_f32x2
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
return _mm512_broadcast_f32x2(__A);
}
__m512 test_mm512_mask_broadcast_f32x2(__m512 __O, __mmask16 __M, __m128 __A) {
- // CHECK-LABEL: @test_mm512_mask_broadcast_f32x2
+ // CHECK-LABEL: test_mm512_mask_broadcast_f32x2
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
// CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_mask_broadcast_f32x2(__O, __M, __A);
}
__m512 test_mm512_maskz_broadcast_f32x2(__mmask16 __M, __m128 __A) {
- // CHECK-LABEL: @test_mm512_maskz_broadcast_f32x2
+ // CHECK-LABEL: test_mm512_maskz_broadcast_f32x2
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
// CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_maskz_broadcast_f32x2(__M, __A);
}
__m512 test_mm512_broadcast_f32x8(float const* __A) {
- // CHECK-LABEL: @test_mm512_broadcast_f32x8
+ // CHECK-LABEL: test_mm512_broadcast_f32x8
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
return _mm512_broadcast_f32x8(_mm256_loadu_ps(__A));
}
__m512 test_mm512_mask_broadcast_f32x8(__m512 __O, __mmask16 __M, float const* __A) {
- // CHECK-LABEL: @test_mm512_mask_broadcast_f32x8
+ // CHECK-LABEL: test_mm512_mask_broadcast_f32x8
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
// CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_mask_broadcast_f32x8(__O, __M, _mm256_loadu_ps(__A));
}
__m512 test_mm512_maskz_broadcast_f32x8(__mmask16 __M, float const* __A) {
- // CHECK-LABEL: @test_mm512_maskz_broadcast_f32x8
+ // CHECK-LABEL: test_mm512_maskz_broadcast_f32x8
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
// CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_maskz_broadcast_f32x8(__M, _mm256_loadu_ps(__A));
}
__m512d test_mm512_broadcast_f64x2(double const* __A) {
- // CHECK-LABEL: @test_mm512_broadcast_f64x2
+ // CHECK-LABEL: test_mm512_broadcast_f64x2
// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
return _mm512_broadcast_f64x2(_mm_loadu_pd(__A));
}
__m512d test_mm512_mask_broadcast_f64x2(__m512d __O, __mmask8 __M, double const* __A) {
- // CHECK-LABEL: @test_mm512_mask_broadcast_f64x2
+ // CHECK-LABEL: test_mm512_mask_broadcast_f64x2
// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_mask_broadcast_f64x2(__O, __M, _mm_loadu_pd(__A));
}
__m512d test_mm512_maskz_broadcast_f64x2(__mmask8 __M, double const* __A) {
- // CHECK-LABEL: @test_mm512_maskz_broadcast_f64x2
+ // CHECK-LABEL: test_mm512_maskz_broadcast_f64x2
// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_maskz_broadcast_f64x2(__M, _mm_loadu_pd(__A));
}
__m512i test_mm512_broadcast_i32x2(__m128i __A) {
- // CHECK-LABEL: @test_mm512_broadcast_i32x2
+ // CHECK-LABEL: test_mm512_broadcast_i32x2
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
return _mm512_broadcast_i32x2(__A);
}
__m512i test_mm512_mask_broadcast_i32x2(__m512i __O, __mmask16 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm512_mask_broadcast_i32x2
+ // CHECK-LABEL: test_mm512_mask_broadcast_i32x2
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_mask_broadcast_i32x2(__O, __M, __A);
}
__m512i test_mm512_maskz_broadcast_i32x2(__mmask16 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm512_maskz_broadcast_i32x2
+ // CHECK-LABEL: test_mm512_maskz_broadcast_i32x2
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_maskz_broadcast_i32x2(__M, __A);
}
__m512i test_mm512_broadcast_i32x8(__m256i const* __A) {
- // CHECK-LABEL: @test_mm512_broadcast_i32x8
+ // CHECK-LABEL: test_mm512_broadcast_i32x8
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
return _mm512_broadcast_i32x8(_mm256_loadu_si256(__A));
}
__m512i test_mm512_mask_broadcast_i32x8(__m512i __O, __mmask16 __M, __m256i const* __A) {
- // CHECK-LABEL: @test_mm512_mask_broadcast_i32x8
+ // CHECK-LABEL: test_mm512_mask_broadcast_i32x8
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_mask_broadcast_i32x8(__O, __M, _mm256_loadu_si256(__A));
}
__m512i test_mm512_maskz_broadcast_i32x8(__mmask16 __M, __m256i const* __A) {
- // CHECK-LABEL: @test_mm512_maskz_broadcast_i32x8
+ // CHECK-LABEL: test_mm512_maskz_broadcast_i32x8
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_maskz_broadcast_i32x8(__M, _mm256_loadu_si256(__A));
}
__m512i test_mm512_broadcast_i64x2(__m128i const* __A) {
- // CHECK-LABEL: @test_mm512_broadcast_i64x2
+ // CHECK-LABEL: test_mm512_broadcast_i64x2
// CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
return _mm512_broadcast_i64x2(_mm_loadu_si128(__A));
}
__m512i test_mm512_mask_broadcast_i64x2(__m512i __O, __mmask8 __M, __m128i const* __A) {
- // CHECK-LABEL: @test_mm512_mask_broadcast_i64x2
+ // CHECK-LABEL: test_mm512_mask_broadcast_i64x2
// CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_mask_broadcast_i64x2(__O, __M, _mm_loadu_si128(__A));
}
__m512i test_mm512_maskz_broadcast_i64x2(__mmask8 __M, __m128i const* __A) {
- // CHECK-LABEL: @test_mm512_maskz_broadcast_i64x2
+ // CHECK-LABEL: test_mm512_maskz_broadcast_i64x2
// CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_maskz_broadcast_i64x2(__M, _mm_loadu_si128(__A));
}
__m256 test_mm512_extractf32x8_ps(__m512 __A) {
- // CHECK-LABEL: @test_mm512_extractf32x8_ps
+ // CHECK-LABEL: test_mm512_extractf32x8_ps
// CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> poison, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
return _mm512_extractf32x8_ps(__A, 1);
}
__m256 test_mm512_mask_extractf32x8_ps(__m256 __W, __mmask8 __U, __m512 __A) {
- // CHECK-LABEL: @test_mm512_mask_extractf32x8_ps
+ // CHECK-LABEL: test_mm512_mask_extractf32x8_ps
// CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> poison, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm512_mask_extractf32x8_ps(__W, __U, __A, 1);
}
__m256 test_mm512_maskz_extractf32x8_ps(__mmask8 __U, __m512 __A) {
- // CHECK-LABEL: @test_mm512_maskz_extractf32x8_ps
+ // CHECK-LABEL: test_mm512_maskz_extractf32x8_ps
// CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> poison, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm512_maskz_extractf32x8_ps(__U, __A, 1);
}
__m128d test_mm512_extractf64x2_pd(__m512d __A) {
- // CHECK-LABEL: @test_mm512_extractf64x2_pd
+ // CHECK-LABEL: test_mm512_extractf64x2_pd
// CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> poison, <2 x i32> <i32 6, i32 7>
return _mm512_extractf64x2_pd(__A, 3);
}
__m128d test_mm512_mask_extractf64x2_pd(__m128d __W, __mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_mask_extractf64x2_pd
+ // CHECK-LABEL: test_mm512_mask_extractf64x2_pd
// CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> poison, <2 x i32> <i32 6, i32 7>
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm512_mask_extractf64x2_pd(__W, __U, __A, 3);
}
__m128d test_mm512_maskz_extractf64x2_pd(__mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_maskz_extractf64x2_pd
+ // CHECK-LABEL: test_mm512_maskz_extractf64x2_pd
// CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> poison, <2 x i32> <i32 6, i32 7>
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm512_maskz_extractf64x2_pd(__U, __A, 3);
}
__m256i test_mm512_extracti32x8_epi32(__m512i __A) {
- // CHECK-LABEL: @test_mm512_extracti32x8_epi32
+ // CHECK-LABEL: test_mm512_extracti32x8_epi32
// CHECK: shufflevector <16 x i32> %{{.*}}, <16 x i32> poison, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
return _mm512_extracti32x8_epi32(__A, 1);
}
__m256i test_mm512_mask_extracti32x8_epi32(__m256i __W, __mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_mask_extracti32x8_epi32
+ // CHECK-LABEL: test_mm512_mask_extracti32x8_epi32
// CHECK: shufflevector <16 x i32> %{{.*}}, <16 x i32> poison, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm512_mask_extracti32x8_epi32(__W, __U, __A, 1);
}
__m256i test_mm512_maskz_extracti32x8_epi32(__mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_maskz_extracti32x8_epi32
+ // CHECK-LABEL: test_mm512_maskz_extracti32x8_epi32
// CHECK: shufflevector <16 x i32> %{{.*}}, <16 x i32> poison, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm512_maskz_extracti32x8_epi32(__U, __A, 1);
}
__m128i test_mm512_extracti64x2_epi64(__m512i __A) {
- // CHECK-LABEL: @test_mm512_extracti64x2_epi64
+ // CHECK-LABEL: test_mm512_extracti64x2_epi64
// CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> poison, <2 x i32> <i32 6, i32 7>
return _mm512_extracti64x2_epi64(__A, 3);
}
__m128i test_mm512_mask_extracti64x2_epi64(__m128i __W, __mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_mask_extracti64x2_epi64
+ // CHECK-LABEL: test_mm512_mask_extracti64x2_epi64
// CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> poison, <2 x i32> <i32 6, i32 7>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm512_mask_extracti64x2_epi64(__W, __U, __A, 3);
}
__m128i test_mm512_maskz_extracti64x2_epi64(__mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_maskz_extracti64x2_epi64
+ // CHECK-LABEL: test_mm512_maskz_extracti64x2_epi64
// CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> poison, <2 x i32> <i32 6, i32 7>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm512_maskz_extracti64x2_epi64(__U, __A, 3);
}
__m512 test_mm512_insertf32x8(__m512 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm512_insertf32x8
+ // CHECK-LABEL: test_mm512_insertf32x8
// CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
return _mm512_insertf32x8(__A, __B, 1);
}
__m512 test_mm512_mask_insertf32x8(__m512 __W, __mmask16 __U, __m512 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm512_mask_insertf32x8
+ // CHECK-LABEL: test_mm512_mask_insertf32x8
// CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
// CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_mask_insertf32x8(__W, __U, __A, __B, 1);
}
__m512 test_mm512_maskz_insertf32x8(__mmask16 __U, __m512 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm512_maskz_insertf32x8
+ // CHECK-LABEL: test_mm512_maskz_insertf32x8
// CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
// CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_maskz_insertf32x8(__U, __A, __B, 1);
}
__m512d test_mm512_insertf64x2(__m512d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm512_insertf64x2
+ // CHECK-LABEL: test_mm512_insertf64x2
// CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 8, i32 9>
return _mm512_insertf64x2(__A, __B, 3);
}
__m512d test_mm512_mask_insertf64x2(__m512d __W, __mmask8 __U, __m512d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm512_mask_insertf64x2
+ // CHECK-LABEL: test_mm512_mask_insertf64x2
// CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 8, i32 9>
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_mask_insertf64x2(__W, __U, __A, __B, 3);
}
__m512d test_mm512_maskz_insertf64x2(__mmask8 __U, __m512d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm512_maskz_insertf64x2
+ // CHECK-LABEL: test_mm512_maskz_insertf64x2
// CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 8, i32 9>
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_maskz_insertf64x2(__U, __A, __B, 3);
}
__m512i test_mm512_inserti32x8(__m512i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm512_inserti32x8
+ // CHECK-LABEL: test_mm512_inserti32x8
// CHECK: shufflevector <16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
return _mm512_inserti32x8(__A, __B, 1);
}
__m512i test_mm512_mask_inserti32x8(__m512i __W, __mmask16 __U, __m512i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm512_mask_inserti32x8
+ // CHECK-LABEL: test_mm512_mask_inserti32x8
// CHECK: shufflevector <16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_mask_inserti32x8(__W, __U, __A, __B, 1);
}
__m512i test_mm512_maskz_inserti32x8(__mmask16 __U, __m512i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm512_maskz_inserti32x8
+ // CHECK-LABEL: test_mm512_maskz_inserti32x8
// CHECK: shufflevector <16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_maskz_inserti32x8(__U, __A, __B, 1);
}
__m512i test_mm512_inserti64x2(__m512i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm512_inserti64x2
+ // CHECK-LABEL: test_mm512_inserti64x2
// CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 8, i32 9, i32 4, i32 5, i32 6, i32 7>
return _mm512_inserti64x2(__A, __B, 1);
}
__m512i test_mm512_mask_inserti64x2(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm512_mask_inserti64x2
+ // CHECK-LABEL: test_mm512_mask_inserti64x2
// CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 8, i32 9, i32 4, i32 5, i32 6, i32 7>
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_mask_inserti64x2(__W, __U, __A, __B, 1);
}
__m512i test_mm512_maskz_inserti64x2(__mmask8 __U, __m512i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm512_maskz_inserti64x2
+ // CHECK-LABEL: test_mm512_maskz_inserti64x2
// CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 8, i32 9, i32 4, i32 5, i32 6, i32 7>
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_maskz_inserti64x2(__U, __A, __B, 1);
}
__mmask8 test_mm512_mask_fpclass_pd_mask(__mmask8 __U, __m512d __A) {
- // CHECK-LABEL: @test_mm512_mask_fpclass_pd_mask
+ // CHECK-LABEL: test_mm512_mask_fpclass_pd_mask
// CHECK: @llvm.x86.avx512.fpclass.pd.512
return _mm512_mask_fpclass_pd_mask(__U, __A, 4);
}
__mmask8 test_mm512_fpclass_pd_mask(__m512d __A) {
- // CHECK-LABEL: @test_mm512_fpclass_pd_mask
+ // CHECK-LABEL: test_mm512_fpclass_pd_mask
// CHECK: @llvm.x86.avx512.fpclass.pd.512
return _mm512_fpclass_pd_mask(__A, 4);
}
__mmask16 test_mm512_mask_fpclass_ps_mask(__mmask16 __U, __m512 __A) {
- // CHECK-LABEL: @test_mm512_mask_fpclass_ps_mask
+ // CHECK-LABEL: test_mm512_mask_fpclass_ps_mask
// CHECK: @llvm.x86.avx512.fpclass.ps.512
return _mm512_mask_fpclass_ps_mask(__U, __A, 4);
}
__mmask16 test_mm512_fpclass_ps_mask(__m512 __A) {
- // CHECK-LABEL: @test_mm512_fpclass_ps_mask
+ // CHECK-LABEL: test_mm512_fpclass_ps_mask
// CHECK: @llvm.x86.avx512.fpclass.ps.512
return _mm512_fpclass_ps_mask(__A, 4);
}
__mmask8 test_mm_fpclass_sd_mask(__m128d __A) {
- // CHECK-LABEL: @test_mm_fpclass_sd_mask
+ // CHECK-LABEL: test_mm_fpclass_sd_mask
// CHECK: @llvm.x86.avx512.mask.fpclass.sd
return _mm_fpclass_sd_mask (__A, 2);
}
__mmask8 test_mm_mask_fpclass_sd_mask(__mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_fpclass_sd_mask
+ // CHECK-LABEL: test_mm_mask_fpclass_sd_mask
// CHECK: @llvm.x86.avx512.mask.fpclass.sd
return _mm_mask_fpclass_sd_mask (__U, __A, 2);
}
__mmask8 test_mm_fpclass_ss_mask(__m128 __A) {
- // CHECK-LABEL: @test_mm_fpclass_ss_mask
+ // CHECK-LABEL: test_mm_fpclass_ss_mask
// CHECK: @llvm.x86.avx512.mask.fpclass.ss
return _mm_fpclass_ss_mask ( __A, 2);
}
__mmask8 test_mm_mask_fpclass_ss_mask(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_fpclass_ss_mask
+ // CHECK-LABEL: test_mm_mask_fpclass_ss_mask
// CHECK: @llvm.x86.avx512.mask.fpclass.ss
return _mm_mask_fpclass_ss_mask (__U, __A, 2);
}
diff --git a/clang/test/CodeGen/X86/avx512f-builtins.c b/clang/test/CodeGen/X86/avx512f-builtins.c
index 84e700c..8c446f5 100644
--- a/clang/test/CodeGen/X86/avx512f-builtins.c
+++ b/clang/test/CodeGen/X86/avx512f-builtins.c
@@ -154,6 +154,7 @@ __m512 test_mm512_add_ps(__m512 a, __m512 b)
// CHECK: fadd <16 x float>
return _mm512_add_ps(a, b);
}
+TEST_CONSTEXPR(match_m512(_mm512_add_ps((__m512){-1.0f, -2.0f, -3.0f, -4.0f, -5.0f, -6.0f, -7.0f, -8.0f, +1.0f, +2.0f, +3.0f, +4.0f, +5.0f, +6.0f, +7.0f, +8.0f}, (__m512){-1.0f, -2.0f, -3.0f, -4.0f, -5.0f, -6.0f, -7.0f, -8.0f, +1.0f, +2.0f, +3.0f, +4.0f, +5.0f, +6.0f, +7.0f, +8.0f}), -2.0f, -4.0f, -6.0f, -8.0f, -10.0f, -12.0f, -14.0f, -16.0f, +2.0f, +4.0f, +6.0f, +8.0f, +10.0f, +12.0f, +14.0f, +16.0f));
__m512d test_mm512_add_pd(__m512d a, __m512d b)
{
@@ -161,6 +162,7 @@ __m512d test_mm512_add_pd(__m512d a, __m512d b)
// CHECK: fadd <8 x double>
return _mm512_add_pd(a, b);
}
+TEST_CONSTEXPR(match_m512d(_mm512_add_pd((__m512d){-1.0, -2.0, -3.0, -4.0, +1.0, +2.0, +3.0, +4.0}, (__m512d){-1.0, -2.0, -3.0, -4.0, +1.0, +2.0, +3.0, +4.0}), -2.0, -4.0, -6.0, -8.0, +2.0, +4.0, +6.0, +8.0));
__m512 test_mm512_mul_ps(__m512 a, __m512 b)
{
@@ -168,6 +170,7 @@ __m512 test_mm512_mul_ps(__m512 a, __m512 b)
// CHECK: fmul <16 x float>
return _mm512_mul_ps(a, b);
}
+TEST_CONSTEXPR(match_m512(_mm512_mul_ps((__m512){-1.0f, -2.0f, -3.0f, -4.0f, -5.0f, -6.0f, -7.0f, -8.0f, +1.0f, +2.0f, +3.0f, +4.0f, +5.0f, +6.0f, +7.0f, +8.0f}, (__m512){-1.0f, -2.0f, -3.0f, -4.0f, -5.0f, -6.0f, -7.0f, -8.0f, +1.0f, +2.0f, +3.0f, +4.0f, +5.0f, +6.0f, +7.0f, +8.0f}), +1.0f, +4.0f, +9.0f, +16.0f, +25.0f, +36.0f, +49.0f, +64.0f, +1.0f, +4.0f, +9.0f, +16.0f, +25.0f, +36.0f, +49.0f, +64.0f));
__m512d test_mm512_mul_pd(__m512d a, __m512d b)
{
@@ -175,6 +178,7 @@ __m512d test_mm512_mul_pd(__m512d a, __m512d b)
// CHECK: fmul <8 x double>
return _mm512_mul_pd(a, b);
}
+TEST_CONSTEXPR(match_m512d(_mm512_mul_pd((__m512d){-1.0, -2.0, -3.0, -4.0, +1.0, +2.0, +3.0, +4.0}, (__m512d){-1.0, -2.0, -3.0, -4.0, +1.0, +2.0, +3.0, +4.0}), +1.0, +4.0, +9.0, +16.0, +1.0, +4.0, +9.0, +16.0));
void test_mm512_storeu_si512 (void *__P, __m512i __A)
{
@@ -435,6 +439,22 @@ __m512d test_mm512_set1_pd(double d)
// CHECK: insertelement <8 x double> {{.*}}, i32 7
return _mm512_set1_pd(d);
}
+TEST_CONSTEXPR(match_m512d(_mm512_set1_pd(-100.0), -100.0, -100.0, -100.0, -100.0, -100.0, -100.0, -100.0, -100.0));
+
+__m512 test_mm512_set1_ps(float d)
+{
+ // CHECK-LABEL: test_mm512_set1_ps
+ // CHECK: insertelement <16 x float> {{.*}}, i32 0
+ // CHECK: insertelement <16 x float> {{.*}}, i32 1
+ // CHECK: insertelement <16 x float> {{.*}}, i32 2
+ // CHECK: insertelement <16 x float> {{.*}}, i32 3
+ // CHECK: insertelement <16 x float> {{.*}}, i32 4
+ // CHECK: insertelement <16 x float> {{.*}}, i32 5
+ // CHECK: insertelement <16 x float> {{.*}}, i32 6
+ // CHECK: insertelement <16 x float> {{.*}}, i32 15
+ return _mm512_set1_ps(d);
+}
+TEST_CONSTEXPR(match_m512(_mm512_set1_ps(-55.0f), -55.0f, -55.0f, -55.0f, -55.0f, -55.0f, -55.0f, -55.0f, -55.0f, -55.0f, -55.0f, -55.0f, -55.0f, -55.0f, -55.0f, -55.0f, -55.0f));
__mmask16 test_mm512_knot(__mmask16 a)
{
@@ -1261,6 +1281,7 @@ __m512d test_mm512_unpackhi_pd(__m512d a, __m512d b)
// CHECK: shufflevector <8 x double> {{.*}} <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
return _mm512_unpackhi_pd(a, b);
}
+TEST_CONSTEXPR(match_m512d(_mm512_unpackhi_pd((__m512d){1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0}, (__m512d){9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0}), +2.0, +10.0, +4.0, +12.0, +6.0, +14.0, +8.0, +16.0));
__m512d test_mm512_unpacklo_pd(__m512d a, __m512d b)
{
@@ -1268,6 +1289,7 @@ __m512d test_mm512_unpacklo_pd(__m512d a, __m512d b)
// CHECK: shufflevector <8 x double> {{.*}} <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
return _mm512_unpacklo_pd(a, b);
}
+TEST_CONSTEXPR(match_m512d(_mm512_unpacklo_pd((__m512d){1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0}, (__m512d){9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0}), +1.0, +9.0, +3.0, +11.0, +5.0, +13.0, +7.0, +15.0));
__m512 test_mm512_unpackhi_ps(__m512 a, __m512 b)
{
@@ -1275,6 +1297,7 @@ __m512 test_mm512_unpackhi_ps(__m512 a, __m512 b)
// CHECK: shufflevector <16 x float> {{.*}} <i32 2, i32 18, i32 3, i32 19, i32 6, i32 22, i32 7, i32 23, i32 10, i32 26, i32 11, i32 27, i32 14, i32 30, i32 15, i32 31>
return _mm512_unpackhi_ps(a, b);
}
+TEST_CONSTEXPR(match_m512(_mm512_unpackhi_ps((__m512){0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f}, (__m512){16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, 31.0f}), +2.0f, +18.0f, +3.0f, +19.0f, +6.0f, +22.0f, +7.0f, +23.0f, +10.0f, +26.0f, +11.0f, +27.0f, +14.0f, +30.0f, +15.0f, +31.0f));
__m512 test_mm512_unpacklo_ps(__m512 a, __m512 b)
{
@@ -1282,6 +1305,7 @@ __m512 test_mm512_unpacklo_ps(__m512 a, __m512 b)
// CHECK: shufflevector <16 x float> {{.*}} <i32 0, i32 16, i32 1, i32 17, i32 4, i32 20, i32 5, i32 21, i32 8, i32 24, i32 9, i32 25, i32 12, i32 28, i32 13, i32 29>
return _mm512_unpacklo_ps(a, b);
}
+TEST_CONSTEXPR(match_m512(_mm512_unpacklo_ps((__m512){0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f}, (__m512){16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, 31.0f}), +0.0f, +16.0f, +1.0f, +17.0f, +4.0f, +20.0f, +5.0f, +21.0f, +8.0f, +24.0f, +9.0f, +25.0f, +12.0f, +28.0f, +13.0f, +29.0f));
__mmask16 test_mm512_cmp_round_ps_mask(__m512 a, __m512 b) {
// CHECK-LABEL: test_mm512_cmp_round_ps_mask
@@ -2811,36 +2835,42 @@ __m512i test_mm512_and_epi32(__m512i __src,__mmask16 __k, __m512i __a, __m512i _
// CHECK: and <16 x i32>
return _mm512_and_epi32(__a, __b);
}
+TEST_CONSTEXPR(match_v16si(_mm512_and_epi32((__m512i)(__v16si){0, -1, 0, -1, 0, 0, -1, -1, 0, -1, 0, -1, 0, 0, -1, -1}, (__m512i)(__v16si){0, 0, -1, -1, 0, -1, 0, -1, 0, 0, -1, -1, 0, -1, 0, -1}), 0, 0, 0, -1, 0, 0, 0, -1, 0, 0, 0, -1, 0, 0, 0, -1));
__m512i test_mm512_and_epi64(__m512i __src,__mmask8 __k, __m512i __a, __m512i __b) {
// CHECK-LABEL: test_mm512_and_epi64
// CHECK: and <8 x i64>
return _mm512_and_epi64(__a, __b);
}
+TEST_CONSTEXPR(match_v8di(_mm512_and_epi64((__m512i)(__v8di){0, -1, 0, -1, 0, 0, -1, -1}, (__m512i)(__v8di){0, 0, -1, -1, 0, -1, 0, -1}), 0, 0, 0, -1, 0, 0, 0, -1));
__m512i test_mm512_or_epi32(__m512i __src,__mmask16 __k, __m512i __a, __m512i __b) {
// CHECK-LABEL: test_mm512_or_epi32
// CHECK: or <16 x i32>
return _mm512_or_epi32(__a, __b);
}
+TEST_CONSTEXPR(match_v16si(_mm512_or_epi32((__m512i)(__v16si){0, -1, 0, -1, 0, 0, -1, -1, 0, -1, 0, -1, 0, 0, -1, -1}, (__m512i)(__v16si){0, 0, -1, -1, 0, -1, 0, -1, 0, 0, -1, -1, 0, -1, 0, -1}), 0, -1, -1, -1, 0, -1, -1, -1, 0, -1, -1, -1, 0, -1, -1, -1));
__m512i test_mm512_or_epi64(__m512i __src,__mmask8 __k, __m512i __a, __m512i __b) {
// CHECK-LABEL: test_mm512_or_epi64
// CHECK: or <8 x i64>
return _mm512_or_epi64(__a, __b);
}
+TEST_CONSTEXPR(match_v8di(_mm512_or_epi64((__m512i)(__v8di){0, -1, 0, -1, 0, 0, -1, -1}, (__m512i)(__v8di){0, 0, -1, -1, 0, -1, 0, -1}), 0, -1, -1, -1, 0, -1, -1, -1));
__m512i test_mm512_xor_epi32(__m512i __src,__mmask16 __k, __m512i __a, __m512i __b) {
// CHECK-LABEL: test_mm512_xor_epi32
// CHECK: xor <16 x i32>
return _mm512_xor_epi32(__a, __b);
}
+TEST_CONSTEXPR(match_v16si(_mm512_xor_epi32((__m512i)(__v16si){0, -1, 0, -1, 0, 0, -1, -1, 0, -1, 0, -1, 0, 0, -1, -1}, (__m512i)(__v16si){0, 0, -1, -1, 0, -1, 0, -1, 0, 0, -1, -1, 0, -1, 0, -1}), 0, -1, -1, 0, 0, -1, -1, 0, 0, -1, -1, 0, 0, -1, -1, 0));
__m512i test_mm512_xor_epi64(__m512i __src,__mmask8 __k, __m512i __a, __m512i __b) {
// CHECK-LABEL: test_mm512_xor_epi64
// CHECK: xor <8 x i64>
return _mm512_xor_epi64(__a, __b);
}
+TEST_CONSTEXPR(match_v8di(_mm512_xor_epi64((__m512i)(__v8di){0, -1, 0, -1, 0, 0, -1, -1}, (__m512i)(__v8di){0, 0, -1, -1, 0, -1, 0, -1}), 0, -1, -1, 0, 0, -1, -1, 0));
__m512i test_mm512_maskz_andnot_epi32 (__mmask16 __k,__m512i __A, __m512i __B){
// CHECK-LABEL: test_mm512_maskz_andnot_epi32
@@ -2869,6 +2899,7 @@ __m512i test_mm512_andnot_si512(__m512i __A, __m512i __B)
return _mm512_andnot_si512(__A, __B);
}
+TEST_CONSTEXPR(match_v8di(_mm512_andnot_si512((__m512i)(__v8di){0, -1, 0, -1, 0, 0, -1, -1}, (__m512i)(__v8di){0, 0, -1, -1, 0, -1, 0, -1}), 0, 0, -1, 0, 0, -1, 0, 0));
__m512i test_mm512_andnot_epi32(__m512i __A, __m512i __B) {
// CHECK-LABEL: test_mm512_andnot_epi32
@@ -2876,6 +2907,7 @@ __m512i test_mm512_andnot_epi32(__m512i __A, __m512i __B) {
// CHECK: and <16 x i32> %{{.*}}, %{{.*}}
return _mm512_andnot_epi32(__A,__B);
}
+TEST_CONSTEXPR(match_v16si(_mm512_andnot_epi32((__m512i)(__v16si){0, -1, 0, -1, 0, 0, -1, -1, 0, -1, 0, -1, 0, 0, -1, -1}, (__m512i)(__v16si){0, 0, -1, -1, 0, -1, 0, -1, 0, 0, -1, -1, 0, -1, 0, -1}), 0, 0, -1, 0, 0, -1, 0, 0, 0, 0, -1, 0, 0, -1, 0, 0));
__m512i test_mm512_maskz_andnot_epi64 (__mmask8 __k,__m512i __A, __m512i __B) {
// CHECK-LABEL: test_mm512_maskz_andnot_epi64
@@ -2900,6 +2932,7 @@ __m512i test_mm512_andnot_epi64(__m512i __A, __m512i __B) {
// CHECK: and <8 x i64> %{{.*}}, %{{.*}}
return _mm512_andnot_epi64(__A,__B);
}
+TEST_CONSTEXPR(match_v8di(_mm512_andnot_epi64((__m512i)(__v8di){0, -1, 0, -1, 0, 0, -1, -1}, (__m512i)(__v8di){0, 0, -1, -1, 0, -1, 0, -1}), 0, 0, -1, 0, 0, -1, 0, 0));
__m512i test_mm512_maskz_sub_epi32 (__mmask16 __k,__m512i __A, __m512i __B) {
//CHECK-LABEL: test_mm512_maskz_sub_epi32
@@ -3542,6 +3575,7 @@ __m512d test_mm512_div_pd(__m512d __a, __m512d __b) {
// CHECK: fdiv <8 x double>
return _mm512_div_pd(__a,__b);
}
+TEST_CONSTEXPR(match_m512d(_mm512_div_pd((__m512d){+8.0, +6.0, +4.0, +2.0, -8.0, -6.0, -4.0, -2.0}, (__m512d){+2.0, +2.0, +2.0, +2.0, -2.0, -2.0, -2.0, -2.0}), +4.0, +3.0, +2.0, +1.0, +4.0, +3.0, +2.0, +1.0));
__m512d test_mm512_mask_div_pd(__m512d __w, __mmask8 __u, __m512d __a, __m512d __b) {
// CHECK-LABEL: test_mm512_mask_div_pd
// CHECK: fdiv <8 x double> %{{.*}}, %{{.*}}
@@ -3576,6 +3610,7 @@ __m512 test_mm512_div_ps(__m512 __A, __m512 __B) {
// CHECK: fdiv <16 x float>
return _mm512_div_ps(__A,__B);
}
+TEST_CONSTEXPR(match_m512(_mm512_div_ps((__m512){+16.0f, +14.0f, +12.0f, +10.0f, +8.0f, +6.0f, +4.0f, +2.0f, -16.0f, -14.0f, -12.0f, -10.0f, -8.0f, -6.0f, -4.0f, -2.0f}, (__m512){+2.0f, +2.0f, +2.0f, +2.0f, +2.0f, +2.0f, +2.0f, +2.0f, -2.0f, -2.0f, -2.0f, -2.0f, -2.0f, -2.0f, -2.0f, -2.0f}), +8.0f, +7.0f, +6.0f, +5.0f, +4.0f, +3.0f, +2.0f, +1.0f, +8.0f, +7.0f, +6.0f, +5.0f, +4.0f, +3.0f, +2.0f, +1.0f));
__m512 test_mm512_mask_div_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
// CHECK-LABEL: test_mm512_mask_div_ps
// CHECK: fdiv <16 x float> %{{.*}}, %{{.*}}
@@ -4386,6 +4421,7 @@ __m512d test_mm512_movedup_pd(__m512d __A) {
// CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> %{{.*}}, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
return _mm512_movedup_pd(__A);
}
+TEST_CONSTEXPR(match_m512d(_mm512_movedup_pd((__m512d){-1.0, +2.0, +3.0, +4.0, -5.0, -6.0, +7.0, +8.0}), -1.0, -1.0, +3.0, +3.0, -5.0, -5.0, +7.0, +7.0));
__m512d test_mm512_mask_movedup_pd(__m512d __W, __mmask8 __U, __m512d __A) {
// CHECK-LABEL: test_mm512_mask_movedup_pd
@@ -8682,6 +8718,7 @@ __m512 test_mm512_movehdup_ps(__m512 __A) {
// CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> %{{.*}}, <16 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7, i32 9, i32 9, i32 11, i32 11, i32 13, i32 13, i32 15, i32 15>
return _mm512_movehdup_ps(__A);
}
+TEST_CONSTEXPR(match_m512(_mm512_movehdup_ps((__m512){+1.0f,-1.0f,+2.0f,-2.0f,+3.0f,-3.0f,+4.0f,-4.0f,+5.0f,-5.0f,+6.0f,-6.0f,+7.0f,-7.0f,+8.0f,-8.0f}), -1.0f, -1.0f, -2.0f, -2.0f, -3.0f, -3.0f, -4.0f, -4.0f, -5.0f, -5.0f, -6.0f, -6.0f, -7.0f, -7.0f, -8.0f, -8.0f));
__m512 test_mm512_mask_movehdup_ps(__m512 __W, __mmask16 __U, __m512 __A) {
// CHECK-LABEL: test_mm512_mask_movehdup_ps
@@ -8702,6 +8739,7 @@ __m512 test_mm512_moveldup_ps(__m512 __A) {
// CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> %{{.*}}, <16 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6, i32 8, i32 8, i32 10, i32 10, i32 12, i32 12, i32 14, i32 14>
return _mm512_moveldup_ps(__A);
}
+TEST_CONSTEXPR(match_m512(_mm512_moveldup_ps((__m512){+1.0f,-1.0f,+2.0f,-2.0f,+3.0f,-3.0f,+4.0f,-4.0f,+5.0f,-5.0f,+6.0f,-6.0f,+7.0f,-7.0f,+8.0f,-8.0f}), +1.0f, +1.0f, +2.0f, +2.0f, +3.0f, +3.0f, +4.0f, +4.0f, +5.0f, +5.0f, +6.0f, +6.0f, +7.0f, +7.0f, +8.0f, +8.0f));
__m512 test_mm512_mask_moveldup_ps(__m512 __W, __mmask16 __U, __m512 __A) {
// CHECK-LABEL: test_mm512_mask_moveldup_ps
@@ -9024,6 +9062,7 @@ __m512i test_mm512_set1_epi8(char d)
// CHECK: insertelement <64 x i8> {{.*}}, i32 63
return _mm512_set1_epi8(d);
}
+TEST_CONSTEXPR(match_v64qi(_mm512_set1_epi8(127), 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127));
__m512i test_mm512_set1_epi16(short d)
{
@@ -9039,6 +9078,37 @@ __m512i test_mm512_set1_epi16(short d)
// CHECK: insertelement <32 x i16> {{.*}}, i32 31
return _mm512_set1_epi16(d);
}
+TEST_CONSTEXPR(match_v32hi(_mm512_set1_epi16(-511), -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511, -511));
+
+__m512i test_mm512_set1_epi32(int d)
+{
+ // CHECK-LABEL: test_mm512_set1_epi32
+ // CHECK: insertelement <16 x i32> {{.*}}, i32 0
+ // CHECK: insertelement <16 x i32> {{.*}}, i32 1
+ // CHECK: insertelement <16 x i32> {{.*}}, i32 2
+ // CHECK: insertelement <16 x i32> {{.*}}, i32 3
+ // CHECK: insertelement <16 x i32> {{.*}}, i32 4
+ // CHECK: insertelement <16 x i32> {{.*}}, i32 5
+ // CHECK: insertelement <16 x i32> {{.*}}, i32 6
+ // CHECK: insertelement <16 x i32> {{.*}}, i32 15
+ return _mm512_set1_epi32(d);
+}
+TEST_CONSTEXPR(match_v16si(_mm512_set1_epi32(99), 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99));
+
+__m512i test_mm512_set1_epi64(long long d)
+{
+ // CHECK-LABEL: test_mm512_set1_epi64
+ // CHECK: insertelement <8 x i64> {{.*}}, i32 0
+ // CHECK: insertelement <8 x i64> {{.*}}, i32 1
+ // CHECK: insertelement <8 x i64> {{.*}}, i32 2
+ // CHECK: insertelement <8 x i64> {{.*}}, i32 3
+ // CHECK: insertelement <8 x i64> {{.*}}, i32 4
+ // CHECK: insertelement <8 x i64> {{.*}}, i32 5
+ // CHECK: insertelement <8 x i64> {{.*}}, i32 6
+ // CHECK: insertelement <8 x i64> {{.*}}, i32 7
+ return _mm512_set1_epi64(d);
+}
+TEST_CONSTEXPR(match_v8di(_mm512_set1_epi64(-42), -42, -42, -42, -42, -42, -42, -42, -42));
__m512i test_mm512_set4_epi32 (int __A, int __B, int __C, int __D)
{
diff --git a/clang/test/CodeGen/X86/avx512vl-builtins.c b/clang/test/CodeGen/X86/avx512vl-builtins.c
index 1c2d467..ac7aa3e 100644
--- a/clang/test/CodeGen/X86/avx512vl-builtins.c
+++ b/clang/test/CodeGen/X86/avx512vl-builtins.c
@@ -1,602 +1,603 @@
-// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512f -target-feature +avx512vl -emit-llvm -o - -Wall -Werror -Wsign-conversion | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512f -target-feature +avx512vl -emit-llvm -o - -Wall -Werror -Wsign-conversion | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512f -target-feature +avx512vl -emit-llvm -o - -Wall -Werror -Wsign-conversion | FileCheck %s
#include <immintrin.h>
__mmask8 test_mm_cmpeq_epu32_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpeq_epu32_mask
+ // CHECK-LABEL: test_mm_cmpeq_epu32_mask
// CHECK: icmp eq <4 x i32> %{{.*}}, %{{.*}}
// CHECK: shufflevector <4 x i1> %{{.*}}, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
return (__mmask8)_mm_cmpeq_epu32_mask(__a, __b);
}
__mmask8 test_mm_mask_cmpeq_epu32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpeq_epu32_mask
+ // CHECK-LABEL: test_mm_mask_cmpeq_epu32_mask
// CHECK: icmp eq <4 x i32> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmpeq_epu32_mask(__u, __a, __b);
}
__mmask8 test_mm_cmpeq_epu64_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpeq_epu64_mask
+ // CHECK-LABEL: test_mm_cmpeq_epu64_mask
// CHECK: icmp eq <2 x i64> %{{.*}}, %{{.*}}
// CHECK: shufflevector <2 x i1> %{{.*}}, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
return (__mmask8)_mm_cmpeq_epu64_mask(__a, __b);
}
__mmask8 test_mm_mask_cmpeq_epu64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpeq_epu64_mask
+ // CHECK-LABEL: test_mm_mask_cmpeq_epu64_mask
// CHECK: icmp eq <2 x i64> %{{.*}}, %{{.*}}
// CHECK: and <2 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmpeq_epu64_mask(__u, __a, __b);
}
__mmask8 test_mm_cmpge_epi32_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpge_epi32_mask
+ // CHECK-LABEL: test_mm_cmpge_epi32_mask
// CHECK: icmp sge <4 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmpge_epi32_mask(__a, __b);
}
__mmask8 test_mm_mask_cmpge_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpge_epi32_mask
+ // CHECK-LABEL: test_mm_mask_cmpge_epi32_mask
// CHECK: icmp sge <4 x i32> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmpge_epi32_mask(__u, __a, __b);
}
__mmask8 test_mm_cmpge_epi64_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpge_epi64_mask
+ // CHECK-LABEL: test_mm_cmpge_epi64_mask
// CHECK: icmp sge <2 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmpge_epi64_mask(__a, __b);
}
__mmask8 test_mm_mask_cmpge_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpge_epi64_mask
+ // CHECK-LABEL: test_mm_mask_cmpge_epi64_mask
// CHECK: icmp sge <2 x i64> %{{.*}}, %{{.*}}
// CHECK: and <2 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmpge_epi64_mask(__u, __a, __b);
}
__mmask8 test_mm256_cmpge_epi32_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpge_epi32_mask
+ // CHECK-LABEL: test_mm256_cmpge_epi32_mask
// CHECK: icmp sge <8 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmpge_epi32_mask(__a, __b);
}
__mmask8 test_mm256_mask_cmpge_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpge_epi32_mask
+ // CHECK-LABEL: test_mm256_mask_cmpge_epi32_mask
// CHECK: icmp sge <8 x i32> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmpge_epi32_mask(__u, __a, __b);
}
__mmask8 test_mm256_cmpge_epi64_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpge_epi64_mask
+ // CHECK-LABEL: test_mm256_cmpge_epi64_mask
// CHECK: icmp sge <4 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmpge_epi64_mask(__a, __b);
}
__mmask8 test_mm256_mask_cmpge_epi64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpge_epi64_mask
+ // CHECK-LABEL: test_mm256_mask_cmpge_epi64_mask
// CHECK: icmp sge <4 x i64> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmpge_epi64_mask(__u, __a, __b);
}
__mmask8 test_mm_cmpge_epu32_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpge_epu32_mask
+ // CHECK-LABEL: test_mm_cmpge_epu32_mask
// CHECK: icmp uge <4 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmpge_epu32_mask(__a, __b);
}
__mmask8 test_mm_mask_cmpge_epu32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpge_epu32_mask
+ // CHECK-LABEL: test_mm_mask_cmpge_epu32_mask
// CHECK: icmp uge <4 x i32> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmpge_epu32_mask(__u, __a, __b);
}
__mmask8 test_mm_cmpge_epu64_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpge_epu64_mask
+ // CHECK-LABEL: test_mm_cmpge_epu64_mask
// CHECK: icmp uge <2 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmpge_epu64_mask(__a, __b);
}
__mmask8 test_mm_mask_cmpge_epu64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpge_epu64_mask
+ // CHECK-LABEL: test_mm_mask_cmpge_epu64_mask
// CHECK: icmp uge <2 x i64> %{{.*}}, %{{.*}}
// CHECK: and <2 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmpge_epu64_mask(__u, __a, __b);
}
__mmask8 test_mm256_cmpge_epu32_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpge_epu32_mask
+ // CHECK-LABEL: test_mm256_cmpge_epu32_mask
// CHECK: icmp uge <8 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmpge_epu32_mask(__a, __b);
}
__mmask8 test_mm256_mask_cmpge_epu32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpge_epu32_mask
+ // CHECK-LABEL: test_mm256_mask_cmpge_epu32_mask
// CHECK: icmp uge <8 x i32> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmpge_epu32_mask(__u, __a, __b);
}
__mmask8 test_mm256_cmpge_epu64_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpge_epu64_mask
+ // CHECK-LABEL: test_mm256_cmpge_epu64_mask
// CHECK: icmp uge <4 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmpge_epu64_mask(__a, __b);
}
__mmask8 test_mm256_mask_cmpge_epu64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpge_epu64_mask
+ // CHECK-LABEL: test_mm256_mask_cmpge_epu64_mask
// CHECK: icmp uge <4 x i64> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmpge_epu64_mask(__u, __a, __b);
}
__mmask8 test_mm_cmpgt_epu32_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpgt_epu32_mask
+ // CHECK-LABEL: test_mm_cmpgt_epu32_mask
// CHECK: icmp ugt <4 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmpgt_epu32_mask(__a, __b);
}
__mmask8 test_mm_mask_cmpgt_epu32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpgt_epu32_mask
+ // CHECK-LABEL: test_mm_mask_cmpgt_epu32_mask
// CHECK: icmp ugt <4 x i32> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmpgt_epu32_mask(__u, __a, __b);
}
__mmask8 test_mm_cmpgt_epu64_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpgt_epu64_mask
+ // CHECK-LABEL: test_mm_cmpgt_epu64_mask
// CHECK: icmp ugt <2 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmpgt_epu64_mask(__a, __b);
}
__mmask8 test_mm_mask_cmpgt_epu64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpgt_epu64_mask
+ // CHECK-LABEL: test_mm_mask_cmpgt_epu64_mask
// CHECK: icmp ugt <2 x i64> %{{.*}}, %{{.*}}
// CHECK: and <2 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmpgt_epu64_mask(__u, __a, __b);
}
__mmask8 test_mm256_cmpgt_epu32_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpgt_epu32_mask
+ // CHECK-LABEL: test_mm256_cmpgt_epu32_mask
// CHECK: icmp ugt <8 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmpgt_epu32_mask(__a, __b);
}
__mmask8 test_mm256_mask_cmpgt_epu32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpgt_epu32_mask
+ // CHECK-LABEL: test_mm256_mask_cmpgt_epu32_mask
// CHECK: icmp ugt <8 x i32> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmpgt_epu32_mask(__u, __a, __b);
}
__mmask8 test_mm256_cmpgt_epu64_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpgt_epu64_mask
+ // CHECK-LABEL: test_mm256_cmpgt_epu64_mask
// CHECK: icmp ugt <4 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmpgt_epu64_mask(__a, __b);
}
__mmask8 test_mm256_mask_cmpgt_epu64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpgt_epu64_mask
+ // CHECK-LABEL: test_mm256_mask_cmpgt_epu64_mask
// CHECK: icmp ugt <4 x i64> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmpgt_epu64_mask(__u, __a, __b);
}
__mmask8 test_mm_cmple_epi32_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmple_epi32_mask
+ // CHECK-LABEL: test_mm_cmple_epi32_mask
// CHECK: icmp sle <4 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmple_epi32_mask(__a, __b);
}
__mmask8 test_mm_mask_cmple_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmple_epi32_mask
+ // CHECK-LABEL: test_mm_mask_cmple_epi32_mask
// CHECK: icmp sle <4 x i32> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmple_epi32_mask(__u, __a, __b);
}
__mmask8 test_mm_cmple_epi64_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmple_epi64_mask
+ // CHECK-LABEL: test_mm_cmple_epi64_mask
// CHECK: icmp sle <2 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmple_epi64_mask(__a, __b);
}
__mmask8 test_mm_mask_cmple_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmple_epi64_mask
+ // CHECK-LABEL: test_mm_mask_cmple_epi64_mask
// CHECK: icmp sle <2 x i64> %{{.*}}, %{{.*}}
// CHECK: and <2 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmple_epi64_mask(__u, __a, __b);
}
__mmask8 test_mm256_cmple_epi32_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmple_epi32_mask
+ // CHECK-LABEL: test_mm256_cmple_epi32_mask
// CHECK: icmp sle <8 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmple_epi32_mask(__a, __b);
}
__mmask8 test_mm256_mask_cmple_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmple_epi32_mask
+ // CHECK-LABEL: test_mm256_mask_cmple_epi32_mask
// CHECK: icmp sle <8 x i32> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmple_epi32_mask(__u, __a, __b);
}
__mmask8 test_mm256_cmple_epi64_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmple_epi64_mask
+ // CHECK-LABEL: test_mm256_cmple_epi64_mask
// CHECK: icmp sle <4 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmple_epi64_mask(__a, __b);
}
__mmask8 test_mm256_mask_cmple_epi64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmple_epi64_mask
+ // CHECK-LABEL: test_mm256_mask_cmple_epi64_mask
// CHECK: icmp sle <4 x i64> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmple_epi64_mask(__u, __a, __b);
}
__mmask8 test_mm_cmple_epu32_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmple_epu32_mask
+ // CHECK-LABEL: test_mm_cmple_epu32_mask
// CHECK: icmp ule <4 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmple_epu32_mask(__a, __b);
}
__mmask8 test_mm_mask_cmple_epu32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmple_epu32_mask
+ // CHECK-LABEL: test_mm_mask_cmple_epu32_mask
// CHECK: icmp ule <4 x i32> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmple_epu32_mask(__u, __a, __b);
}
__mmask8 test_mm_cmple_epu64_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmple_epu64_mask
+ // CHECK-LABEL: test_mm_cmple_epu64_mask
// CHECK: icmp ule <2 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmple_epu64_mask(__a, __b);
}
__mmask8 test_mm_mask_cmple_epu64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmple_epu64_mask
+ // CHECK-LABEL: test_mm_mask_cmple_epu64_mask
// CHECK: icmp ule <2 x i64> %{{.*}}, %{{.*}}
// CHECK: and <2 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmple_epu64_mask(__u, __a, __b);
}
__mmask8 test_mm256_cmple_epu32_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmple_epu32_mask
+ // CHECK-LABEL: test_mm256_cmple_epu32_mask
// CHECK: icmp ule <8 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmple_epu32_mask(__a, __b);
}
__mmask8 test_mm256_mask_cmple_epu32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmple_epu32_mask
+ // CHECK-LABEL: test_mm256_mask_cmple_epu32_mask
// CHECK: icmp ule <8 x i32> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmple_epu32_mask(__u, __a, __b);
}
__mmask8 test_mm256_cmple_epu64_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmple_epu64_mask
+ // CHECK-LABEL: test_mm256_cmple_epu64_mask
// CHECK: icmp ule <4 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmple_epu64_mask(__a, __b);
}
__mmask8 test_mm256_mask_cmple_epu64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmple_epu64_mask
+ // CHECK-LABEL: test_mm256_mask_cmple_epu64_mask
// CHECK: icmp ule <4 x i64> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmple_epu64_mask(__u, __a, __b);
}
__mmask8 test_mm_cmplt_epi32_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmplt_epi32_mask
+ // CHECK-LABEL: test_mm_cmplt_epi32_mask
// CHECK: icmp slt <4 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmplt_epi32_mask(__a, __b);
}
__mmask8 test_mm_mask_cmplt_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmplt_epi32_mask
+ // CHECK-LABEL: test_mm_mask_cmplt_epi32_mask
// CHECK: icmp slt <4 x i32> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmplt_epi32_mask(__u, __a, __b);
}
__mmask8 test_mm_cmplt_epi64_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmplt_epi64_mask
+ // CHECK-LABEL: test_mm_cmplt_epi64_mask
// CHECK: icmp slt <2 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmplt_epi64_mask(__a, __b);
}
__mmask8 test_mm_mask_cmplt_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmplt_epi64_mask
+ // CHECK-LABEL: test_mm_mask_cmplt_epi64_mask
// CHECK: icmp slt <2 x i64> %{{.*}}, %{{.*}}
// CHECK: and <2 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmplt_epi64_mask(__u, __a, __b);
}
__mmask8 test_mm256_cmplt_epi32_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmplt_epi32_mask
+ // CHECK-LABEL: test_mm256_cmplt_epi32_mask
// CHECK: icmp slt <8 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmplt_epi32_mask(__a, __b);
}
__mmask8 test_mm256_mask_cmplt_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmplt_epi32_mask
+ // CHECK-LABEL: test_mm256_mask_cmplt_epi32_mask
// CHECK: icmp slt <8 x i32> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmplt_epi32_mask(__u, __a, __b);
}
__mmask8 test_mm256_cmplt_epi64_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmplt_epi64_mask
+ // CHECK-LABEL: test_mm256_cmplt_epi64_mask
// CHECK: icmp slt <4 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmplt_epi64_mask(__a, __b);
}
__mmask8 test_mm256_mask_cmplt_epi64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmplt_epi64_mask
+ // CHECK-LABEL: test_mm256_mask_cmplt_epi64_mask
// CHECK: icmp slt <4 x i64> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmplt_epi64_mask(__u, __a, __b);
}
__mmask8 test_mm_cmplt_epu32_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmplt_epu32_mask
+ // CHECK-LABEL: test_mm_cmplt_epu32_mask
// CHECK: icmp ult <4 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmplt_epu32_mask(__a, __b);
}
__mmask8 test_mm_mask_cmplt_epu32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmplt_epu32_mask
+ // CHECK-LABEL: test_mm_mask_cmplt_epu32_mask
// CHECK: icmp ult <4 x i32> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmplt_epu32_mask(__u, __a, __b);
}
__mmask8 test_mm_cmplt_epu64_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmplt_epu64_mask
+ // CHECK-LABEL: test_mm_cmplt_epu64_mask
// CHECK: icmp ult <2 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmplt_epu64_mask(__a, __b);
}
__mmask8 test_mm_mask_cmplt_epu64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmplt_epu64_mask
+ // CHECK-LABEL: test_mm_mask_cmplt_epu64_mask
// CHECK: icmp ult <2 x i64> %{{.*}}, %{{.*}}
// CHECK: and <2 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmplt_epu64_mask(__u, __a, __b);
}
__mmask8 test_mm256_cmplt_epu32_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmplt_epu32_mask
+ // CHECK-LABEL: test_mm256_cmplt_epu32_mask
// CHECK: icmp ult <8 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmplt_epu32_mask(__a, __b);
}
__mmask8 test_mm256_mask_cmplt_epu32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmplt_epu32_mask
+ // CHECK-LABEL: test_mm256_mask_cmplt_epu32_mask
// CHECK: icmp ult <8 x i32> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmplt_epu32_mask(__u, __a, __b);
}
__mmask8 test_mm256_cmplt_epu64_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmplt_epu64_mask
+ // CHECK-LABEL: test_mm256_cmplt_epu64_mask
// CHECK: icmp ult <4 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmplt_epu64_mask(__a, __b);
}
__mmask8 test_mm256_mask_cmplt_epu64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmplt_epu64_mask
+ // CHECK-LABEL: test_mm256_mask_cmplt_epu64_mask
// CHECK: icmp ult <4 x i64> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmplt_epu64_mask(__u, __a, __b);
}
__mmask8 test_mm_cmpneq_epi32_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpneq_epi32_mask
+ // CHECK-LABEL: test_mm_cmpneq_epi32_mask
// CHECK: icmp ne <4 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmpneq_epi32_mask(__a, __b);
}
__mmask8 test_mm_mask_cmpneq_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpneq_epi32_mask
+ // CHECK-LABEL: test_mm_mask_cmpneq_epi32_mask
// CHECK: icmp ne <4 x i32> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmpneq_epi32_mask(__u, __a, __b);
}
__mmask8 test_mm_cmpneq_epi64_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpneq_epi64_mask
+ // CHECK-LABEL: test_mm_cmpneq_epi64_mask
// CHECK: icmp ne <2 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmpneq_epi64_mask(__a, __b);
}
__mmask8 test_mm_mask_cmpneq_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpneq_epi64_mask
+ // CHECK-LABEL: test_mm_mask_cmpneq_epi64_mask
// CHECK: icmp ne <2 x i64> %{{.*}}, %{{.*}}
// CHECK: and <2 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmpneq_epi64_mask(__u, __a, __b);
}
__mmask8 test_mm256_cmpneq_epi32_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpneq_epi32_mask
+ // CHECK-LABEL: test_mm256_cmpneq_epi32_mask
// CHECK: icmp ne <8 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmpneq_epi32_mask(__a, __b);
}
__mmask8 test_mm256_mask_cmpneq_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpneq_epi32_mask
+ // CHECK-LABEL: test_mm256_mask_cmpneq_epi32_mask
// CHECK: icmp ne <8 x i32> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmpneq_epi32_mask(__u, __a, __b);
}
__mmask8 test_mm256_cmpneq_epi64_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpneq_epi64_mask
+ // CHECK-LABEL: test_mm256_cmpneq_epi64_mask
// CHECK: icmp ne <4 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmpneq_epi64_mask(__a, __b);
}
__mmask8 test_mm256_mask_cmpneq_epi64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpneq_epi64_mask
+ // CHECK-LABEL: test_mm256_mask_cmpneq_epi64_mask
// CHECK: icmp ne <4 x i64> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmpneq_epi64_mask(__u, __a, __b);
}
__mmask8 test_mm_cmpneq_epu32_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpneq_epu32_mask
+ // CHECK-LABEL: test_mm_cmpneq_epu32_mask
// CHECK: icmp ne <4 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmpneq_epu32_mask(__a, __b);
}
__mmask8 test_mm_mask_cmpneq_epu32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpneq_epu32_mask
+ // CHECK-LABEL: test_mm_mask_cmpneq_epu32_mask
// CHECK: icmp ne <4 x i32> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmpneq_epu32_mask(__u, __a, __b);
}
__mmask8 test_mm_cmpneq_epu64_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpneq_epu64_mask
+ // CHECK-LABEL: test_mm_cmpneq_epu64_mask
// CHECK: icmp ne <2 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmpneq_epu64_mask(__a, __b);
}
__mmask8 test_mm_mask_cmpneq_epu64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpneq_epu64_mask
+ // CHECK-LABEL: test_mm_mask_cmpneq_epu64_mask
// CHECK: icmp ne <2 x i64> %{{.*}}, %{{.*}}
// CHECK: and <2 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmpneq_epu64_mask(__u, __a, __b);
}
__mmask8 test_mm256_cmpneq_epu32_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpneq_epu32_mask
+ // CHECK-LABEL: test_mm256_cmpneq_epu32_mask
// CHECK: icmp ne <8 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmpneq_epu32_mask(__a, __b);
}
__mmask8 test_mm256_mask_cmpneq_epu32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpneq_epu32_mask
+ // CHECK-LABEL: test_mm256_mask_cmpneq_epu32_mask
// CHECK: icmp ne <8 x i32> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmpneq_epu32_mask(__u, __a, __b);
}
__mmask8 test_mm256_cmpneq_epu64_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpneq_epu64_mask
+ // CHECK-LABEL: test_mm256_cmpneq_epu64_mask
// CHECK: icmp ne <4 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmpneq_epu64_mask(__a, __b);
}
__mmask8 test_mm256_mask_cmpneq_epu64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpneq_epu64_mask
+ // CHECK-LABEL: test_mm256_mask_cmpneq_epu64_mask
// CHECK: icmp ne <4 x i64> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmpneq_epu64_mask(__u, __a, __b);
}
__mmask8 test_mm_cmp_eq_epi32_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmp_eq_epi32_mask
+ // CHECK-LABEL: test_mm_cmp_eq_epi32_mask
// CHECK: icmp eq <4 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmp_epi32_mask(__a, __b, _MM_CMPINT_EQ);
}
__mmask8 test_mm_mask_cmp_lt_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmp_lt_epi32_mask
+ // CHECK-LABEL: test_mm_mask_cmp_lt_epi32_mask
// CHECK: icmp slt <4 x i32> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmp_epi32_mask(__u, __a, __b, _MM_CMPINT_LT);
}
__mmask8 test_mm_cmp_lt_epi64_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmp_lt_epi64_mask
+ // CHECK-LABEL: test_mm_cmp_lt_epi64_mask
// CHECK: icmp slt <2 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmp_epi64_mask(__a, __b, _MM_CMPINT_LT);
}
__mmask8 test_mm_mask_cmp_eq_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmp_eq_epi64_mask
+ // CHECK-LABEL: test_mm_mask_cmp_eq_epi64_mask
// CHECK: icmp eq <2 x i64> %{{.*}}, %{{.*}}
// CHECK: and <2 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmp_epi64_mask(__u, __a, __b, _MM_CMPINT_EQ);
}
__mmask8 test_mm256_cmp_eq_epi32_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmp_eq_epi32_mask
+ // CHECK-LABEL: test_mm256_cmp_eq_epi32_mask
// CHECK: icmp eq <8 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmp_epi32_mask(__a, __b, _MM_CMPINT_EQ);
}
__mmask8 test_mm256_mask_cmp_le_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmp_le_epi32_mask
+ // CHECK-LABEL: test_mm256_mask_cmp_le_epi32_mask
// CHECK: icmp sle <8 x i32> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmp_epi32_mask(__u, __a, __b, _MM_CMPINT_LE);
}
__mmask8 test_mm256_cmp_eq_epi64_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmp_eq_epi64_mask
+ // CHECK-LABEL: test_mm256_cmp_eq_epi64_mask
// CHECK: icmp eq <4 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmp_epi64_mask(__a, __b, _MM_CMPINT_EQ);
}
__mmask8 test_mm256_mask_cmp_eq_epi64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmp_eq_epi64_mask
+ // CHECK-LABEL: test_mm256_mask_cmp_eq_epi64_mask
// CHECK: icmp eq <4 x i64> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmp_epi64_mask(__u, __a, __b, _MM_CMPINT_EQ);
}
__mmask8 test_mm_cmp_epu32_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmp_epu32_mask
+ // CHECK-LABEL: test_mm_cmp_epu32_mask
// CHECK: icmp eq <4 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmp_epu32_mask(__a, __b, 0);
}
__mmask8 test_mm_mask_cmp_epu32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmp_epu32_mask
+ // CHECK-LABEL: test_mm_mask_cmp_epu32_mask
// CHECK: icmp eq <4 x i32> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmp_epu32_mask(__u, __a, __b, 0);
}
__mmask8 test_mm_cmp_epu64_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmp_epu64_mask
+ // CHECK-LABEL: test_mm_cmp_epu64_mask
// CHECK: icmp eq <2 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmp_epu64_mask(__a, __b, 0);
}
__mmask8 test_mm_mask_cmp_epu64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmp_epu64_mask
+ // CHECK-LABEL: test_mm_mask_cmp_epu64_mask
// CHECK: icmp eq <2 x i64> %{{.*}}, %{{.*}}
// CHECK: and <2 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmp_epu64_mask(__u, __a, __b, 0);
}
__mmask8 test_mm256_cmp_epu32_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmp_epu32_mask
+ // CHECK-LABEL: test_mm256_cmp_epu32_mask
// CHECK: icmp eq <8 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmp_epu32_mask(__a, __b, 0);
}
__mmask8 test_mm256_mask_cmp_epu32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmp_epu32_mask
+ // CHECK-LABEL: test_mm256_mask_cmp_epu32_mask
// CHECK: icmp eq <8 x i32> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmp_epu32_mask(__u, __a, __b, 0);
}
__mmask8 test_mm256_cmp_epu64_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmp_epu64_mask
+ // CHECK-LABEL: test_mm256_cmp_epu64_mask
// CHECK: icmp eq <4 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmp_epu64_mask(__a, __b, 0);
}
__mmask8 test_mm256_mask_cmp_epu64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmp_epu64_mask
+ // CHECK-LABEL: test_mm256_mask_cmp_epu64_mask
// CHECK: icmp eq <4 x i64> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmp_epu64_mask(__u, __a, __b, 0);
@@ -604,14 +605,14 @@ __mmask8 test_mm256_mask_cmp_epu64_mask(__mmask8 __u, __m256i __a, __m256i __b)
__m256i test_mm256_mask_add_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
__m256i __B) {
- //CHECK-LABEL: @test_mm256_mask_add_epi32
+ //CHECK-LABEL: test_mm256_mask_add_epi32
//CHECK: add <8 x i32> %{{.*}}, %{{.*}}
//CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_add_epi32(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_add_epi32 (__mmask8 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_maskz_add_epi32
+ //CHECK-LABEL: test_mm256_maskz_add_epi32
//CHECK: add <8 x i32> %{{.*}}, %{{.*}}
//CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_add_epi32(__U, __A, __B);
@@ -619,14 +620,14 @@ __m256i test_mm256_maskz_add_epi32 (__mmask8 __U, __m256i __A, __m256i __B) {
__m256i test_mm256_mask_add_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
__m256i __B) {
- //CHECK-LABEL: @test_mm256_mask_add_epi64
+ //CHECK-LABEL: test_mm256_mask_add_epi64
//CHECK: add <4 x i64> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_add_epi64(__W,__U,__A,__B);
}
__m256i test_mm256_maskz_add_epi64 (__mmask8 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_maskz_add_epi64
+ //CHECK-LABEL: test_mm256_maskz_add_epi64
//CHECK: add <4 x i64> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_add_epi64 (__U,__A,__B);
@@ -634,14 +635,14 @@ __m256i test_mm256_maskz_add_epi64 (__mmask8 __U, __m256i __A, __m256i __B) {
__m256i test_mm256_mask_sub_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
__m256i __B) {
- //CHECK-LABEL: @test_mm256_mask_sub_epi32
+ //CHECK-LABEL: test_mm256_mask_sub_epi32
//CHECK: sub <8 x i32> %{{.*}}, %{{.*}}
//CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_sub_epi32 (__W,__U,__A,__B);
}
__m256i test_mm256_maskz_sub_epi32 (__mmask8 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_maskz_sub_epi32
+ //CHECK-LABEL: test_mm256_maskz_sub_epi32
//CHECK: sub <8 x i32> %{{.*}}, %{{.*}}
//CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_sub_epi32 (__U,__A,__B);
@@ -649,14 +650,14 @@ __m256i test_mm256_maskz_sub_epi32 (__mmask8 __U, __m256i __A, __m256i __B) {
__m256i test_mm256_mask_sub_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
__m256i __B) {
- //CHECK-LABEL: @test_mm256_mask_sub_epi64
+ //CHECK-LABEL: test_mm256_mask_sub_epi64
//CHECK: sub <4 x i64> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_sub_epi64 (__W,__U,__A,__B);
}
__m256i test_mm256_maskz_sub_epi64 (__mmask8 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_maskz_sub_epi64
+ //CHECK-LABEL: test_mm256_maskz_sub_epi64
//CHECK: sub <4 x i64> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_sub_epi64 (__U,__A,__B);
@@ -664,7 +665,7 @@ __m256i test_mm256_maskz_sub_epi64 (__mmask8 __U, __m256i __A, __m256i __B) {
__m128i test_mm_mask_add_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
__m128i __B) {
- //CHECK-LABEL: @test_mm_mask_add_epi32
+ //CHECK-LABEL: test_mm_mask_add_epi32
//CHECK: add <4 x i32> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_add_epi32(__W,__U,__A,__B);
@@ -672,7 +673,7 @@ __m128i test_mm_mask_add_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
__m128i test_mm_maskz_add_epi32 (__mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_maskz_add_epi32
+ //CHECK-LABEL: test_mm_maskz_add_epi32
//CHECK: add <4 x i32> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_add_epi32 (__U,__A,__B);
@@ -680,14 +681,14 @@ __m128i test_mm_maskz_add_epi32 (__mmask8 __U, __m128i __A, __m128i __B) {
__m128i test_mm_mask_add_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
__m128i __B) {
- //CHECK-LABEL: @test_mm_mask_add_epi64
+ //CHECK-LABEL: test_mm_mask_add_epi64
//CHECK: add <2 x i64> %{{.*}}, %{{.*}}
//CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_add_epi64 (__W,__U,__A,__B);
}
__m128i test_mm_maskz_add_epi64 (__mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_maskz_add_epi64
+ //CHECK-LABEL: test_mm_maskz_add_epi64
//CHECK: add <2 x i64> %{{.*}}, %{{.*}}
//CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_add_epi64 (__U,__A,__B);
@@ -695,14 +696,14 @@ __m128i test_mm_maskz_add_epi64 (__mmask8 __U, __m128i __A, __m128i __B) {
__m128i test_mm_mask_sub_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
__m128i __B) {
- //CHECK-LABEL: @test_mm_mask_sub_epi32
+ //CHECK-LABEL: test_mm_mask_sub_epi32
//CHECK: sub <4 x i32> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_sub_epi32(__W, __U, __A, __B);
}
__m128i test_mm_maskz_sub_epi32 (__mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_maskz_sub_epi32
+ //CHECK-LABEL: test_mm_maskz_sub_epi32
//CHECK: sub <4 x i32> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_sub_epi32(__U, __A, __B);
@@ -710,14 +711,14 @@ __m128i test_mm_maskz_sub_epi32 (__mmask8 __U, __m128i __A, __m128i __B) {
__m128i test_mm_mask_sub_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
__m128i __B) {
- //CHECK-LABEL: @test_mm_mask_sub_epi64
+ //CHECK-LABEL: test_mm_mask_sub_epi64
//CHECK: sub <2 x i64> %{{.*}}, %{{.*}}
//CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_sub_epi64 (__W, __U, __A, __B);
}
__m128i test_mm_maskz_sub_epi64 (__mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_maskz_sub_epi64
+ //CHECK-LABEL: test_mm_maskz_sub_epi64
//CHECK: sub <2 x i64> %{{.*}}, %{{.*}}
//CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_sub_epi64 (__U, __A, __B);
@@ -725,7 +726,7 @@ __m128i test_mm_maskz_sub_epi64 (__mmask8 __U, __m128i __A, __m128i __B) {
__m256i test_mm256_mask_mul_epi32 (__m256i __W, __mmask8 __M, __m256i __X,
__m256i __Y) {
- //CHECK-LABEL: @test_mm256_mask_mul_epi32
+ //CHECK-LABEL: test_mm256_mask_mul_epi32
//CHECK: shl <4 x i64> %{{.*}}, splat (i64 32)
//CHECK: ashr <4 x i64> %{{.*}}, splat (i64 32)
//CHECK: shl <4 x i64> %{{.*}}, splat (i64 32)
@@ -736,7 +737,7 @@ __m256i test_mm256_mask_mul_epi32 (__m256i __W, __mmask8 __M, __m256i __X,
}
__m256i test_mm256_maskz_mul_epi32 (__mmask8 __M, __m256i __X, __m256i __Y) {
- //CHECK-LABEL: @test_mm256_maskz_mul_epi32
+ //CHECK-LABEL: test_mm256_maskz_mul_epi32
//CHECK: shl <4 x i64> %{{.*}}, splat (i64 32)
//CHECK: ashr <4 x i64> %{{.*}}, splat (i64 32)
//CHECK: shl <4 x i64> %{{.*}}, splat (i64 32)
@@ -749,7 +750,7 @@ __m256i test_mm256_maskz_mul_epi32 (__mmask8 __M, __m256i __X, __m256i __Y) {
__m128i test_mm_mask_mul_epi32 (__m128i __W, __mmask8 __M, __m128i __X,
__m128i __Y) {
- //CHECK-LABEL: @test_mm_mask_mul_epi32
+ //CHECK-LABEL: test_mm_mask_mul_epi32
//CHECK: shl <2 x i64> %{{.*}}, splat (i64 32)
//CHECK: ashr <2 x i64> %{{.*}}, splat (i64 32)
//CHECK: shl <2 x i64> %{{.*}}, splat (i64 32)
@@ -760,7 +761,7 @@ __m128i test_mm_mask_mul_epi32 (__m128i __W, __mmask8 __M, __m128i __X,
}
__m128i test_mm_maskz_mul_epi32 (__mmask8 __M, __m128i __X, __m128i __Y) {
- //CHECK-LABEL: @test_mm_maskz_mul_epi32
+ //CHECK-LABEL: test_mm_maskz_mul_epi32
//CHECK: shl <2 x i64> %{{.*}}, splat (i64 32)
//CHECK: ashr <2 x i64> %{{.*}}, splat (i64 32)
//CHECK: shl <2 x i64> %{{.*}}, splat (i64 32)
@@ -772,7 +773,7 @@ __m128i test_mm_maskz_mul_epi32 (__mmask8 __M, __m128i __X, __m128i __Y) {
__m256i test_mm256_mask_mul_epu32 (__m256i __W, __mmask8 __M, __m256i __X,
__m256i __Y) {
- //CHECK-LABEL: @test_mm256_mask_mul_epu32
+ //CHECK-LABEL: test_mm256_mask_mul_epu32
//CHECK: and <4 x i64> %{{.*}}, splat (i64 4294967295)
//CHECK: and <4 x i64> %{{.*}}, splat (i64 4294967295)
//CHECK: mul <4 x i64> %{{.*}}, %{{.*}}
@@ -781,7 +782,7 @@ __m256i test_mm256_mask_mul_epu32 (__m256i __W, __mmask8 __M, __m256i __X,
}
__m256i test_mm256_maskz_mul_epu32 (__mmask8 __M, __m256i __X, __m256i __Y) {
- //CHECK-LABEL: @test_mm256_maskz_mul_epu32
+ //CHECK-LABEL: test_mm256_maskz_mul_epu32
//CHECK: and <4 x i64> %{{.*}}, splat (i64 4294967295)
//CHECK: and <4 x i64> %{{.*}}, splat (i64 4294967295)
//CHECK: mul <4 x i64> %{{.*}}, %{{.*}}
@@ -791,7 +792,7 @@ __m256i test_mm256_maskz_mul_epu32 (__mmask8 __M, __m256i __X, __m256i __Y) {
__m128i test_mm_mask_mul_epu32 (__m128i __W, __mmask8 __M, __m128i __X,
__m128i __Y) {
- //CHECK-LABEL: @test_mm_mask_mul_epu32
+ //CHECK-LABEL: test_mm_mask_mul_epu32
//CHECK: and <2 x i64> %{{.*}}, splat (i64 4294967295)
//CHECK: and <2 x i64> %{{.*}}, splat (i64 4294967295)
//CHECK: mul <2 x i64> %{{.*}}, %{{.*}}
@@ -800,7 +801,7 @@ __m128i test_mm_mask_mul_epu32 (__m128i __W, __mmask8 __M, __m128i __X,
}
__m128i test_mm_maskz_mul_epu32 (__mmask8 __M, __m128i __X, __m128i __Y) {
- //CHECK-LABEL: @test_mm_maskz_mul_epu32
+ //CHECK-LABEL: test_mm_maskz_mul_epu32
//CHECK: and <2 x i64> %{{.*}}, splat (i64 4294967295)
//CHECK: and <2 x i64> %{{.*}}, splat (i64 4294967295)
//CHECK: mul <2 x i64> %{{.*}}, %{{.*}}
@@ -809,7 +810,7 @@ __m128i test_mm_maskz_mul_epu32 (__mmask8 __M, __m128i __X, __m128i __Y) {
}
__m128i test_mm_maskz_mullo_epi32 (__mmask8 __M, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_maskz_mullo_epi32
+ //CHECK-LABEL: test_mm_maskz_mullo_epi32
//CHECK: mul <4 x i32> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_mullo_epi32(__M, __A, __B);
@@ -817,14 +818,14 @@ __m128i test_mm_maskz_mullo_epi32 (__mmask8 __M, __m128i __A, __m128i __B) {
__m128i test_mm_mask_mullo_epi32 (__m128i __W, __mmask8 __M, __m128i __A,
__m128i __B) {
- //CHECK-LABEL: @test_mm_mask_mullo_epi32
+ //CHECK-LABEL: test_mm_mask_mullo_epi32
//CHECK: mul <4 x i32> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_mullo_epi32(__W, __M, __A, __B);
}
__m256i test_mm256_maskz_mullo_epi32 (__mmask8 __M, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_maskz_mullo_epi32
+ //CHECK-LABEL: test_mm256_maskz_mullo_epi32
//CHECK: mul <8 x i32> %{{.*}}, %{{.*}}
//CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_mullo_epi32(__M, __A, __B);
@@ -832,61 +833,61 @@ __m256i test_mm256_maskz_mullo_epi32 (__mmask8 __M, __m256i __A, __m256i __B) {
__m256i test_mm256_mask_mullo_epi32 (__m256i __W, __mmask8 __M, __m256i __A,
__m256i __B) {
- //CHECK-LABEL: @test_mm256_mask_mullo_epi32
+ //CHECK-LABEL: test_mm256_mask_mullo_epi32
//CHECK: mul <8 x i32> %{{.*}}, %{{.*}}
//CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_mullo_epi32(__W, __M, __A, __B);
}
__m256i test_mm256_and_epi32 (__m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_and_epi32
+ //CHECK-LABEL: test_mm256_and_epi32
//CHECK: and <8 x i32> %{{.*}}, %{{.*}}
return _mm256_and_epi32(__A, __B);
}
__m256i test_mm256_mask_and_epi32 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_mask_and_epi32
+ //CHECK-LABEL: test_mm256_mask_and_epi32
//CHECK: and <8 x i32> %{{.*}}, %{{.*}}
//CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_and_epi32(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_and_epi32 (__mmask8 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_maskz_and_epi32
+ //CHECK-LABEL: test_mm256_maskz_and_epi32
//CHECK: and <8 x i32> %{{.*}}, %{{.*}}
//CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_and_epi32(__U, __A, __B);
}
__m128i test_mm_and_epi32 (__m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_and_epi32
+ //CHECK-LABEL: test_mm_and_epi32
//CHECK: and <4 x i32> %{{.*}}, %{{.*}}
return _mm_and_epi32(__A, __B);
}
__m128i test_mm_mask_and_epi32 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_mask_and_epi32
+ //CHECK-LABEL: test_mm_mask_and_epi32
//CHECK: and <4 x i32> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_and_epi32(__W, __U, __A, __B);
}
__m128i test_mm_maskz_and_epi32 (__mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_maskz_and_epi32
+ //CHECK-LABEL: test_mm_maskz_and_epi32
//CHECK: and <4 x i32> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_and_epi32(__U, __A, __B);
}
__m256i test_mm256_andnot_epi32 (__m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_andnot_epi32
+ //CHECK-LABEL: test_mm256_andnot_epi32
//CHECK: xor <8 x i32> %{{.*}}, splat (i32 -1)
//CHECK: and <8 x i32> %{{.*}}, %{{.*}}
return _mm256_andnot_epi32(__A, __B);
}
__m256i test_mm256_mask_andnot_epi32 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_mask_andnot_epi32
+ //CHECK-LABEL: test_mm256_mask_andnot_epi32
//CHECK: xor <8 x i32> %{{.*}}, splat (i32 -1)
//CHECK: and <8 x i32> %{{.*}}, %{{.*}}
//CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
@@ -894,7 +895,7 @@ __m256i test_mm256_mask_andnot_epi32 (__m256i __W, __mmask8 __U, __m256i __A, __
}
__m256i test_mm256_maskz_andnot_epi32 (__mmask8 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_maskz_andnot_epi32
+ //CHECK-LABEL: test_mm256_maskz_andnot_epi32
//CHECK: xor <8 x i32> %{{.*}}, splat (i32 -1)
//CHECK: and <8 x i32> %{{.*}}, %{{.*}}
//CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
@@ -902,14 +903,14 @@ __m256i test_mm256_maskz_andnot_epi32 (__mmask8 __U, __m256i __A, __m256i __B) {
}
__m128i test_mm_andnot_epi32 (__m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_andnot_epi32
+ //CHECK-LABEL: test_mm_andnot_epi32
//CHECK: xor <4 x i32> %{{.*}}, splat (i32 -1)
//CHECK: and <4 x i32> %{{.*}}, %{{.*}}
return _mm_andnot_epi32(__A, __B);
}
__m128i test_mm_mask_andnot_epi32 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_mask_andnot_epi32
+ //CHECK-LABEL: test_mm_mask_andnot_epi32
//CHECK: xor <4 x i32> %{{.*}}, splat (i32 -1)
//CHECK: and <4 x i32> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
@@ -917,7 +918,7 @@ __m128i test_mm_mask_andnot_epi32 (__m128i __W, __mmask8 __U, __m128i __A, __m12
}
__m128i test_mm_maskz_andnot_epi32 (__mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_maskz_andnot_epi32
+ //CHECK-LABEL: test_mm_maskz_andnot_epi32
//CHECK: xor <4 x i32> %{{.*}}, splat (i32 -1)
//CHECK: and <4 x i32> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
@@ -925,134 +926,134 @@ __m128i test_mm_maskz_andnot_epi32 (__mmask8 __U, __m128i __A, __m128i __B) {
}
__m256i test_mm256_or_epi32 (__m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_or_epi32
+ //CHECK-LABEL: test_mm256_or_epi32
//CHECK: or <8 x i32> %{{.*}}, %{{.*}}
return _mm256_or_epi32(__A, __B);
}
__m256i test_mm256_mask_or_epi32 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_mask_or_epi32
+ //CHECK-LABEL: test_mm256_mask_or_epi32
//CHECK: or <8 x i32> %{{.*}}, %{{.*}}
//CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_or_epi32(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_or_epi32 (__mmask8 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_maskz_or_epi32
+ //CHECK-LABEL: test_mm256_maskz_or_epi32
//CHECK: or <8 x i32> %{{.*}}, %{{.*}}
//CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_or_epi32(__U, __A, __B);
}
__m128i test_mm_or_epi32 (__m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_or_epi32
+ //CHECK-LABEL: test_mm_or_epi32
//CHECK: or <4 x i32> %{{.*}}, %{{.*}}
return _mm_or_epi32(__A, __B);
}
__m128i test_mm_mask_or_epi32 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_mask_or_epi32
+ //CHECK-LABEL: test_mm_mask_or_epi32
//CHECK: or <4 x i32> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_or_epi32(__W, __U, __A, __B);
}
__m128i test_mm_maskz_or_epi32 (__mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_maskz_or_epi32
+ //CHECK-LABEL: test_mm_maskz_or_epi32
//CHECK: or <4 x i32> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_or_epi32(__U, __A, __B);
}
__m256i test_mm256_xor_epi32 (__m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_xor_epi32
+ //CHECK-LABEL: test_mm256_xor_epi32
//CHECK: xor <8 x i32> %{{.*}}, %{{.*}}
return _mm256_xor_epi32(__A, __B);
}
__m256i test_mm256_mask_xor_epi32 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_mask_xor_epi32
+ //CHECK-LABEL: test_mm256_mask_xor_epi32
//CHECK: xor <8 x i32> %{{.*}}, %{{.*}}
//CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_xor_epi32(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_xor_epi32 (__mmask8 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_maskz_xor_epi32
+ //CHECK-LABEL: test_mm256_maskz_xor_epi32
//CHECK: xor <8 x i32> %{{.*}}, %{{.*}}
//CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_xor_epi32(__U, __A, __B);
}
__m128i test_mm_xor_epi32 (__m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_xor_epi32
+ //CHECK-LABEL: test_mm_xor_epi32
//CHECK: xor <4 x i32> %{{.*}}, %{{.*}}
return _mm_xor_epi32(__A, __B);
}
__m128i test_mm_mask_xor_epi32 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_mask_xor_epi32
+ //CHECK-LABEL: test_mm_mask_xor_epi32
//CHECK: xor <4 x i32> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_xor_epi32(__W, __U, __A, __B);
}
__m128i test_mm_maskz_xor_epi32 (__mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_maskz_xor_epi32
+ //CHECK-LABEL: test_mm_maskz_xor_epi32
//CHECK: xor <4 x i32> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_xor_epi32(__U, __A, __B);
}
__m256i test_mm256_and_epi64 (__m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_and_epi64
+ //CHECK-LABEL: test_mm256_and_epi64
//CHECK: and <4 x i64> %{{.*}}, %{{.*}}
return _mm256_and_epi64(__A, __B);
}
__m256i test_mm256_mask_and_epi64 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_mask_and_epi64
+ //CHECK-LABEL: test_mm256_mask_and_epi64
//CHECK: and <4 x i64> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_and_epi64(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_and_epi64 (__mmask8 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_maskz_and_epi64
+ //CHECK-LABEL: test_mm256_maskz_and_epi64
//CHECK: and <4 x i64> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_and_epi64(__U, __A, __B);
}
__m128i test_mm_and_epi64 (__m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_and_epi64
+ //CHECK-LABEL: test_mm_and_epi64
//CHECK: and <2 x i64> %{{.*}}, %{{.*}}
return _mm_and_epi64(__A, __B);
}
__m128i test_mm_mask_and_epi64 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_mask_and_epi64
+ //CHECK-LABEL: test_mm_mask_and_epi64
//CHECK: and <2 x i64> %{{.*}}, %{{.*}}
//CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_and_epi64(__W,__U, __A, __B);
}
__m128i test_mm_maskz_and_epi64 (__mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_maskz_and_epi64
+ //CHECK-LABEL: test_mm_maskz_and_epi64
//CHECK: and <2 x i64> %{{.*}}, %{{.*}}
//CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_and_epi64(__U, __A, __B);
}
__m256i test_mm256_andnot_epi64 (__m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_andnot_epi64
+ //CHECK-LABEL: test_mm256_andnot_epi64
//CHECK: xor <4 x i64> %{{.*}}, splat (i64 -1)
//CHECK: and <4 x i64> %{{.*}}, %{{.*}}
return _mm256_andnot_epi64(__A, __B);
}
__m256i test_mm256_mask_andnot_epi64 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_mask_andnot_epi64
+ //CHECK-LABEL: test_mm256_mask_andnot_epi64
//CHECK: xor <4 x i64> %{{.*}}, splat (i64 -1)
//CHECK: and <4 x i64> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
@@ -1060,7 +1061,7 @@ __m256i test_mm256_mask_andnot_epi64 (__m256i __W, __mmask8 __U, __m256i __A, __
}
__m256i test_mm256_maskz_andnot_epi64 (__mmask8 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_maskz_andnot_epi64
+ //CHECK-LABEL: test_mm256_maskz_andnot_epi64
//CHECK: xor <4 x i64> %{{.*}}, splat (i64 -1)
//CHECK: and <4 x i64> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
@@ -1068,14 +1069,14 @@ __m256i test_mm256_maskz_andnot_epi64 (__mmask8 __U, __m256i __A, __m256i __B) {
}
__m128i test_mm_andnot_epi64 (__m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_andnot_epi64
+ //CHECK-LABEL: test_mm_andnot_epi64
//CHECK: xor <2 x i64> %{{.*}}, splat (i64 -1)
//CHECK: and <2 x i64> %{{.*}}, %{{.*}}
return _mm_andnot_epi64(__A, __B);
}
__m128i test_mm_mask_andnot_epi64 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_mask_andnot_epi64
+ //CHECK-LABEL: test_mm_mask_andnot_epi64
//CHECK: xor <2 x i64> %{{.*}}, splat (i64 -1)
//CHECK: and <2 x i64> %{{.*}}, %{{.*}}
//CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
@@ -1083,7 +1084,7 @@ __m128i test_mm_mask_andnot_epi64 (__m128i __W, __mmask8 __U, __m128i __A, __m12
}
__m128i test_mm_maskz_andnot_epi64 (__mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_maskz_andnot_epi64
+ //CHECK-LABEL: test_mm_maskz_andnot_epi64
//CHECK: xor <2 x i64> %{{.*}}, splat (i64 -1)
//CHECK: and <2 x i64> %{{.*}}, %{{.*}}
//CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
@@ -1091,87 +1092,87 @@ __m128i test_mm_maskz_andnot_epi64 (__mmask8 __U, __m128i __A, __m128i __B) {
}
__m256i test_mm256_or_epi64 (__m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_or_epi64
+ //CHECK-LABEL: test_mm256_or_epi64
//CHECK: or <4 x i64> %{{.*}}, %{{.*}}
return _mm256_or_epi64(__A, __B);
}
__m256i test_mm256_mask_or_epi64 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_mask_or_epi64
+ //CHECK-LABEL: test_mm256_mask_or_epi64
//CHECK: or <4 x i64> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_or_epi64(__W,__U, __A, __B);
}
__m256i test_mm256_maskz_or_epi64 (__mmask8 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_maskz_or_epi64
+ //CHECK-LABEL: test_mm256_maskz_or_epi64
//CHECK: or <4 x i64> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_or_epi64(__U, __A, __B);
}
__m128i test_mm_or_epi64 (__m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_or_epi64
+ //CHECK-LABEL: test_mm_or_epi64
//CHECK: or <2 x i64> %{{.*}}, %{{.*}}
return _mm_or_epi64(__A, __B);
}
__m128i test_mm_mask_or_epi64 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_mask_or_epi64
+ //CHECK-LABEL: test_mm_mask_or_epi64
//CHECK: or <2 x i64> %{{.*}}, %{{.*}}
//CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_or_epi64(__W, __U, __A, __B);
}
__m128i test_mm_maskz_or_epi64 (__mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_maskz_or_epi64
+ //CHECK-LABEL: test_mm_maskz_or_epi64
//CHECK: or <2 x i64> %{{.*}}, %{{.*}}
//CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_or_epi64( __U, __A, __B);
}
__m256i test_mm256_xor_epi64 (__m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_xor_epi64
+ //CHECK-LABEL: test_mm256_xor_epi64
//CHECK: xor <4 x i64> %{{.*}}, %{{.*}}
return _mm256_xor_epi64(__A, __B);
}
__m256i test_mm256_mask_xor_epi64 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_mask_xor_epi64
+ //CHECK-LABEL: test_mm256_mask_xor_epi64
//CHECK: xor <4 x i64> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_xor_epi64(__W,__U, __A, __B);
}
__m256i test_mm256_maskz_xor_epi64 (__mmask8 __U, __m256i __A, __m256i __B) {
- //CHECK-LABEL: @test_mm256_maskz_xor_epi64
+ //CHECK-LABEL: test_mm256_maskz_xor_epi64
//CHECK: xor <4 x i64> %{{.*}}, %{{.*}}
//CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_xor_epi64(__U, __A, __B);
}
__m128i test_mm_xor_epi64 (__m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_xor_epi64
+ //CHECK-LABEL: test_mm_xor_epi64
//CHECK: xor <2 x i64> %{{.*}}, %{{.*}}
return _mm_xor_epi64(__A, __B);
}
__m128i test_mm_mask_xor_epi64 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_mask_xor_epi64
+ //CHECK-LABEL: test_mm_mask_xor_epi64
//CHECK: xor <2 x i64> %{{.*}}, %{{.*}}
//CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_xor_epi64(__W, __U, __A, __B);
}
__m128i test_mm_maskz_xor_epi64 (__mmask8 __U, __m128i __A, __m128i __B) {
- //CHECK-LABEL: @test_mm_maskz_xor_epi64
+ //CHECK-LABEL: test_mm_maskz_xor_epi64
//CHECK: xor <2 x i64> %{{.*}}, %{{.*}}
//CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_xor_epi64( __U, __A, __B);
}
__mmask8 test_mm256_cmp_ps_mask_eq_oq(__m256 a, __m256 b) {
- // CHECK-LABEL: @test_mm256_cmp_ps_mask_eq_oq
+ // CHECK-LABEL: test_mm256_cmp_ps_mask_eq_oq
// CHECK: fcmp oeq <8 x float> %{{.*}}, %{{.*}}
return _mm256_cmp_ps_mask(a, b, _CMP_EQ_OQ);
}
@@ -1363,7 +1364,7 @@ __mmask8 test_mm256_cmp_ps_mask_true_us(__m256 a, __m256 b) {
}
__mmask8 test_mm256_mask_cmp_ps_mask_eq_oq(__mmask8 m, __m256 a, __m256 b) {
- // CHECK-LABEL: @test_mm256_mask_cmp_ps_mask_eq_oq
+ // CHECK-LABEL: test_mm256_mask_cmp_ps_mask_eq_oq
// CHECK: [[CMP:%.*]] = fcmp oeq <8 x float> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> [[CMP]], {{.*}}
return _mm256_mask_cmp_ps_mask(m, a, b, _CMP_EQ_OQ);
@@ -1587,7 +1588,7 @@ __mmask8 test_mm256_mask_cmp_ps_mask_true_us(__mmask8 m, __m256 a, __m256 b) {
}
__mmask8 test_mm256_cmp_pd_mask_eq_oq(__m256d a, __m256d b) {
- // CHECK-LABEL: @test_mm256_cmp_pd_mask_eq_oq
+ // CHECK-LABEL: test_mm256_cmp_pd_mask_eq_oq
// CHECK: fcmp oeq <4 x double> %{{.*}}, %{{.*}}
return _mm256_cmp_pd_mask(a, b, _CMP_EQ_OQ);
}
@@ -1779,7 +1780,7 @@ __mmask8 test_mm256_cmp_pd_mask_true_us(__m256d a, __m256d b) {
}
__mmask8 test_mm256_mask_cmp_pd_mask_eq_oq(__mmask8 m, __m256d a, __m256d b) {
- // CHECK-LABEL: @test_mm256_mask_cmp_pd_mask_eq_oq
+ // CHECK-LABEL: test_mm256_mask_cmp_pd_mask_eq_oq
// CHECK: [[CMP:%.*]] = fcmp oeq <4 x double> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> [[CMP]], {{.*}}
return _mm256_mask_cmp_pd_mask(m, a, b, _CMP_EQ_OQ);
@@ -2003,7 +2004,7 @@ __mmask8 test_mm256_mask_cmp_pd_mask_true_us(__mmask8 m, __m256d a, __m256d b) {
}
__mmask8 test_mm_cmp_ps_mask_eq_oq(__m128 a, __m128 b) {
- // CHECK-LABEL: @test_mm_cmp_ps_mask_eq_oq
+ // CHECK-LABEL: test_mm_cmp_ps_mask_eq_oq
// CHECK: fcmp oeq <4 x float> %{{.*}}, %{{.*}}
return _mm_cmp_ps_mask(a, b, _CMP_EQ_OQ);
}
@@ -2195,7 +2196,7 @@ __mmask8 test_mm_cmp_ps_mask_true_us(__m128 a, __m128 b) {
}
__mmask8 test_mm_mask_cmp_ps_mask_eq_oq(__mmask8 m, __m128 a, __m128 b) {
- // CHECK-LABEL: @test_mm_mask_cmp_ps_mask_eq_oq
+ // CHECK-LABEL: test_mm_mask_cmp_ps_mask_eq_oq
// CHECK: [[CMP:%.*]] = fcmp oeq <4 x float> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> [[CMP]], {{.*}}
return _mm_mask_cmp_ps_mask(m, a, b, _CMP_EQ_OQ);
@@ -2419,7 +2420,7 @@ __mmask8 test_mm_mask_cmp_ps_mask_true_us(__mmask8 m, __m128 a, __m128 b) {
}
__mmask8 test_mm_cmp_pd_mask_eq_oq(__m128d a, __m128d b) {
- // CHECK-LABEL: @test_mm_cmp_pd_mask_eq_oq
+ // CHECK-LABEL: test_mm_cmp_pd_mask_eq_oq
// CHECK: fcmp oeq <2 x double> %{{.*}}, %{{.*}}
return _mm_cmp_pd_mask(a, b, _CMP_EQ_OQ);
}
@@ -2611,7 +2612,7 @@ __mmask8 test_mm_cmp_pd_mask_true_us(__m128d a, __m128d b) {
}
__mmask8 test_mm_mask_cmp_pd_mask_eq_oq(__mmask8 m, __m128d a, __m128d b) {
- // CHECK-LABEL: @test_mm_mask_cmp_pd_mask_eq_oq
+ // CHECK-LABEL: test_mm_mask_cmp_pd_mask_eq_oq
// CHECK: [[CMP:%.*]] = fcmp oeq <2 x double> %{{.*}}, %{{.*}}
// CHECK: and <2 x i1> [[CMP]], {{.*}}
return _mm_mask_cmp_pd_mask(m, a, b, _CMP_EQ_OQ);
@@ -2835,7 +2836,7 @@ __mmask8 test_mm_mask_cmp_pd_mask_true_us(__mmask8 m, __m128d a, __m128d b) {
}
__m128d test_mm_mask_fmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) {
- // CHECK-LABEL: @test_mm_mask_fmadd_pd
+ // CHECK-LABEL: test_mm_mask_fmadd_pd
// CHECK: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <2 x i32> <i32 0, i32 1>
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
@@ -2843,7 +2844,7 @@ __m128d test_mm_mask_fmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __
}
__m128d test_mm_mask_fmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) {
- // CHECK-LABEL: @test_mm_mask_fmsub_pd
+ // CHECK-LABEL: test_mm_mask_fmsub_pd
// CHECK: fneg <2 x double> %{{.*}}
// CHECK: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <2 x i32> <i32 0, i32 1>
@@ -2852,7 +2853,7 @@ __m128d test_mm_mask_fmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __
}
__m128d test_mm_mask3_fmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fmadd_pd
+ // CHECK-LABEL: test_mm_mask3_fmadd_pd
// CHECK: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <2 x i32> <i32 0, i32 1>
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
@@ -2860,7 +2861,7 @@ __m128d test_mm_mask3_fmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 _
}
__m128d test_mm_mask3_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fnmadd_pd
+ // CHECK-LABEL: test_mm_mask3_fnmadd_pd
// CHECK: fneg <2 x double> %{{.*}}
// CHECK: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <2 x i32> <i32 0, i32 1>
@@ -2869,7 +2870,7 @@ __m128d test_mm_mask3_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8
}
__m128d test_mm_maskz_fmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) {
- // CHECK-LABEL: @test_mm_maskz_fmadd_pd
+ // CHECK-LABEL: test_mm_maskz_fmadd_pd
// CHECK: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <2 x i32> <i32 0, i32 1>
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
@@ -2877,7 +2878,7 @@ __m128d test_mm_maskz_fmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d _
}
__m128d test_mm_maskz_fmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) {
- // CHECK-LABEL: @test_mm_maskz_fmsub_pd
+ // CHECK-LABEL: test_mm_maskz_fmsub_pd
// CHECK: fneg <2 x double> %{{.*}}
// CHECK: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <2 x i32> <i32 0, i32 1>
@@ -2886,7 +2887,7 @@ __m128d test_mm_maskz_fmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d _
}
__m128d test_mm_maskz_fnmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) {
- // CHECK-LABEL: @test_mm_maskz_fnmadd_pd
+ // CHECK-LABEL: test_mm_maskz_fnmadd_pd
// CHECK: fneg <2 x double> %{{.*}}
// CHECK: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <2 x i32> <i32 0, i32 1>
@@ -2895,7 +2896,7 @@ __m128d test_mm_maskz_fnmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d
}
__m128d test_mm_maskz_fnmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) {
- // CHECK-LABEL: @test_mm_maskz_fnmsub_pd
+ // CHECK-LABEL: test_mm_maskz_fnmsub_pd
// CHECK: fneg <2 x double> %{{.*}}
// CHECK: fneg <2 x double> %{{.*}}
// CHECK: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
@@ -2905,7 +2906,7 @@ __m128d test_mm_maskz_fnmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d
}
__m256d test_mm256_mask_fmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C) {
- // CHECK-LABEL: @test_mm256_mask_fmadd_pd
+ // CHECK-LABEL: test_mm256_mask_fmadd_pd
// CHECK: call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
@@ -2913,7 +2914,7 @@ __m256d test_mm256_mask_fmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d
}
__m256d test_mm256_mask_fmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C) {
- // CHECK-LABEL: @test_mm256_mask_fmsub_pd
+ // CHECK-LABEL: test_mm256_mask_fmsub_pd
// CHECK: fneg <4 x double> %{{.*}}
// CHECK: call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -2922,7 +2923,7 @@ __m256d test_mm256_mask_fmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d
}
__m256d test_mm256_mask3_fmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm256_mask3_fmadd_pd
+ // CHECK-LABEL: test_mm256_mask3_fmadd_pd
// CHECK: call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
@@ -2930,7 +2931,7 @@ __m256d test_mm256_mask3_fmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask
}
__m256d test_mm256_mask3_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm256_mask3_fnmadd_pd
+ // CHECK-LABEL: test_mm256_mask3_fnmadd_pd
// CHECK: fneg <4 x double> %{{.*}}
// CHECK: call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -2939,7 +2940,7 @@ __m256d test_mm256_mask3_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmas
}
__m256d test_mm256_maskz_fmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) {
- // CHECK-LABEL: @test_mm256_maskz_fmadd_pd
+ // CHECK-LABEL: test_mm256_maskz_fmadd_pd
// CHECK: call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
@@ -2947,7 +2948,7 @@ __m256d test_mm256_maskz_fmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256
}
__m256d test_mm256_maskz_fmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) {
- // CHECK-LABEL: @test_mm256_maskz_fmsub_pd
+ // CHECK-LABEL: test_mm256_maskz_fmsub_pd
// CHECK: fneg <4 x double> %{{.*}}
// CHECK: call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -2956,7 +2957,7 @@ __m256d test_mm256_maskz_fmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256
}
__m256d test_mm256_maskz_fnmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) {
- // CHECK-LABEL: @test_mm256_maskz_fnmadd_pd
+ // CHECK-LABEL: test_mm256_maskz_fnmadd_pd
// CHECK: fneg <4 x double> %{{.*}}
// CHECK: call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -2965,7 +2966,7 @@ __m256d test_mm256_maskz_fnmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m25
}
__m256d test_mm256_maskz_fnmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) {
- // CHECK-LABEL: @test_mm256_maskz_fnmsub_pd
+ // CHECK-LABEL: test_mm256_maskz_fnmsub_pd
// CHECK: fneg <4 x double> %{{.*}}
// CHECK: fneg <4 x double> %{{.*}}
// CHECK: call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
@@ -2975,7 +2976,7 @@ __m256d test_mm256_maskz_fnmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m25
}
__m128 test_mm_mask_fmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) {
- // CHECK-LABEL: @test_mm_mask_fmadd_ps
+ // CHECK-LABEL: test_mm_mask_fmadd_ps
// CHECK: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
@@ -2983,7 +2984,7 @@ __m128 test_mm_mask_fmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) {
}
__m128 test_mm_mask_fmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) {
- // CHECK-LABEL: @test_mm_mask_fmsub_ps
+ // CHECK-LABEL: test_mm_mask_fmsub_ps
// CHECK: fneg <4 x float> %{{.*}}
// CHECK: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -2992,7 +2993,7 @@ __m128 test_mm_mask_fmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) {
}
__m128 test_mm_mask3_fmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fmadd_ps
+ // CHECK-LABEL: test_mm_mask3_fmadd_ps
// CHECK: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
@@ -3000,7 +3001,7 @@ __m128 test_mm_mask3_fmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
}
__m128 test_mm_mask3_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fnmadd_ps
+ // CHECK-LABEL: test_mm_mask3_fnmadd_ps
// CHECK: fneg <4 x float> %{{.*}}
// CHECK: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -3009,7 +3010,7 @@ __m128 test_mm_mask3_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
}
__m128 test_mm_maskz_fmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) {
- // CHECK-LABEL: @test_mm_maskz_fmadd_ps
+ // CHECK-LABEL: test_mm_maskz_fmadd_ps
// CHECK: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
@@ -3017,7 +3018,7 @@ __m128 test_mm_maskz_fmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
}
__m128 test_mm_maskz_fmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) {
- // CHECK-LABEL: @test_mm_maskz_fmsub_ps
+ // CHECK-LABEL: test_mm_maskz_fmsub_ps
// CHECK: fneg <4 x float> %{{.*}}
// CHECK: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -3026,7 +3027,7 @@ __m128 test_mm_maskz_fmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
}
__m128 test_mm_maskz_fnmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) {
- // CHECK-LABEL: @test_mm_maskz_fnmadd_ps
+ // CHECK-LABEL: test_mm_maskz_fnmadd_ps
// CHECK: fneg <4 x float> %{{.*}}
// CHECK: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -3035,7 +3036,7 @@ __m128 test_mm_maskz_fnmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
}
__m128 test_mm_maskz_fnmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) {
- // CHECK-LABEL: @test_mm_maskz_fnmsub_ps
+ // CHECK-LABEL: test_mm_maskz_fnmsub_ps
// CHECK: fneg <4 x float> %{{.*}}
// CHECK: fneg <4 x float> %{{.*}}
// CHECK: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
@@ -3045,14 +3046,14 @@ __m128 test_mm_maskz_fnmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
}
__m256 test_mm256_mask_fmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C) {
- // CHECK-LABEL: @test_mm256_mask_fmadd_ps
+ // CHECK-LABEL: test_mm256_mask_fmadd_ps
// CHECK: call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_fmadd_ps(__A, __U, __B, __C);
}
__m256 test_mm256_mask_fmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C) {
- // CHECK-LABEL: @test_mm256_mask_fmsub_ps
+ // CHECK-LABEL: test_mm256_mask_fmsub_ps
// CHECK: fneg <8 x float> %{{.*}}
// CHECK: call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
@@ -3060,14 +3061,14 @@ __m256 test_mm256_mask_fmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C
}
__m256 test_mm256_mask3_fmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm256_mask3_fmadd_ps
+ // CHECK-LABEL: test_mm256_mask3_fmadd_ps
// CHECK: call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask3_fmadd_ps(__A, __B, __C, __U);
}
__m256 test_mm256_mask3_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm256_mask3_fnmadd_ps
+ // CHECK-LABEL: test_mm256_mask3_fnmadd_ps
// CHECK: fneg <8 x float> %{{.*}}
// CHECK: call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
@@ -3075,14 +3076,14 @@ __m256 test_mm256_mask3_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 _
}
__m256 test_mm256_maskz_fmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C) {
- // CHECK-LABEL: @test_mm256_maskz_fmadd_ps
+ // CHECK-LABEL: test_mm256_maskz_fmadd_ps
// CHECK: call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_fmadd_ps(__U, __A, __B, __C);
}
__m256 test_mm256_maskz_fmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C) {
- // CHECK-LABEL: @test_mm256_maskz_fmsub_ps
+ // CHECK-LABEL: test_mm256_maskz_fmsub_ps
// CHECK: fneg <8 x float> %{{.*}}
// CHECK: call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
@@ -3090,7 +3091,7 @@ __m256 test_mm256_maskz_fmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __
}
__m256 test_mm256_maskz_fnmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C) {
- // CHECK-LABEL: @test_mm256_maskz_fnmadd_ps
+ // CHECK-LABEL: test_mm256_maskz_fnmadd_ps
// CHECK: fneg <8 x float> %{{.*}}
// CHECK: call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
@@ -3098,7 +3099,7 @@ __m256 test_mm256_maskz_fnmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 _
}
__m256 test_mm256_maskz_fnmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C) {
- // CHECK-LABEL: @test_mm256_maskz_fnmsub_ps
+ // CHECK-LABEL: test_mm256_maskz_fnmsub_ps
// CHECK: fneg <8 x float> %{{.*}}
// CHECK: fneg <8 x float> %{{.*}}
// CHECK: call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
@@ -3107,7 +3108,7 @@ __m256 test_mm256_maskz_fnmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 _
}
__m128d test_mm_mask_fmaddsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) {
- // CHECK-LABEL: @test_mm_mask_fmaddsub_pd
+ // CHECK-LABEL: test_mm_mask_fmaddsub_pd
// CHECK-NOT: fneg
// CHECK: call <2 x double> @llvm.x86.fma.vfmaddsub.pd(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <2 x i32> <i32 0, i32 1>
@@ -3116,7 +3117,7 @@ __m128d test_mm_mask_fmaddsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d
}
__m128d test_mm_mask_fmsubadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) {
- // CHECK-LABEL: @test_mm_mask_fmsubadd_pd
+ // CHECK-LABEL: test_mm_mask_fmsubadd_pd
// CHECK: [[NEG:%.+]] = fneg <2 x double> %{{.*}}
// CHECK: call <2 x double> @llvm.x86.fma.vfmaddsub.pd(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> [[NEG]])
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <2 x i32> <i32 0, i32 1>
@@ -3125,7 +3126,7 @@ __m128d test_mm_mask_fmsubadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d
}
__m128d test_mm_mask3_fmaddsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fmaddsub_pd
+ // CHECK-LABEL: test_mm_mask3_fmaddsub_pd
// CHECK-NOT: fneg
// CHECK: call <2 x double> @llvm.x86.fma.vfmaddsub.pd(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <2 x i32> <i32 0, i32 1>
@@ -3134,7 +3135,7 @@ __m128d test_mm_mask3_fmaddsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask
}
__m128d test_mm_maskz_fmaddsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) {
- // CHECK-LABEL: @test_mm_maskz_fmaddsub_pd
+ // CHECK-LABEL: test_mm_maskz_fmaddsub_pd
// CHECK-NOT: fneg
// CHECK: call <2 x double> @llvm.x86.fma.vfmaddsub.pd(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <2 x i32> <i32 0, i32 1>
@@ -3143,7 +3144,7 @@ __m128d test_mm_maskz_fmaddsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128
}
__m128d test_mm_maskz_fmsubadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) {
- // CHECK-LABEL: @test_mm_maskz_fmsubadd_pd
+ // CHECK-LABEL: test_mm_maskz_fmsubadd_pd
// CHECK: [[NEG:%.+]] = fneg <2 x double> %{{.*}}
// CHECK: call <2 x double> @llvm.x86.fma.vfmaddsub.pd(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> [[NEG]])
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <2 x i32> <i32 0, i32 1>
@@ -3152,7 +3153,7 @@ __m128d test_mm_maskz_fmsubadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128
}
__m256d test_mm256_mask_fmaddsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C) {
- // CHECK-LABEL: @test_mm256_mask_fmaddsub_pd
+ // CHECK-LABEL: test_mm256_mask_fmaddsub_pd
// CHECK-NOT: fneg
// CHECK: call <4 x double> @llvm.x86.fma.vfmaddsub.pd.256(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -3161,7 +3162,7 @@ __m256d test_mm256_mask_fmaddsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m2
}
__m256d test_mm256_mask_fmsubadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C) {
- // CHECK-LABEL: @test_mm256_mask_fmsubadd_pd
+ // CHECK-LABEL: test_mm256_mask_fmsubadd_pd
// CHECK: [[NEG:%.+]] = fneg <4 x double> %{{.*}}
// CHECK: call <4 x double> @llvm.x86.fma.vfmaddsub.pd.256(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> [[NEG]])
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -3170,7 +3171,7 @@ __m256d test_mm256_mask_fmsubadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m2
}
__m256d test_mm256_mask3_fmaddsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm256_mask3_fmaddsub_pd
+ // CHECK-LABEL: test_mm256_mask3_fmaddsub_pd
// CHECK-NOT: fneg
// CHECK: call <4 x double> @llvm.x86.fma.vfmaddsub.pd.256(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -3179,7 +3180,7 @@ __m256d test_mm256_mask3_fmaddsub_pd(__m256d __A, __m256d __B, __m256d __C, __mm
}
__m256d test_mm256_maskz_fmaddsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) {
- // CHECK-LABEL: @test_mm256_maskz_fmaddsub_pd
+ // CHECK-LABEL: test_mm256_maskz_fmaddsub_pd
// CHECK-NOT: fneg
// CHECK: call <4 x double> @llvm.x86.fma.vfmaddsub.pd.256(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -3188,7 +3189,7 @@ __m256d test_mm256_maskz_fmaddsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m
}
__m256d test_mm256_maskz_fmsubadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) {
- // CHECK-LABEL: @test_mm256_maskz_fmsubadd_pd
+ // CHECK-LABEL: test_mm256_maskz_fmsubadd_pd
// CHECK: [[NEG:%.+]] = fneg <4 x double> %{{.*}}
// CHECK: call <4 x double> @llvm.x86.fma.vfmaddsub.pd.256(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> [[NEG]])
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -3197,7 +3198,7 @@ __m256d test_mm256_maskz_fmsubadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m
}
__m128 test_mm_mask_fmaddsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) {
- // CHECK-LABEL: @test_mm_mask_fmaddsub_ps
+ // CHECK-LABEL: test_mm_mask_fmaddsub_ps
// CHECK-NOT: fneg
// CHECK: call <4 x float> @llvm.x86.fma.vfmaddsub.ps(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -3206,7 +3207,7 @@ __m128 test_mm_mask_fmaddsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C
}
__m128 test_mm_mask_fmsubadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) {
- // CHECK-LABEL: @test_mm_mask_fmsubadd_ps
+ // CHECK-LABEL: test_mm_mask_fmsubadd_ps
// CHECK: [[NEG:%.+]] = fneg <4 x float> %{{.*}}
// CHECK: call <4 x float> @llvm.x86.fma.vfmaddsub.ps(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> [[NEG]])
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -3215,7 +3216,7 @@ __m128 test_mm_mask_fmsubadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C
}
__m128 test_mm_mask3_fmaddsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fmaddsub_ps
+ // CHECK-LABEL: test_mm_mask3_fmaddsub_ps
// CHECK-NOT: fneg
// CHECK: call <4 x float> @llvm.x86.fma.vfmaddsub.ps(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -3224,7 +3225,7 @@ __m128 test_mm_mask3_fmaddsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __
}
__m128 test_mm_maskz_fmaddsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) {
- // CHECK-LABEL: @test_mm_maskz_fmaddsub_ps
+ // CHECK-LABEL: test_mm_maskz_fmaddsub_ps
// CHECK-NOT: fneg
// CHECK: call <4 x float> @llvm.x86.fma.vfmaddsub.ps(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -3233,7 +3234,7 @@ __m128 test_mm_maskz_fmaddsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __
}
__m128 test_mm_maskz_fmsubadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) {
- // CHECK-LABEL: @test_mm_maskz_fmsubadd_ps
+ // CHECK-LABEL: test_mm_maskz_fmsubadd_ps
// CHECK: [[NEG:%.+]] = fneg <4 x float> %{{.*}}
// CHECK: call <4 x float> @llvm.x86.fma.vfmaddsub.ps(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> [[NEG]])
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -3242,7 +3243,7 @@ __m128 test_mm_maskz_fmsubadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __
}
__m256 test_mm256_mask_fmaddsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C) {
- // CHECK-LABEL: @test_mm256_mask_fmaddsub_ps
+ // CHECK-LABEL: test_mm256_mask_fmaddsub_ps
// CHECK-NOT: fneg
// CHECK: call <8 x float> @llvm.x86.fma.vfmaddsub.ps.256(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
@@ -3250,7 +3251,7 @@ __m256 test_mm256_mask_fmaddsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256
}
__m256 test_mm256_mask_fmsubadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C) {
- // CHECK-LABEL: @test_mm256_mask_fmsubadd_ps
+ // CHECK-LABEL: test_mm256_mask_fmsubadd_ps
// CHECK: [[NEG:%.+]] = fneg <8 x float> %{{.*}}
// CHECK: call <8 x float> @llvm.x86.fma.vfmaddsub.ps.256(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> [[NEG]])
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
@@ -3258,7 +3259,7 @@ __m256 test_mm256_mask_fmsubadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256
}
__m256 test_mm256_mask3_fmaddsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm256_mask3_fmaddsub_ps
+ // CHECK-LABEL: test_mm256_mask3_fmaddsub_ps
// CHECK-NOT: fneg
// CHECK: call <8 x float> @llvm.x86.fma.vfmaddsub.ps.256(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
@@ -3266,7 +3267,7 @@ __m256 test_mm256_mask3_fmaddsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8
}
__m256 test_mm256_maskz_fmaddsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C) {
- // CHECK-LABEL: @test_mm256_maskz_fmaddsub_ps
+ // CHECK-LABEL: test_mm256_maskz_fmaddsub_ps
// CHECK-NOT: fneg
// CHECK: call <8 x float> @llvm.x86.fma.vfmaddsub.ps.256(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
@@ -3274,7 +3275,7 @@ __m256 test_mm256_maskz_fmaddsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256
}
__m256 test_mm256_maskz_fmsubadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C) {
- // CHECK-LABEL: @test_mm256_maskz_fmsubadd_ps
+ // CHECK-LABEL: test_mm256_maskz_fmsubadd_ps
// CHECK: [[NEG:%.+]] = fneg <8 x float> %{{.*}}
// CHECK: call <8 x float> @llvm.x86.fma.vfmaddsub.ps.256(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> [[NEG]])
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
@@ -3282,7 +3283,7 @@ __m256 test_mm256_maskz_fmsubadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256
}
__m128d test_mm_mask3_fmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fmsub_pd
+ // CHECK-LABEL: test_mm_mask3_fmsub_pd
// CHECK: fneg <2 x double> %{{.*}}
// CHECK: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <2 x i32> <i32 0, i32 1>
@@ -3291,7 +3292,7 @@ __m128d test_mm_mask3_fmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 _
}
__m256d test_mm256_mask3_fmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm256_mask3_fmsub_pd
+ // CHECK-LABEL: test_mm256_mask3_fmsub_pd
// CHECK: fneg <4 x double> %{{.*}}
// CHECK: call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -3300,7 +3301,7 @@ __m256d test_mm256_mask3_fmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask
}
__m128 test_mm_mask3_fmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fmsub_ps
+ // CHECK-LABEL: test_mm_mask3_fmsub_ps
// CHECK: fneg <4 x float> %{{.*}}
// CHECK: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -3309,7 +3310,7 @@ __m128 test_mm_mask3_fmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
}
__m256 test_mm256_mask3_fmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm256_mask3_fmsub_ps
+ // CHECK-LABEL: test_mm256_mask3_fmsub_ps
// CHECK: fneg <8 x float> %{{.*}}
// CHECK: call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
@@ -3317,7 +3318,7 @@ __m256 test_mm256_mask3_fmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __
}
__m128d test_mm_mask3_fmsubadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fmsubadd_pd
+ // CHECK-LABEL: test_mm_mask3_fmsubadd_pd
// CHECK: [[NEG:%.+]] = fneg <2 x double> %{{.+}}
// CHECK: call <2 x double> @llvm.x86.fma.vfmaddsub.pd(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> [[NEG]])
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <2 x i32> <i32 0, i32 1>
@@ -3326,7 +3327,7 @@ __m128d test_mm_mask3_fmsubadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask
}
__m256d test_mm256_mask3_fmsubadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm256_mask3_fmsubadd_pd
+ // CHECK-LABEL: test_mm256_mask3_fmsubadd_pd
// CHECK: [[NEG:%.+]] = fneg <4 x double> %{{.+}}
// CHECK: call <4 x double> @llvm.x86.fma.vfmaddsub.pd.256(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> [[NEG]])
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -3335,7 +3336,7 @@ __m256d test_mm256_mask3_fmsubadd_pd(__m256d __A, __m256d __B, __m256d __C, __mm
}
__m128 test_mm_mask3_fmsubadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fmsubadd_ps
+ // CHECK-LABEL: test_mm_mask3_fmsubadd_ps
// CHECK: [[NEG:%.+]] = fneg <4 x float> %{{.+}}
// CHECK: call <4 x float> @llvm.x86.fma.vfmaddsub.ps(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> [[NEG]])
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -3344,7 +3345,7 @@ __m128 test_mm_mask3_fmsubadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __
}
__m256 test_mm256_mask3_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm256_mask3_fmsubadd_ps
+ // CHECK-LABEL: test_mm256_mask3_fmsubadd_ps
// CHECK: [[NEG:%.+]] = fneg <8 x float> %{{.*}}
// CHECK: call <8 x float> @llvm.x86.fma.vfmaddsub.ps.256(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> [[NEG]])
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
@@ -3352,7 +3353,7 @@ __m256 test_mm256_mask3_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8
}
__m128d test_mm_mask_fnmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) {
- // CHECK-LABEL: @test_mm_mask_fnmadd_pd
+ // CHECK-LABEL: test_mm_mask_fnmadd_pd
// CHECK: fneg <2 x double> %{{.*}}
// CHECK: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <2 x i32> <i32 0, i32 1>
@@ -3361,7 +3362,7 @@ __m128d test_mm_mask_fnmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d _
}
__m256d test_mm256_mask_fnmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C) {
- // CHECK-LABEL: @test_mm256_mask_fnmadd_pd
+ // CHECK-LABEL: test_mm256_mask_fnmadd_pd
// CHECK: fneg <4 x double> %{{.*}}
// CHECK: call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -3370,7 +3371,7 @@ __m256d test_mm256_mask_fnmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256
}
__m128 test_mm_mask_fnmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) {
- // CHECK-LABEL: @test_mm_mask_fnmadd_ps
+ // CHECK-LABEL: test_mm_mask_fnmadd_ps
// CHECK: fneg <4 x float> %{{.*}}
// CHECK: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -3379,7 +3380,7 @@ __m128 test_mm_mask_fnmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
}
__m256 test_mm256_mask_fnmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C) {
- // CHECK-LABEL: @test_mm256_mask_fnmadd_ps
+ // CHECK-LABEL: test_mm256_mask_fnmadd_ps
// CHECK: fneg <8 x float> %{{.*}}
// CHECK: call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
@@ -3387,7 +3388,7 @@ __m256 test_mm256_mask_fnmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __
}
__m128d test_mm_mask_fnmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) {
- // CHECK-LABEL: @test_mm_mask_fnmsub_pd
+ // CHECK-LABEL: test_mm_mask_fnmsub_pd
// CHECK: fneg <2 x double> %{{.*}}
// CHECK: fneg <2 x double> %{{.*}}
// CHECK: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
@@ -3397,7 +3398,7 @@ __m128d test_mm_mask_fnmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d _
}
__m128d test_mm_mask3_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fnmsub_pd
+ // CHECK-LABEL: test_mm_mask3_fnmsub_pd
// CHECK: fneg <2 x double> %{{.*}}
// CHECK: fneg <2 x double> %{{.*}}
// CHECK: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
@@ -3407,7 +3408,7 @@ __m128d test_mm_mask3_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8
}
__m256d test_mm256_mask_fnmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C) {
- // CHECK-LABEL: @test_mm256_mask_fnmsub_pd
+ // CHECK-LABEL: test_mm256_mask_fnmsub_pd
// CHECK: fneg <4 x double> %{{.*}}
// CHECK: fneg <4 x double> %{{.*}}
// CHECK: call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
@@ -3417,7 +3418,7 @@ __m256d test_mm256_mask_fnmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256
}
__m256d test_mm256_mask3_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm256_mask3_fnmsub_pd
+ // CHECK-LABEL: test_mm256_mask3_fnmsub_pd
// CHECK: fneg <4 x double> %{{.*}}
// CHECK: fneg <4 x double> %{{.*}}
// CHECK: call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
@@ -3427,7 +3428,7 @@ __m256d test_mm256_mask3_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmas
}
__m128 test_mm_mask_fnmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) {
- // CHECK-LABEL: @test_mm_mask_fnmsub_ps
+ // CHECK-LABEL: test_mm_mask_fnmsub_ps
// CHECK: fneg <4 x float> %{{.*}}
// CHECK: fneg <4 x float> %{{.*}}
// CHECK: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
@@ -3437,7 +3438,7 @@ __m128 test_mm_mask_fnmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
}
__m128 test_mm_mask3_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm_mask3_fnmsub_ps
+ // CHECK-LABEL: test_mm_mask3_fnmsub_ps
// CHECK: fneg <4 x float> %{{.*}}
// CHECK: fneg <4 x float> %{{.*}}
// CHECK: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
@@ -3447,7 +3448,7 @@ __m128 test_mm_mask3_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
}
__m256 test_mm256_mask_fnmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C) {
- // CHECK-LABEL: @test_mm256_mask_fnmsub_ps
+ // CHECK-LABEL: test_mm256_mask_fnmsub_ps
// CHECK: fneg <8 x float> %{{.*}}
// CHECK: fneg <8 x float> %{{.*}}
// CHECK: call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
@@ -3456,7 +3457,7 @@ __m256 test_mm256_mask_fnmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __
}
__m256 test_mm256_mask3_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U) {
- // CHECK-LABEL: @test_mm256_mask3_fnmsub_ps
+ // CHECK-LABEL: test_mm256_mask3_fnmsub_ps
// CHECK: fneg <8 x float> %{{.*}}
// CHECK: fneg <8 x float> %{{.*}}
// CHECK: call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
@@ -3465,1006 +3466,1006 @@ __m256 test_mm256_mask3_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 _
}
__m128d test_mm_mask_add_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_mask_add_pd
+ // CHECK-LABEL: test_mm_mask_add_pd
// CHECK: fadd <2 x double> %{{.*}}, %{{.*}}
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_mask_add_pd(__W,__U,__A,__B);
}
__m128d test_mm_maskz_add_pd(__mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_maskz_add_pd
+ // CHECK-LABEL: test_mm_maskz_add_pd
// CHECK: fadd <2 x double> %{{.*}}, %{{.*}}
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_maskz_add_pd(__U,__A,__B);
}
__m256d test_mm256_mask_add_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_mask_add_pd
+ // CHECK-LABEL: test_mm256_mask_add_pd
// CHECK: fadd <4 x double> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_add_pd(__W,__U,__A,__B);
}
__m256d test_mm256_maskz_add_pd(__mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_maskz_add_pd
+ // CHECK-LABEL: test_mm256_maskz_add_pd
// CHECK: fadd <4 x double> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_add_pd(__U,__A,__B);
}
__m128 test_mm_mask_add_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_mask_add_ps
+ // CHECK-LABEL: test_mm_mask_add_ps
// CHECK: fadd <4 x float> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask_add_ps(__W,__U,__A,__B);
}
__m128 test_mm_maskz_add_ps(__mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_maskz_add_ps
+ // CHECK-LABEL: test_mm_maskz_add_ps
// CHECK: fadd <4 x float> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_maskz_add_ps(__U,__A,__B);
}
__m256 test_mm256_mask_add_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_mask_add_ps
+ // CHECK-LABEL: test_mm256_mask_add_ps
// CHECK: fadd <8 x float> %{{.*}}, %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_add_ps(__W,__U,__A,__B);
}
__m256 test_mm256_maskz_add_ps(__mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_maskz_add_ps
+ // CHECK-LABEL: test_mm256_maskz_add_ps
// CHECK: fadd <8 x float> %{{.*}}, %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_add_ps(__U,__A,__B);
}
__m128i test_mm_mask_blend_epi32(__mmask8 __U, __m128i __A, __m128i __W) {
- // CHECK-LABEL: @test_mm_mask_blend_epi32
+ // CHECK-LABEL: test_mm_mask_blend_epi32
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_blend_epi32(__U,__A,__W);
}
__m256i test_mm256_mask_blend_epi32(__mmask8 __U, __m256i __A, __m256i __W) {
- // CHECK-LABEL: @test_mm256_mask_blend_epi32
+ // CHECK-LABEL: test_mm256_mask_blend_epi32
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_blend_epi32(__U,__A,__W);
}
__m128d test_mm_mask_blend_pd(__mmask8 __U, __m128d __A, __m128d __W) {
- // CHECK-LABEL: @test_mm_mask_blend_pd
+ // CHECK-LABEL: test_mm_mask_blend_pd
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_mask_blend_pd(__U,__A,__W);
}
__m256d test_mm256_mask_blend_pd(__mmask8 __U, __m256d __A, __m256d __W) {
- // CHECK-LABEL: @test_mm256_mask_blend_pd
+ // CHECK-LABEL: test_mm256_mask_blend_pd
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_blend_pd(__U,__A,__W);
}
__m128 test_mm_mask_blend_ps(__mmask8 __U, __m128 __A, __m128 __W) {
- // CHECK-LABEL: @test_mm_mask_blend_ps
+ // CHECK-LABEL: test_mm_mask_blend_ps
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask_blend_ps(__U,__A,__W);
}
__m256 test_mm256_mask_blend_ps(__mmask8 __U, __m256 __A, __m256 __W) {
- // CHECK-LABEL: @test_mm256_mask_blend_ps
+ // CHECK-LABEL: test_mm256_mask_blend_ps
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_blend_ps(__U,__A,__W);
}
__m128i test_mm_mask_blend_epi64(__mmask8 __U, __m128i __A, __m128i __W) {
- // CHECK-LABEL: @test_mm_mask_blend_epi64
+ // CHECK-LABEL: test_mm_mask_blend_epi64
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_blend_epi64(__U,__A,__W);
}
__m256i test_mm256_mask_blend_epi64(__mmask8 __U, __m256i __A, __m256i __W) {
- // CHECK-LABEL: @test_mm256_mask_blend_epi64
+ // CHECK-LABEL: test_mm256_mask_blend_epi64
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_blend_epi64(__U,__A,__W);
}
__m128d test_mm_mask_compress_pd(__m128d __W, __mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_compress_pd
+ // CHECK-LABEL: test_mm_mask_compress_pd
// CHECK: @llvm.x86.avx512.mask.compress
return _mm_mask_compress_pd(__W,__U,__A);
}
__m128d test_mm_maskz_compress_pd(__mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_maskz_compress_pd
+ // CHECK-LABEL: test_mm_maskz_compress_pd
// CHECK: @llvm.x86.avx512.mask.compress
return _mm_maskz_compress_pd(__U,__A);
}
__m256d test_mm256_mask_compress_pd(__m256d __W, __mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_mask_compress_pd
+ // CHECK-LABEL: test_mm256_mask_compress_pd
// CHECK: @llvm.x86.avx512.mask.compress
return _mm256_mask_compress_pd(__W,__U,__A);
}
__m256d test_mm256_maskz_compress_pd(__mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_maskz_compress_pd
+ // CHECK-LABEL: test_mm256_maskz_compress_pd
// CHECK: @llvm.x86.avx512.mask.compress
return _mm256_maskz_compress_pd(__U,__A);
}
__m128i test_mm_mask_compress_epi64(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_compress_epi64
+ // CHECK-LABEL: test_mm_mask_compress_epi64
// CHECK: @llvm.x86.avx512.mask.compress
return _mm_mask_compress_epi64(__W,__U,__A);
}
__m128i test_mm_maskz_compress_epi64(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_compress_epi64
+ // CHECK-LABEL: test_mm_maskz_compress_epi64
// CHECK: @llvm.x86.avx512.mask.compress
return _mm_maskz_compress_epi64(__U,__A);
}
__m256i test_mm256_mask_compress_epi64(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_compress_epi64
+ // CHECK-LABEL: test_mm256_mask_compress_epi64
// CHECK: @llvm.x86.avx512.mask.compress
return _mm256_mask_compress_epi64(__W,__U,__A);
}
__m256i test_mm256_maskz_compress_epi64(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_compress_epi64
+ // CHECK-LABEL: test_mm256_maskz_compress_epi64
// CHECK: @llvm.x86.avx512.mask.compress
return _mm256_maskz_compress_epi64(__U,__A);
}
__m128 test_mm_mask_compress_ps(__m128 __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_compress_ps
+ // CHECK-LABEL: test_mm_mask_compress_ps
// CHECK: @llvm.x86.avx512.mask.compress
return _mm_mask_compress_ps(__W,__U,__A);
}
__m128 test_mm_maskz_compress_ps(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_maskz_compress_ps
+ // CHECK-LABEL: test_mm_maskz_compress_ps
// CHECK: @llvm.x86.avx512.mask.compress
return _mm_maskz_compress_ps(__U,__A);
}
__m256 test_mm256_mask_compress_ps(__m256 __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_compress_ps
+ // CHECK-LABEL: test_mm256_mask_compress_ps
// CHECK: @llvm.x86.avx512.mask.compress
return _mm256_mask_compress_ps(__W,__U,__A);
}
__m256 test_mm256_maskz_compress_ps(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_maskz_compress_ps
+ // CHECK-LABEL: test_mm256_maskz_compress_ps
// CHECK: @llvm.x86.avx512.mask.compress
return _mm256_maskz_compress_ps(__U,__A);
}
__m128i test_mm_mask_compress_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_compress_epi32
+ // CHECK-LABEL: test_mm_mask_compress_epi32
// CHECK: @llvm.x86.avx512.mask.compress
return _mm_mask_compress_epi32(__W,__U,__A);
}
__m128i test_mm_maskz_compress_epi32(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_compress_epi32
+ // CHECK-LABEL: test_mm_maskz_compress_epi32
// CHECK: @llvm.x86.avx512.mask.compress
return _mm_maskz_compress_epi32(__U,__A);
}
__m256i test_mm256_mask_compress_epi32(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_compress_epi32
+ // CHECK-LABEL: test_mm256_mask_compress_epi32
// CHECK: @llvm.x86.avx512.mask.compress
return _mm256_mask_compress_epi32(__W,__U,__A);
}
__m256i test_mm256_maskz_compress_epi32(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_compress_epi32
+ // CHECK-LABEL: test_mm256_maskz_compress_epi32
// CHECK: @llvm.x86.avx512.mask.compress
return _mm256_maskz_compress_epi32(__U,__A);
}
void test_mm_mask_compressstoreu_pd(void *__P, __mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_compressstoreu_pd
+ // CHECK-LABEL: test_mm_mask_compressstoreu_pd
// CHECK: @llvm.masked.compressstore.v2f64(<2 x double> %{{.*}}, ptr %{{.*}}, <2 x i1> %{{.*}})
return _mm_mask_compressstoreu_pd(__P,__U,__A);
}
void test_mm256_mask_compressstoreu_pd(void *__P, __mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_mask_compressstoreu_pd
+ // CHECK-LABEL: test_mm256_mask_compressstoreu_pd
// CHECK: @llvm.masked.compressstore.v4f64(<4 x double> %{{.*}}, ptr %{{.*}}, <4 x i1> %{{.*}})
return _mm256_mask_compressstoreu_pd(__P,__U,__A);
}
void test_mm_mask_compressstoreu_epi64(void *__P, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_compressstoreu_epi64
+ // CHECK-LABEL: test_mm_mask_compressstoreu_epi64
// CHECK: @llvm.masked.compressstore.v2i64(<2 x i64> %{{.*}}, ptr %{{.*}}, <2 x i1> %{{.*}})
return _mm_mask_compressstoreu_epi64(__P,__U,__A);
}
void test_mm256_mask_compressstoreu_epi64(void *__P, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_compressstoreu_epi64
+ // CHECK-LABEL: test_mm256_mask_compressstoreu_epi64
// CHECK: @llvm.masked.compressstore.v4i64(<4 x i64> %{{.*}}, ptr %{{.*}}, <4 x i1> %{{.*}})
return _mm256_mask_compressstoreu_epi64(__P,__U,__A);
}
void test_mm_mask_compressstoreu_ps(void *__P, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_compressstoreu_ps
+ // CHECK-LABEL: test_mm_mask_compressstoreu_ps
// CHECK: @llvm.masked.compressstore.v4f32(<4 x float> %{{.*}}, ptr %{{.*}}, <4 x i1> %{{.*}})
return _mm_mask_compressstoreu_ps(__P,__U,__A);
}
void test_mm256_mask_compressstoreu_ps(void *__P, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_compressstoreu_ps
+ // CHECK-LABEL: test_mm256_mask_compressstoreu_ps
// CHECK: @llvm.masked.compressstore.v8f32(<8 x float> %{{.*}}, ptr %{{.*}}, <8 x i1> %{{.*}})
return _mm256_mask_compressstoreu_ps(__P,__U,__A);
}
void test_mm_mask_compressstoreu_epi32(void *__P, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_compressstoreu_epi32
+ // CHECK-LABEL: test_mm_mask_compressstoreu_epi32
// CHECK: @llvm.masked.compressstore.v4i32(<4 x i32> %{{.*}}, ptr %{{.*}}, <4 x i1> %{{.*}})
return _mm_mask_compressstoreu_epi32(__P,__U,__A);
}
void test_mm256_mask_compressstoreu_epi32(void *__P, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_compressstoreu_epi32
+ // CHECK-LABEL: test_mm256_mask_compressstoreu_epi32
// CHECK: @llvm.masked.compressstore.v8i32(<8 x i32> %{{.*}}, ptr %{{.*}}, <8 x i1> %{{.*}})
return _mm256_mask_compressstoreu_epi32(__P,__U,__A);
}
__m128d test_mm_mask_cvtepi32_pd(__m128d __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepi32_pd
+ // CHECK-LABEL: test_mm_mask_cvtepi32_pd
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i32> <i32 0, i32 1>
// CHECK: sitofp <2 x i32> %{{.*}} to <2 x double>
// CHECK: select <2 x i1> {{.*}}, <2 x double> {{.*}}, <2 x double> {{.*}}
return _mm_mask_cvtepi32_pd(__W,__U,__A);
}
__m128d test_mm_maskz_cvtepi32_pd(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtepi32_pd
+ // CHECK-LABEL: test_mm_maskz_cvtepi32_pd
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i32> <i32 0, i32 1>
// CHECK: sitofp <2 x i32> %{{.*}} to <2 x double>
// CHECK: select <2 x i1> {{.*}}, <2 x double> {{.*}}, <2 x double> {{.*}}
return _mm_maskz_cvtepi32_pd(__U,__A);
}
__m256d test_mm256_mask_cvtepi32_pd(__m256d __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepi32_pd
+ // CHECK-LABEL: test_mm256_mask_cvtepi32_pd
// CHECK: sitofp <4 x i32> %{{.*}} to <4 x double>
// CHECK: select <4 x i1> {{.*}}, <4 x double> {{.*}}, <4 x double> {{.*}}
return _mm256_mask_cvtepi32_pd(__W,__U,__A);
}
__m256d test_mm256_maskz_cvtepi32_pd(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepi32_pd
+ // CHECK-LABEL: test_mm256_maskz_cvtepi32_pd
// CHECK: sitofp <4 x i32> %{{.*}} to <4 x double>
// CHECK: select <4 x i1> {{.*}}, <4 x double> {{.*}}, <4 x double> {{.*}}
return _mm256_maskz_cvtepi32_pd(__U,__A);
}
__m128 test_mm_mask_cvtepi32_ps(__m128 __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepi32_ps
+ // CHECK-LABEL: test_mm_mask_cvtepi32_ps
// CHECK: sitofp <4 x i32> %{{.*}} to <4 x float>
// CHECK: select <4 x i1> {{.*}}, <4 x float> {{.*}}, <4 x float> {{.*}}
return _mm_mask_cvtepi32_ps(__W,__U,__A);
}
__m128 test_mm_maskz_cvtepi32_ps(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtepi32_ps
+ // CHECK-LABEL: test_mm_maskz_cvtepi32_ps
// CHECK: sitofp <4 x i32> %{{.*}} to <4 x float>
// CHECK: select <4 x i1> {{.*}}, <4 x float> {{.*}}, <4 x float> {{.*}}
return _mm_maskz_cvtepi32_ps(__U,__A);
}
__m256 test_mm256_mask_cvtepi32_ps(__m256 __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepi32_ps
+ // CHECK-LABEL: test_mm256_mask_cvtepi32_ps
// CHECK: sitofp <8 x i32> %{{.*}} to <8 x float>
// CHECK: select <8 x i1> {{.*}}, <8 x float> {{.*}}, <8 x float> {{.*}}
return _mm256_mask_cvtepi32_ps(__W,__U,__A);
}
__m256 test_mm256_maskz_cvtepi32_ps(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepi32_ps
+ // CHECK-LABEL: test_mm256_maskz_cvtepi32_ps
// CHECK: sitofp <8 x i32> %{{.*}} to <8 x float>
// CHECK: select <8 x i1> {{.*}}, <8 x float> {{.*}}, <8 x float> {{.*}}
return _mm256_maskz_cvtepi32_ps(__U,__A);
}
__m128i test_mm_mask_cvtpd_epi32(__m128i __W, __mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_cvtpd_epi32
+ // CHECK-LABEL: test_mm_mask_cvtpd_epi32
// CHECK: @llvm.x86.avx512.mask.cvtpd2dq.128
return _mm_mask_cvtpd_epi32(__W,__U,__A);
}
__m128i test_mm_maskz_cvtpd_epi32(__mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtpd_epi32
+ // CHECK-LABEL: test_mm_maskz_cvtpd_epi32
// CHECK: @llvm.x86.avx512.mask.cvtpd2dq.128
return _mm_maskz_cvtpd_epi32(__U,__A);
}
__m128i test_mm256_mask_cvtpd_epi32(__m128i __W, __mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtpd_epi32
+ // CHECK-LABEL: test_mm256_mask_cvtpd_epi32
// CHECK: @llvm.x86.avx.cvt.pd2dq.256
// CHECK: select <4 x i1> {{.*}}, <4 x i32> {{.*}}, <4 x i32> {{.*}}
return _mm256_mask_cvtpd_epi32(__W,__U,__A);
}
__m128i test_mm256_maskz_cvtpd_epi32(__mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtpd_epi32
+ // CHECK-LABEL: test_mm256_maskz_cvtpd_epi32
// CHECK: @llvm.x86.avx.cvt.pd2dq.256
// CHECK: select <4 x i1> {{.*}}, <4 x i32> {{.*}}, <4 x i32> {{.*}}
return _mm256_maskz_cvtpd_epi32(__U,__A);
}
__m128 test_mm_mask_cvtpd_ps(__m128 __W, __mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_cvtpd_ps
+ // CHECK-LABEL: test_mm_mask_cvtpd_ps
// CHECK: @llvm.x86.avx512.mask.cvtpd2ps
return _mm_mask_cvtpd_ps(__W,__U,__A);
}
__m128 test_mm_maskz_cvtpd_ps(__mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtpd_ps
+ // CHECK-LABEL: test_mm_maskz_cvtpd_ps
// CHECK: @llvm.x86.avx512.mask.cvtpd2ps
return _mm_maskz_cvtpd_ps(__U,__A);
}
__m128 test_mm256_mask_cvtpd_ps(__m128 __W, __mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtpd_ps
+ // CHECK-LABEL: test_mm256_mask_cvtpd_ps
// CHECK: @llvm.x86.avx.cvt.pd2.ps.256
// CHECK: select <4 x i1> {{.*}}, <4 x float> {{.*}}, <4 x float> {{.*}}
return _mm256_mask_cvtpd_ps(__W,__U,__A);
}
__m128 test_mm256_maskz_cvtpd_ps(__mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtpd_ps
+ // CHECK-LABEL: test_mm256_maskz_cvtpd_ps
// CHECK: @llvm.x86.avx.cvt.pd2.ps.256
// CHECK: select <4 x i1> {{.*}}, <4 x float> {{.*}}, <4 x float> {{.*}}
return _mm256_maskz_cvtpd_ps(__U,__A);
}
__m128i test_mm_cvtpd_epu32(__m128d __A) {
- // CHECK-LABEL: @test_mm_cvtpd_epu32
+ // CHECK-LABEL: test_mm_cvtpd_epu32
// CHECK: @llvm.x86.avx512.mask.cvtpd2udq.128
return _mm_cvtpd_epu32(__A);
}
__m128i test_mm_mask_cvtpd_epu32(__m128i __W, __mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_cvtpd_epu32
+ // CHECK-LABEL: test_mm_mask_cvtpd_epu32
// CHECK: @llvm.x86.avx512.mask.cvtpd2udq.128
return _mm_mask_cvtpd_epu32(__W,__U,__A);
}
__m128i test_mm_maskz_cvtpd_epu32(__mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtpd_epu32
+ // CHECK-LABEL: test_mm_maskz_cvtpd_epu32
// CHECK: @llvm.x86.avx512.mask.cvtpd2udq.128
return _mm_maskz_cvtpd_epu32(__U,__A);
}
__m128i test_mm256_cvtpd_epu32(__m256d __A) {
- // CHECK-LABEL: @test_mm256_cvtpd_epu32
+ // CHECK-LABEL: test_mm256_cvtpd_epu32
// CHECK: @llvm.x86.avx512.mask.cvtpd2udq.256
return _mm256_cvtpd_epu32(__A);
}
__m128i test_mm256_mask_cvtpd_epu32(__m128i __W, __mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtpd_epu32
+ // CHECK-LABEL: test_mm256_mask_cvtpd_epu32
// CHECK: @llvm.x86.avx512.mask.cvtpd2udq.256
return _mm256_mask_cvtpd_epu32(__W,__U,__A);
}
__m128i test_mm256_maskz_cvtpd_epu32(__mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtpd_epu32
+ // CHECK-LABEL: test_mm256_maskz_cvtpd_epu32
// CHECK: @llvm.x86.avx512.mask.cvtpd2udq.256
return _mm256_maskz_cvtpd_epu32(__U,__A);
}
__m128i test_mm_mask_cvtps_epi32(__m128i __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_cvtps_epi32
+ // CHECK-LABEL: test_mm_mask_cvtps_epi32
// CHECK: @llvm.x86.sse2.cvtps2dq
// CHECK: select <4 x i1> {{.*}}, <4 x i32> {{.*}}, <4 x i32> {{.*}}
return _mm_mask_cvtps_epi32(__W,__U,__A);
}
__m128i test_mm_maskz_cvtps_epi32(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtps_epi32
+ // CHECK-LABEL: test_mm_maskz_cvtps_epi32
// CHECK: @llvm.x86.sse2.cvtps2dq
// CHECK: select <4 x i1> {{.*}}, <4 x i32> {{.*}}, <4 x i32> {{.*}}
return _mm_maskz_cvtps_epi32(__U,__A);
}
__m256i test_mm256_mask_cvtps_epi32(__m256i __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtps_epi32
+ // CHECK-LABEL: test_mm256_mask_cvtps_epi32
// CHECK: @llvm.x86.avx.cvt.ps2dq.256
// CHECK: select <8 x i1> {{.*}}, <8 x i32> {{.*}}, <8 x i32> {{.*}}
return _mm256_mask_cvtps_epi32(__W,__U,__A);
}
__m256i test_mm256_maskz_cvtps_epi32(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtps_epi32
+ // CHECK-LABEL: test_mm256_maskz_cvtps_epi32
// CHECK: @llvm.x86.avx.cvt.ps2dq.256
// CHECK: select <8 x i1> {{.*}}, <8 x i32> {{.*}}, <8 x i32> {{.*}}
return _mm256_maskz_cvtps_epi32(__U,__A);
}
__m128d test_mm_mask_cvtps_pd(__m128d __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_cvtps_pd
+ // CHECK-LABEL: test_mm_mask_cvtps_pd
// CHECK: fpext <2 x float> %{{.*}} to <2 x double>
// CHECK: select <2 x i1> {{.*}}, <2 x double> {{.*}}, <2 x double> {{.*}}
return _mm_mask_cvtps_pd(__W,__U,__A);
}
__m128d test_mm_maskz_cvtps_pd(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtps_pd
+ // CHECK-LABEL: test_mm_maskz_cvtps_pd
// CHECK: fpext <2 x float> %{{.*}} to <2 x double>
// CHECK: select <2 x i1> {{.*}}, <2 x double> {{.*}}, <2 x double> {{.*}}
return _mm_maskz_cvtps_pd(__U,__A);
}
__m256d test_mm256_mask_cvtps_pd(__m256d __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtps_pd
+ // CHECK-LABEL: test_mm256_mask_cvtps_pd
// CHECK: fpext <4 x float> %{{.*}} to <4 x double>
// CHECK: select <4 x i1> {{.*}}, <4 x double> {{.*}}, <4 x double> {{.*}}
return _mm256_mask_cvtps_pd(__W,__U,__A);
}
__m256d test_mm256_maskz_cvtps_pd(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtps_pd
+ // CHECK-LABEL: test_mm256_maskz_cvtps_pd
// CHECK: fpext <4 x float> %{{.*}} to <4 x double>
// CHECK: select <4 x i1> {{.*}}, <4 x double> {{.*}}, <4 x double> {{.*}}
return _mm256_maskz_cvtps_pd(__U,__A);
}
__m128i test_mm_cvtps_epu32(__m128 __A) {
- // CHECK-LABEL: @test_mm_cvtps_epu32
+ // CHECK-LABEL: test_mm_cvtps_epu32
// CHECK: @llvm.x86.avx512.mask.cvtps2udq.128
return _mm_cvtps_epu32(__A);
}
__m128i test_mm_mask_cvtps_epu32(__m128i __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_cvtps_epu32
+ // CHECK-LABEL: test_mm_mask_cvtps_epu32
// CHECK: @llvm.x86.avx512.mask.cvtps2udq.128
return _mm_mask_cvtps_epu32(__W,__U,__A);
}
__m128i test_mm_maskz_cvtps_epu32(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtps_epu32
+ // CHECK-LABEL: test_mm_maskz_cvtps_epu32
// CHECK: @llvm.x86.avx512.mask.cvtps2udq.128
return _mm_maskz_cvtps_epu32(__U,__A);
}
__m256i test_mm256_cvtps_epu32(__m256 __A) {
- // CHECK-LABEL: @test_mm256_cvtps_epu32
+ // CHECK-LABEL: test_mm256_cvtps_epu32
// CHECK: @llvm.x86.avx512.mask.cvtps2udq.256
return _mm256_cvtps_epu32(__A);
}
__m256i test_mm256_mask_cvtps_epu32(__m256i __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtps_epu32
+ // CHECK-LABEL: test_mm256_mask_cvtps_epu32
// CHECK: @llvm.x86.avx512.mask.cvtps2udq.256
return _mm256_mask_cvtps_epu32(__W,__U,__A);
}
__m256i test_mm256_maskz_cvtps_epu32(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtps_epu32
+ // CHECK-LABEL: test_mm256_maskz_cvtps_epu32
// CHECK: @llvm.x86.avx512.mask.cvtps2udq.256
return _mm256_maskz_cvtps_epu32(__U,__A);
}
__m128i test_mm_mask_cvttpd_epi32(__m128i __W, __mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_cvttpd_epi32
+ // CHECK-LABEL: test_mm_mask_cvttpd_epi32
// CHECK: @llvm.x86.avx512.mask.cvttpd2dq.128
return _mm_mask_cvttpd_epi32(__W,__U,__A);
}
__m128i test_mm_maskz_cvttpd_epi32(__mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_maskz_cvttpd_epi32
+ // CHECK-LABEL: test_mm_maskz_cvttpd_epi32
// CHECK: @llvm.x86.avx512.mask.cvttpd2dq.128
return _mm_maskz_cvttpd_epi32(__U,__A);
}
__m128i test_mm256_mask_cvttpd_epi32(__m128i __W, __mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_mask_cvttpd_epi32
+ // CHECK-LABEL: test_mm256_mask_cvttpd_epi32
// CHECK: @llvm.x86.avx.cvtt.pd2dq.256
// CHECK: select <4 x i1> {{.*}}, <4 x i32> {{.*}}, <4 x i32> {{.*}}
return _mm256_mask_cvttpd_epi32(__W,__U,__A);
}
__m128i test_mm256_maskz_cvttpd_epi32(__mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvttpd_epi32
+ // CHECK-LABEL: test_mm256_maskz_cvttpd_epi32
// CHECK: @llvm.x86.avx.cvtt.pd2dq.256
// CHECK: select <4 x i1> {{.*}}, <4 x i32> {{.*}}, <4 x i32> {{.*}}
return _mm256_maskz_cvttpd_epi32(__U,__A);
}
__m128i test_mm_cvttpd_epu32(__m128d __A) {
- // CHECK-LABEL: @test_mm_cvttpd_epu32
+ // CHECK-LABEL: test_mm_cvttpd_epu32
// CHECK: @llvm.x86.avx512.mask.cvttpd2udq.128
return _mm_cvttpd_epu32(__A);
}
__m128i test_mm_mask_cvttpd_epu32(__m128i __W, __mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_cvttpd_epu32
+ // CHECK-LABEL: test_mm_mask_cvttpd_epu32
// CHECK: @llvm.x86.avx512.mask.cvttpd2udq.128
return _mm_mask_cvttpd_epu32(__W,__U,__A);
}
__m128i test_mm_maskz_cvttpd_epu32(__mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_maskz_cvttpd_epu32
+ // CHECK-LABEL: test_mm_maskz_cvttpd_epu32
// CHECK: @llvm.x86.avx512.mask.cvttpd2udq.128
return _mm_maskz_cvttpd_epu32(__U,__A);
}
__m128i test_mm256_cvttpd_epu32(__m256d __A) {
- // CHECK-LABEL: @test_mm256_cvttpd_epu32
+ // CHECK-LABEL: test_mm256_cvttpd_epu32
// CHECK: @llvm.x86.avx512.mask.cvttpd2udq.256
return _mm256_cvttpd_epu32(__A);
}
__m128i test_mm256_mask_cvttpd_epu32(__m128i __W, __mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_mask_cvttpd_epu32
+ // CHECK-LABEL: test_mm256_mask_cvttpd_epu32
// CHECK: @llvm.x86.avx512.mask.cvttpd2udq.256
return _mm256_mask_cvttpd_epu32(__W,__U,__A);
}
__m128i test_mm256_maskz_cvttpd_epu32(__mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvttpd_epu32
+ // CHECK-LABEL: test_mm256_maskz_cvttpd_epu32
// CHECK: @llvm.x86.avx512.mask.cvttpd2udq.256
return _mm256_maskz_cvttpd_epu32(__U,__A);
}
__m128i test_mm_mask_cvttps_epi32(__m128i __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_cvttps_epi32
+ // CHECK-LABEL: test_mm_mask_cvttps_epi32
// CHECK: @llvm.x86.sse2.cvttps2dq
// CHECK: select <4 x i1> {{.*}}, <4 x i32> {{.*}}, <4 x i32> {{.*}}
return _mm_mask_cvttps_epi32(__W,__U,__A);
}
__m128i test_mm_maskz_cvttps_epi32(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_maskz_cvttps_epi32
+ // CHECK-LABEL: test_mm_maskz_cvttps_epi32
// CHECK: @llvm.x86.sse2.cvttps2dq
// CHECK: select <4 x i1> {{.*}}, <4 x i32> {{.*}}, <4 x i32> {{.*}}
return _mm_maskz_cvttps_epi32(__U,__A);
}
__m256i test_mm256_mask_cvttps_epi32(__m256i __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_cvttps_epi32
+ // CHECK-LABEL: test_mm256_mask_cvttps_epi32
// CHECK: @llvm.x86.avx.cvtt.ps2dq.256
// CHECK: select <8 x i1> {{.*}}, <8 x i32> {{.*}}, <8 x i32> {{.*}}
return _mm256_mask_cvttps_epi32(__W,__U,__A);
}
__m256i test_mm256_maskz_cvttps_epi32(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvttps_epi32
+ // CHECK-LABEL: test_mm256_maskz_cvttps_epi32
// CHECK: @llvm.x86.avx.cvtt.ps2dq.256
// CHECK: select <8 x i1> {{.*}}, <8 x i32> {{.*}}, <8 x i32> {{.*}}
return _mm256_maskz_cvttps_epi32(__U,__A);
}
__m128i test_mm_cvttps_epu32(__m128 __A) {
- // CHECK-LABEL: @test_mm_cvttps_epu32
+ // CHECK-LABEL: test_mm_cvttps_epu32
// CHECK: @llvm.x86.avx512.mask.cvttps2udq.128
return _mm_cvttps_epu32(__A);
}
__m128i test_mm_mask_cvttps_epu32(__m128i __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_cvttps_epu32
+ // CHECK-LABEL: test_mm_mask_cvttps_epu32
// CHECK: @llvm.x86.avx512.mask.cvttps2udq.128
return _mm_mask_cvttps_epu32(__W,__U,__A);
}
__m128i test_mm_maskz_cvttps_epu32(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_maskz_cvttps_epu32
+ // CHECK-LABEL: test_mm_maskz_cvttps_epu32
// CHECK: @llvm.x86.avx512.mask.cvttps2udq.128
return _mm_maskz_cvttps_epu32(__U,__A);
}
__m256i test_mm256_cvttps_epu32(__m256 __A) {
- // CHECK-LABEL: @test_mm256_cvttps_epu32
+ // CHECK-LABEL: test_mm256_cvttps_epu32
// CHECK: @llvm.x86.avx512.mask.cvttps2udq.256
return _mm256_cvttps_epu32(__A);
}
__m256i test_mm256_mask_cvttps_epu32(__m256i __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_cvttps_epu32
+ // CHECK-LABEL: test_mm256_mask_cvttps_epu32
// CHECK: @llvm.x86.avx512.mask.cvttps2udq.256
return _mm256_mask_cvttps_epu32(__W,__U,__A);
}
__m256i test_mm256_maskz_cvttps_epu32(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvttps_epu32
+ // CHECK-LABEL: test_mm256_maskz_cvttps_epu32
// CHECK: @llvm.x86.avx512.mask.cvttps2udq.256
return _mm256_maskz_cvttps_epu32(__U,__A);
}
__m128d test_mm_cvtepu32_pd(__m128i __A) {
- // CHECK-LABEL: @test_mm_cvtepu32_pd
+ // CHECK-LABEL: test_mm_cvtepu32_pd
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i32> <i32 0, i32 1>
// CHECK: uitofp <2 x i32> %{{.*}} to <2 x double>
return _mm_cvtepu32_pd(__A);
}
__m128d test_mm_mask_cvtepu32_pd(__m128d __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepu32_pd
+ // CHECK-LABEL: test_mm_mask_cvtepu32_pd
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i32> <i32 0, i32 1>
// CHECK: uitofp <2 x i32> %{{.*}} to <2 x double>
// CHECK: select <2 x i1> {{.*}}, <2 x double> {{.*}}, <2 x double> {{.*}}
return _mm_mask_cvtepu32_pd(__W,__U,__A);
}
__m128d test_mm_maskz_cvtepu32_pd(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtepu32_pd
+ // CHECK-LABEL: test_mm_maskz_cvtepu32_pd
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i32> <i32 0, i32 1>
// CHECK: uitofp <2 x i32> %{{.*}} to <2 x double>
// CHECK: select <2 x i1> {{.*}}, <2 x double> {{.*}}, <2 x double> {{.*}}
return _mm_maskz_cvtepu32_pd(__U,__A);
}
__m256d test_mm256_cvtepu32_pd(__m128i __A) {
- // CHECK-LABEL: @test_mm256_cvtepu32_pd
+ // CHECK-LABEL: test_mm256_cvtepu32_pd
// CHECK: uitofp <4 x i32> %{{.*}} to <4 x double>
return _mm256_cvtepu32_pd(__A);
}
__m256d test_mm256_mask_cvtepu32_pd(__m256d __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepu32_pd
+ // CHECK-LABEL: test_mm256_mask_cvtepu32_pd
// CHECK: uitofp <4 x i32> %{{.*}} to <4 x double>
// CHECK: select <4 x i1> {{.*}}, <4 x double> {{.*}}, <4 x double> {{.*}}
return _mm256_mask_cvtepu32_pd(__W,__U,__A);
}
__m256d test_mm256_maskz_cvtepu32_pd(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepu32_pd
+ // CHECK-LABEL: test_mm256_maskz_cvtepu32_pd
// CHECK: uitofp <4 x i32> %{{.*}} to <4 x double>
// CHECK: select <4 x i1> {{.*}}, <4 x double> {{.*}}, <4 x double> {{.*}}
return _mm256_maskz_cvtepu32_pd(__U,__A);
}
__m128 test_mm_cvtepu32_ps(__m128i __A) {
- // CHECK-LABEL: @test_mm_cvtepu32_ps
+ // CHECK-LABEL: test_mm_cvtepu32_ps
// CHECK: uitofp <4 x i32> %{{.*}} to <4 x float>
return _mm_cvtepu32_ps(__A);
}
__m128 test_mm_mask_cvtepu32_ps(__m128 __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepu32_ps
+ // CHECK-LABEL: test_mm_mask_cvtepu32_ps
// CHECK: uitofp <4 x i32> %{{.*}} to <4 x float>
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask_cvtepu32_ps(__W,__U,__A);
}
__m128 test_mm_maskz_cvtepu32_ps(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtepu32_ps
+ // CHECK-LABEL: test_mm_maskz_cvtepu32_ps
// CHECK: uitofp <4 x i32> %{{.*}} to <4 x float>
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_maskz_cvtepu32_ps(__U,__A);
}
__m256 test_mm256_cvtepu32_ps(__m256i __A) {
- // CHECK-LABEL: @test_mm256_cvtepu32_ps
+ // CHECK-LABEL: test_mm256_cvtepu32_ps
// CHECK: uitofp <8 x i32> %{{.*}} to <8 x float>
return _mm256_cvtepu32_ps(__A);
}
__m256 test_mm256_mask_cvtepu32_ps(__m256 __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepu32_ps
+ // CHECK-LABEL: test_mm256_mask_cvtepu32_ps
// CHECK: uitofp <8 x i32> %{{.*}} to <8 x float>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_cvtepu32_ps(__W,__U,__A);
}
__m256 test_mm256_maskz_cvtepu32_ps(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepu32_ps
+ // CHECK-LABEL: test_mm256_maskz_cvtepu32_ps
// CHECK: uitofp <8 x i32> %{{.*}} to <8 x float>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_cvtepu32_ps(__U,__A);
}
__m128d test_mm_mask_div_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_mask_div_pd
+ // CHECK-LABEL: test_mm_mask_div_pd
// CHECK: fdiv <2 x double> %{{.*}}, %{{.*}}
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_mask_div_pd(__W,__U,__A,__B);
}
__m128d test_mm_maskz_div_pd(__mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_maskz_div_pd
+ // CHECK-LABEL: test_mm_maskz_div_pd
// CHECK: fdiv <2 x double> %{{.*}}, %{{.*}}
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_maskz_div_pd(__U,__A,__B);
}
__m256d test_mm256_mask_div_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_mask_div_pd
+ // CHECK-LABEL: test_mm256_mask_div_pd
// CHECK: fdiv <4 x double> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_div_pd(__W,__U,__A,__B);
}
__m256d test_mm256_maskz_div_pd(__mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_maskz_div_pd
+ // CHECK-LABEL: test_mm256_maskz_div_pd
// CHECK: fdiv <4 x double> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_div_pd(__U,__A,__B);
}
__m128 test_mm_mask_div_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_mask_div_ps
+ // CHECK-LABEL: test_mm_mask_div_ps
// CHECK: fdiv <4 x float> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask_div_ps(__W,__U,__A,__B);
}
__m128 test_mm_maskz_div_ps(__mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_maskz_div_ps
+ // CHECK-LABEL: test_mm_maskz_div_ps
// CHECK: fdiv <4 x float> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_maskz_div_ps(__U,__A,__B);
}
__m256 test_mm256_mask_div_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_mask_div_ps
+ // CHECK-LABEL: test_mm256_mask_div_ps
// CHECK: fdiv <8 x float> %{{.*}}, %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_div_ps(__W,__U,__A,__B);
}
__m256 test_mm256_maskz_div_ps(__mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_maskz_div_ps
+ // CHECK-LABEL: test_mm256_maskz_div_ps
// CHECK: fdiv <8 x float> %{{.*}}, %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_div_ps(__U,__A,__B);
}
__m128d test_mm_mask_expand_pd(__m128d __W, __mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_expand_pd
+ // CHECK-LABEL: test_mm_mask_expand_pd
// CHECK: @llvm.x86.avx512.mask.expand
return _mm_mask_expand_pd(__W,__U,__A);
}
__m128d test_mm_maskz_expand_pd(__mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_maskz_expand_pd
+ // CHECK-LABEL: test_mm_maskz_expand_pd
// CHECK: @llvm.x86.avx512.mask.expand
return _mm_maskz_expand_pd(__U,__A);
}
__m256d test_mm256_mask_expand_pd(__m256d __W, __mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_mask_expand_pd
+ // CHECK-LABEL: test_mm256_mask_expand_pd
// CHECK: @llvm.x86.avx512.mask.expand
return _mm256_mask_expand_pd(__W,__U,__A);
}
__m256d test_mm256_maskz_expand_pd(__mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_maskz_expand_pd
+ // CHECK-LABEL: test_mm256_maskz_expand_pd
// CHECK: @llvm.x86.avx512.mask.expand
return _mm256_maskz_expand_pd(__U,__A);
}
__m128i test_mm_mask_expand_epi64(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_expand_epi64
+ // CHECK-LABEL: test_mm_mask_expand_epi64
// CHECK: @llvm.x86.avx512.mask.expand
return _mm_mask_expand_epi64(__W,__U,__A);
}
__m128i test_mm_maskz_expand_epi64(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_expand_epi64
+ // CHECK-LABEL: test_mm_maskz_expand_epi64
// CHECK: @llvm.x86.avx512.mask.expand
return _mm_maskz_expand_epi64(__U,__A);
}
__m256i test_mm256_mask_expand_epi64(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_expand_epi64
+ // CHECK-LABEL: test_mm256_mask_expand_epi64
// CHECK: @llvm.x86.avx512.mask.expand
return _mm256_mask_expand_epi64(__W,__U,__A);
}
__m256i test_mm256_maskz_expand_epi64(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_expand_epi64
+ // CHECK-LABEL: test_mm256_maskz_expand_epi64
// CHECK: @llvm.x86.avx512.mask.expand
return _mm256_maskz_expand_epi64(__U,__A);
}
__m128d test_mm_mask_expandloadu_pd(__m128d __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_mask_expandloadu_pd
+ // CHECK-LABEL: test_mm_mask_expandloadu_pd
// CHECK: @llvm.masked.expandload.v2f64(ptr %{{.*}}, <2 x i1> %{{.*}}, <2 x double> %{{.*}})
return _mm_mask_expandloadu_pd(__W,__U,__P);
}
__m128d test_mm_maskz_expandloadu_pd(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_maskz_expandloadu_pd
+ // CHECK-LABEL: test_mm_maskz_expandloadu_pd
// CHECK: @llvm.masked.expandload.v2f64(ptr %{{.*}}, <2 x i1> %{{.*}}, <2 x double> %{{.*}})
return _mm_maskz_expandloadu_pd(__U,__P);
}
__m256d test_mm256_mask_expandloadu_pd(__m256d __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_mask_expandloadu_pd
+ // CHECK-LABEL: test_mm256_mask_expandloadu_pd
// CHECK: @llvm.masked.expandload.v4f64(ptr %{{.*}}, <4 x i1> %{{.*}}, <4 x double> %{{.*}})
return _mm256_mask_expandloadu_pd(__W,__U,__P);
}
__m256d test_mm256_maskz_expandloadu_pd(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_maskz_expandloadu_pd
+ // CHECK-LABEL: test_mm256_maskz_expandloadu_pd
// CHECK: @llvm.masked.expandload.v4f64(ptr %{{.*}}, <4 x i1> %{{.*}}, <4 x double> %{{.*}})
return _mm256_maskz_expandloadu_pd(__U,__P);
}
__m128i test_mm_mask_expandloadu_epi64(__m128i __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_mask_expandloadu_epi64
+ // CHECK-LABEL: test_mm_mask_expandloadu_epi64
// CHECK: @llvm.masked.expandload.v2i64(ptr %{{.*}}, <2 x i1> %{{.*}}, <2 x i64> %{{.*}})
return _mm_mask_expandloadu_epi64(__W,__U,__P);
}
__m128i test_mm_maskz_expandloadu_epi64(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_maskz_expandloadu_epi64
+ // CHECK-LABEL: test_mm_maskz_expandloadu_epi64
// CHECK: @llvm.masked.expandload.v2i64(ptr %{{.*}}, <2 x i1> %{{.*}}, <2 x i64> %{{.*}})
return _mm_maskz_expandloadu_epi64(__U,__P);
}
__m256i test_mm256_mask_expandloadu_epi64(__m256i __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_mask_expandloadu_epi64
+ // CHECK-LABEL: test_mm256_mask_expandloadu_epi64
// CHECK: @llvm.masked.expandload.v4i64(ptr %{{.*}}, <4 x i1> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_mask_expandloadu_epi64(__W,__U,__P);
}
__m256i test_mm256_maskz_expandloadu_epi64(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_maskz_expandloadu_epi64
+ // CHECK-LABEL: test_mm256_maskz_expandloadu_epi64
// CHECK: @llvm.masked.expandload.v4i64(ptr %{{.*}}, <4 x i1> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_maskz_expandloadu_epi64(__U,__P);
}
__m128 test_mm_mask_expandloadu_ps(__m128 __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_mask_expandloadu_ps
+ // CHECK-LABEL: test_mm_mask_expandloadu_ps
// CHECK: @llvm.masked.expandload.v4f32(ptr %{{.*}}, <4 x i1> %{{.*}}, <4 x float> %{{.*}})
return _mm_mask_expandloadu_ps(__W,__U,__P);
}
__m128 test_mm_maskz_expandloadu_ps(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_maskz_expandloadu_ps
+ // CHECK-LABEL: test_mm_maskz_expandloadu_ps
// CHECK: @llvm.masked.expandload.v4f32(ptr %{{.*}}, <4 x i1> %{{.*}}, <4 x float> %{{.*}})
return _mm_maskz_expandloadu_ps(__U,__P);
}
__m256 test_mm256_mask_expandloadu_ps(__m256 __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_mask_expandloadu_ps
+ // CHECK-LABEL: test_mm256_mask_expandloadu_ps
// CHECK: @llvm.masked.expandload.v8f32(ptr %{{.*}}, <8 x i1> %{{.*}}, <8 x float> %{{.*}})
return _mm256_mask_expandloadu_ps(__W,__U,__P);
}
__m256 test_mm256_maskz_expandloadu_ps(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_maskz_expandloadu_ps
+ // CHECK-LABEL: test_mm256_maskz_expandloadu_ps
// CHECK: @llvm.masked.expandload.v8f32(ptr %{{.*}}, <8 x i1> %{{.*}}, <8 x float> %{{.*}})
return _mm256_maskz_expandloadu_ps(__U,__P);
}
__m128i test_mm_mask_expandloadu_epi32(__m128i __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_mask_expandloadu_epi32
+ // CHECK-LABEL: test_mm_mask_expandloadu_epi32
// CHECK: @llvm.masked.expandload.v4i32(ptr %{{.*}}, <4 x i1> %{{.*}}, <4 x i32> %{{.*}})
return _mm_mask_expandloadu_epi32(__W,__U,__P);
}
__m128i test_mm_maskz_expandloadu_epi32(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_maskz_expandloadu_epi32
+ // CHECK-LABEL: test_mm_maskz_expandloadu_epi32
// CHECK: @llvm.masked.expandload.v4i32(ptr %{{.*}}, <4 x i1> %{{.*}}, <4 x i32> %{{.*}})
return _mm_maskz_expandloadu_epi32(__U,__P);
}
__m256i test_mm256_mask_expandloadu_epi32(__m256i __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_mask_expandloadu_epi32
+ // CHECK-LABEL: test_mm256_mask_expandloadu_epi32
// CHECK: @llvm.masked.expandload.v8i32(ptr %{{.*}}, <8 x i1> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_mask_expandloadu_epi32(__W,__U,__P);
}
__m256i test_mm256_maskz_expandloadu_epi32(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_maskz_expandloadu_epi32
+ // CHECK-LABEL: test_mm256_maskz_expandloadu_epi32
// CHECK: @llvm.masked.expandload.v8i32(ptr %{{.*}}, <8 x i1> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_maskz_expandloadu_epi32(__U,__P);
}
__m128 test_mm_mask_expand_ps(__m128 __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_expand_ps
+ // CHECK-LABEL: test_mm_mask_expand_ps
// CHECK: @llvm.x86.avx512.mask.expand
return _mm_mask_expand_ps(__W,__U,__A);
}
__m128 test_mm_maskz_expand_ps(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_maskz_expand_ps
+ // CHECK-LABEL: test_mm_maskz_expand_ps
// CHECK: @llvm.x86.avx512.mask.expand
return _mm_maskz_expand_ps(__U,__A);
}
__m256 test_mm256_mask_expand_ps(__m256 __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_expand_ps
+ // CHECK-LABEL: test_mm256_mask_expand_ps
// CHECK: @llvm.x86.avx512.mask.expand
return _mm256_mask_expand_ps(__W,__U,__A);
}
__m256 test_mm256_maskz_expand_ps(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_maskz_expand_ps
+ // CHECK-LABEL: test_mm256_maskz_expand_ps
// CHECK: @llvm.x86.avx512.mask.expand
return _mm256_maskz_expand_ps(__U,__A);
}
__m128i test_mm_mask_expand_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_expand_epi32
+ // CHECK-LABEL: test_mm_mask_expand_epi32
// CHECK: @llvm.x86.avx512.mask.expand
return _mm_mask_expand_epi32(__W,__U,__A);
}
__m128i test_mm_maskz_expand_epi32(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_expand_epi32
+ // CHECK-LABEL: test_mm_maskz_expand_epi32
// CHECK: @llvm.x86.avx512.mask.expand
return _mm_maskz_expand_epi32(__U,__A);
}
__m256i test_mm256_mask_expand_epi32(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_expand_epi32
+ // CHECK-LABEL: test_mm256_mask_expand_epi32
// CHECK: @llvm.x86.avx512.mask.expand
return _mm256_mask_expand_epi32(__W,__U,__A);
}
__m256i test_mm256_maskz_expand_epi32(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_expand_epi32
+ // CHECK-LABEL: test_mm256_maskz_expand_epi32
// CHECK: @llvm.x86.avx512.mask.expand
return _mm256_maskz_expand_epi32(__U,__A);
}
__m128d test_mm_getexp_pd(__m128d __A) {
- // CHECK-LABEL: @test_mm_getexp_pd
+ // CHECK-LABEL: test_mm_getexp_pd
// CHECK: @llvm.x86.avx512.mask.getexp.pd.128
return _mm_getexp_pd(__A);
}
__m128d test_mm_mask_getexp_pd(__m128d __W, __mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_getexp_pd
+ // CHECK-LABEL: test_mm_mask_getexp_pd
// CHECK: @llvm.x86.avx512.mask.getexp.pd.128
return _mm_mask_getexp_pd(__W,__U,__A);
}
__m128d test_mm_maskz_getexp_pd(__mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_maskz_getexp_pd
+ // CHECK-LABEL: test_mm_maskz_getexp_pd
// CHECK: @llvm.x86.avx512.mask.getexp.pd.128
return _mm_maskz_getexp_pd(__U,__A);
}
__m256d test_mm256_getexp_pd(__m256d __A) {
- // CHECK-LABEL: @test_mm256_getexp_pd
+ // CHECK-LABEL: test_mm256_getexp_pd
// CHECK: @llvm.x86.avx512.mask.getexp.pd.256
return _mm256_getexp_pd(__A);
}
__m256d test_mm256_mask_getexp_pd(__m256d __W, __mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_mask_getexp_pd
+ // CHECK-LABEL: test_mm256_mask_getexp_pd
// CHECK: @llvm.x86.avx512.mask.getexp.pd.256
return _mm256_mask_getexp_pd(__W,__U,__A);
}
__m256d test_mm256_maskz_getexp_pd(__mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_maskz_getexp_pd
+ // CHECK-LABEL: test_mm256_maskz_getexp_pd
// CHECK: @llvm.x86.avx512.mask.getexp.pd.256
return _mm256_maskz_getexp_pd(__U,__A);
}
__m128 test_mm_getexp_ps(__m128 __A) {
- // CHECK-LABEL: @test_mm_getexp_ps
+ // CHECK-LABEL: test_mm_getexp_ps
// CHECK: @llvm.x86.avx512.mask.getexp.ps.128
return _mm_getexp_ps(__A);
}
__m128 test_mm_mask_getexp_ps(__m128 __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_getexp_ps
+ // CHECK-LABEL: test_mm_mask_getexp_ps
// CHECK: @llvm.x86.avx512.mask.getexp.ps.128
return _mm_mask_getexp_ps(__W,__U,__A);
}
__m128 test_mm_maskz_getexp_ps(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_maskz_getexp_ps
+ // CHECK-LABEL: test_mm_maskz_getexp_ps
// CHECK: @llvm.x86.avx512.mask.getexp.ps.128
return _mm_maskz_getexp_ps(__U,__A);
}
__m256 test_mm256_getexp_ps(__m256 __A) {
- // CHECK-LABEL: @test_mm256_getexp_ps
+ // CHECK-LABEL: test_mm256_getexp_ps
// CHECK: @llvm.x86.avx512.mask.getexp.ps.256
return _mm256_getexp_ps(__A);
}
__m256 test_mm256_mask_getexp_ps(__m256 __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_getexp_ps
+ // CHECK-LABEL: test_mm256_mask_getexp_ps
// CHECK: @llvm.x86.avx512.mask.getexp.ps.256
return _mm256_mask_getexp_ps(__W,__U,__A);
}
__m256 test_mm256_maskz_getexp_ps(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_maskz_getexp_ps
+ // CHECK-LABEL: test_mm256_maskz_getexp_ps
// CHECK: @llvm.x86.avx512.mask.getexp.ps.256
return _mm256_maskz_getexp_ps(__U,__A);
}
__m128d test_mm_mask_max_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_mask_max_pd
+ // CHECK-LABEL: test_mm_mask_max_pd
// CHECK: @llvm.x86.sse2.max.pd
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_mask_max_pd(__W,__U,__A,__B);
}
__m128d test_mm_maskz_max_pd(__mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_maskz_max_pd
+ // CHECK-LABEL: test_mm_maskz_max_pd
// CHECK: @llvm.x86.sse2.max.pd
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_maskz_max_pd(__U,__A,__B);
}
__m256d test_mm256_mask_max_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_mask_max_pd
+ // CHECK-LABEL: test_mm256_mask_max_pd
// CHECK: @llvm.x86.avx.max.pd.256
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_max_pd(__W,__U,__A,__B);
}
__m256d test_mm256_maskz_max_pd(__mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_maskz_max_pd
+ // CHECK-LABEL: test_mm256_maskz_max_pd
// CHECK: @llvm.x86.avx.max.pd.256
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_max_pd(__U,__A,__B);
}
__m128 test_mm_mask_max_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_mask_max_ps
+ // CHECK-LABEL: test_mm_mask_max_ps
// CHECK: @llvm.x86.sse.max.ps
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask_max_ps(__W,__U,__A,__B);
}
__m128 test_mm_maskz_max_ps(__mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_maskz_max_ps
+ // CHECK-LABEL: test_mm_maskz_max_ps
// CHECK: @llvm.x86.sse.max.ps
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_maskz_max_ps(__U,__A,__B);
}
__m256 test_mm256_mask_max_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_mask_max_ps
+ // CHECK-LABEL: test_mm256_mask_max_ps
// CHECK: @llvm.x86.avx.max.ps.256
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_max_ps(__W,__U,__A,__B);
}
__m256 test_mm256_maskz_max_ps(__mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_maskz_max_ps
+ // CHECK-LABEL: test_mm256_maskz_max_ps
// CHECK: @llvm.x86.avx.max.ps.256
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_max_ps(__U,__A,__B);
}
__m128d test_mm_mask_min_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_mask_min_pd
+ // CHECK-LABEL: test_mm_mask_min_pd
// CHECK: @llvm.x86.sse2.min.pd
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_mask_min_pd(__W,__U,__A,__B);
}
__m128d test_mm_maskz_min_pd(__mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_maskz_min_pd
+ // CHECK-LABEL: test_mm_maskz_min_pd
// CHECK: @llvm.x86.sse2.min.pd
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_maskz_min_pd(__U,__A,__B);
}
__m256d test_mm256_mask_min_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_mask_min_pd
+ // CHECK-LABEL: test_mm256_mask_min_pd
// CHECK: @llvm.x86.avx.min.pd.256
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_min_pd(__W,__U,__A,__B);
}
__m256d test_mm256_maskz_min_pd(__mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_maskz_min_pd
+ // CHECK-LABEL: test_mm256_maskz_min_pd
// CHECK: @llvm.x86.avx.min.pd.256
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_min_pd(__U,__A,__B);
}
__m128 test_mm_mask_min_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_mask_min_ps
+ // CHECK-LABEL: test_mm_mask_min_ps
// CHECK: @llvm.x86.sse.min.ps
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask_min_ps(__W,__U,__A,__B);
}
__m128 test_mm_maskz_min_ps(__mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_maskz_min_ps
+ // CHECK-LABEL: test_mm_maskz_min_ps
// CHECK: @llvm.x86.sse.min.ps
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_maskz_min_ps(__U,__A,__B);
}
__m256 test_mm256_mask_min_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_mask_min_ps
+ // CHECK-LABEL: test_mm256_mask_min_ps
// CHECK: @llvm.x86.avx.min.ps.256
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_min_ps(__W,__U,__A,__B);
}
__m256 test_mm256_maskz_min_ps(__mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_maskz_min_ps
+ // CHECK-LABEL: test_mm256_maskz_min_ps
// CHECK: @llvm.x86.avx.min.ps.256
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_min_ps(__U,__A,__B);
}
__m128d test_mm_mask_mul_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_mask_mul_pd
+ // CHECK-LABEL: test_mm_mask_mul_pd
// CHECK: fmul <2 x double> %{{.*}}, %{{.*}}
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_mask_mul_pd(__W,__U,__A,__B);
}
__m128d test_mm_maskz_mul_pd(__mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_maskz_mul_pd
+ // CHECK-LABEL: test_mm_maskz_mul_pd
// CHECK: fmul <2 x double> %{{.*}}, %{{.*}}
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_maskz_mul_pd(__U,__A,__B);
}
__m256d test_mm256_mask_mul_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_mask_mul_pd
+ // CHECK-LABEL: test_mm256_mask_mul_pd
// CHECK: fmul <4 x double> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_mul_pd(__W,__U,__A,__B);
}
__m256d test_mm256_maskz_mul_pd(__mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_maskz_mul_pd
+ // CHECK-LABEL: test_mm256_maskz_mul_pd
// CHECK: fmul <4 x double> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_mul_pd(__U,__A,__B);
}
__m128 test_mm_mask_mul_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_mask_mul_ps
+ // CHECK-LABEL: test_mm_mask_mul_ps
// CHECK: fmul <4 x float> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask_mul_ps(__W,__U,__A,__B);
}
__m128 test_mm_maskz_mul_ps(__mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_maskz_mul_ps
+ // CHECK-LABEL: test_mm_maskz_mul_ps
// CHECK: fmul <4 x float> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_maskz_mul_ps(__U,__A,__B);
}
__m256 test_mm256_mask_mul_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_mask_mul_ps
+ // CHECK-LABEL: test_mm256_mask_mul_ps
// CHECK: fmul <8 x float> %{{.*}}, %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_mul_ps(__W,__U,__A,__B);
}
__m256 test_mm256_maskz_mul_ps(__mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_maskz_mul_ps
+ // CHECK-LABEL: test_mm256_maskz_mul_ps
// CHECK: fmul <8 x float> %{{.*}}, %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_mul_ps(__U,__A,__B);
}
__m128i test_mm_mask_abs_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_abs_epi32
+ // CHECK-LABEL: test_mm_mask_abs_epi32
// CHECK: [[ABS:%.*]] = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %{{.*}}, i1 false)
// CHECK: [[TMP:%.*]] = bitcast <4 x i32> [[ABS]] to <2 x i64>
// CHECK: [[ABS:%.*]] = bitcast <2 x i64> [[TMP]] to <4 x i32>
@@ -4472,7 +4473,7 @@ __m128i test_mm_mask_abs_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
return _mm_mask_abs_epi32(__W,__U,__A);
}
__m128i test_mm_maskz_abs_epi32(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_abs_epi32
+ // CHECK-LABEL: test_mm_maskz_abs_epi32
// CHECK: [[ABS:%.*]] = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %{{.*}}, i1 false)
// CHECK: [[TMP:%.*]] = bitcast <4 x i32> [[ABS]] to <2 x i64>
// CHECK: [[ABS:%.*]] = bitcast <2 x i64> [[TMP]] to <4 x i32>
@@ -4480,7 +4481,7 @@ __m128i test_mm_maskz_abs_epi32(__mmask8 __U, __m128i __A) {
return _mm_maskz_abs_epi32(__U,__A);
}
__m256i test_mm256_mask_abs_epi32(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_abs_epi32
+ // CHECK-LABEL: test_mm256_mask_abs_epi32
// CHECK: [[ABS:%.*]] = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %{{.*}}, i1 false)
// CHECK: [[TMP:%.*]] = bitcast <8 x i32> [[ABS]] to <4 x i64>
// CHECK: [[ABS:%.*]] = bitcast <4 x i64> [[TMP]] to <8 x i32>
@@ -4488,7 +4489,7 @@ __m256i test_mm256_mask_abs_epi32(__m256i __W, __mmask8 __U, __m256i __A) {
return _mm256_mask_abs_epi32(__W,__U,__A);
}
__m256i test_mm256_maskz_abs_epi32(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_abs_epi32
+ // CHECK-LABEL: test_mm256_maskz_abs_epi32
// CHECK: [[ABS:%.*]] = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %{{.*}}, i1 false)
// CHECK: [[TMP:%.*]] = bitcast <8 x i32> [[ABS]] to <4 x i64>
// CHECK: [[ABS:%.*]] = bitcast <4 x i64> [[TMP]] to <8 x i32>
@@ -4496,41 +4497,41 @@ __m256i test_mm256_maskz_abs_epi32(__mmask8 __U, __m256i __A) {
return _mm256_maskz_abs_epi32(__U,__A);
}
__m128i test_mm_abs_epi64(__m128i __A) {
- // CHECK-LABEL: @test_mm_abs_epi64
- // CHECK: [[ABS:%.*]] = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %{{.*}}, i1 false)
+ // CHECK-LABEL: test_mm_abs_epi64
+ // CHECK: [[ABS:%.*]] = call {{.*}}<2 x i64> @llvm.abs.v2i64(<2 x i64> %{{.*}}, i1 false)
return _mm_abs_epi64(__A);
}
__m128i test_mm_mask_abs_epi64(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_abs_epi64
- // CHECK: [[ABS:%.*]] = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %{{.*}}, i1 false)
+ // CHECK-LABEL: test_mm_mask_abs_epi64
+ // CHECK: [[ABS:%.*]] = call {{.*}}<2 x i64> @llvm.abs.v2i64(<2 x i64> %{{.*}}, i1 false)
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> [[ABS]], <2 x i64> %{{.*}}
return _mm_mask_abs_epi64(__W,__U,__A);
}
__m128i test_mm_maskz_abs_epi64(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_abs_epi64
- // CHECK: [[ABS:%.*]] = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %{{.*}}, i1 false)
+ // CHECK-LABEL: test_mm_maskz_abs_epi64
+ // CHECK: [[ABS:%.*]] = call {{.*}}<2 x i64> @llvm.abs.v2i64(<2 x i64> %{{.*}}, i1 false)
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> [[ABS]], <2 x i64> %{{.*}}
return _mm_maskz_abs_epi64(__U,__A);
}
__m256i test_mm256_abs_epi64(__m256i __A) {
- // CHECK-LABEL: @test_mm256_abs_epi64
- // CHECK: [[ABS:%.*]] = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %{{.*}}, i1 false)
+ // CHECK-LABEL: test_mm256_abs_epi64
+ // CHECK: [[ABS:%.*]] = call {{.*}}<4 x i64> @llvm.abs.v4i64(<4 x i64> %{{.*}}, i1 false)
return _mm256_abs_epi64(__A);
}
__m256i test_mm256_mask_abs_epi64(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_abs_epi64
- // CHECK: [[ABS:%.*]] = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %{{.*}}, i1 false)
+ // CHECK-LABEL: test_mm256_mask_abs_epi64
+ // CHECK: [[ABS:%.*]] = call {{.*}}<4 x i64> @llvm.abs.v4i64(<4 x i64> %{{.*}}, i1 false)
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> [[ABS]], <4 x i64> %{{.*}}
return _mm256_mask_abs_epi64(__W,__U,__A);
}
__m256i test_mm256_maskz_abs_epi64(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_abs_epi64
- // CHECK: [[ABS:%.*]] = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %{{.*}}, i1 false)
+ // CHECK-LABEL: test_mm256_maskz_abs_epi64
+ // CHECK: [[ABS:%.*]] = call {{.*}}<4 x i64> @llvm.abs.v4i64(<4 x i64> %{{.*}}, i1 false)
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> [[ABS]], <4 x i64> %{{.*}}
return _mm256_maskz_abs_epi64(__U,__A);
}
__m128i test_mm_maskz_max_epi32(__mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_max_epi32
+ // CHECK-LABEL: test_mm_maskz_max_epi32
// CHECK: [[RES:%.*]] = call <4 x i32> @llvm.smax.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast <4 x i32> [[RES]] to <2 x i64>
// CHECK: [[RES:%.*]] = bitcast <2 x i64> [[TMP]] to <4 x i32>
@@ -4538,7 +4539,7 @@ __m128i test_mm_maskz_max_epi32(__mmask8 __M, __m128i __A, __m128i __B) {
return _mm_maskz_max_epi32(__M,__A,__B);
}
__m128i test_mm_mask_max_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_max_epi32
+ // CHECK-LABEL: test_mm_mask_max_epi32
// CHECK: [[RES:%.*]] = call <4 x i32> @llvm.smax.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast <4 x i32> [[RES]] to <2 x i64>
// CHECK: [[RES:%.*]] = bitcast <2 x i64> [[TMP]] to <4 x i32>
@@ -4546,7 +4547,7 @@ __m128i test_mm_mask_max_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i _
return _mm_mask_max_epi32(__W,__M,__A,__B);
}
__m256i test_mm256_maskz_max_epi32(__mmask8 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_max_epi32
+ // CHECK-LABEL: test_mm256_maskz_max_epi32
// CHECK: [[RES:%.*]] = call <8 x i32> @llvm.smax.v8i32(<8 x i32> %{{.*}}, <8 x i32> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast <8 x i32> [[RES]] to <4 x i64>
// CHECK: [[RES:%.*]] = bitcast <4 x i64> [[TMP]] to <8 x i32>
@@ -4554,7 +4555,7 @@ __m256i test_mm256_maskz_max_epi32(__mmask8 __M, __m256i __A, __m256i __B) {
return _mm256_maskz_max_epi32(__M,__A,__B);
}
__m256i test_mm256_mask_max_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_max_epi32
+ // CHECK-LABEL: test_mm256_mask_max_epi32
// CHECK: [[RES:%.*]] = call <8 x i32> @llvm.smax.v8i32(<8 x i32> %{{.*}}, <8 x i32> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast <8 x i32> [[RES]] to <4 x i64>
// CHECK: [[RES:%.*]] = bitcast <4 x i64> [[TMP]] to <8 x i32>
@@ -4562,41 +4563,41 @@ __m256i test_mm256_mask_max_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256
return _mm256_mask_max_epi32(__W,__M,__A,__B);
}
__m128i test_mm_maskz_max_epi64(__mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_max_epi64
- // CHECK: [[RES:%.*]] = call <2 x i64> @llvm.smax.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm_maskz_max_epi64
+ // CHECK: [[RES:%.*]] = call {{.*}}<2 x i64> @llvm.smax.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
// CHECK: select <2 x i1> {{.*}}, <2 x i64> [[RES]], <2 x i64> {{.*}}
return _mm_maskz_max_epi64(__M,__A,__B);
}
__m128i test_mm_mask_max_epi64(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_max_epi64
- // CHECK: [[RES:%.*]] = call <2 x i64> @llvm.smax.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm_mask_max_epi64
+ // CHECK: [[RES:%.*]] = call {{.*}}<2 x i64> @llvm.smax.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
// CHECK: select <2 x i1> {{.*}}, <2 x i64> [[RES]], <2 x i64> {{.*}}
return _mm_mask_max_epi64(__W,__M,__A,__B);
}
__m128i test_mm_max_epi64(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_max_epi64
- // CHECK: [[RES:%.*]] = call <2 x i64> @llvm.smax.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm_max_epi64
+ // CHECK: [[RES:%.*]] = call {{.*}}<2 x i64> @llvm.smax.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
return _mm_max_epi64(__A,__B);
}
__m256i test_mm256_maskz_max_epi64(__mmask8 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_max_epi64
- // CHECK: [[RES:%.*]] = call <4 x i64> @llvm.smax.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm256_maskz_max_epi64
+ // CHECK: [[RES:%.*]] = call {{.*}}<4 x i64> @llvm.smax.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
// CHECK: select <4 x i1> {{.*}}, <4 x i64> [[RES]], <4 x i64> {{.*}}
return _mm256_maskz_max_epi64(__M,__A,__B);
}
__m256i test_mm256_mask_max_epi64(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_max_epi64
- // CHECK: [[RES:%.*]] = call <4 x i64> @llvm.smax.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm256_mask_max_epi64
+ // CHECK: [[RES:%.*]] = call {{.*}}<4 x i64> @llvm.smax.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
// CHECK: select <4 x i1> {{.*}}, <4 x i64> [[RES]], <4 x i64> {{.*}}
return _mm256_mask_max_epi64(__W,__M,__A,__B);
}
__m256i test_mm256_max_epi64(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_max_epi64
- // CHECK: [[RES:%.*]] = call <4 x i64> @llvm.smax.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm256_max_epi64
+ // CHECK: [[RES:%.*]] = call {{.*}}<4 x i64> @llvm.smax.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_max_epi64(__A,__B);
}
__m128i test_mm_maskz_max_epu32(__mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_max_epu32
+ // CHECK-LABEL: test_mm_maskz_max_epu32
// CHECK: [[RES:%.*]] = call <4 x i32> @llvm.umax.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast <4 x i32> [[RES]] to <2 x i64>
// CHECK: [[RES:%.*]] = bitcast <2 x i64> [[TMP]] to <4 x i32>
@@ -4604,7 +4605,7 @@ __m128i test_mm_maskz_max_epu32(__mmask8 __M, __m128i __A, __m128i __B) {
return _mm_maskz_max_epu32(__M,__A,__B);
}
__m128i test_mm_mask_max_epu32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_max_epu32
+ // CHECK-LABEL: test_mm_mask_max_epu32
// CHECK: [[RES:%.*]] = call <4 x i32> @llvm.umax.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast <4 x i32> [[RES]] to <2 x i64>
// CHECK: [[RES:%.*]] = bitcast <2 x i64> [[TMP]] to <4 x i32>
@@ -4612,7 +4613,7 @@ __m128i test_mm_mask_max_epu32(__m128i __W, __mmask8 __M, __m128i __A, __m128i _
return _mm_mask_max_epu32(__W,__M,__A,__B);
}
__m256i test_mm256_maskz_max_epu32(__mmask8 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_max_epu32
+ // CHECK-LABEL: test_mm256_maskz_max_epu32
// CHECK: [[RES:%.*]] = call <8 x i32> @llvm.umax.v8i32(<8 x i32> %{{.*}}, <8 x i32> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast <8 x i32> [[RES]] to <4 x i64>
// CHECK: [[RES:%.*]] = bitcast <4 x i64> [[TMP]] to <8 x i32>
@@ -4620,7 +4621,7 @@ __m256i test_mm256_maskz_max_epu32(__mmask8 __M, __m256i __A, __m256i __B) {
return _mm256_maskz_max_epu32(__M,__A,__B);
}
__m256i test_mm256_mask_max_epu32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_max_epu32
+ // CHECK-LABEL: test_mm256_mask_max_epu32
// CHECK: [[RES:%.*]] = call <8 x i32> @llvm.umax.v8i32(<8 x i32> %{{.*}}, <8 x i32> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast <8 x i32> [[RES]] to <4 x i64>
// CHECK: [[RES:%.*]] = bitcast <4 x i64> [[TMP]] to <8 x i32>
@@ -4628,41 +4629,41 @@ __m256i test_mm256_mask_max_epu32(__m256i __W, __mmask8 __M, __m256i __A, __m256
return _mm256_mask_max_epu32(__W,__M,__A,__B);
}
__m128i test_mm_maskz_max_epu64(__mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_max_epu64
- // CHECK: [[RES:%.*]] = call <2 x i64> @llvm.umax.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm_maskz_max_epu64
+ // CHECK: [[RES:%.*]] = call {{.*}}<2 x i64> @llvm.umax.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
// CHECK: select <2 x i1> {{.*}}, <2 x i64> [[RES]], <2 x i64> {{.*}}
return _mm_maskz_max_epu64(__M,__A,__B);
}
__m128i test_mm_max_epu64(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_max_epu64
- // CHECK: [[RES:%.*]] = call <2 x i64> @llvm.umax.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm_max_epu64
+ // CHECK: [[RES:%.*]] = call {{.*}}<2 x i64> @llvm.umax.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
return _mm_max_epu64(__A,__B);
}
__m128i test_mm_mask_max_epu64(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_max_epu64
- // CHECK: [[RES:%.*]] = call <2 x i64> @llvm.umax.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm_mask_max_epu64
+ // CHECK: [[RES:%.*]] = call {{.*}}<2 x i64> @llvm.umax.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
// CHECK: select <2 x i1> {{.*}}, <2 x i64> [[RES]], <2 x i64> {{.*}}
return _mm_mask_max_epu64(__W,__M,__A,__B);
}
__m256i test_mm256_maskz_max_epu64(__mmask8 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_max_epu64
- // CHECK: [[RES:%.*]] = call <4 x i64> @llvm.umax.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm256_maskz_max_epu64
+ // CHECK: [[RES:%.*]] = call {{.*}}<4 x i64> @llvm.umax.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
// CHECK: select <4 x i1> {{.*}}, <4 x i64> [[RES]], <4 x i64> {{.*}}
return _mm256_maskz_max_epu64(__M,__A,__B);
}
__m256i test_mm256_max_epu64(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_max_epu64
- // CHECK: [[RES:%.*]] = call <4 x i64> @llvm.umax.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm256_max_epu64
+ // CHECK: [[RES:%.*]] = call {{.*}}<4 x i64> @llvm.umax.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_max_epu64(__A,__B);
}
__m256i test_mm256_mask_max_epu64(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_max_epu64
- // CHECK: [[RES:%.*]] = call <4 x i64> @llvm.umax.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm256_mask_max_epu64
+ // CHECK: [[RES:%.*]] = call {{.*}}<4 x i64> @llvm.umax.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
// CHECK: select <4 x i1> {{.*}}, <4 x i64> [[RES]], <4 x i64> {{.*}}
return _mm256_mask_max_epu64(__W,__M,__A,__B);
}
__m128i test_mm_maskz_min_epi32(__mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_min_epi32
+ // CHECK-LABEL: test_mm_maskz_min_epi32
// CHECK: [[RES:%.*]] = call <4 x i32> @llvm.smin.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast <4 x i32> [[RES]] to <2 x i64>
// CHECK: [[RES:%.*]] = bitcast <2 x i64> [[TMP]] to <4 x i32>
@@ -4670,7 +4671,7 @@ __m128i test_mm_maskz_min_epi32(__mmask8 __M, __m128i __A, __m128i __B) {
return _mm_maskz_min_epi32(__M,__A,__B);
}
__m128i test_mm_mask_min_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_min_epi32
+ // CHECK-LABEL: test_mm_mask_min_epi32
// CHECK: [[RES:%.*]] = call <4 x i32> @llvm.smin.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast <4 x i32> [[RES]] to <2 x i64>
// CHECK: [[RES:%.*]] = bitcast <2 x i64> [[TMP]] to <4 x i32>
@@ -4678,7 +4679,7 @@ __m128i test_mm_mask_min_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i _
return _mm_mask_min_epi32(__W,__M,__A,__B);
}
__m256i test_mm256_maskz_min_epi32(__mmask8 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_min_epi32
+ // CHECK-LABEL: test_mm256_maskz_min_epi32
// CHECK: [[RES:%.*]] = call <8 x i32> @llvm.smin.v8i32(<8 x i32> %{{.*}}, <8 x i32> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast <8 x i32> [[RES]] to <4 x i64>
// CHECK: [[RES:%.*]] = bitcast <4 x i64> [[TMP]] to <8 x i32>
@@ -4686,7 +4687,7 @@ __m256i test_mm256_maskz_min_epi32(__mmask8 __M, __m256i __A, __m256i __B) {
return _mm256_maskz_min_epi32(__M,__A,__B);
}
__m256i test_mm256_mask_min_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_min_epi32
+ // CHECK-LABEL: test_mm256_mask_min_epi32
// CHECK: [[RES:%.*]] = call <8 x i32> @llvm.smin.v8i32(<8 x i32> %{{.*}}, <8 x i32> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast <8 x i32> [[RES]] to <4 x i64>
// CHECK: [[RES:%.*]] = bitcast <4 x i64> [[TMP]] to <8 x i32>
@@ -4694,41 +4695,41 @@ __m256i test_mm256_mask_min_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256
return _mm256_mask_min_epi32(__W,__M,__A,__B);
}
__m128i test_mm_min_epi64(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_min_epi64
- // CHECK: [[RES:%.*]] = call <2 x i64> @llvm.smin.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm_min_epi64
+ // CHECK: [[RES:%.*]] = call {{.*}}<2 x i64> @llvm.smin.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
return _mm_min_epi64(__A,__B);
}
__m128i test_mm_mask_min_epi64(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_min_epi64
- // CHECK: [[RES:%.*]] = call <2 x i64> @llvm.smin.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm_mask_min_epi64
+ // CHECK: [[RES:%.*]] = call {{.*}}<2 x i64> @llvm.smin.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
// CHECK: select <2 x i1> {{.*}}, <2 x i64> [[RES]], <2 x i64> {{.*}}
return _mm_mask_min_epi64(__W,__M,__A,__B);
}
__m128i test_mm_maskz_min_epi64(__mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_min_epi64
- // CHECK: [[RES:%.*]] = call <2 x i64> @llvm.smin.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm_maskz_min_epi64
+ // CHECK: [[RES:%.*]] = call {{.*}}<2 x i64> @llvm.smin.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
// CHECK: select <2 x i1> {{.*}}, <2 x i64> [[RES]], <2 x i64> {{.*}}
return _mm_maskz_min_epi64(__M,__A,__B);
}
__m256i test_mm256_min_epi64(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_min_epi64
- // CHECK: [[RES:%.*]] = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm256_min_epi64
+ // CHECK: [[RES:%.*]] = call {{.*}}<4 x i64> @llvm.smin.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_min_epi64(__A,__B);
}
__m256i test_mm256_mask_min_epi64(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_min_epi64
- // CHECK: [[RES:%.*]] = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm256_mask_min_epi64
+ // CHECK: [[RES:%.*]] = call {{.*}}<4 x i64> @llvm.smin.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
// CHECK: select <4 x i1> {{.*}}, <4 x i64> [[RES]], <4 x i64> {{.*}}
return _mm256_mask_min_epi64(__W,__M,__A,__B);
}
__m256i test_mm256_maskz_min_epi64(__mmask8 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_min_epi64
- // CHECK: [[RES:%.*]] = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm256_maskz_min_epi64
+ // CHECK: [[RES:%.*]] = call {{.*}}<4 x i64> @llvm.smin.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
// CHECK: select <4 x i1> {{.*}}, <4 x i64> [[RES]], <4 x i64> {{.*}}
return _mm256_maskz_min_epi64(__M,__A,__B);
}
__m128i test_mm_maskz_min_epu32(__mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_min_epu32
+ // CHECK-LABEL: test_mm_maskz_min_epu32
// CHECK: [[RES:%.*]] = call <4 x i32> @llvm.umin.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast <4 x i32> [[RES]] to <2 x i64>
// CHECK: [[RES:%.*]] = bitcast <2 x i64> [[TMP]] to <4 x i32>
@@ -4736,7 +4737,7 @@ __m128i test_mm_maskz_min_epu32(__mmask8 __M, __m128i __A, __m128i __B) {
return _mm_maskz_min_epu32(__M,__A,__B);
}
__m128i test_mm_mask_min_epu32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_min_epu32
+ // CHECK-LABEL: test_mm_mask_min_epu32
// CHECK: [[RES:%.*]] = call <4 x i32> @llvm.umin.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast <4 x i32> [[RES]] to <2 x i64>
// CHECK: [[RES:%.*]] = bitcast <2 x i64> [[TMP]] to <4 x i32>
@@ -4744,7 +4745,7 @@ __m128i test_mm_mask_min_epu32(__m128i __W, __mmask8 __M, __m128i __A, __m128i _
return _mm_mask_min_epu32(__W,__M,__A,__B);
}
__m256i test_mm256_maskz_min_epu32(__mmask8 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_min_epu32
+ // CHECK-LABEL: test_mm256_maskz_min_epu32
// CHECK: [[RES:%.*]] = call <8 x i32> @llvm.umin.v8i32(<8 x i32> %{{.*}}, <8 x i32> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast <8 x i32> [[RES]] to <4 x i64>
// CHECK: [[RES:%.*]] = bitcast <4 x i64> [[TMP]] to <8 x i32>
@@ -4752,7 +4753,7 @@ __m256i test_mm256_maskz_min_epu32(__mmask8 __M, __m256i __A, __m256i __B) {
return _mm256_maskz_min_epu32(__M,__A,__B);
}
__m256i test_mm256_mask_min_epu32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_min_epu32
+ // CHECK-LABEL: test_mm256_mask_min_epu32
// CHECK: [[RES:%.*]] = call <8 x i32> @llvm.umin.v8i32(<8 x i32> %{{.*}}, <8 x i32> %{{.*}})
// CHECK: [[TMP:%.*]] = bitcast <8 x i32> [[RES]] to <4 x i64>
// CHECK: [[RES:%.*]] = bitcast <4 x i64> [[TMP]] to <8 x i32>
@@ -4760,1115 +4761,1115 @@ __m256i test_mm256_mask_min_epu32(__m256i __W, __mmask8 __M, __m256i __A, __m256
return _mm256_mask_min_epu32(__W,__M,__A,__B);
}
__m128i test_mm_min_epu64(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_min_epu64
- // CHECK: [[RES:%.*]] = call <2 x i64> @llvm.umin.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm_min_epu64
+ // CHECK: [[RES:%.*]] = call {{.*}}<2 x i64> @llvm.umin.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
return _mm_min_epu64(__A,__B);
}
__m128i test_mm_mask_min_epu64(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_min_epu64
- // CHECK: [[RES:%.*]] = call <2 x i64> @llvm.umin.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm_mask_min_epu64
+ // CHECK: [[RES:%.*]] = call {{.*}}<2 x i64> @llvm.umin.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
// CHECK: select <2 x i1> {{.*}}, <2 x i64> [[RES]], <2 x i64> {{.*}}
return _mm_mask_min_epu64(__W,__M,__A,__B);
}
__m128i test_mm_maskz_min_epu64(__mmask8 __M, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_min_epu64
- // CHECK: [[RES:%.*]] = call <2 x i64> @llvm.umin.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm_maskz_min_epu64
+ // CHECK: [[RES:%.*]] = call {{.*}}<2 x i64> @llvm.umin.v2i64(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
// CHECK: select <2 x i1> {{.*}}, <2 x i64> [[RES]], <2 x i64> {{.*}}
return _mm_maskz_min_epu64(__M,__A,__B);
}
__m256i test_mm256_min_epu64(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_min_epu64
- // CHECK: [[RES:%.*]] = call <4 x i64> @llvm.umin.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm256_min_epu64
+ // CHECK: [[RES:%.*]] = call {{.*}}<4 x i64> @llvm.umin.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_min_epu64(__A,__B);
}
__m256i test_mm256_mask_min_epu64(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_min_epu64
- // CHECK: [[RES:%.*]] = call <4 x i64> @llvm.umin.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm256_mask_min_epu64
+ // CHECK: [[RES:%.*]] = call {{.*}}<4 x i64> @llvm.umin.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
// CHECK: select <4 x i1> {{.*}}, <4 x i64> [[RES]], <4 x i64> {{.*}}
return _mm256_mask_min_epu64(__W,__M,__A,__B);
}
__m256i test_mm256_maskz_min_epu64(__mmask8 __M, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_min_epu64
- // CHECK: [[RES:%.*]] = call <4 x i64> @llvm.umin.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm256_maskz_min_epu64
+ // CHECK: [[RES:%.*]] = call {{.*}}<4 x i64> @llvm.umin.v4i64(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
// CHECK: select <4 x i1> {{.*}}, <4 x i64> [[RES]], <4 x i64> {{.*}}
return _mm256_maskz_min_epu64(__M,__A,__B);
}
__m128d test_mm_roundscale_pd(__m128d __A) {
- // CHECK-LABEL: @test_mm_roundscale_pd
+ // CHECK-LABEL: test_mm_roundscale_pd
// CHECK: @llvm.x86.avx512.mask.rndscale.pd.128
return _mm_roundscale_pd(__A,4);
}
__m128d test_mm_mask_roundscale_pd(__m128d __W, __mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_roundscale_pd
+ // CHECK-LABEL: test_mm_mask_roundscale_pd
// CHECK: @llvm.x86.avx512.mask.rndscale.pd.128
return _mm_mask_roundscale_pd(__W,__U,__A,4);
}
__m128d test_mm_maskz_roundscale_pd(__mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_maskz_roundscale_pd
+ // CHECK-LABEL: test_mm_maskz_roundscale_pd
// CHECK: @llvm.x86.avx512.mask.rndscale.pd.128
return _mm_maskz_roundscale_pd(__U,__A,4);
}
__m256d test_mm256_roundscale_pd(__m256d __A) {
- // CHECK-LABEL: @test_mm256_roundscale_pd
+ // CHECK-LABEL: test_mm256_roundscale_pd
// CHECK: @llvm.x86.avx512.mask.rndscale.pd.256
return _mm256_roundscale_pd(__A,4);
}
__m256d test_mm256_mask_roundscale_pd(__m256d __W, __mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_mask_roundscale_pd
+ // CHECK-LABEL: test_mm256_mask_roundscale_pd
// CHECK: @llvm.x86.avx512.mask.rndscale.pd.256
return _mm256_mask_roundscale_pd(__W,__U,__A,4);
}
__m256d test_mm256_maskz_roundscale_pd(__mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_maskz_roundscale_pd
+ // CHECK-LABEL: test_mm256_maskz_roundscale_pd
// CHECK: @llvm.x86.avx512.mask.rndscale.pd.256
return _mm256_maskz_roundscale_pd(__U,__A,4);
}
__m128 test_mm_roundscale_ps(__m128 __A) {
- // CHECK-LABEL: @test_mm_roundscale_ps
+ // CHECK-LABEL: test_mm_roundscale_ps
// CHECK: @llvm.x86.avx512.mask.rndscale.ps.128
return _mm_roundscale_ps(__A,4);
}
__m128 test_mm_mask_roundscale_ps(__m128 __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_roundscale_ps
+ // CHECK-LABEL: test_mm_mask_roundscale_ps
// CHECK: @llvm.x86.avx512.mask.rndscale.ps.128
return _mm_mask_roundscale_ps(__W,__U,__A,4);
}
__m128 test_mm_maskz_roundscale_ps(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_maskz_roundscale_ps
+ // CHECK-LABEL: test_mm_maskz_roundscale_ps
// CHECK: @llvm.x86.avx512.mask.rndscale.ps.128
return _mm_maskz_roundscale_ps(__U,__A, 4);
}
__m256 test_mm256_roundscale_ps(__m256 __A) {
- // CHECK-LABEL: @test_mm256_roundscale_ps
+ // CHECK-LABEL: test_mm256_roundscale_ps
// CHECK: @llvm.x86.avx512.mask.rndscale.ps.256
return _mm256_roundscale_ps(__A,4);
}
__m256 test_mm256_mask_roundscale_ps(__m256 __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_roundscale_ps
+ // CHECK-LABEL: test_mm256_mask_roundscale_ps
// CHECK: @llvm.x86.avx512.mask.rndscale.ps.256
return _mm256_mask_roundscale_ps(__W,__U,__A,4);
}
__m256 test_mm256_maskz_roundscale_ps(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_maskz_roundscale_ps
+ // CHECK-LABEL: test_mm256_maskz_roundscale_ps
// CHECK: @llvm.x86.avx512.mask.rndscale.ps.256
return _mm256_maskz_roundscale_ps(__U,__A,4);
}
__m128d test_mm_scalef_pd(__m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_scalef_pd
+ // CHECK-LABEL: test_mm_scalef_pd
// CHECK: @llvm.x86.avx512.mask.scalef.pd.128
return _mm_scalef_pd(__A,__B);
}
__m128d test_mm_mask_scalef_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_mask_scalef_pd
+ // CHECK-LABEL: test_mm_mask_scalef_pd
// CHECK: @llvm.x86.avx512.mask.scalef.pd.128
return _mm_mask_scalef_pd(__W,__U,__A,__B);
}
__m128d test_mm_maskz_scalef_pd(__mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_maskz_scalef_pd
+ // CHECK-LABEL: test_mm_maskz_scalef_pd
// CHECK: @llvm.x86.avx512.mask.scalef.pd.128
return _mm_maskz_scalef_pd(__U,__A,__B);
}
__m256d test_mm256_scalef_pd(__m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_scalef_pd
+ // CHECK-LABEL: test_mm256_scalef_pd
// CHECK: @llvm.x86.avx512.mask.scalef.pd.256
return _mm256_scalef_pd(__A,__B);
}
__m256d test_mm256_mask_scalef_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_mask_scalef_pd
+ // CHECK-LABEL: test_mm256_mask_scalef_pd
// CHECK: @llvm.x86.avx512.mask.scalef.pd.256
return _mm256_mask_scalef_pd(__W,__U,__A,__B);
}
__m256d test_mm256_maskz_scalef_pd(__mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_maskz_scalef_pd
+ // CHECK-LABEL: test_mm256_maskz_scalef_pd
// CHECK: @llvm.x86.avx512.mask.scalef.pd.256
return _mm256_maskz_scalef_pd(__U,__A,__B);
}
__m128 test_mm_scalef_ps(__m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_scalef_ps
+ // CHECK-LABEL: test_mm_scalef_ps
// CHECK: @llvm.x86.avx512.mask.scalef.ps.128
return _mm_scalef_ps(__A,__B);
}
__m128 test_mm_mask_scalef_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_mask_scalef_ps
+ // CHECK-LABEL: test_mm_mask_scalef_ps
// CHECK: @llvm.x86.avx512.mask.scalef.ps.128
return _mm_mask_scalef_ps(__W,__U,__A,__B);
}
__m128 test_mm_maskz_scalef_ps(__mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_maskz_scalef_ps
+ // CHECK-LABEL: test_mm_maskz_scalef_ps
// CHECK: @llvm.x86.avx512.mask.scalef.ps.128
return _mm_maskz_scalef_ps(__U,__A,__B);
}
__m256 test_mm256_scalef_ps(__m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_scalef_ps
+ // CHECK-LABEL: test_mm256_scalef_ps
// CHECK: @llvm.x86.avx512.mask.scalef.ps.256
return _mm256_scalef_ps(__A,__B);
}
__m256 test_mm256_mask_scalef_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_mask_scalef_ps
+ // CHECK-LABEL: test_mm256_mask_scalef_ps
// CHECK: @llvm.x86.avx512.mask.scalef.ps.256
return _mm256_mask_scalef_ps(__W,__U,__A,__B);
}
__m256 test_mm256_maskz_scalef_ps(__mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_maskz_scalef_ps
+ // CHECK-LABEL: test_mm256_maskz_scalef_ps
// CHECK: @llvm.x86.avx512.mask.scalef.ps.256
return _mm256_maskz_scalef_ps(__U,__A,__B);
}
void test_mm_i64scatter_pd(double *__addr, __m128i __index, __m128d __v1) {
- // CHECK-LABEL: @test_mm_i64scatter_pd
+ // CHECK-LABEL: test_mm_i64scatter_pd
// CHECK: @llvm.x86.avx512.mask.scatterdiv2.df
return _mm_i64scatter_pd(__addr,__index,__v1,2);
}
void test_mm_mask_i64scatter_pd(double *__addr, __mmask8 __mask, __m128i __index, __m128d __v1) {
- // CHECK-LABEL: @test_mm_mask_i64scatter_pd
+ // CHECK-LABEL: test_mm_mask_i64scatter_pd
// CHECK: @llvm.x86.avx512.mask.scatterdiv2.df
return _mm_mask_i64scatter_pd(__addr,__mask,__index,__v1,2);
}
void test_mm_i64scatter_epi64(long long *__addr, __m128i __index, __m128i __v1) {
- // CHECK-LABEL: @test_mm_i64scatter_epi64
+ // CHECK-LABEL: test_mm_i64scatter_epi64
// CHECK: @llvm.x86.avx512.mask.scatterdiv2.di
return _mm_i64scatter_epi64(__addr,__index,__v1,2);
}
void test_mm_mask_i64scatter_epi64(long long *__addr, __mmask8 __mask, __m128i __index, __m128i __v1) {
- // CHECK-LABEL: @test_mm_mask_i64scatter_epi64
+ // CHECK-LABEL: test_mm_mask_i64scatter_epi64
// CHECK: @llvm.x86.avx512.mask.scatterdiv2.di
return _mm_mask_i64scatter_epi64(__addr,__mask,__index,__v1,2);
}
void test_mm256_i64scatter_pd(double *__addr, __m256i __index, __m256d __v1) {
- // CHECK-LABEL: @test_mm256_i64scatter_pd
+ // CHECK-LABEL: test_mm256_i64scatter_pd
// CHECK: @llvm.x86.avx512.mask.scatterdiv4.df
return _mm256_i64scatter_pd(__addr,__index,__v1,2);
}
void test_mm256_mask_i64scatter_pd(double *__addr, __mmask8 __mask, __m256i __index, __m256d __v1) {
- // CHECK-LABEL: @test_mm256_mask_i64scatter_pd
+ // CHECK-LABEL: test_mm256_mask_i64scatter_pd
// CHECK: @llvm.x86.avx512.mask.scatterdiv4.df
return _mm256_mask_i64scatter_pd(__addr,__mask,__index,__v1,2);
}
void test_mm256_i64scatter_epi64(long long *__addr, __m256i __index, __m256i __v1) {
- // CHECK-LABEL: @test_mm256_i64scatter_epi64
+ // CHECK-LABEL: test_mm256_i64scatter_epi64
// CHECK: @llvm.x86.avx512.mask.scatterdiv4.di
return _mm256_i64scatter_epi64(__addr,__index,__v1,2);
}
void test_mm256_mask_i64scatter_epi64(long long *__addr, __mmask8 __mask, __m256i __index, __m256i __v1) {
- // CHECK-LABEL: @test_mm256_mask_i64scatter_epi64
+ // CHECK-LABEL: test_mm256_mask_i64scatter_epi64
// CHECK: @llvm.x86.avx512.mask.scatterdiv4.di
return _mm256_mask_i64scatter_epi64(__addr,__mask,__index,__v1,2);
}
void test_mm_i64scatter_ps(float *__addr, __m128i __index, __m128 __v1) {
- // CHECK-LABEL: @test_mm_i64scatter_ps
+ // CHECK-LABEL: test_mm_i64scatter_ps
// CHECK: @llvm.x86.avx512.mask.scatterdiv4.sf
return _mm_i64scatter_ps(__addr,__index,__v1,2);
}
void test_mm_mask_i64scatter_ps(float *__addr, __mmask8 __mask, __m128i __index, __m128 __v1) {
- // CHECK-LABEL: @test_mm_mask_i64scatter_ps
+ // CHECK-LABEL: test_mm_mask_i64scatter_ps
// CHECK: @llvm.x86.avx512.mask.scatterdiv4.sf
return _mm_mask_i64scatter_ps(__addr,__mask,__index,__v1,2);
}
void test_mm_i64scatter_epi32(int *__addr, __m128i __index, __m128i __v1) {
- // CHECK-LABEL: @test_mm_i64scatter_epi32
+ // CHECK-LABEL: test_mm_i64scatter_epi32
// CHECK: @llvm.x86.avx512.mask.scatterdiv4.si
return _mm_i64scatter_epi32(__addr,__index,__v1,2);
}
void test_mm_mask_i64scatter_epi32(int *__addr, __mmask8 __mask, __m128i __index, __m128i __v1) {
- // CHECK-LABEL: @test_mm_mask_i64scatter_epi32
+ // CHECK-LABEL: test_mm_mask_i64scatter_epi32
// CHECK: @llvm.x86.avx512.mask.scatterdiv4.si
return _mm_mask_i64scatter_epi32(__addr,__mask,__index,__v1,2);
}
void test_mm256_i64scatter_ps(float *__addr, __m256i __index, __m128 __v1) {
- // CHECK-LABEL: @test_mm256_i64scatter_ps
+ // CHECK-LABEL: test_mm256_i64scatter_ps
// CHECK: @llvm.x86.avx512.mask.scatterdiv8.sf
return _mm256_i64scatter_ps(__addr,__index,__v1,2);
}
void test_mm256_mask_i64scatter_ps(float *__addr, __mmask8 __mask, __m256i __index, __m128 __v1) {
- // CHECK-LABEL: @test_mm256_mask_i64scatter_ps
+ // CHECK-LABEL: test_mm256_mask_i64scatter_ps
// CHECK: @llvm.x86.avx512.mask.scatterdiv8.sf
return _mm256_mask_i64scatter_ps(__addr,__mask,__index,__v1,2);
}
void test_mm256_i64scatter_epi32(int *__addr, __m256i __index, __m128i __v1) {
- // CHECK-LABEL: @test_mm256_i64scatter_epi32
+ // CHECK-LABEL: test_mm256_i64scatter_epi32
// CHECK: @llvm.x86.avx512.mask.scatterdiv8.si
return _mm256_i64scatter_epi32(__addr,__index,__v1,2);
}
void test_mm256_mask_i64scatter_epi32(int *__addr, __mmask8 __mask, __m256i __index, __m128i __v1) {
- // CHECK-LABEL: @test_mm256_mask_i64scatter_epi32
+ // CHECK-LABEL: test_mm256_mask_i64scatter_epi32
// CHECK: @llvm.x86.avx512.mask.scatterdiv8.si
return _mm256_mask_i64scatter_epi32(__addr,__mask,__index,__v1,2);
}
void test_mm_i32scatter_pd(double *__addr, __m128i __index, __m128d __v1) {
- // CHECK-LABEL: @test_mm_i32scatter_pd
+ // CHECK-LABEL: test_mm_i32scatter_pd
// CHECK: @llvm.x86.avx512.mask.scattersiv2.df
return _mm_i32scatter_pd(__addr,__index,__v1,2);
}
void test_mm_mask_i32scatter_pd(double *__addr, __mmask8 __mask, __m128i __index, __m128d __v1) {
- // CHECK-LABEL: @test_mm_mask_i32scatter_pd
+ // CHECK-LABEL: test_mm_mask_i32scatter_pd
// CHECK: @llvm.x86.avx512.mask.scattersiv2.df
return _mm_mask_i32scatter_pd(__addr,__mask,__index,__v1,2);
}
void test_mm_i32scatter_epi64(long long *__addr, __m128i __index, __m128i __v1) {
- // CHECK-LABEL: @test_mm_i32scatter_epi64
+ // CHECK-LABEL: test_mm_i32scatter_epi64
// CHECK: @llvm.x86.avx512.mask.scattersiv2.di
return _mm_i32scatter_epi64(__addr,__index,__v1,2);
}
void test_mm_mask_i32scatter_epi64(long long *__addr, __mmask8 __mask, __m128i __index, __m128i __v1) {
- // CHECK-LABEL: @test_mm_mask_i32scatter_epi64
+ // CHECK-LABEL: test_mm_mask_i32scatter_epi64
// CHECK: @llvm.x86.avx512.mask.scattersiv2.di
return _mm_mask_i32scatter_epi64(__addr,__mask,__index,__v1,2);
}
void test_mm256_i32scatter_pd(double *__addr, __m128i __index, __m256d __v1) {
- // CHECK-LABEL: @test_mm256_i32scatter_pd
+ // CHECK-LABEL: test_mm256_i32scatter_pd
// CHECK: @llvm.x86.avx512.mask.scattersiv4.df
return _mm256_i32scatter_pd(__addr,__index,__v1,2);
}
void test_mm256_mask_i32scatter_pd(double *__addr, __mmask8 __mask, __m128i __index, __m256d __v1) {
- // CHECK-LABEL: @test_mm256_mask_i32scatter_pd
+ // CHECK-LABEL: test_mm256_mask_i32scatter_pd
// CHECK: @llvm.x86.avx512.mask.scattersiv4.df
return _mm256_mask_i32scatter_pd(__addr,__mask,__index,__v1,2);
}
void test_mm256_i32scatter_epi64(long long *__addr, __m128i __index, __m256i __v1) {
- // CHECK-LABEL: @test_mm256_i32scatter_epi64
+ // CHECK-LABEL: test_mm256_i32scatter_epi64
// CHECK: @llvm.x86.avx512.mask.scattersiv4.di
return _mm256_i32scatter_epi64(__addr,__index,__v1,2);
}
void test_mm256_mask_i32scatter_epi64(long long *__addr, __mmask8 __mask, __m128i __index, __m256i __v1) {
- // CHECK-LABEL: @test_mm256_mask_i32scatter_epi64
+ // CHECK-LABEL: test_mm256_mask_i32scatter_epi64
// CHECK: @llvm.x86.avx512.mask.scattersiv4.di
return _mm256_mask_i32scatter_epi64(__addr,__mask,__index,__v1,2);
}
void test_mm_i32scatter_ps(float *__addr, __m128i __index, __m128 __v1) {
- // CHECK-LABEL: @test_mm_i32scatter_ps
+ // CHECK-LABEL: test_mm_i32scatter_ps
// CHECK: @llvm.x86.avx512.mask.scattersiv4.sf
return _mm_i32scatter_ps(__addr,__index,__v1,2);
}
void test_mm_mask_i32scatter_ps(float *__addr, __mmask8 __mask, __m128i __index, __m128 __v1) {
- // CHECK-LABEL: @test_mm_mask_i32scatter_ps
+ // CHECK-LABEL: test_mm_mask_i32scatter_ps
// CHECK: @llvm.x86.avx512.mask.scattersiv4.sf
return _mm_mask_i32scatter_ps(__addr,__mask,__index,__v1,2);
}
void test_mm_i32scatter_epi32(int *__addr, __m128i __index, __m128i __v1) {
- // CHECK-LABEL: @test_mm_i32scatter_epi32
+ // CHECK-LABEL: test_mm_i32scatter_epi32
// CHECK: @llvm.x86.avx512.mask.scattersiv4.si
return _mm_i32scatter_epi32(__addr,__index,__v1,2);
}
void test_mm_mask_i32scatter_epi32(int *__addr, __mmask8 __mask, __m128i __index, __m128i __v1) {
- // CHECK-LABEL: @test_mm_mask_i32scatter_epi32
+ // CHECK-LABEL: test_mm_mask_i32scatter_epi32
// CHECK: @llvm.x86.avx512.mask.scattersiv4.si
return _mm_mask_i32scatter_epi32(__addr,__mask,__index,__v1,2);
}
void test_mm256_i32scatter_ps(float *__addr, __m256i __index, __m256 __v1) {
- // CHECK-LABEL: @test_mm256_i32scatter_ps
+ // CHECK-LABEL: test_mm256_i32scatter_ps
// CHECK: @llvm.x86.avx512.mask.scattersiv8.sf
return _mm256_i32scatter_ps(__addr,__index,__v1,2);
}
void test_mm256_mask_i32scatter_ps(float *__addr, __mmask8 __mask, __m256i __index, __m256 __v1) {
- // CHECK-LABEL: @test_mm256_mask_i32scatter_ps
+ // CHECK-LABEL: test_mm256_mask_i32scatter_ps
// CHECK: @llvm.x86.avx512.mask.scattersiv8.sf
return _mm256_mask_i32scatter_ps(__addr,__mask,__index,__v1,2);
}
void test_mm256_i32scatter_epi32(int *__addr, __m256i __index, __m256i __v1) {
- // CHECK-LABEL: @test_mm256_i32scatter_epi32
+ // CHECK-LABEL: test_mm256_i32scatter_epi32
// CHECK: @llvm.x86.avx512.mask.scattersiv8.si
return _mm256_i32scatter_epi32(__addr,__index,__v1,2);
}
void test_mm256_mask_i32scatter_epi32(int *__addr, __mmask8 __mask, __m256i __index, __m256i __v1) {
- // CHECK-LABEL: @test_mm256_mask_i32scatter_epi32
+ // CHECK-LABEL: test_mm256_mask_i32scatter_epi32
// CHECK: @llvm.x86.avx512.mask.scattersiv8.si
return _mm256_mask_i32scatter_epi32(__addr,__mask,__index,__v1,2);
}
__m128d test_mm_mask_sqrt_pd(__m128d __W, __mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_sqrt_pd
+ // CHECK-LABEL: test_mm_mask_sqrt_pd
// CHECK: @llvm.sqrt.v2f64
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_mask_sqrt_pd(__W,__U,__A);
}
__m128d test_mm_maskz_sqrt_pd(__mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_maskz_sqrt_pd
+ // CHECK-LABEL: test_mm_maskz_sqrt_pd
// CHECK: @llvm.sqrt.v2f64
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_maskz_sqrt_pd(__U,__A);
}
__m256d test_mm256_mask_sqrt_pd(__m256d __W, __mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_mask_sqrt_pd
+ // CHECK-LABEL: test_mm256_mask_sqrt_pd
// CHECK: @llvm.sqrt.v4f64
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_sqrt_pd(__W,__U,__A);
}
__m256d test_mm256_maskz_sqrt_pd(__mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_maskz_sqrt_pd
+ // CHECK-LABEL: test_mm256_maskz_sqrt_pd
// CHECK: @llvm.sqrt.v4f64
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_sqrt_pd(__U,__A);
}
__m128 test_mm_mask_sqrt_ps(__m128 __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_sqrt_ps
+ // CHECK-LABEL: test_mm_mask_sqrt_ps
// CHECK: @llvm.sqrt.v4f32
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask_sqrt_ps(__W,__U,__A);
}
__m128 test_mm_maskz_sqrt_ps(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_maskz_sqrt_ps
+ // CHECK-LABEL: test_mm_maskz_sqrt_ps
// CHECK: @llvm.sqrt.v4f32
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_maskz_sqrt_ps(__U,__A);
}
__m256 test_mm256_mask_sqrt_ps(__m256 __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_sqrt_ps
+ // CHECK-LABEL: test_mm256_mask_sqrt_ps
// CHECK: @llvm.sqrt.v8f32
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_sqrt_ps(__W,__U,__A);
}
__m256 test_mm256_maskz_sqrt_ps(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_maskz_sqrt_ps
+ // CHECK-LABEL: test_mm256_maskz_sqrt_ps
// CHECK: @llvm.sqrt.v8f32
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_sqrt_ps(__U,__A);
}
__m128d test_mm_mask_sub_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_mask_sub_pd
+ // CHECK-LABEL: test_mm_mask_sub_pd
// CHECK: fsub <2 x double> %{{.*}}, %{{.*}}
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_mask_sub_pd(__W,__U,__A,__B);
}
__m128d test_mm_maskz_sub_pd(__mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_maskz_sub_pd
+ // CHECK-LABEL: test_mm_maskz_sub_pd
// CHECK: fsub <2 x double> %{{.*}}, %{{.*}}
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_maskz_sub_pd(__U,__A,__B);
}
__m256d test_mm256_mask_sub_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_mask_sub_pd
+ // CHECK-LABEL: test_mm256_mask_sub_pd
// CHECK: fsub <4 x double> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_sub_pd(__W,__U,__A,__B);
}
__m256d test_mm256_maskz_sub_pd(__mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_maskz_sub_pd
+ // CHECK-LABEL: test_mm256_maskz_sub_pd
// CHECK: fsub <4 x double> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_sub_pd(__U,__A,__B);
}
__m128 test_mm_mask_sub_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_mask_sub_ps
+ // CHECK-LABEL: test_mm_mask_sub_ps
// CHECK: fsub <4 x float> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask_sub_ps(__W,__U,__A,__B);
}
__m128 test_mm_maskz_sub_ps(__mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_maskz_sub_ps
+ // CHECK-LABEL: test_mm_maskz_sub_ps
// CHECK: fsub <4 x float> %{{.*}}, %{{.*}}
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_maskz_sub_ps(__U,__A,__B);
}
__m256 test_mm256_mask_sub_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_mask_sub_ps
+ // CHECK-LABEL: test_mm256_mask_sub_ps
// CHECK: fsub <8 x float> %{{.*}}, %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_sub_ps(__W,__U,__A,__B);
}
__m256 test_mm256_maskz_sub_ps(__mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_maskz_sub_ps
+ // CHECK-LABEL: test_mm256_maskz_sub_ps
// CHECK: fsub <8 x float> %{{.*}}, %{{.*}}
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_sub_ps(__U,__A,__B);
}
__m128i test_mm_mask2_permutex2var_epi32(__m128i __A, __m128i __I, __mmask8 __U, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask2_permutex2var_epi32
+ // CHECK-LABEL: test_mm_mask2_permutex2var_epi32
// CHECK: @llvm.x86.avx512.vpermi2var.d.128
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask2_permutex2var_epi32(__A,__I,__U,__B);
}
__m256i test_mm256_mask2_permutex2var_epi32(__m256i __A, __m256i __I, __mmask8 __U, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask2_permutex2var_epi32
+ // CHECK-LABEL: test_mm256_mask2_permutex2var_epi32
// CHECK: @llvm.x86.avx512.vpermi2var.d.256
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask2_permutex2var_epi32(__A,__I,__U,__B);
}
__m128d test_mm_mask2_permutex2var_pd(__m128d __A, __m128i __I, __mmask8 __U, __m128d __B) {
- // CHECK-LABEL: @test_mm_mask2_permutex2var_pd
+ // CHECK-LABEL: test_mm_mask2_permutex2var_pd
// CHECK: @llvm.x86.avx512.vpermi2var.pd.128
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_mask2_permutex2var_pd(__A,__I,__U,__B);
}
__m256d test_mm256_mask2_permutex2var_pd(__m256d __A, __m256i __I, __mmask8 __U, __m256d __B) {
- // CHECK-LABEL: @test_mm256_mask2_permutex2var_pd
+ // CHECK-LABEL: test_mm256_mask2_permutex2var_pd
// CHECK: @llvm.x86.avx512.vpermi2var.pd.256
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask2_permutex2var_pd(__A,__I,__U,__B);
}
__m128 test_mm_mask2_permutex2var_ps(__m128 __A, __m128i __I, __mmask8 __U, __m128 __B) {
- // CHECK-LABEL: @test_mm_mask2_permutex2var_ps
+ // CHECK-LABEL: test_mm_mask2_permutex2var_ps
// CHECK: @llvm.x86.avx512.vpermi2var.ps.128
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask2_permutex2var_ps(__A,__I,__U,__B);
}
__m256 test_mm256_mask2_permutex2var_ps(__m256 __A, __m256i __I, __mmask8 __U, __m256 __B) {
- // CHECK-LABEL: @test_mm256_mask2_permutex2var_ps
+ // CHECK-LABEL: test_mm256_mask2_permutex2var_ps
// CHECK: @llvm.x86.avx512.vpermi2var.ps.256
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask2_permutex2var_ps(__A,__I,__U,__B);
}
__m128i test_mm_mask2_permutex2var_epi64(__m128i __A, __m128i __I, __mmask8 __U, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask2_permutex2var_epi64
+ // CHECK-LABEL: test_mm_mask2_permutex2var_epi64
// CHECK: @llvm.x86.avx512.vpermi2var.q.128
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask2_permutex2var_epi64(__A,__I,__U,__B);
}
__m256i test_mm256_mask2_permutex2var_epi64(__m256i __A, __m256i __I, __mmask8 __U, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask2_permutex2var_epi64
+ // CHECK-LABEL: test_mm256_mask2_permutex2var_epi64
// CHECK: @llvm.x86.avx512.vpermi2var.q.256
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask2_permutex2var_epi64(__A,__I,__U,__B);
}
__m128i test_mm_permutex2var_epi32(__m128i __A, __m128i __I, __m128i __B) {
- // CHECK-LABEL: @test_mm_permutex2var_epi32
+ // CHECK-LABEL: test_mm_permutex2var_epi32
// CHECK: @llvm.x86.avx512.vpermi2var.d.128
return _mm_permutex2var_epi32(__A,__I,__B);
}
__m128i test_mm_mask_permutex2var_epi32(__m128i __A, __mmask8 __U, __m128i __I, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_permutex2var_epi32
+ // CHECK-LABEL: test_mm_mask_permutex2var_epi32
// CHECK: @llvm.x86.avx512.vpermi2var.d.128
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_permutex2var_epi32(__A,__U,__I,__B);
}
__m128i test_mm_maskz_permutex2var_epi32(__mmask8 __U, __m128i __A, __m128i __I, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_permutex2var_epi32
+ // CHECK-LABEL: test_mm_maskz_permutex2var_epi32
// CHECK: @llvm.x86.avx512.vpermi2var.d.128
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_permutex2var_epi32(__U,__A,__I,__B);
}
__m256i test_mm256_permutex2var_epi32(__m256i __A, __m256i __I, __m256i __B) {
- // CHECK-LABEL: @test_mm256_permutex2var_epi32
+ // CHECK-LABEL: test_mm256_permutex2var_epi32
// CHECK: @llvm.x86.avx512.vpermi2var.d.256
return _mm256_permutex2var_epi32(__A,__I,__B);
}
__m256i test_mm256_mask_permutex2var_epi32(__m256i __A, __mmask8 __U, __m256i __I, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_permutex2var_epi32
+ // CHECK-LABEL: test_mm256_mask_permutex2var_epi32
// CHECK: @llvm.x86.avx512.vpermi2var.d.256
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_permutex2var_epi32(__A,__U,__I,__B);
}
__m256i test_mm256_maskz_permutex2var_epi32(__mmask8 __U, __m256i __A, __m256i __I, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_permutex2var_epi32
+ // CHECK-LABEL: test_mm256_maskz_permutex2var_epi32
// CHECK: @llvm.x86.avx512.vpermi2var.d.256
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_permutex2var_epi32(__U,__A,__I,__B);
}
__m128d test_mm_permutex2var_pd(__m128d __A, __m128i __I, __m128d __B) {
- // CHECK-LABEL: @test_mm_permutex2var_pd
+ // CHECK-LABEL: test_mm_permutex2var_pd
// CHECK: @llvm.x86.avx512.vpermi2var.pd.128
return _mm_permutex2var_pd(__A,__I,__B);
}
__m128d test_mm_mask_permutex2var_pd(__m128d __A, __mmask8 __U, __m128i __I, __m128d __B) {
- // CHECK-LABEL: @test_mm_mask_permutex2var_pd
+ // CHECK-LABEL: test_mm_mask_permutex2var_pd
// CHECK: @llvm.x86.avx512.vpermi2var.pd.128
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_mask_permutex2var_pd(__A,__U,__I,__B);
}
__m128d test_mm_maskz_permutex2var_pd(__mmask8 __U, __m128d __A, __m128i __I, __m128d __B) {
- // CHECK-LABEL: @test_mm_maskz_permutex2var_pd
+ // CHECK-LABEL: test_mm_maskz_permutex2var_pd
// CHECK: @llvm.x86.avx512.vpermi2var.pd.128
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_maskz_permutex2var_pd(__U,__A,__I,__B);
}
__m256d test_mm256_permutex2var_pd(__m256d __A, __m256i __I, __m256d __B) {
- // CHECK-LABEL: @test_mm256_permutex2var_pd
+ // CHECK-LABEL: test_mm256_permutex2var_pd
// CHECK: @llvm.x86.avx512.vpermi2var.pd.256
return _mm256_permutex2var_pd(__A,__I,__B);
}
__m256d test_mm256_mask_permutex2var_pd(__m256d __A, __mmask8 __U, __m256i __I, __m256d __B) {
- // CHECK-LABEL: @test_mm256_mask_permutex2var_pd
+ // CHECK-LABEL: test_mm256_mask_permutex2var_pd
// CHECK: @llvm.x86.avx512.vpermi2var.pd.256
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_permutex2var_pd(__A,__U,__I,__B);
}
__m256d test_mm256_maskz_permutex2var_pd(__mmask8 __U, __m256d __A, __m256i __I, __m256d __B) {
- // CHECK-LABEL: @test_mm256_maskz_permutex2var_pd
+ // CHECK-LABEL: test_mm256_maskz_permutex2var_pd
// CHECK: @llvm.x86.avx512.vpermi2var.pd.256
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_permutex2var_pd(__U,__A,__I,__B);
}
__m128 test_mm_permutex2var_ps(__m128 __A, __m128i __I, __m128 __B) {
- // CHECK-LABEL: @test_mm_permutex2var_ps
+ // CHECK-LABEL: test_mm_permutex2var_ps
// CHECK: @llvm.x86.avx512.vpermi2var.ps.128
return _mm_permutex2var_ps(__A,__I,__B);
}
__m128 test_mm_mask_permutex2var_ps(__m128 __A, __mmask8 __U, __m128i __I, __m128 __B) {
- // CHECK-LABEL: @test_mm_mask_permutex2var_ps
+ // CHECK-LABEL: test_mm_mask_permutex2var_ps
// CHECK: @llvm.x86.avx512.vpermi2var.ps.128
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask_permutex2var_ps(__A,__U,__I,__B);
}
__m128 test_mm_maskz_permutex2var_ps(__mmask8 __U, __m128 __A, __m128i __I, __m128 __B) {
- // CHECK-LABEL: @test_mm_maskz_permutex2var_ps
+ // CHECK-LABEL: test_mm_maskz_permutex2var_ps
// CHECK: @llvm.x86.avx512.vpermi2var.ps.128
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_maskz_permutex2var_ps(__U,__A,__I,__B);
}
__m256 test_mm256_permutex2var_ps(__m256 __A, __m256i __I, __m256 __B) {
- // CHECK-LABEL: @test_mm256_permutex2var_ps
+ // CHECK-LABEL: test_mm256_permutex2var_ps
// CHECK: @llvm.x86.avx512.vpermi2var.ps.256
return _mm256_permutex2var_ps(__A,__I,__B);
}
__m256 test_mm256_mask_permutex2var_ps(__m256 __A, __mmask8 __U, __m256i __I, __m256 __B) {
- // CHECK-LABEL: @test_mm256_mask_permutex2var_ps
+ // CHECK-LABEL: test_mm256_mask_permutex2var_ps
// CHECK: @llvm.x86.avx512.vpermi2var.ps.256
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_permutex2var_ps(__A,__U,__I,__B);
}
__m256 test_mm256_maskz_permutex2var_ps(__mmask8 __U, __m256 __A, __m256i __I, __m256 __B) {
- // CHECK-LABEL: @test_mm256_maskz_permutex2var_ps
+ // CHECK-LABEL: test_mm256_maskz_permutex2var_ps
// CHECK: @llvm.x86.avx512.vpermi2var.ps.256
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_permutex2var_ps(__U,__A,__I,__B);
}
__m128i test_mm_permutex2var_epi64(__m128i __A, __m128i __I, __m128i __B) {
- // CHECK-LABEL: @test_mm_permutex2var_epi64
+ // CHECK-LABEL: test_mm_permutex2var_epi64
// CHECK: @llvm.x86.avx512.vpermi2var.q.128
return _mm_permutex2var_epi64(__A,__I,__B);
}
__m128i test_mm_mask_permutex2var_epi64(__m128i __A, __mmask8 __U, __m128i __I, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_permutex2var_epi64
+ // CHECK-LABEL: test_mm_mask_permutex2var_epi64
// CHECK: @llvm.x86.avx512.vpermi2var.q.128
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_permutex2var_epi64(__A,__U,__I,__B);
}
__m128i test_mm_maskz_permutex2var_epi64(__mmask8 __U, __m128i __A, __m128i __I, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_permutex2var_epi64
+ // CHECK-LABEL: test_mm_maskz_permutex2var_epi64
// CHECK: @llvm.x86.avx512.vpermi2var.q.128
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_permutex2var_epi64(__U,__A,__I,__B);
}
__m256i test_mm256_permutex2var_epi64(__m256i __A, __m256i __I, __m256i __B) {
- // CHECK-LABEL: @test_mm256_permutex2var_epi64
+ // CHECK-LABEL: test_mm256_permutex2var_epi64
// CHECK: @llvm.x86.avx512.vpermi2var.q.256
return _mm256_permutex2var_epi64(__A,__I,__B);
}
__m256i test_mm256_mask_permutex2var_epi64(__m256i __A, __mmask8 __U, __m256i __I, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_permutex2var_epi64
+ // CHECK-LABEL: test_mm256_mask_permutex2var_epi64
// CHECK: @llvm.x86.avx512.vpermi2var.q.256
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_permutex2var_epi64(__A,__U,__I,__B);
}
__m256i test_mm256_maskz_permutex2var_epi64(__mmask8 __U, __m256i __A, __m256i __I, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_permutex2var_epi64
+ // CHECK-LABEL: test_mm256_maskz_permutex2var_epi64
// CHECK: @llvm.x86.avx512.vpermi2var.q.256
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_permutex2var_epi64(__U,__A,__I,__B);
}
__m128i test_mm_mask_cvtepi8_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepi8_epi32
+ // CHECK-LABEL: test_mm_mask_cvtepi8_epi32
// CHECK: sext <4 x i8> %{{.*}} to <4 x i32>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_cvtepi8_epi32(__W, __U, __A);
}
__m128i test_mm_maskz_cvtepi8_epi32(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtepi8_epi32
+ // CHECK-LABEL: test_mm_maskz_cvtepi8_epi32
// CHECK: sext <4 x i8> %{{.*}} to <4 x i32>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_cvtepi8_epi32(__U, __A);
}
__m256i test_mm256_mask_cvtepi8_epi32(__m256i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepi8_epi32
+ // CHECK-LABEL: test_mm256_mask_cvtepi8_epi32
// CHECK: sext <8 x i8> %{{.*}} to <8 x i32>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_cvtepi8_epi32(__W, __U, __A);
}
__m256i test_mm256_maskz_cvtepi8_epi32(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepi8_epi32
+ // CHECK-LABEL: test_mm256_maskz_cvtepi8_epi32
// CHECK: sext <8 x i8> %{{.*}} to <8 x i32>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_cvtepi8_epi32(__U, __A);
}
__m128i test_mm_mask_cvtepi8_epi64(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepi8_epi64
+ // CHECK-LABEL: test_mm_mask_cvtepi8_epi64
// CHECK: sext <2 x i8> %{{.*}} to <2 x i64>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_cvtepi8_epi64(__W, __U, __A);
}
__m128i test_mm_maskz_cvtepi8_epi64(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtepi8_epi64
+ // CHECK-LABEL: test_mm_maskz_cvtepi8_epi64
// CHECK: sext <2 x i8> %{{.*}} to <2 x i64>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_cvtepi8_epi64(__U, __A);
}
__m256i test_mm256_mask_cvtepi8_epi64(__m256i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepi8_epi64
+ // CHECK-LABEL: test_mm256_mask_cvtepi8_epi64
// CHECK: sext <4 x i8> %{{.*}} to <4 x i64>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_cvtepi8_epi64(__W, __U, __A);
}
__m256i test_mm256_maskz_cvtepi8_epi64(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepi8_epi64
+ // CHECK-LABEL: test_mm256_maskz_cvtepi8_epi64
// CHECK: sext <4 x i8> %{{.*}} to <4 x i64>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_cvtepi8_epi64(__U, __A);
}
__m128i test_mm_mask_cvtepi32_epi64(__m128i __W, __mmask8 __U, __m128i __X) {
- // CHECK-LABEL: @test_mm_mask_cvtepi32_epi64
+ // CHECK-LABEL: test_mm_mask_cvtepi32_epi64
// CHECK: sext <2 x i32> %{{.*}} to <2 x i64>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_cvtepi32_epi64(__W, __U, __X);
}
__m128i test_mm_maskz_cvtepi32_epi64(__mmask8 __U, __m128i __X) {
- // CHECK-LABEL: @test_mm_maskz_cvtepi32_epi64
+ // CHECK-LABEL: test_mm_maskz_cvtepi32_epi64
// CHECK: sext <2 x i32> %{{.*}} to <2 x i64>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_cvtepi32_epi64(__U, __X);
}
__m256i test_mm256_mask_cvtepi32_epi64(__m256i __W, __mmask8 __U, __m128i __X) {
- // CHECK-LABEL: @test_mm256_mask_cvtepi32_epi64
+ // CHECK-LABEL: test_mm256_mask_cvtepi32_epi64
// CHECK: sext <4 x i32> %{{.*}} to <4 x i64>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_cvtepi32_epi64(__W, __U, __X);
}
__m256i test_mm256_maskz_cvtepi32_epi64(__mmask8 __U, __m128i __X) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepi32_epi64
+ // CHECK-LABEL: test_mm256_maskz_cvtepi32_epi64
// CHECK: sext <4 x i32> %{{.*}} to <4 x i64>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_cvtepi32_epi64(__U, __X);
}
__m128i test_mm_mask_cvtepi16_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepi16_epi32
+ // CHECK-LABEL: test_mm_mask_cvtepi16_epi32
// CHECK: sext <4 x i16> %{{.*}} to <4 x i32>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_cvtepi16_epi32(__W, __U, __A);
}
__m128i test_mm_maskz_cvtepi16_epi32(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtepi16_epi32
+ // CHECK-LABEL: test_mm_maskz_cvtepi16_epi32
// CHECK: sext <4 x i16> %{{.*}} to <4 x i32>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_cvtepi16_epi32(__U, __A);
}
__m256i test_mm256_mask_cvtepi16_epi32(__m256i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepi16_epi32
+ // CHECK-LABEL: test_mm256_mask_cvtepi16_epi32
// CHECK: sext <8 x i16> %{{.*}} to <8 x i32>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_cvtepi16_epi32(__W, __U, __A);
}
__m256i test_mm256_maskz_cvtepi16_epi32(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepi16_epi32
+ // CHECK-LABEL: test_mm256_maskz_cvtepi16_epi32
// CHECK: sext <8 x i16> %{{.*}} to <8 x i32>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_cvtepi16_epi32(__U, __A);
}
__m128i test_mm_mask_cvtepi16_epi64(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepi16_epi64
+ // CHECK-LABEL: test_mm_mask_cvtepi16_epi64
// CHECK: sext <2 x i16> %{{.*}} to <2 x i64>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_cvtepi16_epi64(__W, __U, __A);
}
__m128i test_mm_maskz_cvtepi16_epi64(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtepi16_epi64
+ // CHECK-LABEL: test_mm_maskz_cvtepi16_epi64
// CHECK: sext <2 x i16> %{{.*}} to <2 x i64>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_cvtepi16_epi64(__U, __A);
}
__m256i test_mm256_mask_cvtepi16_epi64(__m256i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepi16_epi64
+ // CHECK-LABEL: test_mm256_mask_cvtepi16_epi64
// CHECK: sext <4 x i16> %{{.*}} to <4 x i64>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_cvtepi16_epi64(__W, __U, __A);
}
__m256i test_mm256_maskz_cvtepi16_epi64(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepi16_epi64
+ // CHECK-LABEL: test_mm256_maskz_cvtepi16_epi64
// CHECK: sext <4 x i16> %{{.*}} to <4 x i64>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_cvtepi16_epi64(__U, __A);
}
__m128i test_mm_mask_cvtepu8_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepu8_epi32
+ // CHECK-LABEL: test_mm_mask_cvtepu8_epi32
// CHECK: zext <4 x i8> %{{.*}} to <4 x i32>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_cvtepu8_epi32(__W, __U, __A);
}
__m128i test_mm_maskz_cvtepu8_epi32(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtepu8_epi32
+ // CHECK-LABEL: test_mm_maskz_cvtepu8_epi32
// CHECK: zext <4 x i8> %{{.*}} to <4 x i32>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_cvtepu8_epi32(__U, __A);
}
__m256i test_mm256_mask_cvtepu8_epi32(__m256i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepu8_epi32
+ // CHECK-LABEL: test_mm256_mask_cvtepu8_epi32
// CHECK: zext <8 x i8> %{{.*}} to <8 x i32>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_cvtepu8_epi32(__W, __U, __A);
}
__m256i test_mm256_maskz_cvtepu8_epi32(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepu8_epi32
+ // CHECK-LABEL: test_mm256_maskz_cvtepu8_epi32
// CHECK: zext <8 x i8> %{{.*}} to <8 x i32>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_cvtepu8_epi32(__U, __A);
}
__m128i test_mm_mask_cvtepu8_epi64(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepu8_epi64
+ // CHECK-LABEL: test_mm_mask_cvtepu8_epi64
// CHECK: zext <2 x i8> %{{.*}} to <2 x i64>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_cvtepu8_epi64(__W, __U, __A);
}
__m128i test_mm_maskz_cvtepu8_epi64(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtepu8_epi64
+ // CHECK-LABEL: test_mm_maskz_cvtepu8_epi64
// CHECK: zext <2 x i8> %{{.*}} to <2 x i64>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_cvtepu8_epi64(__U, __A);
}
__m256i test_mm256_mask_cvtepu8_epi64(__m256i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepu8_epi64
+ // CHECK-LABEL: test_mm256_mask_cvtepu8_epi64
// CHECK: zext <4 x i8> %{{.*}} to <4 x i64>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_cvtepu8_epi64(__W, __U, __A);
}
__m256i test_mm256_maskz_cvtepu8_epi64(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepu8_epi64
+ // CHECK-LABEL: test_mm256_maskz_cvtepu8_epi64
// CHECK: zext <4 x i8> %{{.*}} to <4 x i64>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_cvtepu8_epi64(__U, __A);
}
__m128i test_mm_mask_cvtepu32_epi64(__m128i __W, __mmask8 __U, __m128i __X) {
- // CHECK-LABEL: @test_mm_mask_cvtepu32_epi64
+ // CHECK-LABEL: test_mm_mask_cvtepu32_epi64
// CHECK: zext <2 x i32> %{{.*}} to <2 x i64>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_cvtepu32_epi64(__W, __U, __X);
}
__m128i test_mm_maskz_cvtepu32_epi64(__mmask8 __U, __m128i __X) {
- // CHECK-LABEL: @test_mm_maskz_cvtepu32_epi64
+ // CHECK-LABEL: test_mm_maskz_cvtepu32_epi64
// CHECK: zext <2 x i32> %{{.*}} to <2 x i64>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_cvtepu32_epi64(__U, __X);
}
__m256i test_mm256_mask_cvtepu32_epi64(__m256i __W, __mmask8 __U, __m128i __X) {
- // CHECK-LABEL: @test_mm256_mask_cvtepu32_epi64
+ // CHECK-LABEL: test_mm256_mask_cvtepu32_epi64
// CHECK: zext <4 x i32> %{{.*}} to <4 x i64>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_cvtepu32_epi64(__W, __U, __X);
}
__m256i test_mm256_maskz_cvtepu32_epi64(__mmask8 __U, __m128i __X) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepu32_epi64
+ // CHECK-LABEL: test_mm256_maskz_cvtepu32_epi64
// CHECK: zext <4 x i32> %{{.*}} to <4 x i64>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_cvtepu32_epi64(__U, __X);
}
__m128i test_mm_mask_cvtepu16_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepu16_epi32
+ // CHECK-LABEL: test_mm_mask_cvtepu16_epi32
// CHECK: zext <4 x i16> %{{.*}} to <4 x i32>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_cvtepu16_epi32(__W, __U, __A);
}
__m128i test_mm_maskz_cvtepu16_epi32(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtepu16_epi32
+ // CHECK-LABEL: test_mm_maskz_cvtepu16_epi32
// CHECK: zext <4 x i16> %{{.*}} to <4 x i32>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_cvtepu16_epi32(__U, __A);
}
__m256i test_mm256_mask_cvtepu16_epi32(__m256i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepu16_epi32
+ // CHECK-LABEL: test_mm256_mask_cvtepu16_epi32
// CHECK: zext <8 x i16> %{{.*}} to <8 x i32>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_cvtepu16_epi32(__W, __U, __A);
}
__m256i test_mm256_maskz_cvtepu16_epi32(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepu16_epi32
+ // CHECK-LABEL: test_mm256_maskz_cvtepu16_epi32
// CHECK: zext <8 x i16> %{{.*}} to <8 x i32>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_cvtepu16_epi32(__U, __A);
}
__m128i test_mm_mask_cvtepu16_epi64(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepu16_epi64
+ // CHECK-LABEL: test_mm_mask_cvtepu16_epi64
// CHECK: zext <2 x i16> %{{.*}} to <2 x i64>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_cvtepu16_epi64(__W, __U, __A);
}
__m128i test_mm_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtepu16_epi64
+ // CHECK-LABEL: test_mm_maskz_cvtepu16_epi64
// CHECK: zext <2 x i16> %{{.*}} to <2 x i64>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_cvtepu16_epi64(__U, __A);
}
__m256i test_mm256_mask_cvtepu16_epi64(__m256i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepu16_epi64
+ // CHECK-LABEL: test_mm256_mask_cvtepu16_epi64
// CHECK: zext <4 x i16> %{{.*}} to <4 x i64>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_cvtepu16_epi64(__W, __U, __A);
}
__m256i test_mm256_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepu16_epi64
+ // CHECK-LABEL: test_mm256_maskz_cvtepu16_epi64
// CHECK: zext <4 x i16> %{{.*}} to <4 x i64>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_cvtepu16_epi64(__U, __A);
}
__m128i test_mm_rol_epi32(__m128i __A) {
- // CHECK-LABEL: @test_mm_rol_epi32
+ // CHECK-LABEL: test_mm_rol_epi32
// CHECK: @llvm.fshl.v4i32
return _mm_rol_epi32(__A, 5);
}
__m128i test_mm_mask_rol_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_rol_epi32
+ // CHECK-LABEL: test_mm_mask_rol_epi32
// CHECK: @llvm.fshl.v4i32
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_rol_epi32(__W, __U, __A, 5);
}
__m128i test_mm_maskz_rol_epi32(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_rol_epi32
+ // CHECK-LABEL: test_mm_maskz_rol_epi32
// CHECK: @llvm.fshl.v4i32
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_rol_epi32(__U, __A, 5);
}
__m256i test_mm256_rol_epi32(__m256i __A) {
- // CHECK-LABEL: @test_mm256_rol_epi32
+ // CHECK-LABEL: test_mm256_rol_epi32
// CHECK: @llvm.fshl.v8i32
return _mm256_rol_epi32(__A, 5);
}
__m256i test_mm256_mask_rol_epi32(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_rol_epi32
+ // CHECK-LABEL: test_mm256_mask_rol_epi32
// CHECK: @llvm.fshl.v8i32
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_rol_epi32(__W, __U, __A, 5);
}
__m256i test_mm256_maskz_rol_epi32(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_rol_epi32
+ // CHECK-LABEL: test_mm256_maskz_rol_epi32
// CHECK: @llvm.fshl.v8i32
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_rol_epi32(__U, __A, 5);
}
__m128i test_mm_rol_epi64(__m128i __A) {
- // CHECK-LABEL: @test_mm_rol_epi64
+ // CHECK-LABEL: test_mm_rol_epi64
// CHECK: @llvm.fshl.v2i64
return _mm_rol_epi64(__A, 5);
}
__m128i test_mm_mask_rol_epi64(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_rol_epi64
+ // CHECK-LABEL: test_mm_mask_rol_epi64
// CHECK: @llvm.fshl.v2i64
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_rol_epi64(__W, __U, __A, 5);
}
__m128i test_mm_maskz_rol_epi64(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_rol_epi64
+ // CHECK-LABEL: test_mm_maskz_rol_epi64
// CHECK: @llvm.fshl.v2i64
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_rol_epi64(__U, __A, 5);
}
__m256i test_mm256_rol_epi64(__m256i __A) {
- // CHECK-LABEL: @test_mm256_rol_epi64
+ // CHECK-LABEL: test_mm256_rol_epi64
// CHECK: @llvm.fshl.v4i64
return _mm256_rol_epi64(__A, 5);
}
__m256i test_mm256_mask_rol_epi64(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_rol_epi64
+ // CHECK-LABEL: test_mm256_mask_rol_epi64
// CHECK: @llvm.fshl.v4i64
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_rol_epi64(__W, __U, __A, 5);
}
__m256i test_mm256_maskz_rol_epi64(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_rol_epi64
+ // CHECK-LABEL: test_mm256_maskz_rol_epi64
// CHECK: @llvm.fshl.v4i64
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_rol_epi64(__U, __A, 5);
}
__m128i test_mm_rolv_epi32(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_rolv_epi32
+ // CHECK-LABEL: test_mm_rolv_epi32
// CHECK: llvm.fshl.v4i32
return _mm_rolv_epi32(__A, __B);
}
__m128i test_mm_mask_rolv_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_rolv_epi32
+ // CHECK-LABEL: test_mm_mask_rolv_epi32
// CHECK: llvm.fshl.v4i32
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_rolv_epi32(__W, __U, __A, __B);
}
__m128i test_mm_maskz_rolv_epi32(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_rolv_epi32
+ // CHECK-LABEL: test_mm_maskz_rolv_epi32
// CHECK: llvm.fshl.v4i32
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_rolv_epi32(__U, __A, __B);
}
__m256i test_mm256_rolv_epi32(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_rolv_epi32
+ // CHECK-LABEL: test_mm256_rolv_epi32
// CHECK: @llvm.fshl.v8i32
return _mm256_rolv_epi32(__A, __B);
}
__m256i test_mm256_mask_rolv_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_rolv_epi32
+ // CHECK-LABEL: test_mm256_mask_rolv_epi32
// CHECK: @llvm.fshl.v8i32
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_rolv_epi32(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_rolv_epi32(__mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_rolv_epi32
+ // CHECK-LABEL: test_mm256_maskz_rolv_epi32
// CHECK: @llvm.fshl.v8i32
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_rolv_epi32(__U, __A, __B);
}
__m128i test_mm_rolv_epi64(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_rolv_epi64
+ // CHECK-LABEL: test_mm_rolv_epi64
// CHECK: @llvm.fshl.v2i64
return _mm_rolv_epi64(__A, __B);
}
__m128i test_mm_mask_rolv_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_rolv_epi64
+ // CHECK-LABEL: test_mm_mask_rolv_epi64
// CHECK: @llvm.fshl.v2i64
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_rolv_epi64(__W, __U, __A, __B);
}
__m128i test_mm_maskz_rolv_epi64(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_rolv_epi64
+ // CHECK-LABEL: test_mm_maskz_rolv_epi64
// CHECK: @llvm.fshl.v2i64
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_rolv_epi64(__U, __A, __B);
}
__m256i test_mm256_rolv_epi64(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_rolv_epi64
+ // CHECK-LABEL: test_mm256_rolv_epi64
// CHECK: @llvm.fshl.v4i64
return _mm256_rolv_epi64(__A, __B);
}
__m256i test_mm256_mask_rolv_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_rolv_epi64
+ // CHECK-LABEL: test_mm256_mask_rolv_epi64
// CHECK: @llvm.fshl.v4i64
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_rolv_epi64(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_rolv_epi64(__mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_rolv_epi64
+ // CHECK-LABEL: test_mm256_maskz_rolv_epi64
// CHECK: @llvm.fshl.v4i64
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_rolv_epi64(__U, __A, __B);
}
__m128i test_mm_ror_epi32(__m128i __A) {
- // CHECK-LABEL: @test_mm_ror_epi32
+ // CHECK-LABEL: test_mm_ror_epi32
// CHECK: @llvm.fshr.v4i32
return _mm_ror_epi32(__A, 5);
}
__m128i test_mm_mask_ror_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_ror_epi32
+ // CHECK-LABEL: test_mm_mask_ror_epi32
// CHECK: @llvm.fshr.v4i32
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_ror_epi32(__W, __U, __A, 5);
}
__m128i test_mm_maskz_ror_epi32(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_ror_epi32
+ // CHECK-LABEL: test_mm_maskz_ror_epi32
// CHECK: @llvm.fshr.v4i32
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_ror_epi32(__U, __A, 5);
}
__m256i test_mm256_ror_epi32(__m256i __A) {
- // CHECK-LABEL: @test_mm256_ror_epi32
+ // CHECK-LABEL: test_mm256_ror_epi32
// CHECK: @llvm.fshr.v8i32
return _mm256_ror_epi32(__A, 5);
}
__m256i test_mm256_mask_ror_epi32(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_ror_epi32
+ // CHECK-LABEL: test_mm256_mask_ror_epi32
// CHECK: @llvm.fshr.v8i32
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_ror_epi32(__W, __U, __A, 5);
}
__m256i test_mm256_maskz_ror_epi32(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_ror_epi32
+ // CHECK-LABEL: test_mm256_maskz_ror_epi32
// CHECK: @llvm.fshr.v8i32
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_ror_epi32(__U, __A, 5);
}
__m128i test_mm_ror_epi64(__m128i __A) {
- // CHECK-LABEL: @test_mm_ror_epi64
+ // CHECK-LABEL: test_mm_ror_epi64
// CHECK: @llvm.fshr.v2i64
return _mm_ror_epi64(__A, 5);
}
__m128i test_mm_mask_ror_epi64(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_ror_epi64
+ // CHECK-LABEL: test_mm_mask_ror_epi64
// CHECK: @llvm.fshr.v2i64
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_ror_epi64(__W, __U, __A, 5);
}
__m128i test_mm_maskz_ror_epi64(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_ror_epi64
+ // CHECK-LABEL: test_mm_maskz_ror_epi64
// CHECK: @llvm.fshr.v2i64
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_ror_epi64(__U, __A, 5);
}
__m256i test_mm256_ror_epi64(__m256i __A) {
- // CHECK-LABEL: @test_mm256_ror_epi64
+ // CHECK-LABEL: test_mm256_ror_epi64
// CHECK: @llvm.fshr.v4i64
return _mm256_ror_epi64(__A, 5);
}
__m256i test_mm256_mask_ror_epi64(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_ror_epi64
+ // CHECK-LABEL: test_mm256_mask_ror_epi64
// CHECK: @llvm.fshr.v4i64
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_ror_epi64(__W, __U, __A,5);
}
__m256i test_mm256_maskz_ror_epi64(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_ror_epi64
+ // CHECK-LABEL: test_mm256_maskz_ror_epi64
// CHECK: @llvm.fshr.v4i64
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_ror_epi64(__U, __A, 5);
@@ -5876,798 +5877,798 @@ __m256i test_mm256_maskz_ror_epi64(__mmask8 __U, __m256i __A) {
__m128i test_mm_rorv_epi32(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_rorv_epi32
+ // CHECK-LABEL: test_mm_rorv_epi32
// CHECK: @llvm.fshr.v4i32
return _mm_rorv_epi32(__A, __B);
}
__m128i test_mm_mask_rorv_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_rorv_epi32
+ // CHECK-LABEL: test_mm_mask_rorv_epi32
// CHECK: @llvm.fshr.v4i32
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_rorv_epi32(__W, __U, __A, __B);
}
__m128i test_mm_maskz_rorv_epi32(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_rorv_epi32
+ // CHECK-LABEL: test_mm_maskz_rorv_epi32
// CHECK: @llvm.fshr.v4i32
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_rorv_epi32(__U, __A, __B);
}
__m256i test_mm256_rorv_epi32(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_rorv_epi32
+ // CHECK-LABEL: test_mm256_rorv_epi32
// CHECK: @llvm.fshr.v8i32
return _mm256_rorv_epi32(__A, __B);
}
__m256i test_mm256_mask_rorv_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_rorv_epi32
+ // CHECK-LABEL: test_mm256_mask_rorv_epi32
// CHECK: @llvm.fshr.v8i32
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_rorv_epi32(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_rorv_epi32(__mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_rorv_epi32
+ // CHECK-LABEL: test_mm256_maskz_rorv_epi32
// CHECK: @llvm.fshr.v8i32
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_rorv_epi32(__U, __A, __B);
}
__m128i test_mm_rorv_epi64(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_rorv_epi64
+ // CHECK-LABEL: test_mm_rorv_epi64
// CHECK: @llvm.fshr.v2i64
return _mm_rorv_epi64(__A, __B);
}
__m128i test_mm_mask_rorv_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_rorv_epi64
+ // CHECK-LABEL: test_mm_mask_rorv_epi64
// CHECK: @llvm.fshr.v2i64
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_rorv_epi64(__W, __U, __A, __B);
}
__m128i test_mm_maskz_rorv_epi64(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_rorv_epi64
+ // CHECK-LABEL: test_mm_maskz_rorv_epi64
// CHECK: @llvm.fshr.v2i64
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_rorv_epi64(__U, __A, __B);
}
__m256i test_mm256_rorv_epi64(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_rorv_epi64
+ // CHECK-LABEL: test_mm256_rorv_epi64
// CHECK: @llvm.fshr.v4i64
return _mm256_rorv_epi64(__A, __B);
}
__m256i test_mm256_mask_rorv_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_rorv_epi64
+ // CHECK-LABEL: test_mm256_mask_rorv_epi64
// CHECK: @llvm.fshr.v4i64
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_rorv_epi64(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_rorv_epi64(__mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_rorv_epi64
+ // CHECK-LABEL: test_mm256_maskz_rorv_epi64
// CHECK: @llvm.fshr.v4i64
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_rorv_epi64(__U, __A, __B);
}
__m128i test_mm_mask_sllv_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) {
- // CHECK-LABEL: @test_mm_mask_sllv_epi64
+ // CHECK-LABEL: test_mm_mask_sllv_epi64
// CHECK: @llvm.x86.avx2.psllv.q
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_sllv_epi64(__W, __U, __X, __Y);
}
__m128i test_mm_maskz_sllv_epi64(__mmask8 __U, __m128i __X, __m128i __Y) {
- // CHECK-LABEL: @test_mm_maskz_sllv_epi64
+ // CHECK-LABEL: test_mm_maskz_sllv_epi64
// CHECK: @llvm.x86.avx2.psllv.q
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_sllv_epi64(__U, __X, __Y);
}
__m256i test_mm256_mask_sllv_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_mask_sllv_epi64
+ // CHECK-LABEL: test_mm256_mask_sllv_epi64
// CHECK: @llvm.x86.avx2.psllv.q.256
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_sllv_epi64(__W, __U, __X, __Y);
}
__m256i test_mm256_maskz_sllv_epi64(__mmask8 __U, __m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_maskz_sllv_epi64
+ // CHECK-LABEL: test_mm256_maskz_sllv_epi64
// CHECK: @llvm.x86.avx2.psllv.q.256
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_sllv_epi64(__U, __X, __Y);
}
__m128i test_mm_mask_sllv_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) {
- // CHECK-LABEL: @test_mm_mask_sllv_epi32
+ // CHECK-LABEL: test_mm_mask_sllv_epi32
// CHECK: @llvm.x86.avx2.psllv.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_sllv_epi32(__W, __U, __X, __Y);
}
__m128i test_mm_maskz_sllv_epi32(__mmask8 __U, __m128i __X, __m128i __Y) {
- // CHECK-LABEL: @test_mm_maskz_sllv_epi32
+ // CHECK-LABEL: test_mm_maskz_sllv_epi32
// CHECK: @llvm.x86.avx2.psllv.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_sllv_epi32(__U, __X, __Y);
}
__m256i test_mm256_mask_sllv_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_mask_sllv_epi32
+ // CHECK-LABEL: test_mm256_mask_sllv_epi32
// CHECK: @llvm.x86.avx2.psllv.d.256
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_sllv_epi32(__W, __U, __X, __Y);
}
__m256i test_mm256_maskz_sllv_epi32(__mmask8 __U, __m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_maskz_sllv_epi32
+ // CHECK-LABEL: test_mm256_maskz_sllv_epi32
// CHECK: @llvm.x86.avx2.psllv.d.256
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_sllv_epi32(__U, __X, __Y);
}
__m128i test_mm_mask_srlv_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) {
- // CHECK-LABEL: @test_mm_mask_srlv_epi64
+ // CHECK-LABEL: test_mm_mask_srlv_epi64
// CHECK: @llvm.x86.avx2.psrlv.q
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_srlv_epi64(__W, __U, __X, __Y);
}
__m128i test_mm_maskz_srlv_epi64(__mmask8 __U, __m128i __X, __m128i __Y) {
- // CHECK-LABEL: @test_mm_maskz_srlv_epi64
+ // CHECK-LABEL: test_mm_maskz_srlv_epi64
// CHECK: @llvm.x86.avx2.psrlv.q
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_srlv_epi64(__U, __X, __Y);
}
__m256i test_mm256_mask_srlv_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_mask_srlv_epi64
+ // CHECK-LABEL: test_mm256_mask_srlv_epi64
// CHECK: @llvm.x86.avx2.psrlv.q.256
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_srlv_epi64(__W, __U, __X, __Y);
}
__m256i test_mm256_maskz_srlv_epi64(__mmask8 __U, __m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_maskz_srlv_epi64
+ // CHECK-LABEL: test_mm256_maskz_srlv_epi64
// CHECK: @llvm.x86.avx2.psrlv.q.256
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_srlv_epi64(__U, __X, __Y);
}
__m128i test_mm_mask_srlv_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) {
- // CHECK-LABEL: @test_mm_mask_srlv_epi32
+ // CHECK-LABEL: test_mm_mask_srlv_epi32
// CHECK: @llvm.x86.avx2.psrlv.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_srlv_epi32(__W, __U, __X, __Y);
}
__m128i test_mm_maskz_srlv_epi32(__mmask8 __U, __m128i __X, __m128i __Y) {
- // CHECK-LABEL: @test_mm_maskz_srlv_epi32
+ // CHECK-LABEL: test_mm_maskz_srlv_epi32
// CHECK: @llvm.x86.avx2.psrlv.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_srlv_epi32(__U, __X, __Y);
}
__m256i test_mm256_mask_srlv_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_mask_srlv_epi32
+ // CHECK-LABEL: test_mm256_mask_srlv_epi32
// CHECK: @llvm.x86.avx2.psrlv.d.256
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_srlv_epi32(__W, __U, __X, __Y);
}
__m256i test_mm256_maskz_srlv_epi32(__mmask8 __U, __m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_maskz_srlv_epi32
+ // CHECK-LABEL: test_mm256_maskz_srlv_epi32
// CHECK: @llvm.x86.avx2.psrlv.d.256
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_srlv_epi32(__U, __X, __Y);
}
__m128i test_mm_mask_srl_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_srl_epi32
+ // CHECK-LABEL: test_mm_mask_srl_epi32
// CHECK: @llvm.x86.sse2.psrl.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_srl_epi32(__W, __U, __A, __B);
}
__m128i test_mm_maskz_srl_epi32(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_srl_epi32
+ // CHECK-LABEL: test_mm_maskz_srl_epi32
// CHECK: @llvm.x86.sse2.psrl.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_srl_epi32(__U, __A, __B);
}
__m256i test_mm256_mask_srl_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm256_mask_srl_epi32
+ // CHECK-LABEL: test_mm256_mask_srl_epi32
// CHECK: @llvm.x86.avx2.psrl.d
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_srl_epi32(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_srl_epi32(__mmask8 __U, __m256i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm256_maskz_srl_epi32
+ // CHECK-LABEL: test_mm256_maskz_srl_epi32
// CHECK: @llvm.x86.avx2.psrl.d
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_srl_epi32(__U, __A, __B);
}
__m128i test_mm_mask_srli_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_srli_epi32
+ // CHECK-LABEL: test_mm_mask_srli_epi32
// CHECK: @llvm.x86.sse2.psrli.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_srli_epi32(__W, __U, __A, 5);
}
__m128i test_mm_mask_srli_epi32_2(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm_mask_srli_epi32_2
+ // CHECK-LABEL: test_mm_mask_srli_epi32_2
// CHECK: @llvm.x86.sse2.psrli.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_srli_epi32(__W, __U, __A, __B);
}
__m128i test_mm_maskz_srli_epi32(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_srli_epi32
+ // CHECK-LABEL: test_mm_maskz_srli_epi32
// CHECK: @llvm.x86.sse2.psrli.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_srli_epi32(__U, __A, 5);
}
__m128i test_mm_maskz_srli_epi32_2(__mmask8 __U, __m128i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm_maskz_srli_epi32_2
+ // CHECK-LABEL: test_mm_maskz_srli_epi32_2
// CHECK: @llvm.x86.sse2.psrli.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_srli_epi32(__U, __A, __B);
}
__m256i test_mm256_mask_srli_epi32(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_srli_epi32
+ // CHECK-LABEL: test_mm256_mask_srli_epi32
// CHECK: @llvm.x86.avx2.psrli.d
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_srli_epi32(__W, __U, __A, 5);
}
__m256i test_mm256_mask_srli_epi32_2(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm256_mask_srli_epi32_2
+ // CHECK-LABEL: test_mm256_mask_srli_epi32_2
// CHECK: @llvm.x86.avx2.psrli.d
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_srli_epi32(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_srli_epi32(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_srli_epi32
+ // CHECK-LABEL: test_mm256_maskz_srli_epi32
// CHECK: @llvm.x86.avx2.psrli.d
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_srli_epi32(__U, __A, 5);
}
__m256i test_mm256_maskz_srli_epi32_2(__mmask8 __U, __m256i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm256_maskz_srli_epi32_2
+ // CHECK-LABEL: test_mm256_maskz_srli_epi32_2
// CHECK: @llvm.x86.avx2.psrli.d
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_srli_epi32(__U, __A, __B);
}
__m128i test_mm_mask_srl_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_srl_epi64
+ // CHECK-LABEL: test_mm_mask_srl_epi64
// CHECK: @llvm.x86.sse2.psrl.q
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_srl_epi64(__W, __U, __A, __B);
}
__m128i test_mm_maskz_srl_epi64(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_srl_epi64
+ // CHECK-LABEL: test_mm_maskz_srl_epi64
// CHECK: @llvm.x86.sse2.psrl.q
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_srl_epi64(__U, __A, __B);
}
__m256i test_mm256_mask_srl_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm256_mask_srl_epi64
+ // CHECK-LABEL: test_mm256_mask_srl_epi64
// CHECK: @llvm.x86.avx2.psrl.q
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_srl_epi64(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_srl_epi64(__mmask8 __U, __m256i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm256_maskz_srl_epi64
+ // CHECK-LABEL: test_mm256_maskz_srl_epi64
// CHECK: @llvm.x86.avx2.psrl.q
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_srl_epi64(__U, __A, __B);
}
__m128i test_mm_mask_srli_epi64(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_srli_epi64
+ // CHECK-LABEL: test_mm_mask_srli_epi64
// CHECK: @llvm.x86.sse2.psrli.q
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_srli_epi64(__W, __U, __A, 5);
}
__m128i test_mm_mask_srli_epi64_2(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm_mask_srli_epi64_2
+ // CHECK-LABEL: test_mm_mask_srli_epi64_2
// CHECK: @llvm.x86.sse2.psrli.q
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_srli_epi64(__W, __U, __A, __B);
}
__m128i test_mm_maskz_srli_epi64(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_srli_epi64
+ // CHECK-LABEL: test_mm_maskz_srli_epi64
// CHECK: @llvm.x86.sse2.psrli.q
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_srli_epi64(__U, __A, 5);
}
__m128i test_mm_maskz_srli_epi64_2(__mmask8 __U, __m128i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm_maskz_srli_epi64_2
+ // CHECK-LABEL: test_mm_maskz_srli_epi64_2
// CHECK: @llvm.x86.sse2.psrli.q
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_srli_epi64(__U, __A, __B);
}
__m256i test_mm256_mask_srli_epi64(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_srli_epi64
+ // CHECK-LABEL: test_mm256_mask_srli_epi64
// CHECK: @llvm.x86.avx2.psrli.q
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_srli_epi64(__W, __U, __A, 5);
}
__m256i test_mm256_mask_srli_epi64_2(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm256_mask_srli_epi64_2
+ // CHECK-LABEL: test_mm256_mask_srli_epi64_2
// CHECK: @llvm.x86.avx2.psrli.q
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_srli_epi64(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_srli_epi64(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_srli_epi64
+ // CHECK-LABEL: test_mm256_maskz_srli_epi64
// CHECK: @llvm.x86.avx2.psrli.q
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_srli_epi64(__U, __A, 5);
}
__m256i test_mm256_maskz_srli_epi64_2(__mmask8 __U,__m256i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm256_maskz_srli_epi64_2
+ // CHECK-LABEL: test_mm256_maskz_srli_epi64_2
// CHECK: @llvm.x86.avx2.psrli.q
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_srli_epi64(__U, __A, __B);
}
__m128i test_mm_mask_sll_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_sll_epi32
+ // CHECK-LABEL: test_mm_mask_sll_epi32
// CHECK: @llvm.x86.sse2.psll.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_sll_epi32(__W, __U, __A, __B);
}
__m128i test_mm_maskz_sll_epi32(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_sll_epi32
+ // CHECK-LABEL: test_mm_maskz_sll_epi32
// CHECK: @llvm.x86.sse2.psll.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_sll_epi32(__U, __A, __B);
}
__m256i test_mm256_mask_sll_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm256_mask_sll_epi32
+ // CHECK-LABEL: test_mm256_mask_sll_epi32
// CHECK: @llvm.x86.avx2.psll.d
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_sll_epi32(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_sll_epi32(__mmask8 __U, __m256i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm256_maskz_sll_epi32
+ // CHECK-LABEL: test_mm256_maskz_sll_epi32
// CHECK: @llvm.x86.avx2.psll.d
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_sll_epi32(__U, __A, __B);
}
__m128i test_mm_mask_slli_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_slli_epi32
+ // CHECK-LABEL: test_mm_mask_slli_epi32
// CHECK: @llvm.x86.sse2.pslli.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_slli_epi32(__W, __U, __A, 5);
}
__m128i test_mm_mask_slli_epi32_2(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm_mask_slli_epi32_2
+ // CHECK-LABEL: test_mm_mask_slli_epi32_2
// CHECK: @llvm.x86.sse2.pslli.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_slli_epi32(__W, __U, __A, __B);
}
__m128i test_mm_maskz_slli_epi32(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_slli_epi32
+ // CHECK-LABEL: test_mm_maskz_slli_epi32
// CHECK: @llvm.x86.sse2.pslli.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_slli_epi32(__U, __A, 5);
}
__m128i test_mm_maskz_slli_epi32_2(__mmask8 __U, __m128i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm_maskz_slli_epi32_2
+ // CHECK-LABEL: test_mm_maskz_slli_epi32_2
// CHECK: @llvm.x86.sse2.pslli.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_slli_epi32(__U, __A, __B);
}
__m256i test_mm256_mask_slli_epi32(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_slli_epi32
+ // CHECK-LABEL: test_mm256_mask_slli_epi32
// CHECK: @llvm.x86.avx2.pslli.d
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_slli_epi32(__W, __U, __A, 5);
}
__m256i test_mm256_mask_slli_epi32_2(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm256_mask_slli_epi32_2
+ // CHECK-LABEL: test_mm256_mask_slli_epi32_2
// CHECK: @llvm.x86.avx2.pslli.d
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_slli_epi32(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_slli_epi32(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_slli_epi32
+ // CHECK-LABEL: test_mm256_maskz_slli_epi32
// CHECK: @llvm.x86.avx2.pslli.d
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_slli_epi32(__U, __A, 5);
}
__m256i test_mm256_maskz_slli_epi32_2(__mmask8 __U, __m256i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm256_maskz_slli_epi32_2
+ // CHECK-LABEL: test_mm256_maskz_slli_epi32_2
// CHECK: @llvm.x86.avx2.pslli.d
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_slli_epi32(__U, __A, __B);
}
__m128i test_mm_mask_sll_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_sll_epi64
+ // CHECK-LABEL: test_mm_mask_sll_epi64
// CHECK: @llvm.x86.sse2.psll.q
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_sll_epi64(__W, __U, __A, __B);
}
__m128i test_mm_maskz_sll_epi64(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_sll_epi64
+ // CHECK-LABEL: test_mm_maskz_sll_epi64
// CHECK: @llvm.x86.sse2.psll.q
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_sll_epi64(__U, __A, __B);
}
__m256i test_mm256_mask_sll_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm256_mask_sll_epi64
+ // CHECK-LABEL: test_mm256_mask_sll_epi64
// CHECK: @llvm.x86.avx2.psll.q
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_sll_epi64(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_sll_epi64(__mmask8 __U, __m256i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm256_maskz_sll_epi64
+ // CHECK-LABEL: test_mm256_maskz_sll_epi64
// CHECK: @llvm.x86.avx2.psll.q
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_sll_epi64(__U, __A, __B);
}
__m128i test_mm_mask_slli_epi64(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_slli_epi64
+ // CHECK-LABEL: test_mm_mask_slli_epi64
// CHECK: @llvm.x86.sse2.pslli.q
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_slli_epi64(__W, __U, __A, 5);
}
__m128i test_mm_mask_slli_epi64_2(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm_mask_slli_epi64_2
+ // CHECK-LABEL: test_mm_mask_slli_epi64_2
// CHECK: @llvm.x86.sse2.pslli.q
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_slli_epi64(__W, __U, __A, __B);
}
__m128i test_mm_maskz_slli_epi64(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_slli_epi64
+ // CHECK-LABEL: test_mm_maskz_slli_epi64
// CHECK: @llvm.x86.sse2.pslli.q
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_slli_epi64(__U, __A, 5);
}
__m128i test_mm_maskz_slli_epi64_2(__mmask8 __U, __m128i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm_maskz_slli_epi64_2
+ // CHECK-LABEL: test_mm_maskz_slli_epi64_2
// CHECK: @llvm.x86.sse2.pslli.q
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_slli_epi64(__U, __A, __B);
}
__m256i test_mm256_mask_slli_epi64(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_slli_epi64
+ // CHECK-LABEL: test_mm256_mask_slli_epi64
// CHECK: @llvm.x86.avx2.pslli.q
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_slli_epi64(__W, __U, __A, 5);
}
__m256i test_mm256_mask_slli_epi64_2(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm256_mask_slli_epi64_2
+ // CHECK-LABEL: test_mm256_mask_slli_epi64_2
// CHECK: @llvm.x86.avx2.pslli.q
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_slli_epi64(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_slli_epi64(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_slli_epi64
+ // CHECK-LABEL: test_mm256_maskz_slli_epi64
// CHECK: @llvm.x86.avx2.pslli.q
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_slli_epi64(__U, __A, 5);
}
__m256i test_mm256_maskz_slli_epi64_2(__mmask8 __U, __m256i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm256_maskz_slli_epi64_2
+ // CHECK-LABEL: test_mm256_maskz_slli_epi64_2
// CHECK: @llvm.x86.avx2.pslli.q
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_slli_epi64(__U, __A, __B);
}
__m128i test_mm_mask_srav_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) {
- // CHECK-LABEL: @test_mm_mask_srav_epi32
+ // CHECK-LABEL: test_mm_mask_srav_epi32
// CHECK: @llvm.x86.avx2.psrav.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_srav_epi32(__W, __U, __X, __Y);
}
__m128i test_mm_maskz_srav_epi32(__mmask8 __U, __m128i __X, __m128i __Y) {
- // CHECK-LABEL: @test_mm_maskz_srav_epi32
+ // CHECK-LABEL: test_mm_maskz_srav_epi32
// CHECK: @llvm.x86.avx2.psrav.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_srav_epi32(__U, __X, __Y);
}
__m256i test_mm256_mask_srav_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_mask_srav_epi32
+ // CHECK-LABEL: test_mm256_mask_srav_epi32
// CHECK: @llvm.x86.avx2.psrav.d.256
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_srav_epi32(__W, __U, __X, __Y);
}
__m256i test_mm256_maskz_srav_epi32(__mmask8 __U, __m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_maskz_srav_epi32
+ // CHECK-LABEL: test_mm256_maskz_srav_epi32
// CHECK: @llvm.x86.avx2.psrav.d.256
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_srav_epi32(__U, __X, __Y);
}
__m128i test_mm_srav_epi64(__m128i __X, __m128i __Y) {
- // CHECK-LABEL: @test_mm_srav_epi64
+ // CHECK-LABEL: test_mm_srav_epi64
// CHECK: @llvm.x86.avx512.psrav.q.128
return _mm_srav_epi64(__X, __Y);
}
__m128i test_mm_mask_srav_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) {
- // CHECK-LABEL: @test_mm_mask_srav_epi64
+ // CHECK-LABEL: test_mm_mask_srav_epi64
// CHECK: @llvm.x86.avx512.psrav.q.128
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_srav_epi64(__W, __U, __X, __Y);
}
__m128i test_mm_maskz_srav_epi64(__mmask8 __U, __m128i __X, __m128i __Y) {
- // CHECK-LABEL: @test_mm_maskz_srav_epi64
+ // CHECK-LABEL: test_mm_maskz_srav_epi64
// CHECK: @llvm.x86.avx512.psrav.q.128
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_srav_epi64(__U, __X, __Y);
}
__m256i test_mm256_srav_epi64(__m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_srav_epi64
+ // CHECK-LABEL: test_mm256_srav_epi64
// CHECK: @llvm.x86.avx512.psrav.q.256
return _mm256_srav_epi64(__X, __Y);
}
__m256i test_mm256_mask_srav_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_mask_srav_epi64
+ // CHECK-LABEL: test_mm256_mask_srav_epi64
// CHECK: @llvm.x86.avx512.psrav.q.256
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_srav_epi64(__W, __U, __X, __Y);
}
__m256i test_mm256_maskz_srav_epi64(__mmask8 __U, __m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_maskz_srav_epi64
+ // CHECK-LABEL: test_mm256_maskz_srav_epi64
// CHECK: @llvm.x86.avx512.psrav.q.256
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_srav_epi64(__U, __X, __Y);
}
void test_mm_store_epi32(void *__P, __m128i __A) {
- // CHECK-LABEL: @test_mm_store_epi32
+ // CHECK-LABEL: test_mm_store_epi32
// CHECK: store <2 x i64> %{{.*}}, ptr %{{.*}}
return _mm_store_epi32(__P, __A);
}
void test_mm_mask_store_epi32(void *__P, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_store_epi32
+ // CHECK-LABEL: test_mm_mask_store_epi32
// CHECK: @llvm.masked.store.v4i32.p0(<4 x i32> %{{.*}}, ptr %{{.}}, i32 16, <4 x i1> %{{.*}})
return _mm_mask_store_epi32(__P, __U, __A);
}
void test_mm256_store_epi32(void *__P, __m256i __A) {
- // CHECK-LABEL: @test_mm256_store_epi32
+ // CHECK-LABEL: test_mm256_store_epi32
// CHECK: store <4 x i64> %{{.*}}, ptr %{{.*}}
return _mm256_store_epi32(__P, __A);
}
void test_mm256_mask_store_epi32(void *__P, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_store_epi32
+ // CHECK-LABEL: test_mm256_mask_store_epi32
// CHECK: @llvm.masked.store.v8i32.p0(<8 x i32> %{{.*}}, ptr %{{.}}, i32 32, <8 x i1> %{{.*}})
return _mm256_mask_store_epi32(__P, __U, __A);
}
__m128i test_mm_mask_mov_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_mov_epi32
+ // CHECK-LABEL: test_mm_mask_mov_epi32
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_mov_epi32(__W, __U, __A);
}
__m128i test_mm_maskz_mov_epi32(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_mov_epi32
+ // CHECK-LABEL: test_mm_maskz_mov_epi32
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_mov_epi32(__U, __A);
}
__m256i test_mm256_mask_mov_epi32(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_mov_epi32
+ // CHECK-LABEL: test_mm256_mask_mov_epi32
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_mov_epi32(__W, __U, __A);
}
__m256i test_mm256_maskz_mov_epi32(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_mov_epi32
+ // CHECK-LABEL: test_mm256_maskz_mov_epi32
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_mov_epi32(__U, __A);
}
__m128i test_mm_mask_mov_epi64(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_mov_epi64
+ // CHECK-LABEL: test_mm_mask_mov_epi64
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_mov_epi64(__W, __U, __A);
}
__m128i test_mm_maskz_mov_epi64(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_mov_epi64
+ // CHECK-LABEL: test_mm_maskz_mov_epi64
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_mov_epi64(__U, __A);
}
__m256i test_mm256_mask_mov_epi64(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_mov_epi64
+ // CHECK-LABEL: test_mm256_mask_mov_epi64
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_mov_epi64(__W, __U, __A);
}
__m256i test_mm256_maskz_mov_epi64(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_mov_epi64
+ // CHECK-LABEL: test_mm256_maskz_mov_epi64
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_mov_epi64(__U, __A);
}
__m128i test_mm_load_epi32(void const *__P) {
- // CHECK-LABEL: @test_mm_load_epi32
+ // CHECK-LABEL: test_mm_load_epi32
// CHECK: load <2 x i64>, ptr %{{.*}}
return _mm_load_epi32(__P);
}
__m128i test_mm_mask_load_epi32(__m128i __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_mask_load_epi32
+ // CHECK-LABEL: test_mm_mask_load_epi32
// CHECK: @llvm.masked.load.v4i32.p0(ptr %{{.*}}, i32 16, <4 x i1> %{{.*}}, <4 x i32> %{{.*}})
return _mm_mask_load_epi32(__W, __U, __P);
}
__m128i test_mm_maskz_load_epi32(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_maskz_load_epi32
+ // CHECK-LABEL: test_mm_maskz_load_epi32
// CHECK: @llvm.masked.load.v4i32.p0(ptr %{{.*}}, i32 16, <4 x i1> %{{.*}}, <4 x i32> %{{.*}})
return _mm_maskz_load_epi32(__U, __P);
}
__m256i test_mm256_load_epi32(void const *__P) {
- // CHECK-LABEL: @test_mm256_load_epi32
+ // CHECK-LABEL: test_mm256_load_epi32
// CHECK: load <4 x i64>, ptr %{{.*}}
return _mm256_load_epi32(__P);
}
__m256i test_mm256_mask_load_epi32(__m256i __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_mask_load_epi32
+ // CHECK-LABEL: test_mm256_mask_load_epi32
// CHECK: @llvm.masked.load.v8i32.p0(ptr %{{.*}}, i32 32, <8 x i1> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_mask_load_epi32(__W, __U, __P);
}
__m256i test_mm256_maskz_load_epi32(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_maskz_load_epi32
+ // CHECK-LABEL: test_mm256_maskz_load_epi32
// CHECK: @llvm.masked.load.v8i32.p0(ptr %{{.*}}, i32 32, <8 x i1> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_maskz_load_epi32(__U, __P);
}
__m128i test_mm_load_epi64(void const *__P) {
- // CHECK-LABEL: @test_mm_load_epi64
+ // CHECK-LABEL: test_mm_load_epi64
// CHECK: load <2 x i64>, ptr %{{.*}}
return _mm_load_epi64(__P);
}
__m128i test_mm_mask_load_epi64(__m128i __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_mask_load_epi64
+ // CHECK-LABEL: test_mm_mask_load_epi64
// CHECK: @llvm.masked.load.v2i64.p0(ptr %{{.*}}, i32 16, <2 x i1> %{{.*}}, <2 x i64> %{{.*}})
return _mm_mask_load_epi64(__W, __U, __P);
}
__m128i test_mm_maskz_load_epi64(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_maskz_load_epi64
+ // CHECK-LABEL: test_mm_maskz_load_epi64
// CHECK: @llvm.masked.load.v2i64.p0(ptr %{{.*}}, i32 16, <2 x i1> %{{.*}}, <2 x i64> %{{.*}})
return _mm_maskz_load_epi64(__U, __P);
}
__m256i test_mm256_load_epi64(void const *__P) {
- // CHECK-LABEL: @test_mm256_load_epi64
+ // CHECK-LABEL: test_mm256_load_epi64
// CHECK: load <4 x i64>, ptr %{{.*}}
return _mm256_load_epi64(__P);
}
__m256i test_mm256_mask_load_epi64(__m256i __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_mask_load_epi64
+ // CHECK-LABEL: test_mm256_mask_load_epi64
// CHECK: @llvm.masked.load.v4i64.p0(ptr %{{.*}}, i32 32, <4 x i1> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_mask_load_epi64(__W, __U, __P);
}
__m256i test_mm256_maskz_load_epi64(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_maskz_load_epi64
+ // CHECK-LABEL: test_mm256_maskz_load_epi64
// CHECK: @llvm.masked.load.v4i64.p0(ptr %{{.*}}, i32 32, <4 x i1> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_maskz_load_epi64(__U, __P);
}
void test_mm_store_epi64(void *__P, __m128i __A) {
- // CHECK-LABEL: @test_mm_store_epi64
+ // CHECK-LABEL: test_mm_store_epi64
// CHECK: store <2 x i64> %{{.*}}, ptr %{{.*}}
return _mm_store_epi64(__P, __A);
}
void test_mm_mask_store_epi64(void *__P, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_store_epi64
+ // CHECK-LABEL: test_mm_mask_store_epi64
// CHECK: @llvm.masked.store.v2i64.p0(<2 x i64> %{{.*}}, ptr %{{.*}}, i32 16, <2 x i1> %{{.*}})
return _mm_mask_store_epi64(__P, __U, __A);
}
void test_mm256_store_epi64(void *__P, __m256i __A) {
- // CHECK-LABEL: @test_mm256_store_epi64
+ // CHECK-LABEL: test_mm256_store_epi64
// CHECK: store <4 x i64> %{{.*}}, ptr %{{.*}}
return _mm256_store_epi64(__P, __A);
}
void test_mm256_mask_store_epi64(void *__P, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_store_epi64
+ // CHECK-LABEL: test_mm256_mask_store_epi64
// CHECK: @llvm.masked.store.v4i64.p0(<4 x i64> %{{.*}}, ptr %{{.*}}, i32 32, <4 x i1> %{{.*}})
return _mm256_mask_store_epi64(__P, __U, __A);
}
__m128d test_mm_mask_movedup_pd(__m128d __W, __mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_movedup_pd
+ // CHECK-LABEL: test_mm_mask_movedup_pd
// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> zeroinitializer
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_mask_movedup_pd(__W, __U, __A);
}
__m128d test_mm_maskz_movedup_pd(__mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_maskz_movedup_pd
+ // CHECK-LABEL: test_mm_maskz_movedup_pd
// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> zeroinitializer
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_maskz_movedup_pd(__U, __A);
}
__m256d test_mm256_mask_movedup_pd(__m256d __W, __mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_mask_movedup_pd
+ // CHECK-LABEL: test_mm256_mask_movedup_pd
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_movedup_pd(__W, __U, __A);
}
__m256d test_mm256_maskz_movedup_pd(__mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_maskz_movedup_pd
+ // CHECK-LABEL: test_mm256_maskz_movedup_pd
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_movedup_pd(__U, __A);
}
__m128i test_mm_mask_set1_epi32(__m128i __O, __mmask8 __M) {
- // CHECK-LABEL: @test_mm_mask_set1_epi32
+ // CHECK-LABEL: test_mm_mask_set1_epi32
// CHECK: insertelement <4 x i32> poison, i32 %{{.*}}, i32 0
// CHECK: insertelement <4 x i32> %{{.*}}32 1
// CHECK: insertelement <4 x i32> %{{.*}}32 2
@@ -6678,7 +6679,7 @@ __m128i test_mm_mask_set1_epi32(__m128i __O, __mmask8 __M) {
}
__m128i test_mm_maskz_set1_epi32(__mmask8 __M) {
- // CHECK-LABEL: @test_mm_maskz_set1_epi32
+ // CHECK-LABEL: test_mm_maskz_set1_epi32
// CHECK: insertelement <4 x i32> poison, i32 %{{.*}}, i32 0
// CHECK: insertelement <4 x i32> %{{.*}}32 1
// CHECK: insertelement <4 x i32> %{{.*}}32 2
@@ -6689,7 +6690,7 @@ __m128i test_mm_maskz_set1_epi32(__mmask8 __M) {
}
__m256i test_mm256_mask_set1_epi32(__m256i __O, __mmask8 __M) {
- // CHECK-LABEL: @test_mm256_mask_set1_epi32
+ // CHECK-LABEL: test_mm256_mask_set1_epi32
// CHECK: insertelement <8 x i32> poison, i32 %{{.*}}, i32 0
// CHECK: insertelement <8 x i32> %{{.*}}, i32 %{{.*}}, i32 1
// CHECK: insertelement <8 x i32> %{{.*}}, i32 %{{.*}}, i32 2
@@ -6703,7 +6704,7 @@ __m256i test_mm256_mask_set1_epi32(__m256i __O, __mmask8 __M) {
}
__m256i test_mm256_maskz_set1_epi32(__mmask8 __M) {
- // CHECK-LABEL: @test_mm256_maskz_set1_epi32
+ // CHECK-LABEL: test_mm256_maskz_set1_epi32
// CHECK: insertelement <8 x i32> poison, i32 %{{.*}}, i32 0
// CHECK: insertelement <8 x i32> %{{.*}}, i32 %{{.*}}, i32 1
// CHECK: insertelement <8 x i32> %{{.*}}, i32 %{{.*}}, i32 2
@@ -6717,7 +6718,7 @@ __m256i test_mm256_maskz_set1_epi32(__mmask8 __M) {
}
__m128i test_mm_mask_set1_epi64(__m128i __O, __mmask8 __M, long long __A) {
- // CHECK-LABEL: @test_mm_mask_set1_epi64
+ // CHECK-LABEL: test_mm_mask_set1_epi64
// CHECK: insertelement <2 x i64> poison, i64 %{{.*}}, i32 0
// CHECK: insertelement <2 x i64> %{{.*}}, i64 %{{.*}}, i32 1
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <2 x i32> <i32 0, i32 1>
@@ -6726,7 +6727,7 @@ __m128i test_mm_mask_set1_epi64(__m128i __O, __mmask8 __M, long long __A) {
}
__m128i test_mm_maskz_set1_epi64(__mmask8 __M, long long __A) {
- // CHECK-LABEL: @test_mm_maskz_set1_epi64
+ // CHECK-LABEL: test_mm_maskz_set1_epi64
// CHECK: insertelement <2 x i64> poison, i64 %{{.*}}, i32 0
// CHECK: insertelement <2 x i64> %{{.*}}, i64 %{{.*}}, i32 1
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <2 x i32> <i32 0, i32 1>
@@ -6735,7 +6736,7 @@ __m128i test_mm_maskz_set1_epi64(__mmask8 __M, long long __A) {
}
__m256i test_mm256_mask_set1_epi64(__m256i __O, __mmask8 __M, long long __A) {
- // CHECK-LABEL: @test_mm256_mask_set1_epi64
+ // CHECK-LABEL: test_mm256_mask_set1_epi64
// CHECK: insertelement <4 x i64> poison, i64 %{{.*}}, i32 0
// CHECK: insertelement <4 x i64> %{{.*}}, i64 %{{.*}}, i32 1
// CHECK: insertelement <4 x i64> %{{.*}}, i64 %{{.*}}, i32 2
@@ -6746,7 +6747,7 @@ __m256i test_mm256_mask_set1_epi64(__m256i __O, __mmask8 __M, long long __A) {
}
__m256i test_mm256_maskz_set1_epi64(__mmask8 __M, long long __A) {
- // CHECK-LABEL: @test_mm256_maskz_set1_epi64
+ // CHECK-LABEL: test_mm256_maskz_set1_epi64
// CHECK: insertelement <4 x i64> poison, i64 %{{.*}}, i32 0
// CHECK: insertelement <4 x i64> %{{.*}}, i64 %{{.*}}, i32 1
// CHECK: insertelement <4 x i64> %{{.*}}, i64 %{{.*}}, i32 2
@@ -6757,646 +6758,646 @@ __m256i test_mm256_maskz_set1_epi64(__mmask8 __M, long long __A) {
}
__m128d test_mm_fixupimm_pd(__m128d __A, __m128d __B, __m128i __C) {
- // CHECK-LABEL: @test_mm_fixupimm_pd
+ // CHECK-LABEL: test_mm_fixupimm_pd
// CHECK: @llvm.x86.avx512.mask.fixupimm.pd.128
return _mm_fixupimm_pd(__A, __B, __C, 5);
}
__m128d test_mm_mask_fixupimm_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128i __C) {
- // CHECK-LABEL: @test_mm_mask_fixupimm_pd
+ // CHECK-LABEL: test_mm_mask_fixupimm_pd
// CHECK: @llvm.x86.avx512.mask.fixupimm.pd.128
return _mm_mask_fixupimm_pd(__A, __U, __B, __C, 5);
}
__m128d test_mm_maskz_fixupimm_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128i __C) {
- // CHECK-LABEL: @test_mm_maskz_fixupimm_pd
+ // CHECK-LABEL: test_mm_maskz_fixupimm_pd
// CHECK: @llvm.x86.avx512.maskz.fixupimm.pd.128
return _mm_maskz_fixupimm_pd(__U, __A, __B, __C, 5);
}
__m256d test_mm256_fixupimm_pd(__m256d __A, __m256d __B, __m256i __C) {
- // CHECK-LABEL: @test_mm256_fixupimm_pd
+ // CHECK-LABEL: test_mm256_fixupimm_pd
// CHECK: @llvm.x86.avx512.mask.fixupimm.pd.256
return _mm256_fixupimm_pd(__A, __B, __C, 5);
}
__m256d test_mm256_mask_fixupimm_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256i __C) {
- // CHECK-LABEL: @test_mm256_mask_fixupimm_pd
+ // CHECK-LABEL: test_mm256_mask_fixupimm_pd
// CHECK: @llvm.x86.avx512.mask.fixupimm.pd.256
return _mm256_mask_fixupimm_pd(__A, __U, __B, __C, 5);
}
__m256d test_mm256_maskz_fixupimm_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256i __C) {
- // CHECK-LABEL: @test_mm256_maskz_fixupimm_pd
+ // CHECK-LABEL: test_mm256_maskz_fixupimm_pd
// CHECK: @llvm.x86.avx512.maskz.fixupimm.pd.256
return _mm256_maskz_fixupimm_pd(__U, __A, __B, __C, 5);
}
__m128 test_mm_fixupimm_ps(__m128 __A, __m128 __B, __m128i __C) {
- // CHECK-LABEL: @test_mm_fixupimm_ps
+ // CHECK-LABEL: test_mm_fixupimm_ps
// CHECK: @llvm.x86.avx512.mask.fixupimm.ps.128
return _mm_fixupimm_ps(__A, __B, __C, 5);
}
__m128 test_mm_mask_fixupimm_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128i __C) {
- // CHECK-LABEL: @test_mm_mask_fixupimm_ps
+ // CHECK-LABEL: test_mm_mask_fixupimm_ps
// CHECK: @llvm.x86.avx512.mask.fixupimm.ps.128
return _mm_mask_fixupimm_ps(__A, __U, __B, __C, 5);
}
__m128 test_mm_maskz_fixupimm_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128i __C) {
- // CHECK-LABEL: @test_mm_maskz_fixupimm_ps
+ // CHECK-LABEL: test_mm_maskz_fixupimm_ps
// CHECK: @llvm.x86.avx512.maskz.fixupimm.ps.128
return _mm_maskz_fixupimm_ps(__U, __A, __B, __C, 5);
}
__m256 test_mm256_fixupimm_ps(__m256 __A, __m256 __B, __m256i __C) {
- // CHECK-LABEL: @test_mm256_fixupimm_ps
+ // CHECK-LABEL: test_mm256_fixupimm_ps
// CHECK: @llvm.x86.avx512.mask.fixupimm.ps.256
return _mm256_fixupimm_ps(__A, __B, __C, 5);
}
__m256 test_mm256_mask_fixupimm_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256i __C) {
- // CHECK-LABEL: @test_mm256_mask_fixupimm_ps
+ // CHECK-LABEL: test_mm256_mask_fixupimm_ps
// CHECK: @llvm.x86.avx512.mask.fixupimm.ps.256
return _mm256_mask_fixupimm_ps(__A, __U, __B, __C, 5);
}
__m256 test_mm256_maskz_fixupimm_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256i __C) {
- // CHECK-LABEL: @test_mm256_maskz_fixupimm_ps
+ // CHECK-LABEL: test_mm256_maskz_fixupimm_ps
// CHECK: @llvm.x86.avx512.maskz.fixupimm.ps.256
return _mm256_maskz_fixupimm_ps(__U, __A, __B, __C, 5);
}
__m128d test_mm_mask_load_pd(__m128d __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_mask_load_pd
+ // CHECK-LABEL: test_mm_mask_load_pd
// CHECK: @llvm.masked.load.v2f64.p0(ptr %{{.*}}, i32 16, <2 x i1> %{{.*}}, <2 x double> %{{.*}})
return _mm_mask_load_pd(__W, __U, __P);
}
__m128d test_mm_maskz_load_pd(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_maskz_load_pd
+ // CHECK-LABEL: test_mm_maskz_load_pd
// CHECK: @llvm.masked.load.v2f64.p0(ptr %{{.*}}, i32 16, <2 x i1> %{{.*}}, <2 x double> %{{.*}})
return _mm_maskz_load_pd(__U, __P);
}
__m256d test_mm256_mask_load_pd(__m256d __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_mask_load_pd
+ // CHECK-LABEL: test_mm256_mask_load_pd
// CHECK: @llvm.masked.load.v4f64.p0(ptr %{{.*}}, i32 32, <4 x i1> %{{.*}}, <4 x double> %{{.*}})
return _mm256_mask_load_pd(__W, __U, __P);
}
__m256d test_mm256_maskz_load_pd(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_maskz_load_pd
+ // CHECK-LABEL: test_mm256_maskz_load_pd
// CHECK: @llvm.masked.load.v4f64.p0(ptr %{{.*}}, i32 32, <4 x i1> %{{.*}}, <4 x double> %{{.*}})
return _mm256_maskz_load_pd(__U, __P);
}
__m128 test_mm_mask_load_ps(__m128 __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_mask_load_ps
+ // CHECK-LABEL: test_mm_mask_load_ps
// CHECK: @llvm.masked.load.v4f32.p0(ptr %{{.*}}, i32 16, <4 x i1> %{{.*}}, <4 x float> %{{.*}})
return _mm_mask_load_ps(__W, __U, __P);
}
__m128 test_mm_maskz_load_ps(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_maskz_load_ps
+ // CHECK-LABEL: test_mm_maskz_load_ps
// CHECK: @llvm.masked.load.v4f32.p0(ptr %{{.*}}, i32 16, <4 x i1> %{{.*}}, <4 x float> %{{.*}})
return _mm_maskz_load_ps(__U, __P);
}
__m256 test_mm256_mask_load_ps(__m256 __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_mask_load_ps
+ // CHECK-LABEL: test_mm256_mask_load_ps
// CHECK: @llvm.masked.load.v8f32.p0(ptr %{{.*}}, i32 32, <8 x i1> %{{.*}}, <8 x float> %{{.*}})
return _mm256_mask_load_ps(__W, __U, __P);
}
__m256 test_mm256_maskz_load_ps(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_maskz_load_ps
+ // CHECK-LABEL: test_mm256_maskz_load_ps
// CHECK: @llvm.masked.load.v8f32.p0(ptr %{{.*}}, i32 32, <8 x i1> %{{.*}}, <8 x float> %{{.*}})
return _mm256_maskz_load_ps(__U, __P);
}
__m128i test_mm_loadu_epi64(void const *__P) {
- // CHECK-LABEL: @test_mm_loadu_epi64
+ // CHECK-LABEL: test_mm_loadu_epi64
// CHECK: load <2 x i64>, ptr %{{.*}}, align 1{{$}}
return _mm_loadu_epi64(__P);
}
__m128i test_mm_mask_loadu_epi64(__m128i __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_mask_loadu_epi64
+ // CHECK-LABEL: test_mm_mask_loadu_epi64
// CHECK: @llvm.masked.load.v2i64.p0(ptr %{{.*}}, i32 1, <2 x i1> %{{.*}}, <2 x i64> %{{.*}})
return _mm_mask_loadu_epi64(__W, __U, __P);
}
__m128i test_mm_maskz_loadu_epi64(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_maskz_loadu_epi64
+ // CHECK-LABEL: test_mm_maskz_loadu_epi64
// CHECK: @llvm.masked.load.v2i64.p0(ptr %{{.*}}, i32 1, <2 x i1> %{{.*}}, <2 x i64> %{{.*}})
return _mm_maskz_loadu_epi64(__U, __P);
}
__m256i test_mm256_loadu_epi64(void const *__P) {
- // CHECK-LABEL: @test_mm256_loadu_epi64
+ // CHECK-LABEL: test_mm256_loadu_epi64
// CHECK: load <4 x i64>, ptr %{{.*}}, align 1{{$}}
return _mm256_loadu_epi64(__P);
}
__m256i test_mm256_mask_loadu_epi64(__m256i __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_mask_loadu_epi64
+ // CHECK-LABEL: test_mm256_mask_loadu_epi64
// CHECK: @llvm.masked.load.v4i64.p0(ptr %{{.*}}, i32 1, <4 x i1> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_mask_loadu_epi64(__W, __U, __P);
}
__m256i test_mm256_maskz_loadu_epi64(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_maskz_loadu_epi64
+ // CHECK-LABEL: test_mm256_maskz_loadu_epi64
// CHECK: @llvm.masked.load.v4i64.p0(ptr %{{.*}}, i32 1, <4 x i1> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_maskz_loadu_epi64(__U, __P);
}
__m128i test_mm_loadu_epi32(void const *__P) {
- // CHECK-LABEL: @test_mm_loadu_epi32
+ // CHECK-LABEL: test_mm_loadu_epi32
// CHECK: load <2 x i64>, ptr %{{.*}}, align 1{{$}}
return _mm_loadu_epi32(__P);
}
__m128i test_mm_mask_loadu_epi32(__m128i __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_mask_loadu_epi32
+ // CHECK-LABEL: test_mm_mask_loadu_epi32
// CHECK: @llvm.masked.load.v4i32.p0(ptr %{{.*}}, i32 1, <4 x i1> %{{.*}}, <4 x i32> %{{.*}})
return _mm_mask_loadu_epi32(__W, __U, __P);
}
__m128i test_mm_maskz_loadu_epi32(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_maskz_loadu_epi32
+ // CHECK-LABEL: test_mm_maskz_loadu_epi32
// CHECK: @llvm.masked.load.v4i32.p0(ptr %{{.*}}, i32 1, <4 x i1> %{{.*}}, <4 x i32> %{{.*}})
return _mm_maskz_loadu_epi32(__U, __P);
}
__m256i test_mm256_loadu_epi32(void const *__P) {
- // CHECK-LABEL: @test_mm256_loadu_epi32
+ // CHECK-LABEL: test_mm256_loadu_epi32
// CHECK: load <4 x i64>, ptr %{{.*}}, align 1{{$}}
return _mm256_loadu_epi32(__P);
}
__m256i test_mm256_mask_loadu_epi32(__m256i __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_mask_loadu_epi32
+ // CHECK-LABEL: test_mm256_mask_loadu_epi32
// CHECK: @llvm.masked.load.v8i32.p0(ptr %{{.*}}, i32 1, <8 x i1> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_mask_loadu_epi32(__W, __U, __P);
}
__m256i test_mm256_maskz_loadu_epi32(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_maskz_loadu_epi32
+ // CHECK-LABEL: test_mm256_maskz_loadu_epi32
// CHECK: @llvm.masked.load.v8i32.p0(ptr %{{.*}}, i32 1, <8 x i1> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_maskz_loadu_epi32(__U, __P);
}
__m128d test_mm_mask_loadu_pd(__m128d __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_mask_loadu_pd
+ // CHECK-LABEL: test_mm_mask_loadu_pd
// CHECK: @llvm.masked.load.v2f64.p0(ptr %{{.*}}, i32 1, <2 x i1> %{{.*}}, <2 x double> %{{.*}})
return _mm_mask_loadu_pd(__W, __U, __P);
}
__m128d test_mm_maskz_loadu_pd(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_maskz_loadu_pd
+ // CHECK-LABEL: test_mm_maskz_loadu_pd
// CHECK: @llvm.masked.load.v2f64.p0(ptr %{{.*}}, i32 1, <2 x i1> %{{.*}}, <2 x double> %{{.*}})
return _mm_maskz_loadu_pd(__U, __P);
}
__m256d test_mm256_mask_loadu_pd(__m256d __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_mask_loadu_pd
+ // CHECK-LABEL: test_mm256_mask_loadu_pd
// CHECK: @llvm.masked.load.v4f64.p0(ptr %{{.*}}, i32 1, <4 x i1> %{{.*}}, <4 x double> %{{.*}})
return _mm256_mask_loadu_pd(__W, __U, __P);
}
__m256d test_mm256_maskz_loadu_pd(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_maskz_loadu_pd
+ // CHECK-LABEL: test_mm256_maskz_loadu_pd
// CHECK: @llvm.masked.load.v4f64.p0(ptr %{{.*}}, i32 1, <4 x i1> %{{.*}}, <4 x double> %{{.*}})
return _mm256_maskz_loadu_pd(__U, __P);
}
__m128 test_mm_mask_loadu_ps(__m128 __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_mask_loadu_ps
+ // CHECK-LABEL: test_mm_mask_loadu_ps
// CHECK: @llvm.masked.load.v4f32.p0(ptr %{{.*}}, i32 1, <4 x i1> %{{.*}}, <4 x float> %{{.*}})
return _mm_mask_loadu_ps(__W, __U, __P);
}
__m128 test_mm_maskz_loadu_ps(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm_maskz_loadu_ps
+ // CHECK-LABEL: test_mm_maskz_loadu_ps
// CHECK: @llvm.masked.load.v4f32.p0(ptr %{{.*}}, i32 1, <4 x i1> %{{.*}}, <4 x float> %{{.*}})
return _mm_maskz_loadu_ps(__U, __P);
}
__m256 test_mm256_mask_loadu_ps(__m256 __W, __mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_mask_loadu_ps
+ // CHECK-LABEL: test_mm256_mask_loadu_ps
// CHECK: @llvm.masked.load.v8f32.p0(ptr %{{.*}}, i32 1, <8 x i1> %{{.*}}, <8 x float> %{{.*}})
return _mm256_mask_loadu_ps(__W, __U, __P);
}
__m256 test_mm256_maskz_loadu_ps(__mmask8 __U, void const *__P) {
- // CHECK-LABEL: @test_mm256_maskz_loadu_ps
+ // CHECK-LABEL: test_mm256_maskz_loadu_ps
// CHECK: @llvm.masked.load.v8f32.p0(ptr %{{.*}}, i32 1, <8 x i1> %{{.*}}, <8 x float> %{{.*}})
return _mm256_maskz_loadu_ps(__U, __P);
}
void test_mm_mask_store_pd(void *__P, __mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_store_pd
+ // CHECK-LABEL: test_mm_mask_store_pd
// CHECK: @llvm.masked.store.v2f64.p0(<2 x double> %{{.*}}, ptr %{{.*}}, i32 16, <2 x i1> %{{.*}})
return _mm_mask_store_pd(__P, __U, __A);
}
void test_mm256_mask_store_pd(void *__P, __mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_mask_store_pd
+ // CHECK-LABEL: test_mm256_mask_store_pd
// CHECK: @llvm.masked.store.v4f64.p0(<4 x double> %{{.*}}, ptr %{{.*}}, i32 32, <4 x i1> %{{.*}})
return _mm256_mask_store_pd(__P, __U, __A);
}
void test_mm_mask_store_ps(void *__P, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_store_ps
+ // CHECK-LABEL: test_mm_mask_store_ps
// CHECK: @llvm.masked.store.v4f32.p0(<4 x float> %{{.*}}, ptr %{{.*}}, i32 16, <4 x i1> %{{.*}})
return _mm_mask_store_ps(__P, __U, __A);
}
void test_mm256_mask_store_ps(void *__P, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_store_ps
+ // CHECK-LABEL: test_mm256_mask_store_ps
// CHECK: @llvm.masked.store.v8f32.p0(<8 x float> %{{.*}}, ptr %{{.*}}, i32 32, <8 x i1> %{{.*}})
return _mm256_mask_store_ps(__P, __U, __A);
}
void test_mm_storeu_epi64(void *__p, __m128i __a) {
- // check-label: @test_mm_storeu_epi64
+ // CHECK-LABEL: test_mm_storeu_epi64
  // CHECK: store <2 x i64> %{{.*}}, ptr %{{.*}}, align 1{{$}}
return _mm_storeu_epi64(__p, __a);
}
void test_mm_mask_storeu_epi64(void *__P, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_storeu_epi64
+ // CHECK-LABEL: test_mm_mask_storeu_epi64
// CHECK: @llvm.masked.store.v2i64.p0(<2 x i64> %{{.*}}, ptr %{{.*}}, i32 1, <2 x i1> %{{.*}})
return _mm_mask_storeu_epi64(__P, __U, __A);
}
void test_mm256_storeu_epi64(void *__P, __m256i __A) {
- // CHECK-LABEL: @test_mm256_storeu_epi64
+ // CHECK-LABEL: test_mm256_storeu_epi64
// CHECK: store <4 x i64> %{{.*}}, ptr %{{.*}}, align 1{{$}}
return _mm256_storeu_epi64(__P, __A);
}
void test_mm256_mask_storeu_epi64(void *__P, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_storeu_epi64
+ // CHECK-LABEL: test_mm256_mask_storeu_epi64
// CHECK: @llvm.masked.store.v4i64.p0(<4 x i64> %{{.*}}, ptr %{{.*}}, i32 1, <4 x i1> %{{.*}})
return _mm256_mask_storeu_epi64(__P, __U, __A);
}
void test_mm_storeu_epi32(void *__P, __m128i __A) {
- // CHECK-LABEL: @test_mm_storeu_epi32
+ // CHECK-LABEL: test_mm_storeu_epi32
// CHECK: store <2 x i64> %{{.*}}, ptr %{{.*}}, align 1{{$}}
return _mm_storeu_epi32(__P, __A);
}
void test_mm_mask_storeu_epi32(void *__P, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_storeu_epi32
+ // CHECK-LABEL: test_mm_mask_storeu_epi32
// CHECK: @llvm.masked.store.v4i32.p0(<4 x i32> %{{.*}}, ptr %{{.*}}, i32 1, <4 x i1> %{{.*}})
return _mm_mask_storeu_epi32(__P, __U, __A);
}
void test_mm256_storeu_epi32(void *__P, __m256i __A) {
- // CHECK-LABEL: @test_mm256_storeu_epi32
+ // CHECK-LABEL: test_mm256_storeu_epi32
// CHECK: store <4 x i64> %{{.*}}, ptr %{{.*}}, align 1{{$}}
return _mm256_storeu_epi32(__P, __A);
}
void test_mm256_mask_storeu_epi32(void *__P, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_storeu_epi32
+ // CHECK-LABEL: test_mm256_mask_storeu_epi32
// CHECK: @llvm.masked.store.v8i32.p0(<8 x i32> %{{.*}}, ptr %{{.*}}, i32 1, <8 x i1> %{{.*}})
return _mm256_mask_storeu_epi32(__P, __U, __A);
}
void test_mm_mask_storeu_pd(void *__P, __mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_storeu_pd
+ // CHECK-LABEL: test_mm_mask_storeu_pd
// CHECK: @llvm.masked.store.v2f64.p0(<2 x double> %{{.*}}, ptr %{{.*}}, i32 1, <2 x i1> %{{.*}})
return _mm_mask_storeu_pd(__P, __U, __A);
}
void test_mm256_mask_storeu_pd(void *__P, __mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_mask_storeu_pd
+ // CHECK-LABEL: test_mm256_mask_storeu_pd
// CHECK: @llvm.masked.store.v4f64.p0(<4 x double> %{{.*}}, ptr %{{.*}}, i32 1, <4 x i1> %{{.*}})
return _mm256_mask_storeu_pd(__P, __U, __A);
}
void test_mm_mask_storeu_ps(void *__P, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_storeu_ps
+ // CHECK-LABEL: test_mm_mask_storeu_ps
// CHECK: @llvm.masked.store.v4f32.p0(<4 x float> %{{.*}}, ptr %{{.*}}, i32 1, <4 x i1> %{{.*}})
return _mm_mask_storeu_ps(__P, __U, __A);
}
void test_mm256_mask_storeu_ps(void *__P, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_storeu_ps
+ // CHECK-LABEL: test_mm256_mask_storeu_ps
// CHECK: @llvm.masked.store.v8f32.p0(<8 x float> %{{.*}}, ptr %{{.*}}, i32 1, <8 x i1> %{{.*}})
return _mm256_mask_storeu_ps(__P, __U, __A);
}
__m128d test_mm_mask_unpackhi_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_mask_unpackhi_pd
+ // CHECK-LABEL: test_mm_mask_unpackhi_pd
// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> <i32 1, i32 3>
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_mask_unpackhi_pd(__W, __U, __A, __B);
}
__m128d test_mm_maskz_unpackhi_pd(__mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_maskz_unpackhi_pd
+ // CHECK-LABEL: test_mm_maskz_unpackhi_pd
// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> <i32 1, i32 3>
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_maskz_unpackhi_pd(__U, __A, __B);
}
__m256d test_mm256_mask_unpackhi_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_mask_unpackhi_pd
+ // CHECK-LABEL: test_mm256_mask_unpackhi_pd
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: select <4 x i1> %{{.*}} <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_unpackhi_pd(__W, __U, __A, __B);
}
__m256d test_mm256_maskz_unpackhi_pd(__mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_maskz_unpackhi_pd
+ // CHECK-LABEL: test_mm256_maskz_unpackhi_pd
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: select <4 x i1> %{{.*}} <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_unpackhi_pd(__U, __A, __B);
}
__m128 test_mm_mask_unpackhi_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_mask_unpackhi_ps
+ // CHECK-LABEL: test_mm_mask_unpackhi_ps
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}} <4 x float> %{{.*}}
return _mm_mask_unpackhi_ps(__W, __U, __A, __B);
}
__m128 test_mm_maskz_unpackhi_ps(__mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_maskz_unpackhi_ps
+ // CHECK-LABEL: test_mm_maskz_unpackhi_ps
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}} <4 x float> %{{.*}}
return _mm_maskz_unpackhi_ps(__U, __A, __B);
}
__m256 test_mm256_mask_unpackhi_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_mask_unpackhi_ps
+ // CHECK-LABEL: test_mm256_mask_unpackhi_ps
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_unpackhi_ps(__W, __U, __A, __B);
}
__m256 test_mm256_maskz_unpackhi_ps(__mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_maskz_unpackhi_ps
+ // CHECK-LABEL: test_mm256_maskz_unpackhi_ps
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_unpackhi_ps(__U, __A, __B);
}
__m128d test_mm_mask_unpacklo_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_mask_unpacklo_pd
+ // CHECK-LABEL: test_mm_mask_unpacklo_pd
// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> <i32 0, i32 2>
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_mask_unpacklo_pd(__W, __U, __A, __B);
}
__m128d test_mm_maskz_unpacklo_pd(__mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_maskz_unpacklo_pd
+ // CHECK-LABEL: test_mm_maskz_unpacklo_pd
// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> <i32 0, i32 2>
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_maskz_unpacklo_pd(__U, __A, __B);
}
__m256d test_mm256_mask_unpacklo_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_mask_unpacklo_pd
+ // CHECK-LABEL: test_mm256_mask_unpacklo_pd
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
// CHECK: select <4 x i1> %{{.*}} <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_unpacklo_pd(__W, __U, __A, __B);
}
__m256d test_mm256_maskz_unpacklo_pd(__mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_maskz_unpacklo_pd
+ // CHECK-LABEL: test_mm256_maskz_unpacklo_pd
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
// CHECK: select <4 x i1> %{{.*}} <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_unpacklo_pd(__U, __A, __B);
}
__m128 test_mm_mask_unpacklo_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_mask_unpacklo_ps
+ // CHECK-LABEL: test_mm_mask_unpacklo_ps
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
// CHECK: select <4 x i1> %{{.*}} <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask_unpacklo_ps(__W, __U, __A, __B);
}
__m128 test_mm_maskz_unpacklo_ps(__mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_maskz_unpacklo_ps
+ // CHECK-LABEL: test_mm_maskz_unpacklo_ps
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
// CHECK: select <4 x i1> %{{.*}} <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_maskz_unpacklo_ps(__U, __A, __B);
}
__m256 test_mm256_mask_unpacklo_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_mask_unpacklo_ps
+ // CHECK-LABEL: test_mm256_mask_unpacklo_ps
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_unpacklo_ps(__W, __U, __A, __B);
}
__m256 test_mm256_maskz_unpacklo_ps(__mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_maskz_unpacklo_ps
+ // CHECK-LABEL: test_mm256_maskz_unpacklo_ps
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_unpacklo_ps(__U, __A, __B);
}
__m128d test_mm_rcp14_pd(__m128d __A) {
- // CHECK-LABEL: @test_mm_rcp14_pd
+ // CHECK-LABEL: test_mm_rcp14_pd
// CHECK: @llvm.x86.avx512.rcp14.pd.128
return _mm_rcp14_pd(__A);
}
__m128d test_mm_mask_rcp14_pd(__m128d __W, __mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_rcp14_pd
+ // CHECK-LABEL: test_mm_mask_rcp14_pd
// CHECK: @llvm.x86.avx512.rcp14.pd.128
return _mm_mask_rcp14_pd(__W, __U, __A);
}
__m128d test_mm_maskz_rcp14_pd(__mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_maskz_rcp14_pd
+ // CHECK-LABEL: test_mm_maskz_rcp14_pd
// CHECK: @llvm.x86.avx512.rcp14.pd.128
return _mm_maskz_rcp14_pd(__U, __A);
}
__m256d test_mm256_rcp14_pd(__m256d __A) {
- // CHECK-LABEL: @test_mm256_rcp14_pd
+ // CHECK-LABEL: test_mm256_rcp14_pd
// CHECK: @llvm.x86.avx512.rcp14.pd.256
return _mm256_rcp14_pd(__A);
}
__m256d test_mm256_mask_rcp14_pd(__m256d __W, __mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_mask_rcp14_pd
+ // CHECK-LABEL: test_mm256_mask_rcp14_pd
// CHECK: @llvm.x86.avx512.rcp14.pd.256
return _mm256_mask_rcp14_pd(__W, __U, __A);
}
__m256d test_mm256_maskz_rcp14_pd(__mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_maskz_rcp14_pd
+ // CHECK-LABEL: test_mm256_maskz_rcp14_pd
// CHECK: @llvm.x86.avx512.rcp14.pd.256
return _mm256_maskz_rcp14_pd(__U, __A);
}
__m128 test_mm_rcp14_ps(__m128 __A) {
- // CHECK-LABEL: @test_mm_rcp14_ps
+ // CHECK-LABEL: test_mm_rcp14_ps
// CHECK: @llvm.x86.avx512.rcp14.ps.128
return _mm_rcp14_ps(__A);
}
__m128 test_mm_mask_rcp14_ps(__m128 __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_rcp14_ps
+ // CHECK-LABEL: test_mm_mask_rcp14_ps
// CHECK: @llvm.x86.avx512.rcp14.ps.128
return _mm_mask_rcp14_ps(__W, __U, __A);
}
__m128 test_mm_maskz_rcp14_ps(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_maskz_rcp14_ps
+ // CHECK-LABEL: test_mm_maskz_rcp14_ps
// CHECK: @llvm.x86.avx512.rcp14.ps.128
return _mm_maskz_rcp14_ps(__U, __A);
}
__m256 test_mm256_rcp14_ps(__m256 __A) {
- // CHECK-LABEL: @test_mm256_rcp14_ps
+ // CHECK-LABEL: test_mm256_rcp14_ps
// CHECK: @llvm.x86.avx512.rcp14.ps.256
return _mm256_rcp14_ps(__A);
}
__m256 test_mm256_mask_rcp14_ps(__m256 __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_rcp14_ps
+ // CHECK-LABEL: test_mm256_mask_rcp14_ps
// CHECK: @llvm.x86.avx512.rcp14.ps.256
return _mm256_mask_rcp14_ps(__W, __U, __A);
}
__m256 test_mm256_maskz_rcp14_ps(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_maskz_rcp14_ps
+ // CHECK-LABEL: test_mm256_maskz_rcp14_ps
// CHECK: @llvm.x86.avx512.rcp14.ps.256
return _mm256_maskz_rcp14_ps(__U, __A);
}
__m128d test_mm_mask_permute_pd(__m128d __W, __mmask8 __U, __m128d __X) {
- // CHECK-LABEL: @test_mm_mask_permute_pd
+ // CHECK-LABEL: test_mm_mask_permute_pd
// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> poison, <2 x i32> <i32 1, i32 0>
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_mask_permute_pd(__W, __U, __X, 1);
}
__m128d test_mm_maskz_permute_pd(__mmask8 __U, __m128d __X) {
- // CHECK-LABEL: @test_mm_maskz_permute_pd
+ // CHECK-LABEL: test_mm_maskz_permute_pd
// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> poison, <2 x i32> <i32 1, i32 0>
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_maskz_permute_pd(__U, __X, 1);
}
__m256d test_mm256_mask_permute_pd(__m256d __W, __mmask8 __U, __m256d __X) {
- // CHECK-LABEL: @test_mm256_mask_permute_pd
+ // CHECK-LABEL: test_mm256_mask_permute_pd
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> poison, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_permute_pd(__W, __U, __X, 5);
}
__m256d test_mm256_maskz_permute_pd(__mmask8 __U, __m256d __X) {
- // CHECK-LABEL: @test_mm256_maskz_permute_pd
+ // CHECK-LABEL: test_mm256_maskz_permute_pd
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> poison, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_permute_pd(__U, __X, 5);
}
__m128 test_mm_mask_permute_ps(__m128 __W, __mmask8 __U, __m128 __X) {
- // CHECK-LABEL: @test_mm_mask_permute_ps
+ // CHECK-LABEL: test_mm_mask_permute_ps
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask_permute_ps(__W, __U, __X, 0x1b);
}
__m128 test_mm_maskz_permute_ps(__mmask8 __U, __m128 __X) {
- // CHECK-LABEL: @test_mm_maskz_permute_ps
+ // CHECK-LABEL: test_mm_maskz_permute_ps
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_maskz_permute_ps(__U, __X, 0x1b);
}
__m256 test_mm256_mask_permute_ps(__m256 __W, __mmask8 __U, __m256 __X) {
- // CHECK-LABEL: @test_mm256_mask_permute_ps
+ // CHECK-LABEL: test_mm256_mask_permute_ps
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> poison, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_permute_ps(__W, __U, __X, 0x1b);
}
__m256 test_mm256_maskz_permute_ps(__mmask8 __U, __m256 __X) {
- // CHECK-LABEL: @test_mm256_maskz_permute_ps
+ // CHECK-LABEL: test_mm256_maskz_permute_ps
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> poison, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_permute_ps(__U, __X, 0x1b);
}
__m128d test_mm_mask_permutevar_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128i __C) {
- // CHECK-LABEL: @test_mm_mask_permutevar_pd
+ // CHECK-LABEL: test_mm_mask_permutevar_pd
// CHECK: @llvm.x86.avx.vpermilvar.pd
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_mask_permutevar_pd(__W, __U, __A, __C);
}
__m128d test_mm_maskz_permutevar_pd(__mmask8 __U, __m128d __A, __m128i __C) {
- // CHECK-LABEL: @test_mm_maskz_permutevar_pd
+ // CHECK-LABEL: test_mm_maskz_permutevar_pd
// CHECK: @llvm.x86.avx.vpermilvar.pd
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_maskz_permutevar_pd(__U, __A, __C);
}
__m256d test_mm256_mask_permutevar_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256i __C) {
- // CHECK-LABEL: @test_mm256_mask_permutevar_pd
+ // CHECK-LABEL: test_mm256_mask_permutevar_pd
// CHECK: @llvm.x86.avx.vpermilvar.pd.256
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_permutevar_pd(__W, __U, __A, __C);
}
__m256d test_mm256_maskz_permutevar_pd(__mmask8 __U, __m256d __A, __m256i __C) {
- // CHECK-LABEL: @test_mm256_maskz_permutevar_pd
+ // CHECK-LABEL: test_mm256_maskz_permutevar_pd
// CHECK: @llvm.x86.avx.vpermilvar.pd.256
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_permutevar_pd(__U, __A, __C);
}
__m128 test_mm_mask_permutevar_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128i __C) {
- // CHECK-LABEL: @test_mm_mask_permutevar_ps
+ // CHECK-LABEL: test_mm_mask_permutevar_ps
// CHECK: @llvm.x86.avx.vpermilvar.ps
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask_permutevar_ps(__W, __U, __A, __C);
}
__m128 test_mm_maskz_permutevar_ps(__mmask8 __U, __m128 __A, __m128i __C) {
- // CHECK-LABEL: @test_mm_maskz_permutevar_ps
+ // CHECK-LABEL: test_mm_maskz_permutevar_ps
// CHECK: @llvm.x86.avx.vpermilvar.ps
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_maskz_permutevar_ps(__U, __A, __C);
}
__m256 test_mm256_mask_permutevar_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256i __C) {
- // CHECK-LABEL: @test_mm256_mask_permutevar_ps
+ // CHECK-LABEL: test_mm256_mask_permutevar_ps
// CHECK: @llvm.x86.avx.vpermilvar.ps.256
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_permutevar_ps(__W, __U, __A, __C);
}
__m256 test_mm256_maskz_permutevar_ps(__mmask8 __U, __m256 __A, __m256i __C) {
- // CHECK-LABEL: @test_mm256_maskz_permutevar_ps
+ // CHECK-LABEL: test_mm256_maskz_permutevar_ps
// CHECK: @llvm.x86.avx.vpermilvar.ps.256
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_permutevar_ps(__U, __A, __C);
}
__mmask8 test_mm_test_epi32_mask(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_test_epi32_mask
+ // CHECK-LABEL: test_mm_test_epi32_mask
// CHECK: and <2 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp ne <4 x i32> %{{.*}}, %{{.*}}
return _mm_test_epi32_mask(__A, __B);
}
__mmask8 test_mm_mask_test_epi32_mask(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_test_epi32_mask
+ // CHECK-LABEL: test_mm_mask_test_epi32_mask
// CHECK: and <2 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp ne <4 x i32> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
@@ -7404,14 +7405,14 @@ __mmask8 test_mm_mask_test_epi32_mask(__mmask8 __U, __m128i __A, __m128i __B) {
}
__mmask8 test_mm256_test_epi32_mask(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_test_epi32_mask
+ // CHECK-LABEL: test_mm256_test_epi32_mask
// CHECK: and <4 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp ne <8 x i32> %{{.*}}, %{{.*}}
return _mm256_test_epi32_mask(__A, __B);
}
__mmask8 test_mm256_mask_test_epi32_mask(__mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_test_epi32_mask
+ // CHECK-LABEL: test_mm256_mask_test_epi32_mask
// CHECK: and <4 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp ne <8 x i32> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
@@ -7419,14 +7420,14 @@ __mmask8 test_mm256_mask_test_epi32_mask(__mmask8 __U, __m256i __A, __m256i __B)
}
__mmask8 test_mm_test_epi64_mask(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_test_epi64_mask
+ // CHECK-LABEL: test_mm_test_epi64_mask
// CHECK: and <2 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp ne <2 x i64> %{{.*}}, %{{.*}}
return _mm_test_epi64_mask(__A, __B);
}
__mmask8 test_mm_mask_test_epi64_mask(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_test_epi64_mask
+ // CHECK-LABEL: test_mm_mask_test_epi64_mask
// CHECK: and <2 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp ne <2 x i64> %{{.*}}, %{{.*}}
// CHECK: and <2 x i1> %{{.*}}, %{{.*}}
@@ -7434,14 +7435,14 @@ __mmask8 test_mm_mask_test_epi64_mask(__mmask8 __U, __m128i __A, __m128i __B) {
}
__mmask8 test_mm256_test_epi64_mask(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_test_epi64_mask
+ // CHECK-LABEL: test_mm256_test_epi64_mask
// CHECK: and <4 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp ne <4 x i64> %{{.*}}, %{{.*}}
return _mm256_test_epi64_mask(__A, __B);
}
__mmask8 test_mm256_mask_test_epi64_mask(__mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_test_epi64_mask
+ // CHECK-LABEL: test_mm256_mask_test_epi64_mask
// CHECK: and <4 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp ne <4 x i64> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
@@ -7449,14 +7450,14 @@ __mmask8 test_mm256_mask_test_epi64_mask(__mmask8 __U, __m256i __A, __m256i __B)
}
__mmask8 test_mm_testn_epi32_mask(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_testn_epi32_mask
+ // CHECK-LABEL: test_mm_testn_epi32_mask
// CHECK: and <2 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp eq <4 x i32> %{{.*}}, %{{.*}}
return _mm_testn_epi32_mask(__A, __B);
}
__mmask8 test_mm_mask_testn_epi32_mask(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_testn_epi32_mask
+ // CHECK-LABEL: test_mm_mask_testn_epi32_mask
// CHECK: and <2 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp eq <4 x i32> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
@@ -7464,14 +7465,14 @@ __mmask8 test_mm_mask_testn_epi32_mask(__mmask8 __U, __m128i __A, __m128i __B) {
}
__mmask8 test_mm256_testn_epi32_mask(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_testn_epi32_mask
+ // CHECK-LABEL: test_mm256_testn_epi32_mask
// CHECK: and <4 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp eq <8 x i32> %{{.*}}, %{{.*}}
return _mm256_testn_epi32_mask(__A, __B);
}
__mmask8 test_mm256_mask_testn_epi32_mask(__mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_testn_epi32_mask
+ // CHECK-LABEL: test_mm256_mask_testn_epi32_mask
// CHECK: and <4 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp eq <8 x i32> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
@@ -7479,14 +7480,14 @@ __mmask8 test_mm256_mask_testn_epi32_mask(__mmask8 __U, __m256i __A, __m256i __B
}
__mmask8 test_mm_testn_epi64_mask(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_testn_epi64_mask
+ // CHECK-LABEL: test_mm_testn_epi64_mask
// CHECK: and <2 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp eq <2 x i64> %{{.*}}, %{{.*}}
return _mm_testn_epi64_mask(__A, __B);
}
__mmask8 test_mm_mask_testn_epi64_mask(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_testn_epi64_mask
+ // CHECK-LABEL: test_mm_mask_testn_epi64_mask
// CHECK: and <2 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp eq <2 x i64> %{{.*}}, %{{.*}}
// CHECK: and <2 x i1> %{{.*}}, %{{.*}}
@@ -7494,14 +7495,14 @@ __mmask8 test_mm_mask_testn_epi64_mask(__mmask8 __U, __m128i __A, __m128i __B) {
}
__mmask8 test_mm256_testn_epi64_mask(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_testn_epi64_mask
+ // CHECK-LABEL: test_mm256_testn_epi64_mask
// CHECK: and <4 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp eq <4 x i64> %{{.*}}, %{{.*}}
return _mm256_testn_epi64_mask(__A, __B);
}
__mmask8 test_mm256_mask_testn_epi64_mask(__mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_testn_epi64_mask
+ // CHECK-LABEL: test_mm256_mask_testn_epi64_mask
// CHECK: and <4 x i64> %{{.*}}, %{{.*}}
// CHECK: icmp eq <4 x i64> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
@@ -7509,428 +7510,428 @@ __mmask8 test_mm256_mask_testn_epi64_mask(__mmask8 __U, __m256i __A, __m256i __B
}
__m128i test_mm_mask_unpackhi_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_unpackhi_epi32
+ // CHECK-LABEL: test_mm_mask_unpackhi_epi32
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_unpackhi_epi32(__W, __U, __A, __B);
}
__m128i test_mm_maskz_unpackhi_epi32(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_unpackhi_epi32
+ // CHECK-LABEL: test_mm_maskz_unpackhi_epi32
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_unpackhi_epi32(__U, __A, __B);
}
__m256i test_mm256_mask_unpackhi_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_unpackhi_epi32
+ // CHECK-LABEL: test_mm256_mask_unpackhi_epi32
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_unpackhi_epi32(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_unpackhi_epi32(__mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_unpackhi_epi32
+ // CHECK-LABEL: test_mm256_maskz_unpackhi_epi32
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_unpackhi_epi32(__U, __A, __B);
}
__m128i test_mm_mask_unpackhi_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_unpackhi_epi64
+ // CHECK-LABEL: test_mm_mask_unpackhi_epi64
// CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i32> <i32 1, i32 3>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_unpackhi_epi64(__W, __U, __A, __B);
}
__m128i test_mm_maskz_unpackhi_epi64(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_unpackhi_epi64
+ // CHECK-LABEL: test_mm_maskz_unpackhi_epi64
// CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i32> <i32 1, i32 3>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_unpackhi_epi64(__U, __A, __B);
}
__m256i test_mm256_mask_unpackhi_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_unpackhi_epi64
+ // CHECK-LABEL: test_mm256_mask_unpackhi_epi64
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_unpackhi_epi64(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_unpackhi_epi64(__mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_unpackhi_epi64
+ // CHECK-LABEL: test_mm256_maskz_unpackhi_epi64
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_unpackhi_epi64(__U, __A, __B);
}
__m128i test_mm_mask_unpacklo_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_unpacklo_epi32
+ // CHECK-LABEL: test_mm_mask_unpacklo_epi32
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_unpacklo_epi32(__W, __U, __A, __B);
}
__m128i test_mm_maskz_unpacklo_epi32(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_unpacklo_epi32
+ // CHECK-LABEL: test_mm_maskz_unpacklo_epi32
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_unpacklo_epi32(__U, __A, __B);
}
__m256i test_mm256_mask_unpacklo_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_unpacklo_epi32
+ // CHECK-LABEL: test_mm256_mask_unpacklo_epi32
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_unpacklo_epi32(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_unpacklo_epi32(__mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_unpacklo_epi32
+ // CHECK-LABEL: test_mm256_maskz_unpacklo_epi32
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_unpacklo_epi32(__U, __A, __B);
}
__m128i test_mm_mask_unpacklo_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_unpacklo_epi64
+ // CHECK-LABEL: test_mm_mask_unpacklo_epi64
// CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i32> <i32 0, i32 2>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_unpacklo_epi64(__W, __U, __A, __B);
}
__m128i test_mm_maskz_unpacklo_epi64(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_unpacklo_epi64
+ // CHECK-LABEL: test_mm_maskz_unpacklo_epi64
// CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i32> <i32 0, i32 2>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_unpacklo_epi64(__U, __A, __B);
}
__m256i test_mm256_mask_unpacklo_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_unpacklo_epi64
+ // CHECK-LABEL: test_mm256_mask_unpacklo_epi64
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_unpacklo_epi64(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_unpacklo_epi64(__mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_unpacklo_epi64
+ // CHECK-LABEL: test_mm256_maskz_unpacklo_epi64
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_unpacklo_epi64(__U, __A, __B);
}
__m128i test_mm_mask_sra_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_sra_epi32
+ // CHECK-LABEL: test_mm_mask_sra_epi32
// CHECK: @llvm.x86.sse2.psra.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_sra_epi32(__W, __U, __A, __B);
}
__m128i test_mm_maskz_sra_epi32(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_sra_epi32
+ // CHECK-LABEL: test_mm_maskz_sra_epi32
// CHECK: @llvm.x86.sse2.psra.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_sra_epi32(__U, __A, __B);
}
__m256i test_mm256_mask_sra_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm256_mask_sra_epi32
+ // CHECK-LABEL: test_mm256_mask_sra_epi32
// CHECK: @llvm.x86.avx2.psra.d
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_sra_epi32(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_sra_epi32(__mmask8 __U, __m256i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm256_maskz_sra_epi32
+ // CHECK-LABEL: test_mm256_maskz_sra_epi32
// CHECK: @llvm.x86.avx2.psra.d
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_sra_epi32(__U, __A, __B);
}
__m128i test_mm_mask_srai_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_srai_epi32
+ // CHECK-LABEL: test_mm_mask_srai_epi32
// CHECK: @llvm.x86.sse2.psrai.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_srai_epi32(__W, __U, __A, 5);
}
__m128i test_mm_mask_srai_epi32_2(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm_mask_srai_epi32_2
+ // CHECK-LABEL: test_mm_mask_srai_epi32_2
// CHECK: @llvm.x86.sse2.psrai.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_srai_epi32(__W, __U, __A, __B);
}
__m128i test_mm_maskz_srai_epi32(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_srai_epi32
+ // CHECK-LABEL: test_mm_maskz_srai_epi32
// CHECK: @llvm.x86.sse2.psrai.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_srai_epi32(__U, __A, 5);
}
__m128i test_mm_maskz_srai_epi32_2(__mmask8 __U, __m128i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm_maskz_srai_epi32_2
+ // CHECK-LABEL: test_mm_maskz_srai_epi32_2
// CHECK: @llvm.x86.sse2.psrai.d
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_srai_epi32(__U, __A, __B);
}
__m256i test_mm256_mask_srai_epi32(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_srai_epi32
+ // CHECK-LABEL: test_mm256_mask_srai_epi32
// CHECK: @llvm.x86.avx2.psrai.d
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_srai_epi32(__W, __U, __A, 5);
}
__m256i test_mm256_mask_srai_epi32_2(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm256_mask_srai_epi32_2
+ // CHECK-LABEL: test_mm256_mask_srai_epi32_2
// CHECK: @llvm.x86.avx2.psrai.d
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_srai_epi32(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_srai_epi32(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_srai_epi32
+ // CHECK-LABEL: test_mm256_maskz_srai_epi32
// CHECK: @llvm.x86.avx2.psrai.d
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_srai_epi32(__U, __A, 5);
}
__m256i test_mm256_maskz_srai_epi32_2(__mmask8 __U, __m256i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm256_maskz_srai_epi32_2
+ // CHECK-LABEL: test_mm256_maskz_srai_epi32_2
// CHECK: @llvm.x86.avx2.psrai.d
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_srai_epi32(__U, __A, __B);
}
__m128i test_mm_sra_epi64(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_sra_epi64
+ // CHECK-LABEL: test_mm_sra_epi64
// CHECK: @llvm.x86.avx512.psra.q.128
return _mm_sra_epi64(__A, __B);
}
__m128i test_mm_mask_sra_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_sra_epi64
+ // CHECK-LABEL: test_mm_mask_sra_epi64
// CHECK: @llvm.x86.avx512.psra.q.128
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_sra_epi64(__W, __U, __A, __B);
}
__m128i test_mm_maskz_sra_epi64(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_sra_epi64
+ // CHECK-LABEL: test_mm_maskz_sra_epi64
// CHECK: @llvm.x86.avx512.psra.q.128
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_sra_epi64(__U, __A, __B);
}
__m256i test_mm256_sra_epi64(__m256i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm256_sra_epi64
+ // CHECK-LABEL: test_mm256_sra_epi64
// CHECK: @llvm.x86.avx512.psra.q.256
return _mm256_sra_epi64(__A, __B);
}
__m256i test_mm256_mask_sra_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm256_mask_sra_epi64
+ // CHECK-LABEL: test_mm256_mask_sra_epi64
// CHECK: @llvm.x86.avx512.psra.q.256
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_sra_epi64(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_sra_epi64(__mmask8 __U, __m256i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm256_maskz_sra_epi64
+ // CHECK-LABEL: test_mm256_maskz_sra_epi64
// CHECK: @llvm.x86.avx512.psra.q.256
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_sra_epi64(__U, __A, __B);
}
__m128i test_mm_srai_epi64(__m128i __A) {
- // CHECK-LABEL: @test_mm_srai_epi64
+ // CHECK-LABEL: test_mm_srai_epi64
// CHECK: @llvm.x86.avx512.psrai.q.128
return _mm_srai_epi64(__A, 5);
}
__m128i test_mm_srai_epi64_2(__m128i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm_srai_epi64_2
+ // CHECK-LABEL: test_mm_srai_epi64_2
// CHECK: @llvm.x86.avx512.psrai.q.128
return _mm_srai_epi64(__A, __B);
}
__m128i test_mm_mask_srai_epi64(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_srai_epi64
+ // CHECK-LABEL: test_mm_mask_srai_epi64
// CHECK: @llvm.x86.avx512.psrai.q.128
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_srai_epi64(__W, __U, __A, 5);
}
__m128i test_mm_mask_srai_epi64_2(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm_mask_srai_epi64_2
+ // CHECK-LABEL: test_mm_mask_srai_epi64_2
// CHECK: @llvm.x86.avx512.psrai.q.128
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_srai_epi64(__W, __U, __A, __B);
}
__m128i test_mm_maskz_srai_epi64(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_srai_epi64
+ // CHECK-LABEL: test_mm_maskz_srai_epi64
// CHECK: @llvm.x86.avx512.psrai.q.128
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_srai_epi64(__U, __A, 5);
}
__m128i test_mm_maskz_srai_epi64_2(__mmask8 __U, __m128i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm_maskz_srai_epi64_2
+ // CHECK-LABEL: test_mm_maskz_srai_epi64_2
// CHECK: @llvm.x86.avx512.psrai.q.128
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_srai_epi64(__U, __A, __B);
}
__m256i test_mm256_srai_epi64(__m256i __A) {
- // CHECK-LABEL: @test_mm256_srai_epi64
+ // CHECK-LABEL: test_mm256_srai_epi64
// CHECK: @llvm.x86.avx512.psrai.q.256
return _mm256_srai_epi64(__A, 5);
}
__m256i test_mm256_srai_epi64_2(__m256i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm256_srai_epi64_2
+ // CHECK-LABEL: test_mm256_srai_epi64_2
// CHECK: @llvm.x86.avx512.psrai.q.256
return _mm256_srai_epi64(__A, __B);
}
__m256i test_mm256_mask_srai_epi64(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_srai_epi64
+ // CHECK-LABEL: test_mm256_mask_srai_epi64
// CHECK: @llvm.x86.avx512.psrai.q.256
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_srai_epi64(__W, __U, __A, 5);
}
__m256i test_mm256_mask_srai_epi64_2(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm256_mask_srai_epi64_2
+ // CHECK-LABEL: test_mm256_mask_srai_epi64_2
// CHECK: @llvm.x86.avx512.psrai.q.256
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_srai_epi64(__W, __U, __A, __B);
}
__m256i test_mm256_maskz_srai_epi64(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_srai_epi64
+ // CHECK-LABEL: test_mm256_maskz_srai_epi64
// CHECK: @llvm.x86.avx512.psrai.q.256
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_srai_epi64(__U, __A, 5);
}
__m256i test_mm256_maskz_srai_epi64_2(__mmask8 __U, __m256i __A, unsigned int __B) {
- // CHECK-LABEL: @test_mm256_maskz_srai_epi64_2
+ // CHECK-LABEL: test_mm256_maskz_srai_epi64_2
// CHECK: @llvm.x86.avx512.psrai.q.256
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_srai_epi64(__U, __A, __B);
}
__m128i test_mm_ternarylogic_epi32(__m128i __A, __m128i __B, __m128i __C) {
- // CHECK-LABEL: @test_mm_ternarylogic_epi32
+ // CHECK-LABEL: test_mm_ternarylogic_epi32
// CHECK: @llvm.x86.avx512.pternlog.d.128
return _mm_ternarylogic_epi32(__A, __B, __C, 4);
}
__m128i test_mm_mask_ternarylogic_epi32(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C) {
- // CHECK-LABEL: @test_mm_mask_ternarylogic_epi32
+ // CHECK-LABEL: test_mm_mask_ternarylogic_epi32
// CHECK: @llvm.x86.avx512.pternlog.d.128
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_ternarylogic_epi32(__A, __U, __B, __C, 4);
}
__m128i test_mm_maskz_ternarylogic_epi32(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C) {
- // CHECK-LABEL: @test_mm_maskz_ternarylogic_epi32
+ // CHECK-LABEL: test_mm_maskz_ternarylogic_epi32
// CHECK: @llvm.x86.avx512.pternlog.d.128
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> zeroinitializer
return _mm_maskz_ternarylogic_epi32(__U, __A, __B, __C, 4);
}
__m256i test_mm256_ternarylogic_epi32(__m256i __A, __m256i __B, __m256i __C) {
- // CHECK-LABEL: @test_mm256_ternarylogic_epi32
+ // CHECK-LABEL: test_mm256_ternarylogic_epi32
// CHECK: @llvm.x86.avx512.pternlog.d.256
return _mm256_ternarylogic_epi32(__A, __B, __C, 4);
}
__m256i test_mm256_mask_ternarylogic_epi32(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C) {
- // CHECK-LABEL: @test_mm256_mask_ternarylogic_epi32
+ // CHECK-LABEL: test_mm256_mask_ternarylogic_epi32
// CHECK: @llvm.x86.avx512.pternlog.d.256
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_ternarylogic_epi32(__A, __U, __B, __C, 4);
}
__m256i test_mm256_maskz_ternarylogic_epi32(__mmask8 __U, __m256i __A, __m256i __B, __m256i __C) {
- // CHECK-LABEL: @test_mm256_maskz_ternarylogic_epi32
+ // CHECK-LABEL: test_mm256_maskz_ternarylogic_epi32
// CHECK: @llvm.x86.avx512.pternlog.d.256
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> zeroinitializer
return _mm256_maskz_ternarylogic_epi32(__U, __A, __B, __C, 4);
}
__m128i test_mm_ternarylogic_epi64(__m128i __A, __m128i __B, __m128i __C) {
- // CHECK-LABEL: @test_mm_ternarylogic_epi64
+ // CHECK-LABEL: test_mm_ternarylogic_epi64
// CHECK: @llvm.x86.avx512.pternlog.q.128
return _mm_ternarylogic_epi64(__A, __B, __C, 4);
}
__m128i test_mm_mask_ternarylogic_epi64(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C) {
- // CHECK-LABEL: @test_mm_mask_ternarylogic_epi64
+ // CHECK-LABEL: test_mm_mask_ternarylogic_epi64
// CHECK: @llvm.x86.avx512.pternlog.q.128
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_ternarylogic_epi64(__A, __U, __B, __C, 4);
}
__m128i test_mm_maskz_ternarylogic_epi64(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C) {
- // CHECK-LABEL: @test_mm_maskz_ternarylogic_epi64
+ // CHECK-LABEL: test_mm_maskz_ternarylogic_epi64
// CHECK: @llvm.x86.avx512.pternlog.q.128
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> zeroinitializer
return _mm_maskz_ternarylogic_epi64(__U, __A, __B, __C, 4);
}
__m256i test_mm256_ternarylogic_epi64(__m256i __A, __m256i __B, __m256i __C) {
- // CHECK-LABEL: @test_mm256_ternarylogic_epi64
+ // CHECK-LABEL: test_mm256_ternarylogic_epi64
// CHECK: @llvm.x86.avx512.pternlog.q.256
return _mm256_ternarylogic_epi64(__A, __B, __C, 4);
}
__m256i test_mm256_mask_ternarylogic_epi64(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C) {
- // CHECK-LABEL: @test_mm256_mask_ternarylogic_epi64
+ // CHECK-LABEL: test_mm256_mask_ternarylogic_epi64
// CHECK: @llvm.x86.avx512.pternlog.q.256
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_ternarylogic_epi64(__A, __U, __B, __C, 4);
}
__m256i test_mm256_maskz_ternarylogic_epi64(__mmask8 __U, __m256i __A, __m256i __B, __m256i __C) {
- // CHECK-LABEL: @test_mm256_maskz_ternarylogic_epi64
+ // CHECK-LABEL: test_mm256_maskz_ternarylogic_epi64
// CHECK: @llvm.x86.avx512.pternlog.q.256
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> zeroinitializer
return _mm256_maskz_ternarylogic_epi64(__U, __A, __B, __C, 4);
}
__m256 test_mm256_shuffle_f32x4(__m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_shuffle_f32x4
+ // CHECK-LABEL: test_mm256_shuffle_f32x4
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15>
return _mm256_shuffle_f32x4(__A, __B, 3);
}
__m256 test_mm256_mask_shuffle_f32x4(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_mask_shuffle_f32x4
+ // CHECK-LABEL: test_mm256_mask_shuffle_f32x4
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_shuffle_f32x4(__W, __U, __A, __B, 3);
}
__m256 test_mm256_maskz_shuffle_f32x4(__mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_maskz_shuffle_f32x4
+ // CHECK-LABEL: test_mm256_maskz_shuffle_f32x4
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_shuffle_f32x4(__U, __A, __B, 3);
}
__m256d test_mm256_shuffle_f64x2(__m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_shuffle_f64x2
+ // CHECK-LABEL: test_mm256_shuffle_f64x2
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x i32> <i32 2, i32 3, i32 6, i32 7>
return _mm256_shuffle_f64x2(__A, __B, 3);
}
__m256d test_mm256_mask_shuffle_f64x2(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_mask_shuffle_f64x2
+ // CHECK-LABEL: test_mm256_mask_shuffle_f64x2
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x i32> <i32 2, i32 3, i32 6, i32 7>
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
@@ -7938,7 +7939,7 @@ __m256d test_mm256_mask_shuffle_f64x2(__m256d __W, __mmask8 __U, __m256d __A, __
}
__m256d test_mm256_maskz_shuffle_f64x2(__mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_maskz_shuffle_f64x2
+ // CHECK-LABEL: test_mm256_maskz_shuffle_f64x2
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x i32> <i32 2, i32 3, i32 6, i32 7>
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
@@ -7946,33 +7947,33 @@ __m256d test_mm256_maskz_shuffle_f64x2(__mmask8 __U, __m256d __A, __m256d __B) {
}
__m256i test_mm256_shuffle_i32x4(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_shuffle_i32x4
+ // CHECK-LABEL: test_mm256_shuffle_i32x4
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15>
return _mm256_shuffle_i32x4(__A, __B, 3);
}
__m256i test_mm256_mask_shuffle_i32x4(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_shuffle_i32x4
+ // CHECK-LABEL: test_mm256_mask_shuffle_i32x4
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_shuffle_i32x4(__W, __U, __A, __B, 3);
}
__m256i test_mm256_maskz_shuffle_i32x4(__mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_shuffle_i32x4
+ // CHECK-LABEL: test_mm256_maskz_shuffle_i32x4
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_shuffle_i32x4(__U, __A, __B, 3);
}
__m256i test_mm256_shuffle_i64x2(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_shuffle_i64x2
+ // CHECK-LABEL: test_mm256_shuffle_i64x2
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i32> <i32 2, i32 3, i32 6, i32 7>
return _mm256_shuffle_i64x2(__A, __B, 3);
}
__m256i test_mm256_mask_shuffle_i64x2(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_shuffle_i64x2
+ // CHECK-LABEL: test_mm256_mask_shuffle_i64x2
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i32> <i32 2, i32 3, i32 6, i32 7>
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
@@ -7980,7 +7981,7 @@ __m256i test_mm256_mask_shuffle_i64x2(__m256i __W, __mmask8 __U, __m256i __A, __
}
__m256i test_mm256_maskz_shuffle_i64x2(__mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_shuffle_i64x2
+ // CHECK-LABEL: test_mm256_maskz_shuffle_i64x2
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i32> <i32 2, i32 3, i32 6, i32 7>
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
@@ -7988,1576 +7989,1576 @@ __m256i test_mm256_maskz_shuffle_i64x2(__mmask8 __U, __m256i __A, __m256i __B) {
}
__m128d test_mm_mask_shuffle_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_mask_shuffle_pd
+ // CHECK-LABEL: test_mm_mask_shuffle_pd
// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> <i32 1, i32 3>
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_mask_shuffle_pd(__W, __U, __A, __B, 3);
}
__m128d test_mm_maskz_shuffle_pd(__mmask8 __U, __m128d __A, __m128d __B) {
- // CHECK-LABEL: @test_mm_maskz_shuffle_pd
+ // CHECK-LABEL: test_mm_maskz_shuffle_pd
// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> <i32 1, i32 3>
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_maskz_shuffle_pd(__U, __A, __B, 3);
}
__m256d test_mm256_mask_shuffle_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_mask_shuffle_pd
+ // CHECK-LABEL: test_mm256_mask_shuffle_pd
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x i32> <i32 1, i32 5, i32 2, i32 6>
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_shuffle_pd(__W, __U, __A, __B, 3);
}
__m256d test_mm256_maskz_shuffle_pd(__mmask8 __U, __m256d __A, __m256d __B) {
- // CHECK-LABEL: @test_mm256_maskz_shuffle_pd
+ // CHECK-LABEL: test_mm256_maskz_shuffle_pd
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x i32> <i32 1, i32 5, i32 2, i32 6>
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_shuffle_pd(__U, __A, __B, 3);
}
__m128 test_mm_mask_shuffle_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_mask_shuffle_ps
+ // CHECK-LABEL: test_mm_mask_shuffle_ps
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 4, i32 4>
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask_shuffle_ps(__W, __U, __A, __B, 4);
}
__m128 test_mm_maskz_shuffle_ps(__mmask8 __U, __m128 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm_maskz_shuffle_ps
+ // CHECK-LABEL: test_mm_maskz_shuffle_ps
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 4, i32 4>
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_maskz_shuffle_ps(__U, __A, __B, 4);
}
__m256 test_mm256_mask_shuffle_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_mask_shuffle_ps
+ // CHECK-LABEL: test_mm256_mask_shuffle_ps
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 8, i32 8, i32 4, i32 5, i32 12, i32 12>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_shuffle_ps(__W, __U, __A, __B, 4);
}
__m256 test_mm256_maskz_shuffle_ps(__mmask8 __U, __m256 __A, __m256 __B) {
- // CHECK-LABEL: @test_mm256_maskz_shuffle_ps
+ // CHECK-LABEL: test_mm256_maskz_shuffle_ps
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 8, i32 8, i32 4, i32 5, i32 12, i32 12>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_shuffle_ps(__U, __A, __B, 4);
}
__m128d test_mm_rsqrt14_pd(__m128d __A) {
- // CHECK-LABEL: @test_mm_rsqrt14_pd
+ // CHECK-LABEL: test_mm_rsqrt14_pd
// CHECK: @llvm.x86.avx512.rsqrt14.pd.128
return _mm_rsqrt14_pd(__A);
}
__m128d test_mm_mask_rsqrt14_pd(__m128d __W, __mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_rsqrt14_pd
+ // CHECK-LABEL: test_mm_mask_rsqrt14_pd
// CHECK: @llvm.x86.avx512.rsqrt14.pd.128
return _mm_mask_rsqrt14_pd(__W, __U, __A);
}
__m128d test_mm_maskz_rsqrt14_pd(__mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_maskz_rsqrt14_pd
+ // CHECK-LABEL: test_mm_maskz_rsqrt14_pd
// CHECK: @llvm.x86.avx512.rsqrt14.pd.128
return _mm_maskz_rsqrt14_pd(__U, __A);
}
__m256d test_mm256_rsqrt14_pd(__m256d __A) {
- // CHECK-LABEL: @test_mm256_rsqrt14_pd
+ // CHECK-LABEL: test_mm256_rsqrt14_pd
// CHECK: @llvm.x86.avx512.rsqrt14.pd.256
return _mm256_rsqrt14_pd(__A);
}
__m256d test_mm256_mask_rsqrt14_pd(__m256d __W, __mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_mask_rsqrt14_pd
+ // CHECK-LABEL: test_mm256_mask_rsqrt14_pd
// CHECK: @llvm.x86.avx512.rsqrt14.pd.256
return _mm256_mask_rsqrt14_pd(__W, __U, __A);
}
__m256d test_mm256_maskz_rsqrt14_pd(__mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_maskz_rsqrt14_pd
+ // CHECK-LABEL: test_mm256_maskz_rsqrt14_pd
// CHECK: @llvm.x86.avx512.rsqrt14.pd.256
return _mm256_maskz_rsqrt14_pd(__U, __A);
}
__m128 test_mm_rsqrt14_ps(__m128 __A) {
- // CHECK-LABEL: @test_mm_rsqrt14_ps
+ // CHECK-LABEL: test_mm_rsqrt14_ps
// CHECK: @llvm.x86.avx512.rsqrt14.ps.128
return _mm_rsqrt14_ps(__A);
}
__m128 test_mm_mask_rsqrt14_ps(__m128 __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_rsqrt14_ps
+ // CHECK-LABEL: test_mm_mask_rsqrt14_ps
// CHECK: @llvm.x86.avx512.rsqrt14.ps.128
return _mm_mask_rsqrt14_ps(__W, __U, __A);
}
__m128 test_mm_maskz_rsqrt14_ps(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_maskz_rsqrt14_ps
+ // CHECK-LABEL: test_mm_maskz_rsqrt14_ps
// CHECK: @llvm.x86.avx512.rsqrt14.ps.128
return _mm_maskz_rsqrt14_ps(__U, __A);
}
__m256 test_mm256_rsqrt14_ps(__m256 __A) {
- // CHECK-LABEL: @test_mm256_rsqrt14_ps
+ // CHECK-LABEL: test_mm256_rsqrt14_ps
// CHECK: @llvm.x86.avx512.rsqrt14.ps.256
return _mm256_rsqrt14_ps(__A);
}
__m256 test_mm256_mask_rsqrt14_ps(__m256 __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_rsqrt14_ps
+ // CHECK-LABEL: test_mm256_mask_rsqrt14_ps
// CHECK: @llvm.x86.avx512.rsqrt14.ps.256
return _mm256_mask_rsqrt14_ps(__W, __U, __A);
}
__m256 test_mm256_maskz_rsqrt14_ps(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_maskz_rsqrt14_ps
+ // CHECK-LABEL: test_mm256_maskz_rsqrt14_ps
// CHECK: @llvm.x86.avx512.rsqrt14.ps.256
return _mm256_maskz_rsqrt14_ps(__U, __A);
}
__m256 test_mm256_broadcast_f32x4(__m128 __A) {
- // CHECK-LABEL: @test_mm256_broadcast_f32x4
+ // CHECK-LABEL: test_mm256_broadcast_f32x4
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
return _mm256_broadcast_f32x4(__A);
}
__m256 test_mm256_mask_broadcast_f32x4(__m256 __O, __mmask8 __M, __m128 __A) {
- // CHECK-LABEL: @test_mm256_mask_broadcast_f32x4
+ // CHECK-LABEL: test_mm256_mask_broadcast_f32x4
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_broadcast_f32x4(__O, __M, __A);
}
__m256 test_mm256_maskz_broadcast_f32x4(__mmask8 __M, __m128 __A) {
- // CHECK-LABEL: @test_mm256_maskz_broadcast_f32x4
+ // CHECK-LABEL: test_mm256_maskz_broadcast_f32x4
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_broadcast_f32x4(__M, __A);
}
__m256i test_mm256_broadcast_i32x4(__m128i const* __A) {
- // CHECK-LABEL: @test_mm256_broadcast_i32x4
+ // CHECK-LABEL: test_mm256_broadcast_i32x4
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
return _mm256_broadcast_i32x4(_mm_loadu_si128(__A));
}
__m256i test_mm256_mask_broadcast_i32x4(__m256i __O, __mmask8 __M, __m128i const* __A) {
- // CHECK-LABEL: @test_mm256_mask_broadcast_i32x4
+ // CHECK-LABEL: test_mm256_mask_broadcast_i32x4
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_broadcast_i32x4(__O, __M, _mm_loadu_si128(__A));
}
__m256i test_mm256_maskz_broadcast_i32x4(__mmask8 __M, __m128i const* __A) {
- // CHECK-LABEL: @test_mm256_maskz_broadcast_i32x4
+ // CHECK-LABEL: test_mm256_maskz_broadcast_i32x4
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_broadcast_i32x4(__M, _mm_loadu_si128(__A));
}
__m256d test_mm256_mask_broadcastsd_pd(__m256d __O, __mmask8 __M, __m128d __A) {
- // CHECK-LABEL: @test_mm256_mask_broadcastsd_pd
+ // CHECK-LABEL: test_mm256_mask_broadcastsd_pd
// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <4 x i32> zeroinitializer
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_broadcastsd_pd(__O, __M, __A);
}
__m256d test_mm256_maskz_broadcastsd_pd(__mmask8 __M, __m128d __A) {
- // CHECK-LABEL: @test_mm256_maskz_broadcastsd_pd
+ // CHECK-LABEL: test_mm256_maskz_broadcastsd_pd
// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <4 x i32> zeroinitializer
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_broadcastsd_pd(__M, __A);
}
__m128 test_mm_mask_broadcastss_ps(__m128 __O, __mmask8 __M, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_broadcastss_ps
+ // CHECK-LABEL: test_mm_mask_broadcastss_ps
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> zeroinitializer
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask_broadcastss_ps(__O, __M, __A);
}
__m128 test_mm_maskz_broadcastss_ps(__mmask8 __M, __m128 __A) {
- // CHECK-LABEL: @test_mm_maskz_broadcastss_ps
+ // CHECK-LABEL: test_mm_maskz_broadcastss_ps
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> zeroinitializer
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_maskz_broadcastss_ps(__M, __A);
}
__m256 test_mm256_mask_broadcastss_ps(__m256 __O, __mmask8 __M, __m128 __A) {
- // CHECK-LABEL: @test_mm256_mask_broadcastss_ps
+ // CHECK-LABEL: test_mm256_mask_broadcastss_ps
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <8 x i32> zeroinitializer
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_broadcastss_ps(__O, __M, __A);
}
__m256 test_mm256_maskz_broadcastss_ps(__mmask8 __M, __m128 __A) {
- // CHECK-LABEL: @test_mm256_maskz_broadcastss_ps
+ // CHECK-LABEL: test_mm256_maskz_broadcastss_ps
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <8 x i32> zeroinitializer
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_broadcastss_ps(__M, __A);
}
__m128i test_mm_mask_broadcastd_epi32(__m128i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_broadcastd_epi32
+ // CHECK-LABEL: test_mm_mask_broadcastd_epi32
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> zeroinitializer
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_broadcastd_epi32(__O, __M, __A);
}
__m128i test_mm_maskz_broadcastd_epi32(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_broadcastd_epi32
+ // CHECK-LABEL: test_mm_maskz_broadcastd_epi32
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> zeroinitializer
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_broadcastd_epi32(__M, __A);
}
__m256i test_mm256_mask_broadcastd_epi32(__m256i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm256_mask_broadcastd_epi32
+ // CHECK-LABEL: test_mm256_mask_broadcastd_epi32
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <8 x i32> zeroinitializer
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_broadcastd_epi32(__O, __M, __A);
}
__m256i test_mm256_maskz_broadcastd_epi32(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm256_maskz_broadcastd_epi32
+ // CHECK-LABEL: test_mm256_maskz_broadcastd_epi32
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <8 x i32> zeroinitializer
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_broadcastd_epi32(__M, __A);
}
__m128i test_mm_mask_broadcastq_epi64(__m128i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_broadcastq_epi64
+ // CHECK-LABEL: test_mm_mask_broadcastq_epi64
// CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i32> zeroinitializer
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_broadcastq_epi64(__O, __M, __A);
}
__m128i test_mm_maskz_broadcastq_epi64(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_broadcastq_epi64
+ // CHECK-LABEL: test_mm_maskz_broadcastq_epi64
// CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i32> zeroinitializer
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_broadcastq_epi64(__M, __A);
}
__m256i test_mm256_mask_broadcastq_epi64(__m256i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm256_mask_broadcastq_epi64
+ // CHECK-LABEL: test_mm256_mask_broadcastq_epi64
// CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <4 x i32> zeroinitializer
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_broadcastq_epi64(__O, __M, __A);
}
__m256i test_mm256_maskz_broadcastq_epi64(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm256_maskz_broadcastq_epi64
+ // CHECK-LABEL: test_mm256_maskz_broadcastq_epi64
// CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <4 x i32> zeroinitializer
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_broadcastq_epi64(__M, __A);
}
__m128i test_mm_cvtsepi32_epi8(__m128i __A) {
- // CHECK-LABEL: @test_mm_cvtsepi32_epi8
+ // CHECK-LABEL: test_mm_cvtsepi32_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.db.128
return _mm_cvtsepi32_epi8(__A);
}
__m128i test_mm_mask_cvtsepi32_epi8(__m128i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtsepi32_epi8
+ // CHECK-LABEL: test_mm_mask_cvtsepi32_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.db.128
return _mm_mask_cvtsepi32_epi8(__O, __M, __A);
}
__m128i test_mm_maskz_cvtsepi32_epi8(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtsepi32_epi8
+ // CHECK-LABEL: test_mm_maskz_cvtsepi32_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.db.128
return _mm_maskz_cvtsepi32_epi8(__M, __A);
}
void test_mm_mask_cvtsepi32_storeu_epi8(void * __P, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtsepi32_storeu_epi8
+ // CHECK-LABEL: test_mm_mask_cvtsepi32_storeu_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.db.mem.128
return _mm_mask_cvtsepi32_storeu_epi8(__P, __M, __A);
}
__m128i test_mm256_cvtsepi32_epi8(__m256i __A) {
- // CHECK-LABEL: @test_mm256_cvtsepi32_epi8
+ // CHECK-LABEL: test_mm256_cvtsepi32_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.db.256
return _mm256_cvtsepi32_epi8(__A);
}
__m128i test_mm256_mask_cvtsepi32_epi8(__m128i __O, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtsepi32_epi8
+ // CHECK-LABEL: test_mm256_mask_cvtsepi32_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.db.256
return _mm256_mask_cvtsepi32_epi8(__O, __M, __A);
}
__m128i test_mm256_maskz_cvtsepi32_epi8(__mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtsepi32_epi8
+ // CHECK-LABEL: test_mm256_maskz_cvtsepi32_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.db.256
return _mm256_maskz_cvtsepi32_epi8(__M, __A);
}
void test_mm256_mask_cvtsepi32_storeu_epi8(void * __P, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtsepi32_storeu_epi8
+ // CHECK-LABEL: test_mm256_mask_cvtsepi32_storeu_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.db.mem.256
return _mm256_mask_cvtsepi32_storeu_epi8(__P, __M, __A);
}
__m128i test_mm_cvtsepi32_epi16(__m128i __A) {
- // CHECK-LABEL: @test_mm_cvtsepi32_epi16
+ // CHECK-LABEL: test_mm_cvtsepi32_epi16
// CHECK: @llvm.x86.avx512.mask.pmovs.dw.128
return _mm_cvtsepi32_epi16(__A);
}
__m128i test_mm_mask_cvtsepi32_epi16(__m128i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtsepi32_epi16
+ // CHECK-LABEL: test_mm_mask_cvtsepi32_epi16
// CHECK: @llvm.x86.avx512.mask.pmovs.dw.128
return _mm_mask_cvtsepi32_epi16(__O, __M, __A);
}
__m128i test_mm_maskz_cvtsepi32_epi16(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtsepi32_epi16
+ // CHECK-LABEL: test_mm_maskz_cvtsepi32_epi16
// CHECK: @llvm.x86.avx512.mask.pmovs.dw.128
return _mm_maskz_cvtsepi32_epi16(__M, __A);
}
void test_mm_mask_cvtsepi32_storeu_epi16(void * __P, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtsepi32_storeu_epi16
+ // CHECK-LABEL: test_mm_mask_cvtsepi32_storeu_epi16
// CHECK: @llvm.x86.avx512.mask.pmovs.dw.mem.128
return _mm_mask_cvtsepi32_storeu_epi16(__P, __M, __A);
}
__m128i test_mm256_cvtsepi32_epi16(__m256i __A) {
- // CHECK-LABEL: @test_mm256_cvtsepi32_epi16
+ // CHECK-LABEL: test_mm256_cvtsepi32_epi16
// CHECK: @llvm.x86.avx512.mask.pmovs.dw.256
return _mm256_cvtsepi32_epi16(__A);
}
__m128i test_mm256_mask_cvtsepi32_epi16(__m128i __O, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtsepi32_epi16
+ // CHECK-LABEL: test_mm256_mask_cvtsepi32_epi16
// CHECK: @llvm.x86.avx512.mask.pmovs.dw.256
return _mm256_mask_cvtsepi32_epi16(__O, __M, __A);
}
__m128i test_mm256_maskz_cvtsepi32_epi16(__mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtsepi32_epi16
+ // CHECK-LABEL: test_mm256_maskz_cvtsepi32_epi16
// CHECK: @llvm.x86.avx512.mask.pmovs.dw.256
return _mm256_maskz_cvtsepi32_epi16(__M, __A);
}
void test_mm256_mask_cvtsepi32_storeu_epi16(void * __P, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtsepi32_storeu_epi16
+ // CHECK-LABEL: test_mm256_mask_cvtsepi32_storeu_epi16
// CHECK: @llvm.x86.avx512.mask.pmovs.dw.mem.256
return _mm256_mask_cvtsepi32_storeu_epi16(__P, __M, __A);
}
__m128i test_mm_cvtsepi64_epi8(__m128i __A) {
- // CHECK-LABEL: @test_mm_cvtsepi64_epi8
+ // CHECK-LABEL: test_mm_cvtsepi64_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.qb.128
return _mm_cvtsepi64_epi8(__A);
}
__m128i test_mm_mask_cvtsepi64_epi8(__m128i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtsepi64_epi8
+ // CHECK-LABEL: test_mm_mask_cvtsepi64_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.qb.128
return _mm_mask_cvtsepi64_epi8(__O, __M, __A);
}
__m128i test_mm_maskz_cvtsepi64_epi8(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtsepi64_epi8
+ // CHECK-LABEL: test_mm_maskz_cvtsepi64_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.qb.128
return _mm_maskz_cvtsepi64_epi8(__M, __A);
}
void test_mm_mask_cvtsepi64_storeu_epi8(void * __P, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtsepi64_storeu_epi8
+ // CHECK-LABEL: test_mm_mask_cvtsepi64_storeu_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.qb.mem.128
return _mm_mask_cvtsepi64_storeu_epi8(__P, __M, __A);
}
__m128i test_mm256_cvtsepi64_epi8(__m256i __A) {
- // CHECK-LABEL: @test_mm256_cvtsepi64_epi8
+ // CHECK-LABEL: test_mm256_cvtsepi64_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.qb.256
return _mm256_cvtsepi64_epi8(__A);
}
__m128i test_mm256_mask_cvtsepi64_epi8(__m128i __O, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtsepi64_epi8
+ // CHECK-LABEL: test_mm256_mask_cvtsepi64_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.qb.256
return _mm256_mask_cvtsepi64_epi8(__O, __M, __A);
}
__m128i test_mm256_maskz_cvtsepi64_epi8(__mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtsepi64_epi8
+ // CHECK-LABEL: test_mm256_maskz_cvtsepi64_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.qb.256
return _mm256_maskz_cvtsepi64_epi8(__M, __A);
}
void test_mm256_mask_cvtsepi64_storeu_epi8(void * __P, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtsepi64_storeu_epi8
+ // CHECK-LABEL: test_mm256_mask_cvtsepi64_storeu_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.qb.mem.256
return _mm256_mask_cvtsepi64_storeu_epi8(__P, __M, __A);
}
__m128i test_mm_cvtsepi64_epi32(__m128i __A) {
- // CHECK-LABEL: @test_mm_cvtsepi64_epi32
+ // CHECK-LABEL: test_mm_cvtsepi64_epi32
// CHECK: @llvm.x86.avx512.mask.pmovs.qd.128
return _mm_cvtsepi64_epi32(__A);
}
__m128i test_mm_mask_cvtsepi64_epi32(__m128i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtsepi64_epi32
+ // CHECK-LABEL: test_mm_mask_cvtsepi64_epi32
// CHECK: @llvm.x86.avx512.mask.pmovs.qd.128
return _mm_mask_cvtsepi64_epi32(__O, __M, __A);
}
__m128i test_mm_maskz_cvtsepi64_epi32(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtsepi64_epi32
+ // CHECK-LABEL: test_mm_maskz_cvtsepi64_epi32
// CHECK: @llvm.x86.avx512.mask.pmovs.qd.128
return _mm_maskz_cvtsepi64_epi32(__M, __A);
}
void test_mm_mask_cvtsepi64_storeu_epi32(void * __P, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtsepi64_storeu_epi32
+ // CHECK-LABEL: test_mm_mask_cvtsepi64_storeu_epi32
// CHECK: @llvm.x86.avx512.mask.pmovs.qd.mem.128
return _mm_mask_cvtsepi64_storeu_epi32(__P, __M, __A);
}
__m128i test_mm256_cvtsepi64_epi32(__m256i __A) {
- // CHECK-LABEL: @test_mm256_cvtsepi64_epi32
+ // CHECK-LABEL: test_mm256_cvtsepi64_epi32
// CHECK: @llvm.x86.avx512.mask.pmovs.qd.256
return _mm256_cvtsepi64_epi32(__A);
}
__m128i test_mm256_mask_cvtsepi64_epi32(__m128i __O, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtsepi64_epi32
+ // CHECK-LABEL: test_mm256_mask_cvtsepi64_epi32
// CHECK: @llvm.x86.avx512.mask.pmovs.qd.256
return _mm256_mask_cvtsepi64_epi32(__O, __M, __A);
}
__m128i test_mm256_maskz_cvtsepi64_epi32(__mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtsepi64_epi32
+ // CHECK-LABEL: test_mm256_maskz_cvtsepi64_epi32
// CHECK: @llvm.x86.avx512.mask.pmovs.qd.256
return _mm256_maskz_cvtsepi64_epi32(__M, __A);
}
void test_mm256_mask_cvtsepi64_storeu_epi32(void * __P, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtsepi64_storeu_epi32
+ // CHECK-LABEL: test_mm256_mask_cvtsepi64_storeu_epi32
// CHECK: @llvm.x86.avx512.mask.pmovs.qd.mem.256
return _mm256_mask_cvtsepi64_storeu_epi32(__P, __M, __A);
}
__m128i test_mm_cvtsepi64_epi16(__m128i __A) {
- // CHECK-LABEL: @test_mm_cvtsepi64_epi16
+ // CHECK-LABEL: test_mm_cvtsepi64_epi16
// CHECK: @llvm.x86.avx512.mask.pmovs.qw.128
return _mm_cvtsepi64_epi16(__A);
}
__m128i test_mm_mask_cvtsepi64_epi16(__m128i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtsepi64_epi16
+ // CHECK-LABEL: test_mm_mask_cvtsepi64_epi16
// CHECK: @llvm.x86.avx512.mask.pmovs.qw.128
return _mm_mask_cvtsepi64_epi16(__O, __M, __A);
}
__m128i test_mm_maskz_cvtsepi64_epi16(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtsepi64_epi16
+ // CHECK-LABEL: test_mm_maskz_cvtsepi64_epi16
// CHECK: @llvm.x86.avx512.mask.pmovs.qw.128
return _mm_maskz_cvtsepi64_epi16(__M, __A);
}
void test_mm_mask_cvtsepi64_storeu_epi16(void * __P, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtsepi64_storeu_epi16
+ // CHECK-LABEL: test_mm_mask_cvtsepi64_storeu_epi16
// CHECK: @llvm.x86.avx512.mask.pmovs.qw.mem.128
return _mm_mask_cvtsepi64_storeu_epi16(__P, __M, __A);
}
__m128i test_mm256_cvtsepi64_epi16(__m256i __A) {
- // CHECK-LABEL: @test_mm256_cvtsepi64_epi16
+ // CHECK-LABEL: test_mm256_cvtsepi64_epi16
// CHECK: @llvm.x86.avx512.mask.pmovs.qw.256
return _mm256_cvtsepi64_epi16(__A);
}
__m128i test_mm256_mask_cvtsepi64_epi16(__m128i __O, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtsepi64_epi16
+ // CHECK-LABEL: test_mm256_mask_cvtsepi64_epi16
// CHECK: @llvm.x86.avx512.mask.pmovs.qw.256
return _mm256_mask_cvtsepi64_epi16(__O, __M, __A);
}
__m128i test_mm256_maskz_cvtsepi64_epi16(__mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtsepi64_epi16
+ // CHECK-LABEL: test_mm256_maskz_cvtsepi64_epi16
// CHECK: @llvm.x86.avx512.mask.pmovs.qw.256
return _mm256_maskz_cvtsepi64_epi16(__M, __A);
}
void test_mm256_mask_cvtsepi64_storeu_epi16(void * __P, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtsepi64_storeu_epi16
+ // CHECK-LABEL: test_mm256_mask_cvtsepi64_storeu_epi16
// CHECK: @llvm.x86.avx512.mask.pmovs.qw.mem.256
return _mm256_mask_cvtsepi64_storeu_epi16(__P, __M, __A);
}
__m128i test_mm_cvtusepi32_epi8(__m128i __A) {
- // CHECK-LABEL: @test_mm_cvtusepi32_epi8
+ // CHECK-LABEL: test_mm_cvtusepi32_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.db.128
return _mm_cvtusepi32_epi8(__A);
}
__m128i test_mm_mask_cvtusepi32_epi8(__m128i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtusepi32_epi8
+ // CHECK-LABEL: test_mm_mask_cvtusepi32_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.db.128
return _mm_mask_cvtusepi32_epi8(__O, __M, __A);
}
__m128i test_mm_maskz_cvtusepi32_epi8(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtusepi32_epi8
+ // CHECK-LABEL: test_mm_maskz_cvtusepi32_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.db.128
return _mm_maskz_cvtusepi32_epi8(__M, __A);
}
void test_mm_mask_cvtusepi32_storeu_epi8(void * __P, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtusepi32_storeu_epi8
+ // CHECK-LABEL: test_mm_mask_cvtusepi32_storeu_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.db.mem.128
return _mm_mask_cvtusepi32_storeu_epi8(__P, __M, __A);
}
__m128i test_mm256_cvtusepi32_epi8(__m256i __A) {
- // CHECK-LABEL: @test_mm256_cvtusepi32_epi8
+ // CHECK-LABEL: test_mm256_cvtusepi32_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.db.256
return _mm256_cvtusepi32_epi8(__A);
}
__m128i test_mm256_mask_cvtusepi32_epi8(__m128i __O, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtusepi32_epi8
+ // CHECK-LABEL: test_mm256_mask_cvtusepi32_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.db.256
return _mm256_mask_cvtusepi32_epi8(__O, __M, __A);
}
__m128i test_mm256_maskz_cvtusepi32_epi8(__mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtusepi32_epi8
+ // CHECK-LABEL: test_mm256_maskz_cvtusepi32_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.db.256
return _mm256_maskz_cvtusepi32_epi8(__M, __A);
}
void test_mm256_mask_cvtusepi32_storeu_epi8(void * __P, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtusepi32_storeu_epi8
+ // CHECK-LABEL: test_mm256_mask_cvtusepi32_storeu_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.db.mem.256
return _mm256_mask_cvtusepi32_storeu_epi8(__P, __M, __A);
}
__m128i test_mm_cvtusepi32_epi16(__m128i __A) {
- // CHECK-LABEL: @test_mm_cvtusepi32_epi16
+ // CHECK-LABEL: test_mm_cvtusepi32_epi16
// CHECK: @llvm.x86.avx512.mask.pmovus.dw.128
return _mm_cvtusepi32_epi16(__A);
}
__m128i test_mm_mask_cvtusepi32_epi16(__m128i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtusepi32_epi16
+ // CHECK-LABEL: test_mm_mask_cvtusepi32_epi16
// CHECK: @llvm.x86.avx512.mask.pmovus.dw.128
return _mm_mask_cvtusepi32_epi16(__O, __M, __A);
}
__m128i test_mm_maskz_cvtusepi32_epi16(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtusepi32_epi16
+ // CHECK-LABEL: test_mm_maskz_cvtusepi32_epi16
// CHECK: @llvm.x86.avx512.mask.pmovus.dw.128
return _mm_maskz_cvtusepi32_epi16(__M, __A);
}
void test_mm_mask_cvtusepi32_storeu_epi16(void * __P, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtusepi32_storeu_epi16
+ // CHECK-LABEL: test_mm_mask_cvtusepi32_storeu_epi16
// CHECK: @llvm.x86.avx512.mask.pmovus.dw.mem.128
return _mm_mask_cvtusepi32_storeu_epi16(__P, __M, __A);
}
__m128i test_mm256_cvtusepi32_epi16(__m256i __A) {
- // CHECK-LABEL: @test_mm256_cvtusepi32_epi16
+ // CHECK-LABEL: test_mm256_cvtusepi32_epi16
// CHECK: @llvm.x86.avx512.mask.pmovus.dw.256
return _mm256_cvtusepi32_epi16(__A);
}
__m128i test_mm256_mask_cvtusepi32_epi16(__m128i __O, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtusepi32_epi16
+ // CHECK-LABEL: test_mm256_mask_cvtusepi32_epi16
// CHECK: @llvm.x86.avx512.mask.pmovus.dw.256
return _mm256_mask_cvtusepi32_epi16(__O, __M, __A);
}
__m128i test_mm256_maskz_cvtusepi32_epi16(__mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtusepi32_epi16
+ // CHECK-LABEL: test_mm256_maskz_cvtusepi32_epi16
// CHECK: @llvm.x86.avx512.mask.pmovus.dw.256
return _mm256_maskz_cvtusepi32_epi16(__M, __A);
}
void test_mm256_mask_cvtusepi32_storeu_epi16(void * __P, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtusepi32_storeu_epi16
+ // CHECK-LABEL: test_mm256_mask_cvtusepi32_storeu_epi16
// CHECK: @llvm.x86.avx512.mask.pmovus.dw.mem.256
return _mm256_mask_cvtusepi32_storeu_epi16(__P, __M, __A);
}
__m128i test_mm_cvtusepi64_epi8(__m128i __A) {
- // CHECK-LABEL: @test_mm_cvtusepi64_epi8
+ // CHECK-LABEL: test_mm_cvtusepi64_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.qb.128
return _mm_cvtusepi64_epi8(__A);
}
__m128i test_mm_mask_cvtusepi64_epi8(__m128i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtusepi64_epi8
+ // CHECK-LABEL: test_mm_mask_cvtusepi64_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.qb.128
return _mm_mask_cvtusepi64_epi8(__O, __M, __A);
}
__m128i test_mm_maskz_cvtusepi64_epi8(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtusepi64_epi8
+ // CHECK-LABEL: test_mm_maskz_cvtusepi64_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.qb.128
return _mm_maskz_cvtusepi64_epi8(__M, __A);
}
void test_mm_mask_cvtusepi64_storeu_epi8(void * __P, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtusepi64_storeu_epi8
+ // CHECK-LABEL: test_mm_mask_cvtusepi64_storeu_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.qb.mem.128
return _mm_mask_cvtusepi64_storeu_epi8(__P, __M, __A);
}
__m128i test_mm256_cvtusepi64_epi8(__m256i __A) {
- // CHECK-LABEL: @test_mm256_cvtusepi64_epi8
+ // CHECK-LABEL: test_mm256_cvtusepi64_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.qb.256
return _mm256_cvtusepi64_epi8(__A);
}
__m128i test_mm256_mask_cvtusepi64_epi8(__m128i __O, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtusepi64_epi8
+ // CHECK-LABEL: test_mm256_mask_cvtusepi64_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.qb.256
return _mm256_mask_cvtusepi64_epi8(__O, __M, __A);
}
__m128i test_mm256_maskz_cvtusepi64_epi8(__mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtusepi64_epi8
+ // CHECK-LABEL: test_mm256_maskz_cvtusepi64_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.qb.256
return _mm256_maskz_cvtusepi64_epi8(__M, __A);
}
void test_mm256_mask_cvtusepi64_storeu_epi8(void * __P, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtusepi64_storeu_epi8
+ // CHECK-LABEL: test_mm256_mask_cvtusepi64_storeu_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.qb.mem.256
return _mm256_mask_cvtusepi64_storeu_epi8(__P, __M, __A);
}
__m128i test_mm_cvtusepi64_epi32(__m128i __A) {
- // CHECK-LABEL: @test_mm_cvtusepi64_epi32
+ // CHECK-LABEL: test_mm_cvtusepi64_epi32
// CHECK: @llvm.x86.avx512.mask.pmovus.qd.128
return _mm_cvtusepi64_epi32(__A);
}
__m128i test_mm_mask_cvtusepi64_epi32(__m128i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtusepi64_epi32
+ // CHECK-LABEL: test_mm_mask_cvtusepi64_epi32
// CHECK: @llvm.x86.avx512.mask.pmovus.qd.128
return _mm_mask_cvtusepi64_epi32(__O, __M, __A);
}
__m128i test_mm_maskz_cvtusepi64_epi32(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtusepi64_epi32
+ // CHECK-LABEL: test_mm_maskz_cvtusepi64_epi32
// CHECK: @llvm.x86.avx512.mask.pmovus.qd.128
return _mm_maskz_cvtusepi64_epi32(__M, __A);
}
void test_mm_mask_cvtusepi64_storeu_epi32(void * __P, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtusepi64_storeu_epi32
+ // CHECK-LABEL: test_mm_mask_cvtusepi64_storeu_epi32
// CHECK: @llvm.x86.avx512.mask.pmovus.qd.mem.128
return _mm_mask_cvtusepi64_storeu_epi32(__P, __M, __A);
}
__m128i test_mm256_cvtusepi64_epi32(__m256i __A) {
- // CHECK-LABEL: @test_mm256_cvtusepi64_epi32
+ // CHECK-LABEL: test_mm256_cvtusepi64_epi32
// CHECK: @llvm.x86.avx512.mask.pmovus.qd.256
return _mm256_cvtusepi64_epi32(__A);
}
__m128i test_mm256_mask_cvtusepi64_epi32(__m128i __O, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtusepi64_epi32
+ // CHECK-LABEL: test_mm256_mask_cvtusepi64_epi32
// CHECK: @llvm.x86.avx512.mask.pmovus.qd.256
return _mm256_mask_cvtusepi64_epi32(__O, __M, __A);
}
__m128i test_mm256_maskz_cvtusepi64_epi32(__mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtusepi64_epi32
+ // CHECK-LABEL: test_mm256_maskz_cvtusepi64_epi32
// CHECK: @llvm.x86.avx512.mask.pmovus.qd.256
return _mm256_maskz_cvtusepi64_epi32(__M, __A);
}
void test_mm256_mask_cvtusepi64_storeu_epi32(void * __P, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtusepi64_storeu_epi32
+ // CHECK-LABEL: test_mm256_mask_cvtusepi64_storeu_epi32
// CHECK: @llvm.x86.avx512.mask.pmovus.qd.mem.256
return _mm256_mask_cvtusepi64_storeu_epi32(__P, __M, __A);
}
__m128i test_mm_cvtusepi64_epi16(__m128i __A) {
- // CHECK-LABEL: @test_mm_cvtusepi64_epi16
+ // CHECK-LABEL: test_mm_cvtusepi64_epi16
// CHECK: @llvm.x86.avx512.mask.pmovus.qw.128
return _mm_cvtusepi64_epi16(__A);
}
__m128i test_mm_mask_cvtusepi64_epi16(__m128i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtusepi64_epi16
+ // CHECK-LABEL: test_mm_mask_cvtusepi64_epi16
// CHECK: @llvm.x86.avx512.mask.pmovus.qw.128
return _mm_mask_cvtusepi64_epi16(__O, __M, __A);
}
__m128i test_mm_maskz_cvtusepi64_epi16(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtusepi64_epi16
+ // CHECK-LABEL: test_mm_maskz_cvtusepi64_epi16
// CHECK: @llvm.x86.avx512.mask.pmovus.qw.128
return _mm_maskz_cvtusepi64_epi16(__M, __A);
}
void test_mm_mask_cvtusepi64_storeu_epi16(void * __P, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtusepi64_storeu_epi16
+ // CHECK-LABEL: test_mm_mask_cvtusepi64_storeu_epi16
// CHECK: @llvm.x86.avx512.mask.pmovus.qw.mem.128
return _mm_mask_cvtusepi64_storeu_epi16(__P, __M, __A);
}
__m128i test_mm256_cvtusepi64_epi16(__m256i __A) {
- // CHECK-LABEL: @test_mm256_cvtusepi64_epi16
+ // CHECK-LABEL: test_mm256_cvtusepi64_epi16
// CHECK: @llvm.x86.avx512.mask.pmovus.qw.256
return _mm256_cvtusepi64_epi16(__A);
}
__m128i test_mm256_mask_cvtusepi64_epi16(__m128i __O, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtusepi64_epi16
+ // CHECK-LABEL: test_mm256_mask_cvtusepi64_epi16
// CHECK: @llvm.x86.avx512.mask.pmovus.qw.256
return _mm256_mask_cvtusepi64_epi16(__O, __M, __A);
}
__m128i test_mm256_maskz_cvtusepi64_epi16(__mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtusepi64_epi16
+ // CHECK-LABEL: test_mm256_maskz_cvtusepi64_epi16
// CHECK: @llvm.x86.avx512.mask.pmovus.qw.256
return _mm256_maskz_cvtusepi64_epi16(__M, __A);
}
void test_mm256_mask_cvtusepi64_storeu_epi16(void * __P, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtusepi64_storeu_epi16
+ // CHECK-LABEL: test_mm256_mask_cvtusepi64_storeu_epi16
// CHECK: @llvm.x86.avx512.mask.pmovus.qw.mem.256
return _mm256_mask_cvtusepi64_storeu_epi16(__P, __M, __A);
}
__m128i test_mm_cvtepi32_epi8(__m128i __A) {
- // CHECK-LABEL: @test_mm_cvtepi32_epi8
+ // CHECK-LABEL: test_mm_cvtepi32_epi8
// CHECK: trunc <4 x i32> %{{.*}} to <4 x i8>
// CHECK: shufflevector <4 x i8> %{{.*}}, <4 x i8> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
return _mm_cvtepi32_epi8(__A);
}
__m128i test_mm_mask_cvtepi32_epi8(__m128i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepi32_epi8
+ // CHECK-LABEL: test_mm_mask_cvtepi32_epi8
// CHECK: @llvm.x86.avx512.mask.pmov.db.128
return _mm_mask_cvtepi32_epi8(__O, __M, __A);
}
__m128i test_mm_maskz_cvtepi32_epi8(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtepi32_epi8
+ // CHECK-LABEL: test_mm_maskz_cvtepi32_epi8
// CHECK: @llvm.x86.avx512.mask.pmov.db.128
return _mm_maskz_cvtepi32_epi8(__M, __A);
}
void test_mm_mask_cvtepi32_storeu_epi8(void * __P, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepi32_storeu_epi8
+ // CHECK-LABEL: test_mm_mask_cvtepi32_storeu_epi8
// CHECK: @llvm.x86.avx512.mask.pmov.db.mem.128
return _mm_mask_cvtepi32_storeu_epi8(__P, __M, __A);
}
__m128i test_mm256_cvtepi32_epi8(__m256i __A) {
- // CHECK-LABEL: @test_mm256_cvtepi32_epi8
+ // CHECK-LABEL: test_mm256_cvtepi32_epi8
// CHECK: trunc <8 x i32> %{{.*}} to <8 x i8>
// CHECK: shufflevector <8 x i8> %{{.*}}, <8 x i8> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
return _mm256_cvtepi32_epi8(__A);
}
__m128i test_mm256_mask_cvtepi32_epi8(__m128i __O, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepi32_epi8
+ // CHECK-LABEL: test_mm256_mask_cvtepi32_epi8
// CHECK: @llvm.x86.avx512.mask.pmov.db.256
return _mm256_mask_cvtepi32_epi8(__O, __M, __A);
}
__m128i test_mm256_maskz_cvtepi32_epi8(__mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepi32_epi8
+ // CHECK-LABEL: test_mm256_maskz_cvtepi32_epi8
// CHECK: @llvm.x86.avx512.mask.pmov.db.256
return _mm256_maskz_cvtepi32_epi8(__M, __A);
}
void test_mm256_mask_cvtepi32_storeu_epi8(void * __P, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepi32_storeu_epi8
+ // CHECK-LABEL: test_mm256_mask_cvtepi32_storeu_epi8
// CHECK: @llvm.x86.avx512.mask.pmov.db.mem.256
return _mm256_mask_cvtepi32_storeu_epi8(__P, __M, __A);
}
__m128i test_mm_cvtepi32_epi16(__m128i __A) {
- // CHECK-LABEL: @test_mm_cvtepi32_epi16
+ // CHECK-LABEL: test_mm_cvtepi32_epi16
// CHECK: trunc <4 x i32> %{{.*}} to <4 x i16>
// CHECK: shufflevector <4 x i16> %{{.*}}, <4 x i16> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
return _mm_cvtepi32_epi16(__A);
}
__m128i test_mm_mask_cvtepi32_epi16(__m128i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepi32_epi16
+ // CHECK-LABEL: test_mm_mask_cvtepi32_epi16
// CHECK: @llvm.x86.avx512.mask.pmov.dw.128
return _mm_mask_cvtepi32_epi16(__O, __M, __A);
}
__m128i test_mm_maskz_cvtepi32_epi16(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtepi32_epi16
+ // CHECK-LABEL: test_mm_maskz_cvtepi32_epi16
// CHECK: @llvm.x86.avx512.mask.pmov.dw.128
return _mm_maskz_cvtepi32_epi16(__M, __A);
}
void test_mm_mask_cvtepi32_storeu_epi16(void * __P, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepi32_storeu_epi16
+ // CHECK-LABEL: test_mm_mask_cvtepi32_storeu_epi16
// CHECK: @llvm.x86.avx512.mask.pmov.dw.mem.128
return _mm_mask_cvtepi32_storeu_epi16(__P, __M, __A);
}
__m128i test_mm256_cvtepi32_epi16(__m256i __A) {
- // CHECK-LABEL: @test_mm256_cvtepi32_epi16
+ // CHECK-LABEL: test_mm256_cvtepi32_epi16
// CHECK: trunc <8 x i32> %{{.*}} to <8 x i16>
return _mm256_cvtepi32_epi16(__A);
}
__m128i test_mm256_mask_cvtepi32_epi16(__m128i __O, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepi32_epi16
+ // CHECK-LABEL: test_mm256_mask_cvtepi32_epi16
// CHECK: @llvm.x86.avx512.mask.pmov.dw.256
return _mm256_mask_cvtepi32_epi16(__O, __M, __A);
}
__m128i test_mm256_maskz_cvtepi32_epi16(__mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepi32_epi16
+ // CHECK-LABEL: test_mm256_maskz_cvtepi32_epi16
// CHECK: @llvm.x86.avx512.mask.pmov.dw.256
return _mm256_maskz_cvtepi32_epi16(__M, __A);
}
void test_mm256_mask_cvtepi32_storeu_epi16(void * __P, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepi32_storeu_epi16
+ // CHECK-LABEL: test_mm256_mask_cvtepi32_storeu_epi16
// CHECK: @llvm.x86.avx512.mask.pmov.dw.mem.256
return _mm256_mask_cvtepi32_storeu_epi16(__P, __M, __A);
}
__m128i test_mm_cvtepi64_epi8(__m128i __A) {
- // CHECK-LABEL: @test_mm_cvtepi64_epi8
+ // CHECK-LABEL: test_mm_cvtepi64_epi8
// CHECK: trunc <2 x i64> %{{.*}} to <2 x i8>
// CHECK: shufflevector <2 x i8> %{{.*}}, <2 x i8> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
return _mm_cvtepi64_epi8(__A);
}
__m128i test_mm_mask_cvtepi64_epi8(__m128i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepi64_epi8
+ // CHECK-LABEL: test_mm_mask_cvtepi64_epi8
// CHECK: @llvm.x86.avx512.mask.pmov.qb.128
return _mm_mask_cvtepi64_epi8(__O, __M, __A);
}
__m128i test_mm_maskz_cvtepi64_epi8(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtepi64_epi8
+ // CHECK-LABEL: test_mm_maskz_cvtepi64_epi8
// CHECK: @llvm.x86.avx512.mask.pmov.qb.128
return _mm_maskz_cvtepi64_epi8(__M, __A);
}
void test_mm_mask_cvtepi64_storeu_epi8(void * __P, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepi64_storeu_epi8
+ // CHECK-LABEL: test_mm_mask_cvtepi64_storeu_epi8
// CHECK: @llvm.x86.avx512.mask.pmov.qb.mem.128
return _mm_mask_cvtepi64_storeu_epi8(__P, __M, __A);
}
__m128i test_mm256_cvtepi64_epi8(__m256i __A) {
- // CHECK-LABEL: @test_mm256_cvtepi64_epi8
+ // CHECK-LABEL: test_mm256_cvtepi64_epi8
// CHECK: trunc <4 x i64> %{{.*}} to <4 x i8>
// CHECK: shufflevector <4 x i8> %{{.*}}, <4 x i8> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
return _mm256_cvtepi64_epi8(__A);
}
__m128i test_mm256_mask_cvtepi64_epi8(__m128i __O, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepi64_epi8
+ // CHECK-LABEL: test_mm256_mask_cvtepi64_epi8
// CHECK: @llvm.x86.avx512.mask.pmov.qb.256
return _mm256_mask_cvtepi64_epi8(__O, __M, __A);
}
__m128i test_mm256_maskz_cvtepi64_epi8(__mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepi64_epi8
+ // CHECK-LABEL: test_mm256_maskz_cvtepi64_epi8
// CHECK: @llvm.x86.avx512.mask.pmov.qb.256
return _mm256_maskz_cvtepi64_epi8(__M, __A);
}
void test_mm256_mask_cvtepi64_storeu_epi8(void * __P, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepi64_storeu_epi8
+ // CHECK-LABEL: test_mm256_mask_cvtepi64_storeu_epi8
// CHECK: @llvm.x86.avx512.mask.pmov.qb.mem.256
return _mm256_mask_cvtepi64_storeu_epi8(__P, __M, __A);
}
__m128i test_mm_cvtepi64_epi32(__m128i __A) {
- // CHECK-LABEL: @test_mm_cvtepi64_epi32
+ // CHECK-LABEL: test_mm_cvtepi64_epi32
// CHECK: trunc <2 x i64> %{{.*}} to <2 x i32>
// CHECK: shufflevector <2 x i32> %{{.*}}, <2 x i32> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
return _mm_cvtepi64_epi32(__A);
}
__m128i test_mm_mask_cvtepi64_epi32(__m128i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepi64_epi32
+ // CHECK-LABEL: test_mm_mask_cvtepi64_epi32
// CHECK: @llvm.x86.avx512.mask.pmov.qd.128
return _mm_mask_cvtepi64_epi32(__O, __M, __A);
}
__m128i test_mm_maskz_cvtepi64_epi32(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtepi64_epi32
+ // CHECK-LABEL: test_mm_maskz_cvtepi64_epi32
// CHECK: @llvm.x86.avx512.mask.pmov.qd.128
return _mm_maskz_cvtepi64_epi32(__M, __A);
}
void test_mm_mask_cvtepi64_storeu_epi32(void * __P, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepi64_storeu_epi32
+ // CHECK-LABEL: test_mm_mask_cvtepi64_storeu_epi32
// CHECK: @llvm.x86.avx512.mask.pmov.qd.mem.128
return _mm_mask_cvtepi64_storeu_epi32(__P, __M, __A);
}
__m128i test_mm256_cvtepi64_epi32(__m256i __A) {
- // CHECK-LABEL: @test_mm256_cvtepi64_epi32
+ // CHECK-LABEL: test_mm256_cvtepi64_epi32
// CHECK: trunc <4 x i64> %{{.*}} to <4 x i32>
return _mm256_cvtepi64_epi32(__A);
}
__m128i test_mm256_mask_cvtepi64_epi32(__m128i __O, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepi64_epi32
+ // CHECK-LABEL: test_mm256_mask_cvtepi64_epi32
// CHECK: trunc <4 x i64> %{{.*}} to <4 x i32>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm256_mask_cvtepi64_epi32(__O, __M, __A);
}
__m128i test_mm256_maskz_cvtepi64_epi32(__mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepi64_epi32
+ // CHECK-LABEL: test_mm256_maskz_cvtepi64_epi32
// CHECK: trunc <4 x i64> %{{.*}} to <4 x i32>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm256_maskz_cvtepi64_epi32(__M, __A);
}
void test_mm256_mask_cvtepi64_storeu_epi32(void * __P, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepi64_storeu_epi32
+ // CHECK-LABEL: test_mm256_mask_cvtepi64_storeu_epi32
// CHECK: @llvm.x86.avx512.mask.pmov.qd.mem.256
return _mm256_mask_cvtepi64_storeu_epi32(__P, __M, __A);
}
__m128i test_mm_cvtepi64_epi16(__m128i __A) {
- // CHECK-LABEL: @test_mm_cvtepi64_epi16
+ // CHECK-LABEL: test_mm_cvtepi64_epi16
// CHECK: trunc <2 x i64> %{{.*}} to <2 x i16>
// CHECK: shufflevector <2 x i16> %{{.*}}, <2 x i16> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 3, i32 3, i32 3, i32 3>
return _mm_cvtepi64_epi16(__A);
}
__m128i test_mm_mask_cvtepi64_epi16(__m128i __O, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepi64_epi16
+ // CHECK-LABEL: test_mm_mask_cvtepi64_epi16
// CHECK: @llvm.x86.avx512.mask.pmov.qw.128
return _mm_mask_cvtepi64_epi16(__O, __M, __A);
}
__m128i test_mm_maskz_cvtepi64_epi16(__mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtepi64_epi16
+ // CHECK-LABEL: test_mm_maskz_cvtepi64_epi16
// CHECK: @llvm.x86.avx512.mask.pmov.qw.128
return _mm_maskz_cvtepi64_epi16(__M, __A);
}
void test_mm_mask_cvtepi64_storeu_epi16(void * __P, __mmask8 __M, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtepi64_storeu_epi16
+ // CHECK-LABEL: test_mm_mask_cvtepi64_storeu_epi16
// CHECK: @llvm.x86.avx512.mask.pmov.qw.mem.128
return _mm_mask_cvtepi64_storeu_epi16(__P, __M, __A);
}
__m128i test_mm256_cvtepi64_epi16(__m256i __A) {
- // CHECK-LABEL: @test_mm256_cvtepi64_epi16
+ // CHECK-LABEL: test_mm256_cvtepi64_epi16
// CHECK: trunc <4 x i64> %{{.*}} to <4 x i16>
// CHECK: shufflevector <4 x i16> %{{.*}}, <4 x i16> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
return _mm256_cvtepi64_epi16(__A);
}
__m128i test_mm256_mask_cvtepi64_epi16(__m128i __O, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepi64_epi16
+ // CHECK-LABEL: test_mm256_mask_cvtepi64_epi16
// CHECK: @llvm.x86.avx512.mask.pmov.qw.256
return _mm256_mask_cvtepi64_epi16(__O, __M, __A);
}
__m128i test_mm256_maskz_cvtepi64_epi16(__mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtepi64_epi16
+ // CHECK-LABEL: test_mm256_maskz_cvtepi64_epi16
// CHECK: @llvm.x86.avx512.mask.pmov.qw.256
return _mm256_maskz_cvtepi64_epi16(__M, __A);
}
void test_mm256_mask_cvtepi64_storeu_epi16(void * __P, __mmask8 __M, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtepi64_storeu_epi16
+ // CHECK-LABEL: test_mm256_mask_cvtepi64_storeu_epi16
// CHECK: @llvm.x86.avx512.mask.pmov.qw.mem.256
return _mm256_mask_cvtepi64_storeu_epi16(__P, __M, __A);
}
__m128 test_mm256_extractf32x4_ps(__m256 __A) {
- // CHECK-LABEL: @test_mm256_extractf32x4_ps
+ // CHECK-LABEL: test_mm256_extractf32x4_ps
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
return _mm256_extractf32x4_ps(__A, 1);
}
__m128 test_mm256_mask_extractf32x4_ps(__m128 __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_extractf32x4_ps
+ // CHECK-LABEL: test_mm256_mask_extractf32x4_ps
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm256_mask_extractf32x4_ps(__W, __U, __A, 1);
}
__m128 test_mm256_maskz_extractf32x4_ps(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_maskz_extractf32x4_ps
+ // CHECK-LABEL: test_mm256_maskz_extractf32x4_ps
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm256_maskz_extractf32x4_ps(__U, __A, 1);
}
__m128i test_mm256_extracti32x4_epi32(__m256i __A) {
- // CHECK-LABEL: @test_mm256_extracti32x4_epi32
+ // CHECK-LABEL: test_mm256_extracti32x4_epi32
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
return _mm256_extracti32x4_epi32(__A, 1);
}
__m128i test_mm256_mask_extracti32x4_epi32(__m128i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_extracti32x4_epi32
+ // CHECK-LABEL: test_mm256_mask_extracti32x4_epi32
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm256_mask_extracti32x4_epi32(__W, __U, __A, 1);
}
__m128i test_mm256_maskz_extracti32x4_epi32(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_extracti32x4_epi32
+ // CHECK-LABEL: test_mm256_maskz_extracti32x4_epi32
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm256_maskz_extracti32x4_epi32(__U, __A, 1);
}
__m256 test_mm256_insertf32x4(__m256 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm256_insertf32x4
+ // CHECK-LABEL: test_mm256_insertf32x4
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
return _mm256_insertf32x4(__A, __B, 1);
}
__m256 test_mm256_mask_insertf32x4(__m256 __W, __mmask8 __U, __m256 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm256_mask_insertf32x4
+ // CHECK-LABEL: test_mm256_mask_insertf32x4
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_insertf32x4(__W, __U, __A, __B, 1);
}
__m256 test_mm256_maskz_insertf32x4(__mmask8 __U, __m256 __A, __m128 __B) {
- // CHECK-LABEL: @test_mm256_maskz_insertf32x4
+ // CHECK-LABEL: test_mm256_maskz_insertf32x4
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_insertf32x4(__U, __A, __B, 1);
}
__m256i test_mm256_inserti32x4(__m256i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm256_inserti32x4
+ // CHECK-LABEL: test_mm256_inserti32x4
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
return _mm256_inserti32x4(__A, __B, 1);
}
__m256i test_mm256_mask_inserti32x4(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm256_mask_inserti32x4
+ // CHECK-LABEL: test_mm256_mask_inserti32x4
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_inserti32x4(__W, __U, __A, __B, 1);
}
__m256i test_mm256_maskz_inserti32x4(__mmask8 __U, __m256i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm256_maskz_inserti32x4
+ // CHECK-LABEL: test_mm256_maskz_inserti32x4
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_inserti32x4(__U, __A, __B, 1);
}
__m128d test_mm_getmant_pd(__m128d __A) {
- // CHECK-LABEL: @test_mm_getmant_pd
+ // CHECK-LABEL: test_mm_getmant_pd
// CHECK: @llvm.x86.avx512.mask.getmant.pd.128
return _mm_getmant_pd(__A,_MM_MANT_NORM_p5_2, _MM_MANT_SIGN_nan);
}
__m128d test_mm_mask_getmant_pd(__m128d __W, __mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_getmant_pd
+ // CHECK-LABEL: test_mm_mask_getmant_pd
// CHECK: @llvm.x86.avx512.mask.getmant.pd.128
return _mm_mask_getmant_pd(__W, __U, __A,_MM_MANT_NORM_p5_2, _MM_MANT_SIGN_nan);
}
__m128d test_mm_maskz_getmant_pd(__mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_maskz_getmant_pd
+ // CHECK-LABEL: test_mm_maskz_getmant_pd
// CHECK: @llvm.x86.avx512.mask.getmant.pd.128
return _mm_maskz_getmant_pd(__U, __A,_MM_MANT_NORM_p5_2, _MM_MANT_SIGN_nan);
}
__m256d test_mm256_getmant_pd(__m256d __A) {
- // CHECK-LABEL: @test_mm256_getmant_pd
+ // CHECK-LABEL: test_mm256_getmant_pd
// CHECK: @llvm.x86.avx512.mask.getmant.pd.256
return _mm256_getmant_pd(__A,_MM_MANT_NORM_p5_2, _MM_MANT_SIGN_nan);
}
__m256d test_mm256_mask_getmant_pd(__m256d __W, __mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_mask_getmant_pd
+ // CHECK-LABEL: test_mm256_mask_getmant_pd
// CHECK: @llvm.x86.avx512.mask.getmant.pd.256
return _mm256_mask_getmant_pd(__W, __U, __A,_MM_MANT_NORM_p5_2, _MM_MANT_SIGN_nan);
}
__m256d test_mm256_maskz_getmant_pd(__mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_maskz_getmant_pd
+ // CHECK-LABEL: test_mm256_maskz_getmant_pd
// CHECK: @llvm.x86.avx512.mask.getmant.pd.256
return _mm256_maskz_getmant_pd(__U, __A,_MM_MANT_NORM_p5_2, _MM_MANT_SIGN_nan);
}
__m128 test_mm_getmant_ps(__m128 __A) {
- // CHECK-LABEL: @test_mm_getmant_ps
+ // CHECK-LABEL: test_mm_getmant_ps
// CHECK: @llvm.x86.avx512.mask.getmant.ps.128
return _mm_getmant_ps(__A,_MM_MANT_NORM_p5_2, _MM_MANT_SIGN_nan);
}
__m128 test_mm_mask_getmant_ps(__m128 __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_getmant_ps
+ // CHECK-LABEL: test_mm_mask_getmant_ps
// CHECK: @llvm.x86.avx512.mask.getmant.ps.128
return _mm_mask_getmant_ps(__W, __U, __A,_MM_MANT_NORM_p5_2, _MM_MANT_SIGN_nan);
}
__m128 test_mm_maskz_getmant_ps(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_maskz_getmant_ps
+ // CHECK-LABEL: test_mm_maskz_getmant_ps
// CHECK: @llvm.x86.avx512.mask.getmant.ps.128
return _mm_maskz_getmant_ps(__U, __A,_MM_MANT_NORM_p5_2, _MM_MANT_SIGN_nan);
}
__m256 test_mm256_getmant_ps(__m256 __A) {
- // CHECK-LABEL: @test_mm256_getmant_ps
+ // CHECK-LABEL: test_mm256_getmant_ps
// CHECK: @llvm.x86.avx512.mask.getmant.ps.256
return _mm256_getmant_ps(__A,_MM_MANT_NORM_p5_2, _MM_MANT_SIGN_nan);
}
__m256 test_mm256_mask_getmant_ps(__m256 __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_getmant_ps
+ // CHECK-LABEL: test_mm256_mask_getmant_ps
// CHECK: @llvm.x86.avx512.mask.getmant.ps.256
return _mm256_mask_getmant_ps(__W, __U, __A,_MM_MANT_NORM_p5_2, _MM_MANT_SIGN_nan);
}
__m256 test_mm256_maskz_getmant_ps(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_maskz_getmant_ps
+ // CHECK-LABEL: test_mm256_maskz_getmant_ps
// CHECK: @llvm.x86.avx512.mask.getmant.ps.256
return _mm256_maskz_getmant_ps(__U, __A,_MM_MANT_NORM_p5_2, _MM_MANT_SIGN_nan);
}
__m128d test_mm_mmask_i64gather_pd(__m128d __v1_old, __mmask8 __mask, __m128i __index, void const *__addr) {
- // CHECK-LABEL: @test_mm_mmask_i64gather_pd
+ // CHECK-LABEL: test_mm_mmask_i64gather_pd
// CHECK: @llvm.x86.avx512.mask.gather3div2.df
return _mm_mmask_i64gather_pd(__v1_old, __mask, __index, __addr, 2);
}
__m128i test_mm_mmask_i64gather_epi64(__m128i __v1_old, __mmask8 __mask, __m128i __index, void const *__addr) {
- // CHECK-LABEL: @test_mm_mmask_i64gather_epi64
+ // CHECK-LABEL: test_mm_mmask_i64gather_epi64
// CHECK: @llvm.x86.avx512.mask.gather3div2.di
return _mm_mmask_i64gather_epi64(__v1_old, __mask, __index, __addr, 2);
}
__m256d test_mm256_mmask_i64gather_pd(__m256d __v1_old, __mmask8 __mask, __m256i __index, void const *__addr) {
- // CHECK-LABEL: @test_mm256_mmask_i64gather_pd
+ // CHECK-LABEL: test_mm256_mmask_i64gather_pd
// CHECK: @llvm.x86.avx512.mask.gather3div4.df
return _mm256_mmask_i64gather_pd(__v1_old, __mask, __index, __addr, 2);
}
__m256i test_mm256_mmask_i64gather_epi64(__m256i __v1_old, __mmask8 __mask, __m256i __index, void const *__addr) {
- // CHECK-LABEL: @test_mm256_mmask_i64gather_epi64
+ // CHECK-LABEL: test_mm256_mmask_i64gather_epi64
// CHECK: @llvm.x86.avx512.mask.gather3div4.di
return _mm256_mmask_i64gather_epi64(__v1_old, __mask, __index, __addr, 2);
}
__m128 test_mm_mmask_i64gather_ps(__m128 __v1_old, __mmask8 __mask, __m128i __index, void const *__addr) {
- // CHECK-LABEL: @test_mm_mmask_i64gather_ps
+ // CHECK-LABEL: test_mm_mmask_i64gather_ps
// CHECK: @llvm.x86.avx512.mask.gather3div4.sf
return _mm_mmask_i64gather_ps(__v1_old, __mask, __index, __addr, 2);
}
__m128i test_mm_mmask_i64gather_epi32(__m128i __v1_old, __mmask8 __mask, __m128i __index, void const *__addr) {
- // CHECK-LABEL: @test_mm_mmask_i64gather_epi32
+ // CHECK-LABEL: test_mm_mmask_i64gather_epi32
// CHECK: @llvm.x86.avx512.mask.gather3div4.si
return _mm_mmask_i64gather_epi32(__v1_old, __mask, __index, __addr, 2);
}
__m128 test_mm256_mmask_i64gather_ps(__m128 __v1_old, __mmask8 __mask, __m256i __index, void const *__addr) {
- // CHECK-LABEL: @test_mm256_mmask_i64gather_ps
+ // CHECK-LABEL: test_mm256_mmask_i64gather_ps
// CHECK: @llvm.x86.avx512.mask.gather3div8.sf
return _mm256_mmask_i64gather_ps(__v1_old, __mask, __index, __addr, 2);
}
__m128i test_mm256_mmask_i64gather_epi32(__m128i __v1_old, __mmask8 __mask, __m256i __index, void const *__addr) {
- // CHECK-LABEL: @test_mm256_mmask_i64gather_epi32
+ // CHECK-LABEL: test_mm256_mmask_i64gather_epi32
// CHECK: @llvm.x86.avx512.mask.gather3div8.si
return _mm256_mmask_i64gather_epi32(__v1_old, __mask, __index, __addr, 2);
}
__m128d test_mm_mask_i32gather_pd(__m128d __v1_old, __mmask8 __mask, __m128i __index, void const *__addr) {
- // CHECK-LABEL: @test_mm_mask_i32gather_pd
+ // CHECK-LABEL: test_mm_mask_i32gather_pd
// CHECK: @llvm.x86.avx512.mask.gather3siv2.df
return _mm_mmask_i32gather_pd(__v1_old, __mask, __index, __addr, 2);
}
__m128i test_mm_mask_i32gather_epi64(__m128i __v1_old, __mmask8 __mask, __m128i __index, void const *__addr) {
- // CHECK-LABEL: @test_mm_mask_i32gather_epi64
+ // CHECK-LABEL: test_mm_mask_i32gather_epi64
// CHECK: @llvm.x86.avx512.mask.gather3siv2.di
return _mm_mmask_i32gather_epi64(__v1_old, __mask, __index, __addr, 2);
}
__m256d test_mm256_mask_i32gather_pd(__m256d __v1_old, __mmask8 __mask, __m128i __index, void const *__addr) {
- // CHECK-LABEL: @test_mm256_mask_i32gather_pd
+ // CHECK-LABEL: test_mm256_mask_i32gather_pd
// CHECK: @llvm.x86.avx512.mask.gather3siv4.df
return _mm256_mmask_i32gather_pd(__v1_old, __mask, __index, __addr, 2);
}
__m256i test_mm256_mask_i32gather_epi64(__m256i __v1_old, __mmask8 __mask, __m128i __index, void const *__addr) {
- // CHECK-LABEL: @test_mm256_mask_i32gather_epi64
+ // CHECK-LABEL: test_mm256_mask_i32gather_epi64
// CHECK: @llvm.x86.avx512.mask.gather3siv4.di
return _mm256_mmask_i32gather_epi64(__v1_old, __mask, __index, __addr, 2);
}
__m128 test_mm_mask_i32gather_ps(__m128 __v1_old, __mmask8 __mask, __m128i __index, void const *__addr) {
- // CHECK-LABEL: @test_mm_mask_i32gather_ps
+ // CHECK-LABEL: test_mm_mask_i32gather_ps
// CHECK: @llvm.x86.avx512.mask.gather3siv4.sf
return _mm_mmask_i32gather_ps(__v1_old, __mask, __index, __addr, 2);
}
__m128i test_mm_mask_i32gather_epi32(__m128i __v1_old, __mmask8 __mask, __m128i __index, void const *__addr) {
- // CHECK-LABEL: @test_mm_mask_i32gather_epi32
+ // CHECK-LABEL: test_mm_mask_i32gather_epi32
// CHECK: @llvm.x86.avx512.mask.gather3siv4.si
return _mm_mmask_i32gather_epi32(__v1_old, __mask, __index, __addr, 2);
}
__m256 test_mm256_mask_i32gather_ps(__m256 __v1_old, __mmask8 __mask, __m256i __index, void const *__addr) {
- // CHECK-LABEL: @test_mm256_mask_i32gather_ps
+ // CHECK-LABEL: test_mm256_mask_i32gather_ps
// CHECK: @llvm.x86.avx512.mask.gather3siv8.sf
return _mm256_mmask_i32gather_ps(__v1_old, __mask, __index, __addr, 2);
}
__m256i test_mm256_mask_i32gather_epi32(__m256i __v1_old, __mmask8 __mask, __m256i __index, void const *__addr) {
- // CHECK-LABEL: @test_mm256_mask_i32gather_epi32
+ // CHECK-LABEL: test_mm256_mask_i32gather_epi32
// CHECK: @llvm.x86.avx512.mask.gather3siv8.si
return _mm256_mmask_i32gather_epi32(__v1_old, __mask, __index, __addr, 2);
}
__m256d test_mm256_permutex_pd(__m256d __X) {
- // CHECK-LABEL: @test_mm256_permutex_pd
+ // CHECK-LABEL: test_mm256_permutex_pd
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> poison, <4 x i32> <i32 3, i32 0, i32 0, i32 0>
return _mm256_permutex_pd(__X, 3);
}
__m256d test_mm256_mask_permutex_pd(__m256d __W, __mmask8 __U, __m256d __X) {
- // CHECK-LABEL: @test_mm256_mask_permutex_pd
+ // CHECK-LABEL: test_mm256_mask_permutex_pd
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> poison, <4 x i32> <i32 1, i32 0, i32 0, i32 0>
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_permutex_pd(__W, __U, __X, 1);
}
__m256d test_mm256_maskz_permutex_pd(__mmask8 __U, __m256d __X) {
- // CHECK-LABEL: @test_mm256_maskz_permutex_pd
+ // CHECK-LABEL: test_mm256_maskz_permutex_pd
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> poison, <4 x i32> <i32 1, i32 0, i32 0, i32 0>
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_permutex_pd(__U, __X, 1);
}
__m256i test_mm256_permutex_epi64(__m256i __X) {
- // CHECK-LABEL: @test_mm256_permutex_epi64
+ // CHECK-LABEL: test_mm256_permutex_epi64
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> poison, <4 x i32> <i32 3, i32 0, i32 0, i32 0>
return _mm256_permutex_epi64(__X, 3);
}
__m256i test_mm256_mask_permutex_epi64(__m256i __W, __mmask8 __M, __m256i __X) {
- // CHECK-LABEL: @test_mm256_mask_permutex_epi64
+ // CHECK-LABEL: test_mm256_mask_permutex_epi64
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> poison, <4 x i32> <i32 3, i32 0, i32 0, i32 0>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_permutex_epi64(__W, __M, __X, 3);
}
__m256i test_mm256_maskz_permutex_epi64(__mmask8 __M, __m256i __X) {
- // CHECK-LABEL: @test_mm256_maskz_permutex_epi64
+ // CHECK-LABEL: test_mm256_maskz_permutex_epi64
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> poison, <4 x i32> <i32 3, i32 0, i32 0, i32 0>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_permutex_epi64(__M, __X, 3);
}
__m256d test_mm256_permutexvar_pd(__m256i __X, __m256d __Y) {
- // CHECK-LABEL: @test_mm256_permutexvar_pd
+ // CHECK-LABEL: test_mm256_permutexvar_pd
// CHECK: @llvm.x86.avx512.permvar.df.256
return _mm256_permutexvar_pd(__X, __Y);
}
__m256d test_mm256_mask_permutexvar_pd(__m256d __W, __mmask8 __U, __m256i __X, __m256d __Y) {
- // CHECK-LABEL: @test_mm256_mask_permutexvar_pd
+ // CHECK-LABEL: test_mm256_mask_permutexvar_pd
// CHECK: @llvm.x86.avx512.permvar.df.256
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_permutexvar_pd(__W, __U, __X, __Y);
}
__m256d test_mm256_maskz_permutexvar_pd(__mmask8 __U, __m256i __X, __m256d __Y) {
- // CHECK-LABEL: @test_mm256_maskz_permutexvar_pd
+ // CHECK-LABEL: test_mm256_maskz_permutexvar_pd
// CHECK: @llvm.x86.avx512.permvar.df.256
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_permutexvar_pd(__U, __X, __Y);
}
__m256i test_mm256_maskz_permutexvar_epi64(__mmask8 __M, __m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_maskz_permutexvar_epi64
+ // CHECK-LABEL: test_mm256_maskz_permutexvar_epi64
// CHECK: @llvm.x86.avx512.permvar.di.256
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_permutexvar_epi64(__M, __X, __Y);
}
__m256i test_mm256_mask_permutexvar_epi64(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_mask_permutexvar_epi64
+ // CHECK-LABEL: test_mm256_mask_permutexvar_epi64
// CHECK: @llvm.x86.avx512.permvar.di.256
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_permutexvar_epi64(__W, __M, __X, __Y);
}
__m256 test_mm256_mask_permutexvar_ps(__m256 __W, __mmask8 __U, __m256i __X, __m256 __Y) {
- // CHECK-LABEL: @test_mm256_mask_permutexvar_ps
+ // CHECK-LABEL: test_mm256_mask_permutexvar_ps
// CHECK: @llvm.x86.avx2.permps
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_permutexvar_ps(__W, __U, __X, __Y);
}
__m256 test_mm256_maskz_permutexvar_ps(__mmask8 __U, __m256i __X, __m256 __Y) {
- // CHECK-LABEL: @test_mm256_maskz_permutexvar_ps
+ // CHECK-LABEL: test_mm256_maskz_permutexvar_ps
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_permutexvar_ps(__U, __X, __Y);
}
__m256 test_mm256_permutexvar_ps(__m256i __X, __m256 __Y) {
- // CHECK-LABEL: @test_mm256_permutexvar_ps
+ // CHECK-LABEL: test_mm256_permutexvar_ps
// CHECK: @llvm.x86.avx2.permps
return _mm256_permutexvar_ps( __X, __Y);
}
__m256i test_mm256_maskz_permutexvar_epi32(__mmask8 __M, __m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_maskz_permutexvar_epi32
+ // CHECK-LABEL: test_mm256_maskz_permutexvar_epi32
// CHECK: @llvm.x86.avx2.permd
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_permutexvar_epi32(__M, __X, __Y);
}
__m256i test_mm256_permutexvar_epi32(__m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_permutexvar_epi32
+ // CHECK-LABEL: test_mm256_permutexvar_epi32
// CHECK: @llvm.x86.avx2.permd
return _mm256_permutexvar_epi32(__X, __Y);
}
__m256i test_mm256_mask_permutexvar_epi32(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y) {
- // CHECK-LABEL: @test_mm256_mask_permutexvar_epi32
+ // CHECK-LABEL: test_mm256_mask_permutexvar_epi32
// CHECK: @llvm.x86.avx2.permd
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_permutexvar_epi32(__W, __M, __X, __Y);
}
__m128i test_mm_alignr_epi32(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_alignr_epi32
+ // CHECK-LABEL: test_mm_alignr_epi32
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
return _mm_alignr_epi32(__A, __B, 1);
}
__m128i test_mm_mask_alignr_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_alignr_epi32
+ // CHECK-LABEL: test_mm_mask_alignr_epi32
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_alignr_epi32(__W, __U, __A, __B, 5);
}
__m128i test_mm_maskz_alignr_epi32(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_alignr_epi32
+ // CHECK-LABEL: test_mm_maskz_alignr_epi32
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_alignr_epi32(__U, __A, __B, 1);
}
__m256i test_mm256_alignr_epi32(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_alignr_epi32
+ // CHECK-LABEL: test_mm256_alignr_epi32
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
return _mm256_alignr_epi32(__A, __B, 1);
}
__m256i test_mm256_mask_alignr_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_alignr_epi32
+ // CHECK-LABEL: test_mm256_mask_alignr_epi32
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_alignr_epi32(__W, __U, __A, __B, 9);
}
__m256i test_mm256_maskz_alignr_epi32(__mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_alignr_epi32
+ // CHECK-LABEL: test_mm256_maskz_alignr_epi32
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_alignr_epi32(__U, __A, __B, 1);
}
__m128i test_mm_alignr_epi64(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_alignr_epi64
+ // CHECK-LABEL: test_mm_alignr_epi64
// CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i32> <i32 1, i32 2>
return _mm_alignr_epi64(__A, __B, 1);
}
__m128i test_mm_mask_alignr_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_alignr_epi64
+ // CHECK-LABEL: test_mm_mask_alignr_epi64
// CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i32> <i32 1, i32 2>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_alignr_epi64(__W, __U, __A, __B, 3);
}
__m128i test_mm_maskz_alignr_epi64(__mmask8 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_alignr_epi64
+ // CHECK-LABEL: test_mm_maskz_alignr_epi64
// CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i32> <i32 1, i32 2>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_alignr_epi64(__U, __A, __B, 1);
}
__m256i test_mm256_alignr_epi64(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_alignr_epi64
+ // CHECK-LABEL: test_mm256_alignr_epi64
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
return _mm256_alignr_epi64(__A, __B, 1);
}
__m256i test_mm256_mask_alignr_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_alignr_epi64
+ // CHECK-LABEL: test_mm256_mask_alignr_epi64
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_alignr_epi64(__W, __U, __A, __B, 5);
}
__m256i test_mm256_maskz_alignr_epi64(__mmask8 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_alignr_epi64
+ // CHECK-LABEL: test_mm256_maskz_alignr_epi64
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_alignr_epi64(__U, __A, __B, 1);
}
__m128 test_mm_mask_movehdup_ps(__m128 __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_movehdup_ps
+ // CHECK-LABEL: test_mm_mask_movehdup_ps
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
// CHECK: select <4 x i1> %{{.*}} <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask_movehdup_ps(__W, __U, __A);
}
__m128 test_mm_maskz_movehdup_ps(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_maskz_movehdup_ps
+ // CHECK-LABEL: test_mm_maskz_movehdup_ps
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
// CHECK: select <4 x i1> %{{.*}} <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_maskz_movehdup_ps(__U, __A);
}
__m256 test_mm256_mask_movehdup_ps(__m256 __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_movehdup_ps
+ // CHECK-LABEL: test_mm256_mask_movehdup_ps
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
// CHECK: select <8 x i1> %{{.*}} <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_movehdup_ps(__W, __U, __A);
}
__m256 test_mm256_maskz_movehdup_ps(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_maskz_movehdup_ps
+ // CHECK-LABEL: test_mm256_maskz_movehdup_ps
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
// CHECK: select <8 x i1> %{{.*}} <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_movehdup_ps(__U, __A);
}
__m128 test_mm_mask_moveldup_ps(__m128 __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_moveldup_ps
+ // CHECK-LABEL: test_mm_mask_moveldup_ps
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
// CHECK: select <4 x i1> %{{.*}} <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask_moveldup_ps(__W, __U, __A);
}
__m128 test_mm_maskz_moveldup_ps(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_maskz_moveldup_ps
+ // CHECK-LABEL: test_mm_maskz_moveldup_ps
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
// CHECK: select <4 x i1> %{{.*}} <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_maskz_moveldup_ps(__U, __A);
}
__m256 test_mm256_mask_moveldup_ps(__m256 __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_moveldup_ps
+ // CHECK-LABEL: test_mm256_mask_moveldup_ps
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
// CHECK: select <8 x i1> %{{.*}} <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_moveldup_ps(__W, __U, __A);
}
__m256 test_mm256_maskz_moveldup_ps(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_maskz_moveldup_ps
+ // CHECK-LABEL: test_mm256_maskz_moveldup_ps
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
// CHECK: select <8 x i1> %{{.*}} <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_moveldup_ps(__U, __A);
}
__m128i test_mm_mask_shuffle_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_shuffle_epi32
+ // CHECK-LABEL: test_mm_mask_shuffle_epi32
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> poison, <4 x i32> <i32 1, i32 0, i32 0, i32 0>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_shuffle_epi32(__W, __U, __A, 1);
}
__m128i test_mm_maskz_shuffle_epi32(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_shuffle_epi32
+ // CHECK-LABEL: test_mm_maskz_shuffle_epi32
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> poison, <4 x i32> <i32 2, i32 0, i32 0, i32 0>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_shuffle_epi32(__U, __A, 2);
}
__m256i test_mm256_mask_shuffle_epi32(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_shuffle_epi32
+ // CHECK-LABEL: test_mm256_mask_shuffle_epi32
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> poison, <8 x i32> <i32 2, i32 0, i32 0, i32 0, i32 6, i32 4, i32 4, i32 4>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_shuffle_epi32(__W, __U, __A, 2);
}
__m256i test_mm256_maskz_shuffle_epi32(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_shuffle_epi32
+ // CHECK-LABEL: test_mm256_maskz_shuffle_epi32
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> poison, <8 x i32> <i32 2, i32 0, i32 0, i32 0, i32 6, i32 4, i32 4, i32 4>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_shuffle_epi32(__U, __A, 2);
}
__m128d test_mm_mask_mov_pd(__m128d __W, __mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_mask_mov_pd
+ // CHECK-LABEL: test_mm_mask_mov_pd
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_mask_mov_pd(__W, __U, __A);
}
__m128d test_mm_maskz_mov_pd(__mmask8 __U, __m128d __A) {
- // CHECK-LABEL: @test_mm_maskz_mov_pd
+ // CHECK-LABEL: test_mm_maskz_mov_pd
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_maskz_mov_pd(__U, __A);
}
__m256d test_mm256_mask_mov_pd(__m256d __W, __mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_mask_mov_pd
+ // CHECK-LABEL: test_mm256_mask_mov_pd
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_mov_pd(__W, __U, __A);
}
__m256d test_mm256_maskz_mov_pd(__mmask8 __U, __m256d __A) {
- // CHECK-LABEL: @test_mm256_maskz_mov_pd
+ // CHECK-LABEL: test_mm256_maskz_mov_pd
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_mov_pd(__U, __A);
}
__m128 test_mm_mask_mov_ps(__m128 __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_mov_ps
+ // CHECK-LABEL: test_mm_mask_mov_ps
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask_mov_ps(__W, __U, __A);
}
__m128 test_mm_maskz_mov_ps(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_maskz_mov_ps
+ // CHECK-LABEL: test_mm_maskz_mov_ps
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_maskz_mov_ps(__U, __A);
}
__m256 test_mm256_mask_mov_ps(__m256 __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_mov_ps
+ // CHECK-LABEL: test_mm256_mask_mov_ps
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_mov_ps(__W, __U, __A);
}
__m256 test_mm256_maskz_mov_ps(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_maskz_mov_ps
+ // CHECK-LABEL: test_mm256_maskz_mov_ps
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_mov_ps(__U, __A);
}
__m128 test_mm_mask_cvtph_ps(__m128 __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_cvtph_ps
+ // CHECK-LABEL: test_mm_mask_cvtph_ps
// CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: fpext <4 x half> %{{.*}} to <4 x float>
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
@@ -9565,7 +9566,7 @@ __m128 test_mm_mask_cvtph_ps(__m128 __W, __mmask8 __U, __m128i __A) {
}
__m128 test_mm_maskz_cvtph_ps(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtph_ps
+ // CHECK-LABEL: test_mm_maskz_cvtph_ps
// CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: fpext <4 x half> %{{.*}} to <4 x float>
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
@@ -9573,167 +9574,167 @@ __m128 test_mm_maskz_cvtph_ps(__mmask8 __U, __m128i __A) {
}
__m256 test_mm256_mask_cvtph_ps(__m256 __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtph_ps
+ // CHECK-LABEL: test_mm256_mask_cvtph_ps
// CHECK: fpext <8 x half> %{{.*}} to <8 x float>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_cvtph_ps(__W, __U, __A);
}
__m256 test_mm256_maskz_cvtph_ps(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtph_ps
+ // CHECK-LABEL: test_mm256_maskz_cvtph_ps
// CHECK: fpext <8 x half> %{{.*}} to <8 x float>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_cvtph_ps(__U, __A);
}
__m128i test_mm_mask_cvtps_ph(__m128i __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_cvtps_ph
+ // CHECK-LABEL: test_mm_mask_cvtps_ph
// CHECK: @llvm.x86.avx512.mask.vcvtps2ph.128
return _mm_mask_cvtps_ph(__W, __U, __A, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m128i test_mm_maskz_cvtps_ph(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_maskz_cvtps_ph
+ // CHECK-LABEL: test_mm_maskz_cvtps_ph
// CHECK: @llvm.x86.avx512.mask.vcvtps2ph.128
return _mm_maskz_cvtps_ph(__U, __A, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m128i test_mm256_mask_cvtps_ph(__m128i __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_cvtps_ph
+ // CHECK-LABEL: test_mm256_mask_cvtps_ph
// CHECK: @llvm.x86.avx512.mask.vcvtps2ph.256
return _mm256_mask_cvtps_ph(__W, __U, __A, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m128i test_mm256_maskz_cvtps_ph(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvtps_ph
+ // CHECK-LABEL: test_mm256_maskz_cvtps_ph
// CHECK: @llvm.x86.avx512.mask.vcvtps2ph.256
return _mm256_maskz_cvtps_ph(__U, __A, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
__m128i test_mm_mask_cvt_roundps_ph(__m128i __W, __mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_mask_cvt_roundps_ph
+ // CHECK-LABEL: test_mm_mask_cvt_roundps_ph
// CHECK: @llvm.x86.avx512.mask.vcvtps2ph.128
return _mm_mask_cvt_roundps_ph(__W, __U, __A, _MM_FROUND_TO_ZERO);
}
__m128i test_mm_maskz_cvt_roundps_ph(__mmask8 __U, __m128 __A) {
- // CHECK-LABEL: @test_mm_maskz_cvt_roundps_ph
+ // CHECK-LABEL: test_mm_maskz_cvt_roundps_ph
// CHECK: @llvm.x86.avx512.mask.vcvtps2ph.128
return _mm_maskz_cvt_roundps_ph(__U, __A, _MM_FROUND_TO_ZERO);
}
__m128i test_mm256_mask_cvt_roundps_ph(__m128i __W, __mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_mask_cvt_roundps_ph
+ // CHECK-LABEL: test_mm256_mask_cvt_roundps_ph
// CHECK: @llvm.x86.avx512.mask.vcvtps2ph.256
return _mm256_mask_cvt_roundps_ph(__W, __U, __A, _MM_FROUND_TO_ZERO);
}
__m128i test_mm256_maskz_cvt_roundps_ph(__mmask8 __U, __m256 __A) {
- // CHECK-LABEL: @test_mm256_maskz_cvt_roundps_ph
+ // CHECK-LABEL: test_mm256_maskz_cvt_roundps_ph
// CHECK: @llvm.x86.avx512.mask.vcvtps2ph.256
return _mm256_maskz_cvt_roundps_ph(__U, __A, _MM_FROUND_TO_ZERO);
}
__mmask8 test_mm_cmpeq_epi32_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpeq_epi32_mask
+ // CHECK-LABEL: test_mm_cmpeq_epi32_mask
// CHECK: icmp eq <4 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmpeq_epi32_mask(__a, __b);
}
__mmask8 test_mm_mask_cmpeq_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpeq_epi32_mask
+ // CHECK-LABEL: test_mm_mask_cmpeq_epi32_mask
// CHECK: icmp eq <4 x i32> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmpeq_epi32_mask(__u, __a, __b);
}
__mmask8 test_mm_mask_cmpeq_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpeq_epi64_mask
+ // CHECK-LABEL: test_mm_mask_cmpeq_epi64_mask
// CHECK: icmp eq <2 x i64> %{{.*}}, %{{.*}}
// CHECK: and <2 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmpeq_epi64_mask(__u, __a, __b);
}
__mmask8 test_mm_cmpeq_epi64_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpeq_epi64_mask
+ // CHECK-LABEL: test_mm_cmpeq_epi64_mask
// CHECK: icmp eq <2 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmpeq_epi64_mask(__a, __b);
}
__mmask8 test_mm_cmpgt_epi32_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpgt_epi32_mask
+ // CHECK-LABEL: test_mm_cmpgt_epi32_mask
// CHECK: icmp sgt <4 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmpgt_epi32_mask(__a, __b);
}
__mmask8 test_mm_mask_cmpgt_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpgt_epi32_mask
+ // CHECK-LABEL: test_mm_mask_cmpgt_epi32_mask
// CHECK: icmp sgt <4 x i32> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmpgt_epi32_mask(__u, __a, __b);
}
__mmask8 test_mm_mask_cmpgt_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_mask_cmpgt_epi64_mask
+ // CHECK-LABEL: test_mm_mask_cmpgt_epi64_mask
// CHECK: icmp sgt <2 x i64> %{{.*}}, %{{.*}}
// CHECK: and <2 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm_mask_cmpgt_epi64_mask(__u, __a, __b);
}
__mmask8 test_mm_cmpgt_epi64_mask(__m128i __a, __m128i __b) {
- // CHECK-LABEL: @test_mm_cmpgt_epi64_mask
+ // CHECK-LABEL: test_mm_cmpgt_epi64_mask
// CHECK: icmp sgt <2 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm_cmpgt_epi64_mask(__a, __b);
}
__mmask8 test_mm256_cmpeq_epi32_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpeq_epi32_mask
+ // CHECK-LABEL: test_mm256_cmpeq_epi32_mask
// CHECK: icmp eq <8 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmpeq_epi32_mask(__a, __b);
}
__mmask8 test_mm256_mask_cmpeq_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpeq_epi32_mask
+ // CHECK-LABEL: test_mm256_mask_cmpeq_epi32_mask
// CHECK: icmp eq <8 x i32> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmpeq_epi32_mask(__u, __a, __b);
}
__mmask8 test_mm256_mask_cmpeq_epi64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpeq_epi64_mask
+ // CHECK-LABEL: test_mm256_mask_cmpeq_epi64_mask
// CHECK: icmp eq <4 x i64> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmpeq_epi64_mask(__u, __a, __b);
}
__mmask8 test_mm256_cmpeq_epi64_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpeq_epi64_mask
+ // CHECK-LABEL: test_mm256_cmpeq_epi64_mask
// CHECK: icmp eq <4 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmpeq_epi64_mask(__a, __b);
}
__mmask8 test_mm256_cmpgt_epi32_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpgt_epi32_mask
+ // CHECK-LABEL: test_mm256_cmpgt_epi32_mask
// CHECK: icmp sgt <8 x i32> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmpgt_epi32_mask(__a, __b);
}
__mmask8 test_mm256_mask_cmpgt_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpgt_epi32_mask
+ // CHECK-LABEL: test_mm256_mask_cmpgt_epi32_mask
// CHECK: icmp sgt <8 x i32> %{{.*}}, %{{.*}}
// CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmpgt_epi32_mask(__u, __a, __b);
}
__mmask8 test_mm256_mask_cmpgt_epi64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_mask_cmpgt_epi64_mask
+ // CHECK-LABEL: test_mm256_mask_cmpgt_epi64_mask
// CHECK: icmp sgt <4 x i64> %{{.*}}, %{{.*}}
// CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_mask_cmpgt_epi64_mask(__u, __a, __b);
}
__mmask8 test_mm256_cmpgt_epi64_mask(__m256i __a, __m256i __b) {
- // CHECK-LABEL: @test_mm256_cmpgt_epi64_mask
+ // CHECK-LABEL: test_mm256_cmpgt_epi64_mask
// CHECK: icmp sgt <4 x i64> %{{.*}}, %{{.*}}
return (__mmask8)_mm256_cmpgt_epi64_mask(__a, __b);
}
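The CHECK-LABEL rewrites above (dropping the leading '@') pair with the RUN-line changes visible in the file diffs below, which also compile each test as C++ and for i386. A plausible reading, sketched with an illustrative mangled name that is not copied from the diff: under the C++ run lines the emitted IR symbol is mangled, so a label anchored on '@test_...' can no longer match, while the bare function name remains a substring of both symbol forms.
// C run line emits:    define ... @test_mm_cmpeq_epi32_mask(<2 x i64> %__a, <2 x i64> %__b)
// C++ run line emits:  define ... @_Z24test_mm_cmpeq_epi32_maskDv2_xS_(...)   (illustrative mangling)
// CHECK-LABEL: test_mm_cmpeq_epi32_mask    <- substring of both, so one label covers every RUN line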
diff --git a/clang/test/CodeGen/X86/avx512vlbitalg-builtins.c b/clang/test/CodeGen/X86/avx512vlbitalg-builtins.c
index 4e65da0..b53410a 100644
--- a/clang/test/CodeGen/X86/avx512vlbitalg-builtins.c
+++ b/clang/test/CodeGen/X86/avx512vlbitalg-builtins.c
@@ -1,105 +1,113 @@
-// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bitalg -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bitalg -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512bitalg -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bitalg -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512bitalg -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
#include <immintrin.h>
+#include "builtin_test_helpers.h"
__m256i test_mm256_popcnt_epi16(__m256i __A) {
- // CHECK-LABEL: @test_mm256_popcnt_epi16
+ // CHECK-LABEL: test_mm256_popcnt_epi16
// CHECK: @llvm.ctpop.v16i16
return _mm256_popcnt_epi16(__A);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_popcnt_epi16((__m256i)(__v16hi){+5, -3, -10, +8, 0, -256, +256, -128, +3, +9, +15, +33, +63, +129, +511, +1025}), 2, 15, 14, 1, 0, 8, 1, 9, 2, 2, 4, 2, 6, 2, 9, 2));
__m256i test_mm256_mask_popcnt_epi16(__m256i __A, __mmask16 __U, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_popcnt_epi16
+ // CHECK-LABEL: test_mm256_mask_popcnt_epi16
// CHECK: @llvm.ctpop.v16i16
// CHECK: select <16 x i1> %{{[0-9]+}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_popcnt_epi16(__A, __U, __B);
}
__m256i test_mm256_maskz_popcnt_epi16(__mmask16 __U, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_popcnt_epi16
+ // CHECK-LABEL: test_mm256_maskz_popcnt_epi16
// CHECK: @llvm.ctpop.v16i16
// CHECK: select <16 x i1> %{{[0-9]+}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_popcnt_epi16(__U, __B);
}
__m128i test_mm_popcnt_epi16(__m128i __A) {
- // CHECK-LABEL: @test_mm_popcnt_epi16
+ // CHECK-LABEL: test_mm_popcnt_epi16
// CHECK: @llvm.ctpop.v8i16
return _mm_popcnt_epi16(__A);
}
+TEST_CONSTEXPR(match_v8hi(_mm_popcnt_epi16((__m128i)(__v8hi){+5, -3, -10, +8, 0, -256, +256, -128}), 2, 15, 14, 1, 0, 8, 1, 9));
__m128i test_mm_mask_popcnt_epi16(__m128i __A, __mmask8 __U, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_popcnt_epi16
+ // CHECK-LABEL: test_mm_mask_popcnt_epi16
// CHECK: @llvm.ctpop.v8i16
// CHECK: select <8 x i1> %{{[0-9]+}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_popcnt_epi16(__A, __U, __B);
}
__m128i test_mm_maskz_popcnt_epi16(__mmask8 __U, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_popcnt_epi16
+ // CHECK-LABEL: test_mm_maskz_popcnt_epi16
// CHECK: @llvm.ctpop.v8i16
// CHECK: select <8 x i1> %{{[0-9]+}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_popcnt_epi16(__U, __B);
}
__m256i test_mm256_popcnt_epi8(__m256i __A) {
- // CHECK-LABEL: @test_mm256_popcnt_epi8
+ // CHECK-LABEL: test_mm256_popcnt_epi8
// CHECK: @llvm.ctpop.v32i8
return _mm256_popcnt_epi8(__A);
}
+TEST_CONSTEXPR(match_v32qi(_mm256_popcnt_epi8((__m256i)(__v32qi){+5, -3, -10, +8, 0, -16, +16, -16, +3, +9, +15, +33, +63, +33, +53, +73, +5, -3, -10, +8, 0, -16, +16, -16, +3, +9, +15, +33, +63, +33, +53, +73}), 2, 7, 6, 1, 0, 4, 1, 4, 2, 2, 4, 2, 6, 2, 4, 3, 2, 7, 6, 1, 0, 4, 1, 4, 2, 2, 4, 2, 6, 2, 4, 3));
__m256i test_mm256_mask_popcnt_epi8(__m256i __A, __mmask32 __U, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_popcnt_epi8
+ // CHECK-LABEL: test_mm256_mask_popcnt_epi8
// CHECK: @llvm.ctpop.v32i8
// CHECK: select <32 x i1> %{{[0-9]+}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_mask_popcnt_epi8(__A, __U, __B);
}
__m256i test_mm256_maskz_popcnt_epi8(__mmask32 __U, __m256i __B) {
- // CHECK-LABEL: @test_mm256_maskz_popcnt_epi8
+ // CHECK-LABEL: test_mm256_maskz_popcnt_epi8
// CHECK: @llvm.ctpop.v32i8
// CHECK: select <32 x i1> %{{[0-9]+}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_maskz_popcnt_epi8(__U, __B);
}
__m128i test_mm_popcnt_epi8(__m128i __A) {
- // CHECK-LABEL: @test_mm_popcnt_epi8
+ // CHECK-LABEL: test_mm_popcnt_epi8
// CHECK: @llvm.ctpop.v16i8
return _mm_popcnt_epi8(__A);
}
+TEST_CONSTEXPR(match_v16qi(_mm_popcnt_epi8((__m128i)(__v16qi){+5, -3, -10, +8, 0, -16, +16, -16, +3, +9, +15, +33, +63, +33, +53, +73}), 2, 7, 6, 1, 0, 4, 1, 4, 2, 2, 4, 2, 6, 2, 4, 3));
__m128i test_mm_mask_popcnt_epi8(__m128i __A, __mmask16 __U, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_popcnt_epi8
+ // CHECK-LABEL: test_mm_mask_popcnt_epi8
// CHECK: @llvm.ctpop.v16i8
// CHECK: select <16 x i1> %{{[0-9]+}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_mask_popcnt_epi8(__A, __U, __B);
}
__m128i test_mm_maskz_popcnt_epi8(__mmask16 __U, __m128i __B) {
- // CHECK-LABEL: @test_mm_maskz_popcnt_epi8
+ // CHECK-LABEL: test_mm_maskz_popcnt_epi8
// CHECK: @llvm.ctpop.v16i8
// CHECK: select <16 x i1> %{{[0-9]+}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_maskz_popcnt_epi8(__U, __B);
}
__mmask32 test_mm256_mask_bitshuffle_epi64_mask(__mmask32 __U, __m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_mask_bitshuffle_epi64_mask
+ // CHECK-LABEL: test_mm256_mask_bitshuffle_epi64_mask
// CHECK: @llvm.x86.avx512.vpshufbitqmb.256
// CHECK: and <32 x i1> %{{.*}}, %{{.*}}
return _mm256_mask_bitshuffle_epi64_mask(__U, __A, __B);
}
__mmask32 test_mm256_bitshuffle_epi64_mask(__m256i __A, __m256i __B) {
- // CHECK-LABEL: @test_mm256_bitshuffle_epi64_mask
+ // CHECK-LABEL: test_mm256_bitshuffle_epi64_mask
// CHECK: @llvm.x86.avx512.vpshufbitqmb.256
return _mm256_bitshuffle_epi64_mask(__A, __B);
}
__mmask16 test_mm_mask_bitshuffle_epi64_mask(__mmask16 __U, __m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_mask_bitshuffle_epi64_mask
+ // CHECK-LABEL: test_mm_mask_bitshuffle_epi64_mask
// CHECK: @llvm.x86.avx512.vpshufbitqmb.128
// CHECK: and <16 x i1> %{{.*}}, %{{.*}}
return _mm_mask_bitshuffle_epi64_mask(__U, __A, __B);
}
__mmask16 test_mm_bitshuffle_epi64_mask(__m128i __A, __m128i __B) {
- // CHECK-LABEL: @test_mm_bitshuffle_epi64_mask
+ // CHECK-LABEL: test_mm_bitshuffle_epi64_mask
// CHECK: @llvm.x86.avx512.vpshufbitqmb.128
return _mm_bitshuffle_epi64_mask(__A, __B);
}
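The TEST_CONSTEXPR lines added above exercise the popcount intrinsics in a constant-expression context; the expected lane values follow directly from the two's-complement bit patterns of the inputs. A few of the 8-bit lanes worked out by hand:
// -3  -> 0xFD = 1111'1101 -> 7 set bits
// -16 -> 0xF0 = 1111'0000 -> 4 set bits
// +53 -> 0x35 = 0011'0101 -> 4 set bits
// +73 -> 0x49 = 0100'1001 -> 3 set bits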
diff --git a/clang/test/CodeGen/X86/avx512vpopcntdq-builtins.c b/clang/test/CodeGen/X86/avx512vpopcntdq-builtins.c
index ca8f5e4..8927ae2 100644
--- a/clang/test/CodeGen/X86/avx512vpopcntdq-builtins.c
+++ b/clang/test/CodeGen/X86/avx512vpopcntdq-builtins.c
@@ -1,45 +1,48 @@
-// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512vpopcntdq -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512vpopcntdq -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512vpopcntdq -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512vpopcntdq -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512vpopcntdq -emit-llvm -o - -Wall -Werror | FileCheck %s
#include <immintrin.h>
#include "builtin_test_helpers.h"
__m512i test_mm512_popcnt_epi64(__m512i __A) {
- // CHECK-LABEL: @test_mm512_popcnt_epi64
+ // CHECK-LABEL: test_mm512_popcnt_epi64
// CHECK: @llvm.ctpop.v8i64
return _mm512_popcnt_epi64(__A);
}
-TEST_CONSTEXPR(match_v8di(_mm512_popcnt_epi64((__m512i)(__v8di){+5, -3, -10, +8, 0, -256, +256, -128}), 2, 31, 30, 1, 0, 24, 1, 25));
+TEST_CONSTEXPR(match_v8di(_mm512_popcnt_epi64((__m512i)(__v8di){+5, -3, -10, +8, 0, -256, +256, -128}), 2, 63, 62, 1, 0, 56, 1, 57));
__m512i test_mm512_mask_popcnt_epi64(__m512i __W, __mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_mask_popcnt_epi64
+ // CHECK-LABEL: test_mm512_mask_popcnt_epi64
// CHECK: @llvm.ctpop.v8i64
// CHECK: select <8 x i1> %{{[0-9]+}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_mask_popcnt_epi64(__W, __U, __A);
}
__m512i test_mm512_maskz_popcnt_epi64(__mmask8 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_maskz_popcnt_epi64
+ // CHECK-LABEL: test_mm512_maskz_popcnt_epi64
// CHECK: @llvm.ctpop.v8i64
// CHECK: select <8 x i1> %{{[0-9]+}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_maskz_popcnt_epi64(__U, __A);
}
__m512i test_mm512_popcnt_epi32(__m512i __A) {
- // CHECK-LABEL: @test_mm512_popcnt_epi32
+ // CHECK-LABEL: test_mm512_popcnt_epi32
// CHECK: @llvm.ctpop.v16i32
return _mm512_popcnt_epi32(__A);
}
TEST_CONSTEXPR(match_v16si(_mm512_popcnt_epi32((__m512i)(__v16si){+5, -3, -10, +8, 0, -256, +256, -128, +3, +9, +15, +33, +63, +129, +511, +1025}), 2, 31, 30, 1, 0, 24, 1, 25, 2, 2, 4, 2, 6, 2, 9, 2));
__m512i test_mm512_mask_popcnt_epi32(__m512i __W, __mmask16 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_mask_popcnt_epi32
+ // CHECK-LABEL: test_mm512_mask_popcnt_epi32
// CHECK: @llvm.ctpop.v16i32
// CHECK: select <16 x i1> %{{[0-9]+}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_mask_popcnt_epi32(__W, __U, __A);
}
__m512i test_mm512_maskz_popcnt_epi32(__mmask16 __U, __m512i __A) {
- // CHECK-LABEL: @test_mm512_maskz_popcnt_epi32
+ // CHECK-LABEL: test_mm512_maskz_popcnt_epi32
// CHECK: @llvm.ctpop.v16i32
// CHECK: select <16 x i1> %{{[0-9]+}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_maskz_popcnt_epi32(__U, __A);
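The corrected _mm512_popcnt_epi64 expectations above replace values that look like 32-bit popcounts with the proper 64-bit ones; sign-extending a negative input across a 64-bit __v8di lane contributes 32 additional set bits:
// -3   -> 0xFFFFFFFFFFFFFFFD -> 63 set bits (previously listed as 31)
// -10  -> 0xFFFFFFFFFFFFFFF6 -> 62 set bits (previously 30)
// -256 -> 0xFFFFFFFFFFFFFF00 -> 56 set bits (previously 24)
// -128 -> 0xFFFFFFFFFFFFFF80 -> 57 set bits (previously 25)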
diff --git a/clang/test/CodeGen/X86/avx512vpopcntdqvl-builtins.c b/clang/test/CodeGen/X86/avx512vpopcntdqvl-builtins.c
index 5d18b68..d9fbd76 100644
--- a/clang/test/CodeGen/X86/avx512vpopcntdqvl-builtins.c
+++ b/clang/test/CodeGen/X86/avx512vpopcntdqvl-builtins.c
@@ -1,87 +1,90 @@
-// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512vpopcntdq -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512vpopcntdq -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512vpopcntdq -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512vpopcntdq -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512vpopcntdq -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
#include <immintrin.h>
#include "builtin_test_helpers.h"
__m128i test_mm_popcnt_epi64(__m128i __A) {
- // CHECK-LABEL: @test_mm_popcnt_epi64
+ // CHECK-LABEL: test_mm_popcnt_epi64
// CHECK: @llvm.ctpop.v2i64
return _mm_popcnt_epi64(__A);
}
TEST_CONSTEXPR(match_v2di(_mm_popcnt_epi64((__m128i)(__v2di){+5, -3}), 2, 63));
__m128i test_mm_mask_popcnt_epi64(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_popcnt_epi64
+ // CHECK-LABEL: test_mm_mask_popcnt_epi64
// CHECK: @llvm.ctpop.v2i64
// CHECK: select <2 x i1> %{{.+}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_popcnt_epi64(__W, __U, __A);
}
__m128i test_mm_maskz_popcnt_epi64(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_popcnt_epi64
+ // CHECK-LABEL: test_mm_maskz_popcnt_epi64
// CHECK: @llvm.ctpop.v2i64
// CHECK: select <2 x i1> %{{.+}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_popcnt_epi64(__U, __A);
}
__m128i test_mm_popcnt_epi32(__m128i __A) {
- // CHECK-LABEL: @test_mm_popcnt_epi32
+ // CHECK-LABEL: test_mm_popcnt_epi32
// CHECK: @llvm.ctpop.v4i32
return _mm_popcnt_epi32(__A);
}
TEST_CONSTEXPR(match_v4si(_mm_popcnt_epi32((__m128i)(__v4si){+5, -3, -10, +8}), 2, 31, 30, 1));
__m128i test_mm_mask_popcnt_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_mask_popcnt_epi32
+ // CHECK-LABEL: test_mm_mask_popcnt_epi32
// CHECK: @llvm.ctpop.v4i32
// CHECK: select <4 x i1> %{{.+}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_popcnt_epi32(__W, __U, __A);
}
__m128i test_mm_maskz_popcnt_epi32(__mmask8 __U, __m128i __A) {
- // CHECK-LABEL: @test_mm_maskz_popcnt_epi32
+ // CHECK-LABEL: test_mm_maskz_popcnt_epi32
// CHECK: @llvm.ctpop.v4i32
// CHECK: select <4 x i1> %{{.+}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_popcnt_epi32(__U, __A);
}
__m256i test_mm256_popcnt_epi64(__m256i __A) {
- // CHECK-LABEL: @test_mm256_popcnt_epi64
+ // CHECK-LABEL: test_mm256_popcnt_epi64
// CHECK: @llvm.ctpop.v4i64
return _mm256_popcnt_epi64(__A);
}
TEST_CONSTEXPR(match_v4di(_mm256_popcnt_epi64((__m256i)(__v4di){+5, -3, -10, +8}), 2, 63, 62, 1));
__m256i test_mm256_mask_popcnt_epi64(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_popcnt_epi64
+ // CHECK-LABEL: test_mm256_mask_popcnt_epi64
// CHECK: @llvm.ctpop.v4i64
// CHECK: select <4 x i1> %{{.+}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_popcnt_epi64(__W, __U, __A);
}
__m256i test_mm256_maskz_popcnt_epi64(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_popcnt_epi64
+ // CHECK-LABEL: test_mm256_maskz_popcnt_epi64
// CHECK: @llvm.ctpop.v4i64
// CHECK: select <4 x i1> %{{.+}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_popcnt_epi64(__U, __A);
}
__m256i test_mm256_popcnt_epi32(__m256i __A) {
- // CHECK-LABEL: @test_mm256_popcnt_epi32
+ // CHECK-LABEL: test_mm256_popcnt_epi32
// CHECK: @llvm.ctpop.v8i32
return _mm256_popcnt_epi32(__A);
}
TEST_CONSTEXPR(match_v8si(_mm256_popcnt_epi32((__m256i)(__v8si){+5, -3, -10, +8, 0, -256, +256, -128}), 2, 31, 30, 1, 0, 24, 1, 25));
__m256i test_mm256_mask_popcnt_epi32(__m256i __W, __mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_mask_popcnt_epi32
+ // CHECK-LABEL: test_mm256_mask_popcnt_epi32
// CHECK: @llvm.ctpop.v8i32
// CHECK: select <8 x i1> %{{.+}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_popcnt_epi32(__W, __U, __A);
}
__m256i test_mm256_maskz_popcnt_epi32(__mmask8 __U, __m256i __A) {
- // CHECK-LABEL: @test_mm256_maskz_popcnt_epi32
+ // CHECK-LABEL: test_mm256_maskz_popcnt_epi32
// CHECK: @llvm.ctpop.v8i32
// CHECK: select <8 x i1> %{{.+}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_popcnt_epi32(__U, __A);
diff --git a/clang/test/CodeGen/X86/builtin_test_helpers.h b/clang/test/CodeGen/X86/builtin_test_helpers.h
index f719694..5d4ee7d 100644
--- a/clang/test/CodeGen/X86/builtin_test_helpers.h
+++ b/clang/test/CodeGen/X86/builtin_test_helpers.h
@@ -122,6 +122,36 @@ constexpr bool match_v16si(__m512i _v, int a, int b, int c, int d, int e, int f,
return v[0] == a && v[1] == b && v[2] == c && v[3] == d && v[4] == e && v[5] == f && v[6] == g && v[7] == h && v[8] == i && v[9] == j && v[10] == k && v[11] == l && v[12] == m && v[13] == n && v[14] == o && v[15] == p;
}
+constexpr bool match_v32hi(__m512i _v, short __e00, short __e01, short __e02, short __e03, short __e04, short __e05, short __e06, short __e07,
+ short __e08, short __e09, short __e10, short __e11, short __e12, short __e13, short __e14, short __e15,
+ short __e16, short __e17, short __e18, short __e19, short __e20, short __e21, short __e22, short __e23,
+ short __e24, short __e25, short __e26, short __e27, short __e28, short __e29, short __e30, short __e31) {
+ __v32hi v = (__v32hi)_v;
+ return v[ 0] == __e00 && v[ 1] == __e01 && v[ 2] == __e02 && v[ 3] == __e03 && v[ 4] == __e04 && v[ 5] == __e05 && v[ 6] == __e06 && v[ 7] == __e07 &&
+ v[ 8] == __e08 && v[ 9] == __e09 && v[10] == __e10 && v[11] == __e11 && v[12] == __e12 && v[13] == __e13 && v[14] == __e14 && v[15] == __e15 &&
+ v[16] == __e16 && v[17] == __e17 && v[18] == __e18 && v[19] == __e19 && v[20] == __e20 && v[21] == __e21 && v[22] == __e22 && v[23] == __e23 &&
+ v[24] == __e24 && v[25] == __e25 && v[26] == __e26 && v[27] == __e27 && v[28] == __e28 && v[29] == __e29 && v[30] == __e30 && v[31] == __e31;
+}
+
+constexpr bool match_v64qi(__m512i _v, char __e00, char __e01, char __e02, char __e03, char __e04, char __e05, char __e06, char __e07,
+ char __e08, char __e09, char __e10, char __e11, char __e12, char __e13, char __e14, char __e15,
+ char __e16, char __e17, char __e18, char __e19, char __e20, char __e21, char __e22, char __e23,
+ char __e24, char __e25, char __e26, char __e27, char __e28, char __e29, char __e30, char __e31,
+ char __e32, char __e33, char __e34, char __e35, char __e36, char __e37, char __e38, char __e39,
+ char __e40, char __e41, char __e42, char __e43, char __e44, char __e45, char __e46, char __e47,
+ char __e48, char __e49, char __e50, char __e51, char __e52, char __e53, char __e54, char __e55,
+ char __e56, char __e57, char __e58, char __e59, char __e60, char __e61, char __e62, char __e63) {
+ __v64qi v = (__v64qi)_v;
+ return v[ 0] == __e00 && v[ 1] == __e01 && v[ 2] == __e02 && v[ 3] == __e03 && v[ 4] == __e04 && v[ 5] == __e05 && v[ 6] == __e06 && v[ 7] == __e07 &&
+ v[ 8] == __e08 && v[ 9] == __e09 && v[10] == __e10 && v[11] == __e11 && v[12] == __e12 && v[13] == __e13 && v[14] == __e14 && v[15] == __e15 &&
+ v[16] == __e16 && v[17] == __e17 && v[18] == __e18 && v[19] == __e19 && v[20] == __e20 && v[21] == __e21 && v[22] == __e22 && v[23] == __e23 &&
+ v[24] == __e24 && v[25] == __e25 && v[26] == __e26 && v[27] == __e27 && v[28] == __e28 && v[29] == __e29 && v[30] == __e30 && v[31] == __e31 &&
+ v[32] == __e32 && v[33] == __e33 && v[34] == __e34 && v[35] == __e35 && v[36] == __e36 && v[37] == __e37 && v[38] == __e38 && v[39] == __e39 &&
+ v[40] == __e40 && v[41] == __e41 && v[42] == __e42 && v[43] == __e43 && v[44] == __e44 && v[45] == __e45 && v[46] == __e46 && v[47] == __e47 &&
+ v[48] == __e48 && v[49] == __e49 && v[50] == __e50 && v[51] == __e51 && v[52] == __e52 && v[53] == __e53 && v[54] == __e54 && v[55] == __e55 &&
+ v[56] == __e56 && v[57] == __e57 && v[58] == __e58 && v[59] == __e59 && v[60] == __e60 && v[61] == __e61 && v[62] == __e62 && v[63] == __e63;
+}
+
#define TEST_CONSTEXPR(...) static_assert(__VA_ARGS__)
#else
diff --git a/clang/test/CodeGen/X86/mmx-builtins.c b/clang/test/CodeGen/X86/mmx-builtins.c
index 52cbe45..6f20986 100644
--- a/clang/test/CodeGen/X86/mmx-builtins.c
+++ b/clang/test/CodeGen/X86/mmx-builtins.c
@@ -34,24 +34,28 @@ __m64 test_mm_add_pi8(__m64 a, __m64 b) {
// CHECK: add <8 x i8> {{%.*}}, {{%.*}}
return _mm_add_pi8(a, b);
}
+TEST_CONSTEXPR(match_v8qi(_mm_add_pi8(_mm_setr_pi8(-3, +2, -1, 0, +1, -2, +3, -4), _mm_setr_pi8(-18, +16, -14, +12, -10, +8, +6, -4)), -21, +18, -15, +12, -9, +6, +9, -8));
__m64 test_mm_add_pi16(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_add_pi16
// CHECK: add <4 x i16> {{%.*}}, {{%.*}}
return _mm_add_pi16(a, b);
}
+TEST_CONSTEXPR(match_v4hi(_mm_add_pi16((__m64)(__v4hi){+1, -2, +3, -4}, (__m64)(__v4hi){-10, +8, +6, -4}), -9, +6, +9, -8));
__m64 test_mm_add_pi32(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_add_pi32
// CHECK: add <2 x i32> {{%.*}}, {{%.*}}
return _mm_add_pi32(a, b);
}
+TEST_CONSTEXPR(match_v2si(_mm_add_pi32((__m64)(__v2si){+5, -3}, (__m64)(__v2si){-9, +8}), -4, +5));
__m64 test_mm_add_si64(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_add_si64
// CHECK: add i64 {{%.*}}, {{%.*}}
return _mm_add_si64(a, b);
}
+TEST_CONSTEXPR(match_v1di(_mm_add_si64((__m64)(__v1di){+42}, (__m64)(__v1di){-100}), -58));
__m64 test_mm_adds_pi8(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_adds_pi8
@@ -88,6 +92,7 @@ __m64 test_mm_and_si64(__m64 a, __m64 b) {
// CHECK: and <1 x i64> {{%.*}}, {{%.*}}
return _mm_and_si64(a, b);
}
+TEST_CONSTEXPR(match_v4hi(_mm_and_si64((__m64)(__v4hi){0, -1, 0, -1}, (__m64)(__v4hi){0, 0, -1, -1}), 0, 0, 0, -1));
__m64 test_mm_andnot_si64(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_andnot_si64
@@ -95,6 +100,7 @@ __m64 test_mm_andnot_si64(__m64 a, __m64 b) {
// CHECK: and <1 x i64> [[TMP]], {{%.*}}
return _mm_andnot_si64(a, b);
}
+TEST_CONSTEXPR(match_v4hi(_mm_andnot_si64((__m64)(__v4hi){0, -1, 0, -1}, (__m64)(__v4hi){0, 0, -1, -1}), 0, 0, -1, 0));
__m64 test_mm_avg_pu8(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_avg_pu8
@@ -114,6 +120,7 @@ __m64 test_mm_cmpeq_pi8(__m64 a, __m64 b) {
// CHECK-NEXT: {{%.*}} = sext <8 x i1> [[CMP]] to <8 x i8>
return _mm_cmpeq_pi8(a, b);
}
+TEST_CONSTEXPR(match_v8qi(_mm_cmpeq_pi8(_mm_setr_pi8(-3, +2, -1, 0, +1, -2, +3, -4), _mm_setr_pi8(-3, -2, +1, 0, -1, -2, -3, -4)), -1, 0, 0, -1, 0, -1, 0, -1));
__m64 test_mm_cmpeq_pi16(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_cmpeq_pi16
@@ -121,6 +128,7 @@ __m64 test_mm_cmpeq_pi16(__m64 a, __m64 b) {
// CHECK-NEXT: {{%.*}} = sext <4 x i1> [[CMP]] to <4 x i16>
return _mm_cmpeq_pi16(a, b);
}
+TEST_CONSTEXPR(match_v4hi(_mm_cmpeq_pi16((__m64)(__v4hi){+1, -2, +3, -4}, (__m64)(__v4hi){-1, -1, +3, +4}), 0, 0, -1, 0));
__m64 test_mm_cmpeq_pi32(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_cmpeq_pi32
@@ -128,6 +136,7 @@ __m64 test_mm_cmpeq_pi32(__m64 a, __m64 b) {
// CHECK-NEXT: {{%.*}} = sext <2 x i1> [[CMP]] to <2 x i32>
return _mm_cmpeq_pi32(a, b);
}
+TEST_CONSTEXPR(match_v2si(_mm_cmpeq_pi32((__m64)(__v2si){+5, -3}, (__m64)(__v2si){-5, -3}), 0, -1));
__m64 test_mm_cmpgt_pi8(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_cmpgt_pi8
@@ -135,6 +144,7 @@ __m64 test_mm_cmpgt_pi8(__m64 a, __m64 b) {
// CHECK-NEXT: {{%.*}} = sext <8 x i1> [[CMP]] to <8 x i8>
return _mm_cmpgt_pi8(a, b);
}
+TEST_CONSTEXPR(match_v8qi(_mm_cmpgt_pi8(_mm_setr_pi8(-3, +2, -1, 0, +1, -2, +3, -4), _mm_setr_pi8(-3, -2, +1, 0, -1, -2, -3, -4)), 0, -1, 0, 0, -1, 0, -1, 0));
__m64 test_mm_cmpgt_pi16(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_cmpgt_pi16
@@ -142,6 +152,7 @@ __m64 test_mm_cmpgt_pi16(__m64 a, __m64 b) {
// CHECK-NEXT: {{%.*}} = sext <4 x i1> [[CMP]] to <4 x i16>
return _mm_cmpgt_pi16(a, b);
}
+TEST_CONSTEXPR(match_v4hi(_mm_cmpgt_pi16((__m64)(__v4hi){+1, -2, +3, -4}, (__m64)(__v4hi){-1, -1, +3, +4}), -1, 0, 0, 0));
__m64 test_mm_cmpgt_pi32(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_cmpgt_pi32
@@ -149,6 +160,7 @@ __m64 test_mm_cmpgt_pi32(__m64 a, __m64 b) {
// CHECK-NEXT: {{%.*}} = sext <2 x i1> [[CMP]] to <2 x i32>
return _mm_cmpgt_pi32(a, b);
}
+TEST_CONSTEXPR(match_v2si(_mm_cmpgt_pi32((__m64)(__v2si){+5, -3}, (__m64)(__v2si){-5, -3}), -1, 0));
__m128 test_mm_cvt_pi2ps(__m128 a, __m64 b) {
// CHECK-LABEL: test_mm_cvt_pi2ps
@@ -210,12 +222,14 @@ __m64 test_mm_cvtsi32_si64(int a) {
// CHECK: insertelement <2 x i32>
return _mm_cvtsi32_si64(a);
}
+TEST_CONSTEXPR(match_v2si(_mm_cvtsi32_si64(-127), -127, 0));
int test_mm_cvtsi64_si32(__m64 a) {
// CHECK-LABEL: test_mm_cvtsi64_si32
// CHECK: extractelement <2 x i32>
return _mm_cvtsi64_si32(a);
}
+TEST_CONSTEXPR(_mm_cvtsi64_si32((__m64)(__v4hi){-2, 0, -1, -1}) == 65534);
__m64 test_mm_cvttpd_pi32(__m128d a) {
// CHECK-LABEL: test_mm_cvttpd_pi32
@@ -240,11 +254,13 @@ __m64 test_m_from_int(int a) {
// CHECK: insertelement <2 x i32>
return _m_from_int(a);
}
+TEST_CONSTEXPR(match_v2si(_m_from_int(255), 255, 0));
__m64 test_m_from_int64(long long a) {
// CHECK-LABEL: test_m_from_int64
return _m_from_int64(a);
}
+TEST_CONSTEXPR(match_v1di(_m_from_int64(-65536), -65536LL));
__m64 test_mm_hadd_pi16(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_hadd_pi16
@@ -349,12 +365,14 @@ __m64 test_mm_mulhi_pi16(__m64 a, __m64 b) {
// CHECK: call <8 x i16> @llvm.x86.sse2.pmulh.w(
return _mm_mulhi_pi16(a, b);
}
+TEST_CONSTEXPR(match_v4hi(_mm_mulhi_pi16((__m64)(__v4hi){+1, -2, +3, -4}, (__m64)(__v4hi){-10, +8, +6, -4}), -1, -1, 0, 0));
__m64 test_mm_mulhi_pu16(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_mulhi_pu16
// CHECK: call <8 x i16> @llvm.x86.sse2.pmulhu.w(
return _mm_mulhi_pu16(a, b);
}
+TEST_CONSTEXPR(match_v4hi(_mm_mulhi_pu16((__m64)(__v4hi){+1, -2, +3, -4}, (__m64)(__v4hi){-10, +8, +6, -4}), 0, 7, 0, -8));
__m64 test_mm_mulhrs_pi16(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_mulhrs_pi16
@@ -367,12 +385,14 @@ __m64 test_mm_mullo_pi16(__m64 a, __m64 b) {
// CHECK: mul <4 x i16> {{%.*}}, {{%.*}}
return _mm_mullo_pi16(a, b);
}
+TEST_CONSTEXPR(match_v4hi(_mm_mullo_pi16((__m64)(__v4hi){+1, -2, +3, -4}, (__m64)(__v4hi){-10, +8, +6, -4}), -10, -16, +18, +16));
__m64 test_mm_or_si64(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_or_si64
// CHECK: or <1 x i64> {{%.*}}, {{%.*}}
return _mm_or_si64(a, b);
}
+TEST_CONSTEXPR(match_v4hi(_mm_or_si64((__m64)(__v4hi){0, -1, 0, -1}, (__m64)(__v4hi){0, 0, -1, -1}), 0, -1, -1, -1));
__m64 test_mm_packs_pi16(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_packs_pi16
@@ -644,24 +664,28 @@ __m64 test_mm_sub_pi8(__m64 a, __m64 b) {
// CHECK: sub <8 x i8> {{%.*}}, {{%.*}}
return _mm_sub_pi8(a, b);
}
+TEST_CONSTEXPR(match_v8qi(_mm_sub_pi8(_mm_setr_pi8(-3, +2, -1, 0, +1, -2, +3, -4), _mm_setr_pi8(-18, +16, -14, +12, -10, +8, +6, -4)), +15, -14, +13, -12, +11, -10, -3, 0));
__m64 test_mm_sub_pi16(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_sub_pi16
// CHECK: sub <4 x i16> {{%.*}}, {{%.*}}
return _mm_sub_pi16(a, b);
}
+TEST_CONSTEXPR(match_v4hi(_mm_sub_pi16((__m64)(__v4hi){+1, -2, +3, -4}, (__m64)(__v4hi){-10, +8, +6, -4}), +11, -10, -3, 0));
__m64 test_mm_sub_pi32(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_sub_pi32
// CHECK: sub <2 x i32> {{%.*}}, {{%.*}}
return _mm_sub_pi32(a, b);
}
+TEST_CONSTEXPR(match_v2si(_mm_sub_pi32((__m64)(__v2si){+5, -3}, (__m64)(__v2si){-9, +8}), +14, -11));
__m64 test_mm_sub_si64(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_sub_si64
// CHECK: sub i64 {{%.*}}, {{%.*}}
return _mm_sub_si64(a, b);
}
+TEST_CONSTEXPR(match_v1di(_mm_sub_si64((__m64)(__v1di){+42}, (__m64)(__v1di){-100}), +142));
__m64 test_mm_subs_pi8(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_subs_pi8
@@ -692,11 +716,13 @@ int test_m_to_int(__m64 a) {
// CHECK: extractelement <2 x i32>
return _m_to_int(a);
}
+TEST_CONSTEXPR(_m_to_int((__m64)(__v4hi){0, -2, -1, -1}) == -131072);
long long test_m_to_int64(__m64 a) {
// CHECK-LABEL: test_m_to_int64
return _m_to_int64(a);
}
+TEST_CONSTEXPR(_m_to_int64((__m64)(__v4hi){0, -2, 0, -1}) == -281470681874432LL);
__m64 test_mm_unpackhi_pi8(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_unpackhi_pi8
@@ -739,3 +765,4 @@ __m64 test_mm_xor_si64(__m64 a, __m64 b) {
// CHECK: xor <1 x i64> {{%.*}}, {{%.*}}
return _mm_xor_si64(a, b);
}
+TEST_CONSTEXPR(match_v4hi(_mm_xor_si64((__m64)(__v4hi){0, -1, 0, -1}, (__m64)(__v4hi){0, 0, -1, -1}), 0, -1, -1, 0));
diff --git a/clang/test/CodeGen/X86/sse2-builtins.c b/clang/test/CodeGen/X86/sse2-builtins.c
index 04df59e..23013dd 100644
--- a/clang/test/CodeGen/X86/sse2-builtins.c
+++ b/clang/test/CodeGen/X86/sse2-builtins.c
@@ -96,6 +96,7 @@ __m128i test_mm_and_si128(__m128i A, __m128i B) {
// CHECK: and <2 x i64>
return _mm_and_si128(A, B);
}
+TEST_CONSTEXPR(match_v4si(_mm_and_si128((__m128i)(__v4si){0, -1, 0, -1}, (__m128i)(__v4si){0, 0, -1, -1}), 0, 0, 0, -1));
__m128d test_mm_andnot_pd(__m128d A, __m128d B) {
// CHECK-LABEL: test_mm_andnot_pd
@@ -111,6 +112,7 @@ __m128i test_mm_andnot_si128(__m128i A, __m128i B) {
// CHECK: and <2 x i64>
return _mm_andnot_si128(A, B);
}
+TEST_CONSTEXPR(match_v4si(_mm_andnot_si128((__m128i)(__v4si){0, -1, 0, -1}, (__m128i)(__v4si){0, 0, -1, -1}), 0, 0, -1, 0));
__m128i test_mm_avg_epu8(__m128i A, __m128i B) {
// CHECK-LABEL: test_mm_avg_epu8
@@ -938,18 +940,21 @@ __m128i test_mm_mulhi_epi16(__m128i A, __m128i B) {
// CHECK: call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
return _mm_mulhi_epi16(A, B);
}
+TEST_CONSTEXPR(match_v8hi(_mm_mulhi_epi16((__m128i)(__v8hi){+1, -2, +3, -4, +5, -6, +7, -8}, (__m128i)(__v8hi){-16, -14, +12, +10, -8, +6, -4, +2}), -1, 0, 0, -1, -1, -1, -1, -1));
__m128i test_mm_mulhi_epu16(__m128i A, __m128i B) {
// CHECK-LABEL: test_mm_mulhi_epu16
// CHECK: call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
return _mm_mulhi_epu16(A, B);
}
+TEST_CONSTEXPR(match_v8hi(_mm_mulhi_epu16((__m128i)(__v8hi){+1, -2, +3, -4, +5, -6, +7, -8}, (__m128i)(__v8hi){-16, -14, +12, +10, -8, +6, -4, +2}), 0, -16, 0, 9, 4, 5, 6, 1));
__m128i test_mm_mullo_epi16(__m128i A, __m128i B) {
// CHECK-LABEL: test_mm_mullo_epi16
// CHECK: mul <8 x i16> %{{.*}}, %{{.*}}
return _mm_mullo_epi16(A, B);
}
+TEST_CONSTEXPR(match_v8hi(_mm_mullo_epi16((__m128i)(__v8hi){+1, -2, +3, -4, +5, -6, +7, -8}, (__m128i)(__v8hi){-16, -14, +12, +10, -8, +6, -4, +2}), -16, 28, 36, -40, -40, -36, -28, -16));
__m128d test_mm_or_pd(__m128d A, __m128d B) {
// CHECK-LABEL: test_mm_or_pd
@@ -963,6 +968,7 @@ __m128i test_mm_or_si128(__m128i A, __m128i B) {
// CHECK: or <2 x i64> %{{.*}}, %{{.*}}
return _mm_or_si128(A, B);
}
+TEST_CONSTEXPR(match_v4si(_mm_or_si128((__m128i)(__v4si){0, -1, 0, -1}, (__m128i)(__v4si){0, 0, -1, -1}), 0, -1, -1, -1));
__m128i test_mm_packs_epi16(__m128i A, __m128i B) {
// CHECK-LABEL: test_mm_packs_epi16
@@ -1831,3 +1837,4 @@ __m128i test_mm_xor_si128(__m128i A, __m128i B) {
// CHECK: xor <2 x i64> %{{.*}}, %{{.*}}
return _mm_xor_si128(A, B);
}
+TEST_CONSTEXPR(match_v4si(_mm_xor_si128((__m128i)(__v4si){0, -1, 0, -1}, (__m128i)(__v4si){0, 0, -1, -1}), 0, -1, -1, 0));
diff --git a/clang/test/CodeGen/aggregate-assign-call.c b/clang/test/CodeGen/aggregate-assign-call.c
index 7d97239..f09e77032 100644
--- a/clang/test/CodeGen/aggregate-assign-call.c
+++ b/clang/test/CodeGen/aggregate-assign-call.c
@@ -24,23 +24,23 @@ struct S bar(void) {
// O1: %[[TMP2_ALLOCA:[^ ]+]] = alloca %struct.S
// O1: %[[TMP3_ALLOCA:[^ ]+]] = alloca %struct.S
- // O1: call void @llvm.lifetime.start.p0({{[^,]*}}, ptr %[[TMP1_ALLOCA]])
+ // O1: call void @llvm.lifetime.start.p0(ptr %[[TMP1_ALLOCA]])
// O1: call void @foo
r = foo();
// O1: memcpy
- // O1: call void @llvm.lifetime.end.p0({{[^,]*}}, ptr %[[TMP1_ALLOCA]])
+ // O1: call void @llvm.lifetime.end.p0(ptr %[[TMP1_ALLOCA]])
- // O1: call void @llvm.lifetime.start.p0({{[^,]*}}, ptr %[[TMP2_ALLOCA]])
+ // O1: call void @llvm.lifetime.start.p0(ptr %[[TMP2_ALLOCA]])
// O1: call void @foo
r = foo();
// O1: memcpy
- // O1: call void @llvm.lifetime.end.p0({{[^,]*}}, ptr %[[TMP2_ALLOCA]])
+ // O1: call void @llvm.lifetime.end.p0(ptr %[[TMP2_ALLOCA]])
- // O1: call void @llvm.lifetime.start.p0({{[^,]*}}, ptr %[[TMP3_ALLOCA]])
+ // O1: call void @llvm.lifetime.start.p0(ptr %[[TMP3_ALLOCA]])
// O1: call void @foo
r = foo();
// O1: memcpy
- // O1: call void @llvm.lifetime.end.p0({{[^,]*}}, ptr %[[TMP3_ALLOCA]])
+ // O1: call void @llvm.lifetime.end.p0(ptr %[[TMP3_ALLOCA]])
return r;
}
@@ -59,17 +59,17 @@ struct S baz(int i, volatile int *j) {
// O1: %[[TMP2_ALLOCA:[^ ]+]] = alloca %struct.S
do {
- // O1: call void @llvm.lifetime.start.p0({{[^,]*}}, ptr %[[TMP1_ALLOCA]])
+ // O1: call void @llvm.lifetime.start.p0(ptr %[[TMP1_ALLOCA]])
//
- // O1: call void @llvm.lifetime.end.p0({{[^,]*}}, ptr %[[TMP1_ALLOCA]])
+ // O1: call void @llvm.lifetime.end.p0(ptr %[[TMP1_ALLOCA]])
//
// O1: call void @foo_int(ptr dead_on_unwind writable sret(%struct.S) align 4 %[[TMP1_ALLOCA]],
// O1: call void @llvm.memcpy
- // O1: call void @llvm.lifetime.end.p0({{[^,]*}}, ptr %[[TMP1_ALLOCA]])
- // O1: call void @llvm.lifetime.start.p0({{[^,]*}}, ptr %[[TMP2_ALLOCA]])
+ // O1: call void @llvm.lifetime.end.p0(ptr %[[TMP1_ALLOCA]])
+ // O1: call void @llvm.lifetime.start.p0(ptr %[[TMP2_ALLOCA]])
// O1: call void @foo_int(ptr dead_on_unwind writable sret(%struct.S) align 4 %[[TMP2_ALLOCA]],
// O1: call void @llvm.memcpy
- // O1: call void @llvm.lifetime.end.p0({{[^,]*}}, ptr %[[TMP2_ALLOCA]])
+ // O1: call void @llvm.lifetime.end.p0(ptr %[[TMP2_ALLOCA]])
r = foo_int(({
if (*j)
break;
diff --git a/clang/test/CodeGen/attr-counted-by-for-pointers.c b/clang/test/CodeGen/attr-counted-by-for-pointers.c
index e939e49..0d72b58 100644
--- a/clang/test/CodeGen/attr-counted-by-for-pointers.c
+++ b/clang/test/CodeGen/attr-counted-by-for-pointers.c
@@ -32,7 +32,7 @@ struct annotated_ptr {
// SANITIZE-WITH-ATTR-NEXT: entry:
// SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
// SANITIZE-WITH-ATTR-NEXT: [[DOTCOUNTED_BY_GEP:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 16
-// SANITIZE-WITH-ATTR-NEXT: [[DOTCOUNTED_BY_LOAD:%.*]] = load i32, ptr [[DOTCOUNTED_BY_GEP]], align 4
+// SANITIZE-WITH-ATTR-NEXT: [[DOTCOUNTED_BY_LOAD:%.*]] = load i32, ptr [[DOTCOUNTED_BY_GEP]], align 8
// SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = zext i32 [[DOTCOUNTED_BY_LOAD]] to i64, !nosanitize [[META2:![0-9]+]]
// SANITIZE-WITH-ATTR-NEXT: [[TMP1:%.*]] = icmp ult i64 [[IDXPROM]], [[TMP0]], !nosanitize [[META2]]
// SANITIZE-WITH-ATTR-NEXT: br i1 [[TMP1]], label [[CONT10:%.*]], label [[HANDLER_OUT_OF_BOUNDS:%.*]], !prof [[PROF3:![0-9]+]], !nosanitize [[META2]]
@@ -85,7 +85,7 @@ void test1(struct annotated_ptr *p, int index, struct foo *value) {
// SANITIZE-WITH-ATTR-NEXT: entry:
// SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
// SANITIZE-WITH-ATTR-NEXT: [[DOTCOUNTED_BY_GEP:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 16
-// SANITIZE-WITH-ATTR-NEXT: [[DOTCOUNTED_BY_LOAD:%.*]] = load i32, ptr [[DOTCOUNTED_BY_GEP]], align 4
+// SANITIZE-WITH-ATTR-NEXT: [[DOTCOUNTED_BY_LOAD:%.*]] = load i32, ptr [[DOTCOUNTED_BY_GEP]], align 8
// SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = zext i32 [[DOTCOUNTED_BY_LOAD]] to i64, !nosanitize [[META2]]
// SANITIZE-WITH-ATTR-NEXT: [[TMP1:%.*]] = icmp ult i64 [[IDXPROM]], [[TMP0]], !nosanitize [[META2]]
// SANITIZE-WITH-ATTR-NEXT: br i1 [[TMP1]], label [[CONT10:%.*]], label [[HANDLER_OUT_OF_BOUNDS:%.*]], !prof [[PROF3]], !nosanitize [[META2]]
@@ -138,7 +138,7 @@ void test2(struct annotated_ptr *p, int index, struct foo *value) {
// SANITIZE-WITH-ATTR-NEXT: entry:
// SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
// SANITIZE-WITH-ATTR-NEXT: [[DOTCOUNTED_BY_GEP:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 16
-// SANITIZE-WITH-ATTR-NEXT: [[DOTCOUNTED_BY_LOAD:%.*]] = load i32, ptr [[DOTCOUNTED_BY_GEP]], align 4
+// SANITIZE-WITH-ATTR-NEXT: [[DOTCOUNTED_BY_LOAD:%.*]] = load i32, ptr [[DOTCOUNTED_BY_GEP]], align 8
// SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = zext i32 [[DOTCOUNTED_BY_LOAD]] to i64, !nosanitize [[META2]]
// SANITIZE-WITH-ATTR-NEXT: [[DOTNOT:%.*]] = icmp ugt i64 [[IDXPROM]], [[TMP0]], !nosanitize [[META2]]
// SANITIZE-WITH-ATTR-NEXT: br i1 [[DOTNOT]], label [[HANDLER_OUT_OF_BOUNDS:%.*]], label [[CONT10:%.*]], !prof [[PROF15:![0-9]+]], !nosanitize [[META2]]
@@ -311,7 +311,7 @@ size_t test6(struct annotated_ptr *p, int index) {
// SANITIZE-WITH-ATTR-NEXT: entry:
// SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
// SANITIZE-WITH-ATTR-NEXT: [[DOTCOUNTED_BY_GEP:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 16
-// SANITIZE-WITH-ATTR-NEXT: [[DOTCOUNTED_BY_LOAD:%.*]] = load i32, ptr [[DOTCOUNTED_BY_GEP]], align 4
+// SANITIZE-WITH-ATTR-NEXT: [[DOTCOUNTED_BY_LOAD:%.*]] = load i32, ptr [[DOTCOUNTED_BY_GEP]], align 8
// SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = zext i32 [[DOTCOUNTED_BY_LOAD]] to i64, !nosanitize [[META2]]
// SANITIZE-WITH-ATTR-NEXT: [[TMP1:%.*]] = icmp ult i64 [[IDXPROM]], [[TMP0]], !nosanitize [[META2]]
// SANITIZE-WITH-ATTR-NEXT: br i1 [[TMP1]], label [[CONT10:%.*]], label [[HANDLER_OUT_OF_BOUNDS:%.*]], !prof [[PROF3]], !nosanitize [[META2]]
diff --git a/clang/test/CodeGen/attr-counted-by.c b/clang/test/CodeGen/attr-counted-by.c
index 9fb50c6..59e1b13 100644
--- a/clang/test/CodeGen/attr-counted-by.c
+++ b/clang/test/CodeGen/attr-counted-by.c
@@ -1209,7 +1209,7 @@ int test12_a, test12_b;
// SANITIZE-WITH-ATTR-SAME: i32 noundef [[INDEX:%.*]]) local_unnamed_addr #[[ATTR3:[0-9]+]] {
// SANITIZE-WITH-ATTR-NEXT: entry:
// SANITIZE-WITH-ATTR-NEXT: [[BAZ:%.*]] = alloca [[STRUCT_HANG:%.*]], align 4
-// SANITIZE-WITH-ATTR-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[BAZ]]) #[[ATTR9:[0-9]+]]
+// SANITIZE-WITH-ATTR-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[BAZ]]) #[[ATTR9:[0-9]+]]
// SANITIZE-WITH-ATTR-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 4 dereferenceable(24) [[BAZ]], ptr noundef nonnull align 4 dereferenceable(24) @test12_bar, i64 24, i1 false), !tbaa.struct [[TBAA_STRUCT10:![0-9]+]]
// SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = icmp ult i32 [[INDEX]], 6
// SANITIZE-WITH-ATTR-NEXT: [[TMP1:%.*]] = zext i32 [[INDEX]] to i64
@@ -1235,7 +1235,7 @@ int test12_a, test12_b;
// NO-SANITIZE-WITH-ATTR-SAME: i32 noundef [[INDEX:%.*]]) local_unnamed_addr #[[ATTR4:[0-9]+]] {
// NO-SANITIZE-WITH-ATTR-NEXT: entry:
// NO-SANITIZE-WITH-ATTR-NEXT: [[BAZ:%.*]] = alloca [[STRUCT_HANG:%.*]], align 4
-// NO-SANITIZE-WITH-ATTR-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[BAZ]]) #[[ATTR12:[0-9]+]]
+// NO-SANITIZE-WITH-ATTR-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[BAZ]]) #[[ATTR12:[0-9]+]]
// NO-SANITIZE-WITH-ATTR-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 4 dereferenceable(24) [[BAZ]], ptr noundef nonnull align 4 dereferenceable(24) @test12_bar, i64 24, i1 false), !tbaa.struct [[TBAA_STRUCT7:![0-9]+]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [6 x i32], ptr [[BAZ]], i64 0, i64 [[IDXPROM]]
@@ -1251,7 +1251,7 @@ int test12_a, test12_b;
// SANITIZE-WITHOUT-ATTR-SAME: i32 noundef [[INDEX:%.*]]) local_unnamed_addr #[[ATTR3:[0-9]+]] {
// SANITIZE-WITHOUT-ATTR-NEXT: entry:
// SANITIZE-WITHOUT-ATTR-NEXT: [[BAZ:%.*]] = alloca [[STRUCT_HANG:%.*]], align 4
-// SANITIZE-WITHOUT-ATTR-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[BAZ]]) #[[ATTR7:[0-9]+]]
+// SANITIZE-WITHOUT-ATTR-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[BAZ]]) #[[ATTR7:[0-9]+]]
// SANITIZE-WITHOUT-ATTR-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 4 dereferenceable(24) [[BAZ]], ptr noundef nonnull align 4 dereferenceable(24) @test12_bar, i64 24, i1 false), !tbaa.struct [[TBAA_STRUCT7:![0-9]+]]
// SANITIZE-WITHOUT-ATTR-NEXT: [[TMP0:%.*]] = icmp ult i32 [[INDEX]], 6
// SANITIZE-WITHOUT-ATTR-NEXT: [[TMP1:%.*]] = zext i32 [[INDEX]] to i64
@@ -1277,7 +1277,7 @@ int test12_a, test12_b;
// NO-SANITIZE-WITHOUT-ATTR-SAME: i32 noundef [[INDEX:%.*]]) local_unnamed_addr #[[ATTR2:[0-9]+]] {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: entry:
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[BAZ:%.*]] = alloca [[STRUCT_HANG:%.*]], align 4
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[BAZ]]) #[[ATTR10:[0-9]+]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[BAZ]]) #[[ATTR10:[0-9]+]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 4 dereferenceable(24) [[BAZ]], ptr noundef nonnull align 4 dereferenceable(24) @test12_bar, i64 24, i1 false), !tbaa.struct [[TBAA_STRUCT7:![0-9]+]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [6 x i32], ptr [[BAZ]], i64 0, i64 [[IDXPROM]]
diff --git a/clang/test/CodeGen/builtin-bpf-btf-type-id.c b/clang/test/CodeGen/builtin-bpf-btf-type-id.c
index 4c6efd6..c8f29ee 100644
--- a/clang/test/CodeGen/builtin-bpf-btf-type-id.c
+++ b/clang/test/CodeGen/builtin-bpf-btf-type-id.c
@@ -21,5 +21,5 @@ unsigned test3() {
//
// CHECK: ![[INT]] = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed
// CHECK: ![[INT_POINTER]] = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: ![[INT]], size: 64
-// CHECK: ![[STRUCT_T1]] = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "t1"
// CHECK: ![[TYPEDEF_T1]] = !DIDerivedType(tag: DW_TAG_typedef, name: "__t1"
+// CHECK: ![[STRUCT_T1]] = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "t1"
diff --git a/clang/test/CodeGen/builtins-bpf-preserve-field-info-3.c b/clang/test/CodeGen/builtins-bpf-preserve-field-info-3.c
index 53b4489..a8cc073 100644
--- a/clang/test/CodeGen/builtins-bpf-preserve-field-info-3.c
+++ b/clang/test/CodeGen/builtins-bpf-preserve-field-info-3.c
@@ -37,5 +37,5 @@ unsigned unit3() {
// CHECK: call i32 @llvm.bpf.preserve.type.info(i32 5, i64 1), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[ENUM_AA]]
// CHECK: ![[ENUM_AA]] = !DICompositeType(tag: DW_TAG_enumeration_type, name: "AA"
-// CHECK: ![[STRUCT_S]] = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "s"
// CHECK: ![[TYPEDEF_INT]] = !DIDerivedType(tag: DW_TAG_typedef, name: "__int"
+// CHECK: ![[STRUCT_S]] = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "s"
diff --git a/clang/test/CodeGen/builtins-wasm.c b/clang/test/CodeGen/builtins-wasm.c
index f201dfe..375664b 100644
--- a/clang/test/CodeGen/builtins-wasm.c
+++ b/clang/test/CodeGen/builtins-wasm.c
@@ -751,24 +751,3 @@ void *tp (void) {
return __builtin_thread_pointer ();
// WEBASSEMBLY: call {{.*}} @llvm.thread.pointer.p0()
}
-
-typedef void (*Fvoid)(void);
-typedef float (*Ffloats)(float, double, int);
-typedef void (*Fpointers)(Fvoid, Ffloats, void*, int*, int***, char[5]);
-
-void use(int);
-
-void test_function_pointer_signature_void(Fvoid func) {
- // WEBASSEMBLY: %0 = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, token poison)
- use(__builtin_wasm_test_function_pointer_signature(func));
-}
-
-void test_function_pointer_signature_floats(Ffloats func) {
- // WEBASSEMBLY: tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, float 0.000000e+00, token poison, float 0.000000e+00, double 0.000000e+00, i32 0)
- use(__builtin_wasm_test_function_pointer_signature(func));
-}
-
-void test_function_pointer_signature_pointers(Fpointers func) {
- // WEBASSEMBLY: %0 = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, token poison, ptr null, ptr null, ptr null, ptr null, ptr null, ptr null)
- use(__builtin_wasm_test_function_pointer_signature(func));
-}
diff --git a/clang/test/CodeGen/builtins-x86.c b/clang/test/CodeGen/builtins-x86.c
index c42c321..31f3097 100644
--- a/clang/test/CodeGen/builtins-x86.c
+++ b/clang/test/CodeGen/builtins-x86.c
@@ -22,6 +22,7 @@ typedef float V2f __attribute__((vector_size(8)));
// 128-bit
typedef char V16c __attribute__((vector_size(16)));
typedef signed short V8s __attribute__((vector_size(16)));
+typedef unsigned short V8u __attribute__((vector_size(16)));
typedef signed int V4i __attribute__((vector_size(16)));
#ifndef OPENCL
typedef signed long long V2LLi __attribute__((vector_size(16)));
@@ -99,6 +100,7 @@ void f0(void) {
// 128-bit
V16c tmp_V16c;
V8s tmp_V8s;
+ V8u tmp_V8u;
V4i tmp_V4i;
V2LLi tmp_V2LLi;
V4f tmp_V4f;
@@ -192,7 +194,7 @@ void f0(void) {
tmp_V16c = __builtin_ia32_packsswb128(tmp_V8s, tmp_V8s);
tmp_V8s = __builtin_ia32_packssdw128(tmp_V4i, tmp_V4i);
tmp_V16c = __builtin_ia32_packuswb128(tmp_V8s, tmp_V8s);
- tmp_V8s = __builtin_ia32_pmulhuw128(tmp_V8s, tmp_V8s);
+ tmp_V8u = __builtin_ia32_pmulhuw128(tmp_V8u, tmp_V8u);
tmp_V4f = __builtin_ia32_addsubps(tmp_V4f, tmp_V4f);
tmp_V2d = __builtin_ia32_addsubpd(tmp_V2d, tmp_V2d);
tmp_V4f = __builtin_ia32_haddps(tmp_V4f, tmp_V4f);
diff --git a/clang/test/CodeGen/cleanup-destslot-simple.c b/clang/test/CodeGen/cleanup-destslot-simple.c
index a02841a..8ace332 100644
--- a/clang/test/CodeGen/cleanup-destslot-simple.c
+++ b/clang/test/CodeGen/cleanup-destslot-simple.c
@@ -13,14 +13,14 @@
// CHECK-LIFETIME-NEXT: entry:
// CHECK-LIFETIME-NEXT: [[X:%.*]] = alloca i32, align 4
// CHECK-LIFETIME-NEXT: [[P:%.*]] = alloca ptr, align 8
-// CHECK-LIFETIME-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[X]]) #[[ATTR2:[0-9]+]], !dbg [[DBG9:![0-9]+]]
+// CHECK-LIFETIME-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]]) #[[ATTR2:[0-9]+]], !dbg [[DBG9:![0-9]+]]
// CHECK-LIFETIME-NEXT: store i32 3, ptr [[X]], align 4, !dbg [[DBG10:![0-9]+]], !tbaa [[TBAA11:![0-9]+]]
-// CHECK-LIFETIME-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull [[P]]), !dbg [[DBG15:![0-9]+]]
+// CHECK-LIFETIME-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[P]]), !dbg [[DBG15:![0-9]+]]
// CHECK-LIFETIME-NEXT: store volatile ptr [[X]], ptr [[P]], align 8, !dbg [[DBG16:![0-9]+]], !tbaa [[TBAA17:![0-9]+]]
// CHECK-LIFETIME-NEXT: [[P_0_P_0_P_0_P_0_:%.*]] = load volatile ptr, ptr [[P]], align 8, !dbg [[DBG19:![0-9]+]], !tbaa [[TBAA17]]
// CHECK-LIFETIME-NEXT: [[TMP0:%.*]] = load i32, ptr [[P_0_P_0_P_0_P_0_]], align 4, !dbg [[DBG20:![0-9]+]], !tbaa [[TBAA11]]
-// CHECK-LIFETIME-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull [[P]]), !dbg [[DBG21:![0-9]+]]
-// CHECK-LIFETIME-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[X]]) #[[ATTR2]], !dbg [[DBG21]]
+// CHECK-LIFETIME-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[P]]), !dbg [[DBG21:![0-9]+]]
+// CHECK-LIFETIME-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]]) #[[ATTR2]], !dbg [[DBG21]]
// CHECK-LIFETIME-NEXT: ret i32 [[TMP0]], !dbg [[DBG22:![0-9]+]]
//
// CHECK-OPTNONE-LABEL: @test(
@@ -37,13 +37,13 @@
// CHECK-MSAN-NEXT: entry:
// CHECK-MSAN-NEXT: [[X:%.*]] = alloca i32, align 4
// CHECK-MSAN-NEXT: [[P:%.*]] = alloca ptr, align 8
-// CHECK-MSAN-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[X]]) #[[ATTR2:[0-9]+]], !dbg [[DBG9:![0-9]+]]
+// CHECK-MSAN-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]]) #[[ATTR2:[0-9]+]], !dbg [[DBG9:![0-9]+]]
// CHECK-MSAN-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[X]] to i64, !dbg [[DBG9]]
// CHECK-MSAN-NEXT: [[TMP1:%.*]] = xor i64 [[TMP0]], 87960930222080, !dbg [[DBG9]]
// CHECK-MSAN-NEXT: [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr, !dbg [[DBG9]]
// CHECK-MSAN-NEXT: store i32 0, ptr [[TMP2]], align 4, !dbg [[DBG10:![0-9]+]]
// CHECK-MSAN-NEXT: store i32 3, ptr [[X]], align 4, !dbg [[DBG10]], !tbaa [[TBAA11:![0-9]+]]
-// CHECK-MSAN-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull [[P]]), !dbg [[DBG15:![0-9]+]]
+// CHECK-MSAN-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[P]]), !dbg [[DBG15:![0-9]+]]
// CHECK-MSAN-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64, !dbg [[DBG15]]
// CHECK-MSAN-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080, !dbg [[DBG15]]
// CHECK-MSAN-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr, !dbg [[DBG15]]
@@ -62,8 +62,8 @@
// CHECK-MSAN-NEXT: [[TMP10:%.*]] = xor i64 [[TMP9]], 87960930222080, !dbg [[DBG20]]
// CHECK-MSAN-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr, !dbg [[DBG20]]
// CHECK-MSAN-NEXT: [[_MSLD1:%.*]] = load i32, ptr [[TMP11]], align 4, !dbg [[DBG20]]
-// CHECK-MSAN-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull [[P]]), !dbg [[DBG22:![0-9]+]]
-// CHECK-MSAN-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[X]]) #[[ATTR2]], !dbg [[DBG22]]
+// CHECK-MSAN-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[P]]), !dbg [[DBG22:![0-9]+]]
+// CHECK-MSAN-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]]) #[[ATTR2]], !dbg [[DBG22]]
// CHECK-MSAN-NEXT: [[_MSCMP2_NOT:%.*]] = icmp eq i32 [[_MSLD1]], 0, !dbg [[DBG23:![0-9]+]]
// CHECK-MSAN-NEXT: br i1 [[_MSCMP2_NOT]], label [[TMP13:%.*]], label [[TMP12:%.*]], !dbg [[DBG23]], !prof [[PROF21]]
// CHECK-MSAN: 12:
@@ -77,13 +77,13 @@
// CHECK-KMSAN-NEXT: [[TMP0:%.*]] = call ptr @__msan_get_context_state() #[[ATTR2:[0-9]+]]
// CHECK-KMSAN-NEXT: [[X:%.*]] = alloca i32, align 4
// CHECK-KMSAN-NEXT: [[P:%.*]] = alloca ptr, align 8
-// CHECK-KMSAN-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[X]]) #[[ATTR2]], !dbg [[DBG9:![0-9]+]]
+// CHECK-KMSAN-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]]) #[[ATTR2]], !dbg [[DBG9:![0-9]+]]
// CHECK-KMSAN-NEXT: call void @__msan_poison_alloca(ptr nonnull [[X]], i64 4, ptr nonnull @[[GLOB0:[0-9]+]]) #[[ATTR2]], !dbg [[DBG9]]
// CHECK-KMSAN-NEXT: [[TMP1:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_store_4(ptr nonnull [[X]]) #[[ATTR2]], !dbg [[DBG10:![0-9]+]]
// CHECK-KMSAN-NEXT: [[TMP2:%.*]] = extractvalue { ptr, ptr } [[TMP1]], 0, !dbg [[DBG10]]
// CHECK-KMSAN-NEXT: store i32 0, ptr [[TMP2]], align 4, !dbg [[DBG10]]
// CHECK-KMSAN-NEXT: store i32 3, ptr [[X]], align 4, !dbg [[DBG10]], !tbaa [[TBAA11:![0-9]+]]
-// CHECK-KMSAN-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull [[P]]), !dbg [[DBG15:![0-9]+]]
+// CHECK-KMSAN-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[P]]), !dbg [[DBG15:![0-9]+]]
// CHECK-KMSAN-NEXT: call void @__msan_poison_alloca(ptr nonnull [[P]], i64 8, ptr nonnull @[[GLOB1:[0-9]+]]) #[[ATTR2]], !dbg [[DBG15]]
// CHECK-KMSAN-NEXT: [[TMP3:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_store_8(ptr nonnull [[P]]) #[[ATTR2]], !dbg [[DBG16:![0-9]+]]
// CHECK-KMSAN-NEXT: [[TMP4:%.*]] = extractvalue { ptr, ptr } [[TMP3]], 0, !dbg [[DBG16]]
@@ -109,8 +109,8 @@
// CHECK-KMSAN-NEXT: [[TMP14:%.*]] = extractvalue { ptr, ptr } [[TMP12]], 1, !dbg [[DBG20]]
// CHECK-KMSAN-NEXT: [[_MSLD1:%.*]] = load i32, ptr [[TMP13]], align 4, !dbg [[DBG20]]
// CHECK-KMSAN-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4, !dbg [[DBG20]]
-// CHECK-KMSAN-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull [[P]]), !dbg [[DBG22:![0-9]+]]
-// CHECK-KMSAN-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[X]]) #[[ATTR2]], !dbg [[DBG22]]
+// CHECK-KMSAN-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[P]]), !dbg [[DBG22:![0-9]+]]
+// CHECK-KMSAN-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]]) #[[ATTR2]], !dbg [[DBG22]]
// CHECK-KMSAN-NEXT: store i32 [[_MSLD1]], ptr [[RETVAL_SHADOW]], align 8, !dbg [[DBG23:![0-9]+]]
// CHECK-KMSAN-NEXT: store i32 [[TMP15]], ptr [[RETVAL_ORIGIN]], align 4, !dbg [[DBG23]]
// CHECK-KMSAN-NEXT: ret i32 [[TMP11]], !dbg [[DBG23]]
diff --git a/clang/test/CodeGen/debug-info-version.c b/clang/test/CodeGen/debug-info-version.c
index c7c2bb9..485b80e 100644
--- a/clang/test/CodeGen/debug-info-version.c
+++ b/clang/test/CodeGen/debug-info-version.c
@@ -1,4 +1,4 @@
-// REQUIRES: !system-windows
+// REQUIRES: !system-windows, !system-cygwin
// RUN: %clang -g -S -emit-llvm -o - %s | FileCheck %s
// RUN: %clang -S -emit-llvm -o - %s | FileCheck %s --check-prefix=NO_DEBUG
int main (void) {
diff --git a/clang/test/CodeGen/dominating-store-to-return.c b/clang/test/CodeGen/dominating-store-to-return.c
index 1c53e35..d095f3f 100644
--- a/clang/test/CodeGen/dominating-store-to-return.c
+++ b/clang/test/CodeGen/dominating-store-to-return.c
@@ -16,10 +16,10 @@
// LIFETIME-NEXT: [[FOO:%.*]] = alloca i32, align 4
// LIFETIME-NEXT: [[FOO2:%.*]] = alloca i32, align 4
// LIFETIME-NEXT: store i32 0, ptr [[RETVAL]], align 4
-// LIFETIME-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[FOO]]) #[[ATTR2:[0-9]+]]
-// LIFETIME-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[FOO2]]) #[[ATTR2]]
-// LIFETIME-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[FOO2]]) #[[ATTR2]]
-// LIFETIME-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[FOO]]) #[[ATTR2]]
+// LIFETIME-NEXT: call void @llvm.lifetime.start.p0(ptr [[FOO]]) #[[ATTR2:[0-9]+]]
+// LIFETIME-NEXT: call void @llvm.lifetime.start.p0(ptr [[FOO2]]) #[[ATTR2]]
+// LIFETIME-NEXT: call void @llvm.lifetime.end.p0(ptr [[FOO2]]) #[[ATTR2]]
+// LIFETIME-NEXT: call void @llvm.lifetime.end.p0(ptr [[FOO]]) #[[ATTR2]]
// LIFETIME-NEXT: ret i32 0
//
int main() {
diff --git a/clang/test/CodeGen/lifetime-sanitizer.c b/clang/test/CodeGen/lifetime-sanitizer.c
index 68879fd..4d36bd7 100644
--- a/clang/test/CodeGen/lifetime-sanitizer.c
+++ b/clang/test/CodeGen/lifetime-sanitizer.c
@@ -18,14 +18,14 @@ extern int bar(char *A, int n);
// CHECK-O0-NOT: @llvm.lifetime.start
int foo(int n) {
if (n) {
- // LIFETIME: @llvm.lifetime.start.p0(i64 10, ptr {{.*}})
+ // LIFETIME: @llvm.lifetime.start.p0(ptr {{.*}})
char A[10];
return bar(A, 1);
- // LIFETIME: @llvm.lifetime.end.p0(i64 10, ptr {{.*}})
+ // LIFETIME: @llvm.lifetime.end.p0(ptr {{.*}})
} else {
- // LIFETIME: @llvm.lifetime.start.p0(i64 20, ptr {{.*}})
+ // LIFETIME: @llvm.lifetime.start.p0(ptr {{.*}})
char A[20];
return bar(A, 2);
- // LIFETIME: @llvm.lifetime.end.p0(i64 20, ptr {{.*}})
+ // LIFETIME: @llvm.lifetime.end.p0(ptr {{.*}})
}
}
diff --git a/clang/test/CodeGen/lifetime.c b/clang/test/CodeGen/lifetime.c
index 748a496..edb6867 100644
--- a/clang/test/CodeGen/lifetime.c
+++ b/clang/test/CodeGen/lifetime.c
@@ -8,14 +8,14 @@ extern void use(char *a);
// CHECK-LABEL: @helper_no_markers
__attribute__((always_inline)) void helper_no_markers(void) {
char a;
- // LIFETIME: call void @llvm.lifetime.start.p0(i64 1,
+ // LIFETIME: call void @llvm.lifetime.start.p0(
use(&a);
- // LIFETIME: call void @llvm.lifetime.end.p0(i64 1,
+ // LIFETIME: call void @llvm.lifetime.end.p0(
}
// CHECK-LABEL: @lifetime_test
void lifetime_test(void) {
-// LIFETIME: call void @llvm.lifetime.start.p0(i64 1,
+// LIFETIME: call void @llvm.lifetime.start.p0(
helper_no_markers();
-// LIFETIME: call void @llvm.lifetime.end.p0(i64 1,
+// LIFETIME: call void @llvm.lifetime.end.p0(
}
diff --git a/clang/test/CodeGen/lifetime2.c b/clang/test/CodeGen/lifetime2.c
index 88c35fc..560389f 100644
--- a/clang/test/CodeGen/lifetime2.c
+++ b/clang/test/CodeGen/lifetime2.c
@@ -7,21 +7,21 @@ extern int bar(char *A, int n);
// CHECK-LABEL: @foo
int foo (int n) {
if (n) {
-// O2: call void @llvm.lifetime.start.p0(i64 100,
+// O2: call void @llvm.lifetime.start.p0(
char A[100];
return bar(A, 1);
-// O2: call void @llvm.lifetime.end.p0(i64 100,
+// O2: call void @llvm.lifetime.end.p0(
} else {
-// O2: call void @llvm.lifetime.start.p0(i64 100,
+// O2: call void @llvm.lifetime.start.p0(
char A[100];
return bar(A, 2);
-// O2: call void @llvm.lifetime.end.p0(i64 100,
+// O2: call void @llvm.lifetime.end.p0(
}
}
// CHECK-LABEL: @no_goto_bypass
void no_goto_bypass(void) {
- // O2: call void @llvm.lifetime.start.p0(i64 1,
+ // O2: call void @llvm.lifetime.start.p0(
char x;
l1:
bar(&x, 1);
@@ -45,16 +45,16 @@ void goto_bypass(void) {
void no_switch_bypass(int n) {
switch (n) {
case 1: {
- // O2: call void @llvm.lifetime.start.p0(i64 1,
- // O2: call void @llvm.lifetime.end.p0(i64 1,
+ // O2: call void @llvm.lifetime.start.p0(
+ // O2: call void @llvm.lifetime.end.p0(
char x;
bar(&x, 1);
break;
}
case 2:
n = n;
- // O2: call void @llvm.lifetime.start.p0(i64 5,
- // O2: call void @llvm.lifetime.end.p0(i64 5,
+ // O2: call void @llvm.lifetime.start.p0(
+ // O2: call void @llvm.lifetime.end.p0(
char y[5];
bar(y, 5);
break;
diff --git a/clang/test/CodeGen/lifetime3.cpp b/clang/test/CodeGen/lifetime3.cpp
index 64a097c..476ca0d 100644
--- a/clang/test/CodeGen/lifetime3.cpp
+++ b/clang/test/CodeGen/lifetime3.cpp
@@ -6,30 +6,30 @@ extern int bar(char *A, int n);
// CHECK-LABEL: @no_switch_bypass
extern "C" void no_switch_bypass(int n) {
- // O2: call void @llvm.lifetime.start.p0(i64 4,
+ // O2: call void @llvm.lifetime.start.p0(
switch (n += 1; int b=n) {
case 1: {
- // O2: call void @llvm.lifetime.start.p0(i64 1,
- // O2: call void @llvm.lifetime.end.p0(i64 1,
+ // O2: call void @llvm.lifetime.start.p0(
+ // O2: call void @llvm.lifetime.end.p0(
char x;
bar(&x, 1);
break;
}
case 2:
n = n;
- // O2: call void @llvm.lifetime.start.p0(i64 5,
- // O2: call void @llvm.lifetime.end.p0(i64 5,
+ // O2: call void @llvm.lifetime.start.p0(
+ // O2: call void @llvm.lifetime.end.p0(
char y[5];
bar(y, 5);
break;
}
- // O2: call void @llvm.lifetime.end.p0(i64 4,
+ // O2: call void @llvm.lifetime.end.p0(
}
// CHECK-LABEL: @switch_bypass
extern "C" void switch_bypass(int n) {
- // O2: call void @llvm.lifetime.start.p0(i64 4,
- // O2: call void @llvm.lifetime.end.p0(i64 4,
+ // O2: call void @llvm.lifetime.start.p0(
+ // O2: call void @llvm.lifetime.end.p0(
switch (n += 1; int b=n) {
case 1:
n = n;
diff --git a/clang/test/CodeGen/math-libcalls-tbaa.c b/clang/test/CodeGen/math-libcalls-tbaa.c
index f4e81ea..b2f502e 100644
--- a/clang/test/CodeGen/math-libcalls-tbaa.c
+++ b/clang/test/CodeGen/math-libcalls-tbaa.c
@@ -82,12 +82,12 @@ double test_remainder (double num[], double a) {
// CHECK-SAME: ptr noundef readonly captures(none) [[NUM:%.*]]) local_unnamed_addr #[[ATTR5:[0-9]+]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[E:%.*]] = alloca i32, align 4
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[E]]) #[[ATTR9]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[E]]) #[[ATTR9]]
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i8, ptr [[NUM]], i64 16
// CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[ARRAYIDX]], align 8, !tbaa [[TBAA8]]
// CHECK-NEXT: [[CALL:%.*]] = call double @frexp(double noundef [[TMP0]], ptr noundef nonnull [[E]]) #[[ATTR9]]
// CHECK-NEXT: [[MUL:%.*]] = fmul double [[TMP0]], [[CALL]]
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[E]]) #[[ATTR9]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[E]]) #[[ATTR9]]
// CHECK-NEXT: ret double [[MUL]]
//
double test_frexp (double num[]) {
@@ -105,8 +105,8 @@ double test_frexp (double num[]) {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[SIN:%.*]] = alloca float, align 4
// CHECK-NEXT: [[COS:%.*]] = alloca float, align 4
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[SIN]]) #[[ATTR9]]
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[COS]]) #[[ATTR9]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[SIN]]) #[[ATTR9]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[COS]]) #[[ATTR9]]
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i8, ptr [[NUM]], i64 8
// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
// CHECK-NEXT: call void @sincos(float noundef [[TMP0]], ptr noundef nonnull [[SIN]], ptr noundef nonnull [[COS]]) #[[ATTR9]]
@@ -115,8 +115,8 @@ double test_frexp (double num[]) {
// CHECK-NEXT: [[MUL:%.*]] = fmul float [[TMP1]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
// CHECK-NEXT: [[ADD:%.*]] = fadd float [[MUL]], [[TMP3]]
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[COS]]) #[[ATTR9]]
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[SIN]]) #[[ATTR9]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[COS]]) #[[ATTR9]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[SIN]]) #[[ATTR9]]
// CHECK-NEXT: ret float [[ADD]]
//
float test_sincos (float num[]) {
diff --git a/clang/test/CodeGen/nofpclass.c b/clang/test/CodeGen/nofpclass.c
index 8e61ae2..6cd7ec1 100644
--- a/clang/test/CodeGen/nofpclass.c
+++ b/clang/test/CodeGen/nofpclass.c
@@ -927,14 +927,14 @@ _Complex _Float16 defined_complex_func_f16_ret(_Complex _Float16 c) {
// CLFINITEONLY-NEXT: [[CF16_REAL:%.*]] = load half, ptr [[CF16]], align 8
// CLFINITEONLY-NEXT: [[CF16_IMAGP:%.*]] = getelementptr inbounds nuw i8, ptr [[CF16]], i64 2
// CLFINITEONLY-NEXT: [[CF16_IMAG:%.*]] = load half, ptr [[CF16_IMAGP]], align 2
-// CLFINITEONLY-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[BYVAL_TEMP]]) #[[ATTR12:[0-9]+]]
+// CLFINITEONLY-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[BYVAL_TEMP]]) #[[ATTR12:[0-9]+]]
// CLFINITEONLY-NEXT: [[BYVAL_TEMP_IMAGP:%.*]] = getelementptr inbounds nuw i8, ptr [[BYVAL_TEMP]], i64 8
// CLFINITEONLY-NEXT: store double [[CF64_COERCE0]], ptr [[BYVAL_TEMP]], align 8
// CLFINITEONLY-NEXT: store double [[CF64_COERCE1]], ptr [[BYVAL_TEMP_IMAGP]], align 8
// CLFINITEONLY-NEXT: [[COERCE5_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x half> poison, half [[CF16_REAL]], i64 0
// CLFINITEONLY-NEXT: [[COERCE5_SROA_0_2_VEC_INSERT:%.*]] = insertelement <2 x half> [[COERCE5_SROA_0_0_VEC_INSERT]], half [[CF16_IMAG]], i64 1
// CLFINITEONLY-NEXT: [[CALL:%.*]] = tail call nnan ninf nofpclass(nan inf) float (float, ...) @variadic(float noundef nofpclass(nan inf) [[F32]], double noundef nofpclass(nan inf) [[CONV]], double noundef nofpclass(nan inf) [[F64]], half noundef nofpclass(nan inf) [[F16]], double noundef nofpclass(nan inf) [[V2F32_COERCE]], <2 x double> noundef nofpclass(nan inf) [[V2F64]], i32 noundef [[V2F16_COERCE]], <2 x float> noundef nofpclass(nan inf) [[CF32_COERCE]], ptr noundef nonnull byval({ double, double }) align 8 [[BYVAL_TEMP]], <2 x half> noundef nofpclass(nan inf) [[COERCE5_SROA_0_2_VEC_INSERT]]) #[[ATTR11]]
-// CLFINITEONLY-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[BYVAL_TEMP]]) #[[ATTR12]]
+// CLFINITEONLY-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[BYVAL_TEMP]]) #[[ATTR12]]
// CLFINITEONLY-NEXT: ret float [[CALL]]
//
// NONANS: Function Attrs: noinline nounwind optnone
@@ -1180,14 +1180,14 @@ float call_variadic(float f32, double f64, _Float16 f16,
// CLFINITEONLY-NEXT: [[CF16_REAL:%.*]] = load half, ptr [[CF16]], align 8
// CLFINITEONLY-NEXT: [[CF16_IMAGP:%.*]] = getelementptr inbounds nuw i8, ptr [[CF16]], i64 2
// CLFINITEONLY-NEXT: [[CF16_IMAG:%.*]] = load half, ptr [[CF16_IMAGP]], align 2
-// CLFINITEONLY-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[BYVAL_TEMP]]) #[[ATTR12]]
+// CLFINITEONLY-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[BYVAL_TEMP]]) #[[ATTR12]]
// CLFINITEONLY-NEXT: [[BYVAL_TEMP_IMAGP:%.*]] = getelementptr inbounds nuw i8, ptr [[BYVAL_TEMP]], i64 8
// CLFINITEONLY-NEXT: store double [[CF64_COERCE0]], ptr [[BYVAL_TEMP]], align 8
// CLFINITEONLY-NEXT: store double [[CF64_COERCE1]], ptr [[BYVAL_TEMP_IMAGP]], align 8
// CLFINITEONLY-NEXT: [[COERCE5_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x half> poison, half [[CF16_REAL]], i64 0
// CLFINITEONLY-NEXT: [[COERCE5_SROA_0_2_VEC_INSERT:%.*]] = insertelement <2 x half> [[COERCE5_SROA_0_0_VEC_INSERT]], half [[CF16_IMAG]], i64 1
// CLFINITEONLY-NEXT: [[CALL:%.*]] = tail call nnan ninf nofpclass(nan inf) float (float, ...) [[FPTR]](float noundef nofpclass(nan inf) [[F32]], double noundef nofpclass(nan inf) [[CONV]], double noundef nofpclass(nan inf) [[F64]], half noundef nofpclass(nan inf) [[F16]], double noundef nofpclass(nan inf) [[V2F32_COERCE]], <2 x double> noundef nofpclass(nan inf) [[V2F64]], i32 noundef [[V2F16_COERCE]], <2 x float> noundef nofpclass(nan inf) [[CF32_COERCE]], ptr noundef nonnull byval({ double, double }) align 8 [[BYVAL_TEMP]], <2 x half> noundef nofpclass(nan inf) [[COERCE5_SROA_0_2_VEC_INSERT]]) #[[ATTR11]]
-// CLFINITEONLY-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[BYVAL_TEMP]]) #[[ATTR12]]
+// CLFINITEONLY-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[BYVAL_TEMP]]) #[[ATTR12]]
// CLFINITEONLY-NEXT: ret float [[CALL]]
//
// NONANS: Function Attrs: noinline nounwind optnone
diff --git a/clang/test/CodeGen/target-builtin-noerror.c b/clang/test/CodeGen/target-builtin-noerror.c
index 0bbd8c3..5cf53b2 100644
--- a/clang/test/CodeGen/target-builtin-noerror.c
+++ b/clang/test/CodeGen/target-builtin-noerror.c
@@ -32,15 +32,15 @@ int qq(void) {
// Test that fma and fma4 are both separately and combined valid for an fma intrinsic.
__m128 __attribute__((target("fma"))) fma_1(__m128 a, __m128 b, __m128 c) {
- return __builtin_ia32_vfmaddps(a, b, c);
+ return __builtin_ia32_vfmaddsubps(a, b, c);
}
__m128 __attribute__((target("fma4"))) fma_2(__m128 a, __m128 b, __m128 c) {
- return __builtin_ia32_vfmaddps(a, b, c);
+ return __builtin_ia32_vfmaddsubps(a, b, c);
}
__m128 __attribute__((target("fma,fma4"))) fma_3(__m128 a, __m128 b, __m128 c) {
- return __builtin_ia32_vfmaddps(a, b, c);
+ return __builtin_ia32_vfmaddsubps(a, b, c);
}
void verifyfeaturestrings(void) {
diff --git a/clang/test/CodeGen/temporary-lifetime-exceptions.cpp b/clang/test/CodeGen/temporary-lifetime-exceptions.cpp
index 50e4a0f..45400c2 100644
--- a/clang/test/CodeGen/temporary-lifetime-exceptions.cpp
+++ b/clang/test/CodeGen/temporary-lifetime-exceptions.cpp
@@ -8,15 +8,15 @@ A Baz(const A&);
void Test1() {
// CHECK-LABEL: @_Z5Test1v(
- // CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr nonnull [[TMP:[^ ]+]])
- // CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr nonnull [[TMP1:[^ ]+]])
+ // CHECK: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP:[^ ]+]])
+ // CHECK: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP1:[^ ]+]])
// Normal exit
- // CHECK: call void @llvm.lifetime.end.p0(i64 1, ptr nonnull [[TMP1]])
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr nonnull [[TMP]])
+ // CHECK: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP1]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP]])
// Exception exit
- // CHECK: call void @llvm.lifetime.end.p0(i64 1, ptr nonnull [[TMP1]])
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr nonnull [[TMP]])
+ // CHECK: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP1]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP]])
Baz(Baz(A()));
}
diff --git a/clang/test/CodeGen/temporary-lifetime.cpp b/clang/test/CodeGen/temporary-lifetime.cpp
index 9f085d4..0408729 100644
--- a/clang/test/CodeGen/temporary-lifetime.cpp
+++ b/clang/test/CodeGen/temporary-lifetime.cpp
@@ -21,27 +21,27 @@ T Baz();
void Test1() {
// CHECK-DTOR-LABEL: Test1
- // CHECK-DTOR: call void @llvm.lifetime.start.p0(i64 1024, ptr nonnull %[[ADDR:.+]])
+ // CHECK-DTOR: call void @llvm.lifetime.start.p0(ptr nonnull %[[ADDR:.+]])
// CHECK-DTOR: call void @_ZN1AC1Ev(ptr nonnull {{[^,]*}} %[[VAR:[^ ]+]])
// CHECK-DTOR: call void @_Z3FooIRK1AEvOT_
// CHECK-DTOR: call void @_ZN1AD1Ev(ptr nonnull {{[^,]*}} %[[VAR]])
- // CHECK-DTOR: call void @llvm.lifetime.end.p0(i64 1024, ptr nonnull %[[ADDR]])
- // CHECK-DTOR: call void @llvm.lifetime.start.p0(i64 1024, ptr nonnull %[[ADDR:.+]])
+ // CHECK-DTOR: call void @llvm.lifetime.end.p0(ptr nonnull %[[ADDR]])
+ // CHECK-DTOR: call void @llvm.lifetime.start.p0(ptr nonnull %[[ADDR:.+]])
// CHECK-DTOR: call void @_ZN1AC1Ev(ptr nonnull {{[^,]*}} %[[VAR:[^ ]+]])
// CHECK-DTOR: call void @_Z3FooIRK1AEvOT_
// CHECK-DTOR: call void @_ZN1AD1Ev(ptr nonnull {{[^,]*}} %[[VAR]])
- // CHECK-DTOR: call void @llvm.lifetime.end.p0(i64 1024, ptr nonnull %[[ADDR]])
+ // CHECK-DTOR: call void @llvm.lifetime.end.p0(ptr nonnull %[[ADDR]])
// CHECK-DTOR: }
// CHECK-NO-DTOR-LABEL: Test1
- // CHECK-NO-DTOR: call void @llvm.lifetime.start.p0(i64 1024, ptr nonnull %[[ADDR:.+]])
+ // CHECK-NO-DTOR: call void @llvm.lifetime.start.p0(ptr nonnull %[[ADDR:.+]])
// CHECK-NO-DTOR: call void @_ZN1AC1Ev(ptr nonnull {{[^,]*}} %[[VAR:[^ ]+]])
// CHECK-NO-DTOR: call void @_Z3FooIRK1AEvOT_
- // CHECK-NO-DTOR: call void @llvm.lifetime.end.p0(i64 1024, ptr nonnull %[[ADDR]])
- // CHECK-NO-DTOR: call void @llvm.lifetime.start.p0(i64 1024, ptr nonnull %[[ADDR:.+]])
+ // CHECK-NO-DTOR: call void @llvm.lifetime.end.p0(ptr nonnull %[[ADDR]])
+ // CHECK-NO-DTOR: call void @llvm.lifetime.start.p0(ptr nonnull %[[ADDR:.+]])
// CHECK-NO-DTOR: call void @_ZN1AC1Ev(ptr nonnull {{[^,]*}} %[[VAR:[^ ]+]])
// CHECK-NO-DTOR: call void @_Z3FooIRK1AEvOT_
- // CHECK-NO-DTOR: call void @llvm.lifetime.end.p0(i64 1024, ptr nonnull %[[ADDR]])
+ // CHECK-NO-DTOR: call void @llvm.lifetime.end.p0(ptr nonnull %[[ADDR]])
// CHECK-NO-DTOR: }
{
const A &a = A{};
@@ -55,27 +55,27 @@ void Test1() {
void Test2() {
// CHECK-DTOR-LABEL: Test2
- // CHECK-DTOR: call void @llvm.lifetime.start.p0(i64 1024, ptr nonnull %[[ADDR1:.+]])
+ // CHECK-DTOR: call void @llvm.lifetime.start.p0(ptr nonnull %[[ADDR1:.+]])
// CHECK-DTOR: call void @_ZN1AC1Ev(ptr nonnull {{[^,]*}} %[[VAR1:[^ ]+]])
// CHECK-DTOR: call void @_Z3FooIRK1AEvOT_
- // CHECK-DTOR: call void @llvm.lifetime.start.p0(i64 1024, ptr nonnull %[[ADDR2:.+]])
+ // CHECK-DTOR: call void @llvm.lifetime.start.p0(ptr nonnull %[[ADDR2:.+]])
// CHECK-DTOR: call void @_ZN1AC1Ev(ptr nonnull {{[^,]*}} %[[VAR2:[^ ]+]])
// CHECK-DTOR: call void @_Z3FooIRK1AEvOT_
// CHECK-DTOR: call void @_ZN1AD1Ev(ptr nonnull {{[^,]*}} %[[VAR2]])
- // CHECK-DTOR: call void @llvm.lifetime.end.p0(i64 1024, ptr nonnull %[[ADDR2]])
+ // CHECK-DTOR: call void @llvm.lifetime.end.p0(ptr nonnull %[[ADDR2]])
// CHECK-DTOR: call void @_ZN1AD1Ev(ptr nonnull {{[^,]*}} %[[VAR1]])
- // CHECK-DTOR: call void @llvm.lifetime.end.p0(i64 1024, ptr nonnull %[[ADDR1]])
+ // CHECK-DTOR: call void @llvm.lifetime.end.p0(ptr nonnull %[[ADDR1]])
// CHECK-DTOR: }
// CHECK-NO-DTOR-LABEL: Test2
- // CHECK-NO-DTOR: call void @llvm.lifetime.start.p0(i64 1024, ptr nonnull %[[ADDR1:.+]])
+ // CHECK-NO-DTOR: call void @llvm.lifetime.start.p0(ptr nonnull %[[ADDR1:.+]])
// CHECK-NO-DTOR: call void @_ZN1AC1Ev(ptr nonnull {{[^,]*}} %[[VAR1:[^ ]+]])
// CHECK-NO-DTOR: call void @_Z3FooIRK1AEvOT_
- // CHECK-NO-DTOR: call void @llvm.lifetime.start.p0(i64 1024, ptr nonnull %[[ADDR2:.+]])
+ // CHECK-NO-DTOR: call void @llvm.lifetime.start.p0(ptr nonnull %[[ADDR2:.+]])
// CHECK-NO-DTOR: call void @_ZN1AC1Ev(ptr nonnull {{[^,]*}} %[[VAR2:[^ ]+]])
// CHECK-NO-DTOR: call void @_Z3FooIRK1AEvOT_
- // CHECK-NO-DTOR: call void @llvm.lifetime.end.p0(i64 1024, ptr nonnull %[[ADDR2]])
- // CHECK-NO-DTOR: call void @llvm.lifetime.end.p0(i64 1024, ptr nonnull %[[ADDR1]])
+ // CHECK-NO-DTOR: call void @llvm.lifetime.end.p0(ptr nonnull %[[ADDR2]])
+ // CHECK-NO-DTOR: call void @llvm.lifetime.end.p0(ptr nonnull %[[ADDR1]])
// CHECK-NO-DTOR: }
const A &a = A{};
Foo(a);
@@ -135,16 +135,16 @@ int Test5() {
void Test6() {
// CHECK-DTOR-LABEL: Test6
- // CHECK-DTOR: call void @llvm.lifetime.start.p0(i64 {{[0-9]+}}, ptr nonnull %[[ADDR:.+]])
+ // CHECK-DTOR: call void @llvm.lifetime.start.p0(ptr nonnull %[[ADDR:.+]])
// CHECK-DTOR: call i32 @_Z3BazIiET_v()
// CHECK-DTOR: store
// CHECK-DTOR: call void @_Z3FooIiEvOT_
- // CHECK-DTOR: call void @llvm.lifetime.end.p0(i64 {{[0-9]+}}, ptr nonnull %[[ADDR]])
- // CHECK-DTOR: call void @llvm.lifetime.start.p0(i64 {{[0-9]+}}, ptr nonnull %[[ADDR:.+]])
+ // CHECK-DTOR: call void @llvm.lifetime.end.p0(ptr nonnull %[[ADDR]])
+ // CHECK-DTOR: call void @llvm.lifetime.start.p0(ptr nonnull %[[ADDR:.+]])
// CHECK-DTOR: call i32 @_Z3BazIiET_v()
// CHECK-DTOR: store
// CHECK-DTOR: call void @_Z3FooIiEvOT_
- // CHECK-DTOR: call void @llvm.lifetime.end.p0(i64 {{[0-9]+}}, ptr nonnull %[[ADDR]])
+ // CHECK-DTOR: call void @llvm.lifetime.end.p0(ptr nonnull %[[ADDR]])
// CHECK-DTOR: }
Foo(Baz<int>());
Foo(Baz<int>());
@@ -152,16 +152,16 @@ void Test6() {
void Test7() {
// CHECK-DTOR-LABEL: Test7
- // CHECK-DTOR: call void @llvm.lifetime.start.p0(i64 1024, ptr nonnull %[[ADDR:.+]])
+ // CHECK-DTOR: call void @llvm.lifetime.start.p0(ptr nonnull %[[ADDR:.+]])
// CHECK-DTOR: call void @_Z3BazI1AET_v({{.*}} %[[SLOT:[^ ]+]])
// CHECK-DTOR: call void @_Z3FooI1AEvOT_({{.*}} %[[SLOT]])
// CHECK-DTOR: call void @_ZN1AD1Ev(ptr nonnull {{[^,]*}} %[[SLOT]])
- // CHECK-DTOR: call void @llvm.lifetime.end.p0(i64 1024, ptr nonnull %[[ADDR]])
- // CHECK-DTOR: call void @llvm.lifetime.start.p0(i64 1024, ptr nonnull %[[ADDR:.+]])
+ // CHECK-DTOR: call void @llvm.lifetime.end.p0(ptr nonnull %[[ADDR]])
+ // CHECK-DTOR: call void @llvm.lifetime.start.p0(ptr nonnull %[[ADDR:.+]])
// CHECK-DTOR: call void @_Z3BazI1AET_v({{.*}} %[[SLOT:[^ ]+]])
// CHECK-DTOR: call void @_Z3FooI1AEvOT_({{.*}} %[[SLOT]])
// CHECK-DTOR: call void @_ZN1AD1Ev(ptr nonnull {{[^,]*}} %[[SLOT]])
- // CHECK-DTOR: call void @llvm.lifetime.end.p0(i64 1024, ptr nonnull %[[ADDR]])
+ // CHECK-DTOR: call void @llvm.lifetime.end.p0(ptr nonnull %[[ADDR]])
// CHECK-DTOR: }
Foo(Baz<A>());
Foo(Baz<A>());
diff --git a/clang/test/CodeGen/union-tbaa1.c b/clang/test/CodeGen/union-tbaa1.c
index 1e2f384..3322aaa 100644
--- a/clang/test/CodeGen/union-tbaa1.c
+++ b/clang/test/CodeGen/union-tbaa1.c
@@ -11,7 +11,7 @@ void bar(vect32 p[][2]);
// CHECK-SAME: (i32 noundef [[NUM:%.*]], ptr noundef writeonly captures(none) initializes((0, 8)) [[VEC:%.*]], ptr noundef readonly captures(none) [[INDEX:%.*]], ptr noundef readonly captures(none) [[ARR:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP:%.*]] = alloca [4 x [2 x %union.vect32]], align 8
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[TMP]]) #[[ATTR3:[0-9]+]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP]]) #[[ATTR3:[0-9]+]]
// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[INDEX]], align 4, !tbaa [[TBAA2:![0-9]+]]
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[ARR]], i32 [[TMP0]]
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
@@ -33,7 +33,7 @@ void bar(vect32 p[][2]);
// CHECK-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds nuw i8, ptr [[VEC]], i32 4
// CHECK-NEXT: store i32 [[CONV16]], ptr [[ARRAYIDX17]], align 4, !tbaa [[TBAA2]]
// CHECK-NEXT: call void @bar(ptr noundef nonnull [[TMP]]) #[[ATTR3]]
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[TMP]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP]]) #[[ATTR3]]
// CHECK-NEXT: ret void
//
void fred(unsigned Num, int Vec[2], int *Index, int Arr[4][2]) {
diff --git a/clang/test/CodeGenCXX/amdgcn_declspec_get.cpp b/clang/test/CodeGenCXX/amdgcn_declspec_get.cpp
index 0ce8801..c784e60 100644
--- a/clang/test/CodeGenCXX/amdgcn_declspec_get.cpp
+++ b/clang/test/CodeGenCXX/amdgcn_declspec_get.cpp
@@ -15,10 +15,10 @@ extern const A a;
// CHECK-LABEL: define{{.*}} void @_Z4testv()
// CHECK: %i = alloca i32, align 4, addrspace(5)
// CHECK: %[[ii:.*]] = addrspacecast ptr addrspace(5) %i to ptr
-// CHECK: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) %i)
+// CHECK: call void @llvm.lifetime.start.p5(ptr addrspace(5) %i)
// CHECK: %call = call noundef i32 @_ZN1A6_get_xEv()
// CHECK: store i32 %call, ptr %[[ii]]
-// CHECK: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) %i)
+// CHECK: call void @llvm.lifetime.end.p5(ptr addrspace(5) %i)
void test()
{
int i = a.x;
diff --git a/clang/test/CodeGenCXX/attr-likelihood-iteration-stmt.cpp b/clang/test/CodeGenCXX/attr-likelihood-iteration-stmt.cpp
index fd9786d..151b77a 100644
--- a/clang/test/CodeGenCXX/attr-likelihood-iteration-stmt.cpp
+++ b/clang/test/CodeGenCXX/attr-likelihood-iteration-stmt.cpp
@@ -68,7 +68,7 @@ void w_branch_elided(unsigned e){
// CHECK-NEXT: [[E_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NEXT: store i32 [[E:%.*]], ptr [[E_ADDR]], align 4, !tbaa [[TBAA2]]
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR3:[0-9]+]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR3:[0-9]+]]
// CHECK-NEXT: store i32 0, ptr [[I]], align 4, !tbaa [[TBAA2]]
// CHECK-NEXT: br label [[FOR_COND:%.*]]
// CHECK: for.cond:
@@ -78,7 +78,7 @@ void w_branch_elided(unsigned e){
// CHECK-NEXT: [[CMP_EXPVAL:%.*]] = call i1 @llvm.expect.i1(i1 [[CMP]], i1 true)
// CHECK-NEXT: br i1 [[CMP_EXPVAL]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]]
// CHECK: for.cond.cleanup:
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR3]]
// CHECK-NEXT: br label [[FOR_END:%.*]]
// CHECK: for.body:
// CHECK-NEXT: br label [[FOR_INC:%.*]]
@@ -100,7 +100,7 @@ void fl(unsigned e)
// CHECK-NEXT: [[E_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NEXT: store i32 [[E:%.*]], ptr [[E_ADDR]], align 4, !tbaa [[TBAA2]]
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR3]]
// CHECK-NEXT: store i32 0, ptr [[I]], align 4, !tbaa [[TBAA2]]
// CHECK-NEXT: br label [[FOR_COND:%.*]]
// CHECK: for.cond:
@@ -110,7 +110,7 @@ void fl(unsigned e)
// CHECK-NEXT: [[CMP_EXPVAL:%.*]] = call i1 @llvm.expect.i1(i1 [[CMP]], i1 false)
// CHECK-NEXT: br i1 [[CMP_EXPVAL]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]]
// CHECK: for.cond.cleanup:
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR3]]
// CHECK-NEXT: br label [[FOR_END:%.*]]
// CHECK: for.body:
// CHECK-NEXT: br label [[FOR_INC:%.*]]
@@ -146,14 +146,14 @@ void f_branch_elided()
// CHECK-NEXT: [[__END1:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NEXT: store ptr [[E:%.*]], ptr [[E_ADDR]], align 8, !tbaa [[TBAA14:![0-9]+]]
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[__RANGE1]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[__RANGE1]]) #[[ATTR3]]
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[E_ADDR]], align 8, !tbaa [[TBAA14]]
// CHECK-NEXT: store ptr [[TMP0]], ptr [[__RANGE1]], align 8, !tbaa [[TBAA14]]
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[__BEGIN1]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[__BEGIN1]]) #[[ATTR3]]
// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[__RANGE1]], align 8, !tbaa [[TBAA14]]
// CHECK-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i32], ptr [[TMP1]], i64 0, i64 0
// CHECK-NEXT: store ptr [[ARRAYDECAY]], ptr [[__BEGIN1]], align 8, !tbaa [[TBAA14]]
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[__END1]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[__END1]]) #[[ATTR3]]
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[__RANGE1]], align 8, !tbaa [[TBAA14]]
// CHECK-NEXT: [[ARRAYDECAY1:%.*]] = getelementptr inbounds [4 x i32], ptr [[TMP2]], i64 0, i64 0
// CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAYDECAY1]], i64 4
@@ -166,16 +166,16 @@ void f_branch_elided()
// CHECK-NEXT: [[CMP_EXPVAL:%.*]] = call i1 @llvm.expect.i1(i1 [[CMP]], i1 true)
// CHECK-NEXT: br i1 [[CMP_EXPVAL]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]]
// CHECK: for.cond.cleanup:
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[__END1]]) #[[ATTR3]]
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[__BEGIN1]]) #[[ATTR3]]
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[__RANGE1]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[__END1]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[__BEGIN1]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[__RANGE1]]) #[[ATTR3]]
// CHECK-NEXT: br label [[FOR_END:%.*]]
// CHECK: for.body:
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR3]]
// CHECK-NEXT: [[TMP5:%.*]] = load ptr, ptr [[__BEGIN1]], align 8, !tbaa [[TBAA14]]
// CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4, !tbaa [[TBAA2]]
// CHECK-NEXT: store i32 [[TMP6]], ptr [[I]], align 4, !tbaa [[TBAA2]]
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR3]]
// CHECK-NEXT: br label [[FOR_INC:%.*]]
// CHECK: for.inc:
// CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[__BEGIN1]], align 8, !tbaa [[TBAA14]]
@@ -198,14 +198,14 @@ void frl(int (&&e) [4])
// CHECK-NEXT: [[__END1:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NEXT: store ptr [[E:%.*]], ptr [[E_ADDR]], align 8, !tbaa [[TBAA14]]
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[__RANGE1]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[__RANGE1]]) #[[ATTR3]]
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[E_ADDR]], align 8, !tbaa [[TBAA14]]
// CHECK-NEXT: store ptr [[TMP0]], ptr [[__RANGE1]], align 8, !tbaa [[TBAA14]]
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[__BEGIN1]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[__BEGIN1]]) #[[ATTR3]]
// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[__RANGE1]], align 8, !tbaa [[TBAA14]]
// CHECK-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i32], ptr [[TMP1]], i64 0, i64 0
// CHECK-NEXT: store ptr [[ARRAYDECAY]], ptr [[__BEGIN1]], align 8, !tbaa [[TBAA14]]
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[__END1]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[__END1]]) #[[ATTR3]]
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[__RANGE1]], align 8, !tbaa [[TBAA14]]
// CHECK-NEXT: [[ARRAYDECAY1:%.*]] = getelementptr inbounds [4 x i32], ptr [[TMP2]], i64 0, i64 0
// CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAYDECAY1]], i64 4
@@ -218,16 +218,16 @@ void frl(int (&&e) [4])
// CHECK-NEXT: [[CMP_EXPVAL:%.*]] = call i1 @llvm.expect.i1(i1 [[CMP]], i1 false)
// CHECK-NEXT: br i1 [[CMP_EXPVAL]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]]
// CHECK: for.cond.cleanup:
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[__END1]]) #[[ATTR3]]
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[__BEGIN1]]) #[[ATTR3]]
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[__RANGE1]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[__END1]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[__BEGIN1]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[__RANGE1]]) #[[ATTR3]]
// CHECK-NEXT: br label [[FOR_END:%.*]]
// CHECK: for.body:
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR3]]
// CHECK-NEXT: [[TMP5:%.*]] = load ptr, ptr [[__BEGIN1]], align 8, !tbaa [[TBAA14]]
// CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4, !tbaa [[TBAA2]]
// CHECK-NEXT: store i32 [[TMP6]], ptr [[I]], align 4, !tbaa [[TBAA2]]
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR3]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR3]]
// CHECK-NEXT: br label [[FOR_INC:%.*]]
// CHECK: for.inc:
// CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[__BEGIN1]], align 8, !tbaa [[TBAA14]]
diff --git a/clang/test/CodeGenCXX/destructors.cpp b/clang/test/CodeGenCXX/destructors.cpp
index 99c82ec..1bf8b16 100644
--- a/clang/test/CodeGenCXX/destructors.cpp
+++ b/clang/test/CodeGenCXX/destructors.cpp
@@ -308,7 +308,7 @@ namespace test5 {
// CHECK5: [[ELEMS:%.*]] = alloca [5 x [[A:%.*]]], align
// CHECK5v03-NEXT: [[EXN:%.*]] = alloca ptr
// CHECK5v03-NEXT: [[SEL:%.*]] = alloca i32
- // CHECK5-NEXT: call void @llvm.lifetime.start.p0(i64 5, ptr [[ELEMS]])
+ // CHECK5-NEXT: call void @llvm.lifetime.start.p0(ptr [[ELEMS]])
// CHECK5-NEXT: [[BEGIN:%.*]] = getelementptr inbounds [5 x [[A]]], ptr [[ELEMS]], i32 0, i32 0
// CHECK5-NEXT: [[END:%.*]] = getelementptr inbounds [[A]], ptr [[BEGIN]], i64 5
// CHECK5-NEXT: br label
@@ -482,24 +482,24 @@ namespace test11 {
// CHECK6: {{^}}invoke.cont
// CHECK6: call void @_ZN6test112S1D1Ev(ptr {{[^,]*}} [[T1]])
-// CHECK6: call void @llvm.lifetime.end.p0(i64 32, ptr [[T1]])
+// CHECK6: call void @llvm.lifetime.end.p0(ptr [[T1]])
// CHECK6: {{^}}lpad
// CHECK6: call void @_ZN6test112S1D1Ev(ptr {{[^,]*}} [[T1]])
-// CHECK6: call void @llvm.lifetime.end.p0(i64 32, ptr [[T1]])
+// CHECK6: call void @llvm.lifetime.end.p0(ptr [[T1]])
// CHECK6: {{^}}invoke.cont
// CHECK6: call void @_ZN6test112S1D1Ev(ptr {{[^,]*}} [[T2]])
-// CHECK6: call void @llvm.lifetime.end.p0(i64 32, ptr [[T2]])
+// CHECK6: call void @llvm.lifetime.end.p0(ptr [[T2]])
// CHECK6: {{^}}lpad
// CHECK6: call void @_ZN6test112S1D1Ev(ptr {{[^,]*}} [[T2]])
-// CHECK6: call void @llvm.lifetime.end.p0(i64 32, ptr [[T2]])
+// CHECK6: call void @llvm.lifetime.end.p0(ptr [[T2]])
// CHECK6: {{^}}invoke.cont
// CHECK6: call void @_ZN6test112S1D1Ev(ptr {{[^,]*}} [[T3]])
-// CHECK6: call void @llvm.lifetime.end.p0(i64 32, ptr [[T3]])
+// CHECK6: call void @llvm.lifetime.end.p0(ptr [[T3]])
// CHECK6: {{^}}lpad
// CHECK6: call void @_ZN6test112S1D1Ev(ptr {{[^,]*}} [[T3]])
-// CHECK6: call void @llvm.lifetime.end.p0(i64 32, ptr [[T3]])
+// CHECK6: call void @llvm.lifetime.end.p0(ptr [[T3]])
struct S1 {
~S1();
diff --git a/clang/test/CodeGenCXX/microsoft-abi-eh-cleanups.cpp b/clang/test/CodeGenCXX/microsoft-abi-eh-cleanups.cpp
index 5e2403b..a60781c 100644
--- a/clang/test/CodeGenCXX/microsoft-abi-eh-cleanups.cpp
+++ b/clang/test/CodeGenCXX/microsoft-abi-eh-cleanups.cpp
@@ -277,13 +277,13 @@ void f() {
// WIN32-LIFETIME-LABEL: define dso_local void @"?f@lifetime_marker@@YAXXZ"()
// WIN32-LIFETIME: %[[c:.*]] = alloca %"struct.lifetime_marker::C"
-// WIN32-LIFETIME: call void @llvm.lifetime.start.p0(i64 1, ptr %c)
+// WIN32-LIFETIME: call void @llvm.lifetime.start.p0(ptr %c)
// WIN32-LIFETIME: invoke void @"?g@lifetime_marker@@YAXXZ"()
// WIN32-LIFETIME-NEXT: to label %[[cont:[^ ]*]] unwind label %[[lpad0:[^ ]*]]
//
// WIN32-LIFETIME: [[cont]]
// WIN32-LIFETIME: call x86_thiscallcc void @"??1C@lifetime_marker@@QAE@XZ"({{.*}})
-// WIN32-LIFETIME: call void @llvm.lifetime.end.p0(i64 1, ptr %[[c]])
+// WIN32-LIFETIME: call void @llvm.lifetime.end.p0(ptr %[[c]])
//
// WIN32-LIFETIME: [[lpad0]]
// WIN32-LIFETIME-NEXT: cleanuppad
@@ -292,7 +292,7 @@ void f() {
//
// WIN32-LIFETIME: [[lpad1]]
// WIN32-LIFETIME-NEXT: cleanuppad
-// WIN32-LIFETIME: call void @llvm.lifetime.end.p0(i64 1, ptr %[[c]])
+// WIN32-LIFETIME: call void @llvm.lifetime.end.p0(ptr %[[c]])
}
struct class_2 {
diff --git a/clang/test/CodeGenCXX/modules-vtable.cppm b/clang/test/CodeGenCXX/modules-vtable.cppm
index 6589b9f..75f7598 100644
--- a/clang/test/CodeGenCXX/modules-vtable.cppm
+++ b/clang/test/CodeGenCXX/modules-vtable.cppm
@@ -1,4 +1,4 @@
-// REQUIRES: !system-windows
+// REQUIRES: !system-windows, !system-cygwin
// RUN: rm -rf %t
// RUN: split-file %s %t
diff --git a/clang/test/CodeGenCXX/pr70585.cppm b/clang/test/CodeGenCXX/pr70585.cppm
index ad4e135..d44a4f4 100644
--- a/clang/test/CodeGenCXX/pr70585.cppm
+++ b/clang/test/CodeGenCXX/pr70585.cppm
@@ -1,4 +1,4 @@
-// REQUIRES: !system-windows
+// REQUIRES: !system-windows, !system-cygwin
// RUN: rm -rf %t
// RUN: split-file %s %t
diff --git a/clang/test/CodeGenCXX/stack-reuse-exceptions.cpp b/clang/test/CodeGenCXX/stack-reuse-exceptions.cpp
index e036bca..27123a6 100644
--- a/clang/test/CodeGenCXX/stack-reuse-exceptions.cpp
+++ b/clang/test/CodeGenCXX/stack-reuse-exceptions.cpp
@@ -20,32 +20,32 @@ struct NontrivialDtor {
// CHECK-LABEL: define{{.*}} void @_Z33cleanupsAreEmittedWithoutTryCatchv
void cleanupsAreEmittedWithoutTryCatch() {
-// CHECK: call void @llvm.lifetime.start.p0({{[^,]+}}, ptr nonnull %[[CLEAN:.*]])
-// CHECK: call void @llvm.lifetime.start.p0({{[^,]+}}, ptr nonnull %[[T1:.*]])
+// CHECK: call void @llvm.lifetime.start.p0(ptr nonnull %[[CLEAN:.*]])
+// CHECK: call void @llvm.lifetime.start.p0(ptr nonnull %[[T1:.*]])
// CHECK-NEXT: invoke void @_Z8getLargev
// CHECK-NEXT: to label %[[CONT:[^ ]+]] unwind label %[[LPAD:[^ ]+]]
//
// CHECK: [[CONT]]:
-// CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr nonnull %[[T1]])
-// CHECK: call void @llvm.lifetime.start.p0({{[^,]+}}, ptr nonnull %[[T2:.*]])
+// CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %[[T1]])
+// CHECK: call void @llvm.lifetime.start.p0(ptr nonnull %[[T2:.*]])
// CHECK-NEXT: invoke void @_Z8getLargev
// CHECK-NEXT: to label %[[CONT2:[^ ]+]] unwind label %[[LPAD2:.+]]
//
// CHECK: [[CONT2]]:
-// CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr nonnull %[[T2]])
-// CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr nonnull %[[CLEAN]])
+// CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %[[T2]])
+// CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %[[CLEAN]])
// CHECK: ret void
//
// CHECK: [[LPAD]]:
-// CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr nonnull %[[T1]])
+// CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %[[T1]])
// CHECK: br label %[[EHCLEANUP:.+]]
//
// CHECK: [[LPAD2]]:
-// CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr nonnull %[[T2]])
+// CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %[[T2]])
// CHECK: br label %[[EHCLEANUP]]
//
// CHECK: [[EHCLEANUP]]:
-// CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr nonnull %[[CLEAN]])
+// CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %[[CLEAN]])
NontrivialDtor clean;
@@ -55,27 +55,27 @@ void cleanupsAreEmittedWithoutTryCatch() {
// CHECK-LABEL: define{{.*}} void @_Z30cleanupsAreEmittedWithTryCatchv
void cleanupsAreEmittedWithTryCatch() {
-// CHECK: call void @llvm.lifetime.start.p0({{[^,]+}}, ptr nonnull %[[CLEAN:.*]])
-// CHECK: call void @llvm.lifetime.start.p0({{[^,]+}}, ptr nonnull %[[T1:.*]])
+// CHECK: call void @llvm.lifetime.start.p0(ptr nonnull %[[CLEAN:.*]])
+// CHECK: call void @llvm.lifetime.start.p0(ptr nonnull %[[T1:.*]])
// CHECK-NEXT: invoke void @_Z8getLargev
// CHECK-NEXT: to label %[[CONT:[^ ]+]] unwind label %[[LPAD:[^ ]+]]
//
// CHECK: [[CONT]]:
-// CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr nonnull %[[T1]])
-// CHECK: call void @llvm.lifetime.start.p0({{[^,]+}}, ptr nonnull %[[T2:.*]])
+// CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %[[T1]])
+// CHECK: call void @llvm.lifetime.start.p0(ptr nonnull %[[T2:.*]])
// CHECK-NEXT: invoke void @_Z8getLargev
// CHECK-NEXT: to label %[[CONT2:[^ ]+]] unwind label %[[LPAD2:.+]]
//
// CHECK: [[CONT2]]:
-// CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr nonnull %[[T2]])
+// CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %[[T2]])
// CHECK: br label %[[TRY_CONT:.+]]
//
// CHECK: [[LPAD]]:
-// CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr nonnull %[[T1]])
+// CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %[[T1]])
// CHECK: br label %[[CATCH:.+]]
//
// CHECK: [[LPAD2]]:
-// CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr nonnull %[[T2]])
+// CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %[[T2]])
// CHECK: br label %[[CATCH]]
//
// CHECK: [[CATCH]]:
@@ -84,13 +84,13 @@ void cleanupsAreEmittedWithTryCatch() {
// CHECK-NEXT: to label %[[TRY_CONT]] unwind label %[[OUTER_LPAD:.+]]
//
// CHECK: [[TRY_CONT]]:
-// CHECK: call void @llvm.lifetime.start.p0({{[^,]+}}, ptr nonnull %[[T_OUTER:.*]])
+// CHECK: call void @llvm.lifetime.start.p0(ptr nonnull %[[T_OUTER:.*]])
// CHECK-NEXT: invoke void @_Z8getLargev
// CHECK-NEXT: to label %[[OUTER_CONT:[^ ]+]] unwind label %[[OUTER_LPAD2:.+]]
//
// CHECK: [[OUTER_CONT]]:
-// CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr nonnull %[[T_OUTER]])
-// CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr nonnull %[[CLEAN]])
+// CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %[[T_OUTER]])
+// CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %[[CLEAN]])
// CHECK: ret void
//
// CHECK: [[OUTER_LPAD]]:
@@ -98,11 +98,11 @@ void cleanupsAreEmittedWithTryCatch() {
// CHECK: br label %[[EHCLEANUP:.+]]
//
// CHECK: [[OUTER_LPAD2]]:
-// CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr nonnull %[[T_OUTER]])
+// CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %[[T_OUTER]])
// CHECK: br label %[[EHCLEANUP]]
//
// CHECK: [[EHCLEANUP]]:
-// CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr nonnull %[[CLEAN]])
+// CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %[[CLEAN]])
NontrivialDtor clean;
@@ -116,44 +116,44 @@ void cleanupsAreEmittedWithTryCatch() {
// CHECK-LABEL: define{{.*}} void @_Z39cleanupInTryHappensBeforeCleanupInCatchv
void cleanupInTryHappensBeforeCleanupInCatch() {
-// CHECK: call void @llvm.lifetime.start.p0({{[^,]+}}, ptr nonnull %[[T1:.*]])
+// CHECK: call void @llvm.lifetime.start.p0(ptr nonnull %[[T1:.*]])
// CHECK-NEXT: invoke void @_Z8getLargev
// CHECK-NEXT: to label %[[CONT:[^ ]+]] unwind label %[[LPAD:[^ ]+]]
//
// CHECK: [[CONT]]:
-// CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr nonnull %[[T1]])
+// CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %[[T1]])
// CHECK: br label %[[TRY_CONT]]
//
// CHECK: [[LPAD]]:
-// CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr nonnull %[[T1]])
+// CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %[[T1]])
// CHECK: br i1 {{[^,]+}}, label %[[CATCH_INT_MATCH:[^,]+]], label %[[CATCH_ALL:.+]]
//
// CHECK: [[CATCH_INT_MATCH]]:
-// CHECK: call void @llvm.lifetime.start.p0({{[^,]+}}, ptr nonnull %[[T2:.*]])
+// CHECK: call void @llvm.lifetime.start.p0(ptr nonnull %[[T2:.*]])
// CHECK-NEXT: invoke void @_Z8getLargev
// CHECK-NEXT: to label %[[CATCH_INT_CONT:[^ ]+]] unwind label %[[CATCH_INT_LPAD:[^ ]+]]
//
// CHECK: [[CATCH_INT_CONT]]:
-// CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr nonnull %[[T2]])
+// CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %[[T2]])
// CHECK: br label %[[TRY_CONT]]
//
// CHECK: [[TRY_CONT]]:
// CHECK: ret void
//
// CHECK: [[CATCH_ALL]]:
-// CHECK: call void @llvm.lifetime.start.p0({{[^,]+}}, ptr nonnull %[[T3:.*]])
+// CHECK: call void @llvm.lifetime.start.p0(ptr nonnull %[[T3:.*]])
// CHECK-NEXT: invoke void @_Z8getLargev
// CHECK-NEXT: to label %[[CATCH_ALL_CONT:[^ ]+]] unwind label %[[CATCH_ALL_LPAD:[^ ]+]]
//
// CHECK: [[CATCH_ALL_CONT]]:
-// CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr nonnull %[[T3]])
+// CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %[[T3]])
// CHECK: br label %[[TRY_CONT]]
//
// CHECK: [[CATCH_ALL_LPAD]]:
-// CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr nonnull %[[T3]])
+// CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %[[T3]])
//
// CHECK: [[CATCH_INT_LPAD]]:
-// CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr nonnull %[[T2]])
+// CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %[[T2]])
// CHECK-NOT: call void @llvm.lifetime
try {
diff --git a/clang/test/CodeGenCXX/stack-reuse-miscompile.cpp b/clang/test/CodeGenCXX/stack-reuse-miscompile.cpp
index 50a8d16..67fa9f9 100644
--- a/clang/test/CodeGenCXX/stack-reuse-miscompile.cpp
+++ b/clang/test/CodeGenCXX/stack-reuse-miscompile.cpp
@@ -28,12 +28,12 @@ const char * f(S s)
//
// FIXME: We could defer starting the lifetime of the return object of concat
// until the call.
-// CHECK: call void @llvm.lifetime.start.p0(i64 16, ptr [[T1]])
+// CHECK: call void @llvm.lifetime.start.p0(ptr [[T1]])
//
-// CHECK: call void @llvm.lifetime.start.p0(i64 16, ptr [[T2]])
+// CHECK: call void @llvm.lifetime.start.p0(ptr [[T2]])
// CHECK: [[T4:%.*]] = call noundef ptr @_ZN1TC1EPKc(ptr {{[^,]*}} [[T2]], ptr noundef @.str)
//
-// CHECK: call void @llvm.lifetime.start.p0(i64 16, ptr [[T3]])
+// CHECK: call void @llvm.lifetime.start.p0(ptr [[T3]])
// CHECK: [[T5:%.*]] = call noundef ptr @_ZN1TC1E1S(ptr {{[^,]*}} [[T3]], [2 x i32] %{{.*}})
//
// CHECK: call void @_ZNK1T6concatERKS_(ptr dead_on_unwind writable sret(%class.T) align 4 [[T1]], ptr {{[^,]*}} [[T2]], ptr noundef nonnull align 4 dereferenceable(16) [[T3]])
diff --git a/clang/test/CodeGenObjC/arc-blocks.m b/clang/test/CodeGenObjC/arc-blocks.m
index 72bf35c..605dda7 100644
--- a/clang/test/CodeGenObjC/arc-blocks.m
+++ b/clang/test/CodeGenObjC/arc-blocks.m
@@ -73,7 +73,7 @@ void test3(void (^sink)(id*)) {
// CHECK-NEXT: [[TEMP:%.*]] = alloca ptr
// CHECK-NEXT: call ptr @llvm.objc.retain(
// CHECK-NEXT: store ptr {{%.*}}, ptr [[SINK]]
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[STRONG]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[STRONG]])
// CHECK-NEXT: store ptr null, ptr [[STRONG]]
// CHECK-NEXT: [[BLOCK:%.*]] = load ptr, ptr [[SINK]]
@@ -91,7 +91,7 @@ void test3(void (^sink)(id*)) {
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr [[STRONG]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T0]])
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[STRONG]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[STRONG]])
// CHECK-NEXT: load ptr, ptr [[SINK]]
// CHECK-NEXT: call void @llvm.objc.release
@@ -161,7 +161,7 @@ void test5(void) {
// CHECK-LABEL: define{{.*}} void @test5()
// CHECK: [[VAR:%.*]] = alloca ptr
// CHECK-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:<{.*}>]],
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[VAR]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VAR]])
// CHECK: [[T1:%.*]] = call ptr @test5_source() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.noop.use(ptr [[T1]])
// CHECK-NEXT: store ptr [[T1]], ptr [[VAR]],
@@ -172,7 +172,7 @@ void test5(void) {
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr [[VAR]]
// CHECK-NEXT: store ptr [[T0]], ptr [[CAPTURE]]
// CHECK: call void @test5_helper
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[VAR]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VAR]])
// CHECK-NEXT: ret void
}
@@ -185,7 +185,7 @@ void test6(void) {
// CHECK-LABEL: define{{.*}} void @test6()
// CHECK: [[VAR:%.*]] = alloca [[BYREF_T:%.*]],
// CHECK-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:<{.*}>]],
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 48, ptr [[VAR]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VAR]])
// CHECK: [[T0:%.*]] = getelementptr inbounds nuw [[BYREF_T]], ptr [[VAR]], i32 0, i32 2
// 0x02000000 - has copy/dispose helpers weak
// CHECK-NEXT: store i32 1107296256, ptr [[T0]]
@@ -203,7 +203,7 @@ void test6(void) {
// CHECK: call void @test6_helper(
// CHECK: call void @_Block_object_dispose(ptr [[VAR]], i32 8)
// CHECK-NEXT: call void @llvm.objc.destroyWeak(ptr [[SLOT]])
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 48, ptr [[VAR]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VAR]])
// CHECK-NEXT: ret void
// CHECK-LABEL: define internal void @__Block_byref_object_copy_.{{[0-9]+}}(ptr noundef %0, ptr noundef %1) #{{[0-9]+}} {
@@ -449,7 +449,7 @@ void test13(id x) {
// CHECK-NEXT: [[CLEANUP_ACTIVE:%.*]] = alloca i1
// CHECK-NEXT: [[T0:%.*]] = call ptr @llvm.objc.retain(ptr {{%.*}})
// CHECK-NEXT: store ptr [[T0]], ptr [[X]], align 8
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[B]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B]])
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr [[X]], align 8
// CHECK-NEXT: [[T1:%.*]] = icmp ne ptr [[T0]], null
// CHECK-NEXT: store i1 false, ptr [[CLEANUP_ACTIVE]]
@@ -479,7 +479,7 @@ void test13(id x) {
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T0]])
// CHECK-NEXT: br label
- // CHECK: call void @llvm.lifetime.end.p0(i64 8, ptr [[B]])
+ // CHECK: call void @llvm.lifetime.end.p0(ptr [[B]])
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr [[X]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T0]])
// CHECK-NEXT: ret void
@@ -501,7 +501,7 @@ void test16(void) {
// CHECK-LABEL: define{{.*}} void @test16(
// CHECK: [[BLKVAR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:<{.*}>]],
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[BLKVAR]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[BLKVAR]])
// CHECK-NEXT: store ptr null, ptr [[BLKVAR]], align 8
}
diff --git a/clang/test/CodeGenObjC/arc-precise-lifetime.m b/clang/test/CodeGenObjC/arc-precise-lifetime.m
index 473c0b0..ac761ba 100644
--- a/clang/test/CodeGenObjC/arc-precise-lifetime.m
+++ b/clang/test/CodeGenObjC/arc-precise-lifetime.m
@@ -7,7 +7,7 @@ void test0(void) {
PRECISE_LIFETIME id x = test0_helper();
x = 0;
// CHECK: [[X:%.*]] = alloca ptr
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
// CHECK-NEXT: [[CALL:%.*]] = call ptr @test0_helper()
// CHECK-NEXT: store ptr [[CALL]], ptr [[X]]
@@ -20,7 +20,7 @@ void test0(void) {
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T1]]) [[NUW:#[0-9]+]]
// CHECK-NOT: clang.imprecise_release
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]])
// CHECK-NEXT: ret void
}
@@ -37,20 +37,20 @@ extern Test1 *test1_helper(void);
void test1a_message(void) {
// CHECK: [[PTR:%.*]] = alloca ptr, align 8
// CHECK: [[C:%.*]] = alloca ptr, align 8
- // CHECK: call void @llvm.lifetime.start.p0(i64 8, ptr [[PTR]])
+ // CHECK: call void @llvm.lifetime.start.p0(ptr [[PTR]])
// CHECK: [[T0:%.*]] = call ptr @test1_helper() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.noop.use(ptr [[T0]])
// CHECK-NEXT: store ptr [[T0]]
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[C]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[C]])
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr
// CHECK-NEXT: [[T2:%.*]] = call ptr @llvm.objc.retainAutorelease(ptr [[T0]])
// CHECK-NEXT: [[T4:%.*]] = load ptr, ptr @OBJC_SELECTOR_REFERENCES_
// CHECK-NEXT: [[T6:%.*]] = call ptr
// CHECK-NEXT: store ptr [[T6]], ptr
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[C]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[C]])
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T0]]) [[NUW]], !clang.imprecise_release
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[PTR]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PTR]])
// CHECK-NEXT: ret void
Test1 *ptr = test1_helper();
char *c = [(ptr) interior];
@@ -61,20 +61,20 @@ void test1a_message(void) {
void test1a_property(void) {
// CHECK: [[PTR:%.*]] = alloca ptr, align 8
// CHECK: [[C:%.*]] = alloca ptr, align 8
- // CHECK: call void @llvm.lifetime.start.p0(i64 8, ptr [[PTR]])
+ // CHECK: call void @llvm.lifetime.start.p0(ptr [[PTR]])
// CHECK: [[T0:%.*]] = call ptr @test1_helper() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.noop.use(ptr [[T0]])
// CHECK-NEXT: store ptr [[T0]]
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[C]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[C]])
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr
// CHECK-NEXT: [[T2:%.*]] = call ptr @llvm.objc.retainAutorelease(ptr [[T0]])
// CHECK-NEXT: [[T4:%.*]] = load ptr, ptr @OBJC_SELECTOR_REFERENCES_
// CHECK-NEXT: [[T6:%.*]] = call ptr
// CHECK-NEXT: store ptr [[T6]], ptr
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[C]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[C]])
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T0]]) [[NUW]], !clang.imprecise_release
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[PTR]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PTR]])
// CHECK-NEXT: ret void
Test1 *ptr = test1_helper();
char *c = ptr.interior;
@@ -85,20 +85,20 @@ void test1a_property(void) {
void test1b_message(void) {
// CHECK: [[PTR:%.*]] = alloca ptr, align 8
// CHECK: [[C:%.*]] = alloca ptr, align 8
- // CHECK: call void @llvm.lifetime.start.p0(i64 8, ptr [[PTR]])
+ // CHECK: call void @llvm.lifetime.start.p0(ptr [[PTR]])
// CHECK: [[T0:%.*]] = call ptr @test1_helper() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.noop.use(ptr [[T0]])
// CHECK-NEXT: store ptr [[T0]]
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[C]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[C]])
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr
// CHECK-NEXT: [[T1:%.*]] = load ptr, ptr @OBJC_SELECTOR_REFERENCES_
// CHECK-NEXT: [[T3:%.*]] = call ptr
// CHECK-NEXT: store ptr [[T3]], ptr
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[C]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[C]])
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T0]]) [[NUW]]
// CHECK-NOT: clang.imprecise_release
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[PTR]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PTR]])
// CHECK-NEXT: ret void
PRECISE_LIFETIME Test1 *ptr = test1_helper();
char *c = [ptr interior];
@@ -108,20 +108,20 @@ void test1b_message(void) {
void test1b_property(void) {
// CHECK: [[PTR:%.*]] = alloca ptr, align 8
// CHECK: [[C:%.*]] = alloca ptr, align 8
- // CHECK: call void @llvm.lifetime.start.p0(i64 8, ptr [[PTR]])
+ // CHECK: call void @llvm.lifetime.start.p0(ptr [[PTR]])
// CHECK: [[T0:%.*]] = call ptr @test1_helper() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.noop.use(ptr [[T0]])
// CHECK-NEXT: store ptr [[T0]]
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[C]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[C]])
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr
// CHECK-NEXT: [[T1:%.*]] = load ptr, ptr @OBJC_SELECTOR_REFERENCES_
// CHECK-NEXT: [[T3:%.*]] = call ptr
// CHECK-NEXT: store ptr [[T3]], ptr
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[C]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[C]])
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T0]]) [[NUW]]
// CHECK-NOT: clang.imprecise_release
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[PTR]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PTR]])
// CHECK-NEXT: ret void
PRECISE_LIFETIME Test1 *ptr = test1_helper();
char *c = ptr.interior;
@@ -131,20 +131,20 @@ void test1b_property(void) {
void test1c_message(void) {
// CHECK: [[PTR:%.*]] = alloca ptr, align 8
// CHECK: [[PC:%.*]] = alloca ptr, align 8
- // CHECK: call void @llvm.lifetime.start.p0(i64 8, ptr [[PTR]])
+ // CHECK: call void @llvm.lifetime.start.p0(ptr [[PTR]])
// CHECK: [[T0:%.*]] = call ptr @test1_helper() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.noop.use(ptr [[T0]])
// CHECK-NEXT: store ptr [[T0]]
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[PC]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PC]])
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr
// CHECK-NEXT: [[T2:%.*]] = call ptr @llvm.objc.retainAutorelease(ptr [[T0]])
// CHECK-NEXT: [[T4:%.*]] = load ptr, ptr @OBJC_SELECTOR_REFERENCES_
// CHECK-NEXT: [[T6:%.*]] = call ptr
// CHECK-NEXT: store ptr [[T6]], ptr
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[PC]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PC]])
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T0]]) [[NUW]], !clang.imprecise_release
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[PTR]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PTR]])
// CHECK-NEXT: ret void
Test1 *ptr = test1_helper();
char *pc = [ptr PropertyReturnsInnerPointer];
@@ -154,20 +154,20 @@ void test1c_message(void) {
void test1c_property(void) {
// CHECK: [[PTR:%.*]] = alloca ptr, align 8
// CHECK: [[PC:%.*]] = alloca ptr, align 8
- // CHECK: call void @llvm.lifetime.start.p0(i64 8, ptr [[PTR]])
+ // CHECK: call void @llvm.lifetime.start.p0(ptr [[PTR]])
// CHECK: [[T0:%.*]] = call ptr @test1_helper() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.noop.use(ptr [[T0]])
// CHECK-NEXT: store ptr [[T0]]
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[PC]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PC]])
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr
// CHECK-NEXT: [[T2:%.*]] = call ptr @llvm.objc.retainAutorelease(ptr [[T0]])
// CHECK-NEXT: [[T4:%.*]] = load ptr, ptr @OBJC_SELECTOR_REFERENCES_
// CHECK-NEXT: [[T6:%.*]] = call ptr
// CHECK-NEXT: store ptr [[T6]], ptr
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[PC]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PC]])
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T0]]) [[NUW]], !clang.imprecise_release
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[PTR]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PTR]])
// CHECK-NEXT: ret void
Test1 *ptr = test1_helper();
char *pc = ptr.PropertyReturnsInnerPointer;
@@ -177,19 +177,19 @@ void test1c_property(void) {
void test1d_message(void) {
// CHECK: [[PTR:%.*]] = alloca ptr, align 8
// CHECK: [[PC:%.*]] = alloca ptr, align 8
- // CHECK: call void @llvm.lifetime.start.p0(i64 8, ptr [[PTR]])
+ // CHECK: call void @llvm.lifetime.start.p0(ptr [[PTR]])
// CHECK: [[T0:%.*]] = call ptr @test1_helper() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.noop.use(ptr [[T0]])
// CHECK-NEXT: store ptr [[T0]]
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[PC]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PC]])
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr
// CHECK-NEXT: [[SEL:%.*]] = load ptr, ptr @OBJC_SELECTOR_REFERENCES_
// CHECK-NEXT: [[CALL1:%.*]] = call ptr @objc_msgSend(ptr noundef [[T0]], ptr noundef [[SEL]])
// CHECK-NEXT: store ptr [[CALL1]], ptr
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[PC]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PC]])
// CHECK-NEXT: [[NINE:%.*]] = load ptr, ptr
// CHECK-NEXT: call void @llvm.objc.release(ptr [[NINE]])
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[PTR]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PTR]])
// CHECK-NEXT: ret void
PRECISE_LIFETIME Test1 *ptr = test1_helper();
char *pc = [ptr PropertyReturnsInnerPointer];
@@ -199,19 +199,19 @@ void test1d_message(void) {
void test1d_property(void) {
// CHECK: [[PTR:%.*]] = alloca ptr, align 8
// CHECK: [[PC:%.*]] = alloca ptr, align 8
- // CHECK: call void @llvm.lifetime.start.p0(i64 8, ptr [[PTR]])
+ // CHECK: call void @llvm.lifetime.start.p0(ptr [[PTR]])
// CHECK: [[T0:%.*]] = call ptr @test1_helper() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.noop.use(ptr [[T0]])
// CHECK-NEXT: store ptr [[T0]]
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[PC]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PC]])
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr
// CHECK-NEXT: [[SEL:%.*]] = load ptr, ptr @OBJC_SELECTOR_REFERENCES_
// CHECK-NEXT: [[CALL1:%.*]] = call ptr @objc_msgSend(ptr noundef [[T0]], ptr noundef [[SEL]])
// CHECK-NEXT: store ptr [[CALL1]], ptr
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[PC]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PC]])
// CHECK-NEXT: [[NINE:%.*]] = load ptr, ptr
// CHECK-NEXT: call void @llvm.objc.release(ptr [[NINE]])
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[PTR]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PTR]])
// CHECK-NEXT: ret void
PRECISE_LIFETIME Test1 *ptr = test1_helper();
char *pc = ptr.PropertyReturnsInnerPointer;
diff --git a/clang/test/CodeGenObjC/arc-ternary-op.m b/clang/test/CodeGenObjC/arc-ternary-op.m
index 4a3c00c..46529b4 100644
--- a/clang/test/CodeGenObjC/arc-ternary-op.m
+++ b/clang/test/CodeGenObjC/arc-ternary-op.m
@@ -12,7 +12,7 @@ void test0(_Bool cond) {
// CHECK-NEXT: [[RELCOND:%.*]] = alloca i1
// CHECK-NEXT: zext
// CHECK-NEXT: store
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
// CHECK-NEXT: [[T0:%.*]] = load i8, ptr [[COND]]
// CHECK-NEXT: [[T1:%.*]] = trunc i8 [[T0]] to i1
// CHECK-NEXT: store i1 false, ptr [[RELCOND]]
@@ -32,7 +32,7 @@ void test0(_Bool cond) {
// CHECK-NEXT: br label
// CHECK: [[T0:%.*]] = load ptr, ptr [[X]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T0]]) [[NUW]]
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]])
// CHECK-NEXT: ret void
id x = (cond ? 0 : test0_helper());
}
@@ -53,9 +53,9 @@ void test1(int cond) {
// CHECK-NEXT: [[CONDCLEANUPSAVE:%.*]] = alloca ptr
// CHECK-NEXT: [[CONDCLEANUP:%.*]] = alloca i1
// CHECK-NEXT: store i32
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[STRONG]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[STRONG]])
// CHECK-NEXT: store ptr null, ptr [[STRONG]]
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[WEAK]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[WEAK]])
// CHECK-NEXT: call ptr @llvm.objc.initWeak(ptr [[WEAK]], ptr null)
// CHECK-NEXT: [[T0:%.*]] = load i32, ptr [[COND]]
@@ -99,8 +99,8 @@ void test1(int cond) {
// CHECK-NEXT: br label
// CHECK: call void @llvm.objc.destroyWeak(ptr [[WEAK]])
- // CHECK: call void @llvm.lifetime.end.p0(i64 8, ptr [[WEAK]])
- // CHECK: call void @llvm.lifetime.end.p0(i64 8, ptr [[STRONG]])
+ // CHECK: call void @llvm.lifetime.end.p0(ptr [[WEAK]])
+ // CHECK: call void @llvm.lifetime.end.p0(ptr [[STRONG]])
// CHECK: ret void
}
diff --git a/clang/test/CodeGenObjC/arc.m b/clang/test/CodeGenObjC/arc.m
index 57afe9c..2d860c1 100644
--- a/clang/test/CodeGenObjC/arc.m
+++ b/clang/test/CodeGenObjC/arc.m
@@ -48,13 +48,13 @@ id test1(id x) {
// CHECK-NEXT: [[Y:%.*]] = alloca ptr
// CHECK-NEXT: [[PARM:%.*]] = call ptr @llvm.objc.retain(ptr {{%.*}})
// CHECK-NEXT: store ptr [[PARM]], ptr [[X]]
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[Y]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[Y]])
// CHECK-NEXT: store ptr null, ptr [[Y]]
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr [[Y]]
// CHECK-NEXT: [[RET:%.*]] = call ptr @llvm.objc.retain(ptr [[T0]])
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr [[Y]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T0]])
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[Y]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[Y]])
// CHECK-NEXT: [[T1:%.*]] = load ptr, ptr [[X]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T1]])
// CHECK-NEXT: [[T1:%.*]] = tail call ptr @llvm.objc.autoreleaseReturnValue(ptr [[RET]])
@@ -99,7 +99,7 @@ void test3_unelided(void) {
extern void test3_helper(void);
// CHECK: [[X:%.*]] = alloca ptr
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
// CHECK-NEXT: store ptr null, ptr [[X]], align
Test3 *x;
@@ -118,14 +118,14 @@ void test3_unelided(void) {
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr [[X]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T0]]) [[NUW]]
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]])
// CHECK-NEXT: ret void
}
// CHECK-LABEL: define{{.*}} void @test3()
void test3(void) {
// CHECK: [[X:%.*]] = alloca ptr
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
id x = [[Test3 alloc] initWith: 5];
@@ -155,7 +155,7 @@ void test3(void) {
// Cleanup for x.
// CHECK-NEXT: [[TMP:%.*]] = load ptr, ptr [[X]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[TMP]]) [[NUW]]
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]])
// CHECK-NEXT: ret void
}
@@ -225,12 +225,12 @@ id test6_helper(void) __attribute__((ns_returns_retained));
// CHECK-LABEL: define{{.*}} void @test6()
void test6(void) {
// CHECK: [[X:%.*]] = alloca ptr
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
// CHECK-NEXT: [[CALL:%.*]] = call ptr @test6_helper()
// CHECK-NEXT: store ptr [[CALL]], ptr [[X]]
// CHECK-NEXT: [[T1:%.*]] = load ptr, ptr [[X]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T1]]) [[NUW]], !clang.imprecise_release
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]])
// CHECK-NEXT: ret void
id x = test6_helper();
}
@@ -239,14 +239,14 @@ void test7_helper(id __attribute__((ns_consumed)));
// CHECK-LABEL: define{{.*}} void @test7()
void test7(void) {
// CHECK: [[X:%.*]] = alloca ptr
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
// CHECK-NEXT: store ptr null, ptr [[X]]
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr [[X]]
// CHECK-NEXT: [[T1:%.*]] = call ptr @llvm.objc.retain(ptr [[T0]]) [[NUW]]
// CHECK-NEXT: call void @test7_helper(ptr noundef [[T1]])
// CHECK-NEXT: [[T1:%.*]] = load ptr, ptr [[X]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T1]]) [[NUW]], !clang.imprecise_release
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]])
// CHECK-NEXT: ret void
id x;
test7_helper(x);
@@ -256,11 +256,11 @@ id test8_helper(void) __attribute__((ns_returns_retained));
void test8(void) {
__unsafe_unretained id x = test8_helper();
// CHECK: [[X:%.*]] = alloca ptr
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
// CHECK-NEXT: [[T0:%.*]] = call ptr @test8_helper()
// CHECK-NEXT: store ptr [[T0]], ptr [[X]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T0]]) [[NUW]], !clang.imprecise_release
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]])
// CHECK-NEXT: ret void
}
@@ -274,9 +274,9 @@ void test10(void) {
// CHECK-LABEL: define{{.*}} void @test10()
// CHECK: [[X:%.*]] = alloca ptr, align
// CHECK-NEXT: [[Y:%.*]] = alloca ptr, align
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
// CHECK-NEXT: store ptr null, ptr [[X]]
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[Y]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[Y]])
// CHECK-NEXT: load ptr, ptr [[X]], align
// CHECK-NEXT: load ptr, ptr @OBJC_SELECTOR_REFERENCES_{{[0-9]*}}
// CHECK-NEXT: [[V:%.*]] = call ptr @objc_msgSend{{.*}} [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
@@ -288,10 +288,10 @@ void test10(void) {
// CHECK-NEXT: call void @llvm.objc.release(ptr [[V]])
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr [[Y]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T0]])
- // CHECK-NEXT: void @llvm.lifetime.end.p0(i64 8, ptr [[Y]])
+ // CHECK-NEXT: void @llvm.lifetime.end.p0(ptr [[Y]])
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr [[X]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T0]])
- // CHECK-NEXT: void @llvm.lifetime.end.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: void @llvm.lifetime.end.p0(ptr [[X]])
// CHECK-NEXT: ret void
}
@@ -300,13 +300,13 @@ void test11(id (*f)(void) __attribute__((ns_returns_retained))) {
// CHECK: [[F:%.*]] = alloca ptr, align
// CHECK-NEXT: [[X:%.*]] = alloca ptr, align
// CHECK-NEXT: store ptr {{%.*}}, ptr [[F]], align
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr [[F]], align
// CHECK-NEXT: [[T1:%.*]] = call ptr [[T0]]()
// CHECK-NEXT: store ptr [[T1]], ptr [[X]], align
// CHECK-NEXT: [[T3:%.*]] = load ptr, ptr [[X]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T3]]) [[NUW]], !clang.imprecise_release
- // CHECK-NEXT: void @llvm.lifetime.end.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: void @llvm.lifetime.end.p0(ptr [[X]])
// CHECK-NEXT: ret void
id x = f();
}
@@ -319,7 +319,7 @@ void test12(void) {
// CHECK-NEXT: [[Y:%.*]] = alloca ptr, align
__weak id x = test12_helper();
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
// CHECK-NEXT: [[T1:%.*]] = call ptr @test12_helper(){{.*}} [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.noop.use(ptr [[T1]])
// CHECK-NEXT: call ptr @llvm.objc.initWeak(ptr [[X]], ptr [[T1]])
@@ -332,15 +332,15 @@ void test12(void) {
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T1]])
id y = x;
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[Y]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[Y]])
// CHECK-NEXT: [[T2:%.*]] = call ptr @llvm.objc.loadWeakRetained(ptr [[X]])
// CHECK-NEXT: store ptr [[T2]], ptr [[Y]], align
// CHECK-NEXT: [[T4:%.*]] = load ptr, ptr [[Y]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T4]]) [[NUW]], !clang.imprecise_release
- // CHECK-NEXT: void @llvm.lifetime.end.p0(i64 8, ptr [[Y]])
+ // CHECK-NEXT: void @llvm.lifetime.end.p0(ptr [[Y]])
// CHECK-NEXT: call void @llvm.objc.destroyWeak(ptr [[X]])
- // CHECK-NEXT: void @llvm.lifetime.end.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: void @llvm.lifetime.end.p0(ptr [[X]])
// CHECK: ret void
}
@@ -348,7 +348,7 @@ void test12(void) {
void test13(void) {
// CHECK-LABEL: define{{.*}} void @test13()
// CHECK: [[X:%.*]] = alloca ptr, align
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
// CHECK-NEXT: store ptr null, ptr [[X]], align
id x;
@@ -371,7 +371,7 @@ void test13(void) {
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr [[X]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T0]]) [[NUW]]
- // CHECK-NEXT: void @llvm.lifetime.end.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: void @llvm.lifetime.end.p0(ptr [[X]])
// CHECK-NEXT: ret void
}
@@ -886,7 +886,7 @@ void test37(void) {
// CHECK-LABEL: define{{.*}} void @test37()
// CHECK: [[VAR:%.*]] = alloca ptr,
// CHECK-NEXT: [[TEMP:%.*]] = alloca ptr
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[VAR]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VAR]])
// CHECK-NEXT: store ptr null, ptr [[VAR]]
// CHECK-NEXT: [[W0:%.*]] = load ptr, ptr [[VAR]]
@@ -901,7 +901,7 @@ void test37(void) {
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr [[VAR]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T0]])
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[VAR]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VAR]])
// CHECK-NEXT: ret void
}
@@ -956,7 +956,7 @@ void test47(void) {
// CHECK-LABEL: define{{.*}} void @test47()
// CHECK: [[X:%.*]] = alloca ptr
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
// CHECK-NEXT: store ptr null, ptr [[X]]
// CHECK-NEXT: [[T0:%.*]] = call ptr @test47_helper(){{.*}} [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.noop.use(ptr [[T0]])
@@ -969,7 +969,7 @@ void test47(void) {
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T3]])
// CHECK-NEXT: [[T4:%.*]] = load ptr, ptr [[X]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T4]])
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]])
// CHECK-NEXT: ret void
}
@@ -978,7 +978,7 @@ void test48(void) {
__weak id x = x = test48_helper();
// CHECK-LABEL: define{{.*}} void @test48()
// CHECK: [[X:%.*]] = alloca ptr
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
// CHECK-NEXT: [[T0:%.*]] = call ptr @llvm.objc.initWeak(ptr [[X]], ptr null)
// CHECK-NEXT: [[T2:%.*]] = call ptr @test48_helper(){{.*}} [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.noop.use(ptr [[T2]])
@@ -986,7 +986,7 @@ void test48(void) {
// CHECK-NEXT: [[T4:%.*]] = call ptr @llvm.objc.storeWeak(ptr [[X]], ptr [[T3]])
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T2]])
// CHECK-NEXT: call void @llvm.objc.destroyWeak(ptr [[X]])
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]])
// CHECK-NEXT: ret void
}
@@ -995,7 +995,7 @@ void test49(void) {
__autoreleasing id x = x = test49_helper();
// CHECK-LABEL: define{{.*}} void @test49()
// CHECK: [[X:%.*]] = alloca ptr
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
// CHECK-NEXT: store ptr null, ptr [[X]]
// CHECK-NEXT: [[T0:%.*]] = call ptr @test49_helper(){{.*}} [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.noop.use(ptr [[T0]])
@@ -1003,7 +1003,7 @@ void test49(void) {
// CHECK-NEXT: store ptr [[T1]], ptr [[X]]
// CHECK-NEXT: [[T3:%.*]] = call ptr @llvm.objc.retainAutorelease(ptr [[T1]])
// CHECK-NEXT: store ptr [[T3]], ptr [[X]]
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]])
// CHECK-NEXT: ret void
}
@@ -1035,12 +1035,12 @@ id test52(void) {
// CHECK-LABEL: define{{.*}} ptr @test52()
// CHECK: [[X:%.*]] = alloca i32
// CHECK-NEXT: [[TMPALLOCA:%.*]] = alloca ptr
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[X]])
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
// CHECK-NEXT: store i32 5, ptr [[X]],
// CHECK-NEXT: [[T0:%.*]] = load i32, ptr [[X]],
// CHECK-NEXT: [[T1:%.*]] = call ptr @test52_helper(i32 noundef [[T0]])
// CHECK-NEXT: store ptr [[T1]], ptr [[TMPALLOCA]]
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[X]])
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]])
// CHECK-NEXT: [[T2:%.*]] = load ptr, ptr [[TMPALLOCA]]
// CHECK-NEXT: [[T3:%.*]] = tail call ptr @llvm.objc.autoreleaseReturnValue(ptr [[T2]])
// CHECK-NEXT: ret ptr [[T3]]
@@ -1054,8 +1054,8 @@ void test53(void) {
// CHECK: [[X:%.*]] = alloca ptr,
// CHECK-NEXT: [[Y:%.*]] = alloca ptr,
// CHECK-NEXT: [[TMPALLOCA:%.*]] = alloca ptr,
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[X]])
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[Y]])
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[Y]])
// CHECK-NEXT: [[T1:%.*]] = call ptr @test53_helper(){{.*}} [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.noop.use(ptr [[T1]])
// CHECK-NEXT: store ptr [[T1]], ptr [[Y]],
@@ -1064,13 +1064,13 @@ void test53(void) {
// CHECK-NEXT: store ptr [[T1]], ptr [[TMPALLOCA]]
// CHECK-NEXT: [[T2:%.*]] = load ptr, ptr [[Y]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T2]])
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[Y]])
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[Y]])
// CHECK-NEXT: [[T3:%.*]] = load ptr, ptr [[TMPALLOCA]]
// CHECK-NEXT: store ptr [[T3]], ptr [[X]],
// CHECK-NEXT: load ptr, ptr [[X]],
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr [[X]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T0]])
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[X]])
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]])
// CHECK-NEXT: ret void
}
@@ -1115,12 +1115,12 @@ void test56_test(void) {
id x = [Test56 make];
// CHECK-LABEL: define{{.*}} void @test56_test()
// CHECK: [[X:%.*]] = alloca ptr, align 8
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
// CHECK: [[T0:%.*]] = call ptr @objc_msgSend(
// CHECK-NEXT: store ptr [[T0]], ptr [[X]]
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr [[X]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T0]])
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]])
// CHECK-NEXT: ret void
}
@@ -1188,7 +1188,7 @@ void test61(void) {
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T1]])
[test61_make() performSelector: @selector(test61_void)];
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[Y]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[Y]])
// CHECK-NEXT: [[T1:%.*]] = call ptr @test61_make(){{.*}} [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.noop.use(ptr [[T1]])
// CHECK-NEXT: [[T2:%.*]] = load ptr, ptr @OBJC_SELECTOR_REFERENCES_
@@ -1201,7 +1201,7 @@ void test61(void) {
// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr [[Y]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T0]])
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[Y]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[Y]])
// CHECK-NEXT: ret void
}
@@ -1213,7 +1213,7 @@ void test62(void) {
extern id test62_make(void);
extern void test62_body(void);
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]])
// CHECK-NEXT: store i32 0, ptr [[I]], align 4
// CHECK-NEXT: br label
@@ -1300,10 +1300,10 @@ void test67(void) {
}
// CHECK-LABEL: define{{.*}} void @test67()
// CHECK: [[CL:%.*]] = alloca ptr, align 8
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[CL]])
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[CL]])
// CHECK-NEXT: [[T0:%.*]] = call ptr @test67_helper()
// CHECK-NEXT: store ptr [[T0]], ptr [[CL]], align 8
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[CL]])
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[CL]])
// CHECK-NEXT: ret void
Class test68_helper(void);
@@ -1312,13 +1312,13 @@ void test68(void) {
}
// CHECK-LABEL: define{{.*}} void @test68()
// CHECK: [[CL:%.*]] = alloca ptr, align 8
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[CL]])
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[CL]])
// CHECK-NEXT: [[T1:%.*]] = call ptr @test67_helper(){{.*}} [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.noop.use(ptr [[T1]])
// CHECK-NEXT: store ptr [[T1]], ptr [[CL]], align 8
// CHECK-NEXT: [[T2:%.*]] = load ptr, ptr [[CL]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[T2]])
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[CL]])
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[CL]])
// CHECK-NEXT: ret void
@interface Test69 @end
@@ -1351,14 +1351,14 @@ struct AggDtor getAggDtor(void);
// CHECK-LABEL: define{{.*}} void @test71
void test71(void) {
- // CHECK: call void @llvm.lifetime.start.p0({{[^,]+}}, ptr %[[T:.*]])
+ // CHECK: call void @llvm.lifetime.start.p0(ptr %[[T:.*]])
// CHECK: call void @getAggDtor(ptr dead_on_unwind writable sret(%struct.AggDtor) align 8 %[[T]])
// CHECK: call void @__destructor_8_s40(ptr %[[T]])
- // CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr %[[T]])
- // CHECK: call void @llvm.lifetime.start.p0({{[^,]+}}, ptr %[[T2:.*]])
+ // CHECK: call void @llvm.lifetime.end.p0(ptr %[[T]])
+ // CHECK: call void @llvm.lifetime.start.p0(ptr %[[T2:.*]])
// CHECK: call void @getAggDtor(ptr dead_on_unwind writable sret(%struct.AggDtor) align 8 %[[T2]])
// CHECK: call void @__destructor_8_s40(ptr %[[T2]])
- // CHECK: call void @llvm.lifetime.end.p0({{[^,]+}}, ptr %[[T2]])
+ // CHECK: call void @llvm.lifetime.end.p0(ptr %[[T2]])
getAggDtor();
getAggDtor();
}
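
The hunks above, and the CodeGenObjC, CodeGenObjCXX, CodeGenOpenCL, and Headers updates that follow, all track a single IR change: the llvm.lifetime.start / llvm.lifetime.end intrinsics no longer carry an explicit i64 size operand and now take only the pointer to the alloca. As a minimal illustrative sketch (not part of the patch itself), the call form these FileCheck lines now expect looks like:

  declare void @llvm.lifetime.start.p0(ptr)
  declare void @llvm.lifetime.end.p0(ptr)

  define void @example() {
  entry:
    %x = alloca i32, align 4
    ; previously: call void @llvm.lifetime.start.p0(i64 4, ptr %x)
    call void @llvm.lifetime.start.p0(ptr %x)
    store i32 0, ptr %x
    call void @llvm.lifetime.end.p0(ptr %x)
    ret void
  }

The object size is now implied by the alloca operand, which is why every updated CHECK string in these tests simply drops the leading "i64 N," argument while leaving the rest of the expected IR unchanged.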
diff --git a/clang/test/CodeGenObjC/exceptions.m b/clang/test/CodeGenObjC/exceptions.m
index 832d3a45..66dc051 100644
--- a/clang/test/CodeGenObjC/exceptions.m
+++ b/clang/test/CodeGenObjC/exceptions.m
@@ -79,7 +79,7 @@ void f3(void) {
extern void f3_helper(int, int*);
// CHECK: [[X:%.*]] = alloca i32
- // CHECK: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[X]])
+ // CHECK: call void @llvm.lifetime.start.p0(ptr nonnull [[X]])
// CHECK: store i32 0, ptr [[X]]
int x = 0;
@@ -120,7 +120,7 @@ void f3(void) {
}
// CHECK: call void @f3_helper(i32 noundef 4, ptr noundef nonnull [[X]])
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]])
// CHECK-NEXT: ret void
f3_helper(4, &x);
}
diff --git a/clang/test/CodeGenObjCXX/arc-move.mm b/clang/test/CodeGenObjCXX/arc-move.mm
index 9d25a6a..bda03c1 100644
--- a/clang/test/CodeGenObjCXX/arc-move.mm
+++ b/clang/test/CodeGenObjCXX/arc-move.mm
@@ -48,7 +48,7 @@ void library_move(__strong id &x, __strong id &y) {
void library_move(__strong id &y) {
// CHECK: [[X:%x]] = alloca ptr, align 8
// CHECK: [[I:%.*]] = alloca i32, align 4
- // CHECK: call void @llvm.lifetime.start.p0(i64 8, ptr [[X]])
+ // CHECK: call void @llvm.lifetime.start.p0(ptr [[X]])
// CHECK: [[Y:%[a-zA-Z0-9]+]] = call noundef nonnull align {{[0-9]+}} dereferenceable({{[0-9]+}}) ptr @_Z4moveIRU8__strongP11objc_objectEON16remove_referenceIT_E4typeEOS5_
// Load the object
// CHECK-NEXT: [[OBJ:%[a-zA-Z0-9]+]] = load ptr, ptr [[Y]]
@@ -58,13 +58,13 @@ void library_move(__strong id &y) {
// CHECK-NEXT: store ptr [[OBJ]], ptr [[X:%[a-zA-Z0-9]+]]
id x = move(y);
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]])
// CHECK-NEXT: store i32 17
int i = 17;
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]])
// CHECK-NEXT: [[OBJ:%[a-zA-Z0-9]+]] = load ptr, ptr [[X]]
// CHECK-NEXT: call void @llvm.objc.release(ptr [[OBJ]])
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[X]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]])
// CHECK-NEXT: ret void
}
diff --git a/clang/test/CodeGenObjCXX/arc-references.mm b/clang/test/CodeGenObjCXX/arc-references.mm
index 273e339..5fad327 100644
--- a/clang/test/CodeGenObjCXX/arc-references.mm
+++ b/clang/test/CodeGenObjCXX/arc-references.mm
@@ -45,7 +45,7 @@ void test3() {
// CHECK-NEXT: call void @_Z6calleev()
callee();
// CHECK-NEXT: call void @llvm.objc.destroyWeak
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[REF]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[REF]])
// CHECK-NEXT: ret void
}
@@ -71,10 +71,10 @@ void test5(__strong id &x) {
sink(x);
// CHECK-NEXT: [[OBJ_A:%[a-zA-Z0-9]+]] = load ptr, ptr [[REFTMP]]
// CHECK-NEXT: call void @llvm.objc.release
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]])
// CHECK-NEXT: store i32 17, ptr
int i = 17;
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]])
// CHECK-NEXT: ret void
}
diff --git a/clang/test/CodeGenObjCXX/arc.mm b/clang/test/CodeGenObjCXX/arc.mm
index 7883378..20f1e37 100644
--- a/clang/test/CodeGenObjCXX/arc.mm
+++ b/clang/test/CodeGenObjCXX/arc.mm
@@ -61,9 +61,9 @@ void test34(int cond) {
// CHECK-NEXT: [[CONDCLEANUPSAVE:%.*]] = alloca ptr
// CHECK-NEXT: [[CONDCLEANUP:%.*]] = alloca i1
// CHECK-NEXT: store i32
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[STRONG]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[STRONG]])
// CHECK-NEXT: store ptr null, ptr [[STRONG]]
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[WEAK]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[WEAK]])
// CHECK-NEXT: call ptr @llvm.objc.initWeak(ptr [[WEAK]], ptr null)
// CHECK-NEXT: [[T0:%.*]] = load i32, ptr [[COND]]
@@ -293,7 +293,7 @@ template void test40_helper<int>();
// CHECK-LABEL: define weak_odr void @_Z13test40_helperIiEvv()
// CHECK: [[X:%.*]] = alloca ptr
// CHECK-NEXT: [[TEMP:%.*]] = alloca ptr
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[X]])
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
// CHECK-NEXT: store ptr null, ptr [[X]]
// CHECK: [[T0:%.*]] = load ptr, ptr [[X]]
// CHECK-NEXT: store ptr [[T0]], ptr [[TEMP]]
diff --git a/clang/test/CodeGenObjCXX/literals.mm b/clang/test/CodeGenObjCXX/literals.mm
index 737aa9e..b7938ccc 100644
--- a/clang/test/CodeGenObjCXX/literals.mm
+++ b/clang/test/CodeGenObjCXX/literals.mm
@@ -22,16 +22,16 @@ void test_array() {
// CHECK: [[TMPY:%[a-zA-Z0-9.]+]] = alloca %
// Initializing first element
- // CHECK: call void @llvm.lifetime.start.p0(i64 8, ptr [[ARR]])
+ // CHECK: call void @llvm.lifetime.start.p0(ptr [[ARR]])
// CHECK: [[ELEMENT0:%[a-zA-Z0-9.]+]] = getelementptr inbounds [2 x ptr], ptr [[OBJECTS]], i64 0, i64 0
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMPX]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMPX]])
// CHECK-NEXT: call void @_ZN1XC1Ev({{.*}} [[TMPX]])
// CHECK-NEXT: [[OBJECT0:%[a-zA-Z0-9.]+]] = invoke noundef ptr @_ZNK1XcvP11objc_objectEv{{.*}} [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
// CHECK: store ptr [[OBJECT0]], ptr [[ELEMENT0]]
// Initializing the second element
// CHECK: [[ELEMENT1:%[a-zA-Z0-9.]+]] = getelementptr inbounds [2 x ptr], ptr [[OBJECTS]], i64 0, i64 1
- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMPY]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMPY]])
// CHECK-NEXT: invoke void @_ZN1YC1Ev({{.*}} [[TMPY]])
// CHECK: [[OBJECT1:%[a-zA-Z0-9.]+]] = invoke noundef ptr @_ZNK1YcvP11objc_objectEv{{.*}} [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
// CHECK: store ptr [[OBJECT1]], ptr [[ELEMENT1]]
@@ -50,7 +50,7 @@ void test_array() {
// CHECK-NEXT: call void @_ZN1XD1Ev
// CHECK-NOT: ret void
// CHECK: call void @llvm.objc.release
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[ARR]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ARR]])
// CHECK-NEXT: ret void
// Check cleanups
@@ -71,7 +71,7 @@ void test_array_instantiation() {
// CHECK: [[OBJECTS:%[a-zA-Z0-9.]+]] = alloca [2 x ptr]
// Initializing first element
- // CHECK: call void @llvm.lifetime.start.p0(i64 8, ptr [[ARR]])
+ // CHECK: call void @llvm.lifetime.start.p0(ptr [[ARR]])
// CHECK: [[ELEMENT0:%[a-zA-Z0-9.]+]] = getelementptr inbounds [2 x ptr], ptr [[OBJECTS]], i64 0, i64 0
// CHECK: call void @_ZN1XC1Ev
// CHECK-NEXT: [[OBJECT0:%[a-zA-Z0-9.]+]] = invoke noundef ptr @_ZNK1XcvP11objc_objectEv{{.*}} [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
@@ -97,7 +97,7 @@ void test_array_instantiation() {
// CHECK-NEXT: call void @_ZN1XD1Ev
// CHECK-NOT: ret void
// CHECK: call void @llvm.objc.release
- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[ARR]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ARR]])
// CHECK-NEXT: ret void
// Check cleanups
diff --git a/clang/test/CodeGenOpenCL/amdgpu-enqueue-kernel.cl b/clang/test/CodeGenOpenCL/amdgpu-enqueue-kernel.cl
index bfbed79..d71c898 100644
--- a/clang/test/CodeGenOpenCL/amdgpu-enqueue-kernel.cl
+++ b/clang/test/CodeGenOpenCL/amdgpu-enqueue-kernel.cl
@@ -523,10 +523,10 @@ kernel void test_target_features_kernel(global int *i) {
// GFX900-NEXT: store i8 [[B]], ptr [[B_ADDR_ASCAST]], align 1, !tbaa [[TBAA16]]
// GFX900-NEXT: store ptr addrspace(1) [[C]], ptr [[C_ADDR_ASCAST]], align 8, !tbaa [[TBAA7]]
// GFX900-NEXT: store i64 [[D]], ptr [[D_ADDR_ASCAST]], align 8, !tbaa [[TBAA3]]
-// GFX900-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) [[DEFAULT_QUEUE]]) #[[ATTR9:[0-9]+]]
-// GFX900-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[FLAGS]]) #[[ATTR9]]
+// GFX900-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[DEFAULT_QUEUE]]) #[[ATTR9:[0-9]+]]
+// GFX900-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[FLAGS]]) #[[ATTR9]]
// GFX900-NEXT: store i32 0, ptr addrspace(5) [[FLAGS]], align 4, !tbaa [[TBAA17:![0-9]+]]
-// GFX900-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[NDRANGE]]) #[[ATTR9]]
+// GFX900-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[NDRANGE]]) #[[ATTR9]]
// GFX900-NEXT: [[TMP0:%.*]] = load ptr addrspace(1), ptr addrspace(5) [[DEFAULT_QUEUE]], align 8, !tbaa [[TBAA19:![0-9]+]]
// GFX900-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(5) [[FLAGS]], align 4, !tbaa [[TBAA17]]
// GFX900-NEXT: call void @llvm.memcpy.p0.p5.i64(ptr align 4 [[TMP_ASCAST]], ptr addrspace(5) align 4 [[NDRANGE]], i64 4, i1 false), !tbaa.struct [[TBAA_STRUCT21:![0-9]+]]
@@ -586,12 +586,12 @@ kernel void test_target_features_kernel(global int *i) {
// GFX900-NEXT: [[BLOCK_CAPTURED19:%.*]] = getelementptr inbounds nuw <{ i32, i32, ptr, ptr addrspace(1), ptr addrspace(1), i64, i8 }>, ptr [[BLOCK12_ASCAST]], i32 0, i32 5
// GFX900-NEXT: [[TMP17:%.*]] = load i64, ptr [[D_ADDR_ASCAST]], align 8, !tbaa [[TBAA3]]
// GFX900-NEXT: store i64 [[TMP17]], ptr [[BLOCK_CAPTURED19]], align 8, !tbaa [[TBAA3]]
-// GFX900-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) [[BLOCK_SIZES]]) #[[ATTR9]]
+// GFX900-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[BLOCK_SIZES]]) #[[ATTR9]]
// GFX900-NEXT: [[TMP18:%.*]] = getelementptr [1 x i64], ptr addrspace(5) [[BLOCK_SIZES]], i32 0, i32 0
// GFX900-NEXT: store i64 100, ptr addrspace(5) [[TMP18]], align 8
// GFX900-NEXT: [[TMP19:%.*]] = call i32 @__enqueue_kernel_varargs(ptr addrspace(1) [[TMP12]], i32 [[TMP13]], ptr addrspace(5) [[VARTMP11]], ptr addrspacecast (ptr addrspace(1) @__test_block_invoke_3_kernel.runtime.handle to ptr), ptr [[BLOCK12_ASCAST]], i32 1, ptr addrspace(5) [[TMP18]])
-// GFX900-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) [[BLOCK_SIZES]]) #[[ATTR9]]
-// GFX900-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) [[BLOCK20]]) #[[ATTR9]]
+// GFX900-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[BLOCK_SIZES]]) #[[ATTR9]]
+// GFX900-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[BLOCK20]]) #[[ATTR9]]
// GFX900-NEXT: [[BLOCK_SIZE22:%.*]] = getelementptr inbounds nuw <{ i32, i32, ptr, i64, ptr addrspace(1) }>, ptr [[BLOCK21_ASCAST]], i32 0, i32 0
// GFX900-NEXT: store i32 32, ptr [[BLOCK_SIZE22]], align 8
// GFX900-NEXT: [[BLOCK_ALIGN23:%.*]] = getelementptr inbounds nuw <{ i32, i32, ptr, i64, ptr addrspace(1) }>, ptr [[BLOCK21_ASCAST]], i32 0, i32 1
@@ -610,10 +610,10 @@ kernel void test_target_features_kernel(global int *i) {
// GFX900-NEXT: call void @llvm.memcpy.p0.p5.i64(ptr align 4 [[TMP27_ASCAST]], ptr addrspace(5) align 4 [[NDRANGE]], i64 4, i1 false), !tbaa.struct [[TBAA_STRUCT21]]
// GFX900-NEXT: [[TMP24:%.*]] = load ptr, ptr addrspace(5) [[BLOCK20]], align 8, !tbaa [[TBAA16]]
// GFX900-NEXT: [[TMP25:%.*]] = call i32 @__enqueue_kernel_basic(ptr addrspace(1) [[TMP22]], i32 [[TMP23]], ptr addrspace(5) [[VARTMP27]], ptr addrspacecast (ptr addrspace(1) @__test_block_invoke_4_kernel.runtime.handle to ptr), ptr [[BLOCK21_ASCAST]])
-// GFX900-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) [[BLOCK20]]) #[[ATTR9]]
-// GFX900-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[NDRANGE]]) #[[ATTR9]]
-// GFX900-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[FLAGS]]) #[[ATTR9]]
-// GFX900-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) [[DEFAULT_QUEUE]]) #[[ATTR9]]
+// GFX900-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[BLOCK20]]) #[[ATTR9]]
+// GFX900-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[NDRANGE]]) #[[ATTR9]]
+// GFX900-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[FLAGS]]) #[[ATTR9]]
+// GFX900-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[DEFAULT_QUEUE]]) #[[ATTR9]]
// GFX900-NEXT: ret void
//
//
@@ -641,18 +641,18 @@ kernel void test_target_features_kernel(global int *i) {
// GFX900-NEXT: [[I_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I_ADDR]] to ptr
// GFX900-NEXT: [[TMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TMP]] to ptr
// GFX900-NEXT: store ptr addrspace(1) [[I]], ptr [[I_ADDR_ASCAST]], align 8, !tbaa [[TBAA26]]
-// GFX900-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) [[DEFAULT_QUEUE]]) #[[ATTR9]]
-// GFX900-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[FLAGS]]) #[[ATTR9]]
+// GFX900-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[DEFAULT_QUEUE]]) #[[ATTR9]]
+// GFX900-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[FLAGS]]) #[[ATTR9]]
// GFX900-NEXT: store i32 0, ptr addrspace(5) [[FLAGS]], align 4, !tbaa [[TBAA17]]
-// GFX900-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[NDRANGE]]) #[[ATTR9]]
+// GFX900-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[NDRANGE]]) #[[ATTR9]]
// GFX900-NEXT: [[TMP0:%.*]] = call i64 @llvm.amdgcn.s.memtime()
// GFX900-NEXT: [[TMP1:%.*]] = load ptr addrspace(1), ptr addrspace(5) [[DEFAULT_QUEUE]], align 8, !tbaa [[TBAA19]]
// GFX900-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(5) [[FLAGS]], align 4, !tbaa [[TBAA17]]
// GFX900-NEXT: call void @llvm.memcpy.p0.p5.i64(ptr align 4 [[TMP_ASCAST]], ptr addrspace(5) align 4 [[NDRANGE]], i64 4, i1 false), !tbaa.struct [[TBAA_STRUCT21]]
// GFX900-NEXT: [[TMP3:%.*]] = call i32 @__enqueue_kernel_basic(ptr addrspace(1) [[TMP1]], i32 [[TMP2]], ptr addrspace(5) [[TMP]], ptr addrspacecast (ptr addrspace(1) @__test_target_features_kernel_block_invoke_kernel.runtime.handle to ptr), ptr addrspacecast (ptr addrspace(1) @__block_literal_global to ptr))
-// GFX900-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[NDRANGE]]) #[[ATTR9]]
-// GFX900-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[FLAGS]]) #[[ATTR9]]
-// GFX900-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) [[DEFAULT_QUEUE]]) #[[ATTR9]]
+// GFX900-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[NDRANGE]]) #[[ATTR9]]
+// GFX900-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[FLAGS]]) #[[ATTR9]]
+// GFX900-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[DEFAULT_QUEUE]]) #[[ATTR9]]
// GFX900-NEXT: ret void
//
//
diff --git a/clang/test/CodeGenOpenCL/amdgpu-printf.cl b/clang/test/CodeGenOpenCL/amdgpu-printf.cl
index 33fee66..b9e2517 100644
--- a/clang/test/CodeGenOpenCL/amdgpu-printf.cl
+++ b/clang/test/CodeGenOpenCL/amdgpu-printf.cl
@@ -65,12 +65,12 @@ __kernel void test_printf_str_int(int i) {
// CHECK-NEXT: [[I_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[S:%.*]] = alloca [4 x i8], align 1, addrspace(5)
// CHECK-NEXT: store i32 [[I]], ptr addrspace(5) [[I_ADDR]], align 4, !tbaa [[TBAA9]]
-// CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[S]]) #[[ATTR7:[0-9]+]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[S]]) #[[ATTR7:[0-9]+]]
// CHECK-NEXT: call void @llvm.memcpy.p5.p4.i64(ptr addrspace(5) align 1 [[S]], ptr addrspace(4) align 1 @__const.test_printf_str_int.s, i64 4, i1 false)
// CHECK-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr addrspace(5) [[S]], i64 0, i64 0
// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(5) [[I_ADDR]], align 4, !tbaa [[TBAA9]]
// CHECK-NEXT: [[CALL:%.*]] = call i32 (ptr addrspace(4), ...) @printf(ptr addrspace(4) noundef @.str.2, ptr addrspace(5) noundef [[ARRAYDECAY]], i32 noundef [[TMP0]]) #[[ATTR6]]
-// CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[S]]) #[[ATTR7]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[S]]) #[[ATTR7]]
// CHECK-NEXT: ret void
//
//.
diff --git a/clang/test/CodeGenOpenCL/cl20-device-side-enqueue.cl b/clang/test/CodeGenOpenCL/cl20-device-side-enqueue.cl
index 6c85e73..b0dbf60 100644
--- a/clang/test/CodeGenOpenCL/cl20-device-side-enqueue.cl
+++ b/clang/test/CodeGenOpenCL/cl20-device-side-enqueue.cl
@@ -128,9 +128,9 @@ kernel void device_side_enqueue(global int *a, global int *b, int i) {
// SPIR: [[DEF_Q:%[0-9]+]] = load target("spirv.Queue"), ptr %default_queue
// X86: [[DEF_Q:%[0-9]+]] = load ptr, ptr %default_queue
// COMMON: [[FLAGS:%[0-9]+]] = load i32, ptr %flags
- // CHECK-LIFETIMES: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %[[BLOCK_SIZES1]])
+ // CHECK-LIFETIMES: call void @llvm.lifetime.start.p0(ptr nonnull %[[BLOCK_SIZES1]])
// CHECK-LIFETIMES-LABEL: call {{(spir_func )?}}i32 @__enqueue_kernel_varargs(
- // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %[[BLOCK_SIZES1]])
+ // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull %[[BLOCK_SIZES1]])
// B32: %[[TMP:.*]] = getelementptr [1 x i32], ptr %[[BLOCK_SIZES1]], i32 0, i32 0
// B32: store i32 256, ptr %[[TMP]], align 4
// B64: %[[TMP:.*]] = getelementptr [1 x i64], ptr %[[BLOCK_SIZES1]], i32 0, i32 0
@@ -153,9 +153,9 @@ kernel void device_side_enqueue(global int *a, global int *b, int i) {
// SPIR: [[DEF_Q:%[0-9]+]] = load target("spirv.Queue"), ptr %default_queue
// X86: [[DEF_Q:%[0-9]+]] = load ptr, ptr %default_queue
// COMMON: [[FLAGS:%[0-9]+]] = load i32, ptr %flags
- // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %[[BLOCK_SIZES2]])
+ // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull %[[BLOCK_SIZES2]])
// CHECK-LIFETIMES-LABEL: call {{(spir_func )?}}i32 @__enqueue_kernel_varargs(
- // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %[[BLOCK_SIZES2]])
+ // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull %[[BLOCK_SIZES2]])
// B32: %[[TMP:.*]] = getelementptr [1 x i32], ptr %[[BLOCK_SIZES2]], i32 0, i32 0
// B32: store i32 %{{.*}}, ptr %[[TMP]], align 4
// B64: %[[TMP:.*]] = getelementptr [1 x i64], ptr %[[BLOCK_SIZES2]], i32 0, i32 0
@@ -181,9 +181,9 @@ kernel void device_side_enqueue(global int *a, global int *b, int i) {
// X86: [[AD:%arraydecay[0-9]*]] = getelementptr inbounds [1 x ptr], ptr %event_wait_list2, i{{32|64}} 0, i{{32|64}} 0
// COMMON: [[WAIT_EVNT:%[0-9]+]] ={{.*}} addrspacecast ptr [[AD]] to ptr addrspace(4)
// COMMON: [[EVNT:%[0-9]+]] ={{.*}} addrspacecast ptr %clk_event to ptr addrspace(4)
- // CHECK-LIFETIMES: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %[[BLOCK_SIZES3]])
+ // CHECK-LIFETIMES: call void @llvm.lifetime.start.p0(ptr nonnull %[[BLOCK_SIZES3]])
// CHECK-LIFETIMES-LABEL: call {{(spir_func )?}}i32 @__enqueue_kernel_events_varargs(
- // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %[[BLOCK_SIZES3]])
+ // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull %[[BLOCK_SIZES3]])
// B32: %[[TMP:.*]] = getelementptr [1 x i32], ptr %[[BLOCK_SIZES3]], i32 0, i32 0
// B32: store i32 256, ptr %[[TMP]], align 4
// B64: %[[TMP:.*]] = getelementptr [1 x i64], ptr %[[BLOCK_SIZES3]], i32 0, i32 0
@@ -209,9 +209,9 @@ kernel void device_side_enqueue(global int *a, global int *b, int i) {
// X86: [[AD:%arraydecay[0-9]*]] = getelementptr inbounds [1 x ptr], ptr %event_wait_list2, i{{32|64}} 0, i{{32|64}} 0
// COMMON: [[WAIT_EVNT:%[0-9]+]] ={{.*}} addrspacecast ptr [[AD]] to ptr addrspace(4)
// COMMON: [[EVNT:%[0-9]+]] ={{.*}} addrspacecast ptr %clk_event to ptr addrspace(4)
- // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %[[BLOCK_SIZES4]])
+ // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull %[[BLOCK_SIZES4]])
// CHECK-LIFETIMES-LABEL: call {{(spir_func )?}}i32 @__enqueue_kernel_events_varargs(
- // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %[[BLOCK_SIZES4]])
+ // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull %[[BLOCK_SIZES4]])
// B32: %[[TMP:.*]] = getelementptr [1 x i32], ptr %[[BLOCK_SIZES4]], i32 0, i32 0
// B32: store i32 %{{.*}}, ptr %[[TMP]], align 4
// B64: %[[TMP:.*]] = getelementptr [1 x i64], ptr %[[BLOCK_SIZES4]], i32 0, i32 0
@@ -234,9 +234,9 @@ kernel void device_side_enqueue(global int *a, global int *b, int i) {
// SPIR: [[DEF_Q:%[0-9]+]] = load target("spirv.Queue"), ptr %default_queue
// X86: [[DEF_Q:%[0-9]+]] = load ptr, ptr %default_queue
// COMMON: [[FLAGS:%[0-9]+]] = load i32, ptr %flags
- // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %[[BLOCK_SIZES5]])
+ // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull %[[BLOCK_SIZES5]])
// CHECK-LIFETIMES-LABEL: call {{(spir_func )?}}i32 @__enqueue_kernel_varargs(
- // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %[[BLOCK_SIZES5]])
+ // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull %[[BLOCK_SIZES5]])
// B32: %[[TMP:.*]] = getelementptr [1 x i32], ptr %[[BLOCK_SIZES5]], i32 0, i32 0
// B32: store i32 %{{.*}}, ptr %[[TMP]], align 4
// B64: %[[TMP:.*]] = getelementptr [1 x i64], ptr %[[BLOCK_SIZES5]], i32 0, i32 0
@@ -258,9 +258,9 @@ kernel void device_side_enqueue(global int *a, global int *b, int i) {
// SPIR: [[DEF_Q:%[0-9]+]] = load target("spirv.Queue"), ptr %default_queue
// X86: [[DEF_Q:%[0-9]+]] = load ptr, ptr %default_queue
// COMMON: [[FLAGS:%[0-9]+]] = load i32, ptr %flags
- // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %[[BLOCK_SIZES6]])
+ // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull %[[BLOCK_SIZES6]])
// CHECK-LIFETIMES-LABEL: call {{(spir_func )?}}i32 @__enqueue_kernel_varargs(
- // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %[[BLOCK_SIZES6]])
+ // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull %[[BLOCK_SIZES6]])
// B32: %[[TMP:.*]] = getelementptr [3 x i32], ptr %[[BLOCK_SIZES6]], i32 0, i32 0
// B32: store i32 1, ptr %[[TMP]], align 4
// B32: %[[BLOCK_SIZES62:.*]] = getelementptr [3 x i32], ptr %[[BLOCK_SIZES6]], i32 0, i32 1
@@ -290,9 +290,9 @@ kernel void device_side_enqueue(global int *a, global int *b, int i) {
// SPIR: [[DEF_Q:%[0-9]+]] = load target("spirv.Queue"), ptr %default_queue
// X86: [[DEF_Q:%[0-9]+]] = load ptr, ptr %default_queue
// COMMON: [[FLAGS:%[0-9]+]] = load i32, ptr %flags
- // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %[[BLOCK_SIZES7]])
+ // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull %[[BLOCK_SIZES7]])
// CHECK-LIFETIMES-LABEL: call {{(spir_func )?}}i32 @__enqueue_kernel_varargs(
- // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %[[BLOCK_SIZES7]])
+ // CHECK-LIFETIMES-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull %[[BLOCK_SIZES7]])
// B32: %[[TMP:.*]] = getelementptr [1 x i32], ptr %[[BLOCK_SIZES7]], i32 0, i32 0
// B32: store i32 0, ptr %[[TMP]], align 4
// B64: %[[TMP:.*]] = getelementptr [1 x i64], ptr %[[BLOCK_SIZES7]], i32 0, i32 0
diff --git a/clang/test/CodeGenOpenCL/implicit-addrspacecast-function-parameter.cl b/clang/test/CodeGenOpenCL/implicit-addrspacecast-function-parameter.cl
index 8845ffe..4e40073 100644
--- a/clang/test/CodeGenOpenCL/implicit-addrspacecast-function-parameter.cl
+++ b/clang/test/CodeGenOpenCL/implicit-addrspacecast-function-parameter.cl
@@ -32,12 +32,12 @@ __kernel void use_of_local_var()
// CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[X:%.*]] = alloca i32, align 4, addrspace(5)
-// CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[X]]) #[[ATTR5:[0-9]+]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[X]]) #[[ATTR5:[0-9]+]]
// CHECK-NEXT: store i32 0, ptr addrspace(5) [[X]], align 4, !tbaa [[TBAA4:![0-9]+]]
// CHECK-NEXT: call void @private_ptr(ptr addrspace(5) noundef [[X]]) #[[ATTR6:[0-9]+]]
// CHECK-NEXT: [[X_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[X]] to ptr
// CHECK-NEXT: call void @generic_ptr(ptr noundef [[X_ASCAST]]) #[[ATTR6]]
-// CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[X]]) #[[ATTR5]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[X]]) #[[ATTR5]]
// CHECK-NEXT: ret void
//
//
diff --git a/clang/test/CoverageMapping/logical.cpp b/clang/test/CoverageMapping/logical.cpp
index 2a22d6c..caa773c 100644
--- a/clang/test/CoverageMapping/logical.cpp
+++ b/clang/test/CoverageMapping/logical.cpp
@@ -1,27 +1,31 @@
// RUN: %clang_cc1 -mllvm -emptyline-comment-coverage=false -fprofile-instrument=clang -fcoverage-mapping -dump-coverage-mapping -emit-llvm-only -main-file-name logical.cpp %s | FileCheck %s
// RUN: %clang_cc1 -mllvm -emptyline-comment-coverage=false -fcoverage-mcdc -fprofile-instrument=clang -fcoverage-mapping -dump-coverage-mapping -emit-llvm-only -main-file-name logical.cpp %s | FileCheck %s -check-prefix=MCDC
-int main() { // CHECK: File 0, [[@LINE]]:12 -> [[@LINE+23]]:2 = #0
+int main() { // CHECK: File 0, [[@LINE]]:12 -> [[@LINE+27]]:2 = #0
bool bt = true;
bool bf = false; // MCDC: Decision,File 0, [[@LINE+1]]:12 -> [[@LINE+1]]:20 = M:3, C:2
bool a = bt && bf; // CHECK-NEXT: File 0, [[@LINE]]:12 -> [[@LINE]]:14 = #0
// CHECK-NEXT: Branch,File 0, [[@LINE-1]]:12 -> [[@LINE-1]]:14 = #1, (#0 - #1)
- // CHECK-NEXT: File 0, [[@LINE-2]]:18 -> [[@LINE-2]]:20 = #1
- // CHECK-NEXT: Branch,File 0, [[@LINE-3]]:18 -> [[@LINE-3]]:20 = #2, (#1 - #2)
+ // CHECK-NEXT: Gap,File 0, [[@LINE-2]]:14 -> [[@LINE-2]]:18 = #1
+ // CHECK-NEXT: File 0, [[@LINE-3]]:18 -> [[@LINE-3]]:20 = #1
+ // CHECK-NEXT: Branch,File 0, [[@LINE-4]]:18 -> [[@LINE-4]]:20 = #2, (#1 - #2)
// MCDC: Decision,File 0, [[@LINE+1]]:7 -> [[@LINE+2]]:9 = M:6, C:2
a = bt && // CHECK-NEXT: File 0, [[@LINE]]:7 -> [[@LINE]]:9 = #0
bf; // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:7 -> [[@LINE-1]]:9 = #3, (#0 - #3)
- // CHECK-NEXT: File 0, [[@LINE-1]]:7 -> [[@LINE-1]]:9 = #3
- // CHECK-NEXT: Branch,File 0, [[@LINE-2]]:7 -> [[@LINE-2]]:9 = #4, (#3 - #4)
+ // CHECK-NEXT: Gap,File 0, [[@LINE-2]]:9 -> [[@LINE-1]]:7 = #3
+ // CHECK-NEXT: File 0, [[@LINE-2]]:7 -> [[@LINE-2]]:9 = #3
+ // CHECK-NEXT: Branch,File 0, [[@LINE-3]]:7 -> [[@LINE-3]]:9 = #4, (#3 - #4)
// MCDC: Decision,File 0, [[@LINE+1]]:7 -> [[@LINE+1]]:15 = M:9, C:2
a = bf || bt; // CHECK-NEXT: File 0, [[@LINE]]:7 -> [[@LINE]]:9 = #0
// CHECK-NEXT: Branch,File 0, [[@LINE-1]]:7 -> [[@LINE-1]]:9 = (#0 - #5), #5
- // CHECK-NEXT: File 0, [[@LINE-2]]:13 -> [[@LINE-2]]:15 = #5
- // CHECK-NEXT: Branch,File 0, [[@LINE-3]]:13 -> [[@LINE-3]]:15 = (#5 - #6), #6
+ // CHECK-NEXT: Gap,File 0, [[@LINE-2]]:9 -> [[@LINE-2]]:13 = #5
+ // CHECK-NEXT: File 0, [[@LINE-3]]:13 -> [[@LINE-3]]:15 = #5
+ // CHECK-NEXT: Branch,File 0, [[@LINE-4]]:13 -> [[@LINE-4]]:15 = (#5 - #6), #6
// MCDC: Decision,File 0, [[@LINE+1]]:7 -> [[@LINE+2]]:9 = M:12, C:2
a = bf || // CHECK-NEXT: File 0, [[@LINE]]:7 -> [[@LINE]]:9 = #0
bt; // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:7 -> [[@LINE-1]]:9 = (#0 - #7), #7
- // CHECK-NEXT: File 0, [[@LINE-1]]:7 -> [[@LINE-1]]:9 = #7
- // CHECK-NEXT: Branch,File 0, [[@LINE-2]]:7 -> [[@LINE-2]]:9 = (#7 - #8), #8
+ // CHECK-NEXT: Gap,File 0, [[@LINE-2]]:9 -> [[@LINE-1]]:7 = #7
+ // CHECK-NEXT: File 0, [[@LINE-2]]:7 -> [[@LINE-2]]:9 = #7
+ // CHECK-NEXT: Branch,File 0, [[@LINE-3]]:7 -> [[@LINE-3]]:9 = (#7 - #8), #8
return 0;
}
diff --git a/clang/test/DebugInfo/KeyInstructions/lit.local.cfg b/clang/test/DebugInfo/KeyInstructions/lit.local.cfg
deleted file mode 100644
index 482bd5c..0000000
--- a/clang/test/DebugInfo/KeyInstructions/lit.local.cfg
+++ /dev/null
@@ -1,2 +0,0 @@
-if not config.has_key_instructions:
- config.unsupported = True
diff --git a/clang/test/Driver/clang-offload-bundler-zlib.c b/clang/test/Driver/clang-offload-bundler-zlib.c
index b026e2e..211601c 100644
--- a/clang/test/Driver/clang-offload-bundler-zlib.c
+++ b/clang/test/Driver/clang-offload-bundler-zlib.c
@@ -66,6 +66,30 @@
// NOHOST-V3-DAG: hip-amdgcn-amd-amdhsa--gfx900
// NOHOST-V3-DAG: hip-amdgcn-amd-amdhsa--gfx906
+// Check compression/decompression of offload bundle using version 2 format.
+//
+// RUN: env OFFLOAD_BUNDLER_COMPRESS=1 OFFLOAD_BUNDLER_VERBOSE=1 COMPRESSED_BUNDLE_FORMAT_VERSION=2 \
+// RUN: clang-offload-bundler -type=bc -targets=hip-amdgcn-amd-amdhsa--gfx900,hip-amdgcn-amd-amdhsa--gfx906 \
+// RUN: -input=%t.tgt1 -input=%t.tgt2 -output=%t.hip.bundle.bc 2>&1 | \
+// RUN: FileCheck -check-prefix=COMPRESS-V2 %s
+// RUN: clang-offload-bundler -type=bc -list -input=%t.hip.bundle.bc | FileCheck -check-prefix=NOHOST-V2 %s
+// RUN: env OFFLOAD_BUNDLER_VERBOSE=1 \
+// RUN: clang-offload-bundler -type=bc -targets=hip-amdgcn-amd-amdhsa--gfx900,hip-amdgcn-amd-amdhsa--gfx906 \
+// RUN: -output=%t.res.tgt1 -output=%t.res.tgt2 -input=%t.hip.bundle.bc -unbundle 2>&1 | \
+// RUN: FileCheck -check-prefix=DECOMPRESS-V2 %s
+// RUN: diff %t.tgt1 %t.res.tgt1
+// RUN: diff %t.tgt2 %t.res.tgt2
+//
+// COMPRESS-V2: Compressed bundle format version: 2
+// COMPRESS-V2: Compression method used: zlib
+// COMPRESS-V2: Compression level: 6
+// DECOMPRESS-V2: Compressed bundle format version: 2
+// DECOMPRESS-V2: Decompression method: zlib
+// DECOMPRESS-V2: Hashes match: Yes
+// NOHOST-V2-NOT: host-
+// NOHOST-V2-DAG: hip-amdgcn-amd-amdhsa--gfx900
+// NOHOST-V2-DAG: hip-amdgcn-amd-amdhsa--gfx906
+
// Check -compression-level= option
// RUN: clang-offload-bundler -type=bc -targets=hip-amdgcn-amd-amdhsa--gfx900,hip-amdgcn-amd-amdhsa--gfx906 \
diff --git a/clang/test/Driver/clang-offload-bundler-zstd.c b/clang/test/Driver/clang-offload-bundler-zstd.c
index 667d955..c1123ae 100644
--- a/clang/test/Driver/clang-offload-bundler-zstd.c
+++ b/clang/test/Driver/clang-offload-bundler-zstd.c
@@ -29,11 +29,11 @@
// RUN: diff %t.tgt1 %t.res.tgt1
// RUN: diff %t.tgt2 %t.res.tgt2
//
-// CHECK: Compressed bundle format version: 2
+// CHECK: Compressed bundle format version: 3
// CHECK: Total file size (including headers): [[SIZE:[0-9]*]] bytes
// CHECK: Compression method used: zstd
// CHECK: Compression level: 3
-// CHECK: Compressed bundle format version: 2
+// CHECK: Compressed bundle format version: 3
// CHECK: Total file size (from header): [[SIZE]] bytes
// CHECK: Decompression method: zstd
// CHECK: Hashes match: Yes
diff --git a/clang/test/Driver/cuda-detect-path.cu b/clang/test/Driver/cuda-detect-path.cu
index 8d249bd..ce42ed7 100644
--- a/clang/test/Driver/cuda-detect-path.cu
+++ b/clang/test/Driver/cuda-detect-path.cu
@@ -1,5 +1,5 @@
// This tests uses the PATH environment variable.
-// REQUIRES: !system-windows
+// REQUIRES: !system-windows, !system-cygwin
// RUN: env PATH=%S/Inputs/CUDA/usr/local/cuda/bin \
// RUN: %clang -v --target=i386-unknown-linux --sysroot=%S/no-cuda-there \
diff --git a/clang/test/Driver/hipspv-toolchain.hip b/clang/test/Driver/hipspv-toolchain.hip
index b2187ac..3c175eb 100644
--- a/clang/test/Driver/hipspv-toolchain.hip
+++ b/clang/test/Driver/hipspv-toolchain.hip
@@ -1,4 +1,4 @@
-// UNSUPPORTED: system-windows
+// UNSUPPORTED: system-windows, system-cygwin
// RUN: %clang -### -target x86_64-linux-gnu --offload=spirv64 \
// RUN: --no-offload-new-driver --hip-path=%S/Inputs/hipspv -nohipwrapperinc %s \
diff --git a/clang/test/Driver/ld-path.c b/clang/test/Driver/ld-path.c
index bc10b9e..e00b63d 100644
--- a/clang/test/Driver/ld-path.c
+++ b/clang/test/Driver/ld-path.c
@@ -1,5 +1,5 @@
/// This tests uses the PATH environment variable.
-// UNSUPPORTED: system-windows
+// UNSUPPORTED: system-windows, system-cygwin
// RUN: cd %S
diff --git a/clang/test/Driver/program-path-priority.c b/clang/test/Driver/program-path-priority.c
index c940c4c..b88c0f2 100644
--- a/clang/test/Driver/program-path-priority.c
+++ b/clang/test/Driver/program-path-priority.c
@@ -1,5 +1,5 @@
/// Don't create symlinks on Windows
-// UNSUPPORTED: system-windows
+// UNSUPPORTED: system-windows, system-cygwin
/// Check the priority used when searching for tools
/// Names and locations are usually in this order:
diff --git a/clang/test/Driver/riscv-cpus.c b/clang/test/Driver/riscv-cpus.c
index 2698612f..2a9c4e7 100644
--- a/clang/test/Driver/riscv-cpus.c
+++ b/clang/test/Driver/riscv-cpus.c
@@ -403,6 +403,11 @@
// MCPU-MARCH: "-nostdsysteminc" "-target-cpu" "sifive-e31" "-target-feature" "+m" "-target-feature" "+c"
// MCPU-MARCH: "-target-abi" "ilp32"
+// -march=unset erases previous march
+// RUN: %clang --target=riscv32 -### -c %s 2>&1 -march=rv32imc -march=unset -mcpu=sifive-e31 | FileCheck -check-prefix=MARCH-UNSET %s
+// MARCH-UNSET: "-nostdsysteminc" "-target-cpu" "sifive-e31" "-target-feature" "+m" "-target-feature" "+a" "-target-feature" "+c"
+// MARCH-UNSET-SAME: "-target-abi" "ilp32"
+
// Check interaction between -mcpu and mtune, -mtune won't affect arch related
// target feature, but -mcpu will.
//
diff --git a/clang/test/Driver/spirv-toolchain.cl b/clang/test/Driver/spirv-toolchain.cl
index 53e8455..54c794c 100644
--- a/clang/test/Driver/spirv-toolchain.cl
+++ b/clang/test/Driver/spirv-toolchain.cl
@@ -92,7 +92,7 @@
// RUN: mkdir -p %t/versioned
// RUN: touch %t/versioned/spirv-as-%llvm-version-major \
// RUN: && chmod +x %t/versioned/spirv-as-%llvm-version-major
-// RUN: %if !system-windows %{ env "PATH=%t/versioned" %clang -### --target=spirv64 -x cl -c --save-temps %s 2>&1 \
+// RUN: %if !system-windows && !system-cygwin %{ env "PATH=%t/versioned" %clang -### --target=spirv64 -x cl -c --save-temps %s 2>&1 \
// RUN: | FileCheck -DVERSION=%llvm-version-major --check-prefix=VERSIONED %s %}
// VERSIONED: {{.*}}spirv-as-[[VERSION]]
diff --git a/clang/test/Headers/__clang_hip_cmath.hip b/clang/test/Headers/__clang_hip_cmath.hip
index fcd7499..2e0b776 100644
--- a/clang/test/Headers/__clang_hip_cmath.hip
+++ b/clang/test/Headers/__clang_hip_cmath.hip
@@ -124,12 +124,12 @@ namespace user_namespace {
// DEFAULT-NEXT: [[B:%.*]] = alloca [[STRUCT_USER_BFLOAT16]], align 1, addrspace(5)
// DEFAULT-NEXT: [[A_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A]] to ptr
// DEFAULT-NEXT: [[B_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[B]] to ptr
-// DEFAULT-NEXT: call void @llvm.lifetime.start.p5(i64 1, ptr addrspace(5) [[A]]) #[[ATTR11:[0-9]+]]
+// DEFAULT-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[A]]) #[[ATTR11:[0-9]+]]
// DEFAULT-NEXT: call void @_ZN13user_bfloat16C1Ef(ptr noundef nonnull align 1 dereferenceable(1) [[A_ASCAST]], float noundef 1.000000e+00) #[[ATTR10]]
-// DEFAULT-NEXT: call void @llvm.lifetime.start.p5(i64 1, ptr addrspace(5) [[B]]) #[[ATTR11]]
+// DEFAULT-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[B]]) #[[ATTR11]]
// DEFAULT-NEXT: call void @_ZN13user_bfloat16C1Ef(ptr noundef nonnull align 1 dereferenceable(1) [[B_ASCAST]], float noundef 2.000000e+00) #[[ATTR10]]
-// DEFAULT-NEXT: call void @llvm.lifetime.end.p5(i64 1, ptr addrspace(5) [[B]]) #[[ATTR11]]
-// DEFAULT-NEXT: call void @llvm.lifetime.end.p5(i64 1, ptr addrspace(5) [[A]]) #[[ATTR11]]
+// DEFAULT-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[B]]) #[[ATTR11]]
+// DEFAULT-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[A]]) #[[ATTR11]]
// DEFAULT-NEXT: ret void
//
// FINITEONLY-LABEL: @_ZN14user_namespace8test_fmaEv(
@@ -138,12 +138,12 @@ namespace user_namespace {
// FINITEONLY-NEXT: [[B:%.*]] = alloca [[STRUCT_USER_BFLOAT16]], align 1, addrspace(5)
// FINITEONLY-NEXT: [[A_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A]] to ptr
// FINITEONLY-NEXT: [[B_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[B]] to ptr
-// FINITEONLY-NEXT: call void @llvm.lifetime.start.p5(i64 1, ptr addrspace(5) [[A]]) #[[ATTR11:[0-9]+]]
+// FINITEONLY-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[A]]) #[[ATTR11:[0-9]+]]
// FINITEONLY-NEXT: call void @_ZN13user_bfloat16C1Ef(ptr noundef nonnull align 1 dereferenceable(1) [[A_ASCAST]], float noundef nofpclass(nan inf) 1.000000e+00) #[[ATTR10]]
-// FINITEONLY-NEXT: call void @llvm.lifetime.start.p5(i64 1, ptr addrspace(5) [[B]]) #[[ATTR11]]
+// FINITEONLY-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[B]]) #[[ATTR11]]
// FINITEONLY-NEXT: call void @_ZN13user_bfloat16C1Ef(ptr noundef nonnull align 1 dereferenceable(1) [[B_ASCAST]], float noundef nofpclass(nan inf) 2.000000e+00) #[[ATTR10]]
-// FINITEONLY-NEXT: call void @llvm.lifetime.end.p5(i64 1, ptr addrspace(5) [[B]]) #[[ATTR11]]
-// FINITEONLY-NEXT: call void @llvm.lifetime.end.p5(i64 1, ptr addrspace(5) [[A]]) #[[ATTR11]]
+// FINITEONLY-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[B]]) #[[ATTR11]]
+// FINITEONLY-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[A]]) #[[ATTR11]]
// FINITEONLY-NEXT: ret void
//
__global__ void test_fma() {
diff --git a/clang/test/Headers/__clang_hip_math.hip b/clang/test/Headers/__clang_hip_math.hip
index 81c5f43..15bdb75 100644
--- a/clang/test/Headers/__clang_hip_math.hip
+++ b/clang/test/Headers/__clang_hip_math.hip
@@ -3636,52 +3636,52 @@ extern "C" __device__ long int test_lround(double x) {
// DEFAULT-LABEL: @test_modff(
// DEFAULT-NEXT: entry:
// DEFAULT-NEXT: [[__TMP_I:%.*]] = alloca float, align 4, addrspace(5)
-// DEFAULT-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17:[0-9]+]]
+// DEFAULT-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17:[0-9]+]]
// DEFAULT-NEXT: [[CALL_I:%.*]] = call contract noundef float @__ocml_modf_f32(float noundef [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// DEFAULT-NEXT: [[TMP0:%.*]] = load float, ptr addrspace(5) [[__TMP_I]], align 4, !tbaa [[TBAA16:![0-9]+]]
// DEFAULT-NEXT: store float [[TMP0]], ptr [[Y:%.*]], align 4, !tbaa [[TBAA16]]
-// DEFAULT-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// DEFAULT-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// DEFAULT-NEXT: ret float [[CALL_I]]
//
// FINITEONLY-LABEL: @test_modff(
// FINITEONLY-NEXT: entry:
// FINITEONLY-NEXT: [[__TMP_I:%.*]] = alloca float, align 4, addrspace(5)
-// FINITEONLY-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17:[0-9]+]]
+// FINITEONLY-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17:[0-9]+]]
// FINITEONLY-NEXT: [[CALL_I:%.*]] = call nnan ninf contract noundef nofpclass(nan inf) float @__ocml_modf_f32(float noundef nofpclass(nan inf) [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// FINITEONLY-NEXT: [[TMP0:%.*]] = load float, ptr addrspace(5) [[__TMP_I]], align 4, !tbaa [[TBAA16:![0-9]+]]
// FINITEONLY-NEXT: store float [[TMP0]], ptr [[Y:%.*]], align 4, !tbaa [[TBAA16]]
-// FINITEONLY-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// FINITEONLY-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// FINITEONLY-NEXT: ret float [[CALL_I]]
//
// APPROX-LABEL: @test_modff(
// APPROX-NEXT: entry:
// APPROX-NEXT: [[__TMP_I:%.*]] = alloca float, align 4, addrspace(5)
-// APPROX-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17:[0-9]+]]
+// APPROX-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17:[0-9]+]]
// APPROX-NEXT: [[CALL_I:%.*]] = call contract noundef float @__ocml_modf_f32(float noundef [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// APPROX-NEXT: [[TMP0:%.*]] = load float, ptr addrspace(5) [[__TMP_I]], align 4, !tbaa [[TBAA16:![0-9]+]]
// APPROX-NEXT: store float [[TMP0]], ptr [[Y:%.*]], align 4, !tbaa [[TBAA16]]
-// APPROX-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// APPROX-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// APPROX-NEXT: ret float [[CALL_I]]
//
// NCRDIV-LABEL: @test_modff(
// NCRDIV-NEXT: entry:
// NCRDIV-NEXT: [[__TMP_I:%.*]] = alloca float, align 4, addrspace(5)
-// NCRDIV-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17:[0-9]+]]
+// NCRDIV-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17:[0-9]+]]
// NCRDIV-NEXT: [[CALL_I:%.*]] = call contract noundef float @__ocml_modf_f32(float noundef [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// NCRDIV-NEXT: [[TMP0:%.*]] = load float, ptr addrspace(5) [[__TMP_I]], align 4, !tbaa [[TBAA17:![0-9]+]]
// NCRDIV-NEXT: store float [[TMP0]], ptr [[Y:%.*]], align 4, !tbaa [[TBAA17]]
-// NCRDIV-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// NCRDIV-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// NCRDIV-NEXT: ret float [[CALL_I]]
//
// AMDGCNSPIRV-LABEL: @test_modff(
// AMDGCNSPIRV-NEXT: entry:
// AMDGCNSPIRV-NEXT: [[__TMP_I:%.*]] = alloca float, align 4
// AMDGCNSPIRV-NEXT: [[__TMP_ASCAST_I:%.*]] = addrspacecast ptr [[__TMP_I]] to ptr addrspace(4)
-// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[__TMP_I]]) #[[ATTR15:[0-9]+]]
+// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.start.p0(ptr nonnull [[__TMP_I]]) #[[ATTR15:[0-9]+]]
// AMDGCNSPIRV-NEXT: [[CALL_I:%.*]] = call contract spir_func noundef addrspace(4) float @__ocml_modf_f32(float noundef [[X:%.*]], ptr noundef nonnull [[__TMP_I]]) #[[ATTR14]]
// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load float, ptr addrspace(4) [[__TMP_ASCAST_I]], align 4, !tbaa [[TBAA17:![0-9]+]]
// AMDGCNSPIRV-NEXT: store float [[TMP0]], ptr addrspace(4) [[Y:%.*]], align 4, !tbaa [[TBAA17]]
-// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[__TMP_I]]) #[[ATTR15]]
+// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.end.p0(ptr nonnull [[__TMP_I]]) #[[ATTR15]]
// AMDGCNSPIRV-NEXT: ret float [[CALL_I]]
//
extern "C" __device__ float test_modff(float x, float* y) {
@@ -3691,52 +3691,52 @@ extern "C" __device__ float test_modff(float x, float* y) {
// DEFAULT-LABEL: @test_modf(
// DEFAULT-NEXT: entry:
// DEFAULT-NEXT: [[__TMP_I:%.*]] = alloca double, align 8, addrspace(5)
-// DEFAULT-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// DEFAULT-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// DEFAULT-NEXT: [[CALL_I:%.*]] = call contract noundef double @__ocml_modf_f64(double noundef [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// DEFAULT-NEXT: [[TMP0:%.*]] = load double, ptr addrspace(5) [[__TMP_I]], align 8, !tbaa [[TBAA18:![0-9]+]]
// DEFAULT-NEXT: store double [[TMP0]], ptr [[Y:%.*]], align 8, !tbaa [[TBAA18]]
-// DEFAULT-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// DEFAULT-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// DEFAULT-NEXT: ret double [[CALL_I]]
//
// FINITEONLY-LABEL: @test_modf(
// FINITEONLY-NEXT: entry:
// FINITEONLY-NEXT: [[__TMP_I:%.*]] = alloca double, align 8, addrspace(5)
-// FINITEONLY-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// FINITEONLY-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// FINITEONLY-NEXT: [[CALL_I:%.*]] = call nnan ninf contract noundef nofpclass(nan inf) double @__ocml_modf_f64(double noundef nofpclass(nan inf) [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// FINITEONLY-NEXT: [[TMP0:%.*]] = load double, ptr addrspace(5) [[__TMP_I]], align 8, !tbaa [[TBAA18:![0-9]+]]
// FINITEONLY-NEXT: store double [[TMP0]], ptr [[Y:%.*]], align 8, !tbaa [[TBAA18]]
-// FINITEONLY-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// FINITEONLY-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// FINITEONLY-NEXT: ret double [[CALL_I]]
//
// APPROX-LABEL: @test_modf(
// APPROX-NEXT: entry:
// APPROX-NEXT: [[__TMP_I:%.*]] = alloca double, align 8, addrspace(5)
-// APPROX-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// APPROX-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// APPROX-NEXT: [[CALL_I:%.*]] = call contract noundef double @__ocml_modf_f64(double noundef [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// APPROX-NEXT: [[TMP0:%.*]] = load double, ptr addrspace(5) [[__TMP_I]], align 8, !tbaa [[TBAA18:![0-9]+]]
// APPROX-NEXT: store double [[TMP0]], ptr [[Y:%.*]], align 8, !tbaa [[TBAA18]]
-// APPROX-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// APPROX-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// APPROX-NEXT: ret double [[CALL_I]]
//
// NCRDIV-LABEL: @test_modf(
// NCRDIV-NEXT: entry:
// NCRDIV-NEXT: [[__TMP_I:%.*]] = alloca double, align 8, addrspace(5)
-// NCRDIV-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// NCRDIV-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// NCRDIV-NEXT: [[CALL_I:%.*]] = call contract noundef double @__ocml_modf_f64(double noundef [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// NCRDIV-NEXT: [[TMP0:%.*]] = load double, ptr addrspace(5) [[__TMP_I]], align 8, !tbaa [[TBAA19:![0-9]+]]
// NCRDIV-NEXT: store double [[TMP0]], ptr [[Y:%.*]], align 8, !tbaa [[TBAA19]]
-// NCRDIV-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// NCRDIV-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// NCRDIV-NEXT: ret double [[CALL_I]]
//
// AMDGCNSPIRV-LABEL: @test_modf(
// AMDGCNSPIRV-NEXT: entry:
// AMDGCNSPIRV-NEXT: [[__TMP_I:%.*]] = alloca double, align 8
// AMDGCNSPIRV-NEXT: [[__TMP_ASCAST_I:%.*]] = addrspacecast ptr [[__TMP_I]] to ptr addrspace(4)
-// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.start.p0(i64 8, ptr nonnull [[__TMP_I]]) #[[ATTR15]]
+// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.start.p0(ptr nonnull [[__TMP_I]]) #[[ATTR15]]
// AMDGCNSPIRV-NEXT: [[CALL_I:%.*]] = call contract spir_func noundef addrspace(4) double @__ocml_modf_f64(double noundef [[X:%.*]], ptr noundef nonnull [[__TMP_I]]) #[[ATTR14]]
// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load double, ptr addrspace(4) [[__TMP_ASCAST_I]], align 8, !tbaa [[TBAA19:![0-9]+]]
// AMDGCNSPIRV-NEXT: store double [[TMP0]], ptr addrspace(4) [[Y:%.*]], align 8, !tbaa [[TBAA19]]
-// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.end.p0(i64 8, ptr nonnull [[__TMP_I]]) #[[ATTR15]]
+// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.end.p0(ptr nonnull [[__TMP_I]]) #[[ATTR15]]
// AMDGCNSPIRV-NEXT: ret double [[CALL_I]]
//
extern "C" __device__ double test_modf(double x, double* y) {
@@ -5362,52 +5362,52 @@ extern "C" __device__ double test_remainder(double x, double y) {
// DEFAULT-LABEL: @test_remquof(
// DEFAULT-NEXT: entry:
// DEFAULT-NEXT: [[__TMP_I:%.*]] = alloca i32, align 4, addrspace(5)
-// DEFAULT-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// DEFAULT-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// DEFAULT-NEXT: [[CALL_I:%.*]] = call contract noundef float @__ocml_remquo_f32(float noundef [[X:%.*]], float noundef [[Y:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// DEFAULT-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(5) [[__TMP_I]], align 4, !tbaa [[TBAA12]]
// DEFAULT-NEXT: store i32 [[TMP0]], ptr [[Z:%.*]], align 4, !tbaa [[TBAA12]]
-// DEFAULT-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// DEFAULT-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// DEFAULT-NEXT: ret float [[CALL_I]]
//
// FINITEONLY-LABEL: @test_remquof(
// FINITEONLY-NEXT: entry:
// FINITEONLY-NEXT: [[__TMP_I:%.*]] = alloca i32, align 4, addrspace(5)
-// FINITEONLY-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// FINITEONLY-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// FINITEONLY-NEXT: [[CALL_I:%.*]] = call nnan ninf contract noundef nofpclass(nan inf) float @__ocml_remquo_f32(float noundef nofpclass(nan inf) [[X:%.*]], float noundef nofpclass(nan inf) [[Y:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// FINITEONLY-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(5) [[__TMP_I]], align 4, !tbaa [[TBAA12]]
// FINITEONLY-NEXT: store i32 [[TMP0]], ptr [[Z:%.*]], align 4, !tbaa [[TBAA12]]
-// FINITEONLY-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// FINITEONLY-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// FINITEONLY-NEXT: ret float [[CALL_I]]
//
// APPROX-LABEL: @test_remquof(
// APPROX-NEXT: entry:
// APPROX-NEXT: [[__TMP_I:%.*]] = alloca i32, align 4, addrspace(5)
-// APPROX-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// APPROX-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// APPROX-NEXT: [[CALL_I:%.*]] = call contract noundef float @__ocml_remquo_f32(float noundef [[X:%.*]], float noundef [[Y:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// APPROX-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(5) [[__TMP_I]], align 4, !tbaa [[TBAA12]]
// APPROX-NEXT: store i32 [[TMP0]], ptr [[Z:%.*]], align 4, !tbaa [[TBAA12]]
-// APPROX-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// APPROX-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// APPROX-NEXT: ret float [[CALL_I]]
//
// NCRDIV-LABEL: @test_remquof(
// NCRDIV-NEXT: entry:
// NCRDIV-NEXT: [[__TMP_I:%.*]] = alloca i32, align 4, addrspace(5)
-// NCRDIV-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// NCRDIV-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// NCRDIV-NEXT: [[CALL_I:%.*]] = call contract noundef float @__ocml_remquo_f32(float noundef [[X:%.*]], float noundef [[Y:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// NCRDIV-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(5) [[__TMP_I]], align 4, !tbaa [[TBAA13]]
// NCRDIV-NEXT: store i32 [[TMP0]], ptr [[Z:%.*]], align 4, !tbaa [[TBAA13]]
-// NCRDIV-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// NCRDIV-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// NCRDIV-NEXT: ret float [[CALL_I]]
//
// AMDGCNSPIRV-LABEL: @test_remquof(
// AMDGCNSPIRV-NEXT: entry:
// AMDGCNSPIRV-NEXT: [[__TMP_I:%.*]] = alloca i32, align 4
// AMDGCNSPIRV-NEXT: [[__TMP_ASCAST_I:%.*]] = addrspacecast ptr [[__TMP_I]] to ptr addrspace(4)
-// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[__TMP_I]]) #[[ATTR15]]
+// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.start.p0(ptr nonnull [[__TMP_I]]) #[[ATTR15]]
// AMDGCNSPIRV-NEXT: [[CALL_I:%.*]] = call contract spir_func noundef addrspace(4) float @__ocml_remquo_f32(float noundef [[X:%.*]], float noundef [[Y:%.*]], ptr noundef nonnull [[__TMP_I]]) #[[ATTR14]]
// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(4) [[__TMP_ASCAST_I]], align 4, !tbaa [[TBAA13]]
// AMDGCNSPIRV-NEXT: store i32 [[TMP0]], ptr addrspace(4) [[Z:%.*]], align 4, !tbaa [[TBAA13]]
-// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[__TMP_I]]) #[[ATTR15]]
+// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.end.p0(ptr nonnull [[__TMP_I]]) #[[ATTR15]]
// AMDGCNSPIRV-NEXT: ret float [[CALL_I]]
//
extern "C" __device__ float test_remquof(float x, float y, int* z) {
@@ -5417,52 +5417,52 @@ extern "C" __device__ float test_remquof(float x, float y, int* z) {
// DEFAULT-LABEL: @test_remquo(
// DEFAULT-NEXT: entry:
// DEFAULT-NEXT: [[__TMP_I:%.*]] = alloca i32, align 4, addrspace(5)
-// DEFAULT-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// DEFAULT-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// DEFAULT-NEXT: [[CALL_I:%.*]] = call contract noundef double @__ocml_remquo_f64(double noundef [[X:%.*]], double noundef [[Y:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// DEFAULT-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(5) [[__TMP_I]], align 4, !tbaa [[TBAA12]]
// DEFAULT-NEXT: store i32 [[TMP0]], ptr [[Z:%.*]], align 4, !tbaa [[TBAA12]]
-// DEFAULT-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// DEFAULT-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// DEFAULT-NEXT: ret double [[CALL_I]]
//
// FINITEONLY-LABEL: @test_remquo(
// FINITEONLY-NEXT: entry:
// FINITEONLY-NEXT: [[__TMP_I:%.*]] = alloca i32, align 4, addrspace(5)
-// FINITEONLY-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// FINITEONLY-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// FINITEONLY-NEXT: [[CALL_I:%.*]] = call nnan ninf contract noundef nofpclass(nan inf) double @__ocml_remquo_f64(double noundef nofpclass(nan inf) [[X:%.*]], double noundef nofpclass(nan inf) [[Y:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// FINITEONLY-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(5) [[__TMP_I]], align 4, !tbaa [[TBAA12]]
// FINITEONLY-NEXT: store i32 [[TMP0]], ptr [[Z:%.*]], align 4, !tbaa [[TBAA12]]
-// FINITEONLY-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// FINITEONLY-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// FINITEONLY-NEXT: ret double [[CALL_I]]
//
// APPROX-LABEL: @test_remquo(
// APPROX-NEXT: entry:
// APPROX-NEXT: [[__TMP_I:%.*]] = alloca i32, align 4, addrspace(5)
-// APPROX-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// APPROX-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// APPROX-NEXT: [[CALL_I:%.*]] = call contract noundef double @__ocml_remquo_f64(double noundef [[X:%.*]], double noundef [[Y:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// APPROX-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(5) [[__TMP_I]], align 4, !tbaa [[TBAA12]]
// APPROX-NEXT: store i32 [[TMP0]], ptr [[Z:%.*]], align 4, !tbaa [[TBAA12]]
-// APPROX-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// APPROX-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// APPROX-NEXT: ret double [[CALL_I]]
//
// NCRDIV-LABEL: @test_remquo(
// NCRDIV-NEXT: entry:
// NCRDIV-NEXT: [[__TMP_I:%.*]] = alloca i32, align 4, addrspace(5)
-// NCRDIV-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// NCRDIV-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// NCRDIV-NEXT: [[CALL_I:%.*]] = call contract noundef double @__ocml_remquo_f64(double noundef [[X:%.*]], double noundef [[Y:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// NCRDIV-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(5) [[__TMP_I]], align 4, !tbaa [[TBAA13]]
// NCRDIV-NEXT: store i32 [[TMP0]], ptr [[Z:%.*]], align 4, !tbaa [[TBAA13]]
-// NCRDIV-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// NCRDIV-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// NCRDIV-NEXT: ret double [[CALL_I]]
//
// AMDGCNSPIRV-LABEL: @test_remquo(
// AMDGCNSPIRV-NEXT: entry:
// AMDGCNSPIRV-NEXT: [[__TMP_I:%.*]] = alloca i32, align 4
// AMDGCNSPIRV-NEXT: [[__TMP_ASCAST_I:%.*]] = addrspacecast ptr [[__TMP_I]] to ptr addrspace(4)
-// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[__TMP_I]]) #[[ATTR15]]
+// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.start.p0(ptr nonnull [[__TMP_I]]) #[[ATTR15]]
// AMDGCNSPIRV-NEXT: [[CALL_I:%.*]] = call contract spir_func noundef addrspace(4) double @__ocml_remquo_f64(double noundef [[X:%.*]], double noundef [[Y:%.*]], ptr noundef nonnull [[__TMP_I]]) #[[ATTR14]]
// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(4) [[__TMP_ASCAST_I]], align 4, !tbaa [[TBAA13]]
// AMDGCNSPIRV-NEXT: store i32 [[TMP0]], ptr addrspace(4) [[Z:%.*]], align 4, !tbaa [[TBAA13]]
-// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[__TMP_I]]) #[[ATTR15]]
+// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.end.p0(ptr nonnull [[__TMP_I]]) #[[ATTR15]]
// AMDGCNSPIRV-NEXT: ret double [[CALL_I]]
//
extern "C" __device__ double test_remquo(double x, double y, int* z) {
@@ -6198,57 +6198,57 @@ extern "C" __device__ BOOL_TYPE test___signbit(double x) {
// DEFAULT-LABEL: @test_sincosf(
// DEFAULT-NEXT: entry:
// DEFAULT-NEXT: [[__TMP_I:%.*]] = alloca float, align 4, addrspace(5)
-// DEFAULT-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// DEFAULT-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// DEFAULT-NEXT: [[CALL_I:%.*]] = call contract float @__ocml_sincos_f32(float noundef [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// DEFAULT-NEXT: store float [[CALL_I]], ptr [[Y:%.*]], align 4, !tbaa [[TBAA16]]
// DEFAULT-NEXT: [[TMP0:%.*]] = load float, ptr addrspace(5) [[__TMP_I]], align 4, !tbaa [[TBAA16]]
// DEFAULT-NEXT: store float [[TMP0]], ptr [[Z:%.*]], align 4, !tbaa [[TBAA16]]
-// DEFAULT-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// DEFAULT-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// DEFAULT-NEXT: ret void
//
// FINITEONLY-LABEL: @test_sincosf(
// FINITEONLY-NEXT: entry:
// FINITEONLY-NEXT: [[__TMP_I:%.*]] = alloca float, align 4, addrspace(5)
-// FINITEONLY-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// FINITEONLY-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// FINITEONLY-NEXT: [[CALL_I:%.*]] = call nnan ninf contract nofpclass(nan inf) float @__ocml_sincos_f32(float noundef nofpclass(nan inf) [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// FINITEONLY-NEXT: store float [[CALL_I]], ptr [[Y:%.*]], align 4, !tbaa [[TBAA16]]
// FINITEONLY-NEXT: [[TMP0:%.*]] = load float, ptr addrspace(5) [[__TMP_I]], align 4, !tbaa [[TBAA16]]
// FINITEONLY-NEXT: store float [[TMP0]], ptr [[Z:%.*]], align 4, !tbaa [[TBAA16]]
-// FINITEONLY-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// FINITEONLY-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// FINITEONLY-NEXT: ret void
//
// APPROX-LABEL: @test_sincosf(
// APPROX-NEXT: entry:
// APPROX-NEXT: [[__TMP_I:%.*]] = alloca float, align 4, addrspace(5)
-// APPROX-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// APPROX-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// APPROX-NEXT: [[CALL_I:%.*]] = call contract float @__ocml_sincos_f32(float noundef [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// APPROX-NEXT: store float [[CALL_I]], ptr [[Y:%.*]], align 4, !tbaa [[TBAA16]]
// APPROX-NEXT: [[TMP0:%.*]] = load float, ptr addrspace(5) [[__TMP_I]], align 4, !tbaa [[TBAA16]]
// APPROX-NEXT: store float [[TMP0]], ptr [[Z:%.*]], align 4, !tbaa [[TBAA16]]
-// APPROX-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// APPROX-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// APPROX-NEXT: ret void
//
// NCRDIV-LABEL: @test_sincosf(
// NCRDIV-NEXT: entry:
// NCRDIV-NEXT: [[__TMP_I:%.*]] = alloca float, align 4, addrspace(5)
-// NCRDIV-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// NCRDIV-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// NCRDIV-NEXT: [[CALL_I:%.*]] = call contract float @__ocml_sincos_f32(float noundef [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// NCRDIV-NEXT: store float [[CALL_I]], ptr [[Y:%.*]], align 4, !tbaa [[TBAA17]]
// NCRDIV-NEXT: [[TMP0:%.*]] = load float, ptr addrspace(5) [[__TMP_I]], align 4, !tbaa [[TBAA17]]
// NCRDIV-NEXT: store float [[TMP0]], ptr [[Z:%.*]], align 4, !tbaa [[TBAA17]]
-// NCRDIV-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// NCRDIV-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// NCRDIV-NEXT: ret void
//
// AMDGCNSPIRV-LABEL: @test_sincosf(
// AMDGCNSPIRV-NEXT: entry:
// AMDGCNSPIRV-NEXT: [[__TMP_I:%.*]] = alloca float, align 4
// AMDGCNSPIRV-NEXT: [[__TMP_ASCAST_I:%.*]] = addrspacecast ptr [[__TMP_I]] to ptr addrspace(4)
-// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[__TMP_I]]) #[[ATTR15]]
+// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.start.p0(ptr nonnull [[__TMP_I]]) #[[ATTR15]]
// AMDGCNSPIRV-NEXT: [[CALL_I:%.*]] = call contract spir_func addrspace(4) float @__ocml_sincos_f32(float noundef [[X:%.*]], ptr noundef nonnull [[__TMP_I]]) #[[ATTR14]]
// AMDGCNSPIRV-NEXT: store float [[CALL_I]], ptr addrspace(4) [[Y:%.*]], align 4, !tbaa [[TBAA17]]
// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load float, ptr addrspace(4) [[__TMP_ASCAST_I]], align 4, !tbaa [[TBAA17]]
// AMDGCNSPIRV-NEXT: store float [[TMP0]], ptr addrspace(4) [[Z:%.*]], align 4, !tbaa [[TBAA17]]
-// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[__TMP_I]]) #[[ATTR15]]
+// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.end.p0(ptr nonnull [[__TMP_I]]) #[[ATTR15]]
// AMDGCNSPIRV-NEXT: ret void
//
extern "C" __device__ void test_sincosf(float x, float *y, float *z) {
@@ -6258,57 +6258,57 @@ extern "C" __device__ void test_sincosf(float x, float *y, float *z) {
// DEFAULT-LABEL: @test_sincos(
// DEFAULT-NEXT: entry:
// DEFAULT-NEXT: [[__TMP_I:%.*]] = alloca double, align 8, addrspace(5)
-// DEFAULT-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// DEFAULT-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// DEFAULT-NEXT: [[CALL_I:%.*]] = call contract double @__ocml_sincos_f64(double noundef [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// DEFAULT-NEXT: store double [[CALL_I]], ptr [[Y:%.*]], align 8, !tbaa [[TBAA18]]
// DEFAULT-NEXT: [[TMP0:%.*]] = load double, ptr addrspace(5) [[__TMP_I]], align 8, !tbaa [[TBAA18]]
// DEFAULT-NEXT: store double [[TMP0]], ptr [[Z:%.*]], align 8, !tbaa [[TBAA18]]
-// DEFAULT-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// DEFAULT-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// DEFAULT-NEXT: ret void
//
// FINITEONLY-LABEL: @test_sincos(
// FINITEONLY-NEXT: entry:
// FINITEONLY-NEXT: [[__TMP_I:%.*]] = alloca double, align 8, addrspace(5)
-// FINITEONLY-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// FINITEONLY-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// FINITEONLY-NEXT: [[CALL_I:%.*]] = call nnan ninf contract nofpclass(nan inf) double @__ocml_sincos_f64(double noundef nofpclass(nan inf) [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// FINITEONLY-NEXT: store double [[CALL_I]], ptr [[Y:%.*]], align 8, !tbaa [[TBAA18]]
// FINITEONLY-NEXT: [[TMP0:%.*]] = load double, ptr addrspace(5) [[__TMP_I]], align 8, !tbaa [[TBAA18]]
// FINITEONLY-NEXT: store double [[TMP0]], ptr [[Z:%.*]], align 8, !tbaa [[TBAA18]]
-// FINITEONLY-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// FINITEONLY-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// FINITEONLY-NEXT: ret void
//
// APPROX-LABEL: @test_sincos(
// APPROX-NEXT: entry:
// APPROX-NEXT: [[__TMP_I:%.*]] = alloca double, align 8, addrspace(5)
-// APPROX-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// APPROX-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// APPROX-NEXT: [[CALL_I:%.*]] = call contract double @__ocml_sincos_f64(double noundef [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// APPROX-NEXT: store double [[CALL_I]], ptr [[Y:%.*]], align 8, !tbaa [[TBAA18]]
// APPROX-NEXT: [[TMP0:%.*]] = load double, ptr addrspace(5) [[__TMP_I]], align 8, !tbaa [[TBAA18]]
// APPROX-NEXT: store double [[TMP0]], ptr [[Z:%.*]], align 8, !tbaa [[TBAA18]]
-// APPROX-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// APPROX-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// APPROX-NEXT: ret void
//
// NCRDIV-LABEL: @test_sincos(
// NCRDIV-NEXT: entry:
// NCRDIV-NEXT: [[__TMP_I:%.*]] = alloca double, align 8, addrspace(5)
-// NCRDIV-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// NCRDIV-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// NCRDIV-NEXT: [[CALL_I:%.*]] = call contract double @__ocml_sincos_f64(double noundef [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// NCRDIV-NEXT: store double [[CALL_I]], ptr [[Y:%.*]], align 8, !tbaa [[TBAA19]]
// NCRDIV-NEXT: [[TMP0:%.*]] = load double, ptr addrspace(5) [[__TMP_I]], align 8, !tbaa [[TBAA19]]
// NCRDIV-NEXT: store double [[TMP0]], ptr [[Z:%.*]], align 8, !tbaa [[TBAA19]]
-// NCRDIV-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// NCRDIV-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// NCRDIV-NEXT: ret void
//
// AMDGCNSPIRV-LABEL: @test_sincos(
// AMDGCNSPIRV-NEXT: entry:
// AMDGCNSPIRV-NEXT: [[__TMP_I:%.*]] = alloca double, align 8
// AMDGCNSPIRV-NEXT: [[__TMP_ASCAST_I:%.*]] = addrspacecast ptr [[__TMP_I]] to ptr addrspace(4)
-// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.start.p0(i64 8, ptr nonnull [[__TMP_I]]) #[[ATTR15]]
+// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.start.p0(ptr nonnull [[__TMP_I]]) #[[ATTR15]]
// AMDGCNSPIRV-NEXT: [[CALL_I:%.*]] = call contract spir_func addrspace(4) double @__ocml_sincos_f64(double noundef [[X:%.*]], ptr noundef nonnull [[__TMP_I]]) #[[ATTR14]]
// AMDGCNSPIRV-NEXT: store double [[CALL_I]], ptr addrspace(4) [[Y:%.*]], align 8, !tbaa [[TBAA19]]
// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load double, ptr addrspace(4) [[__TMP_ASCAST_I]], align 8, !tbaa [[TBAA19]]
// AMDGCNSPIRV-NEXT: store double [[TMP0]], ptr addrspace(4) [[Z:%.*]], align 8, !tbaa [[TBAA19]]
-// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.end.p0(i64 8, ptr nonnull [[__TMP_I]]) #[[ATTR15]]
+// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.end.p0(ptr nonnull [[__TMP_I]]) #[[ATTR15]]
// AMDGCNSPIRV-NEXT: ret void
//
extern "C" __device__ void test_sincos(double x, double *y, double *z) {
@@ -6318,57 +6318,57 @@ extern "C" __device__ void test_sincos(double x, double *y, double *z) {
// DEFAULT-LABEL: @test_sincospif(
// DEFAULT-NEXT: entry:
// DEFAULT-NEXT: [[__TMP_I:%.*]] = alloca float, align 4, addrspace(5)
-// DEFAULT-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// DEFAULT-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// DEFAULT-NEXT: [[CALL_I:%.*]] = call contract float @__ocml_sincospi_f32(float noundef [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// DEFAULT-NEXT: store float [[CALL_I]], ptr [[Y:%.*]], align 4, !tbaa [[TBAA16]]
// DEFAULT-NEXT: [[TMP0:%.*]] = load float, ptr addrspace(5) [[__TMP_I]], align 4, !tbaa [[TBAA16]]
// DEFAULT-NEXT: store float [[TMP0]], ptr [[Z:%.*]], align 4, !tbaa [[TBAA16]]
-// DEFAULT-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// DEFAULT-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// DEFAULT-NEXT: ret void
//
// FINITEONLY-LABEL: @test_sincospif(
// FINITEONLY-NEXT: entry:
// FINITEONLY-NEXT: [[__TMP_I:%.*]] = alloca float, align 4, addrspace(5)
-// FINITEONLY-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// FINITEONLY-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// FINITEONLY-NEXT: [[CALL_I:%.*]] = call nnan ninf contract nofpclass(nan inf) float @__ocml_sincospi_f32(float noundef nofpclass(nan inf) [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// FINITEONLY-NEXT: store float [[CALL_I]], ptr [[Y:%.*]], align 4, !tbaa [[TBAA16]]
// FINITEONLY-NEXT: [[TMP0:%.*]] = load float, ptr addrspace(5) [[__TMP_I]], align 4, !tbaa [[TBAA16]]
// FINITEONLY-NEXT: store float [[TMP0]], ptr [[Z:%.*]], align 4, !tbaa [[TBAA16]]
-// FINITEONLY-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// FINITEONLY-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// FINITEONLY-NEXT: ret void
//
// APPROX-LABEL: @test_sincospif(
// APPROX-NEXT: entry:
// APPROX-NEXT: [[__TMP_I:%.*]] = alloca float, align 4, addrspace(5)
-// APPROX-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// APPROX-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// APPROX-NEXT: [[CALL_I:%.*]] = call contract float @__ocml_sincospi_f32(float noundef [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// APPROX-NEXT: store float [[CALL_I]], ptr [[Y:%.*]], align 4, !tbaa [[TBAA16]]
// APPROX-NEXT: [[TMP0:%.*]] = load float, ptr addrspace(5) [[__TMP_I]], align 4, !tbaa [[TBAA16]]
// APPROX-NEXT: store float [[TMP0]], ptr [[Z:%.*]], align 4, !tbaa [[TBAA16]]
-// APPROX-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// APPROX-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// APPROX-NEXT: ret void
//
// NCRDIV-LABEL: @test_sincospif(
// NCRDIV-NEXT: entry:
// NCRDIV-NEXT: [[__TMP_I:%.*]] = alloca float, align 4, addrspace(5)
-// NCRDIV-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// NCRDIV-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// NCRDIV-NEXT: [[CALL_I:%.*]] = call contract float @__ocml_sincospi_f32(float noundef [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// NCRDIV-NEXT: store float [[CALL_I]], ptr [[Y:%.*]], align 4, !tbaa [[TBAA17]]
// NCRDIV-NEXT: [[TMP0:%.*]] = load float, ptr addrspace(5) [[__TMP_I]], align 4, !tbaa [[TBAA17]]
// NCRDIV-NEXT: store float [[TMP0]], ptr [[Z:%.*]], align 4, !tbaa [[TBAA17]]
-// NCRDIV-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// NCRDIV-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// NCRDIV-NEXT: ret void
//
// AMDGCNSPIRV-LABEL: @test_sincospif(
// AMDGCNSPIRV-NEXT: entry:
// AMDGCNSPIRV-NEXT: [[__TMP_I:%.*]] = alloca float, align 4
// AMDGCNSPIRV-NEXT: [[__TMP_ASCAST_I:%.*]] = addrspacecast ptr [[__TMP_I]] to ptr addrspace(4)
-// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[__TMP_I]]) #[[ATTR15]]
+// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.start.p0(ptr nonnull [[__TMP_I]]) #[[ATTR15]]
// AMDGCNSPIRV-NEXT: [[CALL_I:%.*]] = call contract spir_func addrspace(4) float @__ocml_sincospi_f32(float noundef [[X:%.*]], ptr noundef nonnull [[__TMP_I]]) #[[ATTR14]]
// AMDGCNSPIRV-NEXT: store float [[CALL_I]], ptr addrspace(4) [[Y:%.*]], align 4, !tbaa [[TBAA17]]
// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load float, ptr addrspace(4) [[__TMP_ASCAST_I]], align 4, !tbaa [[TBAA17]]
// AMDGCNSPIRV-NEXT: store float [[TMP0]], ptr addrspace(4) [[Z:%.*]], align 4, !tbaa [[TBAA17]]
-// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[__TMP_I]]) #[[ATTR15]]
+// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.end.p0(ptr nonnull [[__TMP_I]]) #[[ATTR15]]
// AMDGCNSPIRV-NEXT: ret void
//
extern "C" __device__ void test_sincospif(float x, float *y, float *z) {
@@ -6378,57 +6378,57 @@ extern "C" __device__ void test_sincospif(float x, float *y, float *z) {
// DEFAULT-LABEL: @test_sincospi(
// DEFAULT-NEXT: entry:
// DEFAULT-NEXT: [[__TMP_I:%.*]] = alloca double, align 8, addrspace(5)
-// DEFAULT-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// DEFAULT-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// DEFAULT-NEXT: [[CALL_I:%.*]] = call contract double @__ocml_sincospi_f64(double noundef [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// DEFAULT-NEXT: store double [[CALL_I]], ptr [[Y:%.*]], align 8, !tbaa [[TBAA18]]
// DEFAULT-NEXT: [[TMP0:%.*]] = load double, ptr addrspace(5) [[__TMP_I]], align 8, !tbaa [[TBAA18]]
// DEFAULT-NEXT: store double [[TMP0]], ptr [[Z:%.*]], align 8, !tbaa [[TBAA18]]
-// DEFAULT-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// DEFAULT-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// DEFAULT-NEXT: ret void
//
// FINITEONLY-LABEL: @test_sincospi(
// FINITEONLY-NEXT: entry:
// FINITEONLY-NEXT: [[__TMP_I:%.*]] = alloca double, align 8, addrspace(5)
-// FINITEONLY-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// FINITEONLY-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// FINITEONLY-NEXT: [[CALL_I:%.*]] = call nnan ninf contract nofpclass(nan inf) double @__ocml_sincospi_f64(double noundef nofpclass(nan inf) [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// FINITEONLY-NEXT: store double [[CALL_I]], ptr [[Y:%.*]], align 8, !tbaa [[TBAA18]]
// FINITEONLY-NEXT: [[TMP0:%.*]] = load double, ptr addrspace(5) [[__TMP_I]], align 8, !tbaa [[TBAA18]]
// FINITEONLY-NEXT: store double [[TMP0]], ptr [[Z:%.*]], align 8, !tbaa [[TBAA18]]
-// FINITEONLY-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// FINITEONLY-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// FINITEONLY-NEXT: ret void
//
// APPROX-LABEL: @test_sincospi(
// APPROX-NEXT: entry:
// APPROX-NEXT: [[__TMP_I:%.*]] = alloca double, align 8, addrspace(5)
-// APPROX-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// APPROX-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// APPROX-NEXT: [[CALL_I:%.*]] = call contract double @__ocml_sincospi_f64(double noundef [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// APPROX-NEXT: store double [[CALL_I]], ptr [[Y:%.*]], align 8, !tbaa [[TBAA18]]
// APPROX-NEXT: [[TMP0:%.*]] = load double, ptr addrspace(5) [[__TMP_I]], align 8, !tbaa [[TBAA18]]
// APPROX-NEXT: store double [[TMP0]], ptr [[Z:%.*]], align 8, !tbaa [[TBAA18]]
-// APPROX-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// APPROX-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// APPROX-NEXT: ret void
//
// NCRDIV-LABEL: @test_sincospi(
// NCRDIV-NEXT: entry:
// NCRDIV-NEXT: [[__TMP_I:%.*]] = alloca double, align 8, addrspace(5)
-// NCRDIV-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// NCRDIV-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// NCRDIV-NEXT: [[CALL_I:%.*]] = call contract double @__ocml_sincospi_f64(double noundef [[X:%.*]], ptr addrspace(5) noundef [[__TMP_I]]) #[[ATTR16]]
// NCRDIV-NEXT: store double [[CALL_I]], ptr [[Y:%.*]], align 8, !tbaa [[TBAA19]]
// NCRDIV-NEXT: [[TMP0:%.*]] = load double, ptr addrspace(5) [[__TMP_I]], align 8, !tbaa [[TBAA19]]
// NCRDIV-NEXT: store double [[TMP0]], ptr [[Z:%.*]], align 8, !tbaa [[TBAA19]]
-// NCRDIV-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
+// NCRDIV-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[__TMP_I]]) #[[ATTR17]]
// NCRDIV-NEXT: ret void
//
// AMDGCNSPIRV-LABEL: @test_sincospi(
// AMDGCNSPIRV-NEXT: entry:
// AMDGCNSPIRV-NEXT: [[__TMP_I:%.*]] = alloca double, align 8
// AMDGCNSPIRV-NEXT: [[__TMP_ASCAST_I:%.*]] = addrspacecast ptr [[__TMP_I]] to ptr addrspace(4)
-// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.start.p0(i64 8, ptr nonnull [[__TMP_I]]) #[[ATTR15]]
+// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.start.p0(ptr nonnull [[__TMP_I]]) #[[ATTR15]]
// AMDGCNSPIRV-NEXT: [[CALL_I:%.*]] = call contract spir_func addrspace(4) double @__ocml_sincospi_f64(double noundef [[X:%.*]], ptr noundef nonnull [[__TMP_I]]) #[[ATTR14]]
// AMDGCNSPIRV-NEXT: store double [[CALL_I]], ptr addrspace(4) [[Y:%.*]], align 8, !tbaa [[TBAA19]]
// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load double, ptr addrspace(4) [[__TMP_ASCAST_I]], align 8, !tbaa [[TBAA19]]
// AMDGCNSPIRV-NEXT: store double [[TMP0]], ptr addrspace(4) [[Z:%.*]], align 8, !tbaa [[TBAA19]]
-// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.end.p0(i64 8, ptr nonnull [[__TMP_I]]) #[[ATTR15]]
+// AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.end.p0(ptr nonnull [[__TMP_I]]) #[[ATTR15]]
// AMDGCNSPIRV-NEXT: ret void
//
extern "C" __device__ void test_sincospi(double x, double *y, double *z) {
diff --git a/clang/test/Headers/__cpuidex_conflict.c b/clang/test/Headers/__cpuidex_conflict.c
index 74f4532..d14ef29 100644
--- a/clang/test/Headers/__cpuidex_conflict.c
+++ b/clang/test/Headers/__cpuidex_conflict.c
@@ -5,6 +5,7 @@
// Ensure that we do not run into conflicts when offloading.
// RUN: %clang_cc1 %s -DIS_STATIC=static -ffreestanding -fopenmp -fopenmp-is-target-device -aux-triple x86_64-unknown-linux-gnu
+// RUN: %clang_cc1 -DIS_STATIC="" -triple nvptx64-nvidia-cuda -aux-triple x86_64-unknown-linux-gnu -aux-target-cpu x86-64 -fcuda-is-device -internal-isystem /home/gha/llvm-project/build/lib/clang/22/include -x cuda %s -o -
typedef __SIZE_TYPE__ size_t;
diff --git a/clang/test/Index/Core/index-instantiated-source.cpp b/clang/test/Index/Core/index-instantiated-source.cpp
index 2a67a3a..91be4a4 100644
--- a/clang/test/Index/Core/index-instantiated-source.cpp
+++ b/clang/test/Index/Core/index-instantiated-source.cpp
@@ -73,8 +73,8 @@ void canonicalizeInstaniationReferences(TemplateClass<int, float> &object) {
typedef TemplateClass<int, float> TT;
TT::NestedType::SubNestedType subNestedType(0);
-// CHECK: [[@LINE-1]]:7 | struct/C++ | NestedType | c:@ST>2#T#T@TemplateClass@S@NestedType |
-// CHECK: [[@LINE-2]]:19 | class/C++ | SubNestedType | c:@ST>2#T#T@TemplateClass@S@NestedType@S@SubNestedType |
+// CHECK: [[@LINE-1]]:19 | class/C++ | SubNestedType | c:@ST>2#T#T@TemplateClass@S@NestedType@S@SubNestedType |
+// CHECK: [[@LINE-2]]:7 | struct/C++ | NestedType | c:@ST>2#T#T@TemplateClass@S@NestedType |
TT::NestedType::TypeAlias nestedTypeAlias;
// CHECK: [[@LINE-1]]:19 | type-alias/C++ | TypeAlias | c:@ST>2#T#T@TemplateClass@S@NestedType@TypeAlias |
diff --git a/clang/test/Index/Core/index-source.cpp b/clang/test/Index/Core/index-source.cpp
index 043e616..36bc663 100644
--- a/clang/test/Index/Core/index-source.cpp
+++ b/clang/test/Index/Core/index-source.cpp
@@ -525,9 +525,9 @@ struct Outer {
template<>
struct rd33122110::Outer::Nested<int>;
-// CHECK: [[@LINE-1]]:8 | namespace/C++ | rd33122110 | c:@N@rd33122110 | <no-cgname> | Ref,RelCont | rel: 1
+// CHECK: [[@LINE-1]]:20 | struct/C++ | Outer | c:@N@rd33122110@S@Outer | <no-cgname> | Ref,RelCont | rel: 1
// CHECK-NEXT: RelCont | Nested | c:@N@rd33122110@S@Outer@S@Nested>#I
-// CHECK: [[@LINE-3]]:20 | struct/C++ | Outer | c:@N@rd33122110@S@Outer | <no-cgname> | Ref,RelCont | rel: 1
+// CHECK: [[@LINE-3]]:8 | namespace/C++ | rd33122110 | c:@N@rd33122110 | <no-cgname> | Ref,RelCont | rel: 1
// CHECK-NEXT: RelCont | Nested | c:@N@rd33122110@S@Outer@S@Nested>#I
namespace index_offsetof {
diff --git a/clang/test/Index/c-index-api-loadTU-test.m b/clang/test/Index/c-index-api-loadTU-test.m
index eb3fde0..8ddb193 100644
--- a/clang/test/Index/c-index-api-loadTU-test.m
+++ b/clang/test/Index/c-index-api-loadTU-test.m
@@ -164,7 +164,7 @@ struct X0 {};
// CHECK: c-index-api-loadTU-test.m:66:28: TypeRef=id:0:0 Extent=[66:28 - 66:30]
// CHECK: c-index-api-loadTU-test.m:69:16: StructDecl=X0:69:16 Extent=[69:9 - 69:18]
// CHECK: c-index-api-loadTU-test.m:69:19: TypedefDecl=X1:69:19 (Definition) Extent=[69:1 - 69:21]
-// CHECK: c-index-api-loadTU-test.m:69:16: TypeRef=struct X0:71:8 Extent=[69:16 - 69:18]
+// CHECK: c-index-api-loadTU-test.m:69:16: TypeRef=struct X0:69:16 Extent=[69:16 - 69:18]
// CHECK: c-index-api-loadTU-test.m:70:8: StructDecl=X0:70:8 Extent=[70:1 - 70:10]
// CHECK: c-index-api-loadTU-test.m:71:8: StructDecl=X0:71:8 (Definition) Extent=[71:1 - 71:14]
// CHECK: c-index-api-loadTU-test.m:73:12: ObjCCategoryDecl=:73:12 Extent=[73:1 - 76:5]
diff --git a/clang/test/Index/copy-assignment-operator.cpp b/clang/test/Index/copy-assignment-operator.cpp
index 7f74193..46902c0 100644
--- a/clang/test/Index/copy-assignment-operator.cpp
+++ b/clang/test/Index/copy-assignment-operator.cpp
@@ -32,7 +32,7 @@ class Bar {
// CHECK: CXXMethod=operator=:4:10 (copy-assignment operator) [type=bool (Foo &)] [typekind=FunctionProto] [canonicaltype=bool (Foo &)] [canonicaltypekind=FunctionProto] [resulttype=bool] [resulttypekind=Bool] [args= [Foo &] [LValueReference]] [isPOD=0] [isAnonRecDecl=0]
// CHECK: CXXMethod=operator=:5:10 (copy-assignment operator) [type=bool (volatile Foo &)] [typekind=FunctionProto] [canonicaltype=bool (volatile Foo &)] [canonicaltypekind=FunctionProto] [resulttype=bool] [resulttypekind=Bool] [args= [volatile Foo &] [LValueReference]] [isPOD=0] [isAnonRecDecl=0]
// CHECK: CXXMethod=operator=:6:10 (copy-assignment operator) [type=bool (const volatile Foo &)] [typekind=FunctionProto] [canonicaltype=bool (const volatile Foo &)] [canonicaltypekind=FunctionProto] [resulttype=bool] [resulttypekind=Bool] [args= [const volatile Foo &] [LValueReference]] [isPOD=0] [isAnonRecDecl=0]
-// CHECK: CXXMethod=operator=:7:10 (copy-assignment operator) [type=bool (Foo)] [typekind=FunctionProto] [canonicaltype=bool (Foo)] [canonicaltypekind=FunctionProto] [resulttype=bool] [resulttypekind=Bool] [args= [Foo] [Elaborated]] [isPOD=0] [isAnonRecDecl=0]
+// CHECK: CXXMethod=operator=:7:10 (copy-assignment operator) [type=bool (Foo)] [typekind=FunctionProto] [canonicaltype=bool (Foo)] [canonicaltypekind=FunctionProto] [resulttype=bool] [resulttypekind=Bool] [args= [Foo] [Record]] [isPOD=0] [isAnonRecDecl=0]
// CHECK: FunctionTemplate=operator=:11:10 [type=bool (const T &)] [typekind=FunctionProto] [canonicaltype=bool (const type-parameter-0-0 &)] [canonicaltypekind=FunctionProto] [resulttype=bool] [resulttypekind=Bool] [isPOD=0] [isAnonRecDecl=0]
// CHECK: CXXMethod=operator=:12:10 [type=bool (const bool &)] [typekind=FunctionProto] [resulttype=bool] [resulttypekind=Bool] [args= [const bool &] [LValueReference]] [isPOD=0] [isAnonRecDecl=0]
// CHECK: CXXMethod=operator=:13:10 [type=bool (char &)] [typekind=FunctionProto] [resulttype=bool] [resulttypekind=Bool] [args= [char &] [LValueReference]] [isPOD=0] [isAnonRecDecl=0]
@@ -44,4 +44,4 @@ class Bar {
// CHECK: CXXMethod=operator=:23:10 (copy-assignment operator) [type=bool (Bar<T> &)] [typekind=FunctionProto] [canonicaltype=bool (Bar<T> &)] [canonicaltypekind=FunctionProto] [resulttype=bool] [resulttypekind=Bool] [args= [Bar<T> &] [LValueReference]] [isPOD=0] [isAnonRecDecl=0]
// CHECK: CXXMethod=operator=:24:10 (copy-assignment operator) [type=bool (volatile Bar<T> &)] [typekind=FunctionProto] [canonicaltype=bool (volatile Bar<T> &)] [canonicaltypekind=FunctionProto] [resulttype=bool] [resulttypekind=Bool] [args= [volatile Bar<T> &] [LValueReference]] [isPOD=0] [isAnonRecDecl=0]
// CHECK: CXXMethod=operator=:25:10 (copy-assignment operator) [type=bool (const volatile Bar<T> &)] [typekind=FunctionProto] [canonicaltype=bool (const volatile Bar<T> &)] [canonicaltypekind=FunctionProto] [resulttype=bool] [resulttypekind=Bool] [args= [const volatile Bar<T> &] [LValueReference]] [isPOD=0] [isAnonRecDecl=0]
-// CHECK: CXXMethod=operator=:26:10 (copy-assignment operator) [type=bool (Bar<T>)] [typekind=FunctionProto] [canonicaltype=bool (Bar<T>)] [canonicaltypekind=FunctionProto] [resulttype=bool] [resulttypekind=Bool] [args= [Bar<T>] [Elaborated]] [isPOD=0] [isAnonRecDecl=0]
+// CHECK: CXXMethod=operator=:26:10 (copy-assignment operator) [type=bool (Bar<T>)] [typekind=FunctionProto] [canonicaltype=bool (Bar<T>)] [canonicaltypekind=FunctionProto] [resulttype=bool] [resulttypekind=Bool] [args= [Bar<T>] [Unexposed]] [isPOD=0] [isAnonRecDecl=0]
diff --git a/clang/test/Index/index-refs.cpp b/clang/test/Index/index-refs.cpp
index 1494684..22eb753 100644
--- a/clang/test/Index/index-refs.cpp
+++ b/clang/test/Index/index-refs.cpp
@@ -81,13 +81,13 @@ int ginitlist[] = {EnumVal};
// CHECK-NEXT: [indexDeclaration]: kind: enum
// CHECK-NEXT: [indexDeclaration]: kind: enumerator | name: EnumVal
// CHECK-NEXT: [indexDeclaration]: kind: variable | name: gx
-// CHECK-NEXT: [indexEntityReference]: kind: namespace | name: NS
// CHECK-NEXT: [indexEntityReference]: kind: typedef | name: MyInt
// CHECK-NEXT: [indexEntityReference]: kind: namespace | name: NS
+// CHECK-NEXT: [indexEntityReference]: kind: namespace | name: NS
// CHECK-NEXT: [indexEntityReference]: kind: enumerator | name: EnumVal
// CHECK-NEXT: [indexDeclaration]: kind: function | name: foo
-// CHECK-NEXT: [indexEntityReference]: kind: namespace | name: NS
// CHECK-NEXT: [indexEntityReference]: kind: typedef | name: MyInt
+// CHECK-NEXT: [indexEntityReference]: kind: namespace | name: NS
// CHECK-NEXT: [indexDeclaration]: kind: enum
// CHECK-NEXT: [indexDeclaration]: kind: enumerator | name: SecondVal
// CHECK-NEXT: [indexEntityReference]: kind: enumerator | name: EnumVal
diff --git a/clang/test/Index/keep-going.cpp b/clang/test/Index/keep-going.cpp
index 6354151..0b2df72 100644
--- a/clang/test/Index/keep-going.cpp
+++ b/clang/test/Index/keep-going.cpp
@@ -26,10 +26,10 @@ class C : public A<float> { };
// CHECK: FieldDecl=a:4:13 (Definition) [type=T] [typekind=Unexposed] [canonicaltype=type-parameter-0-0] [canonicaltypekind=Unexposed] [isPOD=0]
// CHECK: TypeRef=T:3:16 [type=T] [typekind=Unexposed] [canonicaltype=type-parameter-0-0] [canonicaltypekind=Unexposed] [isPOD=0]
// CHECK: ClassDecl=B:6:7 (Definition) [type=B] [typekind=Record] [isPOD=0]
-// CHECK: C++ base class specifier=A<int>:4:7 [access=public isVirtual=false] [type=A<int>] [typekind=Elaborated] [templateargs/1= [type=int] [typekind=Int]] [canonicaltype=A<int>] [canonicaltypekind=Record] [canonicaltemplateargs/1= [type=int] [typekind=Int]] [isPOD=0] [nbFields=1]
+// CHECK: C++ base class specifier=A<int>:4:7 [access=public isVirtual=false] [type=A<int>] [typekind=Unexposed] [templateargs/1= [type=int] [typekind=Int]] [canonicaltype=A<int>] [canonicaltypekind=Record] [canonicaltemplateargs/1= [type=int] [typekind=Int]] [isPOD=0] [nbFields=1]
// CHECK: TemplateRef=A:4:7 [type=] [typekind=Invalid] [isPOD=0]
// CHECK: ClassDecl=C:10:7 (Definition) [type=C] [typekind=Record] [isPOD=0]
-// CHECK: C++ base class specifier=A<float>:4:7 [access=public isVirtual=false] [type=A<float>] [typekind=Elaborated] [templateargs/1= [type=float] [typekind=Float]] [canonicaltype=A<float>] [canonicaltypekind=Record] [canonicaltemplateargs/1= [type=float] [typekind=Float]] [isPOD=0] [nbFields=1]
+// CHECK: C++ base class specifier=A<float>:4:7 [access=public isVirtual=false] [type=A<float>] [typekind=Unexposed] [templateargs/1= [type=float] [typekind=Float]] [canonicaltype=A<float>] [canonicaltypekind=Record] [canonicaltemplateargs/1= [type=float] [typekind=Float]] [isPOD=0] [nbFields=1]
// CHECK: TemplateRef=A:4:7 [type=] [typekind=Invalid] [isPOD=0]
// CHECK-KEEP-GOING-ONLY: VarDecl=global_var:1:12 [type=int] [typekind=Int] [isPOD=1]
diff --git a/clang/test/Index/move-assignment-operator.cpp b/clang/test/Index/move-assignment-operator.cpp
index a2c71e6..d6c4e9a 100644
--- a/clang/test/Index/move-assignment-operator.cpp
+++ b/clang/test/Index/move-assignment-operator.cpp
@@ -37,7 +37,7 @@ class Bar {
// CHECK: CXXMethod=operator=:13:10 [type=bool (volatile unsigned int &&)] [typekind=FunctionProto] [resulttype=bool] [resulttypekind=Bool] [args= [volatile unsigned int &&] [RValueReference]] [isPOD=0] [isAnonRecDecl=0]
// CHECK: CXXMethod=operator=:14:10 [type=bool (const volatile unsigned char &&)] [typekind=FunctionProto] [resulttype=bool] [resulttypekind=Bool] [args= [const volatile unsigned char &&] [RValueReference]] [isPOD=0] [isAnonRecDecl=0]
// CHECK: CXXMethod=operator=:15:10 [type=bool (int)] [typekind=FunctionProto] [resulttype=bool] [resulttypekind=Bool] [args= [int] [Int]] [isPOD=0] [isAnonRecDecl=0]
-// CHECK: CXXMethod=operator=:16:10 (copy-assignment operator) [type=bool (Foo)] [typekind=FunctionProto] [canonicaltype=bool (Foo)] [canonicaltypekind=FunctionProto] [resulttype=bool] [resulttypekind=Bool] [args= [Foo] [Elaborated]] [isPOD=0] [isAnonRecDecl=0]
+// CHECK: CXXMethod=operator=:16:10 (copy-assignment operator) [type=bool (Foo)] [typekind=FunctionProto] [canonicaltype=bool (Foo)] [canonicaltypekind=FunctionProto] [resulttype=bool] [resulttypekind=Bool] [args= [Foo] [Record]] [isPOD=0] [isAnonRecDecl=0]
// CHECK: ClassTemplate=Bar:21:7 (Definition) [type=] [typekind=Invalid] [isPOD=0] [isAnonRecDecl=0]
// CHECK: CXXMethod=operator=:22:10 (move-assignment operator) [type=bool (const Bar<T> &&)] [typekind=FunctionProto] [canonicaltype=bool (const Bar<T> &&)] [canonicaltypekind=FunctionProto] [resulttype=bool] [resulttypekind=Bool] [args= [const Bar<T> &&] [RValueReference]] [isPOD=0] [isAnonRecDecl=0]
// CHECK: CXXMethod=operator=:23:10 (move-assignment operator) [type=bool (Bar<T> &&)] [typekind=FunctionProto] [canonicaltype=bool (Bar<T> &&)] [canonicaltypekind=FunctionProto] [resulttype=bool] [resulttypekind=Bool] [args= [Bar<T> &&] [RValueReference]] [isPOD=0] [isAnonRecDecl=0]
diff --git a/clang/test/Index/opencl-types.cl b/clang/test/Index/opencl-types.cl
index 13d7937..4850601 100644
--- a/clang/test/Index/opencl-types.cl
+++ b/clang/test/Index/opencl-types.cl
@@ -17,11 +17,11 @@ void kernel testFloatTypes() {
}
// CHECK: VarDecl=scalarHalf:11:8 (Definition){{( \(invalid\))?}} [type=__private half] [typekind=Half] [isPOD=1]
-// CHECK: VarDecl=vectorHalf:12:9 (Definition) [type=__private half4] [typekind=Elaborated] [canonicaltype=half __private __attribute__((ext_vector_type(4)))] [canonicaltypekind=ExtVector] [isPOD=1]
+// CHECK: VarDecl=vectorHalf:12:9 (Definition) [type=__private half4] [typekind=Typedef] [canonicaltype=half __private __attribute__((ext_vector_type(4)))] [canonicaltypekind=ExtVector] [isPOD=1]
// CHECK: VarDecl=scalarFloat:13:9 (Definition) [type=__private float] [typekind=Float] [isPOD=1]
-// CHECK: VarDecl=vectorFloat:14:10 (Definition) [type=__private float4] [typekind=Elaborated] [canonicaltype=float __private __attribute__((ext_vector_type(4)))] [canonicaltypekind=ExtVector] [isPOD=1]
+// CHECK: VarDecl=vectorFloat:14:10 (Definition) [type=__private float4] [typekind=Typedef] [canonicaltype=float __private __attribute__((ext_vector_type(4)))] [canonicaltypekind=ExtVector] [isPOD=1]
// CHECK: VarDecl=scalarDouble:15:10 (Definition){{( \(invalid\))?}} [type=__private double] [typekind=Double] [isPOD=1]
-// CHECK: VarDecl=vectorDouble:16:11 (Definition){{( \(invalid\))?}} [type=__private double4] [typekind=Elaborated] [canonicaltype=double __private __attribute__((ext_vector_type(4)))] [canonicaltypekind=ExtVector] [isPOD=1]
+// CHECK: VarDecl=vectorDouble:16:11 (Definition){{( \(invalid\))?}} [type=__private double4] [typekind=Typedef] [canonicaltype=double __private __attribute__((ext_vector_type(4)))] [canonicaltypekind=ExtVector] [isPOD=1]
#pragma OPENCL EXTENSION cl_khr_gl_msaa_sharing : enable
@@ -120,10 +120,10 @@ void kernel testMiscOpenCLTypes() {
reserve_id_t scalarOCLReserveID;
}
-// CHECK: VarDecl=scalarOCLSampler:117:19 (Definition) [type=const sampler_t] [typekind=Elaborated] const [canonicaltype=const sampler_t] [canonicaltypekind=OCLSampler] [isPOD=1]
-// CHECK: VarDecl=scalarOCLEvent:118:15 (Definition) [type=__private clk_event_t] [typekind=Elaborated] [canonicaltype=__private clk_event_t] [canonicaltypekind=Unexposed] [isPOD=1]
-// CHECK: VarDecl=scalarOCLQueue:119:11 (Definition) [type=__private queue_t] [typekind=Elaborated] [canonicaltype=__private queue_t] [canonicaltypekind=OCLQueue] [isPOD=1]
-// CHECK: VarDecl=scalarOCLReserveID:120:16 (Definition) [type=__private reserve_id_t] [typekind=Elaborated] [canonicaltype=__private reserve_id_t] [canonicaltypekind=OCLReserveID] [isPOD=1]
+// CHECK: VarDecl=scalarOCLSampler:117:19 (Definition) [type=const sampler_t] [typekind=Typedef] const [canonicaltype=const sampler_t] [canonicaltypekind=OCLSampler] [isPOD=1]
+// CHECK: VarDecl=scalarOCLEvent:118:15 (Definition) [type=__private clk_event_t] [typekind=Typedef] [canonicaltype=__private clk_event_t] [canonicaltypekind=Unexposed] [isPOD=1]
+// CHECK: VarDecl=scalarOCLQueue:119:11 (Definition) [type=__private queue_t] [typekind=Typedef] [canonicaltype=__private queue_t] [canonicaltypekind=OCLQueue] [isPOD=1]
+// CHECK: VarDecl=scalarOCLReserveID:120:16 (Definition) [type=__private reserve_id_t] [typekind=Typedef] [canonicaltype=__private reserve_id_t] [canonicaltypekind=OCLReserveID] [isPOD=1]
#pragma OPENCL EXTENSION cl_intel_device_side_avc_motion_estimation : enable
@@ -131,4 +131,4 @@ void kernel testExtOpenCLTypes() {
intel_sub_group_avc_mce_payload_t mce_payload;
}
-// CHECK: VarDecl=mce_payload:131:37 (Definition){{( \(invalid\))?}} [type=__private intel_sub_group_avc_mce_payload_t] [typekind=Elaborated] [canonicaltype=__private intel_sub_group_avc_mce_payload_t] [canonicaltypekind=OCLIntelSubgroupAVCMcePayload] [isPOD=1]
+// CHECK: VarDecl=mce_payload:131:37 (Definition){{( \(invalid\))?}} [type=__private intel_sub_group_avc_mce_payload_t] [typekind=Typedef] [canonicaltype=__private intel_sub_group_avc_mce_payload_t] [canonicaltypekind=OCLIntelSubgroupAVCMcePayload] [isPOD=1]
diff --git a/clang/test/Index/paren-type.c b/clang/test/Index/paren-type.c
index 0975191..14a7785 100644
--- a/clang/test/Index/paren-type.c
+++ b/clang/test/Index/paren-type.c
@@ -9,7 +9,7 @@ extern int (VariableWithParentheses);
typedef int MyTypedef;
// CHECK-TYPE: VarDecl=VariableWithParentheses2:
-// CHECK-TYPE-SAME: [type=MyTypedef] [typekind=Elaborated]
+// CHECK-TYPE-SAME: [type=MyTypedef] [typekind=Typedef]
// CHECK-TYPE-SAME: [canonicaltype=int] [canonicaltypekind=Int]
// CHECK-TYPEDECL: VarDecl=VariableWithParentheses2
// CHECK-TYPEDECL-SAME: [typedeclaration=MyTypedef] [typekind=Typedef]
diff --git a/clang/test/Index/print-type-size.cpp b/clang/test/Index/print-type-size.cpp
index a365528f..6a5a02a 100644
--- a/clang/test/Index/print-type-size.cpp
+++ b/clang/test/Index/print-type-size.cpp
@@ -45,8 +45,8 @@ union u {
struct simple s1;
};
-// CHECK64: VarDecl=s1:[[@LINE+2]]:8 (Definition) [type=simple] [typekind=Elaborated] [sizeof=48] [alignof=8]
-// CHECK32: VarDecl=s1:[[@LINE+1]]:8 (Definition) [type=simple] [typekind=Elaborated] [sizeof=36] [alignof=4]
+// CHECK64: VarDecl=s1:[[@LINE+2]]:8 (Definition) [type=simple] [typekind=Record] [sizeof=48] [alignof=8]
+// CHECK32: VarDecl=s1:[[@LINE+1]]:8 (Definition) [type=simple] [typekind=Record] [sizeof=36] [alignof=4]
simple s1;
struct Test {
@@ -354,8 +354,8 @@ struct BaseStruct
BaseStruct(){}
double v0;
float v1;
-// CHECK64: FieldDecl=fg:[[@LINE+2]]:7 (Definition) [type=C] [typekind=Elaborated] [sizeof=88] [alignof=8] [offsetof=128]
-// CHECK32: FieldDecl=fg:[[@LINE+1]]:7 (Definition) [type=C] [typekind=Elaborated] [sizeof=60] [alignof=4] [offsetof=96]
+// CHECK64: FieldDecl=fg:[[@LINE+2]]:7 (Definition) [type=C] [typekind=Record] [sizeof=88] [alignof=8] [offsetof=128]
+// CHECK32: FieldDecl=fg:[[@LINE+1]]:7 (Definition) [type=C] [typekind=Record] [sizeof=60] [alignof=4] [offsetof=96]
C fg;
// CHECK64: FieldDecl=rg:[[@LINE+2]]:8 (Definition) [type=C &] [typekind=LValueReference] [sizeof=88] [alignof=8] [offsetof=832]
// CHECK32: FieldDecl=rg:[[@LINE+1]]:8 (Definition) [type=C &] [typekind=LValueReference] [sizeof=60] [alignof=4] [offsetof=576]
diff --git a/clang/test/Index/print-type.c b/clang/test/Index/print-type.c
index 7375644..d30f4be 100644
--- a/clang/test/Index/print-type.c
+++ b/clang/test/Index/print-type.c
@@ -32,10 +32,10 @@ void fun(struct { int x; int y; } *param);
_Atomic(unsigned long) aul;
// RUN: c-index-test -test-print-type %s | FileCheck %s
-// CHECK: FunctionDecl=f:3:6 (Definition) [type=int *(int *, char *, FooType, int *, void (*)(int))] [typekind=FunctionProto] [canonicaltype=int *(int *, char *, int, int *, void (*)(int))] [canonicaltypekind=FunctionProto] [resulttype=int *] [resulttypekind=Pointer] [args= [int *] [Pointer] [char *] [Pointer] [FooType] [Elaborated] [int[5]] [ConstantArray] [void (*)(int)] [Pointer]] [isPOD=0]
+// CHECK: FunctionDecl=f:3:6 (Definition) [type=int *(int *, char *, FooType, int *, void (*)(int))] [typekind=FunctionProto] [canonicaltype=int *(int *, char *, int, int *, void (*)(int))] [canonicaltypekind=FunctionProto] [resulttype=int *] [resulttypekind=Pointer] [args= [int *] [Pointer] [char *] [Pointer] [FooType] [Typedef] [int[5]] [ConstantArray] [void (*)(int)] [Pointer]] [isPOD=0]
// CHECK: ParmDecl=p:3:13 (Definition) [type=int *] [typekind=Pointer] [isPOD=1] [pointeetype=int] [pointeekind=Int]
// CHECK: ParmDecl=x:3:22 (Definition) [type=char *] [typekind=Pointer] [isPOD=1] [pointeetype=char] [pointeekind=Char_{{[US]}}]
-// CHECK: ParmDecl=z:3:33 (Definition) [type=FooType] [typekind=Elaborated] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
+// CHECK: ParmDecl=z:3:33 (Definition) [type=FooType] [typekind=Typedef] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
// CHECK: TypeRef=FooType:1:13 [type=FooType] [typekind=Typedef] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
// CHECK: ParmDecl=arr:3:40 (Definition) [type=int[5]] [typekind=ConstantArray] [isPOD=1]
// CHECK: IntegerLiteral= [type=int] [typekind=Int] [isPOD=1]
@@ -47,14 +47,14 @@ _Atomic(unsigned long) aul;
// CHECK: UnaryOperator= [type=int] [typekind=Int] [isPOD=1]
// CHECK: DeclRefExpr=p:3:13 [type=int *] [typekind=Pointer] [isPOD=1] [pointeetype=int] [pointeekind=Int]
// CHECK: DeclStmt= [type=] [typekind=Invalid] [isPOD=0]
-// CHECK: VarDecl=w:5:17 (Definition) [type=const FooType] [typekind=Elaborated] const [canonicaltype=const int] [canonicaltypekind=Int] [isPOD=1]
+// CHECK: VarDecl=w:5:17 (Definition) [type=const FooType] [typekind=Typedef] const [canonicaltype=const int] [canonicaltypekind=Int] [isPOD=1]
// CHECK: TypeRef=FooType:1:13 [type=FooType] [typekind=Typedef] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
-// CHECK: DeclRefExpr=z:3:33 [type=FooType] [typekind=Elaborated] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
+// CHECK: DeclRefExpr=z:3:33 [type=FooType] [typekind=Typedef] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
// CHECK: ReturnStmt= [type=] [typekind=Invalid] [isPOD=0]
// CHECK: BinaryOperator=+ [type=int *] [typekind=Pointer] [isPOD=1] [pointeetype=int] [pointeekind=Int]
// CHECK: BinaryOperator=+ [type=int *] [typekind=Pointer] [isPOD=1] [pointeetype=int] [pointeekind=Int]
// CHECK: DeclRefExpr=p:3:13 [type=int *] [typekind=Pointer] [isPOD=1] [pointeetype=int] [pointeekind=Int]
-// CHECK: DeclRefExpr=z:3:33 [type=FooType] [typekind=Elaborated] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
+// CHECK: DeclRefExpr=z:3:33 [type=FooType] [typekind=Typedef] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
// CHECK: ArraySubscriptExpr= [type=int] [typekind=Int] [isPOD=1]
// CHECK: UnexposedExpr=arr:3:40 [type=int[5]] [typekind=ConstantArray] [isPOD=1]
// CHECK: IntegerLiteral= [type=int] [typekind=Int] [isPOD=1]
@@ -64,10 +64,10 @@ _Atomic(unsigned long) aul;
// CHECK: VarDecl=x:10:38 [type=__attribute__((__vector_size__(4 * sizeof(int)))) int] [typekind=Vector] [isPOD=1]
// CHECK: TypedefDecl=int4_t:11:46 (Definition) [type=int4_t] [typekind=Typedef] [canonicaltype=__attribute__((__vector_size__(4 * sizeof(int)))) int] [canonicaltypekind=Vector] [isPOD=1]
// CHECK: ParmDecl=incompletearray:13:12 (Definition) [type=int[]] [typekind=IncompleteArray] [isPOD=1]
-// CHECK: FunctionDecl=elaboratedEnumType:15:25 [type=enum Enum ()] [typekind=FunctionNoProto] [canonicaltype=enum Enum ()] [canonicaltypekind=FunctionNoProto] [resulttype=enum Enum] [resulttypekind=Elaborated] [isPOD=0]
+// CHECK: FunctionDecl=elaboratedEnumType:15:25 [type=enum Enum ()] [typekind=FunctionNoProto] [canonicaltype=enum Enum ()] [canonicaltypekind=FunctionNoProto] [resulttype=enum Enum] [resulttypekind=Enum] [isPOD=0]
// CHECK: TypeRef=enum Enum:15:6 [type=enum Enum] [typekind=Enum] [isPOD=1]
// CHECK: StructDecl=Struct:16:8 (Definition) [type=struct Struct] [typekind=Record] [isPOD=1]
-// CHECK: FunctionDecl=elaboratedStructType:16:32 [type=struct Struct ()] [typekind=FunctionNoProto] [canonicaltype=struct Struct ()] [canonicaltypekind=FunctionNoProto] [resulttype=struct Struct] [resulttypekind=Elaborated] [isPOD=0]
+// CHECK: FunctionDecl=elaboratedStructType:16:32 [type=struct Struct ()] [typekind=FunctionNoProto] [canonicaltype=struct Struct ()] [canonicaltypekind=FunctionNoProto] [resulttype=struct Struct] [resulttypekind=Record] [isPOD=0]
// CHECK: TypeRef=struct Struct:16:8 [type=struct Struct] [typekind=Record] [isPOD=1]
// CHECK: StructDecl=struct (unnamed at {{.*}}):18:1 (Definition) [type=struct (unnamed at {{.*}}print-type.c:18:1)] [typekind=Record] [isPOD=1] [nbFields=2] [isAnon=1] [isAnonRecDecl=0]
// CHECK: StructDecl=struct (unnamed at {{.*}}):23:1 (Definition) [type=struct (unnamed at {{.*}}print-type.c:23:1)] [typekind=Record] [isPOD=1] [nbFields=1] [isAnon=1] [isAnonRecDecl=0]
diff --git a/clang/test/Index/print-type.cpp b/clang/test/Index/print-type.cpp
index 141895d..ef9805f 100644
--- a/clang/test/Index/print-type.cpp
+++ b/clang/test/Index/print-type.cpp
@@ -105,38 +105,38 @@ inline namespace InlineNS {}
// CHECK: Namespace=inner:14:11 (Definition) [type=] [typekind=Invalid] [isPOD=0]
// CHECK: StructDecl=Bar:16:8 (Definition) [type=outer::inner::Bar] [typekind=Record] [isPOD=0] [nbFields=3]
// CHECK: CXXConstructor=Bar:17:3 (Definition) (converting constructor) [type=void (outer::Foo<bool> *){{.*}}] [typekind=FunctionProto] [canonicaltype=void (outer::Foo<bool> *){{.*}}] [canonicaltypekind=FunctionProto] [resulttype=void] [resulttypekind=Void] [args= [outer::Foo<bool> *] [Pointer]] [isPOD=0]
-// CHECK: ParmDecl=foo:17:25 (Definition) [type=outer::Foo<bool> *] [typekind=Pointer] [canonicaltype=outer::Foo<bool> *] [canonicaltypekind=Pointer] [isPOD=1] [pointeetype=outer::Foo<bool>] [pointeekind=Elaborated]
+// CHECK: ParmDecl=foo:17:25 (Definition) [type=outer::Foo<bool> *] [typekind=Pointer] [canonicaltype=outer::Foo<bool> *] [canonicaltypekind=Pointer] [isPOD=1] [pointeetype=outer::Foo<bool>] [pointeekind=Unexposed]
// CHECK: NamespaceRef=outer:1:11 [type=] [typekind=Invalid] [isPOD=0]
// CHECK: TemplateRef=Foo:4:8 [type=] [typekind=Invalid] [isPOD=0]
// CHECK: CompoundStmt= [type=] [typekind=Invalid] [isPOD=0]
-// CHECK: TypedefDecl=FooType:19:15 (Definition) [type=outer::inner::Bar::FooType] [typekind=Typedef] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
-// CHECK: TypeAliasDecl=AliasType:20:9 (Definition) [type=outer::inner::Bar::AliasType] [typekind=Typedef] [canonicaltype=double] [canonicaltypekind=Double] [isPOD=1]
+// CHECK: TypedefDecl=FooType:19:15 (Definition) [type=FooType] [typekind=Typedef] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
+// CHECK: TypeAliasDecl=AliasType:20:9 (Definition) [type=AliasType] [typekind=Typedef] [canonicaltype=double] [canonicaltypekind=Double] [isPOD=1]
// CHECK: FieldDecl=p:21:8 (Definition) [type=int *] [typekind=Pointer] [isPOD=1] [pointeetype=int] [pointeekind=Int]
-// CHECK: CXXMethod=f:22:8 (Definition) [type=int *(int *, char *, FooType){{.*}}] [typekind=FunctionProto] [canonicaltype=int *(int *, char *, int){{.*}}] [canonicaltypekind=FunctionProto] [resulttype=int *] [resulttypekind=Pointer] [args= [int *] [Pointer] [char *] [Pointer] [FooType] [Elaborated]] [isPOD=0]
+// CHECK: CXXMethod=f:22:8 (Definition) [type=int *(int *, char *, FooType){{.*}}] [typekind=FunctionProto] [canonicaltype=int *(int *, char *, int){{.*}}] [canonicaltypekind=FunctionProto] [resulttype=int *] [resulttypekind=Pointer] [args= [int *] [Pointer] [char *] [Pointer] [FooType] [Typedef]] [isPOD=0]
// CHECK: ParmDecl=p:22:15 (Definition) [type=int *] [typekind=Pointer] [isPOD=1] [pointeetype=int] [pointeekind=Int]
// CHECK: ParmDecl=x:22:24 (Definition) [type=char *] [typekind=Pointer] [isPOD=1] [pointeetype=char] [pointeekind=Char_{{[US]}}]
-// CHECK: ParmDecl=z:22:35 (Definition) [type=FooType] [typekind=Elaborated] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
-// CHECK: TypeRef=outer::inner::Bar::FooType:19:15 [type=outer::inner::Bar::FooType] [typekind=Typedef] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
+// CHECK: ParmDecl=z:22:35 (Definition) [type=FooType] [typekind=Typedef] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
+// CHECK: TypeRef=outer::inner::Bar::FooType:19:15 [type=FooType] [typekind=Typedef] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
// CHECK: CompoundStmt= [type=] [typekind=Invalid] [isPOD=0]
// CHECK: DeclStmt= [type=] [typekind=Invalid] [isPOD=0]
-// CHECK: VarDecl=w:23:19 (Definition) [type=const FooType] [typekind=Elaborated] const [canonicaltype=const int] [canonicaltypekind=Int] [isPOD=1]
-// CHECK: TypeRef=outer::inner::Bar::FooType:19:15 [type=outer::inner::Bar::FooType] [typekind=Typedef] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
-// CHECK: UnexposedExpr=z:22:35 [type=FooType] [typekind=Elaborated] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
-// CHECK: DeclRefExpr=z:22:35 [type=FooType] [typekind=Elaborated] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
+// CHECK: VarDecl=w:23:19 (Definition) [type=const FooType] [typekind=Typedef] const [canonicaltype=const int] [canonicaltypekind=Int] [isPOD=1]
+// CHECK: TypeRef=outer::inner::Bar::FooType:19:15 [type=FooType] [typekind=Typedef] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
+// CHECK: UnexposedExpr=z:22:35 [type=FooType] [typekind=Typedef] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
+// CHECK: DeclRefExpr=z:22:35 [type=FooType] [typekind=Typedef] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
// CHECK: ReturnStmt= [type=] [typekind=Invalid] [isPOD=0]
// CHECK: BinaryOperator=+ [type=int *] [typekind=Pointer] [isPOD=1] [pointeetype=int] [pointeekind=Int]
// CHECK: UnexposedExpr=p:22:15 [type=int *] [typekind=Pointer] [isPOD=1] [pointeetype=int] [pointeekind=Int]
// CHECK: DeclRefExpr=p:22:15 [type=int *] [typekind=Pointer] [isPOD=1] [pointeetype=int] [pointeekind=Int]
-// CHECK: UnexposedExpr=z:22:35 [type=FooType] [typekind=Elaborated] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
-// CHECK: DeclRefExpr=z:22:35 [type=FooType] [typekind=Elaborated] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
-// CHECK: TypedefDecl=OtherType:26:18 (Definition) [type=outer::inner::Bar::OtherType] [typekind=Typedef] [canonicaltype=double] [canonicaltypekind=Double] [isPOD=1]
-// CHECK: TypedefDecl=ArrayType:27:15 (Definition) [type=outer::inner::Bar::ArrayType] [typekind=Typedef] [canonicaltype=int[5]] [canonicaltypekind=ConstantArray] [isPOD=1]
+// CHECK: UnexposedExpr=z:22:35 [type=FooType] [typekind=Typedef] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
+// CHECK: DeclRefExpr=z:22:35 [type=FooType] [typekind=Typedef] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
+// CHECK: TypedefDecl=OtherType:26:18 (Definition) [type=OtherType] [typekind=Typedef] [canonicaltype=double] [canonicaltypekind=Double] [isPOD=1]
+// CHECK: TypedefDecl=ArrayType:27:15 (Definition) [type=ArrayType] [typekind=Typedef] [canonicaltype=int[5]] [canonicaltypekind=ConstantArray] [isPOD=1]
// CHECK: IntegerLiteral= [type=int] [typekind=Int] [isPOD=1]
-// CHECK: FieldDecl=baz:28:20 (Definition) [type=Baz<int, 1, Foo>] [typekind=Elaborated] [templateargs/3= [type=int] [typekind=Int]] [canonicaltype=outer::Baz<int, 1, outer::Foo>] [canonicaltypekind=Record] [canonicaltemplateargs/3= [type=int] [typekind=Int]] [isPOD=1]
+// CHECK: FieldDecl=baz:28:20 (Definition) [type=Baz<int, 1, Foo>] [typekind=Unexposed] [templateargs/3= [type=int] [typekind=Int]] [canonicaltype=outer::Baz<int, 1, outer::Foo>] [canonicaltypekind=Record] [canonicaltemplateargs/3= [type=int] [typekind=Int]] [isPOD=1]
// CHECK: TemplateRef=Baz:9:8 [type=] [typekind=Invalid] [isPOD=0]
// CHECK: IntegerLiteral= [type=int] [typekind=Int] [isPOD=1]
// CHECK: TemplateRef=Foo:4:8 [type=] [typekind=Invalid] [isPOD=0]
-// CHECK: FieldDecl=qux:29:38 (Definition) [type=Qux<int, char *, Foo<int>, FooType>] [typekind=Elaborated] [templateargs/4= [type=int] [typekind=Int] [type=char *] [typekind=Pointer] [type=Foo<int>] [typekind=Elaborated] [type=FooType] [typekind=Elaborated]] [canonicaltype=outer::Qux<int, char *, outer::Foo<int>, int>] [canonicaltypekind=Record] [canonicaltemplateargs/4= [type=int] [typekind=Int] [type=char *] [typekind=Pointer] [type=outer::Foo<int>] [typekind=Record] [type=int] [typekind=Int]] [isPOD=1]
+// CHECK: FieldDecl=qux:29:38 (Definition) [type=Qux<int, char *, Foo<int>, FooType>] [typekind=Unexposed] [templateargs/4= [type=int] [typekind=Int] [type=char *] [typekind=Pointer] [type=Foo<int>] [typekind=Unexposed] [type=FooType] [typekind=Typedef]] [canonicaltype=outer::Qux<int, char *, outer::Foo<int>, int>] [canonicaltypekind=Record] [canonicaltemplateargs/4= [type=int] [typekind=Int] [type=char *] [typekind=Pointer] [type=outer::Foo<int>] [typekind=Record] [type=int] [typekind=Int]] [isPOD=1]
// CHECK: TemplateRef=Qux:12:8 [type=] [typekind=Invalid] [isPOD=0]
// CHECK: TemplateRef=Foo:4:8 [type=] [typekind=Invalid] [isPOD=0]
// CHECK: FunctionTemplate=tbar:36:3 [type=T (int)] [typekind=FunctionProto] [canonicaltype=type-parameter-0-0 (int)] [canonicaltypekind=FunctionProto] [resulttype=T] [resulttypekind=Unexposed] [isPOD=0]
@@ -163,11 +163,11 @@ inline namespace InlineNS {}
// CHECK: DeclRefExpr=i:44:14 [type=int] [typekind=Int] [isPOD=1]
// CHECK: StructDecl=Blob:46:8 (Definition) [type=Blob] [typekind=Record] [isPOD=1] [nbFields=2]
// CHECK: FieldDecl=i:47:7 (Definition) [type=int] [typekind=Int] [isPOD=1]
-// CHECK: VarDecl=member_pointer:50:12 (Definition) [type=int Blob::*] [typekind=MemberPointer] [isPOD=1] [pointeetype=int] [pointeekind=Int] [isAnonRecDecl=0]
-// CHECK: FunctionDecl=elaboratedNamespaceType:52:42 [type=NS::Type (const NS::Type)] [typekind=FunctionProto] [canonicaltype=NS::Type (NS::Type)] [canonicaltypekind=FunctionProto] [resulttype=NS::Type] [resulttypekind=Elaborated] [args= [const NS::Type] [Elaborated]] [isPOD=0]
+// CHECK: VarDecl=member_pointer:50:12 (Definition) [type=int Blob::*] [typekind=MemberPointer] [canonicaltype=int Blob::*] [canonicaltypekind=MemberPointer] [isPOD=1] [pointeetype=int] [pointeekind=Int] [isAnonRecDecl=0]
+// CHECK: FunctionDecl=elaboratedNamespaceType:52:42 [type=NS::Type (const NS::Type)] [typekind=FunctionProto] [canonicaltype=NS::Type (NS::Type)] [canonicaltypekind=FunctionProto] [resulttype=NS::Type] [resulttypekind=Record] [args= [const NS::Type] [Record]] [isPOD=0]
// CHECK: NamespaceRef=NS:52:11 [type=] [typekind=Invalid] [isPOD=0]
// CHECK: TypeRef=struct NS::Type:52:23 [type=NS::Type] [typekind=Record] [isPOD=1]
-// CHECK: ParmDecl=t:52:81 (Definition) [type=const NS::Type] [typekind=Elaborated] const [canonicaltype=const NS::Type] [canonicaltypekind=Record] [isPOD=1]
+// CHECK: ParmDecl=t:52:81 (Definition) [type=const NS::Type] [typekind=Record] const [canonicaltype=const NS::Type] [canonicaltypekind=Record] [isPOD=1]
// CHECK: VarDecl=autoI:54:6 (Definition) [type=int] [typekind=Auto] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
// CHECK: IntegerLiteral= [type=int] [typekind=Int] [isPOD=1]
// CHECK: VarDecl=autoTbar:55:6 (Definition) [type=int] [typekind=Auto] [canonicaltype=int] [canonicaltypekind=Int] [isPOD=1]
@@ -176,9 +176,9 @@ inline namespace InlineNS {}
// CHECK: DeclRefExpr=tbar:36:3 RefName=[55:17 - 55:21] RefName=[55:21 - 55:26] [type=int (int)] [typekind=FunctionProto] [canonicaltype=int (int)] [canonicaltypekind=FunctionProto] [isPOD=0]
// CHECK: IntegerLiteral= [type=int] [typekind=Int] [isPOD=1]
// CHECK: VarDecl=autoBlob:56:6 (Definition) [type=Blob *] [typekind=Auto] [canonicaltype=Blob *] [canonicaltypekind=Pointer] [isPOD=1]
-// CHECK: CXXNewExpr= [type=Blob *] [typekind=Pointer] [canonicaltype=Blob *] [canonicaltypekind=Pointer] [isPOD=1] [pointeetype=Blob] [pointeekind=Elaborated]
+// CHECK: CXXNewExpr= [type=Blob *] [typekind=Pointer] [canonicaltype=Blob *] [canonicaltypekind=Pointer] [isPOD=1] [pointeetype=Blob] [pointeekind=Record]
// CHECK: TypeRef=struct Blob:46:8 [type=Blob] [typekind=Record] [isPOD=1] [nbFields=2]
-// CHECK: CallExpr=Blob:46:8 [type=Blob] [typekind=Elaborated] [canonicaltype=Blob] [canonicaltypekind=Record] [isPOD=1] [nbFields=2]
+// CHECK: CallExpr=Blob:46:8 [type=Blob] [typekind=Record] [canonicaltype=Blob] [canonicaltypekind=Record] [isPOD=1] [nbFields=2]
// CHECK: FunctionDecl=autoFunction:57:6 (Definition) [type=int ()] [typekind=FunctionProto] [canonicaltype=int ()] [canonicaltypekind=FunctionProto] [resulttype=int] [resulttypekind=Auto] [isPOD=0]
// CHECK: CompoundStmt= [type=] [typekind=Invalid] [isPOD=0]
// CHECK: ReturnStmt= [type=] [typekind=Invalid] [isPOD=0]
@@ -187,20 +187,20 @@ inline namespace InlineNS {}
// CHECK: IntegerLiteral= [type=int] [typekind=Int] [isPOD=1]
// CHECK: TypeAliasTemplateDecl=TypeAlias:61:1 (Definition) [type=] [typekind=Invalid] [isPOD=0]
// CHECK: TemplateTypeParameter=T:60:20 (Definition) [type=T] [typekind=Unexposed] [canonicaltype=type-parameter-0-0] [canonicaltypekind=Unexposed] [isPOD=0]
-// CHECK: FieldDecl=foo:63:39 (Definition) [type=TypeAlias<int>] [typekind=Elaborated] [templateargs/1= [type=int] [typekind=Int]] [canonicaltype=outer::Qux<int>] [canonicaltypekind=Record] [canonicaltemplateargs/1= [type=int] [typekind=Int]] [isPOD=1]
+// CHECK: FieldDecl=foo:63:39 (Definition) [type=TypeAlias<int>] [typekind=Unexposed] [templateargs/1= [type=int] [typekind=Int]] [canonicaltype=outer::Qux<int>] [canonicaltypekind=Record] [canonicaltemplateargs/1= [type=int] [typekind=Int]] [isPOD=1]
// CHECK: TemplateRef=TypeAlias:61:1 [type=] [typekind=Invalid] [isPOD=0]
// CHECK: ClassTemplate=Specialization:66:8 (Definition) [type=] [typekind=Invalid] [isPOD=0]
// CHECK: TemplateTypeParameter=T:65:19 (Definition) [type=T] [typekind=Unexposed] [canonicaltype=type-parameter-0-0] [canonicaltypekind=Unexposed] [isPOD=0]
// CHECK: StructDecl=Specialization:69:8 [Specialization of Specialization:66:8] [Template arg 0: kind: 1, type: int] [type=Specialization<int>] [typekind=Record] [templateargs/1= [type=int] [typekind=Int]] [isPOD=0]
-// CHECK: VarDecl=templRefParam:71:40 (Definition) [type=Specialization<Specialization<bool> &>] [typekind=Elaborated] [templateargs/1= [type=Specialization<bool> &] [typekind=LValueReference]] [canonicaltype=Specialization<Specialization<bool> &>] [canonicaltypekind=Record] [canonicaltemplateargs/1= [type=Specialization<bool> &] [typekind=LValueReference]] [isPOD=1]
+// CHECK: VarDecl=templRefParam:71:40 (Definition) [type=Specialization<Specialization<bool> &>] [typekind=Unexposed] [templateargs/1= [type=Specialization<bool> &] [typekind=LValueReference]] [canonicaltype=Specialization<Specialization<bool> &>] [canonicaltypekind=Record] [canonicaltemplateargs/1= [type=Specialization<bool> &] [typekind=LValueReference]] [isPOD=1]
// CHECK: TemplateRef=Specialization:66:8 [type=] [typekind=Invalid] [isPOD=0]
-// CHECK: CallExpr=Specialization:66:8 [type=Specialization<Specialization<bool> &>] [typekind=Elaborated] [templateargs/1= [type=Specialization<bool> &] [typekind=LValueReference]] [canonicaltype=Specialization<Specialization<bool> &>] [canonicaltypekind=Record] [canonicaltemplateargs/1= [type=Specialization<bool> &] [typekind=LValueReference]] [isPOD=1]
+// CHECK: CallExpr=Specialization:66:8 [type=Specialization<Specialization<bool> &>] [typekind=Unexposed] [templateargs/1= [type=Specialization<bool> &] [typekind=LValueReference]] [canonicaltype=Specialization<Specialization<bool> &>] [canonicaltypekind=Record] [canonicaltemplateargs/1= [type=Specialization<bool> &] [typekind=LValueReference]] [isPOD=1]
// CHECK: VarDecl=autoTemplRefParam:72:6 (Definition) [type=Specialization<Specialization<bool> &>] [typekind=Auto] [templateargs/1= [type=Specialization<bool> &] [typekind=LValueReference]] [canonicaltype=Specialization<Specialization<bool> &>] [canonicaltypekind=Record] [canonicaltemplateargs/1= [type=Specialization<bool> &] [typekind=LValueReference]] [isPOD=1]
-// CHECK: UnexposedExpr=templRefParam:71:40 [type=const Specialization<Specialization<bool> &>] [typekind=Elaborated] const [templateargs/1= [type=Specialization<bool> &] [typekind=LValueReference]] [canonicaltype=const Specialization<Specialization<bool> &>] [canonicaltypekind=Record] [canonicaltemplateargs/1= [type=Specialization<bool> &] [typekind=LValueReference]] [isPOD=1] [isAnonRecDecl=0]
-// CHECK: DeclRefExpr=templRefParam:71:40 [type=Specialization<Specialization<bool> &>] [typekind=Elaborated] [templateargs/1= [type=Specialization<bool> &] [typekind=LValueReference]] [canonicaltype=Specialization<Specialization<bool> &>] [canonicaltypekind=Record] [canonicaltemplateargs/1= [type=Specialization<bool> &] [typekind=LValueReference]] [isPOD=1]
-// CHECK: TypeAliasDecl=baz:76:7 (Definition) [type=baz] [typekind=Typedef] [templateargs/1= [type=A<void>] [typekind=Elaborated]] [canonicaltype=A<void>] [canonicaltypekind=Record] [canonicaltemplateargs/1= [type=void] [typekind=Void]] [isPOD=0]
+// CHECK: UnexposedExpr=templRefParam:71:40 [type=const Specialization<Specialization<bool> &>] [typekind=Record] const [templateargs/1= [type=Specialization<bool> &] [typekind=LValueReference]] [canonicaltype=const Specialization<Specialization<bool> &>] [canonicaltypekind=Record] [canonicaltemplateargs/1= [type=Specialization<bool> &] [typekind=LValueReference]] [isPOD=1] [isAnonRecDecl=0]
+// CHECK: DeclRefExpr=templRefParam:71:40 [type=Specialization<Specialization<bool> &>] [typekind=Unexposed] [templateargs/1= [type=Specialization<bool> &] [typekind=LValueReference]] [canonicaltype=Specialization<Specialization<bool> &>] [canonicaltypekind=Record] [canonicaltemplateargs/1= [type=Specialization<bool> &] [typekind=LValueReference]] [isPOD=1]
+// CHECK: TypeAliasDecl=baz:76:7 (Definition) [type=baz] [typekind=Typedef] [templateargs/1= [type=A<void>] [typekind=Unexposed]] [canonicaltype=A<void>] [canonicaltypekind=Record] [canonicaltemplateargs/1= [type=void] [typekind=Void]] [isPOD=0]
// CHECK: VarDecl=autoTemplPointer:78:6 (Definition) [type=Specialization<Specialization<bool> &> *] [typekind=Auto] [canonicaltype=Specialization<Specialization<bool> &> *] [canonicaltypekind=Pointer] [isPOD=1] [pointeetype=Specialization<Specialization<bool> &>] [pointeekind=Auto]
-// CHECK: CallExpr=Bar:17:3 [type=outer::inner::Bar] [typekind=Elaborated] [canonicaltype=outer::inner::Bar] [canonicaltypekind=Record] [args= [outer::Foo<bool> *] [Pointer]] [isPOD=0] [nbFields=3]
+// CHECK: CallExpr=Bar:17:3 [type=outer::inner::Bar] [typekind=Record] [canonicaltype=outer::inner::Bar] [canonicaltypekind=Record] [args= [outer::Foo<bool> *] [Pointer]] [isPOD=0] [nbFields=3]
// CHECK: StructDecl=(anonymous struct at {{.*}}):84:3 (Definition) [type=X::(anonymous struct at {{.*}}print-type.cpp:84:3)] [typekind=Record] [isPOD=1] [nbFields=1] [isAnon=1]
// CHECK: ClassDecl=(anonymous class at {{.*}}:85:3 (Definition) [type=X::(anonymous class at {{.*}}print-type.cpp:85:3)] [typekind=Record] [isPOD=1] [nbFields=1] [isAnon=1]
// CHECK: UnionDecl=(anonymous union at {{.*}}:86:3 (Definition) [type=X::(anonymous union at {{.*}}print-type.cpp:86:3)] [typekind=Record] [isPOD=1] [nbFields=2] [isAnon=1]
diff --git a/clang/test/Index/recursive-cxx-member-calls.cpp b/clang/test/Index/recursive-cxx-member-calls.cpp
index 11c011a..b378975 100644
--- a/clang/test/Index/recursive-cxx-member-calls.cpp
+++ b/clang/test/Index/recursive-cxx-member-calls.cpp
@@ -402,7 +402,7 @@ AttributeList::Kind AttributeList::getKind(const IdentifierInfo * Name) {
// CHECK-tokens: Identifier: "getKind" [33:17 - 33:24] CXXMethod=getKind:33:17 (static)
// CHECK-tokens: Punctuation: "(" [33:24 - 33:25] CXXMethod=getKind:33:17 (static)
// CHECK-tokens: Keyword: "const" [33:25 - 33:30] ParmDecl=Name:33:48 (Definition)
-// CHECK-tokens: Identifier: "IdentifierInfo" [33:31 - 33:45] TypeRef=class clang::IdentifierInfo:66:7
+// CHECK-tokens: Identifier: "IdentifierInfo" [33:31 - 33:45] TypeRef=class clang::IdentifierInfo:11:9
// CHECK-tokens: Punctuation: "*" [33:46 - 33:47] ParmDecl=Name:33:48 (Definition)
// CHECK-tokens: Identifier: "Name" [33:48 - 33:52] ParmDecl=Name:33:48 (Definition)
// CHECK-tokens: Punctuation: ")" [33:52 - 33:53] CXXMethod=getKind:33:17 (static)
@@ -823,18 +823,18 @@ AttributeList::Kind AttributeList::getKind(const IdentifierInfo * Name) {
// CHECK-tokens: Punctuation: ";" [85:18 - 85:19] ClassTemplate=StringSwitch:83:47 (Definition)
// CHECK-tokens: Keyword: "public" [86:1 - 86:7] CXXAccessSpecifier=:86:1 (Definition)
// CHECK-tokens: Punctuation: ":" [86:7 - 86:8] CXXAccessSpecifier=:86:1 (Definition)
-// CHECK-tokens: Keyword: "explicit" [87:3 - 87:11] CXXConstructor=StringSwitch<T, R>:87:12 (Definition)
-// CHECK-tokens: Identifier: "StringSwitch" [87:12 - 87:24] CXXConstructor=StringSwitch<T, R>:87:12 (Definition) (explicit)
-// CHECK-tokens: Punctuation: "(" [87:24 - 87:25] CXXConstructor=StringSwitch<T, R>:87:12 (Definition)
+// CHECK-tokens: Keyword: "explicit" [87:3 - 87:11] CXXConstructor=llvm::StringSwitch<T, R>:87:12 (Definition)
+// CHECK-tokens: Identifier: "StringSwitch" [87:12 - 87:24] CXXConstructor=llvm::StringSwitch<T, R>:87:12 (Definition) (explicit)
+// CHECK-tokens: Punctuation: "(" [87:24 - 87:25] CXXConstructor=llvm::StringSwitch<T, R>:87:12 (Definition)
// CHECK-tokens: Identifier: "StringRef" [87:25 - 87:34] TypeRef=class llvm::StringRef:38:7
// CHECK-tokens: Identifier: "Str" [87:35 - 87:38] ParmDecl=Str:87:35 (Definition)
-// CHECK-tokens: Punctuation: ")" [87:38 - 87:39] CXXConstructor=StringSwitch<T, R>:87:12 (Definition)
-// CHECK-tokens: Punctuation: ":" [87:40 - 87:41] CXXConstructor=StringSwitch<T, R>:87:12 (Definition)
+// CHECK-tokens: Punctuation: ")" [87:38 - 87:39] CXXConstructor=llvm::StringSwitch<T, R>:87:12 (Definition)
+// CHECK-tokens: Punctuation: ":" [87:40 - 87:41] CXXConstructor=llvm::StringSwitch<T, R>:87:12 (Definition)
// CHECK-tokens: Identifier: "Str" [87:42 - 87:45] MemberRef=Str:84:13
// CHECK-tokens: Punctuation: "(" [87:45 - 87:46] CallExpr=StringRef:38:7
// CHECK-tokens: Identifier: "Str" [87:46 - 87:49] DeclRefExpr=Str:87:35
// CHECK-tokens: Punctuation: ")" [87:49 - 87:50] CallExpr=StringRef:38:7
-// CHECK-tokens: Punctuation: "," [87:50 - 87:51] CXXConstructor=StringSwitch<T, R>:87:12 (Definition)
+// CHECK-tokens: Punctuation: "," [87:50 - 87:51] CXXConstructor=llvm::StringSwitch<T, R>:87:12 (Definition)
// CHECK-tokens: Identifier: "Result" [87:52 - 87:58] MemberRef=Result:85:12
// CHECK-tokens: Punctuation: "(" [87:58 - 87:59] UnexposedExpr=
// CHECK-tokens: Literal: "0" [87:59 - 87:60] IntegerLiteral=
@@ -846,7 +846,7 @@ AttributeList::Kind AttributeList::getKind(const IdentifierInfo * Name) {
// CHECK-tokens: Keyword: "unsigned" [88:14 - 88:22] NonTypeTemplateParameter=N:88:23 (Definition)
// CHECK-tokens: Identifier: "N" [88:23 - 88:24] NonTypeTemplateParameter=N:88:23 (Definition)
// CHECK-tokens: Punctuation: ">" [88:25 - 88:26] FunctionTemplate=Case:88:42 (Definition)
-// CHECK-tokens: Identifier: "StringSwitch" [88:27 - 88:39] TypeRef=StringSwitch<T, R>:83:47
+// CHECK-tokens: Identifier: "StringSwitch" [88:27 - 88:39] TypeRef=llvm::StringSwitch<T, R>:83:47
// CHECK-tokens: Punctuation: "&" [88:40 - 88:41] FunctionTemplate=Case:88:42 (Definition)
// CHECK-tokens: Identifier: "Case" [88:42 - 88:46] FunctionTemplate=Case:88:42 (Definition)
// CHECK-tokens: Punctuation: "(" [88:46 - 88:47] FunctionTemplate=Case:88:42 (Definition)
@@ -1619,7 +1619,7 @@ AttributeList::Kind AttributeList::getKind(const IdentifierInfo * Name) {
// CHECK: 33:17: CXXMethod=getKind:33:17 (static) Extent=[33:5 - 33:53]
// CHECK: 33:12: TypeRef=enum clang::AttributeList::Kind:13:10 Extent=[33:12 - 33:16]
// CHECK: 33:48: ParmDecl=Name:33:48 (Definition) Extent=[33:25 - 33:52]
-// CHECK: 33:31: TypeRef=class clang::IdentifierInfo:66:7 Extent=[33:31 - 33:45]
+// CHECK: 33:31: TypeRef=class clang::IdentifierInfo:11:9 Extent=[33:31 - 33:45]
// CHECK: 36:8: FunctionDecl=magic_length:36:8 Extent=[36:1 - 36:35]
// CHECK: 36:1: TypeRef=size_t:2:25 Extent=[36:1 - 36:7]
// CHECK: 36:33: ParmDecl=s:36:33 (Definition) Extent=[36:21 - 36:34]
@@ -1839,7 +1839,7 @@ AttributeList::Kind AttributeList::getKind(const IdentifierInfo * Name) {
// CHECK: 84:3: TypeRef=class llvm::StringRef:38:7 Extent=[84:3 - 84:12]
// CHECK: 85:12: FieldDecl=Result:85:12 (Definition) Extent=[85:3 - 85:18]
// CHECK: 86:1: CXXAccessSpecifier=:86:1 (Definition) Extent=[86:1 - 86:8]
-// CHECK: 87:12: CXXConstructor=StringSwitch<T, R>:87:12 (Definition) (explicit) Extent=[87:3 - 87:64]
+// CHECK: 87:12: CXXConstructor=llvm::StringSwitch<T, R>:87:12 (Definition) (explicit) Extent=[87:3 - 87:64]
// CHECK: 87:35: ParmDecl=Str:87:35 (Definition) Extent=[87:25 - 87:38]
// CHECK: 87:25: TypeRef=class llvm::StringRef:38:7 Extent=[87:25 - 87:34]
// CHECK: 87:42: MemberRef=Str:84:13 Extent=[87:42 - 87:45]
diff --git a/clang/test/Index/redeclarations.cpp b/clang/test/Index/redeclarations.cpp
index 11dc932..ea5e970 100644
--- a/clang/test/Index/redeclarations.cpp
+++ b/clang/test/Index/redeclarations.cpp
@@ -17,5 +17,5 @@ class A
// CHECK: redeclarations.h:19:19: FieldDecl=x:19:19 (Definition) Extent=[19:5 - 19:20]
// CHECK: redeclarations.h:19:5: TemplateRef=B:8:7 Extent=[19:5 - 19:6]
// CHECK: redeclarations.h:19:7: TypeRef=class D:17:7 Extent=[19:7 - 19:8]
-// CHECK: redeclarations.h:19:16: TypeRef=class A:3:7 Extent=[19:16 - 19:17]
+// CHECK: redeclarations.h:19:16: TypeRef=class A:19:16 Extent=[19:16 - 19:17]
// CHECK: redeclarations.cpp:3:7: ClassDecl=A:3:7 (Definition) Extent=[3:1 - 5:2]
diff --git a/clang/test/Index/skip-parsed-bodies/compile_commands.json b/clang/test/Index/skip-parsed-bodies/compile_commands.json
index 991227a8..e087a28 100644
--- a/clang/test/Index/skip-parsed-bodies/compile_commands.json
+++ b/clang/test/Index/skip-parsed-bodies/compile_commands.json
@@ -25,8 +25,8 @@
// CHECK-NEXT: [indexEntityReference]: kind: variable | name: some_val | {{.*}} | loc: .{{/|\\\\?}}t.h:9:27
// CHECK-NEXT: [indexDeclaration]: kind: c++-instance-method | name: method_def2 | {{.*}} | isRedecl: 0 | isDef: 0 | isContainer: 0
// CHECK-NEXT: [indexDeclaration]: kind: c++-instance-method | name: method_def2 | {{.*}} | isRedecl: 1 | isDef: 1 | isContainer: 1
-// CHECK-NEXT: [indexEntityReference]: kind: namespace | name: NS |
// CHECK-NEXT: [indexEntityReference]: kind: c++-class | name: C |
+// CHECK-NEXT: [indexEntityReference]: kind: namespace | name: NS |
// CHECK-NEXT: [indexEntityReference]: kind: variable | name: some_val | {{.*}} | loc: .{{/|\\\\?}}t.h:15:5
// CHECK-NEXT: [indexDeclaration]: kind: function | name: foo1 | {{.*}} | isRedecl: 0 | isDef: 1 | isContainer: 1
// CHECK-NEXT: [indexEntityReference]: kind: variable | name: some_val | {{.*}} | loc: .{{/|\\\\?}}t.h:19:5
@@ -40,8 +40,8 @@
// CHECK-NEXT: [indexDeclaration]: kind: c++-instance-method | name: method_def1 | {{.*}} | isRedecl: 0 | isDef: 1 | isContainer: skipped
// CHECK-NEXT: [indexDeclaration]: kind: c++-instance-method | name: method_def2 | {{.*}} | isRedecl: 0 | isDef: 0 | isContainer: 0
// CHECK-NEXT: [indexDeclaration]: kind: c++-instance-method | name: method_def2 | {{.*}} | isContainer: skipped
-// CHECK-NEXT: [indexEntityReference]: kind: namespace | name: NS |
// CHECK-NEXT: [indexEntityReference]: kind: c++-class | name: C |
+// CHECK-NEXT: [indexEntityReference]: kind: namespace | name: NS |
// CHECK-NEXT: [indexDeclaration]: kind: function | name: foo1 | {{.*}} | isRedecl: 0 | isDef: 1 | isContainer: skipped
// CHECK-NEXT: [ppIncludedFile]: .{{/|\\\\?}}pragma_once.h
// CHECK-NEXT: [indexDeclaration]: kind: function | name: foo2 | {{.*}} | isRedecl: 0 | isDef: 1 | isContainer: 1
@@ -60,8 +60,8 @@
// CHECK-NEXT: [indexDeclaration]: kind: c++-instance-method | name: method_def1 | {{.*}} | isRedecl: 0 | isDef: 1 | isContainer: skipped
// CHECK-NEXT: [indexDeclaration]: kind: c++-instance-method | name: method_def2 | {{.*}} | isRedecl: 0 | isDef: 0 | isContainer: 0
// CHECK-NEXT: [indexDeclaration]: kind: c++-instance-method | name: method_def2 | {{.*}} | isRedecl: 1 | isDef: 1 | isContainer: skipped
-// CHECK-NEXT: [indexEntityReference]: kind: namespace | name: NS |
// CHECK-NEXT: [indexEntityReference]: kind: c++-class | name: C |
+// CHECK-NEXT: [indexEntityReference]: kind: namespace | name: NS |
// CHECK-NEXT: [indexDeclaration]: kind: function | name: foo1 | {{.*}} | isRedecl: 0 | isDef: 1 | isContainer: skipped
// CHECK-NEXT: [ppIncludedFile]: .{{/|\\\\?}}pragma_once.h
// CHECK-NEXT: [indexDeclaration]: kind: function | name: foo2 | {{.*}} | isRedecl: 0 | isDef: 1 | isContainer: skipped
diff --git a/clang/test/Interpreter/simple-exception.cpp b/clang/test/Interpreter/simple-exception.cpp
index 651e8d9..8f7b515 100644
--- a/clang/test/Interpreter/simple-exception.cpp
+++ b/clang/test/Interpreter/simple-exception.cpp
@@ -1,7 +1,7 @@
// clang-format off
// UNSUPPORTED: system-aix
// XFAIL for arm, or running on Windows.
-// XFAIL: target=arm-{{.*}}, target=armv{{.*}}, system-windows
+// XFAIL: target=arm-{{.*}}, target=armv{{.*}}, system-windows, system-cygwin
// RUN: cat %s | clang-repl | FileCheck %s
// Incompatible with msan. It passes with -O3 but fails with -Oz. Interpreter
diff --git a/clang/test/Lexer/cross-windows-on-linux.cpp b/clang/test/Lexer/cross-windows-on-linux.cpp
index 3932ffc..ece16b1 100644
--- a/clang/test/Lexer/cross-windows-on-linux.cpp
+++ b/clang/test/Lexer/cross-windows-on-linux.cpp
@@ -10,4 +10,4 @@
// on non-Windows unless -fms-extensions is passed. It won't fail in this way on
// Windows because the filesystem will interpret the backslash as a directory
// separator.
-// UNSUPPORTED: system-windows
+// UNSUPPORTED: system-windows, system-cygwin
diff --git a/clang/test/Lexer/has_feature_cfi.c b/clang/test/Lexer/has_feature_cfi.c
new file mode 100644
index 0000000..a4e5803
--- /dev/null
+++ b/clang/test/Lexer/has_feature_cfi.c
@@ -0,0 +1,87 @@
+// REQUIRES: target={{x86_64.*-linux.*}}
+
+// RUN: %clang -E -fvisibility=hidden -flto -fno-sanitize-ignorelist -fsanitize=cfi -c %s -o - | FileCheck %s --check-prefix=CHECK-CFI
+// RUN: %clang -E -fvisibility=hidden -flto -fno-sanitize-ignorelist -fsanitize=cfi -fsanitize-cfi-cross-dso -c %s -o - | FileCheck %s --check-prefix=CHECK-CFI
+// RUN: %clang -E -fvisibility=hidden -flto -fno-sanitize-ignorelist -fsanitize=cfi -fno-sanitize=cfi-nvcall,cfi-vcall,cfi-mfcall,cfi-icall -c %s -o - | FileCheck %s --check-prefix=CHECK-CFI
+// CHECK-CFI: CFISanitizerEnabled
+
+// RUN: %clang -E -c %s -o - | FileCheck %s --check-prefix=CHECK-NO-CFI
+// CHECK-NO-CFI: CFISanitizerDisabled
+
+// RUN: %clang -E -fsanitize=kcfi -c %s -o - | FileCheck %s --check-prefixes=CHECK-KCFI,CHECK-NO-CFI
+// CHECK-KCFI: KCFISanitizerEnabled
+
+// RUN: %clang -E -fsanitize=cfi-cast-strict -c %s -o - | FileCheck %s --check-prefix=CHECK-CFI-CAST-STRICT
+// CHECK-CFI-CAST-STRICT: CFICastStrictSanitizerEnabled
+
+// RUN: %clang -E -fvisibility=hidden -flto -fno-sanitize-ignorelist -fsanitize=cfi-derived-cast -c %s -o - | FileCheck %s --check-prefixes=CHECK-CFI,CHECK-CFI-DERIVED-CAST
+// CHECK-CFI-DERIVED-CAST: CFIDerivedCastSanitizerEnabled
+
+// RUN: %clang -E -fvisibility=hidden -flto -fno-sanitize-ignorelist -fsanitize=cfi-icall -c %s -o - | FileCheck %s --check-prefixes=CHECK-CFI,CHECK-CFI-ICALL
+// CHECK-CFI-ICALL: CFIICallSanitizerEnabled
+
+// RUN: %clang -E -fvisibility=hidden -flto -fno-sanitize-ignorelist -fsanitize=cfi-mfcall -c %s -o - | FileCheck %s --check-prefixes=CHECK-CFI,CHECK-CFI-MFCALL
+// CHECK-CFI-MFCALL: CFIMFCallSanitizerEnabled
+
+// RUN: %clang -E -fvisibility=hidden -flto -fno-sanitize-ignorelist -fsanitize=cfi-unrelated-cast -c %s -o - | FileCheck %s --check-prefixes=CHECK-CFI,CHECK-CFI-UNRELATED-CAST
+// CHECK-CFI-UNRELATED-CAST: CFIUnrelatedCastSanitizerEnabled
+
+// RUN: %clang -E -fvisibility=hidden -flto -fno-sanitize-ignorelist -fsanitize=cfi-nvcall -c %s -o - | FileCheck %s --check-prefixes=CHECK-CFI,CHECK-CFI-NVCALL
+// CHECK-CFI-NVCALL: CFINVCallSanitizerEnabled
+
+// RUN: %clang -E -fvisibility=hidden -flto -fno-sanitize-ignorelist -fsanitize=cfi-vcall -c %s -o - | FileCheck %s --check-prefixes=CHECK-CFI,CHECK-CFI-VCALL
+// CHECK-CFI-VCALL: CFIVCallSanitizerEnabled
+
+#if __has_feature(cfi_sanitizer)
+int CFISanitizerEnabled();
+#else
+int CFISanitizerDisabled();
+#endif
+
+#if __has_feature(kcfi)
+int KCFISanitizerEnabled();
+#else
+int KCFISanitizerDisabled();
+#endif
+
+#if __has_feature(cfi_cast_strict_sanitizer)
+int CFICastStrictSanitizerEnabled();
+#else
+int CFICastStrictSanitizerDisabled();
+#endif
+
+#if __has_feature(cfi_derived_cast_sanitizer)
+int CFIDerivedCastSanitizerEnabled();
+#else
+int CFIDerivedCastSanitizerDisabled();
+#endif
+
+#if __has_feature(cfi_icall_sanitizer)
+int CFIICallSanitizerEnabled();
+#else
+int CFIICallSanitizerDisabled();
+#endif
+
+#if __has_feature(cfi_mfcall_sanitizer)
+int CFIMFCallSanitizerEnabled();
+#else
+int CFIMFCallSanitizerDisabled();
+#endif
+
+#if __has_feature(cfi_unrelated_cast_sanitizer)
+int CFIUnrelatedCastSanitizerEnabled();
+#else
+int CFIUnrelatedCastSanitizerDisabled();
+#endif
+
+#if __has_feature(cfi_nvcall_sanitizer)
+int CFINVCallSanitizerEnabled();
+#else
+int CFINVCallSanitizerDisabled();
+#endif
+
+#if __has_feature(cfi_vcall_sanitizer)
+int CFIVCallSanitizerEnabled();
+#else
+int CFIVCallSanitizerDisabled();
+#endif
diff --git a/clang/test/Misc/diag-template-diffing-cxx11.cpp b/clang/test/Misc/diag-template-diffing-cxx11.cpp
index c62bffe..0b145475 100644
--- a/clang/test/Misc/diag-template-diffing-cxx11.cpp
+++ b/clang/test/Misc/diag-template-diffing-cxx11.cpp
@@ -24,17 +24,17 @@ namespace std {
}
} // end namespace std
// CHECK-ELIDE-NOTREE: no matching function for call to 'f'
-// CHECK-ELIDE-NOTREE: candidate function not viable: no known conversion from 'vector<std::string>' to 'vector<string>' for 1st argument
+// CHECK-ELIDE-NOTREE: candidate function not viable: no known conversion from 'vector<std::basic_string>' to 'vector<versa_string>' for 1st argument
// CHECK-NOELIDE-NOTREE: no matching function for call to 'f'
-// CHECK-NOELIDE-NOTREE: candidate function not viable: no known conversion from 'vector<std::string>' to 'vector<string>' for 1st argument
+// CHECK-NOELIDE-NOTREE: candidate function not viable: no known conversion from 'vector<std::basic_string>' to 'vector<versa_string>' for 1st argument
// CHECK-ELIDE-TREE: no matching function for call to 'f'
// CHECK-ELIDE-TREE: candidate function not viable: no known conversion from argument type to parameter type for 1st argument
// CHECK-ELIDE-TREE: vector<
-// CHECK-ELIDE-TREE: [std::string != string]>
+// CHECK-ELIDE-TREE: [std::basic_string != versa_string]>
// CHECK-NOELIDE-TREE: no matching function for call to 'f'
// CHECK-NOELIDE-TREE: candidate function not viable: no known conversion from argument type to parameter type for 1st argument
// CHECK-NOELIDE-TREE: vector<
-// CHECK-NOELIDE-TREE: [std::string != string]>
+// CHECK-NOELIDE-TREE: [std::basic_string != versa_string]>
template <int... A>
class I1{};
diff --git a/clang/test/Modules/modules-merge-enum.m b/clang/test/Modules/modules-merge-enum.m
index f1010c1..fc07c46 100644
--- a/clang/test/Modules/modules-merge-enum.m
+++ b/clang/test/Modules/modules-merge-enum.m
@@ -18,9 +18,8 @@ typedef enum MyEnum1 { MyVal_A } MyEnum1;
// CHECK-NEXT: | |-also in ModB
// CHECK-NEXT: | `-EnumConstantDecl 0x{{.*}} imported in ModA.ModAFile1 referenced MyVal_A 'int'
// CHECK-NEXT: |-TypedefDecl 0x{{.*}} imported in ModA.ModAFile1 hidden MyEnum1 'enum MyEnum1'
-// CHECK-NEXT: | `-ElaboratedType 0x{{.*}} 'enum MyEnum1' sugar imported
-// CHECK-NEXT: | `-EnumType 0x{{.*}} 'enum MyEnum1' imported
-// CHECK-NEXT: | `-Enum 0x{{.*}} 'MyEnum1'
+// CHECK-NEXT: | `-EnumType 0x{{.*}} 'enum MyEnum1' imported
+// CHECK-NEXT: | `-Enum 0x{{.*}} 'MyEnum1'
enum MyEnum2 { MyVal_B };
@@ -33,10 +32,9 @@ typedef enum { MyVal_C } MyEnum3;
// CHECK: |-EnumDecl 0x{{.*}} imported in ModA.ModAFile1 <undeserialized declarations>
// CHECK-NEXT: | |-also in ModB
// CHECK-NEXT: | `-EnumConstantDecl 0x{{.*}} imported in ModA.ModAFile1 referenced MyVal_C 'int'
-// CHECK-NEXT: |-TypedefDecl 0x{{.*}} imported in ModA.ModAFile1 hidden MyEnum3 'enum MyEnum3':'MyEnum3'
-// CHECK-NEXT: | `-ElaboratedType 0x{{.*}} 'enum MyEnum3' sugar imported
-// CHECK-NEXT: | `-EnumType 0x{{.*}} 'MyEnum3' imported
-// CHECK-NEXT: | `-Enum 0x{{.*}}
+// CHECK-NEXT: |-TypedefDecl 0x{{.*}} imported in ModA.ModAFile1 hidden MyEnum3 'enum MyEnum3'
+// CHECK-NEXT: | `-EnumType 0x{{.*}} 'enum MyEnum3' imported
+// CHECK-NEXT: | `-Enum 0x{{.*}}
struct MyStruct {
enum MyEnum5 { MyVal_D } Field;
@@ -57,17 +55,15 @@ enum { MyVal_E };
// Redeclarations coming from ModB.
// CHECK: |-TypedefDecl 0x{{.*}} prev 0x{{.*}} imported in ModB MyEnum1 'enum MyEnum1'
-// CHECK-NEXT: | `-ElaboratedType 0x{{.*}} 'enum MyEnum1' sugar imported
-// CHECK-NEXT: | `-EnumType 0x{{.*}} 'enum MyEnum1' imported
-// CHECK-NEXT: | `-Enum 0x{{.*}} 'MyEnum1'
+// CHECK-NEXT: | `-EnumType 0x{{.*}} 'enum MyEnum1' imported
+// CHECK-NEXT: | `-Enum 0x{{.*}} 'MyEnum1'
// CHECK: |-EnumDecl 0x{{.*}} prev 0x{{.*}} imported in ModB <undeserialized declarations>
// CHECK-NEXT: | |-also in ModB
// CHECK-NEXT: | `-EnumConstantDecl 0x{{.*}} imported in ModB MyVal_C 'int'
-// CHECK-NEXT: |-TypedefDecl 0x{{.*}} prev 0x{{.*}} imported in ModB MyEnum3 'enum MyEnum3':'MyEnum3'
-// CHECK-NEXT: | `-ElaboratedType 0x{{.*}} 'enum MyEnum3' sugar imported
-// CHECK-NEXT: | `-EnumType 0x{{.*}} 'MyEnum3' imported
-// CHECK-NEXT: | `-Enum 0x{{.*}}
+// CHECK-NEXT: |-TypedefDecl 0x{{.*}} prev 0x{{.*}} imported in ModB MyEnum3 'enum MyEnum3'
+// CHECK-NEXT: | `-EnumType 0x{{.*}} 'enum MyEnum3' imported
+// CHECK-NEXT: | `-Enum 0x{{.*}}
// CHECK: |-EnumDecl 0x{{.*}} imported in ModB <undeserialized declarations>
// CHECK-NEXT: | `-EnumConstantDecl 0x{{.*}} first 0x{{.*}} imported in ModB referenced MyVal_E 'int'
diff --git a/clang/test/Modules/odr_hash.cpp b/clang/test/Modules/odr_hash.cpp
index 8ef53e3..f22f3c7 100644
--- a/clang/test/Modules/odr_hash.cpp
+++ b/clang/test/Modules/odr_hash.cpp
@@ -1314,7 +1314,7 @@ class S1 {
#else
template<class T>
using U1 = S1<T>;
-// expected-error@first.h:* {{'DependentType::S1::x' from module 'FirstModule' is not present in definition of 'S1<T>' in module 'SecondModule'}}
+// expected-error@first.h:* {{'DependentType::S1::x' from module 'FirstModule' is not present in definition of 'DependentType::S1<T>' in module 'SecondModule'}}
// expected-note@second.h:* {{declaration of 'x' does not match}}
#endif
@@ -2343,7 +2343,7 @@ struct S1 {
};
#else
using TemplateTypeParmType::S1;
-// expected-error@first.h:* {{'TemplateTypeParmType::S1::x' from module 'FirstModule' is not present in definition of 'S1<T1, T2>' in module 'SecondModule'}}
+// expected-error@first.h:* {{'TemplateTypeParmType::S1::x' from module 'FirstModule' is not present in definition of 'TemplateTypeParmType::S1<T1, T2>' in module 'SecondModule'}}
// expected-note@second.h:* {{declaration of 'x' does not match}}
#endif
@@ -2365,9 +2365,9 @@ class S2 {
};
#else
using TemplateTypeParmType::S2;
-// expected-error@first.h:* {{'TemplateTypeParmType::S2::x' from module 'FirstModule' is not present in definition of 'S2<T, U>' in module 'SecondModule'}}
+// expected-error@first.h:* {{'TemplateTypeParmType::S2::x' from module 'FirstModule' is not present in definition of 'TemplateTypeParmType::S2<T, U>' in module 'SecondModule'}}
// expected-note@second.h:* {{declaration of 'x' does not match}}
-// expected-error@first.h:* {{'TemplateTypeParmType::S2::type' from module 'FirstModule' is not present in definition of 'S2<T, U>' in module 'SecondModule'}}
+// expected-error@first.h:* {{'TemplateTypeParmType::S2::type' from module 'FirstModule' is not present in definition of 'TemplateTypeParmType::S2<T, U>' in module 'SecondModule'}}
// expected-note@second.h:* {{declaration of 'type' does not match}}
#endif
@@ -4020,7 +4020,7 @@ struct Valid {
};
#else
Invalid::L2<1>::L3<1> invalid;
-// expected-error@second.h:* {{'Types::InjectedClassName::Invalid::L2::L3::x' from module 'SecondModule' is not present in definition of 'L3<value-parameter-1-0>' in module 'FirstModule'}}
+// expected-error@second.h:* {{'Types::InjectedClassName::Invalid::L2::L3::x' from module 'SecondModule' is not present in definition of 'Types::InjectedClassName::Invalid::L2::L3<value-parameter-1-0>' in module 'FirstModule'}}
// expected-note@first.h:* {{declaration of 'x' does not match}}
Valid::L2<1>::L3<1> valid;
#endif
@@ -4291,7 +4291,7 @@ struct Valid {
};
#else
template <class T> using I = Invalid<T>;
-// expected-error@first.h:* {{'Types::UnresolvedUsing::Invalid::x' from module 'FirstModule' is not present in definition of 'Invalid<T>' in module 'SecondModule'}}
+// expected-error@first.h:* {{'Types::UnresolvedUsing::Invalid::x' from module 'FirstModule' is not present in definition of 'Types::UnresolvedUsing::Invalid<T>' in module 'SecondModule'}}
// expected-note@second.h:* {{declaration of 'x' does not match}}
template <class T> using V = Valid<T>;
diff --git a/clang/test/Modules/pr97313.cppm b/clang/test/Modules/pr97313.cppm
index 32c7112..99795d6 100644
--- a/clang/test/Modules/pr97313.cppm
+++ b/clang/test/Modules/pr97313.cppm
@@ -1,4 +1,4 @@
-// REQUIRES: !system-windows
+// REQUIRES: !system-windows, !system-cygwin
//
// RUN: rm -rf %t
// RUN: mkdir -p %t
diff --git a/clang/test/OpenMP/allocate_modifiers_messages.cpp b/clang/test/OpenMP/allocate_modifiers_messages.cpp
index 6867e78..83314a5 100644
--- a/clang/test/OpenMP/allocate_modifiers_messages.cpp
+++ b/clang/test/OpenMP/allocate_modifiers_messages.cpp
@@ -88,7 +88,7 @@ int main() {
// expected-warning@+2 {{extra tokens at the end of '#pragma omp scope' are ignored}}
// expected-note@+1 {{to match this '('}}
#pragma omp scope private(a,b,c) allocate(allocator(omp_const_mem_alloc):c:b;a)
- // expected-error@+1 {{initializing 'const omp_allocator_handle_t' with an expression of incompatible type 'int'}}
+ // expected-error@+1 {{initializing 'const omp_allocator_handle_t' (aka 'const enum omp_allocator_handle_t') with an expression of incompatible type 'int'}}
#pragma omp scope private(c,a,b) allocate(allocator(myAlloc()):a,b,c)
// expected-error@+2 {{missing ':' after allocate clause modifier}}
// expected-error@+1 {{expected expression}}
diff --git a/clang/test/OpenMP/bug54082.c b/clang/test/OpenMP/bug54082.c
index da32be2..bda4bd2 100644
--- a/clang/test/OpenMP/bug54082.c
+++ b/clang/test/OpenMP/bug54082.c
@@ -68,14 +68,14 @@ void foo() {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[X_TRAITS:%.*]] = alloca [1 x %struct.omp_alloctrait_t], align 16
// CHECK-NEXT: [[X_ALLOC:%.*]] = alloca i64, align 8
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[X_TRAITS]]) #[[ATTR5:[0-9]+]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X_TRAITS]]) #[[ATTR5:[0-9]+]]
// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 16 dereferenceable(16) [[X_TRAITS]], ptr noundef nonnull align 16 dereferenceable(16) @__const.foo.x_traits, i64 16, i1 false)
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull [[X_ALLOC]]) #[[ATTR5]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X_ALLOC]]) #[[ATTR5]]
// CHECK-NEXT: [[CALL:%.*]] = call i64 @omp_init_allocator(i64 noundef 0, i32 noundef 1, ptr noundef nonnull [[X_TRAITS]]) #[[ATTR5]]
// CHECK-NEXT: store i64 [[CALL]], ptr [[X_ALLOC]], align 8, !tbaa [[TBAA3:![0-9]+]]
// CHECK-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr nonnull @[[GLOB2:[0-9]+]], i32 1, ptr nonnull @foo.omp_outlined, ptr nonnull [[X_ALLOC]])
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull [[X_ALLOC]]) #[[ATTR5]]
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[X_TRAITS]]) #[[ATTR5]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X_ALLOC]]) #[[ATTR5]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X_TRAITS]]) #[[ATTR5]]
// CHECK-NEXT: ret void
//
//
@@ -86,13 +86,13 @@ void foo() {
// CHECK-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[DOTOMP_LB]]) #[[ATTR5]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[DOTOMP_LB]]) #[[ATTR5]]
// CHECK-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4, !tbaa [[TBAA7:![0-9]+]]
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[DOTOMP_UB]]) #[[ATTR5]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[DOTOMP_UB]]) #[[ATTR5]]
// CHECK-NEXT: store i32 1023, ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA7]]
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[DOTOMP_STRIDE]]) #[[ATTR5]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[DOTOMP_STRIDE]]) #[[ATTR5]]
// CHECK-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4, !tbaa [[TBAA7]]
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[DOTOMP_IS_LAST]]) #[[ATTR5]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[DOTOMP_IS_LAST]]) #[[ATTR5]]
// CHECK-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4, !tbaa [[TBAA7]]
// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4, !tbaa [[TBAA7]]
// CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr [[X_ALLOC]], align 8, !tbaa [[TBAA3]]
@@ -106,9 +106,9 @@ void foo() {
// CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr [[X_ALLOC]], align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[CONV5:%.*]] = inttoptr i64 [[TMP3]] to ptr
// CHECK-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTX__VOID_ADDR]], ptr [[CONV5]])
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[DOTOMP_IS_LAST]]) #[[ATTR5]]
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[DOTOMP_STRIDE]]) #[[ATTR5]]
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[DOTOMP_UB]]) #[[ATTR5]]
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[DOTOMP_LB]]) #[[ATTR5]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[DOTOMP_IS_LAST]]) #[[ATTR5]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[DOTOMP_STRIDE]]) #[[ATTR5]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[DOTOMP_UB]]) #[[ATTR5]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[DOTOMP_LB]]) #[[ATTR5]]
// CHECK-NEXT: ret void
//
diff --git a/clang/test/OpenMP/bug56913.c b/clang/test/OpenMP/bug56913.c
index cc72316..fad9e17 100644
--- a/clang/test/OpenMP/bug56913.c
+++ b/clang/test/OpenMP/bug56913.c
@@ -20,12 +20,12 @@ void loop(int n) {
// CHECK-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
// CHECK: simd.if.then:
// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @j, align 4, !tbaa [[TBAA2:![0-9]+]]
-// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[J]]) #[[ATTR2:[0-9]+]]
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[J]]) #[[ATTR2:[0-9]+]]
// CHECK-NEXT: store ptr [[J]], ptr @u, align 8, !tbaa [[TBAA6:![0-9]+]], !llvm.access.group [[ACC_GRP8:![0-9]+]]
// CHECK-NEXT: [[INC_LE:%.*]] = add i32 [[TMP0]], [[N]]
// CHECK-NEXT: store i32 [[INC_LE]], ptr [[J]], align 4, !tbaa [[TBAA2]]
// CHECK-NEXT: store i32 [[INC_LE]], ptr @j, align 4, !tbaa [[TBAA2]]
-// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[J]]) #[[ATTR2]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[J]]) #[[ATTR2]]
// CHECK-NEXT: br label [[SIMD_IF_END]]
// CHECK: simd.if.end:
// CHECK-NEXT: ret void
diff --git a/clang/test/OpenMP/bug57757.cpp b/clang/test/OpenMP/bug57757.cpp
index eabf233..caf53a5b 100644
--- a/clang/test/OpenMP/bug57757.cpp
+++ b/clang/test/OpenMP/bug57757.cpp
@@ -46,7 +46,7 @@ void foo() {
// CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP1]], i64 52
// CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP1]], i64 48
// CHECK-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP5]], align 8, !tbaa [[TBAA19:![0-9]+]], !noalias [[META13]]
-// CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP7]], align 4, !tbaa [[TBAA16]], !noalias [[META13]]
+// CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP7]], align 8, !tbaa [[TBAA16]], !noalias [[META13]]
// CHECK-NEXT: [[TMP10:%.*]] = load float, ptr [[TMP6]], align 4, !tbaa [[TBAA20:![0-9]+]], !noalias [[META13]]
// CHECK-NEXT: tail call void [[TMP8]](i32 noundef [[TMP9]], float noundef [[TMP10]]) #[[ATTR2:[0-9]+]], !noalias [[META13]]
// CHECK-NEXT: br label [[DOTOMP_OUTLINED__EXIT]]
diff --git a/clang/test/OpenMP/nvptx_target_parallel_reduction_codegen_tbaa_PR46146.cpp b/clang/test/OpenMP/nvptx_target_parallel_reduction_codegen_tbaa_PR46146.cpp
index 232cfda..20e344f 100644
--- a/clang/test/OpenMP/nvptx_target_parallel_reduction_codegen_tbaa_PR46146.cpp
+++ b/clang/test/OpenMP/nvptx_target_parallel_reduction_codegen_tbaa_PR46146.cpp
@@ -71,16 +71,16 @@ void test() {
// CHECK1-NEXT: [[ISTART:%.*]] = call align 16 ptr @__kmpc_alloc_shared(i64 4)
// CHECK1-NEXT: [[IEND:%.*]] = call align 16 ptr @__kmpc_alloc_shared(i64 4)
// CHECK1-NEXT: [[PARTIAL_SUM:%.*]] = call align 16 ptr @__kmpc_alloc_shared(i64 8)
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_IV]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_LB]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTOMP_IV]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTOMP_LB]]) #[[ATTR4]]
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_UB]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTOMP_UB]]) #[[ATTR4]]
// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_STRIDE]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTOMP_STRIDE]]) #[[ATTR4]]
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_IS_LAST]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTOMP_IS_LAST]]) #[[ATTR4]]
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[IB]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[IB]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
@@ -110,13 +110,13 @@ void test() {
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], ptr [[IB]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[REF_TMP]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[REF_TMP]]) #[[ATTR4]]
// CHECK1-NEXT: store float 0.000000e+00, ptr [[REF_TMP]], align 4, !tbaa [[TBAA19:![0-9]+]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[REF_TMP2]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[REF_TMP2]]) #[[ATTR4]]
// CHECK1-NEXT: store float 0.000000e+00, ptr [[REF_TMP2]], align 4, !tbaa [[TBAA19]]
// CHECK1-NEXT: call void @_ZNSt7complexIfEC1ERKfS2_(ptr nonnull align 4 dereferenceable(8) [[PARTIAL_SUM]], ptr nonnull align 4 dereferenceable(4) [[REF_TMP]], ptr nonnull align 4 dereferenceable(4) [[REF_TMP2]]) #[[ATTR11:[0-9]+]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[REF_TMP2]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[REF_TMP]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[REF_TMP2]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[REF_TMP]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[IB]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: [[MUL3:%.*]] = mul nsw i32 [[TMP8]], 4
// CHECK1-NEXT: store i32 [[MUL3]], ptr [[ISTART]], align 4, !tbaa [[TBAA15]]
@@ -143,12 +143,12 @@ void test() {
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP1]])
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[IB]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_IS_LAST]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_STRIDE]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_UB]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_LB]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_IV]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[IB]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTOMP_IS_LAST]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTOMP_STRIDE]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTOMP_UB]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTOMP_LB]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTOMP_IV]]) #[[ATTR4]]
// CHECK1-NEXT: call void @__kmpc_free_shared(ptr [[PARTIAL_SUM]], i64 8)
// CHECK1-NEXT: call void @__kmpc_free_shared(ptr [[IEND]], i64 4)
// CHECK1-NEXT: call void @__kmpc_free_shared(ptr [[ISTART]], i64 4)
@@ -205,14 +205,14 @@ void test() {
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ISTART_ADDR]], align 8, !tbaa [[TBAA17]]
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[IEND_ADDR]], align 8, !tbaa [[TBAA17]]
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[PARTIAL_SUM_ADDR]], align 8, !tbaa [[TBAA23]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_IV]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTCAPTURE_EXPR_]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTOMP_IV]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTCAPTURE_EXPR_]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTCAPTURE_EXPR_1]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTCAPTURE_EXPR_1]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP1]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTCAPTURE_EXPR_1]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTCAPTURE_EXPR_2]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTCAPTURE_EXPR_2]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: [[SUB:%.*]] = sub i32 [[TMP5]], [[TMP6]]
@@ -221,33 +221,33 @@ void test() {
// CHECK1-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
// CHECK1-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1
// CHECK1-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: store i32 [[TMP7]], ptr [[I]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP8]], [[TMP9]]
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK1: omp.precond.then:
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_LB]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTOMP_LB]]) #[[ATTR4]]
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_UB]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTOMP_UB]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: store i32 [[TMP10]], ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_STRIDE]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTOMP_STRIDE]]) #[[ATTR4]]
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_IS_LAST]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTOMP_IS_LAST]]) #[[ATTR4]]
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[PARTIAL_SUM5]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[REF_TMP]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[PARTIAL_SUM5]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[REF_TMP]]) #[[ATTR4]]
// CHECK1-NEXT: store float 0.000000e+00, ptr [[REF_TMP]], align 4, !tbaa [[TBAA19]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[REF_TMP6]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[REF_TMP6]]) #[[ATTR4]]
// CHECK1-NEXT: store float 0.000000e+00, ptr [[REF_TMP6]], align 4, !tbaa [[TBAA19]]
// CHECK1-NEXT: call void @_ZNSt7complexIfEC1ERKfS2_(ptr nonnull align 4 dereferenceable(8) [[PARTIAL_SUM5]], ptr nonnull align 4 dereferenceable(4) [[REF_TMP]], ptr nonnull align 4 dereferenceable(4) [[REF_TMP6]]) #[[ATTR11]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[REF_TMP6]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[REF_TMP]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I7]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[REF_TMP6]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[REF_TMP]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[I7]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP11]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB3:[0-9]+]], i32 [[TMP12]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
@@ -291,20 +291,20 @@ void test() {
// CHECK1-NEXT: [[MUL:%.*]] = mul i32 [[TMP23]], 1
// CHECK1-NEXT: [[ADD13:%.*]] = add i32 [[TMP22]], [[MUL]]
// CHECK1-NEXT: store i32 [[ADD13]], ptr [[I7]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[REF_TMP14]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[REF_TMP15]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[REF_TMP14]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[REF_TMP15]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[I7]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP24]] to float
// CHECK1-NEXT: store float [[CONV]], ptr [[REF_TMP15]], align 4, !tbaa [[TBAA19]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[REF_TMP16]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[REF_TMP16]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP25:%.*]] = load i32, ptr [[I7]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: [[CONV17:%.*]] = sitofp i32 [[TMP25]] to float
// CHECK1-NEXT: store float [[CONV17]], ptr [[REF_TMP16]], align 4, !tbaa [[TBAA19]]
// CHECK1-NEXT: call void @_ZNSt7complexIfEC1ERKfS2_(ptr nonnull align 4 dereferenceable(8) [[REF_TMP14]], ptr nonnull align 4 dereferenceable(4) [[REF_TMP15]], ptr nonnull align 4 dereferenceable(4) [[REF_TMP16]]) #[[ATTR11]]
// CHECK1-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(8) ptr @_ZNSt7complexIfEpLIfEERS0_RKS_IT_E(ptr nonnull align 4 dereferenceable(8) [[PARTIAL_SUM5]], ptr nonnull align 4 dereferenceable(8) [[REF_TMP14]]) #[[ATTR11]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[REF_TMP16]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[REF_TMP15]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[REF_TMP14]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[REF_TMP16]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[REF_TMP15]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[REF_TMP14]]) #[[ATTR4]]
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
@@ -338,18 +338,18 @@ void test() {
// CHECK1-NEXT: [[CALL21:%.*]] = call nonnull align 4 dereferenceable(8) ptr @_ZNSt7complexIfEpLIfEERS0_RKS_IT_E(ptr nonnull align 4 dereferenceable(8) [[TMP2]], ptr nonnull align 4 dereferenceable(8) [[PARTIAL_SUM5]]) #[[ATTR11]]
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
// CHECK1: .omp.reduction.done:
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I7]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[PARTIAL_SUM5]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_IS_LAST]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_STRIDE]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_UB]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_LB]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[I7]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[PARTIAL_SUM5]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTOMP_IS_LAST]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTOMP_STRIDE]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTOMP_UB]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTOMP_LB]]) #[[ATTR4]]
// CHECK1-NEXT: br label [[OMP_PRECOND_END]]
// CHECK1: omp.precond.end:
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTCAPTURE_EXPR_2]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTCAPTURE_EXPR_1]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTCAPTURE_EXPR_]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_IV]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTCAPTURE_EXPR_2]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTCAPTURE_EXPR_1]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTCAPTURE_EXPR_]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTOMP_IV]]) #[[ATTR4]]
// CHECK1-NEXT: ret void
//
//
@@ -563,16 +563,16 @@ void test() {
// CHECK1-NEXT: [[ISTART:%.*]] = call align 16 ptr @__kmpc_alloc_shared(i64 4)
// CHECK1-NEXT: [[IEND:%.*]] = call align 16 ptr @__kmpc_alloc_shared(i64 4)
// CHECK1-NEXT: [[PARTIAL_SUM:%.*]] = call align 16 ptr @__kmpc_alloc_shared(i64 16)
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_IV]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_LB]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTOMP_IV]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTOMP_LB]]) #[[ATTR4]]
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_UB]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTOMP_UB]]) #[[ATTR4]]
// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_STRIDE]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTOMP_STRIDE]]) #[[ATTR4]]
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_IS_LAST]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTOMP_IS_LAST]]) #[[ATTR4]]
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[IB]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[IB]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
@@ -602,13 +602,13 @@ void test() {
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], ptr [[IB]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[REF_TMP]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[REF_TMP]]) #[[ATTR4]]
// CHECK1-NEXT: store double 0.000000e+00, ptr [[REF_TMP]], align 8, !tbaa [[TBAA36:![0-9]+]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[REF_TMP2]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[REF_TMP2]]) #[[ATTR4]]
// CHECK1-NEXT: store double 0.000000e+00, ptr [[REF_TMP2]], align 8, !tbaa [[TBAA36]]
// CHECK1-NEXT: call void @_ZNSt7complexIdEC1ERKdS2_(ptr nonnull align 8 dereferenceable(16) [[PARTIAL_SUM]], ptr nonnull align 8 dereferenceable(8) [[REF_TMP]], ptr nonnull align 8 dereferenceable(8) [[REF_TMP2]]) #[[ATTR11]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[REF_TMP2]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[REF_TMP]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[REF_TMP2]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[REF_TMP]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[IB]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: [[MUL3:%.*]] = mul nsw i32 [[TMP8]], 4
// CHECK1-NEXT: store i32 [[MUL3]], ptr [[ISTART]], align 4, !tbaa [[TBAA15]]
@@ -635,12 +635,12 @@ void test() {
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP1]])
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[IB]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_IS_LAST]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_STRIDE]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_UB]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_LB]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_IV]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[IB]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTOMP_IS_LAST]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTOMP_STRIDE]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTOMP_UB]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTOMP_LB]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTOMP_IV]]) #[[ATTR4]]
// CHECK1-NEXT: call void @__kmpc_free_shared(ptr [[PARTIAL_SUM]], i64 16)
// CHECK1-NEXT: call void @__kmpc_free_shared(ptr [[IEND]], i64 4)
// CHECK1-NEXT: call void @__kmpc_free_shared(ptr [[ISTART]], i64 4)
@@ -697,14 +697,14 @@ void test() {
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ISTART_ADDR]], align 8, !tbaa [[TBAA17]]
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[IEND_ADDR]], align 8, !tbaa [[TBAA17]]
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[PARTIAL_SUM_ADDR]], align 8, !tbaa [[TBAA38]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_IV]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTCAPTURE_EXPR_]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTOMP_IV]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTCAPTURE_EXPR_]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTCAPTURE_EXPR_1]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTCAPTURE_EXPR_1]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP1]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTCAPTURE_EXPR_1]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTCAPTURE_EXPR_2]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTCAPTURE_EXPR_2]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: [[SUB:%.*]] = sub i32 [[TMP5]], [[TMP6]]
@@ -713,33 +713,33 @@ void test() {
// CHECK1-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
// CHECK1-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1
// CHECK1-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: store i32 [[TMP7]], ptr [[I]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP8]], [[TMP9]]
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK1: omp.precond.then:
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_LB]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTOMP_LB]]) #[[ATTR4]]
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_UB]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTOMP_UB]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: store i32 [[TMP10]], ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_STRIDE]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTOMP_STRIDE]]) #[[ATTR4]]
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_IS_LAST]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTOMP_IS_LAST]]) #[[ATTR4]]
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[PARTIAL_SUM5]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[REF_TMP]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[PARTIAL_SUM5]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[REF_TMP]]) #[[ATTR4]]
// CHECK1-NEXT: store double 0.000000e+00, ptr [[REF_TMP]], align 8, !tbaa [[TBAA36]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[REF_TMP6]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[REF_TMP6]]) #[[ATTR4]]
// CHECK1-NEXT: store double 0.000000e+00, ptr [[REF_TMP6]], align 8, !tbaa [[TBAA36]]
// CHECK1-NEXT: call void @_ZNSt7complexIdEC1ERKdS2_(ptr nonnull align 8 dereferenceable(16) [[PARTIAL_SUM5]], ptr nonnull align 8 dereferenceable(8) [[REF_TMP]], ptr nonnull align 8 dereferenceable(8) [[REF_TMP6]]) #[[ATTR11]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[REF_TMP6]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[REF_TMP]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I7]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[REF_TMP6]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[REF_TMP]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[I7]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP11]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB3]], i32 [[TMP12]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
@@ -783,20 +783,20 @@ void test() {
// CHECK1-NEXT: [[MUL:%.*]] = mul i32 [[TMP23]], 1
// CHECK1-NEXT: [[ADD13:%.*]] = add i32 [[TMP22]], [[MUL]]
// CHECK1-NEXT: store i32 [[ADD13]], ptr [[I7]], align 4, !tbaa [[TBAA15]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[REF_TMP14]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[REF_TMP15]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[REF_TMP14]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[REF_TMP15]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[I7]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP24]] to double
// CHECK1-NEXT: store double [[CONV]], ptr [[REF_TMP15]], align 8, !tbaa [[TBAA36]]
-// CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[REF_TMP16]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr [[REF_TMP16]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP25:%.*]] = load i32, ptr [[I7]], align 4, !tbaa [[TBAA15]]
// CHECK1-NEXT: [[CONV17:%.*]] = sitofp i32 [[TMP25]] to double
// CHECK1-NEXT: store double [[CONV17]], ptr [[REF_TMP16]], align 8, !tbaa [[TBAA36]]
// CHECK1-NEXT: call void @_ZNSt7complexIdEC1ERKdS2_(ptr nonnull align 8 dereferenceable(16) [[REF_TMP14]], ptr nonnull align 8 dereferenceable(8) [[REF_TMP15]], ptr nonnull align 8 dereferenceable(8) [[REF_TMP16]]) #[[ATTR11]]
// CHECK1-NEXT: [[CALL:%.*]] = call nonnull align 8 dereferenceable(16) ptr @_ZNSt7complexIdEpLIdEERS0_RKS_IT_E(ptr nonnull align 8 dereferenceable(16) [[PARTIAL_SUM5]], ptr nonnull align 8 dereferenceable(16) [[REF_TMP14]]) #[[ATTR11]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[REF_TMP16]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[REF_TMP15]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[REF_TMP14]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[REF_TMP16]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[REF_TMP15]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[REF_TMP14]]) #[[ATTR4]]
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
@@ -830,18 +830,18 @@ void test() {
// CHECK1-NEXT: [[CALL21:%.*]] = call nonnull align 8 dereferenceable(16) ptr @_ZNSt7complexIdEpLIdEERS0_RKS_IT_E(ptr nonnull align 8 dereferenceable(16) [[TMP2]], ptr nonnull align 8 dereferenceable(16) [[PARTIAL_SUM5]]) #[[ATTR11]]
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
// CHECK1: .omp.reduction.done:
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I7]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[PARTIAL_SUM5]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_IS_LAST]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_STRIDE]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_UB]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_LB]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[I7]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[PARTIAL_SUM5]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTOMP_IS_LAST]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTOMP_STRIDE]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTOMP_UB]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTOMP_LB]]) #[[ATTR4]]
// CHECK1-NEXT: br label [[OMP_PRECOND_END]]
// CHECK1: omp.precond.end:
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTCAPTURE_EXPR_2]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTCAPTURE_EXPR_1]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTCAPTURE_EXPR_]]) #[[ATTR4]]
-// CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_IV]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTCAPTURE_EXPR_2]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTCAPTURE_EXPR_1]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTCAPTURE_EXPR_]]) #[[ATTR4]]
+// CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTOMP_IV]]) #[[ATTR4]]
// CHECK1-NEXT: ret void
//
//
diff --git a/clang/test/OpenMP/target_map_array_of_structs_with_nested_mapper_ast_dump.cpp b/clang/test/OpenMP/target_map_array_of_structs_with_nested_mapper_ast_dump.cpp
index a584770..15b976f 100644
--- a/clang/test/OpenMP/target_map_array_of_structs_with_nested_mapper_ast_dump.cpp
+++ b/clang/test/OpenMP/target_map_array_of_structs_with_nested_mapper_ast_dump.cpp
@@ -27,7 +27,7 @@ void foo() {
// DUM-NEXT: |-OMPMapClause {{.*}}<<invalid sloc>> <implicit>
// DUM-NEXT: | |-MemberExpr {{.*}}<line:9:3> 'int' lvalue .e
// DUM-NEXT: | | `-DeclRefExpr {{.*}}<<invalid sloc>> 'D' lvalue Var {{.*}} '_s' 'D'
-// DUM-NEXT: | |-MemberExpr {{.*}}<line:10:3> 'C' lvalue .f {{.*}}
+// DUM-NEXT: | |-MemberExpr {{.*}}<line:10:3> 'C':'struct C' lvalue .f {{.*}}
// DUM-NEXT: | | `-DeclRefExpr {{.*}}<<invalid sloc>> 'D' lvalue Var {{.*}} '_s' 'D'
// DUM-NEXT: | `-MemberExpr {{.*}}<line:11:3> 'int' lvalue .h {{.*}}
// DUM-NEXT: | `-DeclRefExpr {{.*}}<<invalid sloc>> 'D' lvalue Var {{.*}} '_s' 'D'
diff --git a/clang/test/OpenMP/target_map_array_section_of_structs_with_nested_mapper_ast_dump.cpp b/clang/test/OpenMP/target_map_array_section_of_structs_with_nested_mapper_ast_dump.cpp
index b2fb8fb..bdf3dd0 100644
--- a/clang/test/OpenMP/target_map_array_section_of_structs_with_nested_mapper_ast_dump.cpp
+++ b/clang/test/OpenMP/target_map_array_section_of_structs_with_nested_mapper_ast_dump.cpp
@@ -27,7 +27,7 @@ void foo() {
// DUM-NEXT: |-OMPMapClause {{.*}}<<invalid sloc>> <implicit>
// DUM-NEXT: | |-MemberExpr {{.*}}<line:9:3> 'int' lvalue .e
// DUM-NEXT: | | `-DeclRefExpr {{.*}}<<invalid sloc>> 'D' lvalue Var {{.*}} '_s' 'D'
-// DUM-NEXT: | |-MemberExpr {{.*}}<line:10:3> 'C' lvalue .f {{.*}}
+// DUM-NEXT: | |-MemberExpr {{.*}}<line:10:3> 'C':'struct C' lvalue .f {{.*}}
// DUM-NEXT: | | `-DeclRefExpr {{.*}}<<invalid sloc>> 'D' lvalue Var {{.*}} '_s' 'D'
// DUM-NEXT: | `-MemberExpr {{.*}}<line:11:3> 'int' lvalue .h {{.*}}
// DUM-NEXT: | `-DeclRefExpr {{.*}}<<invalid sloc>> 'D' lvalue Var {{.*}} '_s' 'D'
diff --git a/clang/test/PCH/cxx-explicit-specifier.cpp b/clang/test/PCH/cxx-explicit-specifier.cpp
index 84548fa..a30c669 100644
--- a/clang/test/PCH/cxx-explicit-specifier.cpp
+++ b/clang/test/PCH/cxx-explicit-specifier.cpp
@@ -79,13 +79,13 @@ struct A {
B<true> b_true;
B<false> b_false;
#else
-//expected-note@-8 {{candidate template ignored}} expected-note@-8 {{implicit deduction guide declared as 'template <bool b> A(A<b>) -> A<b>'}}
-//expected-note@-8 {{explicit constructor declared here}} expected-note@-8 {{implicit deduction guide declared as 'template <bool b> explicit(b) A(B<b>) -> A<b>'}}
+//expected-note@-8 {{candidate template ignored}} expected-note@-8 {{implicit deduction guide declared as 'template <bool b> A(templ::A<b>) -> templ::A<b>'}}
+//expected-note@-8 {{explicit constructor declared here}} expected-note@-8 {{implicit deduction guide declared as 'template <bool b> explicit(b) A(B<b>) -> templ::A<b>'}}
//expected-note@-15+ {{candidate constructor}}
//expected-note@-8+ {{explicit conversion function is not a candidate (explicit specifier}}
//expected-note@-11 {{explicit constructor is not a candidate (explicit specifier}}
-//CHECK: explicit(b){{ +}}A
+//CHECK: explicit(b){{ +}}templ::A<b>(B<b>)
//CHECK: explicit(b{{ +}}^{{ +}}T::value){{ +}}operator
A a = { b_true }; //expected-error {{class template argument deduction}}
diff --git a/clang/test/Parser/MicrosoftExtensions.cpp b/clang/test/Parser/MicrosoftExtensions.cpp
index 9102bca..e32d7fa 100644
--- a/clang/test/Parser/MicrosoftExtensions.cpp
+++ b/clang/test/Parser/MicrosoftExtensions.cpp
@@ -145,7 +145,7 @@ typedef COM_CLASS_TEMPLATE_REF<struct_with_uuid, __uuidof(struct_with_uuid)> COM
COM_CLASS_TEMPLATE_REF<int, __uuidof(struct_with_uuid)> good_template_arg;
-COM_CLASS_TEMPLATE<int, __uuidof(struct_with_uuid)> bad_template_arg; // expected-error {{non-type template argument for template parameter of pointer type 'const GUID *' (aka 'const _GUID *') must have its address taken}}
+COM_CLASS_TEMPLATE<int, __uuidof(struct_with_uuid)> bad_template_arg; // expected-error {{non-type template argument for template parameter of pointer type 'const GUID *' (aka 'const struct _GUID *') must have its address taken}}
namespace PR16911 {
struct __declspec(uuid("{12345678-1234-1234-1234-1234567890aB}")) uuid;
diff --git a/clang/test/Parser/cxx1z-class-template-argument-deduction.cpp b/clang/test/Parser/cxx1z-class-template-argument-deduction.cpp
index d29eed4..9d27f83 100644
--- a/clang/test/Parser/cxx1z-class-template-argument-deduction.cpp
+++ b/clang/test/Parser/cxx1z-class-template-argument-deduction.cpp
@@ -196,8 +196,8 @@ namespace typename_specifier {
new typename T::A{0};
typename T::A a = 0;
const typename T::A b = 0;
- if (typename T::A a = 0) {} // expected-error {{value of type 'typename X::A<int>' (aka 'typename_specifier::X::A<int>') is not contextually convertible to 'bool'}}
- for (typename T::A a = 0; typename T::A b = 0; /**/) {} // expected-error {{value of type 'typename X::A<int>' (aka 'typename_specifier::X::A<int>') is not contextually convertible to 'bool'}}
+ if (typename T::A a = 0) {} // expected-error {{value of type 'typename typename_specifier::X::A<int>' (aka 'typename_specifier::X::A<int>') is not contextually convertible to 'bool'}}
+ for (typename T::A a = 0; typename T::A b = 0; /**/) {} // expected-error {{value of type 'typename typename_specifier::X::A<int>' (aka 'typename_specifier::X::A<int>') is not contextually convertible to 'bool'}}
{(void)(typename T::A)(0);} // expected-error{{refers to class template member}}
{(void)(typename T::A){0};} // expected-error{{refers to class template member}}
@@ -208,7 +208,7 @@ namespace typename_specifier {
{typename T::A arr[3] = 0;} // expected-error {{refers to class template member}}
{typename T::A F::*pm = 0;} // expected-error {{refers to class template member}}
{typename T::A (*fp)() = 0;} // expected-error {{refers to class template member}}
- {typename T::A [x, y] = 0;} // expected-error {{cannot be declared with type 'typename T::A'}} expected-error {{type 'typename X::A<int>' (aka 'typename_specifier::X::A<int>') decomposes into 0}}
+ {typename T::A [x, y] = 0;} // expected-error {{cannot be declared with type 'typename T::A'}} expected-error {{type 'typename typename_specifier::X::A<int>' (aka 'typename_specifier::X::A<int>') decomposes into 0}}
}
template void f<X>(); // expected-note {{instantiation of}}
diff --git a/clang/test/Preprocessor/file_test.c b/clang/test/Preprocessor/file_test.c
index 945882d..1e7e1df 100644
--- a/clang/test/Preprocessor/file_test.c
+++ b/clang/test/Preprocessor/file_test.c
@@ -1,4 +1,4 @@
-// UNSUPPORTED: system-windows
+// UNSUPPORTED: system-windows, system-cygwin
// RUN: %clang -E -ffile-prefix-map=%p=/UNLIKELY_PATH/empty -c -o - %s | FileCheck %s
// RUN: %clang -E -fmacro-prefix-map=%p=/UNLIKELY_PATH/empty -c -o - %s | FileCheck %s
// RUN: %clang -E -fmacro-prefix-map=%p=/UNLIKELY_PATH=empty -c -o - %s | FileCheck %s -check-prefix CHECK-EVIL
diff --git a/clang/test/Sema/aarch64-sve-intrinsics/acle_sve_compact.cpp b/clang/test/Sema/aarch64-sve-intrinsics/acle_sve_compact.cpp
new file mode 100644
index 0000000..4de3f39
--- /dev/null
+++ b/clang/test/Sema/aarch64-sve-intrinsics/acle_sve_compact.cpp
@@ -0,0 +1,18 @@
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve \
+// RUN: -verify -verify-ignore-unexpected=error,note -emit-llvm -o - %s
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sme \
+// RUN: -verify -verify-ignore-unexpected=error,note -emit-llvm -o - %s
+// REQUIRES: aarch64-registered-target
+// expected-no-diagnostics
+
+#include <arm_sve.h>
+
+__attribute__((target("sme2p2")))
+void test_svcompact(svbool_t pg, svfloat32_t op) __arm_streaming{
+ svcompact(pg, op);
+}
+
+void test_svcompact_nofeature(svbool_t pg, svfloat32_t op) __arm_streaming{
+ // expected-error@+1 {{'svcompact' needs target feature (sve)|(sme, sme2p2)}}
+ svcompact(pg, op);
+}
\ No newline at end of file
diff --git a/clang/test/Sema/builtins-wasm.c b/clang/test/Sema/builtins-wasm.c
index a3486b1..9075e9e 100644
--- a/clang/test/Sema/builtins-wasm.c
+++ b/clang/test/Sema/builtins-wasm.c
@@ -1,4 +1,5 @@
// RUN: %clang_cc1 -fsyntax-only -verify -triple wasm32 -target-feature +reference-types %s
+// RUN: %clang_cc1 -fsyntax-only -verify -triple wasm32 -target-abi experimental-mv -DMULTIVALUE -target-feature +reference-types %s
#define EXPR_HAS_TYPE(expr, type) _Generic((expr), type : 1, default : 0)
@@ -57,8 +58,8 @@ void test_table_copy(int dst_idx, int src_idx, int nelem) {
typedef void (*F1)(void);
typedef int (*F2)(int);
-typedef int (*F3)(__externref_t);
-typedef __externref_t (*F4)(int);
+typedef void (*F3)(struct {int x; double y;});
+typedef struct {int x; double y;} (*F4)(void);
void test_function_pointer_signature() {
// Test argument count validation
@@ -68,8 +69,6 @@ void test_function_pointer_signature() {
// // Test argument type validation - should require function pointer
(void)__builtin_wasm_test_function_pointer_signature((void*)0); // expected-error {{used type 'void *' where function pointer is required}}
(void)__builtin_wasm_test_function_pointer_signature((int)0); // expected-error {{used type 'int' where function pointer is required}}
- (void)__builtin_wasm_test_function_pointer_signature((F3)0); // expected-error {{not supported for function pointers with a reference type parameter}}
- (void)__builtin_wasm_test_function_pointer_signature((F4)0); // expected-error {{not supported for function pointers with a reference type return value}}
// // Test valid usage
int res = __builtin_wasm_test_function_pointer_signature((F1)0);
@@ -77,4 +76,14 @@ void test_function_pointer_signature() {
// Test return type
_Static_assert(EXPR_HAS_TYPE(__builtin_wasm_test_function_pointer_signature((F1)0), int), "");
+
+#ifdef MULTIVALUE
+ // Test that struct arguments and returns are rejected with multivalue abi
+ (void)__builtin_wasm_test_function_pointer_signature((F3)0); // expected-error {{not supported with the multivalue ABI for function pointers with a struct/union as parameter}}
+ (void)__builtin_wasm_test_function_pointer_signature((F4)0); // expected-error {{not supported with the multivalue ABI for function pointers with a struct/union as return value}}
+#else
+ // with default abi they are fine
+ (void)__builtin_wasm_test_function_pointer_signature((F3)0);
+ (void)__builtin_wasm_test_function_pointer_signature((F4)0);
+#endif
}
diff --git a/clang/test/SemaCXX/MicrosoftExtensions.cpp b/clang/test/SemaCXX/MicrosoftExtensions.cpp
index 4dff2b1..ca072d8 100644
--- a/clang/test/SemaCXX/MicrosoftExtensions.cpp
+++ b/clang/test/SemaCXX/MicrosoftExtensions.cpp
@@ -126,17 +126,17 @@ __inline void FreeIDListArray(LPITEMIDLIST *ppidls) {
typedef struct in_addr {
public:
in_addr(in_addr &a) {} // precxx17-note {{candidate constructor not viable: expects an lvalue for 1st argument}}
- in_addr(in_addr *a) {} // precxx17-note {{candidate constructor not viable: no known conversion from 'IN_ADDR' (aka 'in_addr') to 'in_addr *' for 1st argument}}
+ in_addr(in_addr *a) {} // precxx17-note {{candidate constructor not viable: no known conversion from 'IN_ADDR' (aka 'struct in_addr') to 'in_addr *' for 1st argument}}
} IN_ADDR;
void f(IN_ADDR __unaligned *a) {
IN_ADDR local_addr = *a;
// FIXME: MSVC accepts the following; not sure why clang tries to
// copy-construct an in_addr.
- IN_ADDR local_addr2 = a; // precxx17-error {{no viable constructor copying variable of type 'IN_ADDR' (aka 'in_addr')}}
- // expected-warning@-1 {{implicit cast from type '__unaligned IN_ADDR *' (aka '__unaligned in_addr *') to type 'in_addr *' drops __unaligned qualifier}}
+ IN_ADDR local_addr2 = a; // precxx17-error {{no viable constructor copying variable of type 'IN_ADDR' (aka 'struct in_addr')}}
+ // expected-warning@-1 {{implicit cast from type '__unaligned IN_ADDR *' (aka '__unaligned struct in_addr *') to type 'in_addr *' drops __unaligned qualifier}}
IN_ADDR local_addr3(a);
- // expected-warning@-1 {{implicit cast from type '__unaligned IN_ADDR *' (aka '__unaligned in_addr *') to type 'in_addr *' drops __unaligned qualifier}}
+ // expected-warning@-1 {{implicit cast from type '__unaligned IN_ADDR *' (aka '__unaligned struct in_addr *') to type 'in_addr *' drops __unaligned qualifier}}
}
template<typename T> void h1(T (__stdcall M::* const )()) { }
diff --git a/clang/test/SemaCXX/builtin-get-vtable-pointer.cpp b/clang/test/SemaCXX/builtin-get-vtable-pointer.cpp
index 273f9c3..b04b38d 100644
--- a/clang/test/SemaCXX/builtin-get-vtable-pointer.cpp
+++ b/clang/test/SemaCXX/builtin-get-vtable-pointer.cpp
@@ -53,7 +53,7 @@ const void *getThing(const Bar<T> *b = nullptr) {
return __builtin_get_vtable_pointer(b->ty()); // expected-error{{__builtin_get_vtable_pointer requires an argument of class pointer type, but 'SubType *' (aka 'int *') was provided}}
// expected-error@-1{{__builtin_get_vtable_pointer requires an argument of polymorphic class pointer type, but 'Thing1' has no virtual methods}}
// expected-error@-2{{__builtin_get_vtable_pointer requires an argument of polymorphic class pointer type, but 'NonPolymorphic' has no virtual methods}}
- // expected-error@-3{{__builtin_get_vtable_pointer requires an argument with a complete type, but 'SubType' (aka 'basic::ForwardDeclaration') is incomplete}}
+ // expected-error@-3{{__builtin_get_vtable_pointer requires an argument with a complete type, but 'SubType' (aka 'ForwardDeclaration') is incomplete}}
}
template <typename>
struct IncompleteTemplate; // expected-note{{template is declared here}}
diff --git a/clang/test/SemaCXX/class-base-member-init.cpp b/clang/test/SemaCXX/class-base-member-init.cpp
index f5489e2..29b4545 100644
--- a/clang/test/SemaCXX/class-base-member-init.cpp
+++ b/clang/test/SemaCXX/class-base-member-init.cpp
@@ -83,7 +83,7 @@ namespace test5 {
A() : decltype(Base(1))(3) {
}
A(int) : Base(3), // expected-note {{previous initialization is here}}
- decltype(Base(1))(2), // expected-error {{multiple initializations given for base 'decltype(Base(1))' (aka 'test5::Base')}}
+ decltype(Base(1))(2), // expected-error {{multiple initializations given for base 'decltype(Base(1))' (aka 'Base')}}
decltype(int())() { // expected-error {{constructor initializer 'decltype(int())' (aka 'int') does not name a class}}
}
A(float) : decltype(A())(3) {
diff --git a/clang/test/SemaCXX/co_await-ast.cpp b/clang/test/SemaCXX/co_await-ast.cpp
index f792a2c..5be2004 100644
--- a/clang/test/SemaCXX/co_await-ast.cpp
+++ b/clang/test/SemaCXX/co_await-ast.cpp
@@ -48,9 +48,9 @@ awaitable foo() {
// CHECK: | `-ExprWithCleanups {{.*}} 'void'
// CHECK: | `-CoawaitExpr {{.*}} 'void'
// CHECK: | |-CXXTemporaryObjectExpr {{.*}} 'executor' 'void (){{.*}} noexcept' zeroing
-// CHECK: | |-MaterializeTemporaryExpr {{.*}} 'result_t':'awaitable_frame::result_t' lvalue
-// CHECK: | | `-CXXBindTemporaryExpr {{.*}} 'result_t':'awaitable_frame::result_t' (CXXTemporary {{.*}})
-// CHECK: | | `-CXXMemberCallExpr {{.*}} 'result_t':'awaitable_frame::result_t'
+// CHECK: | |-MaterializeTemporaryExpr {{.*}} 'result_t' lvalue
+// CHECK: | | `-CXXBindTemporaryExpr {{.*}} 'result_t' (CXXTemporary {{.*}})
+// CHECK: | | `-CXXMemberCallExpr {{.*}} 'result_t'
// CHECK: | | |-MemberExpr {{.*}} '<bound member function type>' .await_transform {{.*}}
// CHECK: | | | `-DeclRefExpr {{.*}} 'std::coroutine_traits<awaitable>::promise_type':'awaitable_frame' lvalue Var {{.*}} '__promise' 'std::coroutine_traits<awaitable>::promise_type':'awaitable_frame'
// CHECK: | | `-CXXTemporaryObjectExpr {{.*}} 'executor' 'void (){{.*}} noexcept' zeroing
@@ -58,27 +58,27 @@ awaitable foo() {
// CHECK: | | `-CXXMemberCallExpr {{.*}} 'bool'
// CHECK: | | `-MemberExpr {{.*}} '<bound member function type>' .await_ready {{.*}}
// CHECK: | | `-ImplicitCastExpr {{.*}} 'const awaitable_frame::result_t' lvalue <NoOp>
-// CHECK: | | `-OpaqueValueExpr {{.*}} 'result_t':'awaitable_frame::result_t' lvalue
-// CHECK: | | `-MaterializeTemporaryExpr {{.*}} 'result_t':'awaitable_frame::result_t' lvalue
-// CHECK: | | `-CXXBindTemporaryExpr {{.*}} 'result_t':'awaitable_frame::result_t' (CXXTemporary {{.*}})
-// CHECK: | | `-CXXMemberCallExpr {{.*}} 'result_t':'awaitable_frame::result_t'
+// CHECK: | | `-OpaqueValueExpr {{.*}} 'result_t' lvalue
+// CHECK: | | `-MaterializeTemporaryExpr {{.*}} 'result_t' lvalue
+// CHECK: | | `-CXXBindTemporaryExpr {{.*}} 'result_t' (CXXTemporary {{.*}})
+// CHECK: | | `-CXXMemberCallExpr {{.*}} 'result_t'
// CHECK: | | |-MemberExpr {{.*}} '<bound member function type>' .await_transform {{.*}}
// CHECK: | | | `-DeclRefExpr {{.*}} 'std::coroutine_traits<awaitable>::promise_type':'awaitable_frame' lvalue Var {{.*}} '__promise' 'std::coroutine_traits<awaitable>::promise_type':'awaitable_frame'
// CHECK: | | `-CXXTemporaryObjectExpr {{.*}} 'executor' 'void (){{.*}} noexcept' zeroing
// CHECK: | |-ExprWithCleanups {{.*}} 'void'
// CHECK: | | `-CXXMemberCallExpr {{.*}} 'void'
// CHECK: | | |-MemberExpr {{.*}} '<bound member function type>' .await_suspend {{.*}}
-// CHECK: | | | `-OpaqueValueExpr {{.*}} 'result_t':'awaitable_frame::result_t' lvalue
-// CHECK: | | | `-MaterializeTemporaryExpr {{.*}} 'result_t':'awaitable_frame::result_t' lvalue
-// CHECK: | | | `-CXXBindTemporaryExpr {{.*}} 'result_t':'awaitable_frame::result_t' (CXXTemporary {{.*}})
-// CHECK: | | | `-CXXMemberCallExpr {{.*}} 'result_t':'awaitable_frame::result_t'
+// CHECK: | | | `-OpaqueValueExpr {{.*}} 'result_t' lvalue
+// CHECK: | | | `-MaterializeTemporaryExpr {{.*}} 'result_t' lvalue
+// CHECK: | | | `-CXXBindTemporaryExpr {{.*}} 'result_t' (CXXTemporary {{.*}})
+// CHECK: | | | `-CXXMemberCallExpr {{.*}} 'result_t'
// CHECK: | | | |-MemberExpr {{.*}} '<bound member function type>' .await_transform {{.*}}
// CHECK: | | | | `-DeclRefExpr {{.*}} 'std::coroutine_traits<awaitable>::promise_type':'awaitable_frame' lvalue Var {{.*}} '__promise' 'std::coroutine_traits<awaitable>::promise_type':'awaitable_frame'
// CHECK: | | | `-CXXTemporaryObjectExpr {{.*}} 'executor' 'void (){{.*}} noexcept' zeroing
// CHECK: | | `-ImplicitCastExpr {{.*}} 'std::coroutine_handle<void>' <ConstructorConversion>
// CHECK: | | `-CXXConstructExpr {{.*}} 'std::coroutine_handle<void>' 'void (coroutine_handle<awaitable_frame> &&){{.*}} noexcept'
-// CHECK: | | `-MaterializeTemporaryExpr {{.*}} 'coroutine_handle<awaitable_frame>':'std::coroutine_handle<awaitable_frame>' xvalue
-// CHECK: | | `-CallExpr {{.*}} 'coroutine_handle<awaitable_frame>':'std::coroutine_handle<awaitable_frame>'
+// CHECK: | | `-MaterializeTemporaryExpr {{.*}} 'coroutine_handle<awaitable_frame>' xvalue
+// CHECK: | | `-CallExpr {{.*}} 'coroutine_handle<awaitable_frame>'
// CHECK: | | |-ImplicitCastExpr {{.*}} 'coroutine_handle<awaitable_frame> (*)(void *) noexcept' <FunctionToPointerDecay>
// CHECK: | | | `-DeclRefExpr {{.*}} 'coroutine_handle<awaitable_frame> (void *) noexcept' lvalue CXXMethod {{.*}} 'from_address' 'coroutine_handle<awaitable_frame> (void *) noexcept'
// CHECK: | | `-CallExpr {{.*}} 'void *'
@@ -87,10 +87,10 @@ awaitable foo() {
// CHECK: | `-CXXMemberCallExpr {{.*}} 'void'
// CHECK: | `-MemberExpr {{.*}} '<bound member function type>' .await_resume {{.*}}
// CHECK: | `-ImplicitCastExpr {{.*}} 'const awaitable_frame::result_t' lvalue <NoOp>
-// CHECK: | `-OpaqueValueExpr {{.*}} 'result_t':'awaitable_frame::result_t' lvalue
-// CHECK: | `-MaterializeTemporaryExpr {{.*}} 'result_t':'awaitable_frame::result_t' lvalue
-// CHECK: | `-CXXBindTemporaryExpr {{.*}} 'result_t':'awaitable_frame::result_t' (CXXTemporary {{.*}})
-// CHECK: | `-CXXMemberCallExpr {{.*}} 'result_t':'awaitable_frame::result_t'
+// CHECK: | `-OpaqueValueExpr {{.*}} 'result_t' lvalue
+// CHECK: | `-MaterializeTemporaryExpr {{.*}} 'result_t' lvalue
+// CHECK: | `-CXXBindTemporaryExpr {{.*}} 'result_t' (CXXTemporary {{.*}})
+// CHECK: | `-CXXMemberCallExpr {{.*}} 'result_t'
// CHECK: | |-MemberExpr {{.*}} '<bound member function type>' .await_transform {{.*}}
// CHECK: | | `-DeclRefExpr {{.*}} 'std::coroutine_traits<awaitable>::promise_type':'awaitable_frame' lvalue Var {{.*}} '__promise' 'std::coroutine_traits<awaitable>::promise_type':'awaitable_frame'
// CHECK: | `-CXXTemporaryObjectExpr {{.*}} <col:12, col:21> 'executor' 'void (){{.*}} noexcept' zeroing
diff --git a/clang/test/SemaCXX/compound-literal.cpp b/clang/test/SemaCXX/compound-literal.cpp
index 9c7c606..4b975a0 100644
--- a/clang/test/SemaCXX/compound-literal.cpp
+++ b/clang/test/SemaCXX/compound-literal.cpp
@@ -37,8 +37,8 @@ namespace brace_initializers {
POD p = (POD){1, 2};
// CHECK-NOT: CXXBindTemporaryExpr {{.*}} 'brace_initializers::POD'
- // CHECK: CompoundLiteralExpr {{.*}} 'POD':'brace_initializers::POD'
- // CHECK-NEXT: InitListExpr {{.*}} 'POD':'brace_initializers::POD'
+ // CHECK: CompoundLiteralExpr {{.*}} 'POD'{{$}}
+ // CHECK-NEXT: InitListExpr {{.*}} 'POD'{{$}}
// CHECK-NEXT: ConstantExpr {{.*}}
// CHECK-NEXT: IntegerLiteral {{.*}} 1{{$}}
// CHECK-NEXT: ConstantExpr {{.*}}
@@ -46,34 +46,34 @@ namespace brace_initializers {
void test() {
(void)(POD){1, 2};
- // CHECK-NOT: CXXBindTemporaryExpr {{.*}} 'POD':'brace_initializers::POD'
- // CHECK-NOT: ConstantExpr {{.*}} 'POD':'brace_initializers::POD'
- // CHECK: CompoundLiteralExpr {{.*}} 'POD':'brace_initializers::POD'
- // CHECK-NEXT: InitListExpr {{.*}} 'POD':'brace_initializers::POD'
+ // CHECK-NOT: CXXBindTemporaryExpr {{.*}} 'POD'
+ // CHECK-NOT: ConstantExpr {{.*}} 'POD'
+ // CHECK: CompoundLiteralExpr {{.*}} 'POD'{{$}}
+ // CHECK-NEXT: InitListExpr {{.*}} 'POD'{{$}}
// CHECK-NEXT: IntegerLiteral {{.*}} 1{{$}}
// CHECK-NEXT: IntegerLiteral {{.*}} 2{{$}}
(void)(HasDtor){1, 2};
- // CHECK: CXXBindTemporaryExpr {{.*}} 'HasDtor':'brace_initializers::HasDtor'
- // CHECK-NEXT: CompoundLiteralExpr {{.*}} 'HasDtor':'brace_initializers::HasDtor'
- // CHECK-NEXT: InitListExpr {{.*}} 'HasDtor':'brace_initializers::HasDtor'
+ // CHECK: CXXBindTemporaryExpr {{.*}} 'HasDtor'
+ // CHECK-NEXT: CompoundLiteralExpr {{.*}} 'HasDtor'{{$}}
+ // CHECK-NEXT: InitListExpr {{.*}} 'HasDtor'{{$}}
// CHECK-NEXT: IntegerLiteral {{.*}} 1{{$}}
// CHECK-NEXT: IntegerLiteral {{.*}} 2{{$}}
#if __cplusplus >= 201103L
(void)(HasCtor){1, 2};
- // CHECK-CXX11-NOT: CXXBindTemporaryExpr {{.*}} 'HasCtor':'brace_initializers::HasCtor'
- // CHECK-CXX11-NOT: ConstantExpr {{.*}} 'HasCtor':'brace_initializers::HasCtor'
- // CHECK-CXX11: CompoundLiteralExpr {{.*}} 'HasCtor':'brace_initializers::HasCtor'
- // CHECK-CXX11-NEXT: CXXTemporaryObjectExpr {{.*}} 'HasCtor':'brace_initializers::HasCtor'
+ // CHECK-CXX11-NOT: CXXBindTemporaryExpr {{.*}} 'HasCtor'
+ // CHECK-CXX11-NOT: ConstantExpr {{.*}} 'HasCtor'
+ // CHECK-CXX11: CompoundLiteralExpr {{.*}} 'HasCtor'{{$}}
+ // CHECK-CXX11-NEXT: CXXTemporaryObjectExpr {{.*}} 'HasCtor'
// CHECK-CXX11-NEXT: IntegerLiteral {{.*}} 1{{$}}
// CHECK-CXX11-NEXT: IntegerLiteral {{.*}} 2{{$}}
(void)(HasCtorDtor){1, 2};
- // CHECK-CXX11: CXXBindTemporaryExpr {{.*}} 'HasCtorDtor':'brace_initializers::HasCtorDtor'
- // CHECK-CXX11-NOT: ConstantExpr {{.*}} 'HasCtorDtor':'brace_initializers::HasCtorDtor'
- // CHECK-CXX11: CompoundLiteralExpr {{.*}} 'HasCtorDtor':'brace_initializers::HasCtorDtor'
- // CHECK-CXX11-NEXT: CXXTemporaryObjectExpr {{.*}} 'HasCtorDtor':'brace_initializers::HasCtorDtor'
+ // CHECK-CXX11: CXXBindTemporaryExpr {{.*}} 'HasCtorDtor'
+ // CHECK-CXX11-NOT: ConstantExpr {{.*}} 'HasCtorDtor'
+ // CHECK-CXX11: CompoundLiteralExpr {{.*}} 'HasCtorDtor'{{$}}
+ // CHECK-CXX11-NEXT: CXXTemporaryObjectExpr {{.*}} 'HasCtorDtor'
// CHECK-CXX11-NEXT: IntegerLiteral {{.*}} 1{{$}}
// CHECK-CXX11-NEXT: IntegerLiteral {{.*}} 2{{$}}
#endif
diff --git a/clang/test/SemaCXX/constant-expression.cpp b/clang/test/SemaCXX/constant-expression.cpp
index cc041a4..ef48ee5 100644
--- a/clang/test/SemaCXX/constant-expression.cpp
+++ b/clang/test/SemaCXX/constant-expression.cpp
@@ -149,7 +149,7 @@ namespace PR31701 {
};
template <int M> class D;
template <int M>
- template<int i> void D<M>::set() { // expected-error {{from class 'D<M>' without definition}}
+ template<int i> void D<M>::set() { // expected-error {{from class 'PR31701::D<M>' without definition}}
const C c = C::n<i>;
}
}
diff --git a/clang/test/SemaCXX/constructor.cpp b/clang/test/SemaCXX/constructor.cpp
index b0b580b..b278fd5 100644
--- a/clang/test/SemaCXX/constructor.cpp
+++ b/clang/test/SemaCXX/constructor.cpp
@@ -92,7 +92,7 @@ namespace PR38286 {
template<typename T> A<T>::A() {} // expected-error {{incomplete type 'A' named in nested name specifier}}
/*FIXME: needed to recover properly from previous error*/;
template<typename> struct B;
- template<typename T> void B<T>::f() {} // expected-error {{out-of-line definition of 'f' from class 'B<type-parameter-0-0>'}}
+ template<typename T> void B<T>::f() {} // expected-error {{out-of-line definition of 'f' from class 'PR38286::B<type-parameter-0-0>'}}
template<typename> struct C; // expected-note {{non-type declaration found}}
template<typename T> C<T>::~C() {} // expected-error {{identifier 'C' after '~' in destructor name does not name a type}}
}
diff --git a/clang/test/SemaCXX/coroutine-allocs.cpp b/clang/test/SemaCXX/coroutine-allocs.cpp
index cce56de..e6b086bd 100644
--- a/clang/test/SemaCXX/coroutine-allocs.cpp
+++ b/clang/test/SemaCXX/coroutine-allocs.cpp
@@ -19,7 +19,7 @@ struct resumable {
};
};
-resumable f1() { // expected-error {{'operator new' provided by 'std::coroutine_traits<resumable>::promise_type' (aka 'resumable::promise_type') is not usable with the function signature of 'f1'}}
+resumable f1() { // expected-error {{'operator new' provided by 'std::coroutine_traits<resumable>::promise_type' (aka 'typename resumable::promise_type') is not usable with the function signature of 'f1'}}
co_return;
}
@@ -52,11 +52,11 @@ resumable f4(Allocator) {
co_return;
}
-resumable f5(const Allocator) { // expected-error {{operator new' provided by 'std::coroutine_traits<resumable, const Allocator>::promise_type' (aka 'resumable::promise_type') is not usable}}
+resumable f5(const Allocator) { // expected-error {{operator new' provided by 'std::coroutine_traits<resumable, const Allocator>::promise_type' (aka 'typename resumable::promise_type') is not usable}}
co_return;
}
-resumable f6(const Allocator &) { // expected-error {{operator new' provided by 'std::coroutine_traits<resumable, const Allocator &>::promise_type' (aka 'resumable::promise_type') is not usable}}
+resumable f6(const Allocator &) { // expected-error {{operator new' provided by 'std::coroutine_traits<resumable, const Allocator &>::promise_type' (aka 'typename resumable::promise_type') is not usable}}
co_return;
}
diff --git a/clang/test/SemaCXX/coroutine-traits-undefined-template.cpp b/clang/test/SemaCXX/coroutine-traits-undefined-template.cpp
index ea25cea..e7de0c8 100644
--- a/clang/test/SemaCXX/coroutine-traits-undefined-template.cpp
+++ b/clang/test/SemaCXX/coroutine-traits-undefined-template.cpp
@@ -14,5 +14,5 @@ template <> struct coroutine_traits<void>; // expected-note {{forward declaratio
} // namespace std
void uses_forward_declaration() {
- co_return; // expected-error {{this function cannot be a coroutine: missing definition of specialization 'coroutine_traits<void>'}}
+ co_return; // expected-error {{this function cannot be a coroutine: missing definition of specialization 'std::coroutine_traits<void>'}}
}
diff --git a/clang/test/SemaCXX/coroutines.cpp b/clang/test/SemaCXX/coroutines.cpp
index c9cefeb..098c1c2 100644
--- a/clang/test/SemaCXX/coroutines.cpp
+++ b/clang/test/SemaCXX/coroutines.cpp
@@ -1396,7 +1396,7 @@ struct bad_promise_deleted_constructor {
coro<bad_promise_deleted_constructor>
bad_coroutine_calls_deleted_promise_constructor() {
- // expected-error@-1 {{call to deleted constructor of 'std::coroutine_traits<coro<CoroHandleMemberFunctionTest::bad_promise_deleted_constructor>>::promise_type' (aka 'CoroHandleMemberFunctionTest::bad_promise_deleted_constructor')}}
+ // expected-error@-1 {{call to deleted constructor of 'std::coroutine_traits<coro<bad_promise_deleted_constructor>>::promise_type' (aka 'CoroHandleMemberFunctionTest::bad_promise_deleted_constructor')}}
co_return;
}
@@ -1463,7 +1463,7 @@ struct bad_promise_no_matching_constructor {
coro<bad_promise_no_matching_constructor>
bad_coroutine_calls_with_no_matching_constructor(int, int) {
- // expected-error@-1 {{call to deleted constructor of 'std::coroutine_traits<coro<CoroHandleMemberFunctionTest::bad_promise_no_matching_constructor>, int, int>::promise_type' (aka 'CoroHandleMemberFunctionTest::bad_promise_no_matching_constructor')}}
+ // expected-error@-1 {{call to deleted constructor of 'std::coroutine_traits<coro<bad_promise_no_matching_constructor>, int, int>::promise_type' (aka 'CoroHandleMemberFunctionTest::bad_promise_no_matching_constructor')}}
co_return;
}
diff --git a/clang/test/SemaCXX/ctad.cpp b/clang/test/SemaCXX/ctad.cpp
index 50b64e3..8380b56 100644
--- a/clang/test/SemaCXX/ctad.cpp
+++ b/clang/test/SemaCXX/ctad.cpp
@@ -186,7 +186,7 @@ namespace GH136624 {
template<class Y> using Alias = A<Y>;
}
- // FIXME: This diagnostic prints incorrect qualification for `A<int>`.
+ // FIXME: This diagnostic is missing 'foo::Alias', as written.
foo::Alias t = 0;
- // expected-error@-1 {{no viable conversion from 'int' to 'foo::A<int>' (aka 'A<int>')}}
+ // expected-error@-1 {{no viable conversion from 'int' to 'GH136624::A<int>' (aka 'A<int>')}}
} // namespace GH136624
diff --git a/clang/test/SemaCXX/cxx1y-variable-templates_in_class.cpp b/clang/test/SemaCXX/cxx1y-variable-templates_in_class.cpp
index 57a48fa..e18223d 100644
--- a/clang/test/SemaCXX/cxx1y-variable-templates_in_class.cpp
+++ b/clang/test/SemaCXX/cxx1y-variable-templates_in_class.cpp
@@ -302,17 +302,17 @@ namespace in_class_template {
};
template<typename T> void f() {
- typename T::template A<int> a; // expected-error {{template name refers to non-type template 'S::template A'}}
+ typename T::template A<int> a; // expected-error {{template name refers to non-type template 'in_class_template::bad_reference::S::template A'}}
}
template<typename T> void g() {
- T::template A<int>::B = 0; // expected-error {{template name refers to non-type template 'S::template A'}}
+ T::template A<int>::B = 0; // expected-error {{template name refers to non-type template 'in_class_template::bad_reference::S::template A'}}
}
template<typename T> void h() {
- class T::template A<int> c; // expected-error {{template name refers to non-type template 'S::template A'}}
+ class T::template A<int> c; // expected-error {{template name refers to non-type template 'in_class_template::bad_reference::S::template A'}}
}
template<typename T>
- struct X : T::template A<int> {}; // expected-error {{template name refers to non-type template 'S::template A'}}
+ struct X : T::template A<int> {}; // expected-error {{template name refers to non-type template 'in_class_template::bad_reference::S::template A'}}
template void f<S>(); // expected-note {{in instantiation of}}
template void g<S>(); // expected-note {{in instantiation of}}
@@ -393,7 +393,7 @@ namespace dependent_static_var_template {
int &r = A::template n; // expected-error {{use of variable template 'A::template n' requires template arguments}} expected-error {{a template argument list is expected after a name prefixed by the template keyword}}
template<typename T>
- int &f() { return T::template n; } // expected-error {{use of variable template 'A::template n' requires template arguments}} expected-error {{a template argument list is expected after a name prefixed by the template keyword}}
+ int &f() { return T::template n; } // expected-error {{use of variable template 'dependent_static_var_template::A::template n' requires template arguments}} expected-error {{a template argument list is expected after a name prefixed by the template keyword}}
int &s = f<A>(); // expected-note {{instantiation of}}
namespace B {
diff --git a/clang/test/SemaCXX/cxx1z-class-template-argument-deduction.cpp b/clang/test/SemaCXX/cxx1z-class-template-argument-deduction.cpp
index 8745185..204dd9b 100644
--- a/clang/test/SemaCXX/cxx1z-class-template-argument-deduction.cpp
+++ b/clang/test/SemaCXX/cxx1z-class-template-argument-deduction.cpp
@@ -142,13 +142,13 @@ namespace look_into_current_instantiation {
// templates, and members of the current instantiation
A<float> &r = a;
- template<typename T> struct B { // expected-note {{could not match 'B<T>' against 'int'}} \
- // expected-note {{implicit deduction guide declared as 'template <typename T> B(B<T>) -> B<T>'}}
+ template<typename T> struct B { // expected-note {{could not match 'look_into_current_instantiation::B<T>' against 'int'}} \
+ // expected-note {{implicit deduction guide declared as 'template <typename T> B(look_into_current_instantiation::B<T>) -> look_into_current_instantiation::B<T>'}}
struct X {
typedef T type;
};
B(typename X::type); // expected-note {{couldn't infer template argument 'T'}} \
- // expected-note {{implicit deduction guide declared as 'template <typename T> B(typename X::type) -> B<T>'}}
+ // expected-note {{implicit deduction guide declared as 'template <typename T> B(typename X::type) -> look_into_current_instantiation::B<T>'}}
};
B b = 0; // expected-error {{no viable}}
diff --git a/clang/test/SemaCXX/cxx23-invalid-constexpr.cpp b/clang/test/SemaCXX/cxx23-invalid-constexpr.cpp
index 3229a91..1c832e5 100644
--- a/clang/test/SemaCXX/cxx23-invalid-constexpr.cpp
+++ b/clang/test/SemaCXX/cxx23-invalid-constexpr.cpp
@@ -1,4 +1,5 @@
// RUN: %clang_cc1 -fsyntax-only -verify=expected -std=c++23 %s
+// RUN: %clang_cc1 -fsyntax-only -verify=expected -std=c++23 %s -fexperimental-new-constant-interpreter
// This test covers modifications made by P2448R2.
diff --git a/clang/test/SemaCXX/cxx2a-destroying-delete.cpp b/clang/test/SemaCXX/cxx2a-destroying-delete.cpp
index 27ea666..812fd78 100644
--- a/clang/test/SemaCXX/cxx2a-destroying-delete.cpp
+++ b/clang/test/SemaCXX/cxx2a-destroying-delete.cpp
@@ -138,7 +138,7 @@ namespace templated {
void operator delete(id_alias<C> *, std::destroying_delete_t);
};
template<typename T> struct D {
- void operator delete(typename id_struct<D>::type *, std::destroying_delete_t); // expected-error {{use 'D<T> *'}}
+ void operator delete(typename id_struct<D>::type *, std::destroying_delete_t); // expected-error {{use 'templated::D<T> *'}}
};
}
diff --git a/clang/test/SemaCXX/cxx2b-deducing-this.cpp b/clang/test/SemaCXX/cxx2b-deducing-this.cpp
index fcbe0f6..74b3573 100644
--- a/clang/test/SemaCXX/cxx2b-deducing-this.cpp
+++ b/clang/test/SemaCXX/cxx2b-deducing-this.cpp
@@ -96,12 +96,12 @@ struct Test {
void test() {
[i = 0](this Test) { }();
- // expected-error@-1 {{invalid explicit object parameter type 'ThisInLambdaWithCaptures::Test' in lambda with capture; the type must be the same as, or derived from, the lambda}}
+ // expected-error@-1 {{invalid explicit object parameter type 'Test' in lambda with capture; the type must be the same as, or derived from, the lambda}}
struct Derived;
auto ok = [i = 0](this const Derived&) {};
auto ko = [i = 0](this const Test&) {};
- // expected-error@-1 {{invalid explicit object parameter type 'ThisInLambdaWithCaptures::Test' in lambda with capture; the type must be the same as, or derived from, the lambda}}
+ // expected-error@-1 {{invalid explicit object parameter type 'Test' in lambda with capture; the type must be the same as, or derived from, the lambda}}
struct Derived : decltype(ok){};
Derived dok{ok};
diff --git a/clang/test/SemaCXX/cxx2c-variadic-friends.cpp b/clang/test/SemaCXX/cxx2c-variadic-friends.cpp
index a4d7c80..0b01907 100644
--- a/clang/test/SemaCXX/cxx2c-variadic-friends.cpp
+++ b/clang/test/SemaCXX/cxx2c-variadic-friends.cpp
@@ -145,7 +145,7 @@ class S {
template <typename U>
struct T {
static_assert(S<U, T>::a == 42);
- static_assert(S<U, T>::a == 43); // expected-error {{static assertion failed due to requirement 'S<int, template_template::T>::a == 43'}} \
+ static_assert(S<U, T>::a == 43); // expected-error {{static assertion failed due to requirement 'template_template::S<int, template_template::T>::a == 43'}} \
// expected-note {{expression evaluates to '42 == 43'}}
};
diff --git a/clang/test/SemaCXX/destructor.cpp b/clang/test/SemaCXX/destructor.cpp
index b9e0b17..bc47d87 100644
--- a/clang/test/SemaCXX/destructor.cpp
+++ b/clang/test/SemaCXX/destructor.cpp
@@ -431,15 +431,15 @@ namespace PR9238 {
}
namespace PR7900 {
- struct A { // expected-note 2{{type 'PR7900::A' found by destructor name lookup}}
+ struct A { // expected-note 2{{type 'A' found by destructor name lookup}}
};
struct B : public A {
};
void foo() {
B b;
b.~B();
- b.~A(); // expected-error{{destructor type 'PR7900::A' in object destruction expression does not match the type 'B' of the object being destroyed}}
- (&b)->~A(); // expected-error{{destructor type 'PR7900::A' in object destruction expression does not match the type 'B' of the object being destroyed}}
+ b.~A(); // expected-error{{destructor type 'A' in object destruction expression does not match the type 'B' of the object being destroyed}}
+ (&b)->~A(); // expected-error{{destructor type 'A' in object destruction expression does not match the type 'B' of the object being destroyed}}
}
}
diff --git a/clang/test/SemaCXX/elaborated-type-specifier.cpp b/clang/test/SemaCXX/elaborated-type-specifier.cpp
index a96e696..a80ba07 100644
--- a/clang/test/SemaCXX/elaborated-type-specifier.cpp
+++ b/clang/test/SemaCXX/elaborated-type-specifier.cpp
@@ -27,7 +27,7 @@ namespace NS {
void test_X_elab(NS::X x) {
struct S4 *s4 = 0; // expected-note{{'S4' is not defined, but forward declared here; conversion would be valid if it was derived from 'NS::S4'}}
- x.test_elab2(s4); // expected-error{{cannot initialize a parameter of type 'S4 *' (aka 'NS::S4 *') with an lvalue of type 'struct S4 *'}}
+ x.test_elab2(s4); // expected-error{{cannot initialize a parameter of type 'S4 *' with an lvalue of type 'struct S4 *'}}
}
namespace NS {
diff --git a/clang/test/SemaCXX/enum-scoped.cpp b/clang/test/SemaCXX/enum-scoped.cpp
index 2d7b3c9..bf43f292 100644
--- a/clang/test/SemaCXX/enum-scoped.cpp
+++ b/clang/test/SemaCXX/enum-scoped.cpp
@@ -334,7 +334,7 @@ namespace test11 {
typedef E E2;
E2 f1() { return E::a; }
- bool f() { return !f1(); } // expected-error {{invalid argument type 'E2' (aka 'test11::E') to unary expression}}
+ bool f() { return !f1(); } // expected-error {{invalid argument type 'E2' (aka 'E') to unary expression}}
}
namespace PR35586 {
diff --git a/clang/test/SemaCXX/err_init_conversion_failed.cpp b/clang/test/SemaCXX/err_init_conversion_failed.cpp
index f1949c0..e2617a1 100644
--- a/clang/test/SemaCXX/err_init_conversion_failed.cpp
+++ b/clang/test/SemaCXX/err_init_conversion_failed.cpp
@@ -56,6 +56,6 @@ template <class P> struct S2 {
void test_15() {
S2<S> X = {&S::foo};
- // expected-error-re@-1{{cannot initialize a member subobject of type 'void (S::*)(const int &){{( __attribute__\(\(thiscall\)\))?}}' with an rvalue of type 'void (S::*)(int){{( __attribute__\(\(thiscall\)\))?}}': type mismatch at 1st parameter ('const int &' vs 'int')}}
+ // expected-error-re@-1{{cannot initialize a member subobject of type 'void (template_test::S::*)(const int &){{( __attribute__\(\(thiscall\)\))?}}' with an rvalue of type 'void (S::*)(int){{( __attribute__\(\(thiscall\)\))?}}': type mismatch at 1st parameter ('const int &' vs 'int')}}
}
}
diff --git a/clang/test/SemaCXX/gh102293.cpp b/clang/test/SemaCXX/gh102293.cpp
index fe417e6..37b674d 100644
--- a/clang/test/SemaCXX/gh102293.cpp
+++ b/clang/test/SemaCXX/gh102293.cpp
@@ -35,7 +35,7 @@ class bar { // expected-note {{definition of 'GH104802::bar' is not comple
class baz { // expected-note {{definition of 'GH104802::baz' is not complete until the closing '}'}}
typedef class baz blech;
- blech a; // expected-error {{field has incomplete type 'blech' (aka 'GH104802::baz')}}
+ blech a; // expected-error {{field has incomplete type 'blech' (aka 'class baz')}}
virtual int c();
};
diff --git a/clang/test/SemaCXX/incomplete-call.cpp b/clang/test/SemaCXX/incomplete-call.cpp
index 0846c88..8f24d45 100644
--- a/clang/test/SemaCXX/incomplete-call.cpp
+++ b/clang/test/SemaCXX/incomplete-call.cpp
@@ -59,7 +59,7 @@ namespace pr18542 {
int count;
template<typename CharT> class basic_istream;
template<typename CharT>
- void basic_istream<CharT>::read() { // expected-error{{out-of-line definition of 'read' from class 'basic_istream<CharT>' without definition}}
+ void basic_istream<CharT>::read() { // expected-error{{out-of-line definition of 'read' from class 'pr18542::X::basic_istream<CharT>' without definition}}
count = 0;
}
};
diff --git a/clang/test/SemaCXX/matrix-casts.cpp b/clang/test/SemaCXX/matrix-casts.cpp
index c0f3df1..708b3fa 100644
--- a/clang/test/SemaCXX/matrix-casts.cpp
+++ b/clang/test/SemaCXX/matrix-casts.cpp
@@ -35,7 +35,7 @@ void f1() {
(matrix_4_4<char>)v; // expected-error {{C-style cast from 'vec' (vector of 1 'int' value) to 'matrix_4_4<char>' (aka 'char __attribute__((matrix_type(4, 4)))') is not allowed}}
(test_struct *)m1; // expected-error {{cannot cast from type 'matrix_4_4<char>' (aka 'char __attribute__((matrix_type(4, 4)))') to pointer type 'test_struct *'}}
- (matrix_5_5<float>)s; // expected-error {{C-style cast from 'test_struct *' to 'matrix_5_5<float>' (aka 'float __attribute__((matrix_type(5, 5)))') is not allowed}}
+ (matrix_5_5<float>)s; // expected-error {{C-style cast from 'test_struct *' (aka 'struct test_struct *') to 'matrix_5_5<float>' (aka 'float __attribute__((matrix_type(5, 5)))') is not allowed}}
}
void f2() {
@@ -59,7 +59,7 @@ void f2() {
static_cast<matrix_4_4<char>>(v); // expected-error {{static_cast from 'vec' (vector of 1 'int' value) to 'matrix_4_4<char>' (aka 'char __attribute__((matrix_type(4, 4)))') is not allowed}}
static_cast<test_struct *>(m1); // expected-error {{cannot cast from type 'matrix_4_4<char>' (aka 'char __attribute__((matrix_type(4, 4)))') to pointer type 'test_struct *'}}
- static_cast<matrix_5_5<float>>(s); // expected-error {{static_cast from 'test_struct *' to 'matrix_5_5<float>' (aka 'float __attribute__((matrix_type(5, 5)))') is not allowed}}
+ static_cast<matrix_5_5<float>>(s); // expected-error {{static_cast from 'test_struct *' (aka 'struct test_struct *') to 'matrix_5_5<float>' (aka 'float __attribute__((matrix_type(5, 5)))') is not allowed}}
}
void f3() {
diff --git a/clang/test/SemaCXX/nested-name-spec.cpp b/clang/test/SemaCXX/nested-name-spec.cpp
index fedbb30..c60275b 100644
--- a/clang/test/SemaCXX/nested-name-spec.cpp
+++ b/clang/test/SemaCXX/nested-name-spec.cpp
@@ -15,7 +15,9 @@ namespace A {
static int Ag1();
static int Ag2();
};
- int ax; // expected-note {{'ax' declared here}}
+ int ax;
+ // expected-note@-1 {{'ax' declared here}}
+ // expected-note@-2 {{'::A::ax' declared here}}
void Af();
}
@@ -100,7 +102,7 @@ void f3() {
N::x = 0; // expected-error {{'N' is not a class, namespace, or enumeration}}
{ int A; A::ax = 0; }
{ typedef int A; A::ax = 0; } // expected-error{{'A' (aka 'int') is not a class, namespace, or enumeration}}
- { typedef A::C A; A::ax = 0; } // expected-error {{no member named 'ax'}}
+ { typedef A::C A; A::ax = 0; } // expected-error {{no member named 'ax' in 'A::C'; did you mean '::A::ax'?}}
{ typedef A::C A; A::cx = 0; }
}
@@ -474,7 +476,7 @@ namespace A {
class B {
typedef C D; // expected-error{{unknown type name 'C'}}
A::D::F;
- // expected-error@-1{{'PR30619::A::B::D' (aka 'int') is not a class, namespace, or enumeration}}
+ // expected-error@-1{{'A::D' (aka 'int') is not a class, namespace, or enumeration}}
};
}
}
diff --git a/clang/test/SemaCXX/pointer-forward-declared-class-conversion.cpp b/clang/test/SemaCXX/pointer-forward-declared-class-conversion.cpp
index b0a101e..8d0cdcd 100644
--- a/clang/test/SemaCXX/pointer-forward-declared-class-conversion.cpp
+++ b/clang/test/SemaCXX/pointer-forward-declared-class-conversion.cpp
@@ -12,6 +12,6 @@ A2<int> *a2 = b2; // expected-error{{cannot initialize a variable of type 'A2<in
typedef struct S s;
const s *f();
-s *g1() { return f(); } // expected-error{{cannot initialize return object of type 's *' (aka 'S *') with an rvalue of type 'const s *' (aka 'const S *')}}
+s *g1() { return f(); } // expected-error{{cannot initialize return object of type 's *' (aka 'struct S *') with an rvalue of type 'const s *' (aka 'const struct S *')}}
-B1 *g2() { return f(); } // expected-error{{cannot initialize return object of type 'B1 *' with an rvalue of type 'const s *' (aka 'const S *')}}
+B1 *g2() { return f(); } // expected-error{{cannot initialize return object of type 'B1 *' with an rvalue of type 'const s *' (aka 'const struct S *')}}
diff --git a/clang/test/SemaCXX/pseudo-destructors.cpp b/clang/test/SemaCXX/pseudo-destructors.cpp
index 8120292..55849d6 100644
--- a/clang/test/SemaCXX/pseudo-destructors.cpp
+++ b/clang/test/SemaCXX/pseudo-destructors.cpp
@@ -116,7 +116,7 @@ typedef Derived *Foo;
void test2(Foo d) {
d.~Foo(); // This is ok
- d.~Derived(); // expected-error {{member reference type 'Foo' (aka 'dotPointerAccess::Derived *') is a pointer; did you mean to use '->'}}
+ d.~Derived(); // expected-error {{member reference type 'Foo' (aka 'Derived *') is a pointer; did you mean to use '->'}}
}
}
diff --git a/clang/test/SemaCXX/return.cpp b/clang/test/SemaCXX/return.cpp
index 796c9ae..60accbd 100644
--- a/clang/test/SemaCXX/return.cpp
+++ b/clang/test/SemaCXX/return.cpp
@@ -115,9 +115,9 @@ namespace ctor_returns_void {
};
template <typename T> struct ST {
- ST() { return f(); } // expected-error {{constructor 'ST<T>' must not return void expression}}
+ ST() { return f(); } // expected-error {{constructor 'ctor_returns_void::ST<T>' must not return void expression}}
// expected-error@-1 {{constructor 'ST' must not return void expression}}
- ~ST() { return f(); } // expected-error {{destructor '~ST<T>' must not return void expression}}
+ ~ST() { return f(); } // expected-error {{destructor '~ctor_returns_void::ST<T>' must not return void expression}}
// expected-error@-1 {{destructor '~ST' must not return void expression}}
};
diff --git a/clang/test/SemaCXX/static-assert.cpp b/clang/test/SemaCXX/static-assert.cpp
index bf6a2ee..354016d 100644
--- a/clang/test/SemaCXX/static-assert.cpp
+++ b/clang/test/SemaCXX/static-assert.cpp
@@ -199,7 +199,7 @@ void foo2() {
// FIXME: Here the template keyword is dropped because the failed condition
// for a static assert is always printed with canonical types.
static_assert(::ns::NestedTemplates1<T, a>::NestedTemplates2::template NestedTemplates3<U>::value, "message");
- // expected-error@-1{{static assertion failed due to requirement '::ns::NestedTemplates1<int, 3>::NestedTemplates2::NestedTemplates3<float>::value': message}}
+ // expected-error@-1{{static assertion failed due to requirement 'ns::NestedTemplates1<int, 3>::NestedTemplates2::NestedTemplates3<float>::value': message}}
}
template void foo2<int, float, 3>();
// expected-note@-1{{in instantiation of function template specialization 'foo2<int, float, 3>' requested here}}
diff --git a/clang/test/SemaCXX/sugar-common-types.cpp b/clang/test/SemaCXX/sugar-common-types.cpp
index d58f6cd..dd5fc4a 100644
--- a/clang/test/SemaCXX/sugar-common-types.cpp
+++ b/clang/test/SemaCXX/sugar-common-types.cpp
@@ -44,7 +44,8 @@ template <class T> struct S1 {
};
N t10 = 0 ? S1<X1>() : S1<Y1>(); // expected-error {{from 'S1<B1>' (aka 'S1<int>')}}
-N t11 = 0 ? S1<X1>::S2<X2>() : S1<Y1>::S2<Y2>(); // expected-error {{from 'S1<B1>::S2<B2>' (aka 'S2<void>')}}
+// FIXME: needs to compute common sugar for qualified template names
+N t11 = 0 ? S1<X1>::S2<X2>() : S1<Y1>::S2<Y2>(); // expected-error {{from 'S1<int>::S2<B2>' (aka 'S1<int>::S2<void>')}}
template <class T> using Al = S1<T>;
@@ -200,5 +201,5 @@ namespace member_pointers {
// FIXME: adjusted MemberPointer does not preserve qualifier
N t3 = 0 ? &W1::a : &W2::b;
- // expected-error@-1 {{rvalue of type 'B1 W<void>::*'}}
+ // expected-error@-1 {{rvalue of type 'B1 member_pointers::W<void>::*'}}
} // namespace member_pointers
diff --git a/clang/test/SemaCXX/sugared-auto.cpp b/clang/test/SemaCXX/sugared-auto.cpp
index b5bb4f0..cf879ef 100644
--- a/clang/test/SemaCXX/sugared-auto.cpp
+++ b/clang/test/SemaCXX/sugared-auto.cpp
@@ -54,7 +54,7 @@ N t4 = x4; // expected-error {{lvalue of type 'Man' (aka 'int')}}
N t5 = x5; // expected-error {{lvalue of type 'Dog' (aka 'int')}}
auto x6 = { Man(), Dog() };
-N t6 = x6; // expected-error {{from 'std::initializer_list<Animal>' (aka 'initializer_list<int>')}}
+N t6 = x6; // expected-error {{from 'std::initializer_list<Animal>' (aka 'std::initializer_list<int>')}}
} // namespace variable
diff --git a/clang/test/SemaCXX/type-aware-coroutines.cpp b/clang/test/SemaCXX/type-aware-coroutines.cpp
index 742e5f0..e41d07b 100644
--- a/clang/test/SemaCXX/type-aware-coroutines.cpp
+++ b/clang/test/SemaCXX/type-aware-coroutines.cpp
@@ -93,7 +93,7 @@ struct resumable5 {
};
resumable f1(int) {
- // expected-error@-1 {{'operator new' provided by 'std::coroutine_traits<resumable, int>::promise_type' (aka 'resumable::promise_type') is not usable with the function signature of 'f1'}}
+ // expected-error@-1 {{'operator new' provided by 'std::coroutine_traits<resumable, int>::promise_type' (aka 'typename resumable::promise_type') is not usable with the function signature of 'f1'}}
// expected-note@-2 {{type aware 'operator new' will not be used for coroutine allocation}}
// expected-note@#resumable_tan1 {{type aware 'operator new' declared here}}
// expected-note@#resumable_tan2 {{type aware 'operator new' declared here}}
@@ -101,7 +101,7 @@ resumable f1(int) {
}
resumable f2(float) {
- // expected-error@-1 {{'operator new' provided by 'std::coroutine_traits<resumable, float>::promise_type' (aka 'resumable::promise_type') is not usable with the function signature of 'f2'}}
+ // expected-error@-1 {{'operator new' provided by 'std::coroutine_traits<resumable, float>::promise_type' (aka 'typename resumable::promise_type') is not usable with the function signature of 'f2'}}
// expected-note@-2 {{type aware 'operator new' will not be used for coroutine allocation}}
// expected-note@#resumable_tan1 {{type aware 'operator new' declared here}}
// expected-note@#resumable_tan2 {{type aware 'operator new' declared here}}
@@ -109,7 +109,7 @@ resumable f2(float) {
}
resumable2 f3(int, float, const char*, Allocator) {
- // expected-error@-1 {{'operator new' provided by 'std::coroutine_traits<resumable2, int, float, const char *, Allocator>::promise_type' (aka 'resumable2::promise_type') is not usable with the function signature of 'f3'}}
+ // expected-error@-1 {{'operator new' provided by 'std::coroutine_traits<resumable2, int, float, const char *, Allocator>::promise_type' (aka 'typename resumable2::promise_type') is not usable with the function signature of 'f3'}}
// expected-note@-2 {{type aware 'operator new' will not be used for coroutine allocation}}
// expected-note@#resumable2_tan1 {{type aware 'operator new' declared here}}
co_yield 1;
@@ -117,7 +117,7 @@ resumable2 f3(int, float, const char*, Allocator) {
}
resumable f4(int n = 10) {
- // expected-error@-1 {{'operator new' provided by 'std::coroutine_traits<resumable, int>::promise_type' (aka 'resumable::promise_type') is not usable with the function signature of 'f4'}}
+ // expected-error@-1 {{'operator new' provided by 'std::coroutine_traits<resumable, int>::promise_type' (aka 'typename resumable::promise_type') is not usable with the function signature of 'f4'}}
// expected-note@-2 {{type aware 'operator new' will not be used for coroutine allocation}}
// expected-note@#resumable_tan1 {{type aware 'operator new' declared here}}
// expected-note@#resumable_tan2 {{type aware 'operator new' declared here}}
diff --git a/clang/test/SemaCXX/undefined-partial-specialization.cpp b/clang/test/SemaCXX/undefined-partial-specialization.cpp
index 0f776a6..3992d02 100644
--- a/clang/test/SemaCXX/undefined-partial-specialization.cpp
+++ b/clang/test/SemaCXX/undefined-partial-specialization.cpp
@@ -10,6 +10,6 @@ template <typename T>
class boo<T, true>;
template<typename T>
-void boo<T, true>::foo(){} // expected-error{{out-of-line definition of 'foo' from class 'boo<T, true>' without definition}}
+void boo<T, true>::foo(){} // expected-error{{out-of-line definition of 'foo' from class 'GH61356::boo<T, true>' without definition}}
}
diff --git a/clang/test/SemaCXX/warn-unsafe-buffer-usage-libc-functions.cpp b/clang/test/SemaCXX/warn-unsafe-buffer-usage-libc-functions.cpp
index 6ae329a..765dcbc 100644
--- a/clang/test/SemaCXX/warn-unsafe-buffer-usage-libc-functions.cpp
+++ b/clang/test/SemaCXX/warn-unsafe-buffer-usage-libc-functions.cpp
@@ -4,8 +4,16 @@
// RUN: -verify %s -x objective-c++
// RUN: %clang_cc1 -std=c++20 -Wno-all -Wunsafe-buffer-usage-in-libc-call \
// RUN: -verify %s
+// RUN: %clang_cc1 -std=c++20 -Wno-all -Wunsafe-buffer-usage-in-libc-call \
+// RUN: -verify %s -DTEST_STD_NS
typedef struct {} FILE;
+typedef unsigned int size_t;
+
+#ifdef TEST_STD_NS
+namespace std {
+#endif
+
void memcpy();
void __asan_memcpy();
void strcpy();
@@ -25,6 +33,11 @@ int sscanf(const char * buffer, const char * format, ... );
int wprintf(const wchar_t* format, ... );
int __asan_printf();
+#ifdef TEST_STD_NS
+} //namespace std
+using namespace std;
+#endif
+
namespace std {
template< class InputIt, class OutputIt >
OutputIt copy( InputIt first, InputIt last,
@@ -64,10 +77,6 @@ namespace std {
typedef basic_string_view<char> string_view;
typedef basic_string_view<wchar_t> wstring_view;
-
- // C function under std:
- void memcpy();
- void strcpy();
}
void f(char * p, char * q, std::span<char> s, std::span<char> s2) {
@@ -77,14 +86,16 @@ void f(char * p, char * q, std::span<char> s, std::span<char> s2) {
aligned_char_ptr_t cp;
memcpy(); // expected-warning{{function 'memcpy' is unsafe}}
- std::memcpy(); // expected-warning{{function 'memcpy' is unsafe}}
__builtin_memcpy(p, q, 64); // expected-warning{{function '__builtin_memcpy' is unsafe}}
__builtin___memcpy_chk(p, q, 8, 64); // expected-warning{{function '__builtin___memcpy_chk' is unsafe}}
__asan_memcpy(); // expected-warning{{function '__asan_memcpy' is unsafe}}
strcpy(); // expected-warning{{function 'strcpy' is unsafe}}
- std::strcpy(); // expected-warning{{function 'strcpy' is unsafe}}
strcpy_s(); // expected-warning{{function 'strcpy_s' is unsafe}}
wcscpy_s(); // expected-warning{{function 'wcscpy_s' is unsafe}}
+#ifdef TEST_STD_NS
+ std::strcpy(); // expected-warning{{function 'strcpy' is unsafe}}
+ std::memcpy(); // expected-warning{{function 'memcpy' is unsafe}}
+#endif
/* Test printfs */
fprintf((FILE*)p, "%s%d", p, *p); // expected-warning{{function 'fprintf' is unsafe}} expected-note{{string argument is not guaranteed to be null-terminated}}
@@ -181,13 +192,59 @@ void ff(char * p, char * q, std::span<char> s, std::span<char> s2) {
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunsafe-buffer-usage-in-libc-call"
memcpy();
- std::memcpy();
__builtin_memcpy(p, q, 64);
__builtin___memcpy_chk(p, q, 8, 64);
__asan_memcpy();
strcpy();
+#ifdef TEST_STD_NS
std::strcpy();
+ std::memcpy();
+#endif
strcpy_s();
wcscpy_s();
#pragma clang diagnostic pop
}
+
+
+
+// functions not in global scope or std:: namespace are not libc
+// functions regardless of their names:
+struct StrBuff
+{
+ void strcpy();
+ void strcpy(char* dst);
+ void memcpy(void *dst, const void *src, size_t size);
+};
+
+namespace NS {
+ void strcpy();
+ void strcpy(char* dst);
+ void memcpy(void *dst, const void *src, size_t size);
+}
+
+namespace std {
+ // class methods even in std namespace cannot be libc functions:
+ struct LibC
+ {
+ void strcpy();
+ void strcpy(char* dst);
+ void memcpy(void *dst, const void *src, size_t size);
+ };
+}
+
+void test(StrBuff& str)
+{
+ char buff[64];
+ str.strcpy();
+ str.strcpy(buff);
+ str.memcpy(buff, buff, 64);
+ NS::strcpy();
+ NS::strcpy(buff);
+ NS::memcpy(buff, buff, 64);
+
+ std::LibC LibC;
+
+ LibC.strcpy();
+ LibC.strcpy(buff);
+ LibC.memcpy(buff, buff, 64);
+}
diff --git a/clang/test/SemaHIP/amdgcnspirv-implicit-alloc-function-calling-conv.hip b/clang/test/SemaHIP/amdgcnspirv-implicit-alloc-function-calling-conv.hip
new file mode 100644
index 0000000..c3e7e1a
--- /dev/null
+++ b/clang/test/SemaHIP/amdgcnspirv-implicit-alloc-function-calling-conv.hip
@@ -0,0 +1,32 @@
+// RUN: %clang_cc1 %s -fcuda-is-device -std=c++17 -triple spirv32 -verify
+// RUN: %clang_cc1 %s -fcuda-is-device -std=c++17 -triple spirv64 -verify
+// RUN: %clang_cc1 %s -fcuda-is-device -std=c++17 -triple spirv64-amd-amdhsa -verify
+// RUN: %clang_cc1 %s -fcuda-is-device -std=c++17 -triple spirv32 -aux-triple x86_64-unknown-linux-gnu -verify
+// RUN: %clang_cc1 %s -fcuda-is-device -std=c++17 -triple spirv64 -aux-triple x86_64-unknown-linux-gnu -verify
+// RUN: %clang_cc1 %s -fcuda-is-device -std=c++17 -triple spirv64-amd-amdhsa -aux-triple x86_64-unknown-linux-gnu -verify
+// RUN: %clang_cc1 %s -fcuda-is-device -std=c++17 -triple spirv32 -aux-triple x86_64-pc-windows-msvc -verify
+// RUN: %clang_cc1 %s -fcuda-is-device -std=c++17 -triple spirv64 -aux-triple x86_64-pc-windows-msvc -verify
+// RUN: %clang_cc1 %s -fcuda-is-device -std=c++17 -triple spirv64-amd-amdhsa -aux-triple x86_64-pc-windows-msvc -verify
+
+// expected-no-diagnostics
+
+namespace std
+{
+ enum class align_val_t : __SIZE_TYPE__ {};
+ struct nothrow_t { explicit nothrow_t() = default; };
+ extern nothrow_t const nothrow;
+}
+
+void* __attribute__((cdecl)) operator new(__SIZE_TYPE__);
+void* __attribute__((cdecl)) operator new[](__SIZE_TYPE__);
+void* __attribute__((cdecl)) operator new(__SIZE_TYPE__, ::std::align_val_t);
+void* __attribute__((cdecl)) operator new[](__SIZE_TYPE__, ::std::align_val_t);
+
+void __attribute__((cdecl)) operator delete(void*) noexcept;
+void __attribute__((cdecl)) operator delete[](void*) noexcept;
+void __attribute__((cdecl)) operator delete(void*, __SIZE_TYPE__) noexcept;
+void __attribute__((cdecl)) operator delete[](void*, __SIZE_TYPE__) noexcept;
+void __attribute__((cdecl)) operator delete(void*, ::std::align_val_t) noexcept;
+void __attribute__((cdecl)) operator delete[](void*, ::std::align_val_t) noexcept;
+void __attribute__((cdecl)) operator delete(void*, __SIZE_TYPE__, ::std::align_val_t) noexcept;
+void __attribute__((cdecl)) operator delete[](void*, __SIZE_TYPE__, ::std::align_val_t) noexcept;
diff --git a/clang/test/SemaObjCXX/Inputs/nullability-pragmas-1.h b/clang/test/SemaObjCXX/Inputs/nullability-pragmas-1.h
index 91753ca..9d5329c 100644
--- a/clang/test/SemaObjCXX/Inputs/nullability-pragmas-1.h
+++ b/clang/test/SemaObjCXX/Inputs/nullability-pragmas-1.h
@@ -39,7 +39,7 @@ A *f14(void);
int * _Null_unspecified f15(void);
A * _Null_unspecified f16(void);
-void f17(CFErrorRef *error); // expected-note{{no known conversion from 'A * _Nonnull' to 'CFErrorRef _Nullable * _Nullable' (aka '__CFError **') for 1st argument}}
+void f17(CFErrorRef *error); // expected-note{{no known conversion from 'A * _Nonnull' to 'CFErrorRef _Nullable * _Nullable' (aka 'struct __CFError **') for 1st argument}}
void f18(A **); // expected-warning 2{{pointer is missing a nullability type specifier}}
// expected-note@-1 2 {{insert '_Nullable' if the pointer may be null}}
// expected-note@-2 2 {{insert '_Nonnull' if the pointer should never be null}}
diff --git a/clang/test/SemaObjCXX/arc-bridged-cast.mm b/clang/test/SemaObjCXX/arc-bridged-cast.mm
index 1f68897..67769eb 100644
--- a/clang/test/SemaObjCXX/arc-bridged-cast.mm
+++ b/clang/test/SemaObjCXX/arc-bridged-cast.mm
@@ -63,7 +63,7 @@ extern "C" const CFAnnotatedObjectRef r3 = &cf0;
void testExternC() {
id obj;
obj = (id)r0;
- obj = (id)r1; // expected-error{{cast of C pointer type 'CFAnnotatedObjectRef' (aka 'const __CFAnnotatedObject *') to Objective-C pointer type 'id' requires a bridged cast}} expected-note{{use __bridge to convert directly}} expected-note{{use __bridge_transfer to transfer ownership of a +1 'CFAnnotatedObjectRef'}}
+ obj = (id)r1; // expected-error{{cast of C pointer type 'CFAnnotatedObjectRef' (aka 'const struct __CFAnnotatedObject *') to Objective-C pointer type 'id' requires a bridged cast}} expected-note{{use __bridge to convert directly}} expected-note{{use __bridge_transfer to transfer ownership of a +1 'CFAnnotatedObjectRef'}}
obj = (id)r2;
- obj = (id)r3; // expected-error{{cast of C pointer type 'CFAnnotatedObjectRef' (aka 'const __CFAnnotatedObject *') to Objective-C pointer type 'id' requires a bridged cast}} expected-note{{use __bridge to convert directly}} expected-note{{use __bridge_transfer to transfer ownership of a +1 'CFAnnotatedObjectRef'}}
+ obj = (id)r3; // expected-error{{cast of C pointer type 'CFAnnotatedObjectRef' (aka 'const struct __CFAnnotatedObject *') to Objective-C pointer type 'id' requires a bridged cast}} expected-note{{use __bridge to convert directly}} expected-note{{use __bridge_transfer to transfer ownership of a +1 'CFAnnotatedObjectRef'}}
}
diff --git a/clang/test/SemaObjCXX/objc-boxed-expressions-nsvalue.mm b/clang/test/SemaObjCXX/objc-boxed-expressions-nsvalue.mm
index 3b590c4..57aaa62 100644
--- a/clang/test/SemaObjCXX/objc-boxed-expressions-nsvalue.mm
+++ b/clang/test/SemaObjCXX/objc-boxed-expressions-nsvalue.mm
@@ -81,7 +81,7 @@ int main() {
id edge_insets_object = @(edge_insets);
SomeStruct s;
- id err = @(s); // expected-error{{illegal type 'SomeStruct' (aka '_SomeStruct') used in a boxed expression}}
+ id err = @(s); // expected-error{{illegal type 'SomeStruct' (aka 'struct _SomeStruct') used in a boxed expression}}
NonTriviallyCopyable ntc;
id ntcErr = @(ntc); // expected-error{{non-trivially copyable type 'NonTriviallyCopyable' cannot be used in a boxed expression}}
@@ -99,7 +99,7 @@ SomeStruct getSomeStruct() {
void rvalue() {
id rv_rect = @(getRect());
- id rv_some_struct = @(getSomeStruct()); // expected-error {{illegal type 'SomeStruct' (aka '_SomeStruct') used in a boxed expression}}
+ id rv_some_struct = @(getSomeStruct()); // expected-error {{illegal type 'SomeStruct' (aka 'struct _SomeStruct') used in a boxed expression}}
}
template <class T> id box(T value) { return @(value); } // expected-error{{non-trivially copyable type 'NonTriviallyCopyable' cannot be used in a boxed expression}}
diff --git a/clang/test/SemaObjCXX/objcbridge-attribute-arc.mm b/clang/test/SemaObjCXX/objcbridge-attribute-arc.mm
index 0a86ee9..0d3751f 100644
--- a/clang/test/SemaObjCXX/objcbridge-attribute-arc.mm
+++ b/clang/test/SemaObjCXX/objcbridge-attribute-arc.mm
@@ -26,10 +26,10 @@ typedef XXX *CFUColor2Ref;
typedef struct __attribute__((objc_bridge(NSTesting))) __CFError *CFTestingRef; // expected-note {{declared here}}
id Test1(CFTestingRef cf) {
- return (NSString *)cf; // expected-error {{CF object of type 'CFTestingRef' (aka '__CFError *') is bridged to 'NSTesting', which is not an Objective-C class}} \
- // expected-error {{cast of C pointer type 'CFTestingRef' (aka '__CFError *') to Objective-C pointer type 'NSString *' requires a bridged cast}} \
+ return (NSString *)cf; // expected-error {{CF object of type 'CFTestingRef' (aka 'struct __CFError *') is bridged to 'NSTesting', which is not an Objective-C class}} \
+ // expected-error {{cast of C pointer type 'CFTestingRef' (aka 'struct __CFError *') to Objective-C pointer type 'NSString *' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFTestingRef' (aka '__CFError *') into ARC}}
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFTestingRef' (aka 'struct __CFError *') into ARC}}
}
typedef CFErrorRef CFErrorRef1;
@@ -52,105 +52,105 @@ typedef CFErrorRef1 CFErrorRef2; // expected-note 2 {{declared here}}
@class NSString;
void Test2(CFErrorRef2 cf, NSError *ns, NSString *str, Class c, CFUColor2Ref cf2) {
- (void)(NSString *)cf; // expected-warning {{'CFErrorRef2' (aka '__CFErrorRef *') bridges to NSError, not 'NSString'}} \
- // expected-error {{cast of C pointer type 'CFErrorRef2' (aka '__CFErrorRef *') to Objective-C pointer type 'NSString *' requires a bridged cast}} \
+ (void)(NSString *)cf; // expected-warning {{'CFErrorRef2' (aka 'struct __CFErrorRef *') bridges to NSError, not 'NSString'}} \
+ // expected-error {{cast of C pointer type 'CFErrorRef2' (aka 'struct __CFErrorRef *') to Objective-C pointer type 'NSString *' requires a bridged cast}} \
// expected-note {{__bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef2' (aka '__CFErrorRef *') into ARC}}
- (void)(NSError *)cf; // expected-error {{cast of C pointer type 'CFErrorRef2' (aka '__CFErrorRef *') to Objective-C pointer type 'NSError *' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef2' (aka 'struct __CFErrorRef *') into ARC}}
+ (void)(NSError *)cf; // expected-error {{cast of C pointer type 'CFErrorRef2' (aka 'struct __CFErrorRef *') to Objective-C pointer type 'NSError *' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef2' (aka '__CFErrorRef *') into ARC}}
- (void)(MyError*)cf; // expected-error {{cast of C pointer type 'CFErrorRef2' (aka '__CFErrorRef *') to Objective-C pointer type 'MyError *' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef2' (aka 'struct __CFErrorRef *') into ARC}}
+ (void)(MyError*)cf; // expected-error {{cast of C pointer type 'CFErrorRef2' (aka 'struct __CFErrorRef *') to Objective-C pointer type 'MyError *' requires a bridged cast}} \
// expected-note {{__bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef2' (aka '__CFErrorRef *') into ARC}} \
- // expected-warning {{'CFErrorRef2' (aka '__CFErrorRef *') bridges to NSError, not 'MyError'}}
- (void)(NSUColor *)cf2; // expected-error {{cast of C pointer type 'CFUColor2Ref' (aka '__CFUPrimeColor *') to Objective-C pointer type 'NSUColor *' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef2' (aka 'struct __CFErrorRef *') into ARC}} \
+ // expected-warning {{'CFErrorRef2' (aka 'struct __CFErrorRef *') bridges to NSError, not 'MyError'}}
+ (void)(NSUColor *)cf2; // expected-error {{cast of C pointer type 'CFUColor2Ref' (aka 'union __CFUPrimeColor *') to Objective-C pointer type 'NSUColor *' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFUColor2Ref' (aka '__CFUPrimeColor *') into ARC}}
- (void)(CFErrorRef)ns; // expected-error {{cast of Objective-C pointer type 'NSError *' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFUColor2Ref' (aka 'union __CFUPrimeColor *') into ARC}}
+ (void)(CFErrorRef)ns; // expected-error {{cast of Objective-C pointer type 'NSError *' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
- (void)(CFErrorRef)str; // expected-warning {{'NSString' cannot bridge to 'CFErrorRef' (aka '__CFErrorRef *')}} \\
- // expected-error {{cast of Objective-C pointer type 'NSString *' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
+ (void)(CFErrorRef)str; // expected-warning {{'NSString' cannot bridge to 'CFErrorRef' (aka 'struct __CFErrorRef *')}} \\
+ // expected-error {{cast of Objective-C pointer type 'NSString *' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
- (void)(Class)cf; // expected-warning {{'CFErrorRef2' (aka '__CFErrorRef *') bridges to NSError, not 'Class'}} \\
- // expected-error {{cast of C pointer type 'CFErrorRef2' (aka '__CFErrorRef *') to Objective-C pointer type 'Class' requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
+ (void)(Class)cf; // expected-warning {{'CFErrorRef2' (aka 'struct __CFErrorRef *') bridges to NSError, not 'Class'}} \\
+ // expected-error {{cast of C pointer type 'CFErrorRef2' (aka 'struct __CFErrorRef *') to Objective-C pointer type 'Class' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef2' (aka '__CFErrorRef *') into ARC}}
- (void)(CFErrorRef)c; // expected-warning {{'__unsafe_unretained Class' cannot bridge to 'CFErrorRef' (aka '__CFErrorRef *}} \\
- // expected-error {{cast of Objective-C pointer type 'Class' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef2' (aka 'struct __CFErrorRef *') into ARC}}
+ (void)(CFErrorRef)c; // expected-warning {{'__unsafe_unretained Class' cannot bridge to 'CFErrorRef' (aka 'struct __CFErrorRef *'}} \\
+ // expected-error {{cast of Objective-C pointer type 'Class' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
}
void Test3(CFErrorRef cf, NSError *ns) {
- (void)(id)cf; // expected-error {{cast of C pointer type 'CFErrorRef' (aka '__CFErrorRef *') to Objective-C pointer type 'id' requires a bridged cast}} \
+ (void)(id)cf; // expected-error {{cast of C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') to Objective-C pointer type 'id' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef' (aka '__CFErrorRef *') into ARC}}
- (void)(id<P1, P2>)cf; // expected-error {{cast of C pointer type 'CFErrorRef' (aka '__CFErrorRef *') to Objective-C pointer type 'id<P1,P2>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef' (aka 'struct __CFErrorRef *') into ARC}}
+ (void)(id<P1, P2>)cf; // expected-error {{cast of C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') to Objective-C pointer type 'id<P1,P2>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef' (aka '__CFErrorRef *') into ARC}}
- (void)(id<P1, P2, P4>)cf; // expected-warning {{'CFErrorRef' (aka '__CFErrorRef *') bridges to NSError, not 'id<P1,P2,P4>'}} \
- // expected-error {{cast of C pointer type 'CFErrorRef' (aka '__CFErrorRef *') to Objective-C pointer type 'id<P1,P2,P4>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef' (aka 'struct __CFErrorRef *') into ARC}}
+ (void)(id<P1, P2, P4>)cf; // expected-warning {{'CFErrorRef' (aka 'struct __CFErrorRef *') bridges to NSError, not 'id<P1,P2,P4>'}} \
+ // expected-error {{cast of C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') to Objective-C pointer type 'id<P1,P2,P4>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef' (aka '__CFErrorRef *') into ARC}}
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef' (aka 'struct __CFErrorRef *') into ARC}}
}
void Test4(CFMyErrorRef cf) {
- (void)(id)cf; // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') to Objective-C pointer type 'id' requires a bridged cast}} \
+ (void)(id)cf; // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') to Objective-C pointer type 'id' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *') into ARC}}
- (void)(id<P1, P2>)cf; // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') to Objective-C pointer type 'id<P1,P2>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') into ARC}}
+ (void)(id<P1, P2>)cf; // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') to Objective-C pointer type 'id<P1,P2>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *') into ARC}}
- (void)(id<P1, P2, P3>)cf; // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') to Objective-C pointer type 'id<P1,P2,P3>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') into ARC}}
+ (void)(id<P1, P2, P3>)cf; // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') to Objective-C pointer type 'id<P1,P2,P3>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *') into ARC}}
- (void)(id<P2, P3>)cf; // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') to Objective-C pointer type 'id<P2,P3>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') into ARC}}
+ (void)(id<P2, P3>)cf; // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') to Objective-C pointer type 'id<P2,P3>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *') into ARC}}
- (void)(id<P1, P2, P4>)cf; // expected-warning {{'CFMyErrorRef' (aka '__CFMyErrorRef *') bridges to MyError, not 'id<P1,P2,P4>'}} \
- // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') to Objective-C pointer type 'id<P1,P2,P4>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') into ARC}}
+ (void)(id<P1, P2, P4>)cf; // expected-warning {{'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') bridges to MyError, not 'id<P1,P2,P4>'}} \
+ // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') to Objective-C pointer type 'id<P1,P2,P4>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *') into ARC}}
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') into ARC}}
}
void Test5(id<P1, P2, P3> P123, id ID, id<P1, P2, P3, P4> P1234, id<P1, P2> P12, id<P2, P3> P23) {
- (void)(CFErrorRef)ID; // expected-error {{cast of Objective-C pointer type 'id' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ (void)(CFErrorRef)ID; // expected-error {{cast of Objective-C pointer type 'id' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
- (void)(CFErrorRef)P123; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3>' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
+ (void)(CFErrorRef)P123; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3>' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
- (void)(CFErrorRef)P1234; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3,P4>' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
+ (void)(CFErrorRef)P1234; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3,P4>' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
- (void)(CFErrorRef)P12; // expected-error {{cast of Objective-C pointer type 'id<P1,P2>' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
+ (void)(CFErrorRef)P12; // expected-error {{cast of Objective-C pointer type 'id<P1,P2>' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
- (void)(CFErrorRef)P23; // expected-error {{cast of Objective-C pointer type 'id<P2,P3>' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
+ (void)(CFErrorRef)P23; // expected-error {{cast of Objective-C pointer type 'id<P2,P3>' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
}
void Test6(id<P1, P2, P3> P123, id ID, id<P1, P2, P3, P4> P1234, id<P1, P2> P12, id<P2, P3> P23) {
- (void)(CFMyErrorRef)ID; // expected-error {{cast of Objective-C pointer type 'id' to C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') requires a bridged cast}} \
+ (void)(CFMyErrorRef)ID; // expected-error {{cast of Objective-C pointer type 'id' to C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *')}}
- (void)(CFMyErrorRef)P123; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3>' to C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *')}}
+ (void)(CFMyErrorRef)P123; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3>' to C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *')}}
- (void)(CFMyErrorRef)P1234; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3,P4>' to C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *')}}
+ (void)(CFMyErrorRef)P1234; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3,P4>' to C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *')}}
- (void)(CFMyErrorRef)P12; // expected-error {{cast of Objective-C pointer type 'id<P1,P2>' to C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *')}}
+ (void)(CFMyErrorRef)P12; // expected-error {{cast of Objective-C pointer type 'id<P1,P2>' to C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *')}}
- (void)(CFMyErrorRef)P23; // expected-error {{cast of Objective-C pointer type 'id<P2,P3>' to C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *')}}
+ (void)(CFMyErrorRef)P23; // expected-error {{cast of Objective-C pointer type 'id<P2,P3>' to C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *')}}
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *')}}
}
typedef struct __attribute__ ((objc_bridge(MyPersonalError))) __CFMyPersonalErrorRef * CFMyPersonalErrorRef; // expected-note 1 {{declared here}}
@@ -159,51 +159,51 @@ typedef struct __attribute__ ((objc_bridge(MyPersonalError))) __CFMyPersonalErro
@end
void Test7(id<P1, P2, P3> P123, id ID, id<P1, P2, P3, P4> P1234, id<P1, P2> P12, id<P2, P3> P23) {
- (void)(CFMyPersonalErrorRef)ID; // expected-error {{cast of Objective-C pointer type 'id' to C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') requires a bridged cast}} \
+ (void)(CFMyPersonalErrorRef)ID; // expected-error {{cast of Objective-C pointer type 'id' to C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *')}}
- (void)(CFMyPersonalErrorRef)P123; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3>' to C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *')}}
+ (void)(CFMyPersonalErrorRef)P123; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3>' to C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *')}}
- (void)(CFMyPersonalErrorRef)P1234; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3,P4>' to C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *')}}
+ (void)(CFMyPersonalErrorRef)P1234; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3,P4>' to C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *')}}
- (void)(CFMyPersonalErrorRef)P12; // expected-error {{cast of Objective-C pointer type 'id<P1,P2>' to C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *')}}
+ (void)(CFMyPersonalErrorRef)P12; // expected-error {{cast of Objective-C pointer type 'id<P1,P2>' to C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *')}}
- (void)(CFMyPersonalErrorRef)P23; // expected-error {{cast of Objective-C pointer type 'id<P2,P3>' to C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *')}}
+ (void)(CFMyPersonalErrorRef)P23; // expected-error {{cast of Objective-C pointer type 'id<P2,P3>' to C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *')}}
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *')}}
}
void Test8(CFMyPersonalErrorRef cf) {
- (void)(id)cf; // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') to Objective-C pointer type 'id' requires a bridged cast}} \
+ (void)(id)cf; // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') to Objective-C pointer type 'id' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') into ARC}}
- (void)(id<P1>)cf; // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') to Objective-C pointer type 'id<P1>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') into ARC}}
+ (void)(id<P1>)cf; // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') to Objective-C pointer type 'id<P1>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') into ARC}}
- (void)(id<P1, P2>)cf; // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') to Objective-C pointer type 'id<P1,P2>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') into ARC}}
+ (void)(id<P1, P2>)cf; // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') to Objective-C pointer type 'id<P1,P2>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') into ARC}}
- (void)(id<P1, P2, P3>)cf; // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') to Objective-C pointer type 'id<P1,P2,P3>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') into ARC}}
+ (void)(id<P1, P2, P3>)cf; // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') to Objective-C pointer type 'id<P1,P2,P3>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') into ARC}}
- (void)(id<P1, P2, P3, P4>)cf; // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') to Objective-C pointer type 'id<P1,P2,P3,P4>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') into ARC}}
+ (void)(id<P1, P2, P3, P4>)cf; // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') to Objective-C pointer type 'id<P1,P2,P3,P4>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') into ARC}}
- (void)(id<P1, P2, P3, P4, P5>)cf; // expected-warning {{'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') bridges to MyPersonalError, not 'id<P1,P2,P3,P4,P5>'}} \
- // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') to Objective-C pointer type 'id<P1,P2,P3,P4,P5>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') into ARC}}
+ (void)(id<P1, P2, P3, P4, P5>)cf; // expected-warning {{'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') bridges to MyPersonalError, not 'id<P1,P2,P3,P4,P5>'}} \
+ // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') to Objective-C pointer type 'id<P1,P2,P3,P4,P5>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') into ARC}}
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') into ARC}}
}
void Test9(CFErrorRef2 cf, NSError *ns, NSString *str, Class c, CFUColor2Ref cf2) {
- (void)(__bridge NSString *)cf; // expected-warning {{'CFErrorRef2' (aka '__CFErrorRef *') bridges to NSError, not 'NSString'}}
+ (void)(__bridge NSString *)cf; // expected-warning {{'CFErrorRef2' (aka 'struct __CFErrorRef *') bridges to NSError, not 'NSString'}}
(void)(__bridge NSError *)cf; // okay
(void)(__bridge NSUColor *)cf2; // okay
(void)(__bridge CFErrorRef)ns; // okay
- (void)(__bridge CFErrorRef)str; // expected-warning {{'NSString' cannot bridge to 'CFErrorRef' (aka '__CFErrorRef *')}}
- (void)(__bridge Class)cf; // expected-warning {{'CFErrorRef2' (aka '__CFErrorRef *') bridges to NSError, not 'Class'}}
- (void)(__bridge CFErrorRef)c; // expected-warning {{'__unsafe_unretained Class' cannot bridge to 'CFErrorRef' (aka '__CFErrorRef *')}}
+ (void)(__bridge CFErrorRef)str; // expected-warning {{'NSString' cannot bridge to 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
+ (void)(__bridge Class)cf; // expected-warning {{'CFErrorRef2' (aka 'struct __CFErrorRef *') bridges to NSError, not 'Class'}}
+ (void)(__bridge CFErrorRef)c; // expected-warning {{'__unsafe_unretained Class' cannot bridge to 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
}
diff --git a/clang/test/SemaObjCXX/objcbridge-attribute.mm b/clang/test/SemaObjCXX/objcbridge-attribute.mm
index 907163b..c04fba1 100644
--- a/clang/test/SemaObjCXX/objcbridge-attribute.mm
+++ b/clang/test/SemaObjCXX/objcbridge-attribute.mm
@@ -26,10 +26,10 @@ typedef XXX *CFUColor2Ref;
typedef struct __attribute__((objc_bridge(NSTesting))) __CFError *CFTestingRef; // expected-note {{declared here}}
id Test1(CFTestingRef cf) {
- return (NSString *)cf; // expected-error {{CF object of type 'CFTestingRef' (aka '__CFError *') is bridged to 'NSTesting', which is not an Objective-C class}} \
- // expected-error {{cast of C pointer type 'CFTestingRef' (aka '__CFError *') to Objective-C pointer type 'NSString *' requires a bridged cast}} \
+ return (NSString *)cf; // expected-error {{CF object of type 'CFTestingRef' (aka 'struct __CFError *') is bridged to 'NSTesting', which is not an Objective-C class}} \
+ // expected-error {{cast of C pointer type 'CFTestingRef' (aka 'struct __CFError *') to Objective-C pointer type 'NSString *' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFTestingRef' (aka '__CFError *') into ARC}}
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFTestingRef' (aka 'struct __CFError *') into ARC}}
}
typedef CFErrorRef CFErrorRef1;
@@ -52,105 +52,105 @@ typedef CFErrorRef1 CFErrorRef2; // expected-note 2 {{declared here}}
@class NSString;
void Test2(CFErrorRef2 cf, NSError *ns, NSString *str, Class c, CFUColor2Ref cf2) {
- (void)(NSString *)cf; // expected-warning {{'CFErrorRef2' (aka '__CFErrorRef *') bridges to NSError, not 'NSString'}} \
- // expected-error {{cast of C pointer type 'CFErrorRef2' (aka '__CFErrorRef *') to Objective-C pointer type 'NSString *' requires a bridged cast}} \
+ (void)(NSString *)cf; // expected-warning {{'CFErrorRef2' (aka 'struct __CFErrorRef *') bridges to NSError, not 'NSString'}} \
+ // expected-error {{cast of C pointer type 'CFErrorRef2' (aka 'struct __CFErrorRef *') to Objective-C pointer type 'NSString *' requires a bridged cast}} \
// expected-note {{__bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef2' (aka '__CFErrorRef *') into ARC}}
- (void)(NSError *)cf; // expected-error {{cast of C pointer type 'CFErrorRef2' (aka '__CFErrorRef *') to Objective-C pointer type 'NSError *' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef2' (aka 'struct __CFErrorRef *') into ARC}}
+ (void)(NSError *)cf; // expected-error {{cast of C pointer type 'CFErrorRef2' (aka 'struct __CFErrorRef *') to Objective-C pointer type 'NSError *' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef2' (aka '__CFErrorRef *') into ARC}}
- (void)(MyError*)cf; // expected-error {{cast of C pointer type 'CFErrorRef2' (aka '__CFErrorRef *') to Objective-C pointer type 'MyError *' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef2' (aka 'struct __CFErrorRef *') into ARC}}
+ (void)(MyError*)cf; // expected-error {{cast of C pointer type 'CFErrorRef2' (aka 'struct __CFErrorRef *') to Objective-C pointer type 'MyError *' requires a bridged cast}} \
// expected-note {{__bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef2' (aka '__CFErrorRef *') into ARC}} \
- // expected-warning {{'CFErrorRef2' (aka '__CFErrorRef *') bridges to NSError, not 'MyError'}}
- (void)(NSUColor *)cf2; // expected-error {{cast of C pointer type 'CFUColor2Ref' (aka '__CFUPrimeColor *') to Objective-C pointer type 'NSUColor *' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef2' (aka 'struct __CFErrorRef *') into ARC}} \
+ // expected-warning {{'CFErrorRef2' (aka 'struct __CFErrorRef *') bridges to NSError, not 'MyError'}}
+ (void)(NSUColor *)cf2; // expected-error {{cast of C pointer type 'CFUColor2Ref' (aka 'union __CFUPrimeColor *') to Objective-C pointer type 'NSUColor *' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFUColor2Ref' (aka '__CFUPrimeColor *') into ARC}}
- (void)(CFErrorRef)ns; // expected-error {{cast of Objective-C pointer type 'NSError *' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFUColor2Ref' (aka 'union __CFUPrimeColor *') into ARC}}
+ (void)(CFErrorRef)ns; // expected-error {{cast of Objective-C pointer type 'NSError *' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
- (void)(CFErrorRef)str; // expected-warning {{'NSString' cannot bridge to 'CFErrorRef' (aka '__CFErrorRef *')}} \\
- // expected-error {{cast of Objective-C pointer type 'NSString *' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
+ (void)(CFErrorRef)str; // expected-warning {{'NSString' cannot bridge to 'CFErrorRef' (aka 'struct __CFErrorRef *')}} \\
+ // expected-error {{cast of Objective-C pointer type 'NSString *' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
- (void)(Class)cf; // expected-warning {{'CFErrorRef2' (aka '__CFErrorRef *') bridges to NSError, not 'Class'}} \\
- // expected-error {{cast of C pointer type 'CFErrorRef2' (aka '__CFErrorRef *') to Objective-C pointer type 'Class' requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
+ (void)(Class)cf; // expected-warning {{'CFErrorRef2' (aka 'struct __CFErrorRef *') bridges to NSError, not 'Class'}} \\
+ // expected-error {{cast of C pointer type 'CFErrorRef2' (aka 'struct __CFErrorRef *') to Objective-C pointer type 'Class' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef2' (aka '__CFErrorRef *') into ARC}}
- (void)(CFErrorRef)c; // expected-warning {{'__unsafe_unretained Class' cannot bridge to 'CFErrorRef' (aka '__CFErrorRef *')}} \
- // expected-error {{cast of Objective-C pointer type 'Class' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef2' (aka 'struct __CFErrorRef *') into ARC}}
+ (void)(CFErrorRef)c; // expected-warning {{'__unsafe_unretained Class' cannot bridge to 'CFErrorRef' (aka 'struct __CFErrorRef *')}} \
+ // expected-error {{cast of Objective-C pointer type 'Class' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
}
void Test3(CFErrorRef cf, NSError *ns) {
- (void)(id)cf; // expected-error {{cast of C pointer type 'CFErrorRef' (aka '__CFErrorRef *') to Objective-C pointer type 'id' requires a bridged cast}} \
+ (void)(id)cf; // expected-error {{cast of C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') to Objective-C pointer type 'id' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef' (aka '__CFErrorRef *') into ARC}}
- (void)(id<P1, P2>)cf; // expected-error {{cast of C pointer type 'CFErrorRef' (aka '__CFErrorRef *') to Objective-C pointer type 'id<P1,P2>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef' (aka 'struct __CFErrorRef *') into ARC}}
+ (void)(id<P1, P2>)cf; // expected-error {{cast of C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') to Objective-C pointer type 'id<P1,P2>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef' (aka '__CFErrorRef *') into ARC}}
- (void)(id<P1, P2, P4>)cf; // expected-warning {{'CFErrorRef' (aka '__CFErrorRef *') bridges to NSError, not 'id<P1,P2,P4>'}} \
- // expected-error {{cast of C pointer type 'CFErrorRef' (aka '__CFErrorRef *') to Objective-C pointer type 'id<P1,P2,P4>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef' (aka 'struct __CFErrorRef *') into ARC}}
+ (void)(id<P1, P2, P4>)cf; // expected-warning {{'CFErrorRef' (aka 'struct __CFErrorRef *') bridges to NSError, not 'id<P1,P2,P4>'}} \
+ // expected-error {{cast of C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') to Objective-C pointer type 'id<P1,P2,P4>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef' (aka '__CFErrorRef *') into ARC}}
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFErrorRef' (aka 'struct __CFErrorRef *') into ARC}}
}
void Test4(CFMyErrorRef cf) {
- (void)(id)cf; // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') to Objective-C pointer type 'id' requires a bridged cast}} \
+ (void)(id)cf; // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') to Objective-C pointer type 'id' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *') into ARC}}
- (void)(id<P1, P2>)cf; // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') to Objective-C pointer type 'id<P1,P2>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') into ARC}}
+ (void)(id<P1, P2>)cf; // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') to Objective-C pointer type 'id<P1,P2>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *') into ARC}}
- (void)(id<P1, P2, P3>)cf; // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') to Objective-C pointer type 'id<P1,P2,P3>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') into ARC}}
+ (void)(id<P1, P2, P3>)cf; // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') to Objective-C pointer type 'id<P1,P2,P3>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *') into ARC}}
- (void)(id<P2, P3>)cf; // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') to Objective-C pointer type 'id<P2,P3>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') into ARC}}
+ (void)(id<P2, P3>)cf; // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') to Objective-C pointer type 'id<P2,P3>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *') into ARC}}
- (void)(id<P1, P2, P4>)cf; // expected-warning {{'CFMyErrorRef' (aka '__CFMyErrorRef *') bridges to MyError, not 'id<P1,P2,P4>'}} \
- // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') to Objective-C pointer type 'id<P1,P2,P4>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') into ARC}}
+ (void)(id<P1, P2, P4>)cf; // expected-warning {{'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') bridges to MyError, not 'id<P1,P2,P4>'}} \
+ // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') to Objective-C pointer type 'id<P1,P2,P4>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *') into ARC}}
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') into ARC}}
}
void Test5(id<P1, P2, P3> P123, id ID, id<P1, P2, P3, P4> P1234, id<P1, P2> P12, id<P2, P3> P23) {
- (void)(CFErrorRef)ID; // expected-error {{cast of Objective-C pointer type 'id' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ (void)(CFErrorRef)ID; // expected-error {{cast of Objective-C pointer type 'id' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
- (void)(CFErrorRef)P123; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3>' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
+ (void)(CFErrorRef)P123; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3>' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
- (void)(CFErrorRef)P1234; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3,P4>' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
+ (void)(CFErrorRef)P1234; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3,P4>' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
- (void)(CFErrorRef)P12; // expected-error {{cast of Objective-C pointer type 'id<P1,P2>' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
+ (void)(CFErrorRef)P12; // expected-error {{cast of Objective-C pointer type 'id<P1,P2>' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
- (void)(CFErrorRef)P23; // expected-error {{cast of Objective-C pointer type 'id<P2,P3>' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
+ (void)(CFErrorRef)P23; // expected-error {{cast of Objective-C pointer type 'id<P2,P3>' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
}
void Test6(id<P1, P2, P3> P123, id ID, id<P1, P2, P3, P4> P1234, id<P1, P2> P12, id<P2, P3> P23) {
- (void)(CFMyErrorRef)ID; // expected-error {{cast of Objective-C pointer type 'id' to C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') requires a bridged cast}} \
+ (void)(CFMyErrorRef)ID; // expected-error {{cast of Objective-C pointer type 'id' to C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *')}}
- (void)(CFMyErrorRef)P123; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3>' to C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *')}}
+ (void)(CFMyErrorRef)P123; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3>' to C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *')}}
- (void)(CFMyErrorRef)P1234; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3,P4>' to C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *')}}
+ (void)(CFMyErrorRef)P1234; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3,P4>' to C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *')}}
- (void)(CFMyErrorRef)P12; // expected-error {{cast of Objective-C pointer type 'id<P1,P2>' to C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *')}}
+ (void)(CFMyErrorRef)P12; // expected-error {{cast of Objective-C pointer type 'id<P1,P2>' to C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *')}}
- (void)(CFMyErrorRef)P23; // expected-error {{cast of Objective-C pointer type 'id<P2,P3>' to C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *')}}
+ (void)(CFMyErrorRef)P23; // expected-error {{cast of Objective-C pointer type 'id<P2,P3>' to C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *')}}
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *')}}
}
typedef struct __attribute__ ((objc_bridge(MyPersonalError))) __CFMyPersonalErrorRef * CFMyPersonalErrorRef; // expected-note 1 {{declared here}}
@@ -159,52 +159,52 @@ typedef struct __attribute__ ((objc_bridge(MyPersonalError))) __CFMyPersonalErro
@end
void Test7(id<P1, P2, P3> P123, id ID, id<P1, P2, P3, P4> P1234, id<P1, P2> P12, id<P2, P3> P23) {
- (void)(CFMyPersonalErrorRef)ID; // expected-error {{cast of Objective-C pointer type 'id' to C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') requires a bridged cast}} \
+ (void)(CFMyPersonalErrorRef)ID; // expected-error {{cast of Objective-C pointer type 'id' to C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *')}}
- (void)(CFMyPersonalErrorRef)P123; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3>' to C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *')}}
+ (void)(CFMyPersonalErrorRef)P123; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3>' to C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *')}}
- (void)(CFMyPersonalErrorRef)P1234; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3,P4>' to C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *')}}
+ (void)(CFMyPersonalErrorRef)P1234; // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3,P4>' to C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *')}}
- (void)(CFMyPersonalErrorRef)P12; // expected-error {{cast of Objective-C pointer type 'id<P1,P2>' to C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *')}}
+ (void)(CFMyPersonalErrorRef)P12; // expected-error {{cast of Objective-C pointer type 'id<P1,P2>' to C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *')}}
- (void)(CFMyPersonalErrorRef)P23; // expected-error {{cast of Objective-C pointer type 'id<P2,P3>' to C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *')}}
+ (void)(CFMyPersonalErrorRef)P23; // expected-error {{cast of Objective-C pointer type 'id<P2,P3>' to C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *')}}
+ // expected-note {{use __bridge_retained to make an ARC object available as a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *')}}
}
void Test8(CFMyPersonalErrorRef cf) {
- (void)(id)cf; // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') to Objective-C pointer type 'id' requires a bridged cast}} \
+ (void)(id)cf; // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') to Objective-C pointer type 'id' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') into ARC}}
- (void)(id<P1>)cf; // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') to Objective-C pointer type 'id<P1>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') into ARC}}
+ (void)(id<P1>)cf; // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') to Objective-C pointer type 'id<P1>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') into ARC}}
- (void)(id<P1, P2>)cf; // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') to Objective-C pointer type 'id<P1,P2>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') into ARC}}
+ (void)(id<P1, P2>)cf; // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') to Objective-C pointer type 'id<P1,P2>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') into ARC}}
- (void)(id<P1, P2, P3>)cf; // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') to Objective-C pointer type 'id<P1,P2,P3>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') into ARC}}
+ (void)(id<P1, P2, P3>)cf; // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') to Objective-C pointer type 'id<P1,P2,P3>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') into ARC}}
- (void)(id<P1, P2, P3, P4>)cf; // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') to Objective-C pointer type 'id<P1,P2,P3,P4>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') into ARC}}
+ (void)(id<P1, P2, P3, P4>)cf; // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') to Objective-C pointer type 'id<P1,P2,P3,P4>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') into ARC}}
- (void)(id<P1, P2, P3, P4, P5>)cf; // expected-warning {{'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') bridges to MyPersonalError, not 'id<P1,P2,P3,P4,P5>'}} \
- // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') to Objective-C pointer type 'id<P1,P2,P3,P4,P5>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') into ARC}}
+ (void)(id<P1, P2, P3, P4, P5>)cf; // expected-warning {{'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') bridges to MyPersonalError, not 'id<P1,P2,P3,P4,P5>'}} \
+ // expected-error {{cast of C pointer type 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') to Objective-C pointer type 'id<P1,P2,P3,P4,P5>' requires a bridged cast}} \
// expected-note {{use __bridge to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka '__CFMyPersonalErrorRef *') into ARC}}
+ // expected-note {{use __bridge_transfer to transfer ownership of a +1 'CFMyPersonalErrorRef' (aka 'struct __CFMyPersonalErrorRef *') into ARC}}
}
void Test9(CFErrorRef2 cf, NSError *ns, NSString *str, Class c, CFUColor2Ref cf2) {
- (void)(__bridge NSString *)cf; // expected-warning {{'CFErrorRef2' (aka '__CFErrorRef *') bridges to NSError, not 'NSString'}}
+ (void)(__bridge NSString *)cf; // expected-warning {{'CFErrorRef2' (aka 'struct __CFErrorRef *') bridges to NSError, not 'NSString'}}
(void)(__bridge NSError *)cf; // okay
- (void)(__bridge MyError*)cf; // expected-warning {{'CFErrorRef2' (aka '__CFErrorRef *') bridges to NSError, not 'MyError'}}
+ (void)(__bridge MyError*)cf; // expected-warning {{'CFErrorRef2' (aka 'struct __CFErrorRef *') bridges to NSError, not 'MyError'}}
(void)(__bridge NSUColor *)cf2; // okay
(void)(__bridge CFErrorRef)ns; // okay
- (void)(__bridge CFErrorRef)str; // expected-warning {{'NSString' cannot bridge to 'CFErrorRef' (aka '__CFErrorRef *')}}
- (void)(__bridge Class)cf; // expected-warning {{'CFErrorRef2' (aka '__CFErrorRef *') bridges to NSError, not 'Class'}}
- (void)(__bridge CFErrorRef)c; // expected-warning {{'__unsafe_unretained Class' cannot bridge to 'CFErrorRef' (aka '__CFErrorRef *')}}
+ (void)(__bridge CFErrorRef)str; // expected-warning {{'NSString' cannot bridge to 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
+ (void)(__bridge Class)cf; // expected-warning {{'CFErrorRef2' (aka 'struct __CFErrorRef *') bridges to NSError, not 'Class'}}
+ (void)(__bridge CFErrorRef)c; // expected-warning {{'__unsafe_unretained Class' cannot bridge to 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
}
diff --git a/clang/test/SemaObjCXX/objcbridge-related-attribute.mm b/clang/test/SemaObjCXX/objcbridge-related-attribute.mm
index 66755f3..b835a10 100644
--- a/clang/test/SemaObjCXX/objcbridge-related-attribute.mm
+++ b/clang/test/SemaObjCXX/objcbridge-related-attribute.mm
@@ -13,14 +13,14 @@ typedef struct __attribute__((objc_bridge_related(NSColor,colorWithCGColor:,CGCo
NSColor *Test1(NSColor *nsColor, CGColorRef newColor) {
- nsColor = newColor; // expected-error {{'CGColorRef' (aka 'CGColor *') must be explicitly converted to 'NSColor *'; use '+colorWithCGColor:' method for this conversion}}
- NSColor *ns = newColor; // expected-error {{'CGColorRef' (aka 'CGColor *') must be explicitly converted to 'NSColor *'; use '+colorWithCGColor:' method for this conversion}}
- return newColor; // expected-error {{'CGColorRef' (aka 'CGColor *') must be explicitly converted to 'NSColor *'; use '+colorWithCGColor:' method for this conversion}}
+ nsColor = newColor; // expected-error {{'CGColorRef' (aka 'struct CGColor *') must be explicitly converted to 'NSColor *'; use '+colorWithCGColor:' method for this conversion}}
+ NSColor *ns = newColor; // expected-error {{'CGColorRef' (aka 'struct CGColor *') must be explicitly converted to 'NSColor *'; use '+colorWithCGColor:' method for this conversion}}
+ return newColor; // expected-error {{'CGColorRef' (aka 'struct CGColor *') must be explicitly converted to 'NSColor *'; use '+colorWithCGColor:' method for this conversion}}
}
CGColorRef Test2(NSColor *newColor, CGColorRef cgColor) {
- cgColor = newColor; // expected-error {{'NSColor *' must be explicitly converted to 'CGColorRef' (aka 'CGColor *'); use '-CGColor' method for this conversion}}
- CGColorRef cg = newColor; // expected-error {{'NSColor *' must be explicitly converted to 'CGColorRef' (aka 'CGColor *'); use '-CGColor' method for this conversion}}
- return newColor; // expected-error {{'NSColor *' must be explicitly converted to 'CGColorRef' (aka 'CGColor *'); use '-CGColor' method for this conversion}}
+ cgColor = newColor; // expected-error {{'NSColor *' must be explicitly converted to 'CGColorRef' (aka 'struct CGColor *'); use '-CGColor' method for this conversion}}
+ CGColorRef cg = newColor; // expected-error {{'NSColor *' must be explicitly converted to 'CGColorRef' (aka 'struct CGColor *'); use '-CGColor' method for this conversion}}
+ return newColor; // expected-error {{'NSColor *' must be explicitly converted to 'CGColorRef' (aka 'struct CGColor *'); use '-CGColor' method for this conversion}}
}
diff --git a/clang/test/SemaObjCXX/objcbridge-static-cast.mm b/clang/test/SemaObjCXX/objcbridge-static-cast.mm
index 6cb9137..ad939da 100644
--- a/clang/test/SemaObjCXX/objcbridge-static-cast.mm
+++ b/clang/test/SemaObjCXX/objcbridge-static-cast.mm
@@ -26,10 +26,10 @@ typedef XXX *CFUColor2Ref;
typedef struct __attribute__((objc_bridge(NSTesting))) __CFError *CFTestingRef; // expected-note {{declared here}}
id Test1(CFTestingRef cf) {
- return static_cast<NSString *>(cf); // expected-error {{CF object of type 'CFTestingRef' (aka '__CFError *') is bridged to 'NSTesting', which is not an Objective-C class}} \
- // expected-error {{cast of C pointer type 'CFTestingRef' (aka '__CFError *') to Objective-C pointer type 'NSString *' requires a bridged cast}} \
+ return static_cast<NSString *>(cf); // expected-error {{CF object of type 'CFTestingRef' (aka 'struct __CFError *') is bridged to 'NSTesting', which is not an Objective-C class}} \
+ // expected-error {{cast of C pointer type 'CFTestingRef' (aka 'struct __CFError *') to Objective-C pointer type 'NSString *' requires a bridged cast}} \
// expected-note {{use __bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFTestingRef' (aka '__CFError *') into ARC}}
+ // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFTestingRef' (aka 'struct __CFError *') into ARC}}
}
typedef CFErrorRef CFErrorRef1;
@@ -52,84 +52,84 @@ typedef CFErrorRef1 CFErrorRef2; // expected-note 1 {{declared here}}
@class NSString;
void Test2(CFErrorRef2 cf, NSError *ns, NSString *str, Class c, CFUColor2Ref cf2) {
- (void)static_cast<NSString *>(cf); // expected-warning {{'CFErrorRef2' (aka '__CFErrorRef *') bridges to NSError, not 'NSString'}} \
- // expected-error {{cast of C pointer type 'CFErrorRef2' (aka '__CFErrorRef *') to Objective-C pointer type 'NSString *' requires a bridged cast}} \
+ (void)static_cast<NSString *>(cf); // expected-warning {{'CFErrorRef2' (aka 'struct __CFErrorRef *') bridges to NSError, not 'NSString'}} \
+ // expected-error {{cast of C pointer type 'CFErrorRef2' (aka 'struct __CFErrorRef *') to Objective-C pointer type 'NSString *' requires a bridged cast}} \
// expected-note {{__bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFErrorRef2' (aka '__CFErrorRef *') into ARC}}
- (void)static_cast<NSError *>(cf); // expected-error {{cast of C pointer type 'CFErrorRef2' (aka '__CFErrorRef *') to Objective-C pointer type 'NSError *' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFErrorRef2' (aka 'struct __CFErrorRef *') into ARC}}
+ (void)static_cast<NSError *>(cf); // expected-error {{cast of C pointer type 'CFErrorRef2' (aka 'struct __CFErrorRef *') to Objective-C pointer type 'NSError *' requires a bridged cast}} \
// expected-note {{use __bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFErrorRef2' (aka '__CFErrorRef *') into ARC}}
- (void)static_cast<MyError*>(cf); // expected-error {{cast of C pointer type 'CFErrorRef2' (aka '__CFErrorRef *') to Objective-C pointer type 'MyError *' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFErrorRef2' (aka 'struct __CFErrorRef *') into ARC}}
+ (void)static_cast<MyError*>(cf); // expected-error {{cast of C pointer type 'CFErrorRef2' (aka 'struct __CFErrorRef *') to Objective-C pointer type 'MyError *' requires a bridged cast}} \
// expected-note {{use __bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFErrorRef2' (aka '__CFErrorRef *') into ARC}} \
- // expected-warning {{'CFErrorRef2' (aka '__CFErrorRef *') bridges to NSError, not 'MyError'}}
- (void)static_cast<NSUColor *>(cf2); // expected-error {{cast of C pointer type 'CFUColor2Ref' (aka '__CFUPrimeColor *') to Objective-C pointer type 'NSUColor *' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFErrorRef2' (aka 'struct __CFErrorRef *') into ARC}} \
+ // expected-warning {{'CFErrorRef2' (aka 'struct __CFErrorRef *') bridges to NSError, not 'MyError'}}
+ (void)static_cast<NSUColor *>(cf2); // expected-error {{cast of C pointer type 'CFUColor2Ref' (aka 'union __CFUPrimeColor *') to Objective-C pointer type 'NSUColor *' requires a bridged cast}} \
// expected-note {{use __bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFUColor2Ref' (aka '__CFUPrimeColor *') into ARC}}
- (void)static_cast<CFErrorRef>(ns); // expected-error {{cast of Objective-C pointer type 'NSError *' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFUColor2Ref' (aka 'union __CFUPrimeColor *') into ARC}}
+ (void)static_cast<CFErrorRef>(ns); // expected-error {{cast of Objective-C pointer type 'NSError *' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained with C-style cast to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
- (void)static_cast<CFErrorRef>(str); // expected-warning {{'NSString' cannot bridge to 'CFErrorRef' (aka '__CFErrorRef *')}} \\
- // expected-error {{cast of Objective-C pointer type 'NSString *' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained with C-style cast to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
+ (void)static_cast<CFErrorRef>(str); // expected-warning {{'NSString' cannot bridge to 'CFErrorRef' (aka 'struct __CFErrorRef *')}} \\
+ // expected-error {{cast of Objective-C pointer type 'NSString *' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained with C-style cast to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
- (void)static_cast<Class>(cf); // expected-warning {{'CFErrorRef2' (aka '__CFErrorRef *') bridges to NSError, not 'Class'}} \\
- // expected-error {{cast of C pointer type 'CFErrorRef2' (aka '__CFErrorRef *') to Objective-C pointer type 'Class' requires a bridged cast}} \
+ // expected-note {{use __bridge_retained with C-style cast to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
+ (void)static_cast<Class>(cf); // expected-warning {{'CFErrorRef2' (aka 'struct __CFErrorRef *') bridges to NSError, not 'Class'}} \\
+ // expected-error {{cast of C pointer type 'CFErrorRef2' (aka 'struct __CFErrorRef *') to Objective-C pointer type 'Class' requires a bridged cast}} \
// expected-note {{use __bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFErrorRef2' (aka '__CFErrorRef *') into ARC}}
- (void)static_cast<CFErrorRef>(c); // expected-warning {{'Class' cannot bridge to 'CFErrorRef' (aka '__CFErrorRef *}} \\
- // expected-error {{cast of Objective-C pointer type 'Class' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFErrorRef2' (aka 'struct __CFErrorRef *') into ARC}}
+ (void)static_cast<CFErrorRef>(c); // expected-warning {{'Class' cannot bridge to 'CFErrorRef' (aka 'struct __CFErrorRef *'}} \\
+ // expected-error {{cast of Objective-C pointer type 'Class' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained with C-style cast to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
+ // expected-note {{use __bridge_retained with C-style cast to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
}
void Test3(CFErrorRef cf, NSError *ns) {
- (void)static_cast<id>(cf); // expected-error {{cast of C pointer type 'CFErrorRef' (aka '__CFErrorRef *') to Objective-C pointer type 'id' requires a bridged cast}} \
+ (void)static_cast<id>(cf); // expected-error {{cast of C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') to Objective-C pointer type 'id' requires a bridged cast}} \
// expected-note {{use __bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFErrorRef' (aka '__CFErrorRef *') into ARC}}
- (void)static_cast< id<P1, P2> >(cf); // expected-error {{cast of C pointer type 'CFErrorRef' (aka '__CFErrorRef *') to Objective-C pointer type 'id<P1,P2>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFErrorRef' (aka 'struct __CFErrorRef *') into ARC}}
+ (void)static_cast< id<P1, P2> >(cf); // expected-error {{cast of C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') to Objective-C pointer type 'id<P1,P2>' requires a bridged cast}} \
// expected-note {{use __bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFErrorRef' (aka '__CFErrorRef *') into ARC}}
- (void)static_cast< id<P1, P2, P4> >(cf); // expected-warning {{'CFErrorRef' (aka '__CFErrorRef *') bridges to NSError, not 'id<P1,P2,P4>'}} \
- // expected-error {{cast of C pointer type 'CFErrorRef' (aka '__CFErrorRef *') to Objective-C pointer type 'id<P1,P2,P4>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFErrorRef' (aka 'struct __CFErrorRef *') into ARC}}
+ (void)static_cast< id<P1, P2, P4> >(cf); // expected-warning {{'CFErrorRef' (aka 'struct __CFErrorRef *') bridges to NSError, not 'id<P1,P2,P4>'}} \
+ // expected-error {{cast of C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') to Objective-C pointer type 'id<P1,P2,P4>' requires a bridged cast}} \
// expected-note {{use __bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFErrorRef' (aka '__CFErrorRef *') into ARC}}
+ // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFErrorRef' (aka 'struct __CFErrorRef *') into ARC}}
}
void Test4(CFMyErrorRef cf) {
- (void)static_cast<id>(cf); // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') to Objective-C pointer type 'id' requires a bridged cast}} \
+ (void)static_cast<id>(cf); // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') to Objective-C pointer type 'id' requires a bridged cast}} \
// expected-note {{use __bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *') into ARC}}
- (void)static_cast< id<P1, P2> >(cf); // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') to Objective-C pointer type 'id<P1,P2>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') into ARC}}
+ (void)static_cast< id<P1, P2> >(cf); // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') to Objective-C pointer type 'id<P1,P2>' requires a bridged cast}} \
// expected-note {{use __bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *') into ARC}}
- (void)static_cast< id<P1, P2, P3> >(cf); // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') to Objective-C pointer type 'id<P1,P2,P3>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') into ARC}}
+ (void)static_cast< id<P1, P2, P3> >(cf); // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') to Objective-C pointer type 'id<P1,P2,P3>' requires a bridged cast}} \
// expected-note {{use __bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *') into ARC}}
- (void)static_cast< id<P2, P3> >(cf); // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') to Objective-C pointer type 'id<P2,P3>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') into ARC}}
+ (void)static_cast< id<P2, P3> >(cf); // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') to Objective-C pointer type 'id<P2,P3>' requires a bridged cast}} \
// expected-note {{use __bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *') into ARC}}
- (void)static_cast< id<P1, P2, P4> >(cf); // expected-warning {{'CFMyErrorRef' (aka '__CFMyErrorRef *') bridges to MyError, not 'id<P1,P2,P4>'}} \
- // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka '__CFMyErrorRef *') to Objective-C pointer type 'id<P1,P2,P4>' requires a bridged cast}} \
+ // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') into ARC}}
+ (void)static_cast< id<P1, P2, P4> >(cf); // expected-warning {{'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') bridges to MyError, not 'id<P1,P2,P4>'}} \
+ // expected-error {{cast of C pointer type 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') to Objective-C pointer type 'id<P1,P2,P4>' requires a bridged cast}} \
// expected-note {{use __bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFMyErrorRef' (aka '__CFMyErrorRef *') into ARC}}
+ // expected-note {{use __bridge_transfer with C-style cast to transfer ownership of a +1 'CFMyErrorRef' (aka 'struct __CFMyErrorRef *') into ARC}}
}
void Test5(id<P1, P2, P3> P123, id ID, id<P1, P2, P3, P4> P1234, id<P1, P2> P12, id<P2, P3> P23) {
- (void)static_cast<CFErrorRef>(ID); // expected-error {{cast of Objective-C pointer type 'id' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ (void)static_cast<CFErrorRef>(ID); // expected-error {{cast of Objective-C pointer type 'id' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained with C-style cast to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
- (void)static_cast<CFErrorRef>(P123); // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3>' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained with C-style cast to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
+ (void)static_cast<CFErrorRef>(P123); // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3>' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained with C-style cast to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
- (void)static_cast<CFErrorRef>(P1234); // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3,P4>' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained with C-style cast to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
+ (void)static_cast<CFErrorRef>(P1234); // expected-error {{cast of Objective-C pointer type 'id<P1,P2,P3,P4>' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained with C-style cast to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
- (void)static_cast<CFErrorRef>(P12); // expected-error {{cast of Objective-C pointer type 'id<P1,P2>' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained with C-style cast to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
+ (void)static_cast<CFErrorRef>(P12); // expected-error {{cast of Objective-C pointer type 'id<P1,P2>' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained with C-style cast to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
- (void)static_cast<CFErrorRef>(P23); // expected-error {{cast of Objective-C pointer type 'id<P2,P3>' to C pointer type 'CFErrorRef' (aka '__CFErrorRef *') requires a bridged cast}} \
+ // expected-note {{use __bridge_retained with C-style cast to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
+ (void)static_cast<CFErrorRef>(P23); // expected-error {{cast of Objective-C pointer type 'id<P2,P3>' to C pointer type 'CFErrorRef' (aka 'struct __CFErrorRef *') requires a bridged cast}} \
// expected-note {{use __bridge with C-style cast to convert directly (no change in ownership)}} \
- // expected-note {{use __bridge_retained with C-style cast to make an ARC object available as a +1 'CFErrorRef' (aka '__CFErrorRef *')}}
+ // expected-note {{use __bridge_retained with C-style cast to make an ARC object available as a +1 'CFErrorRef' (aka 'struct __CFErrorRef *')}}
}
diff --git a/clang/test/SemaOpenACC/combined-construct-num_workers-ast.cpp b/clang/test/SemaOpenACC/combined-construct-num_workers-ast.cpp
index a04fcdd..cdd6bc7 100644
--- a/clang/test/SemaOpenACC/combined-construct-num_workers-ast.cpp
+++ b/clang/test/SemaOpenACC/combined-construct-num_workers-ast.cpp
@@ -66,7 +66,7 @@ void NormalUses() {
// CHECK-NEXT: ImplicitCastExpr{{.*}} 'int' <UserDefinedConversion>
// CHECK-NEXT: CXXMemberCallExpr{{.*}}'int'
// CHECK-NEXT: MemberExpr{{.*}} '<bound member function type>' .operator int
- // CHECK-NEXT: DeclRefExpr{{.*}} 'struct CorrectConvert':'CorrectConvert' lvalue Var
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'struct CorrectConvert' lvalue Var
// CHECK-NEXT: ForStmt
// CHECK: NullStmt
}
diff --git a/clang/test/SemaOpenACC/compute-construct-intexpr-clause-ast.cpp b/clang/test/SemaOpenACC/compute-construct-intexpr-clause-ast.cpp
index 58476df..63c5cde 100644
--- a/clang/test/SemaOpenACC/compute-construct-intexpr-clause-ast.cpp
+++ b/clang/test/SemaOpenACC/compute-construct-intexpr-clause-ast.cpp
@@ -73,7 +73,7 @@ void NormalUses() {
// CHECK-NEXT: ImplicitCastExpr{{.*}} 'int' <UserDefinedConversion>
// CHECK-NEXT: CXXMemberCallExpr{{.*}}'int'
// CHECK-NEXT: MemberExpr{{.*}} '<bound member function type>' .operator int
- // CHECK-NEXT: DeclRefExpr{{.*}} 'struct CorrectConvert':'CorrectConvert' lvalue Var
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'struct CorrectConvert' lvalue Var
// CHECK-NEXT: WhileStmt
// CHECK-NEXT: CXXBoolLiteralExpr
// CHECK-NEXT: CompoundStmt
diff --git a/clang/test/SemaOpenACC/compute-construct-varlist-ast.cpp b/clang/test/SemaOpenACC/compute-construct-varlist-ast.cpp
index e3c4184..2d2b20f 100644
--- a/clang/test/SemaOpenACC/compute-construct-varlist-ast.cpp
+++ b/clang/test/SemaOpenACC/compute-construct-varlist-ast.cpp
@@ -102,7 +102,7 @@ void NormalUses(float *PointerParam) {
#pragma acc parallel copy(GlobalArray) pcopy(always: PointerParam[Global]) present_or_copy(alwaysin, alwaysout: Global)
while(true);
// CHECK-NEXT: OpenACCComputeConstruct{{.*}} parallel
- // CHECK-NEXT: copy clause
+ // CHECK-NEXT: copy clause
// CHECK-NEXT: DeclRefExpr{{.*}}'short[5]' lvalue Var{{.*}}'GlobalArray' 'short[5]'
// CHECK-NEXT: pcopy clause modifiers: always
// CHECK-NEXT: ArraySubscriptExpr{{.*}}'float' lvalue
@@ -649,7 +649,7 @@ void TemplUses(T t, U u, T*PointerParam) {
struct S {
// CHECK-NEXT: CXXRecordDecl{{.*}} struct S definition
- // CHECK: CXXRecordDecl{{.*}} implicit struct S
+ // CHECK: CXXRecordDecl{{.*}} implicit {{.*}}struct S{{$}}
int ThisMember;
// CHECK-NEXT: FieldDecl{{.*}} ThisMember 'int'
int *ThisMemberPtr;
diff --git a/clang/test/SemaTemplate/aggregate-deduction-candidate.cpp b/clang/test/SemaTemplate/aggregate-deduction-candidate.cpp
index 2ecd269..bf153b2 100644
--- a/clang/test/SemaTemplate/aggregate-deduction-candidate.cpp
+++ b/clang/test/SemaTemplate/aggregate-deduction-candidate.cpp
@@ -15,7 +15,7 @@ namespace Basic {
// CHECK-LABEL: Dumping Basic::<deduction guide for A>:
// CHECK: FunctionTemplateDecl {{.*}} implicit <deduction guide for A>
// CHECK: |-TemplateTypeParmDecl {{.*}} referenced class depth 0 index 0 T
- // CHECK: |-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for A> 'auto (T, T) -> A<T>'
+ // CHECK: |-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for A> 'auto (T, T) -> Basic::A<T>'
// CHECK: | |-ParmVarDecl {{.*}} 'T'
// CHECK: | `-ParmVarDecl {{.*}} 'T'
// CHECK: `-CXXDeductionGuideDecl {{.*}} implicit used <deduction guide for A> 'auto (double, double) -> Basic::A<double>'
@@ -23,8 +23,8 @@ namespace Basic {
// CHECK: | `-BuiltinType {{.*}} 'double'
// CHECK: |-ParmVarDecl {{.*}} 'double'
// CHECK: `-ParmVarDecl {{.*}} 'double'
- // CHECK: FunctionProtoType {{.*}} 'auto (T, T) -> A<T>' dependent trailing_return cdecl
- // CHECK: |-InjectedClassNameType {{.*}} 'A<T>' dependent
+ // CHECK: FunctionProtoType {{.*}} 'auto (T, T) -> Basic::A<T>' dependent trailing_return cdecl
+ // CHECK: |-InjectedClassNameType {{.*}} 'Basic::A<T>' dependent
// CHECK: | `-CXXRecord {{.*}} 'A'
// CHECK: |-TemplateTypeParmType {{.*}} 'T' dependent depth 0 index 0
// CHECK: | `-TemplateTypeParm {{.*}} 'T'
@@ -66,7 +66,7 @@ namespace Basic {
// CHECK-LABEL: Dumping Basic::<deduction guide for C>:
// CHECK: FunctionTemplateDecl {{.*}} implicit <deduction guide for C>
// CHECK: |-TemplateTypeParmDecl {{.*}} referenced typename depth 0 index 0 T
- // CHECK: |-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for C> 'auto (S<T>, T) -> C<T>'
+ // CHECK: |-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for C> 'auto (S<T>, T) -> Basic::C<T>'
// CHECK: | |-ParmVarDecl {{.*}} 'S<T>'
// CHECK: | `-ParmVarDecl {{.*}} 'T'
// CHECK: `-CXXDeductionGuideDecl {{.*}} implicit used <deduction guide for C> 'auto (S<int>, int) -> Basic::C<int>'
@@ -74,25 +74,24 @@ namespace Basic {
// CHECK: | `-BuiltinType {{.*}} 'int'
// CHECK: |-ParmVarDecl {{.*}} 'S<int>':'Basic::S<int>'
// CHECK: `-ParmVarDecl {{.*}} 'int'
- // CHECK: FunctionProtoType {{.*}} 'auto (S<T>, T) -> C<T>' dependent trailing_return cdecl
- // CHECK: |-InjectedClassNameType {{.*}} 'C<T>' dependent
+ // CHECK: FunctionProtoType {{.*}} 'auto (S<T>, T) -> Basic::C<T>' dependent trailing_return cdecl
+ // CHECK: |-InjectedClassNameType {{.*}} 'Basic::C<T>' dependent
// CHECK: | `-CXXRecord {{.*}} 'C'
- // CHECK: |-ElaboratedType {{.*}} 'S<T>' sugar dependent
- // CHECK: | `-TemplateSpecializationType {{.*}} 'S<T>' dependent
- // CHECK: | `-TemplateArgument type 'T'
- // CHECK: | `-TemplateTypeParmType {{.*}} 'T' dependent depth 0 index 0
- // CHECK: | `-TemplateTypeParm {{.*}} 'T'
+ // CHECK: |-TemplateSpecializationType {{.*}} 'S<T>' dependent
+ // CHECK: | `-TemplateArgument type 'T'
+ // CHECK: | `-TemplateTypeParmType {{.*}} 'T' dependent depth 0 index 0
+ // CHECK: | `-TemplateTypeParm {{.*}} 'T'
// CHECK: `-TemplateTypeParmType {{.*}} 'T' dependent depth 0 index 0
// CHECK: `-TemplateTypeParm {{.*}} 'T'
// CHECK-LABEL: Dumping Basic::<deduction guide for D>:
// CHECK: FunctionTemplateDecl {{.*}} implicit <deduction guide for D>
// CHECK: |-TemplateTypeParmDecl {{.*}} referenced typename depth 0 index 0 T
- // CHECK: `-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for D> 'auto (int, int) -> D<T>'
+ // CHECK: `-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for D> 'auto (int, int) -> Basic::D<T>'
// CHECK: |-ParmVarDecl {{.*}} 'int'
// CHECK: `-ParmVarDecl {{.*}} 'int'
- // CHECK: FunctionProtoType {{.*}} 'auto (int, int) -> D<T>' dependent trailing_return cdecl
- // CHECK: |-InjectedClassNameType {{.*}} 'D<T>' dependent
+ // CHECK: FunctionProtoType {{.*}} 'auto (int, int) -> Basic::D<T>' dependent trailing_return cdecl
+ // CHECK: |-InjectedClassNameType {{.*}} 'Basic::D<T>' dependent
// CHECK: | `-CXXRecord {{.*}} 'D'
// CHECK: |-SubstTemplateTypeParmType {{.*}} 'int' sugar typename depth 0 index 0 T
// CHECK: | |-ClassTemplateSpecialization {{.*}} 'S'
@@ -113,7 +112,7 @@ namespace Basic {
// CHECK-LABEL: Dumping Basic::<deduction guide for E>:
// CHECK: FunctionTemplateDecl {{.*}} implicit <deduction guide for E>
// CHECK: |-TemplateTypeParmDecl {{.*}} referenced typename depth 0 index 0 T
- // CHECK: |-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for E> 'auto (T, decltype(t)) -> E<T>'
+ // CHECK: |-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for E> 'auto (T, decltype(t)) -> Basic::E<T>'
// CHECK: | |-ParmVarDecl {{.*}} 'T'
// CHECK: | `-ParmVarDecl {{.*}} 'decltype(t)'
// CHECK: `-CXXDeductionGuideDecl {{.*}} implicit used <deduction guide for E> 'auto (int, decltype(t)) -> Basic::E<int>'
@@ -121,8 +120,8 @@ namespace Basic {
// CHECK: | `-BuiltinType {{.*}} 'int'
// CHECK: |-ParmVarDecl {{.*}} 'int'
// CHECK: `-ParmVarDecl {{.*}} 'decltype(t)':'int'
- // CHECK: FunctionProtoType {{.*}} 'auto (T, decltype(t)) -> E<T>' dependent trailing_return cdecl
- // CHECK: |-InjectedClassNameType {{.*}} 'E<T>' dependent
+ // CHECK: FunctionProtoType {{.*}} 'auto (T, decltype(t)) -> Basic::E<T>' dependent trailing_return cdecl
+ // CHECK: |-InjectedClassNameType {{.*}} 'Basic::E<T>' dependent
// CHECK: | `-CXXRecord {{.*}} 'E'
// CHECK: |-TemplateTypeParmType {{.*}} 'T' dependent depth 0 index 0
// CHECK: | `-TemplateTypeParm {{.*}} 'T'
@@ -145,7 +144,7 @@ namespace Basic {
// CHECK-LABEL: Dumping Basic::<deduction guide for F>:
// CHECK: FunctionTemplateDecl {{.*}} implicit <deduction guide for F>
// CHECK: |-TemplateTypeParmDecl {{.*}} referenced typename depth 0 index 0 T
- // CHECK: |-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for F> 'auto (typename I<T>::type, T) -> F<T>'
+ // CHECK: |-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for F> 'auto (typename I<T>::type, T) -> Basic::F<T>'
// CHECK: | |-ParmVarDecl {{.*}} 'typename I<T>::type'
// CHECK: | `-ParmVarDecl {{.*}} 'T'
// CHECK: `-CXXDeductionGuideDecl {{.*}} implicit used <deduction guide for F> 'auto (typename I<int>::type, int) -> Basic::F<int>'
@@ -153,8 +152,8 @@ namespace Basic {
// CHECK: | `-BuiltinType {{.*}} 'int'
// CHECK: |-ParmVarDecl {{.*}} 'typename I<int>::type':'int'
// CHECK: `-ParmVarDecl {{.*}} 'int'
- // CHECK: FunctionProtoType {{.*}} 'auto (typename I<T>::type, T) -> F<T>' dependent trailing_return cdecl
- // CHECK: |-InjectedClassNameType {{.*}} 'F<T>' dependent
+ // CHECK: FunctionProtoType {{.*}} 'auto (typename I<T>::type, T) -> Basic::F<T>' dependent trailing_return cdecl
+ // CHECK: |-InjectedClassNameType {{.*}} 'Basic::F<T>' dependent
// CHECK: | `-CXXRecord {{.*}} 'F'
// CHECK: |-DependentNameType {{.*}} 'typename I<T>::type' dependent
// CHECK: `-TemplateTypeParmType {{.*}} 'T' dependent depth 0 index 0
@@ -181,15 +180,15 @@ namespace Array {
// CHECK: FunctionTemplateDecl {{.*}} implicit <deduction guide for A>
// CHECK: |-TemplateTypeParmDecl {{.*}} referenced typename depth 0 index 0 T
// CHECK: |-NonTypeTemplateParmDecl {{.*}} 'size_t':'unsigned {{.*}}' depth 0 index 1 N
- // CHECK: |-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for A> 'auto (T (&&)[N]) -> A<T, N>'
+ // CHECK: |-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for A> 'auto (T (&&)[N]) -> Array::A<T, N>'
// CHECK: | `-ParmVarDecl {{.*}} 'T (&&)[N]'
// CHECK: `-CXXDeductionGuideDecl {{.*}} implicit used <deduction guide for A> 'auto (int (&&)[3]) -> Array::A<int, 3>'
// CHECK: |-TemplateArgument type 'int'
// CHECK: | `-BuiltinType {{.*}} 'int'
// CHECK: |-TemplateArgument integral '3UL'
// CHECK: `-ParmVarDecl {{.*}} 'int (&&)[3]'
- // CHECK: FunctionProtoType {{.*}} 'auto (T (&&)[N]) -> A<T, N>' dependent trailing_return cdecl
- // CHECK: |-InjectedClassNameType {{.*}} 'A<T, N>' dependent
+ // CHECK: FunctionProtoType {{.*}} 'auto (T (&&)[N]) -> Array::A<T, N>' dependent trailing_return cdecl
+ // CHECK: |-InjectedClassNameType {{.*}} 'Array::A<T, N>' dependent
// CHECK: | `-CXXRecord {{.*}} 'A'
// CHECK: `-RValueReferenceType {{.*}} 'T (&&)[N]' dependent
// CHECK: `-DependentSizedArrayType {{.*}} 'T[N]' dependent
@@ -201,15 +200,15 @@ namespace Array {
// CHECK: FunctionTemplateDecl {{.*}} implicit <deduction guide for A>
// CHECK: |-TemplateTypeParmDecl {{.*}} referenced typename depth 0 index 0 T
// CHECK: |-NonTypeTemplateParmDecl {{.*}} 'size_t':'unsigned {{.*}}' depth 0 index 1 N
- // CHECK: |-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for A> 'auto (const T (&)[N]) -> A<T, N>'
+ // CHECK: |-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for A> 'auto (const T (&)[N]) -> Array::A<T, N>'
// CHECK: | `-ParmVarDecl {{.*}} 'const T (&)[N]'
// CHECK: `-CXXDeductionGuideDecl {{.*}} implicit used <deduction guide for A> 'auto (const char (&)[5]) -> Array::A<char, 5>'
// CHECK: |-TemplateArgument type 'char'
// CHECK: | `-BuiltinType {{.*}} 'char'
// CHECK: |-TemplateArgument integral '5UL'
// CHECK: `-ParmVarDecl {{.*}} 'const char (&)[5]'
- // CHECK: FunctionProtoType {{.*}} 'auto (const T (&)[N]) -> A<T, N>' dependent trailing_return cdecl
- // CHECK: |-InjectedClassNameType {{.*}} 'A<T, N>' dependent
+ // CHECK: FunctionProtoType {{.*}} 'auto (const T (&)[N]) -> Array::A<T, N>' dependent trailing_return cdecl
+ // CHECK: |-InjectedClassNameType {{.*}} 'Array::A<T, N>' dependent
// CHECK: | `-CXXRecord {{.*}} 'A'
// CHECK: `-LValueReferenceType {{.*}} 'const T (&)[N]' dependent
// CHECK: `-QualType {{.*}} 'const T[N]' const
@@ -232,7 +231,7 @@ namespace BraceElision {
// CHECK-LABEL: Dumping BraceElision::<deduction guide for A>:
// CHECK: FunctionTemplateDecl {{.*}} implicit <deduction guide for A>
// CHECK: |-TemplateTypeParmDecl {{.*}} referenced typename depth 0 index 0 T
- // CHECK: |-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for A> 'auto (T, T) -> A<T>'
+ // CHECK: |-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for A> 'auto (T, T) -> BraceElision::A<T>'
// CHECK: | |-ParmVarDecl {{.*}} 'T'
// CHECK: | `-ParmVarDecl {{.*}} 'T'
// CHECK: `-CXXDeductionGuideDecl {{.*}} implicit used <deduction guide for A> 'auto (int, int) -> BraceElision::A<int>'
@@ -240,8 +239,8 @@ namespace BraceElision {
// CHECK: | `-BuiltinType {{.*}} 'int'
// CHECK: |-ParmVarDecl {{.*}} 'int'
// CHECK: `-ParmVarDecl {{.*}} 'int'
- // CHECK: FunctionProtoType {{.*}} 'auto (T, T) -> A<T>' dependent trailing_return cdecl
- // CHECK: |-InjectedClassNameType {{.*}} 'A<T>' dependent
+ // CHECK: FunctionProtoType {{.*}} 'auto (T, T) -> BraceElision::A<T>' dependent trailing_return cdecl
+ // CHECK: |-InjectedClassNameType {{.*}} 'BraceElision::A<T>' dependent
// CHECK: | `-CXXRecord {{.*}} 'A'
// CHECK: |-TemplateTypeParmType {{.*}} 'T' dependent depth 0 index 0
// CHECK: | `-TemplateTypeParm {{.*}} 'T'
@@ -266,7 +265,7 @@ namespace TrailingPack {
// CHECK-LABEL: Dumping TrailingPack::<deduction guide for A>:
// CHECK: FunctionTemplateDecl {{.*}} implicit <deduction guide for A>
// CHECK: |-TemplateTypeParmDecl {{.*}} referenced typename depth 0 index 0 ... T
- // CHECK: |-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for A> 'auto (T...) -> A<T...>'
+ // CHECK: |-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for A> 'auto (T...) -> TrailingPack::A<T...>'
// CHECK: | `-ParmVarDecl {{.*}} 'T...' pack
// CHECK: `-CXXDeductionGuideDecl {{.*}} implicit used <deduction guide for A>
// CHECK-SAME: 'auto (TrailingPack::(lambda at {{.*}}), TrailingPack::(lambda at {{.*}})) ->
@@ -274,14 +273,14 @@ namespace TrailingPack {
// CHECK: |-TemplateArgument pack
// CHECK: | |-TemplateArgument type 'TrailingPack::(lambda at {{.*}})'
// CHECK: | | `-RecordType {{.*}} 'TrailingPack::(lambda at {{.*}})'
- // CHECK: | | `-CXXRecord {{.*}} <line:262:5>
+ // CHECK: | | `-CXXRecord {{.*}} <line:261:5>
// CHECK: | `-TemplateArgument type 'TrailingPack::(lambda at {{.*}})'
// CHECK: | `-RecordType {{.*}} 'TrailingPack::(lambda at {{.*}})'
- // CHECK: | `-CXXRecord {{.*}} <line:263:5>
+ // CHECK: | `-CXXRecord {{.*}} <line:262:5>
// CHECK: |-ParmVarDecl {{.*}} 'TrailingPack::(lambda at {{.*}})'
// CHECK: `-ParmVarDecl {{.*}} 'TrailingPack::(lambda at {{.*}})'
- // CHECK: FunctionProtoType {{.*}} 'auto (T...) -> A<T...>' dependent trailing_return cdecl
- // CHECK: |-InjectedClassNameType {{.*}} 'A<T...>' dependent
+ // CHECK: FunctionProtoType {{.*}} 'auto (T...) -> TrailingPack::A<T...>' dependent trailing_return cdecl
+ // CHECK: |-InjectedClassNameType {{.*}} 'TrailingPack::A<T...>' dependent
// CHECK: | `-CXXRecord {{.*}} 'A'
// CHECK: `-PackExpansionType {{.*}} 'T...' dependent
// CHECK: `-TemplateTypeParmType {{.*}} 'T' dependent contains_unexpanded_pack depth 0 index 0 pack
@@ -327,7 +326,7 @@ namespace DeduceArity {
// CHECK-LABEL: Dumping DeduceArity::<deduction guide for F>:
// CHECK: FunctionTemplateDecl {{.*}} implicit <deduction guide for F>
// CHECK: |-TemplateTypeParmDecl {{.*}} referenced typename depth 0 index 0 ... T
- // CHECK: |-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for F> 'auto (Types<T...>, T...) -> F<T...>'
+ // CHECK: |-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for F> 'auto (Types<T...>, T...) -> DeduceArity::F<T...>'
// CHECK: | |-ParmVarDecl {{.*}} 'Types<T...>'
// CHECK: | `-ParmVarDecl {{.*}} 'T...' pack
// CHECK: |-CXXDeductionGuideDecl {{.*}} implicit used <deduction guide for F>
@@ -354,15 +353,14 @@ namespace DeduceArity {
// CHECK: | `-CXXRecord {{.*}} 'X'
// CHECK: |-ParmVarDecl {{.*}} 'Types<DeduceArity::X>':'DeduceArity::Types<DeduceArity::X>'
// CHECK: `-ParmVarDecl {{.*}} 'DeduceArity::X'
- // CHECK: FunctionProtoType {{.*}} 'auto (Types<T...>, T...) -> F<T...>' dependent trailing_return cdecl
- // CHECK: |-InjectedClassNameType {{.*}} 'F<T...>' dependent
+ // CHECK: FunctionProtoType {{.*}} 'auto (Types<T...>, T...) -> DeduceArity::F<T...>' dependent trailing_return cdecl
+ // CHECK: |-InjectedClassNameType {{.*}} 'DeduceArity::F<T...>' dependent
// CHECK: | `-CXXRecord {{.*}} 'F'
- // CHECK: |-ElaboratedType {{.*}} 'Types<T...>' sugar dependent
- // CHECK: | `-TemplateSpecializationType {{.*}} 'Types<T...>' dependent
- // CHECK: | `-TemplateArgument type 'T...'
- // CHECK: | `-PackExpansionType {{.*}} 'T...' dependent
- // CHECK: | `-TemplateTypeParmType {{.*}} 'T' dependent contains_unexpanded_pack depth 0 index 0 pack
- // CHECK: | `-TemplateTypeParm {{.*}} 'T'
+ // CHECK: |-TemplateSpecializationType {{.*}} 'Types<T...>' dependent
+ // CHECK: | `-TemplateArgument type 'T...'
+ // CHECK: | `-PackExpansionType {{.*}} 'T...' dependent
+ // CHECK: | `-TemplateTypeParmType {{.*}} 'T' dependent contains_unexpanded_pack depth 0 index 0 pack
+ // CHECK: | `-TemplateTypeParm {{.*}} 'T'
// CHECK: `-PackExpansionType {{.*}} 'T...' dependent
// CHECK: `-TemplateTypeParmType {{.*}} 'T' dependent contains_unexpanded_pack depth 0 index 0 pack
// CHECK: `-TemplateTypeParm {{.*}} 'T'
diff --git a/clang/test/SemaTemplate/ctad.cpp b/clang/test/SemaTemplate/ctad.cpp
index 1bf605f..49c4615 100644
--- a/clang/test/SemaTemplate/ctad.cpp
+++ b/clang/test/SemaTemplate/ctad.cpp
@@ -1,4 +1,5 @@
-// RUN: %clang_cc1 -std=c++17 -verify %s
+// RUN: %clang_cc1 -std=c++17 -verify=expected,cxx17 %s
+// RUN: %clang_cc1 -std=c++20 -verify=expected,cxx20 %s
namespace pr41427 {
template <typename T> class A {
@@ -21,9 +22,9 @@ namespace Access {
struct type {};
};
template<typename T> struct D : B { // expected-note {{not viable}} \
- expected-note {{implicit deduction guide declared as 'template <typename T> D(D<T>) -> D<T>'}}
+ expected-note {{implicit deduction guide declared as 'template <typename T> D(Access::D<T>) -> Access::D<T>'}}
D(T, typename T::type); // expected-note {{private member}} \
- // expected-note {{implicit deduction guide declared as 'template <typename T> D(T, typename T::type) -> D<T>'}}
+ // expected-note {{implicit deduction guide declared as 'template <typename T> D(T, typename T::type) -> Access::D<T>'}}
};
D b = {B(), {}};
@@ -61,15 +62,31 @@ namespace NoCrashOnGettingDefaultArgLoc {
template <typename>
class A {
A(int = 1); // expected-note {{candidate template ignored: couldn't infer template argumen}} \
- // expected-note {{implicit deduction guide declared as 'template <typename> D(int = <null expr>) -> D<type-parameter-0-0>'}}
+ // expected-note {{implicit deduction guide declared as 'template <typename> D(int = <null expr>) -> NoCrashOnGettingDefaultArgLoc::D<type-parameter-0-0>'}}
};
class C : A<int> {
using A::A;
};
template <typename>
class D : C { // expected-note {{candidate function template not viable: requires 1 argument}} \
- expected-note {{implicit deduction guide declared as 'template <typename> D(D<type-parameter-0-0>) -> D<type-parameter-0-0>'}}
+ expected-note {{implicit deduction guide declared as 'template <typename> D(NoCrashOnGettingDefaultArgLoc::D<type-parameter-0-0>) -> NoCrashOnGettingDefaultArgLoc::D<type-parameter-0-0>'}}
using C::C;
};
D abc; // expected-error {{no viable constructor or deduction guide}}
}
+
+namespace AsValueParameter {
+ namespace foo {
+ // cxx17-note@+2 {{template is declared here}}
+ // cxx20-note@+1 {{'A<int>' is not literal because it is not an aggregate and has no constexpr constructors other than copy or move constructors}}
+ template <class> struct A {
+ A();
+ };
+ }
+ template <foo::A> struct B {}; // expected-note {{template parameter is declared here}}
+ // cxx17-error@-1 {{use of class template 'foo::A' requires template arguments; argument deduction not allowed in template parameter}}
+
+ template struct B<foo::A<int>{}>;
+ // cxx17-error@-1 {{value of type 'foo::A<int>' is not implicitly convertible to 'int'}}
+ // cxx20-error@-2 {{non-type template parameter has non-literal type 'foo::A<int>' (aka 'AsValueParameter::foo::A<int>')}}
+} // namespace AsValueParameter
diff --git a/clang/test/SemaTemplate/current-instantiation.cpp b/clang/test/SemaTemplate/current-instantiation.cpp
index 9030768..9214bbe 100644
--- a/clang/test/SemaTemplate/current-instantiation.cpp
+++ b/clang/test/SemaTemplate/current-instantiation.cpp
@@ -245,5 +245,5 @@ namespace RebuildDependentScopeDeclRefExpr {
};
template<typename T> N<X<T>::thing> X<T>::data() {}
// FIXME: We should issue a typo-correction here.
- template<typename T> N<X<T>::think> X<T>::foo() {} // expected-error {{no member named 'think' in 'X<T>'}}
+ template<typename T> N<X<T>::think> X<T>::foo() {} // expected-error {{no member named 'think' in 'RebuildDependentScopeDeclRefExpr::X<T>'}}
}
diff --git a/clang/test/SemaTemplate/deduction-crash.cpp b/clang/test/SemaTemplate/deduction-crash.cpp
index 287c61a..99ca0b3 100644
--- a/clang/test/SemaTemplate/deduction-crash.cpp
+++ b/clang/test/SemaTemplate/deduction-crash.cpp
@@ -166,9 +166,9 @@ namespace PR51872_part1 {
template<int> class T1 { template <struct U1> T1(); };
// expected-error@-1 {{non-type template parameter has incomplete type 'struct U1'}}
// expected-note@-2 {{forward declaration of 'PR51872_part1::U1'}}
- // expected-note@-3 {{implicit deduction guide declared as 'template <int> T1(T1<value-parameter-0-0>) -> T1<value-parameter-0-0>'}}
+ // expected-note@-3 {{implicit deduction guide declared as 'template <int> T1(PR51872_part1::T1<value-parameter-0-0>) -> PR51872_part1::T1<value-parameter-0-0>'}}
T1 t1 = 0;
// expected-error@-1 {{no viable constructor or deduction guide for deduction of template arguments of 'T1'}}
- // expected-note@-7 {{candidate template ignored: could not match 'T1<value-parameter-0-0>' against 'int'}}
+ // expected-note@-7 {{candidate template ignored: could not match 'PR51872_part1::T1<value-parameter-0-0>' against 'int'}}
}
diff --git a/clang/test/SemaTemplate/deduction-guide.cpp b/clang/test/SemaTemplate/deduction-guide.cpp
index f6bc6ee..ef9b91ca 100644
--- a/clang/test/SemaTemplate/deduction-guide.cpp
+++ b/clang/test/SemaTemplate/deduction-guide.cpp
@@ -43,11 +43,10 @@ using AT = A<int[3], int, int, short>;
// CHECK: `-ParmVarDecl {{.*}} 'short (*)[4]'
// CHECK: FunctionProtoType {{.*}} 'auto (X<Ps...>, Ts (*)[Ns]...) -> A<T, Ts...>' dependent trailing_return
// CHECK: |-InjectedClassNameType {{.*}} 'A<T, Ts...>' dependent
-// CHECK: |-ElaboratedType {{.*}} 'X<Ps...>' sugar dependent
-// CHECK: | `-TemplateSpecializationType {{.*}} 'X<Ps...>' dependent
-// CHECK: | `-TemplateArgument expr
-// CHECK: | `-PackExpansionExpr {{.*}} 'T *'
-// CHECK: | `-DeclRefExpr {{.*}} 'T *' NonTypeTemplateParm {{.*}} 'Ps' 'T *'
+// CHECK: |-TemplateSpecializationType {{.*}} 'X<Ps...>' dependent
+// CHECK: | `-TemplateArgument expr
+// CHECK: | `-PackExpansionExpr {{.*}} 'T *'
+// CHECK: | `-DeclRefExpr {{.*}} 'T *' NonTypeTemplateParm {{.*}} 'Ps' 'T *'
// CHECK: `-PackExpansionType {{.*}} 'Ts (*)[Ns]...' dependent
// CHECK: `-PointerType {{.*}} 'Ts (*)[Ns]' dependent contains_unexpanded_pack
// CHECK: `-ParenType {{.*}} 'Ts[Ns]' sugar dependent contains_unexpanded_pack
@@ -118,9 +117,8 @@ using CT = C<int>;
// CHECK: |-InjectedClassNameType {{.*}} 'C<A>' dependent
// CHECK: |-TemplateTypeParmType {{.*}} 'A' dependent depth 0 index 0
// CHECK: | `-TemplateTypeParm {{.*}} 'A'
-// CHECK: |-ElaboratedType {{.*}} 'Y<T>' sugar dependent
-// CHECK: | `-TemplateSpecializationType {{.*}} 'Y<T>' dependent
-// CHECK: | `-TemplateArgument template
+// CHECK: |-TemplateSpecializationType {{.*}} 'Y<T>' dependent
+// CHECK: | `-TemplateArgument template
// CHECK: `-TemplateTypeParmType {{.*}} 'U' dependent depth 0 index 2
template<typename ...T> struct D { // expected-note {{candidate}} \
@@ -321,24 +319,23 @@ namespace TTP {
// CHECK-NEXT: |-TemplateTypeParmDecl {{.+}} class depth 0 index 0 T{{$}}
// CHECK-NEXT: |-TemplateTemplateParmDecl {{.+}} depth 0 index 1 TT{{$}}
// CHECK-NEXT: | `-TemplateTypeParmDecl {{.+}} class depth 1 index 0{{$}}
-// CHECK-NEXT: |-CXXDeductionGuideDecl {{.+}} 'auto (TT<T>) -> B<T>'{{$}}
+// CHECK-NEXT: |-CXXDeductionGuideDecl {{.+}} 'auto (TT<T>) -> TTP::B<T>'{{$}}
// CHECK-NEXT: | `-ParmVarDecl {{.+}} 'TT<T>'{{$}}
-// CHECK-NEXT: `-CXXDeductionGuideDecl {{.+}} 'auto (A<int>) -> TTP::B<int>'
+// CHECK-NEXT: `-CXXDeductionGuideDecl {{.+}} 'auto (TTP::A<int>) -> TTP::B<int>'
// CHECK-NEXT: |-TemplateArgument type 'int'
// CHECK-NEXT: | `-BuiltinType {{.+}} 'int'{{$}}
// CHECK-NEXT: |-TemplateArgument template 'TTP::A'{{$}}
// CHECK-NEXT: | `-ClassTemplateDecl {{.+}} A{{$}}
-// CHECK-NEXT: `-ParmVarDecl {{.+}} 'A<int>':'TTP::A<int>'{{$}}
-// CHECK-NEXT: FunctionProtoType {{.+}} 'auto (TT<T>) -> B<T>' dependent trailing_return cdecl{{$}}
-// CHECK-NEXT: |-InjectedClassNameType {{.+}} 'B<T>' dependent{{$}}
+// CHECK-NEXT: `-ParmVarDecl {{.+}} 'TTP::A<int>'{{$}}
+// CHECK-NEXT: FunctionProtoType {{.+}} 'auto (TT<T>) -> TTP::B<T>' dependent trailing_return cdecl{{$}}
+// CHECK-NEXT: |-InjectedClassNameType {{.+}} 'TTP::B<T>' dependent{{$}}
// CHECK-NEXT: | `-CXXRecord {{.+}} 'B'{{$}}
-// CHECK-NEXT: `-ElaboratedType {{.+}} 'TT<T>' sugar dependent{{$}}
-// CHECK-NEXT: `-TemplateSpecializationType {{.+}} 'TT<T>' dependent{{$}}
-// CHECK-NEXT: |-name: 'TT':'template-parameter-0-1' qualified
-// CHECK-NEXT: | `-TemplateTemplateParmDecl {{.+}} depth 0 index 1
-// CHECK-NEXT: `-TemplateArgument type 'T':'type-parameter-0-0'{{$}}
-// CHECK-NEXT: `-TemplateTypeParmType {{.+}} 'T' dependent depth 0 index 0{{$}}
-// CHECK-NEXT: `-TemplateTypeParm {{.+}} 'T'{{$}}
+// CHECK-NEXT: `-TemplateSpecializationType {{.+}} 'TT<T>' dependent{{$}}
+// CHECK-NEXT: |-name: 'TT':'template-parameter-0-1' qualified
+// CHECK-NEXT: | `-TemplateTemplateParmDecl {{.+}} depth 0 index 1
+// CHECK-NEXT: `-TemplateArgument type 'T':'type-parameter-0-0'{{$}}
+// CHECK-NEXT: `-TemplateTypeParmType {{.+}} 'T' dependent depth 0 index 0{{$}}
+// CHECK-NEXT: `-TemplateTypeParm {{.+}} 'T'{{$}}
namespace GH64625 {
@@ -351,14 +348,14 @@ X x = {{1, 2}};
// CHECK-LABEL: Dumping GH64625::<deduction guide for X>:
// CHECK-NEXT: FunctionTemplateDecl {{.+}} <{{.+}}:[[#@LINE - 7]]:1, col:27> col:27 implicit <deduction guide for X>
// CHECK-NEXT: |-TemplateTypeParmDecl {{.+}} <col:11, col:17> col:17 referenced class depth 0 index 0 T
-// CHECK: |-CXXDeductionGuideDecl {{.+}} <col:27> col:27 implicit <deduction guide for X> 'auto (T (&&)[2]) -> X<T>' aggregate
+// CHECK: |-CXXDeductionGuideDecl {{.+}} <col:27> col:27 implicit <deduction guide for X> 'auto (T (&&)[2]) -> GH64625::X<T>' aggregate
// CHECK-NEXT: | `-ParmVarDecl {{.+}} <col:27> col:27 'T (&&)[2]'
// CHECK-NEXT: `-CXXDeductionGuideDecl {{.+}} <col:27> col:27 implicit used <deduction guide for X> 'auto (int (&&)[2]) -> GH64625::X<int>' implicit_instantiation aggregate
// CHECK-NEXT: |-TemplateArgument type 'int'
// CHECK-NEXT: | `-BuiltinType {{.+}} 'int'
// CHECK-NEXT: `-ParmVarDecl {{.+}} <col:27> col:27 'int (&&)[2]'
-// CHECK-NEXT: FunctionProtoType {{.+}} 'auto (T (&&)[2]) -> X<T>' dependent trailing_return
-// CHECK-NEXT: |-InjectedClassNameType {{.+}} 'X<T>' dependent
+// CHECK-NEXT: FunctionProtoType {{.+}} 'auto (T (&&)[2]) -> GH64625::X<T>' dependent trailing_return
+// CHECK-NEXT: |-InjectedClassNameType {{.+}} 'GH64625::X<T>' dependent
// CHECK-NEXT: | `-CXXRecord {{.+}} 'X'
// CHECK-NEXT: `-RValueReferenceType {{.+}} 'T (&&)[2]' dependent
// CHECK-NEXT: `-ConstantArrayType {{.+}} 'T[2]' dependent 2
@@ -375,7 +372,7 @@ TwoArrays ta = {{1, 2}, {3, 4, 5}};
// CHECK-NEXT: FunctionTemplateDecl {{.+}} <{{.+}}:[[#@LINE - 7]]:1, col:36> col:36 implicit <deduction guide for TwoArrays>
// CHECK-NEXT: |-TemplateTypeParmDecl {{.+}} <col:11, col:17> col:17 referenced class depth 0 index 0 T
// CHECK-NEXT: |-TemplateTypeParmDecl {{.+}} <col:20, col:26> col:26 referenced class depth 0 index 1 U
-// CHECK: |-CXXDeductionGuideDecl {{.+}} <col:36> col:36 implicit <deduction guide for TwoArrays> 'auto (T (&&)[2], U (&&)[3]) -> TwoArrays<T, U>' aggregate
+// CHECK: |-CXXDeductionGuideDecl {{.+}} <col:36> col:36 implicit <deduction guide for TwoArrays> 'auto (T (&&)[2], U (&&)[3]) -> GH64625::TwoArrays<T, U>' aggregate
// CHECK-NEXT: | |-ParmVarDecl {{.+}} <col:36> col:36 'T (&&)[2]'
// CHECK-NEXT: | `-ParmVarDecl {{.+}} <col:36> col:36 'U (&&)[3]'
// CHECK-NEXT: `-CXXDeductionGuideDecl {{.+}} <col:36> col:36 implicit used <deduction guide for TwoArrays> 'auto (int (&&)[2], int (&&)[3]) -> GH64625::TwoArrays<int, int>' implicit_instantiation aggregate
@@ -385,8 +382,8 @@ TwoArrays ta = {{1, 2}, {3, 4, 5}};
// CHECK-NEXT: | `-BuiltinType {{.+}} 'int'
// CHECK-NEXT: |-ParmVarDecl {{.+}} <col:36> col:36 'int (&&)[2]'
// CHECK-NEXT: `-ParmVarDecl {{.+}} <col:36> col:36 'int (&&)[3]'
-// CHECK-NEXT: FunctionProtoType {{.+}} 'auto (T (&&)[2], U (&&)[3]) -> TwoArrays<T, U>' dependent trailing_return
-// CHECK-NEXT: |-InjectedClassNameType {{.+}} 'TwoArrays<T, U>' dependent
+// CHECK-NEXT: FunctionProtoType {{.+}} 'auto (T (&&)[2], U (&&)[3]) -> GH64625::TwoArrays<T, U>' dependent trailing_return
+// CHECK-NEXT: |-InjectedClassNameType {{.+}} 'GH64625::TwoArrays<T, U>' dependent
// CHECK-NEXT: | `-CXXRecord {{.+}} 'TwoArrays'
// CHECK-NEXT: |-RValueReferenceType {{.+}} 'T (&&)[2]' dependent
// CHECK-NEXT: | `-ConstantArrayType {{.+}} 'T[2]' dependent 2
@@ -398,7 +395,7 @@ TwoArrays ta = {{1, 2}, {3, 4, 5}};
// CHECK-NEXT: `-TemplateTypeParm {{.+}} 'U'
TwoArrays tb = {1, 2, {3, 4, 5}};
-// CHECK: |-CXXDeductionGuideDecl {{.+}} <col:36> col:36 implicit <deduction guide for TwoArrays> 'auto (T, T, U (&&)[3]) -> TwoArrays<T, U>' aggregate
+// CHECK: |-CXXDeductionGuideDecl {{.+}} <col:36> col:36 implicit <deduction guide for TwoArrays> 'auto (T, T, U (&&)[3]) -> GH64625::TwoArrays<T, U>' aggregate
// CHECK-NEXT: | |-ParmVarDecl {{.+}} <col:36> col:36 'T'
// CHECK-NEXT: | |-ParmVarDecl {{.+}} <col:36> col:36 'T'
// CHECK-NEXT: | `-ParmVarDecl {{.+}} <col:36> col:36 'U (&&)[3]'
@@ -410,8 +407,8 @@ TwoArrays tb = {1, 2, {3, 4, 5}};
// CHECK-NEXT: |-ParmVarDecl {{.+}} <col:36> col:36 'int'
// CHECK-NEXT: |-ParmVarDecl {{.+}} <col:36> col:36 'int'
// CHECK-NEXT: `-ParmVarDecl {{.+}} <col:36> col:36 'int (&&)[3]'
-// CHECK-NEXT: FunctionProtoType {{.+}} 'auto (T, T, U (&&)[3]) -> TwoArrays<T, U>' dependent trailing_return
-// CHECK-NEXT: |-InjectedClassNameType {{.+}} 'TwoArrays<T, U>' dependent
+// CHECK-NEXT: FunctionProtoType {{.+}} 'auto (T, T, U (&&)[3]) -> GH64625::TwoArrays<T, U>' dependent trailing_return
+// CHECK-NEXT: |-InjectedClassNameType {{.+}} 'GH64625::TwoArrays<T, U>' dependent
// CHECK-NEXT: | `-CXXRecord {{.+}} 'TwoArrays'
// CHECK-NEXT: |-TemplateTypeParmType {{.+}} 'T' dependent depth 0 index 0
// CHECK-NEXT: | `-TemplateTypeParm {{.+}} 'T'
@@ -423,7 +420,7 @@ TwoArrays tb = {1, 2, {3, 4, 5}};
// CHECK-NEXT: `-TemplateTypeParm {{.+}} 'U'
TwoArrays tc = {{1, 2}, 3, 4, 5};
-// CHECK: |-CXXDeductionGuideDecl {{.+}} <col:36> col:36 implicit <deduction guide for TwoArrays> 'auto (T (&&)[2], U, U, U) -> TwoArrays<T, U>' aggregate
+// CHECK: |-CXXDeductionGuideDecl {{.+}} <col:36> col:36 implicit <deduction guide for TwoArrays> 'auto (T (&&)[2], U, U, U) -> GH64625::TwoArrays<T, U>' aggregate
// CHECK-NEXT: | |-ParmVarDecl {{.+}} <col:36> col:36 'T (&&)[2]'
// CHECK-NEXT: | |-ParmVarDecl {{.+}} <col:36> col:36 'U'
// CHECK-NEXT: | |-ParmVarDecl {{.+}} <col:36> col:36 'U'
@@ -437,8 +434,8 @@ TwoArrays tc = {{1, 2}, 3, 4, 5};
// CHECK-NEXT: |-ParmVarDecl {{.+}} <col:36> col:36 'int'
// CHECK-NEXT: |-ParmVarDecl {{.+}} <col:36> col:36 'int'
// CHECK-NEXT: `-ParmVarDecl {{.+}} <col:36> col:36 'int'
-// CHECK-NEXT: FunctionProtoType {{.+}} 'auto (T (&&)[2], U, U, U) -> TwoArrays<T, U>' dependent trailing_return
-// CHECK-NEXT: |-InjectedClassNameType {{.+}} 'TwoArrays<T, U>' dependent
+// CHECK-NEXT: FunctionProtoType {{.+}} 'auto (T (&&)[2], U, U, U) -> GH64625::TwoArrays<T, U>' dependent trailing_return
+// CHECK-NEXT: |-InjectedClassNameType {{.+}} 'GH64625::TwoArrays<T, U>' dependent
// CHECK-NEXT: | `-CXXRecord {{.+}} 'TwoArrays'
// CHECK-NEXT: |-RValueReferenceType {{.+}} 'T (&&)[2]' dependent
// CHECK-NEXT: | `-ConstantArrayType {{.+}} 'T[2]' dependent 2
@@ -464,13 +461,13 @@ A a{.f1 = {1}};
// CHECK-LABEL: Dumping GH83368::<deduction guide for A>:
// CHECK-NEXT: FunctionTemplateDecl 0x{{.+}} <{{.+}}:[[#@LINE - 7]]:1, col:25> col:25 implicit <deduction guide for A>
// CHECK-NEXT: |-NonTypeTemplateParmDecl {{.+}} <col:11, col:15> col:15 referenced 'int' depth 0 index 0 N
-// CHECK: |-CXXDeductionGuideDecl {{.+}} <col:25> col:25 implicit <deduction guide for A> 'auto (int (&&)[N]) -> A<N>' aggregate
+// CHECK: |-CXXDeductionGuideDecl {{.+}} <col:25> col:25 implicit <deduction guide for A> 'auto (int (&&)[N]) -> GH83368::A<N>' aggregate
// CHECK-NEXT: | `-ParmVarDecl {{.+}} <col:25> col:25 'int (&&)[N]'
// CHECK-NEXT: `-CXXDeductionGuideDecl {{.+}} <col:25> col:25 implicit used <deduction guide for A> 'auto (int (&&)[1]) -> GH83368::A<1>' implicit_instantiation aggregate
// CHECK-NEXT: |-TemplateArgument integral '1'
// CHECK-NEXT: `-ParmVarDecl {{.+}} <col:25> col:25 'int (&&)[1]'
-// CHECK-NEXT: FunctionProtoType {{.+}} 'auto (int (&&)[N]) -> A<N>' dependent trailing_return
-// CHECK-NEXT: |-InjectedClassNameType {{.+}} 'A<N>' dependent
+// CHECK-NEXT: FunctionProtoType {{.+}} 'auto (int (&&)[N]) -> GH83368::A<N>' dependent trailing_return
+// CHECK-NEXT: |-InjectedClassNameType {{.+}} 'GH83368::A<N>' dependent
// CHECK-NEXT: | `-CXXRecord {{.+}} 'A'
// CHECK-NEXT: `-RValueReferenceType {{.+}} 'int (&&)[N]' dependent
// CHECK-NEXT: `-DependentSizedArrayType {{.+}} 'int[N]' dependent
@@ -512,7 +509,7 @@ A a(42);
// CHECK-NEXT: | | `-TemplateTypeParm 0x{{.+}} 'Ts'
// CHECK-NEXT: | `-ImplicitCastExpr {{.+}} <IntegralCast>
// CHECK-NEXT: | `-IntegerLiteral 0x{{.+}} <{{.+}}> 'int' 0
-// CHECK-NEXT: |-CXXDeductionGuideDecl 0x{{.+}} <{{.+}}> line:{{.+}} implicit <deduction guide for A> 'auto (T, Ts...) -> A<T>'
+// CHECK-NEXT: |-CXXDeductionGuideDecl 0x{{.+}} <{{.+}}> line:{{.+}} implicit <deduction guide for A> 'auto (T, Ts...) -> GH60777::A<T>'
// CHECK-NEXT: | |-ParmVarDecl 0x{{.+}} <{{.+}}> col:{{.+}} val 'T'
// CHECK-NEXT: | |-ParmVarDecl 0x{{.+}} <{{.+}}> col:{{.+}} tail 'Ts...' pack
// CHECK-NEXT: | `-BinaryOperator 0x{{.+}} <{{.+}}> 'bool' '&&'
@@ -541,9 +538,9 @@ B b(42, 43);
// expected-error@-1 {{no viable constructor}} \
// expected-note@-6 {{constraints not satisfied}} \
// expected-note@-5 {{because substituted constraint expression is ill-formed}} \
-// expected-note@-6 {{implicit deduction guide declared as 'template <typename T, typename ...Ts> B(T val, Ts ...tail) -> B<T> requires (True<tail...>())'}} \
+// expected-note@-6 {{implicit deduction guide declared as 'template <typename T, typename ...Ts> B(T val, Ts ...tail) -> GH60777::B<T> requires (True<tail...>())'}} \
// expected-note@-8 {{function template not viable}} \
-// expected-note@-8 {{implicit deduction guide declared as 'template <typename T> B(B<T>) -> B<T>'}}
+// expected-note@-8 {{implicit deduction guide declared as 'template <typename T> B(GH60777::B<T>) -> GH60777::B<T>'}}
} // namespace GH60777
@@ -572,7 +569,7 @@ static_assert(x.size == 4);
// CHECK-NEXT: FunctionTemplateDecl 0x{{.+}} <{{.+}}> col:13 implicit <deduction guide for X>
// CHECK-NEXT: |-TemplateTypeParmDecl 0x{{.+}} <{{.+}}> col:17 referenced class depth 0 index 0 T
// CHECK-NEXT: |-TemplateTypeParmDecl 0x{{.+}} <{{.+}}> col:19 class depth 0 index 1 U
-// CHECK-NEXT: |-CXXDeductionGuideDecl 0x{{.+}} <{{.+}}> col:13 implicit <deduction guide for X> 'auto (T, U (&)[3]) -> X<T>'
+// CHECK-NEXT: |-CXXDeductionGuideDecl 0x{{.+}} <{{.+}}> col:13 implicit <deduction guide for X> 'auto (T, U (&)[3]) -> GH98592::X<T>'
// CHECK-NEXT: | |-ParmVarDecl 0x{{.+}} <col:15> col:16 'T'
// CHECK-NEXT: | |-ParmVarDecl 0x{{.+}} <col:18, col:24> col:21 'U (&)[3]'
// CHECK-NEXT: | `-ConceptSpecializationExpr 0x{{.+}} <col:36, col:42> 'bool' Concept 0x{{.+}} 'True'
@@ -719,7 +716,7 @@ void test() { NewDeleteAllocator abc(42); } // expected-error {{no viable constr
// CHECK-NEXT: | |-inherited from TemplateTypeParm {{.+}} depth 0 index 0
// CHECK-NEXT: | `-BuiltinType {{.+}} 'void'
// CHECK-NEXT: |-TemplateTypeParmDecl {{.+}} typename depth 0 index 1 T
-// CHECK-NEXT: `-CXXDeductionGuideDecl {{.+}} <deduction guide for NewDeleteAllocator> 'auto (T) -> NewDeleteAllocator<type-parameter-0-0>'
+// CHECK-NEXT: `-CXXDeductionGuideDecl {{.+}} <deduction guide for NewDeleteAllocator> 'auto (T) -> GH128691::NewDeleteAllocator<type-parameter-0-0>'
// CHECK-NEXT: `-ParmVarDecl {{.+}} 'T'
} // namespace GH128691
@@ -745,7 +742,7 @@ B b(24);
// CHECK-NEXT: FunctionTemplateDecl {{.+}} implicit <deduction guide for B>
// CHECK-NEXT: |-TemplateTypeParmDecl {{.+}} typename depth 0 index 0
// CHECK-NEXT: |-TemplateTypeParmDecl {{.+}} class depth 0 index 1 U
-// CHECK-NEXT: `-CXXDeductionGuideDecl {{.+}} implicit <deduction guide for B> 'auto (U) -> B<type-parameter-0-0>'
+// CHECK-NEXT: `-CXXDeductionGuideDecl {{.+}} implicit <deduction guide for B> 'auto (U) -> GH132616_DeductionGuide::B<type-parameter-0-0>'
// CHECK-NEXT: `-ParmVarDecl {{.+}} 'U'
struct C {
@@ -767,7 +764,7 @@ D d(24);
// CHECK-NEXT: FunctionTemplateDecl {{.+}} implicit <deduction guide for D>
// CHECK-NEXT: |-TemplateTypeParmDecl {{.+}} typename depth 0 index 0
// CHECK-NEXT: |-TemplateTypeParmDecl {{.+}} class depth 0 index 1 U
-// CHECK-NEXT: `-CXXDeductionGuideDecl {{.+}} implicit <deduction guide for D> 'auto (U) -> D<type-parameter-0-0>'
+// CHECK-NEXT: `-CXXDeductionGuideDecl {{.+}} implicit <deduction guide for D> 'auto (U) -> GH132616_DeductionGuide::D<type-parameter-0-0>'
// CHECK-NEXT: `-ParmVarDecl {{.+}} 'U'
} // namespace GH132616_DeductionGuide
@@ -859,13 +856,12 @@ CC c{};
// CHECK-NEXT: | `-IntegerLiteral {{.+}} 'int' 42
// CHECK-NEXT: |-TemplateTypeParmDecl {{.+}} class depth 0 index 1 U
// CHECK-NEXT: | `-TemplateArgument type 'A<decltype(N)>'
-// CHECK-NEXT: | `-ElaboratedType {{.+}} 'A<decltype(N)>' sugar dependent
-// CHECK-NEXT: | `-TemplateSpecializationType {{.+}} 'A<decltype(N)>' dependent
-// CHECK-NEXT: | |-name: 'A':'GH133132::A' qualified
-// CHECK-NEXT: | | `-ClassTemplateDecl {{.+}} A
-// CHECK-NEXT: | `-TemplateArgument type 'decltype(N)'
-// CHECK-NEXT: | `-DecltypeType {{.+}} 'decltype(N)' dependent
-// CHECK-NEXT: | `-DeclRefExpr {{.+}} 'int' NonTypeTemplateParm {{.+}} 'N' 'int'
+// CHECK-NEXT: | `-TemplateSpecializationType {{.+}} 'A<decltype(N)>' dependent
+// CHECK-NEXT: | |-name: 'A':'GH133132::A' qualified
+// CHECK-NEXT: | | `-ClassTemplateDecl {{.+}} A
+// CHECK-NEXT: | `-TemplateArgument type 'decltype(N)'
+// CHECK-NEXT: | `-DecltypeType {{.+}} 'decltype(N)' dependent
+// CHECK-NEXT: | `-DeclRefExpr {{.+}} 'int' NonTypeTemplateParm {{.+}} 'N' 'int'
// CHECK-NEXT: |-TypeTraitExpr {{.+}} 'bool' __is_deducible
// CHECK-NEXT: | |-DeducedTemplateSpecializationType {{.+}} 'GH133132::CC' dependent
// CHECK-NEXT: | | `-name: 'GH133132::CC'
@@ -903,7 +899,7 @@ void f() {
// CHECK-LABEL: Dumping GH67173::<deduction guide for Vec2d>:
// CHECK-NEXT: FunctionTemplateDecl {{.+}} implicit <deduction guide for Vec2d>
// CHECK-NEXT: |-TemplateTypeParmDecl {{.+}} referenced class depth 0 index 0 T
-// CHECK: |-CXXDeductionGuideDecl {{.+}} implicit <deduction guide for Vec2d> 'auto (T, T) -> Vec2d<T>' aggregate
+// CHECK: |-CXXDeductionGuideDecl {{.+}} implicit <deduction guide for Vec2d> 'auto (T, T) -> GH67173::Vec2d<T>' aggregate
// CHECK-NEXT: | |-ParmVarDecl {{.+}} col:27 'T'
// CHECK-NEXT: | `-ParmVarDecl {{.+}} col:27 'T'
// CHECK-NEXT: `-CXXDeductionGuideDecl {{.+}} implicit used <deduction guide for Vec2d> 'auto (int, int) -> GH67173::Vec2d<int>' implicit_instantiation aggregate
diff --git a/clang/test/SemaTemplate/dependent-base-classes.cpp b/clang/test/SemaTemplate/dependent-base-classes.cpp
index b4b2669..b1c6102 100644
--- a/clang/test/SemaTemplate/dependent-base-classes.cpp
+++ b/clang/test/SemaTemplate/dependent-base-classes.cpp
@@ -32,7 +32,7 @@ namespace PR6031 {
template <class TT>
struct FI2
{
- C<typename FI2::type> a; // expected-error{{no type named 'type' in 'FI2<TT>'}}
+ C<typename FI2::type> a; // expected-error{{no type named 'type' in 'PR6031::FI2<TT>'}}
};
template<typename T>
@@ -54,9 +54,9 @@ namespace PR6031 {
template<typename T>
struct NoDepBase {
int foo() {
- class NoDepBase::Nested nested; // expected-error{{no class named 'Nested' in 'NoDepBase<T>'}}
- typedef typename NoDepBase::template MemberTemplate<T>::type type; // expected-error{{no member named 'MemberTemplate' in 'NoDepBase<T>'}}
- return NoDepBase::a; // expected-error{{no member named 'a' in 'NoDepBase<T>'}}
+ class NoDepBase::Nested nested; // expected-error{{no class named 'Nested' in 'PR6031::NoDepBase<T>'}}
+ typedef typename NoDepBase::template MemberTemplate<T>::type type; // expected-error{{no member named 'MemberTemplate' in 'PR6031::NoDepBase<T>'}}
+ return NoDepBase::a; // expected-error{{no member named 'a' in 'PR6031::NoDepBase<T>'}}
}
};
}
@@ -102,7 +102,7 @@ namespace PR6081 {
template< class X >
void f0(const X & k)
{
- this->template f1<int>()(k); // expected-error{{no member named 'f1' in 'C<T>'}}
+ this->template f1<int>()(k); // expected-error{{no member named 'f1' in 'PR6081::C<T>'}}
}
};
}
diff --git a/clang/test/SemaTemplate/dependent-names.cpp b/clang/test/SemaTemplate/dependent-names.cpp
index 54ce376..d6bd670 100644
--- a/clang/test/SemaTemplate/dependent-names.cpp
+++ b/clang/test/SemaTemplate/dependent-names.cpp
@@ -336,7 +336,7 @@ template < unsigned > struct X {
static const unsigned dimension = 3;
template<unsigned dim=dimension>
struct Y: Y<dim> { }; // expected-error{{base class has incomplete type}}
- // expected-note@-1{{definition of 'Y<dim>' is not complete until the closing '}'}}
+ // expected-note@-1{{definition of 'PR11421::X::Y<dim>' is not complete until the closing '}'}}
};
typedef X<3> X3;
X3::Y<>::iterator it; // expected-error {{no type named 'iterator' in 'PR11421::X<3>::Y<>'}}
diff --git a/clang/test/SemaTemplate/elaborated-type-specifier.cpp b/clang/test/SemaTemplate/elaborated-type-specifier.cpp
index 95c2aa9..ce6e258 100644
--- a/clang/test/SemaTemplate/elaborated-type-specifier.cpp
+++ b/clang/test/SemaTemplate/elaborated-type-specifier.cpp
@@ -79,21 +79,18 @@ namespace canon {
// expected-note@#canon-t3-3 {{candidate function}}
template <class T> constexpr int t4(typename T::template X<int>* = 0) { return 0; }
- // expected-note@-1 3{{candidate function}}
+ // expected-note@-1 2{{candidate function}}
template <class T> constexpr int t4(struct T::template X<int>* = 0) { return 1; }
- // expected-note@-1 3{{candidate function}}
+ // expected-note@-1 2{{candidate function}}
template <class T> constexpr int t4(union T::template X<int>* = 0) { return 2; }
- // expected-note@-1 3{{candidate function}}
+ // expected-note@-1 2{{candidate function}}
- // FIXME: This should work.
struct E { template <class T> using X = T; };
- static_assert(t4<E>() == 0); // expected-error {{call to 't4' is ambiguous}}
+ static_assert(t4<E>() == 0);
- // FIXME: Should not match the union overload.
struct F { template <class> struct X {}; };
static_assert(t4<F>() == 1); // expected-error {{call to 't4' is ambiguous}}
- // FIXME: Should not match the struct overload.
struct G { template <class> union X {}; };
static_assert(t4<G>() == 2); // expected-error {{call to 't4' is ambiguous}}
} // namespace canon
diff --git a/clang/test/SemaTemplate/instantiate-requires-expr.cpp b/clang/test/SemaTemplate/instantiate-requires-expr.cpp
index 47689b9..e60f792 100644
--- a/clang/test/SemaTemplate/instantiate-requires-expr.cpp
+++ b/clang/test/SemaTemplate/instantiate-requires-expr.cpp
@@ -72,8 +72,8 @@ namespace type_requirement {
template<typename T> requires
false_v<requires { typename T::template temp<T>; }>
- // expected-note@-1 {{because 'false_v<requires { typename contains_template<int>::template temp<type_requirement::contains_template<int>>; }>' evaluated to false}}
- // expected-note@-2 {{because 'false_v<requires { typename contains_template<short>::template temp<type_requirement::contains_template<short>>; }>' evaluated to false}}
+ // expected-note@-1 {{because 'false_v<requires { typename type_requirement::contains_template<int>::template temp<type_requirement::contains_template<int>>; }>' evaluated to false}}
+ // expected-note@-2 {{because 'false_v<requires { typename type_requirement::contains_template<short>::template temp<type_requirement::contains_template<short>>; }>' evaluated to false}}
struct r2 {};
using r2i1 = r2<contains_template<int>>; // expected-error{{constraints not satisfied for class template 'r2' [with T = type_requirement::contains_template<int>]}}
diff --git a/clang/test/SemaTemplate/make_integer_seq.cpp b/clang/test/SemaTemplate/make_integer_seq.cpp
index cd36d1e..1203a58 100644
--- a/clang/test/SemaTemplate/make_integer_seq.cpp
+++ b/clang/test/SemaTemplate/make_integer_seq.cpp
@@ -4,116 +4,110 @@ template <class A1, A1... A2> struct A {};
using test1 = __make_integer_seq<A, int, 1>;
// CHECK: |-TypeAliasDecl 0x{{[0-9A-Fa-f]+}} <line:5:1, col:43> col:7 test1 '__make_integer_seq<A, int, 1>':'A<int, 0>'
-// CHECK-NEXT: `-ElaboratedType 0x{{[0-9A-Fa-f]+}} '__make_integer_seq<A, int, 1>' sugar
+// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} '__make_integer_seq<A, int, 1>' sugar alias
+// CHECK-NEXT: |-name: '__make_integer_seq' qualified
+// CHECK-NEXT: | `-BuiltinTemplateDecl {{.+}} __make_integer_seq
+// CHECK-NEXT: |-TemplateArgument template 'A'
+// CHECK-NEXT: | `-ClassTemplateDecl 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:1, col:41> col:38 A
+// CHECK-NEXT: |-TemplateArgument type 'int'
+// CHECK-NEXT: | `-BuiltinType 0x{{[0-9A-Fa-f]+}} 'int'
+// CHECK-NEXT: |-TemplateArgument expr '1'
+// CHECK-NEXT: | `-ConstantExpr 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:42> 'int'
+// CHECK-NEXT: | |-value: Int 1
+// CHECK-NEXT: | `-IntegerLiteral 0x{{[0-9A-Fa-f]+}} <col:42> 'int' 1
+// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} 'A<int, 0>' sugar
+// CHECK-NEXT: |-name: 'A' qualified
+// CHECK-NEXT: | `-ClassTemplateDecl {{.+}} A
+// CHECK-NEXT: |-TemplateArgument type 'int'
+// CHECK-NEXT: | `-BuiltinType 0x{{[0-9A-Fa-f]+}} 'int'
+// CHECK-NEXT: |-TemplateArgument expr '0'
+// CHECK-NEXT: | `-ConstantExpr 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:42> 'int'
+// CHECK-NEXT: | |-value: Int 0
+// CHECK-NEXT: | `-IntegerLiteral 0x{{[0-9A-Fa-f]+}} <col:42> 'int' 0
+// CHECK-NEXT: `-RecordType 0x{{[0-9A-Fa-f]+}} 'A<int, 0>'
+// CHECK-NEXT: `-ClassTemplateSpecialization 0x{{[0-9A-Fa-f]+}} 'A'
+
+template <class B1, B1 B2> using B = __make_integer_seq<A, B1, B2>;
+using test2 = B<int, 1>;
+// CHECK: |-TypeAliasDecl 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:1, col:23> col:7 test2 'B<int, 1>':'A<int, 0>'
+// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} 'B<int, 1>' sugar alias
+// CHECK-NEXT: |-name: 'B' qualified
+// CHECK-NEXT: | `-TypeAliasTemplateDecl {{.+}} B
+// CHECK-NEXT: |-TemplateArgument type 'int'
+// CHECK-NEXT: | `-BuiltinType 0x{{[0-9A-Fa-f]+}} 'int'
+// CHECK-NEXT: |-TemplateArgument expr '1'
+// CHECK-NEXT: | `-ConstantExpr 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:22> 'int'
+// CHECK-NEXT: | |-value: Int 1
+// CHECK-NEXT: | `-IntegerLiteral 0x{{[0-9A-Fa-f]+}} <col:22> 'int' 1
// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} '__make_integer_seq<A, int, 1>' sugar alias
// CHECK-NEXT: |-name: '__make_integer_seq' qualified
// CHECK-NEXT: | `-BuiltinTemplateDecl {{.+}} __make_integer_seq
// CHECK-NEXT: |-TemplateArgument template 'A'
// CHECK-NEXT: | `-ClassTemplateDecl 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:1, col:41> col:38 A
// CHECK-NEXT: |-TemplateArgument type 'int'
-// CHECK-NEXT: | `-BuiltinType 0x{{[0-9A-Fa-f]+}} 'int'
+// CHECK-NEXT: | `-SubstTemplateTypeParmType 0x{{[0-9A-Fa-f]+}} 'int' sugar class depth 0 index 0 B1 final
+// CHECK-NEXT: | |-TypeAliasTemplate 0x{{[0-9A-Fa-f]+}} 'B'
+// CHECK-NEXT: | `-BuiltinType 0x{{[0-9A-Fa-f]+}} 'int'
// CHECK-NEXT: |-TemplateArgument expr '1'
-// CHECK-NEXT: | `-ConstantExpr 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:42> 'int'
+// CHECK-NEXT: | `-ConstantExpr 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:64> 'int'
// CHECK-NEXT: | |-value: Int 1
-// CHECK-NEXT: | `-IntegerLiteral 0x{{[0-9A-Fa-f]+}} <col:42> 'int' 1
+// CHECK-NEXT: | `-SubstNonTypeTemplateParmExpr 0x{{[0-9A-Fa-f]+}} <col:64> 'int'
+// CHECK-NEXT: | |-NonTypeTemplateParmDecl 0x{{[0-9A-Fa-f]+}} <col:21, col:24> col:24 referenced 'B1' depth 0 index 1 B2
+// CHECK-NEXT: | `-IntegerLiteral 0x{{[0-9A-Fa-f]+}} <col:64> 'int' 1
// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} 'A<int, 0>' sugar
// CHECK-NEXT: |-name: 'A' qualified
// CHECK-NEXT: | `-ClassTemplateDecl {{.+}} A
// CHECK-NEXT: |-TemplateArgument type 'int'
-// CHECK-NEXT: | `-BuiltinType 0x{{[0-9A-Fa-f]+}} 'int'
+// CHECK-NEXT: | `-SubstTemplateTypeParmType 0x{{[0-9A-Fa-f]+}} 'int' sugar class depth 0 index 0 B1 final
+// CHECK-NEXT: | |-TypeAliasTemplate 0x{{[0-9A-Fa-f]+}} 'B'
+// CHECK-NEXT: | `-BuiltinType 0x{{[0-9A-Fa-f]+}} 'int'
// CHECK-NEXT: |-TemplateArgument expr '0'
-// CHECK-NEXT: | `-ConstantExpr 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:42> 'int'
+// CHECK-NEXT: | `-ConstantExpr 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:64> 'int'
// CHECK-NEXT: | |-value: Int 0
-// CHECK-NEXT: | `-IntegerLiteral 0x{{[0-9A-Fa-f]+}} <col:42> 'int' 0
+// CHECK-NEXT: | `-IntegerLiteral 0x{{[0-9A-Fa-f]+}} <col:64> 'int' 0
// CHECK-NEXT: `-RecordType 0x{{[0-9A-Fa-f]+}} 'A<int, 0>'
// CHECK-NEXT: `-ClassTemplateSpecialization 0x{{[0-9A-Fa-f]+}} 'A'
-template <class B1, B1 B2> using B = __make_integer_seq<A, B1, B2>;
-using test2 = B<int, 1>;
-// CHECK: |-TypeAliasDecl 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:1, col:23> col:7 test2 'B<int, 1>':'A<int, 0>'
-// CHECK-NEXT: `-ElaboratedType 0x{{[0-9A-Fa-f]+}} 'B<int, 1>' sugar
-// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} 'B<int, 1>' sugar alias
-// CHECK-NEXT: |-name: 'B' qualified
-// CHECK-NEXT: | `-TypeAliasTemplateDecl {{.+}} B
-// CHECK-NEXT: |-TemplateArgument type 'int'
-// CHECK-NEXT: | `-BuiltinType 0x{{[0-9A-Fa-f]+}} 'int'
-// CHECK-NEXT: |-TemplateArgument expr '1'
-// CHECK-NEXT: | `-ConstantExpr 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:22> 'int'
-// CHECK-NEXT: | |-value: Int 1
-// CHECK-NEXT: | `-IntegerLiteral 0x{{[0-9A-Fa-f]+}} <col:22> 'int' 1
-// CHECK-NEXT: `-ElaboratedType 0x{{[0-9A-Fa-f]+}} '__make_integer_seq<A, int, 1>' sugar
-// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} '__make_integer_seq<A, int, 1>' sugar alias
-// CHECK-NEXT: |-name: '__make_integer_seq' qualified
-// CHECK-NEXT: | `-BuiltinTemplateDecl {{.+}} __make_integer_seq
-// CHECK-NEXT: |-TemplateArgument template 'A'
-// CHECK-NEXT: | `-ClassTemplateDecl 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:1, col:41> col:38 A
-// CHECK-NEXT: |-TemplateArgument type 'int'
-// CHECK-NEXT: | `-SubstTemplateTypeParmType 0x{{[0-9A-Fa-f]+}} 'int' sugar class depth 0 index 0 B1 final
-// CHECK-NEXT: | |-TypeAliasTemplate 0x{{[0-9A-Fa-f]+}} 'B'
-// CHECK-NEXT: | `-BuiltinType 0x{{[0-9A-Fa-f]+}} 'int'
-// CHECK-NEXT: |-TemplateArgument expr '1'
-// CHECK-NEXT: | `-ConstantExpr 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:64> 'int'
-// CHECK-NEXT: | |-value: Int 1
-// CHECK-NEXT: | `-SubstNonTypeTemplateParmExpr 0x{{[0-9A-Fa-f]+}} <col:64> 'int'
-// CHECK-NEXT: | |-NonTypeTemplateParmDecl 0x{{[0-9A-Fa-f]+}} <col:21, col:24> col:24 referenced 'B1' depth 0 index 1 B2
-// CHECK-NEXT: | `-IntegerLiteral 0x{{[0-9A-Fa-f]+}} <col:64> 'int' 1
-// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} 'A<int, 0>' sugar
-// CHECK-NEXT: |-name: 'A' qualified
-// CHECK-NEXT: | `-ClassTemplateDecl {{.+}} A
-// CHECK-NEXT: |-TemplateArgument type 'int'
-// CHECK-NEXT: | `-SubstTemplateTypeParmType 0x{{[0-9A-Fa-f]+}} 'int' sugar class depth 0 index 0 B1 final
-// CHECK-NEXT: | |-TypeAliasTemplate 0x{{[0-9A-Fa-f]+}} 'B'
-// CHECK-NEXT: | `-BuiltinType 0x{{[0-9A-Fa-f]+}} 'int'
-// CHECK-NEXT: |-TemplateArgument expr '0'
-// CHECK-NEXT: | `-ConstantExpr 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:64> 'int'
-// CHECK-NEXT: | |-value: Int 0
-// CHECK-NEXT: | `-IntegerLiteral 0x{{[0-9A-Fa-f]+}} <col:64> 'int' 0
-// CHECK-NEXT: `-RecordType 0x{{[0-9A-Fa-f]+}} 'A<int, 0>'
-// CHECK-NEXT: `-ClassTemplateSpecialization 0x{{[0-9A-Fa-f]+}} 'A'
-
template <template <class T, T...> class S, class T, int N> struct C {
using test3 = __make_integer_seq<S, T, N>;
// CHECK: |-TypeAliasDecl 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:3, col:43> col:9 test3 '__make_integer_seq<S, T, N>'
-// CHECK-NEXT: `-ElaboratedType 0x{{[0-9A-Fa-f]+}} '__make_integer_seq<S, T, N>' sugar dependent
-// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} '__make_integer_seq<S, T, N>' dependent
-// CHECK-NEXT: |-name: '__make_integer_seq'
-// CHECK-NEXT: | `-BuiltinTemplateDecl {{.+}} __make_integer_seq
-// CHECK-NEXT: |-TemplateArgument template 'S'
-// CHECK-NEXT: | | `-TemplateTemplateParmDecl 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:11, col:42> col:42 depth 0 index 0 S
-// CHECK-NEXT: |-TemplateArgument type 'T'
-// CHECK-NEXT: | `-TemplateTypeParmType 0x{{[0-9A-Fa-f]+}} 'T' dependent depth 0 index 1
-// CHECK-NEXT: | `-TemplateTypeParm 0x{{[0-9A-Fa-f]+}} 'T'
-// CHECK-NEXT: `-TemplateArgument expr 'N'
-// CHECK-NEXT: `-ImplicitCastExpr 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:42> 'T' <Dependent>
-// CHECK-NEXT: `-DeclRefExpr 0x{{[0-9A-Fa-f]+}} <col:42> 'int' NonTypeTemplateParm 0x{{[0-9A-Fa-f]+}} 'N' 'int'
+// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} '__make_integer_seq<S, T, N>' dependent
+// CHECK-NEXT: |-name: '__make_integer_seq'
+// CHECK-NEXT: | `-BuiltinTemplateDecl {{.+}} __make_integer_seq
+// CHECK-NEXT: |-TemplateArgument template 'S'
+// CHECK-NEXT: | | `-TemplateTemplateParmDecl 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:11, col:42> col:42 depth 0 index 0 S
+// CHECK-NEXT: |-TemplateArgument type 'T'
+// CHECK-NEXT: | `-TemplateTypeParmType 0x{{[0-9A-Fa-f]+}} 'T' dependent depth 0 index 1
+// CHECK-NEXT: | `-TemplateTypeParm 0x{{[0-9A-Fa-f]+}} 'T'
+// CHECK-NEXT: `-TemplateArgument expr 'N'
+// CHECK-NEXT: `-ImplicitCastExpr 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:42> 'T' <Dependent>
+// CHECK-NEXT: `-DeclRefExpr 0x{{[0-9A-Fa-f]+}} <col:42> 'int' NonTypeTemplateParm 0x{{[0-9A-Fa-f]+}} 'N' 'int'
using test4 = __make_integer_seq<A, T, 1>;
// CHECK: |-TypeAliasDecl 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:3, col:43> col:9 test4 '__make_integer_seq<A, T, 1>'
-// CHECK-NEXT: `-ElaboratedType 0x{{[0-9A-Fa-f]+}} '__make_integer_seq<A, T, 1>' sugar dependent
-// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} '__make_integer_seq<A, T, 1>' dependent
-// CHECK-NEXT: |-name: '__make_integer_seq'
-// CHECK-NEXT: | `-BuiltinTemplateDecl {{.+}} __make_integer_seq
-// CHECK-NEXT: |-TemplateArgument template 'A'
-// CHECK-NEXT: | `-ClassTemplateDecl 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:1, col:41> col:38 A
-// CHECK-NEXT: |-TemplateArgument type 'T'
-// CHECK-NEXT: | `-TemplateTypeParmType 0x{{[0-9A-Fa-f]+}} 'T' dependent depth 0 index 1
-// CHECK-NEXT: | `-TemplateTypeParm 0x{{[0-9A-Fa-f]+}} 'T'
-// CHECK-NEXT: `-TemplateArgument expr '1'
-// CHECK-NEXT: `-ImplicitCastExpr 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:42> 'T' <Dependent>
-// CHECK-NEXT: `-IntegerLiteral 0x{{[0-9A-Fa-f]+}} <col:42> 'int' 1
+// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} '__make_integer_seq<A, T, 1>' dependent
+// CHECK-NEXT: |-name: '__make_integer_seq'
+// CHECK-NEXT: | `-BuiltinTemplateDecl {{.+}} __make_integer_seq
+// CHECK-NEXT: |-TemplateArgument template 'A'
+// CHECK-NEXT: | `-ClassTemplateDecl 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:1, col:41> col:38 A
+// CHECK-NEXT: |-TemplateArgument type 'T'
+// CHECK-NEXT: | `-TemplateTypeParmType 0x{{[0-9A-Fa-f]+}} 'T' dependent depth 0 index 1
+// CHECK-NEXT: | `-TemplateTypeParm 0x{{[0-9A-Fa-f]+}} 'T'
+// CHECK-NEXT: `-TemplateArgument expr '1'
+// CHECK-NEXT: `-ImplicitCastExpr 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:42> 'T' <Dependent>
+// CHECK-NEXT: `-IntegerLiteral 0x{{[0-9A-Fa-f]+}} <col:42> 'int' 1
using test5 = __make_integer_seq<A, int, N>;
// CHECK: `-TypeAliasDecl 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:3, col:45> col:9 test5 '__make_integer_seq<A, int, N>'
-// CHECK-NEXT: `-ElaboratedType 0x{{[0-9A-Fa-f]+}} '__make_integer_seq<A, int, N>' sugar dependent
-// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} '__make_integer_seq<A, int, N>' dependent
-// CHECK-NEXT: |-name: '__make_integer_seq'
-// CHECK-NEXT: | `-BuiltinTemplateDecl {{.+}} __make_integer_seq
-// CHECK-NEXT: |-TemplateArgument template 'A'
-// CHECK-NEXT: | `-ClassTemplateDecl 0x{{.+}} <line:{{.+}}:1, col:41> col:38 A
-// CHECK-NEXT: |-TemplateArgument type 'int'
-// CHECK-NEXT: | `-BuiltinType 0x{{[0-9A-Fa-f]+}} 'int'
-// CHECK-NEXT: `-TemplateArgument expr 'N'
-// CHECK-NEXT: `-DeclRefExpr 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:44> 'int' NonTypeTemplateParm 0x{{[0-9A-Fa-f]+}} 'N' 'int'
+// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} '__make_integer_seq<A, int, N>' dependent
+// CHECK-NEXT: |-name: '__make_integer_seq'
+// CHECK-NEXT: | `-BuiltinTemplateDecl {{.+}} __make_integer_seq
+// CHECK-NEXT: |-TemplateArgument template 'A'
+// CHECK-NEXT: | `-ClassTemplateDecl 0x{{.+}} <line:{{.+}}:1, col:41> col:38 A
+// CHECK-NEXT: |-TemplateArgument type 'int'
+// CHECK-NEXT: | `-BuiltinType 0x{{[0-9A-Fa-f]+}} 'int'
+// CHECK-NEXT: `-TemplateArgument expr 'N'
+// CHECK-NEXT: `-DeclRefExpr 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:44> 'int' NonTypeTemplateParm 0x{{[0-9A-Fa-f]+}} 'N' 'int'
};
// expected-no-diagnostics
diff --git a/clang/test/SemaTemplate/ms-lookup-template-base-classes.cpp b/clang/test/SemaTemplate/ms-lookup-template-base-classes.cpp
index 547e594..16fe8d3 100644
--- a/clang/test/SemaTemplate/ms-lookup-template-base-classes.cpp
+++ b/clang/test/SemaTemplate/ms-lookup-template-base-classes.cpp
@@ -76,7 +76,7 @@ template<typename T> struct C;
// Test lookup with incomplete lookup context
template<typename T>
auto C<T>::f() -> decltype(x) { } // expected-error {{use of undeclared identifier 'x'}}
- // expected-error@-1 {{out-of-line definition of 'f' from class 'C<T>' without definition}}
+ // expected-error@-1 {{out-of-line definition of 'f' from class 'lookup_dependent_bases_id_expr::C<T>' without definition}}
}
diff --git a/clang/test/SemaTemplate/ms-sizeof-missing-typename.cpp b/clang/test/SemaTemplate/ms-sizeof-missing-typename.cpp
index 463d86f..ea72332 100644
--- a/clang/test/SemaTemplate/ms-sizeof-missing-typename.cpp
+++ b/clang/test/SemaTemplate/ms-sizeof-missing-typename.cpp
@@ -4,9 +4,9 @@
// get the size of this type, so they don't get errors after inserting typename.
namespace basic {
-template <typename T> int type_f() { return sizeof T::type; } // expected-error {{missing 'typename' prior to dependent type name 'X::type'}}
-template <typename T> int type_g() { return sizeof(T::type); } // expected-warning {{missing 'typename' prior to dependent type name 'X::type'}}
-template <typename T> int type_h() { return sizeof((T::type)); } // expected-error {{missing 'typename' prior to dependent type name 'X::type'}}
+template <typename T> int type_f() { return sizeof T::type; } // expected-error {{missing 'typename' prior to dependent type name 'basic::X::type'}}
+template <typename T> int type_g() { return sizeof(T::type); } // expected-warning {{missing 'typename' prior to dependent type name 'basic::X::type'}}
+template <typename T> int type_h() { return sizeof((T::type)); } // expected-error {{missing 'typename' prior to dependent type name 'basic::X::type'}}
template <typename T> int value_f() { return sizeof T::not_a_type; }
template <typename T> int value_g() { return sizeof(T::not_a_type); }
template <typename T> int value_h() { return sizeof((T::not_a_type)); }
@@ -30,9 +30,9 @@ template <typename T>
struct Foo {
enum {
// expected-warning@+2 {{use 'template' keyword to treat 'InnerTemplate' as a dependent template name}}
- // expected-warning@+1 {{missing 'typename' prior to dependent type name 'Bar::InnerType'}}
+ // expected-warning@+1 {{missing 'typename' prior to dependent type name 'nested_sizeof::Bar::InnerType'}}
x1 = sizeof(typename T::/*template*/ InnerTemplate<sizeof(/*typename*/ T::InnerType)>),
- // expected-warning@+1 {{missing 'typename' prior to dependent type name 'Bar::InnerType'}}
+ // expected-warning@+1 {{missing 'typename' prior to dependent type name 'nested_sizeof::Bar::InnerType'}}
x2 = sizeof(typename T::template InnerTemplate<sizeof(/*typename*/ T::InnerType)>),
// expected-warning@+1 {{use 'template' keyword to treat 'InnerTemplate' as a dependent template name}}
y1 = sizeof(typename T::/*template*/ InnerTemplate<sizeof(T::InnerVar)>),
@@ -50,7 +50,7 @@ template struct Foo<Bar>; // expected-note-re {{in instantiation {{.*}} requeste
}
namespace ambiguous_missing_parens {
-// expected-error@+1 {{'Q::template U' is expected to be a non-type template, but instantiated to a class template}}
+// expected-error@+1 {{'ambiguous_missing_parens::Q::template U' is expected to be a non-type template, but instantiated to a class template}}
template <typename T> void f() { int a = sizeof T::template U<0> + 4; }
struct Q {
// expected-note@+1 {{class template declared here}}
diff --git a/clang/test/SemaTemplate/nested-implicit-deduction-guides.cpp b/clang/test/SemaTemplate/nested-implicit-deduction-guides.cpp
index 5c7a902..8e3b1b6 100644
--- a/clang/test/SemaTemplate/nested-implicit-deduction-guides.cpp
+++ b/clang/test/SemaTemplate/nested-implicit-deduction-guides.cpp
@@ -84,11 +84,11 @@ nested_init_list<int>::concept_fail nil_invalid{1, ""};
// expected-note@#INIT_LIST_INNER_INVALID {{candidate template ignored: constraints not satisfied [with F = const char *]}}
// expected-note@#INIT_LIST_INNER_INVALID_HEADER {{because 'const char *' does not satisfy 'False'}}
// expected-note@#False {{because 'false' evaluated to false}}
-// expected-note@#INIT_LIST_INNER_INVALID {{implicit deduction guide declared as 'template <False F> concept_fail(int, F) -> concept_fail<F>'}}
+// expected-note@#INIT_LIST_INNER_INVALID {{implicit deduction guide declared as 'template <False F> concept_fail(int, F) -> nested_init_list<int>::concept_fail<F>'}}
// expected-note@#INIT_LIST_INNER_INVALID {{candidate function template not viable: requires 1 argument, but 2 were provided}}
-// expected-note@#INIT_LIST_INNER_INVALID {{implicit deduction guide declared as 'template <False F> concept_fail(concept_fail<F>) -> concept_fail<F>'}}
+// expected-note@#INIT_LIST_INNER_INVALID {{implicit deduction guide declared as 'template <False F> concept_fail(nested_init_list<int>::concept_fail<F>) -> nested_init_list<int>::concept_fail<F>'}}
// expected-note@#INIT_LIST_INNER_INVALID {{candidate function template not viable: requires 0 arguments, but 2 were provided}}
-// expected-note@#INIT_LIST_INNER_INVALID {{implicit deduction guide declared as 'template <False F> concept_fail() -> concept_fail<F>'}}
+// expected-note@#INIT_LIST_INNER_INVALID {{implicit deduction guide declared as 'template <False F> concept_fail() -> nested_init_list<int>::concept_fail<F>'}}
namespace GH88142 {
diff --git a/clang/test/SemaTemplate/nested-name-spec-template.cpp b/clang/test/SemaTemplate/nested-name-spec-template.cpp
index d366925..e6cbe38 100644
--- a/clang/test/SemaTemplate/nested-name-spec-template.cpp
+++ b/clang/test/SemaTemplate/nested-name-spec-template.cpp
@@ -130,7 +130,7 @@ namespace PR9226 {
template<typename T, typename U>
struct Y {
- typedef typename T::template f<U> type; // expected-error{{template name refers to non-type template 'X::template f'}}
+ typedef typename T::template f<U> type; // expected-error{{template name refers to non-type template 'PR9226::X::template f'}}
};
Y<X, int> yxi; // expected-note{{in instantiation of template class 'PR9226::Y<PR9226::X, int>' requested here}}
@@ -154,3 +154,16 @@ namespace sugared_template_instantiation {
struct B { typedef int type1; };
typedef A<const B> type2;
} // namespace sugated_template_instantiation
+
+namespace unresolved_using {
+ template <class> struct A {
+ struct B {
+ typedef int X;
+ };
+ };
+ template <class T> struct C : A<T> {
+ using typename A<T>::B;
+ typedef typename B::X Y;
+ };
+ template struct C<int>;
+} // namespace unresolved_using
diff --git a/clang/test/SemaTemplate/nested-template.cpp b/clang/test/SemaTemplate/nested-template.cpp
index 7911cf5..b5da1b1 100644
--- a/clang/test/SemaTemplate/nested-template.cpp
+++ b/clang/test/SemaTemplate/nested-template.cpp
@@ -152,14 +152,14 @@ namespace PR10924 {
template< class Topology, class ctype >
template< int codim >
- class ReferenceElement< Topology, ctype > :: BaryCenterArray // expected-error{{out-of-line definition of 'BaryCenterArray' does not match any declaration in 'ReferenceElement<Topology, ctype>'}}
+ class ReferenceElement< Topology, ctype > :: BaryCenterArray // expected-error{{out-of-line definition of 'BaryCenterArray' does not match any declaration in 'PR10924::ReferenceElement<Topology, ctype>'}}
{
};
}
class Outer1 {
template <typename T> struct X;
- template <typename T> int X<T>::func() {} // expected-error{{out-of-line definition of 'func' from class 'X<T>' without definition}}
+ template <typename T> int X<T>::func() {} // expected-error{{out-of-line definition of 'func' from class 'Outer1::X<T>' without definition}}
};
namespace RefPack {
diff --git a/clang/test/SemaTemplate/overload-candidates.cpp b/clang/test/SemaTemplate/overload-candidates.cpp
index de998d7..a9c86b2 100644
--- a/clang/test/SemaTemplate/overload-candidates.cpp
+++ b/clang/test/SemaTemplate/overload-candidates.cpp
@@ -16,9 +16,9 @@ void test_dyn_cast(int* ptr) {
(void)dyn_cast(ptr); // expected-error{{no matching function for call to 'dyn_cast'}}
}
-template<int I, typename T>
+template<int I, typename T>
void get(const T&); // expected-note{{candidate template ignored: invalid explicitly-specified argument for template parameter 'I'}}
-template<template<class T> class, typename T>
+template<template<class T> class, typename T>
void get(const T&); // expected-note{{candidate template ignored: invalid explicitly-specified argument for 1st template parameter}}
void test_get(void *ptr) {
@@ -100,7 +100,7 @@ namespace PR15673 {
#if __cplusplus <= 199711L
// expected-warning@-2 {{default template arguments for a function template are a C++11 extension}}
#endif
- // expected-note@+1 {{candidate template ignored: requirement 'a_trait<int>::value' was not satisfied [with T = int]}}
+ // expected-note@+1 {{candidate template ignored: requirement 'PR15673::a_trait<int>::value' was not satisfied [with T = int]}}
void foo() {}
void bar() { foo<int>(); } // expected-error {{no matching function for call to 'foo'}}
@@ -128,7 +128,7 @@ namespace PR15673 {
#if __cplusplus <= 199711L
// expected-warning@-2 {{alias declarations are a C++11 extension}}
#endif
- // expected-note@+7 {{candidate template ignored: requirement 'some_trait<int>::value' was not satisfied [with T = int]}}
+ // expected-note@+7 {{candidate template ignored: requirement 'PR15673::some_trait<int>::value' was not satisfied [with T = int]}}
template<typename T,
typename Requires = unicorns<T> >
@@ -148,7 +148,7 @@ namespace PR15673 {
template<typename T,
int n = 42,
typename std::enable_if<n == 43 || (some_passing_trait<T>::value && some_trait<T>::value), int>::type = 0>
- void almost_rangesv3(); // expected-note{{candidate template ignored: requirement '42 == 43 || (some_passing_trait<int>::value && some_trait<int>::value)' was not satisfied}}
+ void almost_rangesv3(); // expected-note{{candidate template ignored: requirement '42 == 43 || (PR15673::some_passing_trait<int>::value && PR15673::some_trait<int>::value)' was not satisfied}}
void test_almost_rangesv3() { almost_rangesv3<int>(); } // expected-error{{no matching function for call to 'almost_rangesv3'}}
#define CONCEPT_REQUIRES_(...) \
@@ -161,6 +161,6 @@ namespace PR15673 {
#endif
template<typename T,
CONCEPT_REQUIRES_(some_passing_trait<T>::value && some_trait<T>::value)>
- void rangesv3(); // expected-note{{candidate template ignored: requirement 'some_trait<int>::value' was not satisfied [with T = int, x = 42]}}
+ void rangesv3(); // expected-note{{candidate template ignored: requirement 'PR15673::some_trait<int>::value' was not satisfied [with T = int, x = 42]}}
void test_rangesv3() { rangesv3<int>(); } // expected-error{{no matching function for call to 'rangesv3'}}
}
diff --git a/clang/test/SemaTemplate/temp_arg_nontype.cpp b/clang/test/SemaTemplate/temp_arg_nontype.cpp
index 9363e74..7d2a010 100644
--- a/clang/test/SemaTemplate/temp_arg_nontype.cpp
+++ b/clang/test/SemaTemplate/temp_arg_nontype.cpp
@@ -438,7 +438,7 @@ namespace dependent_nested_partial_specialization {
template<template<typename> class X> struct A {
template<typename T, X<T> N> struct B; // expected-note 2{{here}}
- template <typename T> struct B<T, 0> {}; // expected-error {{non-type template argument specializes a template parameter with dependent type 'Y<T>' (aka 'T *')}}
+ template <typename T> struct B<T, 0> {}; // expected-error {{non-type template argument specializes a template parameter with dependent type 'dependent_nested_partial_specialization::Y<T>' (aka 'T *')}}
};
A<X>::B<int, 0> ax;
A<Y>::B<int, &n> ay; // expected-error {{undefined}} expected-note {{instantiation of}}
diff --git a/clang/test/SemaTemplate/temp_arg_nontype_cxx1z.cpp b/clang/test/SemaTemplate/temp_arg_nontype_cxx1z.cpp
index c35743b..9c25e26 100644
--- a/clang/test/SemaTemplate/temp_arg_nontype_cxx1z.cpp
+++ b/clang/test/SemaTemplate/temp_arg_nontype_cxx1z.cpp
@@ -621,3 +621,8 @@ namespace GH73460 {
int j;
template struct A<int&, j, j>;
} // namespace GH73460
+
+namespace GH118190 {
+ template <auto> int x;
+ template <int i> int x<i>;
+}
diff --git a/clang/test/SemaTemplate/template-id-expr.cpp b/clang/test/SemaTemplate/template-id-expr.cpp
index a13013a..0527af4 100644
--- a/clang/test/SemaTemplate/template-id-expr.cpp
+++ b/clang/test/SemaTemplate/template-id-expr.cpp
@@ -202,7 +202,7 @@ struct P {
struct Q {
template <typename T> int foo() {
return T::template I<int>;
- // expected-error@-1 {{'P::template I' is expected to be a non-type template, but instantiated to a type alias template}}
+ // expected-error@-1 {{'non_functions::PR88832::P::template I' is expected to be a non-type template, but instantiated to a type alias template}}
// expected-note@#TypeAlias {{type alias template declared here}}
}
};
diff --git a/clang/test/SemaTemplate/type_pack_element.cpp b/clang/test/SemaTemplate/type_pack_element.cpp
index 5ff010c..e077638 100644
--- a/clang/test/SemaTemplate/type_pack_element.cpp
+++ b/clang/test/SemaTemplate/type_pack_element.cpp
@@ -2,61 +2,57 @@
using test1 = __type_pack_element<0, int>;
// CHECK: |-TypeAliasDecl 0x{{[0-9A-Fa-f]+}} <<stdin>:3:1, col:41> col:7 test1 '__type_pack_element<0, int>':'int'
-// CHECK-NEXT: `-ElaboratedType 0x{{[0-9A-Fa-f]+}} '__type_pack_element<0, int>' sugar
-// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} '__type_pack_element<0, int>' sugar alias
-// CHECK-NEXT: |-name: '__type_pack_element' qualified
-// CHECK-NEXT: | `-BuiltinTemplateDecl {{.+}} __type_pack_element
-// CHECK-NEXT: |-TemplateArgument expr '0'
-// CHECK-NEXT: | `-ConstantExpr 0x{{[0-9A-Fa-f]+}} <col:35> '__size_t':'unsigned long'
-// CHECK-NEXT: | |-value: Int 0
-// CHECK-NEXT: | `-ImplicitCastExpr 0x{{[0-9A-Fa-f]+}} <col:35> '__size_t':'unsigned long' <IntegralCast>
-// CHECK-NEXT: | `-IntegerLiteral 0x{{[0-9A-Fa-f]+}} <col:35> 'int' 0
-// CHECK-NEXT: |-TemplateArgument type 'int'
-// CHECK-NEXT: | `-BuiltinType 0x{{[0-9A-Fa-f]+}} 'int'
-// CHECK-NEXT: `-BuiltinType 0x{{[0-9A-Fa-f]+}} 'int'
+// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} '__type_pack_element<0, int>' sugar alias
+// CHECK-NEXT: |-name: '__type_pack_element' qualified
+// CHECK-NEXT: | `-BuiltinTemplateDecl {{.+}} __type_pack_element
+// CHECK-NEXT: |-TemplateArgument expr '0'
+// CHECK-NEXT: | `-ConstantExpr 0x{{[0-9A-Fa-f]+}} <col:35> '__size_t':'unsigned long'
+// CHECK-NEXT: | |-value: Int 0
+// CHECK-NEXT: | `-ImplicitCastExpr 0x{{[0-9A-Fa-f]+}} <col:35> '__size_t':'unsigned long' <IntegralCast>
+// CHECK-NEXT: | `-IntegerLiteral 0x{{[0-9A-Fa-f]+}} <col:35> 'int' 0
+// CHECK-NEXT: |-TemplateArgument type 'int'
+// CHECK-NEXT: | `-BuiltinType 0x{{[0-9A-Fa-f]+}} 'int'
+// CHECK-NEXT: `-BuiltinType 0x{{[0-9A-Fa-f]+}} 'int'
template<int N, class ...Ts> struct A {
using test2 = __type_pack_element<N, Ts...>;
// CHECK: |-TypeAliasDecl 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:3, col:45> col:9 test2 '__type_pack_element<N, Ts...>'
-// CHECK-NEXT: `-ElaboratedType 0x{{[0-9A-Fa-f]+}} '__type_pack_element<N, Ts...>' sugar dependent
-// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} '__type_pack_element<N, Ts...>' dependent
-// CHECK-NEXT: |-name: '__type_pack_element' qualified
-// CHECK-NEXT: | `-BuiltinTemplateDecl {{.+}} __type_pack_element
-// CHECK-NEXT: |-TemplateArgument expr 'N'
-// CHECK-NEXT: | `-ImplicitCastExpr 0x{{[0-9A-Fa-f]+}} <col:37> '__size_t':'unsigned long' <IntegralCast>
-// CHECK-NEXT: | `-DeclRefExpr 0x{{[0-9A-Fa-f]+}} <col:37> 'int' NonTypeTemplateParm 0x{{[0-9A-Fa-f]+}} 'N' 'int'
-// CHECK-NEXT: `-TemplateArgument type 'Ts...'
-// CHECK-NEXT: `-PackExpansionType 0x{{[0-9A-Fa-f]+}} 'Ts...' dependent
-// CHECK-NEXT: `-TemplateTypeParmType 0x{{[0-9A-Fa-f]+}} 'Ts' dependent contains_unexpanded_pack depth 0 index 1 pack
-// CHECK-NEXT: `-TemplateTypeParm 0x{{[0-9A-Fa-f]+}} 'Ts'
+// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} '__type_pack_element<N, Ts...>' dependent
+// CHECK-NEXT: |-name: '__type_pack_element' qualified
+// CHECK-NEXT: | `-BuiltinTemplateDecl {{.+}} __type_pack_element
+// CHECK-NEXT: |-TemplateArgument expr 'N'
+// CHECK-NEXT: | `-ImplicitCastExpr 0x{{[0-9A-Fa-f]+}} <col:37> '__size_t':'unsigned long' <IntegralCast>
+// CHECK-NEXT: | `-DeclRefExpr 0x{{[0-9A-Fa-f]+}} <col:37> 'int' NonTypeTemplateParm 0x{{[0-9A-Fa-f]+}} 'N' 'int'
+// CHECK-NEXT: `-TemplateArgument type 'Ts...'
+// CHECK-NEXT: `-PackExpansionType 0x{{[0-9A-Fa-f]+}} 'Ts...' dependent
+// CHECK-NEXT: `-TemplateTypeParmType 0x{{[0-9A-Fa-f]+}} 'Ts' dependent contains_unexpanded_pack depth 0 index 1 pack
+// CHECK-NEXT: `-TemplateTypeParm 0x{{[0-9A-Fa-f]+}} 'Ts'
using test3 = __type_pack_element<0, Ts...>;
// CHECK: |-TypeAliasDecl 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:3, col:45> col:9 test3 '__type_pack_element<0, Ts...>'
-// CHECK-NEXT: `-ElaboratedType 0x{{[0-9A-Fa-f]+}} '__type_pack_element<0, Ts...>' sugar dependent
-// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} '__type_pack_element<0, Ts...>' dependent
-// CHECK-NEXT: |-name: '__type_pack_element' qualified
-// CHECK-NEXT: | `-BuiltinTemplateDecl {{.+}} __type_pack_element
-// CHECK-NEXT: |-TemplateArgument expr '0'
-// CHECK-NEXT: | `-ConstantExpr 0x{{[0-9A-Fa-f]+}} <col:37> '__size_t':'unsigned long'
-// CHECK-NEXT: | |-value: Int 0
-// CHECK-NEXT: | `-ImplicitCastExpr 0x{{[0-9A-Fa-f]+}} <col:37> '__size_t':'unsigned long' <IntegralCast>
-// CHECK-NEXT: | `-IntegerLiteral 0x{{[0-9A-Fa-f]+}} <col:37> 'int' 0
-// CHECK-NEXT: `-TemplateArgument type 'Ts...'
-// CHECK-NEXT: `-PackExpansionType 0x{{[0-9A-Fa-f]+}} 'Ts...' dependent
-// CHECK-NEXT: `-TemplateTypeParmType 0x{{[0-9A-Fa-f]+}} 'Ts' dependent contains_unexpanded_pack depth 0 index 1 pack
-// CHECK-NEXT: `-TemplateTypeParm 0x{{[0-9A-Fa-f]+}} 'Ts'
+// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} '__type_pack_element<0, Ts...>' dependent
+// CHECK-NEXT: |-name: '__type_pack_element' qualified
+// CHECK-NEXT: | `-BuiltinTemplateDecl {{.+}} __type_pack_element
+// CHECK-NEXT: |-TemplateArgument expr '0'
+// CHECK-NEXT: | `-ConstantExpr 0x{{[0-9A-Fa-f]+}} <col:37> '__size_t':'unsigned long'
+// CHECK-NEXT: | |-value: Int 0
+// CHECK-NEXT: | `-ImplicitCastExpr 0x{{[0-9A-Fa-f]+}} <col:37> '__size_t':'unsigned long' <IntegralCast>
+// CHECK-NEXT: | `-IntegerLiteral 0x{{[0-9A-Fa-f]+}} <col:37> 'int' 0
+// CHECK-NEXT: `-TemplateArgument type 'Ts...'
+// CHECK-NEXT: `-PackExpansionType 0x{{[0-9A-Fa-f]+}} 'Ts...' dependent
+// CHECK-NEXT: `-TemplateTypeParmType 0x{{[0-9A-Fa-f]+}} 'Ts' dependent contains_unexpanded_pack depth 0 index 1 pack
+// CHECK-NEXT: `-TemplateTypeParm 0x{{[0-9A-Fa-f]+}} 'Ts'
using test4 = __type_pack_element<N, int>;
// CHECK: `-TypeAliasDecl 0x{{[0-9A-Fa-f]+}} <line:{{.+}}:3, col:43> col:9 test4 '__type_pack_element<N, int>'
-// CHECK-NEXT: `-ElaboratedType 0x{{[0-9A-Fa-f]+}} '__type_pack_element<N, int>' sugar dependent
-// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} '__type_pack_element<N, int>' dependent
-// CHECK-NEXT: |-name: '__type_pack_element' qualified
-// CHECK-NEXT: | `-BuiltinTemplateDecl {{.+}} __type_pack_element
-// CHECK-NEXT: |-TemplateArgument expr 'N'
-// CHECK-NEXT: | `-ImplicitCastExpr 0x{{[0-9A-Fa-f]+}} <col:37> '__size_t':'unsigned long' <IntegralCast>
-// CHECK-NEXT: | `-DeclRefExpr 0x{{[0-9A-Fa-f]+}} <col:37> 'int' NonTypeTemplateParm 0x{{[0-9A-Fa-f]+}} 'N' 'int'
-// CHECK-NEXT: `-TemplateArgument type 'int'
-// CHECK-NEXT: `-BuiltinType 0x{{[0-9A-Fa-f]+}} 'int'
+// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} '__type_pack_element<N, int>' dependent
+// CHECK-NEXT: |-name: '__type_pack_element' qualified
+// CHECK-NEXT: | `-BuiltinTemplateDecl {{.+}} __type_pack_element
+// CHECK-NEXT: |-TemplateArgument expr 'N'
+// CHECK-NEXT: | `-ImplicitCastExpr 0x{{[0-9A-Fa-f]+}} <col:37> '__size_t':'unsigned long' <IntegralCast>
+// CHECK-NEXT: | `-DeclRefExpr 0x{{[0-9A-Fa-f]+}} <col:37> 'int' NonTypeTemplateParm 0x{{[0-9A-Fa-f]+}} 'N' 'int'
+// CHECK-NEXT: `-TemplateArgument type 'int'
+// CHECK-NEXT: `-BuiltinType 0x{{[0-9A-Fa-f]+}} 'int'
};
// expected-no-diagnostics
diff --git a/clang/test/SemaTemplate/typename-specifier-4.cpp b/clang/test/SemaTemplate/typename-specifier-4.cpp
index 8d9962e..8f9dce6 100644
--- a/clang/test/SemaTemplate/typename-specifier-4.cpp
+++ b/clang/test/SemaTemplate/typename-specifier-4.cpp
@@ -147,7 +147,7 @@ namespace rdar8740998 {
// expected-error{{dependent using declaration resolved to type without 'typename'}}
void f() {
- typename X<T>::iterator i; // expected-error{{typename specifier refers to a dependent using declaration for a value 'iterator' in 'X<T>'}}
+ typename X<T>::iterator i; // expected-error{{typename specifier refers to a dependent using declaration for a value 'iterator' in 'rdar8740998::X<T>'}}
}
};
diff --git a/clang/test/SemaTemplate/typename-specifier.cpp b/clang/test/SemaTemplate/typename-specifier.cpp
index 01acc34..27175c5 100644
--- a/clang/test/SemaTemplate/typename-specifier.cpp
+++ b/clang/test/SemaTemplate/typename-specifier.cpp
@@ -40,7 +40,7 @@ void test(double d) {
// expected-warning@-3 2{{'typename' outside of a template is a C++11 extension}}
#endif
int five = f(5);
-
+
using namespace N;
for (typename A::type i = 0; i < 10; ++i)
#if __cplusplus <= 199711L
@@ -102,7 +102,7 @@ D<long> struct_D; // expected-note {{in instantiation of template class 'D<long
template<typename T> struct E {
typedef typename T::foo foo;
- typedef typename foo::bar bar; // expected-error {{type 'E<F>::foo' (aka 'double') cannot be used prior to '::' because it has no members}}
+ typedef typename foo::bar bar; // expected-error {{type 'foo' (aka 'double') cannot be used prior to '::' because it has no members}}
};
struct F {
@@ -245,7 +245,7 @@ void j() {
namespace pointer_vs_multiply {
int x;
-// expected-error@+1 {{missing 'typename' prior to dependent type name 'B::type_or_int'}}
+// expected-error@+1 {{missing 'typename' prior to dependent type name 'pointer_vs_multiply::B::type_or_int'}}
template <typename T> void g() { T::type_or_int * x; }
// expected-error@+1 {{typename specifier refers to non-type member 'type_or_int' in 'pointer_vs_multiply::A'}}
template <typename T> void h() { typename T::type_or_int * x; }
diff --git a/clang/test/lit.site.cfg.py.in b/clang/test/lit.site.cfg.py.in
index 176cf64..f50953a 100644
--- a/clang/test/lit.site.cfg.py.in
+++ b/clang/test/lit.site.cfg.py.in
@@ -46,7 +46,6 @@ config.ppc_linux_default_ieeelongdouble = @PPC_LINUX_DEFAULT_IEEELONGDOUBLE@
config.have_llvm_driver = @LLVM_TOOL_LLVM_DRIVER_BUILD@
config.spirv_tools_tests = @LLVM_INCLUDE_SPIRV_TOOLS_TESTS@
config.substitutions.append(("%llvm-version-major", "@LLVM_VERSION_MAJOR@"))
-config.has_key_instructions = @LLVM_EXPERIMENTAL_KEY_INSTRUCTIONS@
import lit.llvm
lit.llvm.initialize(lit_config, config)
diff --git a/clang/tools/libclang/CIndex.cpp b/clang/tools/libclang/CIndex.cpp
index 9493edf..c446bb0 100644
--- a/clang/tools/libclang/CIndex.cpp
+++ b/clang/tools/libclang/CIndex.cpp
@@ -1420,79 +1420,25 @@ bool CursorVisitor::VisitDeclarationNameInfo(DeclarationNameInfo Name) {
llvm_unreachable("Invalid DeclarationName::Kind!");
}
-bool CursorVisitor::VisitNestedNameSpecifier(NestedNameSpecifier *NNS,
- SourceRange Range) {
- // FIXME: This whole routine is a hack to work around the lack of proper
- // source information in nested-name-specifiers (PR5791). Since we do have
- // a beginning source location, we can visit the first component of the
- // nested-name-specifier, if it's a single-token component.
- if (!NNS)
- return false;
-
- // Get the first component in the nested-name-specifier.
- while (NestedNameSpecifier *Prefix = NNS->getPrefix())
- NNS = Prefix;
-
- switch (NNS->getKind()) {
- case NestedNameSpecifier::Namespace:
- return Visit(
- MakeCursorNamespaceRef(NNS->getAsNamespace(), Range.getBegin(), TU));
-
- case NestedNameSpecifier::TypeSpec: {
- // If the type has a form where we know that the beginning of the source
- // range matches up with a reference cursor. Visit the appropriate reference
- // cursor.
- const Type *T = NNS->getAsType();
- if (const TypedefType *Typedef = dyn_cast<TypedefType>(T))
- return Visit(MakeCursorTypeRef(Typedef->getDecl(), Range.getBegin(), TU));
- if (const TagType *Tag = dyn_cast<TagType>(T))
- return Visit(MakeCursorTypeRef(Tag->getDecl(), Range.getBegin(), TU));
- if (const TemplateSpecializationType *TST =
- dyn_cast<TemplateSpecializationType>(T))
- return VisitTemplateName(TST->getTemplateName(), Range.getBegin());
- break;
- }
-
- case NestedNameSpecifier::Global:
- case NestedNameSpecifier::Identifier:
- case NestedNameSpecifier::Super:
- break;
- }
-
- return false;
-}
-
bool CursorVisitor::VisitNestedNameSpecifierLoc(
NestedNameSpecifierLoc Qualifier) {
- SmallVector<NestedNameSpecifierLoc, 4> Qualifiers;
- for (; Qualifier; Qualifier = Qualifier.getPrefix())
- Qualifiers.push_back(Qualifier);
-
- while (!Qualifiers.empty()) {
- NestedNameSpecifierLoc Q = Qualifiers.pop_back_val();
- NestedNameSpecifier *NNS = Q.getNestedNameSpecifier();
- switch (NNS->getKind()) {
- case NestedNameSpecifier::Namespace:
- if (Visit(MakeCursorNamespaceRef(NNS->getAsNamespace(),
- Q.getLocalBeginLoc(), TU)))
- return true;
-
- break;
-
- case NestedNameSpecifier::TypeSpec:
- if (Visit(Q.getTypeLoc()))
- return true;
-
- break;
-
- case NestedNameSpecifier::Global:
- case NestedNameSpecifier::Identifier:
- case NestedNameSpecifier::Super:
- break;
- }
+ NestedNameSpecifier NNS = Qualifier.getNestedNameSpecifier();
+ switch (NNS.getKind()) {
+ case NestedNameSpecifier::Kind::Namespace: {
+ auto [Namespace, Prefix] = Qualifier.castAsNamespaceAndPrefix();
+ if (VisitNestedNameSpecifierLoc(Prefix))
+ return true;
+ return Visit(
+ MakeCursorNamespaceRef(Namespace, Qualifier.getLocalBeginLoc(), TU));
}
-
- return false;
+ case NestedNameSpecifier::Kind::Type:
+ return Visit(Qualifier.castAsTypeLoc());
+ case NestedNameSpecifier::Kind::Null:
+ case NestedNameSpecifier::Kind::Global:
+ case NestedNameSpecifier::Kind::MicrosoftSuper:
+ return false;
+ }
+ llvm_unreachable("unexpected nested name specifier kind");
}
bool CursorVisitor::VisitTemplateParameters(
@@ -1515,16 +1461,23 @@ bool CursorVisitor::VisitTemplateParameters(
return false;
}
-bool CursorVisitor::VisitTemplateName(TemplateName Name, SourceLocation Loc) {
+bool CursorVisitor::VisitTemplateName(TemplateName Name, SourceLocation NameLoc,
+ NestedNameSpecifierLoc NNS) {
switch (Name.getKind()) {
+ case TemplateName::QualifiedTemplate: {
+ const QualifiedTemplateName *QTN = Name.getAsQualifiedTemplateName();
+ assert(QTN->getQualifier() == NNS.getNestedNameSpecifier());
+ if (VisitNestedNameSpecifierLoc(NNS))
+ return true;
+ return VisitTemplateName(QTN->getUnderlyingTemplate(), NameLoc, /*NNS=*/{});
+ }
case TemplateName::Template:
case TemplateName::UsingTemplate:
- case TemplateName::QualifiedTemplate: // FIXME: Visit nested-name-specifier.
- return Visit(MakeCursorTemplateRef(Name.getAsTemplateDecl(), Loc, TU));
+ return Visit(MakeCursorTemplateRef(Name.getAsTemplateDecl(), NameLoc, TU));
case TemplateName::OverloadedTemplate:
// Visit the overloaded template set.
- if (Visit(MakeCursorOverloadedDeclRef(Name, Loc, TU)))
+ if (Visit(MakeCursorOverloadedDeclRef(Name, NameLoc, TU)))
return true;
return false;
@@ -1533,17 +1486,19 @@ bool CursorVisitor::VisitTemplateName(TemplateName Name, SourceLocation Loc) {
// FIXME: Visit DeclarationName?
return false;
- case TemplateName::DependentTemplate:
- // FIXME: Visit nested-name-specifier.
- return false;
+ case TemplateName::DependentTemplate: {
+ assert(Name.getAsDependentTemplateName()->getQualifier() ==
+ NNS.getNestedNameSpecifier());
+ return VisitNestedNameSpecifierLoc(NNS);
+ }
case TemplateName::SubstTemplateTemplateParm:
return Visit(MakeCursorTemplateRef(
- Name.getAsSubstTemplateTemplateParm()->getParameter(), Loc, TU));
+ Name.getAsSubstTemplateTemplateParm()->getParameter(), NameLoc, TU));
case TemplateName::SubstTemplateTemplateParmPack:
return Visit(MakeCursorTemplateRef(
- Name.getAsSubstTemplateTemplateParmPack()->getParameterPack(), Loc,
+ Name.getAsSubstTemplateTemplateParmPack()->getParameterPack(), NameLoc,
TU));
case TemplateName::DeducedTemplate:
@@ -1587,11 +1542,9 @@ bool CursorVisitor::VisitTemplateArgumentLoc(const TemplateArgumentLoc &TAL) {
case TemplateArgument::Template:
case TemplateArgument::TemplateExpansion:
- if (VisitNestedNameSpecifierLoc(TAL.getTemplateQualifierLoc()))
- return true;
-
return VisitTemplateName(TAL.getArgument().getAsTemplateOrTemplatePattern(),
- TAL.getTemplateNameLoc());
+ TAL.getTemplateNameLoc(),
+ TAL.getTemplateQualifierLoc());
}
llvm_unreachable("Invalid TemplateArgument::Kind!");
@@ -1669,7 +1622,10 @@ bool CursorVisitor::VisitBuiltinTypeLoc(BuiltinTypeLoc TL) {
}
bool CursorVisitor::VisitTypedefTypeLoc(TypedefTypeLoc TL) {
- return Visit(MakeCursorTypeRef(TL.getTypedefNameDecl(), TL.getNameLoc(), TU));
+ if (VisitNestedNameSpecifierLoc(TL.getQualifierLoc()))
+ return true;
+
+ return Visit(MakeCursorTypeRef(TL.getDecl(), TL.getNameLoc(), TU));
}
bool CursorVisitor::VisitPredefinedSugarTypeLoc(PredefinedSugarTypeLoc TL) {
@@ -1677,14 +1633,20 @@ bool CursorVisitor::VisitPredefinedSugarTypeLoc(PredefinedSugarTypeLoc TL) {
}
bool CursorVisitor::VisitUnresolvedUsingTypeLoc(UnresolvedUsingTypeLoc TL) {
+ if (VisitNestedNameSpecifierLoc(TL.getQualifierLoc()))
+ return true;
+
return Visit(MakeCursorTypeRef(TL.getDecl(), TL.getNameLoc(), TU));
}
bool CursorVisitor::VisitTagTypeLoc(TagTypeLoc TL) {
+ if (VisitNestedNameSpecifierLoc(TL.getQualifierLoc()))
+ return true;
+
if (TL.isDefinition())
- return Visit(MakeCXCursor(TL.getDecl(), TU, RegionOfInterest));
+ return Visit(MakeCXCursor(TL.getOriginalDecl(), TU, RegionOfInterest));
- return Visit(MakeCursorTypeRef(TL.getDecl(), TL.getNameLoc(), TU));
+ return Visit(MakeCursorTypeRef(TL.getOriginalDecl(), TL.getNameLoc(), TU));
}
bool CursorVisitor::VisitTemplateTypeParmTypeLoc(TemplateTypeParmTypeLoc TL) {
@@ -1763,7 +1725,10 @@ bool CursorVisitor::VisitRValueReferenceTypeLoc(RValueReferenceTypeLoc TL) {
}
bool CursorVisitor::VisitUsingTypeLoc(UsingTypeLoc TL) {
- auto *underlyingDecl = TL.getUnderlyingType()->getAsTagDecl();
+ if (VisitNestedNameSpecifierLoc(TL.getQualifierLoc()))
+ return true;
+
+ auto *underlyingDecl = TL.getTypePtr()->desugar()->getAsTagDecl();
if (underlyingDecl) {
return Visit(MakeCursorTypeRef(underlyingDecl, TL.getNameLoc(), TU));
}
@@ -1826,7 +1791,7 @@ bool CursorVisitor::VisitAdjustedTypeLoc(AdjustedTypeLoc TL) {
bool CursorVisitor::VisitDeducedTemplateSpecializationTypeLoc(
DeducedTemplateSpecializationTypeLoc TL) {
if (VisitTemplateName(TL.getTypePtr()->getTemplateName(),
- TL.getTemplateNameLoc()))
+ TL.getTemplateNameLoc(), TL.getQualifierLoc()))
return true;
return false;
@@ -1836,7 +1801,7 @@ bool CursorVisitor::VisitTemplateSpecializationTypeLoc(
TemplateSpecializationTypeLoc TL) {
// Visit the template name.
if (VisitTemplateName(TL.getTypePtr()->getTemplateName(),
- TL.getTemplateNameLoc()))
+ TL.getTemplateNameLoc(), TL.getQualifierLoc()))
return true;
// Visit the template arguments.
@@ -1871,8 +1836,7 @@ bool CursorVisitor::VisitDependentNameTypeLoc(DependentNameTypeLoc TL) {
bool CursorVisitor::VisitDependentTemplateSpecializationTypeLoc(
DependentTemplateSpecializationTypeLoc TL) {
- // Visit the nested-name-specifier, if there is one.
- if (TL.getQualifierLoc() && VisitNestedNameSpecifierLoc(TL.getQualifierLoc()))
+ if (VisitNestedNameSpecifierLoc(TL.getQualifierLoc()))
return true;
// Visit the template arguments.
@@ -1883,13 +1847,6 @@ bool CursorVisitor::VisitDependentTemplateSpecializationTypeLoc(
return false;
}
-bool CursorVisitor::VisitElaboratedTypeLoc(ElaboratedTypeLoc TL) {
- if (VisitNestedNameSpecifierLoc(TL.getQualifierLoc()))
- return true;
-
- return Visit(TL.getNamedTypeLoc());
-}
-
bool CursorVisitor::VisitPackExpansionTypeLoc(PackExpansionTypeLoc TL) {
return Visit(TL.getPatternLoc());
}
@@ -1908,7 +1865,7 @@ bool CursorVisitor::VisitPackIndexingTypeLoc(PackIndexingTypeLoc TL) {
}
bool CursorVisitor::VisitInjectedClassNameTypeLoc(InjectedClassNameTypeLoc TL) {
- return Visit(MakeCursorTypeRef(TL.getDecl(), TL.getNameLoc(), TU));
+ return Visit(MakeCursorTypeRef(TL.getOriginalDecl(), TL.getNameLoc(), TU));
}
bool CursorVisitor::VisitAtomicTypeLoc(AtomicTypeLoc TL) {
@@ -2065,7 +2022,7 @@ class NestedNameSpecifierLocVisit : public VisitorJob {
public:
NestedNameSpecifierLocVisit(NestedNameSpecifierLoc Qualifier, CXCursor parent)
: VisitorJob(parent, VisitorJob::NestedNameSpecifierLocVisitKind,
- Qualifier.getNestedNameSpecifier(),
+ Qualifier.getNestedNameSpecifier().getAsVoidPointer(),
Qualifier.getOpaqueData()) {}
static bool classof(const VisitorJob *VJ) {
@@ -2074,8 +2031,7 @@ public:
NestedNameSpecifierLoc get() const {
return NestedNameSpecifierLoc(
- const_cast<NestedNameSpecifier *>(
- static_cast<const NestedNameSpecifier *>(data[0])),
+ NestedNameSpecifier::getFromVoidPointer(data[0]),
const_cast<void *>(data[1]));
}
};
@@ -5363,9 +5319,13 @@ CXString clang_getCursorSpelling(CXCursor C) {
case CXCursor_TypeRef: {
const TypeDecl *Type = getCursorTypeRef(C).first;
assert(Type && "Missing type decl");
+ const ASTContext &Ctx = getCursorContext(C);
+ QualType T = Ctx.getTypeDeclType(Type);
- return cxstring::createDup(
- getCursorContext(C).getTypeDeclType(Type).getAsString());
+ PrintingPolicy Policy = Ctx.getPrintingPolicy();
+ Policy.FullyQualifiedName = true;
+ Policy.SuppressTagKeyword = false;
+ return cxstring::createDup(T.getAsString(Policy));
}
case CXCursor_TemplateRef: {
const TemplateDecl *Template = getCursorTemplateRef(C).first;
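The CIndex.cpp hunks above replace the old pointer-based NestedNameSpecifier walk with the value-typed API: the qualifier is inspected with getKind(), namespace components are split into a namespace plus its prefix with castAsNamespaceAndPrefix(), and type components carry their own TypeLoc. A minimal sketch of the same traversal outside CursorVisitor, assuming the post-patch headers (header paths and the helper name are illustrative; the calls mirror the patched code):

    #include "clang/AST/NestedNameSpecifier.h"
    #include "clang/AST/TypeLoc.h"
    #include "llvm/Support/ErrorHandling.h"

    using namespace clang;

    // Counts how many namespace components a qualifier has, outermost first.
    static unsigned countNamespaceComponents(NestedNameSpecifierLoc QualifierLoc) {
      NestedNameSpecifier NNS = QualifierLoc.getNestedNameSpecifier();
      switch (NNS.getKind()) {
      case NestedNameSpecifier::Kind::Namespace: {
        // Split into the namespace named by this component and the remaining prefix.
        auto [Namespace, Prefix] = QualifierLoc.castAsNamespaceAndPrefix();
        (void)Namespace;
        return 1 + countNamespaceComponents(Prefix);
      }
      case NestedNameSpecifier::Kind::Type:
        // A type component owns its TypeLoc; any further qualification lives inside it.
        return 0;
      case NestedNameSpecifier::Kind::Null:
      case NestedNameSpecifier::Kind::Global:
      case NestedNameSpecifier::Kind::MicrosoftSuper:
        return 0;
      }
      llvm_unreachable("unexpected nested name specifier kind");
    }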
diff --git a/clang/tools/libclang/CIndexCodeCompletion.cpp b/clang/tools/libclang/CIndexCodeCompletion.cpp
index 81448b4..6d14f28 100644
--- a/clang/tools/libclang/CIndexCodeCompletion.cpp
+++ b/clang/tools/libclang/CIndexCodeCompletion.cpp
@@ -617,7 +617,7 @@ namespace {
if (!baseType.isNull()) {
// Get the declaration for a class/struct/union/enum type
if (const TagType *Tag = baseType->getAs<TagType>())
- D = Tag->getDecl();
+ D = Tag->getOriginalDecl();
// Get the @interface declaration for a (possibly-qualified) Objective-C
// object pointer type, e.g., NSString*
else if (const ObjCObjectPointerType *ObjPtr =
@@ -629,7 +629,7 @@ namespace {
// Get the class for a C++ injected-class-name
else if (const InjectedClassNameType *Injected =
baseType->getAs<InjectedClassNameType>())
- D = Injected->getDecl();
+ D = Injected->getOriginalDecl();
}
if (D != nullptr) {
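These two hunks are part of the broader getDecl() to getOriginalDecl() renaming on TagType and InjectedClassNameType. A hedged sketch of the pattern; getDefinitionOrSelf() is taken from the RandstructTest hunk later in this patch and is only needed when the definition, rather than the as-written declaration, is wanted:

    #include "clang/AST/Decl.h"
    #include "clang/AST/Type.h"

    using namespace clang;

    // Resolve a record type to its definition, starting from the as-written declaration.
    static const RecordDecl *lookupRecordDefinition(QualType T) {
      if (const auto *RT = T->getAs<RecordType>())
        return RT->getOriginalDecl()->getDefinitionOrSelf();
      return nullptr;
    }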
diff --git a/clang/tools/libclang/CXCursor.cpp b/clang/tools/libclang/CXCursor.cpp
index a6301da..3c40624 100644
--- a/clang/tools/libclang/CXCursor.cpp
+++ b/clang/tools/libclang/CXCursor.cpp
@@ -1332,16 +1332,10 @@ CXCursor cxcursor::getTypeRefCursor(CXCursor cursor) {
TypeLoc TL = Type->getTypeLoc();
SourceLocation Loc = TL.getBeginLoc();
- if (const ElaboratedType *ElabT = Ty->getAs<ElaboratedType>()) {
- Ty = ElabT->getNamedType();
- ElaboratedTypeLoc ElabTL = TL.castAs<ElaboratedTypeLoc>();
- Loc = ElabTL.getNamedTypeLoc().getBeginLoc();
- }
-
if (const TypedefType *Typedef = Ty->getAs<TypedefType>())
return MakeCursorTypeRef(Typedef->getDecl(), Loc, TU);
if (const TagType *Tag = Ty->getAs<TagType>())
- return MakeCursorTypeRef(Tag->getDecl(), Loc, TU);
+ return MakeCursorTypeRef(Tag->getOriginalDecl(), Loc, TU);
if (const TemplateTypeParmType *TemplP = Ty->getAs<TemplateTypeParmType>())
return MakeCursorTypeRef(TemplP->getDecl(), Loc, TU);
diff --git a/clang/tools/libclang/CXIndexDataConsumer.cpp b/clang/tools/libclang/CXIndexDataConsumer.cpp
index 73d04b8..2623f60 100644
--- a/clang/tools/libclang/CXIndexDataConsumer.cpp
+++ b/clang/tools/libclang/CXIndexDataConsumer.cpp
@@ -357,7 +357,7 @@ CXIndexDataConsumer::CXXBasesListInfo::CXXBasesListInfo(const CXXRecordDecl *D,
TST = T->getAs<TemplateSpecializationType>()) {
BaseD = TST->getTemplateName().getAsTemplateDecl();
} else if (const RecordType *RT = T->getAs<RecordType>()) {
- BaseD = RT->getDecl();
+ BaseD = RT->getOriginalDecl();
}
if (BaseD)
@@ -389,13 +389,22 @@ SourceLocation CXIndexDataConsumer::CXXBasesListInfo::getBaseLoc(
if (QualifiedTypeLoc QL = TL.getAs<QualifiedTypeLoc>())
TL = QL.getUnqualifiedLoc();
- if (ElaboratedTypeLoc EL = TL.getAs<ElaboratedTypeLoc>())
- return EL.getNamedTypeLoc().getBeginLoc();
- if (DependentNameTypeLoc DL = TL.getAs<DependentNameTypeLoc>())
- return DL.getNameLoc();
- if (DependentTemplateSpecializationTypeLoc DTL =
- TL.getAs<DependentTemplateSpecializationTypeLoc>())
- return DTL.getTemplateNameLoc();
+ // FIXME: Factor this out, a lot of TypeLoc users seem to need a generic
+ // TypeLoc::getNameLoc()
+ if (auto TTL = TL.getAs<DependentNameTypeLoc>())
+ return TTL.getNameLoc();
+ if (auto TTL = TL.getAs<DependentTemplateSpecializationTypeLoc>())
+ return TTL.getTemplateNameLoc();
+ if (auto TTL = TL.getAs<TemplateSpecializationTypeLoc>())
+ return TTL.getTemplateNameLoc();
+ if (auto TTL = TL.getAs<TagTypeLoc>())
+ return TTL.getNameLoc();
+ if (auto TTL = TL.getAs<TypedefTypeLoc>())
+ return TTL.getNameLoc();
+ if (auto TTL = TL.getAs<UnresolvedUsingTypeLoc>())
+ return TTL.getNameLoc();
+ if (auto TTL = TL.getAs<UsingTypeLoc>())
+ return TTL.getNameLoc();
return Loc;
}
diff --git a/clang/tools/libclang/CXType.cpp b/clang/tools/libclang/CXType.cpp
index e7864e6..d21ac7c 100644
--- a/clang/tools/libclang/CXType.cpp
+++ b/clang/tools/libclang/CXType.cpp
@@ -118,7 +118,6 @@ static CXTypeKind GetTypeKind(QualType T) {
TKCASE(ExtVector);
TKCASE(MemberPointer);
TKCASE(Auto);
- TKCASE(Elaborated);
TKCASE(Pipe);
TKCASE(Attributed);
TKCASE(BTFTagAttributed);
@@ -225,6 +224,11 @@ FindTemplateArgumentTypeAt(ArrayRef<TemplateArgument> TA, unsigned index) {
return std::nullopt;
}
+static CXType getTypeDeclType(const ASTContext &Context, CXTranslationUnit TU,
+ const TypeDecl *TD) {
+ return MakeCXType(Context.getTypeDeclType(TD), TU);
+}
+
CXType clang_getCursorType(CXCursor C) {
using namespace cxcursor;
@@ -244,7 +248,7 @@ CXType clang_getCursorType(CXCursor C) {
return MakeCXType(QualType(), TU);
if (const TypeDecl *TD = dyn_cast<TypeDecl>(D))
- return MakeCXType(Context.getTypeDeclType(TD), TU);
+ return getTypeDeclType(Context, TU, TD);
if (const ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(D))
return MakeCXType(Context.getObjCInterfaceType(ID), TU);
if (const DeclaratorDecl *DD = dyn_cast<DeclaratorDecl>(D))
@@ -271,11 +275,8 @@ CXType clang_getCursorType(CXCursor C) {
return MakeCXType(T, TU);
}
- case CXCursor_TypeRef: {
- QualType T = Context.getTypeDeclType(getCursorTypeRef(C).first);
- return MakeCXType(T, TU);
-
- }
+ case CXCursor_TypeRef:
+ return getTypeDeclType(Context, TU, getCursorTypeRef(C).first);
case CXCursor_CXXBaseSpecifier:
return cxtype::MakeCXType(getCursorCXXBaseSpecifier(C)->getType(), TU);
@@ -545,11 +546,11 @@ try_again:
break;
case Type::Record:
case Type::Enum:
- D = cast<TagType>(TP)->getDecl();
+ D = cast<TagType>(TP)->getOriginalDecl();
break;
case Type::TemplateSpecialization:
if (const RecordType *Record = TP->getAs<RecordType>())
- D = Record->getDecl();
+ D = Record->getOriginalDecl();
else
D = cast<TemplateSpecializationType>(TP)->getTemplateName()
.getAsTemplateDecl();
@@ -563,14 +564,10 @@ try_again:
break;
case Type::InjectedClassName:
- D = cast<InjectedClassNameType>(TP)->getDecl();
+ D = cast<InjectedClassNameType>(TP)->getOriginalDecl();
break;
- // FIXME: Template type parameters!
-
- case Type::Elaborated:
- TP = cast<ElaboratedType>(TP)->getNamedType().getTypePtrOrNull();
- goto try_again;
+ // FIXME: Template type parameters!
default:
break;
@@ -990,7 +987,7 @@ CXType clang_Type_getClassType(CXType CT) {
const Type *TP = T.getTypePtrOrNull();
if (TP && TP->getTypeClass() == Type::MemberPointer) {
- ET = Ctx.getTypeDeclType(
+ ET = Ctx.getCanonicalTagType(
cast<MemberPointerType>(TP)->getMostRecentCXXRecordDecl());
}
return MakeCXType(ET, GetTU(CT));
@@ -1040,7 +1037,7 @@ static long long visitRecordForValidation(const RecordDecl *RD) {
return CXTypeLayoutError_Dependent;
// recurse
if (const RecordType *ChildType = I->getType()->getAs<RecordType>()) {
- if (const RecordDecl *Child = ChildType->getDecl()) {
+ if (const RecordDecl *Child = ChildType->getOriginalDecl()) {
long long ret = visitRecordForValidation(Child);
if (ret < 0)
return ret;
@@ -1390,10 +1387,9 @@ unsigned clang_Cursor_isInlineNamespace(CXCursor C) {
CXType clang_Type_getNamedType(CXType CT){
QualType T = GetQualType(CT);
- const Type *TP = T.getTypePtrOrNull();
- if (TP && TP->getTypeClass() == Type::Elaborated)
- return MakeCXType(cast<ElaboratedType>(TP)->getNamedType(), GetTU(CT));
+ if (!T.isNull() && !T.isCanonical())
+ return MakeCXType(T, GetTU(CT));
return MakeCXType(QualType(), GetTU(CT));
}
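With the ElaboratedType node removed, clang_Type_getNamedType above no longer unwraps a dedicated wrapper; any non-canonical (sugared) type now stands in for the named type. Restated as a tiny helper for clarity (a sketch, not part of the patch):

    #include "clang/AST/Type.h"

    using namespace clang;

    static QualType namedTypeOrNull(QualType T) {
      // Sugar (typedefs, template specializations, ...) is returned unchanged;
      // canonical types have no separate "named" type.
      if (!T.isNull() && !T.isCanonical())
        return T;
      return QualType();
    }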
diff --git a/clang/tools/libclang/CursorVisitor.h b/clang/tools/libclang/CursorVisitor.h
index 949b739..d5ab699 100644
--- a/clang/tools/libclang/CursorVisitor.h
+++ b/clang/tools/libclang/CursorVisitor.h
@@ -255,12 +255,12 @@ public:
// Name visitor
bool VisitDeclarationNameInfo(DeclarationNameInfo Name);
- bool VisitNestedNameSpecifier(NestedNameSpecifier *NNS, SourceRange Range);
bool VisitNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS);
// Template visitors
bool VisitTemplateParameters(const TemplateParameterList *Params);
- bool VisitTemplateName(TemplateName Name, SourceLocation Loc);
+ bool VisitTemplateName(TemplateName Name, SourceLocation NameLoc,
+ NestedNameSpecifierLoc NNS);
bool VisitTemplateArgumentLoc(const TemplateArgumentLoc &TAL);
// Type visitors
diff --git a/clang/unittests/AST/ASTContextParentMapTest.cpp b/clang/unittests/AST/ASTContextParentMapTest.cpp
index 9af0a46..4a8aa48 100644
--- a/clang/unittests/AST/ASTContextParentMapTest.cpp
+++ b/clang/unittests/AST/ASTContextParentMapTest.cpp
@@ -131,10 +131,10 @@ TEST(GetParents, FriendTypeLoc) {
auto &FrB = *cast<FriendDecl>(*++(cast<CXXRecordDecl>(B).decls_begin()));
TypeLoc FrALoc = FrA.getFriendType()->getTypeLoc();
TypeLoc FrBLoc = FrB.getFriendType()->getTypeLoc();
+ bool FrAOwnsTag = FrALoc.getTypePtr()->getAs<TagType>()->isTagOwned();
TagDecl *FrATagDecl =
- FrALoc.getTypePtr()->getAs<ElaboratedType>()->getOwnedTagDecl();
- TagDecl *FrBTagDecl =
- FrBLoc.getTypePtr()->getAs<ElaboratedType>()->getOwnedTagDecl();
+ FrALoc.getTypePtr()->getAs<TagType>()->getOriginalDecl();
+ bool FrBOwnsTag = FrBLoc.getTypePtr()->getAs<TagType>()->isTagOwned();
EXPECT_THAT(Ctx.getParents(A), ElementsAre(DynTypedNode::create(TU)));
EXPECT_THAT(Ctx.getParents(B), ElementsAre(DynTypedNode::create(TU)));
@@ -142,8 +142,8 @@ TEST(GetParents, FriendTypeLoc) {
EXPECT_THAT(Ctx.getParents(FrB), ElementsAre(DynTypedNode::create(B)));
EXPECT_THAT(Ctx.getParents(FrALoc), ElementsAre(DynTypedNode::create(FrA)));
EXPECT_THAT(Ctx.getParents(FrBLoc), ElementsAre(DynTypedNode::create(FrB)));
- EXPECT_TRUE(FrATagDecl);
- EXPECT_FALSE(FrBTagDecl);
+ EXPECT_TRUE(FrAOwnsTag);
+ EXPECT_FALSE(FrBOwnsTag);
EXPECT_THAT(Ctx.getParents(*FrATagDecl),
ElementsAre(DynTypedNode::create(FrA)));
}
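The FriendTypeLoc test above switches from ElaboratedType::getOwnedTagDecl() to asking the TagType itself whether it owns the tag it introduces. A hedged sketch of the check:

    #include "clang/AST/Type.h"
    #include "clang/AST/TypeLoc.h"

    using namespace clang;

    // True when the friend's written type introduces (owns) its tag declaration.
    static bool friendTypeOwnsTag(TypeLoc FriendTypeLoc) {
      if (const auto *TT = FriendTypeLoc.getTypePtr()->getAs<TagType>())
        return TT->isTagOwned();
      return false;
    }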
diff --git a/clang/unittests/AST/ASTExprTest.cpp b/clang/unittests/AST/ASTExprTest.cpp
index 5ec6aea..adaceb7 100644
--- a/clang/unittests/AST/ASTExprTest.cpp
+++ b/clang/unittests/AST/ASTExprTest.cpp
@@ -89,14 +89,14 @@ TEST(ASTExpr, InitListIsConstantInitialized) {
SourceLocation Loc{};
InitListExpr *BaseInit = new (Ctx) InitListExpr(Ctx, Loc, {}, Loc);
- BaseInit->setType(Ctx.getRecordType(Empty));
+ BaseInit->setType(Ctx.getCanonicalTagType(Empty));
Expr *Exprs[3] = {
BaseInit,
createIntLiteral(Ctx, 13),
createIntLiteral(Ctx, 42),
};
InitListExpr *FooInit = new (Ctx) InitListExpr(Ctx, Loc, Exprs, Loc);
- FooInit->setType(Ctx.getRecordType(Foo));
+ FooInit->setType(Ctx.getCanonicalTagType(Foo));
EXPECT_TRUE(FooInit->isConstantInitializer(Ctx, false));
// Replace the last initializer with something non-constant and make sure
diff --git a/clang/unittests/AST/ASTImporterFixtures.h b/clang/unittests/AST/ASTImporterFixtures.h
index 87e62cb..2af69c62 100644
--- a/clang/unittests/AST/ASTImporterFixtures.h
+++ b/clang/unittests/AST/ASTImporterFixtures.h
@@ -425,8 +425,7 @@ public:
};
template <typename T> RecordDecl *getRecordDecl(T *D) {
- auto *ET = cast<ElaboratedType>(D->getType().getTypePtr());
- return cast<RecordType>(ET->getNamedType().getTypePtr())->getDecl();
+ return D->getType()->getAsRecordDecl();
}
template <class T>
diff --git a/clang/unittests/AST/ASTImporterTest.cpp b/clang/unittests/AST/ASTImporterTest.cpp
index ac38300..15df9af 100644
--- a/clang/unittests/AST/ASTImporterTest.cpp
+++ b/clang/unittests/AST/ASTImporterTest.cpp
@@ -29,7 +29,7 @@ using internal::Matcher;
static const RecordDecl *getRecordDeclOfFriend(FriendDecl *FD) {
QualType Ty = FD->getFriendType()->getType().getCanonicalType();
- return cast<RecordType>(Ty)->getDecl();
+ return cast<RecordType>(Ty)->getOriginalDecl();
}
struct ImportExpr : TestImportBase {};
@@ -427,15 +427,14 @@ TEST_P(ImportExpr, ImportParenListExpr) {
"typedef dummy<int> declToImport;"
"template class dummy<int>;",
Lang_CXX03, "", Lang_CXX03, Verifier,
- typedefDecl(hasType(elaboratedType(namesType(templateSpecializationType(
+ typedefDecl(hasType(templateSpecializationType(
hasDeclaration(classTemplateSpecializationDecl(hasSpecializedTemplate(
- classTemplateDecl(hasTemplateDecl(cxxRecordDecl(hasMethod(
- allOf(hasName("f"),
- hasBody(compoundStmt(has(declStmt(hasSingleDecl(varDecl(
- hasInitializer(parenListExpr(has(unaryOperator(
- hasOperatorName("*"),
- hasUnaryOperand(
- cxxThisExpr())))))))))))))))))))))))));
+ classTemplateDecl(hasTemplateDecl(cxxRecordDecl(hasMethod(allOf(
+ hasName("f"),
+ hasBody(compoundStmt(has(declStmt(hasSingleDecl(
+ varDecl(hasInitializer(parenListExpr(has(unaryOperator(
+ hasOperatorName("*"),
+ hasUnaryOperand(cxxThisExpr())))))))))))))))))))))));
}
TEST_P(ImportExpr, ImportSwitch) {
@@ -691,8 +690,8 @@ TEST_P(ImportType, ImportUsingType) {
testImport("struct C {};"
"void declToImport() { using ::C; new C{}; }",
Lang_CXX11, "", Lang_CXX11, Verifier,
- functionDecl(hasDescendant(cxxNewExpr(hasType(pointerType(
- pointee(elaboratedType(namesType(usingType())))))))));
+ functionDecl(hasDescendant(
+ cxxNewExpr(hasType(pointerType(pointee(usingType())))))));
}
TEST_P(ImportDecl, ImportFunctionTemplateDecl) {
@@ -785,8 +784,7 @@ TEST_P(ImportType, ImportDeducedTemplateSpecialization) {
"class C { public: C(T); };"
"C declToImport(123);",
Lang_CXX17, "", Lang_CXX17, Verifier,
- varDecl(hasType(elaboratedType(
- namesType(deducedTemplateSpecializationType())))));
+ varDecl(hasType(deducedTemplateSpecializationType())));
}
const internal::VariadicDynCastAllOfMatcher<Stmt, SizeOfPackExpr>
@@ -996,9 +994,9 @@ TEST_P(ImportDecl, ImportUsingTemplate) {
"void declToImport() {"
"using ns::S; X<S> xi; }",
Lang_CXX11, "", Lang_CXX11, Verifier,
- functionDecl(hasDescendant(varDecl(hasTypeLoc(elaboratedTypeLoc(
- hasNamedTypeLoc(templateSpecializationTypeLoc(
- hasAnyTemplateArgumentLoc(templateArgumentLoc())))))))));
+ functionDecl(
+ hasDescendant(varDecl(hasTypeLoc(templateSpecializationTypeLoc(
+ hasAnyTemplateArgumentLoc(templateArgumentLoc())))))));
}
TEST_P(ImportDecl, ImportUsingEnumDecl) {
@@ -1019,24 +1017,10 @@ TEST_P(ImportDecl, ImportUsingPackDecl) {
"template<typename ...T> struct C : T... { using T::operator()...; };"
"C<A, B> declToImport;",
Lang_CXX20, "", Lang_CXX20, Verifier,
- varDecl(hasType(elaboratedType(namesType(templateSpecializationType(
- hasDeclaration(classTemplateSpecializationDecl(
- hasDescendant(usingPackDecl())))))))));
+ varDecl(hasType(templateSpecializationType(hasDeclaration(
+ classTemplateSpecializationDecl(hasDescendant(usingPackDecl())))))));
}
-/// \brief Matches shadow declarations introduced into a scope by a
-/// (resolved) using declaration.
-///
-/// Given
-/// \code
-/// namespace n { int f; }
-/// namespace declToImport { using n::f; }
-/// \endcode
-/// usingShadowDecl()
-/// matches \code f \endcode
-const internal::VariadicDynCastAllOfMatcher<Decl,
- UsingShadowDecl> usingShadowDecl;
-
TEST_P(ImportDecl, ImportUsingShadowDecl) {
MatchVerifier<Decl> Verifier;
// from using-decl
@@ -2855,8 +2839,9 @@ TEST_P(ImportFriendFunctions, ImportFriendFunctionRedeclChainDefWithClass) {
EXPECT_FALSE(InClassFD->doesThisDeclarationHaveABody());
EXPECT_EQ(InClassFD->getPreviousDecl(), ImportedD);
// The parameters must refer the same type
- EXPECT_EQ((*InClassFD->param_begin())->getOriginalType(),
- (*ImportedD->param_begin())->getOriginalType());
+ EXPECT_TRUE(ToTU->getASTContext().hasSameType(
+ (*InClassFD->param_begin())->getOriginalType(),
+ (*ImportedD->param_begin())->getOriginalType()));
}
TEST_P(ImportFriendFunctions,
@@ -2884,8 +2869,9 @@ TEST_P(ImportFriendFunctions,
EXPECT_TRUE(OutOfClassFD->doesThisDeclarationHaveABody());
EXPECT_EQ(ImportedD->getPreviousDecl(), OutOfClassFD);
// The parameters must refer the same type
- EXPECT_EQ((*OutOfClassFD->param_begin())->getOriginalType(),
- (*ImportedD->param_begin())->getOriginalType());
+ EXPECT_TRUE(ToTU->getASTContext().hasSameType(
+ (*OutOfClassFD->param_begin())->getOriginalType(),
+ (*ImportedD->param_begin())->getOriginalType()));
}
TEST_P(ImportFriendFunctions, ImportFriendFunctionFromMultipleTU) {
@@ -4486,8 +4472,7 @@ TEST_P(ImportFriendClasses, TypeForDeclShouldBeSetInTemplated) {
auto *Definition = FirstDeclMatcher<ClassTemplateDecl>().match(
FromTU1, classTemplateDecl(hasName("F")));
auto *Imported1 = cast<ClassTemplateDecl>(Import(Definition, Lang_CXX03));
- EXPECT_EQ(Imported0->getTemplatedDecl()->getTypeForDecl(),
- Imported1->getTemplatedDecl()->getTypeForDecl());
+ EXPECT_TRUE(declaresSameEntity(Imported0, Imported1));
}
TEST_P(ImportFriendClasses, DeclsFromFriendsShouldBeInRedeclChains) {
@@ -4548,8 +4533,10 @@ TEST_P(ImportFriendClasses, SkipComparingFriendTemplateDepth) {
classTemplateDecl(has(cxxRecordDecl(hasDefinition(), hasName("A")))));
auto *ToA = Import(FromA, Lang_CXX11);
EXPECT_TRUE(ToA);
- EXPECT_EQ(Fwd->getTemplatedDecl()->getTypeForDecl(),
- ToA->getTemplatedDecl()->getTypeForDecl());
+ const ASTContext &Ctx = ToTU->getASTContext();
+ EXPECT_TRUE(
+ Ctx.hasSameType(Ctx.getCanonicalTagType(Fwd->getTemplatedDecl()),
+ Ctx.getCanonicalTagType(ToA->getTemplatedDecl())));
}
TEST_P(ImportFriendClasses,
@@ -4627,7 +4614,7 @@ TEST_P(ImportFriendClasses, ImportOfClassDefinitionAndFwdFriendShouldBeLinked) {
auto *Friend = FirstDeclMatcher<FriendDecl>().match(FromTU0, friendDecl());
QualType FT = Friend->getFriendType()->getType();
FT = FromTU0->getASTContext().getCanonicalType(FT);
- auto *Fwd = cast<TagType>(FT)->getDecl();
+ auto *Fwd = cast<TagType>(FT)->getOriginalDecl();
auto *ImportedFwd = Import(Fwd, Lang_CXX03);
Decl *FromTU1 = getTuDecl(
R"(
@@ -7543,7 +7530,8 @@ TEST_P(ImportAutoFunctions, ReturnWithTypedefDeclaredInside) {
ASTContext &Ctx = From->getASTContext();
TypeAliasDecl *FromTA =
FirstDeclMatcher<TypeAliasDecl>().match(FromTU, typeAliasDecl());
- QualType TT = Ctx.getTypedefType(FromTA);
+ QualType TT = Ctx.getTypedefType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, FromTA);
const FunctionProtoType *FPT = cast<FunctionProtoType>(From->getType());
QualType NewFunType =
Ctx.getFunctionType(TT, FPT->getParamTypes(), FPT->getExtProtoInfo());
@@ -9193,41 +9181,39 @@ TEST_P(ASTImporterOptionSpecificTestBase, isNewDecl) {
struct ImportInjectedClassNameType : public ASTImporterOptionSpecificTestBase {
protected:
- const CXXRecordDecl *findInjected(const CXXRecordDecl *Parent) {
- for (Decl *Found : Parent->decls()) {
- const auto *Record = dyn_cast<CXXRecordDecl>(Found);
- if (Record && Record->isInjectedClassName())
- return Record;
- }
- return nullptr;
- }
-
- void checkInjType(const CXXRecordDecl *D) {
- // The whole redecl chain should have the same InjectedClassNameType
- // instance. The injected record declaration is a separate chain, this
- // should contain the same type too.
- const Type *Ty = nullptr;
- for (const Decl *ReD : D->redecls()) {
- const auto *ReRD = cast<CXXRecordDecl>(ReD);
- EXPECT_TRUE(ReRD->getTypeForDecl());
- EXPECT_TRUE(!Ty || Ty == ReRD->getTypeForDecl());
- Ty = ReRD->getTypeForDecl();
- }
+ void checkInjType(const ASTContext &Ctx, const CXXRecordDecl *D) {
+ ASSERT_TRUE(D->hasInjectedClassType());
+ const Type *Ty = Ctx.getTagType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, D,
+ /*OwnsTag=*/false)
+ .getTypePtr();
ASSERT_TRUE(Ty);
+ EXPECT_FALSE(Ty->isCanonicalUnqualified());
const auto *InjTy = Ty->castAs<InjectedClassNameType>();
EXPECT_TRUE(InjTy);
- if (CXXRecordDecl *Def = D->getDefinition()) {
- const CXXRecordDecl *InjRD = findInjected(Def);
- EXPECT_TRUE(InjRD);
- EXPECT_EQ(InjRD->getTypeForDecl(), InjTy);
+ for (const Decl *ReD : D->redecls()) {
+ if (ReD == D)
+ continue;
+ const auto *ReRD = cast<CXXRecordDecl>(ReD);
+ ASSERT_TRUE(ReRD->hasInjectedClassType());
+ const Type *ReTy = Ctx.getTagType(ElaboratedTypeKeyword::None,
+ /*Qualifier=*/std::nullopt, ReRD,
+ /*OwnsTag=*/false)
+ .getTypePtr();
+ ASSERT_TRUE(ReTy);
+ EXPECT_FALSE(ReTy->isCanonicalUnqualified());
+ EXPECT_NE(ReTy, Ty);
+ EXPECT_TRUE(Ctx.hasSameType(ReTy, Ty));
+ const auto *ReInjTy = Ty->castAs<InjectedClassNameType>();
+ EXPECT_TRUE(ReInjTy);
}
}
void testImport(Decl *ToTU, Decl *FromTU, Decl *FromD) {
- checkInjType(cast<CXXRecordDecl>(FromD));
+ checkInjType(FromTU->getASTContext(), cast<CXXRecordDecl>(FromD));
Decl *ToD = Import(FromD, Lang_CXX11);
if (auto *ToRD = dyn_cast<CXXRecordDecl>(ToD))
- checkInjType(ToRD);
+ checkInjType(ToTU->getASTContext(), ToRD);
}
const char *ToCodeA =
@@ -9679,8 +9665,7 @@ TEST_P(ASTImporterOptionSpecificTestBase,
auto *ToX = Import(FromX, Lang_CXX11);
auto *ToXType = ToX->getType()->getAs<TypedefType>();
- // FIXME: This should be false.
- EXPECT_TRUE(ToXType->typeMatchesDecl());
+ EXPECT_FALSE(ToXType->typeMatchesDecl());
}
TEST_P(ASTImporterOptionSpecificTestBase,
@@ -10038,7 +10023,12 @@ protected:
.getInheritedFrom(),
GetTemplateParm(ToD));
- EXPECT_EQ(ToD->getPreviousDecl(), ToDInherited);
+ if (FromD->getPreviousDecl() == FromDInherited) {
+ EXPECT_EQ(ToD->getPreviousDecl(), ToDInherited);
+ } else {
+ EXPECT_EQ(FromD, FromDInherited->getPreviousDecl());
+ EXPECT_EQ(ToD, ToDInherited->getPreviousDecl());
+ }
}
const char *CodeFunction =
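Several ASTImporter test hunks above adapt to type-factory signatures that now take an elaboration keyword and a qualifier; getTypedefType is one example. A minimal sketch of the new call shape (argument spelling follows the hunk; the exact overload set is an assumption):

    #include "clang/AST/ASTContext.h"
    #include "clang/AST/Decl.h"

    using namespace clang;

    // Build the typedef's type with no elaboration keyword and no nested-name-specifier.
    static QualType makePlainTypedefType(ASTContext &Ctx, TypedefNameDecl *TD) {
      return Ctx.getTypedefType(ElaboratedTypeKeyword::None,
                                /*Qualifier=*/std::nullopt, TD);
    }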
diff --git a/clang/unittests/AST/DeclPrinterTest.cpp b/clang/unittests/AST/DeclPrinterTest.cpp
index 124b1a1..28750c4 100644
--- a/clang/unittests/AST/DeclPrinterTest.cpp
+++ b/clang/unittests/AST/DeclPrinterTest.cpp
@@ -356,42 +356,40 @@ TEST(DeclPrinter, TestCXXRecordDecl11) {
}
TEST(DeclPrinter, TestCXXRecordDecl12) {
- ASSERT_TRUE(
- PrintedDeclCXX98Matches("struct S { int x; };"
- "namespace NS { class C {};}"
- "void foo() {using namespace NS; C c;}",
- "foo",
- "void foo() {\nusing namespace NS;\nclass "
- "NS::C c;\n}\n",
- [](PrintingPolicy &Policy) {
- Policy.SuppressTagKeyword = false;
- Policy.SuppressScope = true;
- Policy.TerseOutput = false;
- }));
+ ASSERT_TRUE(PrintedDeclCXX98Matches("struct S { int x; };"
+ "namespace NS { class C {};}"
+ "void foo() {using namespace NS; C c;}",
+ "foo",
+ "void foo() {\nusing namespace NS;\n"
+ "C c;\n}\n",
+ [](PrintingPolicy &Policy) {
+ Policy.SuppressTagKeyword = false;
+ Policy.SuppressScope = true;
+ Policy.TerseOutput = false;
+ }));
}
TEST(DeclPrinter, TestCXXRecordDecl13) {
- ASSERT_TRUE(PrintedDeclCXX98Matches(
- "struct S { int x; };"
- "S s1;"
- "S foo() {return s1;}",
- "foo", "struct S foo() {\nreturn s1;\n}\n", [](PrintingPolicy &Policy) {
- Policy.SuppressTagKeyword = false;
- Policy.SuppressScope = true;
- Policy.TerseOutput = false;
- }));
+ ASSERT_TRUE(PrintedDeclCXX98Matches("struct S { int x; };"
+ "S s1;"
+ "S foo() {return s1;}",
+ "foo", "S foo() {\nreturn s1;\n}\n",
+ [](PrintingPolicy &Policy) {
+ Policy.SuppressTagKeyword = false;
+ Policy.SuppressScope = true;
+ Policy.TerseOutput = false;
+ }));
}
TEST(DeclPrinter, TestCXXRecordDecl14) {
- ASSERT_TRUE(PrintedDeclCXX98Matches(
- "struct S { int x; };"
- "S foo(S s1) {return s1;}",
- "foo", "struct S foo(struct S s1) {\nreturn s1;\n}\n",
- [](PrintingPolicy &Policy) {
- Policy.SuppressTagKeyword = false;
- Policy.SuppressScope = true;
- Policy.TerseOutput = false;
- }));
+ ASSERT_TRUE(PrintedDeclCXX98Matches("struct S { int x; };"
+ "S foo(S s1) {return s1;}",
+ "foo", "S foo(S s1) {\nreturn s1;\n}\n",
+ [](PrintingPolicy &Policy) {
+ Policy.SuppressTagKeyword = false;
+ Policy.SuppressScope = true;
+ Policy.TerseOutput = false;
+ }));
}
TEST(DeclPrinter, TestCXXRecordDecl15) {
ASSERT_TRUE(PrintedDeclCXX98Matches(
@@ -399,8 +397,8 @@ TEST(DeclPrinter, TestCXXRecordDecl15) {
"namespace NS { class C {};}"
"S foo(S s1, NS::C c1) {using namespace NS; C c; return s1;}",
"foo",
- "struct S foo(struct S s1, class NS::C c1) {\nusing namespace NS;\nclass "
- "NS::C c;\nreturn s1;\n}\n",
+ "S foo(S s1, NS::C c1) {\nusing namespace NS;\n"
+ "C c;\nreturn s1;\n}\n",
[](PrintingPolicy &Policy) {
Policy.SuppressTagKeyword = false;
Policy.SuppressScope = true;
diff --git a/clang/unittests/AST/DeclTest.cpp b/clang/unittests/AST/DeclTest.cpp
index afaf413..6b44391 100644
--- a/clang/unittests/AST/DeclTest.cpp
+++ b/clang/unittests/AST/DeclTest.cpp
@@ -90,7 +90,7 @@ TEST(Decl, AsmLabelAttr) {
DeclF->addAttr(AsmLabelAttr::Create(Ctx, "foo"));
// Mangle the decl names.
- std::string MangleF, MangleG;
+ std::string MangleF;
std::unique_ptr<ItaniumMangleContext> MC(
ItaniumMangleContext::create(Ctx, Diags));
{
diff --git a/clang/unittests/AST/ProfilingTest.cpp b/clang/unittests/AST/ProfilingTest.cpp
index 27a4a19..b46bade 100644
--- a/clang/unittests/AST/ProfilingTest.cpp
+++ b/clang/unittests/AST/ProfilingTest.cpp
@@ -63,11 +63,11 @@ TEST(Profiling, DeducedTemplateSpecializationType_Name) {
ASTContext &Ctx = AST->getASTContext();
auto *T1 = cast<DeducedTemplateSpecializationType>(
- Ctx.getDeducedTemplateSpecializationType(TemplateName(CTD1), QualType(),
- false));
+ Ctx.getDeducedTemplateSpecializationType(
+ ElaboratedTypeKeyword::None, TemplateName(CTD1), QualType(), false));
auto *T2 = cast<DeducedTemplateSpecializationType>(
- Ctx.getDeducedTemplateSpecializationType(TemplateName(CTD2), QualType(),
- false));
+ Ctx.getDeducedTemplateSpecializationType(
+ ElaboratedTypeKeyword::None, TemplateName(CTD2), QualType(), false));
testTypeNode(T1, T2);
}
diff --git a/clang/unittests/AST/RandstructTest.cpp b/clang/unittests/AST/RandstructTest.cpp
index c22d866..cba446f 100644
--- a/clang/unittests/AST/RandstructTest.cpp
+++ b/clang/unittests/AST/RandstructTest.cpp
@@ -532,7 +532,7 @@ TEST(RANDSTRUCT_TEST, AnonymousStructsAndUnionsRetainFieldOrder) {
for (const Decl *D : RD->decls())
if (const FieldDecl *FD = dyn_cast<FieldDecl>(D)) {
if (const auto *Record = FD->getType()->getAs<RecordType>()) {
- RD = Record->getDecl();
+ RD = Record->getOriginalDecl()->getDefinitionOrSelf();
if (RD->isAnonymousStructOrUnion()) {
// These field orders shouldn't change.
if (RD->isUnion()) {
diff --git a/clang/unittests/AST/RecursiveASTVisitorTest.cpp b/clang/unittests/AST/RecursiveASTVisitorTest.cpp
index 9d7ff59..c5ad29a 100644
--- a/clang/unittests/AST/RecursiveASTVisitorTest.cpp
+++ b/clang/unittests/AST/RecursiveASTVisitorTest.cpp
@@ -95,9 +95,10 @@ public:
return Ret;
}
- bool TraverseTypedefTypeLoc(TypedefTypeLoc TL) {
+ bool TraverseTypedefTypeLoc(TypedefTypeLoc TL, bool TraverseQualifier) {
Events.push_back(VisitEvent::StartTraverseTypedefType);
- bool Ret = RecursiveASTVisitor::TraverseTypedefTypeLoc(TL);
+ bool Ret =
+ RecursiveASTVisitor::TraverseTypedefTypeLoc(TL, TraverseQualifier);
Events.push_back(VisitEvent::EndTraverseTypedefType);
return Ret;
diff --git a/clang/unittests/AST/SizelessTypesTest.cpp b/clang/unittests/AST/SizelessTypesTest.cpp
index 0b984b6..4a945a4 100644
--- a/clang/unittests/AST/SizelessTypesTest.cpp
+++ b/clang/unittests/AST/SizelessTypesTest.cpp
@@ -24,7 +24,7 @@ struct SizelessTypeTester : public ::testing::Test {
ASTContext &Ctx = AST->getASTContext();
TranslationUnitDecl &TU = *Ctx.getTranslationUnitDecl();
TypeDecl *Foo = cast<TypeDecl>(TU.lookup(&Ctx.Idents.get("foo")).front());
- const Type *FooTy = Foo->getTypeForDecl();
+ const Type *FooTy = Ctx.getTypeDeclType(Foo).getTypePtr();
};
TEST_F(SizelessTypeTester, TestSizelessBuiltin) {
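The SizelessTypes fixture above stops using TypeDecl::getTypeForDecl() and derives the type from the ASTContext instead. A one-line helper that captures the replacement (a sketch, not an API added by the patch):

    #include "clang/AST/ASTContext.h"
    #include "clang/AST/Decl.h"

    using namespace clang;

    static const Type *typeOfTypeDecl(const ASTContext &Ctx, const TypeDecl *TD) {
      return Ctx.getTypeDeclType(TD).getTypePtr();
    }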
diff --git a/clang/unittests/AST/StructuralEquivalenceTest.cpp b/clang/unittests/AST/StructuralEquivalenceTest.cpp
index ef82afa..bee288d 100644
--- a/clang/unittests/AST/StructuralEquivalenceTest.cpp
+++ b/clang/unittests/AST/StructuralEquivalenceTest.cpp
@@ -617,8 +617,7 @@ TEST_F(StructuralEquivalenceCXXMethodTest, OutOfClass2) {
struct StructuralEquivalenceRecordTest : StructuralEquivalenceTest {
// FIXME Use a common getRecordDecl with ASTImporterTest.cpp!
RecordDecl *getRecordDecl(FieldDecl *FD) {
- auto *ET = cast<ElaboratedType>(FD->getType().getTypePtr());
- return cast<RecordType>(ET->getNamedType().getTypePtr())->getDecl();
+ return FD->getType()->getAsRecordDecl();
};
};
@@ -720,11 +719,13 @@ TEST_F(StructuralEquivalenceRecordTest, AnonymousRecordsShouldBeInequivalent) {
auto *A = FirstDeclMatcher<IndirectFieldDecl>().match(
TU, indirectFieldDecl(hasName("a")));
auto *FA = cast<FieldDecl>(A->chain().front());
- RecordDecl *RA = cast<RecordType>(FA->getType().getTypePtr())->getDecl();
+ RecordDecl *RA =
+ cast<RecordType>(FA->getType().getTypePtr())->getOriginalDecl();
auto *B = FirstDeclMatcher<IndirectFieldDecl>().match(
TU, indirectFieldDecl(hasName("b")));
auto *FB = cast<FieldDecl>(B->chain().front());
- RecordDecl *RB = cast<RecordType>(FB->getType().getTypePtr())->getDecl();
+ RecordDecl *RB =
+ cast<RecordType>(FB->getType().getTypePtr())->getOriginalDecl();
ASSERT_NE(RA, RB);
EXPECT_TRUE(testStructuralMatch(RA, RA));
@@ -753,13 +754,15 @@ TEST_F(StructuralEquivalenceRecordTest,
auto *A = FirstDeclMatcher<IndirectFieldDecl>().match(
TU, indirectFieldDecl(hasName("a")));
auto *FA = cast<FieldDecl>(A->chain().front());
- RecordDecl *RA = cast<RecordType>(FA->getType().getTypePtr())->getDecl();
+ RecordDecl *RA =
+ cast<RecordType>(FA->getType().getTypePtr())->getOriginalDecl();
auto *TU1 = get<1>(t);
auto *A1 = FirstDeclMatcher<IndirectFieldDecl>().match(
TU1, indirectFieldDecl(hasName("a")));
auto *FA1 = cast<FieldDecl>(A1->chain().front());
- RecordDecl *RA1 = cast<RecordType>(FA1->getType().getTypePtr())->getDecl();
+ RecordDecl *RA1 =
+ cast<RecordType>(FA1->getType().getTypePtr())->getOriginalDecl();
RecordDecl *X =
FirstDeclMatcher<RecordDecl>().match(TU, recordDecl(hasName("X")));
diff --git a/clang/unittests/AST/TemplateNameTest.cpp b/clang/unittests/AST/TemplateNameTest.cpp
index 2eac5c5..31655e2 100644
--- a/clang/unittests/AST/TemplateNameTest.cpp
+++ b/clang/unittests/AST/TemplateNameTest.cpp
@@ -120,14 +120,14 @@ TEST(TemplateName, UsingTemplate) {
// are rather part of the ElaboratedType)!
absl::vector<int> v(123);
)cpp");
- auto Matcher = elaboratedTypeLoc(
- hasNamedTypeLoc(loc(templateSpecializationType().bind("id"))));
+ auto Matcher = templateSpecializationTypeLoc().bind("id");
auto MatchResults = match(Matcher, AST->getASTContext());
const auto *TST =
- MatchResults.front().getNodeAs<TemplateSpecializationType>("id");
+ MatchResults.front().getNodeAs<TemplateSpecializationTypeLoc>("id");
ASSERT_TRUE(TST);
- EXPECT_EQ(TST->getTemplateName().getKind(), TemplateName::QualifiedTemplate);
- EXPECT_TRUE(TST->getTemplateName().getAsUsingShadowDecl() != nullptr);
+ TemplateName TN = TST->getTypePtr()->getTemplateName();
+ EXPECT_EQ(TN.getKind(), TemplateName::QualifiedTemplate);
+ EXPECT_TRUE(TN.getAsUsingShadowDecl() != nullptr);
AST = tooling::buildASTFromCodeWithArgs(R"cpp(
namespace std {
@@ -139,8 +139,7 @@ TEST(TemplateName, UsingTemplate) {
absl::vector DTST(123);
)cpp",
{"-std=c++17"});
- Matcher = elaboratedTypeLoc(
- hasNamedTypeLoc(loc(deducedTemplateSpecializationType().bind("id"))));
+ Matcher = loc(deducedTemplateSpecializationType().bind("id"));
MatchResults = match(Matcher, AST->getASTContext());
const auto *DTST =
MatchResults.front().getNodeAs<DeducedTemplateSpecializationType>("id");
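The matcher rewrites above drop the elaboratedTypeLoc/elaboratedType wrappers and bind the specialization node directly. A hedged before/after sketch using the public ASTMatchers API:

    #include "clang/ASTMatchers/ASTMatchers.h"

    using namespace clang::ast_matchers;

    // Before: elaboratedTypeLoc(hasNamedTypeLoc(loc(templateSpecializationType().bind("id"))))
    // After:  the specialization's own TypeLoc is matched and bound directly.
    inline auto specializationLoc() {
      return templateSpecializationTypeLoc().bind("id");
    }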
diff --git a/clang/unittests/AST/TypePrinterTest.cpp b/clang/unittests/AST/TypePrinterTest.cpp
index 2b37add..ca0380b 100644
--- a/clang/unittests/AST/TypePrinterTest.cpp
+++ b/clang/unittests/AST/TypePrinterTest.cpp
@@ -60,7 +60,7 @@ TEST(TypePrinter, TemplateId) {
[](PrintingPolicy &Policy) { Policy.FullyQualifiedName = false; }));
ASSERT_TRUE(PrintedTypeMatches(
- Code, {}, Matcher, "const Type<T> &",
+ Code, {}, Matcher, "const N::Type<T> &",
[](PrintingPolicy &Policy) { Policy.FullyQualifiedName = true; }));
}
@@ -97,7 +97,7 @@ TEST(TypePrinter, ParamsUglified) {
"const f<Tp &> *", Clean));
}
-TEST(TypePrinter, SuppressElaboration) {
+TEST(TypePrinter, TemplateSpecializationFullyQualified) {
llvm::StringLiteral Code = R"cpp(
namespace shared {
namespace a {
@@ -115,13 +115,10 @@ TEST(TypePrinter, SuppressElaboration) {
hasType(qualType().bind("id")));
ASSERT_TRUE(PrintedTypeMatches(
Code, {}, Matcher, "a::S<b::Foo>",
+ [](PrintingPolicy &Policy) { Policy.FullyQualifiedName = false; }));
+ ASSERT_TRUE(PrintedTypeMatches(
+ Code, {}, Matcher, "shared::a::S<shared::b::Foo>",
[](PrintingPolicy &Policy) { Policy.FullyQualifiedName = true; }));
- ASSERT_TRUE(PrintedTypeMatches(Code, {}, Matcher,
- "shared::a::S<shared::b::Foo>",
- [](PrintingPolicy &Policy) {
- Policy.SuppressElaboration = true;
- Policy.FullyQualifiedName = true;
- }));
}
TEST(TypePrinter, TemplateIdWithNTTP) {
diff --git a/clang/unittests/ASTMatchers/ASTMatchersNarrowingTest.cpp b/clang/unittests/ASTMatchers/ASTMatchersNarrowingTest.cpp
index 2871223..8a957864 100644
--- a/clang/unittests/ASTMatchers/ASTMatchersNarrowingTest.cpp
+++ b/clang/unittests/ASTMatchers/ASTMatchersNarrowingTest.cpp
@@ -1172,8 +1172,8 @@ TEST_P(ASTMatchersTest, IsDerivedFrom_ElaboratedType) {
return;
}
- DeclarationMatcher IsDerivenFromBase =
- cxxRecordDecl(isDerivedFrom(decl().bind("typedef")));
+ DeclarationMatcher IsDerivenFromBase = cxxRecordDecl(
+ isDerivedFrom(decl().bind("typedef")), unless(isImplicit()));
EXPECT_TRUE(matchAndVerifyResultTrue(
"struct AnInterface {};"
@@ -2302,10 +2302,9 @@ TEST(ASTMatchersTest, NamesMember_CXXDependentScopeMemberExpr) {
EXPECT_TRUE(matches(
Code,
cxxDependentScopeMemberExpr(
- hasObjectExpression(declRefExpr(hasType(elaboratedType(namesType(
- templateSpecializationType(hasDeclaration(classTemplateDecl(
- has(cxxRecordDecl(has(cxxMethodDecl(hasName("mem"))
- .bind("templMem")))))))))))),
+ hasObjectExpression(declRefExpr(hasType(templateSpecializationType(
+ hasDeclaration(classTemplateDecl(has(cxxRecordDecl(
+ has(cxxMethodDecl(hasName("mem")).bind("templMem")))))))))),
memberHasSameNameAsBoundNode("templMem"))));
EXPECT_TRUE(
@@ -2323,10 +2322,9 @@ TEST(ASTMatchersTest, NamesMember_CXXDependentScopeMemberExpr) {
EXPECT_TRUE(matches(
Code,
cxxDependentScopeMemberExpr(
- hasObjectExpression(declRefExpr(
- hasType(elaboratedType(namesType(templateSpecializationType(
- hasDeclaration(classTemplateDecl(has(cxxRecordDecl(has(
- fieldDecl(hasName("mem")).bind("templMem")))))))))))),
+ hasObjectExpression(declRefExpr(hasType(templateSpecializationType(
+ hasDeclaration(classTemplateDecl(has(cxxRecordDecl(
+ has(fieldDecl(hasName("mem")).bind("templMem")))))))))),
memberHasSameNameAsBoundNode("templMem"))));
}
@@ -2341,10 +2339,9 @@ TEST(ASTMatchersTest, NamesMember_CXXDependentScopeMemberExpr) {
EXPECT_TRUE(matches(
Code,
cxxDependentScopeMemberExpr(
- hasObjectExpression(declRefExpr(
- hasType(elaboratedType(namesType(templateSpecializationType(
- hasDeclaration(classTemplateDecl(has(cxxRecordDecl(
- has(varDecl(hasName("mem")).bind("templMem")))))))))))),
+ hasObjectExpression(declRefExpr(hasType(templateSpecializationType(
+ hasDeclaration(classTemplateDecl(has(cxxRecordDecl(
+ has(varDecl(hasName("mem")).bind("templMem")))))))))),
memberHasSameNameAsBoundNode("templMem"))));
}
{
diff --git a/clang/unittests/ASTMatchers/ASTMatchersNodeTest.cpp b/clang/unittests/ASTMatchers/ASTMatchersNodeTest.cpp
index 07450a0..b55928f 100644
--- a/clang/unittests/ASTMatchers/ASTMatchersNodeTest.cpp
+++ b/clang/unittests/ASTMatchers/ASTMatchersNodeTest.cpp
@@ -1938,8 +1938,7 @@ TEST_P(ASTMatchersTest, PointerType_MatchesPointersToConstTypes) {
TEST_P(ASTMatchersTest, TypedefType) {
EXPECT_TRUE(matches("typedef int X; X a;",
- varDecl(hasName("a"), hasType(elaboratedType(
- namesType(typedefType()))))));
+ varDecl(hasName("a"), hasType(typedefType()))));
}
TEST_P(ASTMatchersTest, MacroQualifiedType) {
@@ -2018,22 +2017,6 @@ TEST_P(ASTMatchersTest, RecordType_CXX) {
recordType(hasDeclaration(recordDecl(hasName("S"))))));
}
-TEST_P(ASTMatchersTest, ElaboratedType) {
- if (!GetParam().isCXX()) {
- // FIXME: Add a test for `elaboratedType()` that does not depend on C++.
- return;
- }
- EXPECT_TRUE(matches("namespace N {"
- " namespace M {"
- " class D {};"
- " }"
- "}"
- "N::M::D d;",
- elaboratedType()));
- EXPECT_TRUE(matches("class C {} c;", elaboratedType()));
- EXPECT_TRUE(matches("class C {}; C c;", elaboratedType()));
-}
-
TEST_P(ASTMatchersTest, SubstTemplateTypeParmType) {
if (!GetParam().isCXX()) {
return;
@@ -2133,16 +2116,16 @@ TEST_P(ASTMatchersTest,
if (!GetParam().isCXX()) {
return;
}
- EXPECT_TRUE(matches(
- "struct A { struct B { struct C {}; }; }; A::B::C c;",
- nestedNameSpecifier(hasPrefix(specifiesType(asString("struct A"))))));
+ EXPECT_TRUE(
+ matches("struct A { struct B { struct C {}; }; }; A::B::C c;",
+ nestedNameSpecifier(hasPrefix(specifiesType(asString("A"))))));
EXPECT_TRUE(matches("struct A { struct B { struct C {}; }; }; A::B::C c;",
- nestedNameSpecifierLoc(hasPrefix(specifiesTypeLoc(
- loc(qualType(asString("struct A"))))))));
+ nestedNameSpecifierLoc(hasPrefix(
+ specifiesTypeLoc(loc(qualType(asString("A"))))))));
EXPECT_TRUE(matches(
"namespace N { struct A { struct B { struct C {}; }; }; } N::A::B::C c;",
- nestedNameSpecifierLoc(hasPrefix(
- specifiesTypeLoc(loc(qualType(asString("struct N::A"))))))));
+ nestedNameSpecifierLoc(
+ hasPrefix(specifiesTypeLoc(loc(qualType(asString("N::A"))))))));
}
template <typename T>
@@ -2338,8 +2321,7 @@ TEST_P(ASTMatchersTest,
}
EXPECT_TRUE(matches(
"template <typename T> class C {}; C<char> var;",
- varDecl(hasName("var"), hasTypeLoc(elaboratedTypeLoc(hasNamedTypeLoc(
- templateSpecializationTypeLoc()))))));
+ varDecl(hasName("var"), hasTypeLoc(templateSpecializationTypeLoc()))));
}
TEST_P(
@@ -2353,58 +2335,6 @@ TEST_P(
varDecl(hasName("var"), hasTypeLoc(templateSpecializationTypeLoc()))));
}
-TEST_P(ASTMatchersTest,
- ElaboratedTypeLocTest_BindsToElaboratedObjectDeclaration) {
- if (!GetParam().isCXX()) {
- return;
- }
- EXPECT_TRUE(matches("class C {}; class C c;",
- varDecl(hasName("c"), hasTypeLoc(elaboratedTypeLoc()))));
-}
-
-TEST_P(ASTMatchersTest,
- ElaboratedTypeLocTest_BindsToNamespaceElaboratedObjectDeclaration) {
- if (!GetParam().isCXX()) {
- return;
- }
- EXPECT_TRUE(matches("namespace N { class D {}; } N::D d;",
- varDecl(hasName("d"), hasTypeLoc(elaboratedTypeLoc()))));
-}
-
-TEST_P(ASTMatchersTest,
- ElaboratedTypeLocTest_BindsToElaboratedStructDeclaration) {
- EXPECT_TRUE(matches("struct s {}; struct s ss;",
- varDecl(hasName("ss"), hasTypeLoc(elaboratedTypeLoc()))));
-}
-
-TEST_P(ASTMatchersTest,
- ElaboratedTypeLocTest_BindsToBareElaboratedObjectDeclaration) {
- if (!GetParam().isCXX()) {
- return;
- }
- EXPECT_TRUE(matches("class C {}; C c;",
- varDecl(hasName("c"), hasTypeLoc(elaboratedTypeLoc()))));
-}
-
-TEST_P(
- ASTMatchersTest,
- ElaboratedTypeLocTest_DoesNotBindToNamespaceNonElaboratedObjectDeclaration) {
- if (!GetParam().isCXX()) {
- return;
- }
- EXPECT_TRUE(matches("namespace N { class D {}; } using N::D; D d;",
- varDecl(hasName("d"), hasTypeLoc(elaboratedTypeLoc()))));
-}
-
-TEST_P(ASTMatchersTest,
- ElaboratedTypeLocTest_BindsToBareElaboratedStructDeclaration) {
- if (!GetParam().isCXX()) {
- return;
- }
- EXPECT_TRUE(matches("struct s {}; s ss;",
- varDecl(hasName("ss"), hasTypeLoc(elaboratedTypeLoc()))));
-}
-
TEST_P(ASTMatchersTest, LambdaCaptureTest) {
if (!GetParam().isCXX11OrLater()) {
return;
diff --git a/clang/unittests/ASTMatchers/ASTMatchersTraversalTest.cpp b/clang/unittests/ASTMatchers/ASTMatchersTraversalTest.cpp
index 8ddae4e..d58bc00 100644
--- a/clang/unittests/ASTMatchers/ASTMatchersTraversalTest.cpp
+++ b/clang/unittests/ASTMatchers/ASTMatchersTraversalTest.cpp
@@ -190,13 +190,11 @@ TEST(TypeMatcher, MatchesDeclTypes) {
EXPECT_TRUE(matches("template <typename T> struct S {"
" void f(S s);"
"};",
- parmVarDecl(hasType(elaboratedType(
- namesType(injectedClassNameType()))))));
+ parmVarDecl(hasType(injectedClassNameType()))));
EXPECT_TRUE(notMatches("template <typename T> struct S {"
" void g(S<T> s);"
"};",
- parmVarDecl(hasType(elaboratedType(
- namesType(injectedClassNameType()))))));
+ parmVarDecl(hasType(injectedClassNameType()))));
// InjectedClassNameType -> CXXRecordDecl
EXPECT_TRUE(matches("template <typename T> struct S {"
" void f(S s);"
@@ -228,46 +226,39 @@ TEST(HasDeclaration, HasDeclarationOfEnumType) {
TEST(HasDeclaration, HasGetDeclTraitTest) {
static_assert(internal::has_getDecl<TypedefType>,
"Expected TypedefType to have a getDecl.");
- static_assert(internal::has_getDecl<RecordType>,
- "Expected RecordType to have a getDecl.");
static_assert(!internal::has_getDecl<TemplateSpecializationType>,
"Expected TemplateSpecializationType to *not* have a getDecl.");
}
TEST(HasDeclaration, ElaboratedType) {
- EXPECT_TRUE(matches(
- "namespace n { template <typename T> struct X {}; }"
- "void f(n::X<int>);",
- parmVarDecl(hasType(qualType(hasDeclaration(cxxRecordDecl()))))));
- EXPECT_TRUE(matches(
- "namespace n { template <typename T> struct X {}; }"
- "void f(n::X<int>);",
- parmVarDecl(hasType(elaboratedType(hasDeclaration(cxxRecordDecl()))))));
+ static const char Elaborated[] = "namespace n { struct X {}; }"
+ "void f(n::X);";
+ EXPECT_TRUE(
+ matches(Elaborated,
+ parmVarDecl(hasType(qualType(hasDeclaration(cxxRecordDecl()))))));
+ EXPECT_TRUE(matches(Elaborated, parmVarDecl(hasType(recordType(
+ hasDeclaration(cxxRecordDecl()))))));
}
TEST(HasDeclaration, HasDeclarationOfTypeWithDecl) {
EXPECT_TRUE(matches(
"typedef int X; X a;",
- varDecl(hasName("a"), hasType(elaboratedType(namesType(
- typedefType(hasDeclaration(decl()))))))));
+ varDecl(hasName("a"), hasType(typedefType(hasDeclaration(decl()))))));
// FIXME: Add tests for other types with getDecl() (e.g. RecordType)
}
TEST(HasDeclaration, HasDeclarationOfTemplateSpecializationType) {
- EXPECT_TRUE(matches(
- "template <typename T> class A {}; A<int> a;",
- varDecl(hasType(elaboratedType(namesType(templateSpecializationType(
- hasDeclaration(namedDecl(hasName("A"))))))))));
- EXPECT_TRUE(matches(
- "template <typename T> class A {};"
- "template <typename T> class B { A<T> a; };",
- fieldDecl(hasType(elaboratedType(namesType(templateSpecializationType(
- hasDeclaration(namedDecl(hasName("A"))))))))));
- EXPECT_TRUE(matches(
- "template <typename T> class A {}; A<int> a;",
- varDecl(hasType(elaboratedType(namesType(
- templateSpecializationType(hasDeclaration(cxxRecordDecl()))))))));
+ EXPECT_TRUE(matches("template <typename T> class A {}; A<int> a;",
+ varDecl(hasType(templateSpecializationType(
+ hasDeclaration(namedDecl(hasName("A"))))))));
+ EXPECT_TRUE(matches("template <typename T> class A {};"
+ "template <typename T> class B { A<T> a; };",
+ fieldDecl(hasType(templateSpecializationType(
+ hasDeclaration(namedDecl(hasName("A"))))))));
+ EXPECT_TRUE(matches("template <typename T> class A {}; A<int> a;",
+ varDecl(hasType(templateSpecializationType(
+ hasDeclaration(cxxRecordDecl()))))));
}
TEST(HasDeclaration, HasDeclarationOfCXXNewExpr) {
@@ -277,10 +268,9 @@ TEST(HasDeclaration, HasDeclarationOfCXXNewExpr) {
}
TEST(HasDeclaration, HasDeclarationOfTypeAlias) {
- EXPECT_TRUE(matches(
- "template <typename T> using C = T; C<int> c;",
- varDecl(hasType(elaboratedType(namesType(templateSpecializationType(
- hasDeclaration(typeAliasTemplateDecl()))))))));
+ EXPECT_TRUE(matches("template <typename T> using C = T; C<int> c;",
+ varDecl(hasType(templateSpecializationType(
+ hasDeclaration(typeAliasTemplateDecl()))))));
}
TEST(HasDeclaration, HasDeclarationOfObjCInterface) {
@@ -5410,14 +5400,15 @@ TEST(LoopingMatchers, DoNotOverwritePreviousMatchResultOnFailure) {
functionDecl(parameterCountIs(1))))),
std::make_unique<VerifyIdIsBoundTo<Decl>>("x", 1)));
EXPECT_TRUE(matchAndVerifyResultTrue(
- "class A{}; class B{}; class C : B, A {};",
- cxxRecordDecl(decl().bind("x"), isDerivedFrom("::A")),
- std::make_unique<VerifyIdIsBoundTo<Decl>>("x", 1)));
+ "class A{}; class B{}; class C : B, A {};",
+ cxxRecordDecl(decl().bind("x"), isDerivedFrom("::A"),
+ unless(isImplicit())),
+ std::make_unique<VerifyIdIsBoundTo<Decl>>("x", 1)));
EXPECT_TRUE(matchAndVerifyResultTrue(
- "class A{}; typedef A B; typedef A C; typedef A D;"
+ "class A{}; typedef A B; typedef A C; typedef A D;"
"class E : A {};",
- cxxRecordDecl(decl().bind("x"), isDerivedFrom("C")),
- std::make_unique<VerifyIdIsBoundTo<Decl>>("x", 1)));
+ cxxRecordDecl(decl().bind("x"), isDerivedFrom("C"), unless(isImplicit())),
+ std::make_unique<VerifyIdIsBoundTo<Decl>>("x", 1)));
EXPECT_TRUE(matchAndVerifyResultTrue(
"class A { class B { void f() {} }; };",
functionDecl(decl().bind("x"), hasAncestor(recordDecl(hasName("::A")))),
@@ -5710,7 +5701,7 @@ TEST(HasAnyBase, BindsInnerBoundNodes) {
EXPECT_TRUE(matchAndVerifyResultTrue(
"struct Inner {}; struct Proxy : Inner {}; struct Main : public "
"Proxy {};",
- cxxRecordDecl(hasName("Main"),
+ cxxRecordDecl(hasName("Main"), unless(isImplicit()),
hasAnyBase(cxxBaseSpecifier(hasType(
cxxRecordDecl(hasName("Inner")).bind("base-class")))))
.bind("class"),
@@ -5736,47 +5727,28 @@ TEST(TypeMatching, PointeeTypes) {
TEST(ElaboratedTypeNarrowing, hasQualifier) {
EXPECT_TRUE(matches(
- "namespace N {"
+ "namespace N {"
" namespace M {"
" class D {};"
" }"
"}"
"N::M::D d;",
- elaboratedType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N")))))));
+ recordType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N")))))));
EXPECT_TRUE(notMatches(
- "namespace M {"
+ "namespace M {"
" class D {};"
"}"
"M::D d;",
- elaboratedType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N")))))));
- EXPECT_TRUE(notMatches(
- "struct D {"
- "} d;",
- elaboratedType(hasQualifier(nestedNameSpecifier()))));
-}
-
-TEST(ElaboratedTypeNarrowing, namesType) {
- EXPECT_TRUE(matches(
- "namespace N {"
- " namespace M {"
- " class D {};"
- " }"
- "}"
- "N::M::D d;",
- elaboratedType(elaboratedType(namesType(recordType(
- hasDeclaration(namedDecl(hasName("D")))))))));
- EXPECT_TRUE(notMatches(
- "namespace M {"
- " class D {};"
- "}"
- "M::D d;",
- elaboratedType(elaboratedType(namesType(typedefType())))));
+ recordType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N")))))));
+ EXPECT_TRUE(notMatches("struct D {"
+ "} d;",
+ recordType(hasQualifier(nestedNameSpecifier()))));
}
TEST(NNS, BindsNestedNameSpecifiers) {
EXPECT_TRUE(matchAndVerifyResultTrue(
"namespace ns { struct E { struct B {}; }; } ns::E::B b;",
- nestedNameSpecifier(specifiesType(asString("struct ns::E"))).bind("nns"),
+ nestedNameSpecifier(specifiesType(asString("ns::E"))).bind("nns"),
std::make_unique<VerifyIdIsBoundTo<NestedNameSpecifier>>("nns",
"ns::E::")));
}
@@ -5793,38 +5765,33 @@ TEST(NNS, DescendantsOfNestedNameSpecifiers) {
"namespace a { struct A { struct B { struct C {}; }; }; };"
"void f() { a::A::B::C c; }";
EXPECT_TRUE(matches(
- Fragment,
- nestedNameSpecifier(specifiesType(asString("struct a::A::B")),
- hasDescendant(nestedNameSpecifier(
- specifiesNamespace(hasName("a")))))));
+ Fragment, nestedNameSpecifier(specifiesType(asString("a::A::B")),
+ hasDescendant(nestedNameSpecifier(
+ specifiesNamespace(hasName("a")))))));
EXPECT_TRUE(notMatches(
- Fragment,
- nestedNameSpecifier(specifiesType(asString("struct a::A::B")),
- has(nestedNameSpecifier(
- specifiesNamespace(hasName("a")))))));
+ Fragment, nestedNameSpecifier(specifiesType(asString("a::A::B")),
+ has(nestedNameSpecifier(
+ specifiesNamespace(hasName("a")))))));
EXPECT_TRUE(matches(
- Fragment,
- nestedNameSpecifier(specifiesType(asString("struct a::A")),
- has(nestedNameSpecifier(
- specifiesNamespace(hasName("a")))))));
+ Fragment, nestedNameSpecifier(specifiesType(asString("a::A")),
+ has(nestedNameSpecifier(
+ specifiesNamespace(hasName("a")))))));
  // Not really useful because a NestedNameSpecifier can have at most one child,
// but to complete the interface.
EXPECT_TRUE(matchAndVerifyResultTrue(
- Fragment,
- nestedNameSpecifier(specifiesType(asString("struct a::A::B")),
- forEach(nestedNameSpecifier().bind("x"))),
- std::make_unique<VerifyIdIsBoundTo<NestedNameSpecifier>>("x", 1)));
+ Fragment,
+ nestedNameSpecifier(specifiesType(asString("a::A::B")),
+ forEach(nestedNameSpecifier().bind("x"))),
+ std::make_unique<VerifyIdIsBoundTo<NestedNameSpecifier>>("x", 1)));
}
TEST(NNS, NestedNameSpecifiersAsDescendants) {
StringRef Fragment =
"namespace a { struct A { struct B { struct C {}; }; }; };"
"void f() { a::A::B::C c; }";
- EXPECT_TRUE(matches(
- Fragment,
- decl(hasDescendant(nestedNameSpecifier(specifiesType(
- asString("struct a::A")))))));
+ EXPECT_TRUE(matches(Fragment, decl(hasDescendant(nestedNameSpecifier(
+ specifiesType(asString("a::A")))))));
EXPECT_TRUE(matchAndVerifyResultTrue(
Fragment,
functionDecl(hasName("f"),
@@ -5837,37 +5804,34 @@ TEST(NNSLoc, DescendantsOfNestedNameSpecifierLocs) {
StringRef Fragment =
"namespace a { struct A { struct B { struct C {}; }; }; };"
"void f() { a::A::B::C c; }";
- EXPECT_TRUE(matches(
- Fragment,
- nestedNameSpecifierLoc(loc(specifiesType(asString("struct a::A::B"))),
- hasDescendant(loc(nestedNameSpecifier(
- specifiesNamespace(hasName("a"))))))));
+ EXPECT_TRUE(matches(Fragment, nestedNameSpecifierLoc(
+ loc(specifiesType(asString("a::A::B"))),
+ hasDescendant(loc(nestedNameSpecifier(
+ specifiesNamespace(hasName("a"))))))));
EXPECT_TRUE(notMatches(
- Fragment,
- nestedNameSpecifierLoc(loc(specifiesType(asString("struct a::A::B"))),
- has(loc(nestedNameSpecifier(
- specifiesNamespace(hasName("a"))))))));
+ Fragment,
+ nestedNameSpecifierLoc(
+ loc(specifiesType(asString("a::A::B"))),
+ has(loc(nestedNameSpecifier(specifiesNamespace(hasName("a"))))))));
EXPECT_TRUE(matches(
- Fragment,
- nestedNameSpecifierLoc(loc(specifiesType(asString("struct a::A"))),
- has(loc(nestedNameSpecifier(
- specifiesNamespace(hasName("a"))))))));
+ Fragment,
+ nestedNameSpecifierLoc(
+ loc(specifiesType(asString("a::A"))),
+ has(loc(nestedNameSpecifier(specifiesNamespace(hasName("a"))))))));
EXPECT_TRUE(matchAndVerifyResultTrue(
- Fragment,
- nestedNameSpecifierLoc(loc(specifiesType(asString("struct a::A::B"))),
- forEach(nestedNameSpecifierLoc().bind("x"))),
- std::make_unique<VerifyIdIsBoundTo<NestedNameSpecifierLoc>>("x", 1)));
+ Fragment,
+ nestedNameSpecifierLoc(loc(specifiesType(asString("a::A::B"))),
+ forEach(nestedNameSpecifierLoc().bind("x"))),
+ std::make_unique<VerifyIdIsBoundTo<NestedNameSpecifierLoc>>("x", 1)));
}
TEST(NNSLoc, NestedNameSpecifierLocsAsDescendants) {
StringRef Fragment =
"namespace a { struct A { struct B { struct C {}; }; }; };"
"void f() { a::A::B::C c; }";
- EXPECT_TRUE(matches(
- Fragment,
- decl(hasDescendant(loc(nestedNameSpecifier(specifiesType(
- asString("struct a::A"))))))));
+ EXPECT_TRUE(matches(Fragment, decl(hasDescendant(loc(nestedNameSpecifier(
+ specifiesType(asString("a::A"))))))));
EXPECT_TRUE(matchAndVerifyResultTrue(
Fragment,
functionDecl(hasName("f"),
@@ -5883,7 +5847,9 @@ TEST(Attr, AttrsAsDescendants) {
EXPECT_TRUE(matchAndVerifyResultTrue(
Fragment,
namespaceDecl(hasName("a"),
- forEachDescendant(attr(unless(isImplicit())).bind("x"))),
+ forEachDescendant(decl(
+ hasDescendant(attr(unless(isImplicit())).bind("x")),
+ unless(isImplicit())))),
std::make_unique<VerifyIdIsBoundTo<Attr>>("x", 2)));
}
@@ -6531,21 +6497,19 @@ TEST(HasReferentLoc, DoesNotBindToParameterWithoutIntReferenceTypeLoc) {
}
TEST(HasAnyTemplateArgumentLoc, BindsToSpecializationWithIntArgument) {
- EXPECT_TRUE(matches(
- "template<typename T> class A {}; A<int> a;",
- varDecl(hasName("a"),
- hasTypeLoc(elaboratedTypeLoc(hasNamedTypeLoc(
- templateSpecializationTypeLoc(hasAnyTemplateArgumentLoc(
- hasTypeLoc(loc(asString("int")))))))))));
+ EXPECT_TRUE(
+ matches("template<typename T> class A {}; A<int> a;",
+ varDecl(hasName("a"), hasTypeLoc(templateSpecializationTypeLoc(
+ hasAnyTemplateArgumentLoc(hasTypeLoc(
+ loc(asString("int")))))))));
}
TEST(HasAnyTemplateArgumentLoc, BindsToSpecializationWithDoubleArgument) {
- EXPECT_TRUE(matches(
- "template<typename T> class A {}; A<double> a;",
- varDecl(hasName("a"),
- hasTypeLoc(elaboratedTypeLoc(hasNamedTypeLoc(
- templateSpecializationTypeLoc(hasAnyTemplateArgumentLoc(
- hasTypeLoc(loc(asString("double")))))))))));
+ EXPECT_TRUE(
+ matches("template<typename T> class A {}; A<double> a;",
+ varDecl(hasName("a"), hasTypeLoc(templateSpecializationTypeLoc(
+ hasAnyTemplateArgumentLoc(hasTypeLoc(
+ loc(asString("double")))))))));
}
TEST(HasAnyTemplateArgumentLoc, BindsToExplicitSpecializationWithIntArgument) {
@@ -6598,30 +6562,27 @@ TEST(HasAnyTemplateArgumentLoc,
}
TEST(HasTemplateArgumentLoc, BindsToSpecializationWithIntArgument) {
- EXPECT_TRUE(
- matches("template<typename T> class A {}; A<int> a;",
- varDecl(hasName("a"),
- hasTypeLoc(elaboratedTypeLoc(hasNamedTypeLoc(
- templateSpecializationTypeLoc(hasTemplateArgumentLoc(
- 0, hasTypeLoc(loc(asString("int")))))))))));
+ EXPECT_TRUE(matches(
+ "template<typename T> class A {}; A<int> a;",
+ varDecl(hasName("a"),
+ hasTypeLoc(templateSpecializationTypeLoc(hasTemplateArgumentLoc(
+ 0, hasTypeLoc(loc(asString("int")))))))));
}
TEST(HasTemplateArgumentLoc, BindsToSpecializationWithDoubleArgument) {
- EXPECT_TRUE(
- matches("template<typename T> class A {}; A<double> a;",
- varDecl(hasName("a"),
- hasTypeLoc(elaboratedTypeLoc(hasNamedTypeLoc(
- templateSpecializationTypeLoc(hasTemplateArgumentLoc(
- 0, hasTypeLoc(loc(asString("double")))))))))));
+ EXPECT_TRUE(matches(
+ "template<typename T> class A {}; A<double> a;",
+ varDecl(hasName("a"),
+ hasTypeLoc(templateSpecializationTypeLoc(hasTemplateArgumentLoc(
+ 0, hasTypeLoc(loc(asString("double")))))))));
}
TEST(HasTemplateArgumentLoc, DoesNotBindToSpecializationWithIntArgument) {
EXPECT_TRUE(notMatches(
"template<typename T> class A {}; A<int> a;",
varDecl(hasName("a"),
- hasTypeLoc(elaboratedTypeLoc(hasNamedTypeLoc(
- templateSpecializationTypeLoc(hasTemplateArgumentLoc(
- 0, hasTypeLoc(loc(asString("double")))))))))));
+ hasTypeLoc(templateSpecializationTypeLoc(hasTemplateArgumentLoc(
+ 0, hasTypeLoc(loc(asString("double")))))))));
}
TEST(HasTemplateArgumentLoc, BindsToExplicitSpecializationWithIntArgument) {
@@ -6735,12 +6696,11 @@ TEST(HasNamedTypeLoc, BindsToElaboratedObjectDeclaration) {
class C<int> c;
)",
varDecl(hasName("c"),
- hasTypeLoc(elaboratedTypeLoc(
- hasNamedTypeLoc(templateSpecializationTypeLoc(
- hasAnyTemplateArgumentLoc(templateArgumentLoc()))))))));
+ hasTypeLoc(templateSpecializationTypeLoc(
+ hasAnyTemplateArgumentLoc(templateArgumentLoc()))))));
}
-TEST(HasNamedTypeLoc, DoesNotBindToNonElaboratedObjectDeclaration) {
+TEST(HasNamedTypeLoc, BindsToNonElaboratedObjectDeclaration) {
EXPECT_TRUE(matches(
R"(
template <typename T>
@@ -6748,9 +6708,8 @@ TEST(HasNamedTypeLoc, DoesNotBindToNonElaboratedObjectDeclaration) {
C<int> c;
)",
varDecl(hasName("c"),
- hasTypeLoc(elaboratedTypeLoc(
- hasNamedTypeLoc(templateSpecializationTypeLoc(
- hasAnyTemplateArgumentLoc(templateArgumentLoc()))))))));
+ hasTypeLoc(templateSpecializationTypeLoc(
+ hasAnyTemplateArgumentLoc(templateArgumentLoc()))))));
}
} // namespace ast_matchers
diff --git a/clang/unittests/Analysis/FlowSensitive/CachedConstAccessorsLatticeTest.cpp b/clang/unittests/Analysis/FlowSensitive/CachedConstAccessorsLatticeTest.cpp
index ffc50fb..fb3ab7c 100644
--- a/clang/unittests/Analysis/FlowSensitive/CachedConstAccessorsLatticeTest.cpp
+++ b/clang/unittests/Analysis/FlowSensitive/CachedConstAccessorsLatticeTest.cpp
@@ -80,7 +80,7 @@ struct CommonTestInputs {
)cpp") {
auto *SDecl = cast<CXXRecordDecl>(
lookup("S", *AST.context().getTranslationUnitDecl()));
- SType = AST.context().getRecordType(SDecl);
+ SType = AST.context().getCanonicalTagType(SDecl);
CallVal = selectFirst<CallExpr>(
"call",
match(cxxMemberCallExpr(callee(functionDecl(hasName("valProperty"))))
@@ -163,7 +163,7 @@ TEST_F(CachedConstAccessorsLatticeTest,
)cpp");
auto *SDecl =
cast<CXXRecordDecl>(lookup("S", *AST.context().getTranslationUnitDecl()));
- QualType SType = AST.context().getRecordType(SDecl);
+ CanQualType SType = AST.context().getCanonicalTagType(SDecl);
const CallExpr *CE = selectFirst<CallExpr>(
"call", match(cxxMemberCallExpr(
callee(functionDecl(hasName("structValProperty"))))
@@ -237,7 +237,7 @@ TEST_F(CachedConstAccessorsLatticeTest, DifferentValsFromDifferentLocs) {
)cpp");
auto *SDecl =
cast<CXXRecordDecl>(lookup("S", *AST.context().getTranslationUnitDecl()));
- QualType SType = AST.context().getRecordType(SDecl);
+ CanQualType SType = AST.context().getCanonicalTagType(SDecl);
SmallVector<BoundNodes, 1> valPropertyCalls =
match(cxxMemberCallExpr(callee(functionDecl(hasName("valProperty"))))
.bind("call"),
diff --git a/clang/unittests/Analysis/FlowSensitive/DataflowEnvironmentTest.cpp b/clang/unittests/Analysis/FlowSensitive/DataflowEnvironmentTest.cpp
index 737277e..0780db9 100644
--- a/clang/unittests/Analysis/FlowSensitive/DataflowEnvironmentTest.cpp
+++ b/clang/unittests/Analysis/FlowSensitive/DataflowEnvironmentTest.cpp
@@ -402,7 +402,7 @@ TEST_F(EnvironmentTest,
// `Target<&S::accessor>`.
Environment Env(DAContext, *Fun);
Env.initialize();
- EXPECT_THAT(DAContext.getModeledFields(QualType(Struct->getTypeForDecl(), 0)),
+ EXPECT_THAT(DAContext.getModeledFields(Context.getCanonicalTagType(Struct)),
Contains(Member));
}
diff --git a/clang/unittests/CodeGen/CodeGenExternalTest.cpp b/clang/unittests/CodeGen/CodeGenExternalTest.cpp
index 0081588..be3be1474 100644
--- a/clang/unittests/CodeGen/CodeGenExternalTest.cpp
+++ b/clang/unittests/CodeGen/CodeGenExternalTest.cpp
@@ -172,6 +172,7 @@ static void test_codegen_fns(MyASTConsumer *my) {
bool mytest_struct_ok = false;
CodeGen::CodeGenModule &CGM = my->Builder->CGM();
+ const ASTContext &Ctx = my->toplevel_decls.front()->getASTContext();
for (auto decl : my->toplevel_decls ) {
if (FunctionDecl *fd = dyn_cast<FunctionDecl>(decl)) {
@@ -189,9 +190,7 @@ static void test_codegen_fns(MyASTConsumer *my) {
if (rd->getName() == "mytest_struct") {
RecordDecl *def = rd->getDefinition();
ASSERT_TRUE(def != NULL);
- const clang::Type *clangTy = rd->getCanonicalDecl()->getTypeForDecl();
- ASSERT_TRUE(clangTy != NULL);
- QualType qType = clangTy->getCanonicalTypeInternal();
+ CanQualType qType = Ctx.getCanonicalTagType(rd);
// Check convertTypeForMemory
llvm::Type *llvmTy = CodeGen::convertTypeForMemory(CGM, qType);
diff --git a/clang/unittests/Index/IndexTests.cpp b/clang/unittests/Index/IndexTests.cpp
index 05ce448..6df4b57 100644
--- a/clang/unittests/Index/IndexTests.cpp
+++ b/clang/unittests/Index/IndexTests.cpp
@@ -347,7 +347,7 @@ TEST(IndexTest, Constructors) {
WrittenAt(Position(4, 8)))));
}
-TEST(IndexTest, InjecatedNameClass) {
+TEST(IndexTest, InjectedNameClass) {
std::string Code = R"cpp(
template <typename T>
class Foo {
diff --git a/clang/unittests/Interpreter/CMakeLists.txt b/clang/unittests/Interpreter/CMakeLists.txt
index 1dda902..24f65ca 100644
--- a/clang/unittests/Interpreter/CMakeLists.txt
+++ b/clang/unittests/Interpreter/CMakeLists.txt
@@ -1,10 +1,22 @@
-add_distinct_clang_unittest(ClangReplInterpreterTests
+set(CLANG_REPL_TEST_SOURCES
IncrementalCompilerBuilderTest.cpp
IncrementalProcessingTest.cpp
InterpreterTest.cpp
InterpreterExtensionsTest.cpp
CodeCompletionTest.cpp
+)
+if(TARGET compiler-rt)
+ list(APPEND CLANG_REPL_TEST_SOURCES
+ OutOfProcessInterpreterTests.cpp
+ )
+ message(STATUS "Compiler-RT found, enabling out of process JIT tests")
+endif()
+
+add_distinct_clang_unittest(ClangReplInterpreterTests
+ ${CLANG_REPL_TEST_SOURCES}
+
+ PARTIAL_SOURCES_INTENDED
EXPORT_SYMBOLS
CLANG_LIBS
@@ -26,6 +38,14 @@ add_distinct_clang_unittest(ClangReplInterpreterTests
TargetParser
)
+if(TARGET compiler-rt)
+ add_dependencies(ClangReplInterpreterTests
+ llvm-jitlink-executor
+ compiler-rt
+ )
+ message(STATUS "Adding dependency on compiler-rt for out of process JIT tests")
+endif()
+
# Exceptions on Windows are not yet supported.
if(NOT WIN32)
add_subdirectory(ExceptionTests)
diff --git a/clang/unittests/Interpreter/InterpreterTest.cpp b/clang/unittests/Interpreter/InterpreterTest.cpp
index 768058b..5d49089 100644
--- a/clang/unittests/Interpreter/InterpreterTest.cpp
+++ b/clang/unittests/Interpreter/InterpreterTest.cpp
@@ -15,12 +15,17 @@
#include "clang/AST/Decl.h"
#include "clang/AST/DeclGroup.h"
#include "clang/AST/Mangle.h"
+#include "clang/Basic/Version.h"
+#include "clang/Config/config.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/TextDiagnosticPrinter.h"
#include "clang/Interpreter/Interpreter.h"
+#include "clang/Interpreter/RemoteJITUtils.h"
#include "clang/Interpreter/Value.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Sema.h"
+#include "llvm/Support/Error.h"
+#include "llvm/TargetParser/Host.h"
#include "llvm/TargetParser/Host.h"
@@ -34,6 +39,12 @@ int Global = 42;
REPL_EXTERNAL_VISIBILITY int getGlobal() { return Global; }
REPL_EXTERNAL_VISIBILITY void setGlobal(int val) { Global = val; }
+#ifdef _WIN32
+#define STDIN_FILENO 0
+#define STDOUT_FILENO 1
+#define STDERR_FILENO 2
+#endif
+
namespace {
class InterpreterTest : public InterpreterTestBase {
diff --git a/clang/unittests/Interpreter/OutOfProcessInterpreterTests.cpp b/clang/unittests/Interpreter/OutOfProcessInterpreterTests.cpp
new file mode 100644
index 0000000..4d5ef5c
--- /dev/null
+++ b/clang/unittests/Interpreter/OutOfProcessInterpreterTests.cpp
@@ -0,0 +1,148 @@
+//===- unittests/Interpreter/OutOfProcessInterpreterTests.cpp - OOP tests -===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Unit tests for Clang's Interpreter library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "InterpreterTestFixture.h"
+
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclGroup.h"
+#include "clang/AST/Mangle.h"
+#include "clang/Basic/Version.h"
+#include "clang/Config/config.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/TextDiagnosticPrinter.h"
+#include "clang/Interpreter/Interpreter.h"
+#include "clang/Interpreter/RemoteJITUtils.h"
+#include "clang/Interpreter/Value.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/Sema.h"
+#include "llvm/Support/Error.h"
+#include "llvm/TargetParser/Host.h"
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+using namespace clang;
+
+llvm::ExitOnError ExitOnError;
+
+#ifdef _WIN32
+#define STDIN_FILENO 0
+#define STDOUT_FILENO 1
+#define STDERR_FILENO 2
+#endif
+
+namespace {
+
+using Args = std::vector<const char *>;
+
+static void removePathComponent(unsigned N, llvm::SmallString<256> &Path) {
+ for (unsigned i = 0; i < N; ++i)
+ llvm::sys::path::remove_filename(Path);
+}
+
+static std::string getExecutorPath() {
+ llvm::SmallString<256> ExecutorPath(llvm::sys::fs::getMainExecutable(
+ nullptr, reinterpret_cast<void *>(&getExecutorPath)));
+ removePathComponent(5, ExecutorPath);
+ llvm::sys::path::append(ExecutorPath, "bin", "llvm-jitlink-executor");
+ return ExecutorPath.str().str();
+}
+
+static std::string getOrcRuntimePath() {
+ llvm::SmallString<256> RuntimePath(llvm::sys::fs::getMainExecutable(
+ nullptr, reinterpret_cast<void *>(&getOrcRuntimePath)));
+ removePathComponent(5, RuntimePath);
+ llvm::sys::path::append(RuntimePath, CLANG_INSTALL_LIBDIR_BASENAME, "clang",
+ CLANG_VERSION_MAJOR_STRING, "lib");
+
+ llvm::Triple SystemTriple(llvm::sys::getProcessTriple());
+ if (SystemTriple.isOSBinFormatMachO()) {
+ llvm::sys::path::append(RuntimePath, "darwin", "liborc_rt_osx.a");
+ } else if (SystemTriple.isOSBinFormatELF()) {
+ llvm::sys::path::append(RuntimePath, "x86_64-unknown-linux-gnu",
+ "liborc_rt.a");
+ }
+
+ return RuntimePath.str().str();
+}
+
+static std::unique_ptr<Interpreter>
+createInterpreterWithRemoteExecution(const Args &ExtraArgs = {},
+ DiagnosticConsumer *Client = nullptr) {
+ Args ClangArgs = {"-Xclang", "-emit-llvm-only"};
+ llvm::append_range(ClangArgs, ExtraArgs);
+ auto CB = clang::IncrementalCompilerBuilder();
+ CB.SetCompilerArgs(ClangArgs);
+ auto CI = cantFail(CB.CreateCpp());
+ if (Client)
+ CI->getDiagnostics().setClient(Client, /*ShouldOwnClient=*/false);
+
+ std::unique_ptr<llvm::orc::LLJITBuilder> JB;
+
+ llvm::Triple SystemTriple(llvm::sys::getProcessTriple());
+
+ if ((SystemTriple.isOSBinFormatELF() || SystemTriple.isOSBinFormatMachO())) {
+ std::string OOPExecutor = getExecutorPath();
+ std::string OrcRuntimePath = getOrcRuntimePath();
+ bool UseSharedMemory = false;
+ std::string SlabAllocateSizeString = "";
+ std::unique_ptr<llvm::orc::ExecutorProcessControl> EPC;
+ EPC = ExitOnError(launchExecutor(OOPExecutor, UseSharedMemory,
+ SlabAllocateSizeString,
+ [=] { // Lambda defined inline
+ auto redirect = [](int from, int to) {
+ if (from != to) {
+ dup2(from, to);
+ close(from);
+ }
+ };
+
+ redirect(0, STDIN_FILENO);
+ redirect(1, STDOUT_FILENO);
+ redirect(2, STDERR_FILENO);
+
+ setvbuf(stdout, nullptr, _IONBF, 0);
+ setvbuf(stderr, nullptr, _IONBF, 0);
+ }));
+ if (EPC) {
+ CB.SetTargetTriple(EPC->getTargetTriple().getTriple());
+ JB = ExitOnError(clang::Interpreter::createLLJITBuilder(std::move(EPC),
+ OrcRuntimePath));
+ }
+ }
+
+ return cantFail(clang::Interpreter::create(std::move(CI), std::move(JB)));
+}
+
+static size_t DeclsSize(TranslationUnitDecl *PTUDecl) {
+ return std::distance(PTUDecl->decls().begin(), PTUDecl->decls().end());
+}
+
+TEST_F(InterpreterTestBase, SanityWithRemoteExecution) {
+ if (!HostSupportsJIT())
+ GTEST_SKIP();
+
+ std::unique_ptr<Interpreter> Interp = createInterpreterWithRemoteExecution();
+
+ using PTU = PartialTranslationUnit;
+
+ PTU &R1(cantFail(Interp->Parse("void g(); void g() {}")));
+ EXPECT_EQ(2U, DeclsSize(R1.TUPart));
+
+ PTU &R2(cantFail(Interp->Parse("int i;")));
+ EXPECT_EQ(1U, DeclsSize(R2.TUPart));
+}
+
+} // end anonymous namespace
diff --git a/clang/unittests/Sema/HeuristicResolverTest.cpp b/clang/unittests/Sema/HeuristicResolverTest.cpp
index ee434f7..7df25e0 100644
--- a/clang/unittests/Sema/HeuristicResolverTest.cpp
+++ b/clang/unittests/Sema/HeuristicResolverTest.cpp
@@ -29,7 +29,7 @@ MATCHER_P2(matchAdapter, MatcherForElement, MatchFunction, "matchAdapter") {
template <typename InputNode>
using ResolveFnT = std::function<std::vector<const NamedDecl *>(
- const HeuristicResolver *, const InputNode *)>;
+ const HeuristicResolver *, InputNode)>;
// Test heuristic resolution on `Code` using the resolution procedure
// `ResolveFn`, which takes a `HeuristicResolver` and an input AST node of type
@@ -37,8 +37,9 @@ using ResolveFnT = std::function<std::vector<const NamedDecl *>(
// `InputMatcher` should be an AST matcher that matches a single node to pass as
// input to `ResolveFn`, bound to the ID "input". `OutputMatchers` should be AST
// matchers that each match a single node, bound to the ID "output".
-template <typename InputNode, typename InputMatcher, typename... OutputMatchers>
-void expectResolution(llvm::StringRef Code, ResolveFnT<InputNode> ResolveFn,
+template <typename InputNode, typename ParamT, typename InputMatcher,
+ typename... OutputMatchers>
+void expectResolution(llvm::StringRef Code, ResolveFnT<ParamT> ResolveFn,
const InputMatcher &IM, const OutputMatchers &...OMS) {
auto TU = tooling::buildASTFromCodeWithArgs(Code, {"-std=c++20"});
auto &Ctx = TU->getASTContext();
@@ -59,7 +60,11 @@ void expectResolution(llvm::StringRef Code, ResolveFnT<InputNode> ResolveFn,
};
HeuristicResolver H(Ctx);
- auto Results = ResolveFn(&H, Input);
+ std::vector<const NamedDecl *> Results;
+ if constexpr (std::is_pointer_v<ParamT>)
+ Results = ResolveFn(&H, Input);
+ else
+ Results = ResolveFn(&H, *Input);
EXPECT_THAT(Results, ElementsAre(matchAdapter(OMS, OutputNodeMatches)...));
}
@@ -71,8 +76,8 @@ void expectResolution(llvm::StringRef Code,
HeuristicResolver::*ResolveFn)(const InputNode *)
const,
const InputMatcher &IM, const OutputMatchers &...OMS) {
- expectResolution(Code, ResolveFnT<InputNode>(std::mem_fn(ResolveFn)), IM,
- OMS...);
+ expectResolution<InputNode>(
+ Code, ResolveFnT<const InputNode *>(std::mem_fn(ResolveFn)), IM, OMS...);
}
TEST(HeuristicResolver, MemberExpr) {
@@ -643,15 +648,16 @@ TEST(HeuristicResolver, NestedNameSpecifier) {
// expected by expectResolution() (returning a vector of decls).
ResolveFnT<NestedNameSpecifier> ResolveFn =
[](const HeuristicResolver *H,
- const NestedNameSpecifier *NNS) -> std::vector<const NamedDecl *> {
+ NestedNameSpecifier NNS) -> std::vector<const NamedDecl *> {
return {H->resolveNestedNameSpecifierToType(NNS)->getAsCXXRecordDecl()};
};
- expectResolution(Code, ResolveFn,
- nestedNameSpecifier(hasPrefix(specifiesType(hasDeclaration(
- classTemplateDecl(hasName("A"))))))
- .bind("input"),
- classTemplateDecl(has(cxxRecordDecl(
- has(cxxRecordDecl(hasName("B")).bind("output"))))));
+ expectResolution<NestedNameSpecifier>(
+ Code, ResolveFn,
+ nestedNameSpecifier(hasPrefix(specifiesType(
+ hasDeclaration(classTemplateDecl(hasName("A"))))))
+ .bind("input"),
+ classTemplateDecl(
+ has(cxxRecordDecl(has(cxxRecordDecl(hasName("B")).bind("output"))))));
}
TEST(HeuristicResolver, TemplateSpecializationType) {
diff --git a/clang/unittests/StaticAnalyzer/ExprEngineVisitTest.cpp b/clang/unittests/StaticAnalyzer/ExprEngineVisitTest.cpp
index 12be228..ab4b8c7 100644
--- a/clang/unittests/StaticAnalyzer/ExprEngineVisitTest.cpp
+++ b/clang/unittests/StaticAnalyzer/ExprEngineVisitTest.cpp
@@ -55,11 +55,13 @@ public:
", Stmt = " + S->getStmtClassName());
}
- void checkBind(SVal Loc, SVal Val, const Stmt *S, CheckerContext &C) const {
+ void checkBind(SVal Loc, SVal Val, const Stmt *S, bool AtDeclInit,
+ CheckerContext &C) const {
emitErrorReport(C, Bug,
"checkBind: Loc = " + dumpToString(Loc) +
", Val = " + dumpToString(Val) +
- ", Stmt = " + S->getStmtClassName());
+ ", Stmt = " + S->getStmtClassName() +
+ ", AtDeclInit = " + (AtDeclInit ? "true" : "false"));
}
private:
@@ -140,7 +142,7 @@ TEST(ExprEngineVisitTest, checkLocationAndBind) {
"Stmt = ImplicitCastExpr";
std::string BindMsg =
"checkBind: Loc = &MyClassWrite, Val = lazyCompoundVal{0x0,MyClassRead}, "
- "Stmt = CXXOperatorCallExpr";
+ "Stmt = CXXOperatorCallExpr, AtDeclInit = false";
std::size_t LocPos = Diags.find(LocMsg);
std::size_t BindPos = Diags.find(BindMsg);
EXPECT_NE(LocPos, std::string::npos);
@@ -150,4 +152,20 @@ TEST(ExprEngineVisitTest, checkLocationAndBind) {
EXPECT_TRUE(LocPos > BindPos);
}
+TEST(ExprEngineVisitTest, checkLocationAndBindInitialization) {
+ std::string Diags;
+ EXPECT_TRUE(runCheckerOnCode<addMemAccessChecker>(R"(
+ class MyClass{
+ public:
+ int Value;
+ };
+ void top(MyClass param) {
+ MyClass MyClassWrite = param;
+ }
+ )",
+ Diags));
+
+ EXPECT_TRUE(StringRef(Diags).contains("AtDeclInit = true"));
+}
+
} // namespace
diff --git a/clang/unittests/StaticAnalyzer/SValTest.cpp b/clang/unittests/StaticAnalyzer/SValTest.cpp
index 58e9a8d..1f4a18b 100644
--- a/clang/unittests/StaticAnalyzer/SValTest.cpp
+++ b/clang/unittests/StaticAnalyzer/SValTest.cpp
@@ -61,7 +61,8 @@ using SVals = llvm::StringMap<SVal>;
/// can test whatever we gathered.
class SValCollector : public Checker<check::Bind, check::EndAnalysis> {
public:
- void checkBind(SVal Loc, SVal Val, const Stmt *S, CheckerContext &C) const {
+ void checkBind(SVal Loc, SVal Val, const Stmt *S, bool AtDeclInit,
+ CheckerContext &C) const {
// Skip instantly if we finished testing.
// Also, we care only for binds happening in variable initializations.
if (Tested || !isa<DeclStmt>(S))
@@ -301,13 +302,13 @@ void foo(int x) {
ASSERT_FALSE(B.getType(Context).isNull());
const auto *BRecordType = dyn_cast<RecordType>(B.getType(Context));
ASSERT_NE(BRecordType, nullptr);
- EXPECT_EQ("TestStruct", BRecordType->getDecl()->getName());
+ EXPECT_EQ("TestStruct", BRecordType->getOriginalDecl()->getName());
SVal C = getByName("c");
ASSERT_FALSE(C.getType(Context).isNull());
const auto *CRecordType = dyn_cast<RecordType>(C.getType(Context));
ASSERT_NE(CRecordType, nullptr);
- EXPECT_EQ("TestUnion", CRecordType->getDecl()->getName());
+ EXPECT_EQ("TestUnion", CRecordType->getOriginalDecl()->getName());
auto D = getByName("d").getAs<nonloc::CompoundVal>();
ASSERT_TRUE(D.has_value());
@@ -319,12 +320,9 @@ void foo(int x) {
ASSERT_TRUE(LD.has_value());
auto LDT = LD->getType(Context);
ASSERT_FALSE(LDT.isNull());
- const auto *DElaboratedType = dyn_cast<ElaboratedType>(LDT);
- ASSERT_NE(DElaboratedType, nullptr);
- const auto *DRecordType =
- dyn_cast<RecordType>(DElaboratedType->getNamedType());
+ const auto *DRecordType = dyn_cast<RecordType>(LDT);
ASSERT_NE(DRecordType, nullptr);
- EXPECT_EQ("TestStruct", DRecordType->getDecl()->getName());
+ EXPECT_EQ("TestStruct", DRecordType->getOriginalDecl()->getName());
}
SVAL_TEST(GetStringType, R"(
@@ -353,7 +351,7 @@ void TestClass::foo() {
ASSERT_NE(APtrTy, nullptr);
const auto *ARecordType = dyn_cast<RecordType>(APtrTy->getPointeeType());
ASSERT_NE(ARecordType, nullptr);
- EXPECT_EQ("TestClass", ARecordType->getDecl()->getName());
+ EXPECT_EQ("TestClass", ARecordType->getOriginalDecl()->getName());
}
SVAL_TEST(GetFunctionPtrType, R"(
diff --git a/clang/unittests/Tooling/LookupTest.cpp b/clang/unittests/Tooling/LookupTest.cpp
index acd1714..4c49ebe 100644
--- a/clang/unittests/Tooling/LookupTest.cpp
+++ b/clang/unittests/Tooling/LookupTest.cpp
@@ -193,16 +193,16 @@ TEST(LookupTest, replaceNestedClassName) {
auto replaceTypeLoc = [&](const NamedDecl *ND, SourceLocation Loc,
StringRef ReplacementString) {
return tooling::replaceNestedName(
- nullptr, Loc, Visitor.DeclStack.back()->getDeclContext(), ND,
- ReplacementString);
+ /*Use=*/std::nullopt, Loc, Visitor.DeclStack.back()->getDeclContext(),
+ ND, ReplacementString);
};
Visitor.OnRecordTypeLoc = [&](RecordTypeLoc Type) {
// Filter Types by name since there are other `RecordTypeLoc` in the test
// file.
- if (Type.getDecl()->getQualifiedNameAsString() == "a::b::Foo") {
- EXPECT_EQ("x::Bar", replaceTypeLoc(Type.getDecl(), Type.getBeginLoc(),
- "::a::x::Bar"));
+ if (Type.getOriginalDecl()->getQualifiedNameAsString() == "a::b::Foo") {
+ EXPECT_EQ("x::Bar", replaceTypeLoc(Type.getOriginalDecl(),
+ Type.getBeginLoc(), "::a::x::Bar"));
}
};
Visitor.runOver("namespace a { namespace b {\n"
@@ -214,7 +214,7 @@ TEST(LookupTest, replaceNestedClassName) {
// Filter Types by name since there are other `RecordTypeLoc` in the test
// file.
// `a::b::Foo` in using shadow decl is not `TypeLoc`.
- auto *TD = Type.getFoundDecl()->getTargetDecl();
+ auto *TD = Type.getDecl()->getTargetDecl();
if (TD->getQualifiedNameAsString() == "a::b::Foo") {
EXPECT_EQ("Bar", replaceTypeLoc(TD, Type.getBeginLoc(), "::a::x::Bar"));
}
@@ -227,9 +227,9 @@ TEST(LookupTest, replaceNestedClassName) {
// `x::y::Foo` in c.cc [1], it should not make "Foo" at [0] ambiguous because
// it's not visible at [0].
Visitor.OnRecordTypeLoc = [&](RecordTypeLoc Type) {
- if (Type.getDecl()->getQualifiedNameAsString() == "x::y::Old") {
- EXPECT_EQ("Foo",
- replaceTypeLoc(Type.getDecl(), Type.getBeginLoc(), "::x::Foo"));
+ if (Type.getOriginalDecl()->getQualifiedNameAsString() == "x::y::Old") {
+ EXPECT_EQ("Foo", replaceTypeLoc(Type.getOriginalDecl(),
+ Type.getBeginLoc(), "::x::Foo"));
}
};
Visitor.runOver(R"(
diff --git a/clang/unittests/Tooling/QualTypeNamesTest.cpp b/clang/unittests/Tooling/QualTypeNamesTest.cpp
index bcf7d89..1139392 100644
--- a/clang/unittests/Tooling/QualTypeNamesTest.cpp
+++ b/clang/unittests/Tooling/QualTypeNamesTest.cpp
@@ -295,9 +295,9 @@ TEST(QualTypeNameTest, TemplatedClass) {
auto *A2 = *ASpec;
// Their type names follow the records.
- QualType A1RecordTy = Context.getRecordType(A1);
+ CanQualType A1RecordTy = Context.getCanonicalTagType(A1);
EXPECT_EQ(getFullyQualifiedName(A1RecordTy), "A<1>");
- QualType A2RecordTy = Context.getRecordType(A2);
+ CanQualType A2RecordTy = Context.getCanonicalTagType(A2);
EXPECT_EQ(getFullyQualifiedName(A2RecordTy), "A<2U>");
// getTemplateSpecializationType() gives types that print the integral
@@ -305,13 +305,13 @@ TEST(QualTypeNameTest, TemplatedClass) {
TemplateArgument Args1[] = {
{Context, llvm::APSInt::getUnsigned(1u), Context.UnsignedIntTy}};
QualType A1TemplateSpecTy = Context.getTemplateSpecializationType(
- TemplateName(A), Args1, Args1, A1RecordTy);
+ ElaboratedTypeKeyword::None, TemplateName(A), Args1, Args1, A1RecordTy);
EXPECT_EQ(A1TemplateSpecTy.getAsString(), "A<1>");
TemplateArgument Args2[] = {
{Context, llvm::APSInt::getUnsigned(2u), Context.UnsignedIntTy}};
QualType A2TemplateSpecTy = Context.getTemplateSpecializationType(
- TemplateName(A), Args2, Args2, A2RecordTy);
+ ElaboratedTypeKeyword::None, TemplateName(A), Args2, Args2, A2RecordTy);
EXPECT_EQ(A2TemplateSpecTy.getAsString(), "A<2>");
// Find A<1>::B and its specialization B<3>.
@@ -321,21 +321,19 @@ TEST(QualTypeNameTest, TemplatedClass) {
auto A1BSpec = A1B->spec_begin();
ASSERT_NE(A1BSpec, A1B->spec_end());
auto *A1B3 = *A1BSpec;
- QualType A1B3RecordTy = Context.getRecordType(A1B3);
+ CanQualType A1B3RecordTy = Context.getCanonicalTagType(A1B3);
EXPECT_EQ(getFullyQualifiedName(A1B3RecordTy), "A<1>::B<3>");
// Construct A<1>::B<3> and check name.
+ NestedNameSpecifier A1Nested(A1TemplateSpecTy.getTypePtr());
+ TemplateName A1B3Name = Context.getQualifiedTemplateName(
+ A1Nested, /*TemplateKeyword=*/false, TemplateName(A1B));
+
TemplateArgument Args3[] = {
{Context, llvm::APSInt::getUnsigned(3u), Context.UnsignedIntTy}};
QualType A1B3TemplateSpecTy = Context.getTemplateSpecializationType(
- TemplateName(A1B), Args3, Args3, A1B3RecordTy);
- EXPECT_EQ(A1B3TemplateSpecTy.getAsString(), "B<3>");
-
- NestedNameSpecifier *A1Nested = NestedNameSpecifier::Create(
- Context, nullptr, A1TemplateSpecTy.getTypePtr());
- QualType A1B3ElaboratedTy = Context.getElaboratedType(
- ElaboratedTypeKeyword::None, A1Nested, A1B3TemplateSpecTy);
- EXPECT_EQ(A1B3ElaboratedTy.getAsString(), "A<1>::B<3>");
+ ElaboratedTypeKeyword::None, A1B3Name, Args3, Args3, A1B3RecordTy);
+ EXPECT_EQ(A1B3TemplateSpecTy.getAsString(), "A<1>::B<3>");
// Find A<2u>::B and its specialization B<4u>.
auto *A2B =
@@ -344,21 +342,19 @@ TEST(QualTypeNameTest, TemplatedClass) {
auto A2BSpec = A2B->spec_begin();
ASSERT_NE(A2BSpec, A2B->spec_end());
auto *A2B4 = *A2BSpec;
- QualType A2B4RecordTy = Context.getRecordType(A2B4);
+ CanQualType A2B4RecordTy = Context.getCanonicalTagType(A2B4);
EXPECT_EQ(getFullyQualifiedName(A2B4RecordTy), "A<2U>::B<4U>");
// Construct A<2>::B<4> and check name.
+ NestedNameSpecifier A2Nested(A2TemplateSpecTy.getTypePtr());
+ TemplateName A2B4Name = Context.getQualifiedTemplateName(
+ A2Nested, /*TemplateKeyword=*/false, TemplateName(A2B));
+
TemplateArgument Args4[] = {
{Context, llvm::APSInt::getUnsigned(4u), Context.UnsignedIntTy}};
QualType A2B4TemplateSpecTy = Context.getTemplateSpecializationType(
- TemplateName(A2B), Args4, Args4, A2B4RecordTy);
- EXPECT_EQ(A2B4TemplateSpecTy.getAsString(), "B<4>");
-
- NestedNameSpecifier *A2Nested = NestedNameSpecifier::Create(
- Context, nullptr, A2TemplateSpecTy.getTypePtr());
- QualType A2B4ElaboratedTy = Context.getElaboratedType(
- ElaboratedTypeKeyword::None, A2Nested, A2B4TemplateSpecTy);
- EXPECT_EQ(A2B4ElaboratedTy.getAsString(), "A<2>::B<4>");
+ ElaboratedTypeKeyword::None, A2B4Name, Args4, Args4, A2B4RecordTy);
+ EXPECT_EQ(A2B4TemplateSpecTy.getAsString(), "A<2>::B<4>");
}
TEST(QualTypeNameTest, AnonStrucs) {
diff --git a/clang/unittests/Tooling/RangeSelectorTest.cpp b/clang/unittests/Tooling/RangeSelectorTest.cpp
index 12f7a8c..adf5e74 100644
--- a/clang/unittests/Tooling/RangeSelectorTest.cpp
+++ b/clang/unittests/Tooling/RangeSelectorTest.cpp
@@ -474,15 +474,15 @@ TEST(RangeSelectorTest, NameOpTypeLoc) {
// Matches declaration of `a`
TestMatch MatchA = matchCode(
Code, varDecl(hasName("a"), hasTypeLoc(typeLoc().bind(CtorTy))));
- EXPECT_THAT_EXPECTED(select(name(CtorTy), MatchA), HasValue("Foo"));
+ EXPECT_THAT_EXPECTED(select(name(CtorTy), MatchA), HasValue("ns::Foo"));
// Matches call of Foo(int)
TestMatch MatchB = matchCode(
Code, cxxFunctionalCastExpr(hasTypeLoc(typeLoc().bind(CtorTy))));
- EXPECT_THAT_EXPECTED(select(name(CtorTy), MatchB), HasValue("Foo"));
+ EXPECT_THAT_EXPECTED(select(name(CtorTy), MatchB), HasValue("ns::Foo"));
// Matches call of Foo(int, int)
TestMatch MatchC = matchCode(
Code, cxxTemporaryObjectExpr(hasTypeLoc(typeLoc().bind(CtorTy))));
- EXPECT_THAT_EXPECTED(select(name(CtorTy), MatchC), HasValue("Foo"));
+ EXPECT_THAT_EXPECTED(select(name(CtorTy), MatchC), HasValue("ns::Foo"));
}
TEST(RangeSelectorTest, NameOpTemplateSpecializationTypeLoc) {
diff --git a/clang/unittests/Tooling/RecursiveASTVisitorTestTypeLocVisitor.cpp b/clang/unittests/Tooling/RecursiveASTVisitorTestTypeLocVisitor.cpp
index eec628c..7a35661 100644
--- a/clang/unittests/Tooling/RecursiveASTVisitorTestTypeLocVisitor.cpp
+++ b/clang/unittests/Tooling/RecursiveASTVisitorTestTypeLocVisitor.cpp
@@ -22,13 +22,13 @@ public:
TEST(RecursiveASTVisitor, VisitsBaseClassDeclarations) {
TypeLocVisitor Visitor;
- Visitor.ExpectMatch("class X", 1, 30);
+ Visitor.ExpectMatch("X", 1, 30);
EXPECT_TRUE(Visitor.runOver("class X {}; class Y : public X {};"));
}
TEST(RecursiveASTVisitor, VisitsCXXBaseSpecifiersOfForwardDeclaredClass) {
TypeLocVisitor Visitor;
- Visitor.ExpectMatch("class X", 3, 18);
+ Visitor.ExpectMatch("X", 3, 18);
EXPECT_TRUE(Visitor.runOver(
"class Y;\n"
"class X {};\n"
@@ -37,7 +37,7 @@ TEST(RecursiveASTVisitor, VisitsCXXBaseSpecifiersOfForwardDeclaredClass) {
TEST(RecursiveASTVisitor, VisitsCXXBaseSpecifiersWithIncompleteInnerClass) {
TypeLocVisitor Visitor;
- Visitor.ExpectMatch("class X", 2, 18);
+ Visitor.ExpectMatch("X", 2, 18);
EXPECT_TRUE(Visitor.runOver(
"class X {};\n"
"class Y : public X { class Z; };"));
@@ -45,7 +45,7 @@ TEST(RecursiveASTVisitor, VisitsCXXBaseSpecifiersWithIncompleteInnerClass) {
TEST(RecursiveASTVisitor, VisitsCXXBaseSpecifiersOfSelfReferentialType) {
TypeLocVisitor Visitor;
- Visitor.ExpectMatch("X<Y>", 2, 18, 2);
+ Visitor.ExpectMatch("X<Y>", 2, 18);
EXPECT_TRUE(Visitor.runOver(
"template<typename T> class X {};\n"
"class Y : public X<Y> {};"));
@@ -53,7 +53,7 @@ TEST(RecursiveASTVisitor, VisitsCXXBaseSpecifiersOfSelfReferentialType) {
TEST(RecursiveASTVisitor, VisitsClassTemplateTypeParmDefaultArgument) {
TypeLocVisitor Visitor;
- Visitor.ExpectMatch("class X", 2, 23);
+ Visitor.ExpectMatch("X", 2, 23);
EXPECT_TRUE(Visitor.runOver(
"class X;\n"
"template<typename T = X> class Y;\n"
@@ -62,7 +62,7 @@ TEST(RecursiveASTVisitor, VisitsClassTemplateTypeParmDefaultArgument) {
TEST(RecursiveASTVisitor, VisitsCompoundLiteralType) {
TypeLocVisitor Visitor;
- Visitor.ExpectMatch("struct S", 1, 26);
+ Visitor.ExpectMatch("struct S", 1, 19);
EXPECT_TRUE(Visitor.runOver(
"int f() { return (struct S { int a; }){.a = 0}.a; }",
TypeLocVisitor::Lang_C));
diff --git a/clang/unittests/Tooling/RecursiveASTVisitorTests/MemberPointerTypeLoc.cpp b/clang/unittests/Tooling/RecursiveASTVisitorTests/MemberPointerTypeLoc.cpp
index 587a00d..88cebb7 100644
--- a/clang/unittests/Tooling/RecursiveASTVisitorTests/MemberPointerTypeLoc.cpp
+++ b/clang/unittests/Tooling/RecursiveASTVisitorTests/MemberPointerTypeLoc.cpp
@@ -24,7 +24,7 @@ public:
bool VisitRecordTypeLoc(RecordTypeLoc RTL) override {
if (!RTL)
return true;
- Match(RTL.getDecl()->getName(), RTL.getNameLoc());
+ Match(RTL.getOriginalDecl()->getName(), RTL.getNameLoc());
return true;
}
};
diff --git a/clang/unittests/Tooling/RecursiveASTVisitorTests/NestedNameSpecifiers.cpp b/clang/unittests/Tooling/RecursiveASTVisitorTests/NestedNameSpecifiers.cpp
index 23a2df4..4181cd2 100644
--- a/clang/unittests/Tooling/RecursiveASTVisitorTests/NestedNameSpecifiers.cpp
+++ b/clang/unittests/Tooling/RecursiveASTVisitorTests/NestedNameSpecifiers.cpp
@@ -18,17 +18,20 @@ public:
bool VisitRecordTypeLoc(RecordTypeLoc RTL) override {
if (!RTL)
return true;
- Match(RTL.getDecl()->getName(), RTL.getNameLoc());
+ Match(RTL.getOriginalDecl()->getName(), RTL.getNameLoc());
return true;
}
- bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS) override {
- if (!NNS)
- return true;
- if (const auto *ND = dyn_cast_if_present<NamespaceDecl>(
- NNS.getNestedNameSpecifier()->getAsNamespace()))
- Match(ND->getName(), NNS.getLocalBeginLoc());
- return ExpectedLocationVisitor::TraverseNestedNameSpecifierLoc(NNS);
+ bool
+ TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc QualifierLoc) override {
+ NestedNameSpecifier Qualifier = QualifierLoc.getNestedNameSpecifier();
+ if (Qualifier.getKind() == NestedNameSpecifier::Kind::Namespace) {
+ if (const auto *ND = dyn_cast<NamespaceDecl>(
+ Qualifier.getAsNamespaceAndPrefix().Namespace))
+ Match(ND->getName(), QualifierLoc.getLocalBeginLoc());
+ }
+ return ExpectedLocationVisitor::TraverseNestedNameSpecifierLoc(
+ QualifierLoc);
}
};
diff --git a/clang/unittests/Tooling/RefactoringTest.cpp b/clang/unittests/Tooling/RefactoringTest.cpp
index aff7523e..171dc6d 100644
--- a/clang/unittests/Tooling/RefactoringTest.cpp
+++ b/clang/unittests/Tooling/RefactoringTest.cpp
@@ -747,9 +747,12 @@ TEST(Replacement, TemplatedFunctionCall) {
class NestedNameSpecifierAVisitor : public TestVisitor {
public:
bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc NNSLoc) override {
- if (NNSLoc.getNestedNameSpecifier()) {
- if (const auto *NS = dyn_cast_if_present<NamespaceDecl>(
- NNSLoc.getNestedNameSpecifier()->getAsNamespace())) {
+ if (NestedNameSpecifier NNS = NNSLoc.getNestedNameSpecifier();
+ NNS.getKind() == NestedNameSpecifier::Kind::Namespace) {
+ if (const auto *NS =
+ dyn_cast<NamespaceDecl>(NNSLoc.getNestedNameSpecifier()
+ .getAsNamespaceAndPrefix()
+ .Namespace)) {
if (NS->getName() == "a") {
Replace = Replacement(*SM, &NNSLoc, "", Context->getLangOpts());
}
diff --git a/compiler-rt/lib/builtins/aarch64/lse.S b/compiler-rt/lib/builtins/aarch64/lse.S
index d7c1db7..a444d82 100644
--- a/compiler-rt/lib/builtins/aarch64/lse.S
+++ b/compiler-rt/lib/builtins/aarch64/lse.S
@@ -264,7 +264,7 @@ END_COMPILERRT_OUTLINE_FUNCTION(NAME(LDNM))
NO_EXEC_STACK_DIRECTIVE
-// GNU property note for BTI and PAC
-GNU_PROPERTY_BTI_PAC
+// GNU property note for BTI, PAC, and GCS
+GNU_PROPERTY_BTI_PAC_GCS
#endif // defined(__aarch64__) || defined(__arm64ec__)
diff --git a/compiler-rt/lib/builtins/aarch64/sme-abi.S b/compiler-rt/lib/builtins/aarch64/sme-abi.S
index 7c47336..d5510ac 100644
--- a/compiler-rt/lib/builtins/aarch64/sme-abi.S
+++ b/compiler-rt/lib/builtins/aarch64/sme-abi.S
@@ -371,5 +371,5 @@ END_COMPILERRT_FUNCTION(__arm_sme_restore)
NO_EXEC_STACK_DIRECTIVE
-// GNU property note for BTI and PAC
-GNU_PROPERTY_BTI_PAC
+// GNU property note for BTI, PAC, and GCS
+GNU_PROPERTY_BTI_PAC_GCS
diff --git a/compiler-rt/lib/builtins/assembly.h b/compiler-rt/lib/builtins/assembly.h
index 89372f1..d7db7d8 100644
--- a/compiler-rt/lib/builtins/assembly.h
+++ b/compiler-rt/lib/builtins/assembly.h
@@ -79,11 +79,12 @@
#define FUNC_ALIGN
#endif
-// BTI and PAC gnu property note
+// BTI, PAC, and GCS gnu property note
#define NT_GNU_PROPERTY_TYPE_0 5
#define GNU_PROPERTY_AARCH64_FEATURE_1_AND 0xc0000000
#define GNU_PROPERTY_AARCH64_FEATURE_1_BTI 1
#define GNU_PROPERTY_AARCH64_FEATURE_1_PAC 2
+#define GNU_PROPERTY_AARCH64_FEATURE_1_GCS 4
#if defined(__ARM_FEATURE_BTI_DEFAULT)
#define BTI_FLAG GNU_PROPERTY_AARCH64_FEATURE_1_BTI
@@ -97,6 +98,12 @@
#define PAC_FLAG 0
#endif
+#if defined(__ARM_FEATURE_GCS_DEFAULT)
+#define GCS_FLAG GNU_PROPERTY_AARCH64_FEATURE_1_GCS
+#else
+#define GCS_FLAG 0
+#endif
+
#define GNU_PROPERTY(type, value) \
.pushsection .note.gnu.property, "a" SEPARATOR \
.p2align 3 SEPARATOR \
@@ -118,11 +125,12 @@
#define BTI_J
#endif
-#if (BTI_FLAG | PAC_FLAG) != 0
-#define GNU_PROPERTY_BTI_PAC \
- GNU_PROPERTY(GNU_PROPERTY_AARCH64_FEATURE_1_AND, BTI_FLAG | PAC_FLAG)
+#if (BTI_FLAG | PAC_FLAG | GCS_FLAG) != 0
+#define GNU_PROPERTY_BTI_PAC_GCS \
+ GNU_PROPERTY(GNU_PROPERTY_AARCH64_FEATURE_1_AND, \
+ BTI_FLAG | PAC_FLAG | GCS_FLAG)
#else
-#define GNU_PROPERTY_BTI_PAC
+#define GNU_PROPERTY_BTI_PAC_GCS
#endif
#if defined(__clang__) || defined(__GCC_HAVE_DWARF2_CFI_ASM)
diff --git a/compiler-rt/lib/fuzzer/FuzzerUtilPosix.cpp b/compiler-rt/lib/fuzzer/FuzzerUtilPosix.cpp
index ae22ecf..b1bb138 100644
--- a/compiler-rt/lib/fuzzer/FuzzerUtilPosix.cpp
+++ b/compiler-rt/lib/fuzzer/FuzzerUtilPosix.cpp
@@ -78,10 +78,14 @@ static void SetSigaction(int signum,
}
struct sigaction new_sigact = {};
- // Address sanitizer needs SA_ONSTACK (causing the signal handler to run on a
- // dedicated stack) in order to be able to detect stack overflows; keep the
- // flag if it's set.
- new_sigact.sa_flags = SA_SIGINFO | (sigact.sa_flags & SA_ONSTACK);
+ // SA_ONSTACK is required for certain runtimes that use small stacks, for
+ // instance the Go runtime.
+ // See https://github.com/golang/go/issues/49075
+ // Address sanitizer also wants SA_ONSTACK, and the fuzzer and sanitizer
+ // often run together.
+ // SA_ONSTACK is a no-op unless someone also calls sigaltstack. That is left
+ // up to code that needs it.
+ new_sigact.sa_flags = SA_SIGINFO | SA_ONSTACK;
new_sigact.sa_sigaction = callback;
if (sigaction(signum, &new_sigact, nullptr)) {
Printf("libFuzzer: sigaction failed with %d\n", errno);
diff --git a/compiler-rt/lib/hwasan/hwasan_interceptors_vfork.S b/compiler-rt/lib/hwasan/hwasan_interceptors_vfork.S
index fd20825e..825f411 100644
--- a/compiler-rt/lib/hwasan/hwasan_interceptors_vfork.S
+++ b/compiler-rt/lib/hwasan/hwasan_interceptors_vfork.S
@@ -11,4 +11,4 @@
NO_EXEC_STACK_DIRECTIVE
-GNU_PROPERTY_BTI_PAC
+GNU_PROPERTY_BTI_PAC_GCS
diff --git a/compiler-rt/lib/hwasan/hwasan_setjmp_aarch64.S b/compiler-rt/lib/hwasan/hwasan_setjmp_aarch64.S
index 0c0abb6..b8d98b0 100644
--- a/compiler-rt/lib/hwasan/hwasan_setjmp_aarch64.S
+++ b/compiler-rt/lib/hwasan/hwasan_setjmp_aarch64.S
@@ -99,4 +99,4 @@ ASM_TRAMPOLINE_ALIAS(_setjmp, setjmp)
// We do not need executable stack.
NO_EXEC_STACK_DIRECTIVE
-GNU_PROPERTY_BTI_PAC
+GNU_PROPERTY_BTI_PAC_GCS
diff --git a/compiler-rt/lib/hwasan/hwasan_tag_mismatch_aarch64.S b/compiler-rt/lib/hwasan/hwasan_tag_mismatch_aarch64.S
index fd060c5..be82475 100644
--- a/compiler-rt/lib/hwasan/hwasan_tag_mismatch_aarch64.S
+++ b/compiler-rt/lib/hwasan/hwasan_tag_mismatch_aarch64.S
@@ -157,4 +157,4 @@ mismatch:
// We do not need executable stack.
NO_EXEC_STACK_DIRECTIVE
-GNU_PROPERTY_BTI_PAC
+GNU_PROPERTY_BTI_PAC_GCS
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
index 51ac1b6..b39eb15 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
@@ -113,6 +113,24 @@ class SizeClassAllocator64 {
// ~(uptr)0.
void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) {
uptr TotalSpaceSize = kSpaceSize + AdditionalSize();
+
+ uptr MaxAddr = GetMaxUserVirtualAddress();
+ // VReport does not call the sanitizer allocator.
+ VReport(3, "Max user virtual address: 0x%zx\n", MaxAddr);
+ VReport(3, "Total space size for primary allocator: 0x%zx\n",
+ TotalSpaceSize);
+ // TODO: revise the check if we ever configure sanitizers to deliberately
+ // map beyond the 2**48 barrier (note that Linux pretends the VMA is
+ // limited to 48-bit for backwards compatibility, but allows apps to
+ // explicitly specify an address beyond that).
+ if (heap_start + TotalSpaceSize >= MaxAddr) {
+ // We can't easily adjust the requested heap size, because kSpaceSize is
+ // const (for optimization) and used throughout the code.
+ VReport(0, "Error: heap size %zx exceeds max user virtual address %zx\n",
+ TotalSpaceSize, MaxAddr);
+ VReport(
+ 0, "Try using a kernel that allows a larger virtual address space\n");
+ }
PremappedHeap = heap_start != 0;
if (PremappedHeap) {
CHECK(!kUsingConstantSpaceBeg);
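
A rough numeric illustration of the guard added above. The sizes and the address ceiling below are hypothetical example values, not the real kSpaceSize or GetMaxUserVirtualAddress(); the point is only that a layout ending at or beyond the ceiling is reported.

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Hypothetical numbers for illustration only.
      const uint64_t kSpaceSize = 1ULL << 46;        // primary allocator space
      const uint64_t kAdditionalSize = 1ULL << 30;   // metadata
      const uint64_t kMaxUserAddr = (1ULL << 47) - 1; // 47-bit user ceiling
      const uint64_t kHeapStart = 1ULL << 46;

      uint64_t total = kSpaceSize + kAdditionalSize;
      if (kHeapStart + total >= kMaxUserAddr)
        std::puts("heap layout exceeds the max user virtual address");
      else
        std::puts("heap layout fits");
      return 0;
    }
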
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S b/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S
index cdfa6f1..5066953 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S
@@ -43,6 +43,6 @@ ASM_SIZE(vfork)
ASM_INTERCEPTOR_TRAMPOLINE(vfork)
ASM_TRAMPOLINE_ALIAS(vfork, vfork)
-GNU_PROPERTY_BTI_PAC
+GNU_PROPERTY_BTI_PAC_GCS
#endif
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_aarch64.S b/compiler-rt/lib/tsan/rtl/tsan_rtl_aarch64.S
index 7d920be..f1d11a3 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_aarch64.S
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_aarch64.S
@@ -222,6 +222,6 @@ ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(__sigsetjmp))
NO_EXEC_STACK_DIRECTIVE
-GNU_PROPERTY_BTI_PAC
+GNU_PROPERTY_BTI_PAC_GCS
#endif
diff --git a/compiler-rt/lib/ubsan_minimal/ubsan_minimal_handlers.cpp b/compiler-rt/lib/ubsan_minimal/ubsan_minimal_handlers.cpp
index b1f4eab..a2a2e36 100644
--- a/compiler-rt/lib/ubsan_minimal/ubsan_minimal_handlers.cpp
+++ b/compiler-rt/lib/ubsan_minimal/ubsan_minimal_handlers.cpp
@@ -97,15 +97,19 @@ SANITIZER_INTERFACE_WEAK_DEF(void, __ubsan_report_error_fatal, const char *kind,
#if defined(__ANDROID__)
extern "C" __attribute__((weak)) void android_set_abort_message(const char *);
-static void abort_with_message(const char *kind, uintptr_t caller) {
+static void abort_with_message(const char *kind, uintptr_t caller,
+ const uintptr_t *address) {
char msg_buf[128];
- format_msg(kind, caller, msg_buf, msg_buf + sizeof(msg_buf));
+ format_msg(kind, caller, address, msg_buf, msg_buf + sizeof(msg_buf));
if (&android_set_abort_message)
android_set_abort_message(msg_buf);
abort();
}
#else
-static void abort_with_message(const char *kind, uintptr_t caller) { abort(); }
+static void abort_with_message(const char *kind, uintptr_t caller,
+ const uintptr_t *address) {
+ abort();
+}
#endif
#if SANITIZER_DEBUG
@@ -132,7 +136,7 @@ void NORETURN CheckFailed(const char *file, int, const char *cond, u64, u64) {
INTERFACE void __ubsan_handle_##name##_minimal_abort() { \
uintptr_t caller = GET_CALLER_PC(); \
__ubsan_report_error_fatal(kind, caller, nullptr); \
- abort_with_message(kind, caller); \
+ abort_with_message(kind, caller, nullptr); \
}
#define HANDLER(name, kind) \
@@ -149,7 +153,7 @@ void NORETURN CheckFailed(const char *file, int, const char *cond, u64, u64) {
const uintptr_t address) { \
uintptr_t caller = GET_CALLER_PC(); \
__ubsan_report_error_fatal(kind, caller, &address); \
- abort_with_message(kind, caller); \
+ abort_with_message(kind, caller, &address); \
}
// A version of a handler that takes a pointer to a value.
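
To show the shape of the change above: the abort helper now receives the faulting address (when one exists) so it can appear in the abort message. A hedged standalone sketch, with a made-up message format rather than the runtime's real format_msg:

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    static void abort_with_message(const char *kind, uintptr_t caller,
                                   const uintptr_t *address) {
      char msg[128];
      if (address)
        std::snprintf(msg, sizeof(msg), "%s by 0x%lx at 0x%lx", kind,
                      (unsigned long)caller, (unsigned long)*address);
      else
        std::snprintf(msg, sizeof(msg), "%s by 0x%lx", kind,
                      (unsigned long)caller);
      std::fputs(msg, stderr);
      std::abort();
    }

    int main() {
      uintptr_t addr = 0x1000; // hypothetical faulting address
      abort_with_message("type-mismatch", 0x401234, &addr);
    }
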
diff --git a/compiler-rt/lib/xray/xray_fdr_logging.cpp b/compiler-rt/lib/xray/xray_fdr_logging.cpp
index 7def356..977a0b9 100644
--- a/compiler-rt/lib/xray/xray_fdr_logging.cpp
+++ b/compiler-rt/lib/xray/xray_fdr_logging.cpp
@@ -73,7 +73,7 @@ static_assert(std::is_trivially_destructible<ThreadLocalData>::value,
static pthread_key_t Key;
// Global BufferQueue.
-static std::byte BufferQueueStorage[sizeof(BufferQueue)];
+alignas(BufferQueue) static std::byte BufferQueueStorage[sizeof(BufferQueue)];
static BufferQueue *BQ = nullptr;
// Global thresholds for function durations.
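
The alignas fix above matters because the storage is later used to construct an object with placement new, and a raw std::byte array only guarantees alignof(std::byte). A minimal sketch of the pattern (a generic example type, not the XRay BufferQueue):

    #include <cstddef>
    #include <new>

    struct Widget {
      long counter;
      double threshold;
    };

    // Without alignas(Widget) the array is only aligned for std::byte, which
    // can leave the placement-new object misaligned.
    alignas(Widget) static std::byte Storage[sizeof(Widget)];
    static Widget *Instance = nullptr;

    void init() { Instance = new (Storage) Widget{0, 1.0}; }
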
diff --git a/compiler-rt/test/profile/Linux/coverage_short_circuit.cpp b/compiler-rt/test/profile/Linux/coverage_short_circuit.cpp
new file mode 100644
index 0000000..54f0c4c
--- /dev/null
+++ b/compiler-rt/test/profile/Linux/coverage_short_circuit.cpp
@@ -0,0 +1,36 @@
+// RUN: %clangxx_profgen -std=c++17 -fuse-ld=lld -fcoverage-mapping -o %t %s
+// RUN: env LLVM_PROFILE_FILE=%t.profraw %run %t
+// RUN: llvm-profdata merge -o %t.profdata %t.profraw
+// RUN: llvm-cov show %t -instr-profile=%t.profdata 2>&1 | FileCheck %s
+
+void foo() { // CHECK: [[@LINE]]| 1|void foo() {
+ bool cond1 = false; // CHECK-NEXT: [[@LINE]]| 1| bool cond1 = false;
+ bool cond2 = true; // CHECK-NEXT: [[@LINE]]| 1| bool cond2 = true;
+ if (cond1 && // CHECK-NEXT: [[@LINE]]| 1| if (cond1 &&
+ cond2) { // CHECK-NEXT: [[@LINE]]| 0| cond2) {
+ } // CHECK-NEXT: [[@LINE]]| 0| }
+} // CHECK-NEXT: [[@LINE]]| 1|}
+
+void bar() { // CHECK: [[@LINE]]| 1|void bar() {
+ bool cond1 = true; // CHECK-NEXT: [[@LINE]]| 1| bool cond1 = true;
+ bool cond2 = false; // CHECK-NEXT: [[@LINE]]| 1| bool cond2 = false;
+ if (cond1 && // CHECK-NEXT: [[@LINE]]| 1| if (cond1 &&
+ cond2) { // CHECK-NEXT: [[@LINE]]| 1| cond2) {
+ } // CHECK-NEXT: [[@LINE]]| 0| }
+} // CHECK-NEXT: [[@LINE]]| 1|}
+
+void baz() { // CHECK: [[@LINE]]| 1|void baz() {
+ bool cond1 = false; // CHECK-NEXT: [[@LINE]]| 1| bool cond1 = false;
+ bool cond2 = true; // CHECK-NEXT: [[@LINE]]| 1| bool cond2 = true;
+ if (cond1 // CHECK-NEXT: [[@LINE]]| 1| if (cond1
+ && // CHECK-NEXT: [[@LINE]]| 0| &&
+ cond2) { // CHECK-NEXT: [[@LINE]]| 0| cond2) {
+ } // CHECK-NEXT: [[@LINE]]| 0| }
+} // CHECK-NEXT: [[@LINE]]| 1|}
+
+int main() {
+ foo();
+ bar();
+ baz();
+ return 0;
+}
diff --git a/flang-rt/lib/runtime/CMakeLists.txt b/flang-rt/lib/runtime/CMakeLists.txt
index 332c087..dc2db1d 100644
--- a/flang-rt/lib/runtime/CMakeLists.txt
+++ b/flang-rt/lib/runtime/CMakeLists.txt
@@ -251,19 +251,33 @@ else()
add_win_flangrt_runtime(STATIC dynamic MultiThreadedDLL INSTALL_WITH_TOOLCHAIN)
add_win_flangrt_runtime(STATIC dynamic_dbg MultiThreadedDebugDLL INSTALL_WITH_TOOLCHAIN)
- # Unittests link against LLVMSupport which is using CMake's default runtime
- # library selection, which is either MultiThreadedDLL or MultiThreadedDebugDLL
- # depending on the configuration. They have to match or linking will fail.
+ # Unittests link against LLVMSupport. If CMAKE_MSVC_RUNTIME_LIBRARY is set,
+ # that will have been used for LLVMSupport so it must also be used here.
+ # Otherwise this will use CMake's default runtime library selection, which
+ # is either MultiThreadedDLL or MultiThreadedDebugDLL depending on the configuration.
+ # They have to match or linking will fail.
if (GENERATOR_IS_MULTI_CONFIG)
# We cannot select an ALIAS library because it may be different
# per configuration. Fall back to CMake's default.
add_win_flangrt_runtime(STATIC unittest "" EXCLUDE_FROM_ALL)
else ()
- string(TOLOWER ${CMAKE_BUILD_TYPE} build_type)
- if (build_type STREQUAL "debug")
- add_library(flang_rt.runtime.unittest ALIAS flang_rt.runtime.dynamic_dbg)
- else ()
- add_library(flang_rt.runtime.unittest ALIAS flang_rt.runtime.dynamic)
- endif ()
+ # Check if CMAKE_MSVC_RUNTIME_LIBRARY was set.
+ if (CMAKE_MSVC_RUNTIME_LIBRARY STREQUAL "MultiThreaded")
+ add_library(flang_rt.runtime.unittest ALIAS flang_rt.runtime.static)
+ elseif (CMAKE_MSVC_RUNTIME_LIBRARY STREQUAL "MultiThreadedDLL")
+ add_library(flang_rt.runtime.unittest ALIAS flang_rt.runtime.dynamic)
+ elseif (CMAKE_MSVC_RUNTIME_LIBRARY STREQUAL "MultiThreadedDebug")
+ add_library(flang_rt.runtime.unittest ALIAS flang_rt.runtime.static_dbg)
+ elseif (CMAKE_MSVC_RUNTIME_LIBRARY STREQUAL "MultiThreadedDebugDLL")
+ add_library(flang_rt.runtime.unittest ALIAS flang_rt.runtime.dynamic_dbg)
+ else()
+ # Default based on the build type.
+ string(TOLOWER ${CMAKE_BUILD_TYPE} build_type)
+ if (build_type STREQUAL "debug")
+ add_library(flang_rt.runtime.unittest ALIAS flang_rt.runtime.dynamic_dbg)
+ else ()
+ add_library(flang_rt.runtime.unittest ALIAS flang_rt.runtime.dynamic)
+ endif ()
+ endif()
endif ()
endif()
diff --git a/flang-rt/unittests/CMakeLists.txt b/flang-rt/unittests/CMakeLists.txt
index 831bc8a..fd63ad1 100644
--- a/flang-rt/unittests/CMakeLists.txt
+++ b/flang-rt/unittests/CMakeLists.txt
@@ -94,14 +94,6 @@ function(add_flangrt_unittest test_dirname)
target_link_libraries(${test_dirname} PRIVATE ${ARG_LINK_LIBS})
add_flangrt_unittest_offload_properties(${test_dirname})
add_flangrt_dependent_libs(${test_dirname})
-
- # Required because LLVMSupport is compiled with this option.
- # FIXME: According to CMake documentation, this is the default. Why is it
- # needed? LLVM's add_unittest doesn't set it either.
- set_target_properties(${test_dirname}
- PROPERTIES
- MSVC_RUNTIME_LIBRARY "MultiThreaded$<$<CONFIG:Debug>:Debug>DLL"
- )
endfunction()
function(add_flangrt_nongtest_unittest test_name)
diff --git a/flang/CMakeLists.txt b/flang/CMakeLists.txt
index 0bfada4..1d16c33 100644
--- a/flang/CMakeLists.txt
+++ b/flang/CMakeLists.txt
@@ -317,7 +317,7 @@ if (NOT ENABLE_LINKER_BUILD_ID)
set(ENABLE_LINKER_BUILD_ID OFF CACHE BOOL "pass --build-id to ld")
endif()
-set(FLANG_DEFAULT_LINKER "" CACHE STRING
+set(FLANG_DEFAULT_LINKER "${CLANG_DEFAULT_LINKER}" CACHE STRING
"Default linker to use (linker name or absolute path, empty for platform default)")
set(FLANG_DEFAULT_RTLIB "" CACHE STRING
diff --git a/flang/include/flang/Config/config.h.cmake b/flang/include/flang/Config/config.h.cmake
index fd34d3f..92fbd14 100644
--- a/flang/include/flang/Config/config.h.cmake
+++ b/flang/include/flang/Config/config.h.cmake
@@ -1,10 +1,10 @@
-#===-- include/flang/Config/config.h.cmake ---------------------------------===#
-#
-# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-# See https://llvm.org/LICENSE.txt for license information.
-# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-#
-#===------------------------------------------------------------------------===#
+//===-- include/flang/Config/config.h.cmake -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
/* This generated file is for internal use. Do not include it from headers. */
@@ -16,6 +16,8 @@
#define FLANG_VERSION "${FLANG_VERSION}"
+#define FLANG_DEFAULT_LINKER "${FLANG_DEFAULT_LINKER}"
+
#endif
diff --git a/flang/include/flang/Lower/Cuda.h b/flang/include/flang/Lower/CUDA.h
index b6f849e..6c2e6d7 100644
--- a/flang/include/flang/Lower/Cuda.h
+++ b/flang/include/flang/Lower/CUDA.h
@@ -1,4 +1,4 @@
-//===-- Lower/Cuda.h -- Cuda Fortran utilities ------------------*- C++ -*-===//
+//===-- Lower/CUDA.h -- CUDA Fortran utilities ------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -14,13 +14,23 @@
#define FORTRAN_LOWER_CUDA_H
#include "flang/Optimizer/Builder/FIRBuilder.h"
+#include "flang/Optimizer/Builder/MutableBox.h"
#include "flang/Optimizer/Dialect/CUF/CUFOps.h"
+#include "flang/Runtime/allocator-registry-consts.h"
#include "flang/Semantics/tools.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/OpenACC/OpenACC.h"
+namespace mlir {
+class Value;
+class Location;
+class MLIRContext;
+} // namespace mlir
+
namespace Fortran::lower {
+class AbstractConverter;
+
static inline unsigned getAllocatorIdx(const Fortran::semantics::Symbol &sym) {
std::optional<Fortran::common::CUDADataAttr> cudaAttr =
Fortran::semantics::GetCUDADataAttr(&sym.GetUltimate());
@@ -37,6 +47,21 @@ static inline unsigned getAllocatorIdx(const Fortran::semantics::Symbol &sym) {
return kDefaultAllocator;
}
+void initializeDeviceComponentAllocator(
+ Fortran::lower::AbstractConverter &converter,
+ const Fortran::semantics::Symbol &sym, const fir::MutableBoxValue &box);
+
+mlir::Type gatherDeviceComponentCoordinatesAndType(
+ fir::FirOpBuilder &builder, mlir::Location loc,
+ const Fortran::semantics::Symbol &sym, fir::RecordType recTy,
+ llvm::SmallVector<mlir::Value> &coordinates);
+
+/// Translate the CUDA Fortran attributes of \p sym into the FIR CUDA attribute
+/// representation.
+cuf::DataAttributeAttr
+translateSymbolCUFDataAttribute(mlir::MLIRContext *mlirContext,
+ const Fortran::semantics::Symbol &sym);
+
} // end namespace Fortran::lower
#endif // FORTRAN_LOWER_CUDA_H
diff --git a/flang/include/flang/Lower/ConvertVariable.h b/flang/include/flang/Lower/ConvertVariable.h
index e05625a..b938f6be 100644
--- a/flang/include/flang/Lower/ConvertVariable.h
+++ b/flang/include/flang/Lower/ConvertVariable.h
@@ -162,12 +162,6 @@ translateSymbolAttributes(mlir::MLIRContext *mlirContext,
fir::FortranVariableFlagsEnum extraFlags =
fir::FortranVariableFlagsEnum::None);
-/// Translate the CUDA Fortran attributes of \p sym into the FIR CUDA attribute
-/// representation.
-cuf::DataAttributeAttr
-translateSymbolCUFDataAttribute(mlir::MLIRContext *mlirContext,
- const Fortran::semantics::Symbol &sym);
-
/// Map a symbol to a given fir::ExtendedValue. This will generate an
/// hlfir.declare when lowering to HLFIR and map the hlfir.declare result to the
/// symbol.
diff --git a/flang/include/flang/Optimizer/Builder/FIRBuilder.h b/flang/include/flang/Optimizer/Builder/FIRBuilder.h
index d8b6a9f..e3a44f1 100644
--- a/flang/include/flang/Optimizer/Builder/FIRBuilder.h
+++ b/flang/include/flang/Optimizer/Builder/FIRBuilder.h
@@ -944,16 +944,15 @@ void genDimInfoFromBox(fir::FirOpBuilder &builder, mlir::Location loc,
llvm::SmallVectorImpl<mlir::Value> *strides);
/// Generate an LLVM dialect lifetime start marker at the current insertion
-/// point given an fir.alloca and its constant size in bytes. Returns the value
-/// to be passed to the lifetime end marker.
+/// point given an fir.alloca. Returns the value to be passed to the lifetime
+/// end marker.
mlir::Value genLifetimeStart(mlir::OpBuilder &builder, mlir::Location loc,
- fir::AllocaOp alloc, int64_t size,
- const mlir::DataLayout *dl);
+ fir::AllocaOp alloc, const mlir::DataLayout *dl);
/// Generate an LLVM dialect lifetime end marker at the current insertion point
-/// given an llvm.ptr value and the constant size in bytes of its storage.
+/// given an llvm.ptr value.
void genLifetimeEnd(mlir::OpBuilder &builder, mlir::Location loc,
- mlir::Value mem, int64_t size);
+ mlir::Value mem);
} // namespace fir::factory
diff --git a/flang/include/flang/Parser/parse-tree.h b/flang/include/flang/Parser/parse-tree.h
index 8302e40..e72190f 100644
--- a/flang/include/flang/Parser/parse-tree.h
+++ b/flang/include/flang/Parser/parse-tree.h
@@ -4986,9 +4986,9 @@ struct OmpEndCriticalDirective {
CharBlock source;
std::tuple<Verbatim, std::optional<Name>> t;
};
-struct OpenMPCriticalConstruct {
- TUPLE_CLASS_BOILERPLATE(OpenMPCriticalConstruct);
- std::tuple<OmpCriticalDirective, Block, OmpEndCriticalDirective> t;
+
+struct OpenMPCriticalConstruct : public OmpBlockConstruct {
+ INHERITED_TUPLE_CLASS_BOILERPLATE(OpenMPCriticalConstruct, OmpBlockConstruct);
};
// 2.11.3 allocate -> ALLOCATE [(variable-name-list)] [clause]
diff --git a/flang/lib/Semantics/openmp-utils.h b/flang/include/flang/Semantics/openmp-utils.h
index b8ad9ed..b8ad9ed 100644
--- a/flang/lib/Semantics/openmp-utils.h
+++ b/flang/include/flang/Semantics/openmp-utils.h
diff --git a/flang/lib/Evaluate/tools.cpp b/flang/lib/Evaluate/tools.cpp
index 9c059b0..3b2c4f9 100644
--- a/flang/lib/Evaluate/tools.cpp
+++ b/flang/lib/Evaluate/tools.cpp
@@ -1129,7 +1129,7 @@ struct CollectCudaSymbolsHelper : public SetTraverse<CollectCudaSymbolsHelper,
CollectCudaSymbolsHelper() : Base{*this} {}
using Base::operator();
semantics::UnorderedSymbolSet operator()(const Symbol &symbol) const {
- return {symbol};
+ return {symbol.GetUltimate()};
}
// Overload some of the operator() to filter out the symbols that are not
// of interest for CUDA data transfer logic.
diff --git a/flang/lib/Lower/Allocatable.cpp b/flang/lib/Lower/Allocatable.cpp
index 219f920..ce9d894 100644
--- a/flang/lib/Lower/Allocatable.cpp
+++ b/flang/lib/Lower/Allocatable.cpp
@@ -13,9 +13,9 @@
#include "flang/Lower/Allocatable.h"
#include "flang/Evaluate/tools.h"
#include "flang/Lower/AbstractConverter.h"
+#include "flang/Lower/CUDA.h"
#include "flang/Lower/ConvertType.h"
#include "flang/Lower/ConvertVariable.h"
-#include "flang/Lower/Cuda.h"
#include "flang/Lower/IterationSpace.h"
#include "flang/Lower/Mangler.h"
#include "flang/Lower/OpenACC.h"
@@ -445,10 +445,14 @@ private:
/*mustBeHeap=*/true);
}
- void postAllocationAction(const Allocation &alloc) {
+ void postAllocationAction(const Allocation &alloc,
+ const fir::MutableBoxValue &box) {
if (alloc.getSymbol().test(Fortran::semantics::Symbol::Flag::AccDeclare))
Fortran::lower::attachDeclarePostAllocAction(converter, builder,
alloc.getSymbol());
+ if (Fortran::semantics::HasCUDAComponent(alloc.getSymbol()))
+ Fortran::lower::initializeDeviceComponentAllocator(
+ converter, alloc.getSymbol(), box);
}
void setPinnedToFalse() {
@@ -481,7 +485,7 @@ private:
// Pointers must use PointerAllocate so that their deallocations
// can be validated.
genInlinedAllocation(alloc, box);
- postAllocationAction(alloc);
+ postAllocationAction(alloc, box);
setPinnedToFalse();
return;
}
@@ -504,7 +508,7 @@ private:
genCudaAllocate(builder, loc, box, errorManager, alloc.getSymbol());
}
fir::factory::syncMutableBoxFromIRBox(builder, loc, box);
- postAllocationAction(alloc);
+ postAllocationAction(alloc, box);
errorManager.assignStat(builder, loc, stat);
}
@@ -647,7 +651,7 @@ private:
setPinnedToFalse();
}
fir::factory::syncMutableBoxFromIRBox(builder, loc, box);
- postAllocationAction(alloc);
+ postAllocationAction(alloc, box);
errorManager.assignStat(builder, loc, stat);
}
diff --git a/flang/lib/Lower/Bridge.cpp b/flang/lib/Lower/Bridge.cpp
index a5f0bef..d16488d 100644
--- a/flang/lib/Lower/Bridge.cpp
+++ b/flang/lib/Lower/Bridge.cpp
@@ -13,6 +13,7 @@
#include "flang/Lower/Bridge.h"
#include "flang/Lower/Allocatable.h"
+#include "flang/Lower/CUDA.h"
#include "flang/Lower/CallInterface.h"
#include "flang/Lower/Coarray.h"
#include "flang/Lower/ConvertCall.h"
@@ -20,7 +21,6 @@
#include "flang/Lower/ConvertExprToHLFIR.h"
#include "flang/Lower/ConvertType.h"
#include "flang/Lower/ConvertVariable.h"
-#include "flang/Lower/Cuda.h"
#include "flang/Lower/DirectivesCommon.h"
#include "flang/Lower/HostAssociations.h"
#include "flang/Lower/IO.h"
@@ -2122,6 +2122,9 @@ private:
}
}
+ if (!doConcurrentLoopOp)
+ return;
+
llvm::SmallVector<bool> reduceVarByRef;
llvm::SmallVector<mlir::Attribute> reductionDeclSymbols;
llvm::SmallVector<mlir::Attribute> nestReduceAttrs;
diff --git a/flang/lib/Lower/CMakeLists.txt b/flang/lib/Lower/CMakeLists.txt
index 8e20abf..1d1c7ddd 100644
--- a/flang/lib/Lower/CMakeLists.txt
+++ b/flang/lib/Lower/CMakeLists.txt
@@ -15,6 +15,7 @@ add_flang_library(FortranLower
ConvertProcedureDesignator.cpp
ConvertType.cpp
ConvertVariable.cpp
+ CUDA.cpp
CustomIntrinsicCall.cpp
HlfirIntrinsics.cpp
HostAssociations.cpp
diff --git a/flang/lib/Lower/CUDA.cpp b/flang/lib/Lower/CUDA.cpp
new file mode 100644
index 0000000..f6d0078
--- /dev/null
+++ b/flang/lib/Lower/CUDA.cpp
@@ -0,0 +1,157 @@
+//===-- CUDA.cpp -- CUDA Fortran specific lowering ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Coding style: https://mlir.llvm.org/getting_started/DeveloperGuide/
+//
+//===----------------------------------------------------------------------===//
+
+#include "flang/Lower/CUDA.h"
+#include "flang/Lower/AbstractConverter.h"
+#include "flang/Optimizer/Builder/Todo.h"
+
+#define DEBUG_TYPE "flang-lower-cuda"
+
+void Fortran::lower::initializeDeviceComponentAllocator(
+ Fortran::lower::AbstractConverter &converter,
+ const Fortran::semantics::Symbol &sym, const fir::MutableBoxValue &box) {
+ if (const auto *details{
+ sym.GetUltimate()
+ .detailsIf<Fortran::semantics::ObjectEntityDetails>()}) {
+ const Fortran::semantics::DeclTypeSpec *type{details->type()};
+ const Fortran::semantics::DerivedTypeSpec *derived{type ? type->AsDerived()
+ : nullptr};
+ if (derived) {
+ if (!FindCUDADeviceAllocatableUltimateComponent(*derived))
+ return; // No device components.
+
+ fir::FirOpBuilder &builder = converter.getFirOpBuilder();
+ mlir::Location loc = converter.getCurrentLocation();
+
+ mlir::Type baseTy = fir::unwrapRefType(box.getAddr().getType());
+
+ // Only pointers and allocatables need post-allocation initialization
+ // of component descriptors.
+ if (!fir::isAllocatableType(baseTy) && !fir::isPointerType(baseTy))
+ return;
+
+ // Extract the derived type.
+ mlir::Type ty = fir::getDerivedType(baseTy);
+ auto recTy = mlir::dyn_cast<fir::RecordType>(ty);
+ assert(recTy && "expected fir::RecordType");
+
+ if (auto boxTy = mlir::dyn_cast<fir::BaseBoxType>(baseTy))
+ baseTy = boxTy.getEleTy();
+ baseTy = fir::unwrapRefType(baseTy);
+
+ Fortran::semantics::UltimateComponentIterator components{*derived};
+ mlir::Value loadedBox = fir::LoadOp::create(builder, loc, box.getAddr());
+ mlir::Value addr;
+ if (auto seqTy = mlir::dyn_cast<fir::SequenceType>(baseTy)) {
+ mlir::Type idxTy = builder.getIndexType();
+ mlir::Value one = builder.createIntegerConstant(loc, idxTy, 1);
+ mlir::Value zero = builder.createIntegerConstant(loc, idxTy, 0);
+ llvm::SmallVector<fir::DoLoopOp> loops;
+ llvm::SmallVector<mlir::Value> indices;
+ llvm::SmallVector<mlir::Value> extents;
+ for (unsigned i = 0; i < seqTy.getDimension(); ++i) {
+ mlir::Value dim = builder.createIntegerConstant(loc, idxTy, i);
+ auto dimInfo = fir::BoxDimsOp::create(builder, loc, idxTy, idxTy,
+ idxTy, loadedBox, dim);
+ mlir::Value lbub = mlir::arith::AddIOp::create(
+ builder, loc, dimInfo.getResult(0), dimInfo.getResult(1));
+ mlir::Value ext =
+ mlir::arith::SubIOp::create(builder, loc, lbub, one);
+ mlir::Value cmp = mlir::arith::CmpIOp::create(
+ builder, loc, mlir::arith::CmpIPredicate::sgt, ext, zero);
+ ext = mlir::arith::SelectOp::create(builder, loc, cmp, ext, zero);
+ extents.push_back(ext);
+
+ auto loop = fir::DoLoopOp::create(
+ builder, loc, dimInfo.getResult(0), dimInfo.getResult(1),
+ dimInfo.getResult(2), /*isUnordered=*/true,
+ /*finalCount=*/false, mlir::ValueRange{});
+ loops.push_back(loop);
+ indices.push_back(loop.getInductionVar());
+ builder.setInsertionPointToStart(loop.getBody());
+ }
+ mlir::Value boxAddr = fir::BoxAddrOp::create(builder, loc, loadedBox);
+ auto shape = fir::ShapeOp::create(builder, loc, extents);
+ addr = fir::ArrayCoorOp::create(
+ builder, loc, fir::ReferenceType::get(recTy), boxAddr, shape,
+ /*slice=*/mlir::Value{}, indices, /*typeparms=*/mlir::ValueRange{});
+ } else {
+ addr = fir::BoxAddrOp::create(builder, loc, loadedBox);
+ }
+ for (const auto &compSym : components) {
+ if (Fortran::semantics::IsDeviceAllocatable(compSym)) {
+ llvm::SmallVector<mlir::Value> coord;
+ mlir::Type fieldTy = gatherDeviceComponentCoordinatesAndType(
+ builder, loc, compSym, recTy, coord);
+ assert(coord.size() == 1 && "expect one coordinate");
+ mlir::Value comp = fir::CoordinateOp::create(
+ builder, loc, builder.getRefType(fieldTy), addr, coord[0]);
+ cuf::DataAttributeAttr dataAttr =
+ Fortran::lower::translateSymbolCUFDataAttribute(
+ builder.getContext(), compSym);
+ cuf::SetAllocatorIndexOp::create(builder, loc, comp, dataAttr);
+ }
+ }
+ }
+ }
+}
+
+mlir::Type Fortran::lower::gatherDeviceComponentCoordinatesAndType(
+ fir::FirOpBuilder &builder, mlir::Location loc,
+ const Fortran::semantics::Symbol &sym, fir::RecordType recTy,
+ llvm::SmallVector<mlir::Value> &coordinates) {
+ unsigned fieldIdx = recTy.getFieldIndex(sym.name().ToString());
+ mlir::Type fieldTy;
+ if (fieldIdx != std::numeric_limits<unsigned>::max()) {
+ // Field found in the base record type.
+ auto fieldName = recTy.getTypeList()[fieldIdx].first;
+ fieldTy = recTy.getTypeList()[fieldIdx].second;
+ mlir::Value fieldIndex = fir::FieldIndexOp::create(
+ builder, loc, fir::FieldType::get(fieldTy.getContext()), fieldName,
+ recTy,
+ /*typeParams=*/mlir::ValueRange{});
+ coordinates.push_back(fieldIndex);
+ } else {
+ // Field not found in base record type, search in potential
+ // record type components.
+ for (auto component : recTy.getTypeList()) {
+ if (auto childRecTy = mlir::dyn_cast<fir::RecordType>(component.second)) {
+ fieldIdx = childRecTy.getFieldIndex(sym.name().ToString());
+ if (fieldIdx != std::numeric_limits<unsigned>::max()) {
+ mlir::Value parentFieldIndex = fir::FieldIndexOp::create(
+ builder, loc, fir::FieldType::get(childRecTy.getContext()),
+ component.first, recTy,
+ /*typeParams=*/mlir::ValueRange{});
+ coordinates.push_back(parentFieldIndex);
+ auto fieldName = childRecTy.getTypeList()[fieldIdx].first;
+ fieldTy = childRecTy.getTypeList()[fieldIdx].second;
+ mlir::Value childFieldIndex = fir::FieldIndexOp::create(
+ builder, loc, fir::FieldType::get(fieldTy.getContext()),
+ fieldName, childRecTy,
+ /*typeParams=*/mlir::ValueRange{});
+ coordinates.push_back(childFieldIndex);
+ break;
+ }
+ }
+ }
+ }
+ if (coordinates.empty())
+ TODO(loc, "device resident component in complex derived-type hierarchy");
+ return fieldTy;
+}
+
+cuf::DataAttributeAttr Fortran::lower::translateSymbolCUFDataAttribute(
+ mlir::MLIRContext *mlirContext, const Fortran::semantics::Symbol &sym) {
+ std::optional<Fortran::common::CUDADataAttr> cudaAttr =
+ Fortran::semantics::GetCUDADataAttr(&sym.GetUltimate());
+ return cuf::getDataAttribute(mlirContext, cudaAttr);
+}
diff --git a/flang/lib/Lower/ConvertVariable.cpp b/flang/lib/Lower/ConvertVariable.cpp
index a4a8a69..fd66592 100644
--- a/flang/lib/Lower/ConvertVariable.cpp
+++ b/flang/lib/Lower/ConvertVariable.cpp
@@ -14,12 +14,12 @@
#include "flang/Lower/AbstractConverter.h"
#include "flang/Lower/Allocatable.h"
#include "flang/Lower/BoxAnalyzer.h"
+#include "flang/Lower/CUDA.h"
#include "flang/Lower/CallInterface.h"
#include "flang/Lower/ConvertConstant.h"
#include "flang/Lower/ConvertExpr.h"
#include "flang/Lower/ConvertExprToHLFIR.h"
#include "flang/Lower/ConvertProcedureDesignator.h"
-#include "flang/Lower/Cuda.h"
#include "flang/Lower/Mangler.h"
#include "flang/Lower/PFTBuilder.h"
#include "flang/Lower/StatementContext.h"
@@ -814,81 +814,24 @@ initializeDeviceComponentAllocator(Fortran::lower::AbstractConverter &converter,
baseTy = boxTy.getEleTy();
baseTy = fir::unwrapRefType(baseTy);
- if (mlir::isa<fir::SequenceType>(baseTy) &&
- (fir::isAllocatableType(fir::getBase(exv).getType()) ||
- fir::isPointerType(fir::getBase(exv).getType())))
+ if (fir::isAllocatableType(fir::getBase(exv).getType()) ||
+ fir::isPointerType(fir::getBase(exv).getType()))
return; // Allocator index needs to be set after allocation.
auto recTy =
mlir::dyn_cast<fir::RecordType>(fir::unwrapSequenceType(baseTy));
assert(recTy && "expected fir::RecordType");
- llvm::SmallVector<mlir::Value> coordinates;
Fortran::semantics::UltimateComponentIterator components{*derived};
for (const auto &sym : components) {
if (Fortran::semantics::IsDeviceAllocatable(sym)) {
- unsigned fieldIdx = recTy.getFieldIndex(sym.name().ToString());
- mlir::Type fieldTy;
- llvm::SmallVector<mlir::Value> coordinates;
-
- if (fieldIdx != std::numeric_limits<unsigned>::max()) {
- // Field found in the base record type.
- auto fieldName = recTy.getTypeList()[fieldIdx].first;
- fieldTy = recTy.getTypeList()[fieldIdx].second;
- mlir::Value fieldIndex = fir::FieldIndexOp::create(
- builder, loc, fir::FieldType::get(fieldTy.getContext()),
- fieldName, recTy,
- /*typeParams=*/mlir::ValueRange{});
- coordinates.push_back(fieldIndex);
- } else {
- // Field not found in base record type, search in potential
- // record type components.
- for (auto component : recTy.getTypeList()) {
- if (auto childRecTy =
- mlir::dyn_cast<fir::RecordType>(component.second)) {
- fieldIdx = childRecTy.getFieldIndex(sym.name().ToString());
- if (fieldIdx != std::numeric_limits<unsigned>::max()) {
- mlir::Value parentFieldIndex = fir::FieldIndexOp::create(
- builder, loc,
- fir::FieldType::get(childRecTy.getContext()),
- component.first, recTy,
- /*typeParams=*/mlir::ValueRange{});
- coordinates.push_back(parentFieldIndex);
- auto fieldName = childRecTy.getTypeList()[fieldIdx].first;
- fieldTy = childRecTy.getTypeList()[fieldIdx].second;
- mlir::Value childFieldIndex = fir::FieldIndexOp::create(
- builder, loc, fir::FieldType::get(fieldTy.getContext()),
- fieldName, childRecTy,
- /*typeParams=*/mlir::ValueRange{});
- coordinates.push_back(childFieldIndex);
- break;
- }
- }
- }
- }
-
- if (coordinates.empty())
- TODO(loc, "device resident component in complex derived-type "
- "hierarchy");
-
+ llvm::SmallVector<mlir::Value> coord;
+ mlir::Type fieldTy =
+ Fortran::lower::gatherDeviceComponentCoordinatesAndType(
+ builder, loc, sym, recTy, coord);
mlir::Value base = fir::getBase(exv);
- mlir::Value comp;
- if (mlir::isa<fir::BaseBoxType>(fir::unwrapRefType(base.getType()))) {
- mlir::Value box = fir::LoadOp::create(builder, loc, base);
- mlir::Value addr = fir::BoxAddrOp::create(builder, loc, box);
- llvm::SmallVector<mlir::Value> lenParams;
- assert(coordinates.size() == 1 && "expect one coordinate");
- auto field = mlir::dyn_cast<fir::FieldIndexOp>(
- coordinates[0].getDefiningOp());
- comp = hlfir::DesignateOp::create(
- builder, loc, builder.getRefType(fieldTy), addr,
- /*component=*/field.getFieldName(),
- /*componentShape=*/mlir::Value{},
- hlfir::DesignateOp::Subscripts{});
- } else {
- comp = fir::CoordinateOp::create(
- builder, loc, builder.getRefType(fieldTy), base, coordinates);
- }
+ mlir::Value comp = fir::CoordinateOp::create(
+ builder, loc, builder.getRefType(fieldTy), base, coord);
cuf::DataAttributeAttr dataAttr =
Fortran::lower::translateSymbolCUFDataAttribute(
builder.getContext(), sym);
@@ -1950,13 +1893,6 @@ fir::FortranVariableFlagsAttr Fortran::lower::translateSymbolAttributes(
return fir::FortranVariableFlagsAttr::get(mlirContext, flags);
}
-cuf::DataAttributeAttr Fortran::lower::translateSymbolCUFDataAttribute(
- mlir::MLIRContext *mlirContext, const Fortran::semantics::Symbol &sym) {
- std::optional<Fortran::common::CUDADataAttr> cudaAttr =
- Fortran::semantics::GetCUDADataAttr(&sym.GetUltimate());
- return cuf::getDataAttribute(mlirContext, cudaAttr);
-}
-
static bool
isCapturedInInternalProcedure(Fortran::lower::AbstractConverter &converter,
const Fortran::semantics::Symbol &sym) {
diff --git a/flang/lib/Lower/OpenMP/OpenMP.cpp b/flang/lib/Lower/OpenMP/OpenMP.cpp
index db6a0e2..4ce9a0e 100644
--- a/flang/lib/Lower/OpenMP/OpenMP.cpp
+++ b/flang/lib/Lower/OpenMP/OpenMP.cpp
@@ -34,6 +34,7 @@
#include "flang/Parser/openmp-utils.h"
#include "flang/Parser/parse-tree.h"
#include "flang/Semantics/openmp-directive-sets.h"
+#include "flang/Semantics/openmp-utils.h"
#include "flang/Semantics/tools.h"
#include "flang/Support/Flags.h"
#include "flang/Support/OpenMP-utils.h"
@@ -3820,18 +3821,29 @@ static void genOMP(lower::AbstractConverter &converter, lower::SymMap &symTable,
semantics::SemanticsContext &semaCtx,
lower::pft::Evaluation &eval,
const parser::OpenMPCriticalConstruct &criticalConstruct) {
- const auto &cd = std::get<parser::OmpCriticalDirective>(criticalConstruct.t);
- List<Clause> clauses =
- makeClauses(std::get<parser::OmpClauseList>(cd.t), semaCtx);
+ const parser::OmpDirectiveSpecification &beginSpec =
+ criticalConstruct.BeginDir();
+ List<Clause> clauses = makeClauses(beginSpec.Clauses(), semaCtx);
ConstructQueue queue{buildConstructQueue(
- converter.getFirOpBuilder().getModule(), semaCtx, eval, cd.source,
+ converter.getFirOpBuilder().getModule(), semaCtx, eval, beginSpec.source,
llvm::omp::Directive::OMPD_critical, clauses)};
- const auto &name = std::get<std::optional<parser::Name>>(cd.t);
+ std::optional<parser::Name> critName;
+ const parser::OmpArgumentList &args = beginSpec.Arguments();
+ if (!args.v.empty()) {
+ // All of these things should be guaranteed to exist after semantic checks.
+ auto *object = parser::Unwrap<parser::OmpObject>(args.v.front());
+ assert(object && "Expecting object as argument");
+ auto *designator = semantics::omp::GetDesignatorFromObj(*object);
+ assert(designator && "Expecting designator in argument");
+ auto *name = semantics::getDesignatorNameIfDataRef(*designator);
+ assert(name && "Expecting dataref in designator");
+ critName = *name;
+ }
mlir::Location currentLocation = converter.getCurrentLocation();
genCriticalOp(converter, symTable, semaCtx, eval, currentLocation, queue,
- queue.begin(), name);
+ queue.begin(), critName);
}
static void genOMP(lower::AbstractConverter &converter, lower::SymMap &symTable,
diff --git a/flang/lib/Optimizer/Builder/FIRBuilder.cpp b/flang/lib/Optimizer/Builder/FIRBuilder.cpp
index 87a52ff..b6baefb 100644
--- a/flang/lib/Optimizer/Builder/FIRBuilder.cpp
+++ b/flang/lib/Optimizer/Builder/FIRBuilder.cpp
@@ -1947,17 +1947,17 @@ void fir::factory::genDimInfoFromBox(
mlir::Value fir::factory::genLifetimeStart(mlir::OpBuilder &builder,
mlir::Location loc,
- fir::AllocaOp alloc, int64_t size,
+ fir::AllocaOp alloc,
const mlir::DataLayout *dl) {
mlir::Type ptrTy = mlir::LLVM::LLVMPointerType::get(
alloc.getContext(), getAllocaAddressSpace(dl));
mlir::Value cast =
fir::ConvertOp::create(builder, loc, ptrTy, alloc.getResult());
- mlir::LLVM::LifetimeStartOp::create(builder, loc, size, cast);
+ mlir::LLVM::LifetimeStartOp::create(builder, loc, cast);
return cast;
}
void fir::factory::genLifetimeEnd(mlir::OpBuilder &builder, mlir::Location loc,
- mlir::Value cast, int64_t size) {
- mlir::LLVM::LifetimeEndOp::create(builder, loc, size, cast);
+ mlir::Value cast) {
+ mlir::LLVM::LifetimeEndOp::create(builder, loc, cast);
}
diff --git a/flang/lib/Optimizer/Transforms/StackArrays.cpp b/flang/lib/Optimizer/Transforms/StackArrays.cpp
index 0d13129..72ea2a6 100644
--- a/flang/lib/Optimizer/Transforms/StackArrays.cpp
+++ b/flang/lib/Optimizer/Transforms/StackArrays.cpp
@@ -813,10 +813,10 @@ void AllocMemConversion::insertLifetimeMarkers(
mlir::OpBuilder::InsertionGuard insertGuard(rewriter);
rewriter.setInsertionPoint(oldAlloc);
mlir::Value ptr = fir::factory::genLifetimeStart(
- rewriter, newAlloc.getLoc(), newAlloc, *size, &*dl);
+ rewriter, newAlloc.getLoc(), newAlloc, &*dl);
visitFreeMemOp(oldAlloc, [&](mlir::Operation *op) {
rewriter.setInsertionPoint(op);
- fir::factory::genLifetimeEnd(rewriter, op->getLoc(), ptr, *size);
+ fir::factory::genLifetimeEnd(rewriter, op->getLoc(), ptr);
});
newAlloc->setAttr(attrName, rewriter.getUnitAttr());
}
diff --git a/flang/lib/Parser/openmp-parsers.cpp b/flang/lib/Parser/openmp-parsers.cpp
index 84d1e81..46b1486 100644
--- a/flang/lib/Parser/openmp-parsers.cpp
+++ b/flang/lib/Parser/openmp-parsers.cpp
@@ -1758,17 +1758,8 @@ TYPE_PARSER(sourced(construct<OpenMPDeclareMapperConstruct>(
TYPE_PARSER(construct<OmpReductionCombiner>(Parser<AssignmentStmt>{}) ||
construct<OmpReductionCombiner>(Parser<FunctionReference>{}))
-// 2.13.2 OMP CRITICAL
-TYPE_PARSER(startOmpLine >>
- sourced(construct<OmpEndCriticalDirective>(
- verbatim("END CRITICAL"_tok), maybe(parenthesized(name)))) /
- endOmpLine)
-TYPE_PARSER(sourced(construct<OmpCriticalDirective>(verbatim("CRITICAL"_tok),
- maybe(parenthesized(name)), Parser<OmpClauseList>{})) /
- endOmpLine)
-
TYPE_PARSER(construct<OpenMPCriticalConstruct>(
- Parser<OmpCriticalDirective>{}, block, Parser<OmpEndCriticalDirective>{}))
+ OmpBlockConstructParser{llvm::omp::Directive::OMPD_critical}))
// 2.11.3 Executable Allocate directive
TYPE_PARSER(
diff --git a/flang/lib/Parser/unparse.cpp b/flang/lib/Parser/unparse.cpp
index 46141e2..4f8d498 100644
--- a/flang/lib/Parser/unparse.cpp
+++ b/flang/lib/Parser/unparse.cpp
@@ -2606,9 +2606,7 @@ public:
EndOpenMP();
}
void Unparse(const OpenMPCriticalConstruct &x) {
- Walk(std::get<OmpCriticalDirective>(x.t));
- Walk(std::get<Block>(x.t), "");
- Walk(std::get<OmpEndCriticalDirective>(x.t));
+ Unparse(static_cast<const OmpBlockConstruct &>(x));
}
void Unparse(const OmpDeclareTargetWithList &x) {
Put("("), Walk(x.v), Put(")");
diff --git a/flang/lib/Semantics/check-omp-atomic.cpp b/flang/lib/Semantics/check-omp-atomic.cpp
index a5fdabf..fcb0f9a 100644
--- a/flang/lib/Semantics/check-omp-atomic.cpp
+++ b/flang/lib/Semantics/check-omp-atomic.cpp
@@ -11,13 +11,13 @@
//===----------------------------------------------------------------------===//
#include "check-omp-structure.h"
-#include "openmp-utils.h"
#include "flang/Common/indirection.h"
#include "flang/Evaluate/expression.h"
#include "flang/Evaluate/tools.h"
#include "flang/Parser/char-block.h"
#include "flang/Parser/parse-tree.h"
+#include "flang/Semantics/openmp-utils.h"
#include "flang/Semantics/symbol.h"
#include "flang/Semantics/tools.h"
#include "flang/Semantics/type.h"
diff --git a/flang/lib/Semantics/check-omp-loop.cpp b/flang/lib/Semantics/check-omp-loop.cpp
index 59d57a2..8dad1f5 100644
--- a/flang/lib/Semantics/check-omp-loop.cpp
+++ b/flang/lib/Semantics/check-omp-loop.cpp
@@ -13,7 +13,6 @@
#include "check-omp-structure.h"
#include "check-directive-structure.h"
-#include "openmp-utils.h"
#include "flang/Common/idioms.h"
#include "flang/Common/visit.h"
@@ -23,6 +22,7 @@
#include "flang/Parser/parse-tree.h"
#include "flang/Parser/tools.h"
#include "flang/Semantics/openmp-modifiers.h"
+#include "flang/Semantics/openmp-utils.h"
#include "flang/Semantics/semantics.h"
#include "flang/Semantics/symbol.h"
#include "flang/Semantics/tools.h"
diff --git a/flang/lib/Semantics/check-omp-metadirective.cpp b/flang/lib/Semantics/check-omp-metadirective.cpp
index 03487da..cf5ea90 100644
--- a/flang/lib/Semantics/check-omp-metadirective.cpp
+++ b/flang/lib/Semantics/check-omp-metadirective.cpp
@@ -12,8 +12,6 @@
#include "check-omp-structure.h"
-#include "openmp-utils.h"
-
#include "flang/Common/idioms.h"
#include "flang/Common/indirection.h"
#include "flang/Common/visit.h"
@@ -21,6 +19,7 @@
#include "flang/Parser/message.h"
#include "flang/Parser/parse-tree.h"
#include "flang/Semantics/openmp-modifiers.h"
+#include "flang/Semantics/openmp-utils.h"
#include "flang/Semantics/tools.h"
#include "llvm/Frontend/OpenMP/OMP.h"
diff --git a/flang/lib/Semantics/check-omp-structure.cpp b/flang/lib/Semantics/check-omp-structure.cpp
index a9c56c3..cbe6b2c 100644
--- a/flang/lib/Semantics/check-omp-structure.cpp
+++ b/flang/lib/Semantics/check-omp-structure.cpp
@@ -10,7 +10,6 @@
#include "check-directive-structure.h"
#include "definable.h"
-#include "openmp-utils.h"
#include "resolve-names-utils.h"
#include "flang/Common/idioms.h"
@@ -27,6 +26,7 @@
#include "flang/Semantics/expression.h"
#include "flang/Semantics/openmp-directive-sets.h"
#include "flang/Semantics/openmp-modifiers.h"
+#include "flang/Semantics/openmp-utils.h"
#include "flang/Semantics/scope.h"
#include "flang/Semantics/semantics.h"
#include "flang/Semantics/symbol.h"
@@ -537,14 +537,6 @@ template <typename Checker> struct DirectiveSpellingVisitor {
checker_(x.v.source, Directive::OMPD_assume);
return false;
}
- bool Pre(const parser::OmpCriticalDirective &x) {
- checker_(std::get<parser::Verbatim>(x.t).source, Directive::OMPD_critical);
- return false;
- }
- bool Pre(const parser::OmpEndCriticalDirective &x) {
- checker_(std::get<parser::Verbatim>(x.t).source, Directive::OMPD_critical);
- return false;
- }
bool Pre(const parser::OmpMetadirectiveDirective &x) {
checker_(
std::get<parser::Verbatim>(x.t).source, Directive::OMPD_metadirective);
@@ -2034,41 +2026,87 @@ void OmpStructureChecker::Leave(const parser::OpenMPCancelConstruct &) {
}
void OmpStructureChecker::Enter(const parser::OpenMPCriticalConstruct &x) {
- const auto &dir{std::get<parser::OmpCriticalDirective>(x.t)};
- const auto &dirSource{std::get<parser::Verbatim>(dir.t).source};
- const auto &endDir{std::get<parser::OmpEndCriticalDirective>(x.t)};
- PushContextAndClauseSets(dirSource, llvm::omp::Directive::OMPD_critical);
+ const parser::OmpBeginDirective &beginSpec{x.BeginDir()};
+ const std::optional<parser::OmpEndDirective> &endSpec{x.EndDir()};
+ PushContextAndClauseSets(beginSpec.DirName().source, beginSpec.DirName().v);
+
const auto &block{std::get<parser::Block>(x.t)};
- CheckNoBranching(block, llvm::omp::Directive::OMPD_critical, dir.source);
- const auto &dirName{std::get<std::optional<parser::Name>>(dir.t)};
- const auto &endDirName{std::get<std::optional<parser::Name>>(endDir.t)};
- const auto &ompClause{std::get<parser::OmpClauseList>(dir.t)};
- if (dirName && endDirName &&
- dirName->ToString().compare(endDirName->ToString())) {
- context_
- .Say(endDirName->source,
- parser::MessageFormattedText{
- "CRITICAL directive names do not match"_err_en_US})
- .Attach(dirName->source, "should be "_en_US);
- } else if (dirName && !endDirName) {
- context_
- .Say(dirName->source,
- parser::MessageFormattedText{
- "CRITICAL directive names do not match"_err_en_US})
- .Attach(dirName->source, "should be NULL"_en_US);
- } else if (!dirName && endDirName) {
- context_
- .Say(endDirName->source,
- parser::MessageFormattedText{
- "CRITICAL directive names do not match"_err_en_US})
- .Attach(endDirName->source, "should be NULL"_en_US);
- }
- if (!dirName && !ompClause.source.empty() &&
- ompClause.source.NULTerminatedToString() != "hint(omp_sync_hint_none)") {
- context_.Say(dir.source,
- parser::MessageFormattedText{
- "Hint clause other than omp_sync_hint_none cannot be specified for "
- "an unnamed CRITICAL directive"_err_en_US});
+ CheckNoBranching(
+ block, llvm::omp::Directive::OMPD_critical, beginSpec.DirName().source);
+
+ auto getNameFromArg{[](const parser::OmpArgument &arg) {
+ if (auto *object{parser::Unwrap<parser::OmpObject>(arg.u)}) {
+ if (auto *designator{omp::GetDesignatorFromObj(*object)}) {
+ return getDesignatorNameIfDataRef(*designator);
+ }
+ }
+ return static_cast<const parser::Name *>(nullptr);
+ }};
+
+ auto checkArgumentList{[&](const parser::OmpArgumentList &args) {
+ if (args.v.size() > 1) {
+ context_.Say(args.source,
+ "Only a single argument is allowed in CRITICAL directive"_err_en_US);
+ } else if (!args.v.empty()) {
+ if (!getNameFromArg(args.v.front())) {
+ context_.Say(args.v.front().source,
+ "CRITICAL argument should be a name"_err_en_US);
+ }
+ }
+ }};
+
+ const parser::Name *beginName{nullptr};
+ const parser::Name *endName{nullptr};
+
+ auto &beginArgs{beginSpec.Arguments()};
+ checkArgumentList(beginArgs);
+
+ if (!beginArgs.v.empty()) {
+ beginName = getNameFromArg(beginArgs.v.front());
+ }
+
+ if (endSpec) {
+ auto &endArgs{endSpec->Arguments()};
+ checkArgumentList(endArgs);
+
+ if (beginArgs.v.empty() != endArgs.v.empty()) {
+ parser::CharBlock source{
+ beginArgs.v.empty() ? endArgs.source : beginArgs.source};
+ context_.Say(source,
+ "Either both CRITICAL and END CRITICAL should have an argument, or none of them should"_err_en_US);
+ } else if (!beginArgs.v.empty()) {
+ endName = getNameFromArg(endArgs.v.front());
+ if (beginName && endName) {
+ if (beginName->ToString() != endName->ToString()) {
+ context_.Say(endName->source,
+ "The names on CRITICAL and END CRITICAL must match"_err_en_US);
+ }
+ }
+ }
+ }
+
+ for (auto &clause : beginSpec.Clauses().v) {
+ auto *hint{std::get_if<parser::OmpClause::Hint>(&clause.u)};
+ if (!hint) {
+ continue;
+ }
+ const int64_t OmpSyncHintNone = 0; // omp_sync_hint_none
+ std::optional<int64_t> hintValue{GetIntValue(hint->v.v)};
+ if (hintValue && *hintValue != OmpSyncHintNone) {
+ // Emit a diagnostic if the name is missing, and point to the directive
+ // with a missing name.
+ parser::CharBlock source;
+ if (!beginName) {
+ source = beginSpec.DirName().source;
+ } else if (endSpec && !endName) {
+ source = endSpec->DirName().source;
+ }
+
+ if (!source.empty()) {
+ context_.Say(source,
+ "When HINT other than 'omp_sync_hint_none' is present, CRITICAL directive should have a name"_err_en_US);
+ }
+ }
}
}
diff --git a/flang/lib/Semantics/openmp-utils.cpp b/flang/lib/Semantics/openmp-utils.cpp
index 7a492a4..e8df346c 100644
--- a/flang/lib/Semantics/openmp-utils.cpp
+++ b/flang/lib/Semantics/openmp-utils.cpp
@@ -10,7 +10,7 @@
//
//===----------------------------------------------------------------------===//
-#include "openmp-utils.h"
+#include "flang/Semantics/openmp-utils.h"
#include "flang/Common/indirection.h"
#include "flang/Common/reference.h"
diff --git a/flang/lib/Semantics/resolve-directives.cpp b/flang/lib/Semantics/resolve-directives.cpp
index 0557b08..fe0d2a7 100644
--- a/flang/lib/Semantics/resolve-directives.cpp
+++ b/flang/lib/Semantics/resolve-directives.cpp
@@ -10,7 +10,6 @@
#include "check-acc-structure.h"
#include "check-omp-structure.h"
-#include "openmp-utils.h"
#include "resolve-names-utils.h"
#include "flang/Common/idioms.h"
#include "flang/Evaluate/fold.h"
@@ -22,6 +21,7 @@
#include "flang/Semantics/expression.h"
#include "flang/Semantics/openmp-dsa.h"
#include "flang/Semantics/openmp-modifiers.h"
+#include "flang/Semantics/openmp-utils.h"
#include "flang/Semantics/symbol.h"
#include "flang/Semantics/tools.h"
#include "flang/Support/Flags.h"
@@ -876,6 +876,9 @@ private:
bool IsNestedInDirective(llvm::omp::Directive directive);
void ResolveOmpObjectList(const parser::OmpObjectList &, Symbol::Flag);
+ void ResolveOmpDesignator(
+ const parser::Designator &designator, Symbol::Flag ompFlag);
+ void ResolveOmpCommonBlock(const parser::Name &name, Symbol::Flag ompFlag);
void ResolveOmpObject(const parser::OmpObject &, Symbol::Flag);
Symbol *ResolveOmp(const parser::Name &, Symbol::Flag, Scope &);
Symbol *ResolveOmp(Symbol &, Symbol::Flag, Scope &);
@@ -2139,8 +2142,8 @@ bool OmpAttributeVisitor::Pre(const parser::OpenMPSectionConstruct &x) {
}
bool OmpAttributeVisitor::Pre(const parser::OpenMPCriticalConstruct &x) {
- const auto &beginCriticalDir{std::get<parser::OmpCriticalDirective>(x.t)};
- PushContext(beginCriticalDir.source, llvm::omp::Directive::OMPD_critical);
+ const parser::OmpBeginDirective &beginSpec{x.BeginDir()};
+ PushContext(beginSpec.DirName().source, beginSpec.DirName().v);
GetContext().withinConstruct = true;
return true;
}
@@ -2786,196 +2789,182 @@ static bool SymbolOrEquivalentIsInNamelist(const Symbol &symbol) {
});
}
-void OmpAttributeVisitor::ResolveOmpObject(
- const parser::OmpObject &ompObject, Symbol::Flag ompFlag) {
+void OmpAttributeVisitor::ResolveOmpDesignator(
+ const parser::Designator &designator, Symbol::Flag ompFlag) {
unsigned version{context_.langOptions().OpenMPVersion};
- common::visit(
- common::visitors{
- [&](const parser::Designator &designator) {
- if (const auto *name{
- semantics::getDesignatorNameIfDataRef(designator)}) {
- if (auto *symbol{ResolveOmp(*name, ompFlag, currScope())}) {
- auto checkExclusivelists =
- [&](const Symbol *symbol1, Symbol::Flag firstOmpFlag,
- const Symbol *symbol2, Symbol::Flag secondOmpFlag) {
- if ((symbol1->test(firstOmpFlag) &&
- symbol2->test(secondOmpFlag)) ||
- (symbol1->test(secondOmpFlag) &&
- symbol2->test(firstOmpFlag))) {
- context_.Say(designator.source,
- "Variable '%s' may not "
- "appear on both %s and %s "
- "clauses on a %s construct"_err_en_US,
- symbol2->name(),
- Symbol::OmpFlagToClauseName(firstOmpFlag),
- Symbol::OmpFlagToClauseName(secondOmpFlag),
- parser::ToUpperCaseLetters(
- llvm::omp::getOpenMPDirectiveName(
- GetContext().directive, version)
- .str()));
- }
- };
- if (dataCopyingAttributeFlags.test(ompFlag)) {
- CheckDataCopyingClause(*name, *symbol, ompFlag);
- } else {
- AddToContextObjectWithExplicitDSA(*symbol, ompFlag);
- if (dataSharingAttributeFlags.test(ompFlag)) {
- CheckMultipleAppearances(*name, *symbol, ompFlag);
- }
- if (privateDataSharingAttributeFlags.test(ompFlag)) {
- CheckObjectIsPrivatizable(*name, *symbol, ompFlag);
- }
+ llvm::omp::Directive directive{GetContext().directive};
- if (ompFlag == Symbol::Flag::OmpAllocate) {
- AddAllocateName(name);
- }
- }
- if (ompFlag == Symbol::Flag::OmpDeclarativeAllocateDirective &&
- IsAllocatable(*symbol) &&
- !IsNestedInDirective(llvm::omp::Directive::OMPD_allocate)) {
- context_.Say(designator.source,
- "List items specified in the ALLOCATE directive must not "
- "have the ALLOCATABLE attribute unless the directive is "
- "associated with an ALLOCATE statement"_err_en_US);
- }
- if ((ompFlag == Symbol::Flag::OmpDeclarativeAllocateDirective ||
- ompFlag ==
- Symbol::Flag::OmpExecutableAllocateDirective) &&
- ResolveOmpObjectScope(name) == nullptr) {
- context_.Say(designator.source, // 2.15.3
- "List items must be declared in the same scoping unit "
- "in which the %s directive appears"_err_en_US,
- parser::ToUpperCaseLetters(
- llvm::omp::getOpenMPDirectiveName(
- GetContext().directive, version)
- .str()));
- }
- if (ompFlag == Symbol::Flag::OmpReduction) {
- // Using variables inside of a namelist in OpenMP reductions
- // is allowed by the standard, but is not allowed for
- // privatisation. This looks like an oversight. If the
- // namelist is hoisted to a global, we cannot apply the
- // mapping for the reduction variable: resulting in incorrect
- // results. Disabling this hoisting could make some real
- // production code go slower. See discussion in #109303
- if (SymbolOrEquivalentIsInNamelist(*symbol)) {
- context_.Say(name->source,
- "Variable '%s' in NAMELIST cannot be in a REDUCTION clause"_err_en_US,
- name->ToString());
- }
- }
- if (ompFlag == Symbol::Flag::OmpInclusiveScan ||
- ompFlag == Symbol::Flag::OmpExclusiveScan) {
- if (!symbol->test(Symbol::Flag::OmpInScanReduction)) {
- context_.Say(name->source,
- "List item %s must appear in REDUCTION clause "
- "with the INSCAN modifier of the parent "
- "directive"_err_en_US,
- name->ToString());
- }
- }
- if (ompFlag == Symbol::Flag::OmpDeclareTarget) {
- if (symbol->IsFuncResult()) {
- if (Symbol * func{currScope().symbol()}) {
- CHECK(func->IsSubprogram());
- func->set(ompFlag);
- name->symbol = func;
- }
- }
- }
- if (GetContext().directive ==
- llvm::omp::Directive::OMPD_target_data) {
- checkExclusivelists(symbol, Symbol::Flag::OmpUseDevicePtr,
- symbol, Symbol::Flag::OmpUseDeviceAddr);
- }
- if (llvm::omp::allDistributeSet.test(GetContext().directive)) {
- checkExclusivelists(symbol, Symbol::Flag::OmpFirstPrivate,
- symbol, Symbol::Flag::OmpLastPrivate);
- }
- if (llvm::omp::allTargetSet.test(GetContext().directive)) {
- checkExclusivelists(symbol, Symbol::Flag::OmpIsDevicePtr,
- symbol, Symbol::Flag::OmpHasDeviceAddr);
- const auto *hostAssocSym{symbol};
- if (!(symbol->test(Symbol::Flag::OmpIsDevicePtr) ||
- symbol->test(Symbol::Flag::OmpHasDeviceAddr))) {
- if (const auto *details{
- symbol->detailsIf<HostAssocDetails>()}) {
- hostAssocSym = &details->symbol();
- }
- }
- Symbol::Flag dataMappingAttributeFlags[] = {
- Symbol::Flag::OmpMapTo, Symbol::Flag::OmpMapFrom,
- Symbol::Flag::OmpMapToFrom, Symbol::Flag::OmpMapStorage,
- Symbol::Flag::OmpMapDelete, Symbol::Flag::OmpIsDevicePtr,
- Symbol::Flag::OmpHasDeviceAddr};
-
- Symbol::Flag dataSharingAttributeFlags[] = {
- Symbol::Flag::OmpPrivate, Symbol::Flag::OmpFirstPrivate,
- Symbol::Flag::OmpLastPrivate, Symbol::Flag::OmpShared,
- Symbol::Flag::OmpLinear};
-
- // For OMP TARGET TEAMS directive some sharing attribute
- // flags and mapping attribute flags can co-exist.
- if (!(llvm::omp::allTeamsSet.test(GetContext().directive) ||
- llvm::omp::allParallelSet.test(
- GetContext().directive))) {
- for (Symbol::Flag ompFlag1 : dataMappingAttributeFlags) {
- for (Symbol::Flag ompFlag2 : dataSharingAttributeFlags) {
- if ((hostAssocSym->test(ompFlag2) &&
- hostAssocSym->test(
- Symbol::Flag::OmpExplicit)) ||
- (symbol->test(ompFlag2) &&
- symbol->test(Symbol::Flag::OmpExplicit))) {
- checkExclusivelists(
- hostAssocSym, ompFlag1, symbol, ompFlag2);
- }
- }
- }
- }
- }
- }
- } else {
- // Array sections to be changed to substrings as needed
- if (AnalyzeExpr(context_, designator)) {
- if (std::holds_alternative<parser::Substring>(designator.u)) {
- context_.Say(designator.source,
- "Substrings are not allowed on OpenMP "
- "directives or clauses"_err_en_US);
- }
- }
- // other checks, more TBD
- }
- },
- [&](const parser::Name &name) { // common block
- if (auto *symbol{ResolveOmpCommonBlockName(&name)}) {
- if (!dataCopyingAttributeFlags.test(ompFlag)) {
- CheckMultipleAppearances(
- name, *symbol, Symbol::Flag::OmpCommonBlock);
- }
- // 2.15.3 When a named common block appears in a list, it has the
- // same meaning as if every explicit member of the common block
- // appeared in the list
- auto &details{symbol->get<CommonBlockDetails>()};
- unsigned index{0};
- for (auto &object : details.objects()) {
- if (auto *resolvedObject{
- ResolveOmp(*object, ompFlag, currScope())}) {
- if (dataCopyingAttributeFlags.test(ompFlag)) {
- CheckDataCopyingClause(name, *resolvedObject, ompFlag);
- } else {
- AddToContextObjectWithExplicitDSA(*resolvedObject, ompFlag);
- }
- details.replace_object(*resolvedObject, index);
- }
- index++;
- }
- } else {
- context_.Say(name.source, // 2.15.3
- "COMMON block must be declared in the same scoping unit "
- "in which the OpenMP directive or clause appears"_err_en_US);
+ const auto *name{semantics::getDesignatorNameIfDataRef(designator)};
+ if (!name) {
+ // Array sections to be changed to substrings as needed
+ if (AnalyzeExpr(context_, designator)) {
+ if (std::holds_alternative<parser::Substring>(designator.u)) {
+ context_.Say(designator.source,
+ "Substrings are not allowed on OpenMP directives or clauses"_err_en_US);
+ }
+ }
+ // other checks, more TBD
+ return;
+ }
+
+ if (auto *symbol{ResolveOmp(*name, ompFlag, currScope())}) {
+ auto checkExclusivelists{//
+ [&](const Symbol *symbol1, Symbol::Flag firstOmpFlag,
+ const Symbol *symbol2, Symbol::Flag secondOmpFlag) {
+ if ((symbol1->test(firstOmpFlag) && symbol2->test(secondOmpFlag)) ||
+ (symbol1->test(secondOmpFlag) && symbol2->test(firstOmpFlag))) {
+ context_.Say(designator.source,
+ "Variable '%s' may not appear on both %s and %s clauses on a %s construct"_err_en_US,
+ symbol2->name(), Symbol::OmpFlagToClauseName(firstOmpFlag),
+ Symbol::OmpFlagToClauseName(secondOmpFlag),
+ parser::ToUpperCaseLetters(
+ llvm::omp::getOpenMPDirectiveName(directive, version)));
+ }
+ }};
+ if (dataCopyingAttributeFlags.test(ompFlag)) {
+ CheckDataCopyingClause(*name, *symbol, ompFlag);
+ } else {
+ AddToContextObjectWithExplicitDSA(*symbol, ompFlag);
+ if (dataSharingAttributeFlags.test(ompFlag)) {
+ CheckMultipleAppearances(*name, *symbol, ompFlag);
+ }
+ if (privateDataSharingAttributeFlags.test(ompFlag)) {
+ CheckObjectIsPrivatizable(*name, *symbol, ompFlag);
+ }
+
+ if (ompFlag == Symbol::Flag::OmpAllocate) {
+ AddAllocateName(name);
+ }
+ }
+ if (ompFlag == Symbol::Flag::OmpDeclarativeAllocateDirective &&
+ IsAllocatable(*symbol) &&
+ !IsNestedInDirective(llvm::omp::Directive::OMPD_allocate)) {
+ context_.Say(designator.source,
+ "List items specified in the ALLOCATE directive must not have the ALLOCATABLE attribute unless the directive is associated with an ALLOCATE statement"_err_en_US);
+ }
+ if ((ompFlag == Symbol::Flag::OmpDeclarativeAllocateDirective ||
+ ompFlag == Symbol::Flag::OmpExecutableAllocateDirective) &&
+ ResolveOmpObjectScope(name) == nullptr) {
+ context_.Say(designator.source, // 2.15.3
+ "List items must be declared in the same scoping unit in which the %s directive appears"_err_en_US,
+ parser::ToUpperCaseLetters(
+ llvm::omp::getOpenMPDirectiveName(directive, version)));
+ }
+ if (ompFlag == Symbol::Flag::OmpReduction) {
+            // Using variables inside a namelist in OpenMP reductions
+ // is allowed by the standard, but is not allowed for
+ // privatisation. This looks like an oversight. If the
+ // namelist is hoisted to a global, we cannot apply the
+            // mapping for the reduction variable, resulting in incorrect
+ // results. Disabling this hoisting could make some real
+ // production code go slower. See discussion in #109303
+ if (SymbolOrEquivalentIsInNamelist(*symbol)) {
+ context_.Say(name->source,
+ "Variable '%s' in NAMELIST cannot be in a REDUCTION clause"_err_en_US,
+ name->ToString());
+ }
+ }
+ if (ompFlag == Symbol::Flag::OmpInclusiveScan ||
+ ompFlag == Symbol::Flag::OmpExclusiveScan) {
+ if (!symbol->test(Symbol::Flag::OmpInScanReduction)) {
+ context_.Say(name->source,
+ "List item %s must appear in REDUCTION clause with the INSCAN modifier of the parent directive"_err_en_US,
+ name->ToString());
+ }
+ }
+ if (ompFlag == Symbol::Flag::OmpDeclareTarget) {
+ if (symbol->IsFuncResult()) {
+ if (Symbol * func{currScope().symbol()}) {
+ CHECK(func->IsSubprogram());
+ func->set(ompFlag);
+ name->symbol = func;
+ }
+ }
+ }
+ if (directive == llvm::omp::Directive::OMPD_target_data) {
+ checkExclusivelists(symbol, Symbol::Flag::OmpUseDevicePtr, symbol,
+ Symbol::Flag::OmpUseDeviceAddr);
+ }
+ if (llvm::omp::allDistributeSet.test(directive)) {
+ checkExclusivelists(symbol, Symbol::Flag::OmpFirstPrivate, symbol,
+ Symbol::Flag::OmpLastPrivate);
+ }
+ if (llvm::omp::allTargetSet.test(directive)) {
+ checkExclusivelists(symbol, Symbol::Flag::OmpIsDevicePtr, symbol,
+ Symbol::Flag::OmpHasDeviceAddr);
+ const auto *hostAssocSym{symbol};
+ if (!symbol->test(Symbol::Flag::OmpIsDevicePtr) &&
+ !symbol->test(Symbol::Flag::OmpHasDeviceAddr)) {
+ if (const auto *details{symbol->detailsIf<HostAssocDetails>()}) {
+ hostAssocSym = &details->symbol();
+ }
+ }
+ static Symbol::Flag dataMappingAttributeFlags[] = {//
+ Symbol::Flag::OmpMapTo, Symbol::Flag::OmpMapFrom,
+ Symbol::Flag::OmpMapToFrom, Symbol::Flag::OmpMapStorage,
+ Symbol::Flag::OmpMapDelete, Symbol::Flag::OmpIsDevicePtr,
+ Symbol::Flag::OmpHasDeviceAddr};
+
+ static Symbol::Flag dataSharingAttributeFlags[] = {//
+ Symbol::Flag::OmpPrivate, Symbol::Flag::OmpFirstPrivate,
+ Symbol::Flag::OmpLastPrivate, Symbol::Flag::OmpShared,
+ Symbol::Flag::OmpLinear};
+
+          // For the OMP TARGET TEAMS directive, some sharing attribute
+ // flags and mapping attribute flags can co-exist.
+ if (!llvm::omp::allTeamsSet.test(directive) &&
+ !llvm::omp::allParallelSet.test(directive)) {
+ for (Symbol::Flag ompFlag1 : dataMappingAttributeFlags) {
+ for (Symbol::Flag ompFlag2 : dataSharingAttributeFlags) {
+ if ((hostAssocSym->test(ompFlag2) &&
+ hostAssocSym->test(Symbol::Flag::OmpExplicit)) ||
+ (symbol->test(ompFlag2) &&
+ symbol->test(Symbol::Flag::OmpExplicit))) {
+ checkExclusivelists(hostAssocSym, ompFlag1, symbol, ompFlag2);
}
- },
- },
+ }
+ }
+ }
+ }
+ }
+}
+
+void OmpAttributeVisitor::ResolveOmpCommonBlock(
+ const parser::Name &name, Symbol::Flag ompFlag) {
+ if (auto *symbol{ResolveOmpCommonBlockName(&name)}) {
+ if (!dataCopyingAttributeFlags.test(ompFlag)) {
+ CheckMultipleAppearances(name, *symbol, Symbol::Flag::OmpCommonBlock);
+ }
+ // 2.15.3 When a named common block appears in a list, it has the
+ // same meaning as if every explicit member of the common block
+ // appeared in the list
+ auto &details{symbol->get<CommonBlockDetails>()};
+ for (auto [index, object] : llvm::enumerate(details.objects())) {
+ if (auto *resolvedObject{ResolveOmp(*object, ompFlag, currScope())}) {
+ if (dataCopyingAttributeFlags.test(ompFlag)) {
+ CheckDataCopyingClause(name, *resolvedObject, ompFlag);
+ } else {
+ AddToContextObjectWithExplicitDSA(*resolvedObject, ompFlag);
+ }
+ details.replace_object(*resolvedObject, index);
+ }
+ }
+ } else {
+ context_.Say(name.source, // 2.15.3
+ "COMMON block must be declared in the same scoping unit in which the OpenMP directive or clause appears"_err_en_US);
+ }
+}
+
+void OmpAttributeVisitor::ResolveOmpObject(
+ const parser::OmpObject &ompObject, Symbol::Flag ompFlag) {
+ common::visit(common::visitors{
+ [&](const parser::Designator &designator) {
+ ResolveOmpDesignator(designator, ompFlag);
+ },
+ [&](const parser::Name &name) { // common block
+ ResolveOmpCommonBlock(name, ompFlag);
+ },
+ },
ompObject.u);
}
diff --git a/flang/lib/Semantics/resolve-names.cpp b/flang/lib/Semantics/resolve-names.cpp
index 66a45dd..5808b4b 100644
--- a/flang/lib/Semantics/resolve-names.cpp
+++ b/flang/lib/Semantics/resolve-names.cpp
@@ -30,6 +30,7 @@
#include "flang/Semantics/attr.h"
#include "flang/Semantics/expression.h"
#include "flang/Semantics/openmp-modifiers.h"
+#include "flang/Semantics/openmp-utils.h"
#include "flang/Semantics/program-tree.h"
#include "flang/Semantics/scope.h"
#include "flang/Semantics/semantics.h"
@@ -1486,6 +1487,16 @@ public:
void Post(const parser::OpenMPBlockConstruct &);
bool Pre(const parser::OmpBeginDirective &x) {
AddOmpSourceRange(x.source);
+ // Manually resolve names in CRITICAL directives. This is because these
+ // names do not denote Fortran objects, and the CRITICAL directive causes
+ // them to be "auto-declared", i.e. inserted into the global scope.
+ // More specifically, they are not expected to have explicit declarations,
+      // and if they do, the behavior is unspecified.
+ if (x.DirName().v == llvm::omp::Directive::OMPD_critical) {
+ for (const parser::OmpArgument &arg : x.Arguments().v) {
+ ResolveCriticalName(arg);
+ }
+ }
return true;
}
void Post(const parser::OmpBeginDirective &) {
@@ -1493,6 +1504,12 @@ public:
}
bool Pre(const parser::OmpEndDirective &x) {
AddOmpSourceRange(x.source);
+ // Manually resolve names in CRITICAL directives.
+ if (x.DirName().v == llvm::omp::Directive::OMPD_critical) {
+ for (const parser::OmpArgument &arg : x.Arguments().v) {
+ ResolveCriticalName(arg);
+ }
+ }
return true;
}
void Post(const parser::OmpEndDirective &) {
@@ -1591,32 +1608,6 @@ public:
void Post(const parser::OmpEndSectionsDirective &) {
messageHandler().set_currStmtSource(std::nullopt);
}
- bool Pre(const parser::OmpCriticalDirective &x) {
- AddOmpSourceRange(x.source);
- // Manually resolve names in CRITICAL directives. This is because these
- // names do not denote Fortran objects, and the CRITICAL directive causes
- // them to be "auto-declared", i.e. inserted into the global scope.
- // More specifically, they are not expected to have explicit declarations,
- // and if they do the behavior is unspeficied.
- if (auto &maybeName{std::get<std::optional<parser::Name>>(x.t)}) {
- ResolveCriticalName(*maybeName);
- }
- return true;
- }
- void Post(const parser::OmpCriticalDirective &) {
- messageHandler().set_currStmtSource(std::nullopt);
- }
- bool Pre(const parser::OmpEndCriticalDirective &x) {
- AddOmpSourceRange(x.source);
- // Manually resolve names in CRITICAL directives.
- if (auto &maybeName{std::get<std::optional<parser::Name>>(x.t)}) {
- ResolveCriticalName(*maybeName);
- }
- return true;
- }
- void Post(const parser::OmpEndCriticalDirective &) {
- messageHandler().set_currStmtSource(std::nullopt);
- }
bool Pre(const parser::OpenMPThreadprivate &) {
SkipImplicitTyping(true);
return true;
@@ -1732,7 +1723,7 @@ private:
const std::optional<parser::OmpClauseList> &clauses,
const T &wholeConstruct);
- void ResolveCriticalName(const parser::Name &name);
+ void ResolveCriticalName(const parser::OmpArgument &arg);
int metaLevel_{0};
const parser::OmpMetadirectiveDirective *metaDirective_{nullptr};
@@ -1961,7 +1952,7 @@ void OmpVisitor::ProcessReductionSpecifier(
}
}
-void OmpVisitor::ResolveCriticalName(const parser::Name &name) {
+void OmpVisitor::ResolveCriticalName(const parser::OmpArgument &arg) {
auto &globalScope{[&]() -> Scope & {
for (Scope *s{&currScope()};; s = &s->parent()) {
if (s->IsTopLevel()) {
@@ -1971,15 +1962,21 @@ void OmpVisitor::ResolveCriticalName(const parser::Name &name) {
llvm_unreachable("Cannot find global scope");
}()};
- if (auto *symbol{FindInScope(globalScope, name)}) {
- if (!symbol->test(Symbol::Flag::OmpCriticalLock)) {
- SayWithDecl(name, *symbol,
- "CRITICAL construct name '%s' conflicts with a previous declaration"_warn_en_US,
- name.ToString());
+ if (auto *object{parser::Unwrap<parser::OmpObject>(arg.u)}) {
+ if (auto *desg{omp::GetDesignatorFromObj(*object)}) {
+ if (auto *name{getDesignatorNameIfDataRef(*desg)}) {
+ if (auto *symbol{FindInScope(globalScope, *name)}) {
+ if (!symbol->test(Symbol::Flag::OmpCriticalLock)) {
+ SayWithDecl(*name, *symbol,
+ "CRITICAL construct name '%s' conflicts with a previous declaration"_warn_en_US,
+ name->ToString());
+ }
+ } else {
+ name->symbol = &MakeSymbol(globalScope, name->source, Attrs{});
+ name->symbol->set(Symbol::Flag::OmpCriticalLock);
+ }
+ }
}
- } else {
- name.symbol = &MakeSymbol(globalScope, name.source, Attrs{});
- name.symbol->set(Symbol::Flag::OmpCriticalLock);
}
}
diff --git a/flang/lib/Semantics/unparse-with-symbols.cpp b/flang/lib/Semantics/unparse-with-symbols.cpp
index 3093e39..41077e0 100644
--- a/flang/lib/Semantics/unparse-with-symbols.cpp
+++ b/flang/lib/Semantics/unparse-with-symbols.cpp
@@ -70,20 +70,6 @@ public:
currStmt_ = std::nullopt;
}
- bool Pre(const parser::OmpCriticalDirective &x) {
- currStmt_ = x.source;
- return true;
- }
- void Post(const parser::OmpCriticalDirective &) { currStmt_ = std::nullopt; }
-
- bool Pre(const parser::OmpEndCriticalDirective &x) {
- currStmt_ = x.source;
- return true;
- }
- void Post(const parser::OmpEndCriticalDirective &) {
- currStmt_ = std::nullopt;
- }
-
// Directive arguments can be objects with symbols.
bool Pre(const parser::OmpBeginDirective &x) {
currStmt_ = x.source;
diff --git a/flang/module/cudadevice.f90 b/flang/module/cudadevice.f90
index d0c312c..dc72fc5 100644
--- a/flang/module/cudadevice.f90
+++ b/flang/module/cudadevice.f90
@@ -754,11 +754,11 @@ implicit none
end interface
interface __popc
- attributes(device) integer function __popc(i) bind(c)
+ attributes(device) integer function __popc(i) bind(c, name='__nv_popc')
!dir$ ignore_tkr (d) i
integer, value :: i
end function
- attributes(device) integer function __popcll(i) bind(c)
+ attributes(device) integer function __popcll(i) bind(c, name='__nv_popcll')
!dir$ ignore_tkr (d) i
integer(8), value :: i
end function
diff --git a/flang/test/Driver/atomic-control-options.f90 b/flang/test/Driver/atomic-control-options.f90
new file mode 100644
index 0000000..cb382f9
--- /dev/null
+++ b/flang/test/Driver/atomic-control-options.f90
@@ -0,0 +1,20 @@
+! REQUIRES: amdgpu-registered-target
+! RUN: %flang_fc1 -emit-llvm -triple amdgcn-amd-amdhsa -fopenmp -fopenmp-is-device -munsafe-fp-atomics %s -o -|FileCheck -check-prefix=UNSAFE-FP-ATOMICS %s
+! RUN: %flang_fc1 -emit-llvm -triple amdgcn-amd-amdhsa -fopenmp -fopenmp-is-device -fatomic-ignore-denormal-mode %s -o -|FileCheck -check-prefix=IGNORE-DENORMAL-MODE %s
+! RUN: %flang_fc1 -emit-llvm -triple amdgcn-amd-amdhsa -fopenmp -fopenmp-is-device -fatomic-fine-grained-memory %s -o -|FileCheck -check-prefix=FINE-GRAINED-MEMORY %s
+! RUN: %flang_fc1 -emit-llvm -triple amdgcn-amd-amdhsa -fopenmp -fopenmp-is-device -fatomic-remote-memory %s -o -|FileCheck -check-prefix=REMOTE-MEMORY %s
+program test
+ implicit none
+ integer :: A, threads
+ threads = 128
+ A = 0
+ !$omp target parallel num_threads(threads)
+ !$omp atomic
+ A = A + 1
+ !$omp end target parallel
+end program test
+
+!UNSAFE-FP-ATOMICS: %{{.*}} = atomicrmw add ptr {{.*}}, i32 1 monotonic, align 4, !amdgpu.ignore.denormal.mode !{{.*}}, !amdgpu.no.fine.grained.memory !{{.*}}, !amdgpu.no.remote.memory !{{.*}}
+!IGNORE-DENORMAL-MODE: %{{.*}} = atomicrmw add ptr {{.*}}, i32 1 monotonic, align 4, !amdgpu.ignore.denormal.mode !{{.*}}, !amdgpu.no.fine.grained.memory !{{.*}}, !amdgpu.no.remote.memory !{{.*}}
+!FINE-GRAINED-MEMORY: %{{.*}} = atomicrmw add ptr {{.*}}, i32 1 monotonic, align 4, !amdgpu.no.remote.memory !{{.*}}
+!REMOTE-MEMORY: %{{.*}} = atomicrmw add ptr {{.*}}, i32 1 monotonic, align 4, !amdgpu.no.fine.grained.memory !{{.*}}
diff --git a/flang/test/Driver/linker-flags.f90 b/flang/test/Driver/linker-flags.f90
index ad48ea1..2b56fdf 100644
--- a/flang/test/Driver/linker-flags.f90
+++ b/flang/test/Driver/linker-flags.f90
@@ -77,7 +77,7 @@
! MINGW-SAME: -lflang_rt.runtime
! MINGW-STATIC-FLANGRT: "{{.*}}{{\\|/}}libflang_rt.runtime.a"
-! NOTE: This also matches lld-link (when CLANG_DEFAULT_LINKER=lld) and
+! NOTE: This also matches lld-link (when FLANG_DEFAULT_LINKER=lld) and
! any .exe suffix that is added when resolving to the full path of
! (lld-)link.exe on Windows platforms. The suffix may not be added
! when the executable is not found or on non-Windows platforms.
diff --git a/flang/test/Lower/CUDA/cuda-data-transfer.cuf b/flang/test/Lower/CUDA/cuda-data-transfer.cuf
index 9abf19b..8f8bd9b 100644
--- a/flang/test/Lower/CUDA/cuda-data-transfer.cuf
+++ b/flang/test/Lower/CUDA/cuda-data-transfer.cuf
@@ -13,6 +13,8 @@ module mod1
integer, device, dimension(11:20) :: cdev
+ real(kind=8), device, allocatable, dimension(:) :: p
+
contains
function dev1(a)
integer, device :: a(:)
@@ -456,3 +458,19 @@ end
! CHECK: %[[M:.*]]:2 = hlfir.declare %4 {data_attr = #cuf.cuda<managed>, uniq_name = "_QFsub24Em"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
! CHECK: %[[D1:.*]] = hlfir.designate %[[D]]#0 (%c1{{.*}}) : (!fir.ref<!fir.array<4xf32>>, index) -> !fir.ref<f32>
! CHECK: cuf.data_transfer %[[D1]] to %[[M]]#0 {transfer_kind = #cuf.cuda_transfer<device_device>} : !fir.ref<f32>, !fir.ref<f32>
+
+subroutine sub25()
+ use mod1
+ integer :: i
+ real(8) :: c
+
+ do i = 1, 10
+ c = c + p(i)
+ end do
+end
+
+! CHECK-LABEL: func.func @_QPsub25()
+! CHECK: fir.allocmem !fir.array<?xf64>, %15#1 {bindc_name = ".tmp", uniq_name = ""}
+! CHECK: cuf.data_transfer %{{.*}} to %{{.*}} {transfer_kind = #cuf.cuda_transfer<device_host>} : !fir.ref<!fir.box<!fir.heap<!fir.array<?xf64>>>>, !fir.box<!fir.array<?xf64>>
+! CHECK: hlfir.assign %{{.*}} to %{{.*}} : f64, !fir.ref<f64>
+! CHECK: fir.freemem %{{.*}} : !fir.heap<!fir.array<?xf64>>
diff --git a/flang/test/Lower/CUDA/cuda-device-proc.cuf b/flang/test/Lower/CUDA/cuda-device-proc.cuf
index d5e614a..a8cc503 100644
--- a/flang/test/Lower/CUDA/cuda-device-proc.cuf
+++ b/flang/test/Lower/CUDA/cuda-device-proc.cuf
@@ -11,6 +11,7 @@ attributes(global) subroutine devsub()
integer(8) :: al
integer(8) :: time
integer :: smalltime
+ integer(4) :: res
call syncthreads()
call syncwarp(1)
@@ -49,6 +50,9 @@ attributes(global) subroutine devsub()
smalltime = clock()
time = clock64()
time = globalTimer()
+
+ res = __popc(ai)
+ res = __popc(al)
end
! CHECK-LABEL: func.func @_QPdevsub() attributes {cuf.proc_attr = #cuf.cuda_proc<global>}
@@ -89,6 +93,9 @@ end
! CHECK: %{{.*}} = nvvm.read.ptx.sreg.clock64 : i64
! CHECK: %{{.*}} = nvvm.read.ptx.sreg.globaltimer : i64
+! CHECK: %{{.*}} = fir.call @__nv_popc(%{{.*}}) proc_attrs<bind_c> fastmath<contract> : (i32) -> i32
+! CHECK: %{{.*}} = fir.call @__nv_popcll(%{{.*}}) proc_attrs<bind_c> fastmath<contract> : (i64) -> i32
+
subroutine host1()
integer, device :: a(32)
integer, device :: ret
diff --git a/flang/test/Lower/CUDA/cuda-set-allocator.cuf b/flang/test/Lower/CUDA/cuda-set-allocator.cuf
index e3bb181..d783f34 100644
--- a/flang/test/Lower/CUDA/cuda-set-allocator.cuf
+++ b/flang/test/Lower/CUDA/cuda-set-allocator.cuf
@@ -23,34 +23,44 @@ contains
subroutine sub2()
type(ty_device), pointer :: d1
+ allocate(d1)
end subroutine
! CHECK-LABEL: func.func @_QMm1Psub2()
! CHECK: %[[ALLOC:.*]] = cuf.alloc !fir.box<!fir.ptr<!fir.type<_QMm1Tty_device{x:!fir.box<!fir.heap<!fir.array<?xi32>>>,y:i32,z:!fir.box<!fir.heap<!fir.array<?xi32>>>}>>> {bindc_name = "d1", data_attr = #cuf.cuda<managed>, uniq_name = "_QMm1Fsub2Ed1"} -> !fir.ref<!fir.box<!fir.ptr<!fir.type<_QMm1Tty_device{x:!fir.box<!fir.heap<!fir.array<?xi32>>>,y:i32,z:!fir.box<!fir.heap<!fir.array<?xi32>>>}>>>>
! CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[ALLOC]] {data_attr = #cuf.cuda<managed>, fortran_attrs = #fir.var_attrs<pointer>, uniq_name = "_QMm1Fsub2Ed1"} : (!fir.ref<!fir.box<!fir.ptr<!fir.type<_QMm1Tty_device{x:!fir.box<!fir.heap<!fir.array<?xi32>>>,y:i32,z:!fir.box<!fir.heap<!fir.array<?xi32>>>}>>>>) -> (!fir.ref<!fir.box<!fir.ptr<!fir.type<_QMm1Tty_device{x:!fir.box<!fir.heap<!fir.array<?xi32>>>,y:i32,z:!fir.box<!fir.heap<!fir.array<?xi32>>>}>>>>, !fir.ref<!fir.box<!fir.ptr<!fir.type<_QMm1Tty_device{x:!fir.box<!fir.heap<!fir.array<?xi32>>>,y:i32,z:!fir.box<!fir.heap<!fir.array<?xi32>>>}>>>>)
-! CHECK: %[[LOAD1:.*]] = fir.load %[[DECL]]#0 : !fir.ref<!fir.box<!fir.ptr<!fir.type<_QMm1Tty_device{x:!fir.box<!fir.heap<!fir.array<?xi32>>>,y:i32,z:!fir.box<!fir.heap<!fir.array<?xi32>>>}>>>>
-! CHECK: %[[ADDR1:.*]] = fir.box_addr %[[LOAD1]] : (!fir.box<!fir.ptr<!fir.type<_QMm1Tty_device{x:!fir.box<!fir.heap<!fir.array<?xi32>>>,y:i32,z:!fir.box<!fir.heap<!fir.array<?xi32>>>}>>>) -> !fir.ptr<!fir.type<_QMm1Tty_device{x:!fir.box<!fir.heap<!fir.array<?xi32>>>,y:i32,z:!fir.box<!fir.heap<!fir.array<?xi32>>>}>>
-! CHECK: %[[DESIGNATE1:.*]] = hlfir.designate %[[ADDR1]]{"x"} : (!fir.ptr<!fir.type<_QMm1Tty_device{x:!fir.box<!fir.heap<!fir.array<?xi32>>>,y:i32,z:!fir.box<!fir.heap<!fir.array<?xi32>>>}>>) -> !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
-! CHECK: cuf.set_allocator_idx %[[DESIGNATE1]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>> {data_attr = #cuf.cuda<device>}
-! CHECK: %[[LOAD2:.*]] = fir.load %[[DECL]]#0 : !fir.ref<!fir.box<!fir.ptr<!fir.type<_QMm1Tty_device{x:!fir.box<!fir.heap<!fir.array<?xi32>>>,y:i32,z:!fir.box<!fir.heap<!fir.array<?xi32>>>}>>>>
-! CHECK: %[[ADDR2:.*]] = fir.box_addr %[[LOAD2]] : (!fir.box<!fir.ptr<!fir.type<_QMm1Tty_device{x:!fir.box<!fir.heap<!fir.array<?xi32>>>,y:i32,z:!fir.box<!fir.heap<!fir.array<?xi32>>>}>>>) -> !fir.ptr<!fir.type<_QMm1Tty_device{x:!fir.box<!fir.heap<!fir.array<?xi32>>>,y:i32,z:!fir.box<!fir.heap<!fir.array<?xi32>>>}>>
-! CHECK: %[[DESIGNATE2:.*]] = hlfir.designate %[[ADDR2]]{"z"} : (!fir.ptr<!fir.type<_QMm1Tty_device{x:!fir.box<!fir.heap<!fir.array<?xi32>>>,y:i32,z:!fir.box<!fir.heap<!fir.array<?xi32>>>}>>) -> !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
-! CHECK: cuf.set_allocator_idx %[[DESIGNATE2]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>> {data_attr = #cuf.cuda<device>}
+! CHECK: cuf.allocate
+! CHECK: %[[LOAD:.*]] = fir.load %[[DECL]]#0 : !fir.ref<!fir.box<!fir.ptr<!fir.type<_QMm1Tty_device{x:!fir.box<!fir.heap<!fir.array<?xi32>>>,y:i32,z:!fir.box<!fir.heap<!fir.array<?xi32>>>}>>>>
+! CHECK: %[[ADDR:.*]] = fir.box_addr %[[LOAD]] : (!fir.box<!fir.ptr<!fir.type<_QMm1Tty_device{x:!fir.box<!fir.heap<!fir.array<?xi32>>>,y:i32,z:!fir.box<!fir.heap<!fir.array<?xi32>>>}>>>) -> !fir.ptr<!fir.type<_QMm1Tty_device{x:!fir.box<!fir.heap<!fir.array<?xi32>>>,y:i32,z:!fir.box<!fir.heap<!fir.array<?xi32>>>}>>
+! CHECK: %[[COORD1:.*]] = fir.coordinate_of %[[ADDR]], x : (!fir.ptr<!fir.type<_QMm1Tty_device{x:!fir.box<!fir.heap<!fir.array<?xi32>>>,y:i32,z:!fir.box<!fir.heap<!fir.array<?xi32>>>}>>) -> !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
+! CHECK: cuf.set_allocator_idx %[[COORD1]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>> {data_attr = #cuf.cuda<device>}
+! CHECK: %[[COORD2:.*]] = fir.coordinate_of %[[ADDR]], z : (!fir.ptr<!fir.type<_QMm1Tty_device{x:!fir.box<!fir.heap<!fir.array<?xi32>>>,y:i32,z:!fir.box<!fir.heap<!fir.array<?xi32>>>}>>) -> !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
+! CHECK: cuf.set_allocator_idx %[[COORD2]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>> {data_attr = #cuf.cuda<device>}
subroutine sub3()
type(ty_device), allocatable :: d1
+ allocate(d1)
end subroutine
! CHECK-LABEL: func.func @_QMm1Psub3()
! CHECK: %[[ALLOC:.*]] = cuf.alloc !fir.box<!fir.heap<!fir.type<_QMm1Tty_device{x:!fir.box<!fir.heap<!fir.array<?xi32>>>,y:i32,z:!fir.box<!fir.heap<!fir.array<?xi32>>>}>>> {bindc_name = "d1", data_attr = #cuf.cuda<managed>, uniq_name = "_QMm1Fsub3Ed1"} -> !fir.ref<!fir.box<!fir.heap<!fir.type<_QMm1Tty_device{x:!fir.box<!fir.heap<!fir.array<?xi32>>>,y:i32,z:!fir.box<!fir.heap<!fir.array<?xi32>>>}>>>>
! CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[ALLOC]] {data_attr = #cuf.cuda<managed>, fortran_attrs = #fir.var_attrs<allocatable>, uniq_name = "_QMm1Fsub3Ed1"} : (!fir.ref<!fir.box<!fir.heap<!fir.type<_QMm1Tty_device{x:!fir.box<!fir.heap<!fir.array<?xi32>>>,y:i32,z:!fir.box<!fir.heap<!fir.array<?xi32>>>}>>>>) -> (!fir.ref<!fir.box<!fir.heap<!fir.type<_QMm1Tty_device{x:!fir.box<!fir.heap<!fir.array<?xi32>>>,y:i32,z:!fir.box<!fir.heap<!fir.array<?xi32>>>}>>>>, !fir.ref<!fir.box<!fir.heap<!fir.type<_QMm1Tty_device{x:!fir.box<!fir.heap<!fir.array<?xi32>>>,y:i32,z:!fir.box<!fir.heap<!fir.array<?xi32>>>}>>>>)
-! CHECK: %[[LOAD1:.*]] = fir.load %[[DECL]]#0 : !fir.ref<!fir.box<!fir.heap<!fir.type<_QMm1Tty_device{x:!fir.box<!fir.heap<!fir.array<?xi32>>>,y:i32,z:!fir.box<!fir.heap<!fir.array<?xi32>>>}>>>>
-! CHECK: %[[ADDR1:.*]] = fir.box_addr %[[LOAD1]] : (!fir.box<!fir.heap<!fir.type<_QMm1Tty_device{x:!fir.box<!fir.heap<!fir.array<?xi32>>>,y:i32,z:!fir.box<!fir.heap<!fir.array<?xi32>>>}>>>) -> !fir.heap<!fir.type<_QMm1Tty_device{x:!fir.box<!fir.heap<!fir.array<?xi32>>>,y:i32,z:!fir.box<!fir.heap<!fir.array<?xi32>>>}>>
-! CHECK: %[[DESIGNATE1:.*]] = hlfir.designate %[[ADDR1]]{"x"} : (!fir.heap<!fir.type<_QMm1Tty_device{x:!fir.box<!fir.heap<!fir.array<?xi32>>>,y:i32,z:!fir.box<!fir.heap<!fir.array<?xi32>>>}>>) -> !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
-! CHECK: cuf.set_allocator_idx %[[DESIGNATE1]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>> {data_attr = #cuf.cuda<device>}
-! CHECK: %[[LOAD2:.*]] = fir.load %[[DECL]]#0 : !fir.ref<!fir.box<!fir.heap<!fir.type<_QMm1Tty_device{x:!fir.box<!fir.heap<!fir.array<?xi32>>>,y:i32,z:!fir.box<!fir.heap<!fir.array<?xi32>>>}>>>>
-! CHECK: %[[ADDR2:.*]] = fir.box_addr %[[LOAD2]] : (!fir.box<!fir.heap<!fir.type<_QMm1Tty_device{x:!fir.box<!fir.heap<!fir.array<?xi32>>>,y:i32,z:!fir.box<!fir.heap<!fir.array<?xi32>>>}>>>) -> !fir.heap<!fir.type<_QMm1Tty_device{x:!fir.box<!fir.heap<!fir.array<?xi32>>>,y:i32,z:!fir.box<!fir.heap<!fir.array<?xi32>>>}>>
-! CHECK: %[[DESIGNATE2:.*]] = hlfir.designate %[[ADDR2]]{"z"} : (!fir.heap<!fir.type<_QMm1Tty_device{x:!fir.box<!fir.heap<!fir.array<?xi32>>>,y:i32,z:!fir.box<!fir.heap<!fir.array<?xi32>>>}>>) -> !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
-! CHECK: cuf.set_allocator_idx %[[DESIGNATE2]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>> {data_attr = #cuf.cuda<device>}
+! CHECK: cuf.allocate
+! CHECK: %[[LOAD:.*]] = fir.load %[[DECL]]#0 : !fir.ref<!fir.box<!fir.heap<!fir.type<_QMm1Tty_device{x:!fir.box<!fir.heap<!fir.array<?xi32>>>,y:i32,z:!fir.box<!fir.heap<!fir.array<?xi32>>>}>>>>
+! CHECK: %[[ADDR:.*]] = fir.box_addr %[[LOAD]] : (!fir.box<!fir.heap<!fir.type<_QMm1Tty_device{x:!fir.box<!fir.heap<!fir.array<?xi32>>>,y:i32,z:!fir.box<!fir.heap<!fir.array<?xi32>>>}>>>) -> !fir.heap<!fir.type<_QMm1Tty_device{x:!fir.box<!fir.heap<!fir.array<?xi32>>>,y:i32,z:!fir.box<!fir.heap<!fir.array<?xi32>>>}>>
+! CHECK: %[[COORD1:.*]] = fir.coordinate_of %[[ADDR]], x : (!fir.heap<!fir.type<_QMm1Tty_device{x:!fir.box<!fir.heap<!fir.array<?xi32>>>,y:i32,z:!fir.box<!fir.heap<!fir.array<?xi32>>>}>>) -> !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
+! CHECK: cuf.set_allocator_idx %[[COORD1]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>> {data_attr = #cuf.cuda<device>}
+! CHECK: %[[COORD2:.*]] = fir.coordinate_of %[[ADDR]], z : (!fir.heap<!fir.type<_QMm1Tty_device{x:!fir.box<!fir.heap<!fir.array<?xi32>>>,y:i32,z:!fir.box<!fir.heap<!fir.array<?xi32>>>}>>) -> !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
+! CHECK: cuf.set_allocator_idx %[[COORD2]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>> {data_attr = #cuf.cuda<device>}
+
+ subroutine sub4()
+ type(ty_device), allocatable :: d1(:,:)
+ allocate(d1(10, 10))
+ end subroutine
+
+! CHECK-LABEL: func.func @_QMm1Psub4()
+! CHECK: cuf.allocate
+! CHECK-COUNT-2: fir.do_loop
+! CHECK-COUNT-2: cuf.set_allocator_idx
end module
diff --git a/flang/test/Lower/do_loop_unstructured.f90 b/flang/test/Lower/do_loop_unstructured.f90
index d8890b2..176ea5c 100644
--- a/flang/test/Lower/do_loop_unstructured.f90
+++ b/flang/test/Lower/do_loop_unstructured.f90
@@ -232,3 +232,22 @@ end subroutine
! CHECK: cf.br ^[[HEADER]]
! CHECK: ^[[EXIT]]:
! CHECK: return
+
+subroutine unstructured_do_concurrent
+ logical :: success
+ do concurrent (i=1:10) local(success)
+ error stop "fail"
+ enddo
+end
+! CHECK-LABEL: func.func @_QPunstructured_do_concurrent
+! CHECK: %[[ITER_VAR:.*]] = fir.alloca i32
+
+! CHECK: ^[[HEADER]]:
+! CHECK: %{{.*}} = fir.load %[[ITER_VAR]] : !fir.ref<i32>
+! CHECK: cf.cond_br %{{.*}}, ^[[BODY:.*]], ^[[EXIT:.*]]
+
+! CHECK: ^[[BODY]]:
+! CHECK-NEXT: %{{.*}} = fir.alloca !fir.logical<4> {bindc_name = "success", {{.*}}}
+
+! CHECK: ^[[EXIT]]:
+! CHECK-NEXT: return
diff --git a/flang/test/Parser/OpenMP/critical-unparse-with-symbols.f90 b/flang/test/Parser/OpenMP/critical-unparse-with-symbols.f90
index 4d0d93a..e5e7561 100644
--- a/flang/test/Parser/OpenMP/critical-unparse-with-symbols.f90
+++ b/flang/test/Parser/OpenMP/critical-unparse-with-symbols.f90
@@ -13,9 +13,9 @@ end
!UNPARSE: implicit none
!UNPARSE: !DEF: /f/x ObjectEntity INTEGER(4)
!UNPARSE: integer x
-!UNPARSE: !$omp critical (c)
+!UNPARSE: !$omp critical(c)
!UNPARSE: !REF: /f/x
!UNPARSE: x = 0
-!UNPARSE: !$omp end critical (c)
+!UNPARSE: !$omp end critical(c)
!UNPARSE: end subroutine
diff --git a/flang/test/Semantics/OpenMP/sync-critical01.f90 b/flang/test/Semantics/OpenMP/sync-critical01.f90
index b597eb1..01cc0ac 100644
--- a/flang/test/Semantics/OpenMP/sync-critical01.f90
+++ b/flang/test/Semantics/OpenMP/sync-critical01.f90
@@ -17,22 +17,22 @@ integer function timer_tick_sec()
!$OMP CRITICAL (foo)
t = t + 1
- !ERROR: CRITICAL directive names do not match
+ !ERROR: The names on CRITICAL and END CRITICAL must match
!$OMP END CRITICAL (bar)
!$OMP CRITICAL (bar)
t = t + 1
- !ERROR: CRITICAL directive names do not match
+ !ERROR: The names on CRITICAL and END CRITICAL must match
!$OMP END CRITICAL (foo)
- !ERROR: CRITICAL directive names do not match
+ !ERROR: Either both CRITICAL and END CRITICAL should have an argument, or none of them should
!$OMP CRITICAL (bar)
t = t + 1
!$OMP END CRITICAL
!$OMP CRITICAL
t = t + 1
- !ERROR: CRITICAL directive names do not match
+ !ERROR: Either both CRITICAL and END CRITICAL should have an argument, or none of them should
!$OMP END CRITICAL (foo)
timer_tick_sec = t
diff --git a/flang/test/Semantics/OpenMP/sync-critical02.f90 b/flang/test/Semantics/OpenMP/sync-critical02.f90
index 1fa9d6a..b77bd66 100644
--- a/flang/test/Semantics/OpenMP/sync-critical02.f90
+++ b/flang/test/Semantics/OpenMP/sync-critical02.f90
@@ -8,7 +8,7 @@
program sample
use omp_lib
integer i, j
- !ERROR: Hint clause other than omp_sync_hint_none cannot be specified for an unnamed CRITICAL directive
+ !ERROR: When HINT other than 'omp_sync_hint_none' is present, CRITICAL directive should have a name
!$omp critical hint(omp_lock_hint_speculative)
j = j + 1
!$omp end critical
@@ -17,7 +17,7 @@ program sample
i = i - 1
!$omp end critical (foo)
- !ERROR: Hint clause other than omp_sync_hint_none cannot be specified for an unnamed CRITICAL directive
+ !ERROR: When HINT other than 'omp_sync_hint_none' is present, CRITICAL directive should have a name
!$omp critical hint(omp_lock_hint_nonspeculative)
j = j + 1
!$omp end critical
@@ -26,7 +26,7 @@ program sample
i = i - 1
!$omp end critical (foo)
- !ERROR: Hint clause other than omp_sync_hint_none cannot be specified for an unnamed CRITICAL directive
+ !ERROR: When HINT other than 'omp_sync_hint_none' is present, CRITICAL directive should have a name
!$omp critical hint(omp_lock_hint_contended)
j = j + 1
!$omp end critical
@@ -35,7 +35,7 @@ program sample
i = i - 1
!$omp end critical (foo)
- !ERROR: Hint clause other than omp_sync_hint_none cannot be specified for an unnamed CRITICAL directive
+ !ERROR: When HINT other than 'omp_sync_hint_none' is present, CRITICAL directive should have a name
!$omp critical hint(omp_lock_hint_uncontended)
j = j + 1
!$omp end critical
diff --git a/flang/test/Transforms/stack-arrays-lifetime.fir b/flang/test/Transforms/stack-arrays-lifetime.fir
index 5b2faeb..960ce9f 100644
--- a/flang/test/Transforms/stack-arrays-lifetime.fir
+++ b/flang/test/Transforms/stack-arrays-lifetime.fir
@@ -39,15 +39,15 @@ func.func @_QPcst_alloca(%arg0: !fir.ref<!fir.array<100000xf32>> {fir.bindc_name
// CHECK-DAG: %[[VAL_0:.*]] = fir.alloca !fir.array<100000xf32> {bindc_name = ".tmp.array", fir.has_lifetime}
// CHECK-DAG: %[[VAL_2:.*]] = fir.alloca !fir.array<100000xi32> {bindc_name = ".tmp.array", fir.has_lifetime}
// CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_0]] : (!fir.ref<!fir.array<100000xf32>>) -> !llvm.ptr
-// CHECK: llvm.intr.lifetime.start 400000, %[[VAL_9]] : !llvm.ptr
+// CHECK: llvm.intr.lifetime.start %[[VAL_9]] : !llvm.ptr
// CHECK: fir.do_loop
// CHECK: fir.call @_QPbar(
-// CHECK: llvm.intr.lifetime.end 400000, %[[VAL_9]] : !llvm.ptr
+// CHECK: llvm.intr.lifetime.end %[[VAL_9]] : !llvm.ptr
// CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_2]] : (!fir.ref<!fir.array<100000xi32>>) -> !llvm.ptr
-// CHECK: llvm.intr.lifetime.start 400000, %[[VAL_17]] : !llvm.ptr
+// CHECK: llvm.intr.lifetime.start %[[VAL_17]] : !llvm.ptr
// CHECK: fir.do_loop
// CHECK: fir.call @_QPibar(
-// CHECK: llvm.intr.lifetime.end 400000, %[[VAL_17]] : !llvm.ptr
+// CHECK: llvm.intr.lifetime.end %[[VAL_17]] : !llvm.ptr
func.func @_QPdyn_alloca(%arg0: !fir.ref<!fir.array<?xf32>> {fir.bindc_name = "x"}, %arg1: !fir.ref<i64> {fir.bindc_name = "n"}) {
diff --git a/flang/tools/flang-driver/driver.cpp b/flang/tools/flang-driver/driver.cpp
index 8321b16..bd878b7 100644
--- a/flang/tools/flang-driver/driver.cpp
+++ b/flang/tools/flang-driver/driver.cpp
@@ -16,6 +16,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Driver/Driver.h"
+#include "flang/Config/config.h"
#include "flang/Frontend/CompilerInvocation.h"
#include "flang/Frontend/TextDiagnosticPrinter.h"
#include "clang/Basic/Diagnostic.h"
@@ -137,6 +138,7 @@ int main(int argc, const char **argv) {
llvm::sys::getDefaultTargetTriple(), diags,
"flang LLVM compiler");
theDriver.setTargetAndMode(targetandMode);
+ theDriver.setPreferredLinker(FLANG_DEFAULT_LINKER);
#ifdef FLANG_RUNTIME_F128_MATH_LIB
theDriver.setFlangF128MathLibrary(FLANG_RUNTIME_F128_MATH_LIB);
#endif
diff --git a/libc/config/baremetal/aarch64/entrypoints.txt b/libc/config/baremetal/aarch64/entrypoints.txt
index e24e2b9..82aa94b 100644
--- a/libc/config/baremetal/aarch64/entrypoints.txt
+++ b/libc/config/baremetal/aarch64/entrypoints.txt
@@ -757,9 +757,30 @@ endif()
list(APPEND TARGET_LIBM_ENTRYPOINTS
# bfloat16 entrypoints
+ libc.src.math.bf16add
+ libc.src.math.bf16addf
+ libc.src.math.bf16addl
+ libc.src.math.bf16sub
+ libc.src.math.bf16subf
+ libc.src.math.bf16subl
+ libc.src.math.ceilbf16
libc.src.math.fabsbf16
+ libc.src.math.floorbf16
+ libc.src.math.fmaxbf16
+ libc.src.math.fminbf16
+ libc.src.math.roundbf16
+ libc.src.math.roundevenbf16
+ libc.src.math.truncbf16
)
+if(LIBC_TYPES_HAS_FLOAT128)
+ list(APPEND TARGET_LIBM_ENTRYPOINTS
+ # math.h C++23 mixed bfloat16 and _Float128 entrypoints
+ libc.src.math.bf16addf128
+ libc.src.math.bf16subf128
+ )
+endif()
+
if(LIBC_COMPILER_HAS_FIXED_POINT)
list(APPEND TARGET_LIBM_ENTRYPOINTS
# stdfix.h _Fract and _Accum entrypoints
diff --git a/libc/config/baremetal/arm/entrypoints.txt b/libc/config/baremetal/arm/entrypoints.txt
index 44e9c3e..603a974 100644
--- a/libc/config/baremetal/arm/entrypoints.txt
+++ b/libc/config/baremetal/arm/entrypoints.txt
@@ -760,9 +760,30 @@ endif()
list(APPEND TARGET_LIBM_ENTRYPOINTS
# bfloat16 entrypoints
+ libc.src.math.bf16add
+ libc.src.math.bf16addf
+ libc.src.math.bf16addl
+ libc.src.math.bf16sub
+ libc.src.math.bf16subf
+ libc.src.math.bf16subl
+ libc.src.math.ceilbf16
libc.src.math.fabsbf16
+ libc.src.math.floorbf16
+ libc.src.math.fmaxbf16
+ libc.src.math.fminbf16
+ libc.src.math.roundbf16
+ libc.src.math.roundevenbf16
+ libc.src.math.truncbf16
)
+if(LIBC_TYPES_HAS_FLOAT128)
+ list(APPEND TARGET_LIBM_ENTRYPOINTS
+ # math.h C++23 mixed bfloat16 and _Float128 entrypoints
+ libc.src.math.bf16addf128
+ libc.src.math.bf16subf128
+ )
+endif()
+
if(LIBC_COMPILER_HAS_FIXED_POINT)
list(APPEND TARGET_LIBM_ENTRYPOINTS
# stdfix.h _Fract and _Accum entrypoints
diff --git a/libc/config/baremetal/riscv/entrypoints.txt b/libc/config/baremetal/riscv/entrypoints.txt
index 29cf322a..5c28a12 100644
--- a/libc/config/baremetal/riscv/entrypoints.txt
+++ b/libc/config/baremetal/riscv/entrypoints.txt
@@ -760,9 +760,30 @@ endif()
list(APPEND TARGET_LIBM_ENTRYPOINTS
# bfloat16 entrypoints
+ libc.src.math.bf16add
+ libc.src.math.bf16addf
+ libc.src.math.bf16addl
+ libc.src.math.bf16sub
+ libc.src.math.bf16subf
+ libc.src.math.bf16subl
+ libc.src.math.ceilbf16
libc.src.math.fabsbf16
+ libc.src.math.floorbf16
+ libc.src.math.fmaxbf16
+ libc.src.math.fminbf16
+ libc.src.math.roundbf16
+ libc.src.math.roundevenbf16
+ libc.src.math.truncbf16
)
+if(LIBC_TYPES_HAS_FLOAT128)
+ list(APPEND TARGET_LIBM_ENTRYPOINTS
+ # math.h C++23 mixed bfloat16 and _Float128 entrypoints
+ libc.src.math.bf16addf128
+ libc.src.math.bf16subf128
+ )
+endif()
+
if(LIBC_COMPILER_HAS_FIXED_POINT)
list(APPEND TARGET_LIBM_ENTRYPOINTS
# stdfix.h _Fract and _Accum entrypoints
diff --git a/libc/config/darwin/aarch64/entrypoints.txt b/libc/config/darwin/aarch64/entrypoints.txt
index 03e00a3..e293db4 100644
--- a/libc/config/darwin/aarch64/entrypoints.txt
+++ b/libc/config/darwin/aarch64/entrypoints.txt
@@ -590,9 +590,30 @@ endif()
list(APPEND TARGET_LIBM_ENTRYPOINTS
# bfloat16 entrypoints
+ libc.src.math.bf16add
+ libc.src.math.bf16addf
+ libc.src.math.bf16addl
+ libc.src.math.bf16sub
+ libc.src.math.bf16subf
+ libc.src.math.bf16subl
+ libc.src.math.ceilbf16
libc.src.math.fabsbf16
+ libc.src.math.floorbf16
+ libc.src.math.fmaxbf16
+ libc.src.math.fminbf16
+ libc.src.math.roundbf16
+ libc.src.math.roundevenbf16
+ libc.src.math.truncbf16
)
+if(LIBC_TYPES_HAS_FLOAT128)
+ list(APPEND TARGET_LIBM_ENTRYPOINTS
+ # math.h C++23 mixed bfloat16 and _Float128 entrypoints
+ libc.src.math.bf16addf128
+ libc.src.math.bf16subf128
+ )
+endif()
+
if(LIBC_COMPILER_HAS_FIXED_POINT)
list(APPEND TARGET_LIBM_ENTRYPOINTS
# stdfix.h _Fract and _Accum entrypoints
diff --git a/libc/config/darwin/x86_64/entrypoints.txt b/libc/config/darwin/x86_64/entrypoints.txt
index 00cedab..22c7932 100644
--- a/libc/config/darwin/x86_64/entrypoints.txt
+++ b/libc/config/darwin/x86_64/entrypoints.txt
@@ -233,7 +233,20 @@ set(TARGET_LIBM_ENTRYPOINTS
list(APPEND TARGET_LIBM_ENTRYPOINTS
# bfloat16 entrypoints
+ libc.src.math.bf16add
+ libc.src.math.bf16addf
+ libc.src.math.bf16addl
+ libc.src.math.bf16sub
+ libc.src.math.bf16subf
+ libc.src.math.bf16subl
+ libc.src.math.ceilbf16
libc.src.math.fabsbf16
+ libc.src.math.floorbf16
+ libc.src.math.fmaxbf16
+ libc.src.math.fminbf16
+ libc.src.math.roundbf16
+ libc.src.math.roundevenbf16
+ libc.src.math.truncbf16
)
set(TARGET_LLVMLIBC_ENTRYPOINTS
diff --git a/libc/config/gpu/amdgpu/entrypoints.txt b/libc/config/gpu/amdgpu/entrypoints.txt
index cf2b8c6..c466225 100644
--- a/libc/config/gpu/amdgpu/entrypoints.txt
+++ b/libc/config/gpu/amdgpu/entrypoints.txt
@@ -616,7 +616,20 @@ endif()
list(APPEND TARGET_LIBM_ENTRYPOINTS
# bfloat16 entrypoints
+ libc.src.math.bf16add
+ libc.src.math.bf16addf
+ libc.src.math.bf16addl
+ libc.src.math.bf16sub
+ libc.src.math.bf16subf
+ libc.src.math.bf16subl
+ libc.src.math.ceilbf16
libc.src.math.fabsbf16
+ libc.src.math.floorbf16
+ libc.src.math.fmaxbf16
+ libc.src.math.fminbf16
+ libc.src.math.roundbf16
+ libc.src.math.roundevenbf16
+ libc.src.math.truncbf16
)
set(TARGET_LLVMLIBC_ENTRYPOINTS
diff --git a/libc/config/gpu/nvptx/entrypoints.txt b/libc/config/gpu/nvptx/entrypoints.txt
index 55b27e6..30f3090 100644
--- a/libc/config/gpu/nvptx/entrypoints.txt
+++ b/libc/config/gpu/nvptx/entrypoints.txt
@@ -617,7 +617,20 @@ endif()
list(APPEND TARGET_LIBM_ENTRYPOINTS
# bfloat16 entrypoints
+ libc.src.math.bf16add
+ libc.src.math.bf16addf
+ libc.src.math.bf16addl
+ libc.src.math.bf16sub
+ libc.src.math.bf16subf
+ libc.src.math.bf16subl
+ libc.src.math.ceilbf16
libc.src.math.fabsbf16
+ libc.src.math.floorbf16
+ libc.src.math.fmaxbf16
+ libc.src.math.fminbf16
+ libc.src.math.roundbf16
+ libc.src.math.roundevenbf16
+ libc.src.math.truncbf16
)
set(TARGET_LLVMLIBC_ENTRYPOINTS
diff --git a/libc/config/linux/aarch64/entrypoints.txt b/libc/config/linux/aarch64/entrypoints.txt
index d76cdc2..154e1b1 100644
--- a/libc/config/linux/aarch64/entrypoints.txt
+++ b/libc/config/linux/aarch64/entrypoints.txt
@@ -844,9 +844,30 @@ endif()
list(APPEND TARGET_LIBM_ENTRYPOINTS
# bfloat16 entrypoints
+ libc.src.math.bf16add
+ libc.src.math.bf16addf
+ libc.src.math.bf16addl
+ libc.src.math.bf16sub
+ libc.src.math.bf16subf
+ libc.src.math.bf16subl
+ libc.src.math.ceilbf16
libc.src.math.fabsbf16
+ libc.src.math.floorbf16
+ libc.src.math.fmaxbf16
+ libc.src.math.fminbf16
+ libc.src.math.roundbf16
+ libc.src.math.roundevenbf16
+ libc.src.math.truncbf16
)
+if(LIBC_TYPES_HAS_FLOAT128)
+ list(APPEND TARGET_LIBM_ENTRYPOINTS
+ # math.h C++23 mixed bfloat16 and _Float128 entrypoints
+ libc.src.math.bf16addf128
+ libc.src.math.bf16subf128
+ )
+endif()
+
if(LLVM_LIBC_FULL_BUILD)
list(APPEND TARGET_LIBC_ENTRYPOINTS
# assert.h entrypoints
diff --git a/libc/config/linux/arm/entrypoints.txt b/libc/config/linux/arm/entrypoints.txt
index 813c34d..7b177fc 100644
--- a/libc/config/linux/arm/entrypoints.txt
+++ b/libc/config/linux/arm/entrypoints.txt
@@ -460,7 +460,20 @@ set(TARGET_LIBM_ENTRYPOINTS
list(APPEND TARGET_LIBM_ENTRYPOINTS
# bfloat16 entrypoints
+ libc.src.math.bf16add
+ libc.src.math.bf16addf
+ libc.src.math.bf16addl
+ libc.src.math.bf16sub
+ libc.src.math.bf16subf
+ libc.src.math.bf16subl
+ libc.src.math.ceilbf16
libc.src.math.fabsbf16
+ libc.src.math.floorbf16
+ libc.src.math.fmaxbf16
+ libc.src.math.fminbf16
+ libc.src.math.roundbf16
+ libc.src.math.roundevenbf16
+ libc.src.math.truncbf16
)
set(TARGET_LLVMLIBC_ENTRYPOINTS
diff --git a/libc/config/linux/riscv/entrypoints.txt b/libc/config/linux/riscv/entrypoints.txt
index 190aef7..5b0f90d 100644
--- a/libc/config/linux/riscv/entrypoints.txt
+++ b/libc/config/linux/riscv/entrypoints.txt
@@ -863,9 +863,30 @@ endif()
list(APPEND TARGET_LIBM_ENTRYPOINTS
# bfloat16 entrypoints
+ libc.src.math.bf16add
+ libc.src.math.bf16addf
+ libc.src.math.bf16addl
+ libc.src.math.bf16sub
+ libc.src.math.bf16subf
+ libc.src.math.bf16subl
+ libc.src.math.ceilbf16
libc.src.math.fabsbf16
+ libc.src.math.floorbf16
+ libc.src.math.fmaxbf16
+ libc.src.math.fminbf16
+ libc.src.math.roundbf16
+ libc.src.math.roundevenbf16
+ libc.src.math.truncbf16
)
+if(LIBC_TYPES_HAS_FLOAT128)
+ list(APPEND TARGET_LIBM_ENTRYPOINTS
+ # math.h C++23 mixed bfloat16 and _Float128 entrypoints
+ libc.src.math.bf16addf128
+ libc.src.math.bf16subf128
+ )
+endif()
+
if(LIBC_COMPILER_HAS_FIXED_POINT)
list(APPEND TARGET_LIBM_ENTRYPOINTS
# stdfix.h _Fract and _Accum entrypoints
diff --git a/libc/config/linux/x86_64/entrypoints.txt b/libc/config/linux/x86_64/entrypoints.txt
index ec41069..66fa3c5 100644
--- a/libc/config/linux/x86_64/entrypoints.txt
+++ b/libc/config/linux/x86_64/entrypoints.txt
@@ -893,12 +893,32 @@ if(LIBC_TYPES_HAS_FLOAT128)
)
endif()
-
list(APPEND TARGET_LIBM_ENTRYPOINTS
# bfloat16 entrypoints
+ libc.src.math.bf16add
+ libc.src.math.bf16addf
+ libc.src.math.bf16addl
+ libc.src.math.bf16sub
+ libc.src.math.bf16subf
+ libc.src.math.bf16subl
+ libc.src.math.ceilbf16
libc.src.math.fabsbf16
+ libc.src.math.floorbf16
+ libc.src.math.fmaxbf16
+ libc.src.math.fminbf16
+ libc.src.math.roundbf16
+ libc.src.math.roundevenbf16
+ libc.src.math.truncbf16
)
+if(LIBC_TYPES_HAS_FLOAT128)
+ list(APPEND TARGET_LIBM_ENTRYPOINTS
+ # math.h C++23 mixed bfloat16 and _Float128 entrypoints
+ libc.src.math.bf16addf128
+ libc.src.math.bf16subf128
+ )
+endif()
+
if(LIBC_COMPILER_HAS_FIXED_POINT)
list(APPEND TARGET_LIBM_ENTRYPOINTS
# stdfix.h _Fract and _Accum entrypoints
diff --git a/libc/config/windows/entrypoints.txt b/libc/config/windows/entrypoints.txt
index 3160d57..b4aa0a9 100644
--- a/libc/config/windows/entrypoints.txt
+++ b/libc/config/windows/entrypoints.txt
@@ -306,7 +306,20 @@ set(TARGET_LIBM_ENTRYPOINTS
list(APPEND TARGET_LIBM_ENTRYPOINTS
# bfloat16 entrypoints
+ libc.src.math.bf16add
+ libc.src.math.bf16addf
+ libc.src.math.bf16addl
+ libc.src.math.bf16sub
+ libc.src.math.bf16subf
+ libc.src.math.bf16subl
+ libc.src.math.ceilbf16
libc.src.math.fabsbf16
+ libc.src.math.floorbf16
+ libc.src.math.fmaxbf16
+ libc.src.math.fminbf16
+ libc.src.math.roundbf16
+ libc.src.math.roundevenbf16
+ libc.src.math.truncbf16
)
set(TARGET_LLVMLIBC_ENTRYPOINTS
diff --git a/libc/include/dlfcn.yaml b/libc/include/dlfcn.yaml
index db2893a..bf17a11 100644
--- a/libc/include/dlfcn.yaml
+++ b/libc/include/dlfcn.yaml
@@ -46,7 +46,7 @@ enums:
standards:
- gnu
value: 2
- - name: RTLD_DI_CONFIGADDR,
+ - name: RTLD_DI_CONFIGADDR
standards:
- gnu
value: 3
@@ -127,5 +127,5 @@ functions:
- POSIX
return_type: int
arguments:
- - type: const void *
- - type: Dl_info *
+ - type: const void *__restrict
+ - type: Dl_info *__restrict
diff --git a/libc/shared/math.h b/libc/shared/math.h
index 3714f38..a5581ed 100644
--- a/libc/shared/math.h
+++ b/libc/shared/math.h
@@ -31,6 +31,8 @@
#include "math/atanhf.h"
#include "math/atanhf16.h"
#include "math/cbrt.h"
+#include "math/cbrtf.h"
+#include "math/cos.h"
#include "math/erff.h"
#include "math/exp.h"
#include "math/exp10.h"
diff --git a/libc/shared/math/cbrtf.h b/libc/shared/math/cbrtf.h
new file mode 100644
index 0000000..09b86be
--- /dev/null
+++ b/libc/shared/math/cbrtf.h
@@ -0,0 +1,23 @@
+//===-- Shared cbrtf function -----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LIBC_SHARED_MATH_CBRTF_H
+#define LIBC_SHARED_MATH_CBRTF_H
+
+#include "shared/libc_common.h"
+#include "src/__support/math/cbrtf.h"
+
+namespace LIBC_NAMESPACE_DECL {
+namespace shared {
+
+using math::cbrtf;
+
+} // namespace shared
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LIBC_SHARED_MATH_CBRTF_H
diff --git a/libc/shared/math/cos.h b/libc/shared/math/cos.h
new file mode 100644
index 0000000..c498550
--- /dev/null
+++ b/libc/shared/math/cos.h
@@ -0,0 +1,23 @@
+//===-- Shared cos function -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SHARED_MATH_COS_H
+#define LLVM_LIBC_SHARED_MATH_COS_H
+
+#include "shared/libc_common.h"
+#include "src/__support/math/cos.h"
+
+namespace LIBC_NAMESPACE_DECL {
+namespace shared {
+
+using math::cos;
+
+} // namespace shared
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SHARED_MATH_COS_H
diff --git a/libc/shared/rpc.h b/libc/shared/rpc.h
index 7295efd..dac2a79 100644
--- a/libc/shared/rpc.h
+++ b/libc/shared/rpc.h
@@ -551,6 +551,9 @@ template <uint32_t opcode> RPC_ATTRS Client::Port Client::open() {
/// port if it has a pending receive operation
RPC_ATTRS rpc::optional<typename Server::Port>
Server::try_open(uint32_t lane_size, uint32_t start) {
+ if (rpc::get_lane_id() >= lane_size)
+ return rpc::nullopt;
+
// Perform a naive linear scan for a port that has a pending request.
for (uint32_t index = start; index < process.port_count; ++index) {
uint64_t lane_mask = rpc::get_lane_mask();
diff --git a/libc/src/__support/GPU/CMakeLists.txt b/libc/src/__support/GPU/CMakeLists.txt
index f8fdfeb..72a7879 100644
--- a/libc/src/__support/GPU/CMakeLists.txt
+++ b/libc/src/__support/GPU/CMakeLists.txt
@@ -9,6 +9,12 @@ add_header_library(
utils.h
)
+add_header_library(
+ fixedstack
+ HDRS
+ fixedstack.h
+)
+
add_object_library(
allocator
SRCS
@@ -23,4 +29,5 @@ add_object_library(
libc.src.__support.CPP.bit
libc.src.__support.CPP.new
.utils
+ .fixedstack
)
diff --git a/libc/src/__support/GPU/allocator.cpp b/libc/src/__support/GPU/allocator.cpp
index 250bebd..534a309 100644
--- a/libc/src/__support/GPU/allocator.cpp
+++ b/libc/src/__support/GPU/allocator.cpp
@@ -20,6 +20,7 @@
#include "src/__support/CPP/atomic.h"
#include "src/__support/CPP/bit.h"
#include "src/__support/CPP/new.h"
+#include "src/__support/GPU/fixedstack.h"
#include "src/__support/GPU/utils.h"
#include "src/__support/RPC/rpc_client.h"
#include "src/__support/threads/sleep.h"
@@ -39,6 +40,9 @@ constexpr static uint32_t MIN_ALIGNMENT = MIN_SIZE - 1;
// The number of times to attempt claiming an in-progress slab allocation.
constexpr static uint32_t MAX_TRIES = 1024;
+// The number of previously allocated slabs we will keep in memory.
+constexpr static uint32_t CACHED_SLABS = 8;
+
static_assert(!(ARRAY_SIZE & (ARRAY_SIZE - 1)), "Must be a power of two");
namespace impl {
@@ -185,20 +189,35 @@ struct Slab {
struct alignas(MIN_SIZE) Header {
uint32_t chunk_size;
uint32_t global_index;
+ uint32_t cached_chunk_size;
};
// Initialize the slab with its chunk size and index in the global table for
// use when freeing.
Slab(uint32_t chunk_size, uint32_t global_index) {
Header *header = reinterpret_cast<Header *>(memory);
+ header->cached_chunk_size = cpp::numeric_limits<uint32_t>::max();
header->chunk_size = chunk_size;
header->global_index = global_index;
}
+  // Reset the memory with a new index and chunk size. Not thread safe.
+ Slab *reset(uint32_t chunk_size, uint32_t global_index) {
+ Header *header = reinterpret_cast<Header *>(memory);
+ header->cached_chunk_size = header->chunk_size;
+ header->chunk_size = chunk_size;
+ header->global_index = global_index;
+ return this;
+ }
+
// Set the necessary bitfield bytes to zero in parallel using many lanes. This
// must be called before the bitfield can be accessed safely, memory is not
// guaranteed to be zero initialized in the current implementation.
void initialize(uint64_t uniform) {
+    // If this is a re-used slab, the memory is already set to zero.
+ if (get_cached_chunk_size() <= get_chunk_size())
+ return;
+
uint32_t size = (bitfield_bytes(get_chunk_size()) + sizeof(uint32_t) - 1) /
sizeof(uint32_t);
impl::uniform_memset(get_bitfield(), 0, size, uniform);
@@ -236,6 +255,11 @@ struct Slab {
return reinterpret_cast<const Header *>(memory)->chunk_size;
}
+ // Get the chunk size that was previously used.
+ uint32_t get_cached_chunk_size() const {
+ return reinterpret_cast<const Header *>(memory)->cached_chunk_size;
+ }
+
// Get the location in the memory where we will store the global index.
uint32_t get_global_index() const {
return reinterpret_cast<const Header *>(memory)->global_index;
@@ -337,6 +361,9 @@ struct Slab {
uint8_t memory[SLAB_SIZE];
};
+// A global cache of previously allocated slabs for efficient reuse.
+static FixedStack<Slab *, CACHED_SLABS> slab_cache;
+
/// A wait-free guard around a pointer resource to be created dynamically if
/// space is available and freed once there are no more users.
struct GuardPtr {
@@ -408,6 +435,11 @@ private:
reinterpret_cast<Slab *>(cpp::numeric_limits<uintptr_t>::max()),
cpp::MemoryOrder::RELAXED, cpp::MemoryOrder::RELAXED)) {
count = cpp::numeric_limits<uint32_t>::max();
+
+ Slab *cached = nullptr;
+ if (slab_cache.pop(cached))
+ return cached->reset(cpp::forward<Args>(args)...);
+
void *raw = impl::rpc_allocate(sizeof(Slab));
if (!raw)
return nullptr;
@@ -475,8 +507,10 @@ public:
if (gpu::get_lane_id() == uint32_t(cpp::countr_zero(mask)) &&
ref.release(cpp::popcount(mask))) {
Slab *p = ptr.load(cpp::MemoryOrder::RELAXED);
- p->~Slab();
- impl::rpc_free(p);
+ if (!slab_cache.push(p)) {
+ p->~Slab();
+ impl::rpc_free(p);
+ }
cpp::atomic_thread_fence(cpp::MemoryOrder::RELEASE);
ptr.store(nullptr, cpp::MemoryOrder::RELAXED);
}
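
The allocator change above is a free-list cache placed in front of an expensive allocation path: acquisition tries the cache before falling back to impl::rpc_allocate, and release prefers handing the slab back to the cache over impl::rpc_free. A minimal, single-threaded C++ sketch of that pattern follows, assuming a plain std::vector stands in for the lock-free FixedStack and ordinary new/delete stand in for the RPC calls; the names are illustrative, not the actual implementation.

// Hypothetical sketch: reuse-before-allocate, cache-before-free.
#include <cstddef>
#include <vector>

struct Slab {}; // stands in for the allocator's Slab type

static std::vector<Slab *> slab_cache;  // stand-in for the lock-free FixedStack
constexpr std::size_t kCachedSlabs = 8; // mirrors CACHED_SLABS in the patch

Slab *acquire_slab() {
  if (!slab_cache.empty()) { // fast path: reuse a previously released slab
    Slab *s = slab_cache.back();
    slab_cache.pop_back();
    return s; // the real code calls reset(chunk_size, index) here
  }
  return new Slab(); // slow path, analogous to impl::rpc_allocate
}

void release_slab(Slab *s) {
  if (slab_cache.size() < kCachedSlabs) { // keep a bounded number for reuse
    slab_cache.push_back(s);
    return;
  }
  delete s; // cache is full, analogous to impl::rpc_free
}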
diff --git a/libc/src/__support/GPU/fixedstack.h b/libc/src/__support/GPU/fixedstack.h
new file mode 100644
index 0000000..6ceaa2f
--- /dev/null
+++ b/libc/src/__support/GPU/fixedstack.h
@@ -0,0 +1,111 @@
+//===-- A lock-free data structure for a fixed capacity stack ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC___SUPPORT_GPU_FIXEDSTACK_H
+#define LLVM_LIBC_SRC___SUPPORT_GPU_FIXEDSTACK_H
+
+#include "src/__support/CPP/atomic.h"
+#include "src/__support/threads/sleep.h"
+
+#include <stdint.h>
+
+namespace LIBC_NAMESPACE_DECL {
+
+// A fixed-size stack backed by an underlying array of data. It supports
+// concurrent push and pop operations in a completely lock-free manner.
+template <typename T, uint32_t CAPACITY> struct alignas(16) FixedStack {
+  // The index is stored as a 20-bit value and cannot address anything larger.
+ static_assert(CAPACITY < 1024 * 1024, "Invalid buffer size");
+
+  // The head of the free and used stacks. Represented as a 20-bit index combined
+ // with a 44-bit ABA tag that is updated in a single atomic operation.
+ uint64_t free;
+ uint64_t used;
+
+ // The stack is a linked list of indices into the underlying data
+ uint32_t next[CAPACITY];
+ T data[CAPACITY];
+
+ // Get the 20-bit index into the underlying array from the head.
+ LIBC_INLINE static constexpr uint32_t get_node(uint64_t head) {
+ return static_cast<uint32_t>(head & 0xfffff);
+ }
+
+ // Increment the old ABA tag and merge it into the new index.
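+  // Illustrative example (hypothetical values): if orig packs tag 5 and
+  // node 7, i.e. orig = (5 << 20) | 7, then make_head(orig, 3) yields
+  // (6 << 20) | 3; the bumped tag makes a stale compare-and-swap fail even
+  // if node 3 has been recycled in the meantime.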
+ LIBC_INLINE static constexpr uint64_t make_head(uint64_t orig,
+ uint32_t node) {
+ return static_cast<uint64_t>(node) | (((orig >> 20ul) + 1ul) << 20ul);
+ }
+
+ // Attempts to pop data from the given stack by making it point to the next
+ // node. We repeatedly attempt to write to the head using compare-and-swap,
+ // expecting that it has not been changed by any other thread.
+ LIBC_INLINE uint32_t pop_impl(cpp::AtomicRef<uint64_t> head) {
+ uint64_t orig = head.load(cpp::MemoryOrder::RELAXED);
+
+ for (;;) {
+ if (get_node(orig) == CAPACITY)
+ return CAPACITY;
+
+ uint32_t node =
+ cpp::AtomicRef(next[get_node(orig)]).load(cpp::MemoryOrder::RELAXED);
+ if (head.compare_exchange_strong(orig, make_head(orig, node),
+ cpp::MemoryOrder::ACQUIRE,
+ cpp::MemoryOrder::RELAXED))
+ break;
+ }
+ return get_node(orig);
+ }
+
+ // Attempts to push data to the given stack by making it point to the new
+ // node. We repeatedly attempt to write to the head using compare-and-swap,
+ // expecting that it has not been changed by any other thread.
+ LIBC_INLINE uint32_t push_impl(cpp::AtomicRef<uint64_t> head, uint32_t node) {
+ uint64_t orig = head.load(cpp::MemoryOrder::RELAXED);
+ for (;;) {
+ next[node] = get_node(orig);
+ if (head.compare_exchange_strong(orig, make_head(orig, node),
+ cpp::MemoryOrder::RELEASE,
+ cpp::MemoryOrder::RELAXED))
+ break;
+ }
+ return get_node(head.load(cpp::MemoryOrder::RELAXED));
+ }
+
+public:
+ // Initialize the free stack to be full and the used stack to be empty. We use
+ // the capacity of the stack as a sentinel value.
+ LIBC_INLINE constexpr FixedStack() : free(0), used(CAPACITY), data{} {
+ for (uint32_t i = 0; i < CAPACITY; ++i)
+ next[i] = i + 1;
+ }
+
+ LIBC_INLINE bool push(const T &val) {
+ uint32_t node = pop_impl(cpp::AtomicRef(free));
+ if (node == CAPACITY)
+ return false;
+
+ data[node] = val;
+ push_impl(cpp::AtomicRef(used), node);
+ return true;
+ }
+
+ LIBC_INLINE bool pop(T &val) {
+ uint32_t node = pop_impl(cpp::AtomicRef(used));
+ if (node == CAPACITY)
+ return false;
+
+ val = data[node];
+ push_impl(cpp::AtomicRef(free), node);
+ return true;
+ }
+};
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC___SUPPORT_GPU_FIXEDSTACK_H
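
For reference, a small usage sketch of the new FixedStack interface, assuming it is compiled as part of the libc build where LIBC_NAMESPACE and the CPP atomic headers are available; the element type and capacity below are illustrative.

// Hypothetical usage of FixedStack<T, CAPACITY>: push() returns false once
// the free list is exhausted, pop() returns false once the stack is empty.
#include "src/__support/GPU/fixedstack.h"

static LIBC_NAMESPACE::FixedStack<int, 4> stack;

bool roundtrip() {
  for (int i = 0; i < 4; ++i)
    if (!stack.push(i)) // a fifth push would return false
      return false;
  int value = 0;
  int count = 0;
  while (stack.pop(value)) // drains all four entries
    ++count;
  return count == 4;
}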
diff --git a/libc/src/__support/math/CMakeLists.txt b/libc/src/__support/math/CMakeLists.txt
index e1076ed..bf4db4e 100644
--- a/libc/src/__support/math/CMakeLists.txt
+++ b/libc/src/__support/math/CMakeLists.txt
@@ -347,6 +347,35 @@ add_header_library(
)
add_header_library(
+ cbrtf
+ HDRS
+ cbrtf.h
+ DEPENDS
+ libc.src.__support.FPUtil.fenv_impl
+ libc.src.__support.FPUtil.fp_bits
+ libc.src.__support.FPUtil.multiply_add
+ libc.src.__support.macros.optimization
+)
+
+add_header_library(
+ cos
+ HDRS
+ cos.h
+ DEPENDS
+ libc.hdr.errno_macros
+ libc.src.errno.errno
+ libc.src.__support.FPUtil.double_double
+ libc.src.__support.FPUtil.dyadic_float
+ libc.src.__support.FPUtil.except_value_utils
+ libc.src.__support.FPUtil.fenv_impl
+ libc.src.__support.FPUtil.fp_bits
+ libc.src.__support.math.range_reduction_double
+ libc.src.__support.math.sincos_eval
+ libc.src.__support.macros.optimization
+)
+
+
+add_header_library(
erff
HDRS
erff.h
@@ -602,3 +631,32 @@ add_header_library(
libc.src.__support.macros.optimization
libc.src.__support.macros.properties.cpu_features
)
+
+add_header_library(
+ range_reduction_double
+ HDRS
+ range_reduction_double_common.h
+ range_reduction_double_fma.h
+ range_reduction_double_nofma.h
+ DEPENDS
+ libc.src.__support.FPUtil.double_double
+ libc.src.__support.FPUtil.dyadic_float
+ libc.src.__support.FPUtil.fp_bits
+ libc.src.__support.FPUtil.fma
+ libc.src.__support.FPUtil.multiply_add
+ libc.src.__support.FPUtil.nearest_integer
+ libc.src.__support.common
+ libc.src.__support.integer_literals
+)
+
+add_header_library(
+ sincos_eval
+ HDRS
+ sincos_eval.h
+ DEPENDS
+ libc.src.__support.FPUtil.double_double
+ libc.src.__support.FPUtil.dyadic_float
+ libc.src.__support.FPUtil.multiply_add
+ libc.src.__support.FPUtil.polyeval
+ libc.src.__support.integer_literals
+)
diff --git a/libc/src/__support/math/cbrtf.h b/libc/src/__support/math/cbrtf.h
new file mode 100644
index 0000000..f82892b
--- /dev/null
+++ b/libc/src/__support/math/cbrtf.h
@@ -0,0 +1,161 @@
+//===-- Implementation header for cbrtf -------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LIBC_SRC___SUPPORT_MATH_CBRTF_H
+#define LIBC_SRC___SUPPORT_MATH_CBRTF_H
+
+#include "src/__support/FPUtil/FEnvImpl.h"
+#include "src/__support/FPUtil/FPBits.h"
+#include "src/__support/FPUtil/multiply_add.h"
+#include "src/__support/macros/config.h"
+#include "src/__support/macros/optimization.h" // LIBC_UNLIKELY
+
+namespace LIBC_NAMESPACE_DECL {
+
+namespace math {
+
+LIBC_INLINE static constexpr float cbrtf(float x) {
+ // Look up table for 2^(i/3) for i = 0, 1, 2.
+ constexpr double CBRT2[3] = {1.0, 0x1.428a2f98d728bp0, 0x1.965fea53d6e3dp0};
+
+ // Degree-7 polynomials approximation of ((1 + x)^(1/3) - 1)/x for 0 <= x <= 1
+ // generated by Sollya with:
+ // > for i from 0 to 15 do {
+ // P = fpminimax(((1 + x)^(1/3) - 1)/x, 6, [|D...|], [i/16, (i + 1)/16]);
+ // print("{", coeff(P, 0), ",", coeff(P, 1), ",", coeff(P, 2), ",",
+ // coeff(P, 3), ",", coeff(P, 4), ",", coeff(P, 5), ",",
+ // coeff(P, 6), "},");
+ // };
+ // Then (1 + x)^(1/3) ~ 1 + x * P(x).
+ constexpr double COEFFS[16][7] = {
+ {0x1.55555555554ebp-2, -0x1.c71c71c678c0cp-4, 0x1.f9add2776de81p-5,
+ -0x1.511e10aa964a7p-5, 0x1.ee44165937fa2p-6, -0x1.7c5c9e059345dp-6,
+ 0x1.047f75e0aff14p-6},
+ {0x1.5555554d1149ap-2, -0x1.c71c676fcb5bp-4, 0x1.f9ab127dc57ebp-5,
+ -0x1.50ea8fd1d4c15p-5, 0x1.e9d68f28ced43p-6, -0x1.60e0e1e661311p-6,
+ 0x1.716eca1d6e3bcp-7},
+ {0x1.5555546377d45p-2, -0x1.c71bc1c6d49d2p-4, 0x1.f9924cc0ed24dp-5,
+ -0x1.4fea3beb53b3bp-5, 0x1.de028a9a07b1bp-6, -0x1.3b090d2233524p-6,
+ 0x1.0aeca34893785p-7},
+ {0x1.55554dce9f649p-2, -0x1.c7188b34b98f8p-4, 0x1.f93e1af34af49p-5,
+ -0x1.4d9a06be75c63p-5, 0x1.cb943f4f68992p-6, -0x1.139a685a5e3c4p-6,
+ 0x1.88410674c6a5dp-8},
+ {0x1.5555347d211c3p-2, -0x1.c70f2a4b1a5fap-4, 0x1.f88420e8602c3p-5,
+ -0x1.49becfa4ed3ep-5, 0x1.b475cd9013162p-6, -0x1.dcfee1dd2f8efp-7,
+ 0x1.249bb51a1c498p-8},
+ {0x1.5554f01b33dbap-2, -0x1.c6facb929dbf1p-4, 0x1.f73fb7861252ep-5,
+ -0x1.4459a4a0071fap-5, 0x1.9a8df2b504fc2p-6, -0x1.9a7ce3006d06ep-7,
+ 0x1.ba9230918fa2ep-9},
+ {0x1.55545c695db5fp-2, -0x1.c6d6089f20275p-4, 0x1.f556e0ea80efp-5,
+ -0x1.3d91372d083f4p-5, 0x1.7f66cff331f4p-6, -0x1.606a562491737p-7,
+ 0x1.52e3e17c71069p-9},
+ {0x1.55534a879232ap-2, -0x1.c69b836998b84p-4, 0x1.f2bb26dac0e4cp-5,
+ -0x1.359eed43716d7p-5, 0x1.64218cd824fbcp-6, -0x1.2e703e2e091e8p-7,
+ 0x1.0677d9af6aad4p-9},
+ {0x1.5551836bb5494p-2, -0x1.c64658c15353bp-4, 0x1.ef68517451a6ep-5,
+ -0x1.2cc20a980dceep-5, 0x1.49843e0fad93ap-6, -0x1.03c59ccb68e54p-7,
+ 0x1.9ad325dc7adcbp-10},
+ {0x1.554ecacb0d035p-2, -0x1.c5d2664026ffcp-4, 0x1.eb624796ba809p-5,
+ -0x1.233803d19a535p-5, 0x1.300decb1c3c28p-6, -0x1.befe18031ec3dp-8,
+ 0x1.449f5ee175c69p-10},
+ {0x1.554ae1f5ae815p-2, -0x1.c53c6b14ff6b2p-4, 0x1.e6b2d5127bb5bp-5,
+ -0x1.19387336788a3p-5, 0x1.180955a6ab255p-6, -0x1.81696703ba369p-8,
+ 0x1.02cb36389bd79p-10},
+ {0x1.55458a59f356ep-2, -0x1.c4820dd631ae9p-4, 0x1.e167af818bd15p-5,
+ -0x1.0ef35f6f72e52p-5, 0x1.019c33b65e4ebp-6, -0x1.4d25bdd52d3a5p-8,
+ 0x1.a008ae91f5936p-11},
+ {0x1.553e878eafee1p-2, -0x1.c3a1d0b2a3db2p-4, 0x1.db90d8ed9f89bp-5,
+ -0x1.0490e20f1ae91p-5, 0x1.d9a5d1fc42fe3p-7, -0x1.20bf8227c2abfp-8,
+ 0x1.50f8174cdb6e9p-11},
+ {0x1.5535a0dedf1b1p-2, -0x1.c29afb8bd01a1p-4, 0x1.d53f6371c1e27p-5,
+ -0x1.f463209b433e2p-6, 0x1.b35222a17e44p-7, -0x1.f5efbf505e133p-9,
+ 0x1.12e0e94e8586dp-11},
+ {0x1.552aa25e57bfdp-2, -0x1.c16d811e4acadp-4, 0x1.ce8489b47aa51p-5,
+ -0x1.dfde7ff758ea8p-6, 0x1.901f43aac38c8p-7, -0x1.b581d07df5ad5p-9,
+ 0x1.c3726535f1fc6p-12},
+ {0x1.551d5d9b204d3p-2, -0x1.c019e328f8db1p-4, 0x1.c7710f44fc3cep-5,
+ -0x1.cbbbe25ea8ba4p-6, 0x1.6fe270088623dp-7, -0x1.7e6fc79733761p-9,
+ 0x1.75077abf18d84p-12},
+ };
+
+ using FloatBits = typename fputil::FPBits<float>;
+ using DoubleBits = typename fputil::FPBits<double>;
+
+ FloatBits x_bits(x);
+
+ uint32_t x_abs = x_bits.uintval() & 0x7fff'ffff;
+ uint32_t sign_bit = (x_bits.uintval() >> 31) << DoubleBits::EXP_LEN;
+
+ if (LIBC_UNLIKELY(x == 0.0f || x_abs >= 0x7f80'0000)) {
+ // x is 0, Inf, or NaN.
+ // Make sure it works for FTZ/DAZ modes.
+ return x + x;
+ }
+
+ double xd = static_cast<double>(x);
+ DoubleBits xd_bits(xd);
+
+ // When using biased exponent of x in double precision,
+ // x_e = real_exponent_of_x + 1023
+ // Then:
+ // x_e / 3 = real_exponent_of_x / 3 + 1023/3
+ // = real_exponent_of_x / 3 + 341
+ // So to make it the correct biased exponent of x^(1/3), we add
+ // 1023 - 341 = 682
+ // to the quotient x_e / 3.
+ unsigned x_e = static_cast<unsigned>(xd_bits.get_biased_exponent());
+ unsigned out_e = (x_e / 3 + 682) | sign_bit;
+ unsigned shift_e = x_e % 3;
+
+ // Set x_m = 2^(x_e % 3) * (1.mantissa)
+ uint64_t x_m = xd_bits.get_mantissa();
+ // Use the leading 4 bits for look up table
+ unsigned idx = static_cast<unsigned>(x_m >> (DoubleBits::FRACTION_LEN - 4));
+
+ x_m |= static_cast<uint64_t>(DoubleBits::EXP_BIAS)
+ << DoubleBits::FRACTION_LEN;
+
+ double x_reduced = DoubleBits(x_m).get_val();
+ double dx = x_reduced - 1.0;
+
+ double dx_sq = dx * dx;
+ double c0 = fputil::multiply_add(dx, COEFFS[idx][0], 1.0);
+ double c1 = fputil::multiply_add(dx, COEFFS[idx][2], COEFFS[idx][1]);
+ double c2 = fputil::multiply_add(dx, COEFFS[idx][4], COEFFS[idx][3]);
+ double c3 = fputil::multiply_add(dx, COEFFS[idx][6], COEFFS[idx][5]);
+
+ double dx_4 = dx_sq * dx_sq;
+ double p0 = fputil::multiply_add(dx_sq, c1, c0);
+ double p1 = fputil::multiply_add(dx_sq, c3, c2);
+
+ double r = fputil::multiply_add(dx_4, p1, p0) * CBRT2[shift_e];
+
+ uint64_t r_m = DoubleBits(r).get_mantissa();
+ // Check if the output is exact. To be exact, the smallest 1-bit of the
+ // output has to be at least 2^-7 or higher. So we check the lowest 44 bits
+ // to see if they are within 2^(-52 + 3) errors from all zeros, then the
+ // result cube root is exact.
+ if (LIBC_UNLIKELY(((r_m + 8) & 0xfffffffffff) <= 16)) {
+ if ((r_m & 0xfffffffffff) <= 8)
+ r_m &= 0xffff'ffff'ffff'ffe0;
+ else
+ r_m = (r_m & 0xffff'ffff'ffff'ffe0) + 0x20;
+ fputil::clear_except_if_required(FE_INEXACT);
+ }
+ // Adjust exponent and sign.
+ uint64_t r_bits =
+ r_m | (static_cast<uint64_t>(out_e) << DoubleBits::FRACTION_LEN);
+
+ return static_cast<float>(DoubleBits(r_bits).get_val());
+}
+
+} // namespace math
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LIBC_SRC___SUPPORT_MATH_CBRTF_H
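A quick illustration of the exponent arithmetic in math::cbrtf above (a standalone sketch, not part of the patch; it relies only on <cmath> and the constants quoted in the header):

```c++
// Worked example: for x = 8.0 the double-precision biased exponent is
// x_e = 3 + 1023 = 1026. Dividing by 3 and re-biasing with +682 yields the
// biased exponent of cbrt(8.0) = 2.0, and x_e % 3 selects CBRT2[0] = 1.0.
#include <cassert>
#include <cmath>
#include <cstdio>

int main() {
  unsigned x_e = 3 + 1023;        // biased exponent of 8.0 in double
  unsigned out_e = x_e / 3 + 682; // 1026 / 3 = 342, 342 + 682 = 1024
  unsigned shift_e = x_e % 3;     // 0, so no extra 2^(1/3) factor is needed
  assert(out_e == 1 + 1023);      // biased exponent of 2.0
  assert(shift_e == 0);
  std::printf("cbrt(8.0) = %g\n", std::cbrt(8.0)); // sanity check: 2
  return 0;
}
```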
diff --git a/libc/src/__support/math/cos.h b/libc/src/__support/math/cos.h
new file mode 100644
index 0000000..0802f9e
--- /dev/null
+++ b/libc/src/__support/math/cos.h
@@ -0,0 +1,173 @@
+//===-- Implementation header for cos ---------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LIBC_SRC___SUPPORT_MATH_COS_H
+#define LIBC_SRC___SUPPORT_MATH_COS_H
+
+#include "range_reduction_double_common.h"
+#include "sincos_eval.h"
+#include "src/__support/FPUtil/FEnvImpl.h"
+#include "src/__support/FPUtil/FPBits.h"
+#include "src/__support/FPUtil/double_double.h"
+#include "src/__support/FPUtil/dyadic_float.h"
+#include "src/__support/FPUtil/except_value_utils.h"
+#include "src/__support/macros/config.h"
+#include "src/__support/macros/optimization.h" // LIBC_UNLIKELY
+#include "src/__support/macros/properties/cpu_features.h" // LIBC_TARGET_CPU_HAS_FMA
+
+#ifdef LIBC_TARGET_CPU_HAS_FMA_DOUBLE
+#include "range_reduction_double_fma.h"
+#else
+#include "range_reduction_double_nofma.h"
+#endif // LIBC_TARGET_CPU_HAS_FMA_DOUBLE
+
+namespace LIBC_NAMESPACE_DECL {
+
+namespace math {
+
+LIBC_INLINE static constexpr double cos(double x) {
+ using namespace range_reduction_double_internal;
+ using DoubleDouble = fputil::DoubleDouble;
+ using FPBits = typename fputil::FPBits<double>;
+ FPBits xbits(x);
+
+ uint16_t x_e = xbits.get_biased_exponent();
+
+ DoubleDouble y;
+ unsigned k = 0;
+ LargeRangeReduction range_reduction_large;
+
+ // |x| < 2^16.
+ if (LIBC_LIKELY(x_e < FPBits::EXP_BIAS + FAST_PASS_EXPONENT)) {
+ // |x| < 2^-7
+ if (LIBC_UNLIKELY(x_e < FPBits::EXP_BIAS - 7)) {
+ // |x| < 2^-27
+ if (LIBC_UNLIKELY(x_e < FPBits::EXP_BIAS - 27)) {
+ // Signed zeros.
+ if (LIBC_UNLIKELY(x == 0.0))
+ return 1.0;
+
+ // For |x| < 2^-27, |cos(x) - 1| < |x|^2/2 < 2^-54 = ulp(1 - 2^-53)/2.
+ return fputil::round_result_slightly_down(1.0);
+ }
+ // No range reduction needed.
+ k = 0;
+ y.lo = 0.0;
+ y.hi = x;
+ } else {
+ // Small range reduction.
+ k = range_reduction_small(x, y);
+ }
+ } else {
+ // Inf or NaN
+ if (LIBC_UNLIKELY(x_e > 2 * FPBits::EXP_BIAS)) {
+ if (xbits.is_signaling_nan()) {
+ fputil::raise_except_if_required(FE_INVALID);
+ return FPBits::quiet_nan().get_val();
+ }
+ // cos(+-Inf) = NaN
+ if (xbits.get_mantissa() == 0) {
+ fputil::set_errno_if_required(EDOM);
+ fputil::raise_except_if_required(FE_INVALID);
+ }
+ return x + FPBits::quiet_nan().get_val();
+ }
+
+ // Large range reduction.
+ k = range_reduction_large.fast(x, y);
+ }
+
+ DoubleDouble sin_y, cos_y;
+
+ [[maybe_unused]] double err =
+ math::sincos_eval_internal::sincos_eval(y, sin_y, cos_y);
+
+ // Look up sin(k * pi/128) and cos(k * pi/128)
+#ifdef LIBC_MATH_HAS_SMALL_TABLES
+ // Memory saving versions. Use 65-entry table.
+ auto get_idx_dd = [](unsigned kk) -> DoubleDouble {
+ unsigned idx = (kk & 64) ? 64 - (kk & 63) : (kk & 63);
+ DoubleDouble ans = SIN_K_PI_OVER_128[idx];
+ if (kk & 128) {
+ ans.hi = -ans.hi;
+ ans.lo = -ans.lo;
+ }
+ return ans;
+ };
+ DoubleDouble msin_k = get_idx_dd(k + 128);
+ DoubleDouble cos_k = get_idx_dd(k + 64);
+#else
+ // Fast look up version, but needs 256-entry table.
+ // -sin(k * pi/128) = sin((k + 128) * pi/128)
+ // cos(k * pi/128) = sin(k * pi/128 + pi/2) = sin((k + 64) * pi/128).
+ DoubleDouble msin_k = SIN_K_PI_OVER_128[(k + 128) & 255];
+ DoubleDouble cos_k = SIN_K_PI_OVER_128[(k + 64) & 255];
+#endif // LIBC_MATH_HAS_SMALL_TABLES
+
+ // After range reduction, k = round(x * 128 / pi) and y = x - k * (pi / 128).
+ // So k is an integer and -pi / 256 <= y <= pi / 256.
+  // Then cos(x) = cos(k * pi/128 + y)
+ // = cos(y) * cos(k*pi/128) - sin(y) * sin(k*pi/128)
+ DoubleDouble cos_k_cos_y = fputil::quick_mult(cos_y, cos_k);
+ DoubleDouble msin_k_sin_y = fputil::quick_mult(sin_y, msin_k);
+
+ DoubleDouble rr = fputil::exact_add<false>(cos_k_cos_y.hi, msin_k_sin_y.hi);
+ rr.lo += msin_k_sin_y.lo + cos_k_cos_y.lo;
+
+#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS
+ return rr.hi + rr.lo;
+#else
+ using Float128 = typename fputil::DyadicFloat<128>;
+ double rlp = rr.lo + err;
+ double rlm = rr.lo - err;
+
+ double r_upper = rr.hi + rlp; // (rr.lo + ERR);
+ double r_lower = rr.hi + rlm; // (rr.lo - ERR);
+
+ // Ziv's rounding test.
+ if (LIBC_LIKELY(r_upper == r_lower))
+ return r_upper;
+
+ Float128 u_f128, sin_u, cos_u;
+ if (LIBC_LIKELY(x_e < FPBits::EXP_BIAS + FAST_PASS_EXPONENT))
+ u_f128 = range_reduction_small_f128(x);
+ else
+ u_f128 = range_reduction_large.accurate();
+
+ math::sincos_eval_internal::sincos_eval(u_f128, sin_u, cos_u);
+
+ auto get_sin_k = [](unsigned kk) -> Float128 {
+ unsigned idx = (kk & 64) ? 64 - (kk & 63) : (kk & 63);
+ Float128 ans = SIN_K_PI_OVER_128_F128[idx];
+ if (kk & 128)
+ ans.sign = Sign::NEG;
+ return ans;
+ };
+
+ // -sin(k * pi/128) = sin((k + 128) * pi/128)
+ // cos(k * pi/128) = sin(k * pi/128 + pi/2) = sin((k + 64) * pi/128).
+ Float128 msin_k_f128 = get_sin_k(k + 128);
+ Float128 cos_k_f128 = get_sin_k(k + 64);
+
+  // cos(x) = cos(k * pi/128 + u)
+ // = cos(u) * cos(k*pi/128) - sin(u) * sin(k*pi/128)
+ Float128 r = fputil::quick_add(fputil::quick_mul(cos_k_f128, cos_u),
+ fputil::quick_mul(msin_k_f128, sin_u));
+
+ // TODO: Add assertion if Ziv's accuracy tests fail in debug mode.
+ // https://github.com/llvm/llvm-project/issues/96452.
+
+ return static_cast<double>(r);
+#endif // !LIBC_MATH_HAS_SKIP_ACCURATE_PASS
+}
+
+} // namespace math
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LIBC_SRC___SUPPORT_MATH_COS_H
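The reduction identity used by math::cos above can be replayed in plain double precision with <cmath> (an illustration only; the real code carries y as a DoubleDouble and reads sin(k*pi/128) from SIN_K_PI_OVER_128 rather than calling libm):

```c++
// After range reduction, k = round(x * 128/pi) and y = x - k*pi/128, so
// cos(x) = cos(y)*cos(k*pi/128) - sin(y)*sin(k*pi/128).
#include <cmath>
#include <cstdio>

int main() {
  const double PI = 3.14159265358979323846;
  double x = 10.5;
  double k = std::round(x * 128.0 / PI); // reduction index
  double y = x - k * PI / 128.0;         // |y| <= pi/256 up to rounding
  double rebuilt = std::cos(y) * std::cos(k * PI / 128.0) -
                   std::sin(y) * std::sin(k * PI / 128.0);
  std::printf("cos(x)  = %.17g\nrebuilt = %.17g\n", std::cos(x), rebuilt);
  return 0;
}
```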
diff --git a/libc/src/math/generic/range_reduction_double_common.h b/libc/src/__support/math/range_reduction_double_common.h
index a93ee25..a12c25d 100644
--- a/libc/src/math/generic/range_reduction_double_common.h
+++ b/libc/src/__support/math/range_reduction_double_common.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC_MATH_GENERIC_RANGE_REDUCTION_DOUBLE_COMMON_H
-#define LLVM_LIBC_SRC_MATH_GENERIC_RANGE_REDUCTION_DOUBLE_COMMON_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_MATH_RANGE_REDUCTION_DOUBLE_COMMON_H
+#define LLVM_LIBC_SRC___SUPPORT_MATH_RANGE_REDUCTION_DOUBLE_COMMON_H
#include "src/__support/FPUtil/double_double.h"
#include "src/__support/FPUtil/dyadic_float.h"
@@ -20,6 +20,10 @@
namespace LIBC_NAMESPACE_DECL {
+namespace math {
+
+namespace range_reduction_double_internal {
+
#ifdef LIBC_TARGET_CPU_HAS_FMA_DOUBLE
static constexpr unsigned SPLIT = fputil::DefaultSplit<double>::VALUE;
#else
@@ -40,7 +44,7 @@ using Float128 = LIBC_NAMESPACE::fputil::DyadicFloat<128>;
// Error bound:
// |(x - k * pi/128) - (u_hi + u_lo)| <= max(ulp(ulp(u_hi)), 2^-119)
// <= 2^-111.
-LIBC_INLINE unsigned range_reduction_small(double x, DoubleDouble &u) {
+LIBC_INLINE static unsigned range_reduction_small(double x, DoubleDouble &u) {
// Values of -pi/128 used for inputs with absolute value <= 2^16.
// The first 3 parts are generated with (53 - 21 = 32)-bit precision, so that
// the product k * MPI_OVER_128[i] is exact.
@@ -267,13 +271,15 @@ struct LargeRangeReduction {
}
#endif // !LIBC_MATH_HAS_SKIP_ACCURATE_PASS
+ LIBC_INLINE LargeRangeReduction() = default;
+
private:
// Index of x in the look-up table ONE_TWENTY_EIGHT_OVER_PI.
- unsigned idx;
+ unsigned idx = 0;
// x scaled down by 2^(-16 *(idx - 3))).
- double x_reduced;
+ double x_reduced = 0;
// Parts of (x * 128/pi) mod 1.
- double y_hi, y_lo;
+ double y_hi = 0, y_lo = 0;
DoubleDouble y_mid;
};
@@ -369,6 +375,10 @@ static constexpr Float128 SIN_K_PI_OVER_128_F128[65] = {
};
#endif // !LIBC_MATH_HAS_SKIP_ACCURATE_PASS
+} // namespace range_reduction_double_internal
+
+} // namespace math
+
} // namespace LIBC_NAMESPACE_DECL
-#endif // LLVM_LIBC_SRC_MATH_GENERIC_RANGE_REDUCTION_DOUBLE_COMMON_H
+#endif // LLVM_LIBC_SRC___SUPPORT_MATH_RANGE_REDUCTION_DOUBLE_COMMON_H
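A note on the LargeRangeReduction changes above (an assumption about intent, not stated in the patch): the defaulted constructor and in-class member initializers make a default-constructed reducer usable from the new constexpr math::cos, since reading an uninitialized member would disqualify constant evaluation. A minimal sketch of that rule, with hypothetical names:

```c++
// Default member initializers keep a default-constructed object valid in
// constant evaluation (C++14 or later).
struct Reducer {
  unsigned idx = 0;
  double x_reduced = 0;
};

constexpr unsigned demo() {
  Reducer r;    // fine: every member has an initializer
  return r.idx; // reading an uninitialized member here would be an error
}

static_assert(demo() == 0, "default-initialized members are constant-evaluable");
```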
diff --git a/libc/src/math/generic/range_reduction_double_fma.h b/libc/src/__support/math/range_reduction_double_fma.h
index 160fb24..7fa3e40 100644
--- a/libc/src/math/generic/range_reduction_double_fma.h
+++ b/libc/src/__support/math/range_reduction_double_fma.h
@@ -6,20 +6,22 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC_MATH_GENERIC_RANGE_REDUCTION_DOUBLE_FMA_H
-#define LLVM_LIBC_SRC_MATH_GENERIC_RANGE_REDUCTION_DOUBLE_FMA_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_MATH_RANGE_REDUCTION_DOUBLE_FMA_H
+#define LLVM_LIBC_SRC___SUPPORT_MATH_RANGE_REDUCTION_DOUBLE_FMA_H
#include "src/__support/FPUtil/FPBits.h"
#include "src/__support/FPUtil/double_double.h"
#include "src/__support/FPUtil/multiply_add.h"
#include "src/__support/FPUtil/nearest_integer.h"
-#include "src/__support/common.h"
#include "src/__support/macros/config.h"
-#include "src/__support/macros/optimization.h"
-#include "src/math/generic/range_reduction_double_common.h"
+#include "src/__support/math/range_reduction_double_common.h"
namespace LIBC_NAMESPACE_DECL {
+namespace math {
+
+namespace range_reduction_double_internal {
+
using LIBC_NAMESPACE::fputil::DoubleDouble;
LIBC_INLINE unsigned LargeRangeReduction::fast(double x, DoubleDouble &u) {
@@ -341,6 +343,10 @@ LIBC_INLINE constexpr DoubleDouble SIN_K_PI_OVER_128[] = {
#endif // !LIBC_MATH_HAS_SMALL_TABLES
};
+} // namespace range_reduction_double_internal
+
+} // namespace math
+
} // namespace LIBC_NAMESPACE_DECL
-#endif // LLVM_LIBC_SRC_MATH_GENERIC_RANGE_REDUCTION_DOUBLE_FMA_H
+#endif // LLVM_LIBC_SRC___SUPPORT_MATH_RANGE_REDUCTION_DOUBLE_FMA_H
diff --git a/libc/src/math/generic/range_reduction_double_nofma.h b/libc/src/__support/math/range_reduction_double_nofma.h
index 9d13d24..3990b9b 100644
--- a/libc/src/math/generic/range_reduction_double_nofma.h
+++ b/libc/src/__support/math/range_reduction_double_nofma.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC_MATH_GENERIC_RANGE_REDUCTION_DOUBLE_NOFMA_H
-#define LLVM_LIBC_SRC_MATH_GENERIC_RANGE_REDUCTION_DOUBLE_NOFMA_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_MATH_RANGE_REDUCTION_DOUBLE_NOFMA_H
+#define LLVM_LIBC_SRC___SUPPORT_MATH_RANGE_REDUCTION_DOUBLE_NOFMA_H
#include "src/__support/FPUtil/FPBits.h"
#include "src/__support/FPUtil/double_double.h"
@@ -16,10 +16,14 @@
#include "src/__support/common.h"
#include "src/__support/macros/config.h"
#include "src/__support/macros/optimization.h"
-#include "src/math/generic/range_reduction_double_common.h"
+#include "src/__support/math/range_reduction_double_common.h"
namespace LIBC_NAMESPACE_DECL {
+namespace math {
+
+namespace range_reduction_double_internal {
+
using fputil::DoubleDouble;
LIBC_INLINE unsigned LargeRangeReduction::fast(double x, DoubleDouble &u) {
@@ -342,6 +346,10 @@ LIBC_INLINE constexpr DoubleDouble SIN_K_PI_OVER_128[] = {
#endif // !LIBC_MATH_HAS_SMALL_TABLES
};
+} // namespace range_reduction_double_internal
+
+} // namespace math
+
} // namespace LIBC_NAMESPACE_DECL
-#endif // LLVM_LIBC_SRC_MATH_GENERIC_RANGE_REDUCTION_DOUBLE_NOFMA_H
+#endif // LLVM_LIBC_SRC___SUPPORT_MATH_RANGE_REDUCTION_DOUBLE_NOFMA_H
diff --git a/libc/src/math/generic/sincos_eval.h b/libc/src/__support/math/sincos_eval.h
index 41a4c75..fc741af 100644
--- a/libc/src/math/generic/sincos_eval.h
+++ b/libc/src/__support/math/sincos_eval.h
@@ -18,7 +18,9 @@
namespace LIBC_NAMESPACE_DECL {
-namespace generic {
+namespace math {
+
+namespace sincos_eval_internal {
using fputil::DoubleDouble;
using Float128 = fputil::DyadicFloat<128>;
@@ -131,7 +133,9 @@ LIBC_INLINE void sincos_eval(const Float128 &u, Float128 &sin_u,
COS_COEFFS[6]);
}
-} // namespace generic
+} // namespace sincos_eval_internal
+
+} // namespace math
} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/dlfcn/CMakeLists.txt b/libc/src/dlfcn/CMakeLists.txt
index 8ef0540..2ee3ac0 100644
--- a/libc/src/dlfcn/CMakeLists.txt
+++ b/libc/src/dlfcn/CMakeLists.txt
@@ -14,7 +14,6 @@ add_entrypoint_object(
dlerror.h
DEPENDS
libc.include.dlfcn
- libc.src.errno.errno
)
add_entrypoint_object(
@@ -25,7 +24,6 @@ add_entrypoint_object(
dlopen.h
DEPENDS
libc.include.dlfcn
- libc.src.errno.errno
)
add_entrypoint_object(
@@ -36,7 +34,6 @@ add_entrypoint_object(
dlsym.h
DEPENDS
libc.include.dlfcn
- libc.src.errno.errno
)
add_entrypoint_object(
@@ -47,7 +44,6 @@ add_entrypoint_object(
dlinfo.h
DEPENDS
libc.include.dlfcn
- libc.src.errno.errno
)
add_entrypoint_object(
@@ -58,5 +54,4 @@ add_entrypoint_object(
dladdr.h
DEPENDS
libc.include.dlfcn
- libc.src.errno.errno
)
diff --git a/libc/src/dlfcn/dladdr.cpp b/libc/src/dlfcn/dladdr.cpp
index 61490fd..3db68b4 100644
--- a/libc/src/dlfcn/dladdr.cpp
+++ b/libc/src/dlfcn/dladdr.cpp
@@ -14,7 +14,8 @@
namespace LIBC_NAMESPACE_DECL {
// TODO: https://github.com/llvm/llvm-project/issues/97929
-LLVM_LIBC_FUNCTION(int, dladdr, (const void *addr, Dl_info *info)) {
+LLVM_LIBC_FUNCTION(int, dladdr,
+ (const void *__restrict addr, Dl_info *__restrict info)) {
return -1;
}
diff --git a/libc/src/dlfcn/dladdr.h b/libc/src/dlfcn/dladdr.h
index 346fc8d..abbc9a9 100644
--- a/libc/src/dlfcn/dladdr.h
+++ b/libc/src/dlfcn/dladdr.h
@@ -13,7 +13,7 @@
namespace LIBC_NAMESPACE_DECL {
-int dladdr(const void *, Dl_info *);
+int dladdr(const void *__restrict, Dl_info *__restrict);
} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/dlfcn/dlinfo.cpp b/libc/src/dlfcn/dlinfo.cpp
index d78cade..e1938d1 100644
--- a/libc/src/dlfcn/dlinfo.cpp
+++ b/libc/src/dlfcn/dlinfo.cpp
@@ -16,7 +16,8 @@ namespace LIBC_NAMESPACE_DECL {
// TODO: https://github.com/llvm/llvm-project/issues/149911
LLVM_LIBC_FUNCTION(int, dlinfo,
- (void *restrict handle, int request, void *restrict info)) {
+ (void *__restrict handle, int request,
+ void *__restrict info)) {
return -1;
}
diff --git a/libc/src/dlfcn/dlinfo.h b/libc/src/dlfcn/dlinfo.h
index c2c34f0..bc13152 100644
--- a/libc/src/dlfcn/dlinfo.h
+++ b/libc/src/dlfcn/dlinfo.h
@@ -13,7 +13,7 @@
namespace LIBC_NAMESPACE_DECL {
-int dlinfo(void *restrict, int, void *restrict);
+int dlinfo(void *__restrict, int, void *__restrict);
} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/dlfcn/dlsym.cpp b/libc/src/dlfcn/dlsym.cpp
index c075c20..dc0da7d 100644
--- a/libc/src/dlfcn/dlsym.cpp
+++ b/libc/src/dlfcn/dlsym.cpp
@@ -14,6 +14,8 @@
namespace LIBC_NAMESPACE_DECL {
// TODO(@izaakschroeder): https://github.com/llvm/llvm-project/issues/97920
-LLVM_LIBC_FUNCTION(void *, dlsym, (void *, const char *)) { return nullptr; }
+LLVM_LIBC_FUNCTION(void *, dlsym, (void *__restrict, const char *__restrict)) {
+ return nullptr;
+}
} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/dlfcn/dlsym.h b/libc/src/dlfcn/dlsym.h
index 70c6ab3..f879792 100644
--- a/libc/src/dlfcn/dlsym.h
+++ b/libc/src/dlfcn/dlsym.h
@@ -13,7 +13,7 @@
namespace LIBC_NAMESPACE_DECL {
-void *dlsym(void *, const char *);
+void *dlsym(void *__restrict, const char *__restrict);
} // namespace LIBC_NAMESPACE_DECL
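On the dlfcn signature changes above: `restrict` is a C keyword with no meaning in C++, so spelling it literally in these C++ sources would not compile, whereas the compiler extension `__restrict` is accepted by both language frontends and carries the same no-aliasing promise. A small self-contained sketch (illustrative function, not from the patch):

```c++
// __restrict tells the optimizer that dst and src never alias; stores
// through dst therefore cannot change *src, so reloads can be elided.
int copy_first(int *__restrict dst, const int *__restrict src) {
  *dst = *src;
  return *dst;
}
```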
diff --git a/libc/src/math/CMakeLists.txt b/libc/src/math/CMakeLists.txt
index 0522e0e..6b684f4 100644
--- a/libc/src/math/CMakeLists.txt
+++ b/libc/src/math/CMakeLists.txt
@@ -91,6 +91,7 @@ add_math_entrypoint_object(ceilf)
add_math_entrypoint_object(ceill)
add_math_entrypoint_object(ceilf16)
add_math_entrypoint_object(ceilf128)
+add_math_entrypoint_object(ceilbf16)
add_math_entrypoint_object(copysign)
add_math_entrypoint_object(copysignf)
@@ -214,6 +215,7 @@ add_math_entrypoint_object(floorf)
add_math_entrypoint_object(floorl)
add_math_entrypoint_object(floorf16)
add_math_entrypoint_object(floorf128)
+add_math_entrypoint_object(floorbf16)
add_math_entrypoint_object(fma)
add_math_entrypoint_object(fmaf)
@@ -224,12 +226,14 @@ add_math_entrypoint_object(fmaxf)
add_math_entrypoint_object(fmaxl)
add_math_entrypoint_object(fmaxf128)
add_math_entrypoint_object(fmaxf16)
+add_math_entrypoint_object(fmaxbf16)
add_math_entrypoint_object(fmin)
add_math_entrypoint_object(fminf)
add_math_entrypoint_object(fminl)
add_math_entrypoint_object(fminf128)
add_math_entrypoint_object(fminf16)
+add_math_entrypoint_object(fminbf16)
add_math_entrypoint_object(fmaximum)
add_math_entrypoint_object(fmaximumf)
@@ -463,12 +467,14 @@ add_math_entrypoint_object(roundf)
add_math_entrypoint_object(roundl)
add_math_entrypoint_object(roundf16)
add_math_entrypoint_object(roundf128)
+add_math_entrypoint_object(roundbf16)
add_math_entrypoint_object(roundeven)
add_math_entrypoint_object(roundevenf)
add_math_entrypoint_object(roundevenl)
add_math_entrypoint_object(roundevenf16)
add_math_entrypoint_object(roundevenf128)
+add_math_entrypoint_object(roundevenbf16)
add_math_entrypoint_object(scalbln)
add_math_entrypoint_object(scalblnf)
@@ -546,6 +552,7 @@ add_math_entrypoint_object(truncf)
add_math_entrypoint_object(truncl)
add_math_entrypoint_object(truncf16)
add_math_entrypoint_object(truncf128)
+add_math_entrypoint_object(truncbf16)
add_math_entrypoint_object(ufromfp)
add_math_entrypoint_object(ufromfpf)
@@ -558,3 +565,13 @@ add_math_entrypoint_object(ufromfpxf)
add_math_entrypoint_object(ufromfpxl)
add_math_entrypoint_object(ufromfpxf16)
add_math_entrypoint_object(ufromfpxf128)
+
+add_math_entrypoint_object(bf16add)
+add_math_entrypoint_object(bf16addf)
+add_math_entrypoint_object(bf16addl)
+add_math_entrypoint_object(bf16addf128)
+
+add_math_entrypoint_object(bf16sub)
+add_math_entrypoint_object(bf16subf)
+add_math_entrypoint_object(bf16subl)
+add_math_entrypoint_object(bf16subf128)
diff --git a/libc/src/math/bf16add.h b/libc/src/math/bf16add.h
new file mode 100644
index 0000000..a29970e
--- /dev/null
+++ b/libc/src/math/bf16add.h
@@ -0,0 +1,21 @@
+//===-- Implementation header for bf16add -----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_BF16ADD_H
+#define LLVM_LIBC_SRC_MATH_BF16ADD_H
+
+#include "src/__support/macros/config.h"
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+bfloat16 bf16add(double x, double y);
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC_MATH_BF16ADD_H
diff --git a/libc/src/math/bf16addf.h b/libc/src/math/bf16addf.h
new file mode 100644
index 0000000..80a5e2a
--- /dev/null
+++ b/libc/src/math/bf16addf.h
@@ -0,0 +1,21 @@
+//===-- Implementation header for bf16addf ----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_BF16ADDF_H
+#define LLVM_LIBC_SRC_MATH_BF16ADDF_H
+
+#include "src/__support/macros/config.h"
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+bfloat16 bf16addf(float x, float y);
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC_MATH_BF16ADDF_H
diff --git a/libc/src/math/bf16addf128.h b/libc/src/math/bf16addf128.h
new file mode 100644
index 0000000..3c2f3a1
--- /dev/null
+++ b/libc/src/math/bf16addf128.h
@@ -0,0 +1,21 @@
+//===-- Implementation header for bf16addf128 -------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_BF16ADDF128_H
+#define LLVM_LIBC_SRC_MATH_BF16ADDF128_H
+
+#include "src/__support/macros/config.h"
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+bfloat16 bf16addf128(float128 x, float128 y);
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC_MATH_BF16ADDF128_H
diff --git a/libc/src/math/bf16addl.h b/libc/src/math/bf16addl.h
new file mode 100644
index 0000000..a9e7d68
--- /dev/null
+++ b/libc/src/math/bf16addl.h
@@ -0,0 +1,21 @@
+//===-- Implementation header for bf16addl ----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_BF16ADDL_H
+#define LLVM_LIBC_SRC_MATH_BF16ADDL_H
+
+#include "src/__support/macros/config.h"
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+bfloat16 bf16addl(long double x, long double y);
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC_MATH_BF16ADDL_H
diff --git a/libc/src/math/bf16sub.h b/libc/src/math/bf16sub.h
new file mode 100644
index 0000000..8108e914
--- /dev/null
+++ b/libc/src/math/bf16sub.h
@@ -0,0 +1,21 @@
+//===-- Implementation header for bf16sub -----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_BF16SUB_H
+#define LLVM_LIBC_SRC_MATH_BF16SUB_H
+
+#include "src/__support/macros/config.h"
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+bfloat16 bf16sub(double x, double y);
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC_MATH_BF16SUB_H
diff --git a/libc/src/math/bf16subf.h b/libc/src/math/bf16subf.h
new file mode 100644
index 0000000..1bd79bf
--- /dev/null
+++ b/libc/src/math/bf16subf.h
@@ -0,0 +1,21 @@
+//===-- Implementation header for bf16subf ----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_BF16SUBF_H
+#define LLVM_LIBC_SRC_MATH_BF16SUBF_H
+
+#include "src/__support/macros/config.h"
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+bfloat16 bf16subf(float x, float y);
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC_MATH_BF16SUBF_H
diff --git a/libc/src/math/bf16subf128.h b/libc/src/math/bf16subf128.h
new file mode 100644
index 0000000..19590e8
--- /dev/null
+++ b/libc/src/math/bf16subf128.h
@@ -0,0 +1,21 @@
+//===-- Implementation header for bf16subf128 -------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_BF16SUBF128_H
+#define LLVM_LIBC_SRC_MATH_BF16SUBF128_H
+
+#include "src/__support/macros/config.h"
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+bfloat16 bf16subf128(float128 x, float128 y);
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC_MATH_BF16SUBF128_H
diff --git a/libc/src/math/bf16subl.h b/libc/src/math/bf16subl.h
new file mode 100644
index 0000000..13b2093
--- /dev/null
+++ b/libc/src/math/bf16subl.h
@@ -0,0 +1,21 @@
+//===-- Implementation header for bf16subl ----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_BF16SUBL_H
+#define LLVM_LIBC_SRC_MATH_BF16SUBL_H
+
+#include "src/__support/macros/config.h"
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+bfloat16 bf16subl(long double x, long double y);
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC_MATH_BF16SUBL_H
diff --git a/libc/src/math/ceilbf16.h b/libc/src/math/ceilbf16.h
new file mode 100644
index 0000000..bf70f25
--- /dev/null
+++ b/libc/src/math/ceilbf16.h
@@ -0,0 +1,21 @@
+//===-- Implementation header for ceilbf16 ----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_CEILBF16_H
+#define LLVM_LIBC_SRC_MATH_CEILBF16_H
+
+#include "src/__support/macros/config.h"
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+bfloat16 ceilbf16(bfloat16 x);
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC_MATH_CEILBF16_H
diff --git a/libc/src/math/floorbf16.h b/libc/src/math/floorbf16.h
new file mode 100644
index 0000000..9b5a30a
--- /dev/null
+++ b/libc/src/math/floorbf16.h
@@ -0,0 +1,21 @@
+//===-- Implementation header for floorbf16 ---------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FLOORBF16_H
+#define LLVM_LIBC_SRC_MATH_FLOORBF16_H
+
+#include "src/__support/macros/config.h"
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+bfloat16 floorbf16(bfloat16 x);
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC_MATH_FLOORBF16_H
diff --git a/libc/src/math/fmaxbf16.h b/libc/src/math/fmaxbf16.h
new file mode 100644
index 0000000..bdbd14c
--- /dev/null
+++ b/libc/src/math/fmaxbf16.h
@@ -0,0 +1,21 @@
+//===-- Implementation header for fmaxbf16 ----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FMAXBF16_H
+#define LLVM_LIBC_SRC_MATH_FMAXBF16_H
+
+#include "src/__support/macros/config.h"
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+bfloat16 fmaxbf16(bfloat16 x, bfloat16 y);
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC_MATH_FMAXBF16_H
diff --git a/libc/src/math/fminbf16.h b/libc/src/math/fminbf16.h
new file mode 100644
index 0000000..4c1ada9
--- /dev/null
+++ b/libc/src/math/fminbf16.h
@@ -0,0 +1,21 @@
+//===-- Implementation header for fminbf16 ----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FMINBF16_H
+#define LLVM_LIBC_SRC_MATH_FMINBF16_H
+
+#include "src/__support/macros/config.h"
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+bfloat16 fminbf16(bfloat16 x, bfloat16 y);
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC_MATH_FMINBF16_H
diff --git a/libc/src/math/generic/CMakeLists.txt b/libc/src/math/generic/CMakeLists.txt
index a866195..c8a8c2b 100644
--- a/libc/src/math/generic/CMakeLists.txt
+++ b/libc/src/math/generic/CMakeLists.txt
@@ -157,6 +157,22 @@ add_entrypoint_object(
)
add_entrypoint_object(
+ ceilbf16
+ SRCS
+ ceilbf16.cpp
+ HDRS
+ ../ceilbf16.h
+ DEPENDS
+ libc.src.__support.common
+ libc.src.__support.FPUtil.bfloat16
+ libc.src.__support.FPUtil.nearest_integer_operations
+ libc.src.__support.macros.config
+ libc.src.__support.macros.properties.types
+ FLAGS
+ ROUND_OPT
+)
+
+add_entrypoint_object(
daddl
SRCS
daddl.cpp
@@ -276,23 +292,6 @@ add_header_library(
)
add_header_library(
- range_reduction_double
- HDRS
- range_reduction_double_common.h
- range_reduction_double_fma.h
- range_reduction_double_nofma.h
- DEPENDS
- libc.src.__support.FPUtil.double_double
- libc.src.__support.FPUtil.dyadic_float
- libc.src.__support.FPUtil.fp_bits
- libc.src.__support.FPUtil.fma
- libc.src.__support.FPUtil.multiply_add
- libc.src.__support.FPUtil.nearest_integer
- libc.src.__support.common
- libc.src.__support.integer_literals
-)
-
-add_header_library(
sincosf_utils
HDRS
sincosf_utils.h
@@ -313,18 +312,6 @@ add_header_library(
libc.src.__support.common
)
-add_header_library(
- sincos_eval
- HDRS
- sincos_eval.h
- DEPENDS
- libc.src.__support.FPUtil.double_double
- libc.src.__support.FPUtil.dyadic_float
- libc.src.__support.FPUtil.multiply_add
- libc.src.__support.FPUtil.polyeval
- libc.src.__support.integer_literals
-)
-
add_entrypoint_object(
cos
SRCS
@@ -332,16 +319,7 @@ add_entrypoint_object(
HDRS
../cos.h
DEPENDS
- .range_reduction_double
- .sincos_eval
- libc.hdr.errno_macros
- libc.src.errno.errno
- libc.src.__support.FPUtil.double_double
- libc.src.__support.FPUtil.dyadic_float
- libc.src.__support.FPUtil.except_value_utils
- libc.src.__support.FPUtil.fenv_impl
- libc.src.__support.FPUtil.fp_bits
- libc.src.__support.macros.optimization
+ libc.src.__support.math.cos
)
add_entrypoint_object(
@@ -420,8 +398,8 @@ add_entrypoint_object(
HDRS
../sin.h
DEPENDS
- .range_reduction_double
- .sincos_eval
+ libc.src.__support.math.range_reduction_double
+ libc.src.__support.math.sincos_eval
libc.hdr.errno_macros
libc.src.errno.errno
libc.src.__support.FPUtil.double_double
@@ -480,8 +458,8 @@ add_entrypoint_object(
HDRS
../sincos.h
DEPENDS
- .range_reduction_double
- .sincos_eval
+ libc.src.__support.math.range_reduction_double
+ libc.src.__support.math.sincos_eval
libc.hdr.errno_macros
libc.src.errno.errno
libc.src.__support.FPUtil.double_double
@@ -553,7 +531,7 @@ add_entrypoint_object(
HDRS
../tan.h
DEPENDS
- .range_reduction_double
+ libc.src.__support.math.range_reduction_double
libc.hdr.errno_macros
libc.src.errno.errno
libc.src.__support.FPUtil.double_double
@@ -802,6 +780,22 @@ add_entrypoint_object(
)
add_entrypoint_object(
+ truncbf16
+ SRCS
+ truncbf16.cpp
+ HDRS
+ ../truncbf16.h
+ DEPENDS
+ libc.src.__support.common
+ libc.src.__support.FPUtil.bfloat16
+ libc.src.__support.FPUtil.nearest_integer_operations
+ libc.src.__support.macros.config
+ libc.src.__support.macros.properties.types
+ FLAGS
+ ROUND_OPT
+)
+
+add_entrypoint_object(
floor
SRCS
floor.cpp
@@ -862,6 +856,22 @@ add_entrypoint_object(
)
add_entrypoint_object(
+ floorbf16
+ SRCS
+ floorbf16.cpp
+ HDRS
+ ../floorbf16.h
+ DEPENDS
+ libc.src.__support.common
+ libc.src.__support.FPUtil.bfloat16
+ libc.src.__support.FPUtil.nearest_integer_operations
+ libc.src.__support.macros.config
+ libc.src.__support.macros.properties.types
+ FLAGS
+ ROUND_OPT
+)
+
+add_entrypoint_object(
round
SRCS
round.cpp
@@ -922,6 +932,22 @@ add_entrypoint_object(
)
add_entrypoint_object(
+ roundbf16
+ SRCS
+ roundbf16.cpp
+ HDRS
+ ../roundbf16.h
+ DEPENDS
+ libc.src.__support.common
+ libc.src.__support.FPUtil.bfloat16
+ libc.src.__support.FPUtil.nearest_integer_operations
+ libc.src.__support.macros.config
+ libc.src.__support.macros.properties.types
+ FLAGS
+ ROUND_OPT
+)
+
+add_entrypoint_object(
roundeven
SRCS
roundeven.cpp
@@ -982,6 +1008,22 @@ add_entrypoint_object(
)
add_entrypoint_object(
+ roundevenbf16
+ SRCS
+ roundevenbf16.cpp
+ HDRS
+ ../roundevenbf16.h
+ DEPENDS
+ libc.src.__support.common
+ libc.src.__support.FPUtil.bfloat16
+ libc.src.__support.FPUtil.nearest_integer_operations
+ libc.src.__support.macros.config
+ libc.src.__support.macros.properties.types
+ FLAGS
+ ROUND_OPT
+)
+
+add_entrypoint_object(
lround
SRCS
lround.cpp
@@ -2281,6 +2323,21 @@ add_entrypoint_object(
MISC_MATH_BASIC_OPS_OPT
)
+add_entrypoint_object(
+ fminbf16
+ SRCS
+ fminbf16.cpp
+ HDRS
+ ../fminbf16.h
+ DEPENDS
+ libc.src.__support.common
+ libc.src.__support.FPUtil.basic_operations
+ libc.src.__support.FPUtil.bfloat16
+ libc.src.__support.macros.config
+ libc.src.__support.macros.properties.types
+ FLAGS
+ MISC_MATH_BASIC_OPS_OPT
+)
add_entrypoint_object(
fmax
@@ -2341,6 +2398,22 @@ add_entrypoint_object(
)
add_entrypoint_object(
+ fmaxbf16
+ SRCS
+ fmaxbf16.cpp
+ HDRS
+ ../fmaxbf16.h
+ DEPENDS
+ libc.src.__support.common
+ libc.src.__support.FPUtil.basic_operations
+ libc.src.__support.FPUtil.bfloat16
+ libc.src.__support.macros.config
+ libc.src.__support.macros.properties.types
+ FLAGS
+ MISC_MATH_BASIC_OPS_OPT
+)
+
+add_entrypoint_object(
fmaximum
SRCS
fmaximum.cpp
@@ -4739,11 +4812,7 @@ add_entrypoint_object(
HDRS
../cbrtf.h
DEPENDS
- libc.hdr.fenv_macros
- libc.src.__support.FPUtil.fenv_impl
- libc.src.__support.FPUtil.fp_bits
- libc.src.__support.FPUtil.multiply_add
- libc.src.__support.macros.optimization
+ libc.src.__support.math.cbrtf
)
add_entrypoint_object(
@@ -4835,3 +4904,116 @@ add_header_library(
libc.src.__support.math.expf16_utils
libc.src.__support.math.exp10_float16_constants
)
+
+add_entrypoint_object(
+ bf16add
+ SRCS
+ bf16add.cpp
+ HDRS
+ ../bf16add.h
+ DEPENDS
+ libc.src.__support.common
+ libc.src.__support.FPUtil.bfloat16
+ libc.src.__support.FPUtil.generic.add_sub
+ libc.src.__support.macros.config
+ libc.src.__support.macros.properties.types
+)
+
+add_entrypoint_object(
+ bf16addf
+ SRCS
+ bf16addf.cpp
+ HDRS
+ ../bf16addf.h
+ DEPENDS
+ libc.src.__support.common
+ libc.src.__support.FPUtil.bfloat16
+ libc.src.__support.FPUtil.generic.add_sub
+ libc.src.__support.macros.config
+ libc.src.__support.macros.properties.types
+)
+
+add_entrypoint_object(
+ bf16addl
+ SRCS
+ bf16addl.cpp
+ HDRS
+ ../bf16addl.h
+ DEPENDS
+ libc.src.__support.common
+ libc.src.__support.FPUtil.bfloat16
+ libc.src.__support.FPUtil.generic.add_sub
+ libc.src.__support.macros.config
+ libc.src.__support.macros.properties.types
+)
+
+add_entrypoint_object(
+ bf16addf128
+ SRCS
+ bf16addf128.cpp
+ HDRS
+ ../bf16addf128.h
+ DEPENDS
+ libc.src.__support.common
+ libc.src.__support.FPUtil.bfloat16
+ libc.src.__support.FPUtil.generic.add_sub
+ libc.src.__support.macros.config
+ libc.src.__support.macros.properties.types
+)
+
+
+add_entrypoint_object(
+ bf16sub
+ SRCS
+ bf16sub.cpp
+ HDRS
+ ../bf16sub.h
+ DEPENDS
+ libc.src.__support.common
+ libc.src.__support.FPUtil.bfloat16
+ libc.src.__support.FPUtil.generic.add_sub
+ libc.src.__support.macros.config
+ libc.src.__support.macros.properties.types
+)
+
+add_entrypoint_object(
+ bf16subf
+ SRCS
+ bf16subf.cpp
+ HDRS
+ ../bf16subf.h
+ DEPENDS
+ libc.src.__support.common
+ libc.src.__support.FPUtil.bfloat16
+ libc.src.__support.FPUtil.generic.add_sub
+ libc.src.__support.macros.config
+ libc.src.__support.macros.properties.types
+)
+
+add_entrypoint_object(
+ bf16subl
+ SRCS
+ bf16subl.cpp
+ HDRS
+ ../bf16subl.h
+ DEPENDS
+ libc.src.__support.common
+ libc.src.__support.FPUtil.bfloat16
+ libc.src.__support.FPUtil.generic.add_sub
+ libc.src.__support.macros.config
+ libc.src.__support.macros.properties.types
+)
+
+add_entrypoint_object(
+ bf16subf128
+ SRCS
+ bf16subf128.cpp
+ HDRS
+ ../bf16subf128.h
+ DEPENDS
+ libc.src.__support.common
+ libc.src.__support.FPUtil.bfloat16
+ libc.src.__support.FPUtil.generic.add_sub
+ libc.src.__support.macros.config
+ libc.src.__support.macros.properties.types
+)
diff --git a/libc/src/math/generic/bf16add.cpp b/libc/src/math/generic/bf16add.cpp
new file mode 100644
index 0000000..257596a
--- /dev/null
+++ b/libc/src/math/generic/bf16add.cpp
@@ -0,0 +1,21 @@
+//===-- Implementation of bf16add function --------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/bf16add.h"
+#include "src/__support/FPUtil/bfloat16.h"
+#include "src/__support/FPUtil/generic/add_sub.h"
+#include "src/__support/common.h"
+#include "src/__support/macros/config.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+LLVM_LIBC_FUNCTION(bfloat16, bf16add, (double x, double y)) {
+ return fputil::generic::add<bfloat16>(x, y);
+}
+
+} // namespace LIBC_NAMESPACE_DECL
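On the bf16add/bf16sub family being added here: each entrypoint hands the wider operands to fputil::generic::add or sub, which produces the bfloat16 result with a single rounding of the exact sum, in the same spirit as the existing fadd/f16add narrowing functions. Why the single rounding matters is easiest to see in the analogous double-to-float case (an illustration only; the values are chosen to expose double rounding):

```c++
#include <cstdio>

int main() {
  double x = 1.0;
  double y = 0x1.0p-24 + 0x1.0p-54; // exactly representable in double
  // Exact sum: 1 + 2^-24 + 2^-54. Rounded once to float this is 1 + 2^-23
  // (just above the halfway point). Going through an intermediate double
  // first drops the 2^-54 sticky bit, and the float tie-break then rounds
  // down to 1.0f instead: classic double rounding.
  float via_double = static_cast<float>(x + y);
  std::printf("two-step result: %a (single rounding would give 0x1.000002p+0)\n",
              via_double);
  return 0;
}
```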
diff --git a/libc/src/math/generic/bf16addf.cpp b/libc/src/math/generic/bf16addf.cpp
new file mode 100644
index 0000000..65e6cbf
--- /dev/null
+++ b/libc/src/math/generic/bf16addf.cpp
@@ -0,0 +1,21 @@
+//===-- Implementation of bf16addf function -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/bf16addf.h"
+#include "src/__support/FPUtil/bfloat16.h"
+#include "src/__support/FPUtil/generic/add_sub.h"
+#include "src/__support/common.h"
+#include "src/__support/macros/config.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+LLVM_LIBC_FUNCTION(bfloat16, bf16addf, (float x, float y)) {
+ return fputil::generic::add<bfloat16>(x, y);
+}
+
+} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/math/generic/bf16addf128.cpp b/libc/src/math/generic/bf16addf128.cpp
new file mode 100644
index 0000000..03f70af
--- /dev/null
+++ b/libc/src/math/generic/bf16addf128.cpp
@@ -0,0 +1,21 @@
+//===-- Implementation of bf16addf128 function ----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/bf16addf128.h"
+#include "src/__support/FPUtil/bfloat16.h"
+#include "src/__support/FPUtil/generic/add_sub.h"
+#include "src/__support/common.h"
+#include "src/__support/macros/config.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+LLVM_LIBC_FUNCTION(bfloat16, bf16addf128, (float128 x, float128 y)) {
+ return fputil::generic::add<bfloat16>(x, y);
+}
+
+} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/math/generic/bf16addl.cpp b/libc/src/math/generic/bf16addl.cpp
new file mode 100644
index 0000000..c212195
--- /dev/null
+++ b/libc/src/math/generic/bf16addl.cpp
@@ -0,0 +1,21 @@
+//===-- Implementation of bf16addl function -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/bf16addl.h"
+#include "src/__support/FPUtil/bfloat16.h"
+#include "src/__support/FPUtil/generic/add_sub.h"
+#include "src/__support/common.h"
+#include "src/__support/macros/config.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+LLVM_LIBC_FUNCTION(bfloat16, bf16addl, (long double x, long double y)) {
+ return fputil::generic::add<bfloat16>(x, y);
+}
+
+} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/math/generic/bf16sub.cpp b/libc/src/math/generic/bf16sub.cpp
new file mode 100644
index 0000000..65eb209
--- /dev/null
+++ b/libc/src/math/generic/bf16sub.cpp
@@ -0,0 +1,21 @@
+//===-- Implementation of bf16sub function --------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/bf16sub.h"
+#include "src/__support/FPUtil/bfloat16.h"
+#include "src/__support/FPUtil/generic/add_sub.h"
+#include "src/__support/common.h"
+#include "src/__support/macros/config.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+LLVM_LIBC_FUNCTION(bfloat16, bf16sub, (double x, double y)) {
+ return fputil::generic::sub<bfloat16>(x, y);
+}
+
+} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/math/generic/bf16subf.cpp b/libc/src/math/generic/bf16subf.cpp
new file mode 100644
index 0000000..6bba4be
--- /dev/null
+++ b/libc/src/math/generic/bf16subf.cpp
@@ -0,0 +1,21 @@
+//===-- Implementation of bf16subf function -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/bf16subf.h"
+#include "src/__support/FPUtil/bfloat16.h"
+#include "src/__support/FPUtil/generic/add_sub.h"
+#include "src/__support/common.h"
+#include "src/__support/macros/config.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+LLVM_LIBC_FUNCTION(bfloat16, bf16subf, (float x, float y)) {
+ return fputil::generic::sub<bfloat16>(x, y);
+}
+
+} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/math/generic/bf16subf128.cpp b/libc/src/math/generic/bf16subf128.cpp
new file mode 100644
index 0000000..e5fe107
--- /dev/null
+++ b/libc/src/math/generic/bf16subf128.cpp
@@ -0,0 +1,21 @@
+//===-- Implementation of bf16subf128 function ----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/bf16subf128.h"
+#include "src/__support/FPUtil/bfloat16.h"
+#include "src/__support/FPUtil/generic/add_sub.h"
+#include "src/__support/common.h"
+#include "src/__support/macros/config.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+LLVM_LIBC_FUNCTION(bfloat16, bf16subf128, (float128 x, float128 y)) {
+ return fputil::generic::sub<bfloat16>(x, y);
+}
+
+} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/math/generic/bf16subl.cpp b/libc/src/math/generic/bf16subl.cpp
new file mode 100644
index 0000000..d3a970c
--- /dev/null
+++ b/libc/src/math/generic/bf16subl.cpp
@@ -0,0 +1,21 @@
+//===-- Implementation of bf16subl function -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/bf16subl.h"
+#include "src/__support/FPUtil/bfloat16.h"
+#include "src/__support/FPUtil/generic/add_sub.h"
+#include "src/__support/common.h"
+#include "src/__support/macros/config.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+LLVM_LIBC_FUNCTION(bfloat16, bf16subl, (long double x, long double y)) {
+ return fputil::generic::sub<bfloat16>(x, y);
+}
+
+} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/math/generic/cbrtf.cpp b/libc/src/math/generic/cbrtf.cpp
index 71b23c4..0bd8f71 100644
--- a/libc/src/math/generic/cbrtf.cpp
+++ b/libc/src/math/generic/cbrtf.cpp
@@ -7,153 +7,10 @@
//===----------------------------------------------------------------------===//
#include "src/math/cbrtf.h"
-#include "hdr/fenv_macros.h"
-#include "src/__support/FPUtil/FEnvImpl.h"
-#include "src/__support/FPUtil/FPBits.h"
-#include "src/__support/FPUtil/multiply_add.h"
-#include "src/__support/common.h"
-#include "src/__support/macros/config.h"
-#include "src/__support/macros/optimization.h" // LIBC_UNLIKELY
+#include "src/__support/math/cbrtf.h"
namespace LIBC_NAMESPACE_DECL {
-namespace {
-
-// Look up table for 2^(i/3) for i = 0, 1, 2.
-constexpr double CBRT2[3] = {1.0, 0x1.428a2f98d728bp0, 0x1.965fea53d6e3dp0};
-
-// Degree-7 polynomials approximation of ((1 + x)^(1/3) - 1)/x for 0 <= x <= 1
-// generated by Sollya with:
-// > for i from 0 to 15 do {
-// P = fpminimax(((1 + x)^(1/3) - 1)/x, 6, [|D...|], [i/16, (i + 1)/16]);
-// print("{", coeff(P, 0), ",", coeff(P, 1), ",", coeff(P, 2), ",",
-// coeff(P, 3), ",", coeff(P, 4), ",", coeff(P, 5), ",",
-// coeff(P, 6), "},");
-// };
-// Then (1 + x)^(1/3) ~ 1 + x * P(x).
-constexpr double COEFFS[16][7] = {
- {0x1.55555555554ebp-2, -0x1.c71c71c678c0cp-4, 0x1.f9add2776de81p-5,
- -0x1.511e10aa964a7p-5, 0x1.ee44165937fa2p-6, -0x1.7c5c9e059345dp-6,
- 0x1.047f75e0aff14p-6},
- {0x1.5555554d1149ap-2, -0x1.c71c676fcb5bp-4, 0x1.f9ab127dc57ebp-5,
- -0x1.50ea8fd1d4c15p-5, 0x1.e9d68f28ced43p-6, -0x1.60e0e1e661311p-6,
- 0x1.716eca1d6e3bcp-7},
- {0x1.5555546377d45p-2, -0x1.c71bc1c6d49d2p-4, 0x1.f9924cc0ed24dp-5,
- -0x1.4fea3beb53b3bp-5, 0x1.de028a9a07b1bp-6, -0x1.3b090d2233524p-6,
- 0x1.0aeca34893785p-7},
- {0x1.55554dce9f649p-2, -0x1.c7188b34b98f8p-4, 0x1.f93e1af34af49p-5,
- -0x1.4d9a06be75c63p-5, 0x1.cb943f4f68992p-6, -0x1.139a685a5e3c4p-6,
- 0x1.88410674c6a5dp-8},
- {0x1.5555347d211c3p-2, -0x1.c70f2a4b1a5fap-4, 0x1.f88420e8602c3p-5,
- -0x1.49becfa4ed3ep-5, 0x1.b475cd9013162p-6, -0x1.dcfee1dd2f8efp-7,
- 0x1.249bb51a1c498p-8},
- {0x1.5554f01b33dbap-2, -0x1.c6facb929dbf1p-4, 0x1.f73fb7861252ep-5,
- -0x1.4459a4a0071fap-5, 0x1.9a8df2b504fc2p-6, -0x1.9a7ce3006d06ep-7,
- 0x1.ba9230918fa2ep-9},
- {0x1.55545c695db5fp-2, -0x1.c6d6089f20275p-4, 0x1.f556e0ea80efp-5,
- -0x1.3d91372d083f4p-5, 0x1.7f66cff331f4p-6, -0x1.606a562491737p-7,
- 0x1.52e3e17c71069p-9},
- {0x1.55534a879232ap-2, -0x1.c69b836998b84p-4, 0x1.f2bb26dac0e4cp-5,
- -0x1.359eed43716d7p-5, 0x1.64218cd824fbcp-6, -0x1.2e703e2e091e8p-7,
- 0x1.0677d9af6aad4p-9},
- {0x1.5551836bb5494p-2, -0x1.c64658c15353bp-4, 0x1.ef68517451a6ep-5,
- -0x1.2cc20a980dceep-5, 0x1.49843e0fad93ap-6, -0x1.03c59ccb68e54p-7,
- 0x1.9ad325dc7adcbp-10},
- {0x1.554ecacb0d035p-2, -0x1.c5d2664026ffcp-4, 0x1.eb624796ba809p-5,
- -0x1.233803d19a535p-5, 0x1.300decb1c3c28p-6, -0x1.befe18031ec3dp-8,
- 0x1.449f5ee175c69p-10},
- {0x1.554ae1f5ae815p-2, -0x1.c53c6b14ff6b2p-4, 0x1.e6b2d5127bb5bp-5,
- -0x1.19387336788a3p-5, 0x1.180955a6ab255p-6, -0x1.81696703ba369p-8,
- 0x1.02cb36389bd79p-10},
- {0x1.55458a59f356ep-2, -0x1.c4820dd631ae9p-4, 0x1.e167af818bd15p-5,
- -0x1.0ef35f6f72e52p-5, 0x1.019c33b65e4ebp-6, -0x1.4d25bdd52d3a5p-8,
- 0x1.a008ae91f5936p-11},
- {0x1.553e878eafee1p-2, -0x1.c3a1d0b2a3db2p-4, 0x1.db90d8ed9f89bp-5,
- -0x1.0490e20f1ae91p-5, 0x1.d9a5d1fc42fe3p-7, -0x1.20bf8227c2abfp-8,
- 0x1.50f8174cdb6e9p-11},
- {0x1.5535a0dedf1b1p-2, -0x1.c29afb8bd01a1p-4, 0x1.d53f6371c1e27p-5,
- -0x1.f463209b433e2p-6, 0x1.b35222a17e44p-7, -0x1.f5efbf505e133p-9,
- 0x1.12e0e94e8586dp-11},
- {0x1.552aa25e57bfdp-2, -0x1.c16d811e4acadp-4, 0x1.ce8489b47aa51p-5,
- -0x1.dfde7ff758ea8p-6, 0x1.901f43aac38c8p-7, -0x1.b581d07df5ad5p-9,
- 0x1.c3726535f1fc6p-12},
- {0x1.551d5d9b204d3p-2, -0x1.c019e328f8db1p-4, 0x1.c7710f44fc3cep-5,
- -0x1.cbbbe25ea8ba4p-6, 0x1.6fe270088623dp-7, -0x1.7e6fc79733761p-9,
- 0x1.75077abf18d84p-12},
-};
-
-} // anonymous namespace
-
-LLVM_LIBC_FUNCTION(float, cbrtf, (float x)) {
- using FloatBits = typename fputil::FPBits<float>;
- using DoubleBits = typename fputil::FPBits<double>;
-
- FloatBits x_bits(x);
-
- uint32_t x_abs = x_bits.uintval() & 0x7fff'ffff;
- uint32_t sign_bit = (x_bits.uintval() >> 31) << DoubleBits::EXP_LEN;
-
- if (LIBC_UNLIKELY(x == 0.0f || x_abs >= 0x7f80'0000)) {
- // x is 0, Inf, or NaN.
- // Make sure it works for FTZ/DAZ modes.
- return x + x;
- }
-
- double xd = static_cast<double>(x);
- DoubleBits xd_bits(xd);
-
- // When using biased exponent of x in double precision,
- // x_e = real_exponent_of_x + 1023
- // Then:
- // x_e / 3 = real_exponent_of_x / 3 + 1023/3
- // = real_exponent_of_x / 3 + 341
- // So to make it the correct biased exponent of x^(1/3), we add
- // 1023 - 341 = 682
- // to the quotient x_e / 3.
- unsigned x_e = static_cast<unsigned>(xd_bits.get_biased_exponent());
- unsigned out_e = (x_e / 3 + 682) | sign_bit;
- unsigned shift_e = x_e % 3;
-
- // Set x_m = 2^(x_e % 3) * (1.mantissa)
- uint64_t x_m = xd_bits.get_mantissa();
- // Use the leading 4 bits for look up table
- unsigned idx = static_cast<unsigned>(x_m >> (DoubleBits::FRACTION_LEN - 4));
-
- x_m |= static_cast<uint64_t>(DoubleBits::EXP_BIAS)
- << DoubleBits::FRACTION_LEN;
-
- double x_reduced = DoubleBits(x_m).get_val();
- double dx = x_reduced - 1.0;
-
- double dx_sq = dx * dx;
- double c0 = fputil::multiply_add(dx, COEFFS[idx][0], 1.0);
- double c1 = fputil::multiply_add(dx, COEFFS[idx][2], COEFFS[idx][1]);
- double c2 = fputil::multiply_add(dx, COEFFS[idx][4], COEFFS[idx][3]);
- double c3 = fputil::multiply_add(dx, COEFFS[idx][6], COEFFS[idx][5]);
-
- double dx_4 = dx_sq * dx_sq;
- double p0 = fputil::multiply_add(dx_sq, c1, c0);
- double p1 = fputil::multiply_add(dx_sq, c3, c2);
-
- double r = fputil::multiply_add(dx_4, p1, p0) * CBRT2[shift_e];
-
- uint64_t r_m = DoubleBits(r).get_mantissa();
- // Check if the output is exact. To be exact, the smallest 1-bit of the
- // output has to be at least 2^-7 or higher. So we check the lowest 44 bits
- // to see if they are within 2^(-52 + 3) errors from all zeros, then the
- // result cube root is exact.
- if (LIBC_UNLIKELY(((r_m + 8) & 0xfffffffffff) <= 16)) {
- if ((r_m & 0xfffffffffff) <= 8)
- r_m &= 0xffff'ffff'ffff'ffe0;
- else
- r_m = (r_m & 0xffff'ffff'ffff'ffe0) + 0x20;
- fputil::clear_except_if_required(FE_INEXACT);
- }
- // Adjust exponent and sign.
- uint64_t r_bits =
- r_m | (static_cast<uint64_t>(out_e) << DoubleBits::FRACTION_LEN);
-
- return static_cast<float>(DoubleBits(r_bits).get_val());
-}
+LLVM_LIBC_FUNCTION(float, cbrtf, (float x)) { return math::cbrtf(x); }
} // namespace LIBC_NAMESPACE_DECL
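The cbrtf.cpp change above shows the refactoring pattern of this commit: the public entrypoint becomes a thin wrapper, while the full algorithm lives in the inline, constexpr src/__support/math/cbrtf.h header so that other code can reuse it directly. A hedged sketch of a reuse site (hypothetical consumer, not part of the patch):

```c++
#include "src/__support/math/cbrtf.h"

namespace LIBC_NAMESPACE_DECL {

// Any other libc function can now call the shared implementation directly.
static float cube_root_magnitude(float x) {
  return math::cbrtf(x < 0.0f ? -x : x);
}

} // namespace LIBC_NAMESPACE_DECL
```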
diff --git a/libc/src/math/generic/ceilbf16.cpp b/libc/src/math/generic/ceilbf16.cpp
new file mode 100644
index 0000000..441dcf0
--- /dev/null
+++ b/libc/src/math/generic/ceilbf16.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of ceilbf16 function -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/ceilbf16.h"
+#include "src/__support/FPUtil/NearestIntegerOperations.h"
+#include "src/__support/FPUtil/bfloat16.h"
+#include "src/__support/common.h"
+#include "src/__support/macros/config.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+LLVM_LIBC_FUNCTION(bfloat16, ceilbf16, (bfloat16 x)) { return fputil::ceil(x); }
+
+} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/math/generic/cos.cpp b/libc/src/math/generic/cos.cpp
index 5da0f868..aabf3bc 100644
--- a/libc/src/math/generic/cos.cpp
+++ b/libc/src/math/generic/cos.cpp
@@ -7,161 +7,10 @@
//===----------------------------------------------------------------------===//
#include "src/math/cos.h"
-#include "hdr/errno_macros.h"
-#include "src/__support/FPUtil/FEnvImpl.h"
-#include "src/__support/FPUtil/FPBits.h"
-#include "src/__support/FPUtil/double_double.h"
-#include "src/__support/FPUtil/dyadic_float.h"
-#include "src/__support/FPUtil/except_value_utils.h"
-#include "src/__support/common.h"
-#include "src/__support/macros/config.h"
-#include "src/__support/macros/optimization.h" // LIBC_UNLIKELY
-#include "src/__support/macros/properties/cpu_features.h" // LIBC_TARGET_CPU_HAS_FMA
-#include "src/math/generic/range_reduction_double_common.h"
-#include "src/math/generic/sincos_eval.h"
-
-#ifdef LIBC_TARGET_CPU_HAS_FMA_DOUBLE
-#include "range_reduction_double_fma.h"
-#else
-#include "range_reduction_double_nofma.h"
-#endif // LIBC_TARGET_CPU_HAS_FMA_DOUBLE
+#include "src/__support/math/cos.h"
namespace LIBC_NAMESPACE_DECL {
-using DoubleDouble = fputil::DoubleDouble;
-using Float128 = typename fputil::DyadicFloat<128>;
-
-LLVM_LIBC_FUNCTION(double, cos, (double x)) {
- using FPBits = typename fputil::FPBits<double>;
- FPBits xbits(x);
-
- uint16_t x_e = xbits.get_biased_exponent();
-
- DoubleDouble y;
- unsigned k;
- LargeRangeReduction range_reduction_large{};
-
- // |x| < 2^16.
- if (LIBC_LIKELY(x_e < FPBits::EXP_BIAS + FAST_PASS_EXPONENT)) {
- // |x| < 2^-7
- if (LIBC_UNLIKELY(x_e < FPBits::EXP_BIAS - 7)) {
- // |x| < 2^-27
- if (LIBC_UNLIKELY(x_e < FPBits::EXP_BIAS - 27)) {
- // Signed zeros.
- if (LIBC_UNLIKELY(x == 0.0))
- return 1.0;
-
- // For |x| < 2^-27, |cos(x) - 1| < |x|^2/2 < 2^-54 = ulp(1 - 2^-53)/2.
- return fputil::round_result_slightly_down(1.0);
- }
- // No range reduction needed.
- k = 0;
- y.lo = 0.0;
- y.hi = x;
- } else {
- // Small range reduction.
- k = range_reduction_small(x, y);
- }
- } else {
- // Inf or NaN
- if (LIBC_UNLIKELY(x_e > 2 * FPBits::EXP_BIAS)) {
- if (xbits.is_signaling_nan()) {
- fputil::raise_except_if_required(FE_INVALID);
- return FPBits::quiet_nan().get_val();
- }
- // cos(+-Inf) = NaN
- if (xbits.get_mantissa() == 0) {
- fputil::set_errno_if_required(EDOM);
- fputil::raise_except_if_required(FE_INVALID);
- }
- return x + FPBits::quiet_nan().get_val();
- }
-
- // Large range reduction.
- k = range_reduction_large.fast(x, y);
- }
-
- DoubleDouble sin_y, cos_y;
-
- [[maybe_unused]] double err = generic::sincos_eval(y, sin_y, cos_y);
-
- // Look up sin(k * pi/128) and cos(k * pi/128)
-#ifdef LIBC_MATH_HAS_SMALL_TABLES
- // Memory saving versions. Use 65-entry table.
- auto get_idx_dd = [](unsigned kk) -> DoubleDouble {
- unsigned idx = (kk & 64) ? 64 - (kk & 63) : (kk & 63);
- DoubleDouble ans = SIN_K_PI_OVER_128[idx];
- if (kk & 128) {
- ans.hi = -ans.hi;
- ans.lo = -ans.lo;
- }
- return ans;
- };
- DoubleDouble msin_k = get_idx_dd(k + 128);
- DoubleDouble cos_k = get_idx_dd(k + 64);
-#else
- // Fast look up version, but needs 256-entry table.
- // -sin(k * pi/128) = sin((k + 128) * pi/128)
- // cos(k * pi/128) = sin(k * pi/128 + pi/2) = sin((k + 64) * pi/128).
- DoubleDouble msin_k = SIN_K_PI_OVER_128[(k + 128) & 255];
- DoubleDouble cos_k = SIN_K_PI_OVER_128[(k + 64) & 255];
-#endif // LIBC_MATH_HAS_SMALL_TABLES
-
- // After range reduction, k = round(x * 128 / pi) and y = x - k * (pi / 128).
- // So k is an integer and -pi / 256 <= y <= pi / 256.
- // Then cos(x) = cos((k * pi/128 + y)
- // = cos(y) * cos(k*pi/128) - sin(y) * sin(k*pi/128)
- DoubleDouble cos_k_cos_y = fputil::quick_mult(cos_y, cos_k);
- DoubleDouble msin_k_sin_y = fputil::quick_mult(sin_y, msin_k);
-
- DoubleDouble rr = fputil::exact_add<false>(cos_k_cos_y.hi, msin_k_sin_y.hi);
- rr.lo += msin_k_sin_y.lo + cos_k_cos_y.lo;
-
-#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS
- return rr.hi + rr.lo;
-#else
-
- double rlp = rr.lo + err;
- double rlm = rr.lo - err;
-
- double r_upper = rr.hi + rlp; // (rr.lo + ERR);
- double r_lower = rr.hi + rlm; // (rr.lo - ERR);
-
- // Ziv's rounding test.
- if (LIBC_LIKELY(r_upper == r_lower))
- return r_upper;
-
- Float128 u_f128, sin_u, cos_u;
- if (LIBC_LIKELY(x_e < FPBits::EXP_BIAS + FAST_PASS_EXPONENT))
- u_f128 = range_reduction_small_f128(x);
- else
- u_f128 = range_reduction_large.accurate();
-
- generic::sincos_eval(u_f128, sin_u, cos_u);
-
- auto get_sin_k = [](unsigned kk) -> Float128 {
- unsigned idx = (kk & 64) ? 64 - (kk & 63) : (kk & 63);
- Float128 ans = SIN_K_PI_OVER_128_F128[idx];
- if (kk & 128)
- ans.sign = Sign::NEG;
- return ans;
- };
-
- // -sin(k * pi/128) = sin((k + 128) * pi/128)
- // cos(k * pi/128) = sin(k * pi/128 + pi/2) = sin((k + 64) * pi/128).
- Float128 msin_k_f128 = get_sin_k(k + 128);
- Float128 cos_k_f128 = get_sin_k(k + 64);
-
- // cos(x) = cos((k * pi/128 + u)
- // = cos(u) * cos(k*pi/128) - sin(u) * sin(k*pi/128)
- Float128 r = fputil::quick_add(fputil::quick_mul(cos_k_f128, cos_u),
- fputil::quick_mul(msin_k_f128, sin_u));
-
- // TODO: Add assertion if Ziv's accuracy tests fail in debug mode.
- // https://github.com/llvm/llvm-project/issues/96452.
-
- return static_cast<double>(r);
-#endif // !LIBC_MATH_HAS_SKIP_ACCURATE_PASS
-}
+LLVM_LIBC_FUNCTION(double, cos, (double x)) { return math::cos(x); }
} // namespace LIBC_NAMESPACE_DECL
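
The removed cos implementation reused a single sine table via -sin(k*pi/128) = sin((k + 128)*pi/128) and cos(k*pi/128) = sin((k + 64)*pi/128), then recombined the pieces with the angle-addition formula after range reduction. A small sanity check of those identities using plain <cmath> (illustration only; the libc code uses precomputed double-double tables instead):

    #include <cassert>
    #include <cmath>

    int main() {
      const double pi = 3.141592653589793;
      for (unsigned k = 0; k < 256; ++k) {
        double a = k * pi / 128.0;
        // Table-reuse identities from the removed implementation.
        assert(std::fabs(-std::sin(a) - std::sin((k + 128) * pi / 128.0)) < 1e-12);
        assert(std::fabs(std::cos(a) - std::sin((k + 64) * pi / 128.0)) < 1e-12);
      }
      // cos(k*pi/128 + y) = cos(y)*cos(k*pi/128) - sin(y)*sin(k*pi/128).
      double y = 0.01, a = 3.0 * pi / 128.0;
      assert(std::fabs(std::cos(a + y) -
                       (std::cos(y) * std::cos(a) - std::sin(y) * std::sin(a))) < 1e-14);
      return 0;
    }
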
diff --git a/libc/src/math/generic/floorbf16.cpp b/libc/src/math/generic/floorbf16.cpp
new file mode 100644
index 0000000..d157096
--- /dev/null
+++ b/libc/src/math/generic/floorbf16.cpp
@@ -0,0 +1,21 @@
+//===-- Implementation of floorbf16 function ------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/floorbf16.h"
+#include "src/__support/FPUtil/NearestIntegerOperations.h"
+#include "src/__support/FPUtil/bfloat16.h"
+#include "src/__support/common.h"
+#include "src/__support/macros/config.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+LLVM_LIBC_FUNCTION(bfloat16, floorbf16, (bfloat16 x)) {
+ return fputil::floor(x);
+}
+
+} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/math/generic/fmaxbf16.cpp b/libc/src/math/generic/fmaxbf16.cpp
new file mode 100644
index 0000000..01d395b
--- /dev/null
+++ b/libc/src/math/generic/fmaxbf16.cpp
@@ -0,0 +1,21 @@
+//===-- Implementation of fmaxbf16 function -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fmaxbf16.h"
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "src/__support/FPUtil/bfloat16.h"
+#include "src/__support/common.h"
+#include "src/__support/macros/config.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+LLVM_LIBC_FUNCTION(bfloat16, fmaxbf16, (bfloat16 x, bfloat16 y)) {
+ return fputil::fmax(x, y);
+}
+
+} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/math/generic/fminbf16.cpp b/libc/src/math/generic/fminbf16.cpp
new file mode 100644
index 0000000..c3e29ee
--- /dev/null
+++ b/libc/src/math/generic/fminbf16.cpp
@@ -0,0 +1,21 @@
+//===-- Implementation of fminbf16 function -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fminbf16.h"
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "src/__support/FPUtil/bfloat16.h"
+#include "src/__support/common.h"
+#include "src/__support/macros/config.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+LLVM_LIBC_FUNCTION(bfloat16, fminbf16, (bfloat16 x, bfloat16 y)) {
+ return fputil::fmin(x, y);
+}
+
+} // namespace LIBC_NAMESPACE_DECL
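
fmaxbf16 and fminbf16 simply forward to fputil::fmax and fputil::fmin, which follow the usual C/IEEE-754 rule that a single quiet NaN operand is ignored in favor of the numeric one. A quick reference check with the standard <cmath> functions (not the libc internals):

    #include <cassert>
    #include <cmath>

    int main() {
      double nan = std::nan("");
      // One NaN operand: the numeric operand is returned.
      assert(std::fmax(nan, 1.0) == 1.0);
      assert(std::fmin(1.0, nan) == 1.0);
      // Two numeric operands: ordinary max/min.
      assert(std::fmax(-2.0, 3.0) == 3.0);
      assert(std::fmin(-2.0, 3.0) == -2.0);
      return 0;
    }
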
diff --git a/libc/src/math/generic/roundbf16.cpp b/libc/src/math/generic/roundbf16.cpp
new file mode 100644
index 0000000..cc7e5e2
--- /dev/null
+++ b/libc/src/math/generic/roundbf16.cpp
@@ -0,0 +1,21 @@
+//===-- Implementation of roundbf16 function ------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/roundbf16.h"
+#include "src/__support/FPUtil/NearestIntegerOperations.h"
+#include "src/__support/FPUtil/bfloat16.h"
+#include "src/__support/common.h"
+#include "src/__support/macros/config.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+LLVM_LIBC_FUNCTION(bfloat16, roundbf16, (bfloat16 x)) {
+ return fputil::round(x);
+}
+
+} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/math/generic/roundevenbf16.cpp b/libc/src/math/generic/roundevenbf16.cpp
new file mode 100644
index 0000000..39419e4
--- /dev/null
+++ b/libc/src/math/generic/roundevenbf16.cpp
@@ -0,0 +1,21 @@
+//===-- Implementation of roundevenbf16 function --------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/roundevenbf16.h"
+#include "src/__support/FPUtil/NearestIntegerOperations.h"
+#include "src/__support/FPUtil/bfloat16.h"
+#include "src/__support/common.h"
+#include "src/__support/macros/config.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+LLVM_LIBC_FUNCTION(bfloat16, roundevenbf16, (bfloat16 x)) {
+ return fputil::round_using_specific_rounding_mode(x, FP_INT_TONEAREST);
+}
+
+} // namespace LIBC_NAMESPACE_DECL
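
roundevenbf16 rounds to the nearest integral value with ties going to the even neighbor, unlike round(), which breaks ties away from zero. The distinction, shown with standard <cmath> calls (nearbyint under the default FE_TONEAREST mode is ties-to-even):

    #include <cassert>
    #include <cfenv>
    #include <cmath>

    int main() {
      std::fesetround(FE_TONEAREST);      // default mode: ties to even
      assert(std::nearbyint(2.5) == 2.0);
      assert(std::nearbyint(3.5) == 4.0);
      assert(std::round(2.5) == 3.0);     // round(): ties away from zero
      assert(std::round(-2.5) == -3.0);
      return 0;
    }
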
diff --git a/libc/src/math/generic/sin.cpp b/libc/src/math/generic/sin.cpp
index a614427b..1b6310f 100644
--- a/libc/src/math/generic/sin.cpp
+++ b/libc/src/math/generic/sin.cpp
@@ -18,13 +18,13 @@
#include "src/__support/macros/config.h"
#include "src/__support/macros/optimization.h" // LIBC_UNLIKELY
#include "src/__support/macros/properties/cpu_features.h" // LIBC_TARGET_CPU_HAS_FMA
-#include "src/math/generic/range_reduction_double_common.h"
-#include "src/math/generic/sincos_eval.h"
+#include "src/__support/math/range_reduction_double_common.h"
+#include "src/__support/math/sincos_eval.h"
#ifdef LIBC_TARGET_CPU_HAS_FMA_DOUBLE
-#include "range_reduction_double_fma.h"
+#include "src/__support/math/range_reduction_double_fma.h"
#else
-#include "range_reduction_double_nofma.h"
+#include "src/__support/math/range_reduction_double_nofma.h"
#endif // LIBC_TARGET_CPU_HAS_FMA_DOUBLE
namespace LIBC_NAMESPACE_DECL {
@@ -33,6 +33,7 @@ using DoubleDouble = fputil::DoubleDouble;
using Float128 = typename fputil::DyadicFloat<128>;
LLVM_LIBC_FUNCTION(double, sin, (double x)) {
+ using namespace math::range_reduction_double_internal;
using FPBits = typename fputil::FPBits<double>;
FPBits xbits(x);
@@ -95,7 +96,8 @@ LLVM_LIBC_FUNCTION(double, sin, (double x)) {
DoubleDouble sin_y, cos_y;
- [[maybe_unused]] double err = generic::sincos_eval(y, sin_y, cos_y);
+ [[maybe_unused]] double err =
+ math::sincos_eval_internal::sincos_eval(y, sin_y, cos_y);
// Look up sin(k * pi/128) and cos(k * pi/128)
#ifdef LIBC_MATH_HAS_SMALL_TABLES
@@ -149,7 +151,7 @@ LLVM_LIBC_FUNCTION(double, sin, (double x)) {
else
u_f128 = range_reduction_large.accurate();
- generic::sincos_eval(u_f128, sin_u, cos_u);
+ math::sincos_eval_internal::sincos_eval(u_f128, sin_u, cos_u);
auto get_sin_k = [](unsigned kk) -> Float128 {
unsigned idx = (kk & 64) ? 64 - (kk & 63) : (kk & 63);
diff --git a/libc/src/math/generic/sincos.cpp b/libc/src/math/generic/sincos.cpp
index 08c8a82..38661de 100644
--- a/libc/src/math/generic/sincos.cpp
+++ b/libc/src/math/generic/sincos.cpp
@@ -19,13 +19,13 @@
#include "src/__support/macros/config.h"
#include "src/__support/macros/optimization.h" // LIBC_UNLIKELY
#include "src/__support/macros/properties/cpu_features.h" // LIBC_TARGET_CPU_HAS_FMA
-#include "src/math/generic/range_reduction_double_common.h"
-#include "src/math/generic/sincos_eval.h"
+#include "src/__support/math/range_reduction_double_common.h"
+#include "src/__support/math/sincos_eval.h"
#ifdef LIBC_TARGET_CPU_HAS_FMA_DOUBLE
-#include "range_reduction_double_fma.h"
+#include "src/__support/math/range_reduction_double_fma.h"
#else
-#include "range_reduction_double_nofma.h"
+#include "src/__support/math/range_reduction_double_nofma.h"
#endif // LIBC_TARGET_CPU_HAS_FMA_DOUBLE
namespace LIBC_NAMESPACE_DECL {
@@ -34,6 +34,7 @@ using DoubleDouble = fputil::DoubleDouble;
using Float128 = typename fputil::DyadicFloat<128>;
LLVM_LIBC_FUNCTION(void, sincos, (double x, double *sin_x, double *cos_x)) {
+ using namespace math::range_reduction_double_internal;
using FPBits = typename fputil::FPBits<double>;
FPBits xbits(x);
@@ -106,7 +107,8 @@ LLVM_LIBC_FUNCTION(void, sincos, (double x, double *sin_x, double *cos_x)) {
DoubleDouble sin_y, cos_y;
- [[maybe_unused]] double err = generic::sincos_eval(y, sin_y, cos_y);
+ [[maybe_unused]] double err =
+ math::sincos_eval_internal::sincos_eval(y, sin_y, cos_y);
// Look up sin(k * pi/128) and cos(k * pi/128)
#ifdef LIBC_MATH_HAS_SMALL_TABLES
@@ -179,7 +181,7 @@ LLVM_LIBC_FUNCTION(void, sincos, (double x, double *sin_x, double *cos_x)) {
else
u_f128 = range_reduction_large.accurate();
- generic::sincos_eval(u_f128, sin_u, cos_u);
+ math::sincos_eval_internal::sincos_eval(u_f128, sin_u, cos_u);
auto get_sin_k = [](unsigned kk) -> Float128 {
unsigned idx = (kk & 64) ? 64 - (kk & 63) : (kk & 63);
diff --git a/libc/src/math/generic/tan.cpp b/libc/src/math/generic/tan.cpp
index 89b812c..7ea40c9 100644
--- a/libc/src/math/generic/tan.cpp
+++ b/libc/src/math/generic/tan.cpp
@@ -20,12 +20,12 @@
#include "src/__support/macros/config.h"
#include "src/__support/macros/optimization.h" // LIBC_UNLIKELY
#include "src/__support/macros/properties/cpu_features.h" // LIBC_TARGET_CPU_HAS_FMA
-#include "src/math/generic/range_reduction_double_common.h"
+#include "src/__support/math/range_reduction_double_common.h"
#ifdef LIBC_TARGET_CPU_HAS_FMA_DOUBLE
-#include "range_reduction_double_fma.h"
+#include "src/__support/math/range_reduction_double_fma.h"
#else
-#include "range_reduction_double_nofma.h"
+#include "src/__support/math/range_reduction_double_nofma.h"
#endif // LIBC_TARGET_CPU_HAS_FMA_DOUBLE
namespace LIBC_NAMESPACE_DECL {
@@ -121,6 +121,7 @@ LIBC_INLINE double tan_eval(const DoubleDouble &u, DoubleDouble &result) {
} // anonymous namespace
LLVM_LIBC_FUNCTION(double, tan, (double x)) {
+ using namespace math::range_reduction_double_internal;
using FPBits = typename fputil::FPBits<double>;
FPBits xbits(x);
diff --git a/libc/src/math/generic/truncbf16.cpp b/libc/src/math/generic/truncbf16.cpp
new file mode 100644
index 0000000..dfbe83d
--- /dev/null
+++ b/libc/src/math/generic/truncbf16.cpp
@@ -0,0 +1,21 @@
+//===-- Implementation of truncbf16 function ------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/truncbf16.h"
+#include "src/__support/FPUtil/NearestIntegerOperations.h"
+#include "src/__support/FPUtil/bfloat16.h"
+#include "src/__support/common.h"
+#include "src/__support/macros/config.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+LLVM_LIBC_FUNCTION(bfloat16, truncbf16, (bfloat16 x)) {
+ return fputil::trunc(x);
+}
+
+} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/math/roundbf16.h b/libc/src/math/roundbf16.h
new file mode 100644
index 0000000..0f74e43
--- /dev/null
+++ b/libc/src/math/roundbf16.h
@@ -0,0 +1,21 @@
+//===-- Implementation header for roundbf16 ---------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_ROUNDBF16_H
+#define LLVM_LIBC_SRC_MATH_ROUNDBF16_H
+
+#include "src/__support/macros/config.h"
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+bfloat16 roundbf16(bfloat16 x);
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC_MATH_ROUNDBF16_H
diff --git a/libc/src/math/roundevenbf16.h b/libc/src/math/roundevenbf16.h
new file mode 100644
index 0000000..f4374d2
--- /dev/null
+++ b/libc/src/math/roundevenbf16.h
@@ -0,0 +1,21 @@
+//===-- Implementation header for roundevenbf16 -----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_ROUNDEVENBF16_H
+#define LLVM_LIBC_SRC_MATH_ROUNDEVENBF16_H
+
+#include "src/__support/macros/config.h"
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+bfloat16 roundevenbf16(bfloat16 x);
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC_MATH_ROUNDEVENBF16_H
diff --git a/libc/src/math/truncbf16.h b/libc/src/math/truncbf16.h
new file mode 100644
index 0000000..c87d4cc
--- /dev/null
+++ b/libc/src/math/truncbf16.h
@@ -0,0 +1,21 @@
+//===-- Implementation header for truncbf16 ---------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_TRUNCBF16_H
+#define LLVM_LIBC_SRC_MATH_TRUNCBF16_H
+
+#include "src/__support/macros/config.h"
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+bfloat16 truncbf16(bfloat16 x);
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC_MATH_TRUNCBF16_H
diff --git a/libc/test/UnitTest/CMakeLists.txt b/libc/test/UnitTest/CMakeLists.txt
index d92ab6f..f1a83fc 100644
--- a/libc/test/UnitTest/CMakeLists.txt
+++ b/libc/test/UnitTest/CMakeLists.txt
@@ -125,6 +125,7 @@ add_unittest_framework_library(
RoundingModeUtils.h
DEPENDS
LibcTest
+ libc.test.UnitTest.ErrnoCheckingTest
libc.test.UnitTest.string_utils
libc.src.__support.CPP.array
libc.src.__support.FPUtil.fp_bits
diff --git a/libc/test/UnitTest/FEnvSafeTest.cpp b/libc/test/UnitTest/FEnvSafeTest.cpp
index 168b1d4..f644569 100644
--- a/libc/test/UnitTest/FEnvSafeTest.cpp
+++ b/libc/test/UnitTest/FEnvSafeTest.cpp
@@ -9,8 +9,10 @@
#include "FEnvSafeTest.h"
#include "src/__support/FPUtil/FEnvImpl.h"
+#include "src/__support/libc_errno.h"
#include "src/__support/macros/config.h"
#include "src/__support/macros/properties/architectures.h"
+#include "test/UnitTest/ErrnoCheckingTest.h"
namespace LIBC_NAMESPACE_DECL {
namespace testing {
@@ -25,6 +27,10 @@ void FEnvSafeTest::TearDown() {
if (!should_be_unchanged) {
restore_fenv();
}
+ // TODO (PR 135320): Remove this override once all FEnvSafeTest instances are
+ // updated to validate or ignore errno.
+ libc_errno = 0;
+ ErrnoCheckingTest::TearDown();
}
void FEnvSafeTest::get_fenv(fenv_t &fenv) {
diff --git a/libc/test/UnitTest/FEnvSafeTest.h b/libc/test/UnitTest/FEnvSafeTest.h
index a3c5e62..1e10629 100644
--- a/libc/test/UnitTest/FEnvSafeTest.h
+++ b/libc/test/UnitTest/FEnvSafeTest.h
@@ -12,6 +12,7 @@
#include "hdr/types/fenv_t.h"
#include "src/__support/CPP/utility.h"
#include "src/__support/macros/config.h"
+#include "test/UnitTest/ErrnoCheckingTest.h"
#include "test/UnitTest/Test.h"
namespace LIBC_NAMESPACE_DECL {
@@ -20,7 +21,7 @@ namespace testing {
// This provides a test fixture (or base class for other test fixtures) that
// asserts that each test does not leave the FPU state represented by `fenv_t`
// (aka `FPState`) perturbed from its initial state.
-class FEnvSafeTest : public Test {
+class FEnvSafeTest : public ErrnoCheckingTest {
public:
void TearDown() override;
diff --git a/libc/test/UnitTest/FPMatcher.h b/libc/test/UnitTest/FPMatcher.h
index da15cf2..592cd1b 100644
--- a/libc/test/UnitTest/FPMatcher.h
+++ b/libc/test/UnitTest/FPMatcher.h
@@ -14,8 +14,10 @@
#include "src/__support/FPUtil/FEnvImpl.h"
#include "src/__support/FPUtil/FPBits.h"
#include "src/__support/FPUtil/fpbits_str.h"
+#include "src/__support/libc_errno.h"
#include "src/__support/macros/config.h"
#include "src/__support/macros/properties/architectures.h"
+#include "test/UnitTest/ErrnoCheckingTest.h"
#include "test/UnitTest/RoundingModeUtils.h"
#include "test/UnitTest/StringUtils.h"
#include "test/UnitTest/Test.h"
@@ -166,7 +168,7 @@ CFPMatcher<T, C> getMatcherComplex(T expectedValue) {
return CFPMatcher<T, C>(expectedValue);
}
-template <typename T> struct FPTest : public Test {
+template <typename T> struct FPTest : public ErrnoCheckingTest {
using FPBits = LIBC_NAMESPACE::fputil::FPBits<T>;
using StorageType = typename FPBits::StorageType;
static constexpr StorageType STORAGE_MAX =
@@ -191,6 +193,13 @@ template <typename T> struct FPTest : public Test {
fputil::testing::RoundingMode::Downward,
fputil::testing::RoundingMode::TowardZero,
};
+
+ void TearDown() override {
+ // TODO (PR 135320): Remove this override once all FPTest instances are
+ // updated to validate or ignore errno.
+ libc_errno = 0;
+ ErrnoCheckingTest::TearDown();
+ }
};
// Add facility to test Flush-Denormal-To-Zero (FTZ) and Denormal-As-Zero (DAZ)
diff --git a/libc/test/integration/src/__support/GPU/CMakeLists.txt b/libc/test/integration/src/__support/GPU/CMakeLists.txt
index e066830..1fb175b 100644
--- a/libc/test/integration/src/__support/GPU/CMakeLists.txt
+++ b/libc/test/integration/src/__support/GPU/CMakeLists.txt
@@ -27,3 +27,16 @@ add_integration_test(
LOADER_ARGS
--threads 64
)
+
+add_libc_test(
+ fixedstack_test
+ SUITE
+ libc-support-gpu-tests
+ SRCS
+ fixedstack_test.cpp
+ DEPENDS
+ libc.src.__support.GPU.fixedstack
+ LOADER_ARGS
+ --threads 32
+ --blocks 16
+)
diff --git a/libc/test/integration/src/__support/GPU/fixedstack_test.cpp b/libc/test/integration/src/__support/GPU/fixedstack_test.cpp
new file mode 100644
index 0000000..fde51df
--- /dev/null
+++ b/libc/test/integration/src/__support/GPU/fixedstack_test.cpp
@@ -0,0 +1,44 @@
+//===-- Integration test for the lock-free stack --------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/__support/GPU/fixedstack.h"
+#include "src/__support/GPU/utils.h"
+#include "test/IntegrationTest/test.h"
+
+using namespace LIBC_NAMESPACE;
+
+static FixedStack<uint32_t, 2048> global_stack;
+
+void run() {
+ // We need enough space in the stack as threads in flight can temporarily
+  // consume memory before they finish committing it back to the stack.
+ ASSERT_EQ(gpu::get_num_blocks() * gpu::get_num_threads(), 512);
+
+ uint32_t val;
+ uint32_t num_threads = static_cast<uint32_t>(gpu::get_num_threads());
+ for (int i = 0; i < 256; ++i) {
+    EXPECT_TRUE(global_stack.push(UINT32_MAX));
+    EXPECT_TRUE(global_stack.pop(val));
+ ASSERT_TRUE(val < num_threads || val == UINT32_MAX);
+ }
+
+ EXPECT_TRUE(global_stack.push(static_cast<uint32_t>(gpu::get_thread_id())));
+ EXPECT_TRUE(global_stack.push(static_cast<uint32_t>(gpu::get_thread_id())));
+ EXPECT_TRUE(global_stack.pop(val));
+ ASSERT_TRUE(val < num_threads || val == UINT32_MAX);
+
+ // Fill the rest of the stack with the default value.
+ while (!global_stack.push(UINT32_MAX))
+ ;
+}
+
+TEST_MAIN(int argc, char **argv, char **envp) {
+ run();
+
+ return 0;
+}
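
Based on the bool-returning push/pop used by the test above (the FixedStack interface details are inferred from this test, not from separate documentation), a caller that must not drop work would retry until a slot frees up, for example:

    #include <cstdint>

    // Sketch only: assumes a stack type with bool push(T) / bool pop(T&),
    // as exercised by fixedstack_test.cpp above.
    template <typename Stack>
    bool push_with_retry(Stack &stack, uint32_t value, int max_attempts) {
      for (int attempt = 0; attempt < max_attempts; ++attempt)
        if (stack.push(value)) // may fail transiently while other lanes hold slots
          return true;
      return false;
    }
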
diff --git a/libc/test/shared/CMakeLists.txt b/libc/test/shared/CMakeLists.txt
index f5ea510..b2264e1 100644
--- a/libc/test/shared/CMakeLists.txt
+++ b/libc/test/shared/CMakeLists.txt
@@ -27,6 +27,8 @@ add_fp_unittest(
libc.src.__support.math.atanhf
libc.src.__support.math.atanhf16
libc.src.__support.math.cbrt
+ libc.src.__support.math.cbrtf
+ libc.src.__support.math.cos
libc.src.__support.math.erff
libc.src.__support.math.exp
libc.src.__support.math.exp10
diff --git a/libc/test/shared/shared_math_test.cpp b/libc/test/shared/shared_math_test.cpp
index 3d64e5e..2386b55 100644
--- a/libc/test/shared/shared_math_test.cpp
+++ b/libc/test/shared/shared_math_test.cpp
@@ -49,6 +49,7 @@ TEST(LlvmLibcSharedMathTest, AllFloat) {
EXPECT_FP_EQ(0x0p+0f, LIBC_NAMESPACE::shared::atan2f(0.0f, 0.0f));
EXPECT_FP_EQ(0x0p+0f, LIBC_NAMESPACE::shared::atanf(0.0f));
EXPECT_FP_EQ(0x0p+0f, LIBC_NAMESPACE::shared::atanhf(0.0f));
+ EXPECT_FP_EQ(0x0p+0f, LIBC_NAMESPACE::shared::cbrtf(0.0f));
EXPECT_FP_EQ(0x0p+0f, LIBC_NAMESPACE::shared::erff(0.0f));
EXPECT_FP_EQ(0x1p+0f, LIBC_NAMESPACE::shared::exp10f(0.0f));
EXPECT_FP_EQ(0x1p+0f, LIBC_NAMESPACE::shared::expf(0.0f));
@@ -65,8 +66,9 @@ TEST(LlvmLibcSharedMathTest, AllDouble) {
EXPECT_FP_EQ(0x1.921fb54442d18p+0, LIBC_NAMESPACE::shared::acos(0.0));
EXPECT_FP_EQ(0x0p+0, LIBC_NAMESPACE::shared::asin(0.0));
EXPECT_FP_EQ(0x0p+0, LIBC_NAMESPACE::shared::atan(0.0));
- EXPECT_FP_EQ(0x0p+0, LIBC_NAMESPACE::shared::cbrt(0.0));
EXPECT_FP_EQ(0x0p+0, LIBC_NAMESPACE::shared::atan2(0.0, 0.0));
+ EXPECT_FP_EQ(0x0p+0, LIBC_NAMESPACE::shared::cbrt(0.0));
+ EXPECT_FP_EQ(0x1p+0, LIBC_NAMESPACE::shared::cos(0.0));
EXPECT_FP_EQ(0x1p+0, LIBC_NAMESPACE::shared::exp(0.0));
EXPECT_FP_EQ(0x1p+0, LIBC_NAMESPACE::shared::exp10(0.0));
}
diff --git a/libc/test/src/math/CMakeLists.txt b/libc/test/src/math/CMakeLists.txt
index 43cde0d..a74f9fe 100644
--- a/libc/test/src/math/CMakeLists.txt
+++ b/libc/test/src/math/CMakeLists.txt
@@ -2972,6 +2972,118 @@ add_fp_unittest(
libc.src.__support.macros.properties.types
)
+add_fp_unittest(
+ bf16add_test
+ NEED_MPFR
+ SUITE
+ libc-math-unittests
+ SRCS
+ bf16add_test.cpp
+ HDRS
+ AddTest.h
+ DEPENDS
+ libc.src.math.bf16add
+ libc.src.__support.FPUtil.bfloat16
+)
+
+add_fp_unittest(
+ bf16addf_test
+ NEED_MPFR
+ SUITE
+ libc-math-unittests
+ SRCS
+ bf16addf_test.cpp
+ HDRS
+ AddTest.h
+ DEPENDS
+ libc.src.math.bf16addf
+ libc.src.__support.FPUtil.bfloat16
+)
+
+add_fp_unittest(
+ bf16addl_test
+ NEED_MPFR
+ SUITE
+ libc-math-unittests
+ SRCS
+ bf16addl_test.cpp
+ HDRS
+ AddTest.h
+ DEPENDS
+ libc.src.math.bf16addl
+ libc.src.__support.FPUtil.bfloat16
+)
+
+add_fp_unittest(
+ bf16addf128_test
+ NEED_MPFR
+ SUITE
+ libc-math-unittests
+ SRCS
+ bf16addf128_test.cpp
+ HDRS
+ AddTest.h
+ DEPENDS
+ libc.src.math.bf16addf128
+ libc.src.__support.FPUtil.bfloat16
+)
+
+add_fp_unittest(
+ bf16sub_test
+ NEED_MPFR
+ SUITE
+ libc-math-unittests
+ SRCS
+ bf16sub_test.cpp
+ HDRS
+ SubTest.h
+ DEPENDS
+ libc.src.math.bf16sub
+ libc.src.__support.FPUtil.bfloat16
+)
+
+add_fp_unittest(
+ bf16subf_test
+ NEED_MPFR
+ SUITE
+ libc-math-unittests
+ SRCS
+ bf16subf_test.cpp
+ HDRS
+ SubTest.h
+ DEPENDS
+ libc.src.math.bf16subf
+ libc.src.__support.FPUtil.bfloat16
+)
+
+add_fp_unittest(
+ bf16subl_test
+ NEED_MPFR
+ SUITE
+ libc-math-unittests
+ SRCS
+ bf16subl_test.cpp
+ HDRS
+ SubTest.h
+ DEPENDS
+ libc.src.math.bf16subl
+ libc.src.__support.FPUtil.bfloat16
+)
+
+add_fp_unittest(
+ bf16subf128_test
+ NEED_MPFR
+ SUITE
+ libc-math-unittests
+ SRCS
+ bf16subf128_test.cpp
+ HDRS
+ SubTest.h
+ DEPENDS
+ libc.src.math.bf16subf128
+ libc.src.__support.FPUtil.bfloat16
+)
+
add_subdirectory(generic)
add_subdirectory(smoke)
diff --git a/libc/test/src/math/bf16add_test.cpp b/libc/test/src/math/bf16add_test.cpp
new file mode 100644
index 0000000..9e9c594
--- /dev/null
+++ b/libc/test/src/math/bf16add_test.cpp
@@ -0,0 +1,14 @@
+//===-- Unittests for bf16add ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "AddTest.h"
+
+#include "src/__support/FPUtil/bfloat16.h"
+#include "src/math/bf16add.h"
+
+LIST_ADD_TESTS(bfloat16, double, LIBC_NAMESPACE::bf16add)
diff --git a/libc/test/src/math/bf16addf128_test.cpp b/libc/test/src/math/bf16addf128_test.cpp
new file mode 100644
index 0000000..46f7ad3
--- /dev/null
+++ b/libc/test/src/math/bf16addf128_test.cpp
@@ -0,0 +1,14 @@
+//===-- Unittests for bf16addf128 -----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "AddTest.h"
+
+#include "src/__support/FPUtil/bfloat16.h"
+#include "src/math/bf16addf128.h"
+
+LIST_ADD_TESTS(bfloat16, float128, LIBC_NAMESPACE::bf16addf128)
diff --git a/libc/test/src/math/bf16addf_test.cpp b/libc/test/src/math/bf16addf_test.cpp
new file mode 100644
index 0000000..06d56cf
--- /dev/null
+++ b/libc/test/src/math/bf16addf_test.cpp
@@ -0,0 +1,14 @@
+//===-- Unittests for bf16addf --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "AddTest.h"
+
+#include "src/__support/FPUtil/bfloat16.h"
+#include "src/math/bf16addf.h"
+
+LIST_ADD_TESTS(bfloat16, float, LIBC_NAMESPACE::bf16addf)
diff --git a/libc/test/src/math/bf16addl_test.cpp b/libc/test/src/math/bf16addl_test.cpp
new file mode 100644
index 0000000..bf54827
--- /dev/null
+++ b/libc/test/src/math/bf16addl_test.cpp
@@ -0,0 +1,14 @@
+//===-- Unittests for bf16addl --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "AddTest.h"
+
+#include "src/__support/FPUtil/bfloat16.h"
+#include "src/math/bf16addl.h"
+
+LIST_ADD_TESTS(bfloat16, long double, LIBC_NAMESPACE::bf16addl)
diff --git a/libc/test/src/math/bf16sub_test.cpp b/libc/test/src/math/bf16sub_test.cpp
new file mode 100644
index 0000000..4a793dc
--- /dev/null
+++ b/libc/test/src/math/bf16sub_test.cpp
@@ -0,0 +1,14 @@
+//===-- Unittests for bf16sub ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "SubTest.h"
+
+#include "src/__support/FPUtil/bfloat16.h"
+#include "src/math/bf16sub.h"
+
+LIST_SUB_TESTS(bfloat16, double, LIBC_NAMESPACE::bf16sub)
diff --git a/libc/test/src/math/bf16subf128_test.cpp b/libc/test/src/math/bf16subf128_test.cpp
new file mode 100644
index 0000000..25d6711
--- /dev/null
+++ b/libc/test/src/math/bf16subf128_test.cpp
@@ -0,0 +1,14 @@
+//===-- Unittests for bf16subf128 -----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "SubTest.h"
+
+#include "src/__support/FPUtil/bfloat16.h"
+#include "src/math/bf16subf128.h"
+
+LIST_SUB_TESTS(bfloat16, float128, LIBC_NAMESPACE::bf16subf128)
diff --git a/libc/test/src/math/bf16subf_test.cpp b/libc/test/src/math/bf16subf_test.cpp
new file mode 100644
index 0000000..e8c7440
--- /dev/null
+++ b/libc/test/src/math/bf16subf_test.cpp
@@ -0,0 +1,14 @@
+//===-- Unittests for bf16subf --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "SubTest.h"
+
+#include "src/__support/FPUtil/bfloat16.h"
+#include "src/math/bf16subf.h"
+
+LIST_SUB_TESTS(bfloat16, float, LIBC_NAMESPACE::bf16subf)
diff --git a/libc/test/src/math/bf16subl_test.cpp b/libc/test/src/math/bf16subl_test.cpp
new file mode 100644
index 0000000..2997369
--- /dev/null
+++ b/libc/test/src/math/bf16subl_test.cpp
@@ -0,0 +1,14 @@
+//===-- Unittests for bf16subl --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "SubTest.h"
+
+#include "src/__support/FPUtil/bfloat16.h"
+#include "src/math/bf16subl.h"
+
+LIST_SUB_TESTS(bfloat16, long double, LIBC_NAMESPACE::bf16subl)
diff --git a/libc/test/src/math/smoke/CMakeLists.txt b/libc/test/src/math/smoke/CMakeLists.txt
index a722f61..f060ef3 100644
--- a/libc/test/src/math/smoke/CMakeLists.txt
+++ b/libc/test/src/math/smoke/CMakeLists.txt
@@ -345,6 +345,19 @@ add_fp_unittest(
)
add_fp_unittest(
+ truncbf16_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ truncbf16_test.cpp
+ HDRS
+ TruncTest.h
+ DEPENDS
+ libc.src.math.truncbf16
+ libc.src.__support.FPUtil.bfloat16
+)
+
+add_fp_unittest(
canonicalize_test
SUITE
libc-math-smoke-tests
@@ -544,6 +557,19 @@ add_fp_unittest(
)
add_fp_unittest(
+ ceilbf16_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ ceilbf16_test.cpp
+ HDRS
+ CeilTest.h
+ DEPENDS
+ libc.src.math.ceilbf16
+ libc.src.__support.FPUtil.bfloat16
+)
+
+add_fp_unittest(
dfmal_test
SUITE
libc-math-smoke-tests
@@ -664,6 +690,19 @@ add_fp_unittest(
)
add_fp_unittest(
+ floorbf16_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ floorbf16_test.cpp
+ HDRS
+ FloorTest.h
+ DEPENDS
+ libc.src.math.floorbf16
+ libc.src.__support.FPUtil.bfloat16
+)
+
+add_fp_unittest(
round_test
SUITE
libc-math-smoke-tests
@@ -728,6 +767,19 @@ add_fp_unittest(
)
add_fp_unittest(
+ roundbf16_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ roundbf16_test.cpp
+ HDRS
+ RoundTest.h
+ DEPENDS
+ libc.src.math.roundbf16
+ libc.src.__support.FPUtil.bfloat16
+)
+
+add_fp_unittest(
roundeven_test
SUITE
libc-math-smoke-tests
@@ -792,6 +844,19 @@ add_fp_unittest(
)
add_fp_unittest(
+ roundevenbf16_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ roundevenbf16_test.cpp
+ HDRS
+ RoundEvenTest.h
+ DEPENDS
+ libc.src.math.roundevenbf16
+ libc.src.__support.FPUtil.bfloat16
+)
+
+add_fp_unittest(
lround_test
SUITE
libc-math-smoke-tests
@@ -2282,6 +2347,21 @@ add_fp_unittest(
)
add_fp_unittest(
+ fminbf16_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fminbf16_test.cpp
+ HDRS
+ FMinTest.h
+ DEPENDS
+ libc.src.math.fminbf16
+ libc.src.__support.CPP.algorithm
+ libc.src.__support.FPUtil.bfloat16
+ libc.src.__support.FPUtil.fp_bits
+)
+
+add_fp_unittest(
fmaxf_test
SUITE
libc-math-smoke-tests
@@ -2352,6 +2432,21 @@ add_fp_unittest(
)
add_fp_unittest(
+ fmaxbf16_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fmaxbf16_test.cpp
+ HDRS
+ FMaxTest.h
+ DEPENDS
+ libc.src.math.fmaxbf16
+ libc.src.__support.CPP.algorithm
+ libc.src.__support.FPUtil.bfloat16
+ libc.src.__support.FPUtil.fp_bits
+)
+
+add_fp_unittest(
fmaximuml_test
SUITE
libc-math-smoke-tests
@@ -5400,3 +5495,131 @@ add_fp_unittest(
libc.src.__support.macros.properties.os
libc.src.__support.macros.properties.types
)
+
+add_fp_unittest(
+ bf16add_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ bf16add_test.cpp
+ HDRS
+ AddTest.h
+ DEPENDS
+ libc.hdr.errno_macros
+ libc.hdr.fenv_macros
+ libc.src.math.bf16add
+ libc.src.__support.FPUtil.bfloat16
+ libc.src.__support.macros.properties.os
+)
+
+add_fp_unittest(
+ bf16addf_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ bf16addf_test.cpp
+ HDRS
+ AddTest.h
+ DEPENDS
+ libc.hdr.errno_macros
+ libc.hdr.fenv_macros
+ libc.src.math.bf16addf
+ libc.src.__support.FPUtil.bfloat16
+ libc.src.__support.macros.properties.os
+)
+
+add_fp_unittest(
+ bf16addl_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ bf16addl_test.cpp
+ HDRS
+ AddTest.h
+ DEPENDS
+ libc.hdr.errno_macros
+ libc.hdr.fenv_macros
+ libc.src.math.bf16addl
+ libc.src.__support.FPUtil.bfloat16
+ libc.src.__support.macros.properties.os
+)
+
+add_fp_unittest(
+ bf16addf128_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ bf16addf128_test.cpp
+ HDRS
+ AddTest.h
+ DEPENDS
+ libc.hdr.errno_macros
+ libc.hdr.fenv_macros
+ libc.src.math.bf16addf128
+ libc.src.__support.FPUtil.bfloat16
+ libc.src.__support.macros.properties.os
+)
+
+add_fp_unittest(
+ bf16sub_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ bf16sub_test.cpp
+ HDRS
+ SubTest.h
+ DEPENDS
+ libc.hdr.errno_macros
+ libc.hdr.fenv_macros
+ libc.src.math.bf16sub
+ libc.src.__support.FPUtil.bfloat16
+ libc.src.__support.macros.properties.os
+)
+
+add_fp_unittest(
+ bf16subf_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ bf16subf_test.cpp
+ HDRS
+ SubTest.h
+ DEPENDS
+ libc.hdr.errno_macros
+ libc.hdr.fenv_macros
+ libc.src.math.bf16subf
+ libc.src.__support.FPUtil.bfloat16
+ libc.src.__support.macros.properties.os
+)
+
+add_fp_unittest(
+ bf16subl_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ bf16subl_test.cpp
+ HDRS
+ SubTest.h
+ DEPENDS
+ libc.hdr.errno_macros
+ libc.hdr.fenv_macros
+ libc.src.math.bf16subl
+ libc.src.__support.FPUtil.bfloat16
+ libc.src.__support.macros.properties.os
+)
+
+add_fp_unittest(
+ bf16subf128_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ bf16subf128_test.cpp
+ HDRS
+ SubTest.h
+ DEPENDS
+ libc.hdr.errno_macros
+ libc.hdr.fenv_macros
+ libc.src.math.bf16subf128
+ libc.src.__support.FPUtil.bfloat16
+ libc.src.__support.macros.properties.os
+)
diff --git a/libc/test/src/math/smoke/CeilTest.h b/libc/test/src/math/smoke/CeilTest.h
index 7998eab..1839db9 100644
--- a/libc/test/src/math/smoke/CeilTest.h
+++ b/libc/test/src/math/smoke/CeilTest.h
@@ -59,10 +59,12 @@ public:
EXPECT_FP_EQ(T(-10.0), func(T(-10.32)));
EXPECT_FP_EQ(T(11.0), func(T(10.65)));
EXPECT_FP_EQ(T(-10.0), func(T(-10.65)));
+ EXPECT_FP_EQ(T(50.0), func(T(49.62)));
+ EXPECT_FP_EQ(T(-50.0), func(T(-50.31)));
EXPECT_FP_EQ(T(124.0), func(T(123.38)));
EXPECT_FP_EQ(T(-123.0), func(T(-123.38)));
EXPECT_FP_EQ(T(124.0), func(T(123.96)));
- EXPECT_FP_EQ(T(-123.0), func(T(-123.96)));
+ EXPECT_FP_EQ(T(-123.0), func(T(-123.5)));
}
};
diff --git a/libc/test/src/math/smoke/FloorTest.h b/libc/test/src/math/smoke/FloorTest.h
index bc19e4f..cbcf276 100644
--- a/libc/test/src/math/smoke/FloorTest.h
+++ b/libc/test/src/math/smoke/FloorTest.h
@@ -59,10 +59,11 @@ public:
EXPECT_FP_EQ(T(-11.0), func(T(-10.32)));
EXPECT_FP_EQ(T(10.0), func(T(10.65)));
EXPECT_FP_EQ(T(-11.0), func(T(-10.65)));
+ EXPECT_FP_EQ(T(50.0), func(T(50.31)));
+ EXPECT_FP_EQ(T(-50.0), func(T(-49.63)));
EXPECT_FP_EQ(T(123.0), func(T(123.38)));
EXPECT_FP_EQ(T(-124.0), func(T(-123.38)));
- EXPECT_FP_EQ(T(123.0), func(T(123.96)));
- EXPECT_FP_EQ(T(-124.0), func(T(-123.96)));
+ EXPECT_FP_EQ(T(-124.0), func(T(-123.5)));
}
};
diff --git a/libc/test/src/math/smoke/RoundTest.h b/libc/test/src/math/smoke/RoundTest.h
index beb7000..72889da 100644
--- a/libc/test/src/math/smoke/RoundTest.h
+++ b/libc/test/src/math/smoke/RoundTest.h
@@ -59,8 +59,10 @@ public:
EXPECT_FP_EQ(T(-10.0), func(T(-10.32)));
EXPECT_FP_EQ(T(11.0), func(T(10.65)));
EXPECT_FP_EQ(T(-11.0), func(T(-10.65)));
- EXPECT_FP_EQ(T(123.0), func(T(123.38)));
- EXPECT_FP_EQ(T(-123.0), func(T(-123.38)));
+ EXPECT_FP_EQ(T(50.0), func(T(49.63)));
+ EXPECT_FP_EQ(T(-50.0), func(T(-50.31)));
+ EXPECT_FP_EQ(T(124.0), func(T(123.5)));
+ EXPECT_FP_EQ(T(-124.0), func(T(-123.5)));
EXPECT_FP_EQ(T(124.0), func(T(123.96)));
EXPECT_FP_EQ(T(-124.0), func(T(-123.96)));
}
diff --git a/libc/test/src/math/smoke/TruncTest.h b/libc/test/src/math/smoke/TruncTest.h
index 49688e8..e088f29 100644
--- a/libc/test/src/math/smoke/TruncTest.h
+++ b/libc/test/src/math/smoke/TruncTest.h
@@ -61,8 +61,8 @@ public:
EXPECT_FP_EQ(T(-10.0), func(T(-10.65)));
EXPECT_FP_EQ(T(123.0), func(T(123.38)));
EXPECT_FP_EQ(T(-123.0), func(T(-123.38)));
- EXPECT_FP_EQ(T(123.0), func(T(123.96)));
- EXPECT_FP_EQ(T(-123.0), func(T(-123.96)));
+ EXPECT_FP_EQ(T(123.0), func(T(123.5)));
+ EXPECT_FP_EQ(T(-123.0), func(T(-123.5)));
}
};
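
The updated inputs in CeilTest.h, FloorTest.h, RoundTest.h and TruncTest.h above account for bfloat16's 8-bit significand: a value like 123.96 is not representable and rounds to 124.0 during conversion, so it would no longer exercise a fractional case, while 123.5 stays exact. A standalone demonstration using a hand-rolled float-to-bfloat16 rounding helper (illustration only; NaN handling omitted):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Round a float to the nearest bfloat16 (round-to-nearest-even) and widen
    // it back to float by keeping the top 16 bits of the float32 encoding.
    static float to_bfloat16(float x) {
      uint32_t bits;
      std::memcpy(&bits, &x, sizeof(bits));
      uint32_t bias = 0x7FFF + ((bits >> 16) & 1); // ties to even
      bits = (bits + bias) & 0xFFFF0000u;
      float out;
      std::memcpy(&out, &bits, sizeof(out));
      return out;
    }

    int main() {
      std::printf("123.96f -> %g\n", to_bfloat16(123.96f)); // 124: fractional part lost
      std::printf("123.5f  -> %g\n", to_bfloat16(123.5f));  // 123.5: exactly representable
      return 0;
    }
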
diff --git a/libc/test/src/math/smoke/bf16add_test.cpp b/libc/test/src/math/smoke/bf16add_test.cpp
new file mode 100644
index 0000000..9e9c594
--- /dev/null
+++ b/libc/test/src/math/smoke/bf16add_test.cpp
@@ -0,0 +1,14 @@
+//===-- Unittests for bf16add ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "AddTest.h"
+
+#include "src/__support/FPUtil/bfloat16.h"
+#include "src/math/bf16add.h"
+
+LIST_ADD_TESTS(bfloat16, double, LIBC_NAMESPACE::bf16add)
diff --git a/libc/test/src/math/smoke/bf16addf128_test.cpp b/libc/test/src/math/smoke/bf16addf128_test.cpp
new file mode 100644
index 0000000..46f7ad3
--- /dev/null
+++ b/libc/test/src/math/smoke/bf16addf128_test.cpp
@@ -0,0 +1,14 @@
+//===-- Unittests for bf16addf128 -----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "AddTest.h"
+
+#include "src/__support/FPUtil/bfloat16.h"
+#include "src/math/bf16addf128.h"
+
+LIST_ADD_TESTS(bfloat16, float128, LIBC_NAMESPACE::bf16addf128)
diff --git a/libc/test/src/math/smoke/bf16addf_test.cpp b/libc/test/src/math/smoke/bf16addf_test.cpp
new file mode 100644
index 0000000..06d56cf
--- /dev/null
+++ b/libc/test/src/math/smoke/bf16addf_test.cpp
@@ -0,0 +1,14 @@
+//===-- Unittests for bf16addf --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "AddTest.h"
+
+#include "src/__support/FPUtil/bfloat16.h"
+#include "src/math/bf16addf.h"
+
+LIST_ADD_TESTS(bfloat16, float, LIBC_NAMESPACE::bf16addf)
diff --git a/libc/test/src/math/smoke/bf16addl_test.cpp b/libc/test/src/math/smoke/bf16addl_test.cpp
new file mode 100644
index 0000000..bf54827
--- /dev/null
+++ b/libc/test/src/math/smoke/bf16addl_test.cpp
@@ -0,0 +1,14 @@
+//===-- Unittests for bf16addl --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "AddTest.h"
+
+#include "src/__support/FPUtil/bfloat16.h"
+#include "src/math/bf16addl.h"
+
+LIST_ADD_TESTS(bfloat16, long double, LIBC_NAMESPACE::bf16addl)
diff --git a/libc/test/src/math/smoke/bf16sub_test.cpp b/libc/test/src/math/smoke/bf16sub_test.cpp
new file mode 100644
index 0000000..4a793dc
--- /dev/null
+++ b/libc/test/src/math/smoke/bf16sub_test.cpp
@@ -0,0 +1,14 @@
+//===-- Unittests for bf16sub ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "SubTest.h"
+
+#include "src/__support/FPUtil/bfloat16.h"
+#include "src/math/bf16sub.h"
+
+LIST_SUB_TESTS(bfloat16, double, LIBC_NAMESPACE::bf16sub)
diff --git a/libc/test/src/math/smoke/bf16subf128_test.cpp b/libc/test/src/math/smoke/bf16subf128_test.cpp
new file mode 100644
index 0000000..25d6711
--- /dev/null
+++ b/libc/test/src/math/smoke/bf16subf128_test.cpp
@@ -0,0 +1,14 @@
+//===-- Unittests for bf16subf128 -----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "SubTest.h"
+
+#include "src/__support/FPUtil/bfloat16.h"
+#include "src/math/bf16subf128.h"
+
+LIST_SUB_TESTS(bfloat16, float128, LIBC_NAMESPACE::bf16subf128)
diff --git a/libc/test/src/math/smoke/bf16subf_test.cpp b/libc/test/src/math/smoke/bf16subf_test.cpp
new file mode 100644
index 0000000..e8c7440
--- /dev/null
+++ b/libc/test/src/math/smoke/bf16subf_test.cpp
@@ -0,0 +1,14 @@
+//===-- Unittests for bf16subf --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "SubTest.h"
+
+#include "src/__support/FPUtil/bfloat16.h"
+#include "src/math/bf16subf.h"
+
+LIST_SUB_TESTS(bfloat16, float, LIBC_NAMESPACE::bf16subf)
diff --git a/libc/test/src/math/smoke/bf16subl_test.cpp b/libc/test/src/math/smoke/bf16subl_test.cpp
new file mode 100644
index 0000000..2997369
--- /dev/null
+++ b/libc/test/src/math/smoke/bf16subl_test.cpp
@@ -0,0 +1,14 @@
+//===-- Unittests for bf16subl --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "SubTest.h"
+
+#include "src/__support/FPUtil/bfloat16.h"
+#include "src/math/bf16subl.h"
+
+LIST_SUB_TESTS(bfloat16, long double, LIBC_NAMESPACE::bf16subl)
diff --git a/libc/test/src/math/smoke/ceilbf16_test.cpp b/libc/test/src/math/smoke/ceilbf16_test.cpp
new file mode 100644
index 0000000..dcaf058
--- /dev/null
+++ b/libc/test/src/math/smoke/ceilbf16_test.cpp
@@ -0,0 +1,14 @@
+//===-- Unittests for ceilbf16 --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "CeilTest.h"
+
+#include "src/__support/FPUtil/bfloat16.h"
+#include "src/math/ceilbf16.h"
+
+LIST_CEIL_TESTS(bfloat16, LIBC_NAMESPACE::ceilbf16)
diff --git a/libc/test/src/math/smoke/floorbf16_test.cpp b/libc/test/src/math/smoke/floorbf16_test.cpp
new file mode 100644
index 0000000..9cc77cd
--- /dev/null
+++ b/libc/test/src/math/smoke/floorbf16_test.cpp
@@ -0,0 +1,14 @@
+//===-- Unittests for floorbf16 -------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FloorTest.h"
+
+#include "src/__support/FPUtil/bfloat16.h"
+#include "src/math/floorbf16.h"
+
+LIST_FLOOR_TESTS(bfloat16, LIBC_NAMESPACE::floorbf16)
diff --git a/libc/test/src/math/smoke/fmaxbf16_test.cpp b/libc/test/src/math/smoke/fmaxbf16_test.cpp
new file mode 100644
index 0000000..8ff9313
--- /dev/null
+++ b/libc/test/src/math/smoke/fmaxbf16_test.cpp
@@ -0,0 +1,14 @@
+//===-- Unittests for fmaxbf16 --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FMaxTest.h"
+
+#include "src/__support/FPUtil/bfloat16.h"
+#include "src/math/fmaxbf16.h"
+
+LIST_FMAX_TESTS(bfloat16, LIBC_NAMESPACE::fmaxbf16)
diff --git a/libc/test/src/math/smoke/fminbf16_test.cpp b/libc/test/src/math/smoke/fminbf16_test.cpp
new file mode 100644
index 0000000..195a0ac
--- /dev/null
+++ b/libc/test/src/math/smoke/fminbf16_test.cpp
@@ -0,0 +1,14 @@
+//===-- Unittests for fminbf16 --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FMinTest.h"
+
+#include "src/__support/FPUtil/bfloat16.h"
+#include "src/math/fminbf16.h"
+
+LIST_FMIN_TESTS(bfloat16, LIBC_NAMESPACE::fminbf16)
diff --git a/libc/test/src/math/smoke/roundbf16_test.cpp b/libc/test/src/math/smoke/roundbf16_test.cpp
new file mode 100644
index 0000000..5163868
--- /dev/null
+++ b/libc/test/src/math/smoke/roundbf16_test.cpp
@@ -0,0 +1,14 @@
+//===-- Unittests for roundbf16 -------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "RoundTest.h"
+
+#include "src/__support/FPUtil/bfloat16.h"
+#include "src/math/roundbf16.h"
+
+LIST_ROUND_TESTS(bfloat16, LIBC_NAMESPACE::roundbf16)
diff --git a/libc/test/src/math/smoke/roundevenbf16_test.cpp b/libc/test/src/math/smoke/roundevenbf16_test.cpp
new file mode 100644
index 0000000..711c37a
--- /dev/null
+++ b/libc/test/src/math/smoke/roundevenbf16_test.cpp
@@ -0,0 +1,14 @@
+//===-- Unittests for roundevenbf16 ---------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "RoundEvenTest.h"
+
+#include "src/__support/FPUtil/bfloat16.h"
+#include "src/math/roundevenbf16.h"
+
+LIST_ROUNDEVEN_TESTS(bfloat16, LIBC_NAMESPACE::roundevenbf16)
diff --git a/libc/test/src/math/smoke/truncbf16_test.cpp b/libc/test/src/math/smoke/truncbf16_test.cpp
new file mode 100644
index 0000000..970fa69
--- /dev/null
+++ b/libc/test/src/math/smoke/truncbf16_test.cpp
@@ -0,0 +1,14 @@
+//===-- Unittests for truncbf16 -------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "TruncTest.h"
+
+#include "src/__support/FPUtil/bfloat16.h"
+#include "src/math/truncbf16.h"
+
+LIST_TRUNC_TESTS(bfloat16, LIBC_NAMESPACE::truncbf16)
diff --git a/libc/utils/MPFRWrapper/MPFRUtils.cpp b/libc/utils/MPFRWrapper/MPFRUtils.cpp
index ae12a83..57e818c 100644
--- a/libc/utils/MPFRWrapper/MPFRUtils.cpp
+++ b/libc/utils/MPFRWrapper/MPFRUtils.cpp
@@ -411,6 +411,21 @@ template void explain_binary_operation_one_output_error(
#endif
template void explain_binary_operation_one_output_error(
Operation, const BinaryInput<bfloat16> &, bfloat16, double, RoundingMode);
+template void
+explain_binary_operation_one_output_error(Operation, const BinaryInput<float> &,
+ bfloat16, double, RoundingMode);
+template void explain_binary_operation_one_output_error(
+ Operation, const BinaryInput<double> &, bfloat16, double, RoundingMode);
+template void
+explain_binary_operation_one_output_error(Operation,
+ const BinaryInput<long double> &,
+ bfloat16, double, RoundingMode);
+#if defined(LIBC_TYPES_HAS_FLOAT128) && \
+ defined(LIBC_TYPES_FLOAT128_IS_NOT_LONG_DOUBLE)
+template void explain_binary_operation_one_output_error(
+ Operation, const BinaryInput<float128> &, bfloat16, double, RoundingMode);
+#endif // defined(LIBC_TYPES_HAS_FLOAT128) &&
+ // defined(LIBC_TYPES_FLOAT128_IS_NOT_LONG_DOUBLE)
template <typename InputType, typename OutputType>
void explain_ternary_operation_one_output_error(
@@ -648,6 +663,26 @@ template bool compare_binary_operation_one_output(Operation,
const BinaryInput<bfloat16> &,
bfloat16, double,
RoundingMode);
+
+template bool compare_binary_operation_one_output(Operation,
+ const BinaryInput<float> &,
+ bfloat16, double,
+ RoundingMode);
+template bool compare_binary_operation_one_output(Operation,
+ const BinaryInput<double> &,
+ bfloat16, double,
+ RoundingMode);
+template bool
+compare_binary_operation_one_output(Operation, const BinaryInput<long double> &,
+ bfloat16, double, RoundingMode);
+#if defined(LIBC_TYPES_HAS_FLOAT128) && \
+ defined(LIBC_TYPES_FLOAT128_IS_NOT_LONG_DOUBLE)
+template bool compare_binary_operation_one_output(Operation,
+ const BinaryInput<float128> &,
+ bfloat16, double,
+ RoundingMode);
+#endif // defined(LIBC_TYPES_HAS_FLOAT128) &&
+ // defined(LIBC_TYPES_FLOAT128_IS_NOT_LONG_DOUBLE)
template <typename InputType, typename OutputType>
bool compare_ternary_operation_one_output(Operation op,
const TernaryInput<InputType> &input,
diff --git a/libc/utils/hdrgen/hdrgen/header.py b/libc/utils/hdrgen/hdrgen/header.py
index b054ed4..2118db6 100644
--- a/libc/utils/hdrgen/hdrgen/header.py
+++ b/libc/utils/hdrgen/hdrgen/header.py
@@ -204,7 +204,7 @@ class HeaderFile:
current_guard = None
for function in self.functions:
- if function.guard == None:
+ if function.guard == None and current_guard == None:
content.append(str(function) + " __NOEXCEPT;")
content.append("")
else:
@@ -221,7 +221,8 @@ class HeaderFile:
content.append(f"#endif // {current_guard}")
content.append("")
current_guard = function.guard
- content.append(f"#ifdef {current_guard}")
+ if current_guard is not None:
+ content.append(f"#ifdef {current_guard}")
content.append(str(function) + " __NOEXCEPT;")
content.append("")
if current_guard != None:
diff --git a/libclc/CMakeLists.txt b/libclc/CMakeLists.txt
index adfabf4..756e097 100644
--- a/libclc/CMakeLists.txt
+++ b/libclc/CMakeLists.txt
@@ -31,6 +31,7 @@ set_property(DIRECTORY APPEND PROPERTY CMAKE_CONFIGURE_DEPENDS
clc/lib/amdgcn/SOURCES;
clc/lib/amdgpu/SOURCES;
clc/lib/clspv/SOURCES;
+ clc/lib/ptx-nvidiacl/SOURCES;
clc/lib/r600/SOURCES;
clc/lib/spirv/SOURCES;
)
diff --git a/libclc/clc/include/clc/shared/binary_decl_with_scalar_second_arg.inc b/libclc/clc/include/clc/shared/binary_decl_with_scalar_second_arg.inc
deleted file mode 100644
index 05a5339..0000000
--- a/libclc/clc/include/clc/shared/binary_decl_with_scalar_second_arg.inc
+++ /dev/null
@@ -1,15 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-_CLC_OVERLOAD _CLC_DECL __CLC_GENTYPE FUNCTION(__CLC_GENTYPE x,
- __CLC_GENTYPE y);
-
-#ifndef __CLC_SCALAR
-_CLC_OVERLOAD _CLC_DECL __CLC_GENTYPE FUNCTION(__CLC_GENTYPE x,
- __CLC_SCALAR_GENTYPE y);
-#endif
diff --git a/libcxx/docs/Status/Cxx2cIssues.csv b/libcxx/docs/Status/Cxx2cIssues.csv
index c622512..5460664 100644
--- a/libcxx/docs/Status/Cxx2cIssues.csv
+++ b/libcxx/docs/Status/Cxx2cIssues.csv
@@ -132,7 +132,7 @@
"`LWG4200 <https://wg21.link/LWG4200>`__","The ``operation_state`` concept can be simplified","2025-06 (Sofia)","","",""
"`LWG4201 <https://wg21.link/LWG4201>`__","``with-await-transform::await_transform`` should not use a deduced return type","2025-06 (Sofia)","","",""
"`LWG4217 <https://wg21.link/LWG4217>`__","Clarify ``mdspan`` layout mapping requirements for ``rank == 0``","2025-06 (Sofia)","","",""
-"`LWG4222 <https://wg21.link/LWG4222>`__","``expected`` constructor from a single value missing a constraint","2025-06 (Sofia)","","",""
+"`LWG4222 <https://wg21.link/LWG4222>`__","``expected`` constructor from a single value missing a constraint","2025-06 (Sofia)","|Complete|","22",""
"`LWG4224 <https://wg21.link/LWG4224>`__","Philox engines should be freestanding","2025-06 (Sofia)","","",""
"`LWG4227 <https://wg21.link/LWG4227>`__","Missing ``noexcept`` operator in [exec.when.all]","2025-06 (Sofia)","","",""
"`LWG4231 <https://wg21.link/LWG4231>`__","``datapar::chunk<N>`` should use ``simd-size-type`` instead of ``size_t``","2025-06 (Sofia)","","",""
diff --git a/libcxx/include/CMakeLists.txt b/libcxx/include/CMakeLists.txt
index 51444ec..c6b87a3 100644
--- a/libcxx/include/CMakeLists.txt
+++ b/libcxx/include/CMakeLists.txt
@@ -447,14 +447,12 @@ set(files
__fwd/get.h
__fwd/ios.h
__fwd/istream.h
- __fwd/map.h
__fwd/mdspan.h
__fwd/memory.h
__fwd/memory_resource.h
__fwd/ostream.h
__fwd/pair.h
__fwd/queue.h
- __fwd/set.h
__fwd/span.h
__fwd/sstream.h
__fwd/stack.h
diff --git a/libcxx/include/__expected/expected.h b/libcxx/include/__expected/expected.h
index 0f446b8..38a3412 100644
--- a/libcxx/include/__expected/expected.h
+++ b/libcxx/include/__expected/expected.h
@@ -557,7 +557,8 @@ public:
template <class _Up = _Tp>
requires(!is_same_v<remove_cvref_t<_Up>, in_place_t> && !is_same_v<expected, remove_cvref_t<_Up>> &&
- is_constructible_v<_Tp, _Up> && !__is_std_unexpected<remove_cvref_t<_Up>>::value &&
+ !is_same_v<remove_cvref_t<_Up>, unexpect_t> && is_constructible_v<_Tp, _Up> &&
+ !__is_std_unexpected<remove_cvref_t<_Up>>::value &&
(!is_same_v<remove_cv_t<_Tp>, bool> || !__is_std_expected<remove_cvref_t<_Up>>::value))
_LIBCPP_HIDE_FROM_ABI constexpr explicit(!is_convertible_v<_Up, _Tp>)
expected(_Up&& __u) noexcept(is_nothrow_constructible_v<_Tp, _Up>) // strengthened
diff --git a/libcxx/include/__fwd/map.h b/libcxx/include/__fwd/map.h
deleted file mode 100644
index 940298d..0000000
--- a/libcxx/include/__fwd/map.h
+++ /dev/null
@@ -1,31 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef _LIBCPP___FWD_MAP_H
-#define _LIBCPP___FWD_MAP_H
-
-#include <__config>
-#include <__fwd/functional.h>
-#include <__fwd/memory.h>
-#include <__fwd/pair.h>
-
-#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
-# pragma GCC system_header
-#endif
-
-_LIBCPP_BEGIN_NAMESPACE_STD
-
-template <class _Key, class _Tp, class _Compare = less<_Key>, class _Allocator = allocator<pair<const _Key, _Tp> > >
-class map;
-
-template <class _Key, class _Tp, class _Compare = less<_Key>, class _Allocator = allocator<pair<const _Key, _Tp> > >
-class multimap;
-
-_LIBCPP_END_NAMESPACE_STD
-
-#endif // _LIBCPP___FWD_MAP_H
diff --git a/libcxx/include/__fwd/set.h b/libcxx/include/__fwd/set.h
deleted file mode 100644
index d5ef8d5..0000000
--- a/libcxx/include/__fwd/set.h
+++ /dev/null
@@ -1,30 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef _LIBCPP___FWD_SET_H
-#define _LIBCPP___FWD_SET_H
-
-#include <__config>
-#include <__fwd/functional.h>
-#include <__fwd/memory.h>
-
-#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
-# pragma GCC system_header
-#endif
-
-_LIBCPP_BEGIN_NAMESPACE_STD
-
-template <class _Key, class _Compare = less<_Key>, class _Allocator = allocator<_Key> >
-class set;
-
-template <class _Key, class _Compare = less<_Key>, class _Allocator = allocator<_Key> >
-class multiset;
-
-_LIBCPP_END_NAMESPACE_STD
-
-#endif // _LIBCPP___FWD_SET_H
diff --git a/libcxx/include/__tree b/libcxx/include/__tree
index 74b20d5..3dd5ae5 100644
--- a/libcxx/include/__tree
+++ b/libcxx/include/__tree
@@ -13,9 +13,7 @@
#include <__algorithm/min.h>
#include <__assert>
#include <__config>
-#include <__fwd/map.h>
#include <__fwd/pair.h>
-#include <__fwd/set.h>
#include <__iterator/distance.h>
#include <__iterator/iterator_traits.h>
#include <__iterator/next.h>
@@ -686,16 +684,6 @@ private:
friend class __tree;
template <class, class, class>
friend class __tree_const_iterator;
- template <class>
- friend class __map_iterator;
- template <class, class, class, class>
- friend class map;
- template <class, class, class, class>
- friend class multimap;
- template <class, class, class>
- friend class set;
- template <class, class, class>
- friend class multiset;
};
template <class _Tp, class _NodePtr, class _DiffType>
@@ -709,18 +697,15 @@ class __tree_const_iterator {
__end_node_pointer __ptr_;
public:
- using iterator_category = bidirectional_iterator_tag;
- using value_type = __get_node_value_type_t<_Tp>;
- using difference_type = _DiffType;
- using reference = const value_type&;
- using pointer = __rebind_pointer_t<_NodePtr, const value_type>;
+ using iterator_category = bidirectional_iterator_tag;
+ using value_type = __get_node_value_type_t<_Tp>;
+ using difference_type = _DiffType;
+ using reference = const value_type&;
+ using pointer = __rebind_pointer_t<_NodePtr, const value_type>;
+ using __non_const_iterator _LIBCPP_NODEBUG = __tree_iterator<_Tp, __node_pointer, difference_type>;
_LIBCPP_HIDE_FROM_ABI __tree_const_iterator() _NOEXCEPT : __ptr_(nullptr) {}
-private:
- typedef __tree_iterator<_Tp, __node_pointer, difference_type> __non_const_iterator;
-
-public:
_LIBCPP_HIDE_FROM_ABI __tree_const_iterator(__non_const_iterator __p) _NOEXCEPT : __ptr_(__p.__ptr_) {}
_LIBCPP_HIDE_FROM_ABI reference operator*() const { return __get_np()->__value_; }
@@ -762,16 +747,6 @@ private:
template <class, class, class>
friend class __tree;
- template <class, class, class, class>
- friend class map;
- template <class, class, class, class>
- friend class multimap;
- template <class, class, class>
- friend class set;
- template <class, class, class>
- friend class multiset;
- template <class>
- friend class __map_const_iterator;
};
template <class _Tp, class _Compare>
diff --git a/libcxx/include/map b/libcxx/include/map
index 0a43bd0..9f98abe 100644
--- a/libcxx/include/map
+++ b/libcxx/include/map
@@ -582,7 +582,6 @@ erase_if(multimap<Key, T, Compare, Allocator>& c, Predicate pred); // C++20
# include <__functional/binary_function.h>
# include <__functional/is_transparent.h>
# include <__functional/operations.h>
-# include <__fwd/map.h>
# include <__iterator/erase_if_container.h>
# include <__iterator/iterator_traits.h>
# include <__iterator/ranges_iterator_traits.h>
@@ -692,12 +691,12 @@ public:
# if _LIBCPP_STD_VER >= 14
template <typename _K2>
_LIBCPP_HIDE_FROM_ABI bool operator()(const _K2& __x, const _CP& __y) const {
- return __comp_(__x, __y.__get_value().first);
+ return __comp_(__x, __y.first);
}
template <typename _K2>
_LIBCPP_HIDE_FROM_ABI bool operator()(const _CP& __x, const _K2& __y) const {
- return __comp_(__x.__get_value().first, __y);
+ return __comp_(__x.first, __y);
}
# endif
};
@@ -861,7 +860,10 @@ public:
friend class __tree_const_iterator;
};
-template <class _Key, class _Tp, class _Compare, class _Allocator>
+template <class _Key, class _Tp, class _Compare = less<_Key>, class _Allocator = allocator<pair<const _Key, _Tp> > >
+class multimap;
+
+template <class _Key, class _Tp, class _Compare = less<_Key>, class _Allocator = allocator<pair<const _Key, _Tp> > >
class map {
public:
// types:
diff --git a/libcxx/include/module.modulemap.in b/libcxx/include/module.modulemap.in
index b07a153e..c431c0c 100644
--- a/libcxx/include/module.modulemap.in
+++ b/libcxx/include/module.modulemap.in
@@ -1607,7 +1607,6 @@ module std [system] {
}
module map {
- module fwd { header "__fwd/map.h" }
header "map"
export *
export std.iterator.reverse_iterator
@@ -1974,7 +1973,6 @@ module std [system] {
}
module set {
- module fwd { header "__fwd/set.h" }
header "set"
export *
export std.iterator.reverse_iterator
diff --git a/libcxx/include/set b/libcxx/include/set
index 342a529..c77345b 100644
--- a/libcxx/include/set
+++ b/libcxx/include/set
@@ -522,7 +522,6 @@ erase_if(multiset<Key, Compare, Allocator>& c, Predicate pred); // C++20
# include <__config>
# include <__functional/is_transparent.h>
# include <__functional/operations.h>
-# include <__fwd/set.h>
# include <__iterator/erase_if_container.h>
# include <__iterator/iterator_traits.h>
# include <__iterator/ranges_iterator_traits.h>
@@ -570,7 +569,10 @@ _LIBCPP_PUSH_MACROS
_LIBCPP_BEGIN_NAMESPACE_STD
-template <class _Key, class _Compare, class _Allocator>
+template <class _Key, class _Compare = less<_Key>, class _Allocator = allocator<_Key> >
+class multiset;
+
+template <class _Key, class _Compare = less<_Key>, class _Allocator = allocator<_Key> >
class set {
public:
// types:
diff --git a/libcxx/test/libcxx/atomics/atomics.types.operations/atomics.types.operations.req/atomic_fetch_add.verify.cpp b/libcxx/test/libcxx/atomics/atomics.types.operations/atomics.types.operations.req/atomic_fetch_add.verify.cpp
index 320ef57..222799f 100644
--- a/libcxx/test/libcxx/atomics/atomics.types.operations/atomics.types.operations.req/atomic_fetch_add.verify.cpp
+++ b/libcxx/test/libcxx/atomics/atomics.types.operations/atomics.types.operations.req/atomic_fetch_add.verify.cpp
@@ -48,12 +48,12 @@ void pointer_to_incomplete_type() {
void function_pointer() {
{
volatile std::atomic<void (*)(int)> fun;
- // expected-error-re@*:* {{static assertion failed due to requirement '!is_function<void (int)>::value'{{.*}}Pointer to function isn't allowed}}
+ // expected-error-re@*:* {{static assertion failed due to requirement {{.+}}Pointer to function isn't allowed}}
std::atomic_fetch_add(&fun, 0);
}
{
std::atomic<void (*)(int)> fun;
- // expected-error-re@*:* {{static assertion failed due to requirement '!is_function<void (int)>::value'{{.*}}Pointer to function isn't allowed}}
+ // expected-error-re@*:* {{static assertion failed due to requirement {{.+}}Pointer to function isn't allowed}}
std::atomic_fetch_add(&fun, 0);
}
}
diff --git a/libcxx/test/libcxx/atomics/atomics.types.operations/atomics.types.operations.req/atomic_fetch_add_explicit.verify.cpp b/libcxx/test/libcxx/atomics/atomics.types.operations/atomics.types.operations.req/atomic_fetch_add_explicit.verify.cpp
index bdd8089..3bde3ad 100644
--- a/libcxx/test/libcxx/atomics/atomics.types.operations/atomics.types.operations.req/atomic_fetch_add_explicit.verify.cpp
+++ b/libcxx/test/libcxx/atomics/atomics.types.operations/atomics.types.operations.req/atomic_fetch_add_explicit.verify.cpp
@@ -51,12 +51,12 @@ void pointer_to_incomplete_type() {
void function_pointer() {
{
volatile std::atomic<void (*)(int)> fun;
- // expected-error-re@*:* {{static assertion failed due to requirement '!is_function<void (int)>::value'{{.*}}Pointer to function isn't allowed}}
+ // expected-error-re@*:* {{static assertion failed due to requirement {{.+}}Pointer to function isn't allowed}}
std::atomic_fetch_add_explicit(&fun, 0, std::memory_order_relaxed);
}
{
std::atomic<void (*)(int)> fun;
- // expected-error-re@*:* {{static assertion failed due to requirement '!is_function<void (int)>::value'{{.*}}Pointer to function isn't allowed}}
+ // expected-error-re@*:* {{static assertion failed due to requirement {{.+}}Pointer to function isn't allowed}}
std::atomic_fetch_add_explicit(&fun, 0, std::memory_order_relaxed);
}
}
diff --git a/libcxx/test/libcxx/atomics/atomics.types.operations/atomics.types.operations.req/atomic_fetch_sub.verify.cpp b/libcxx/test/libcxx/atomics/atomics.types.operations/atomics.types.operations.req/atomic_fetch_sub.verify.cpp
index 2c9f898..805ca34 100644
--- a/libcxx/test/libcxx/atomics/atomics.types.operations/atomics.types.operations.req/atomic_fetch_sub.verify.cpp
+++ b/libcxx/test/libcxx/atomics/atomics.types.operations/atomics.types.operations.req/atomic_fetch_sub.verify.cpp
@@ -48,12 +48,12 @@ void pointer_to_incomplete_type() {
void function_pointer() {
{
volatile std::atomic<void (*)(int)> fun;
- // expected-error-re@*:* {{static assertion failed due to requirement '!is_function<void (int)>::value'{{.*}}Pointer to function isn't allowed}}
+ // expected-error-re@*:* {{static assertion failed due to requirement {{.+}}Pointer to function isn't allowed}}
std::atomic_fetch_sub(&fun, 0);
}
{
std::atomic<void (*)(int)> fun;
- // expected-error-re@*:* {{static assertion failed due to requirement '!is_function<void (int)>::value'{{.*}}Pointer to function isn't allowed}}
+ // expected-error-re@*:* {{static assertion failed due to requirement {{.+}}Pointer to function isn't allowed}}
std::atomic_fetch_sub(&fun, 0);
}
}
diff --git a/libcxx/test/libcxx/atomics/atomics.types.operations/atomics.types.operations.req/atomic_fetch_sub_explicit.verify.cpp b/libcxx/test/libcxx/atomics/atomics.types.operations/atomics.types.operations.req/atomic_fetch_sub_explicit.verify.cpp
index 88c4275..37a4237 100644
--- a/libcxx/test/libcxx/atomics/atomics.types.operations/atomics.types.operations.req/atomic_fetch_sub_explicit.verify.cpp
+++ b/libcxx/test/libcxx/atomics/atomics.types.operations/atomics.types.operations.req/atomic_fetch_sub_explicit.verify.cpp
@@ -51,12 +51,12 @@ void pointer_to_incomplete_type() {
void function_pointer() {
{
volatile std::atomic<void (*)(int)> fun;
- // expected-error-re@*:* {{static assertion failed due to requirement '!is_function<void (int)>::value'{{.*}}Pointer to function isn't allowed}}
+ // expected-error-re@*:* {{static assertion failed due to requirement {{.+}}Pointer to function isn't allowed}}
std::atomic_fetch_sub_explicit(&fun, 0, std::memory_order_relaxed);
}
{
std::atomic<void (*)(int)> fun;
- // expected-error-re@*:* {{static assertion failed due to requirement '!is_function<void (int)>::value'{{.*}}Pointer to function isn't allowed}}
+ // expected-error-re@*:* {{static assertion failed due to requirement {{.+}}Pointer to function isn't allowed}}
std::atomic_fetch_sub_explicit(&fun, 0, std::memory_order_relaxed);
}
}
diff --git a/libcxx/test/std/containers/associative/map/map.nonmember/compare.three_way.verify.cpp b/libcxx/test/std/containers/associative/map/map.nonmember/compare.three_way.verify.cpp
index 2427525..5352100 100644
--- a/libcxx/test/std/containers/associative/map/map.nonmember/compare.three_way.verify.cpp
+++ b/libcxx/test/std/containers/associative/map/map.nonmember/compare.three_way.verify.cpp
@@ -25,9 +25,9 @@ int main(int, char**) {
{
std::map<int, int, std::less<int>, std::allocator<int>> s1;
std::map<int, int, std::less<int>, test_allocator<int>> s2;
- // expected-error-re@*:* {{static assertion failed due to requirement 'is_same<int, std::pair<const int, int>>::value'{{.*}}Allocator::value_type must be same type as value_type}}
+ // expected-error-re@*:* {{static assertion failed due to requirement {{.+}}Allocator::value_type must be same type as value_type}}
s1 <=> s2;
- // expected-error-re@*:* {{static assertion failed due to requirement 'is_same<int, std::pair<const int, int>>::value'{{.*}}Allocator::value_type must be same type as value_type}}
+ // expected-error-re@*:* {{static assertion failed due to requirement {{.+}}Allocator::value_type must be same type as value_type}}
s2 <=> s1;
}
// Mismatching comparision functions
diff --git a/libcxx/test/std/containers/associative/map/map.ops/count0.pass.cpp b/libcxx/test/std/containers/associative/map/map.ops/count0.pass.cpp
index c7ba765..62491e2 100644
--- a/libcxx/test/std/containers/associative/map/map.ops/count0.pass.cpp
+++ b/libcxx/test/std/containers/associative/map/map.ops/count0.pass.cpp
@@ -33,6 +33,10 @@ int main(int, char**) {
typedef std::map<int, double, transparent_less_not_referenceable> M;
assert(M().count(C2Int{5}) == 0);
}
+ {
+ using M = std::map<int, double, transparent_less_nonempty>;
+ assert(M().count(C2Int{5}) == 0);
+ }
return 0;
}
diff --git a/libcxx/test/std/containers/associative/map/map.ops/equal_range0.pass.cpp b/libcxx/test/std/containers/associative/map/map.ops/equal_range0.pass.cpp
index 75724bd..57ce9339 100644
--- a/libcxx/test/std/containers/associative/map/map.ops/equal_range0.pass.cpp
+++ b/libcxx/test/std/containers/associative/map/map.ops/equal_range0.pass.cpp
@@ -40,6 +40,13 @@ int main(int, char**) {
P result = example.equal_range(C2Int{5});
assert(result.first == result.second);
}
+ {
+ using M = std::map<int, double, transparent_less_nonempty>;
+ using P = std::pair<typename M::iterator, typename M::iterator>;
+ M example;
+ P result = example.equal_range(C2Int{5});
+ assert(result.first == result.second);
+ }
return 0;
}
diff --git a/libcxx/test/std/containers/associative/map/map.ops/find0.pass.cpp b/libcxx/test/std/containers/associative/map/map.ops/find0.pass.cpp
index 9825d6c..3f09d56 100644
--- a/libcxx/test/std/containers/associative/map/map.ops/find0.pass.cpp
+++ b/libcxx/test/std/containers/associative/map/map.ops/find0.pass.cpp
@@ -36,6 +36,11 @@ int main(int, char**) {
M example;
assert(example.find(C2Int{5}) == example.end());
}
+ {
+ using M = std::map<int, double, transparent_less_nonempty>;
+ M example;
+ assert(example.find(C2Int{5}) == example.end());
+ }
return 0;
}
diff --git a/libcxx/test/std/containers/associative/map/map.ops/lower_bound0.pass.cpp b/libcxx/test/std/containers/associative/map/map.ops/lower_bound0.pass.cpp
index fe7fe38..308a2ed 100644
--- a/libcxx/test/std/containers/associative/map/map.ops/lower_bound0.pass.cpp
+++ b/libcxx/test/std/containers/associative/map/map.ops/lower_bound0.pass.cpp
@@ -36,6 +36,11 @@ int main(int, char**) {
M example;
assert(example.lower_bound(C2Int{5}) == example.end());
}
+ {
+ using M = std::map<int, double, transparent_less_nonempty>;
+ M example;
+ assert(example.lower_bound(C2Int{5}) == example.end());
+ }
return 0;
}
diff --git a/libcxx/test/std/containers/associative/map/map.ops/upper_bound0.pass.cpp b/libcxx/test/std/containers/associative/map/map.ops/upper_bound0.pass.cpp
index 525aa67..332b71a 100644
--- a/libcxx/test/std/containers/associative/map/map.ops/upper_bound0.pass.cpp
+++ b/libcxx/test/std/containers/associative/map/map.ops/upper_bound0.pass.cpp
@@ -36,6 +36,11 @@ int main(int, char**) {
M example;
assert(example.upper_bound(C2Int{5}) == example.end());
}
+ {
+ using M = std::map<int, double, transparent_less_nonempty>;
+ M example;
+ assert(example.upper_bound(C2Int{5}) == example.end());
+ }
return 0;
}
diff --git a/libcxx/test/std/containers/associative/multimap/multimap.nonmember/compare.three_way.verify.cpp b/libcxx/test/std/containers/associative/multimap/multimap.nonmember/compare.three_way.verify.cpp
index 239ad0a..e70e47e 100644
--- a/libcxx/test/std/containers/associative/multimap/multimap.nonmember/compare.three_way.verify.cpp
+++ b/libcxx/test/std/containers/associative/multimap/multimap.nonmember/compare.three_way.verify.cpp
@@ -25,9 +25,9 @@ int main(int, char**) {
{
std::multimap<int, int, std::less<int>, std::allocator<int>> s1;
std::multimap<int, int, std::less<int>, test_allocator<int>> s2;
- // expected-error-re@*:* {{static assertion failed due to requirement 'is_same<int, std::pair<const int, int>>::value'{{.*}}Allocator::value_type must be same type as value_type}}
+ // expected-error-re@*:* {{static assertion failed due to requirement {{.+}}Allocator::value_type must be same type as value_type}}
s1 <=> s2;
- // expected-error-re@*:* {{static assertion failed due to requirement 'is_same<int, std::pair<const int, int>>::value'{{.*}}Allocator::value_type must be same type as value_type}}
+ // expected-error-re@*:* {{static assertion failed due to requirement {{.+}}Allocator::value_type must be same type as value_type}}
s2 <=> s1;
}
// Mismatching comparision functions
diff --git a/libcxx/test/std/containers/associative/multimap/multimap.ops/count0.pass.cpp b/libcxx/test/std/containers/associative/multimap/multimap.ops/count0.pass.cpp
index 233d1a1..36f0ac2 100644
--- a/libcxx/test/std/containers/associative/multimap/multimap.ops/count0.pass.cpp
+++ b/libcxx/test/std/containers/associative/multimap/multimap.ops/count0.pass.cpp
@@ -33,6 +33,10 @@ int main(int, char**) {
typedef std::multimap<int, double, transparent_less_not_referenceable> M;
assert(M().count(C2Int{5}) == 0);
}
+ {
+ using M = std::multimap<int, double, transparent_less_nonempty>;
+ assert(M().count(C2Int{5}) == 0);
+ }
return 0;
}
diff --git a/libcxx/test/std/containers/associative/multimap/multimap.ops/equal_range0.pass.cpp b/libcxx/test/std/containers/associative/multimap/multimap.ops/equal_range0.pass.cpp
index 0bead6c..a362c03 100644
--- a/libcxx/test/std/containers/associative/multimap/multimap.ops/equal_range0.pass.cpp
+++ b/libcxx/test/std/containers/associative/multimap/multimap.ops/equal_range0.pass.cpp
@@ -40,6 +40,13 @@ int main(int, char**) {
P result = example.equal_range(C2Int{5});
assert(result.first == result.second);
}
+ {
+ using M = std::multimap<int, double, transparent_less_nonempty>;
+ using P = std::pair<typename M::iterator, typename M::iterator>;
+ M example;
+ P result = example.equal_range(C2Int{5});
+ assert(result.first == result.second);
+ }
return 0;
}
diff --git a/libcxx/test/std/containers/associative/multimap/multimap.ops/find0.pass.cpp b/libcxx/test/std/containers/associative/multimap/multimap.ops/find0.pass.cpp
index 701d4e3..ccb0900 100644
--- a/libcxx/test/std/containers/associative/multimap/multimap.ops/find0.pass.cpp
+++ b/libcxx/test/std/containers/associative/multimap/multimap.ops/find0.pass.cpp
@@ -36,6 +36,11 @@ int main(int, char**) {
M example;
assert(example.find(C2Int{5}) == example.end());
}
+ {
+ using M = std::multimap<int, double, transparent_less_nonempty>;
+ M example;
+ assert(example.find(C2Int{5}) == example.end());
+ }
return 0;
}
diff --git a/libcxx/test/std/containers/associative/multimap/multimap.ops/lower_bound0.pass.cpp b/libcxx/test/std/containers/associative/multimap/multimap.ops/lower_bound0.pass.cpp
index 79f9948..4b48530 100644
--- a/libcxx/test/std/containers/associative/multimap/multimap.ops/lower_bound0.pass.cpp
+++ b/libcxx/test/std/containers/associative/multimap/multimap.ops/lower_bound0.pass.cpp
@@ -36,6 +36,11 @@ int main(int, char**) {
M example;
assert(example.lower_bound(C2Int{5}) == example.end());
}
+ {
+ using M = std::multimap<int, double, transparent_less_nonempty>;
+ M example;
+ assert(example.lower_bound(C2Int{5}) == example.end());
+ }
return 0;
}
diff --git a/libcxx/test/std/containers/associative/multimap/multimap.ops/upper_bound0.pass.cpp b/libcxx/test/std/containers/associative/multimap/multimap.ops/upper_bound0.pass.cpp
index 62f5241..f2ae945 100644
--- a/libcxx/test/std/containers/associative/multimap/multimap.ops/upper_bound0.pass.cpp
+++ b/libcxx/test/std/containers/associative/multimap/multimap.ops/upper_bound0.pass.cpp
@@ -36,6 +36,11 @@ int main(int, char**) {
M example;
assert(example.upper_bound(C2Int{5}) == example.end());
}
+ {
+ using M = std::multimap<int, double, transparent_less_nonempty>;
+ M example;
+ assert(example.upper_bound(C2Int{5}) == example.end());
+ }
return 0;
}
diff --git a/libcxx/test/std/containers/sequences/array/array.overview/nttp.verify.cpp b/libcxx/test/std/containers/sequences/array/array.overview/nttp.verify.cpp
index f50febf..e268dd9 100644
--- a/libcxx/test/std/containers/sequences/array/array.overview/nttp.verify.cpp
+++ b/libcxx/test/std/containers/sequences/array/array.overview/nttp.verify.cpp
@@ -45,7 +45,7 @@ using E = test<NotALiteral, 1, std::array<NotALiteral, 1>{}>;
// expected-error-re@*:* {{non-type template parameter has non-literal type 'std::array<NotALiteral, 1U{{L{0,2}.*}}>'}}
using F = test<std::string, 2, std::array<std::string, 2>{}>;
-// expected-error-re@*:* {{type 'std::array<{{(std::)?}}string, 2U{{L{0,2}.*}}>' {{(\(aka 'array<basic_string<char>, 2UL{0,2}>'\) )?}}of non-type template parameter is not a structural type}}
+// expected-error-re@*:* {{type {{.+}} of non-type template parameter is not a structural type}}
} // namespace test_full_type
namespace test_ctad {
@@ -77,5 +77,5 @@ using E = test<std::array<NotALiteral, 1>{}>;
// expected-error@-1 {{non-type template parameter has non-literal type 'std::array<NotALiteral, 1>'}}
using F = test<std::array<std::string, 2>{}>;
-// expected-error@-1 {{type 'std::array<std::string, 2>' (aka 'array<basic_string<char>, 2>') of non-type template parameter is not a structural type}}
+// expected-error-re@-1 {{type {{.+}} of non-type template parameter is not a structural type}}
} // namespace test_auto
diff --git a/libcxx/test/std/numerics/rand/rand.eng/rand.eng.lcong/params.verify.cpp b/libcxx/test/std/numerics/rand/rand.eng/rand.eng.lcong/params.verify.cpp
index bc9b563..0e03161 100644
--- a/libcxx/test/std/numerics/rand/rand.eng/rand.eng.lcong/params.verify.cpp
+++ b/libcxx/test/std/numerics/rand/rand.eng/rand.eng.lcong/params.verify.cpp
@@ -24,7 +24,7 @@ int main(int, char**)
// expected-error-re@*:* {{static assertion failed due to requirement '1ULL == 0 || 1ULL < 1ULL'{{.*}}linear_congruential_engine invalid parameters}}
std::linear_congruential_engine<T, 0, 1, 1> e3;
std::linear_congruential_engine<T, 1, 0, 1> e4;
- // expected-error-re@*:* {{static assertion failed due to requirement 'is_unsigned<int>::value'{{.*}}_UIntType must be unsigned type}}
+ // expected-error-re@*:* {{static assertion failed due to requirement {{.+}}_UIntType must be unsigned type}}
std::linear_congruential_engine<int, 0, 0, 0> e5;
return 0;
diff --git a/libcxx/test/std/thread/futures/futures.task/futures.task.members/ctor1.verify.cpp b/libcxx/test/std/thread/futures/futures.task/futures.task.members/ctor1.verify.cpp
index 3c61c76..efa1316 100644
--- a/libcxx/test/std/thread/futures/futures.task/futures.task.members/ctor1.verify.cpp
+++ b/libcxx/test/std/thread/futures/futures.task/futures.task.members/ctor1.verify.cpp
@@ -31,7 +31,7 @@ int main(int, char**)
auto const& c_init = init;
PT p1{init}; // expected-error {{no matching constructor}}
PT p2{c_init}; // expected-error {{no matching constructor}}
- PT p3{std::move(init)}; // expected-error {{no matching constructor for initialization of 'PT' (aka 'packaged_task<A (int, char)>')}}
+ PT p3{std::move(init)}; // expected-error-re {{no matching constructor for initialization of 'PT' (aka {{.+}})}}
- return 0;
+ return 0;
}
diff --git a/libcxx/test/std/utilities/expected/expected.expected/ctor/ctor.u.pass.cpp b/libcxx/test/std/utilities/expected/expected.expected/ctor/ctor.u.pass.cpp
index fa9f5a1..13c0da2 100644
--- a/libcxx/test/std/utilities/expected/expected.expected/ctor/ctor.u.pass.cpp
+++ b/libcxx/test/std/utilities/expected/expected.expected/ctor/ctor.u.pass.cpp
@@ -14,6 +14,7 @@
// Constraints:
// - is_same_v<remove_cvref_t<U>, in_place_t> is false; and
// - is_same_v<expected, remove_cvref_t<U>> is false; and
+// - is_same_v<remove_cvref_t<U>, unexpect_t> is false; and
// - remove_cvref_t<U> is not a specialization of unexpected; and
// - is_constructible_v<T, U> is true.
//
@@ -46,6 +47,16 @@ static_assert(!std::is_constructible_v<std::expected<FromJustInplace, int>, std:
// Note that result is true because it is covered by the constructors that take expected
static_assert(std::is_constructible_v<std::expected<int, int>, std::expected<int, int>&>);
+struct T {
+ explicit T(auto) {}
+};
+struct E {
+ E(int) {}
+};
+
+// is_same_v<remove_cvref_t<U>, unexpect_t> is false;
+static_assert(!std::is_constructible_v<std::expected<T, E>, std::unexpect_t>);
+
// remove_cvref_t<U> is a specialization of unexpected
// Note that result is true because it is covered by the constructors that take unexpected
static_assert(std::is_constructible_v<std::expected<int, int>, std::unexpected<int>&>);
diff --git a/libcxx/test/std/utilities/utility/pairs/pairs.pair/nttp.verify.cpp b/libcxx/test/std/utilities/utility/pairs/pairs.pair/nttp.verify.cpp
index 9a39c18..f440b95 100644
--- a/libcxx/test/std/utilities/utility/pairs/pairs.pair/nttp.verify.cpp
+++ b/libcxx/test/std/utilities/utility/pairs/pairs.pair/nttp.verify.cpp
@@ -74,7 +74,7 @@ using H = test<std::pair<NotALiteral, NotALiteral>{}>;
// expected-error@-1 {{non-type template parameter has non-literal type 'std::pair<NotALiteral, NotALiteral>'}}
using I = test<std::pair<std::string, std::string>{}>;
-// expected-error-re@-1 {{type 'std::pair<{{(std::)?}}string, {{(std::)?}}string>'{{( \(aka 'std::pair<std::string, std::string>'\))?}} of non-type template parameter is not a structural type}}
+// expected-error-re@*:* {{type {{.+}} of non-type template parameter is not a structural type}}
} // namespace test_ctad
namespace test_auto {
@@ -95,5 +95,5 @@ using H = test<std::pair<NotALiteral, NotALiteral>{}>;
// expected-error@-1 {{non-type template parameter has non-literal type 'std::pair<NotALiteral, NotALiteral>'}}
using I = test<std::pair<std::string, std::string>{}>;
-// expected-error@-1 {{type 'std::pair<std::string, std::string>' (aka 'pair<basic_string<char>, basic_string<char>>') of non-type template parameter is not a structural type}}
+// expected-error-re@-1 {{type {{.+}} of non-type template parameter is not a structural type}}
} // namespace test_auto
diff --git a/libcxx/test/support/is_transparent.h b/libcxx/test/support/is_transparent.h
index 700c894..4b2a458 100644
--- a/libcxx/test/support/is_transparent.h
+++ b/libcxx/test/support/is_transparent.h
@@ -36,6 +36,17 @@ struct transparent_less_not_referenceable
using is_transparent = void () const &; // it's a type; a weird one, but a type
};
+// Prevent regressions when the empty base class optimization is not applicable.
+// See https://github.com/llvm/llvm-project/issues/152543.
+struct transparent_less_nonempty {
+ template <class T, class U>
+ constexpr bool operator()(T&& t, U&& u) const {
+ return std::forward<T>(t) < std::forward<U>(u);
+ }
+ struct is_transparent {
+ } pad_; // making this comparator non-empty
+};
+
struct transparent_less_no_type
{
template <class T, class U>
diff --git a/lld/ELF/Arch/LoongArch.cpp b/lld/ELF/Arch/LoongArch.cpp
index 8802c8c..838ca4d 100644
--- a/lld/ELF/Arch/LoongArch.cpp
+++ b/lld/ELF/Arch/LoongArch.cpp
@@ -1396,9 +1396,6 @@ void LoongArch::relocateAlloc(InputSectionBase &sec, uint8_t *buf) const {
// change in section sizes can have cascading effect and require another
// relaxation pass.
bool LoongArch::relaxOnce(int pass) const {
- if (ctx.arg.relocatable)
- return false;
-
if (pass == 0)
initSymbolAnchors(ctx);
diff --git a/lld/ELF/Arch/RISCV.cpp b/lld/ELF/Arch/RISCV.cpp
index 72d8315..71201b0 100644
--- a/lld/ELF/Arch/RISCV.cpp
+++ b/lld/ELF/Arch/RISCV.cpp
@@ -45,7 +45,21 @@ public:
uint64_t val) const override;
void relocateAlloc(InputSectionBase &sec, uint8_t *buf) const override;
bool relaxOnce(int pass) const override;
+ template <class ELFT, class RelTy>
+ bool synthesizeAlignForInput(uint64_t &dot, InputSection *sec,
+ Relocs<RelTy> rels);
+ template <class ELFT, class RelTy>
+ void finalizeSynthesizeAligns(uint64_t &dot, InputSection *sec,
+ Relocs<RelTy> rels);
+ template <class ELFT>
+ bool synthesizeAlignAux(uint64_t &dot, InputSection *sec);
+ bool synthesizeAlign(uint64_t &dot, InputSection *sec) override;
void finalizeRelax(int passes) const override;
+
+ // The following two variables track the state used when synthesizing ALIGN relocations.
+ InputSection *baseSec = nullptr;
+ // r_offset and r_addend pairs.
+ SmallVector<std::pair<uint64_t, uint64_t>, 0> synthesizedAligns;
};
} // end anonymous namespace
@@ -942,9 +956,6 @@ static bool relax(Ctx &ctx, int pass, InputSection &sec) {
// relaxation pass.
bool RISCV::relaxOnce(int pass) const {
llvm::TimeTraceScope timeScope("RISC-V relaxOnce");
- if (ctx.arg.relocatable)
- return false;
-
if (pass == 0)
initSymbolAnchors(ctx);
@@ -959,6 +970,116 @@ bool RISCV::relaxOnce(int pass) const {
return changed;
}
+// If the section alignment is >= 4, advance `dot` to insert NOPs and synthesize
+// an ALIGN relocation. Otherwise, return false to use default handling.
+template <class ELFT, class RelTy>
+bool RISCV::synthesizeAlignForInput(uint64_t &dot, InputSection *sec,
+ Relocs<RelTy> rels) {
+ if (!baseSec) {
+ // Record the first input section with RELAX relocations; the synthesized
+ // ALIGN relocations will be appended to its relocation section.
+ for (auto rel : rels) {
+ if (rel.getType(false) == R_RISCV_RELAX) {
+ baseSec = sec;
+ break;
+ }
+ }
+ } else if (sec->addralign >= 4) {
+ // If the alignment is >= 4 and the section does not start with an ALIGN
+ // relocation, synthesize one.
+ bool hasAlignRel = llvm::any_of(rels, [](const RelTy &rel) {
+ return rel.r_offset == 0 && rel.getType(false) == R_RISCV_ALIGN;
+ });
+ if (!hasAlignRel) {
+ synthesizedAligns.emplace_back(dot - baseSec->getVA(),
+ sec->addralign - 2);
+ dot += sec->addralign - 2;
+ return true;
+ }
+ }
+ return false;
+}
+
+// Finalize the relocation section by appending synthesized ALIGN relocations
+// after processing all input sections.
+template <class ELFT, class RelTy>
+void RISCV::finalizeSynthesizeAligns(uint64_t &dot, InputSection *sec,
+ Relocs<RelTy> rels) {
+ auto *f = cast<ObjFile<ELFT>>(baseSec->file);
+ auto shdr = f->template getELFShdrs<ELFT>()[baseSec->relSecIdx];
+ // Create a copy of InputSection.
+ sec = make<InputSection>(*f, shdr, baseSec->name);
+ auto *baseRelSec = cast<InputSection>(f->getSections()[baseSec->relSecIdx]);
+ *sec = *baseRelSec;
+ baseSec = nullptr;
+
+ // Allocate buffer for original and synthesized relocations in RELA format.
+ // If CREL is used, OutputSection::finalizeNonAllocCrel will convert RELA to
+ // CREL.
+ auto newSize = rels.size() + synthesizedAligns.size();
+ auto *relas = makeThreadLocalN<typename ELFT::Rela>(newSize);
+ sec->size = newSize * sizeof(typename ELFT::Rela);
+ sec->content_ = reinterpret_cast<uint8_t *>(relas);
+ sec->type = SHT_RELA;
+ // Copy original relocations to the new buffer, potentially converting CREL to
+ // RELA.
+ for (auto [i, r] : llvm::enumerate(rels)) {
+ relas[i].r_offset = r.r_offset;
+ relas[i].setSymbolAndType(r.getSymbol(0), r.getType(0), false);
+ if constexpr (RelTy::HasAddend)
+ relas[i].r_addend = r.r_addend;
+ }
+ // Append synthesized ALIGN relocations to the buffer.
+ for (auto [i, r] : llvm::enumerate(synthesizedAligns)) {
+ auto &rela = relas[rels.size() + i];
+ rela.r_offset = r.first;
+ rela.setSymbolAndType(0, R_RISCV_ALIGN, false);
+ rela.r_addend = r.second;
+ }
+ // Replace the old relocation section with the new one in the output section.
+ // addOrphanSections ensures that the output relocation section is processed
+ // after osec.
+ for (SectionCommand *cmd : sec->getParent()->commands) {
+ auto *isd = dyn_cast<InputSectionDescription>(cmd);
+ if (!isd)
+ continue;
+ for (auto *&isec : isd->sections)
+ if (isec == baseRelSec)
+ isec = sec;
+ }
+}
+
+template <class ELFT>
+bool RISCV::synthesizeAlignAux(uint64_t &dot, InputSection *sec) {
+ bool ret = false;
+ if (sec) {
+ invokeOnRelocs(*sec, ret = synthesizeAlignForInput<ELFT>, dot, sec);
+ } else if (baseSec) {
+ invokeOnRelocs(*baseSec, finalizeSynthesizeAligns<ELFT>, dot, sec);
+ }
+ return ret;
+}
+
+// Without linker relaxation enabled for a particular relocatable file or
+// section, the assembler will not generate R_RISCV_ALIGN relocations for
+// alignment directives. This becomes problematic in a two-stage linking
+// process: ld -r a.o b.o -o ab.o; ld ab.o -o ab. This function synthesizes an
+// R_RISCV_ALIGN relocation at section start when needed.
+//
+// When called with an input section (`sec` is not null): If the section
+// alignment is >= 4, advance `dot` to insert NOPs and synthesize an ALIGN
+// relocation.
+//
+// When called after all input sections are processed (`sec` is null): The
+// output relocation section is updated with all the newly synthesized ALIGN
+// relocations.
+bool RISCV::synthesizeAlign(uint64_t &dot, InputSection *sec) {
+ assert(ctx.arg.relocatable);
+ if (ctx.arg.is64)
+ return synthesizeAlignAux<ELF64LE>(dot, sec);
+ return synthesizeAlignAux<ELF32LE>(dot, sec);
+}
+
void RISCV::finalizeRelax(int passes) const {
llvm::TimeTraceScope timeScope("Finalize RISC-V relaxation");
Log(ctx) << "relaxation passes: " << passes;
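To make the two call modes concrete, here is a condensed sketch of the caller, mirroring the LinkerScript::assignOffsets change later in this diff (lld types and helpers are abbreviated, so this is illustrative rather than standalone-compilable):

  uint64_t dot = sec->addr;
  for (InputSection *isec : sections) {
    // Per-section call: may advance dot past synthesized NOP padding and record
    // an ALIGN relocation; a false return falls back to normal alignment.
    if (!ctx.target->synthesizeAlign(dot, isec))
      dot = alignToPowerOf2(dot, isec->addralign);
    isec->outSecOff = dot - sec->addr;
    dot += isec->getSize();
  }
  // Final call with a null section: append the recorded ALIGN relocations to
  // the relocation section of the first RELAX-carrying input section.
  ctx.target->synthesizeAlign(dot, nullptr);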
diff --git a/lld/ELF/BPSectionOrderer.cpp b/lld/ELF/BPSectionOrderer.cpp
index 0615204..38f15b9 100644
--- a/lld/ELF/BPSectionOrderer.cpp
+++ b/lld/ELF/BPSectionOrderer.cpp
@@ -76,10 +76,12 @@ DenseMap<const InputSectionBase *, int> elf::runBalancedPartitioning(
if (!d)
return;
auto *sec = dyn_cast_or_null<InputSection>(d->section);
- // Skip empty, discarded, ICF folded sections, .bss. Skipping ICF folded
- // sections reduces duplicate detection work in BPSectionOrderer.
- if (!sec || sec->size == 0 || !sec->isLive() || sec->repl != sec ||
- !sec->content().data() || !orderer.secToSym.try_emplace(sec, d).second)
+ // Skip section symbols. Skip empty, discarded, ICF folded sections, .bss.
+ // Skipping ICF folded sections reduces duplicate detection work in
+ // BPSectionOrderer.
+ if (sym.isSection() || !sec || sec->size == 0 || !sec->isLive() ||
+ sec->repl != sec || !sec->content().data() ||
+ !orderer.secToSym.try_emplace(sec, d).second)
return;
rootSymbolToSectionIdxs[CachedHashStringRef(
lld::utils::getRootSymbol(sym.getName()))]
diff --git a/lld/ELF/LinkerScript.cpp b/lld/ELF/LinkerScript.cpp
index a5d08f4..e9ce4e2 100644
--- a/lld/ELF/LinkerScript.cpp
+++ b/lld/ELF/LinkerScript.cpp
@@ -1230,6 +1230,9 @@ bool LinkerScript::assignOffsets(OutputSection *sec) {
if (sec->firstInOverlay)
state->overlaySize = 0;
+ bool synthesizeAlign = ctx.arg.relocatable && ctx.arg.relax &&
+ (sec->flags & SHF_EXECINSTR) &&
+ ctx.arg.emachine == EM_RISCV;
// We visited SectionsCommands from processSectionCommands to
// layout sections. Now, we visit SectionsCommands again to fix
// section offsets.
@@ -1260,7 +1263,10 @@ bool LinkerScript::assignOffsets(OutputSection *sec) {
if (isa<PotentialSpillSection>(isec))
continue;
const uint64_t pos = dot;
- dot = alignToPowerOf2(dot, isec->addralign);
+ // If a synthesized ALIGN may be needed, call synthesizeAlign and skip the
+ // default alignment handling when it returns true.
+ if (!(synthesizeAlign && ctx.target->synthesizeAlign(dot, isec)))
+ dot = alignToPowerOf2(dot, isec->addralign);
isec->outSecOff = dot - sec->addr;
dot += isec->getSize();
@@ -1276,6 +1282,12 @@ bool LinkerScript::assignOffsets(OutputSection *sec) {
if (ctx.in.relroPadding && sec == ctx.in.relroPadding->getParent())
expandOutputSection(alignToPowerOf2(dot, ctx.arg.commonPageSize) - dot);
+ if (synthesizeAlign) {
+ const uint64_t pos = dot;
+ ctx.target->synthesizeAlign(dot, nullptr);
+ expandOutputSection(dot - pos);
+ }
+
// Non-SHF_ALLOC sections do not affect the addresses of other OutputSections
// as they are not part of the process image.
if (!(sec->flags & SHF_ALLOC)) {
diff --git a/lld/ELF/OutputSections.cpp b/lld/ELF/OutputSections.cpp
index 1020dd9..0b50f6d 100644
--- a/lld/ELF/OutputSections.cpp
+++ b/lld/ELF/OutputSections.cpp
@@ -889,9 +889,17 @@ void OutputSection::sortInitFini() {
std::array<uint8_t, 4> OutputSection::getFiller(Ctx &ctx) {
if (filler)
return *filler;
- if (flags & SHF_EXECINSTR)
- return ctx.target->trapInstr;
- return {0, 0, 0, 0};
+ if (!(flags & SHF_EXECINSTR))
+ return {0, 0, 0, 0};
+ if (ctx.arg.relocatable && ctx.arg.emachine == EM_RISCV) {
+ // See RISCV::synthesizeAlign: synthesized NOP bytes and ALIGN relocations
+ // might be needed between two input sections. Use a NOP (rather than the
+ // trap instruction) for the filler.
+ if (ctx.arg.eflags & EF_RISCV_RVC)
+ return {1, 0, 1, 0};
+ return {0x13, 0, 0, 0};
+ }
+ return ctx.target->trapInstr;
}
void OutputSection::checkDynRelAddends(Ctx &ctx) {
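For reference, the filler byte patterns above correspond to standard RISC-V NOP encodings; a small self-contained sketch (the array names are illustrative, not from the patch):

  #include <array>
  #include <cstdint>
  // c.nop              = 0x0001     -> little-endian bytes {0x01, 0x00}, doubled to fill 4 bytes
  // addi zero, zero, 0 = 0x00000013 -> little-endian bytes {0x13, 0x00, 0x00, 0x00}
  constexpr std::array<std::uint8_t, 4> rvcFiller = {1, 0, 1, 0};
  constexpr std::array<std::uint8_t, 4> nopFiller = {0x13, 0, 0, 0};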
diff --git a/lld/ELF/Target.h b/lld/ELF/Target.h
index fdc0c20..e121fa1 100644
--- a/lld/ELF/Target.h
+++ b/lld/ELF/Target.h
@@ -96,6 +96,9 @@ public:
// Do a linker relaxation pass and return true if we changed something.
virtual bool relaxOnce(int pass) const { return false; }
+ virtual bool synthesizeAlign(uint64_t &dot, InputSection *sec) {
+ return false;
+ }
// Do finalize relaxation after collecting relaxation infos.
virtual void finalizeRelax(int passes) const {}
diff --git a/lld/ELF/Writer.cpp b/lld/ELF/Writer.cpp
index 2b0e097..4fa8039 100644
--- a/lld/ELF/Writer.cpp
+++ b/lld/ELF/Writer.cpp
@@ -1541,8 +1541,10 @@ template <class ELFT> void Writer<ELFT>::finalizeAddressDependentContent() {
if (ctx.arg.randomizeSectionPadding)
randomizeSectionPadding(ctx);
+ // Iterate until a fixed point is reached, skipping relocatable links since
+ // the final addresses are unavailable.
uint32_t pass = 0, assignPasses = 0;
- for (;;) {
+ while (!ctx.arg.relocatable) {
bool changed = ctx.target->needsThunks
? tc.createThunks(pass, ctx.outputSections)
: ctx.target->relaxOnce(pass);
diff --git a/lld/MachO/Driver.cpp b/lld/MachO/Driver.cpp
index 9eb391c..f11c65a 100644
--- a/lld/MachO/Driver.cpp
+++ b/lld/MachO/Driver.cpp
@@ -1635,27 +1635,21 @@ bool link(ArrayRef<const char *> argsArr, llvm::raw_ostream &stdoutOS,
config->osoPrefix = args.getLastArgValue(OPT_oso_prefix);
if (!config->osoPrefix.empty()) {
- // Expand special characters, such as ".", "..", or "~", if present.
- // Note: LD64 only expands "." and not other special characters.
- // That seems silly to imitate so we will not try to follow it, but rather
- // just use real_path() to do it.
-
// The max path length is 4096, in theory. However that seems quite long
// and seems unlikely that any one would want to strip everything from the
// path. Hence we've picked a reasonably large number here.
SmallString<1024> expanded;
- if (!fs::real_path(config->osoPrefix, expanded,
- /*expand_tilde=*/true)) {
- // Note: LD64 expands "." to be `<current_dir>/`
- // (ie., it has a slash suffix) whereas real_path() doesn't.
- // So we have to append '/' to be consistent.
- StringRef sep = sys::path::get_separator();
- // real_path removes trailing slashes as part of the normalization, but
- // these are meaningful for our text based stripping
- if (config->osoPrefix == "." || config->osoPrefix.ends_with(sep))
- expanded += sep;
- config->osoPrefix = saver().save(expanded.str());
+ // Expand "." into the current working directory.
+ if (config->osoPrefix == "." && !fs::current_path(expanded)) {
+ // Note: LD64 expands "." to be `<current_dir>/`
+ // (i.e., it has a slash suffix) whereas current_path() doesn't,
+ // so we have to append '/' to be consistent; the trailing slash is
+ // meaningful for our text-based stripping.
+ expanded += sys::path::get_separator();
+ } else {
+ expanded = config->osoPrefix;
}
+ config->osoPrefix = saver().save(expanded.str());
}
bool pie = args.hasFlag(OPT_pie, OPT_no_pie, true);
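A hypothetical illustration of the new -oso_prefix behavior (the paths are examples, not from the patch):

  // Running in /tmp/build:
  //   -oso_prefix "."          -> prefix becomes "/tmp/build/" (cwd plus a trailing separator)
  //   -oso_prefix "/tmp/build" -> used verbatim; no real_path(), so "..", "~", and symlinks
  //                               are no longer expanded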
diff --git a/lld/test/ELF/bp-section-orderer.s b/lld/test/ELF/bp-section-orderer.s
index 438d7c2..9a83c70 100644
--- a/lld/test/ELF/bp-section-orderer.s
+++ b/lld/test/ELF/bp-section-orderer.s
@@ -21,38 +21,38 @@
# RUN: llvm-profdata merge a.proftext -o a.profdata
# RUN: ld.lld a.o --irpgo-profile=a.profdata --bp-startup-sort=function --verbose-bp-section-orderer --icf=all --gc-sections 2>&1 | FileCheck %s --check-prefix=STARTUP-FUNC-ORDER
-# STARTUP-FUNC-ORDER: Ordered 3 sections ([[#]] bytes) using balanced partitioning
-# STARTUP-FUNC-ORDER: Total area under the page fault curve: 3.
+# STARTUP-FUNC-ORDER: Ordered 4 sections ([[#]] bytes) using balanced partitioning
+# STARTUP-FUNC-ORDER: Total area under the page fault curve: 4.
# RUN: ld.lld -o out.s a.o --irpgo-profile=a.profdata --bp-startup-sort=function
# RUN: llvm-nm -jn out.s | tr '\n' , | FileCheck %s --check-prefix=STARTUP
-# STARTUP: s5,s4,s3,s2,s1,A,B,C,F,E,D,merged1,merged2,_start,d4,d3,d2,d1,g1,{{$}}
+# STARTUP: s5,s4,s3,s2,s1,A,B,C,L1,F,E,D,merged1,merged2,G,_start,d4,d3,d2,d1,g1,{{$}}
# RUN: ld.lld -o out.os a.o --irpgo-profile=a.profdata --bp-startup-sort=function --symbol-ordering-file a.txt
# RUN: llvm-nm -jn out.os | tr '\n' , | FileCheck %s --check-prefix=ORDER-STARTUP
-# ORDER-STARTUP: s2,s1,s5,s4,s3,A,F,E,D,B,C,merged1,merged2,_start,d3,d2,d4,d1,g1,{{$}}
+# ORDER-STARTUP: s2,s1,s5,s4,s3,A,F,E,D,B,C,L1,merged1,merged2,G,_start,d3,d2,d4,d1,g1,{{$}}
# RUN: ld.lld -o out.cf a.o --verbose-bp-section-orderer --bp-compression-sort=function 2>&1 | FileCheck %s --check-prefix=BP-COMPRESSION-FUNC
# RUN: ld.lld -o out.cf.icf a.o --verbose-bp-section-orderer --bp-compression-sort=function --icf=all --gc-sections 2>&1 | FileCheck %s --check-prefix=BP-COMPRESSION-ICF-FUNC
# RUN: llvm-nm -jn out.cf | tr '\n' , | FileCheck %s --check-prefix=CFUNC
-# CFUNC: s5,s4,s3,s2,s1,A,F,merged1,merged2,C,E,D,B,_start,d4,d3,d2,d1,g1,{{$}}
+# CFUNC: s5,s4,s3,s2,s1,C,E,D,B,G,F,merged1,merged2,A,_start,L1,d4,d3,d2,d1,g1,{{$}}
# RUN: ld.lld -o out.cd a.o --verbose-bp-section-orderer --bp-compression-sort=data 2>&1 | FileCheck %s --check-prefix=BP-COMPRESSION-DATA
# RUN: llvm-nm -jn out.cd | tr '\n' , | FileCheck %s --check-prefix=CDATA
-# CDATA: s5,s3,s4,s2,s1,F,C,E,D,B,A,merged1,merged2,_start,d4,d1,d3,d2,g1,{{$}}
+# CDATA: s5,s3,s4,s2,s1,F,C,E,D,B,A,merged1,merged2,L1,G,_start,d4,d1,d3,d2,g1,{{$}}
# RUN: ld.lld -o out.cb a.o --verbose-bp-section-orderer --bp-compression-sort=both 2>&1 | FileCheck %s --check-prefix=BP-COMPRESSION-BOTH
# RUN: llvm-nm -jn out.cb | tr '\n' , | FileCheck %s --check-prefix=CBOTH
-# CBOTH: s5,s3,s4,s2,s1,A,F,merged1,merged2,C,E,D,B,_start,d4,d1,d3,d2,g1,{{$}}
+# CBOTH: s5,s3,s4,s2,s1,C,E,D,B,G,F,merged1,merged2,A,_start,L1,d4,d1,d3,d2,g1,{{$}}
# RUN: ld.lld -o out.cbs a.o --verbose-bp-section-orderer --bp-compression-sort=both --irpgo-profile=a.profdata --bp-startup-sort=function 2>&1 | FileCheck %s --check-prefix=BP-COMPRESSION-BOTH
# RUN: llvm-nm -jn out.cbs | tr '\n' , | FileCheck %s --check-prefix=CBOTH-STARTUP
-# CBOTH-STARTUP: s5,s3,s4,s2,s1,A,B,C,F,E,D,merged1,merged2,_start,d4,d1,d3,d2,g1,{{$}}
+# CBOTH-STARTUP: s5,s3,s4,s2,s1,A,B,C,L1,F,E,D,merged1,merged2,G,_start,d4,d1,d3,d2,g1,{{$}}
-# BP-COMPRESSION-FUNC: Ordered 9 sections ([[#]] bytes) using balanced partitioning
-# BP-COMPRESSION-ICF-FUNC: Ordered 8 sections ([[#]] bytes) using balanced partitioning
+# BP-COMPRESSION-FUNC: Ordered 11 sections ([[#]] bytes) using balanced partitioning
+# BP-COMPRESSION-ICF-FUNC: Ordered 9 sections ([[#]] bytes) using balanced partitioning
# BP-COMPRESSION-DATA: Ordered 9 sections ([[#]] bytes) using balanced partitioning
-# BP-COMPRESSION-BOTH: Ordered 18 sections ([[#]] bytes) using balanced partitioning
+# BP-COMPRESSION-BOTH: Ordered 20 sections ([[#]] bytes) using balanced partitioning
#--- a.proftext
:ir
@@ -63,7 +63,7 @@
1
# Weight
1
-A, B, C
+A, B, C, L1
A
# Func Hash:
@@ -97,6 +97,14 @@ D
# Counter Values:
1
+L1
+# Func Hash:
+5555
+# Num Counters:
+1
+# Counter Values:
+1
+
#--- a.txt
A
F
@@ -137,6 +145,9 @@ void A() {}
RETAIN int merged1(int a) { return F(a + 101); }
int merged2(int a) { return F(a + 101); }
+RETAIN static int L1(int a) { return a + 103; }
+int G(int a) { return L1(a); }
+
int _start() { return 0; }
#--- gen
@@ -148,7 +159,7 @@ clang --target=aarch64-linux-gnu -O0 -ffunction-sections -fdata-sections -fno-as
.p2align 2
.type F,@function
F: // @F
-// %bb.0: // %entry
+// %bb.0:
sub sp, sp, #32
stp x29, x30, [sp, #16] // 16-byte Folded Spill
add x29, sp, #16
@@ -167,7 +178,7 @@ F: // @F
.p2align 2
.type C,@function
C: // @C
-// %bb.0: // %entry
+// %bb.0:
sub sp, sp, #32
stp x29, x30, [sp, #16] // 16-byte Folded Spill
add x29, sp, #16
@@ -186,7 +197,7 @@ C: // @C
.p2align 2
.type E,@function
E: // @E
-// %bb.0: // %entry
+// %bb.0:
sub sp, sp, #32
stp x29, x30, [sp, #16] // 16-byte Folded Spill
add x29, sp, #16
@@ -205,7 +216,7 @@ E: // @E
.p2align 2
.type D,@function
D: // @D
-// %bb.0: // %entry
+// %bb.0:
sub sp, sp, #32
stp x29, x30, [sp, #16] // 16-byte Folded Spill
add x29, sp, #16
@@ -224,7 +235,7 @@ D: // @D
.p2align 2
.type B,@function
B: // @B
-// %bb.0: // %entry
+// %bb.0:
sub sp, sp, #32
stp x29, x30, [sp, #16] // 16-byte Folded Spill
add x29, sp, #16
@@ -243,7 +254,7 @@ B: // @B
.p2align 2
.type A,@function
A: // @A
-// %bb.0: // %entry
+// %bb.0:
ret
.Lfunc_end5:
.size A, .Lfunc_end5-A
@@ -253,7 +264,7 @@ A: // @A
.p2align 2
.type merged1,@function
merged1: // @merged1
-// %bb.0: // %entry
+// %bb.0:
sub sp, sp, #32
stp x29, x30, [sp, #16] // 16-byte Folded Spill
add x29, sp, #16
@@ -272,7 +283,7 @@ merged1: // @merged1
.p2align 2
.type merged2,@function
merged2: // @merged2
-// %bb.0: // %entry
+// %bb.0:
sub sp, sp, #32
stp x29, x30, [sp, #16] // 16-byte Folded Spill
add x29, sp, #16
@@ -286,16 +297,48 @@ merged2: // @merged2
.Lfunc_end7:
.size merged2, .Lfunc_end7-merged2
// -- End function
+ .section .text.L1,"axR",@progbits
+ .p2align 2 // -- Begin function L1
+ .type L1,@function
+L1: // @L1
+// %bb.0:
+ sub sp, sp, #16
+ str w0, [sp, #12]
+ ldr w8, [sp, #12]
+ add w0, w8, #103
+ add sp, sp, #16
+ ret
+.Lfunc_end8:
+ .size L1, .Lfunc_end8-L1
+ // -- End function
+ .section .text.G,"ax",@progbits
+ .globl G // -- Begin function G
+ .p2align 2
+ .type G,@function
+G: // @G
+// %bb.0:
+ sub sp, sp, #32
+ stp x29, x30, [sp, #16] // 16-byte Folded Spill
+ add x29, sp, #16
+ stur w0, [x29, #-4]
+ ldur w0, [x29, #-4]
+ bl L1
+ ldp x29, x30, [sp, #16] // 16-byte Folded Reload
+ add sp, sp, #32
+ ret
+.Lfunc_end9:
+ .size G, .Lfunc_end9-G
+ // -- End function
.section .text._start,"ax",@progbits
.globl _start // -- Begin function _start
.p2align 2
.type _start,@function
_start: // @_start
-// %bb.0: // %entry
+// %bb.0:
mov w0, wzr
ret
-.Lfunc_end8:
- .size _start, .Lfunc_end8-_start
+.Lfunc_end10:
+ .size _start, .Lfunc_end10-_start
// -- End function
.type s5,@object // @s5
.section .rodata.s5,"a",@progbits
@@ -395,3 +438,4 @@ g1:
.addrsig_sym B
.addrsig_sym A
.addrsig_sym merged1
+ .addrsig_sym L1
diff --git a/lld/test/ELF/riscv-relax-align.s b/lld/test/ELF/riscv-relax-align.s
index 3efc774f..b3db2c94 100644
--- a/lld/test/ELF/riscv-relax-align.s
+++ b/lld/test/ELF/riscv-relax-align.s
@@ -10,7 +10,7 @@
# RUN: ld.lld -Ttext=0x10000 --no-relax 32.o -o 32.norelax
# RUN: llvm-objdump -td --no-show-raw-insn -M no-aliases 32.norelax | FileCheck %s
-# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+relax %s -o 64.o
+# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+relax %s -riscv-align-rvc=0 -o 64.o
# RUN: ld.lld -Ttext=0x10000 64.o -o 64
# RUN: llvm-objdump -td --no-show-raw-insn -M no-aliases 64 | FileCheck %s
# RUN: ld.lld -Ttext=0x10000 --no-relax 64.o -o 64.norelax
@@ -29,7 +29,7 @@
# CHECK-DAG: 00010000 g .text {{0*}}38 _start
# CHECK: <_start>:
-# CHECK-NEXT: addi a0, a0, 0x1
+# CHECK-NEXT: lui a0, 0x10
# CHECK-EMPTY:
# CHECK-NEXT: <a>:
# CHECK-NEXT: addi a0, a0, 0x2
@@ -82,7 +82,9 @@
# GC-NOT: <d>:
# CHECKR: <_start>:
-# CHECKR-NEXT: addi a0, a0, 0x1
+# CHECKR-NEXT: lui a0, 0x0
+# CHECKR-NEXT: 0000000000000000: R_RISCV_HI20 _start
+# CHECKR-NEXT: 0000000000000000: R_RISCV_RELAX *ABS*
# CHECKR-EMPTY:
# CHECKR-NEXT: <a>:
# CHECKR-NEXT: addi a0, a0, 0x2
@@ -116,7 +118,7 @@
.global _start
_start:
- addi a0, a0, 0x1
+ lui a0, %hi(_start)
a:
addi a0, a0, 0x2
b:
diff --git a/lld/test/ELF/riscv-relax-emit-relocs.s b/lld/test/ELF/riscv-relax-emit-relocs.s
index 26ea0d1..358a288 100644
--- a/lld/test/ELF/riscv-relax-emit-relocs.s
+++ b/lld/test/ELF/riscv-relax-emit-relocs.s
@@ -3,11 +3,11 @@
# RUN: rm -rf %t && mkdir %t && cd %t
-# RUN: llvm-mc -filetype=obj -triple=riscv32 -mattr=+relax %s -o 32.o
+# RUN: llvm-mc -filetype=obj -triple=riscv32 -mattr=+relax %s -o 32.o -riscv-align-rvc=0
# RUN: ld.lld -Ttext=0x10000 --emit-relocs 32.o -o 32
# RUN: llvm-objdump -dr --no-show-raw-insn -M no-aliases 32 | FileCheck %s
-# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+relax %s -o 64.o
+# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+relax %s -o 64.o -riscv-align-rvc=0
# RUN: ld.lld -Ttext=0x10000 --emit-relocs 64.o -o 64
# RUN: llvm-objdump -dr --no-show-raw-insn -M no-aliases 64 | FileCheck %s
diff --git a/lld/test/ELF/riscv-relocatable-align.s b/lld/test/ELF/riscv-relocatable-align.s
new file mode 100644
index 0000000..4ba932c
--- /dev/null
+++ b/lld/test/ELF/riscv-relocatable-align.s
@@ -0,0 +1,131 @@
+# REQUIRES: riscv
+# RUN: rm -rf %t && split-file %s %t && cd %t
+# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+c,+relax a.s -o ac.o
+# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+c,+relax b.s -o bc.o
+# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+c,+relax b1.s -o b1c.o
+# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+c,+relax c.s -o cc.o
+# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+c d.s -o dc.o
+
+## No RELAX. Don't synthesize ALIGN.
+# RUN: ld.lld -r bc.o dc.o -o bd.ro
+# RUN: llvm-readelf -r bd.ro | FileCheck %s --check-prefix=NOREL
+
+# NOREL: no relocations
+
+# RUN: ld.lld -r bc.o bc.o ac.o bc.o b1c.o cc.o dc.o -o out.ro
+# RUN: llvm-objdump -dr -M no-aliases out.ro | FileCheck %s
+
+# RUN: llvm-mc -filetype=obj -triple=riscv32 -mattr=+relax a.s -o a.o
+# RUN: llvm-mc -filetype=obj -triple=riscv32 -mattr=+relax b.s -o b.o
+# RUN: llvm-mc -filetype=obj -triple=riscv32 -mattr=+relax d.s -o d.o
+# RUN: ld.lld -r a.o b.o d.o -o out0.ro
+# RUN: ld.lld -Ttext=0x10000 out0.ro -o out0
+# RUN: llvm-objdump -dr -M no-aliases out0 | FileCheck %s --check-prefix=CHECK1
+
+# CHECK: <b0>:
+# CHECK-NEXT: 0: 00158513 addi a0, a1, 0x1
+# CHECK-NEXT: 4: 0001 c.nop
+# CHECK-NEXT: 6: 0001 c.nop
+# CHECK-EMPTY:
+# CHECK-NEXT: <b0>:
+# CHECK-NEXT: 8: 00158513 addi a0, a1, 0x1
+# CHECK-EMPTY:
+# CHECK-NEXT: <_start>:
+# CHECK-NEXT: c: 00000097 auipc ra, 0x0
+# CHECK-NEXT: 000000000000000c: R_RISCV_CALL_PLT foo
+# CHECK-NEXT: 000000000000000c: R_RISCV_RELAX *ABS*
+# CHECK-NEXT: 10: 000080e7 jalr ra, 0x0(ra) <_start>
+# CHECK-NEXT: 14: 0001 c.nop
+# CHECK-NEXT: 0000000000000014: R_RISCV_ALIGN *ABS*+0x6
+# CHECK-NEXT: 16: 0001 c.nop
+# CHECK-NEXT: 18: 0001 c.nop
+# CHECK-EMPTY:
+# CHECK-NEXT: <b0>:
+# CHECK-NEXT: 1a: 00158513 addi a0, a1, 0x1
+# CHECK-NEXT: 1e: 0001 c.nop
+# CHECK-NEXT: 20: 0001 c.nop
+# CHECK-NEXT: 0000000000000020: R_RISCV_ALIGN *ABS*+0x6
+# CHECK-NEXT: 22: 0001 c.nop
+# CHECK-NEXT: 24: 00000013 addi zero, zero, 0x0
+# CHECK-EMPTY:
+# CHECK-NEXT: <b0>:
+# CHECK-NEXT: 28: 00158513 addi a0, a1, 0x1
+# CHECK-EMPTY:
+# CHECK-NEXT: <c0>:
+# CHECK-NEXT: 2c: 00000097 auipc ra, 0x0
+# CHECK-NEXT: 000000000000002c: R_RISCV_CALL_PLT foo
+# CHECK-NEXT: 000000000000002c: R_RISCV_RELAX *ABS*
+# CHECK-NEXT: 30: 000080e7 jalr ra, 0x0(ra) <c0>
+# CHECK-NEXT: 34: 0001 c.nop
+# CHECK-NEXT: 0000000000000034: R_RISCV_ALIGN *ABS*+0x2
+# CHECK-EMPTY:
+# CHECK-NEXT: <d0>:
+# CHECK-NEXT: 36: 00258513 addi a0, a1, 0x2
+
+# CHECK1: <_start>:
+# CHECK1-NEXT: 010000ef jal ra, 0x10010 <foo>
+# CHECK1-NEXT: 00000013 addi zero, zero, 0x0
+# CHECK1-EMPTY:
+# CHECK1-NEXT: <b0>:
+# CHECK1-NEXT: 00158513 addi a0, a1, 0x1
+# CHECK1-EMPTY:
+# CHECK1-NEXT: <d0>:
+# CHECK1-NEXT: 00258513 addi a0, a1, 0x2
+
+## Test CREL.
+# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+c,+relax --crel a.s -o acrel.o
+# RUN: ld.lld -r acrel.o bc.o -o out1.ro
+# RUN: llvm-objdump -dr -M no-aliases out1.ro | FileCheck %s --check-prefix=CHECK2
+
+# CHECK2: <_start>:
+# CHECK2-NEXT: 0: 00000097 auipc ra, 0x0
+# CHECK2-NEXT: 0000000000000000: R_RISCV_CALL_PLT foo
+# CHECK2-NEXT: 0000000000000000: R_RISCV_RELAX *ABS*
+# CHECK2-NEXT: 4: 000080e7 jalr ra, 0x0(ra) <_start>
+# CHECK2-NEXT: 8: 0001 c.nop
+# CHECK2-NEXT: 0000000000000008: R_RISCV_ALIGN *ABS*+0x6
+# CHECK2-NEXT: a: 0001 c.nop
+# CHECK2-NEXT: c: 0001 c.nop
+# CHECK2-EMPTY:
+# CHECK2-NEXT: <b0>:
+# CHECK2-NEXT: e: 00158513 addi a0, a1, 0x1
+
+#--- a.s
+.globl _start
+_start:
+ call foo
+
+.section .text1,"ax"
+.globl foo
+foo:
+
+#--- b.s
+## Needs synthesized ALIGN
+.option push
+.option norelax
+.balign 8
+b0:
+ addi a0, a1, 1
+.option pop
+
+#--- b1.s
+.option push
+.option norelax
+ .reloc ., R_RISCV_ALIGN, 6
+ addi x0, x0, 0
+ c.nop
+.balign 8
+b0:
+ addi a0, a1, 1
+.option pop
+
+#--- c.s
+.balign 2
+c0:
+ call foo
+
+#--- d.s
+## Needs synthesized ALIGN
+.balign 4
+d0:
+ addi a0, a1, 2
diff --git a/lld/test/MachO/stabs.s b/lld/test/MachO/stabs.s
index 968656d..e32b9fc 100644
--- a/lld/test/MachO/stabs.s
+++ b/lld/test/MachO/stabs.s
@@ -63,9 +63,18 @@
# RUN: dsymutil -s %t/test-rel | grep 'N_OSO' | FileCheck %s -D#TEST_TIME=0x10 -D#FOO_TIME=0x20 --check-prefix=REL-PATH-NO-SLASH
# RUN: cd %t && ZERO_AR_DATE=0 %lld -lSystem test.o foo.o no-debug.o -oso_prefix "." -o %t/test-rel-dot
# RUN: dsymutil -s %t/test-rel-dot | grep 'N_OSO' | FileCheck %s -D#TEST_TIME=0x10 -D#FOO_TIME=0x20 --check-prefix=REL-DOT
-## Set HOME to %t (for ~ to expand to)
-# RUN: cd %t && env HOME=%t ZERO_AR_DATE=0 %lld -lSystem test.o foo.o no-debug.o -oso_prefix "~" -o %t/test-rel-tilde
-# RUN: dsymutil -s %t/test-rel-tilde | grep 'N_OSO' | FileCheck %s -D#TEST_TIME=0x10 -D#FOO_TIME=0x20 --check-prefix=REL-PATH
+# RUN: cd %t && ZERO_AR_DATE=0 %lld -lSystem ./test.o ./foo.o ./no-debug.o -oso_prefix "." -o %t/test-rel-dot
+# RUN: dsymutil -s %t/test-rel-dot | grep 'N_OSO' | FileCheck %s -D#TEST_TIME=0x10 -D#FOO_TIME=0x20 --check-prefix=REL-DOT-EXPLICIT
+
+## Check that symlinks are not expanded when -oso_prefix . is used.
+# RUN: mkdir -p %t/private/var/folders/tmp && ln -s private/var %t/var
+# RUN: cp %t/test.o %t/foo.o %t/no-debug.o %t/private/var/folders/tmp
+# RUN: env TZ=GMT touch -t "197001010000.16" %t/private/var/folders/tmp/test.o
+# RUN: env TZ=GMT touch -t "197001010000.32" %t/private/var/folders/tmp/foo.o
+# RUN: cd %t/var/folders/tmp && ZERO_AR_DATE=0 %lld -lSystem test.o foo.o no-debug.o -oso_prefix "." -o test-rel-symlink
+# RUN: dsymutil -s %t/private/var/folders/tmp/test-rel-symlink | grep 'N_OSO' | FileCheck %s -D#TEST_TIME=0x10 -D#FOO_TIME=0x20 --check-prefix=REL-DOT
+# RUN: cd %t/var/folders/tmp && ZERO_AR_DATE=0 %lld -lSystem ./test.o ./foo.o ./no-debug.o -oso_prefix "." -o test-rel-symlink
+# RUN: dsymutil -s %t/private/var/folders/tmp/test-rel-symlink | grep 'N_OSO' | FileCheck %s -D#TEST_TIME=0x10 -D#FOO_TIME=0x20 --check-prefix=REL-DOT-EXPLICIT
## Check that we don't emit DWARF or stabs when -S is used
# RUN: %lld -lSystem test.o foo.o no-debug.o -S -o %t/test-no-debug
@@ -91,6 +100,7 @@
# REL-PATH: (N_OSO ) 03 0001 [[#%.16x,TEST_TIME]] '/test.o'
# REL-PATH-NO-SLASH: (N_OSO ) 03 0001 [[#%.16x,TEST_TIME]] 'test.o'
# REL-DOT: (N_OSO ) 03 0001 [[#%.16x,TEST_TIME]] 'test.o'
+# REL-DOT-EXPLICIT: (N_OSO ) 03 0001 [[#%.16x,TEST_TIME]] './test.o'
# CHECK-NEXT: (N_STSYM ) [[#%.2d,MORE_DATA_ID + 1]] 0000 [[#%.16x,STATIC:]] '_static_var'
# CHECK-NEXT: (N_FUN ) [[#%.2d,TEXT_ID + 1]] 0000 [[#%.16x,MAIN:]] '_main'
# CHECK-NEXT: (N_FUN ) 00 0000 0000000000000006{{$}}
diff --git a/lldb/include/lldb/API/SBTarget.h b/lldb/include/lldb/API/SBTarget.h
index 2776a8f..22b6c63 100644
--- a/lldb/include/lldb/API/SBTarget.h
+++ b/lldb/include/lldb/API/SBTarget.h
@@ -658,6 +658,14 @@ public:
lldb::LanguageType symbol_language,
const SBFileSpecList &module_list, const SBFileSpecList &comp_unit_list);
+ lldb::SBBreakpoint BreakpointCreateByName(
+ const char *symbol_name,
+ uint32_t
+ name_type_mask, // Logical OR one or more FunctionNameType enum bits
+ lldb::LanguageType symbol_language, lldb::addr_t offset,
+ bool offset_is_insn_count, const SBFileSpecList &module_list,
+ const SBFileSpecList &comp_unit_list);
+
#ifdef SWIG
lldb::SBBreakpoint BreakpointCreateByNames(
const char **symbol_name, uint32_t num_names,
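For context only, a hedged usage sketch of the new overload declared above (the target object and the empty file-spec lists are placeholders, not part of this patch): with offset_is_insn_count set, the offset is interpreted as a number of instructions rather than bytes, so this requests a stop three instructions past the start of main.

  lldb::SBFileSpecList modules, comp_units; // empty lists: search everywhere
  lldb::SBBreakpoint bp = target.BreakpointCreateByName(
      "main", lldb::eFunctionNameTypeAuto, lldb::eLanguageTypeUnknown,
      /*offset=*/3, /*offset_is_insn_count=*/true, modules, comp_units);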
diff --git a/lldb/include/lldb/Breakpoint/BreakpointResolver.h b/lldb/include/lldb/Breakpoint/BreakpointResolver.h
index 52cd70e..243acee 100644
--- a/lldb/include/lldb/Breakpoint/BreakpointResolver.h
+++ b/lldb/include/lldb/Breakpoint/BreakpointResolver.h
@@ -45,9 +45,9 @@ public:
/// The breakpoint that owns this resolver.
/// \param[in] resolverType
/// The concrete breakpoint resolver type for this breakpoint.
- BreakpointResolver(const lldb::BreakpointSP &bkpt,
- unsigned char resolverType,
- lldb::addr_t offset = 0);
+ BreakpointResolver(const lldb::BreakpointSP &bkpt, unsigned char resolverType,
+ lldb::addr_t offset = 0,
+ bool offset_is_insn_count = false);
/// The Destructor is virtual, all significant breakpoint resolvers derive
/// from this class.
@@ -76,6 +76,7 @@ public:
void SetOffset(lldb::addr_t offset);
lldb::addr_t GetOffset() const { return m_offset; }
+ bool GetOffsetIsInsnCount() const { return m_offset_is_insn_count; }
/// In response to this method the resolver scans all the modules in the
/// breakpoint's target, and adds any new locations it finds.
@@ -220,6 +221,8 @@ private:
lldb::BreakpointWP m_breakpoint; // This is the breakpoint we add locations to.
lldb::addr_t m_offset; // A random offset the user asked us to add to any
// breakpoints we set.
+ bool m_offset_is_insn_count; // Use the offset as an instruction count
+ // instead of an address offset.
// Subclass identifier (for llvm isa/dyn_cast)
const unsigned char SubclassID;
diff --git a/lldb/include/lldb/Breakpoint/BreakpointResolverName.h b/lldb/include/lldb/Breakpoint/BreakpointResolverName.h
index c83814c..48b3eda 100644
--- a/lldb/include/lldb/Breakpoint/BreakpointResolverName.h
+++ b/lldb/include/lldb/Breakpoint/BreakpointResolverName.h
@@ -27,7 +27,7 @@ public:
lldb::FunctionNameType name_type_mask,
lldb::LanguageType language,
Breakpoint::MatchType type, lldb::addr_t offset,
- bool skip_prologue);
+ bool offset_is_insn_count, bool skip_prologue);
// This one takes an array of names. It is always MatchType = Exact.
BreakpointResolverName(const lldb::BreakpointSP &bkpt, const char *names[],
diff --git a/lldb/include/lldb/Core/Disassembler.h b/lldb/include/lldb/Core/Disassembler.h
index 21bacb1..50a5d87 100644
--- a/lldb/include/lldb/Core/Disassembler.h
+++ b/lldb/include/lldb/Core/Disassembler.h
@@ -291,6 +291,8 @@ public:
size_t GetSize() const;
+ size_t GetTotalByteSize() const;
+
uint32_t GetMaxOpcocdeByteSize() const;
lldb::InstructionSP GetInstructionAtIndex(size_t idx) const;
diff --git a/lldb/include/lldb/Protocol/MCP/Protocol.h b/lldb/include/lldb/Protocol/MCP/Protocol.h
index c43b068..6448416 100644
--- a/lldb/include/lldb/Protocol/MCP/Protocol.h
+++ b/lldb/include/lldb/Protocol/MCP/Protocol.h
@@ -21,7 +21,7 @@
namespace lldb_protocol::mcp {
-static llvm::StringLiteral kVersion = "2024-11-05";
+static llvm::StringLiteral kProtocolVersion = "2024-11-05";
/// A request that expects a response.
struct Request {
diff --git a/lldb/include/lldb/Protocol/MCP/Server.h b/lldb/include/lldb/Protocol/MCP/Server.h
new file mode 100644
index 0000000..2ac0588
--- /dev/null
+++ b/lldb/include/lldb/Protocol/MCP/Server.h
@@ -0,0 +1,70 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLDB_PROTOCOL_MCP_SERVER_H
+#define LLDB_PROTOCOL_MCP_SERVER_H
+
+#include "lldb/Protocol/MCP/Protocol.h"
+#include "lldb/Protocol/MCP/Resource.h"
+#include "lldb/Protocol/MCP/Tool.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/Error.h"
+#include <mutex>
+
+namespace lldb_protocol::mcp {
+
+class Server {
+public:
+ Server(std::string name, std::string version);
+ virtual ~Server() = default;
+
+ void AddTool(std::unique_ptr<Tool> tool);
+ void AddResourceProvider(std::unique_ptr<ResourceProvider> resource_provider);
+
+protected:
+ virtual Capabilities GetCapabilities() = 0;
+
+ using RequestHandler =
+ std::function<llvm::Expected<Response>(const Request &)>;
+ using NotificationHandler = std::function<void(const Notification &)>;
+
+ void AddRequestHandlers();
+
+ void AddRequestHandler(llvm::StringRef method, RequestHandler handler);
+ void AddNotificationHandler(llvm::StringRef method,
+ NotificationHandler handler);
+
+ llvm::Expected<std::optional<Message>> HandleData(llvm::StringRef data);
+
+ llvm::Expected<Response> Handle(Request request);
+ void Handle(Notification notification);
+
+ llvm::Expected<Response> InitializeHandler(const Request &);
+
+ llvm::Expected<Response> ToolsListHandler(const Request &);
+ llvm::Expected<Response> ToolsCallHandler(const Request &);
+
+ llvm::Expected<Response> ResourcesListHandler(const Request &);
+ llvm::Expected<Response> ResourcesReadHandler(const Request &);
+
+ std::mutex m_mutex;
+
+private:
+ const std::string m_name;
+ const std::string m_version;
+
+ llvm::StringMap<std::unique_ptr<Tool>> m_tools;
+ std::vector<std::unique_ptr<ResourceProvider>> m_resource_providers;
+
+ llvm::StringMap<RequestHandler> m_request_handlers;
+ llvm::StringMap<NotificationHandler> m_notification_handlers;
+};
+
+} // namespace lldb_protocol::mcp
+
+#endif
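A minimal sketch, not part of the patch, of how a consumer of this new shared Server might look; the class name and capability values are illustrative. A subclass supplies its capabilities and reuses the registration and dispatch machinery declared above (as ProtocolServerMCP does further down in this change).

  class ExampleServer : public lldb_protocol::mcp::Server {
  public:
    ExampleServer() : Server("example-server", "0.0.1") {
      // Install the initialize/tools/resources request handlers.
      AddRequestHandlers();
    }

  protected:
    lldb_protocol::mcp::Capabilities GetCapabilities() override {
      lldb_protocol::mcp::Capabilities capabilities;
      capabilities.tools.listChanged = true;
      return capabilities;
    }
  };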
diff --git a/lldb/include/lldb/Symbol/Symbol.h b/lldb/include/lldb/Symbol/Symbol.h
index 688c8a5..0674e56 100644
--- a/lldb/include/lldb/Symbol/Symbol.h
+++ b/lldb/include/lldb/Symbol/Symbol.h
@@ -167,7 +167,7 @@ public:
lldb::SymbolType GetType() const { return (lldb::SymbolType)m_type; }
- void SetType(lldb::SymbolType type) { m_type = (lldb::SymbolType)type; }
+ void SetType(lldb::SymbolType type) { m_type = type; }
const char *GetTypeAsString() const;
diff --git a/lldb/include/lldb/Target/Target.h b/lldb/include/lldb/Target/Target.h
index 7b23c8a..14a09f2 100644
--- a/lldb/include/lldb/Target/Target.h
+++ b/lldb/include/lldb/Target/Target.h
@@ -18,6 +18,7 @@
#include "lldb/Breakpoint/BreakpointList.h"
#include "lldb/Breakpoint/BreakpointName.h"
#include "lldb/Breakpoint/WatchpointList.h"
+#include "lldb/Core/Address.h"
#include "lldb/Core/Architecture.h"
#include "lldb/Core/Disassembler.h"
#include "lldb/Core/ModuleList.h"
@@ -723,7 +724,7 @@ public:
lldb::BreakpointSP CreateBreakpoint(lldb::addr_t load_addr, bool internal,
bool request_hardware);
- // Use this to create a breakpoint from a load address and a module file spec
+ // Use this to create a breakpoint from a file address and a module file spec
lldb::BreakpointSP CreateAddressInModuleBreakpoint(lldb::addr_t file_addr,
bool internal,
const FileSpec &file_spec,
@@ -752,8 +753,8 @@ public:
const FileSpecList *containingModules,
const FileSpecList *containingSourceFiles, const char *func_name,
lldb::FunctionNameType func_name_type_mask, lldb::LanguageType language,
- lldb::addr_t offset, LazyBool skip_prologue, bool internal,
- bool request_hardware);
+ lldb::addr_t offset, bool offset_is_insn_count, LazyBool skip_prologue,
+ bool internal, bool request_hardware);
lldb::BreakpointSP
CreateExceptionBreakpoint(enum lldb::LanguageType language, bool catch_bp,
@@ -1334,6 +1335,10 @@ public:
const lldb_private::RegisterFlags &flags,
uint32_t byte_size);
+ llvm::Expected<lldb::DisassemblerSP>
+ ReadInstructions(const Address &start_addr, uint32_t count,
+ const char *flavor_string = nullptr);
+
// Target Stop Hooks
class StopHook : public UserID {
public:
diff --git a/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/dap_server.py b/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/dap_server.py
index 0b09893..939be99 100644
--- a/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/dap_server.py
+++ b/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/dap_server.py
@@ -107,17 +107,23 @@ def dump_dap_log(log_file):
class Source(object):
def __init__(
- self, path: Optional[str] = None, source_reference: Optional[int] = None
+ self,
+ path: Optional[str] = None,
+ source_reference: Optional[int] = None,
+ raw_dict: Optional[dict[str, Any]] = None,
):
self._name = None
self._path = None
self._source_reference = None
+ self._raw_dict = None
if path is not None:
self._name = os.path.basename(path)
self._path = path
elif source_reference is not None:
self._source_reference = source_reference
+ elif raw_dict is not None:
+ self._raw_dict = raw_dict
else:
raise ValueError("Either path or source_reference must be provided")
@@ -125,6 +131,9 @@ class Source(object):
return f"Source(name={self.name}, path={self.path}), source_reference={self.source_reference})"
def as_dict(self):
+ if self._raw_dict is not None:
+ return self._raw_dict
+
source_dict = {}
if self._name is not None:
source_dict["name"] = self._name
@@ -135,6 +144,19 @@ class Source(object):
return source_dict
+class Breakpoint(object):
+ def __init__(self, obj):
+ self._breakpoint = obj
+
+ def is_verified(self):
+ """Check if the breakpoint is verified."""
+ return self._breakpoint.get("verified", False)
+
+ def source(self):
+ """Get the source of the breakpoint."""
+ return self._breakpoint.get("source", {})
+
+
class NotSupportedError(KeyError):
"""Raised if a feature is not supported due to its capabilities."""
@@ -170,7 +192,7 @@ class DebugCommunication(object):
self.initialized = False
self.frame_scopes = {}
self.init_commands = init_commands
- self.resolved_breakpoints = {}
+ self.resolved_breakpoints: dict[str, Breakpoint] = {}
@classmethod
def encode_content(cls, s: str) -> bytes:
@@ -326,8 +348,8 @@ class DebugCommunication(object):
def _update_verified_breakpoints(self, breakpoints: list[Event]):
for breakpoint in breakpoints:
if "id" in breakpoint:
- self.resolved_breakpoints[str(breakpoint["id"])] = breakpoint.get(
- "verified", False
+ self.resolved_breakpoints[str(breakpoint["id"])] = Breakpoint(
+ breakpoint
)
def send_packet(self, command_dict: Request, set_sequence=True):
@@ -484,7 +506,14 @@ class DebugCommunication(object):
if breakpoint_event is None:
break
- return [id for id in breakpoint_ids if id not in self.resolved_breakpoints]
+ return [
+ id
+ for id in breakpoint_ids
+ if (
+ id not in self.resolved_breakpoints
+ or not self.resolved_breakpoints[id].is_verified()
+ )
+ ]
def wait_for_exited(self, timeout: Optional[float] = None):
event_dict = self.wait_for_event("exited", timeout=timeout)
diff --git a/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/lldbdap_testcase.py b/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/lldbdap_testcase.py
index 1567462..c51b4b1 100644
--- a/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/lldbdap_testcase.py
+++ b/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/lldbdap_testcase.py
@@ -59,24 +59,22 @@ class DAPTestCaseBase(TestBase):
Each object in data is 1:1 mapping with the entry in lines.
It contains optional location/hitCondition/logMessage parameters.
"""
- response = self.dap_server.request_setBreakpoints(
- Source(source_path), lines, data
+ return self.set_source_breakpoints_from_source(
+ Source(path=source_path), lines, data, wait_for_resolve
)
- if response is None or not response["success"]:
- return []
- breakpoints = response["body"]["breakpoints"]
- breakpoint_ids = []
- for breakpoint in breakpoints:
- breakpoint_ids.append("%i" % (breakpoint["id"]))
- if wait_for_resolve:
- self.wait_for_breakpoints_to_resolve(breakpoint_ids)
- return breakpoint_ids
def set_source_breakpoints_assembly(
self, source_reference, lines, data=None, wait_for_resolve=True
):
+ return self.set_source_breakpoints_from_source(
+ Source(source_reference=source_reference), lines, data, wait_for_resolve
+ )
+
+ def set_source_breakpoints_from_source(
+ self, source: Source, lines, data=None, wait_for_resolve=True
+ ):
response = self.dap_server.request_setBreakpoints(
- Source(source_reference=source_reference),
+ source,
lines,
data,
)
diff --git a/lldb/source/API/SBTarget.cpp b/lldb/source/API/SBTarget.cpp
index f26f795..6aa41c5 100644
--- a/lldb/source/API/SBTarget.cpp
+++ b/lldb/source/API/SBTarget.cpp
@@ -766,16 +766,19 @@ SBBreakpoint SBTarget::BreakpointCreateByName(const char *symbol_name,
const bool hardware = false;
const LazyBool skip_prologue = eLazyBoolCalculate;
const lldb::addr_t offset = 0;
+ const bool offset_is_insn_count = false;
if (module_name && module_name[0]) {
FileSpecList module_spec_list;
module_spec_list.Append(FileSpec(module_name));
sb_bp = target_sp->CreateBreakpoint(
&module_spec_list, nullptr, symbol_name, eFunctionNameTypeAuto,
- eLanguageTypeUnknown, offset, skip_prologue, internal, hardware);
+ eLanguageTypeUnknown, offset, offset_is_insn_count, skip_prologue,
+ internal, hardware);
} else {
sb_bp = target_sp->CreateBreakpoint(
nullptr, nullptr, symbol_name, eFunctionNameTypeAuto,
- eLanguageTypeUnknown, offset, skip_prologue, internal, hardware);
+ eLanguageTypeUnknown, offset, offset_is_insn_count, skip_prologue,
+ internal, hardware);
}
}
@@ -811,6 +814,17 @@ lldb::SBBreakpoint SBTarget::BreakpointCreateByName(
const SBFileSpecList &comp_unit_list) {
LLDB_INSTRUMENT_VA(this, symbol_name, name_type_mask, symbol_language,
module_list, comp_unit_list);
+ return BreakpointCreateByName(symbol_name, name_type_mask, symbol_language, 0,
+ false, module_list, comp_unit_list);
+}
+
+lldb::SBBreakpoint SBTarget::BreakpointCreateByName(
+ const char *symbol_name, uint32_t name_type_mask,
+ LanguageType symbol_language, lldb::addr_t offset,
+ bool offset_is_insn_count, const SBFileSpecList &module_list,
+ const SBFileSpecList &comp_unit_list) {
+ LLDB_INSTRUMENT_VA(this, symbol_name, name_type_mask, symbol_language, offset,
+ offset_is_insn_count, module_list, comp_unit_list);
SBBreakpoint sb_bp;
if (TargetSP target_sp = GetSP();
@@ -821,7 +835,8 @@ lldb::SBBreakpoint SBTarget::BreakpointCreateByName(
std::lock_guard<std::recursive_mutex> guard(target_sp->GetAPIMutex());
FunctionNameType mask = static_cast<FunctionNameType>(name_type_mask);
sb_bp = target_sp->CreateBreakpoint(module_list.get(), comp_unit_list.get(),
- symbol_name, mask, symbol_language, 0,
+ symbol_name, mask, symbol_language,
+ offset, offset_is_insn_count,
skip_prologue, internal, hardware);
}
@@ -1955,29 +1970,10 @@ lldb::SBInstructionList SBTarget::ReadInstructions(lldb::SBAddress base_addr,
if (TargetSP target_sp = GetSP()) {
if (Address *addr_ptr = base_addr.get()) {
- DataBufferHeap data(
- target_sp->GetArchitecture().GetMaximumOpcodeByteSize() * count, 0);
- bool force_live_memory = true;
- lldb_private::Status error;
- lldb::addr_t load_addr = LLDB_INVALID_ADDRESS;
- const size_t bytes_read =
- target_sp->ReadMemory(*addr_ptr, data.GetBytes(), data.GetByteSize(),
- error, force_live_memory, &load_addr);
-
- const bool data_from_file = load_addr == LLDB_INVALID_ADDRESS;
- if (!flavor_string || flavor_string[0] == '\0') {
- // FIXME - we don't have the mechanism in place to do per-architecture
- // settings. But since we know that for now we only support flavors on
- // x86 & x86_64,
- const llvm::Triple::ArchType arch =
- target_sp->GetArchitecture().GetTriple().getArch();
- if (arch == llvm::Triple::x86 || arch == llvm::Triple::x86_64)
- flavor_string = target_sp->GetDisassemblyFlavor();
+ if (llvm::Expected<DisassemblerSP> disassembler =
+ target_sp->ReadInstructions(*addr_ptr, count, flavor_string)) {
+ sb_instructions.SetDisassembler(*disassembler);
}
- sb_instructions.SetDisassembler(Disassembler::DisassembleBytes(
- target_sp->GetArchitecture(), nullptr, flavor_string,
- target_sp->GetDisassemblyCPU(), target_sp->GetDisassemblyFeatures(),
- *addr_ptr, data.GetBytes(), bytes_read, count, data_from_file));
}
}
diff --git a/lldb/source/Breakpoint/BreakpointResolver.cpp b/lldb/source/Breakpoint/BreakpointResolver.cpp
index 91fdff4..4ac4050 100644
--- a/lldb/source/Breakpoint/BreakpointResolver.cpp
+++ b/lldb/source/Breakpoint/BreakpointResolver.cpp
@@ -42,9 +42,9 @@ const char *BreakpointResolver::g_ty_to_name[] = {"FileAndLine", "Address",
const char *BreakpointResolver::g_option_names[static_cast<uint32_t>(
BreakpointResolver::OptionNames::LastOptionName)] = {
- "AddressOffset", "Exact", "FileName", "Inlines", "Language",
- "LineNumber", "Column", "ModuleName", "NameMask", "Offset",
- "PythonClass", "Regex", "ScriptArgs", "SectionName", "SearchDepth",
+ "AddressOffset", "Exact", "FileName", "Inlines", "Language",
+ "LineNumber", "Column", "ModuleName", "NameMask", "Offset",
+ "PythonClass", "Regex", "ScriptArgs", "SectionName", "SearchDepth",
"SkipPrologue", "SymbolNames"};
const char *BreakpointResolver::ResolverTyToName(enum ResolverTy type) {
@@ -65,8 +65,10 @@ BreakpointResolver::NameToResolverTy(llvm::StringRef name) {
BreakpointResolver::BreakpointResolver(const BreakpointSP &bkpt,
const unsigned char resolverTy,
- lldb::addr_t offset)
- : m_breakpoint(bkpt), m_offset(offset), SubclassID(resolverTy) {}
+ lldb::addr_t offset,
+ bool offset_is_insn_count)
+ : m_breakpoint(bkpt), m_offset(offset),
+ m_offset_is_insn_count(offset_is_insn_count), SubclassID(resolverTy) {}
BreakpointResolver::~BreakpointResolver() = default;
@@ -364,7 +366,32 @@ void BreakpointResolver::AddLocation(SearchFilter &filter,
BreakpointLocationSP BreakpointResolver::AddLocation(Address loc_addr,
bool *new_location) {
- loc_addr.Slide(m_offset);
+ if (m_offset_is_insn_count) {
+ Target &target = GetBreakpoint()->GetTarget();
+ llvm::Expected<DisassemblerSP> expected_instructions =
+ target.ReadInstructions(loc_addr, m_offset);
+ if (!expected_instructions) {
+ LLDB_LOG_ERROR(GetLog(LLDBLog::Breakpoints),
+ expected_instructions.takeError(),
+ "error: Unable to read instructions at address 0x{0:x}",
+ loc_addr.GetLoadAddress(&target));
+ return BreakpointLocationSP();
+ }
+
+ const DisassemblerSP instructions = *expected_instructions;
+ if (!instructions ||
+ instructions->GetInstructionList().GetSize() != m_offset) {
+ LLDB_LOG(GetLog(LLDBLog::Breakpoints),
+ "error: Unable to read {0} instructions at address 0x{1:x}",
+ m_offset, loc_addr.GetLoadAddress(&target));
+ return BreakpointLocationSP();
+ }
+
+ loc_addr.Slide(instructions->GetInstructionList().GetTotalByteSize());
+ } else {
+ loc_addr.Slide(m_offset);
+ }
+
return GetBreakpoint()->AddLocation(loc_addr, new_location);
}
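The essence of the new branch above, as a simplified sketch: an instruction-count offset is turned into a byte offset by reading that many instructions at the location and sliding the address by their combined opcode size, using the new InstructionList::GetTotalByteSize().

  if (llvm::Expected<lldb::DisassemblerSP> insns =
          target.ReadInstructions(loc_addr, /*count=*/m_offset)) {
    if (*insns && (*insns)->GetInstructionList().GetSize() == m_offset)
      loc_addr.Slide((*insns)->GetInstructionList().GetTotalByteSize());
  } else
    llvm::consumeError(insns.takeError());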
diff --git a/lldb/source/Breakpoint/BreakpointResolverAddress.cpp b/lldb/source/Breakpoint/BreakpointResolverAddress.cpp
index 828647c..70360d9 100644
--- a/lldb/source/Breakpoint/BreakpointResolverAddress.cpp
+++ b/lldb/source/Breakpoint/BreakpointResolverAddress.cpp
@@ -133,6 +133,11 @@ Searcher::CallbackReturn BreakpointResolverAddress::SearchCallback(
Address tmp_address;
if (module_sp->ResolveFileAddress(m_addr.GetOffset(), tmp_address))
m_addr = tmp_address;
+ else
+ return Searcher::eCallbackReturnStop;
+ } else {
+ // If we didn't find the module, then we can't resolve the address.
+ return Searcher::eCallbackReturnStop;
}
}
diff --git a/lldb/source/Breakpoint/BreakpointResolverName.cpp b/lldb/source/Breakpoint/BreakpointResolverName.cpp
index 21024a4..6372595 100644
--- a/lldb/source/Breakpoint/BreakpointResolverName.cpp
+++ b/lldb/source/Breakpoint/BreakpointResolverName.cpp
@@ -24,11 +24,13 @@
using namespace lldb;
using namespace lldb_private;
-BreakpointResolverName::BreakpointResolverName(const BreakpointSP &bkpt,
- const char *name_cstr, FunctionNameType name_type_mask,
- LanguageType language, Breakpoint::MatchType type, lldb::addr_t offset,
+BreakpointResolverName::BreakpointResolverName(
+ const BreakpointSP &bkpt, const char *name_cstr,
+ FunctionNameType name_type_mask, LanguageType language,
+ Breakpoint::MatchType type, lldb::addr_t offset, bool offset_is_insn_count,
bool skip_prologue)
- : BreakpointResolver(bkpt, BreakpointResolver::NameResolver, offset),
+ : BreakpointResolver(bkpt, BreakpointResolver::NameResolver, offset,
+ offset_is_insn_count),
m_match_type(type), m_language(language), m_skip_prologue(skip_prologue) {
if (m_match_type == Breakpoint::Regexp) {
m_regex = RegularExpression(name_cstr);
@@ -81,7 +83,7 @@ BreakpointResolverName::BreakpointResolverName(const BreakpointSP &bkpt,
BreakpointResolverName::BreakpointResolverName(
const BreakpointResolverName &rhs)
: BreakpointResolver(rhs.GetBreakpoint(), BreakpointResolver::NameResolver,
- rhs.GetOffset()),
+ rhs.GetOffset(), rhs.GetOffsetIsInsnCount()),
m_lookups(rhs.m_lookups), m_class_name(rhs.m_class_name),
m_regex(rhs.m_regex), m_match_type(rhs.m_match_type),
m_language(rhs.m_language), m_skip_prologue(rhs.m_skip_prologue) {}
@@ -177,7 +179,8 @@ BreakpointResolverSP BreakpointResolverName::CreateFromStructuredData(
std::shared_ptr<BreakpointResolverName> resolver_sp =
std::make_shared<BreakpointResolverName>(
nullptr, names[0].c_str(), name_masks[0], language,
- Breakpoint::MatchType::Exact, offset, skip_prologue);
+ Breakpoint::MatchType::Exact, offset,
+ /*offset_is_insn_count = */ false, skip_prologue);
for (size_t i = 1; i < num_elem; i++) {
resolver_sp->AddNameLookup(ConstString(names[i]), name_masks[i]);
}
diff --git a/lldb/source/Core/DemangledNameInfo.cpp b/lldb/source/Core/DemangledNameInfo.cpp
index 00227f0..76f8987 100644
--- a/lldb/source/Core/DemangledNameInfo.cpp
+++ b/lldb/source/Core/DemangledNameInfo.cpp
@@ -111,6 +111,11 @@ void TrackingOutputBuffer::finalizeEnd() {
if (NameInfo.ScopeRange.first > NameInfo.ScopeRange.second)
NameInfo.ScopeRange.second = NameInfo.ScopeRange.first;
NameInfo.BasenameRange.first = NameInfo.ScopeRange.second;
+
+ // We call anything past the FunctionEncoding the "suffix".
+ // In practice this would be nodes like `DotSuffix` that wrap
+ // a FunctionEncoding.
+ NameInfo.SuffixRange.first = getCurrentPosition();
}
ScopedOverride<unsigned> TrackingOutputBuffer::enterFunctionTypePrinting() {
@@ -138,6 +143,9 @@ void TrackingOutputBuffer::printLeft(const Node &N) {
default:
OutputBuffer::printLeft(N);
}
+
+ // Keep updating suffix until we reach the end.
+ NameInfo.SuffixRange.second = getCurrentPosition();
}
void TrackingOutputBuffer::printRight(const Node &N) {
@@ -151,6 +159,9 @@ void TrackingOutputBuffer::printRight(const Node &N) {
default:
OutputBuffer::printRight(N);
}
+
+ // Keep updating suffix until we reach the end.
+ NameInfo.SuffixRange.second = getCurrentPosition();
}
void TrackingOutputBuffer::printLeftImpl(const FunctionType &N) {
diff --git a/lldb/source/Core/Disassembler.cpp b/lldb/source/Core/Disassembler.cpp
index 925de2a..e0a7d69 100644
--- a/lldb/source/Core/Disassembler.cpp
+++ b/lldb/source/Core/Disassembler.cpp
@@ -1016,6 +1016,16 @@ uint32_t InstructionList::GetMaxOpcocdeByteSize() const {
return max_inst_size;
}
+size_t InstructionList::GetTotalByteSize() const {
+ size_t total_byte_size = 0;
+ collection::const_iterator pos, end;
+ for (pos = m_instructions.begin(), end = m_instructions.end(); pos != end;
+ ++pos) {
+ total_byte_size += (*pos)->GetOpcode().GetByteSize();
+ }
+ return total_byte_size;
+}
+
InstructionSP InstructionList::GetInstructionAtIndex(size_t idx) const {
InstructionSP inst_sp;
if (idx < m_instructions.size())
diff --git a/lldb/source/Core/DynamicLoader.cpp b/lldb/source/Core/DynamicLoader.cpp
index 4be9f3e..7580b15 100644
--- a/lldb/source/Core/DynamicLoader.cpp
+++ b/lldb/source/Core/DynamicLoader.cpp
@@ -211,7 +211,7 @@ ModuleSP DynamicLoader::LoadBinaryWithUUIDAndAddress(
if (uuid.IsValid())
prog_str << uuid.GetAsString();
if (value_is_offset == 0 && value != LLDB_INVALID_ADDRESS) {
- prog_str << "at 0x";
+ prog_str << " at 0x";
prog_str.PutHex64(value);
}
diff --git a/lldb/source/Core/Mangled.cpp b/lldb/source/Core/Mangled.cpp
index 3663f43..ce4db4e 100644
--- a/lldb/source/Core/Mangled.cpp
+++ b/lldb/source/Core/Mangled.cpp
@@ -172,9 +172,6 @@ GetItaniumDemangledStr(const char *M) {
TrackingOutputBuffer OB(demangled_cstr, demangled_size);
demangled_cstr = ipd.finishDemangle(&OB);
- // TODO: we should set the SuffixRange inside the TrackingOutputBuffer.
- OB.NameInfo.SuffixRange.first = OB.NameInfo.QualifiersRange.second;
- OB.NameInfo.SuffixRange.second = std::string_view(OB).size();
info = std::move(OB.NameInfo);
assert(demangled_cstr &&
diff --git a/lldb/source/Expression/IRExecutionUnit.cpp b/lldb/source/Expression/IRExecutionUnit.cpp
index e7a26d3..d557084 100644
--- a/lldb/source/Expression/IRExecutionUnit.cpp
+++ b/lldb/source/Expression/IRExecutionUnit.cpp
@@ -799,7 +799,7 @@ ResolveFunctionCallLabel(const FunctionCallLabel &label,
auto sc_or_err = symbol_file->ResolveFunctionCallLabel(label);
if (!sc_or_err)
return llvm::joinErrors(
- llvm::createStringError("failed to resolve function by UID"),
+ llvm::createStringError("failed to resolve function by UID:"),
sc_or_err.takeError());
SymbolContextList sc_list;
diff --git a/lldb/source/Plugins/DynamicLoader/Darwin-Kernel/DynamicLoaderDarwinKernel.cpp b/lldb/source/Plugins/DynamicLoader/Darwin-Kernel/DynamicLoaderDarwinKernel.cpp
index fe9f5d0..1d210ea 100644
--- a/lldb/source/Plugins/DynamicLoader/Darwin-Kernel/DynamicLoaderDarwinKernel.cpp
+++ b/lldb/source/Plugins/DynamicLoader/Darwin-Kernel/DynamicLoaderDarwinKernel.cpp
@@ -1561,7 +1561,8 @@ void DynamicLoaderDarwinKernel::SetNotificationBreakpointIfNeeded() {
.CreateBreakpoint(&module_spec_list, nullptr,
"OSKextLoadedKextSummariesUpdated",
eFunctionNameTypeFull, eLanguageTypeUnknown, 0,
- skip_prologue, internal_bp, hardware)
+ /*offset_is_insn_count = */ false, skip_prologue,
+ internal_bp, hardware)
.get();
bp->SetCallback(DynamicLoaderDarwinKernel::BreakpointHitCallback, this,
diff --git a/lldb/source/Plugins/DynamicLoader/MacOSX-DYLD/DynamicLoaderMacOS.cpp b/lldb/source/Plugins/DynamicLoader/MacOSX-DYLD/DynamicLoaderMacOS.cpp
index 08bef49..efb9ccc 100644
--- a/lldb/source/Plugins/DynamicLoader/MacOSX-DYLD/DynamicLoaderMacOS.cpp
+++ b/lldb/source/Plugins/DynamicLoader/MacOSX-DYLD/DynamicLoaderMacOS.cpp
@@ -530,7 +530,7 @@ bool DynamicLoaderMacOS::SetNotificationBreakpoint() {
m_process->GetTarget()
.CreateBreakpoint(&dyld_filelist, source_files,
"lldb_image_notifier", eFunctionNameTypeFull,
- eLanguageTypeUnknown, 0, skip_prologue,
+ eLanguageTypeUnknown, 0, false, skip_prologue,
internal, hardware)
.get();
breakpoint->SetCallback(DynamicLoaderMacOS::NotifyBreakpointHit, this,
@@ -546,8 +546,9 @@ bool DynamicLoaderMacOS::SetNotificationBreakpoint() {
m_process->GetTarget()
.CreateBreakpoint(&dyld_filelist, source_files,
"gdb_image_notifier", eFunctionNameTypeFull,
- eLanguageTypeUnknown, 0, skip_prologue,
- internal, hardware)
+ eLanguageTypeUnknown, 0,
+ /*offset_is_insn_count = */ false,
+ skip_prologue, internal, hardware)
.get();
breakpoint->SetCallback(DynamicLoaderMacOS::NotifyBreakpointHit, this,
true);
diff --git a/lldb/source/Plugins/ExpressionParser/Clang/ClangASTImporter.cpp b/lldb/source/Plugins/ExpressionParser/Clang/ClangASTImporter.cpp
index 2529e78..624eea9 100644
--- a/lldb/source/Plugins/ExpressionParser/Clang/ClangASTImporter.cpp
+++ b/lldb/source/Plugins/ExpressionParser/Clang/ClangASTImporter.cpp
@@ -123,6 +123,12 @@ private:
decl->setDeclContext(decl->getASTContext().getTranslationUnitDecl());
decl->setLexicalDeclContext(decl->getASTContext().getTranslationUnitDecl());
+ // Changing the DeclContext might change the linkage. For example, if the
+ // entity was previously declared inside a function, it will not be
+ // external, but changing the declaration context to the TU will make it
+ // external. Make sure this will recompute the linkage if it was computed
+ // before.
+ decl->invalidateCachedLinkage();
}
bool ChainPassesThrough(
@@ -320,7 +326,8 @@ CompilerType ClangASTImporter::DeportType(TypeSystemClang &dst,
DeclContextOverride decl_context_override;
if (auto *t = ClangUtil::GetQualType(src_type)->getAs<TagType>())
- decl_context_override.OverrideAllDeclsFromContainingFunction(t->getDecl());
+ decl_context_override.OverrideAllDeclsFromContainingFunction(
+ t->getOriginalDecl());
CompleteTagDeclsScope complete_scope(*this, &dst.getASTContext(),
&src_ctxt->getASTContext());
@@ -377,8 +384,7 @@ bool ClangASTImporter::CanImport(const CompilerType &type) {
} break;
case clang::Type::Enum: {
- clang::EnumDecl *enum_decl =
- llvm::cast<clang::EnumType>(qual_type)->getDecl();
+ auto *enum_decl = llvm::cast<clang::EnumType>(qual_type)->getOriginalDecl();
if (enum_decl) {
if (GetDeclOrigin(enum_decl).Valid())
return true;
@@ -414,12 +420,6 @@ bool ClangASTImporter::CanImport(const CompilerType &type) {
->getDeducedType()
.getAsOpaquePtr()));
- case clang::Type::Elaborated:
- return CanImport(CompilerType(type.GetTypeSystem(),
- llvm::cast<clang::ElaboratedType>(qual_type)
- ->getNamedType()
- .getAsOpaquePtr()));
-
case clang::Type::Paren:
return CanImport(CompilerType(
type.GetTypeSystem(),
@@ -452,7 +452,7 @@ bool ClangASTImporter::Import(const CompilerType &type) {
case clang::Type::Enum: {
clang::EnumDecl *enum_decl =
- llvm::cast<clang::EnumType>(qual_type)->getDecl();
+ llvm::cast<clang::EnumType>(qual_type)->getOriginalDecl();
if (enum_decl) {
if (GetDeclOrigin(enum_decl).Valid())
return CompleteAndFetchChildren(qual_type);
@@ -488,12 +488,6 @@ bool ClangASTImporter::Import(const CompilerType &type) {
->getDeducedType()
.getAsOpaquePtr()));
- case clang::Type::Elaborated:
- return Import(CompilerType(type.GetTypeSystem(),
- llvm::cast<clang::ElaboratedType>(qual_type)
- ->getNamedType()
- .getAsOpaquePtr()));
-
case clang::Type::Paren:
return Import(CompilerType(
type.GetTypeSystem(),
@@ -597,7 +591,7 @@ bool ExtractBaseOffsets(const ASTRecordLayout &record_layout,
return false;
DeclFromUser<RecordDecl> origin_base_record(
- origin_base_record_type->getDecl());
+ origin_base_record_type->getOriginalDecl());
if (origin_base_record.IsInvalid())
return false;
@@ -728,7 +722,8 @@ bool ClangASTImporter::importRecordLayoutFromOrigin(
QualType base_type = bi->getType();
const RecordType *base_record_type = base_type->getAs<RecordType>();
- DeclFromParser<RecordDecl> base_record(base_record_type->getDecl());
+ DeclFromParser<RecordDecl> base_record(
+ base_record_type->getOriginalDecl());
DeclFromParser<CXXRecordDecl> base_cxx_record =
DynCast<CXXRecordDecl>(base_record);
@@ -860,7 +855,7 @@ bool ClangASTImporter::CompleteAndFetchChildren(clang::QualType type) {
Log *log = GetLog(LLDBLog::Expressions);
if (const TagType *tag_type = type->getAs<TagType>()) {
- TagDecl *tag_decl = tag_type->getDecl();
+ TagDecl *tag_decl = tag_type->getOriginalDecl();
DeclOrigin decl_origin = GetDeclOrigin(tag_decl);
@@ -928,9 +923,9 @@ bool ClangASTImporter::RequireCompleteType(clang::QualType type) {
return false;
if (const TagType *tag_type = type->getAs<TagType>()) {
- TagDecl *tag_decl = tag_type->getDecl();
+ TagDecl *tag_decl = tag_type->getOriginalDecl();
- if (tag_decl->getDefinition() || tag_decl->isBeingDefined())
+ if (tag_decl->getDefinition())
return true;
return CompleteTagDecl(tag_decl);
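The recurring pattern in this and the following LLDB Clang plugin files, sketched for reference (use(...) is a placeholder, not a real function): rather than relying on TagType::getDecl() to hand back a definition, callers fetch the original declaration and resolve the definition explicitly when they need one.

  if (const clang::TagType *tag_type = qual_type->getAs<clang::TagType>()) {
    clang::TagDecl *tag_decl = tag_type->getOriginalDecl();
    if (clang::TagDecl *def = tag_decl->getDefinition())
      use(def); // placeholder for whatever the caller does with the definition
  }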
diff --git a/lldb/source/Plugins/ExpressionParser/Clang/ClangASTSource.cpp b/lldb/source/Plugins/ExpressionParser/Clang/ClangASTSource.cpp
index 4b52f6a..21a9307 100644
--- a/lldb/source/Plugins/ExpressionParser/Clang/ClangASTSource.cpp
+++ b/lldb/source/Plugins/ExpressionParser/Clang/ClangASTSource.cpp
@@ -223,7 +223,7 @@ TagDecl *ClangASTSource::FindCompleteType(const TagDecl *decl) {
continue;
TagDecl *candidate_tag_decl =
- const_cast<TagDecl *>(tag_type->getDecl());
+ tag_type->getOriginalDecl()->getDefinitionOrSelf();
if (TypeSystemClang::GetCompleteDecl(
&candidate_tag_decl->getASTContext(), candidate_tag_decl))
@@ -250,7 +250,8 @@ TagDecl *ClangASTSource::FindCompleteType(const TagDecl *decl) {
if (!tag_type)
continue;
- TagDecl *candidate_tag_decl = const_cast<TagDecl *>(tag_type->getDecl());
+ TagDecl *candidate_tag_decl =
+ tag_type->getOriginalDecl()->getDefinitionOrSelf();
if (TypeSystemClang::GetCompleteDecl(&candidate_tag_decl->getASTContext(),
candidate_tag_decl))
diff --git a/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionDeclMap.cpp b/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionDeclMap.cpp
index 214e260..8a68282 100644
--- a/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionDeclMap.cpp
+++ b/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionDeclMap.cpp
@@ -839,7 +839,7 @@ void ClangExpressionDeclMap::LookUpLldbClass(NameSearchContext &context) {
clang::CXXRecordDecl *class_decl = method_decl->getParent();
- QualType class_qual_type(class_decl->getTypeForDecl(), 0);
+ QualType class_qual_type = m_ast_context->getCanonicalTagType(class_decl);
TypeFromUser class_user_type(
class_qual_type.getAsOpaquePtr(),
@@ -1561,7 +1561,7 @@ ClangExpressionDeclMap::AddExpressionVariable(NameSearchContext &context,
if (const clang::Type *parser_type = parser_opaque_type.getTypePtr()) {
if (const TagType *tag_type = dyn_cast<TagType>(parser_type))
- CompleteType(tag_type->getDecl());
+ CompleteType(tag_type->getOriginalDecl()->getDefinitionOrSelf());
if (const ObjCObjectPointerType *objc_object_ptr_type =
dyn_cast<ObjCObjectPointerType>(parser_type))
CompleteType(objc_object_ptr_type->getInterfaceDecl());
diff --git a/lldb/source/Plugins/ExpressionParser/Clang/ClangPersistentVariables.cpp b/lldb/source/Plugins/ExpressionParser/Clang/ClangPersistentVariables.cpp
index aa0e6e3..319ff3fe8 100644
--- a/lldb/source/Plugins/ExpressionParser/Clang/ClangPersistentVariables.cpp
+++ b/lldb/source/Plugins/ExpressionParser/Clang/ClangPersistentVariables.cpp
@@ -79,9 +79,11 @@ ClangPersistentVariables::GetCompilerTypeFromPersistentDecl(
if (p.m_decl == nullptr)
return std::nullopt;
+ auto ctx = std::static_pointer_cast<TypeSystemClang>(p.m_context.lock());
if (clang::TypeDecl *tdecl = llvm::dyn_cast<clang::TypeDecl>(p.m_decl)) {
- opaque_compiler_type_t t = static_cast<opaque_compiler_type_t>(
- const_cast<clang::Type *>(tdecl->getTypeForDecl()));
+ opaque_compiler_type_t t =
+ static_cast<opaque_compiler_type_t>(const_cast<clang::Type *>(
+ ctx->getASTContext().getTypeDeclType(tdecl).getTypePtr()));
return CompilerType(p.m_context, t);
}
return std::nullopt;
diff --git a/lldb/source/Plugins/ExpressionParser/Clang/NameSearchContext.cpp b/lldb/source/Plugins/ExpressionParser/Clang/NameSearchContext.cpp
index 45ad4f1..6f57c18 100644
--- a/lldb/source/Plugins/ExpressionParser/Clang/NameSearchContext.cpp
+++ b/lldb/source/Plugins/ExpressionParser/Clang/NameSearchContext.cpp
@@ -153,7 +153,7 @@ NameSearchContext::AddTypeDecl(const CompilerType &clang_type) {
return (NamedDecl *)typedef_name_decl;
} else if (const TagType *tag_type = qual_type->getAs<TagType>()) {
- TagDecl *tag_decl = tag_type->getDecl();
+ TagDecl *tag_decl = tag_type->getOriginalDecl()->getDefinitionOrSelf();
m_decls.push_back(tag_decl);
diff --git a/lldb/source/Plugins/Language/CPlusPlus/CPlusPlusLanguage.cpp b/lldb/source/Plugins/Language/CPlusPlus/CPlusPlusLanguage.cpp
index 3bc8708..3118ff1 100644
--- a/lldb/source/Plugins/Language/CPlusPlus/CPlusPlusLanguage.cpp
+++ b/lldb/source/Plugins/Language/CPlusPlus/CPlusPlusLanguage.cpp
@@ -392,13 +392,16 @@ GetDemangledScope(const SymbolContext &sc) {
return CPlusPlusLanguage::GetDemangledScope(demangled_name, info);
}
-/// Handles anything printed after the FunctionEncoding ItaniumDemangle
-/// node. Most notably the DotSuffix node.
-///
-/// FIXME: the suffix should also have an associated
-/// CPlusPlusLanguage::GetDemangledFunctionSuffix
-/// once we start setting the `DemangledNameInfo::SuffixRange`
-/// from inside the `TrackingOutputBuffer`.
+llvm::Expected<llvm::StringRef>
+CPlusPlusLanguage::GetDemangledFunctionSuffix(llvm::StringRef demangled,
+ const DemangledNameInfo &info) {
+ if (!info.hasSuffix())
+ return llvm::createStringError("Suffix range for '%s' is invalid.",
+ demangled.data());
+
+ return demangled.slice(info.SuffixRange.first, info.SuffixRange.second);
+}
+
static llvm::Expected<llvm::StringRef>
GetDemangledFunctionSuffix(const SymbolContext &sc) {
auto info_or_err = GetAndValidateInfo(sc);
@@ -407,11 +410,7 @@ GetDemangledFunctionSuffix(const SymbolContext &sc) {
auto [demangled_name, info] = *info_or_err;
- if (!info.hasSuffix())
- return llvm::createStringError("Suffix range for '%s' is invalid.",
- demangled_name.data());
-
- return demangled_name.slice(info.SuffixRange.first, info.SuffixRange.second);
+ return CPlusPlusLanguage::GetDemangledFunctionSuffix(demangled_name, info);
}
llvm::Expected<llvm::StringRef>
@@ -2424,7 +2423,7 @@ bool CPlusPlusLanguage::HandleFrameFormatVariable(
return true;
}
case FormatEntity::Entry::Type::FunctionSuffix: {
- auto suffix_or_err = GetDemangledFunctionSuffix(sc);
+ auto suffix_or_err = ::GetDemangledFunctionSuffix(sc);
if (!suffix_or_err) {
LLDB_LOG_ERROR(
GetLog(LLDBLog::Language), suffix_or_err.takeError(),
diff --git a/lldb/source/Plugins/Language/CPlusPlus/CPlusPlusLanguage.h b/lldb/source/Plugins/Language/CPlusPlus/CPlusPlusLanguage.h
index 4f449f1..4a30299 100644
--- a/lldb/source/Plugins/Language/CPlusPlus/CPlusPlusLanguage.h
+++ b/lldb/source/Plugins/Language/CPlusPlus/CPlusPlusLanguage.h
@@ -138,6 +138,10 @@ public:
GetDemangledFunctionArguments(llvm::StringRef demangled,
const DemangledNameInfo &info);
+ static llvm::Expected<llvm::StringRef>
+ GetDemangledFunctionSuffix(llvm::StringRef demangled,
+ const DemangledNameInfo &info);
+
// Extract C++ context and identifier from a string using heuristic matching
// (as opposed to
// CPlusPlusLanguage::CxxMethodName which has to have a fully qualified C++
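A hedged usage sketch of the new static helper declared above (demangled and info would come from GetAndValidateInfo or a TrackingOutputBuffer; stream is a placeholder): it yields the text past the function encoding, e.g. a DotSuffix, or an error when no suffix range was recorded.

  if (llvm::Expected<llvm::StringRef> suffix =
          CPlusPlusLanguage::GetDemangledFunctionSuffix(demangled, info))
    stream << *suffix;
  else
    LLDB_LOG_ERROR(GetLog(LLDBLog::Language), suffix.takeError(),
                   "Failed to retrieve function suffix: {0}");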
diff --git a/lldb/source/Plugins/Language/ObjC/NSDictionary.cpp b/lldb/source/Plugins/Language/ObjC/NSDictionary.cpp
index ef1c2c8..24e8489 100644
--- a/lldb/source/Plugins/Language/ObjC/NSDictionary.cpp
+++ b/lldb/source/Plugins/Language/ObjC/NSDictionary.cpp
@@ -73,7 +73,8 @@ static CompilerType GetLLDBNSPairType(TargetSP target_sp) {
static constexpr llvm::StringLiteral g_lldb_autogen_nspair("__lldb_autogen_nspair");
- compiler_type = scratch_ts_sp->GetTypeForIdentifier<clang::CXXRecordDecl>(g_lldb_autogen_nspair);
+ compiler_type = scratch_ts_sp->GetTypeForIdentifier<clang::CXXRecordDecl>(
+ scratch_ts_sp->getASTContext(), g_lldb_autogen_nspair);
if (!compiler_type) {
compiler_type = scratch_ts_sp->CreateRecordType(
diff --git a/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV1.cpp b/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV1.cpp
index 24a7371..b1f2a66 100644
--- a/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV1.cpp
+++ b/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV1.cpp
@@ -102,7 +102,7 @@ AppleObjCRuntimeV1::CreateExceptionResolver(const BreakpointSP &bkpt,
resolver_sp = std::make_shared<BreakpointResolverName>(
bkpt, std::get<1>(GetExceptionThrowLocation()).AsCString(),
eFunctionNameTypeBase, eLanguageTypeUnknown, Breakpoint::Exact, 0,
- eLazyBoolNo);
+ /*offset_is_insn_count = */ false, eLazyBoolNo);
// FIXME: don't do catch yet.
return resolver_sp;
}
diff --git a/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV2.cpp b/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV2.cpp
index cca721e..9beb133 100644
--- a/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV2.cpp
+++ b/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV2.cpp
@@ -1163,7 +1163,7 @@ AppleObjCRuntimeV2::CreateExceptionResolver(const BreakpointSP &bkpt,
resolver_sp = std::make_shared<BreakpointResolverName>(
bkpt, std::get<1>(GetExceptionThrowLocation()).AsCString(),
eFunctionNameTypeBase, eLanguageTypeUnknown, Breakpoint::Exact, 0,
- eLazyBoolNo);
+ /*offset_is_insn_count = */ false, eLazyBoolNo);
// FIXME: We don't do catch breakpoints for ObjC yet.
// Should there be some way for the runtime to specify what it can do in this
// regard?
diff --git a/lldb/source/Plugins/LanguageRuntime/ObjC/GNUstepObjCRuntime/GNUstepObjCRuntime.cpp b/lldb/source/Plugins/LanguageRuntime/ObjC/GNUstepObjCRuntime/GNUstepObjCRuntime.cpp
index a4b3e26..8dc5f51 100644
--- a/lldb/source/Plugins/LanguageRuntime/ObjC/GNUstepObjCRuntime/GNUstepObjCRuntime.cpp
+++ b/lldb/source/Plugins/LanguageRuntime/ObjC/GNUstepObjCRuntime/GNUstepObjCRuntime.cpp
@@ -169,7 +169,8 @@ GNUstepObjCRuntime::CreateExceptionResolver(const BreakpointSP &bkpt,
if (throw_bp)
resolver_sp = std::make_shared<BreakpointResolverName>(
bkpt, "objc_exception_throw", eFunctionNameTypeBase,
- eLanguageTypeUnknown, Breakpoint::Exact, 0, eLazyBoolNo);
+ eLanguageTypeUnknown, Breakpoint::Exact, 0,
+ /*offset_is_insn_count = */ false, eLazyBoolNo);
return resolver_sp;
}
diff --git a/lldb/source/Plugins/Process/Utility/RegisterInfoPOSIX_arm64.h b/lldb/source/Plugins/Process/Utility/RegisterInfoPOSIX_arm64.h
index 021ec49..bca5bff 100644
--- a/lldb/source/Plugins/Process/Utility/RegisterInfoPOSIX_arm64.h
+++ b/lldb/source/Plugins/Process/Utility/RegisterInfoPOSIX_arm64.h
@@ -46,9 +46,9 @@ public:
// based on RegisterContextDarwin_arm64.h
// Pack this so there are no extra bytes, but align its start address to at
- // least 4 bytes to prevent alignment errors on Arm 32-bit.
+ // least 8 bytes to prevent alignment errors.
LLVM_PACKED_START
- struct alignas(4) GPR {
+ struct alignas(8) GPR {
uint64_t x[29]; // x0-x28
uint64_t fp; // x29
uint64_t lr; // x30
diff --git a/lldb/source/Plugins/Protocol/MCP/ProtocolServerMCP.cpp b/lldb/source/Plugins/Protocol/MCP/ProtocolServerMCP.cpp
index c9fe474..c359663 100644
--- a/lldb/source/Plugins/Protocol/MCP/ProtocolServerMCP.cpp
+++ b/lldb/source/Plugins/Protocol/MCP/ProtocolServerMCP.cpp
@@ -27,25 +27,12 @@ using namespace llvm;
LLDB_PLUGIN_DEFINE(ProtocolServerMCP)
static constexpr size_t kChunkSize = 1024;
+static constexpr llvm::StringLiteral kName = "lldb-mcp";
+static constexpr llvm::StringLiteral kVersion = "0.1.0";
-ProtocolServerMCP::ProtocolServerMCP() : ProtocolServer() {
- AddRequestHandler("initialize",
- std::bind(&ProtocolServerMCP::InitializeHandler, this,
- std::placeholders::_1));
-
- AddRequestHandler("tools/list",
- std::bind(&ProtocolServerMCP::ToolsListHandler, this,
- std::placeholders::_1));
- AddRequestHandler("tools/call",
- std::bind(&ProtocolServerMCP::ToolsCallHandler, this,
- std::placeholders::_1));
-
- AddRequestHandler("resources/list",
- std::bind(&ProtocolServerMCP::ResourcesListHandler, this,
- std::placeholders::_1));
- AddRequestHandler("resources/read",
- std::bind(&ProtocolServerMCP::ResourcesReadHandler, this,
- std::placeholders::_1));
+ProtocolServerMCP::ProtocolServerMCP()
+ : ProtocolServer(),
+ lldb_protocol::mcp::Server(std::string(kName), std::string(kVersion)) {
AddNotificationHandler("notifications/initialized",
[](const lldb_protocol::mcp::Notification &) {
LLDB_LOG(GetLog(LLDBLog::Host),
@@ -77,32 +64,6 @@ llvm::StringRef ProtocolServerMCP::GetPluginDescriptionStatic() {
return "MCP Server.";
}
-llvm::Expected<lldb_protocol::mcp::Response>
-ProtocolServerMCP::Handle(lldb_protocol::mcp::Request request) {
- auto it = m_request_handlers.find(request.method);
- if (it != m_request_handlers.end()) {
- llvm::Expected<lldb_protocol::mcp::Response> response = it->second(request);
- if (!response)
- return response;
- response->id = request.id;
- return *response;
- }
-
- return make_error<MCPError>(
- llvm::formatv("no handler for request: {0}", request.method).str());
-}
-
-void ProtocolServerMCP::Handle(lldb_protocol::mcp::Notification notification) {
- auto it = m_notification_handlers.find(notification.method);
- if (it != m_notification_handlers.end()) {
- it->second(notification);
- return;
- }
-
- LLDB_LOG(GetLog(LLDBLog::Host), "MPC notification: {0} ({1})",
- notification.method, notification.params);
-}
-
void ProtocolServerMCP::AcceptCallback(std::unique_ptr<Socket> socket) {
LLDB_LOG(GetLog(LLDBLog::Host), "New MCP client ({0}) connected",
m_clients.size() + 1);
@@ -157,7 +118,7 @@ llvm::Error ProtocolServerMCP::ReadCallback(Client &client) {
}
llvm::Error ProtocolServerMCP::Start(ProtocolServer::Connection connection) {
- std::lock_guard<std::mutex> guard(m_server_mutex);
+ std::lock_guard<std::mutex> guard(m_mutex);
if (m_running)
return llvm::createStringError("the MCP server is already running");
@@ -189,7 +150,7 @@ llvm::Error ProtocolServerMCP::Start(ProtocolServer::Connection connection) {
llvm::Error ProtocolServerMCP::Stop() {
{
- std::lock_guard<std::mutex> guard(m_server_mutex);
+ std::lock_guard<std::mutex> guard(m_mutex);
if (!m_running)
return createStringError("the MCP sever is not running");
m_running = false;
@@ -204,7 +165,7 @@ llvm::Error ProtocolServerMCP::Stop() {
m_loop_thread.join();
{
- std::lock_guard<std::mutex> guard(m_server_mutex);
+ std::lock_guard<std::mutex> guard(m_mutex);
m_listener.reset();
m_listen_handlers.clear();
m_clients.clear();
@@ -213,48 +174,6 @@ llvm::Error ProtocolServerMCP::Stop() {
return llvm::Error::success();
}
-llvm::Expected<std::optional<lldb_protocol::mcp::Message>>
-ProtocolServerMCP::HandleData(llvm::StringRef data) {
- auto message = llvm::json::parse<lldb_protocol::mcp::Message>(/*JSON=*/data);
- if (!message)
- return message.takeError();
-
- if (const lldb_protocol::mcp::Request *request =
- std::get_if<lldb_protocol::mcp::Request>(&(*message))) {
- llvm::Expected<lldb_protocol::mcp::Response> response = Handle(*request);
-
- // Handle failures by converting them into an Error message.
- if (!response) {
- lldb_protocol::mcp::Error protocol_error;
- llvm::handleAllErrors(
- response.takeError(),
- [&](const MCPError &err) { protocol_error = err.toProtcolError(); },
- [&](const llvm::ErrorInfoBase &err) {
- protocol_error.error.code = MCPError::kInternalError;
- protocol_error.error.message = err.message();
- });
- protocol_error.id = request->id;
- return protocol_error;
- }
-
- return *response;
- }
-
- if (const lldb_protocol::mcp::Notification *notification =
- std::get_if<lldb_protocol::mcp::Notification>(&(*message))) {
- Handle(*notification);
- return std::nullopt;
- }
-
- if (std::get_if<lldb_protocol::mcp::Error>(&(*message)))
- return llvm::createStringError("unexpected MCP message: error");
-
- if (std::get_if<lldb_protocol::mcp::Response>(&(*message)))
- return llvm::createStringError("unexpected MCP message: response");
-
- llvm_unreachable("all message types handled");
-}
-
lldb_protocol::mcp::Capabilities ProtocolServerMCP::GetCapabilities() {
lldb_protocol::mcp::Capabilities capabilities;
capabilities.tools.listChanged = true;
@@ -263,158 +182,3 @@ lldb_protocol::mcp::Capabilities ProtocolServerMCP::GetCapabilities() {
capabilities.resources.listChanged = false;
return capabilities;
}
-
-void ProtocolServerMCP::AddTool(std::unique_ptr<Tool> tool) {
- std::lock_guard<std::mutex> guard(m_server_mutex);
-
- if (!tool)
- return;
- m_tools[tool->GetName()] = std::move(tool);
-}
-
-void ProtocolServerMCP::AddResourceProvider(
- std::unique_ptr<ResourceProvider> resource_provider) {
- std::lock_guard<std::mutex> guard(m_server_mutex);
-
- if (!resource_provider)
- return;
- m_resource_providers.push_back(std::move(resource_provider));
-}
-
-void ProtocolServerMCP::AddRequestHandler(llvm::StringRef method,
- RequestHandler handler) {
- std::lock_guard<std::mutex> guard(m_server_mutex);
- m_request_handlers[method] = std::move(handler);
-}
-
-void ProtocolServerMCP::AddNotificationHandler(llvm::StringRef method,
- NotificationHandler handler) {
- std::lock_guard<std::mutex> guard(m_server_mutex);
- m_notification_handlers[method] = std::move(handler);
-}
-
-llvm::Expected<lldb_protocol::mcp::Response>
-ProtocolServerMCP::InitializeHandler(
- const lldb_protocol::mcp::Request &request) {
- lldb_protocol::mcp::Response response;
- response.result.emplace(llvm::json::Object{
- {"protocolVersion", lldb_protocol::mcp::kVersion},
- {"capabilities", GetCapabilities()},
- {"serverInfo",
- llvm::json::Object{{"name", kName}, {"version", kVersion}}}});
- return response;
-}
-
-llvm::Expected<lldb_protocol::mcp::Response>
-ProtocolServerMCP::ToolsListHandler(
- const lldb_protocol::mcp::Request &request) {
- lldb_protocol::mcp::Response response;
-
- llvm::json::Array tools;
- for (const auto &tool : m_tools)
- tools.emplace_back(toJSON(tool.second->GetDefinition()));
-
- response.result.emplace(llvm::json::Object{{"tools", std::move(tools)}});
-
- return response;
-}
-
-llvm::Expected<lldb_protocol::mcp::Response>
-ProtocolServerMCP::ToolsCallHandler(
- const lldb_protocol::mcp::Request &request) {
- lldb_protocol::mcp::Response response;
-
- if (!request.params)
- return llvm::createStringError("no tool parameters");
-
- const json::Object *param_obj = request.params->getAsObject();
- if (!param_obj)
- return llvm::createStringError("no tool parameters");
-
- const json::Value *name = param_obj->get("name");
- if (!name)
- return llvm::createStringError("no tool name");
-
- llvm::StringRef tool_name = name->getAsString().value_or("");
- if (tool_name.empty())
- return llvm::createStringError("no tool name");
-
- auto it = m_tools.find(tool_name);
- if (it == m_tools.end())
- return llvm::createStringError(llvm::formatv("no tool \"{0}\"", tool_name));
-
- lldb_protocol::mcp::ToolArguments tool_args;
- if (const json::Value *args = param_obj->get("arguments"))
- tool_args = *args;
-
- llvm::Expected<lldb_protocol::mcp::TextResult> text_result =
- it->second->Call(tool_args);
- if (!text_result)
- return text_result.takeError();
-
- response.result.emplace(toJSON(*text_result));
-
- return response;
-}
-
-llvm::Expected<lldb_protocol::mcp::Response>
-ProtocolServerMCP::ResourcesListHandler(
- const lldb_protocol::mcp::Request &request) {
- lldb_protocol::mcp::Response response;
-
- llvm::json::Array resources;
-
- std::lock_guard<std::mutex> guard(m_server_mutex);
- for (std::unique_ptr<ResourceProvider> &resource_provider_up :
- m_resource_providers) {
- for (const lldb_protocol::mcp::Resource &resource :
- resource_provider_up->GetResources())
- resources.push_back(resource);
- }
- response.result.emplace(
- llvm::json::Object{{"resources", std::move(resources)}});
-
- return response;
-}
-
-llvm::Expected<lldb_protocol::mcp::Response>
-ProtocolServerMCP::ResourcesReadHandler(
- const lldb_protocol::mcp::Request &request) {
- lldb_protocol::mcp::Response response;
-
- if (!request.params)
- return llvm::createStringError("no resource parameters");
-
- const json::Object *param_obj = request.params->getAsObject();
- if (!param_obj)
- return llvm::createStringError("no resource parameters");
-
- const json::Value *uri = param_obj->get("uri");
- if (!uri)
- return llvm::createStringError("no resource uri");
-
- llvm::StringRef uri_str = uri->getAsString().value_or("");
- if (uri_str.empty())
- return llvm::createStringError("no resource uri");
-
- std::lock_guard<std::mutex> guard(m_server_mutex);
- for (std::unique_ptr<ResourceProvider> &resource_provider_up :
- m_resource_providers) {
- llvm::Expected<lldb_protocol::mcp::ResourceResult> result =
- resource_provider_up->ReadResource(uri_str);
- if (result.errorIsA<UnsupportedURI>()) {
- llvm::consumeError(result.takeError());
- continue;
- }
- if (!result)
- return result.takeError();
-
- lldb_protocol::mcp::Response response;
- response.result.emplace(std::move(*result));
- return response;
- }
-
- return make_error<MCPError>(
- llvm::formatv("no resource handler for uri: {0}", uri_str).str(),
- MCPError::kResourceNotFound);
-}
diff --git a/lldb/source/Plugins/Protocol/MCP/ProtocolServerMCP.h b/lldb/source/Plugins/Protocol/MCP/ProtocolServerMCP.h
index 2ea9585..7fe909a 100644
--- a/lldb/source/Plugins/Protocol/MCP/ProtocolServerMCP.h
+++ b/lldb/source/Plugins/Protocol/MCP/ProtocolServerMCP.h
@@ -13,14 +13,13 @@
#include "lldb/Host/MainLoop.h"
#include "lldb/Host/Socket.h"
#include "lldb/Protocol/MCP/Protocol.h"
-#include "lldb/Protocol/MCP/Resource.h"
-#include "lldb/Protocol/MCP/Tool.h"
-#include "llvm/ADT/StringMap.h"
+#include "lldb/Protocol/MCP/Server.h"
#include <thread>
namespace lldb_private::mcp {
-class ProtocolServerMCP : public ProtocolServer {
+class ProtocolServerMCP : public ProtocolServer,
+ public lldb_protocol::mcp::Server {
public:
ProtocolServerMCP();
virtual ~ProtocolServerMCP() override;
@@ -40,48 +39,10 @@ public:
Socket *GetSocket() const override { return m_listener.get(); }
-protected:
- using RequestHandler =
- std::function<llvm::Expected<lldb_protocol::mcp::Response>(
- const lldb_protocol::mcp::Request &)>;
- using NotificationHandler =
- std::function<void(const lldb_protocol::mcp::Notification &)>;
-
- void AddTool(std::unique_ptr<lldb_protocol::mcp::Tool> tool);
- void AddResourceProvider(
- std::unique_ptr<lldb_protocol::mcp::ResourceProvider> resource_provider);
-
- void AddRequestHandler(llvm::StringRef method, RequestHandler handler);
- void AddNotificationHandler(llvm::StringRef method,
- NotificationHandler handler);
-
private:
void AcceptCallback(std::unique_ptr<Socket> socket);
- llvm::Expected<std::optional<lldb_protocol::mcp::Message>>
- HandleData(llvm::StringRef data);
-
- llvm::Expected<lldb_protocol::mcp::Response>
- Handle(lldb_protocol::mcp::Request request);
- void Handle(lldb_protocol::mcp::Notification notification);
-
- llvm::Expected<lldb_protocol::mcp::Response>
- InitializeHandler(const lldb_protocol::mcp::Request &);
-
- llvm::Expected<lldb_protocol::mcp::Response>
- ToolsListHandler(const lldb_protocol::mcp::Request &);
- llvm::Expected<lldb_protocol::mcp::Response>
- ToolsCallHandler(const lldb_protocol::mcp::Request &);
-
- llvm::Expected<lldb_protocol::mcp::Response>
- ResourcesListHandler(const lldb_protocol::mcp::Request &);
- llvm::Expected<lldb_protocol::mcp::Response>
- ResourcesReadHandler(const lldb_protocol::mcp::Request &);
-
- lldb_protocol::mcp::Capabilities GetCapabilities();
-
- llvm::StringLiteral kName = "lldb-mcp";
- llvm::StringLiteral kVersion = "0.1.0";
+ lldb_protocol::mcp::Capabilities GetCapabilities() override;
bool m_running = false;
@@ -98,14 +59,6 @@ private:
};
llvm::Error ReadCallback(Client &client);
std::vector<std::unique_ptr<Client>> m_clients;
-
- std::mutex m_server_mutex;
- llvm::StringMap<std::unique_ptr<lldb_protocol::mcp::Tool>> m_tools;
- std::vector<std::unique_ptr<lldb_protocol::mcp::ResourceProvider>>
- m_resource_providers;
-
- llvm::StringMap<RequestHandler> m_request_handlers;
- llvm::StringMap<NotificationHandler> m_notification_handlers;
};
} // namespace lldb_private::mcp
diff --git a/lldb/source/Plugins/RegisterTypeBuilder/RegisterTypeBuilderClang.cpp b/lldb/source/Plugins/RegisterTypeBuilder/RegisterTypeBuilderClang.cpp
index f19dc8b..eb9e013 100644
--- a/lldb/source/Plugins/RegisterTypeBuilder/RegisterTypeBuilderClang.cpp
+++ b/lldb/source/Plugins/RegisterTypeBuilder/RegisterTypeBuilderClang.cpp
@@ -47,7 +47,7 @@ CompilerType RegisterTypeBuilderClang::GetRegisterType(
// See if we have made this type before and can reuse it.
CompilerType fields_type =
type_system->GetTypeForIdentifier<clang::CXXRecordDecl>(
- register_type_name);
+ type_system->getASTContext(), register_type_name);
if (!fields_type) {
// In most ABI, a change of field type means a change in storage unit.
@@ -83,7 +83,7 @@ CompilerType RegisterTypeBuilderClang::GetRegisterType(
// may have built this one already.
CompilerType field_enum_type =
type_system->GetTypeForIdentifier<clang::EnumDecl>(
- enum_type_name);
+ type_system->getASTContext(), enum_type_name);
if (field_enum_type)
field_type = field_enum_type;
diff --git a/lldb/source/Plugins/ScriptInterpreter/Python/PythonDataObjects.cpp b/lldb/source/Plugins/ScriptInterpreter/Python/PythonDataObjects.cpp
index 27ac5432..a2a287a 100644
--- a/lldb/source/Plugins/ScriptInterpreter/Python/PythonDataObjects.cpp
+++ b/lldb/source/Plugins/ScriptInterpreter/Python/PythonDataObjects.cpp
@@ -405,15 +405,33 @@ Expected<llvm::StringRef> PythonString::AsUTF8() const {
if (!IsValid())
return nullDeref();
- Py_ssize_t size;
- const char *data;
+ // PyUnicode_AsUTF8AndSize caches the UTF-8 representation of the string in
+ // the Unicode object, which makes it more efficient and ties the lifetime of
+ // the data to the Python string. However, it was only added to the Stable API
+ // in Python 3.10. Older versions that want to use the Stable API must use
+ // PyUnicode_AsUTF8String in combination with ConstString.
+#if defined(Py_LIMITED_API) && (Py_LIMITED_API < 0x030a0000)
+ PyObject *py_bytes = PyUnicode_AsUTF8String(m_py_obj);
+ if (!py_bytes)
+ return exception();
+ auto release_py_str =
+ llvm::make_scope_exit([py_bytes] { Py_DECREF(py_bytes); });
+ Py_ssize_t size = PyBytes_Size(py_bytes);
+ const char *str = PyBytes_AsString(py_bytes);
+
+ if (!str)
+ return exception();
- data = PyUnicode_AsUTF8AndSize(m_py_obj, &size);
+ return ConstString(str, size).GetStringRef();
+#else
+ Py_ssize_t size;
+ const char *str = PyUnicode_AsUTF8AndSize(m_py_obj, &size);
- if (!data)
+ if (!str)
return exception();
- return llvm::StringRef(data, size);
+ return llvm::StringRef(str, size);
+#endif
}
size_t PythonString::GetSize() const {
diff --git a/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPython.cpp b/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPython.cpp
index 24d604f..5b97fcb 100644
--- a/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPython.cpp
+++ b/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPython.cpp
@@ -92,25 +92,6 @@ namespace {
struct InitializePythonRAII {
public:
InitializePythonRAII() {
- PyConfig config;
- PyConfig_InitPythonConfig(&config);
-
-#if LLDB_EMBED_PYTHON_HOME
- static std::string g_python_home = []() -> std::string {
- if (llvm::sys::path::is_absolute(LLDB_PYTHON_HOME))
- return LLDB_PYTHON_HOME;
-
- FileSpec spec = HostInfo::GetShlibDir();
- if (!spec)
- return {};
- spec.AppendPathComponent(LLDB_PYTHON_HOME);
- return spec.GetPath();
- }();
- if (!g_python_home.empty()) {
- PyConfig_SetBytesString(&config, &config.home, g_python_home.c_str());
- }
-#endif
-
// The table of built-in modules can only be extended before Python is
// initialized.
if (!Py_IsInitialized()) {
@@ -134,9 +115,30 @@ public:
PyImport_AppendInittab("_lldb", LLDBSwigPyInit);
}
+#if LLDB_EMBED_PYTHON_HOME
+ PyConfig config;
+ PyConfig_InitPythonConfig(&config);
+
+ static std::string g_python_home = []() -> std::string {
+ if (llvm::sys::path::is_absolute(LLDB_PYTHON_HOME))
+ return LLDB_PYTHON_HOME;
+
+ FileSpec spec = HostInfo::GetShlibDir();
+ if (!spec)
+ return {};
+ spec.AppendPathComponent(LLDB_PYTHON_HOME);
+ return spec.GetPath();
+ }();
+ if (!g_python_home.empty()) {
+ PyConfig_SetBytesString(&config, &config.home, g_python_home.c_str());
+ }
+
config.install_signal_handlers = 0;
Py_InitializeFromConfig(&config);
PyConfig_Clear(&config);
+#else
+ Py_InitializeEx(/*install_sigs=*/0);
+#endif
// The only case we should go further and acquire the GIL: it is unlocked.
PyGILState_STATE gil_state = PyGILState_Ensure();
diff --git a/lldb/source/Plugins/StructuredData/DarwinLog/StructuredDataDarwinLog.cpp b/lldb/source/Plugins/StructuredData/DarwinLog/StructuredDataDarwinLog.cpp
index 867f6a6..70093c9 100644
--- a/lldb/source/Plugins/StructuredData/DarwinLog/StructuredDataDarwinLog.cpp
+++ b/lldb/source/Plugins/StructuredData/DarwinLog/StructuredDataDarwinLog.cpp
@@ -1601,6 +1601,7 @@ void StructuredDataDarwinLog::AddInitCompletionHook(Process &process) {
const char *func_name = "_libtrace_init";
const lldb::addr_t offset = 0;
+ const bool offset_is_insn_count = false;
const LazyBool skip_prologue = eLazyBoolCalculate;
// This is an internal breakpoint - the user shouldn't see it.
const bool internal = true;
@@ -1608,7 +1609,8 @@ void StructuredDataDarwinLog::AddInitCompletionHook(Process &process) {
auto breakpoint_sp = target.CreateBreakpoint(
&module_spec_list, source_spec_list, func_name, eFunctionNameTypeFull,
- eLanguageTypeC, offset, skip_prologue, internal, hardware);
+ eLanguageTypeC, offset, offset_is_insn_count, skip_prologue, internal,
+ hardware);
if (!breakpoint_sp) {
// Huh? Bail here.
LLDB_LOGF(log,
diff --git a/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp b/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp
index 84b3da3..9958af2 100644
--- a/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp
+++ b/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp
@@ -2483,6 +2483,30 @@ bool SymbolFileDWARF::ResolveFunction(const DWARFDIE &orig_die,
return false;
}
+DWARFDIE
+SymbolFileDWARF::FindFunctionDefinition(const FunctionCallLabel &label) {
+ DWARFDIE definition;
+ Module::LookupInfo info(ConstString(label.lookup_name),
+ lldb::eFunctionNameTypeFull,
+ lldb::eLanguageTypeUnknown);
+
+ m_index->GetFunctions(info, *this, {}, [&](DWARFDIE entry) {
+ if (entry.GetAttributeValueAsUnsigned(llvm::dwarf::DW_AT_declaration, 0))
+ return IterationAction::Continue;
+
+ // We don't check whether the specification DIE for this function
+ // corresponds to the declaration DIE because the declaration might be in
+      // a type-unit but the definition in the compile-unit (and its
+      // specification would point to the declaration in the compile-unit). We
+      // rely on the mangled name within the module being enough to find the
+      // unique definition.
+ definition = entry;
+ return IterationAction::Stop;
+ });
+
+ return definition;
+}
+
llvm::Expected<SymbolContext>
SymbolFileDWARF::ResolveFunctionCallLabel(const FunctionCallLabel &label) {
std::lock_guard<std::recursive_mutex> guard(GetModuleMutex());
@@ -2495,37 +2519,19 @@ SymbolFileDWARF::ResolveFunctionCallLabel(const FunctionCallLabel &label) {
// Label was created using a declaration DIE. Need to fetch the definition
// to resolve the function call.
if (die.GetAttributeValueAsUnsigned(llvm::dwarf::DW_AT_declaration, 0)) {
- Module::LookupInfo info(ConstString(label.lookup_name),
- lldb::eFunctionNameTypeFull,
- lldb::eLanguageTypeUnknown);
-
- m_index->GetFunctions(info, *this, {}, [&](DWARFDIE entry) {
- if (entry.GetAttributeValueAsUnsigned(llvm::dwarf::DW_AT_declaration, 0))
- return IterationAction::Continue;
-
- // We don't check whether the specification DIE for this function
- // corresponds to the declaration DIE because the declaration might be in
- // a type-unit but the definition in the compile-unit (and it's
- // specifcation would point to the declaration in the compile-unit). We
- // rely on the mangled name within the module to be enough to find us the
- // unique definition.
- die = entry;
- return IterationAction::Stop;
- });
+ auto definition = FindFunctionDefinition(label);
+ if (!definition)
+ return llvm::createStringError("failed to find definition DIE");
- if (die.GetAttributeValueAsUnsigned(llvm::dwarf::DW_AT_declaration, 0))
- return llvm::createStringError(
- llvm::formatv("failed to find definition DIE for {0}", label));
+ die = std::move(definition);
}
SymbolContextList sc_list;
if (!ResolveFunction(die, /*include_inlines=*/false, sc_list))
- return llvm::createStringError(
- llvm::formatv("failed to resolve function for {0}", label));
+ return llvm::createStringError("failed to resolve function");
if (sc_list.IsEmpty())
- return llvm::createStringError(
- llvm::formatv("failed to find function for {0}", label));
+ return llvm::createStringError("failed to find function");
assert(sc_list.GetSize() == 1);
diff --git a/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.h b/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.h
index 5042d91..d7db8a3 100644
--- a/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.h
+++ b/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.h
@@ -373,6 +373,13 @@ public:
/// Returns the DWARFIndex for this symbol, if it exists.
DWARFIndex *getIndex() { return m_index.get(); }
+private:
+ /// Find the definition DIE for the specified \c label in this
+ /// SymbolFile.
+ ///
+ /// \returns A valid definition DIE on success.
+ DWARFDIE FindFunctionDefinition(const FunctionCallLabel &label);
+
protected:
SymbolFileDWARF(const SymbolFileDWARF &) = delete;
const SymbolFileDWARF &operator=(const SymbolFileDWARF &) = delete;
diff --git a/lldb/source/Plugins/SymbolFile/NativePDB/PdbAstBuilder.cpp b/lldb/source/Plugins/SymbolFile/NativePDB/PdbAstBuilder.cpp
index f01fba3..709281c 100644
--- a/lldb/source/Plugins/SymbolFile/NativePDB/PdbAstBuilder.cpp
+++ b/lldb/source/Plugins/SymbolFile/NativePDB/PdbAstBuilder.cpp
@@ -449,7 +449,7 @@ bool PdbAstBuilder::CompleteTagDecl(clang::TagDecl &tag) {
->GetIndex();
lldbassert(IsTagRecord(type_id, index.tpi()));
- clang::QualType tag_qt = m_clang.getASTContext().getTypeDeclType(&tag);
+ clang::QualType tag_qt = m_clang.getASTContext().getCanonicalTagType(&tag);
TypeSystemClang::SetHasExternalStorage(tag_qt.getAsOpaquePtr(), false);
TypeIndex tag_ti = type_id.index;
@@ -562,7 +562,8 @@ clang::QualType PdbAstBuilder::CreatePointerType(const PointerRecord &pointer) {
m_clang.getASTContext(), spelling));
}
return m_clang.getASTContext().getMemberPointerType(
- pointee_type, /*Qualifier=*/nullptr, class_type->getAsCXXRecordDecl());
+ pointee_type, /*Qualifier=*/std::nullopt,
+ class_type->getAsCXXRecordDecl());
}
clang::QualType pointer_type;
@@ -862,9 +863,9 @@ PdbAstBuilder::CreateFunctionDecl(PdbCompilandSymId func_id,
SymbolFileNativePDB *pdb = static_cast<SymbolFileNativePDB *>(
m_clang.GetSymbolFile()->GetBackingSymbolFile());
PdbIndex &index = pdb->GetIndex();
- clang::QualType parent_qt = llvm::cast<clang::TypeDecl>(parent)
- ->getTypeForDecl()
- ->getCanonicalTypeInternal();
+ clang::CanQualType parent_qt =
+ m_clang.getASTContext().getCanonicalTypeDeclType(
+ llvm::cast<clang::TypeDecl>(parent));
lldb::opaque_compiler_type_t parent_opaque_ty =
ToCompilerType(parent_qt).GetOpaqueQualType();
// FIXME: Remove this workaround.
diff --git a/lldb/source/Plugins/SymbolFile/PDB/PDBASTParser.cpp b/lldb/source/Plugins/SymbolFile/PDB/PDBASTParser.cpp
index 8b8eac6e..3a95588 100644
--- a/lldb/source/Plugins/SymbolFile/PDB/PDBASTParser.cpp
+++ b/lldb/source/Plugins/SymbolFile/PDB/PDBASTParser.cpp
@@ -407,8 +407,8 @@ lldb::TypeSP PDBASTParser::CreateLLDBTypeFromPDBType(const PDBSymbol &type) {
// symbols in PDB for types with const or volatile modifiers, but we need
// to create only one declaration for them all.
Type::ResolveState type_resolve_state;
- CompilerType clang_type =
- m_ast.GetTypeForIdentifier<clang::CXXRecordDecl>(name, decl_context);
+ CompilerType clang_type = m_ast.GetTypeForIdentifier<clang::CXXRecordDecl>(
+ m_ast.getASTContext(), name, decl_context);
if (!clang_type.IsValid()) {
auto access = GetAccessibilityForUdt(*udt);
@@ -479,8 +479,8 @@ lldb::TypeSP PDBASTParser::CreateLLDBTypeFromPDBType(const PDBSymbol &type) {
uint64_t bytes = enum_type->getLength();
// Check if such an enum already exists in the current context
- CompilerType ast_enum =
- m_ast.GetTypeForIdentifier<clang::EnumDecl>(name, decl_context);
+ CompilerType ast_enum = m_ast.GetTypeForIdentifier<clang::EnumDecl>(
+ m_ast.getASTContext(), name, decl_context);
if (!ast_enum.IsValid()) {
auto underlying_type_up = enum_type->getUnderlyingType();
if (!underlying_type_up)
@@ -557,7 +557,8 @@ lldb::TypeSP PDBASTParser::CreateLLDBTypeFromPDBType(const PDBSymbol &type) {
// Check if such a typedef already exists in the current context
CompilerType ast_typedef =
- m_ast.GetTypeForIdentifier<clang::TypedefNameDecl>(name, decl_ctx);
+ m_ast.GetTypeForIdentifier<clang::TypedefNameDecl>(
+ m_ast.getASTContext(), name, decl_ctx);
if (!ast_typedef.IsValid()) {
CompilerType target_ast_type = target_type->GetFullCompilerType();
diff --git a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
index 6addf4f..c4a917f 100644
--- a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
+++ b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
@@ -27,6 +27,7 @@
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Mangle.h"
+#include "clang/AST/QualTypeNames.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Type.h"
#include "clang/AST/VTableBuilder.h"
@@ -161,8 +162,7 @@ void addOverridesForMethod(clang::CXXMethodDecl *decl) {
auto find_overridden_methods =
[&decls, decl](const clang::CXXBaseSpecifier *specifier,
clang::CXXBasePath &path) {
- if (auto *base_record = llvm::dyn_cast<clang::CXXRecordDecl>(
- specifier->getType()->castAs<clang::RecordType>()->getDecl())) {
+ if (auto *base_record = specifier->getType()->getAsCXXRecordDecl()) {
clang::DeclarationName name = decl->getDeclName();
@@ -1179,7 +1179,7 @@ CompilerType TypeSystemClang::GetTypeForDecl(clang::NamedDecl *decl) {
}
CompilerType TypeSystemClang::GetTypeForDecl(TagDecl *decl) {
- return GetType(getASTContext().getTagDeclType(decl));
+ return GetType(getASTContext().getCanonicalTagType(decl));
}
CompilerType TypeSystemClang::GetTypeForDecl(ObjCInterfaceDecl *decl) {
@@ -1306,7 +1306,7 @@ CompilerType TypeSystemClang::CreateRecordType(
if (decl_ctx)
decl_ctx->addDecl(decl);
- return GetType(ast.getTagDeclType(decl));
+ return GetType(ast.getCanonicalTagType(decl));
}
namespace {
@@ -1674,7 +1674,6 @@ TypeSystemClang::CreateClassTemplateSpecializationDecl(
class_template_specialization_decl->setInstantiationOf(class_template_decl);
class_template_specialization_decl->setTemplateArgs(
TemplateArgumentList::CreateCopy(ast, args));
- ast.getTypeDeclType(class_template_specialization_decl, nullptr);
class_template_specialization_decl->setDeclName(
class_template_decl->getDeclName());
@@ -1696,7 +1695,7 @@ CompilerType TypeSystemClang::CreateClassTemplateSpecializationType(
ClassTemplateSpecializationDecl *class_template_specialization_decl) {
if (class_template_specialization_decl) {
ASTContext &ast = getASTContext();
- return GetType(ast.getTagDeclType(class_template_specialization_decl));
+ return GetType(ast.getCanonicalTagType(class_template_specialization_decl));
}
return CompilerType();
}
@@ -1792,9 +1791,7 @@ bool TypeSystemClang::RecordHasFields(const RecordDecl *record_decl) {
for (base_class = cxx_record_decl->bases_begin(),
base_class_end = cxx_record_decl->bases_end();
base_class != base_class_end; ++base_class) {
- const CXXRecordDecl *base_class_decl = cast<CXXRecordDecl>(
- base_class->getType()->getAs<RecordType>()->getDecl());
- if (RecordHasFields(base_class_decl))
+ if (RecordHasFields(base_class->getType()->getAsCXXRecordDecl()))
return true;
}
}
@@ -2290,9 +2287,9 @@ CompilerType TypeSystemClang::CreateStructForIdentifier(
&type_fields,
bool packed) {
CompilerType type;
- if (!type_name.empty() &&
- (type = GetTypeForIdentifier<clang::CXXRecordDecl>(type_name))
- .IsValid()) {
+ if (!type_name.empty() && (type = GetTypeForIdentifier<clang::CXXRecordDecl>(
+ getASTContext(), type_name))
+ .IsValid()) {
lldbassert(0 && "Trying to create a type for an existing name");
return type;
}
@@ -2316,7 +2313,9 @@ CompilerType TypeSystemClang::GetOrCreateStructForIdentifier(
&type_fields,
bool packed) {
CompilerType type;
- if ((type = GetTypeForIdentifier<clang::CXXRecordDecl>(type_name)).IsValid())
+ if ((type = GetTypeForIdentifier<clang::CXXRecordDecl>(getASTContext(),
+ type_name))
+ .IsValid())
return type;
return CreateStructForIdentifier(type_name, type_fields, packed);
@@ -2355,7 +2354,7 @@ CompilerType TypeSystemClang::CreateEnumerationType(
enum_decl->setAccess(AS_public); // TODO respect what's in the debug info
- return GetType(ast.getTagDeclType(enum_decl));
+ return GetType(ast.getCanonicalTagType(enum_decl));
}
CompilerType TypeSystemClang::GetIntTypeFromBitSize(size_t bit_size,
@@ -2471,7 +2470,7 @@ bool TypeSystemClang::GetCompleteDecl(clang::ASTContext *ast,
ast_source->CompleteType(tag_decl);
- return !tag_decl->getTypeForDecl()->isIncompleteType();
+ return !ast->getCanonicalTagType(tag_decl)->isIncompleteType();
} else if (clang::ObjCInterfaceDecl *objc_interface_decl =
llvm::dyn_cast<clang::ObjCInterfaceDecl>(decl)) {
if (objc_interface_decl->getDefinition())
@@ -2575,7 +2574,6 @@ RemoveWrappingTypes(QualType type, ArrayRef<clang::Type::TypeClass> mask = {}) {
break;
case clang::Type::Auto:
case clang::Type::Decltype:
- case clang::Type::Elaborated:
case clang::Type::Paren:
case clang::Type::SubstTemplateTypeParm:
case clang::Type::TemplateSpecialization:
@@ -2607,10 +2605,11 @@ TypeSystemClang::GetDeclContextForType(clang::QualType type) {
return GetDeclContextForType(
llvm::cast<clang::ObjCObjectPointerType>(qual_type.getTypePtr())
->getPointeeType());
- case clang::Type::Record:
- return llvm::cast<clang::RecordType>(qual_type)->getDecl();
case clang::Type::Enum:
- return llvm::cast<clang::EnumType>(qual_type)->getDecl();
+ case clang::Type::Record:
+ return llvm::cast<clang::TagType>(qual_type)
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf();
default:
break;
}
@@ -2790,7 +2789,7 @@ static bool GetCompleteQualType(clang::ASTContext *ast,
if (ast->getTargetInfo().getCXXABI().isMicrosoft()) {
auto *MPT = qual_type.getTypePtr()->castAs<clang::MemberPointerType>();
if (auto *RD = MPT->getMostRecentCXXRecordDecl())
- GetCompleteRecordType(ast, QualType(RD->getTypeForDecl(), 0),
+ GetCompleteRecordType(ast, ast->getCanonicalTagType(RD),
allow_completion);
return !qual_type.getTypePtr()->isIncompleteType();
@@ -2859,7 +2858,8 @@ bool TypeSystemClang::IsAnonymousType(lldb::opaque_compiler_type_t type) {
if (const clang::RecordType *record_type =
llvm::dyn_cast_or_null<clang::RecordType>(
qual_type.getTypePtrOrNull())) {
- if (const clang::RecordDecl *record_decl = record_type->getDecl()) {
+ if (const clang::RecordDecl *record_decl =
+ record_type->getOriginalDecl()) {
return record_decl->isAnonymousStructOrUnion();
}
}
@@ -3099,8 +3099,8 @@ TypeSystemClang::IsHomogeneousAggregate(lldb::opaque_compiler_type_t type,
const clang::RecordType *record_type =
llvm::cast<clang::RecordType>(qual_type.getTypePtr());
if (record_type) {
- const clang::RecordDecl *record_decl = record_type->getDecl();
- if (record_decl) {
+ if (const clang::RecordDecl *record_decl =
+ record_type->getOriginalDecl()->getDefinition()) {
// We are looking for a structure that contains only floating point
// types
clang::RecordDecl::field_iterator field_pos,
@@ -3280,7 +3280,10 @@ bool TypeSystemClang::IsEnumerationType(lldb::opaque_compiler_type_t type,
GetCanonicalQualType(type)->getCanonicalTypeInternal());
if (enum_type) {
- IsIntegerType(enum_type->getDecl()->getIntegerType().getAsOpaquePtr(),
+ IsIntegerType(enum_type->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->getIntegerType()
+ .getAsOpaquePtr(),
is_signed);
return true;
}
@@ -3505,8 +3508,7 @@ bool TypeSystemClang::IsDefined(lldb::opaque_compiler_type_t type) {
const clang::TagType *tag_type =
llvm::dyn_cast<clang::TagType>(qual_type.getTypePtr());
if (tag_type) {
- clang::TagDecl *tag_decl = tag_type->getDecl();
- if (tag_decl)
+ if (clang::TagDecl *tag_decl = tag_type->getOriginalDecl()->getDefinition())
return tag_decl->isCompleteDefinition();
return false;
} else {
@@ -3565,21 +3567,14 @@ bool TypeSystemClang::IsPolymorphicClass(lldb::opaque_compiler_type_t type) {
switch (type_class) {
case clang::Type::Record:
if (GetCompleteType(type)) {
- const clang::RecordType *record_type =
- llvm::cast<clang::RecordType>(qual_type.getTypePtr());
- const clang::RecordDecl *record_decl = record_type->getDecl();
- if (record_decl) {
- const clang::CXXRecordDecl *cxx_record_decl =
- llvm::dyn_cast<clang::CXXRecordDecl>(record_decl);
- if (cxx_record_decl) {
- // We can't just call is isPolymorphic() here because that just
- // means the current class has virtual functions, it doesn't check
- // if any inherited classes have virtual functions. The doc string
- // in SBType::IsPolymorphicClass() says it is looking for both
- // if the class has virtual methods or if any bases do, so this
- // should be more correct.
- return cxx_record_decl->isDynamicClass();
- }
+ if (const auto *cxx_record_decl = qual_type->getAsCXXRecordDecl()) {
+        // We can't just call isPolymorphic() here because that just
+        // means the current class has virtual functions; it doesn't check
+ // if any inherited classes have virtual functions. The doc string
+ // in SBType::IsPolymorphicClass() says it is looking for both
+ // if the class has virtual methods or if any bases do, so this
+ // should be more correct.
+ return cxx_record_decl->isDynamicClass();
}
}
break;
@@ -3766,7 +3761,7 @@ bool TypeSystemClang::IsBeingDefined(lldb::opaque_compiler_type_t type) {
clang::QualType qual_type(GetCanonicalQualType(type));
const clang::TagType *tag_type = llvm::dyn_cast<clang::TagType>(qual_type);
if (tag_type)
- return tag_type->isBeingDefined();
+ return tag_type->getOriginalDecl()->isEntityBeingDefined();
return false;
}
@@ -3974,7 +3969,8 @@ TypeSystemClang::GetTypeInfo(lldb::opaque_compiler_type_t type,
if (pointee_or_element_clang_type)
pointee_or_element_clang_type->SetCompilerType(
weak_from_this(), llvm::cast<clang::EnumType>(qual_type)
- ->getDecl()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf()
->getIntegerType()
.getAsOpaquePtr());
return eTypeIsEnumeration | eTypeHasValue;
@@ -4154,7 +4150,6 @@ TypeSystemClang::GetTypeClass(lldb::opaque_compiler_type_t type) {
case clang::Type::Auto:
case clang::Type::CountAttributed:
case clang::Type::Decltype:
- case clang::Type::Elaborated:
case clang::Type::Paren:
case clang::Type::TypeOf:
case clang::Type::TypeOfExpr:
@@ -4214,7 +4209,7 @@ TypeSystemClang::GetTypeClass(lldb::opaque_compiler_type_t type) {
case clang::Type::Record: {
const clang::RecordType *record_type =
llvm::cast<clang::RecordType>(qual_type.getTypePtr());
- const clang::RecordDecl *record_decl = record_type->getDecl();
+ const clang::RecordDecl *record_decl = record_type->getOriginalDecl();
if (record_decl->isUnion())
return lldb::eTypeClassUnion;
else if (record_decl->isStruct())
@@ -4412,17 +4407,10 @@ TypeSystemClang::GetNumMemberFunctions(lldb::opaque_compiler_type_t type) {
clang::QualType qual_type = RemoveWrappingTypes(GetCanonicalQualType(type));
switch (qual_type->getTypeClass()) {
case clang::Type::Record:
- if (GetCompleteQualType(&getASTContext(), qual_type)) {
- const clang::RecordType *record_type =
- llvm::cast<clang::RecordType>(qual_type.getTypePtr());
- const clang::RecordDecl *record_decl = record_type->getDecl();
- assert(record_decl);
- const clang::CXXRecordDecl *cxx_record_decl =
- llvm::dyn_cast<clang::CXXRecordDecl>(record_decl);
- if (cxx_record_decl)
+ if (GetCompleteQualType(&getASTContext(), qual_type))
+ if (const auto *cxx_record_decl = qual_type->getAsCXXRecordDecl())
num_functions = std::distance(cxx_record_decl->method_begin(),
cxx_record_decl->method_end());
- }
break;
case clang::Type::ObjCObjectPointer: {
@@ -4477,13 +4465,7 @@ TypeSystemClang::GetMemberFunctionAtIndex(lldb::opaque_compiler_type_t type,
switch (qual_type->getTypeClass()) {
case clang::Type::Record:
if (GetCompleteQualType(&getASTContext(), qual_type)) {
- const clang::RecordType *record_type =
- llvm::cast<clang::RecordType>(qual_type.getTypePtr());
- const clang::RecordDecl *record_decl = record_type->getDecl();
- assert(record_decl);
- const clang::CXXRecordDecl *cxx_record_decl =
- llvm::dyn_cast<clang::CXXRecordDecl>(record_decl);
- if (cxx_record_decl) {
+ if (const auto *cxx_record_decl = qual_type->getAsCXXRecordDecl()) {
auto method_iter = cxx_record_decl->method_begin();
auto method_end = cxx_record_decl->method_end();
if (idx <
@@ -4703,9 +4685,9 @@ CompilerType TypeSystemClang::CreateTypedef(
clang::TagDecl *tdecl = nullptr;
if (!qual_type.isNull()) {
if (const clang::RecordType *rt = qual_type->getAs<clang::RecordType>())
- tdecl = rt->getDecl();
+ tdecl = rt->getOriginalDecl();
if (const clang::EnumType *et = qual_type->getAs<clang::EnumType>())
- tdecl = et->getDecl();
+ tdecl = et->getOriginalDecl();
}
// Check whether this declaration is an anonymous struct, union, or enum,
@@ -4717,7 +4699,10 @@ CompilerType TypeSystemClang::CreateTypedef(
decl->setAccess(clang::AS_public); // TODO respect proper access specifier
// Get a uniqued clang::QualType for the typedef decl type
- return GetType(clang_ast.getTypedefType(decl));
+ NestedNameSpecifier Qualifier =
+ clang::TypeName::getFullyQualifiedDeclaredContext(clang_ast, decl);
+ return GetType(
+ clang_ast.getTypedefType(ElaboratedTypeKeyword::None, Qualifier, decl));
}
return CompilerType();
}
@@ -4869,7 +4854,6 @@ lldb::Encoding TypeSystemClang::GetEncoding(lldb::opaque_compiler_type_t type,
case clang::Type::Auto:
case clang::Type::CountAttributed:
case clang::Type::Decltype:
- case clang::Type::Elaborated:
case clang::Type::Paren:
case clang::Type::Typedef:
case clang::Type::TypeOf:
@@ -5171,7 +5155,6 @@ lldb::Format TypeSystemClang::GetFormat(lldb::opaque_compiler_type_t type) {
case clang::Type::Auto:
case clang::Type::CountAttributed:
case clang::Type::Decltype:
- case clang::Type::Elaborated:
case clang::Type::Paren:
case clang::Type::Typedef:
case clang::Type::TypeOf:
@@ -5380,8 +5363,8 @@ TypeSystemClang::GetNumChildren(lldb::opaque_compiler_type_t type,
if (GetCompleteQualType(&getASTContext(), qual_type)) {
const clang::RecordType *record_type =
llvm::cast<clang::RecordType>(qual_type.getTypePtr());
- const clang::RecordDecl *record_decl = record_type->getDecl();
- assert(record_decl);
+ const clang::RecordDecl *record_decl =
+ record_type->getOriginalDecl()->getDefinitionOrSelf();
const clang::CXXRecordDecl *cxx_record_decl =
llvm::dyn_cast<clang::CXXRecordDecl>(record_decl);
@@ -5577,7 +5560,8 @@ void TypeSystemClang::ForEachEnumerator(
const clang::EnumType *enum_type =
llvm::dyn_cast<clang::EnumType>(GetCanonicalQualType(type));
if (enum_type) {
- const clang::EnumDecl *enum_decl = enum_type->getDecl();
+ const clang::EnumDecl *enum_decl =
+ enum_type->getOriginalDecl()->getDefinitionOrSelf();
if (enum_decl) {
CompilerType integer_type = GetType(enum_decl->getIntegerType());
@@ -5608,7 +5592,8 @@ uint32_t TypeSystemClang::GetNumFields(lldb::opaque_compiler_type_t type) {
const clang::RecordType *record_type =
llvm::dyn_cast<clang::RecordType>(qual_type.getTypePtr());
if (record_type) {
- clang::RecordDecl *record_decl = record_type->getDecl();
+ clang::RecordDecl *record_decl =
+ record_type->getOriginalDecl()->getDefinition();
if (record_decl) {
count = std::distance(record_decl->field_begin(),
record_decl->field_end());
@@ -5722,7 +5707,8 @@ CompilerType TypeSystemClang::GetFieldAtIndex(lldb::opaque_compiler_type_t type,
if (GetCompleteType(type)) {
const clang::RecordType *record_type =
llvm::cast<clang::RecordType>(qual_type.getTypePtr());
- const clang::RecordDecl *record_decl = record_type->getDecl();
+ const clang::RecordDecl *record_decl =
+ record_type->getOriginalDecl()->getDefinitionOrSelf();
uint32_t field_idx = 0;
clang::RecordDecl::field_iterator field, field_end;
for (field = record_decl->field_begin(),
@@ -5908,7 +5894,7 @@ CompilerType TypeSystemClang::GetDirectBaseClassAtIndex(
llvm::cast<clang::CXXRecordDecl>(
base_class->getType()
->castAs<clang::RecordType>()
- ->getDecl());
+ ->getOriginalDecl());
if (base_class->isVirtual())
*bit_offset_ptr =
record_layout.getVBaseClassOffset(base_class_decl)
@@ -6003,7 +5989,7 @@ CompilerType TypeSystemClang::GetVirtualBaseClassAtIndex(
llvm::cast<clang::CXXRecordDecl>(
base_class->getType()
->castAs<clang::RecordType>()
- ->getDecl());
+ ->getOriginalDecl());
*bit_offset_ptr =
record_layout.getVBaseClassOffset(base_class_decl)
.getQuantity() *
@@ -6033,7 +6019,8 @@ TypeSystemClang::GetStaticFieldWithName(lldb::opaque_compiler_type_t type,
const clang::RecordType *record_type =
llvm::cast<clang::RecordType>(qual_type.getTypePtr());
- const clang::RecordDecl *record_decl = record_type->getDecl();
+ const clang::RecordDecl *record_decl =
+ record_type->getOriginalDecl()->getDefinitionOrSelf();
clang::DeclarationName decl_name(&getASTContext().Idents.get(name));
for (NamedDecl *decl : record_decl->lookup(decl_name)) {
@@ -6263,8 +6250,8 @@ llvm::Expected<CompilerType> TypeSystemClang::GetChildCompilerTypeAtIndex(
if (idx_is_valid && GetCompleteType(type)) {
const clang::RecordType *record_type =
llvm::cast<clang::RecordType>(parent_qual_type.getTypePtr());
- const clang::RecordDecl *record_decl = record_type->getDecl();
- assert(record_decl);
+ const clang::RecordDecl *record_decl =
+ record_type->getOriginalDecl()->getDefinitionOrSelf();
const clang::ASTRecordLayout &record_layout =
getASTContext().getASTRecordLayout(record_decl);
uint32_t child_idx = 0;
@@ -6283,7 +6270,10 @@ llvm::Expected<CompilerType> TypeSystemClang::GetChildCompilerTypeAtIndex(
// Skip empty base classes
if (omit_empty_base_classes) {
base_class_decl = llvm::cast<clang::CXXRecordDecl>(
- base_class->getType()->getAs<clang::RecordType>()->getDecl());
+ base_class->getType()
+ ->getAs<clang::RecordType>()
+ ->getOriginalDecl())
+ ->getDefinitionOrSelf();
if (!TypeSystemClang::RecordHasFields(base_class_decl))
continue;
}
@@ -6291,7 +6281,10 @@ llvm::Expected<CompilerType> TypeSystemClang::GetChildCompilerTypeAtIndex(
if (idx == child_idx) {
if (base_class_decl == nullptr)
base_class_decl = llvm::cast<clang::CXXRecordDecl>(
- base_class->getType()->getAs<clang::RecordType>()->getDecl());
+ base_class->getType()
+ ->getAs<clang::RecordType>()
+ ->getOriginalDecl())
+ ->getDefinitionOrSelf();
if (base_class->isVirtual()) {
bool handled = false;
@@ -6752,7 +6745,8 @@ size_t TypeSystemClang::GetIndexOfChildMemberWithName(
if (GetCompleteType(type)) {
const clang::RecordType *record_type =
llvm::cast<clang::RecordType>(qual_type.getTypePtr());
- const clang::RecordDecl *record_decl = record_type->getDecl();
+ const clang::RecordDecl *record_decl =
+ record_type->getOriginalDecl()->getDefinitionOrSelf();
assert(record_decl);
uint32_t child_idx = 0;
@@ -6817,10 +6811,10 @@ size_t TypeSystemClang::GetIndexOfChildMemberWithName(
return 0;
} else {
child_indexes.push_back(child_idx);
- parent_record_decl = llvm::cast<clang::RecordDecl>(
- elem.Base->getType()
- ->castAs<clang::RecordType>()
- ->getDecl());
+ parent_record_decl = elem.Base->getType()
+ ->castAs<clang::RecordType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf();
}
}
for (clang::DeclContext::lookup_iterator I = path->Decls, E;
@@ -6954,7 +6948,8 @@ TypeSystemClang::GetIndexOfChildWithName(lldb::opaque_compiler_type_t type,
if (GetCompleteType(type)) {
const clang::RecordType *record_type =
llvm::cast<clang::RecordType>(qual_type.getTypePtr());
- const clang::RecordDecl *record_decl = record_type->getDecl();
+ const clang::RecordDecl *record_decl =
+ record_type->getOriginalDecl()->getDefinitionOrSelf();
assert(record_decl);
uint32_t child_idx = 0;
@@ -6973,7 +6968,8 @@ TypeSystemClang::GetIndexOfChildWithName(lldb::opaque_compiler_type_t type,
llvm::cast<clang::CXXRecordDecl>(
base_class->getType()
->castAs<clang::RecordType>()
- ->getDecl());
+ ->getOriginalDecl())
+ ->getDefinitionOrSelf();
if (omit_empty_base_classes &&
!TypeSystemClang::RecordHasFields(base_class_decl))
continue;
@@ -7092,14 +7088,17 @@ TypeSystemClang::GetDirectNestedTypeWithName(lldb::opaque_compiler_type_t type,
return CompilerType();
const clang::RecordType *record_type =
llvm::cast<clang::RecordType>(qual_type.getTypePtr());
- const clang::RecordDecl *record_decl = record_type->getDecl();
+ const clang::RecordDecl *record_decl =
+ record_type->getOriginalDecl()->getDefinitionOrSelf();
clang::DeclarationName decl_name(&getASTContext().Idents.get(name));
for (NamedDecl *decl : record_decl->lookup(decl_name)) {
if (auto *tag_decl = dyn_cast<clang::TagDecl>(decl))
- return GetType(getASTContext().getTagDeclType(tag_decl));
+ return GetType(getASTContext().getCanonicalTagType(tag_decl));
if (auto *typedef_decl = dyn_cast<clang::TypedefNameDecl>(decl))
- return GetType(getASTContext().getTypedefType(typedef_decl));
+ return GetType(getASTContext().getTypedefType(
+ ElaboratedTypeKeyword::None, /*Qualifier=*/std::nullopt,
+ typedef_decl));
}
break;
}
@@ -7116,7 +7115,7 @@ bool TypeSystemClang::IsTemplateType(lldb::opaque_compiler_type_t type) {
const clang::Type *clang_type = ClangUtil::GetQualType(ct).getTypePtr();
if (auto *cxx_record_decl = dyn_cast<clang::TagType>(clang_type))
return isa<clang::ClassTemplateSpecializationDecl>(
- cxx_record_decl->getDecl());
+ cxx_record_decl->getOriginalDecl());
return false;
}
@@ -7319,7 +7318,7 @@ clang::EnumDecl *TypeSystemClang::GetAsEnumDecl(const CompilerType &type) {
const clang::EnumType *enutype =
llvm::dyn_cast<clang::EnumType>(ClangUtil::GetCanonicalQualType(type));
if (enutype)
- return enutype->getDecl();
+ return enutype->getOriginalDecl()->getDefinitionOrSelf();
return nullptr;
}
@@ -7327,7 +7326,7 @@ clang::RecordDecl *TypeSystemClang::GetAsRecordDecl(const CompilerType &type) {
const clang::RecordType *record_type =
llvm::dyn_cast<clang::RecordType>(ClangUtil::GetCanonicalQualType(type));
if (record_type)
- return record_type->getDecl();
+ return record_type->getOriginalDecl()->getDefinitionOrSelf();
return nullptr;
}
@@ -7409,7 +7408,7 @@ clang::FieldDecl *TypeSystemClang::AddFieldToRecordType(
if (const clang::TagType *TagT =
field->getType()->getAs<clang::TagType>()) {
if (clang::RecordDecl *Rec =
- llvm::dyn_cast<clang::RecordDecl>(TagT->getDecl()))
+ llvm::dyn_cast<clang::RecordDecl>(TagT->getOriginalDecl()))
if (!Rec->getDeclName()) {
Rec->setAnonymousStructOrUnion(true);
field->setImplicit();
@@ -7494,7 +7493,8 @@ void TypeSystemClang::BuildIndirectFields(const CompilerType &type) {
if (!field_record_type)
continue;
- clang::RecordDecl *field_record_decl = field_record_type->getDecl();
+ clang::RecordDecl *field_record_decl =
+ field_record_type->getOriginalDecl()->getDefinition();
if (!field_record_decl)
continue;
@@ -7636,7 +7636,8 @@ void TypeSystemClang::SetIntegerInitializerForVariable(
// If the variable is an enum type, take the underlying integer type as
// the type of the integer literal.
if (const EnumType *enum_type = qt->getAs<EnumType>()) {
- const EnumDecl *enum_decl = enum_type->getDecl();
+ const EnumDecl *enum_decl =
+ enum_type->getOriginalDecl()->getDefinitionOrSelf();
qt = enum_decl->getIntegerType();
}
// Bools are handled separately because the clang AST printer handles bools
@@ -8296,7 +8297,7 @@ bool TypeSystemClang::SetHasExternalStorage(lldb::opaque_compiler_type_t type,
case clang::Type::Enum: {
clang::EnumDecl *enum_decl =
- llvm::cast<clang::EnumType>(qual_type)->getDecl();
+ llvm::cast<clang::EnumType>(qual_type)->getOriginalDecl();
if (enum_decl) {
enum_decl->setHasExternalLexicalStorage(has_extern);
enum_decl->setHasExternalVisibleStorage(has_extern);
@@ -8334,7 +8335,7 @@ bool TypeSystemClang::StartTagDeclarationDefinition(const CompilerType &type) {
if (!qual_type.isNull()) {
const clang::TagType *tag_type = qual_type->getAs<clang::TagType>();
if (tag_type) {
- clang::TagDecl *tag_decl = tag_type->getDecl();
+ clang::TagDecl *tag_decl = tag_type->getOriginalDecl();
if (tag_decl) {
tag_decl->startDefinition();
return true;
@@ -8369,7 +8370,8 @@ bool TypeSystemClang::CompleteTagDeclarationDefinition(
// the definition.
const clang::TagType *tag_type = qual_type->getAs<clang::TagType>();
if (tag_type) {
- clang::TagDecl *tag_decl = tag_type->getDecl();
+ clang::TagDecl *tag_decl =
+ tag_type->getOriginalDecl()->getDefinitionOrSelf();
if (auto *cxx_record_decl = llvm::dyn_cast<CXXRecordDecl>(tag_decl)) {
// If we have a move constructor declared but no copy constructor we
@@ -8404,7 +8406,8 @@ bool TypeSystemClang::CompleteTagDeclarationDefinition(
if (!enutype)
return false;
- clang::EnumDecl *enum_decl = enutype->getDecl();
+ clang::EnumDecl *enum_decl =
+ enutype->getOriginalDecl()->getDefinitionOrSelf();
if (enum_decl->isCompleteDefinition())
return true;
@@ -8462,17 +8465,19 @@ clang::EnumConstantDecl *TypeSystemClang::AddEnumerationValueToEnumerationType(
clang::EnumConstantDecl *enumerator_decl =
clang::EnumConstantDecl::CreateDeserialized(getASTContext(),
GlobalDeclID());
- enumerator_decl->setDeclContext(enutype->getDecl());
+ clang::EnumDecl *enum_decl =
+ enutype->getOriginalDecl()->getDefinitionOrSelf();
+ enumerator_decl->setDeclContext(enum_decl);
if (name && name[0])
enumerator_decl->setDeclName(&getASTContext().Idents.get(name));
enumerator_decl->setType(clang::QualType(enutype, 0));
enumerator_decl->setInitVal(getASTContext(), value);
- SetMemberOwningModule(enumerator_decl, enutype->getDecl());
+ SetMemberOwningModule(enumerator_decl, enum_decl);
if (!enumerator_decl)
return nullptr;
- enutype->getDecl()->addDecl(enumerator_decl);
+ enum_decl->addDecl(enumerator_decl);
VerifyDecl(enumerator_decl);
return enumerator_decl;
@@ -8496,7 +8501,8 @@ CompilerType TypeSystemClang::GetEnumerationIntegerType(CompilerType type) {
if (!enum_type)
return CompilerType();
- return GetType(enum_type->getDecl()->getIntegerType());
+ return GetType(
+ enum_type->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType());
}
CompilerType
@@ -8509,7 +8515,7 @@ TypeSystemClang::CreateMemberPointerType(const CompilerType &type,
return CompilerType();
return ast->GetType(ast->getASTContext().getMemberPointerType(
ClangUtil::GetQualType(pointee_type),
- /*Qualifier=*/nullptr,
+ /*Qualifier=*/std::nullopt,
ClangUtil::GetQualType(type)->getAsCXXRecordDecl()));
}
return CompilerType();
@@ -8587,8 +8593,8 @@ static bool DumpEnumValue(const clang::QualType &qual_type, Stream &s,
uint32_t bitfield_bit_size) {
const clang::EnumType *enutype =
llvm::cast<clang::EnumType>(qual_type.getTypePtr());
- const clang::EnumDecl *enum_decl = enutype->getDecl();
- assert(enum_decl);
+ const clang::EnumDecl *enum_decl =
+ enutype->getOriginalDecl()->getDefinitionOrSelf();
lldb::offset_t offset = byte_offset;
bool qual_type_is_signed = qual_type->isSignedIntegerOrEnumerationType();
const uint64_t enum_svalue =
@@ -8694,15 +8700,7 @@ bool TypeSystemClang::DumpTypeValue(
} else {
clang::QualType qual_type(GetQualType(type));
- const clang::Type::TypeClass type_class = qual_type->getTypeClass();
-
- if (type_class == clang::Type::Elaborated) {
- qual_type = llvm::cast<clang::ElaboratedType>(qual_type)->getNamedType();
- return DumpTypeValue(qual_type.getAsOpaquePtr(), s, format, data, byte_offset, byte_size,
- bitfield_bit_size, bitfield_bit_offset, exe_scope);
- }
-
- switch (type_class) {
+ switch (qual_type->getTypeClass()) {
case clang::Type::Typedef: {
clang::QualType typedef_qual_type =
llvm::cast<clang::TypedefType>(qual_type)
@@ -8872,7 +8870,7 @@ void TypeSystemClang::DumpTypeDescription(lldb::opaque_compiler_type_t type,
GetCompleteType(type);
auto *record_type = llvm::cast<clang::RecordType>(qual_type.getTypePtr());
- const clang::RecordDecl *record_decl = record_type->getDecl();
+ const clang::RecordDecl *record_decl = record_type->getOriginalDecl();
if (level == eDescriptionLevelVerbose)
record_decl->dump(llvm_ostrm);
else {
@@ -8884,7 +8882,7 @@ void TypeSystemClang::DumpTypeDescription(lldb::opaque_compiler_type_t type,
default: {
if (auto *tag_type =
llvm::dyn_cast<clang::TagType>(qual_type.getTypePtr())) {
- if (clang::TagDecl *tag_decl = tag_type->getDecl()) {
+ if (clang::TagDecl *tag_decl = tag_type->getOriginalDecl()) {
if (level == eDescriptionLevelVerbose)
tag_decl->dump(llvm_ostrm);
else
@@ -8924,7 +8922,7 @@ void TypeSystemClang::DumpTypeName(const CompilerType &type) {
case clang::Type::Enum: {
clang::EnumDecl *enum_decl =
- llvm::cast<clang::EnumType>(qual_type)->getDecl();
+ llvm::cast<clang::EnumType>(qual_type)->getOriginalDecl();
if (enum_decl) {
printf("enum %s", enum_decl->getName().str().c_str());
}
@@ -8960,13 +8958,6 @@ void TypeSystemClang::DumpTypeName(const CompilerType &type) {
->getDeducedType()
.getAsOpaquePtr()));
- case clang::Type::Elaborated:
- printf("elaborated ");
- return DumpTypeName(CompilerType(
- type.GetTypeSystem(), llvm::cast<clang::ElaboratedType>(qual_type)
- ->getNamedType()
- .getAsOpaquePtr()));
-
case clang::Type::Paren:
printf("paren ");
return DumpTypeName(CompilerType(
@@ -9796,8 +9787,8 @@ bool TypeSystemClang::IsForcefullyCompleted(lldb::opaque_compiler_type_t type) {
const clang::RecordType *record_type =
llvm::dyn_cast<clang::RecordType>(qual_type.getTypePtr());
if (record_type) {
- const clang::RecordDecl *record_decl = record_type->getDecl();
- assert(record_decl);
+ const clang::RecordDecl *record_decl =
+ record_type->getOriginalDecl()->getDefinitionOrSelf();
if (std::optional<ClangASTMetadata> metadata = GetMetadata(record_decl))
return metadata->IsForcefullyCompleted();
}
diff --git a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.h b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.h
index 70d613d..709f8959 100644
--- a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.h
+++ b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.h
@@ -260,7 +260,7 @@ public:
template <typename RecordDeclType>
CompilerType
- GetTypeForIdentifier(llvm::StringRef type_name,
+ GetTypeForIdentifier(const clang::ASTContext &Ctx, llvm::StringRef type_name,
clang::DeclContext *decl_context = nullptr) {
CompilerType compiler_type;
if (type_name.empty())
@@ -278,11 +278,10 @@ public:
return compiler_type;
clang::NamedDecl *named_decl = *result.begin();
- if (const RecordDeclType *record_decl =
- llvm::dyn_cast<RecordDeclType>(named_decl))
+ if (const auto *type_decl = llvm::dyn_cast<clang::TypeDecl>(named_decl);
+ llvm::isa_and_nonnull<RecordDeclType>(type_decl))
compiler_type = CompilerType(
- weak_from_this(),
- clang::QualType(record_decl->getTypeForDecl(), 0).getAsOpaquePtr());
+ weak_from_this(), Ctx.getTypeDeclType(type_decl).getAsOpaquePtr());
return compiler_type;
}
diff --git a/lldb/source/Protocol/MCP/CMakeLists.txt b/lldb/source/Protocol/MCP/CMakeLists.txt
index f1b1098..a73e7e6 100644
--- a/lldb/source/Protocol/MCP/CMakeLists.txt
+++ b/lldb/source/Protocol/MCP/CMakeLists.txt
@@ -1,6 +1,7 @@
add_lldb_library(lldbProtocolMCP NO_PLUGIN_DEPENDENCIES
MCPError.cpp
Protocol.cpp
+ Server.cpp
Tool.cpp
LINK_COMPONENTS
diff --git a/lldb/source/Protocol/MCP/Server.cpp b/lldb/source/Protocol/MCP/Server.cpp
new file mode 100644
index 0000000..4ec127fe
--- /dev/null
+++ b/lldb/source/Protocol/MCP/Server.cpp
@@ -0,0 +1,236 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "lldb/Protocol/MCP/Server.h"
+#include "lldb/Protocol/MCP/MCPError.h"
+
+using namespace lldb_protocol::mcp;
+using namespace llvm;
+
+Server::Server(std::string name, std::string version)
+ : m_name(std::move(name)), m_version(std::move(version)) {
+ AddRequestHandlers();
+}
+
+void Server::AddRequestHandlers() {
+ AddRequestHandler("initialize", std::bind(&Server::InitializeHandler, this,
+ std::placeholders::_1));
+ AddRequestHandler("tools/list", std::bind(&Server::ToolsListHandler, this,
+ std::placeholders::_1));
+ AddRequestHandler("tools/call", std::bind(&Server::ToolsCallHandler, this,
+ std::placeholders::_1));
+ AddRequestHandler("resources/list", std::bind(&Server::ResourcesListHandler,
+ this, std::placeholders::_1));
+ AddRequestHandler("resources/read", std::bind(&Server::ResourcesReadHandler,
+ this, std::placeholders::_1));
+}
+
+llvm::Expected<Response> Server::Handle(Request request) {
+ auto it = m_request_handlers.find(request.method);
+ if (it != m_request_handlers.end()) {
+ llvm::Expected<Response> response = it->second(request);
+ if (!response)
+ return response;
+ response->id = request.id;
+ return *response;
+ }
+
+ return llvm::make_error<MCPError>(
+ llvm::formatv("no handler for request: {0}", request.method).str());
+}
+
+void Server::Handle(Notification notification) {
+ auto it = m_notification_handlers.find(notification.method);
+ if (it != m_notification_handlers.end()) {
+ it->second(notification);
+ return;
+ }
+}
+
+llvm::Expected<std::optional<Message>>
+Server::HandleData(llvm::StringRef data) {
+ auto message = llvm::json::parse<Message>(/*JSON=*/data);
+ if (!message)
+ return message.takeError();
+
+ if (const Request *request = std::get_if<Request>(&(*message))) {
+ llvm::Expected<Response> response = Handle(*request);
+
+ // Handle failures by converting them into an Error message.
+ if (!response) {
+ Error protocol_error;
+ llvm::handleAllErrors(
+ response.takeError(),
+ [&](const MCPError &err) { protocol_error = err.toProtcolError(); },
+ [&](const llvm::ErrorInfoBase &err) {
+ protocol_error.error.code = MCPError::kInternalError;
+ protocol_error.error.message = err.message();
+ });
+ protocol_error.id = request->id;
+ return protocol_error;
+ }
+
+ return *response;
+ }
+
+ if (const Notification *notification =
+ std::get_if<Notification>(&(*message))) {
+ Handle(*notification);
+ return std::nullopt;
+ }
+
+ if (std::get_if<Error>(&(*message)))
+ return llvm::createStringError("unexpected MCP message: error");
+
+ if (std::get_if<Response>(&(*message)))
+ return llvm::createStringError("unexpected MCP message: response");
+
+ llvm_unreachable("all message types handled");
+}
+
+void Server::AddTool(std::unique_ptr<Tool> tool) {
+ std::lock_guard<std::mutex> guard(m_mutex);
+
+ if (!tool)
+ return;
+ m_tools[tool->GetName()] = std::move(tool);
+}
+
+void Server::AddResourceProvider(
+ std::unique_ptr<ResourceProvider> resource_provider) {
+ std::lock_guard<std::mutex> guard(m_mutex);
+
+ if (!resource_provider)
+ return;
+ m_resource_providers.push_back(std::move(resource_provider));
+}
+
+void Server::AddRequestHandler(llvm::StringRef method, RequestHandler handler) {
+ std::lock_guard<std::mutex> guard(m_mutex);
+ m_request_handlers[method] = std::move(handler);
+}
+
+void Server::AddNotificationHandler(llvm::StringRef method,
+ NotificationHandler handler) {
+ std::lock_guard<std::mutex> guard(m_mutex);
+ m_notification_handlers[method] = std::move(handler);
+}
+
+llvm::Expected<Response> Server::InitializeHandler(const Request &request) {
+ Response response;
+ response.result.emplace(llvm::json::Object{
+ {"protocolVersion", mcp::kProtocolVersion},
+ {"capabilities", GetCapabilities()},
+ {"serverInfo",
+ llvm::json::Object{{"name", m_name}, {"version", m_version}}}});
+ return response;
+}
+
+llvm::Expected<Response> Server::ToolsListHandler(const Request &request) {
+ Response response;
+
+ llvm::json::Array tools;
+ for (const auto &tool : m_tools)
+ tools.emplace_back(toJSON(tool.second->GetDefinition()));
+
+ response.result.emplace(llvm::json::Object{{"tools", std::move(tools)}});
+
+ return response;
+}
+
+llvm::Expected<Response> Server::ToolsCallHandler(const Request &request) {
+ Response response;
+
+ if (!request.params)
+ return llvm::createStringError("no tool parameters");
+
+ const json::Object *param_obj = request.params->getAsObject();
+ if (!param_obj)
+ return llvm::createStringError("no tool parameters");
+
+ const json::Value *name = param_obj->get("name");
+ if (!name)
+ return llvm::createStringError("no tool name");
+
+ llvm::StringRef tool_name = name->getAsString().value_or("");
+ if (tool_name.empty())
+ return llvm::createStringError("no tool name");
+
+ auto it = m_tools.find(tool_name);
+ if (it == m_tools.end())
+ return llvm::createStringError(llvm::formatv("no tool \"{0}\"", tool_name));
+
+ ToolArguments tool_args;
+ if (const json::Value *args = param_obj->get("arguments"))
+ tool_args = *args;
+
+ llvm::Expected<TextResult> text_result = it->second->Call(tool_args);
+ if (!text_result)
+ return text_result.takeError();
+
+ response.result.emplace(toJSON(*text_result));
+
+ return response;
+}
+
+llvm::Expected<Response> Server::ResourcesListHandler(const Request &request) {
+ Response response;
+
+ llvm::json::Array resources;
+
+ std::lock_guard<std::mutex> guard(m_mutex);
+ for (std::unique_ptr<ResourceProvider> &resource_provider_up :
+ m_resource_providers) {
+ for (const Resource &resource : resource_provider_up->GetResources())
+ resources.push_back(resource);
+ }
+ response.result.emplace(
+ llvm::json::Object{{"resources", std::move(resources)}});
+
+ return response;
+}
+
+llvm::Expected<Response> Server::ResourcesReadHandler(const Request &request) {
+ Response response;
+
+ if (!request.params)
+ return llvm::createStringError("no resource parameters");
+
+ const json::Object *param_obj = request.params->getAsObject();
+ if (!param_obj)
+ return llvm::createStringError("no resource parameters");
+
+ const json::Value *uri = param_obj->get("uri");
+ if (!uri)
+ return llvm::createStringError("no resource uri");
+
+ llvm::StringRef uri_str = uri->getAsString().value_or("");
+ if (uri_str.empty())
+ return llvm::createStringError("no resource uri");
+
+ std::lock_guard<std::mutex> guard(m_mutex);
+ for (std::unique_ptr<ResourceProvider> &resource_provider_up :
+ m_resource_providers) {
+ llvm::Expected<ResourceResult> result =
+ resource_provider_up->ReadResource(uri_str);
+ if (result.errorIsA<UnsupportedURI>()) {
+ llvm::consumeError(result.takeError());
+ continue;
+ }
+ if (!result)
+ return result.takeError();
+
+ Response response;
+ response.result.emplace(std::move(*result));
+ return response;
+ }
+
+ return make_error<MCPError>(
+ llvm::formatv("no resource handler for uri: {0}", uri_str).str(),
+ MCPError::kResourceNotFound);
+}
diff --git a/lldb/source/Target/Target.cpp b/lldb/source/Target/Target.cpp
index 4f39f60..fa98c24 100644
--- a/lldb/source/Target/Target.cpp
+++ b/lldb/source/Target/Target.cpp
@@ -558,10 +558,11 @@ BreakpointSP Target::CreateBreakpoint(lldb::addr_t addr, bool internal,
BreakpointSP Target::CreateBreakpoint(const Address &addr, bool internal,
bool hardware) {
- SearchFilterSP filter_sp(
- new SearchFilterForUnconstrainedSearches(shared_from_this()));
- BreakpointResolverSP resolver_sp(
- new BreakpointResolverAddress(nullptr, addr));
+ SearchFilterSP filter_sp =
+ std::make_shared<SearchFilterForUnconstrainedSearches>(
+ shared_from_this());
+ BreakpointResolverSP resolver_sp =
+ std::make_shared<BreakpointResolverAddress>(nullptr, addr);
return CreateBreakpoint(filter_sp, resolver_sp, internal, hardware, false);
}
@@ -569,10 +570,12 @@ lldb::BreakpointSP
Target::CreateAddressInModuleBreakpoint(lldb::addr_t file_addr, bool internal,
const FileSpec &file_spec,
bool request_hardware) {
- SearchFilterSP filter_sp(
- new SearchFilterForUnconstrainedSearches(shared_from_this()));
- BreakpointResolverSP resolver_sp(new BreakpointResolverAddress(
- nullptr, file_addr, file_spec));
+ SearchFilterSP filter_sp =
+ std::make_shared<SearchFilterForUnconstrainedSearches>(
+ shared_from_this());
+ BreakpointResolverSP resolver_sp =
+ std::make_shared<BreakpointResolverAddress>(nullptr, file_addr,
+ file_spec);
return CreateBreakpoint(filter_sp, resolver_sp, internal, request_hardware,
false);
}
@@ -581,7 +584,8 @@ BreakpointSP Target::CreateBreakpoint(
const FileSpecList *containingModules,
const FileSpecList *containingSourceFiles, const char *func_name,
FunctionNameType func_name_type_mask, LanguageType language,
- lldb::addr_t offset, LazyBool skip_prologue, bool internal, bool hardware) {
+ lldb::addr_t offset, bool offset_is_insn_count, LazyBool skip_prologue,
+ bool internal, bool hardware) {
BreakpointSP bp_sp;
if (func_name) {
SearchFilterSP filter_sp(GetSearchFilterForModuleAndCUList(
@@ -594,7 +598,7 @@ BreakpointSP Target::CreateBreakpoint(
BreakpointResolverSP resolver_sp(new BreakpointResolverName(
nullptr, func_name, func_name_type_mask, language, Breakpoint::Exact,
- offset, skip_prologue));
+ offset, offset_is_insn_count, skip_prologue));
bp_sp = CreateBreakpoint(filter_sp, resolver_sp, internal, hardware, true);
}
return bp_sp;
@@ -2996,6 +3000,38 @@ lldb::addr_t Target::GetBreakableLoadAddress(lldb::addr_t addr) {
return arch_plugin ? arch_plugin->GetBreakableLoadAddress(addr, *this) : addr;
}
+llvm::Expected<lldb::DisassemblerSP>
+Target::ReadInstructions(const Address &start_addr, uint32_t count,
+ const char *flavor_string) {
+ DataBufferHeap data(GetArchitecture().GetMaximumOpcodeByteSize() * count, 0);
+ bool force_live_memory = true;
+ lldb_private::Status error;
+ lldb::addr_t load_addr = LLDB_INVALID_ADDRESS;
+ const size_t bytes_read =
+ ReadMemory(start_addr, data.GetBytes(), data.GetByteSize(), error,
+ force_live_memory, &load_addr);
+
+ if (error.Fail())
+ return llvm::createStringError(
+ error.AsCString("Target::ReadInstructions failed to read memory at %s"),
+ start_addr.GetLoadAddress(this));
+
+ const bool data_from_file = load_addr == LLDB_INVALID_ADDRESS;
+ if (!flavor_string || flavor_string[0] == '\0') {
+    // FIXME - we don't have the mechanism in place to do per-architecture
+    // settings. But since we know that for now we only support flavors on
+    // x86 & x86_64, only consult the flavor setting for those architectures.
+ const llvm::Triple::ArchType arch = GetArchitecture().GetTriple().getArch();
+ if (arch == llvm::Triple::x86 || arch == llvm::Triple::x86_64)
+ flavor_string = GetDisassemblyFlavor();
+ }
+
+ return Disassembler::DisassembleBytes(
+ GetArchitecture(), nullptr, flavor_string, GetDisassemblyCPU(),
+ GetDisassemblyFeatures(), start_addr, data.GetBytes(), bytes_read, count,
+ data_from_file);
+}
+
SourceManager &Target::GetSourceManager() {
if (!m_source_manager_up)
m_source_manager_up = std::make_unique<SourceManager>(shared_from_this());
diff --git a/lldb/test/API/functionalities/data-formatter/data-formatter-stl/generic/string/TestDataFormatterStdString.py b/lldb/test/API/functionalities/data-formatter/data-formatter-stl/generic/string/TestDataFormatterStdString.py
index 49c0c81..fec20bae 100644
--- a/lldb/test/API/functionalities/data-formatter/data-formatter-stl/generic/string/TestDataFormatterStdString.py
+++ b/lldb/test/API/functionalities/data-formatter/data-formatter-stl/generic/string/TestDataFormatterStdString.py
@@ -217,9 +217,6 @@ class StdStringDataFormatterTestCase(TestBase):
self.build(dictionary={"USE_LIBCPP": 1})
self.do_test_summary_unavailable()
- @expectedFailureAll(
- bugnumber="libstdc++ std::string summary provider doesn't output a user-friendly message for invalid strings."
- )
@add_test_categories(["libstdcxx"])
def test_unavailable_summary_libstdcxx(self):
self.build(dictionary={"USE_LIBSTDCPP": 1})
diff --git a/lldb/test/API/lang/cpp/expr-definition-in-dylib/TestExprDefinitionInDylib.py b/lldb/test/API/lang/cpp/expr-definition-in-dylib/TestExprDefinitionInDylib.py
index 02c34b3..70d7fd0 100644
--- a/lldb/test/API/lang/cpp/expr-definition-in-dylib/TestExprDefinitionInDylib.py
+++ b/lldb/test/API/lang/cpp/expr-definition-in-dylib/TestExprDefinitionInDylib.py
@@ -31,3 +31,10 @@ class ExprDefinitionInDylibTestCase(TestBase):
)
self.expect_expr("f.method()", result_value="-72", result_type="int")
+ self.expect_expr("Foo()", result_type="Foo")
+
+ # FIXME: mangled name lookup for ABI-tagged ctors fails because
+ # the debug-info AST doesn't have ABI-tag information.
+ self.expect(
+ "expr Bar()", error=True, substrs=["error: Couldn't look up symbols"]
+ )
diff --git a/lldb/test/API/lang/cpp/expr-definition-in-dylib/lib.cpp b/lldb/test/API/lang/cpp/expr-definition-in-dylib/lib.cpp
index ad148ce..1a08817 100644
--- a/lldb/test/API/lang/cpp/expr-definition-in-dylib/lib.cpp
+++ b/lldb/test/API/lang/cpp/expr-definition-in-dylib/lib.cpp
@@ -1,3 +1,13 @@
#include "lib.h"
+#include <cstdio>
+
int Foo::method() { return -72; }
+
+Foo::Foo() { std::puts(__func__); }
+
+Foo::~Foo() { std::puts(__func__); }
+
+Bar::Bar() { std::puts(__func__); }
+
+Bar::~Bar() { std::puts(__func__); }
diff --git a/lldb/test/API/lang/cpp/expr-definition-in-dylib/lib.h b/lldb/test/API/lang/cpp/expr-definition-in-dylib/lib.h
index 9568db2..5ec2279 100644
--- a/lldb/test/API/lang/cpp/expr-definition-in-dylib/lib.h
+++ b/lldb/test/API/lang/cpp/expr-definition-in-dylib/lib.h
@@ -3,6 +3,13 @@
struct Foo {
int method();
+ Foo();
+ ~Foo();
+};
+
+struct Bar {
+ [[gnu::abi_tag("Ctor")]] Bar();
+ [[gnu::abi_tag("Dtor")]] ~Bar();
};
#endif // LIB_H_IN
diff --git a/lldb/test/API/lang/cpp/expr-definition-in-dylib/main.cpp b/lldb/test/API/lang/cpp/expr-definition-in-dylib/main.cpp
index 2fddb2b..4d6bece 100644
--- a/lldb/test/API/lang/cpp/expr-definition-in-dylib/main.cpp
+++ b/lldb/test/API/lang/cpp/expr-definition-in-dylib/main.cpp
@@ -2,5 +2,6 @@
int main() {
Foo f;
+ Bar b;
return f.method();
}
diff --git a/lldb/test/API/tools/lldb-dap/breakpoint-assembly/TestDAP_breakpointAssembly.py b/lldb/test/API/tools/lldb-dap/breakpoint-assembly/TestDAP_breakpointAssembly.py
index 674bfe4..7552a77 100644
--- a/lldb/test/API/tools/lldb-dap/breakpoint-assembly/TestDAP_breakpointAssembly.py
+++ b/lldb/test/API/tools/lldb-dap/breakpoint-assembly/TestDAP_breakpointAssembly.py
@@ -83,3 +83,79 @@ class TestDAP_setBreakpointsAssembly(lldbdap_testcase.DAPTestCaseBase):
break_point["message"],
"Invalid sourceReference.",
)
+
+ @skipIfWindows
+ def test_persistent_assembly_breakpoint(self):
+ """Tests that assembly breakpoints persist across sessions"""
+ self.build()
+ program = self.getBuildArtifact("a.out")
+ self.create_debug_adapter()
+
+ # Run the first session and set a persistent assembly breakpoint
+ try:
+ self.dap_server.request_initialize()
+ self.dap_server.request_launch(program)
+
+ assembly_func_breakpoints = self.set_function_breakpoints(["assembly_func"])
+ self.continue_to_breakpoints(assembly_func_breakpoints)
+
+ assembly_func_frame = self.get_stackFrames()[0]
+ source_reference = assembly_func_frame["source"]["sourceReference"]
+
+ # Set an assembly breakpoint in the middle of the assembly function
+ persistent_breakpoint_line = 4
+ persistent_breakpoint_ids = self.set_source_breakpoints_assembly(
+ source_reference, [persistent_breakpoint_line]
+ )
+
+ self.assertEqual(
+ len(persistent_breakpoint_ids),
+ 1,
+ "Expected one assembly breakpoint to be set",
+ )
+
+ persistent_breakpoint_source = self.dap_server.resolved_breakpoints[
+ persistent_breakpoint_ids[0]
+ ].source()
+ self.assertIn(
+ "adapterData",
+ persistent_breakpoint_source,
+ "Expected assembly breakpoint to have persistent information",
+ )
+ self.assertIn(
+ "persistenceData",
+ persistent_breakpoint_source["adapterData"],
+ "Expected assembly breakpoint to have persistent information",
+ )
+
+ self.continue_to_breakpoints(persistent_breakpoint_ids)
+ finally:
+ self.dap_server.request_disconnect(terminateDebuggee=True)
+ self.dap_server.terminate()
+
+ # Restart the session and verify the breakpoint is still there
+ self.create_debug_adapter()
+ try:
+ self.dap_server.request_initialize()
+ self.dap_server.request_launch(program)
+ new_session_breakpoints_ids = self.set_source_breakpoints_from_source(
+ Source(raw_dict=persistent_breakpoint_source),
+ [persistent_breakpoint_line],
+ )
+
+ self.assertEqual(
+ len(new_session_breakpoints_ids),
+ 1,
+ "Expected one breakpoint to be set in the new session",
+ )
+
+ self.continue_to_breakpoints(new_session_breakpoints_ids)
+ current_line = self.get_stackFrames()[0]["line"]
+ self.assertEqual(
+ current_line,
+ persistent_breakpoint_line,
+ "Expected to hit the persistent assembly breakpoint at the same line",
+ )
+ finally:
+ self.dap_server.request_disconnect(terminateDebuggee=True)
+ self.dap_server.terminate()
diff --git a/lldb/test/API/tools/lldb-dap/completions/TestDAP_completions.py b/lldb/test/API/tools/lldb-dap/completions/TestDAP_completions.py
index 04897ac..954a3a4 100644
--- a/lldb/test/API/tools/lldb-dap/completions/TestDAP_completions.py
+++ b/lldb/test/API/tools/lldb-dap/completions/TestDAP_completions.py
@@ -189,7 +189,7 @@ class TestDAP_completions(lldbdap_testcase.DAPTestCaseBase):
self.dap_server.get_completions("str"),
[
{"text": "struct", "label": "struct"},
- {"text": "str1", "label": "str1 -- string &"},
+ {"text": "str1", "label": "str1 -- std::string &"},
],
)
diff --git a/lldb/test/API/tools/lldb-dap/memory/TestDAP_memory.py b/lldb/test/API/tools/lldb-dap/memory/TestDAP_memory.py
index 2c3c1fe..f51056d 100644
--- a/lldb/test/API/tools/lldb-dap/memory/TestDAP_memory.py
+++ b/lldb/test/API/tools/lldb-dap/memory/TestDAP_memory.py
@@ -126,6 +126,8 @@ class TestDAP_memory(lldbdap_testcase.DAPTestCaseBase):
self.continue_to_exit()
+ # Flaky on 32-bit Arm Linux.
+ @skipIf(oslist=["linux"], archs=["arm$"])
def test_writeMemory(self):
"""
Tests the 'writeMemory' request
diff --git a/lldb/test/Shell/SymbolFile/DWARF/x86/dwp-foreign-type-units.cpp b/lldb/test/Shell/SymbolFile/DWARF/x86/dwp-foreign-type-units.cpp
index 415b485..9251930 100644
--- a/lldb/test/Shell/SymbolFile/DWARF/x86/dwp-foreign-type-units.cpp
+++ b/lldb/test/Shell/SymbolFile/DWARF/x86/dwp-foreign-type-units.cpp
@@ -56,8 +56,8 @@
// DWPMAIN-NEXT: struct CustomType {
// DWPMAIN-NEXT: typedef int IntegerType;
// DWPMAIN-NEXT: typedef double FloatType;
-// DWPMAIN-NEXT: CustomType::IntegerType x;
-// DWPMAIN-NEXT: CustomType::FloatType y;
+// DWPMAIN-NEXT: IntegerType x;
+// DWPMAIN-NEXT: FloatType y;
// DWPMAIN-NEXT: }
// Next we check when we make the .dwp file with %t.foo.dwo first so it will
@@ -78,8 +78,8 @@
// DWPFOO-NEXT: struct CustomType {
// DWPFOO-NEXT: typedef unsigned int IntegerType;
// DWPFOO-NEXT: typedef float FloatType;
-// DWPFOO-NEXT: CustomType::IntegerType x;
-// DWPFOO-NEXT: CustomType::FloatType y;
+// DWPFOO-NEXT: IntegerType x;
+// DWPFOO-NEXT: FloatType y;
// DWPFOO-NEXT: }
struct CustomType {
diff --git a/lldb/test/Shell/SymbolFile/PDB/ast-restore.test b/lldb/test/Shell/SymbolFile/PDB/ast-restore.test
index a91364b..f127acd 100644
--- a/lldb/test/Shell/SymbolFile/PDB/ast-restore.test
+++ b/lldb/test/Shell/SymbolFile/PDB/ast-restore.test
@@ -3,13 +3,19 @@ RUN: %build --compiler=msvc --nodefaultlib --output=%t.exe %S/Inputs/AstRestoreT
RUN: lldb-test symbols -dump-ast %t.exe | FileCheck --check-prefix=ENUM %s
RUN: env LLDB_USE_NATIVE_PDB_READER=1 lldb-test symbols -dump-ast %t.exe | FileCheck --check-prefix=ENUM %s
RUN: lldb-test symbols -dump-ast %t.exe | FileCheck --check-prefix=GLOBAL %s
+RUN: env LLDB_USE_NATIVE_PDB_READER=1 lldb-test symbols -dump-ast %t.exe | FileCheck --check-prefix=GLOBAL %s
RUN: lldb-test symbols -dump-ast %t.exe | FileCheck --check-prefix=BASE %s
+RUN: env LLDB_USE_NATIVE_PDB_READER=1 lldb-test symbols -dump-ast %t.exe | FileCheck --check-prefix=BASE %s
RUN: lldb-test symbols -dump-ast %t.exe | FileCheck --check-prefix=CLASS %s
RUN: env LLDB_USE_NATIVE_PDB_READER=1 lldb-test symbols -dump-ast %t.exe | FileCheck --check-prefix=CLASS %s
RUN: lldb-test symbols -dump-ast %t.exe | FileCheck --check-prefix=INNER %s
+RUN: env LLDB_USE_NATIVE_PDB_READER=1 lldb-test symbols -dump-ast %t.exe | FileCheck --check-prefix=INNER %s
RUN: lldb-test symbols -dump-ast %t.exe | FileCheck --check-prefix=TEMPLATE %s
+RUN: env LLDB_USE_NATIVE_PDB_READER=1 lldb-test symbols -dump-ast %t.exe | FileCheck --check-prefix=TEMPLATE %s
RUN: lldb-test symbols -dump-ast %t.exe | FileCheck --check-prefix=FOO %s
+RUN: env LLDB_USE_NATIVE_PDB_READER=1 lldb-test symbols -dump-ast %t.exe | FileCheck --check-prefix=FOO %s
RUN: lldb-test symbols -dump-ast %t.exe | FileCheck --check-prefix=MAIN %s
+RUN: env LLDB_USE_NATIVE_PDB_READER=1 lldb-test symbols -dump-ast %t.exe | FileCheck --check-prefix=MAIN %s
ENUM: Module: {{.*}}
ENUM: namespace N0 {
diff --git a/lldb/test/Shell/SymbolFile/PDB/calling-conventions-arm.test b/lldb/test/Shell/SymbolFile/PDB/calling-conventions-arm.test
index 7dabf91..8b0b5b6 100644
--- a/lldb/test/Shell/SymbolFile/PDB/calling-conventions-arm.test
+++ b/lldb/test/Shell/SymbolFile/PDB/calling-conventions-arm.test
@@ -1,7 +1,12 @@
REQUIRES: target-windows, lld, (target-arm || target-aarch64)
+
RUN: %build --compiler=clang-cl --arch=32 --nodefaultlib --output=%t.exe %S/Inputs/CallingConventionsTest.cpp
+RUN: lldb-test symbols -dump-ast %t.exe | FileCheck %s
+RUN: env LLDB_USE_NATIVE_PDB_READER=1 lldb-test symbols -dump-ast %t.exe | FileCheck %s
+
RUN: %build --compiler=clang-cl --arch=64 --nodefaultlib --output=%t.exe %S/Inputs/CallingConventionsTest.cpp
RUN: lldb-test symbols -dump-ast %t.exe | FileCheck %s
+RUN: env LLDB_USE_NATIVE_PDB_READER=1 lldb-test symbols -dump-ast %t.exe | FileCheck %s
CHECK: Module: {{.*}}
CHECK-DAG: int (*FuncCCallPtr)();
diff --git a/lldb/test/Shell/SymbolFile/PDB/calling-conventions-x86.test b/lldb/test/Shell/SymbolFile/PDB/calling-conventions-x86.test
index 065c8b6..ceb7ad1 100644
--- a/lldb/test/Shell/SymbolFile/PDB/calling-conventions-x86.test
+++ b/lldb/test/Shell/SymbolFile/PDB/calling-conventions-x86.test
@@ -1,8 +1,12 @@
REQUIRES: system-windows, lld, (target-x86 || target-x86_64)
-RUN: %build --compiler=clang-cl --arch=32 --nodefaultlib --output=%t.exe %S/Inputs/CallingConventionsTest.cpp \
-RUN: && lldb-test symbols -dump-ast %t.exe | FileCheck --check-prefix 32BIT-CHECK %s
-RUN: %build --compiler=clang-cl --arch=64 --nodefaultlib --output=%t.exe %S/Inputs/CallingConventionsTest.cpp \
-RUN: && lldb-test symbols -dump-ast %t.exe | FileCheck --check-prefix 64BIT-CHECK %s
+
+RUN: %build --compiler=clang-cl --arch=32 --nodefaultlib --output=%t-32.exe %S/Inputs/CallingConventionsTest.cpp
+RUN: lldb-test symbols -dump-ast %t-32.exe | FileCheck --check-prefix 32BIT-CHECK %s
+RUN: env LLDB_USE_NATIVE_PDB_READER=1 lldb-test symbols -dump-ast %t-32.exe | FileCheck --check-prefix 32BIT-CHECK %s
+
+RUN: %build --compiler=clang-cl --arch=64 --nodefaultlib --output=%t-64.exe %S/Inputs/CallingConventionsTest.cpp
+RUN: lldb-test symbols -dump-ast %t-64.exe | FileCheck --check-prefix 64BIT-CHECK %s
+RUN: env LLDB_USE_NATIVE_PDB_READER=1 lldb-test symbols -dump-ast %t-64.exe | FileCheck --check-prefix 64BIT-CHECK %s
64BIT-CHECK: Module: {{.*}}
64BIT-CHECK-DAG: int (*FuncCCallPtr)();
diff --git a/lldb/test/Shell/SymbolFile/PDB/vbases.test b/lldb/test/Shell/SymbolFile/PDB/vbases.test
index b58e3ed..e5ab80b 100644
--- a/lldb/test/Shell/SymbolFile/PDB/vbases.test
+++ b/lldb/test/Shell/SymbolFile/PDB/vbases.test
@@ -1,6 +1,7 @@
REQUIRES: target-windows, lld
RUN: %build --compiler=clang-cl --output=%t.exe %S/Inputs/VBases.cpp
RUN: %lldb -b -s %S/Inputs/VBases.script -- %t.exe | FileCheck %s
+RUN: env LLDB_USE_NATIVE_PDB_READER=1 %lldb -b -s %S/Inputs/VBases.script -- %t.exe | FileCheck %s
CHECK: {
CHECK: A = (a = '\x01')
diff --git a/lldb/tools/lldb-dap/Breakpoint.cpp b/lldb/tools/lldb-dap/Breakpoint.cpp
index b4e593e..c803957 100644
--- a/lldb/tools/lldb-dap/Breakpoint.cpp
+++ b/lldb/tools/lldb-dap/Breakpoint.cpp
@@ -8,10 +8,14 @@
#include "Breakpoint.h"
#include "DAP.h"
+#include "LLDBUtils.h"
+#include "Protocol/DAPTypes.h"
#include "ProtocolUtils.h"
#include "lldb/API/SBAddress.h"
#include "lldb/API/SBBreakpointLocation.h"
+#include "lldb/API/SBFileSpec.h"
#include "lldb/API/SBLineEntry.h"
+#include "lldb/API/SBModule.h"
#include "lldb/API/SBMutex.h"
#include "llvm/ADT/StringExtras.h"
#include <cstddef>
@@ -21,6 +25,22 @@
using namespace lldb_dap;
+static std::optional<protocol::PersistenceData>
+GetPersistenceDataForSymbol(lldb::SBSymbol &symbol) {
+ protocol::PersistenceData persistence_data;
+ lldb::SBModule module = symbol.GetStartAddress().GetModule();
+ if (!module.IsValid())
+ return std::nullopt;
+
+ lldb::SBFileSpec file_spec = module.GetFileSpec();
+ if (!file_spec.IsValid())
+ return std::nullopt;
+
+ persistence_data.module_path = GetSBFileSpecPath(file_spec);
+ persistence_data.symbol_name = symbol.GetName();
+ return persistence_data;
+}
+
void Breakpoint::SetCondition() { m_bp.SetCondition(m_condition.c_str()); }
void Breakpoint::SetHitCondition() {
@@ -73,7 +93,7 @@ protocol::Breakpoint Breakpoint::ToProtocolBreakpoint() {
const auto column = line_entry.GetColumn();
if (column != LLDB_INVALID_COLUMN_NUMBER)
breakpoint.column = column;
- } else {
+ } else if (source) {
// Assembly breakpoint.
auto symbol = bp_addr.GetSymbol();
if (symbol.IsValid()) {
@@ -82,6 +102,15 @@ protocol::Breakpoint Breakpoint::ToProtocolBreakpoint() {
.ReadInstructions(symbol.GetStartAddress(), bp_addr, nullptr)
.GetSize() +
1;
+
+ // Add persistent data so that the breakpoint can be resolved
+ // in future sessions.
+ std::optional<protocol::PersistenceData> persistence_data =
+ GetPersistenceDataForSymbol(symbol);
+ if (persistence_data) {
+ source->adapterData =
+ protocol::SourceLLDBData{std::move(persistence_data)};
+ }
}
}
diff --git a/lldb/tools/lldb-dap/CMakeLists.txt b/lldb/tools/lldb-dap/CMakeLists.txt
index 4cddfb1..5e0ad53 100644
--- a/lldb/tools/lldb-dap/CMakeLists.txt
+++ b/lldb/tools/lldb-dap/CMakeLists.txt
@@ -66,7 +66,8 @@ add_lldb_library(lldbDAP
Handler/ThreadsRequestHandler.cpp
Handler/VariablesRequestHandler.cpp
Handler/WriteMemoryRequestHandler.cpp
-
+
+ Protocol/DAPTypes.cpp
Protocol/ProtocolBase.cpp
Protocol/ProtocolEvents.cpp
Protocol/ProtocolTypes.cpp
diff --git a/lldb/tools/lldb-dap/DAP.cpp b/lldb/tools/lldb-dap/DAP.cpp
index cbd3b14..849712f 100644
--- a/lldb/tools/lldb-dap/DAP.cpp
+++ b/lldb/tools/lldb-dap/DAP.cpp
@@ -1406,11 +1406,15 @@ void DAP::EventThread() {
// avoids sending paths that should be source mapped. Note that
// CreateBreakpoint doesn't apply source mapping and certain
// implementation ignore the source part of this event anyway.
- llvm::json::Value source_bp = bp.ToProtocolBreakpoint();
- source_bp.getAsObject()->erase("source");
+ protocol::Breakpoint protocol_bp = bp.ToProtocolBreakpoint();
+
+ // "source" is not needed here, unless we add adapter data to be
+ // saved by the client.
+ if (protocol_bp.source && !protocol_bp.source->adapterData)
+ protocol_bp.source = std::nullopt;
llvm::json::Object body;
- body.try_emplace("breakpoint", source_bp);
+ body.try_emplace("breakpoint", protocol_bp);
body.try_emplace("reason", "changed");
llvm::json::Object bp_event = CreateEventObject("breakpoint");
@@ -1491,8 +1495,9 @@ std::vector<protocol::Breakpoint> DAP::SetSourceBreakpoints(
protocol::Breakpoint response_breakpoint =
iv->second.ToProtocolBreakpoint();
- response_breakpoint.source = source;
+ if (!response_breakpoint.source)
+ response_breakpoint.source = source;
if (!response_breakpoint.line &&
src_bp.GetLine() != LLDB_INVALID_LINE_NUMBER)
response_breakpoint.line = src_bp.GetLine();
diff --git a/lldb/tools/lldb-dap/Handler/SetBreakpointsRequestHandler.cpp b/lldb/tools/lldb-dap/Handler/SetBreakpointsRequestHandler.cpp
index 5d336af..142351f 100644
--- a/lldb/tools/lldb-dap/Handler/SetBreakpointsRequestHandler.cpp
+++ b/lldb/tools/lldb-dap/Handler/SetBreakpointsRequestHandler.cpp
@@ -9,7 +9,6 @@
#include "DAP.h"
#include "Protocol/ProtocolRequests.h"
#include "RequestHandler.h"
-#include <vector>
namespace lldb_dap {
diff --git a/lldb/tools/lldb-dap/Protocol/DAPTypes.cpp b/lldb/tools/lldb-dap/Protocol/DAPTypes.cpp
new file mode 100644
index 0000000..ecb4bae
--- /dev/null
+++ b/lldb/tools/lldb-dap/Protocol/DAPTypes.cpp
@@ -0,0 +1,36 @@
+#include "Protocol/DAPTypes.h"
+
+using namespace llvm;
+
+namespace lldb_dap::protocol {
+
+bool fromJSON(const llvm::json::Value &Params, PersistenceData &PD,
+ llvm::json::Path P) {
+ json::ObjectMapper O(Params, P);
+ return O && O.mapOptional("module_path", PD.module_path) &&
+ O.mapOptional("symbol_name", PD.symbol_name);
+}
+
+llvm::json::Value toJSON(const PersistenceData &PD) {
+ json::Object result{
+ {"module_path", PD.module_path},
+ {"symbol_name", PD.symbol_name},
+ };
+
+ return result;
+}
+
+bool fromJSON(const llvm::json::Value &Params, SourceLLDBData &SLD,
+ llvm::json::Path P) {
+ json::ObjectMapper O(Params, P);
+ return O && O.mapOptional("persistenceData", SLD.persistenceData);
+}
+
+llvm::json::Value toJSON(const SourceLLDBData &SLD) {
+ json::Object result;
+ if (SLD.persistenceData)
+ result.insert({"persistenceData", SLD.persistenceData});
+ return result;
+}
+
+} // namespace lldb_dap::protocol
\ No newline at end of file
diff --git a/lldb/tools/lldb-dap/Protocol/DAPTypes.h b/lldb/tools/lldb-dap/Protocol/DAPTypes.h
new file mode 100644
index 0000000..716d8b4
--- /dev/null
+++ b/lldb/tools/lldb-dap/Protocol/DAPTypes.h
@@ -0,0 +1,53 @@
+//===-- DAPTypes.h --------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains private DAP types used in the protocol.
+//
+// Each struct has a toJSON and fromJSON function, that converts between
+// the struct and a JSON representation. (See JSON.h)
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLDB_TOOLS_LLDB_DAP_PROTOCOL_DAP_TYPES_H
+#define LLDB_TOOLS_LLDB_DAP_PROTOCOL_DAP_TYPES_H
+
+#include "lldb/lldb-types.h"
+#include "llvm/Support/JSON.h"
+#include <optional>
+#include <string>
+
+namespace lldb_dap::protocol {
+
+/// Data used to help lldb-dap resolve breakpoints persistently across different
+/// sessions. This information is especially useful for assembly breakpoints,
+/// because `sourceReference` can change across sessions. For regular source
+/// breakpoints, the path and line are the same for each session.
+struct PersistenceData {
+ /// The source module path.
+ std::string module_path;
+
+ /// The symbol name of the Source.
+ std::string symbol_name;
+};
+bool fromJSON(const llvm::json::Value &, PersistenceData &, llvm::json::Path);
+llvm::json::Value toJSON(const PersistenceData &);
+
+/// Custom source data used by lldb-dap.
+/// This data should help lldb-dap identify sources correctly across different
+/// sessions.
+struct SourceLLDBData {
+ /// Data that helps lldb resolve this source persistently across different
+ /// sessions.
+ std::optional<PersistenceData> persistenceData;
+};
+bool fromJSON(const llvm::json::Value &, SourceLLDBData &, llvm::json::Path);
+llvm::json::Value toJSON(const SourceLLDBData &);
+
+} // namespace lldb_dap::protocol
+
+#endif
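
To make the shape of the new adapterData payload concrete, here is a small illustrative sketch (not part of the change) that builds the equivalent JSON with plain llvm::json. The field names follow DAPTypes.h above, while the module path and symbol name are invented placeholders.

// Illustrative only: approximate wire form of a DAP Source that carries the
// new persistence data. Values such as "/tmp/a.out" are made up.
#include "llvm/Support/JSON.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  llvm::json::Object source{
      {"name", "assembly_func"},
      {"sourceReference", 4},
      {"adapterData",
       llvm::json::Object{
           {"persistenceData",
            llvm::json::Object{{"module_path", "/tmp/a.out"},
                               {"symbol_name", "assembly_func"}}}}},
  };
  // Prints the Source object as one JSON value, e.g. for inspection in logs.
  llvm::outs() << llvm::json::Value(std::move(source)) << "\n";
  return 0;
}
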
diff --git a/lldb/tools/lldb-dap/Protocol/ProtocolTypes.cpp b/lldb/tools/lldb-dap/Protocol/ProtocolTypes.cpp
index fe8bb52..369858c 100644
--- a/lldb/tools/lldb-dap/Protocol/ProtocolTypes.cpp
+++ b/lldb/tools/lldb-dap/Protocol/ProtocolTypes.cpp
@@ -46,7 +46,8 @@ bool fromJSON(const json::Value &Params, Source &S, json::Path P) {
json::ObjectMapper O(Params, P);
return O && O.map("name", S.name) && O.map("path", S.path) &&
O.map("presentationHint", S.presentationHint) &&
- O.map("sourceReference", S.sourceReference);
+ O.map("sourceReference", S.sourceReference) &&
+ O.map("adapterData", S.adapterData);
}
llvm::json::Value toJSON(Source::PresentationHint hint) {
@@ -71,6 +72,8 @@ llvm::json::Value toJSON(const Source &S) {
result.insert({"sourceReference", *S.sourceReference});
if (S.presentationHint)
result.insert({"presentationHint", *S.presentationHint});
+ if (S.adapterData)
+ result.insert({"adapterData", *S.adapterData});
return result;
}
diff --git a/lldb/tools/lldb-dap/Protocol/ProtocolTypes.h b/lldb/tools/lldb-dap/Protocol/ProtocolTypes.h
index 89122c8..c4be791 100644
--- a/lldb/tools/lldb-dap/Protocol/ProtocolTypes.h
+++ b/lldb/tools/lldb-dap/Protocol/ProtocolTypes.h
@@ -20,6 +20,7 @@
#ifndef LLDB_TOOLS_LLDB_DAP_PROTOCOL_PROTOCOL_TYPES_H
#define LLDB_TOOLS_LLDB_DAP_PROTOCOL_PROTOCOL_TYPES_H
+#include "Protocol/DAPTypes.h"
#include "lldb/lldb-defines.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/Support/JSON.h"
@@ -336,7 +337,12 @@ struct Source {
/// skipped on stepping.
std::optional<PresentationHint> presentationHint;
- // unsupported keys: origin, sources, adapterData, checksums
+ /// Additional data that a debug adapter might want to loop through the
+ /// client. The client should leave the data intact and persist it across
+ /// sessions. The client should not interpret the data.
+ std::optional<SourceLLDBData> adapterData;
+
+ // unsupported keys: origin, sources, checksums
};
bool fromJSON(const llvm::json::Value &, Source::PresentationHint &,
llvm::json::Path);
diff --git a/lldb/tools/lldb-dap/SourceBreakpoint.cpp b/lldb/tools/lldb-dap/SourceBreakpoint.cpp
index 5fce9fe..843a5eb 100644
--- a/lldb/tools/lldb-dap/SourceBreakpoint.cpp
+++ b/lldb/tools/lldb-dap/SourceBreakpoint.cpp
@@ -10,7 +10,9 @@
#include "BreakpointBase.h"
#include "DAP.h"
#include "JSONUtils.h"
+#include "ProtocolUtils.h"
#include "lldb/API/SBBreakpoint.h"
+#include "lldb/API/SBFileSpec.h"
#include "lldb/API/SBFileSpecList.h"
#include "lldb/API/SBFrame.h"
#include "lldb/API/SBInstruction.h"
@@ -46,41 +48,20 @@ llvm::Error SourceBreakpoint::SetBreakpoint(const protocol::Source &source) {
if (source.sourceReference) {
// Breakpoint set by assembly source.
- std::optional<lldb::addr_t> raw_addr =
- m_dap.GetSourceReferenceAddress(*source.sourceReference);
- if (!raw_addr)
- return llvm::createStringError(llvm::inconvertibleErrorCode(),
- "Invalid sourceReference.");
-
- lldb::SBAddress source_address(*raw_addr, m_dap.target);
- if (!source_address.IsValid())
- return llvm::createStringError(llvm::inconvertibleErrorCode(),
- "Invalid sourceReference.");
-
- lldb::SBSymbol symbol = source_address.GetSymbol();
- if (!symbol.IsValid()) {
- // FIXME: Support assembly breakpoints without a valid symbol.
- return llvm::createStringError(llvm::inconvertibleErrorCode(),
- "Breakpoints in assembly without a valid "
- "symbol are not supported yet.");
+ if (source.adapterData && source.adapterData->persistenceData) {
+ // Prefer the adapter persistence data, because this could be a
+ // breakpoint from a previous session where the `sourceReference` is no
+ // longer valid.
+ if (llvm::Error error = CreateAssemblyBreakpointWithPersistenceData(
+ *source.adapterData->persistenceData))
+ return error;
+ } else {
+ if (llvm::Error error = CreateAssemblyBreakpointWithSourceReference(
+ *source.sourceReference))
+ return error;
}
-
- lldb::SBInstructionList inst_list =
- m_dap.target.ReadInstructions(symbol.GetStartAddress(), m_line);
- if (inst_list.GetSize() < m_line)
- return llvm::createStringError(llvm::inconvertibleErrorCode(),
- "Invalid instruction list size.");
-
- lldb::SBAddress address =
- inst_list.GetInstructionAtIndex(m_line - 1).GetAddress();
-
- m_bp = m_dap.target.BreakpointCreateBySBAddress(address);
} else {
- // Breakpoint set by a regular source file.
- const auto source_path = source.path.value_or("");
- lldb::SBFileSpecList module_list;
- m_bp = m_dap.target.BreakpointCreateByLocation(source_path.c_str(), m_line,
- m_column, 0, module_list);
+ CreatePathBreakpoint(source);
}
if (!m_log_message.empty())
@@ -97,6 +78,60 @@ void SourceBreakpoint::UpdateBreakpoint(const SourceBreakpoint &request_bp) {
BreakpointBase::UpdateBreakpoint(request_bp);
}
+void SourceBreakpoint::CreatePathBreakpoint(const protocol::Source &source) {
+ const auto source_path = source.path.value_or("");
+ lldb::SBFileSpecList module_list;
+ m_bp = m_dap.target.BreakpointCreateByLocation(source_path.c_str(), m_line,
+ m_column, 0, module_list);
+}
+
+llvm::Error SourceBreakpoint::CreateAssemblyBreakpointWithSourceReference(
+ int64_t source_reference) {
+ std::optional<lldb::addr_t> raw_addr =
+ m_dap.GetSourceReferenceAddress(source_reference);
+ if (!raw_addr)
+ return llvm::createStringError(llvm::inconvertibleErrorCode(),
+ "Invalid sourceReference.");
+
+ lldb::SBAddress source_address(*raw_addr, m_dap.target);
+ if (!source_address.IsValid())
+ return llvm::createStringError(llvm::inconvertibleErrorCode(),
+ "Invalid sourceReference.");
+
+ lldb::SBSymbol symbol = source_address.GetSymbol();
+ if (!symbol.IsValid()) {
+ // FIXME: Support assembly breakpoints without a valid symbol.
+ return llvm::createStringError(llvm::inconvertibleErrorCode(),
+ "Breakpoints in assembly without a valid "
+ "symbol are not supported yet.");
+ }
+
+ lldb::SBInstructionList inst_list =
+ m_dap.target.ReadInstructions(symbol.GetStartAddress(), m_line);
+ if (inst_list.GetSize() < m_line)
+ return llvm::createStringError(llvm::inconvertibleErrorCode(),
+ "Invalid instruction list size.");
+
+ lldb::SBAddress address =
+ inst_list.GetInstructionAtIndex(m_line - 1).GetAddress();
+
+ m_bp = m_dap.target.BreakpointCreateBySBAddress(address);
+ return llvm::Error::success();
+}
+
+llvm::Error SourceBreakpoint::CreateAssemblyBreakpointWithPersistenceData(
+ const protocol::PersistenceData &persistence_data) {
+ lldb::SBFileSpec file_spec(persistence_data.module_path.c_str());
+ lldb::SBFileSpecList comp_unit_list;
+ lldb::SBFileSpecList file_spec_list;
+ file_spec_list.Append(file_spec);
+ m_bp = m_dap.target.BreakpointCreateByName(
+ persistence_data.symbol_name.c_str(), lldb::eFunctionNameTypeFull,
+ lldb::eLanguageTypeUnknown, m_line - 1, true, file_spec_list,
+ comp_unit_list);
+ return llvm::Error::success();
+}
+
lldb::SBError SourceBreakpoint::AppendLogMessagePart(llvm::StringRef part,
bool is_expr) {
if (is_expr) {
diff --git a/lldb/tools/lldb-dap/SourceBreakpoint.h b/lldb/tools/lldb-dap/SourceBreakpoint.h
index 857ac42..34054a8 100644
--- a/lldb/tools/lldb-dap/SourceBreakpoint.h
+++ b/lldb/tools/lldb-dap/SourceBreakpoint.h
@@ -11,6 +11,7 @@
#include "Breakpoint.h"
#include "DAPForward.h"
+#include "Protocol/DAPTypes.h"
#include "Protocol/ProtocolTypes.h"
#include "lldb/API/SBError.h"
#include "llvm/ADT/StringRef.h"
@@ -50,6 +51,12 @@ public:
uint32_t GetColumn() const { return m_column; }
protected:
+ void CreatePathBreakpoint(const protocol::Source &source);
+ llvm::Error
+ CreateAssemblyBreakpointWithSourceReference(int64_t source_reference);
+ llvm::Error CreateAssemblyBreakpointWithPersistenceData(
+ const protocol::PersistenceData &persistence_data);
+
// logMessage part can be either a raw text or an expression.
struct LogMessagePart {
LogMessagePart(llvm::StringRef text, bool is_expr)
diff --git a/lldb/tools/lldb-rpc-gen/lldb-rpc-gen.cpp b/lldb/tools/lldb-rpc-gen/lldb-rpc-gen.cpp
index 9b48796..47b3093 100644
--- a/lldb/tools/lldb-rpc-gen/lldb-rpc-gen.cpp
+++ b/lldb/tools/lldb-rpc-gen/lldb-rpc-gen.cpp
@@ -121,11 +121,10 @@ private:
/// - Certain inconvenient classes
/// - Records without definitions (forward declarations)
bool ShouldSkipRecord(CXXRecordDecl *Decl) {
- const Type *DeclType = Decl->getTypeForDecl();
- QualType CanonicalType = DeclType->getCanonicalTypeInternal();
return !Manager.isInMainFile(Decl->getBeginLoc()) ||
!Decl->hasDefinition() || Decl->getDefinition() != Decl ||
- lldb_rpc_gen::TypeIsDisallowedClass(CanonicalType);
+ lldb_rpc_gen::TypeIsDisallowedClass(
+ Context.getCanonicalTagType(Decl));
}
/// Check the support level for a type
diff --git a/lldb/unittests/Core/MangledTest.cpp b/lldb/unittests/Core/MangledTest.cpp
index a6e6e75f..cbc0c5d 100644
--- a/lldb/unittests/Core/MangledTest.cpp
+++ b/lldb/unittests/Core/MangledTest.cpp
@@ -889,10 +889,10 @@ TEST_P(DemanglingInfoCorrectnessTestFixutre, Correctness) {
EXPECT_THAT_EXPECTED(qualifiers, llvm::Succeeded());
reconstructed_name += *qualifiers;
- // TODO: should retrieve suffix using the plugin too.
- auto suffix = tracked_name.slice(OB->NameInfo.QualifiersRange.second,
- llvm::StringRef::npos);
- reconstructed_name += suffix;
+ auto suffix =
+ CPlusPlusLanguage::GetDemangledFunctionSuffix(tracked_name, OB->NameInfo);
+ EXPECT_THAT_EXPECTED(suffix, llvm::Succeeded());
+ reconstructed_name += *suffix;
EXPECT_EQ(reconstructed_name, demangled);
}
diff --git a/lldb/unittests/DAP/ProtocolTypesTest.cpp b/lldb/unittests/DAP/ProtocolTypesTest.cpp
index 3a1b940..4aab2dc 100644
--- a/lldb/unittests/DAP/ProtocolTypesTest.cpp
+++ b/lldb/unittests/DAP/ProtocolTypesTest.cpp
@@ -108,8 +108,9 @@ TEST(ProtocolTypesTest, Breakpoint) {
breakpoint.id = 42;
breakpoint.verified = true;
breakpoint.message = "Breakpoint set successfully";
- breakpoint.source = Source{"test.cpp", "/path/to/test.cpp", 123,
- Source::eSourcePresentationHintNormal};
+ breakpoint.source =
+ Source{"test.cpp", "/path/to/test.cpp", 123,
+ Source::eSourcePresentationHintNormal, std::nullopt};
breakpoint.line = 10;
breakpoint.column = 5;
breakpoint.endLine = 15;
@@ -567,8 +568,9 @@ TEST(ProtocolTypesTest, DisassembledInstruction) {
instruction.instructionBytes = "0F 1F 00";
instruction.instruction = "mov eax, ebx";
instruction.symbol = "main";
- instruction.location = Source{"test.cpp", "/path/to/test.cpp", 123,
- Source::eSourcePresentationHintNormal};
+ instruction.location =
+ Source{"test.cpp", "/path/to/test.cpp", 123,
+ Source::eSourcePresentationHintNormal, std::nullopt};
instruction.line = 10;
instruction.column = 5;
instruction.endLine = 15;
diff --git a/lldb/unittests/Host/MainLoopTest.cpp b/lldb/unittests/Host/MainLoopTest.cpp
index 448ad9c..0bc291c 100644
--- a/lldb/unittests/Host/MainLoopTest.cpp
+++ b/lldb/unittests/Host/MainLoopTest.cpp
@@ -80,6 +80,8 @@ TEST_F(MainLoopTest, ReadSocketObject) {
ASSERT_EQ(1u, callback_count);
}
+// Flaky, see https://github.com/llvm/llvm-project/issues/152677.
+#ifndef _WIN32
TEST_F(MainLoopTest, ReadPipeObject) {
Pipe pipe;
@@ -142,6 +144,7 @@ TEST_F(MainLoopTest, MultipleReadsPipeObject) {
ASSERT_EQ(5u, callback_count);
async_writer.wait();
}
+#endif
TEST_F(MainLoopTest, PipeDelayBetweenRegisterAndRun) {
Pipe pipe;
diff --git a/lldb/unittests/Instruction/ARM64/TestAArch64Emulator.cpp b/lldb/unittests/Instruction/ARM64/TestAArch64Emulator.cpp
index 08296da..c6bfe0f 100644
--- a/lldb/unittests/Instruction/ARM64/TestAArch64Emulator.cpp
+++ b/lldb/unittests/Instruction/ARM64/TestAArch64Emulator.cpp
@@ -62,7 +62,7 @@ struct Arch64EmulatorTester : public EmulateInstructionARM64 {
reg_value.SetUInt64(tester->gpr.pc);
return true;
case gpr_cpsr_arm64:
- reg_value.SetUInt64(tester->gpr.cpsr);
+ reg_value.SetUInt32(tester->gpr.cpsr);
return true;
default:
return false;
@@ -97,7 +97,7 @@ struct Arch64EmulatorTester : public EmulateInstructionARM64 {
tester->gpr.pc = reg_value.GetAsUInt64();
return true;
case gpr_cpsr_arm64:
- tester->gpr.cpsr = reg_value.GetAsUInt64();
+ tester->gpr.cpsr = reg_value.GetAsUInt32();
return true;
default:
return false;
@@ -112,7 +112,7 @@ struct Arch64EmulatorTester : public EmulateInstructionARM64 {
assert(addr - tester->memory_offset + length <= sizeof(tester->memory));
if (addr >= tester->memory_offset &&
addr - tester->memory_offset + length <= sizeof(tester->memory)) {
- memcpy(dst, tester->memory + addr - tester->memory_offset, length);
+ memcpy(dst, tester->memory + (addr - tester->memory_offset), length);
return length;
}
return 0;
diff --git a/llvm/CMakeLists.txt b/llvm/CMakeLists.txt
index 4c70b98..b672cb9 100644
--- a/llvm/CMakeLists.txt
+++ b/llvm/CMakeLists.txt
@@ -187,28 +187,29 @@ if ("lldb" IN_LIST LLVM_ENABLE_PROJECTS)
endif ()
if ("libc" IN_LIST LLVM_ENABLE_PROJECTS)
- message(WARNING "Using LLVM_ENABLE_PROJECTS=libc is deprecated. Please use "
+ message(WARNING "Using LLVM_ENABLE_PROJECTS=libc is deprecated now, and will "
+ "become a fatal error in a future release. Please use "
"-DLLVM_ENABLE_RUNTIMES=libc or see the instructions at "
"https://libc.llvm.org/ for building the runtimes.")
endif()
if ("compiler-rt" IN_LIST LLVM_ENABLE_PROJECTS)
message(WARNING "Using LLVM_ENABLE_PROJECTS=compiler-rt is deprecated now, and will "
- "become a fatal error in the LLVM 21 release. Please use "
+ "become a fatal error in a future release. Please use "
"-DLLVM_ENABLE_RUNTIMES=compiler-rt or see the instructions at "
"https://compiler-rt.llvm.org/ for building the runtimes.")
endif()
if ("offload" IN_LIST LLVM_ENABLE_PROJECTS)
message(WARNING "Using LLVM_ENABLE_PROJECTS=offload is deprecated now, and will "
- "become a fatal error in the LLVM 21 release. Please use "
+ "become a fatal error in a future release. Please use "
"-DLLVM_ENABLE_RUNTIMES=offload or see the instructions at "
"https://openmp.llvm.org/ for building the runtimes.")
endif()
if ("openmp" IN_LIST LLVM_ENABLE_PROJECTS)
message(WARNING "Using LLVM_ENABLE_PROJECTS=openmp is deprecated now, and will "
- "become a fatal error in the LLVM 21 release. Please use "
+ "become a fatal error in a future release. Please use "
"-DLLVM_ENABLE_RUNTIMES=openmp or see the instructions at "
"https://openmp.llvm.org/ for building the runtimes.")
endif()
@@ -221,7 +222,7 @@ endif ()
if ("libclc" IN_LIST LLVM_ENABLE_PROJECTS)
message(WARNING "Using LLVM_ENABLE_PROJECTS=libclc is deprecated now, and will "
- "become a fatal error in the LLVM 21 release. Please use "
+ "become a fatal error in a future release. Please use "
"-DLLVM_ENABLE_RUNTIMES=libclc or see the instructions at "
"https://libclc.llvm.org/ for building the runtimes.")
endif()
@@ -587,9 +588,6 @@ set(LLVM_ENABLE_DEBUGLOC_COVERAGE_TRACKING "DISABLED" CACHE STRING
"Enhance Debugify's line number coverage tracking; enabling this is ABI-breaking. Can be DISABLED, COVERAGE, or COVERAGE_AND_ORIGIN.")
set_property(CACHE LLVM_ENABLE_DEBUGLOC_COVERAGE_TRACKING PROPERTY STRINGS DISABLED COVERAGE COVERAGE_AND_ORIGIN)
-option(LLVM_EXPERIMENTAL_KEY_INSTRUCTIONS
- "Add additional fields to DILocations to support Key Instructions" ON)
-
set(WINDOWS_PREFER_FORWARD_SLASH_DEFAULT OFF)
if (MINGW)
# Cygwin doesn't identify itself as Windows, and thus gets path::Style::posix
diff --git a/llvm/cmake/modules/HandleLLVMOptions.cmake b/llvm/cmake/modules/HandleLLVMOptions.cmake
index c126b0d..91aaeb5 100644
--- a/llvm/cmake/modules/HandleLLVMOptions.cmake
+++ b/llvm/cmake/modules/HandleLLVMOptions.cmake
@@ -212,10 +212,6 @@ endif()
# LLVM_ENABLE_DEBUGLOC_TRACKING_COVERAGE (non-cached) is expected to be
# 1 or 0 here, assuming referenced in #cmakedefine01.
-if(LLVM_EXPERIMENTAL_KEY_INSTRUCTIONS)
- add_compile_definitions(EXPERIMENTAL_KEY_INSTRUCTIONS)
-endif()
-
if( LLVM_REVERSE_ITERATION )
set( LLVM_ENABLE_REVERSE_ITERATION 1 )
endif()
diff --git a/llvm/cmake/modules/LLVMProcessSources.cmake b/llvm/cmake/modules/LLVMProcessSources.cmake
index cf358a8..0670d60 100644
--- a/llvm/cmake/modules/LLVMProcessSources.cmake
+++ b/llvm/cmake/modules/LLVMProcessSources.cmake
@@ -58,21 +58,6 @@ function(llvm_process_sources OUT_VAR)
set(sources ${ARG_UNPARSED_ARGUMENTS})
llvm_check_source_file_list(${sources})
- # Don't generate __SHORT_FILE__ on VS builds as it can prevent build parallelisation.
- if(NOT CMAKE_GENERATOR MATCHES "Visual Studio")
- foreach(fn ${sources})
- get_filename_component(suf ${fn} EXT)
- if("${suf}" STREQUAL ".cpp" OR "${suf}" STREQUAL ".c")
- get_filename_component(short_name ${fn} NAME)
- set_property(
- SOURCE ${fn}
- APPEND
- PROPERTY COMPILE_DEFINITIONS __SHORT_FILE__="${short_name}")
- endif()
- endforeach()
- endif()
-
-
# This adds .td and .h files to the Visual Studio solution:
add_td_sources(sources)
find_all_header_files(hdrs "${ARG_ADDITIONAL_HEADER_DIRS}")
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 3a3a74f..6ba3759 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -5175,6 +5175,8 @@ The following is the syntax for constant expressions:
Perform the :ref:`trunc operation <i_trunc>` on constants.
``ptrtoint (CST to TYPE)``
Perform the :ref:`ptrtoint operation <i_ptrtoint>` on constants.
+``ptrtoaddr (CST to TYPE)``
+ Perform the :ref:`ptrtoaddr operation <i_ptrtoaddr>` on constants.
``inttoptr (CST to TYPE)``
Perform the :ref:`inttoptr operation <i_inttoptr>` on constants.
This one is *really* dangerous!
@@ -12523,6 +12525,59 @@ Example:
%Y = ptrtoint ptr %P to i64 ; yields zero extension on 32-bit architecture
%Z = ptrtoint <4 x ptr> %P to <4 x i64>; yields vector zero extension for a vector of addresses on 32-bit architecture
+.. _i_ptrtoaddr:
+
+'``ptrtoaddr .. to``' Instruction
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+
+::
+
+ <result> = ptrtoaddr <ty> <value> to <ty2> ; yields ty2
+
+Overview:
+"""""""""
+
+The '``ptrtoaddr``' instruction converts the pointer or a vector of
+pointers ``value`` to the underlying integer address (or vector of addresses) of
+type ``ty2``. This is different from :ref:`ptrtoint <i_ptrtoint>` in that it
+only operates on the index bits of the pointer and ignores all other bits, and
+does not capture the provenance of the pointer.
+
+Arguments:
+""""""""""
+
+The '``ptrtoaddr``' instruction takes a ``value`` to cast, which must be
+a value of type :ref:`pointer <t_pointer>` or a vector of pointers, and a
+type to cast it to ``ty2``, which must be the :ref:`integer <t_integer>`
+type (or vector of integers) matching the pointer index width of the address
+space of ``ty``.
+
+Semantics:
+""""""""""
+
+The '``ptrtoaddr``' instruction converts ``value`` to integer type ``ty2`` by
+interpreting the lowest index-width pointer representation bits as an integer.
+If the address size and the pointer representation size are the same and
+``value`` and ``ty2`` are the same size, then nothing is done (*no-op cast*)
+other than a type change.
+
+The ``ptrtoaddr`` instruction always :ref:`captures the address but not the provenance <pointercapture>`
+of the pointer argument.
+
+Example:
+""""""""
+This example assumes pointers in address space 1 are 64 bits in size with an
+address width of 32 bits (``p1:64:64:64:32`` :ref:`datalayout string<langref_datalayout>`).
+
+.. code-block:: llvm
+
+ %X = ptrtoaddr ptr addrspace(1) %P to i32 ; extracts low 32 bits of pointer
+ %Y = ptrtoaddr <4 x ptr addrspace(1)> %P to <4 x i32> ; yields vector of low 32 bits for each pointer
+
+
.. _i_inttoptr:
'``inttoptr .. to``' Instruction
@@ -26730,7 +26785,7 @@ Syntax:
::
- declare void @llvm.lifetime.start(i64 <size>, ptr captures(none) <ptr>)
+ declare void @llvm.lifetime.start(ptr captures(none) <ptr>)
Overview:
"""""""""
@@ -26741,11 +26796,8 @@ object's lifetime.
Arguments:
""""""""""
-The first argument is a constant integer, which is ignored and will be removed
-in the future.
-
-The second argument is either a pointer to an ``alloca`` instruction or
-a ``poison`` value.
+The argument is either a pointer to an ``alloca`` instruction or a ``poison``
+value.
Semantics:
""""""""""
@@ -26774,7 +26826,7 @@ Syntax:
::
- declare void @llvm.lifetime.end(i64 <size>, ptr captures(none) <ptr>)
+ declare void @llvm.lifetime.end(ptr captures(none) <ptr>)
Overview:
"""""""""
@@ -26785,11 +26837,8 @@ The '``llvm.lifetime.end``' intrinsic specifies the end of a
Arguments:
""""""""""
-The first argument is a constant integer, which is ignored and will be removed
-in the future.
-
-The second argument is either a pointer to an ``alloca`` instruction or
-a ``poison`` value.
+The argument is either a pointer to an ``alloca`` instruction or a ``poison``
+value.
Semantics:
""""""""""
diff --git a/llvm/docs/MergeFunctions.rst b/llvm/docs/MergeFunctions.rst
index 02344bc..c27f603 100644
--- a/llvm/docs/MergeFunctions.rst
+++ b/llvm/docs/MergeFunctions.rst
@@ -7,7 +7,7 @@ MergeFunctions pass, how it works
Introduction
============
-Sometimes code contains equal functions, or functions that does exactly the same
+Sometimes code contains equal functions, or functions that do exactly the same
thing even though they are non-equal on the IR level (e.g.: multiplication on 2
and 'shl 1'). It could happen due to several reasons: mainly, the usage of
templates and automatic code generators. Though, sometimes the user itself could
@@ -16,7 +16,7 @@ write the same thing twice :-)
The main purpose of this pass is to recognize such functions and merge them.
This document is the extension to pass comments and describes the pass logic. It
-describes the algorithm that is used in order to compare functions and
+describes the algorithm used to compare functions and
explains how we could combine equal functions correctly to keep the module
valid.
@@ -58,7 +58,7 @@ It's especially important to understand chapter 3 of tutorial:
:doc:`tutorial/LangImpl03`
-The reader should also know how passes work in LLVM. They could use this
+The reader should also know how passes work in LLVM. They can use this
article as a reference and start point here:
:doc:`WritingAnLLVMPass`
@@ -68,7 +68,7 @@ debugging and bug-fixing.
Narrative structure
-------------------
-The article consists of three parts. The first part explains pass functionality
+This article consists of three parts. The first part explains pass functionality
on the top-level. The second part describes the comparison procedure itself.
The third part describes the merging process.
@@ -130,7 +130,7 @@ access lookup? The answer is: "yes".
Random-access
"""""""""""""
-How it could this be done? Just convert each function to a number, and gather
+How can this be done? Just convert each function to a number, and gather
all of them in a special hash-table. Functions with equal hashes are equal.
Good hashing means, that every function part must be taken into account. That
means we have to convert every function part into some number, and then add it
@@ -190,17 +190,17 @@ The algorithm is pretty simple:
1. Put all module's functions into the *worklist*.
-2. Scan *worklist*'s functions twice: first enumerate only strong functions and
+2. Scan *worklist*'s functions twice: first, enumerate only strong functions and
then only weak ones:
2.1. Loop body: take a function from *worklist* (call it *FCur*) and try to
insert it into *FnTree*: check whether *FCur* is equal to one of functions
in *FnTree*. If there *is* an equal function in *FnTree*
- (call it *FExists*): merge function *FCur* with *FExists*. Otherwise add
+ (call it *FExists*): merge function *FCur* with *FExists*. Otherwise, add
the function from the *worklist* to *FnTree*.
3. Once the *worklist* scanning and merging operations are complete, check the
-*Deferred* list. If it is not empty: refill the *worklist* contents with
+*Deferred* list. If it is not empty, refill the *worklist* contents with
*Deferred* list and redo step 2, if the *Deferred* list is empty, then exit
from method.
@@ -249,14 +249,14 @@ Below, we will use the following operations:
The rest of the article is based on *MergeFunctions.cpp* source code
(found in *<llvm_dir>/lib/Transforms/IPO/MergeFunctions.cpp*). We would like
-to ask reader to keep this file open, so we could use it as a reference
+to ask the reader to keep this file open, so we could use it as a reference
for further explanations.
Now, we're ready to proceed to the next chapter and see how it works.
Functions comparison
====================
-At first, let's define how exactly we compare complex objects.
+First, let's define exactly how we compare complex objects.
Complex object comparison (function, basic-block, etc) is mostly based on its
sub-object comparison results. It is similar to the next "tree" objects
@@ -307,7 +307,7 @@ to those we met later in function body (value we met first would be *less*).
This is done by “``FunctionComparator::cmpValues(const Value*, const Value*)``”
method (will be described a bit later).
-4. Function body comparison. As it written in method comments:
+4. Function body comparison. As written in method comments:
“We do a CFG-ordered walk since the actual ordering of the blocks in the linked
list is immaterial. Our walk starts at the entry block for both functions, then
@@ -477,7 +477,7 @@ Of course, we can combine insertion and comparison:
= sn_mapR.insert(std::make_pair(Right, sn_mapR.size()));
return cmpNumbers(LeftRes.first->second, RightRes.first->second);
-Let's look, how whole method could be implemented.
+Let's look at how the whole method could be implemented.
1. We have to start with the bad news. Consider function self and
cross-referencing cases:
@@ -519,7 +519,7 @@ the result of numbers comparison:
if (LeftRes.first->second < RightRes.first->second) return -1;
return 1;
-Now when *cmpValues* returns 0, we can proceed the comparison procedure.
+Now, when *cmpValues* returns 0, we can proceed with the comparison procedure.
Otherwise, if we get (-1 or 1), we need to pass this result to the top level,
and finish comparison procedure.
@@ -549,7 +549,7 @@ losslessly bitcasted to each other. The further explanation is modification of
2.1.3.1. If types are vectors, compare their bitwidth using the
*cmpNumbers*. If result is not 0, return it.
- 2.1.3.2. Different types, but not a vectors:
+ 2.1.3.2. Different types, but not vectors:
* if both of them are pointers, good for us, we can proceed to step 3.
* if one of types is pointer, return result of *isPointer* flags
@@ -654,7 +654,7 @@ O(N*N) to O(log(N)).
Merging process, mergeTwoFunctions
==================================
-Once *MergeFunctions* detected that current function (*G*) is equal to one that
+Once *MergeFunctions* detects that current function (*G*) is equal to one that
were analyzed before (function *F*) it calls ``mergeTwoFunctions(Function*,
Function*)``.
@@ -664,7 +664,7 @@ Operation affects ``FnTree`` contents with next way: *F* will stay in
functions that calls *G* would be put into ``Deferred`` set and removed from
``FnTree``, and analyzed again.
-The approach is next:
+The approach is as follows:
1. Most wished case: when we can use alias and both of *F* and *G* are weak. We
make both of them with aliases to the third strong function *H*. Actually *H*
@@ -691,12 +691,12 @@ ok: we can use alias to *F* instead of *G* or change call instructions itself.
HasGlobalAliases, removeUsers
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-First consider the case when we have global aliases of one function name to
+First, consider the case when we have global aliases of one function name to
another. Our purpose is make both of them with aliases to the third strong
function. Though if we keep *F* alive and without major changes we can leave it
in ``FnTree``. Try to combine these two goals.
-Do stub replacement of *F* itself with an alias to *F*.
+Do a stub replacement of *F* itself with an alias to *F*.
1. Create stub function *H*, with the same name and attributes like function
*F*. It takes maximum alignment of *F* and *G*.
@@ -725,7 +725,7 @@ also have alias to *F*.
No global aliases, replaceDirectCallers
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-If global aliases are not supported. We call ``replaceDirectCallers``. Just
+If global aliases are not supported, we call ``replaceDirectCallers``. Just
go through all calls of *G* and replace it with calls of *F*. If you look into
the method you will see that it scans all uses of *G* too, and if use is callee
(if user is call instruction and *G* is used as what to be called), we replace
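
The cmpValues discussion in the MergeFunctions document above rests on a serial-numbering trick. The following standalone sketch (simplified and illustrative, not taken verbatim from MergeFunctions.cpp) shows that idea in isolation.

// Sketch of the serial-numbering comparison described in the document:
// values are numbered in order of first appearance on each side, and two
// values compare equal when they were first seen at the same position.
#include <cstddef>
#include <map>

template <typename ValueT>
int cmpBySerialNumber(const ValueT *Left, const ValueT *Right,
                      std::map<const ValueT *, std::size_t> &MapL,
                      std::map<const ValueT *, std::size_t> &MapR) {
  // insert() is a no-op if the value was already numbered; either way the
  // iterator points at the value's serial number.
  auto LeftRes = MapL.insert({Left, MapL.size()});
  auto RightRes = MapR.insert({Right, MapR.size()});
  const std::size_t LeftNum = LeftRes.first->second;
  const std::size_t RightNum = RightRes.first->second;
  if (LeftNum == RightNum)
    return 0;
  return LeftNum < RightNum ? -1 : 1;
}
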
diff --git a/llvm/docs/NVPTXUsage.rst b/llvm/docs/NVPTXUsage.rst
index d28eb68..2dc8f9f 100644
--- a/llvm/docs/NVPTXUsage.rst
+++ b/llvm/docs/NVPTXUsage.rst
@@ -971,6 +971,10 @@ Syntax:
declare void @llvm.nvvm.prefetch.L1(ptr %ptr)
declare void @llvm.nvvm.prefetch.L2(ptr %ptr)
+ declare void @llvm.nvvm.prefetch.tensormap.p0(ptr %ptr)
+ declare void @llvm.nvvm.prefetch.tensormap.p4(ptr addrspace(4) %const_ptr)
+ declare void @llvm.nvvm.prefetch.tensormap.p101(ptr addrspace(101) %param_ptr)
+
declare void @llvm.nvvm.prefetch.global.L2.evict.normal(ptr addrspace(1) %global_ptr)
declare void @llvm.nvvm.prefetch.global.L2.evict.last(ptr addrspace(1) %global_ptr)
@@ -983,7 +987,10 @@ The '``@llvm.nvvm.prefetch.*``' and '``@llvm.nvvm.prefetchu.*``' intrinsic
correspond to the '``prefetch.*``;' and '``prefetchu.*``' family of PTX instructions.
The '``prefetch.*``' instructions bring the cache line containing the
specified address in global or local memory address space into the
-specified cache level (L1 or L2). The '`prefetchu.*``' instruction brings the cache line
+specified cache level (L1 or L2). If the '``.tensormap``' qualifier is specified, then the
+prefetch instruction brings the cache line containing the specified address in the
+'``.const``' or '``.param``' memory state space for subsequent use by the '``cp.async.bulk.tensor``'
+instruction. The '``prefetchu.*``' instruction brings the cache line
containing the specified generic address into the specified uniform cache level.
If no address space is specified, it is assumed to be generic address. The intrinsic
uses and eviction priority which can be accessed by the '``.level::eviction_priority``' modifier.
diff --git a/llvm/docs/ReleaseNotes.md b/llvm/docs/ReleaseNotes.md
index 0c49fc8..8171058 100644
--- a/llvm/docs/ReleaseNotes.md
+++ b/llvm/docs/ReleaseNotes.md
@@ -56,6 +56,10 @@ Makes programs 10x faster by doing Special New Thing.
Changes to the LLVM IR
----------------------
+* The `ptrtoaddr` instruction was introduced. This instruction returns the
+ address component of a pointer-typed value and, unlike `ptrtoint`, does not
+ capture provenance ([#125687](https://github.com/llvm/llvm-project/pull/125687)).
+
Changes to LLVM infrastructure
------------------------------
@@ -104,6 +108,10 @@ Changes to the PowerPC Backend
Changes to the RISC-V Backend
-----------------------------
+* `llvm-objdump` now has basic support for switching between disassembling code
+ and data using mapping symbols such as `$x` and `$d`. Switching architectures
+ using `$x` with an architecture string suffix is not yet supported.
+
Changes to the WebAssembly Backend
----------------------------------
diff --git a/llvm/docs/SourceLevelDebugging.rst b/llvm/docs/SourceLevelDebugging.rst
index dfc8c53e..ea27ee5b 100644
--- a/llvm/docs/SourceLevelDebugging.rst
+++ b/llvm/docs/SourceLevelDebugging.rst
@@ -34,7 +34,7 @@ important ones are:
the source-level-language.
* Source-level languages are often **widely** different from one another.
- LLVM should not put any restrictions of the flavor of the source-language,
+ LLVM should not put any restrictions on the flavor of the source-language,
and the debugging information should work with any language.
* With code generator support, it should be possible to use an LLVM compiler
@@ -74,10 +74,10 @@ from and inspired by DWARF, but it is feasible to translate into other target
debug info formats such as STABS.
SamplePGO (also known as `AutoFDO <https://gcc.gnu.org/wiki/AutoFDO>`_)
-is a variant of profile guided optimizations which uses hardware sampling based
+is a variant of profile-guided optimizations which uses hardware sampling based
profilers to collect branch frequency data with low overhead in production
environments. It relies on debug information to associate profile information
-to LLVM IR which is then used to guide optimization heuristics. Maintaining
+with LLVM IR which is then used to guide optimization heuristics. Maintaining
deterministic and distinct source locations is necessary to maximize the
accuracy of mapping hardware sample counts to LLVM IR. For example, DWARF
`discriminators <https://wiki.dwarfstd.org/Path_Discriminators.md>`_ allow
@@ -334,7 +334,7 @@ performs the assignment, and the destination address.
The first three arguments are the same as for a ``#dbg_value``. The fourth
argument is a ``DIAssignID`` used to reference a store. The fifth is the
destination of the store, the sixth is a `complex
-expression <LangRef.html#diexpression>`_ that modfies it, and the seventh is a
+expression <LangRef.html#diexpression>`_ that modifies it, and the seventh is a
`source location <LangRef.html#dilocation>`_.
See :doc:`AssignmentTracking` for more info.
@@ -512,7 +512,7 @@ Here ``!13`` is metadata providing `location information
information parameter to the records indicates that the variable ``X`` is
declared at line number 2 at a function level scope in function ``foo``.
-Now lets take another example.
+Now, let's take another example.
.. code-block:: llvm
@@ -532,14 +532,14 @@ Here ``!18`` indicates that ``Z`` is declared at line number 5 and column
number 11 inside of lexical scope ``!17``. The lexical scope itself resides
inside of subprogram ``!4`` described above.
-The scope information attached with each instruction provides a straightforward
+The scope information attached to each instruction provides a straightforward
way to find instructions covered by a scope.
Object lifetime in optimized code
=================================
In the example above, every variable assignment uniquely corresponds to a
-memory store to the variable's position on the stack. However in heavily
+memory store to the variable's position on the stack. However, in heavily
optimized code LLVM promotes most variables into SSA values, which can
eventually be placed in physical registers or memory locations. To track SSA
values through compilation, when objects are promoted to SSA values a
@@ -628,7 +628,7 @@ perhaps, be optimized into the following code:
}
What ``#dbg_value`` records should be placed to represent the original variable
-locations in this code? Unfortunately the second, third and fourth
+locations in this code? Unfortunately the second, third, and fourth
#dbg_values for ``!1`` in the source function have had their operands
(%tval, %fval, %merge) optimized out. Assuming we cannot recover them, we
might consider this placement of #dbg_values:
@@ -696,7 +696,7 @@ How variable location metadata is transformed during CodeGen
LLVM preserves debug information throughout mid-level and backend passes,
ultimately producing a mapping between source-level information and
instruction ranges. This
-is relatively straightforwards for line number information, as mapping
+is relatively straightforward for line number information, as mapping
instructions to line numbers is a simple association. For variable locations
however the story is more complex. As each ``#dbg_value`` record
represents a source-level assignment of a value to a source variable, the
@@ -710,7 +710,7 @@ location fidelity are:
2. Register allocation
3. Block layout
-each of which are discussed below. In addition, instruction scheduling can
+each of which is discussed below. In addition, instruction scheduling can
significantly change the ordering of the program, and occurs in a number of
different passes.
@@ -782,13 +782,13 @@ And has the following operands:
location operands, which may take any of the same values as the first
operand of the ``DBG_VALUE`` instruction above. These variable location
operands are inserted into the final DWARF Expression in positions indicated
- by the DW_OP_LLVM_arg operator in the `DIExpression
+ by the ``DW_OP_LLVM_arg`` operator in the `DIExpression
<LangRef.html#diexpression>`_.
The position at which the DBG_VALUEs are inserted should correspond to the
positions of their matching ``#dbg_value`` records in the IR block. As
with optimization, LLVM aims to preserve the order in which variable
-assignments occurred in the source program. However SelectionDAG performs some
+assignments occurred in the source program. However, SelectionDAG performs some
instruction scheduling, which can reorder assignments (discussed below).
Function parameter locations are moved to the beginning of the function if
they're not already, to ensure they're immediately available on function entry.
@@ -855,19 +855,19 @@ If one compiles this IR with ``llc -o - -start-after=codegen-prepare -stop-after
$eax = COPY %8, debug-location !5
RET 0, $eax, debug-location !5
-Observe first that there is a DBG_VALUE instruction for every ``#dbg_value``
+Observe first that there is a ``DBG_VALUE`` instruction for every ``#dbg_value``
record in the source IR, ensuring no source level assignments go missing.
Then consider the different ways in which variable locations have been recorded:
* For the first #dbg_value an immediate operand is used to record a zero value.
-* The #dbg_value of the PHI instruction leads to a DBG_VALUE of virtual register
+* The #dbg_value of the PHI instruction leads to a ``DBG_VALUE`` of virtual register
``%0``.
* The first GEP has its effect folded into the first load instruction
(as a 4-byte offset), but the variable location is salvaged by folding
- the GEPs effect into the DIExpression.
+  the GEP's effect into the ``DIExpression``.
* The second GEP is also folded into the corresponding load. However, it is
insufficiently simple to be salvaged, and is emitted as a ``$noreg``
- DBG_VALUE, indicating that the variable takes on an undefined location.
+ ``DBG_VALUE``, indicating that the variable takes on an undefined location.
* The final #dbg_value has its Value placed in virtual register ``%1``.
Instruction Scheduling
@@ -880,7 +880,7 @@ case the instruction sequence could be completely reversed. In such
circumstances LLVM follows the principle applied to optimizations, that it is
better for the debugger not to display any state than a misleading state.
Thus, whenever instructions are advanced in order of execution, any
-corresponding DBG_VALUE is kept in its original position, and if an instruction
+corresponding ``DBG_VALUE`` is kept in its original position, and if an instruction
is delayed then the variable is given an undefined location for the duration
of the delay. To illustrate, consider this pseudo-MIR:
@@ -893,7 +893,7 @@ of the delay. To illustrate, consider this pseudo-MIR:
%7:gr32 = SUB32rr %6, %5, implicit-def dead $eflags
DBG_VALUE %7, $noreg, !5, !6
-Imagine that the SUB32rr were moved forward to give us the following MIR:
+Imagine that the ``SUB32rr`` were moved forward to give us the following MIR:
.. code-block:: text
@@ -905,13 +905,13 @@ Imagine that the SUB32rr were moved forward to give us the following MIR:
DBG_VALUE %7, $noreg, !5, !6
In this circumstance LLVM would leave the MIR as shown above. Were we to move
-the DBG_VALUE of virtual register %7 upwards with the SUB32rr, we would re-order
+the ``DBG_VALUE`` of virtual register %7 upwards with the ``SUB32rr``, we would re-order
assignments and introduce a new state of the program. Whereas with the solution
above, the debugger will see one fewer combination of variable values, because
``!3`` and ``!5`` will change value at the same time. This is preferred over
misrepresenting the original program.
-In comparison, if one sunk the MOV32rm, LLVM would produce the following:
+In comparison, if one sunk the ``MOV32rm``, LLVM would produce the following:
.. code-block:: text
@@ -924,10 +924,10 @@ In comparison, if one sunk the MOV32rm, LLVM would produce the following:
DBG_VALUE %1, $noreg, !1, !2
Here, to avoid presenting a state in which the first assignment to ``!1``
-disappears, the DBG_VALUE at the top of the block assigns the variable the
+disappears, the ``DBG_VALUE`` at the top of the block assigns the variable the
undefined location, until its value is available at the end of the block where
-an additional DBG_VALUE is added. Were any other DBG_VALUE for ``!1`` to occur
-in the instructions that the MOV32rm was sunk past, the DBG_VALUE for ``%1``
+an additional ``DBG_VALUE`` is added. Were any other ``DBG_VALUE`` for ``!1`` to occur
+in the instructions that the ``MOV32rm`` was sunk past, the ``DBG_VALUE`` for ``%1``
would be dropped and the debugger would never observe it in the variable. This
accurately reflects that the value is not available during the corresponding
portion of the original program.
@@ -937,13 +937,13 @@ Variable locations during Register Allocation
To avoid debug instructions interfering with the register allocator, the
LiveDebugVariables pass extracts variable locations from a MIR function and
-deletes the corresponding DBG_VALUE instructions. Some localized copy
+deletes the corresponding ``DBG_VALUE`` instructions. Some localized copy
propagation is performed within blocks. After register allocation, the
-VirtRegRewriter pass re-inserts DBG_VALUE instructions in their original
+VirtRegRewriter pass re-inserts ``DBG_VALUE`` instructions in their original
positions, translating virtual register references into their physical
machine locations. To avoid encoding incorrect variable locations, in this
-pass any DBG_VALUE of a virtual register that is not live, is replaced by
-the undefined location. The LiveDebugVariables may insert redundant DBG_VALUEs
+pass any ``DBG_VALUE`` of a virtual register that is not live is replaced by
+the undefined location. The LiveDebugVariables pass may insert redundant
+``DBG_VALUE`` instructions
because of virtual register rewriting. These will be subsequently removed by
the RemoveRedundantDebugValues pass.
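For illustration, consider a small hypothetical fragment (not taken from a real
compilation), assuming the register allocator assigns ``%2`` to ``$eax``:

.. code-block:: text

  ; Before register allocation, as produced by instruction selection:
  %2:gr32 = MOV32ri 0
  DBG_VALUE %2, $noreg, !1, !2

  ; After VirtRegRewriter re-inserts the record with the virtual register
  ; rewritten to its physical location:
  $eax = MOV32ri 0
  DBG_VALUE $eax, $noreg, !1, !2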
@@ -956,11 +956,11 @@ LiveDebugValues pass runs to achieve two aims:
* To propagate the location of variables through copies and register spills,
* For every block, to record every valid variable location in that block.
-After this pass the DBG_VALUE instruction changes meaning: rather than
+After this pass the ``DBG_VALUE`` instruction changes meaning: rather than
corresponding to a source-level assignment where the variable may change value,
it asserts the location of a variable in a block, and loses effect outside the
block. Propagating variable locations through copies and spills is
-straightforwards: determining the variable location in every basic block
+straightforward; determining the variable location in every basic block, however,
requires the consideration of control flow. Consider the following IR, which
presents several difficulties:
@@ -1021,9 +1021,9 @@ predecessors then that location is propagated into the successor. If the
predecessor locations disagree, the location becomes undefined.
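The join performed on block entry can be summarised with a short sketch. This
is illustrative pseudocode only, not the actual LiveDebugValues implementation;
``Location`` and ``OutLocMap`` are hypothetical stand-ins for the pass's
internal tracking of machine locations, where ``OutLocMap::lookup`` returns the
location recorded at the end of a predecessor block, if any:

.. code-block:: c++

  // Compute the location of Var on entry to MBB, or std::nullopt if the
  // variable must be considered undefined there.
  std::optional<Location> joinPredecessors(const MachineBasicBlock &MBB,
                                           const OutLocMap &OutLocs,
                                           const DebugVariable &Var) {
    std::optional<Location> Joined;
    for (const MachineBasicBlock *Pred : MBB.predecessors()) {
      std::optional<Location> PredLoc = OutLocs.lookup(Pred, Var);
      if (!PredLoc)
        return std::nullopt; // Unknown in some predecessor.
      if (Joined && *Joined != *PredLoc)
        return std::nullopt; // Predecessors disagree.
      Joined = PredLoc;
    }
    return Joined; // All predecessors agree on a single location.
  }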
Once LiveDebugValues has run, every block should have all valid variable
-locations described by DBG_VALUE instructions within the block. Very little
+locations described by ``DBG_VALUE`` instructions within the block. Very little
effort is then required by supporting classes (such as
-DbgEntityHistoryCalculator) to build a map of each instruction to every
+``DbgEntityHistoryCalculator``) to build a map of each instruction to every
valid variable location, without the need to consider control flow. From
the example above, it is otherwise difficult to determine that the location
of variable ``!30`` should flow "up" into block ``%bb1``, but that the location
@@ -1057,7 +1057,7 @@ helper functions in ``lib/IR/DIBuilder.cpp``.
C/C++ source file information
-----------------------------
-``llvm::Instruction`` provides easy access to metadata attached with an
+``llvm::Instruction`` provides easy access to metadata attached to an
instruction. One can extract line number information encoded in LLVM IR using
``Instruction::getDebugLoc()`` and ``DILocation::getLine()``.
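For example (a minimal sketch, assuming ``I`` is a reference to an
``llvm::Instruction`` that carries a debug location):

.. code-block:: c++

  if (DILocation *Loc = I.getDebugLoc()) { // DebugLoc converts to DILocation*.
    unsigned Line = Loc->getLine();
    StringRef File = Loc->getFilename();
    StringRef Dir = Loc->getDirectory();
    bool ImplicitCode = Loc->isImplicitCode();
  }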
@@ -1081,7 +1081,7 @@ added by the front-end but doesn't correspond to source code written by the user
}
At the end of the scope the MyObject's destructor is called but it isn't written
-explicitly. This information is useful to avoid to have counters on brackets when
+explicitly. This information is useful to avoid having counters on brackets when
collecting code coverage.
C/C++ global variable information
@@ -1147,11 +1147,11 @@ a C/C++ front-end would generate the following descriptors:
!8 = !{!"clang version 4.0.0"}
-The align value in DIGlobalVariable description specifies variable alignment in
-case it was forced by C11 _Alignas(), C++11 alignas() keywords or compiler
-attribute __attribute__((aligned ())). In other case (when this field is missing)
+The align value in the ``DIGlobalVariable`` description specifies the variable
+alignment in case it was forced by the C11 ``_Alignas()`` or C++11 ``alignas()``
+keyword, or by the compiler attribute ``__attribute__((aligned()))``. Otherwise
+(when this field is missing)
alignment is considered default. This is used when producing DWARF output
-for DW_AT_alignment value.
+for the ``DW_AT_alignment`` value.
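For example, the following (illustrative) C declaration forces a non-default
alignment; the frontend then records the forced alignment on the corresponding
``DIGlobalVariable`` and it is emitted as ``DW_AT_alignment``:

.. code-block:: c

  #include <stdalign.h>

  /* Forced to 64-byte alignment rather than the natural alignment of int. */
  _Alignas(64) int aligned_var;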
C/C++ function information
--------------------------
@@ -1200,7 +1200,7 @@ Given a class declaration with copy constructor declared as deleted:
foo(const foo&) = delete;
};
-A C++ frontend would generate following:
+A C++ frontend would generate the following:
.. code-block:: text
@@ -1247,7 +1247,7 @@ and this will materialize an additional DWARF attribute as:
...
DW_AT_elemental [DW_FORM_flag_present] (true)
-There are a few DWARF tags defined to represent Fortran specific constructs i.e DW_TAG_string_type for representing Fortran character(n). In LLVM this is represented as DIStringType.
+There are a few DWARF tags defined to represent Fortran-specific constructs, e.g. ``DW_TAG_string_type`` for representing Fortran ``character(n)``. In LLVM, this is represented as ``DIStringType``.
.. code-block:: fortran
@@ -1260,7 +1260,7 @@ a Fortran front-end would generate the following descriptors:
!DILocalVariable(name: "string", arg: 1, scope: !10, file: !3, line: 4, type: !15)
!DIStringType(name: "character(*)!2", stringLength: !16, stringLengthExpression: !DIExpression(), size: 32)
-A fortran deferred-length character can also contain the information of raw storage of the characters in addition to the length of the string. This information is encoded in the stringLocationExpression field. Based on this information, DW_AT_data_location attribute is emitted in a DW_TAG_string_type debug info.
+A Fortran deferred-length character can also carry information about the raw storage of the characters in addition to the length of the string. This information is encoded in the ``stringLocationExpression`` field. Based on this information, the ``DW_AT_data_location`` attribute is emitted in the ``DW_TAG_string_type`` debug info entry.
!DIStringType(name: "character(*)!2", stringLengthExpression: !DIExpression(), stringLocationExpression: !DIExpression(DW_OP_push_object_address, DW_OP_deref), size: 32)
@@ -1300,28 +1300,28 @@ calls. This descriptor results in the following DWARF tag:
Debugging information format
============================
-Debugging Information Extension for Objective C Properties
+Debugging Information Extension for Objective-C Properties
----------------------------------------------------------
Introduction
^^^^^^^^^^^^
-Objective C provides a simpler way to declare and define accessor methods using
+Objective-C provides a simpler way to declare and define accessor methods using
declared properties. The language provides features to declare a property and
to let the compiler synthesize accessor methods.
-The debugger lets developer inspect Objective C interfaces and their instance
+The debugger lets developers inspect Objective-C interfaces and their instance
variables and class variables. However, the debugger does not know anything
-about the properties defined in Objective C interfaces. The debugger consumes
+about the properties defined in Objective-C interfaces. The debugger consumes
information generated by the compiler in DWARF format. The format does not support
-encoding of Objective C properties. This proposal describes DWARF extensions to
-encode Objective C properties, which the debugger can use to let developers
-inspect Objective C properties.
+encoding of Objective-C properties. This proposal describes DWARF extensions to
+encode Objective-C properties, which the debugger can use to let developers
+inspect Objective-C properties.
Proposal
^^^^^^^^
-Objective C properties exist separately from class members. A property can be
+Objective-C properties exist separately from class members. A property can be
defined only by "setter" and "getter" selectors, and be calculated anew on each
access. Or a property can just be a direct access to some declared ivar.
Finally it can have an ivar "automatically synthesized" for it by the compiler,
@@ -1397,7 +1397,7 @@ don't need to know this convention, since we are given the name of the ivar
directly.
Also, it is common practice in ObjC to have different property declarations in
-the @interface and @implementation - e.g. to provide a read-only property in
+the ``@interface`` and ``@implementation`` - e.g. to provide a read-only property in
the interface, and a read-write interface in the implementation. In that case,
the compiler should emit whichever property declaration will be in force in the
current translation unit.
@@ -1624,24 +1624,24 @@ The BUCKETS are an array of offsets to DATA for each hash:
So for ``bucket[3]`` in the example above, we have an offset into the table
0x000034f0 which points to a chain of entries for the bucket. Each bucket must
-contain a next pointer, full 32 bit hash value, the string itself, and the data
+contain a next pointer, full 32-bit hash value, the string itself, and the data
for the current string value.
.. code-block:: none
.------------.
0x000034f0: | 0x00003500 | next pointer
- | 0x12345678 | 32 bit hash
+ | 0x12345678 | 32-bit hash
| "erase" | string value
| data[n] | HashData for this bucket
|------------|
0x00003500: | 0x00003550 | next pointer
- | 0x29273623 | 32 bit hash
+ | 0x29273623 | 32-bit hash
| "dump" | string value
| data[n] | HashData for this bucket
|------------|
0x00003550: | 0x00000000 | next pointer
- | 0x82638293 | 32 bit hash
+ | 0x82638293 | 32-bit hash
| "main" | string value
| data[n] | HashData for this bucket
`------------'
@@ -1650,17 +1650,17 @@ The problem with this layout for debuggers is that we need to optimize for the
negative lookup case where the symbol we're searching for is not present. So
if we were to lookup "``printf``" in the table above, we would make a 32-bit
hash for "``printf``", it might match ``bucket[3]``. We would need to go to
-the offset 0x000034f0 and start looking to see if our 32 bit hash matches. To
+the offset 0x000034f0 and start looking to see if our 32-bit hash matches. To
do so, we need to read the next pointer, then read the hash, compare it, and
skip to the next bucket. Each time we are skipping many bytes in memory and
-touching new pages just to do the compare on the full 32 bit hash. All of
+touching new pages just to do the compare on the full 32-bit hash. All of
these accesses then tell us that we didn't have a match.
Name Hash Tables
""""""""""""""""
-To solve the issues mentioned above we have structured the hash tables a bit
-differently: a header, buckets, an array of all unique 32 bit hash values,
+To solve the issues mentioned above, we have structured the hash tables a bit
+differently: a header, buckets, an array of all unique 32-bit hash values,
followed by an array of hash value data offsets, one for each hash value, then
the data for all hash values:
@@ -1679,11 +1679,11 @@ the data for all hash values:
`-------------'
The ``BUCKETS`` in the name tables are an index into the ``HASHES`` array. By
-making all of the full 32 bit hash values contiguous in memory, we allow
+making all of the full 32-bit hash values contiguous in memory, we allow
ourselves to efficiently check for a match while touching as little memory as
-possible. Most often checking the 32 bit hash values is as far as the lookup
+possible. Most often checking the 32-bit hash values is as far as the lookup
goes. If it does match, it usually is a match with no collisions. So for a
-table with "``n_buckets``" buckets, and "``n_hashes``" unique 32 bit hash
+table with "``n_buckets``" buckets, and "``n_hashes``" unique 32-bit hash
values, we can clarify the contents of the ``BUCKETS``, ``HASHES`` and
``OFFSETS`` as:
@@ -1698,16 +1698,16 @@ values, we can clarify the contents of the ``BUCKETS``, ``HASHES`` and
| HEADER.header_data_len | uint32_t
| HEADER_DATA | HeaderData
|-------------------------|
- | BUCKETS | uint32_t[n_buckets] // 32 bit hash indexes
+ | BUCKETS | uint32_t[n_buckets] // 32-bit hash indexes
|-------------------------|
- | HASHES | uint32_t[n_hashes] // 32 bit hash values
+ | HASHES | uint32_t[n_hashes] // 32-bit hash values
|-------------------------|
- | OFFSETS | uint32_t[n_hashes] // 32 bit offsets to hash value data
+ | OFFSETS | uint32_t[n_hashes] // 32-bit offsets to hash value data
|-------------------------|
| ALL HASH DATA |
`-------------------------'
-So taking the exact same data from the standard hash example above we end up
+So taking the exact same data from the standard hash example above, we end up
with:
.. code-block:: none
@@ -1761,7 +1761,7 @@ with:
| |
|------------|
0x000034f0: | 0x00001203 | .debug_str ("erase")
- | 0x00000004 | A 32 bit array count - number of HashData with name "erase"
+ | 0x00000004 | A 32-bit array count - number of HashData with name "erase"
| 0x........ | HashData[0]
| 0x........ | HashData[1]
| 0x........ | HashData[2]
@@ -1769,18 +1769,18 @@ with:
| 0x00000000 | String offset into .debug_str (terminate data for hash)
|------------|
0x00003500: | 0x00001203 | String offset into .debug_str ("collision")
- | 0x00000002 | A 32 bit array count - number of HashData with name "collision"
+ | 0x00000002 | A 32-bit array count - number of HashData with name "collision"
| 0x........ | HashData[0]
| 0x........ | HashData[1]
| 0x00001203 | String offset into .debug_str ("dump")
- | 0x00000003 | A 32 bit array count - number of HashData with name "dump"
+ | 0x00000003 | A 32-bit array count - number of HashData with name "dump"
| 0x........ | HashData[0]
| 0x........ | HashData[1]
| 0x........ | HashData[2]
| 0x00000000 | String offset into .debug_str (terminate data for hash)
|------------|
0x00003550: | 0x00001203 | String offset into .debug_str ("main")
- | 0x00000009 | A 32 bit array count - number of HashData with name "main"
+ | 0x00000009 | A 32-bit array count - number of HashData with name "main"
| 0x........ | HashData[0]
| 0x........ | HashData[1]
| 0x........ | HashData[2]
@@ -1795,13 +1795,13 @@ with:
So we still have all of the same data, we just organize it more efficiently for
debugger lookup. If we repeat the same "``printf``" lookup from above, we
-would hash "``printf``" and find it matches ``BUCKETS[3]`` by taking the 32 bit
+would hash "``printf``" and find it matches ``BUCKETS[3]`` by taking the 32-bit
hash value modulo ``n_buckets``. ``BUCKETS[3]`` contains "6", which
is the index into the ``HASHES`` table. We would then compare any consecutive
-32 bit hashes values in the ``HASHES`` array as long as the hashes would be in
+32-bit hash values in the ``HASHES`` array as long as the hashes would be in
``BUCKETS[3]``. We do this by verifying that each subsequent hash value modulo
``n_buckets`` is still 3. In the case of a failed lookup we would access the
-memory for ``BUCKETS[3]``, and then compare a few consecutive 32 bit hashes
+memory for ``BUCKETS[3]``, and then compare a few consecutive 32-bit hashes
before we know that we have no match. We don't end up marching through
multiple words of memory and we really keep the number of processor data cache
lines being accessed as small as possible.
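The lookup described above can be sketched in C. This is illustrative only:
``hash_function``, ``header``, ``BUCKETS``, ``HASHES`` and ``OFFSETS`` stand
for the data laid out in the preceding diagrams, and handling of empty buckets
is omitted:

.. code-block:: c

  uint32_t hash = hash_function(name);
  uint32_t bucket = hash % header.bucket_count; /* which bucket to probe */

  /* BUCKETS[bucket] is the index of the first HASHES entry for this bucket;
     scan consecutive entries while they still hash into the same bucket. */
  for (uint32_t i = BUCKETS[bucket];
       i < header.hashes_count && HASHES[i] % header.bucket_count == bucket;
       ++i) {
    if (HASHES[i] == hash) {
      uint32_t data_offset = OFFSETS[i]; /* start of the HashData chunk */
      /* ... compare strings and read HashData entries at data_offset ... */
    }
  }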
@@ -1842,10 +1842,10 @@ header is:
HeaderData header_data; // Implementation specific header data
};
-The header starts with a 32 bit "``magic``" value which must be ``'HASH'``
+The header starts with a 32-bit "``magic``" value which must be ``'HASH'``
encoded as an ASCII integer. This allows the detection of the start of the
hash table and also allows the table's byte order to be determined so the table
-can be correctly extracted. The "``magic``" value is followed by a 16 bit
+can be correctly extracted. The "``magic``" value is followed by a 16-bit
``version`` number which allows the table to be revised and modified in the
future. The current version number is 1. ``hash_function`` is a ``uint16_t``
enumeration that specifies which hash function was used to produce this table.
@@ -1858,8 +1858,8 @@ The current values for the hash function enumerations include:
eHashFunctionDJB = 0u, // Daniel J Bernstein hash function
};
-``bucket_count`` is a 32 bit unsigned integer that represents how many buckets
-are in the ``BUCKETS`` array. ``hashes_count`` is the number of unique 32 bit
+``bucket_count`` is a 32-bit unsigned integer that represents how many buckets
+are in the ``BUCKETS`` array. ``hashes_count`` is the number of unique 32-bit
hash values that are in the ``HASHES`` array, which is also the number of offsets
contained in the ``OFFSETS`` array. ``header_data_len`` specifies the size
in bytes of the ``HeaderData`` that is filled in by specialized versions of
@@ -1875,12 +1875,12 @@ The header is followed by the buckets, hashes, offsets, and hash value data.
struct FixedTable
{
uint32_t buckets[Header.bucket_count]; // An array of hash indexes into the "hashes[]" array below
- uint32_t hashes [Header.hashes_count]; // Every unique 32 bit hash for the entire table is in this table
+ uint32_t hashes [Header.hashes_count]; // Every unique 32-bit hash for the entire table is in this table
uint32_t offsets[Header.hashes_count]; // An offset that corresponds to each item in the "hashes[]" array above
};
-``buckets`` is an array of 32 bit indexes into the ``hashes`` array. The
-``hashes`` array contains all of the 32 bit hash values for all names in the
+``buckets`` is an array of 32-bit indexes into the ``hashes`` array. The
+``hashes`` array contains all of the 32-bit hash values for all names in the
hash table. Each hash in the ``hashes`` table has an offset in the ``offsets``
array that points to the data for the hash value.
@@ -1966,23 +1966,23 @@ array to be:
HeaderData.atoms[0].type = eAtomTypeDIEOffset;
HeaderData.atoms[0].form = DW_FORM_data4;
-This defines the contents to be the DIE offset (eAtomTypeDIEOffset) that is
-encoded as a 32 bit value (DW_FORM_data4). This allows a single name to have
+This defines the contents to be the DIE offset (``eAtomTypeDIEOffset``) that is
+encoded as a 32-bit value (``DW_FORM_data4``). This allows a single name to have
multiple matching DIEs in a single file, which could come up with an inlined
function for instance. Future tables could include more information about the
DIE such as flags indicating if the DIE is a function, method, block,
or inlined.
-The KeyType for the DWARF table is a 32 bit string table offset into the
+The KeyType for the DWARF table is a 32-bit string table offset into the
".debug_str" table. The ".debug_str" is the string table for the DWARF which
may already contain copies of all of the strings. This helps make sure, with
help from the compiler, that we reuse the strings between all of the DWARF
sections and keeps the hash table size down. Another benefit to having the
-compiler generate all strings as DW_FORM_strp in the debug info, is that
+compiler generate all strings as ``DW_FORM_strp`` in the debug info is that
DWARF parsing can be made much faster.
After a lookup is made, we get an offset into the hash data. The hash data
-needs to be able to deal with 32 bit hash collisions, so the chunk of data
+needs to be able to deal with 32-bit hash collisions, so the chunk of data
at the offset in the hash data consists of a triple:
.. code-block:: c
@@ -1992,7 +1992,7 @@ at the offset in the hash data consists of a triple:
HashData[hash_data_count]
If "str_offset" is zero, then the bucket contents are done. 99.9% of the
-hash data chunks contain a single item (no 32 bit hash collision):
+hash data chunks contain a single item (no 32-bit hash collision):
.. code-block:: none
@@ -2025,7 +2025,7 @@ If there are collisions, you will have multiple valid string offsets:
`------------'
Current testing with real world C++ binaries has shown that there is around 1
-32 bit hash collision per 100,000 name entries.
+32-bit hash collision per 100,000 name entries.
Contents
^^^^^^^^
@@ -2114,7 +2114,7 @@ We get a few type DIEs:
AT_type( {0x00000067} ( int ) )
AT_byte_size( 0x08 )
-The DW_TAG_pointer_type is not included because it does not have a ``DW_AT_name``.
+The ``DW_TAG_pointer_type`` is not included because it does not have a ``DW_AT_name``.
"``.apple_namespaces``" section should contain all ``DW_TAG_namespace`` DIEs.
If we run into a namespace that has no name this is an anonymous namespace, and
diff --git a/llvm/include/llvm-c/Core.h b/llvm/include/llvm-c/Core.h
index d645646..9879d0d 100644
--- a/llvm/include/llvm-c/Core.h
+++ b/llvm/include/llvm-c/Core.h
@@ -111,6 +111,7 @@ typedef enum {
LLVMFPTrunc = 37,
LLVMFPExt = 38,
LLVMPtrToInt = 39,
+ LLVMPtrToAddr = 69,
LLVMIntToPtr = 40,
LLVMBitCast = 41,
LLVMAddrSpaceCast = 60,
diff --git a/llvm/include/llvm/ADT/DenseMap.h b/llvm/include/llvm/ADT/DenseMap.h
index ea5eac4..1f27213 100644
--- a/llvm/include/llvm/ADT/DenseMap.h
+++ b/llvm/include/llvm/ADT/DenseMap.h
@@ -454,28 +454,28 @@ protected:
return NextPowerOf2(NumEntries * 4 / 3 + 1);
}
- void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
+ void moveFromOldBuckets(iterator_range<BucketT *> OldBuckets) {
initEmpty();
// Insert all the old elements.
const KeyT EmptyKey = getEmptyKey();
const KeyT TombstoneKey = getTombstoneKey();
- for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
- if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
- !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
+ for (BucketT &B : OldBuckets) {
+ if (!KeyInfoT::isEqual(B.getFirst(), EmptyKey) &&
+ !KeyInfoT::isEqual(B.getFirst(), TombstoneKey)) {
// Insert the key/value into the new table.
BucketT *DestBucket;
- bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
+ bool FoundVal = LookupBucketFor(B.getFirst(), DestBucket);
(void)FoundVal; // silence warning.
assert(!FoundVal && "Key already in new map?");
- DestBucket->getFirst() = std::move(B->getFirst());
- ::new (&DestBucket->getSecond()) ValueT(std::move(B->getSecond()));
+ DestBucket->getFirst() = std::move(B.getFirst());
+ ::new (&DestBucket->getSecond()) ValueT(std::move(B.getSecond()));
incrementNumEntries();
// Free the value.
- B->getSecond().~ValueT();
+ B.getSecond().~ValueT();
}
- B->getFirst().~KeyT();
+ B.getFirst().~KeyT();
}
}
@@ -867,7 +867,8 @@ public:
return;
}
- this->moveFromOldBuckets(OldBuckets, OldBuckets + OldNumBuckets);
+ this->moveFromOldBuckets(
+ llvm::make_range(OldBuckets, OldBuckets + OldNumBuckets));
// Free the old table.
deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets,
@@ -952,6 +953,9 @@ class SmallDenseMap
struct LargeRep {
BucketT *Buckets;
unsigned NumBuckets;
+ iterator_range<BucketT *> buckets() {
+ return llvm::make_range(Buckets, Buckets + NumBuckets);
+ }
};
/// A "union" of an inline bucket array and the struct representing
@@ -1129,7 +1133,7 @@ public:
Small = false;
new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
}
- this->moveFromOldBuckets(TmpBegin, TmpEnd);
+ this->moveFromOldBuckets(llvm::make_range(TmpBegin, TmpEnd));
return;
}
@@ -1141,8 +1145,7 @@ public:
new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
}
- this->moveFromOldBuckets(OldRep.Buckets,
- OldRep.Buckets + OldRep.NumBuckets);
+ this->moveFromOldBuckets(OldRep.buckets());
// Free the old table.
deallocate_buffer(OldRep.Buckets, sizeof(BucketT) * OldRep.NumBuckets,
diff --git a/llvm/include/llvm/ADT/SmallPtrSet.h b/llvm/include/llvm/ADT/SmallPtrSet.h
index 892040e..f0f28ac3 100644
--- a/llvm/include/llvm/ADT/SmallPtrSet.h
+++ b/llvm/include/llvm/ADT/SmallPtrSet.h
@@ -18,6 +18,7 @@
#include "llvm/ADT/ADL.h"
#include "llvm/ADT/EpochTracker.h"
#include "llvm/ADT/STLForwardCompat.h"
+#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/ReverseIteration.h"
@@ -46,7 +47,7 @@ namespace llvm {
/// sets are often small. In this case, no memory allocation is used, and only
/// light-weight and cache-efficient scanning is used.
///
-/// Large sets use a classic exponentially-probed hash table. Empty buckets are
+/// Large sets use a classic quadratically-probed hash table. Empty buckets are
/// represented with an illegal pointer value (-1) to allow null pointers to be
/// inserted. Tombstones are represented with another illegal pointer value
/// (-2), to allow deletion. The hash table is resized when the table is 3/4 or
@@ -147,17 +148,27 @@ protected:
return isSmall() ? CurArray + NumNonEmpty : CurArray + CurArraySize;
}
+ iterator_range<const void **> small_buckets() {
+ return make_range(CurArray, CurArray + NumNonEmpty);
+ }
+
+ iterator_range<const void *const *> small_buckets() const {
+ return {CurArray, CurArray + NumNonEmpty};
+ }
+
+ iterator_range<const void **> buckets() {
+ return make_range(CurArray, EndPointer());
+ }
+
/// insert_imp - This returns true if the pointer was new to the set, false if
/// it was already in the set. This is hidden from the client so that the
/// derived class can check that the right type of pointer is passed in.
std::pair<const void *const *, bool> insert_imp(const void *Ptr) {
if (isSmall()) {
// Check to see if it is already in the set.
- for (const void **APtr = CurArray, **E = CurArray + NumNonEmpty;
- APtr != E; ++APtr) {
- const void *Value = *APtr;
- if (Value == Ptr)
- return std::make_pair(APtr, false);
+ for (const void *&Bucket : small_buckets()) {
+ if (Bucket == Ptr)
+ return std::make_pair(&Bucket, false);
}
// Nope, there isn't. If we stay small, just 'pushback' now.
@@ -177,10 +188,9 @@ protected:
/// in.
bool erase_imp(const void * Ptr) {
if (isSmall()) {
- for (const void **APtr = CurArray, **E = CurArray + NumNonEmpty;
- APtr != E; ++APtr) {
- if (*APtr == Ptr) {
- *APtr = CurArray[--NumNonEmpty];
+ for (const void *&Bucket : small_buckets()) {
+ if (Bucket == Ptr) {
+ Bucket = CurArray[--NumNonEmpty];
incrementEpoch();
return true;
}
@@ -206,11 +216,9 @@ protected:
const void *const * find_imp(const void * Ptr) const {
if (isSmall()) {
// Linear search for the item.
- for (const void *const *APtr = CurArray, *const *E =
- CurArray + NumNonEmpty;
- APtr != E; ++APtr)
- if (*APtr == Ptr)
- return APtr;
+ for (const void *const &Bucket : small_buckets())
+ if (Bucket == Ptr)
+ return &Bucket;
return EndPointer();
}
@@ -223,10 +231,8 @@ protected:
bool contains_imp(const void *Ptr) const {
if (isSmall()) {
// Linear search for the item.
- const void *const *APtr = CurArray;
- const void *const *E = CurArray + NumNonEmpty;
- for (; APtr != E; ++APtr)
- if (*APtr == Ptr)
+ for (const void *const &Bucket : small_buckets())
+ if (Bucket == Ptr)
return true;
return false;
}
@@ -439,13 +445,12 @@ public:
return Removed;
}
- for (const void **APtr = CurArray, **E = EndPointer(); APtr != E; ++APtr) {
- const void *Value = *APtr;
- if (Value == getTombstoneMarker() || Value == getEmptyMarker())
+ for (const void *&Bucket : buckets()) {
+ if (Bucket == getTombstoneMarker() || Bucket == getEmptyMarker())
continue;
- PtrType Ptr = PtrTraits::getFromVoidPointer(const_cast<void *>(Value));
+ PtrType Ptr = PtrTraits::getFromVoidPointer(const_cast<void *>(Bucket));
if (P(Ptr)) {
- *APtr = getTombstoneMarker();
+ Bucket = getTombstoneMarker();
++NumTombstones;
incrementEpoch();
Removed = true;
diff --git a/llvm/include/llvm/ADT/StringMap.h b/llvm/include/llvm/ADT/StringMap.h
index f839edf..0bf062f 100644
--- a/llvm/include/llvm/ADT/StringMap.h
+++ b/llvm/include/llvm/ADT/StringMap.h
@@ -89,6 +89,10 @@ protected:
/// setup the map as empty.
LLVM_ABI void init(unsigned Size);
+ iterator_range<StringMapEntryBase **> buckets() {
+ return make_range(TheTable, TheTable + NumBuckets);
+ }
+
public:
static constexpr uintptr_t TombstoneIntVal =
static_cast<uintptr_t>(-1)
@@ -198,8 +202,7 @@ public:
// to default values. This is a copy of clear(), but avoids unnecessary
// work not required in the destructor.
if (!empty()) {
- for (unsigned I = 0, E = NumBuckets; I != E; ++I) {
- StringMapEntryBase *Bucket = TheTable[I];
+ for (StringMapEntryBase *Bucket : buckets()) {
if (Bucket && Bucket != getTombstoneVal()) {
static_cast<MapEntryTy *>(Bucket)->Destroy(getAllocator());
}
@@ -398,8 +401,7 @@ public:
// Zap all values, resetting the keys back to non-present (not tombstone),
// which is safe because we're removing all elements.
- for (unsigned I = 0, E = NumBuckets; I != E; ++I) {
- StringMapEntryBase *&Bucket = TheTable[I];
+ for (StringMapEntryBase *&Bucket : buckets()) {
if (Bucket && Bucket != getTombstoneVal()) {
static_cast<MapEntryTy *>(Bucket)->Destroy(getAllocator());
}
diff --git a/llvm/include/llvm/ADT/StringRef.h b/llvm/include/llvm/ADT/StringRef.h
index 0ced1c0..16aca4d 100644
--- a/llvm/include/llvm/ADT/StringRef.h
+++ b/llvm/include/llvm/ADT/StringRef.h
@@ -38,6 +38,8 @@ namespace llvm {
LLVM_ABI bool getAsSignedInteger(StringRef Str, unsigned Radix,
long long &Result);
+ LLVM_ABI unsigned getAutoSenseRadix(StringRef &Str);
+
LLVM_ABI bool consumeUnsignedInteger(StringRef &Str, unsigned Radix,
unsigned long long &Result);
LLVM_ABI bool consumeSignedInteger(StringRef &Str, unsigned Radix,
diff --git a/llvm/include/llvm/Analysis/DXILResource.h b/llvm/include/llvm/Analysis/DXILResource.h
index 9d07bc0..88ac0a1 100644
--- a/llvm/include/llvm/Analysis/DXILResource.h
+++ b/llvm/include/llvm/Analysis/DXILResource.h
@@ -360,9 +360,11 @@ public:
std::tie(RHS.RecordID, RHS.Space, RHS.LowerBound, RHS.Size);
}
bool overlapsWith(const ResourceBinding &RHS) const {
+ if (Space != RHS.Space)
+ return false;
if (Size == UINT32_MAX)
return LowerBound < RHS.LowerBound;
- return Space == RHS.Space && LowerBound + Size - 1 >= RHS.LowerBound;
+ return LowerBound + Size - 1 >= RHS.LowerBound;
}
};
diff --git a/llvm/include/llvm/Analysis/Delinearization.h b/llvm/include/llvm/Analysis/Delinearization.h
index eb775ba..434cfb6 100644
--- a/llvm/include/llvm/Analysis/Delinearization.h
+++ b/llvm/include/llvm/Analysis/Delinearization.h
@@ -112,6 +112,35 @@ void delinearize(ScalarEvolution &SE, const SCEV *Expr,
SmallVectorImpl<const SCEV *> &Subscripts,
SmallVectorImpl<const SCEV *> &Sizes, const SCEV *ElementSize);
+/// Compute the dimensions of a fixed size array from \p Expr and save the results
+/// in \p Sizes.
+bool findFixedSizeArrayDimensions(ScalarEvolution &SE, const SCEV *Expr,
+ SmallVectorImpl<uint64_t> &Sizes,
+ const SCEV *ElementSize);
+
+/// Split this SCEVAddRecExpr into two vectors of SCEVs representing the
+/// subscripts and sizes of an access to a fixed size array. This is a special
+/// case of delinearization for fixed size arrays.
+///
+/// The delinearization is a 2 step process: the first step estimates the sizes
+/// of each dimension of the array. The second step computes the access
+/// functions for the delinearized array:
+///
+/// 1. Compute the array size
+/// 2. Compute the access function: same as normal delinearization
+///
+/// Different from the normal delinearization, this function assumes that NO
+/// terms exist in the \p Expr. In other words, it assumes that all the step
+/// values are constant.
+///
+/// This function is intended to replace getIndexExpressionsFromGEP and
+/// tryDelinearizeFixedSizeImpl. They rely on the GEP source element type, so
+/// they will be removed in the future.
+bool delinearizeFixedSizeArray(ScalarEvolution &SE, const SCEV *Expr,
+ SmallVectorImpl<const SCEV *> &Subscripts,
+ SmallVectorImpl<const SCEV *> &Sizes,
+ const SCEV *ElementSize);
+
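// Usage sketch (illustrative only, under stated assumptions): a caller that
// already has a ScalarEvolution &SE, a Loop *L and a memory instruction I
// might drive delinearizeFixedSizeArray as follows. getLoadStorePointerOperand,
// getSCEVAtScope, getPointerBase, getMinusSCEV and getElementSize are existing
// LLVM helpers; nothing below is part of this header.
//
//   SmallVector<const SCEV *, 4> Subscripts, Sizes;
//   const SCEV *AccessFn = SE.getSCEVAtScope(getLoadStorePointerOperand(&I), L);
//   AccessFn = SE.getMinusSCEV(AccessFn, SE.getPointerBase(AccessFn));
//   if (delinearizeFixedSizeArray(SE, AccessFn, Subscripts, Sizes,
//                                 SE.getElementSize(&I))) {
//     // Subscripts[i] and Sizes[i] now describe dimension i of the array.
//   }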
/// Gathers the individual index expressions from a GEP instruction.
///
/// This function optimistically assumes the GEP references into a fixed size
diff --git a/llvm/include/llvm/Analysis/DependenceAnalysis.h b/llvm/include/llvm/Analysis/DependenceAnalysis.h
index f98bd68..1679596 100644
--- a/llvm/include/llvm/Analysis/DependenceAnalysis.h
+++ b/llvm/include/llvm/Analysis/DependenceAnalysis.h
@@ -47,994 +47,908 @@
#include "llvm/Support/Compiler.h"
namespace llvm {
- class AAResults;
- template <typename T> class ArrayRef;
- class Loop;
- class LoopInfo;
- class SCEVConstant;
- class raw_ostream;
-
- /// Dependence - This class represents a dependence between two memory
- /// memory references in a function. It contains minimal information and
- /// is used in the very common situation where the compiler is unable to
- /// determine anything beyond the existence of a dependence; that is, it
- /// represents a confused dependence (see also FullDependence). In most
- /// cases (for output, flow, and anti dependences), the dependence implies
- /// an ordering, where the source must precede the destination; in contrast,
- /// input dependences are unordered.
- ///
- /// When a dependence graph is built, each Dependence will be a member of
- /// the set of predecessor edges for its destination instruction and a set
- /// if successor edges for its source instruction. These sets are represented
- /// as singly-linked lists, with the "next" fields stored in the dependence
- /// itelf.
- class LLVM_ABI Dependence {
- protected:
- Dependence(Dependence &&) = default;
- Dependence &operator=(Dependence &&) = default;
-
- public:
- Dependence(Instruction *Source, Instruction *Destination,
- const SCEVUnionPredicate &A)
- : Src(Source), Dst(Destination), Assumptions(A) {}
- virtual ~Dependence() = default;
-
- /// Dependence::DVEntry - Each level in the distance/direction vector
- /// has a direction (or perhaps a union of several directions), and
- /// perhaps a distance.
- struct DVEntry {
- enum : unsigned char {
- NONE = 0,
- LT = 1,
- EQ = 2,
- LE = 3,
- GT = 4,
- NE = 5,
- GE = 6,
- ALL = 7
- };
- unsigned char Direction : 3; // Init to ALL, then refine.
- bool Scalar : 1; // Init to true.
- bool PeelFirst : 1; // Peeling the first iteration will break dependence.
- bool PeelLast : 1; // Peeling the last iteration will break the dependence.
- bool Splitable : 1; // Splitting the loop will break dependence.
- const SCEV *Distance = nullptr; // NULL implies no distance available.
- DVEntry()
- : Direction(ALL), Scalar(true), PeelFirst(false), PeelLast(false),
- Splitable(false) {}
+class AAResults;
+template <typename T> class ArrayRef;
+class Loop;
+class LoopInfo;
+class SCEVConstant;
+class raw_ostream;
+
+/// Dependence - This class represents a dependence between two memory
+/// references in a function. It contains minimal information and
+/// is used in the very common situation where the compiler is unable to
+/// determine anything beyond the existence of a dependence; that is, it
+/// represents a confused dependence (see also FullDependence). In most
+/// cases (for output, flow, and anti dependences), the dependence implies
+/// an ordering, where the source must precede the destination; in contrast,
+/// input dependences are unordered.
+///
+/// When a dependence graph is built, each Dependence will be a member of
+/// the set of predecessor edges for its destination instruction and a set
+/// of successor edges for its source instruction. These sets are represented
+/// as singly-linked lists, with the "next" fields stored in the dependence
+/// itself.
+class LLVM_ABI Dependence {
+protected:
+ Dependence(Dependence &&) = default;
+ Dependence &operator=(Dependence &&) = default;
+
+public:
+ Dependence(Instruction *Source, Instruction *Destination,
+ const SCEVUnionPredicate &A)
+ : Src(Source), Dst(Destination), Assumptions(A) {}
+ virtual ~Dependence() = default;
+
+ /// Dependence::DVEntry - Each level in the distance/direction vector
+ /// has a direction (or perhaps a union of several directions), and
+ /// perhaps a distance.
+ struct DVEntry {
+ enum : unsigned char {
+ NONE = 0,
+ LT = 1,
+ EQ = 2,
+ LE = 3,
+ GT = 4,
+ NE = 5,
+ GE = 6,
+ ALL = 7
};
+ unsigned char Direction : 3; // Init to ALL, then refine.
+ bool Scalar : 1; // Init to true.
+ bool PeelFirst : 1; // Peeling the first iteration will break dependence.
+ bool PeelLast : 1; // Peeling the last iteration will break the dependence.
+ bool Splitable : 1; // Splitting the loop will break dependence.
+ const SCEV *Distance = nullptr; // NULL implies no distance available.
+ DVEntry()
+ : Direction(ALL), Scalar(true), PeelFirst(false), PeelLast(false),
+ Splitable(false) {}
+ };
- /// getSrc - Returns the source instruction for this dependence.
- ///
- Instruction *getSrc() const { return Src; }
-
- /// getDst - Returns the destination instruction for this dependence.
- ///
- Instruction *getDst() const { return Dst; }
-
- /// isInput - Returns true if this is an input dependence.
- ///
- bool isInput() const;
-
- /// isOutput - Returns true if this is an output dependence.
- ///
- bool isOutput() const;
-
- /// isFlow - Returns true if this is a flow (aka true) dependence.
- ///
- bool isFlow() const;
-
- /// isAnti - Returns true if this is an anti dependence.
- ///
- bool isAnti() const;
-
- /// isOrdered - Returns true if dependence is Output, Flow, or Anti
- ///
- bool isOrdered() const { return isOutput() || isFlow() || isAnti(); }
+ /// getSrc - Returns the source instruction for this dependence.
+ Instruction *getSrc() const { return Src; }
- /// isUnordered - Returns true if dependence is Input
- ///
- bool isUnordered() const { return isInput(); }
+ /// getDst - Returns the destination instruction for this dependence.
+ Instruction *getDst() const { return Dst; }
- /// isLoopIndependent - Returns true if this is a loop-independent
- /// dependence.
- virtual bool isLoopIndependent() const { return true; }
+ /// isInput - Returns true if this is an input dependence.
+ bool isInput() const;
- /// isConfused - Returns true if this dependence is confused
- /// (the compiler understands nothing and makes worst-case
- /// assumptions).
- virtual bool isConfused() const { return true; }
+ /// isOutput - Returns true if this is an output dependence.
+ bool isOutput() const;
- /// isConsistent - Returns true if this dependence is consistent
- /// (occurs every time the source and destination are executed).
- virtual bool isConsistent() const { return false; }
+ /// isFlow - Returns true if this is a flow (aka true) dependence.
+ bool isFlow() const;
- /// getLevels - Returns the number of common loops surrounding the
- /// source and destination of the dependence.
- virtual unsigned getLevels() const { return 0; }
+ /// isAnti - Returns true if this is an anti dependence.
+ bool isAnti() const;
- /// getDirection - Returns the direction associated with a particular
- /// level.
- virtual unsigned getDirection(unsigned Level) const { return DVEntry::ALL; }
+ /// isOrdered - Returns true if dependence is Output, Flow, or Anti
+ bool isOrdered() const { return isOutput() || isFlow() || isAnti(); }
- /// getDistance - Returns the distance (or NULL) associated with a
- /// particular level.
- virtual const SCEV *getDistance(unsigned Level) const { return nullptr; }
+ /// isUnordered - Returns true if dependence is Input
+ bool isUnordered() const { return isInput(); }
- /// Check if the direction vector is negative. A negative direction
- /// vector means Src and Dst are reversed in the actual program.
- virtual bool isDirectionNegative() const { return false; }
+ /// isLoopIndependent - Returns true if this is a loop-independent
+ /// dependence.
+ virtual bool isLoopIndependent() const { return true; }
- /// If the direction vector is negative, normalize the direction
- /// vector to make it non-negative. Normalization is done by reversing
- /// Src and Dst, plus reversing the dependence directions and distances
- /// in the vector.
- virtual bool normalize(ScalarEvolution *SE) { return false; }
+ /// isConfused - Returns true if this dependence is confused
+ /// (the compiler understands nothing and makes worst-case assumptions).
+ virtual bool isConfused() const { return true; }
- /// isPeelFirst - Returns true if peeling the first iteration from
- /// this loop will break this dependence.
- virtual bool isPeelFirst(unsigned Level) const { return false; }
+ /// isConsistent - Returns true if this dependence is consistent
+ /// (occurs every time the source and destination are executed).
+ virtual bool isConsistent() const { return false; }
- /// isPeelLast - Returns true if peeling the last iteration from
- /// this loop will break this dependence.
- virtual bool isPeelLast(unsigned Level) const { return false; }
+ /// getLevels - Returns the number of common loops surrounding the
+ /// source and destination of the dependence.
+ virtual unsigned getLevels() const { return 0; }
- /// isSplitable - Returns true if splitting this loop will break
- /// the dependence.
- virtual bool isSplitable(unsigned Level) const { return false; }
+ /// getDirection - Returns the direction associated with a particular level.
+ virtual unsigned getDirection(unsigned Level) const { return DVEntry::ALL; }
- /// isScalar - Returns true if a particular level is scalar; that is,
- /// if no subscript in the source or destination mention the induction
- /// variable associated with the loop at this level.
- virtual bool isScalar(unsigned Level) const;
+ /// getDistance - Returns the distance (or NULL) associated with a particular
+ /// level.
+ virtual const SCEV *getDistance(unsigned Level) const { return nullptr; }
- /// getNextPredecessor - Returns the value of the NextPredecessor
- /// field.
- const Dependence *getNextPredecessor() const { return NextPredecessor; }
+ /// Check if the direction vector is negative. A negative direction
+ /// vector means Src and Dst are reversed in the actual program.
+ virtual bool isDirectionNegative() const { return false; }
+
+ /// If the direction vector is negative, normalize the direction
+ /// vector to make it non-negative. Normalization is done by reversing
+ /// Src and Dst, plus reversing the dependence directions and distances
+ /// in the vector.
+ virtual bool normalize(ScalarEvolution *SE) { return false; }
- /// getNextSuccessor - Returns the value of the NextSuccessor
- /// field.
- const Dependence *getNextSuccessor() const { return NextSuccessor; }
+ /// isPeelFirst - Returns true if peeling the first iteration from
+ /// this loop will break this dependence.
+ virtual bool isPeelFirst(unsigned Level) const { return false; }
- /// setNextPredecessor - Sets the value of the NextPredecessor
- /// field.
- void setNextPredecessor(const Dependence *pred) { NextPredecessor = pred; }
+ /// isPeelLast - Returns true if peeling the last iteration from
+ /// this loop will break this dependence.
+ virtual bool isPeelLast(unsigned Level) const { return false; }
- /// setNextSuccessor - Sets the value of the NextSuccessor
- /// field.
- void setNextSuccessor(const Dependence *succ) { NextSuccessor = succ; }
+ /// isSplitable - Returns true if splitting this loop will break the
+ /// dependence.
+ virtual bool isSplitable(unsigned Level) const { return false; }
- /// getRuntimeAssumptions - Returns the runtime assumptions under which this
- /// Dependence relation is valid.
- SCEVUnionPredicate getRuntimeAssumptions() const { return Assumptions; }
+ /// isScalar - Returns true if a particular level is scalar; that is,
+ /// if no subscript in the source or destination mention the induction
+ /// variable associated with the loop at this level.
+ virtual bool isScalar(unsigned Level) const;
+
+ /// getNextPredecessor - Returns the value of the NextPredecessor field.
+ const Dependence *getNextPredecessor() const { return NextPredecessor; }
+
+ /// getNextSuccessor - Returns the value of the NextSuccessor field.
+ const Dependence *getNextSuccessor() const { return NextSuccessor; }
+
+ /// setNextPredecessor - Sets the value of the NextPredecessor
+ /// field.
+ void setNextPredecessor(const Dependence *pred) { NextPredecessor = pred; }
+
+ /// setNextSuccessor - Sets the value of the NextSuccessor field.
+ void setNextSuccessor(const Dependence *succ) { NextSuccessor = succ; }
+
+ /// getRuntimeAssumptions - Returns the runtime assumptions under which this
+ /// Dependence relation is valid.
+ SCEVUnionPredicate getRuntimeAssumptions() const { return Assumptions; }
+
+ /// dump - For debugging purposes, dumps a dependence to OS.
+ void dump(raw_ostream &OS) const;
+
+protected:
+ Instruction *Src, *Dst;
+
+private:
+ SCEVUnionPredicate Assumptions;
+ const Dependence *NextPredecessor = nullptr, *NextSuccessor = nullptr;
+ friend class DependenceInfo;
+};
+
+/// FullDependence - This class represents a dependence between two memory
+/// references in a function. It contains detailed information about the
+/// dependence (direction vectors, etc.) and is used when the compiler is
+/// able to accurately analyze the interaction of the references; that is,
+/// it is not a confused dependence (see Dependence). In most cases
+/// (for output, flow, and anti dependences), the dependence implies an
+/// ordering, where the source must precede the destination; in contrast,
+/// input dependences are unordered.
+class LLVM_ABI FullDependence final : public Dependence {
+public:
+ FullDependence(Instruction *Source, Instruction *Destination,
+ const SCEVUnionPredicate &Assumes,
+ bool PossiblyLoopIndependent, unsigned Levels);
+
+ /// isLoopIndependent - Returns true if this is a loop-independent
+ /// dependence.
+ bool isLoopIndependent() const override { return LoopIndependent; }
+
+ /// isConfused - Returns true if this dependence is confused
+ /// (the compiler understands nothing and makes worst-case
+ /// assumptions).
+ bool isConfused() const override { return false; }
+
+ /// isConsistent - Returns true if this dependence is consistent
+ /// (occurs every time the source and destination are executed).
+ bool isConsistent() const override { return Consistent; }
+
+ /// getLevels - Returns the number of common loops surrounding the
+ /// source and destination of the dependence.
+ unsigned getLevels() const override { return Levels; }
+
+ /// getDirection - Returns the direction associated with a particular
+ /// level.
+ unsigned getDirection(unsigned Level) const override;
+
+ /// getDistance - Returns the distance (or NULL) associated with a
+ /// particular level.
+ const SCEV *getDistance(unsigned Level) const override;
+
+ /// Check if the direction vector is negative. A negative direction
+ /// vector means Src and Dst are reversed in the actual program.
+ bool isDirectionNegative() const override;
+
+ /// If the direction vector is negative, normalize the direction
+ /// vector to make it non-negative. Normalization is done by reversing
+ /// Src and Dst, plus reversing the dependence directions and distances
+ /// in the vector.
+ bool normalize(ScalarEvolution *SE) override;
+
+ /// isPeelFirst - Returns true if peeling the first iteration from
+ /// this loop will break this dependence.
+ bool isPeelFirst(unsigned Level) const override;
+
+ /// isPeelLast - Returns true if peeling the last iteration from
+ /// this loop will break this dependence.
+ bool isPeelLast(unsigned Level) const override;
+
+ /// isSplitable - Returns true if splitting the loop will break
+ /// the dependence.
+ bool isSplitable(unsigned Level) const override;
+
+ /// isScalar - Returns true if a particular level is scalar; that is,
+ /// if no subscript in the source or destination mention the induction
+ /// variable associated with the loop at this level.
+ bool isScalar(unsigned Level) const override;
+
+private:
+ unsigned short Levels;
+ bool LoopIndependent;
+ bool Consistent; // Init to true, then refine.
+ std::unique_ptr<DVEntry[]> DV;
+ friend class DependenceInfo;
+};
+
+/// DependenceInfo - This class is the main dependence-analysis driver.
+class DependenceInfo {
+public:
+ DependenceInfo(Function *F, AAResults *AA, ScalarEvolution *SE, LoopInfo *LI)
+ : AA(AA), SE(SE), LI(LI), F(F) {}
+
+ /// Handle transitive invalidation when the cached analysis results go away.
+ LLVM_ABI bool invalidate(Function &F, const PreservedAnalyses &PA,
+ FunctionAnalysisManager::Invalidator &Inv);
+
+ /// depends - Tests for a dependence between the Src and Dst instructions.
+ /// Returns NULL if no dependence; otherwise, returns a Dependence (or a
+ /// FullDependence) with as much information as can be gleaned. By default,
+ /// the dependence test collects a set of runtime assumptions that cannot be
+ /// solved at compilation time. By default UnderRuntimeAssumptions is false
+ /// for a safe approximation of the dependence relation that does not
+ /// require runtime checks.
+ LLVM_ABI std::unique_ptr<Dependence>
+ depends(Instruction *Src, Instruction *Dst,
+ bool UnderRuntimeAssumptions = false);
+
+ /// getSplitIteration - Give a dependence that's splittable at some
+ /// particular level, return the iteration that should be used to split
+ /// the loop.
+ ///
+ /// Generally, the dependence analyzer will be used to build
+ /// a dependence graph for a function (basically a map from instructions
+ /// to dependences). Looking for cycles in the graph shows us loops
+ /// that cannot be trivially vectorized/parallelized.
+ ///
+ /// We can try to improve the situation by examining all the dependences
+ /// that make up the cycle, looking for ones we can break.
+ /// Sometimes, peeling the first or last iteration of a loop will break
+ /// dependences, and there are flags for those possibilities.
+ /// Sometimes, splitting a loop at some other iteration will do the trick,
+ /// and we've got a flag for that case. Rather than waste the space to
+ /// record the exact iteration (since we rarely know), we provide
+ /// a method that calculates the iteration. It's a drag that it must work
+ /// from scratch, but wonderful in that it's possible.
+ ///
+ /// Here's an example:
+ ///
+ /// for (i = 0; i < 10; i++)
+ /// A[i] = ...
+ /// ... = A[11 - i]
+ ///
+ /// There's a loop-carried flow dependence from the store to the load,
+ /// found by the weak-crossing SIV test. The dependence will have a flag,
+ /// indicating that the dependence can be broken by splitting the loop.
+ /// Calling getSplitIteration will return 5.
+ /// Splitting the loop breaks the dependence, like so:
+ ///
+ /// for (i = 0; i <= 5; i++)
+ /// A[i] = ...
+ /// ... = A[11 - i]
+ /// for (i = 6; i < 10; i++)
+ /// A[i] = ...
+ /// ... = A[11 - i]
+ ///
+ /// breaks the dependence and allows us to vectorize/parallelize
+ /// both loops.
+ LLVM_ABI const SCEV *getSplitIteration(const Dependence &Dep, unsigned Level);
+
+ Function *getFunction() const { return F; }
+
+ /// getRuntimeAssumptions - Returns all the runtime assumptions under which
+ /// the dependence test is valid.
+ LLVM_ABI SCEVUnionPredicate getRuntimeAssumptions() const;
+
+private:
+ AAResults *AA;
+ ScalarEvolution *SE;
+ LoopInfo *LI;
+ Function *F;
+ SmallVector<const SCEVPredicate *, 4> Assumptions;
+
+ /// Subscript - This private struct represents a pair of subscripts from
+ /// a pair of potentially multi-dimensional array references. We use a
+ /// vector of them to guide subscript partitioning.
+ struct Subscript {
+ const SCEV *Src;
+ const SCEV *Dst;
+ enum ClassificationKind { ZIV, SIV, RDIV, MIV, NonLinear } Classification;
+ SmallBitVector Loops;
+ SmallBitVector GroupLoops;
+ SmallBitVector Group;
+ };
- /// dump - For debugging purposes, dumps a dependence to OS.
- ///
- void dump(raw_ostream &OS) const;
+ struct CoefficientInfo {
+ const SCEV *Coeff;
+ const SCEV *PosPart;
+ const SCEV *NegPart;
+ const SCEV *Iterations;
+ };
- protected:
- Instruction *Src, *Dst;
+ struct BoundInfo {
+ const SCEV *Iterations;
+ const SCEV *Upper[8];
+ const SCEV *Lower[8];
+ unsigned char Direction;
+ unsigned char DirSet;
+ };
+ /// Constraint - This private class represents a constraint, as defined
+ /// in the paper
+ ///
+ /// Practical Dependence Testing
+ /// Goff, Kennedy, Tseng
+ /// PLDI 1991
+ ///
+ /// There are 5 kinds of constraint, in a hierarchy.
+ /// 1) Any - indicates no constraint, any dependence is possible.
+ /// 2) Line - A line ax + by = c, where a, b, and c are parameters,
+ /// representing the dependence equation.
+ /// 3) Distance - The value d of the dependence distance;
+ /// 4) Point - A point <x, y> representing the dependence from
+ /// iteration x to iteration y.
+ /// 5) Empty - No dependence is possible.
+ class Constraint {
private:
- SCEVUnionPredicate Assumptions;
- const Dependence *NextPredecessor = nullptr, *NextSuccessor = nullptr;
- friend class DependenceInfo;
- };
+ enum ConstraintKind { Empty, Point, Distance, Line, Any } Kind;
+ ScalarEvolution *SE;
+ const SCEV *A;
+ const SCEV *B;
+ const SCEV *C;
+ const Loop *AssociatedLoop;
- /// FullDependence - This class represents a dependence between two memory
- /// references in a function. It contains detailed information about the
- /// dependence (direction vectors, etc.) and is used when the compiler is
- /// able to accurately analyze the interaction of the references; that is,
- /// it is not a confused dependence (see Dependence). In most cases
- /// (for output, flow, and anti dependences), the dependence implies an
- /// ordering, where the source must precede the destination; in contrast,
- /// input dependences are unordered.
- class LLVM_ABI FullDependence final : public Dependence {
public:
- FullDependence(Instruction *Source, Instruction *Destination,
- const SCEVUnionPredicate &Assumes,
- bool PossiblyLoopIndependent, unsigned Levels);
-
- /// isLoopIndependent - Returns true if this is a loop-independent
- /// dependence.
- bool isLoopIndependent() const override { return LoopIndependent; }
-
- /// isConfused - Returns true if this dependence is confused
- /// (the compiler understands nothing and makes worst-case
- /// assumptions).
- bool isConfused() const override { return false; }
-
- /// isConsistent - Returns true if this dependence is consistent
- /// (occurs every time the source and destination are executed).
- bool isConsistent() const override { return Consistent; }
-
- /// getLevels - Returns the number of common loops surrounding the
- /// source and destination of the dependence.
- unsigned getLevels() const override { return Levels; }
-
- /// getDirection - Returns the direction associated with a particular
- /// level.
- unsigned getDirection(unsigned Level) const override;
-
- /// getDistance - Returns the distance (or NULL) associated with a
- /// particular level.
- const SCEV *getDistance(unsigned Level) const override;
-
- /// Check if the direction vector is negative. A negative direction
- /// vector means Src and Dst are reversed in the actual program.
- bool isDirectionNegative() const override;
-
- /// If the direction vector is negative, normalize the direction
- /// vector to make it non-negative. Normalization is done by reversing
- /// Src and Dst, plus reversing the dependence directions and distances
- /// in the vector.
- bool normalize(ScalarEvolution *SE) override;
-
- /// isPeelFirst - Returns true if peeling the first iteration from
- /// this loop will break this dependence.
- bool isPeelFirst(unsigned Level) const override;
-
- /// isPeelLast - Returns true if peeling the last iteration from
- /// this loop will break this dependence.
- bool isPeelLast(unsigned Level) const override;
-
- /// isSplitable - Returns true if splitting the loop will break
- /// the dependence.
- bool isSplitable(unsigned Level) const override;
-
- /// isScalar - Returns true if a particular level is scalar; that is,
- /// if no subscript in the source or destination mention the induction
- /// variable associated with the loop at this level.
- bool isScalar(unsigned Level) const override;
+ /// isEmpty - Return true if the constraint is of kind Empty.
+ bool isEmpty() const { return Kind == Empty; }
- private:
- unsigned short Levels;
- bool LoopIndependent;
- bool Consistent; // Init to true, then refine.
- std::unique_ptr<DVEntry[]> DV;
- friend class DependenceInfo;
- };
+ /// isPoint - Return true if the constraint is of kind Point.
+ bool isPoint() const { return Kind == Point; }
- /// DependenceInfo - This class is the main dependence-analysis driver.
- ///
- class DependenceInfo {
- public:
- DependenceInfo(Function *F, AAResults *AA, ScalarEvolution *SE,
- LoopInfo *LI)
- : AA(AA), SE(SE), LI(LI), F(F) {}
-
- /// Handle transitive invalidation when the cached analysis results go away.
- LLVM_ABI bool invalidate(Function &F, const PreservedAnalyses &PA,
- FunctionAnalysisManager::Invalidator &Inv);
-
- /// depends - Tests for a dependence between the Src and Dst instructions.
- /// Returns NULL if no dependence; otherwise, returns a Dependence (or a
- /// FullDependence) with as much information as can be gleaned. By default,
- /// the dependence test collects a set of runtime assumptions that cannot be
- /// solved at compilation time. By default UnderRuntimeAssumptions is false
- /// for a safe approximation of the dependence relation that does not
- /// require runtime checks.
- LLVM_ABI std::unique_ptr<Dependence>
- depends(Instruction *Src, Instruction *Dst,
- bool UnderRuntimeAssumptions = false);
-
- /// getSplitIteration - Give a dependence that's splittable at some
- /// particular level, return the iteration that should be used to split
- /// the loop.
- ///
- /// Generally, the dependence analyzer will be used to build
- /// a dependence graph for a function (basically a map from instructions
- /// to dependences). Looking for cycles in the graph shows us loops
- /// that cannot be trivially vectorized/parallelized.
- ///
- /// We can try to improve the situation by examining all the dependences
- /// that make up the cycle, looking for ones we can break.
- /// Sometimes, peeling the first or last iteration of a loop will break
- /// dependences, and there are flags for those possibilities.
- /// Sometimes, splitting a loop at some other iteration will do the trick,
- /// and we've got a flag for that case. Rather than waste the space to
- /// record the exact iteration (since we rarely know), we provide
- /// a method that calculates the iteration. It's a drag that it must work
- /// from scratch, but wonderful in that it's possible.
- ///
- /// Here's an example:
- ///
- /// for (i = 0; i < 10; i++)
- /// A[i] = ...
- /// ... = A[11 - i]
- ///
- /// There's a loop-carried flow dependence from the store to the load,
- /// found by the weak-crossing SIV test. The dependence will have a flag,
- /// indicating that the dependence can be broken by splitting the loop.
- /// Calling getSplitIteration will return 5.
- /// Splitting the loop breaks the dependence, like so:
- ///
- /// for (i = 0; i <= 5; i++)
- /// A[i] = ...
- /// ... = A[11 - i]
- /// for (i = 6; i < 10; i++)
- /// A[i] = ...
- /// ... = A[11 - i]
- ///
- /// breaks the dependence and allows us to vectorize/parallelize
- /// both loops.
- LLVM_ABI const SCEV *getSplitIteration(const Dependence &Dep,
- unsigned Level);
-
- Function *getFunction() const { return F; }
-
- /// getRuntimeAssumptions - Returns all the runtime assumptions under which
- /// the dependence test is valid.
- LLVM_ABI SCEVUnionPredicate getRuntimeAssumptions() const;
+ /// isDistance - Return true if the constraint is of kind Distance.
+ bool isDistance() const { return Kind == Distance; }
- private:
- AAResults *AA;
- ScalarEvolution *SE;
- LoopInfo *LI;
- Function *F;
- SmallVector<const SCEVPredicate *, 4> Assumptions;
-
- /// Subscript - This private struct represents a pair of subscripts from
- /// a pair of potentially multi-dimensional array references. We use a
- /// vector of them to guide subscript partitioning.
- struct Subscript {
- const SCEV *Src;
- const SCEV *Dst;
- enum ClassificationKind { ZIV, SIV, RDIV, MIV, NonLinear } Classification;
- SmallBitVector Loops;
- SmallBitVector GroupLoops;
- SmallBitVector Group;
- };
+ /// isLine - Return true if the constraint is of kind Line.
+ /// Since Distances can also be represented as Lines, we also return
+ /// true if the constraint is of kind Distance.
+ bool isLine() const { return Kind == Line || Kind == Distance; }
- struct CoefficientInfo {
- const SCEV *Coeff;
- const SCEV *PosPart;
- const SCEV *NegPart;
- const SCEV *Iterations;
- };
+ /// isAny - Return true if the constraint is of kind Any;
+ bool isAny() const { return Kind == Any; }
- struct BoundInfo {
- const SCEV *Iterations;
- const SCEV *Upper[8];
- const SCEV *Lower[8];
- unsigned char Direction;
- unsigned char DirSet;
- };
+ /// getX - If constraint is a point <X, Y>, returns X.
+ /// Otherwise assert.
+ LLVM_ABI const SCEV *getX() const;
- /// Constraint - This private class represents a constraint, as defined
- /// in the paper
- ///
- /// Practical Dependence Testing
- /// Goff, Kennedy, Tseng
- /// PLDI 1991
- ///
- /// There are 5 kinds of constraint, in a hierarchy.
- /// 1) Any - indicates no constraint, any dependence is possible.
- /// 2) Line - A line ax + by = c, where a, b, and c are parameters,
- /// representing the dependence equation.
- /// 3) Distance - The value d of the dependence distance;
- /// 4) Point - A point <x, y> representing the dependence from
- /// iteration x to iteration y.
- /// 5) Empty - No dependence is possible.
- class Constraint {
- private:
- enum ConstraintKind { Empty, Point, Distance, Line, Any } Kind;
- ScalarEvolution *SE;
- const SCEV *A;
- const SCEV *B;
- const SCEV *C;
- const Loop *AssociatedLoop;
-
- public:
- /// isEmpty - Return true if the constraint is of kind Empty.
- bool isEmpty() const { return Kind == Empty; }
-
- /// isPoint - Return true if the constraint is of kind Point.
- bool isPoint() const { return Kind == Point; }
-
- /// isDistance - Return true if the constraint is of kind Distance.
- bool isDistance() const { return Kind == Distance; }
-
- /// isLine - Return true if the constraint is of kind Line.
- /// Since Distance's can also be represented as Lines, we also return
- /// true if the constraint is of kind Distance.
- bool isLine() const { return Kind == Line || Kind == Distance; }
-
- /// isAny - Return true if the constraint is of kind Any;
- bool isAny() const { return Kind == Any; }
-
- /// getX - If constraint is a point <X, Y>, returns X.
- /// Otherwise assert.
- LLVM_ABI const SCEV *getX() const;
-
- /// getY - If constraint is a point <X, Y>, returns Y.
- /// Otherwise assert.
- LLVM_ABI const SCEV *getY() const;
-
- /// getA - If constraint is a line AX + BY = C, returns A.
- /// Otherwise assert.
- LLVM_ABI const SCEV *getA() const;
-
- /// getB - If constraint is a line AX + BY = C, returns B.
- /// Otherwise assert.
- LLVM_ABI const SCEV *getB() const;
-
- /// getC - If constraint is a line AX + BY = C, returns C.
- /// Otherwise assert.
- LLVM_ABI const SCEV *getC() const;
-
- /// getD - If constraint is a distance, returns D.
- /// Otherwise assert.
- LLVM_ABI const SCEV *getD() const;
-
- /// getAssociatedLoop - Returns the loop associated with this constraint.
- LLVM_ABI const Loop *getAssociatedLoop() const;
-
- /// setPoint - Change a constraint to Point.
- LLVM_ABI void setPoint(const SCEV *X, const SCEV *Y,
- const Loop *CurrentLoop);
-
- /// setLine - Change a constraint to Line.
- LLVM_ABI void setLine(const SCEV *A, const SCEV *B, const SCEV *C,
- const Loop *CurrentLoop);
-
- /// setDistance - Change a constraint to Distance.
- LLVM_ABI void setDistance(const SCEV *D, const Loop *CurrentLoop);
-
- /// setEmpty - Change a constraint to Empty.
- LLVM_ABI void setEmpty();
-
- /// setAny - Change a constraint to Any.
- LLVM_ABI void setAny(ScalarEvolution *SE);
-
- /// dump - For debugging purposes. Dumps the constraint
- /// out to OS.
- LLVM_ABI void dump(raw_ostream &OS) const;
- };
+ /// getY - If constraint is a point <X, Y>, returns Y.
+ /// Otherwise assert.
+ LLVM_ABI const SCEV *getY() const;
- /// establishNestingLevels - Examines the loop nesting of the Src and Dst
- /// instructions and establishes their shared loops. Sets the variables
- /// CommonLevels, SrcLevels, and MaxLevels.
- /// The source and destination instructions needn't be contained in the same
- /// loop. The routine establishNestingLevels finds the level of most deeply
- /// nested loop that contains them both, CommonLevels. An instruction that's
- /// not contained in a loop is at level = 0. MaxLevels is equal to the level
- /// of the source plus the level of the destination, minus CommonLevels.
- /// This lets us allocate vectors MaxLevels in length, with room for every
- /// distinct loop referenced in both the source and destination subscripts.
- /// The variable SrcLevels is the nesting depth of the source instruction.
- /// It's used to help calculate distinct loops referenced by the destination.
- /// Here's the map from loops to levels:
- /// 0 - unused
- /// 1 - outermost common loop
- /// ... - other common loops
- /// CommonLevels - innermost common loop
- /// ... - loops containing Src but not Dst
- /// SrcLevels - innermost loop containing Src but not Dst
- /// ... - loops containing Dst but not Src
- /// MaxLevels - innermost loop containing Dst but not Src
- /// Consider the follow code fragment:
- /// for (a = ...) {
- /// for (b = ...) {
- /// for (c = ...) {
- /// for (d = ...) {
- /// A[] = ...;
- /// }
- /// }
- /// for (e = ...) {
- /// for (f = ...) {
- /// for (g = ...) {
- /// ... = A[];
- /// }
- /// }
- /// }
- /// }
- /// }
- /// If we're looking at the possibility of a dependence between the store
- /// to A (the Src) and the load from A (the Dst), we'll note that they
- /// have 2 loops in common, so CommonLevels will equal 2 and the direction
- /// vector for Result will have 2 entries. SrcLevels = 4 and MaxLevels = 7.
- /// A map from loop names to level indices would look like
- /// a - 1
- /// b - 2 = CommonLevels
- /// c - 3
- /// d - 4 = SrcLevels
- /// e - 5
- /// f - 6
- /// g - 7 = MaxLevels
- void establishNestingLevels(const Instruction *Src,
- const Instruction *Dst);
-
- unsigned CommonLevels, SrcLevels, MaxLevels;
-
- /// mapSrcLoop - Given one of the loops containing the source, return
- /// its level index in our numbering scheme.
- unsigned mapSrcLoop(const Loop *SrcLoop) const;
-
- /// mapDstLoop - Given one of the loops containing the destination,
- /// return its level index in our numbering scheme.
- unsigned mapDstLoop(const Loop *DstLoop) const;
-
- /// isLoopInvariant - Returns true if Expression is loop invariant
- /// in LoopNest.
- bool isLoopInvariant(const SCEV *Expression, const Loop *LoopNest) const;
-
- /// Makes sure all subscript pairs share the same integer type by
- /// sign-extending as necessary.
- /// Sign-extending a subscript is safe because getelementptr assumes the
- /// array subscripts are signed.
- void unifySubscriptType(ArrayRef<Subscript *> Pairs);
-
- /// removeMatchingExtensions - Examines a subscript pair.
- /// If the source and destination are identically sign (or zero)
- /// extended, it strips off the extension in an effort to
- /// simplify the actual analysis.
- void removeMatchingExtensions(Subscript *Pair);
-
- /// collectCommonLoops - Finds the set of loops from the LoopNest that
- /// have a level <= CommonLevels and are referred to by the SCEV Expression.
- void collectCommonLoops(const SCEV *Expression,
- const Loop *LoopNest,
- SmallBitVector &Loops) const;
-
- /// checkSrcSubscript - Examines the SCEV Src, returning true iff it's
- /// linear. Collect the set of loops mentioned by Src.
- bool checkSrcSubscript(const SCEV *Src,
- const Loop *LoopNest,
- SmallBitVector &Loops);
-
- /// checkDstSubscript - Examines the SCEV Dst, returning true iff it's
- /// linear. Collect the set of loops mentioned by Dst.
- bool checkDstSubscript(const SCEV *Dst,
- const Loop *LoopNest,
- SmallBitVector &Loops);
-
- /// isKnownPredicate - Compare X and Y using the predicate Pred.
- /// Basically a wrapper for SCEV::isKnownPredicate,
- /// but tries harder, especially in the presence of sign and zero
- /// extensions and symbolics.
- bool isKnownPredicate(ICmpInst::Predicate Pred,
- const SCEV *X,
- const SCEV *Y) const;
-
- /// isKnownLessThan - Compare to see if S is less than Size
- /// Another wrapper for isKnownNegative(S - max(Size, 1)) with some extra
- /// checking if S is an AddRec and we can prove lessthan using the loop
- /// bounds.
- bool isKnownLessThan(const SCEV *S, const SCEV *Size) const;
-
- /// isKnownNonNegative - Compare to see if S is known not to be negative
- /// Uses the fact that S comes from Ptr, which may be an inbound GEP,
- /// Proving there is no wrapping going on.
- bool isKnownNonNegative(const SCEV *S, const Value *Ptr) const;
-
- /// collectUpperBound - All subscripts are the same type (on my machine,
- /// an i64). The loop bound may be a smaller type. collectUpperBound
- /// find the bound, if available, and zero extends it to the Type T.
- /// (I zero extend since the bound should always be >= 0.)
- /// If no upper bound is available, return NULL.
- const SCEV *collectUpperBound(const Loop *l, Type *T) const;
-
- /// collectConstantUpperBound - Calls collectUpperBound(), then
- /// attempts to cast it to SCEVConstant. If the cast fails,
- /// returns NULL.
- const SCEVConstant *collectConstantUpperBound(const Loop *l, Type *T) const;
-
- /// classifyPair - Examines the subscript pair (the Src and Dst SCEVs)
- /// and classifies it as either ZIV, SIV, RDIV, MIV, or Nonlinear.
- /// Collects the associated loops in a set.
- Subscript::ClassificationKind classifyPair(const SCEV *Src,
- const Loop *SrcLoopNest,
- const SCEV *Dst,
- const Loop *DstLoopNest,
- SmallBitVector &Loops);
-
- /// testZIV - Tests the ZIV subscript pair (Src and Dst) for dependence.
- /// Returns true if any possible dependence is disproved.
- /// If there might be a dependence, returns false.
- /// If the dependence isn't proven to exist,
- /// marks the Result as inconsistent.
- bool testZIV(const SCEV *Src,
- const SCEV *Dst,
- FullDependence &Result) const;
-
- /// testSIV - Tests the SIV subscript pair (Src and Dst) for dependence.
- /// Things of the form [c1 + a1*i] and [c2 + a2*j], where
- /// i and j are induction variables, c1 and c2 are loop invariant,
- /// and a1 and a2 are constant.
- /// Returns true if any possible dependence is disproved.
- /// If there might be a dependence, returns false.
- /// Sets appropriate direction vector entry and, when possible,
- /// the distance vector entry.
- /// If the dependence isn't proven to exist,
- /// marks the Result as inconsistent.
- bool testSIV(const SCEV *Src,
- const SCEV *Dst,
- unsigned &Level,
- FullDependence &Result,
- Constraint &NewConstraint,
- const SCEV *&SplitIter) const;
-
- /// testRDIV - Tests the RDIV subscript pair (Src and Dst) for dependence.
- /// Things of the form [c1 + a1*i] and [c2 + a2*j]
- /// where i and j are induction variables, c1 and c2 are loop invariant,
- /// and a1 and a2 are constant.
- /// With minor algebra, this test can also be used for things like
- /// [c1 + a1*i + a2*j][c2].
- /// Returns true if any possible dependence is disproved.
- /// If there might be a dependence, returns false.
- /// Marks the Result as inconsistent.
- bool testRDIV(const SCEV *Src,
- const SCEV *Dst,
- FullDependence &Result) const;
+ /// getA - If constraint is a line AX + BY = C, returns A.
+ /// Otherwise assert.
+ LLVM_ABI const SCEV *getA() const;
- /// testMIV - Tests the MIV subscript pair (Src and Dst) for dependence.
- /// Returns true if dependence disproved.
- /// Can sometimes refine direction vectors.
- bool testMIV(const SCEV *Src,
- const SCEV *Dst,
- const SmallBitVector &Loops,
- FullDependence &Result) const;
-
- /// strongSIVtest - Tests the strong SIV subscript pair (Src and Dst)
- /// for dependence.
- /// Things of the form [c1 + a*i] and [c2 + a*i],
- /// where i is an induction variable, c1 and c2 are loop invariant,
- /// and a is a constant
- /// Returns true if any possible dependence is disproved.
- /// If there might be a dependence, returns false.
- /// Sets appropriate direction and distance.
- bool strongSIVtest(const SCEV *Coeff,
- const SCEV *SrcConst,
- const SCEV *DstConst,
- const Loop *CurrentLoop,
- unsigned Level,
- FullDependence &Result,
- Constraint &NewConstraint) const;
-
- /// weakCrossingSIVtest - Tests the weak-crossing SIV subscript pair
- /// (Src and Dst) for dependence.
- /// Things of the form [c1 + a*i] and [c2 - a*i],
- /// where i is an induction variable, c1 and c2 are loop invariant,
- /// and a is a constant.
- /// Returns true if any possible dependence is disproved.
- /// If there might be a dependence, returns false.
- /// Sets appropriate direction entry.
- /// Set consistent to false.
- /// Marks the dependence as splitable.
- bool weakCrossingSIVtest(const SCEV *SrcCoeff,
- const SCEV *SrcConst,
- const SCEV *DstConst,
- const Loop *CurrentLoop,
- unsigned Level,
- FullDependence &Result,
- Constraint &NewConstraint,
- const SCEV *&SplitIter) const;
-
- /// ExactSIVtest - Tests the SIV subscript pair
- /// (Src and Dst) for dependence.
- /// Things of the form [c1 + a1*i] and [c2 + a2*i],
- /// where i is an induction variable, c1 and c2 are loop invariant,
- /// and a1 and a2 are constant.
- /// Returns true if any possible dependence is disproved.
- /// If there might be a dependence, returns false.
- /// Sets appropriate direction entry.
- /// Set consistent to false.
- bool exactSIVtest(const SCEV *SrcCoeff,
- const SCEV *DstCoeff,
- const SCEV *SrcConst,
- const SCEV *DstConst,
- const Loop *CurrentLoop,
- unsigned Level,
- FullDependence &Result,
- Constraint &NewConstraint) const;
-
- /// weakZeroSrcSIVtest - Tests the weak-zero SIV subscript pair
- /// (Src and Dst) for dependence.
- /// Things of the form [c1] and [c2 + a*i],
- /// where i is an induction variable, c1 and c2 are loop invariant,
- /// and a is a constant. See also weakZeroDstSIVtest.
- /// Returns true if any possible dependence is disproved.
- /// If there might be a dependence, returns false.
- /// Sets appropriate direction entry.
- /// Set consistent to false.
- /// If loop peeling will break the dependence, mark appropriately.
- bool weakZeroSrcSIVtest(const SCEV *DstCoeff,
- const SCEV *SrcConst,
- const SCEV *DstConst,
- const Loop *CurrentLoop,
- unsigned Level,
- FullDependence &Result,
- Constraint &NewConstraint) const;
-
- /// weakZeroDstSIVtest - Tests the weak-zero SIV subscript pair
- /// (Src and Dst) for dependence.
- /// Things of the form [c1 + a*i] and [c2],
- /// where i is an induction variable, c1 and c2 are loop invariant,
- /// and a is a constant. See also weakZeroSrcSIVtest.
- /// Returns true if any possible dependence is disproved.
- /// If there might be a dependence, returns false.
- /// Sets appropriate direction entry.
- /// Set consistent to false.
- /// If loop peeling will break the dependence, mark appropriately.
- bool weakZeroDstSIVtest(const SCEV *SrcCoeff,
- const SCEV *SrcConst,
- const SCEV *DstConst,
- const Loop *CurrentLoop,
- unsigned Level,
- FullDependence &Result,
- Constraint &NewConstraint) const;
-
- /// exactRDIVtest - Tests the RDIV subscript pair for dependence.
- /// Things of the form [c1 + a*i] and [c2 + b*j],
- /// where i and j are induction variable, c1 and c2 are loop invariant,
- /// and a and b are constants.
- /// Returns true if any possible dependence is disproved.
- /// Marks the result as inconsistent.
- /// Works in some cases that symbolicRDIVtest doesn't,
- /// and vice versa.
- bool exactRDIVtest(const SCEV *SrcCoeff,
- const SCEV *DstCoeff,
- const SCEV *SrcConst,
- const SCEV *DstConst,
- const Loop *SrcLoop,
- const Loop *DstLoop,
- FullDependence &Result) const;
+ /// getB - If constraint is a line AX + BY = C, returns B.
+ /// Otherwise assert.
+ LLVM_ABI const SCEV *getB() const;
- /// symbolicRDIVtest - Tests the RDIV subscript pair for dependence.
- /// Things of the form [c1 + a*i] and [c2 + b*j],
- /// where i and j are induction variable, c1 and c2 are loop invariant,
- /// and a and b are constants.
- /// Returns true if any possible dependence is disproved.
- /// Marks the result as inconsistent.
- /// Works in some cases that exactRDIVtest doesn't,
- /// and vice versa. Can also be used as a backup for
- /// ordinary SIV tests.
- bool symbolicRDIVtest(const SCEV *SrcCoeff,
- const SCEV *DstCoeff,
- const SCEV *SrcConst,
- const SCEV *DstConst,
- const Loop *SrcLoop,
- const Loop *DstLoop) const;
-
- /// gcdMIVtest - Tests an MIV subscript pair for dependence.
- /// Returns true if any possible dependence is disproved.
- /// Marks the result as inconsistent.
- /// Can sometimes disprove the equal direction for 1 or more loops.
- // Can handle some symbolics that even the SIV tests don't get,
- /// so we use it as a backup for everything.
- bool gcdMIVtest(const SCEV *Src,
- const SCEV *Dst,
- FullDependence &Result) const;
-
- /// banerjeeMIVtest - Tests an MIV subscript pair for dependence.
- /// Returns true if any possible dependence is disproved.
- /// Marks the result as inconsistent.
- /// Computes directions.
- bool banerjeeMIVtest(const SCEV *Src,
- const SCEV *Dst,
- const SmallBitVector &Loops,
- FullDependence &Result) const;
-
- /// collectCoefficientInfo - Walks through the subscript,
- /// collecting each coefficient, the associated loop bounds,
- /// and recording its positive and negative parts for later use.
- CoefficientInfo *collectCoeffInfo(const SCEV *Subscript,
- bool SrcFlag,
- const SCEV *&Constant) const;
-
- /// getPositivePart - X^+ = max(X, 0).
- ///
- const SCEV *getPositivePart(const SCEV *X) const;
-
- /// getNegativePart - X^- = min(X, 0).
- ///
- const SCEV *getNegativePart(const SCEV *X) const;
-
- /// getLowerBound - Looks through all the bounds info and
- /// computes the lower bound given the current direction settings
- /// at each level.
- const SCEV *getLowerBound(BoundInfo *Bound) const;
-
- /// getUpperBound - Looks through all the bounds info and
- /// computes the upper bound given the current direction settings
- /// at each level.
- const SCEV *getUpperBound(BoundInfo *Bound) const;
-
- /// exploreDirections - Hierarchically expands the direction vector
- /// search space, combining the directions of discovered dependences
- /// in the DirSet field of Bound. Returns the number of distinct
- /// dependences discovered. If the dependence is disproved,
- /// it will return 0.
- unsigned exploreDirections(unsigned Level,
- CoefficientInfo *A,
- CoefficientInfo *B,
- BoundInfo *Bound,
- const SmallBitVector &Loops,
- unsigned &DepthExpanded,
- const SCEV *Delta) const;
-
- /// testBounds - Returns true iff the current bounds are plausible.
- bool testBounds(unsigned char DirKind,
- unsigned Level,
- BoundInfo *Bound,
- const SCEV *Delta) const;
-
- /// findBoundsALL - Computes the upper and lower bounds for level K
- /// using the * direction. Records them in Bound.
- void findBoundsALL(CoefficientInfo *A,
- CoefficientInfo *B,
- BoundInfo *Bound,
- unsigned K) const;
-
- /// findBoundsLT - Computes the upper and lower bounds for level K
- /// using the < direction. Records them in Bound.
- void findBoundsLT(CoefficientInfo *A,
- CoefficientInfo *B,
- BoundInfo *Bound,
- unsigned K) const;
-
- /// findBoundsGT - Computes the upper and lower bounds for level K
- /// using the > direction. Records them in Bound.
- void findBoundsGT(CoefficientInfo *A,
- CoefficientInfo *B,
- BoundInfo *Bound,
- unsigned K) const;
-
- /// findBoundsEQ - Computes the upper and lower bounds for level K
- /// using the = direction. Records them in Bound.
- void findBoundsEQ(CoefficientInfo *A,
- CoefficientInfo *B,
- BoundInfo *Bound,
- unsigned K) const;
-
- /// intersectConstraints - Updates X with the intersection
- /// of the Constraints X and Y. Returns true if X has changed.
- bool intersectConstraints(Constraint *X,
- const Constraint *Y);
-
- /// propagate - Review the constraints, looking for opportunities
- /// to simplify a subscript pair (Src and Dst).
- /// Return true if some simplification occurs.
- /// If the simplification isn't exact (that is, if it is conservative
- /// in terms of dependence), set consistent to false.
- bool propagate(const SCEV *&Src,
- const SCEV *&Dst,
- SmallBitVector &Loops,
- SmallVectorImpl<Constraint> &Constraints,
- bool &Consistent);
-
- /// propagateDistance - Attempt to propagate a distance
- /// constraint into a subscript pair (Src and Dst).
- /// Return true if some simplification occurs.
- /// If the simplification isn't exact (that is, if it is conservative
- /// in terms of dependence), set consistent to false.
- bool propagateDistance(const SCEV *&Src,
- const SCEV *&Dst,
- Constraint &CurConstraint,
- bool &Consistent);
-
- /// propagatePoint - Attempt to propagate a point
- /// constraint into a subscript pair (Src and Dst).
- /// Return true if some simplification occurs.
- bool propagatePoint(const SCEV *&Src,
- const SCEV *&Dst,
- Constraint &CurConstraint);
-
- /// propagateLine - Attempt to propagate a line
- /// constraint into a subscript pair (Src and Dst).
- /// Return true if some simplification occurs.
- /// If the simplification isn't exact (that is, if it is conservative
- /// in terms of dependence), set consistent to false.
- bool propagateLine(const SCEV *&Src,
- const SCEV *&Dst,
- Constraint &CurConstraint,
- bool &Consistent);
-
- /// findCoefficient - Given a linear SCEV,
- /// return the coefficient corresponding to specified loop.
- /// If there isn't one, return the SCEV constant 0.
- /// For example, given a*i + b*j + c*k, returning the coefficient
- /// corresponding to the j loop would yield b.
- const SCEV *findCoefficient(const SCEV *Expr,
- const Loop *TargetLoop) const;
-
- /// zeroCoefficient - Given a linear SCEV,
- /// return the SCEV given by zeroing out the coefficient
- /// corresponding to the specified loop.
- /// For example, given a*i + b*j + c*k, zeroing the coefficient
- /// corresponding to the j loop would yield a*i + c*k.
- const SCEV *zeroCoefficient(const SCEV *Expr,
- const Loop *TargetLoop) const;
-
- /// addToCoefficient - Given a linear SCEV Expr,
- /// return the SCEV given by adding some Value to the
- /// coefficient corresponding to the specified TargetLoop.
- /// For example, given a*i + b*j + c*k, adding 1 to the coefficient
- /// corresponding to the j loop would yield a*i + (b+1)*j + c*k.
- const SCEV *addToCoefficient(const SCEV *Expr,
- const Loop *TargetLoop,
- const SCEV *Value) const;
-
- /// updateDirection - Update direction vector entry
- /// based on the current constraint.
- void updateDirection(Dependence::DVEntry &Level,
- const Constraint &CurConstraint) const;
-
- /// Given a linear access function, tries to recover subscripts
- /// for each dimension of the array element access.
- bool tryDelinearize(Instruction *Src, Instruction *Dst,
- SmallVectorImpl<Subscript> &Pair);
-
- /// Tries to delinearize \p Src and \p Dst access functions for a fixed size
- /// multi-dimensional array. Calls tryDelinearizeFixedSizeImpl() to
- /// delinearize \p Src and \p Dst separately,
- bool tryDelinearizeFixedSize(Instruction *Src, Instruction *Dst,
- const SCEV *SrcAccessFn,
- const SCEV *DstAccessFn,
- SmallVectorImpl<const SCEV *> &SrcSubscripts,
- SmallVectorImpl<const SCEV *> &DstSubscripts);
-
- /// Tries to delinearize access function for a multi-dimensional array with
- /// symbolic runtime sizes.
- /// Returns true upon success and false otherwise.
- bool tryDelinearizeParametricSize(
- Instruction *Src, Instruction *Dst, const SCEV *SrcAccessFn,
- const SCEV *DstAccessFn, SmallVectorImpl<const SCEV *> &SrcSubscripts,
- SmallVectorImpl<const SCEV *> &DstSubscripts);
-
- /// checkSubscript - Helper function for checkSrcSubscript and
- /// checkDstSubscript to avoid duplicate code
- bool checkSubscript(const SCEV *Expr, const Loop *LoopNest,
- SmallBitVector &Loops, bool IsSrc);
- }; // class DependenceInfo
-
- /// AnalysisPass to compute dependence information in a function
- class DependenceAnalysis : public AnalysisInfoMixin<DependenceAnalysis> {
- public:
- typedef DependenceInfo Result;
- LLVM_ABI Result run(Function &F, FunctionAnalysisManager &FAM);
+ /// getC - If constraint is a line AX + BY = C, returns C.
+ /// Otherwise assert.
+ LLVM_ABI const SCEV *getC() const;
- private:
- LLVM_ABI static AnalysisKey Key;
- friend struct AnalysisInfoMixin<DependenceAnalysis>;
- }; // class DependenceAnalysis
+ /// getD - If constraint is a distance, returns D.
+ /// Otherwise assert.
+ LLVM_ABI const SCEV *getD() const;
- /// Printer pass to dump DA results.
- struct DependenceAnalysisPrinterPass
- : public PassInfoMixin<DependenceAnalysisPrinterPass> {
- DependenceAnalysisPrinterPass(raw_ostream &OS,
- bool NormalizeResults = false)
- : OS(OS), NormalizeResults(NormalizeResults) {}
+ /// getAssociatedLoop - Returns the loop associated with this constraint.
+ LLVM_ABI const Loop *getAssociatedLoop() const;
- LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
+ /// setPoint - Change a constraint to Point.
+ LLVM_ABI void setPoint(const SCEV *X, const SCEV *Y,
+ const Loop *CurrentLoop);
- static bool isRequired() { return true; }
+ /// setLine - Change a constraint to Line.
+ LLVM_ABI void setLine(const SCEV *A, const SCEV *B, const SCEV *C,
+ const Loop *CurrentLoop);
- private:
- raw_ostream &OS;
- bool NormalizeResults;
- }; // class DependenceAnalysisPrinterPass
+ /// setDistance - Change a constraint to Distance.
+ LLVM_ABI void setDistance(const SCEV *D, const Loop *CurrentLoop);
- /// Legacy pass manager pass to access dependence information
- class LLVM_ABI DependenceAnalysisWrapperPass : public FunctionPass {
- public:
- static char ID; // Class identification, replacement for typeinfo
- DependenceAnalysisWrapperPass();
+ /// setEmpty - Change a constraint to Empty.
+ LLVM_ABI void setEmpty();
- bool runOnFunction(Function &F) override;
- void releaseMemory() override;
- void getAnalysisUsage(AnalysisUsage &) const override;
- void print(raw_ostream &, const Module * = nullptr) const override;
- DependenceInfo &getDI() const;
+ /// setAny - Change a constraint to Any.
+ LLVM_ABI void setAny(ScalarEvolution *SE);
- private:
- std::unique_ptr<DependenceInfo> info;
- }; // class DependenceAnalysisWrapperPass
+ /// dump - For debugging purposes. Dumps the constraint
+ /// out to OS.
+ LLVM_ABI void dump(raw_ostream &OS) const;
+ };
+
+ /// establishNestingLevels - Examines the loop nesting of the Src and Dst
+ /// instructions and establishes their shared loops. Sets the variables
+ /// CommonLevels, SrcLevels, and MaxLevels.
+ /// The source and destination instructions needn't be contained in the same
+ /// loop. The routine establishNestingLevels finds the level of the most deeply
+ /// nested loop that contains them both, CommonLevels. An instruction that's
+ /// not contained in a loop is at level = 0. MaxLevels is equal to the level
+ /// of the source plus the level of the destination, minus CommonLevels.
+ /// This lets us allocate vectors MaxLevels in length, with room for every
+ /// distinct loop referenced in both the source and destination subscripts.
+ /// The variable SrcLevels is the nesting depth of the source instruction.
+ /// It's used to help calculate distinct loops referenced by the destination.
+ /// Here's the map from loops to levels:
+ /// 0 - unused
+ /// 1 - outermost common loop
+ /// ... - other common loops
+ /// CommonLevels - innermost common loop
+ /// ... - loops containing Src but not Dst
+ /// SrcLevels - innermost loop containing Src but not Dst
+ /// ... - loops containing Dst but not Src
+ /// MaxLevels - innermost loop containing Dst but not Src
+ /// Consider the following code fragment:
+ /// for (a = ...) {
+ /// for (b = ...) {
+ /// for (c = ...) {
+ /// for (d = ...) {
+ /// A[] = ...;
+ /// }
+ /// }
+ /// for (e = ...) {
+ /// for (f = ...) {
+ /// for (g = ...) {
+ /// ... = A[];
+ /// }
+ /// }
+ /// }
+ /// }
+ /// }
+ /// If we're looking at the possibility of a dependence between the store
+ /// to A (the Src) and the load from A (the Dst), we'll note that they
+ /// have 2 loops in common, so CommonLevels will equal 2 and the direction
+ /// vector for Result will have 2 entries. SrcLevels = 4 and MaxLevels = 7.
+ /// A map from loop names to level indices would look like
+ /// a - 1
+ /// b - 2 = CommonLevels
+ /// c - 3
+ /// d - 4 = SrcLevels
+ /// e - 5
+ /// f - 6
+ /// g - 7 = MaxLevels
+ void establishNestingLevels(const Instruction *Src, const Instruction *Dst);
+
+ unsigned CommonLevels, SrcLevels, MaxLevels;
+
+ /// mapSrcLoop - Given one of the loops containing the source, return
+ /// its level index in our numbering scheme.
+ unsigned mapSrcLoop(const Loop *SrcLoop) const;
+
+ /// mapDstLoop - Given one of the loops containing the destination,
+ /// return its level index in our numbering scheme.
+ unsigned mapDstLoop(const Loop *DstLoop) const;
+
+ /// isLoopInvariant - Returns true if Expression is loop invariant
+ /// in LoopNest.
+ bool isLoopInvariant(const SCEV *Expression, const Loop *LoopNest) const;
+
+ /// Makes sure all subscript pairs share the same integer type by
+ /// sign-extending as necessary.
+ /// Sign-extending a subscript is safe because getelementptr assumes the
+ /// array subscripts are signed.
+ void unifySubscriptType(ArrayRef<Subscript *> Pairs);
+
+ /// removeMatchingExtensions - Examines a subscript pair.
+ /// If the source and destination are identically sign (or zero)
+ /// extended, it strips off the extension in an effort to
+ /// simplify the actual analysis.
+ void removeMatchingExtensions(Subscript *Pair);
+
+ /// collectCommonLoops - Finds the set of loops from the LoopNest that
+ /// have a level <= CommonLevels and are referred to by the SCEV Expression.
+ void collectCommonLoops(const SCEV *Expression, const Loop *LoopNest,
+ SmallBitVector &Loops) const;
+
+ /// checkSrcSubscript - Examines the SCEV Src, returning true iff it's
+ /// linear. Collect the set of loops mentioned by Src.
+ bool checkSrcSubscript(const SCEV *Src, const Loop *LoopNest,
+ SmallBitVector &Loops);
+
+ /// checkDstSubscript - Examines the SCEV Dst, returning true iff it's
+ /// linear. Collect the set of loops mentioned by Dst.
+ bool checkDstSubscript(const SCEV *Dst, const Loop *LoopNest,
+ SmallBitVector &Loops);
+
+ /// isKnownPredicate - Compare X and Y using the predicate Pred.
+ /// Basically a wrapper for SCEV::isKnownPredicate,
+ /// but tries harder, especially in the presence of sign and zero
+ /// extensions and symbolics.
+ bool isKnownPredicate(ICmpInst::Predicate Pred, const SCEV *X,
+ const SCEV *Y) const;
+
+ /// isKnownLessThan - Compare to see if S is less than Size.
+ /// Another wrapper for isKnownNegative(S - max(Size, 1)) with some extra
+ /// checking if S is an AddRec and we can prove less-than using the loop
+ /// bounds.
+ bool isKnownLessThan(const SCEV *S, const SCEV *Size) const;
+
+ /// isKnownNonNegative - Compare to see if S is known not to be negative.
+ /// Uses the fact that S comes from Ptr, which may be an inbounds GEP,
+ /// proving there is no wrapping going on.
+ bool isKnownNonNegative(const SCEV *S, const Value *Ptr) const;
+
+ /// collectUpperBound - All subscripts are the same type (on my machine,
+ /// an i64). The loop bound may be a smaller type. collectUpperBound
+ /// finds the bound, if available, and zero-extends it to the type T.
+ /// (I zero extend since the bound should always be >= 0.)
+ /// If no upper bound is available, return NULL.
+ const SCEV *collectUpperBound(const Loop *l, Type *T) const;
+
+ /// collectConstantUpperBound - Calls collectUpperBound(), then
+ /// attempts to cast it to SCEVConstant. If the cast fails,
+ /// returns NULL.
+ const SCEVConstant *collectConstantUpperBound(const Loop *l, Type *T) const;
+
+ /// classifyPair - Examines the subscript pair (the Src and Dst SCEVs)
+ /// and classifies it as either ZIV, SIV, RDIV, MIV, or NonLinear.
+ /// Collects the associated loops in a set.
+ Subscript::ClassificationKind
+ classifyPair(const SCEV *Src, const Loop *SrcLoopNest, const SCEV *Dst,
+ const Loop *DstLoopNest, SmallBitVector &Loops);
+
+ /// testZIV - Tests the ZIV subscript pair (Src and Dst) for dependence.
+ /// Returns true if any possible dependence is disproved.
+ /// If there might be a dependence, returns false.
+ /// If the dependence isn't proven to exist,
+ /// marks the Result as inconsistent.
+ bool testZIV(const SCEV *Src, const SCEV *Dst, FullDependence &Result) const;
+
+ /// testSIV - Tests the SIV subscript pair (Src and Dst) for dependence.
+ /// Things of the form [c1 + a1*i] and [c2 + a2*j], where
+ /// i and j are induction variables, c1 and c2 are loop invariant,
+ /// and a1 and a2 are constant.
+ /// Returns true if any possible dependence is disproved.
+ /// If there might be a dependence, returns false.
+ /// Sets appropriate direction vector entry and, when possible,
+ /// the distance vector entry.
+ /// If the dependence isn't proven to exist,
+ /// marks the Result as inconsistent.
+ bool testSIV(const SCEV *Src, const SCEV *Dst, unsigned &Level,
+ FullDependence &Result, Constraint &NewConstraint,
+ const SCEV *&SplitIter) const;
+
+ /// testRDIV - Tests the RDIV subscript pair (Src and Dst) for dependence.
+ /// Things of the form [c1 + a1*i] and [c2 + a2*j]
+ /// where i and j are induction variables, c1 and c2 are loop invariant,
+ /// and a1 and a2 are constant.
+ /// With minor algebra, this test can also be used for things like
+ /// [c1 + a1*i + a2*j][c2].
+ /// Returns true if any possible dependence is disproved.
+ /// If there might be a dependence, returns false.
+ /// Marks the Result as inconsistent.
+ bool testRDIV(const SCEV *Src, const SCEV *Dst, FullDependence &Result) const;
+
+ /// testMIV - Tests the MIV subscript pair (Src and Dst) for dependence.
+ /// Returns true if dependence disproved.
+ /// Can sometimes refine direction vectors.
+ bool testMIV(const SCEV *Src, const SCEV *Dst, const SmallBitVector &Loops,
+ FullDependence &Result) const;
+
+ /// strongSIVtest - Tests the strong SIV subscript pair (Src and Dst)
+ /// for dependence.
+ /// Things of the form [c1 + a*i] and [c2 + a*i],
+ /// where i is an induction variable, c1 and c2 are loop invariant,
+ /// and a is a constant.
+ /// Returns true if any possible dependence is disproved.
+ /// If there might be a dependence, returns false.
+ /// Sets appropriate direction and distance.
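+ ///
+ /// Illustrative sketch of the test: equating c1 + a*i1 and c2 + a*i2
+ /// gives a*(i2 - i1) = c1 - c2, so a dependence can exist only if a
+ /// divides c1 - c2 and the resulting distance (c1 - c2)/a fits within
+ /// the loop bounds; otherwise the dependence is disproved.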
+ bool strongSIVtest(const SCEV *Coeff, const SCEV *SrcConst,
+ const SCEV *DstConst, const Loop *CurrentLoop,
+ unsigned Level, FullDependence &Result,
+ Constraint &NewConstraint) const;
+
+ /// weakCrossingSIVtest - Tests the weak-crossing SIV subscript pair
+ /// (Src and Dst) for dependence.
+ /// Things of the form [c1 + a*i] and [c2 - a*i],
+ /// where i is an induction variable, c1 and c2 are loop invariant,
+ /// and a is a constant.
+ /// Returns true if any possible dependence is disproved.
+ /// If there might be a dependence, returns false.
+ /// Sets appropriate direction entry.
+ /// Set consistent to false.
+ /// Marks the dependence as splitable.
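+ ///
+ /// Illustrative sketch: equating c1 + a*i1 and c2 - a*i2 gives
+ /// i1 + i2 = (c2 - c1)/a, i.e. all dependences cross around the
+ /// iteration (c2 - c1)/(2*a). For A[i] = ...; ... = A[11 - i]; this is
+ /// 5.5, which is why getSplitIteration returns 5 in the example above.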
+ bool weakCrossingSIVtest(const SCEV *SrcCoeff, const SCEV *SrcConst,
+ const SCEV *DstConst, const Loop *CurrentLoop,
+ unsigned Level, FullDependence &Result,
+ Constraint &NewConstraint,
+ const SCEV *&SplitIter) const;
+
+ /// exactSIVtest - Tests the exact SIV subscript pair
+ /// (Src and Dst) for dependence.
+ /// Things of the form [c1 + a1*i] and [c2 + a2*i],
+ /// where i is an induction variable, c1 and c2 are loop invariant,
+ /// and a1 and a2 are constant.
+ /// Returns true if any possible dependence is disproved.
+ /// If there might be a dependence, returns false.
+ /// Sets appropriate direction entry.
+ /// Set consistent to false.
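+ ///
+ /// Illustrative sketch: the dependence equation a1*i1 - a2*i2 = c2 - c1
+ /// is a linear Diophantine equation; it has an integer solution only if
+ /// gcd(a1, a2) divides c2 - c1, and the test then checks whether some
+ /// solution lies within the loop bounds.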
+ bool exactSIVtest(const SCEV *SrcCoeff, const SCEV *DstCoeff,
+ const SCEV *SrcConst, const SCEV *DstConst,
+ const Loop *CurrentLoop, unsigned Level,
+ FullDependence &Result, Constraint &NewConstraint) const;
+
+ /// weakZeroSrcSIVtest - Tests the weak-zero SIV subscript pair
+ /// (Src and Dst) for dependence.
+ /// Things of the form [c1] and [c2 + a*i],
+ /// where i is an induction variable, c1 and c2 are loop invariant,
+ /// and a is a constant. See also weakZeroDstSIVtest.
+ /// Returns true if any possible dependence is disproved.
+ /// If there might be a dependence, returns false.
+ /// Sets appropriate direction entry.
+ /// Set consistent to false.
+ /// If loop peeling will break the dependence, mark appropriately.
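+ ///
+ /// Illustrative sketch: the subscripts collide when c2 + a*i = c1,
+ /// i.e. i = (c1 - c2)/a; if that iteration is the first or the last one
+ /// of the loop, peeling it breaks the dependence.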
+ bool weakZeroSrcSIVtest(const SCEV *DstCoeff, const SCEV *SrcConst,
+ const SCEV *DstConst, const Loop *CurrentLoop,
+ unsigned Level, FullDependence &Result,
+ Constraint &NewConstraint) const;
+
+ /// weakZeroDstSIVtest - Tests the weak-zero SIV subscript pair
+ /// (Src and Dst) for dependence.
+ /// Things of the form [c1 + a*i] and [c2],
+ /// where i is an induction variable, c1 and c2 are loop invariant,
+ /// and a is a constant. See also weakZeroSrcSIVtest.
+ /// Returns true if any possible dependence is disproved.
+ /// If there might be a dependence, returns false.
+ /// Sets appropriate direction entry.
+ /// Set consistent to false.
+ /// If loop peeling will break the dependence, mark appropriately.
+ bool weakZeroDstSIVtest(const SCEV *SrcCoeff, const SCEV *SrcConst,
+ const SCEV *DstConst, const Loop *CurrentLoop,
+ unsigned Level, FullDependence &Result,
+ Constraint &NewConstraint) const;
+
+ /// exactRDIVtest - Tests the RDIV subscript pair for dependence.
+ /// Things of the form [c1 + a*i] and [c2 + b*j],
+ /// where i and j are induction variables, c1 and c2 are loop invariant,
+ /// and a and b are constants.
+ /// Returns true if any possible dependence is disproved.
+ /// Marks the result as inconsistent.
+ /// Works in some cases that symbolicRDIVtest doesn't,
+ /// and vice versa.
+ bool exactRDIVtest(const SCEV *SrcCoeff, const SCEV *DstCoeff,
+ const SCEV *SrcConst, const SCEV *DstConst,
+ const Loop *SrcLoop, const Loop *DstLoop,
+ FullDependence &Result) const;
+
+ /// symbolicRDIVtest - Tests the RDIV subscript pair for dependence.
+ /// Things of the form [c1 + a*i] and [c2 + b*j],
+ /// where i and j are induction variables, c1 and c2 are loop invariant,
+ /// and a and b are constants.
+ /// Returns true if any possible dependence is disproved.
+ /// Marks the result as inconsistent.
+ /// Works in some cases that exactRDIVtest doesn't,
+ /// and vice versa. Can also be used as a backup for
+ /// ordinary SIV tests.
+ bool symbolicRDIVtest(const SCEV *SrcCoeff, const SCEV *DstCoeff,
+ const SCEV *SrcConst, const SCEV *DstConst,
+ const Loop *SrcLoop, const Loop *DstLoop) const;
+
+ /// gcdMIVtest - Tests an MIV subscript pair for dependence.
+ /// Returns true if any possible dependence is disproved.
+ /// Marks the result as inconsistent.
+ /// Can sometimes disprove the equal direction for 1 or more loops.
+ /// Can handle some symbolics that even the SIV tests don't get,
+ /// so we use it as a backup for everything.
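+ ///
+ /// Illustrative example: for A[2*i + 2*j] and A[2*i + 2*j + 1], the gcd
+ /// of the coefficients is 2, which does not divide the constant
+ /// difference 1, so any dependence is disproved.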
+ bool gcdMIVtest(const SCEV *Src, const SCEV *Dst,
+ FullDependence &Result) const;
+
+ /// banerjeeMIVtest - Tests an MIV subscript pair for dependence.
+ /// Returns true if any possible dependence is disproved.
+ /// Marks the result as inconsistent.
+ /// Computes directions.
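+ ///
+ /// Illustrative sketch: for the dependence equation
+ /// sum_k (a_k*i_k - b_k*i'_k) = c2 - c1, the test computes, for each
+ /// candidate direction vector, the minimum and maximum the left-hand
+ /// side can attain given the loop bounds; directions for which c2 - c1
+ /// falls outside that range are pruned.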
+ bool banerjeeMIVtest(const SCEV *Src, const SCEV *Dst,
+ const SmallBitVector &Loops,
+ FullDependence &Result) const;
- /// createDependenceAnalysisPass - This creates an instance of the
- /// DependenceAnalysis wrapper pass.
- LLVM_ABI FunctionPass *createDependenceAnalysisWrapperPass();
+ /// collectCoeffInfo - Walks through the subscript, collecting each
+ /// coefficient, the associated loop bounds, and recording its positive and
+ /// negative parts for later use.
+ CoefficientInfo *collectCoeffInfo(const SCEV *Subscript, bool SrcFlag,
+ const SCEV *&Constant) const;
+
+ /// getPositivePart - X^+ = max(X, 0).
+ const SCEV *getPositivePart(const SCEV *X) const;
+
+ /// getNegativePart - X^- = min(X, 0).
+ const SCEV *getNegativePart(const SCEV *X) const;
+
+ /// getLowerBound - Looks through all the bounds info and
+ /// computes the lower bound given the current direction settings
+ /// at each level.
+ const SCEV *getLowerBound(BoundInfo *Bound) const;
+
+ /// getUpperBound - Looks through all the bounds info and
+ /// computes the upper bound given the current direction settings
+ /// at each level.
+ const SCEV *getUpperBound(BoundInfo *Bound) const;
+
+ /// exploreDirections - Hierarchically expands the direction vector
+ /// search space, combining the directions of discovered dependences
+ /// in the DirSet field of Bound. Returns the number of distinct
+ /// dependences discovered. If the dependence is disproved,
+ /// it will return 0.
+ unsigned exploreDirections(unsigned Level, CoefficientInfo *A,
+ CoefficientInfo *B, BoundInfo *Bound,
+ const SmallBitVector &Loops,
+ unsigned &DepthExpanded, const SCEV *Delta) const;
+
+ /// testBounds - Returns true iff the current bounds are plausible.
+ bool testBounds(unsigned char DirKind, unsigned Level, BoundInfo *Bound,
+ const SCEV *Delta) const;
+
+ /// findBoundsALL - Computes the upper and lower bounds for level K
+ /// using the * direction. Records them in Bound.
+ void findBoundsALL(CoefficientInfo *A, CoefficientInfo *B, BoundInfo *Bound,
+ unsigned K) const;
+
+ /// findBoundsLT - Computes the upper and lower bounds for level K
+ /// using the < direction. Records them in Bound.
+ void findBoundsLT(CoefficientInfo *A, CoefficientInfo *B, BoundInfo *Bound,
+ unsigned K) const;
+
+ /// findBoundsGT - Computes the upper and lower bounds for level K
+ /// using the > direction. Records them in Bound.
+ void findBoundsGT(CoefficientInfo *A, CoefficientInfo *B, BoundInfo *Bound,
+ unsigned K) const;
+
+ /// findBoundsEQ - Computes the upper and lower bounds for level K
+ /// using the = direction. Records them in Bound.
+ void findBoundsEQ(CoefficientInfo *A, CoefficientInfo *B, BoundInfo *Bound,
+ unsigned K) const;
+
+ /// intersectConstraints - Updates X with the intersection
+ /// of the Constraints X and Y. Returns true if X has changed.
+ bool intersectConstraints(Constraint *X, const Constraint *Y);
+
+ /// propagate - Review the constraints, looking for opportunities
+ /// to simplify a subscript pair (Src and Dst).
+ /// Return true if some simplification occurs.
+ /// If the simplification isn't exact (that is, if it is conservative
+ /// in terms of dependence), set consistent to false.
+ bool propagate(const SCEV *&Src, const SCEV *&Dst, SmallBitVector &Loops,
+ SmallVectorImpl<Constraint> &Constraints, bool &Consistent);
+
+ /// propagateDistance - Attempt to propagate a distance
+ /// constraint into a subscript pair (Src and Dst).
+ /// Return true if some simplification occurs.
+ /// If the simplification isn't exact (that is, if it is conservative
+ /// in terms of dependence), set consistent to false.
+ bool propagateDistance(const SCEV *&Src, const SCEV *&Dst,
+ Constraint &CurConstraint, bool &Consistent);
+
+ /// propagatePoint - Attempt to propagate a point
+ /// constraint into a subscript pair (Src and Dst).
+ /// Return true if some simplification occurs.
+ bool propagatePoint(const SCEV *&Src, const SCEV *&Dst,
+ Constraint &CurConstraint);
+
+ /// propagateLine - Attempt to propagate a line
+ /// constraint into a subscript pair (Src and Dst).
+ /// Return true if some simplification occurs.
+ /// If the simplification isn't exact (that is, if it is conservative
+ /// in terms of dependence), set consistent to false.
+ bool propagateLine(const SCEV *&Src, const SCEV *&Dst,
+ Constraint &CurConstraint, bool &Consistent);
+
+ /// findCoefficient - Given a linear SCEV,
+ /// return the coefficient corresponding to the specified loop.
+ /// If there isn't one, return the SCEV constant 0.
+ /// For example, given a*i + b*j + c*k, returning the coefficient
+ /// corresponding to the j loop would yield b.
+ const SCEV *findCoefficient(const SCEV *Expr, const Loop *TargetLoop) const;
+
+ /// zeroCoefficient - Given a linear SCEV,
+ /// return the SCEV given by zeroing out the coefficient
+ /// corresponding to the specified loop.
+ /// For example, given a*i + b*j + c*k, zeroing the coefficient
+ /// corresponding to the j loop would yield a*i + c*k.
+ const SCEV *zeroCoefficient(const SCEV *Expr, const Loop *TargetLoop) const;
+
+ /// addToCoefficient - Given a linear SCEV Expr,
+ /// return the SCEV given by adding some Value to the
+ /// coefficient corresponding to the specified TargetLoop.
+ /// For example, given a*i + b*j + c*k, adding 1 to the coefficient
+ /// corresponding to the j loop would yield a*i + (b+1)*j + c*k.
+ const SCEV *addToCoefficient(const SCEV *Expr, const Loop *TargetLoop,
+ const SCEV *Value) const;
+
+ /// updateDirection - Update direction vector entry
+ /// based on the current constraint.
+ void updateDirection(Dependence::DVEntry &Level,
+ const Constraint &CurConstraint) const;
+
+ /// Given a linear access function, tries to recover subscripts
+ /// for each dimension of the array element access.
+ bool tryDelinearize(Instruction *Src, Instruction *Dst,
+ SmallVectorImpl<Subscript> &Pair);
+
+ /// Tries to delinearize \p Src and \p Dst access functions for a fixed-size
+ /// multi-dimensional array. Calls tryDelinearizeFixedSizeImpl() to
+ /// delinearize \p Src and \p Dst separately.
+ bool tryDelinearizeFixedSize(Instruction *Src, Instruction *Dst,
+ const SCEV *SrcAccessFn, const SCEV *DstAccessFn,
+ SmallVectorImpl<const SCEV *> &SrcSubscripts,
+ SmallVectorImpl<const SCEV *> &DstSubscripts);
+
+ /// Tries to delinearize access function for a multi-dimensional array with
+ /// symbolic runtime sizes.
+ /// Returns true upon success and false otherwise.
+ bool
+ tryDelinearizeParametricSize(Instruction *Src, Instruction *Dst,
+ const SCEV *SrcAccessFn, const SCEV *DstAccessFn,
+ SmallVectorImpl<const SCEV *> &SrcSubscripts,
+ SmallVectorImpl<const SCEV *> &DstSubscripts);
+
+ /// checkSubscript - Helper function for checkSrcSubscript and
+ /// checkDstSubscript to avoid duplicate code.
+ bool checkSubscript(const SCEV *Expr, const Loop *LoopNest,
+ SmallBitVector &Loops, bool IsSrc);
+}; // class DependenceInfo
+
+/// AnalysisPass to compute dependence information in a function
+class DependenceAnalysis : public AnalysisInfoMixin<DependenceAnalysis> {
+public:
+ typedef DependenceInfo Result;
+ LLVM_ABI Result run(Function &F, FunctionAnalysisManager &FAM);
+
+private:
+ LLVM_ABI static AnalysisKey Key;
+ friend struct AnalysisInfoMixin<DependenceAnalysis>;
+}; // class DependenceAnalysis
+
+/// Printer pass to dump DA results.
+struct DependenceAnalysisPrinterPass
+ : public PassInfoMixin<DependenceAnalysisPrinterPass> {
+ DependenceAnalysisPrinterPass(raw_ostream &OS, bool NormalizeResults = false)
+ : OS(OS), NormalizeResults(NormalizeResults) {}
+
+ LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
+
+ static bool isRequired() { return true; }
+
+private:
+ raw_ostream &OS;
+ bool NormalizeResults;
+}; // class DependenceAnalysisPrinterPass
+
+/// Legacy pass manager pass to access dependence information
+class LLVM_ABI DependenceAnalysisWrapperPass : public FunctionPass {
+public:
+ static char ID; // Class identification, replacement for typeinfo
+ DependenceAnalysisWrapperPass();
+
+ bool runOnFunction(Function &F) override;
+ void releaseMemory() override;
+ void getAnalysisUsage(AnalysisUsage &) const override;
+ void print(raw_ostream &, const Module * = nullptr) const override;
+ DependenceInfo &getDI() const;
+
+private:
+ std::unique_ptr<DependenceInfo> info;
+}; // class DependenceAnalysisWrapperPass
+
+/// createDependenceAnalysisWrapperPass - This creates an instance of the
+/// DependenceAnalysis wrapper pass.
+LLVM_ABI FunctionPass *createDependenceAnalysisWrapperPass();
} // namespace llvm
diff --git a/llvm/include/llvm/Analysis/InlineCost.h b/llvm/include/llvm/Analysis/InlineCost.h
index 93b0a8d..ae86f35 100644
--- a/llvm/include/llvm/Analysis/InlineCost.h
+++ b/llvm/include/llvm/Analysis/InlineCost.h
@@ -336,7 +336,11 @@ LLVM_ABI std::optional<InlineCostFeatures> getInliningCostFeatures(
ProfileSummaryInfo *PSI = nullptr,
OptimizationRemarkEmitter *ORE = nullptr);
-/// Minimal filter to detect invalid constructs for inlining.
+/// Check if it is mechanically possible to inline the function \p Callee, based
+/// on the contents of the function.
+///
+/// See also \p CanInlineCallSite as an additional precondition necessary to
+/// perform a valid inline in a particular use context.
LLVM_ABI InlineResult isInlineViable(Function &Callee);
// This pass is used to annotate instructions during the inline process for
diff --git a/llvm/include/llvm/Analysis/LoopInfo.h b/llvm/include/llvm/Analysis/LoopInfo.h
index a7a6a27..f80744e 100644
--- a/llvm/include/llvm/Analysis/LoopInfo.h
+++ b/llvm/include/llvm/Analysis/LoopInfo.h
@@ -59,11 +59,12 @@ public:
};
/// Return true if the specified value is loop invariant.
- bool isLoopInvariant(const Value *V) const;
+ bool isLoopInvariant(const Value *V, bool HasCoroSuspendInst = false) const;
/// Return true if all the operands of the specified instruction are loop
/// invariant.
- bool hasLoopInvariantOperands(const Instruction *I) const;
+ bool hasLoopInvariantOperands(const Instruction *I,
+ bool HasCoroSuspendInst = false) const;
/// If the given value is an instruction inside of the loop and it can be
/// hoisted, do so to make it trivially loop-invariant.
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
index abdbca0..7683ec1 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -731,6 +731,13 @@ public:
return 0;
break;
}
+ case Instruction::PtrToAddr: {
+ unsigned DstSize = Dst->getScalarSizeInBits();
+ assert(DstSize == DL.getAddressSizeInBits(Src));
+ if (DL.isLegalInteger(DstSize))
+ return 0;
+ break;
+ }
case Instruction::PtrToInt: {
unsigned DstSize = Dst->getScalarSizeInBits();
if (DL.isLegalInteger(DstSize) &&
@@ -878,9 +885,6 @@ public:
switch (ICA.getID()) {
default:
break;
- case Intrinsic::experimental_vector_histogram_add:
- // For now, we want explicit support from the target for histograms.
- return InstructionCost::getInvalid();
case Intrinsic::allow_runtime_check:
case Intrinsic::allow_ubsan_check:
case Intrinsic::annotation:
@@ -1439,6 +1443,7 @@ public:
Op2Info, Operands, I);
}
case Instruction::IntToPtr:
+ case Instruction::PtrToAddr:
case Instruction::PtrToInt:
case Instruction::SIToFP:
case Instruction::UIToFP:
diff --git a/llvm/include/llvm/AsmParser/LLToken.h b/llvm/include/llvm/AsmParser/LLToken.h
index a2311d2..e6a0eae 100644
--- a/llvm/include/llvm/AsmParser/LLToken.h
+++ b/llvm/include/llvm/AsmParser/LLToken.h
@@ -319,6 +319,7 @@ enum Kind {
kw_fptoui,
kw_fptosi,
kw_inttoptr,
+ kw_ptrtoaddr,
kw_ptrtoint,
kw_bitcast,
kw_addrspacecast,
diff --git a/llvm/include/llvm/BinaryFormat/DXContainer.h b/llvm/include/llvm/BinaryFormat/DXContainer.h
index 89abca0..cc4af3d 100644
--- a/llvm/include/llvm/BinaryFormat/DXContainer.h
+++ b/llvm/include/llvm/BinaryFormat/DXContainer.h
@@ -16,6 +16,7 @@
#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/Support/DXILABI.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/SwapByteOrder.h"
#include "llvm/TargetParser/Triple.h"
@@ -157,6 +158,8 @@ enum class FeatureFlags : uint64_t {
static_assert((uint64_t)FeatureFlags::NextUnusedBit <= 1ull << 63,
"Shader flag bits exceed enum size.");
+LLVM_ABI ArrayRef<EnumEntry<llvm::dxil::ResourceClass>> getResourceClasses();
+
#define ROOT_SIGNATURE_FLAG(Num, Val) Val = Num,
enum class RootFlags : uint32_t {
#include "DXContainerConstants.def"
diff --git a/llvm/include/llvm/Bitcode/LLVMBitCodes.h b/llvm/include/llvm/Bitcode/LLVMBitCodes.h
index dc78eb4..1c7d346 100644
--- a/llvm/include/llvm/Bitcode/LLVMBitCodes.h
+++ b/llvm/include/llvm/Bitcode/LLVMBitCodes.h
@@ -456,7 +456,8 @@ enum CastOpcodes {
CAST_PTRTOINT = 9,
CAST_INTTOPTR = 10,
CAST_BITCAST = 11,
- CAST_ADDRSPACECAST = 12
+ CAST_ADDRSPACECAST = 12,
+ CAST_PTRTOADDR = 13,
};
/// UnaryOpcodes - These are values used in the bitcode files to encode which
diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
index 1fcedcd..2892c0c 100644
--- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h
+++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
@@ -2105,6 +2105,10 @@ public:
}
case Intrinsic::get_active_lane_mask:
case Intrinsic::experimental_vector_match:
+ case Intrinsic::experimental_vector_histogram_add:
+ case Intrinsic::experimental_vector_histogram_uadd_sat:
+ case Intrinsic::experimental_vector_histogram_umax:
+ case Intrinsic::experimental_vector_histogram_umin:
return thisT()->getTypeBasedIntrinsicInstrCost(ICA, CostKind);
case Intrinsic::modf:
case Intrinsic::sincos:
@@ -2457,6 +2461,51 @@ public:
return thisT()->getShuffleCost(TTI::SK_Reverse, cast<VectorType>(RetTy),
cast<VectorType>(ICA.getArgTypes()[0]), {},
CostKind, 0, cast<VectorType>(RetTy));
+ case Intrinsic::experimental_vector_histogram_add:
+ case Intrinsic::experimental_vector_histogram_uadd_sat:
+ case Intrinsic::experimental_vector_histogram_umax:
+ case Intrinsic::experimental_vector_histogram_umin: {
+ FixedVectorType *PtrsTy = dyn_cast<FixedVectorType>(ICA.getArgTypes()[0]);
+ Type *EltTy = ICA.getArgTypes()[1];
+
+ // Targets with scalable vectors must handle this on their own.
+ if (!PtrsTy)
+ return InstructionCost::getInvalid();
+
+ Align Alignment = thisT()->DL.getABITypeAlign(EltTy);
+ InstructionCost Cost = 0;
+ Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, PtrsTy,
+ CostKind, 1, nullptr, nullptr);
+ Cost += thisT()->getMemoryOpCost(Instruction::Load, EltTy, Alignment, 0,
+ CostKind);
+ switch (IID) {
+ default:
+ llvm_unreachable("Unhandled histogram update operation.");
+ case Intrinsic::experimental_vector_histogram_add:
+ Cost +=
+ thisT()->getArithmeticInstrCost(Instruction::Add, EltTy, CostKind);
+ break;
+ case Intrinsic::experimental_vector_histogram_uadd_sat: {
+ IntrinsicCostAttributes UAddSat(Intrinsic::uadd_sat, EltTy, {EltTy});
+ Cost += thisT()->getIntrinsicInstrCost(UAddSat, CostKind);
+ break;
+ }
+ case Intrinsic::experimental_vector_histogram_umax: {
+ IntrinsicCostAttributes UMax(Intrinsic::umax, EltTy, {EltTy});
+ Cost += thisT()->getIntrinsicInstrCost(UMax, CostKind);
+ break;
+ }
+ case Intrinsic::experimental_vector_histogram_umin: {
+ IntrinsicCostAttributes UMin(Intrinsic::umin, EltTy, {EltTy});
+ Cost += thisT()->getIntrinsicInstrCost(UMin, CostKind);
+ break;
+ }
+ }
+ Cost += thisT()->getMemoryOpCost(Instruction::Store, EltTy, Alignment, 0,
+ CostKind);
+ Cost *= PtrsTy->getNumElements();
+ return Cost;
+ }
case Intrinsic::get_active_lane_mask: {
Type *ArgTy = ICA.getArgTypes()[0];
EVT ResVT = getTLI()->getValueType(DL, RetTy, true);
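As a worked illustration of the generic model added above (a sketch of the default costing, not target-tuned numbers): for a call on <4 x ptr> pointers updating i32 buckets with experimental_vector_histogram_add, the returned cost is 4 * (extract-element + i32 load + i32 add + i32 store); the uadd_sat/umax/umin variants simply swap the add term for the cost of the corresponding scalar intrinsic.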
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h b/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h
index 75c0517..8d98255 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h
@@ -50,14 +50,12 @@ public:
struct BaseArgInfo {
Type *Ty;
SmallVector<ISD::ArgFlagsTy, 4> Flags;
- bool IsFixed;
BaseArgInfo(Type *Ty,
- ArrayRef<ISD::ArgFlagsTy> Flags = ArrayRef<ISD::ArgFlagsTy>(),
- bool IsFixed = true)
- : Ty(Ty), Flags(Flags), IsFixed(IsFixed) {}
+ ArrayRef<ISD::ArgFlagsTy> Flags = ArrayRef<ISD::ArgFlagsTy>())
+ : Ty(Ty), Flags(Flags) {}
- BaseArgInfo() : Ty(nullptr), IsFixed(false) {}
+ BaseArgInfo() : Ty(nullptr) {}
};
struct ArgInfo : public BaseArgInfo {
@@ -81,8 +79,8 @@ public:
ArgInfo(ArrayRef<Register> Regs, Type *Ty, unsigned OrigIndex,
ArrayRef<ISD::ArgFlagsTy> Flags = ArrayRef<ISD::ArgFlagsTy>(),
- bool IsFixed = true, const Value *OrigValue = nullptr)
- : BaseArgInfo(Ty, Flags, IsFixed), Regs(Regs), OrigValue(OrigValue),
+ const Value *OrigValue = nullptr)
+ : BaseArgInfo(Ty, Flags), Regs(Regs), OrigValue(OrigValue),
OrigArgIndex(OrigIndex) {
if (!Regs.empty() && Flags.empty())
this->Flags.push_back(ISD::ArgFlagsTy());
@@ -93,9 +91,8 @@ public:
}
ArgInfo(ArrayRef<Register> Regs, const Value &OrigValue, unsigned OrigIndex,
- ArrayRef<ISD::ArgFlagsTy> Flags = ArrayRef<ISD::ArgFlagsTy>(),
- bool IsFixed = true)
- : ArgInfo(Regs, OrigValue.getType(), OrigIndex, Flags, IsFixed, &OrigValue) {}
+ ArrayRef<ISD::ArgFlagsTy> Flags = ArrayRef<ISD::ArgFlagsTy>())
+ : ArgInfo(Regs, OrigValue.getType(), OrigIndex, Flags, &OrigValue) {}
ArgInfo() = default;
};
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h b/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
index 6fd05c8..3d7ccd5 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
@@ -486,6 +486,10 @@ private:
bool translatePtrToInt(const User &U, MachineIRBuilder &MIRBuilder) {
return translateCast(TargetOpcode::G_PTRTOINT, U, MIRBuilder);
}
+ bool translatePtrToAddr(const User &U, MachineIRBuilder &MIRBuilder) {
+ // FIXME: this is not correct for pointers with addr width != pointer width
+ return translatePtrToInt(U, MIRBuilder);
+ }
bool translateTrunc(const User &U, MachineIRBuilder &MIRBuilder) {
return translateCast(TargetOpcode::G_TRUNC, U, MIRBuilder);
}
diff --git a/llvm/include/llvm/CodeGen/SelectionDAG.h b/llvm/include/llvm/CodeGen/SelectionDAG.h
index 6f2ad33..dc00db9 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAG.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAG.h
@@ -238,8 +238,6 @@ class SelectionDAG {
LLVMContext *Context;
CodeGenOptLevel OptLevel;
- bool DivergentTarget = false;
-
UniformityInfo *UA = nullptr;
FunctionLoweringInfo * FLI = nullptr;
@@ -473,16 +471,14 @@ public:
Pass *PassPtr, const TargetLibraryInfo *LibraryInfo,
UniformityInfo *UA, ProfileSummaryInfo *PSIin,
BlockFrequencyInfo *BFIin, MachineModuleInfo &MMI,
- FunctionVarLocs const *FnVarLocs, bool HasDivergency);
+ FunctionVarLocs const *FnVarLocs);
void init(MachineFunction &NewMF, OptimizationRemarkEmitter &NewORE,
MachineFunctionAnalysisManager &AM,
const TargetLibraryInfo *LibraryInfo, UniformityInfo *UA,
ProfileSummaryInfo *PSIin, BlockFrequencyInfo *BFIin,
- MachineModuleInfo &MMI, FunctionVarLocs const *FnVarLocs,
- bool HasDivergency) {
- init(NewMF, NewORE, nullptr, LibraryInfo, UA, PSIin, BFIin, MMI, FnVarLocs,
- HasDivergency);
+ MachineModuleInfo &MMI, FunctionVarLocs const *FnVarLocs) {
+ init(NewMF, NewORE, nullptr, LibraryInfo, UA, PSIin, BFIin, MMI, FnVarLocs);
MFAM = &AM;
}
@@ -1260,6 +1256,9 @@ public:
/// stack arguments from being clobbered.
LLVM_ABI SDValue getStackArgumentTokenFactor(SDValue Chain);
+ std::pair<SDValue, SDValue> getMemcmp(SDValue Chain, const SDLoc &dl,
+ SDValue Dst, SDValue Src, SDValue Size,
+ const CallInst *CI);
/* \p CI if not null is the memset call being lowered.
* \p OverrideTailCall is an optional parameter that can be used to override
* the tail call optimization decision. */
diff --git a/llvm/include/llvm/CodeGen/SelectionDAGISel.h b/llvm/include/llvm/CodeGen/SelectionDAGISel.h
index a6a3928..5241a51 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAGISel.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAGISel.h
@@ -57,7 +57,9 @@ public:
AssumptionCache *AC = nullptr;
GCFunctionInfo *GFI = nullptr;
SSPLayoutInfo *SP = nullptr;
+#if !defined(NDEBUG) && LLVM_ENABLE_ABI_BREAKING_CHECKS
TargetTransformInfo *TTI = nullptr;
+#endif
CodeGenOptLevel OptLevel;
const TargetInstrInfo *TII;
const TargetLowering *TLI;
diff --git a/llvm/include/llvm/CodeGen/SelectionDAGTargetInfo.h b/llvm/include/llvm/CodeGen/SelectionDAGTargetInfo.h
index 463f0ec..fd00f81 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAGTargetInfo.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAGTargetInfo.h
@@ -23,6 +23,7 @@
namespace llvm {
+class CallInst;
class SelectionDAG;
//===----------------------------------------------------------------------===//
@@ -118,8 +119,7 @@ public:
virtual std::pair<SDValue, SDValue>
EmitTargetCodeForMemcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain,
SDValue Op1, SDValue Op2, SDValue Op3,
- MachinePointerInfo Op1PtrInfo,
- MachinePointerInfo Op2PtrInfo) const {
+ const CallInst *CI) const {
return std::make_pair(SDValue(), SDValue());
}
diff --git a/llvm/include/llvm/CodeGen/TargetCallingConv.h b/llvm/include/llvm/CodeGen/TargetCallingConv.h
index a28c7a9..ca76c04 100644
--- a/llvm/include/llvm/CodeGen/TargetCallingConv.h
+++ b/llvm/include/llvm/CodeGen/TargetCallingConv.h
@@ -54,6 +54,8 @@ namespace ISD {
unsigned IsInConsecutiveRegs : 1;
unsigned IsCopyElisionCandidate : 1; ///< Argument copy elision candidate
unsigned IsPointer : 1;
+ /// Whether this is part of a variable argument list (non-fixed).
+ unsigned IsVarArg : 1;
unsigned ByValOrByRefSize = 0; ///< Byval or byref struct size
@@ -67,7 +69,7 @@ namespace ISD {
IsSwiftError(0), IsCFGuardTarget(0), IsHva(0), IsHvaStart(0),
IsSecArgPass(0), MemAlign(0), OrigAlign(0),
IsInConsecutiveRegsLast(0), IsInConsecutiveRegs(0),
- IsCopyElisionCandidate(0), IsPointer(0) {
+ IsCopyElisionCandidate(0), IsPointer(0), IsVarArg(0) {
static_assert(sizeof(*this) == 4 * sizeof(unsigned), "flags are too big");
}
@@ -145,6 +147,9 @@ namespace ISD {
bool isPointer() const { return IsPointer; }
void setPointer() { IsPointer = 1; }
+ bool isVarArg() const { return IsVarArg; }
+ void setVarArg() { IsVarArg = 1; }
+
Align getNonZeroMemAlign() const {
return decodeMaybeAlign(MemAlign).valueOrOne();
}
@@ -239,9 +244,6 @@ namespace ISD {
MVT VT;
EVT ArgVT;
- /// IsFixed - Is this a "fixed" value, ie not passed through a vararg "...".
- bool IsFixed = false;
-
/// Index original Function's argument.
unsigned OrigArgIndex;
@@ -251,10 +253,9 @@ namespace ISD {
unsigned PartOffset;
OutputArg() = default;
- OutputArg(ArgFlagsTy flags, MVT vt, EVT argvt, bool isfixed,
- unsigned origIdx, unsigned partOffs)
- : Flags(flags), IsFixed(isfixed), OrigArgIndex(origIdx),
- PartOffset(partOffs) {
+ OutputArg(ArgFlagsTy flags, MVT vt, EVT argvt, unsigned origIdx,
+ unsigned partOffs)
+ : Flags(flags), OrigArgIndex(origIdx), PartOffset(partOffs) {
VT = vt;
ArgVT = argvt;
}
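With the IsFixed member gone, vararg-ness now travels in the per-argument flags. A hedged sketch of how call-lowering code might populate and consume the new bit (NumFixedArgs, ArgIdx, Outs and assignToStack are illustrative names, not part of this patch):

ISD::ArgFlagsTy Flags;
if (ArgIdx >= NumFixedArgs)
  Flags.setVarArg(); // argument is passed through the "..." part of the call

Outs.push_back(ISD::OutputArg(Flags, VT, ArgVT, OrigArgIdx, PartOffset));

// Calling-convention code then branches on the flag instead of IsFixed:
if (Outs[I].Flags.isVarArg())
  assignToStack(Outs[I]); // hypothetical helper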
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 01f8fb5..172b01a 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -547,10 +547,10 @@ public:
// intermediate results in f32 precision and range.
virtual bool softPromoteHalfType() const { return false; }
- // Return true if, for soft-promoted half, the half type should be passed
- // passed to and returned from functions as f32. The default behavior is to
- // pass as i16. If soft-promoted half is not used, this function is ignored
- // and values are always passed and returned as f32.
+ // Return true if, for soft-promoted half, the half type should be passed to
+ // and returned from functions as f32. The default behavior is to pass as
+ // i16. If soft-promoted half is not used, this function is ignored and
+ // values are always passed and returned as f32.
virtual bool useFPRegsForHalfType() const { return false; }
// There are two general methods for expanding a BUILD_VECTOR node:
diff --git a/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h b/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h
index 206ad4a..f706591 100644
--- a/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h
+++ b/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h
@@ -31,6 +31,7 @@
namespace llvm {
class CanonicalLoopInfo;
+class ScanInfo;
struct TargetRegionEntryInfo;
class OffloadEntriesInfoManager;
class OpenMPIRBuilder;
@@ -707,6 +708,9 @@ public:
LLVM_ABI InsertPointOrErrorTy createCancellationPoint(
const LocationDescription &Loc, omp::Directive CanceledDirective);
+ /// Allocates a ScanInfo object and returns a pointer to it.
+ Expected<ScanInfo *> scanInfoInitialize();
+
/// Generator for '#omp parallel'
///
/// \param Loc The insert and source location description.
@@ -750,6 +754,42 @@ public:
LoopBodyGenCallbackTy BodyGenCB, Value *TripCount,
const Twine &Name = "loop");
+ /// Generator for the control flow structure of OpenMP canonical loops when
+ /// the parent directive has an `inscan` modifier specified.
+ /// If the `inscan` modifier is specified, the region of the parent is
+ /// expected to contain a `scan` directive. Based on the clauses of the
+ /// scan directive, the body of the loop is split into two loops: the input
+ /// loop, containing the code generated for the input phase of the scan, and
+ /// the scan loop, containing the code generated for the scan phase.
+ /// From the bodyGen callback of these loops, `createScan` is called when a
+ /// scan directive is encountered in the loop body. `createScan` then lowers
+ /// the body of the for loop accordingly, based on 1. whether an inclusive or
+ /// exclusive scan is specified and 2. whether the input loop or the scan
+ /// loop is being generated.
+ ///
+ /// \param Loc The insert and source location description.
+ /// \param BodyGenCB Callback that will generate the loop body code.
+ /// \param Start Value of the loop counter for the first iterations.
+ /// \param Stop Loop counter values past this will stop the loop.
+ /// \param Step Loop counter increment after each iteration; negative
+ /// means counting down.
+ /// \param IsSigned Whether Start, Stop and Step are signed integers.
+ /// \param InclusiveStop Whether \p Stop itself is a valid value for the loop
+ /// counter.
+ /// \param ComputeIP Insertion point for instructions computing the trip
+ /// count. Can be used to ensure the trip count is available
+ /// at the outermost loop of a loop nest. If not set,
+ /// defaults to the preheader of the generated loop.
+ /// \param Name Base name used to derive BB and instruction names.
+ /// \param ScanRedInfo Pointer to the ScanInfo object created using
+ /// `scanInfoInitialize`.
+ ///
+ /// \returns A vector containing the CanonicalLoopInfo of the input loop and
+ /// the scan loop.
+ Expected<SmallVector<llvm::CanonicalLoopInfo *>> createCanonicalScanLoops(
+ const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB,
+ Value *Start, Value *Stop, Value *Step, bool IsSigned, bool InclusiveStop,
+ InsertPointTy ComputeIP, const Twine &Name, ScanInfo *ScanRedInfo);
+
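A rough usage sketch, assuming the caller already has the usual bounds and body callback in hand; everything except the OpenMPIRBuilder entry points is illustrative:

Expected<ScanInfo *> SI = OMPBuilder.scanInfoInitialize();
if (!SI)
  return SI.takeError();

Expected<SmallVector<CanonicalLoopInfo *>> Loops =
    OMPBuilder.createCanonicalScanLoops(Loc, BodyGenCB, Start, Stop, Step,
                                        /*IsSigned=*/true,
                                        /*InclusiveStop=*/false,
                                        /*ComputeIP=*/{}, "scan.loop", *SI);
if (!Loops)
  return Loops.takeError();
// (*Loops)[0] is the input-phase loop, (*Loops)[1] the scan-phase loop.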
/// Calculate the trip count of a canonical loop.
///
/// This allows specifying user-defined loop counter values using increment,
@@ -818,13 +858,17 @@ public:
/// at the outermost loop of a loop nest. If not set,
/// defaults to the preheader of the generated loop.
/// \param Name Base name used to derive BB and instruction names.
+ /// \param InScan Whether the loop has a scan reduction specified.
+ /// \param ScanRedInfo Pointer to the ScanInfo object created using
+ /// `scanInfoInitialize`.
///
/// \returns An object representing the created control flow structure which
/// can be used for loop-associated directives.
LLVM_ABI Expected<CanonicalLoopInfo *> createCanonicalLoop(
const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB,
Value *Start, Value *Stop, Value *Step, bool IsSigned, bool InclusiveStop,
- InsertPointTy ComputeIP = {}, const Twine &Name = "loop");
+ InsertPointTy ComputeIP = {}, const Twine &Name = "loop",
+ bool InScan = false, ScanInfo *ScanRedInfo = nullptr);
/// Collapse a loop nest into a single loop.
///
@@ -1556,6 +1600,47 @@ private:
ArrayRef<OpenMPIRBuilder::ReductionInfo> ReductionInfos,
Function *ReduceFn, AttributeList FuncAttrs);
+ /// Helper function for createCanonicalScanLoops that creates the input loop
+ /// via the first generator and the scan loop via the second generator.
+ /// \param InputLoopGen Callback that generates the loop for the input phase.
+ /// \param ScanLoopGen Callback that generates the loop for the scan phase.
+ /// \param ScanRedInfo Pointer to the ScanInfo object created using
+ /// `scanInfoInitialize`.
+ ///
+ /// \return An error if one is produced, otherwise success.
+ Error emitScanBasedDirectiveIR(
+ llvm::function_ref<Error()> InputLoopGen,
+ llvm::function_ref<Error(LocationDescription Loc)> ScanLoopGen,
+ ScanInfo *ScanRedInfo);
+
+ /// Creates the basic blocks required for scan reduction.
+ /// \param ScanRedInfo Pointer to the ScanInfo object created using
+ /// `scanInfoInitialize`.
+ void createScanBBs(ScanInfo *ScanRedInfo);
+
+ /// Dynamically allocates the buffer needed for scan reduction.
+ /// \param AllocaIP The IP where the possibly-shared pointer to the buffer
+ /// needs to be declared.
+ /// \param ScanVars Scan variables.
+ /// \param ScanVarsType Types of the scan variables.
+ /// \param ScanRedInfo Pointer to the ScanInfo object created using
+ /// `scanInfoInitialize`.
+ ///
+ /// \return An error if one is produced, otherwise success.
+ Error emitScanBasedDirectiveDeclsIR(InsertPointTy AllocaIP,
+ ArrayRef<llvm::Value *> ScanVars,
+ ArrayRef<llvm::Type *> ScanVarsType,
+ ScanInfo *ScanRedInfo);
+
+ /// Copies the result back to the reduction variable.
+ /// \param ReductionInfos Array type containing the ReductionOps.
+ /// \param ScanRedInfo Pointer to the ScanInfo object created using
+ /// `scanInfoInitialize`.
+ ///
+ /// \return An error if one is produced, otherwise success.
+ Error emitScanBasedDirectiveFinalsIR(
+ ArrayRef<llvm::OpenMPIRBuilder::ReductionInfo> ReductionInfos,
+ ScanInfo *ScanInfo);
+
/// This function emits a helper that gathers Reduce lists from the first
/// lane of every active warp to lanes in the first warp.
///
@@ -2184,6 +2269,9 @@ public:
/// free'd.
std::forward_list<CanonicalLoopInfo> LoopInfos;
+ /// Collection of owned ScanInfo objects that eventually need to be free'd.
+ std::forward_list<ScanInfo> ScanInfos;
+
/// Add a new region that will be outlined later.
void addOutlineInfo(OutlineInfo &&OI) { OutlineInfos.emplace_back(OI); }
@@ -2639,6 +2727,48 @@ public:
FinalizeCallbackTy FiniCB,
Value *Filter);
+ /// This function performs the scan reduction of the values updated in
+ /// the input phase. The reduction logic needs to be emitted between the
+ /// input and scan loops returned by `createCanonicalScanLoops`. The
+ /// following is the code that is generated; `buffer` and `span` are expected
+ /// to be populated before executing the generated code.
+ /// \code{c}
+ /// for (int k = 0; k != ceil(log2(span)); ++k) {
+ /// i=pow(2,k)
+ /// for (size cnt = last_iter; cnt >= i; --cnt)
+ /// buffer[cnt] op= buffer[cnt-i];
+ /// }
+ /// \endcode
+ /// \param Loc The insert and source location description.
+ /// \param ReductionInfos Array type containing the ReductionOps.
+ /// \param ScanRedInfo Pointer to the ScanInfo object created using
+ /// `scanInfoInitialize`.
+ ///
+ /// \returns The insertion position *after* the scan reduction.
+ InsertPointOrErrorTy emitScanReduction(
+ const LocationDescription &Loc,
+ ArrayRef<llvm::OpenMPIRBuilder::ReductionInfo> ReductionInfos,
+ ScanInfo *ScanRedInfo);
+
+ /// This directive splits and directs the control flow to the input phase
+ /// blocks or the scan phase blocks based on 1. whether the input loop or the
+ /// scan loop is executed and 2. whether an exclusive or inclusive scan is
+ /// used.
+ ///
+ /// \param Loc The insert and source location description.
+ /// \param AllocaIP The IP where the temporary buffer for the scan reduction
+ /// needs to be allocated.
+ /// \param ScanVars Scan variables.
+ /// \param ScanVarsType Types of the scan variables.
+ /// \param IsInclusive Whether it is an inclusive or exclusive scan.
+ /// \param ScanRedInfo Pointer to the ScanInfo object created using
+ /// `scanInfoInitialize`.
+ ///
+ /// \returns The insertion position *after* the scan.
+ InsertPointOrErrorTy createScan(const LocationDescription &Loc,
+ InsertPointTy AllocaIP,
+ ArrayRef<llvm::Value *> ScanVars,
+ ArrayRef<llvm::Type *> ScanVarsType,
+ bool IsInclusive, ScanInfo *ScanRedInfo);
+
/// Generator for '#omp critical'
///
/// \param Loc The insert and source location description.
@@ -3286,7 +3416,8 @@ private:
emitAtomicUpdate(InsertPointTy AllocaIP, Value *X, Type *XElemTy, Value *Expr,
AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
AtomicUpdateCallbackTy &UpdateOp, bool VolatileX,
- bool IsXBinopExpr);
+ bool IsXBinopExpr, bool IsIgnoreDenormalMode,
+ bool IsFineGrainedMemory, bool IsRemoteMemory);
/// Emit the binary op. described by \p RMWOp, using \p Src1 and \p Src2 .
///
@@ -3359,7 +3490,9 @@ public:
LLVM_ABI InsertPointOrErrorTy createAtomicUpdate(
const LocationDescription &Loc, InsertPointTy AllocaIP, AtomicOpValue &X,
Value *Expr, AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
- AtomicUpdateCallbackTy &UpdateOp, bool IsXBinopExpr);
+ AtomicUpdateCallbackTy &UpdateOp, bool IsXBinopExpr,
+ bool IsIgnoreDenormalMode = false, bool IsFineGrainedMemory = false,
+ bool IsRemoteMemory = false);
/// Emit atomic update for constructs: --- Only Scalar data types
/// V = X; X = X BinOp Expr ,
@@ -3394,7 +3527,9 @@ public:
const LocationDescription &Loc, InsertPointTy AllocaIP, AtomicOpValue &X,
AtomicOpValue &V, Value *Expr, AtomicOrdering AO,
AtomicRMWInst::BinOp RMWOp, AtomicUpdateCallbackTy &UpdateOp,
- bool UpdateExpr, bool IsPostfixUpdate, bool IsXBinopExpr);
+ bool UpdateExpr, bool IsPostfixUpdate, bool IsXBinopExpr,
+ bool IsIgnoreDenormalMode = false, bool IsFineGrainedMemory = false,
+ bool IsRemoteMemory = false);
/// Emit atomic compare for constructs: --- Only scalar data types
/// cond-expr-stmt:
@@ -3774,6 +3909,93 @@ public:
LLVM_ABI void invalidate();
};
+/// ScanInfo holds the information to assist in lowering of Scan reduction.
+/// Before lowering, the body of the for loop specifying scan reduction is
+/// expected to have the following structure
+///
+/// Loop Body Entry
+/// |
+/// Code before the scan directive
+/// |
+/// Scan Directive
+/// |
+/// Code after the scan directive
+/// |
+/// Loop Body Exit
+/// When `createCanonicalScanLoops` is executed, its bodyGen callback
+/// transforms the body to:
+///
+/// Loop Body Entry
+/// |
+/// OMPScanDispatch
+///
+/// OMPBeforeScanBlock
+/// |
+/// OMPScanLoopExit
+/// |
+/// Loop Body Exit
+///
+/// The insert point is updated to the first insert point of
+/// OMPBeforeScanBlock. OMPBeforeScanBlock dominates the control flow of code
+/// generated until the scan directive is encountered, and OMPAfterScanBlock
+/// dominates the control flow of code generated after it. The successor of
+/// OMPScanDispatch can be OMPBeforeScanBlock or OMPAfterScanBlock, based on
+/// 1. whether the input phase or the scan phase is being lowered and
+/// 2. whether it is an exclusive or inclusive scan. This jump is added when
+/// `createScan` is executed. When the input loop is being generated, an
+/// inclusive scan makes `OMPAfterScanBlock` the successor of
+/// `OMPScanDispatch`, while an exclusive scan makes `OMPBeforeScanBlock` the
+/// successor; for the scan loop it is the other way around. At the end of
+/// the input loop the temporary buffer is populated, and at the beginning of
+/// the scan loop the temporary buffer is read. Once the scan directive is
+/// encountered, the insertion point is updated to `OMPAfterScanBlock`, as it
+/// is expected to dominate the code after the scan directive. Both the
+/// before- and after-scan blocks are succeeded by `OMPScanLoopExit`.
+/// Temporary buffer allocations are done in the `ScanLoopInit` block before
+/// the lowering of the for-loop. The results are copied back to the
+/// reduction variable in the `ScanLoopFinish` block.
+class ScanInfo {
+public:
+ /// Dominates the body of the loop before the scan directive
+ llvm::BasicBlock *OMPBeforeScanBlock = nullptr;
+
+ /// Dominates the body of the loop after the scan directive
+ llvm::BasicBlock *OMPAfterScanBlock = nullptr;
+
+ /// Controls the flow to before or after scan blocks
+ llvm::BasicBlock *OMPScanDispatch = nullptr;
+
+ /// Exit block of loop body
+ llvm::BasicBlock *OMPScanLoopExit = nullptr;
+
+ /// Block before loop body where scan initializations are done
+ llvm::BasicBlock *OMPScanInit = nullptr;
+
+ /// Block after loop body where scan finalizations are done
+ llvm::BasicBlock *OMPScanFinish = nullptr;
+
+ /// If true, indicates the input phase is being lowered; otherwise the scan
+ /// phase is being lowered
+ bool OMPFirstScanLoop = false;
+
+ /// Maps the private reduction variable to the pointer of the temporary
+ /// buffer
+ llvm::SmallDenseMap<llvm::Value *, llvm::Value *> *ScanBuffPtrs;
+
+ /// Keeps track of the value of the iteration variable of the input/scan
+ /// loop, to be used for scan directive lowering
+ llvm::Value *IV;
+
+ /// Stores the span of the canonical loop being lowered, to be used for
+ /// temporary buffer allocation and finalization.
+ llvm::Value *Span;
+
+ ScanInfo() {
+ ScanBuffPtrs = new llvm::SmallDenseMap<llvm::Value *, llvm::Value *>();
+ }
+
+ ~ScanInfo() { delete (ScanBuffPtrs); }
+};
+
} // end namespace llvm
#endif // LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
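To tie the new entry points together, a hedged sketch of where they sit in a frontend's lowering (variable names are illustrative): createScan is invoked from the body callback at the point of the scan directive, and emitScanReduction is emitted between the input loop and the scan loop returned by createCanonicalScanLoops.

// Inside the loop-body callback, where the `scan` directive appears:
auto AfterScanIP = OMPBuilder.createScan(Loc, AllocaIP, ScanVars, ScanVarTypes,
                                         /*IsInclusive=*/true, ScanRedInfo);
if (!AfterScanIP)
  return AfterScanIP.takeError();

// Between the two generated loops, reduce across iterations over the buffer:
auto AfterRedIP =
    OMPBuilder.emitScanReduction(Loc, ReductionInfos, ScanRedInfo);
if (!AfterRedIP)
  return AfterRedIP.takeError();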
diff --git a/llvm/include/llvm/IR/Constants.h b/llvm/include/llvm/IR/Constants.h
index 9c9fc88..e06e6ad 100644
--- a/llvm/include/llvm/IR/Constants.h
+++ b/llvm/include/llvm/IR/Constants.h
@@ -1158,6 +1158,8 @@ public:
LLVM_ABI static Constant *getXor(Constant *C1, Constant *C2);
LLVM_ABI static Constant *getTrunc(Constant *C, Type *Ty,
bool OnlyIfReduced = false);
+ LLVM_ABI static Constant *getPtrToAddr(Constant *C, Type *Ty,
+ bool OnlyIfReduced = false);
LLVM_ABI static Constant *getPtrToInt(Constant *C, Type *Ty,
bool OnlyIfReduced = false);
LLVM_ABI static Constant *getIntToPtr(Constant *C, Type *Ty,
diff --git a/llvm/include/llvm/IR/DebugInfoMetadata.h b/llvm/include/llvm/IR/DebugInfoMetadata.h
index f1f0c189..a4e9d16 100644
--- a/llvm/include/llvm/IR/DebugInfoMetadata.h
+++ b/llvm/include/llvm/IR/DebugInfoMetadata.h
@@ -2506,10 +2506,8 @@ public:
class DILocation : public MDNode {
friend class LLVMContextImpl;
friend class MDNode;
-#ifdef EXPERIMENTAL_KEY_INSTRUCTIONS
uint64_t AtomGroup : 61;
uint64_t AtomRank : 3;
-#endif
DILocation(LLVMContext &C, StorageType Storage, unsigned Line,
unsigned Column, uint64_t AtomGroup, uint8_t AtomRank,
@@ -2539,20 +2537,8 @@ class DILocation : public MDNode {
}
public:
- uint64_t getAtomGroup() const {
-#ifdef EXPERIMENTAL_KEY_INSTRUCTIONS
- return AtomGroup;
-#else
- return 0;
-#endif
- }
- uint8_t getAtomRank() const {
-#ifdef EXPERIMENTAL_KEY_INSTRUCTIONS
- return AtomRank;
-#else
- return 0;
-#endif
- }
+ uint64_t getAtomGroup() const { return AtomGroup; }
+ uint8_t getAtomRank() const { return AtomRank; }
const DILocation *getWithoutAtom() const {
if (!getAtomGroup() && !getAtomRank())
diff --git a/llvm/include/llvm/IR/GenericFloatingPointPredicateUtils.h b/llvm/include/llvm/IR/GenericFloatingPointPredicateUtils.h
index 8aac9d5..448a6e9 100644
--- a/llvm/include/llvm/IR/GenericFloatingPointPredicateUtils.h
+++ b/llvm/include/llvm/IR/GenericFloatingPointPredicateUtils.h
@@ -135,6 +135,12 @@ public:
if (Mode.Input != DenormalMode::IEEE)
return {Invalid, fcAllFlags, fcAllFlags};
+ auto ExactClass = [IsFabs, Src](FPClassTest Mask) {
+ if (IsFabs)
+ Mask = llvm::inverse_fabs(Mask);
+ return exactClass(Src, Mask);
+ };
+
switch (Pred) {
case FCmpInst::FCMP_OEQ: // Match x == 0.0
return exactClass(Src, fcZero);
@@ -151,26 +157,24 @@ public:
case FCmpInst::FCMP_UNO:
return exactClass(Src, fcNan);
case FCmpInst::FCMP_OGT: // x > 0
- return exactClass(Src, fcPosSubnormal | fcPosNormal | fcPosInf);
+ return ExactClass(fcPosSubnormal | fcPosNormal | fcPosInf);
case FCmpInst::FCMP_UGT: // isnan(x) || x > 0
- return exactClass(Src, fcPosSubnormal | fcPosNormal | fcPosInf | fcNan);
+ return ExactClass(fcPosSubnormal | fcPosNormal | fcPosInf | fcNan);
case FCmpInst::FCMP_OGE: // x >= 0
- return exactClass(Src, fcPositive | fcNegZero);
+ return ExactClass(fcPositive | fcNegZero);
case FCmpInst::FCMP_UGE: // isnan(x) || x >= 0
- return exactClass(Src, fcPositive | fcNegZero | fcNan);
+ return ExactClass(fcPositive | fcNegZero | fcNan);
case FCmpInst::FCMP_OLT: // x < 0
- return exactClass(Src, fcNegSubnormal | fcNegNormal | fcNegInf);
+ return ExactClass(fcNegSubnormal | fcNegNormal | fcNegInf);
case FCmpInst::FCMP_ULT: // isnan(x) || x < 0
- return exactClass(Src, fcNegSubnormal | fcNegNormal | fcNegInf | fcNan);
+ return ExactClass(fcNegSubnormal | fcNegNormal | fcNegInf | fcNan);
case FCmpInst::FCMP_OLE: // x <= 0
- return exactClass(Src, fcNegative | fcPosZero);
+ return ExactClass(fcNegative | fcPosZero);
case FCmpInst::FCMP_ULE: // isnan(x) || x <= 0
- return exactClass(Src, fcNegative | fcPosZero | fcNan);
+ return ExactClass(fcNegative | fcPosZero | fcNan);
default:
llvm_unreachable("all compare types are handled");
}
-
- return {Invalid, fcAllFlags, fcAllFlags};
}
const bool IsDenormalRHS = (OrigClass & fcSubnormal) == OrigClass;
diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h
index d106ded..783f8f6 100644
--- a/llvm/include/llvm/IR/IRBuilder.h
+++ b/llvm/include/llvm/IR/IRBuilder.h
@@ -812,15 +812,10 @@ public:
LLVM_ABI CallInst *CreateFPMinimumReduce(Value *Src);
/// Create a lifetime.start intrinsic.
- ///
- /// If the pointer isn't i8* it will be converted.
- LLVM_ABI CallInst *CreateLifetimeStart(Value *Ptr,
- ConstantInt *Size = nullptr);
+ LLVM_ABI CallInst *CreateLifetimeStart(Value *Ptr);
/// Create a lifetime.end intrinsic.
- ///
- /// If the pointer isn't i8* it will be converted.
- LLVM_ABI CallInst *CreateLifetimeEnd(Value *Ptr, ConstantInt *Size = nullptr);
+ LLVM_ABI CallInst *CreateLifetimeEnd(Value *Ptr);
/// Create a call to invariant.start intrinsic.
///
@@ -2192,7 +2187,10 @@ public:
return CreateCast(Instruction::FPExt, V, DestTy, Name, FPMathTag,
FMFSource);
}
-
+ Value *CreatePtrToAddr(Value *V, const Twine &Name = "") {
+ return CreateCast(Instruction::PtrToInt, V,
+ BB->getDataLayout().getAddressType(V->getType()), Name);
+ }
Value *CreatePtrToInt(Value *V, Type *DestTy,
const Twine &Name = "") {
return CreateCast(Instruction::PtrToInt, V, DestTy, Name);
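Two call-site consequences of this hunk, shown as a hedged sketch (Builder and Ptr are assumed to exist): lifetime markers no longer take an explicit size, and CreatePtrToAddr derives its destination integer type from the DataLayout's address type for the pointer's address space.

AllocaInst *Tmp = Builder.CreateAlloca(Builder.getInt64Ty());
Builder.CreateLifetimeStart(Tmp); // size operand removed
// ... use Tmp ...
Builder.CreateLifetimeEnd(Tmp);

// Address of an arbitrary pointer value, at the address width of its
// address space:
Value *Addr = Builder.CreatePtrToAddr(Ptr, "addr");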
diff --git a/llvm/include/llvm/IR/InstVisitor.h b/llvm/include/llvm/IR/InstVisitor.h
index 6d5398b..8e4dc64 100644
--- a/llvm/include/llvm/IR/InstVisitor.h
+++ b/llvm/include/llvm/IR/InstVisitor.h
@@ -183,6 +183,7 @@ public:
RetTy visitUIToFPInst(UIToFPInst &I) { DELEGATE(CastInst);}
RetTy visitSIToFPInst(SIToFPInst &I) { DELEGATE(CastInst);}
RetTy visitPtrToIntInst(PtrToIntInst &I) { DELEGATE(CastInst);}
+ RetTy visitPtrToAddrInst(PtrToAddrInst &I) { DELEGATE(CastInst);}
RetTy visitIntToPtrInst(IntToPtrInst &I) { DELEGATE(CastInst);}
RetTy visitBitCastInst(BitCastInst &I) { DELEGATE(CastInst);}
RetTy visitAddrSpaceCastInst(AddrSpaceCastInst &I) { DELEGATE(CastInst);}
diff --git a/llvm/include/llvm/IR/Instruction.def b/llvm/include/llvm/IR/Instruction.def
index a5ad92f..face6a9 100644
--- a/llvm/include/llvm/IR/Instruction.def
+++ b/llvm/include/llvm/IR/Instruction.def
@@ -190,35 +190,36 @@ HANDLE_CAST_INST(43, UIToFP , UIToFPInst ) // UInt -> floating point
HANDLE_CAST_INST(44, SIToFP , SIToFPInst ) // SInt -> floating point
HANDLE_CAST_INST(45, FPTrunc , FPTruncInst ) // Truncate floating point
HANDLE_CAST_INST(46, FPExt , FPExtInst ) // Extend floating point
-HANDLE_CAST_INST(47, PtrToInt, PtrToIntInst) // Pointer -> Integer
-HANDLE_CAST_INST(48, IntToPtr, IntToPtrInst) // Integer -> Pointer
-HANDLE_CAST_INST(49, BitCast , BitCastInst ) // Type cast
-HANDLE_CAST_INST(50, AddrSpaceCast, AddrSpaceCastInst) // addrspace cast
- LAST_CAST_INST(50)
+HANDLE_CAST_INST(47, PtrToInt, PtrToIntInst) // Pointer -> Integer (bitcast)
+HANDLE_CAST_INST(48, PtrToAddr, PtrToAddrInst) // Pointer -> Address
+HANDLE_CAST_INST(49, IntToPtr, IntToPtrInst) // Integer -> Pointer
+HANDLE_CAST_INST(50, BitCast , BitCastInst ) // Type cast
+HANDLE_CAST_INST(51, AddrSpaceCast, AddrSpaceCastInst) // addrspace cast
+ LAST_CAST_INST(51)
- FIRST_FUNCLETPAD_INST(51)
-HANDLE_FUNCLETPAD_INST(51, CleanupPad, CleanupPadInst)
-HANDLE_FUNCLETPAD_INST(52, CatchPad , CatchPadInst)
- LAST_FUNCLETPAD_INST(52)
+ FIRST_FUNCLETPAD_INST(52)
+HANDLE_FUNCLETPAD_INST(52, CleanupPad, CleanupPadInst)
+HANDLE_FUNCLETPAD_INST(53, CatchPad , CatchPadInst)
+ LAST_FUNCLETPAD_INST(53)
// Other operators...
- FIRST_OTHER_INST(53)
-HANDLE_OTHER_INST(53, ICmp , ICmpInst ) // Integer comparison instruction
-HANDLE_OTHER_INST(54, FCmp , FCmpInst ) // Floating point comparison instr.
-HANDLE_OTHER_INST(55, PHI , PHINode ) // PHI node instruction
-HANDLE_OTHER_INST(56, Call , CallInst ) // Call a function
-HANDLE_OTHER_INST(57, Select , SelectInst ) // select instruction
-HANDLE_USER_INST (58, UserOp1, Instruction) // May be used internally in a pass
-HANDLE_USER_INST (59, UserOp2, Instruction) // Internal to passes only
-HANDLE_OTHER_INST(60, VAArg , VAArgInst ) // vaarg instruction
-HANDLE_OTHER_INST(61, ExtractElement, ExtractElementInst)// extract from vector
-HANDLE_OTHER_INST(62, InsertElement, InsertElementInst) // insert into vector
-HANDLE_OTHER_INST(63, ShuffleVector, ShuffleVectorInst) // shuffle two vectors.
-HANDLE_OTHER_INST(64, ExtractValue, ExtractValueInst)// extract from aggregate
-HANDLE_OTHER_INST(65, InsertValue, InsertValueInst) // insert into aggregate
-HANDLE_OTHER_INST(66, LandingPad, LandingPadInst) // Landing pad instruction.
-HANDLE_OTHER_INST(67, Freeze, FreezeInst) // Freeze instruction.
- LAST_OTHER_INST(67)
+ FIRST_OTHER_INST(54)
+HANDLE_OTHER_INST(54, ICmp , ICmpInst ) // Integer comparison instruction
+HANDLE_OTHER_INST(55, FCmp , FCmpInst ) // Floating point comparison instr.
+HANDLE_OTHER_INST(56, PHI , PHINode ) // PHI node instruction
+HANDLE_OTHER_INST(57, Call , CallInst ) // Call a function
+HANDLE_OTHER_INST(58, Select , SelectInst ) // select instruction
+HANDLE_USER_INST (59, UserOp1, Instruction) // May be used internally in a pass
+HANDLE_USER_INST (60, UserOp2, Instruction) // Internal to passes only
+HANDLE_OTHER_INST(61, VAArg , VAArgInst ) // vaarg instruction
+HANDLE_OTHER_INST(62, ExtractElement, ExtractElementInst)// extract from vector
+HANDLE_OTHER_INST(63, InsertElement, InsertElementInst) // insert into vector
+HANDLE_OTHER_INST(64, ShuffleVector, ShuffleVectorInst) // shuffle two vectors.
+HANDLE_OTHER_INST(65, ExtractValue, ExtractValueInst)// extract from aggregate
+HANDLE_OTHER_INST(66, InsertValue, InsertValueInst) // insert into aggregate
+HANDLE_OTHER_INST(67, LandingPad, LandingPadInst) // Landing pad instruction.
+HANDLE_OTHER_INST(68, Freeze, FreezeInst) // Freeze instruction.
+ LAST_OTHER_INST(68)
#undef FIRST_TERM_INST
#undef HANDLE_TERM_INST
diff --git a/llvm/include/llvm/IR/Instructions.h b/llvm/include/llvm/IR/Instructions.h
index 6f69b68..95a0a7f 100644
--- a/llvm/include/llvm/IR/Instructions.h
+++ b/llvm/include/llvm/IR/Instructions.h
@@ -4949,6 +4949,46 @@ public:
}
};
+/// This class represents a cast from a pointer to an address (non-capturing
+/// ptrtoint).
+class PtrToAddrInst : public CastInst {
+protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+
+ /// Clone an identical PtrToAddrInst.
+ PtrToAddrInst *cloneImpl() const;
+
+public:
+ /// Constructor with insert-before-instruction semantics
+ PtrToAddrInst(Value *S, ///< The value to be converted
+ Type *Ty, ///< The type to convert to
+ const Twine &NameStr = "", ///< A name for the new instruction
+ InsertPosition InsertBefore =
+ nullptr ///< Where to insert the new instruction
+ );
+
+ /// Gets the pointer operand.
+ Value *getPointerOperand() { return getOperand(0); }
+ /// Gets the pointer operand.
+ const Value *getPointerOperand() const { return getOperand(0); }
+ /// Gets the operand index of the pointer operand.
+ static unsigned getPointerOperandIndex() { return 0U; }
+
+ /// Returns the address space of the pointer operand.
+ unsigned getPointerAddressSpace() const {
+ return getPointerOperand()->getType()->getPointerAddressSpace();
+ }
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const Instruction *I) {
+ return I->getOpcode() == PtrToAddr;
+ }
+ static bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+};
+
//===----------------------------------------------------------------------===//
// BitCastInst Class
//===----------------------------------------------------------------------===//
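A small sketch of how analysis code might recognize the new cast, mirroring the PtrToIntInst idioms and the DataLayout query used in the TTI hunk earlier in this patch (the helper name is hypothetical):

static bool isFreeAddressCast(const Instruction &I, const DataLayout &DL) {
  if (const auto *PTA = dyn_cast<PtrToAddrInst>(&I)) {
    // ptrtoaddr always yields the address-width integer of the operand's
    // address space, so it is a no-op wherever that integer type is legal.
    Type *PtrTy = PTA->getPointerOperand()->getType();
    return DL.isLegalInteger(DL.getAddressSizeInBits(PtrTy));
  }
  return false;
}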
diff --git a/llvm/include/llvm/IR/Intrinsics.h b/llvm/include/llvm/IR/Intrinsics.h
index 48735b0..9577d01 100644
--- a/llvm/include/llvm/IR/Intrinsics.h
+++ b/llvm/include/llvm/IR/Intrinsics.h
@@ -104,12 +104,6 @@ namespace Intrinsic {
LLVM_ABI Function *getOrInsertDeclaration(Module *M, ID id,
ArrayRef<Type *> Tys = {});
- LLVM_DEPRECATED("Use getOrInsertDeclaration instead",
- "getOrInsertDeclaration")
- inline Function *getDeclaration(Module *M, ID id, ArrayRef<Type *> Tys = {}) {
- return getOrInsertDeclaration(M, id, Tys);
- }
-
/// Look up the Function declaration of the intrinsic \p id in the Module
/// \p M and return it if it exists. Otherwise, return nullptr. This version
/// supports non-overloaded intrinsics.
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
index 1a23276..e0ee123 100644
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -1644,16 +1644,12 @@ def int_ucmp : DefaultAttrsIntrinsic<
//===------------------------- Memory Use Markers -------------------------===//
//
-def int_lifetime_start : DefaultAttrsIntrinsic<[],
- [llvm_i64_ty, llvm_anyptr_ty],
- [IntrArgMemOnly,
- NoCapture<ArgIndex<1>>,
- ImmArg<ArgIndex<0>>]>;
-def int_lifetime_end : DefaultAttrsIntrinsic<[],
- [llvm_i64_ty, llvm_anyptr_ty],
- [IntrArgMemOnly,
- NoCapture<ArgIndex<1>>,
- ImmArg<ArgIndex<0>>]>;
+def int_lifetime_start
+ : DefaultAttrsIntrinsic<[], [llvm_anyptr_ty],
+ [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
+def int_lifetime_end
+ : DefaultAttrsIntrinsic<[], [llvm_anyptr_ty],
+ [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
def int_invariant_start : DefaultAttrsIntrinsic<[llvm_ptr_ty],
[llvm_i64_ty, llvm_anyptr_ty],
[IntrArgMemOnly,
diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
index ca6e212..6d53bf8 100644
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -296,22 +296,22 @@ let TargetPrefix = "aarch64", IntrProperties = [IntrNoMem] in {
def int_aarch64_neon_sqrdmlah : AdvSIMD_3IntArg_Intrinsic;
def int_aarch64_neon_sqrdmlsh : AdvSIMD_3IntArg_Intrinsic;
- // Vector Polynominal Multiply
- def int_aarch64_neon_pmul : AdvSIMD_2VectorArg_Intrinsic;
-
- // Vector Long Multiply
- def int_aarch64_neon_smull : AdvSIMD_2VectorArg_Long_Intrinsic;
- def int_aarch64_neon_umull : AdvSIMD_2VectorArg_Long_Intrinsic;
- def int_aarch64_neon_pmull : AdvSIMD_2VectorArg_Long_Intrinsic;
-
- // 64-bit polynomial multiply really returns an i128, which is not legal. Fake
- // it with a v16i8.
- def int_aarch64_neon_pmull64 :
- DefaultAttrsIntrinsic<[llvm_v16i8_ty], [llvm_i64_ty, llvm_i64_ty], [IntrNoMem]>;
-
- // Vector Extending Multiply
- def int_aarch64_neon_fmulx : AdvSIMD_2FloatArg_Intrinsic {
- let IntrProperties = [IntrNoMem, Commutative];
+ let IntrProperties = [IntrNoMem, Commutative] in {
+ // Vector Polynomial Multiply
+ def int_aarch64_neon_pmul : AdvSIMD_2VectorArg_Intrinsic;
+
+ // Vector Long Multiply
+ def int_aarch64_neon_smull : AdvSIMD_2VectorArg_Long_Intrinsic;
+ def int_aarch64_neon_umull : AdvSIMD_2VectorArg_Long_Intrinsic;
+ def int_aarch64_neon_pmull : AdvSIMD_2VectorArg_Long_Intrinsic;
+
+ // 64-bit polynomial multiply really returns an i128, which is not legal.
+ // Fake it with a v16i8.
+ def int_aarch64_neon_pmull64
+ : DefaultAttrsIntrinsic<[llvm_v16i8_ty], [llvm_i64_ty, llvm_i64_ty]>;
+
+ // Vector Extending Multiply
+ def int_aarch64_neon_fmulx : AdvSIMD_2FloatArg_Intrinsic;
}
// Vector Saturating Doubling Long Multiply
diff --git a/llvm/include/llvm/IR/IntrinsicsNVVM.td b/llvm/include/llvm/IR/IntrinsicsNVVM.td
index 967d166..1bcc442 100644
--- a/llvm/include/llvm/IR/IntrinsicsNVVM.td
+++ b/llvm/include/llvm/IR/IntrinsicsNVVM.td
@@ -137,6 +137,7 @@
def llvm_global_ptr_ty : LLVMQualPointerType<1>; // (global)ptr
def llvm_shared_ptr_ty : LLVMQualPointerType<3>; // (shared)ptr
+def llvm_constant_ptr_ty: LLVMQualPointerType<4>; // (const)ptr
def llvm_local_ptr_ty : LLVMQualPointerType<5>; // (local)ptr
def llvm_tmem_ptr_ty : LLVMQualPointerType<6>; // (tensor memory)ptr
def llvm_shared_cluster_ptr_ty : LLVMQualPointerType<7>; // (shared_cluster)ptr
@@ -2212,15 +2213,17 @@ def int_nvvm_cp_async_bulk_tensor_prefetch_tile_gather4_2d
// Intrinsics for Prefetch and Prefetchu
let IntrProperties = [IntrArgMemOnly, ReadOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>] in {
foreach level = ["L1", "L2"] in {
- def int_nvvm_prefetch_ # level : Intrinsic<[], [llvm_ptr_ty]>;
- def int_nvvm_prefetch_global_ # level : Intrinsic<[], [llvm_global_ptr_ty]>;
- def int_nvvm_prefetch_local_ # level : Intrinsic<[], [llvm_local_ptr_ty]>;
+ def int_nvvm_prefetch_ # level : DefaultAttrsIntrinsic<[], [llvm_ptr_ty]>;
+ def int_nvvm_prefetch_global_ # level : DefaultAttrsIntrinsic<[], [llvm_global_ptr_ty]>;
+ def int_nvvm_prefetch_local_ # level : DefaultAttrsIntrinsic<[], [llvm_local_ptr_ty]>;
}
+ def int_nvvm_prefetch_tensormap : DefaultAttrsIntrinsic<[], [llvm_anyptr_ty]>;
+
foreach eviction_priority = ["evict_normal", "evict_last"] in
- def int_nvvm_prefetch_global_L2_ # eviction_priority : Intrinsic<[], [llvm_global_ptr_ty]>;
+ def int_nvvm_prefetch_global_L2_ # eviction_priority : DefaultAttrsIntrinsic<[], [llvm_global_ptr_ty]>;
- def int_nvvm_prefetchu_L1 : Intrinsic<[], [llvm_ptr_ty]>;
+ def int_nvvm_prefetchu_L1 : DefaultAttrsIntrinsic<[], [llvm_ptr_ty]>;
}
// applypriority
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index 99f975f..243100f 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -1717,7 +1717,7 @@ let TargetPrefix = "riscv" in {
llvm_anyint_ty],
[NoCapture<ArgIndex<0>>, IntrReadMem]>;
- // Input: (pointer, offset, mask, vl)
+ // Input: (pointer, stride, mask, vl)
def int_riscv_sseg # nf # _load_mask
: DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty],
!listsplat(LLVMMatchType<0>,
@@ -1736,6 +1736,17 @@ let TargetPrefix = "riscv" in {
[llvm_anyptr_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
llvm_anyint_ty]),
[NoCapture<ArgIndex<nf>>, IntrWriteMem]>;
+
+ // Input: (<stored values>..., pointer, stride, mask, vl)
+ def int_riscv_sseg # nf # _store_mask
+ : DefaultAttrsIntrinsic<[],
+ !listconcat([llvm_anyvector_ty],
+ !listsplat(LLVMMatchType<0>,
+ !add(nf, -1)),
+ [llvm_anyptr_ty, llvm_anyint_ty,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+ llvm_anyint_ty]),
+ [NoCapture<ArgIndex<nf>>, IntrWriteMem]>;
}
} // TargetPrefix = "riscv"
diff --git a/llvm/include/llvm/IR/Operator.h b/llvm/include/llvm/IR/Operator.h
index 8344eae..10816c0 100644
--- a/llvm/include/llvm/IR/Operator.h
+++ b/llvm/include/llvm/IR/Operator.h
@@ -595,6 +595,37 @@ struct OperandTraits<PtrToIntOperator>
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PtrToIntOperator, Value)
+class PtrToAddrOperator
+ : public ConcreteOperator<Operator, Instruction::PtrToAddr> {
+ friend class PtrToAddr;
+ friend class ConstantExpr;
+
+public:
+ /// Transparently provide more efficient getOperand methods.
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ Value *getPointerOperand() { return getOperand(0); }
+ const Value *getPointerOperand() const { return getOperand(0); }
+
+ static unsigned getPointerOperandIndex() {
+ return 0U; // get index for modifying correct operand
+ }
+
+ /// Method to return the pointer operand as a PointerType.
+ Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
+
+ /// Method to return the address space of the pointer operand.
+ unsigned getPointerAddressSpace() const {
+ return cast<PointerType>(getPointerOperandType())->getAddressSpace();
+ }
+};
+
+template <>
+struct OperandTraits<PtrToAddrOperator>
+ : public FixedNumOperandTraits<PtrToAddrOperator, 1> {};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PtrToAddrOperator, Value)
+
class BitCastOperator
: public ConcreteOperator<Operator, Instruction::BitCast> {
friend class BitCastInst;
diff --git a/llvm/include/llvm/IR/RuntimeLibcalls.td b/llvm/include/llvm/IR/RuntimeLibcalls.td
index df472d4..eadf3ea 100644
--- a/llvm/include/llvm/IR/RuntimeLibcalls.td
+++ b/llvm/include/llvm/IR/RuntimeLibcalls.td
@@ -276,6 +276,7 @@ foreach FPTy = ["F32", "F64", "F128", "PPCF128"] in {
}
// Memory
+def MEMCMP : RuntimeLibcall;
def MEMCPY : RuntimeLibcall;
def MEMMOVE : RuntimeLibcall;
def MEMSET : RuntimeLibcall;
@@ -1990,12 +1991,14 @@ defset list<RuntimeLibcallImpl> PPCRuntimeLibcalls = {
}
defset list<RuntimeLibcallImpl> PPC64AIXCallList = {
+ def ___memcmp64 : RuntimeLibcallImpl<MEMCMP>;
def ___memmove64 : RuntimeLibcallImpl<MEMCPY>;
def ___memset64 : RuntimeLibcallImpl<MEMSET>;
def ___bzero64 : RuntimeLibcallImpl<BZERO>;
}
defset list<RuntimeLibcallImpl> PPC32AIXCallList = {
+ def ___memcmp : RuntimeLibcallImpl<MEMCMP>;
def ___memmove : RuntimeLibcallImpl<MEMMOVE>;
def ___memset : RuntimeLibcallImpl<MEMSET>;
def ___bzero : RuntimeLibcallImpl<BZERO>;
diff --git a/llvm/include/llvm/MC/MCSection.h b/llvm/include/llvm/MC/MCSection.h
index e976f47..a1762fc 100644
--- a/llvm/include/llvm/MC/MCSection.h
+++ b/llvm/include/llvm/MC/MCSection.h
@@ -534,6 +534,10 @@ private:
Align Alignment;
/// The section index in the assemblers section list.
unsigned Ordinal = 0;
+ /// If not -1u, the first linker-relaxable fragment's order within the
+ /// subsection. When present, the offset between two locations crossing this
+ /// fragment may not be fully resolved.
+ unsigned FirstLinkerRelaxable = -1u;
/// Whether this section has had instructions emitted into it.
bool HasInstructions : 1;
@@ -543,10 +547,6 @@ private:
bool IsText : 1;
bool IsBss : 1;
- /// Whether the section contains linker-relaxable fragments. If true, the
- /// offset between two locations may not be fully resolved.
- bool LinkerRelaxable : 1;
-
MCFragment DummyFragment;
// Mapping from subsection number to fragment list. At layout time, the
@@ -601,8 +601,9 @@ public:
bool isRegistered() const { return IsRegistered; }
void setIsRegistered(bool Value) { IsRegistered = Value; }
- bool isLinkerRelaxable() const { return LinkerRelaxable; }
- void setLinkerRelaxable() { LinkerRelaxable = true; }
+ unsigned firstLinkerRelaxable() const { return FirstLinkerRelaxable; }
+ bool isLinkerRelaxable() const { return FirstLinkerRelaxable != -1u; }
+ void setFirstLinkerRelaxable(unsigned Order) { FirstLinkerRelaxable = Order; }
MCFragment &getDummyFragment() { return DummyFragment; }
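A hedged sketch of how an assembler backend might record the first relaxable fragment with the new interface; it assumes the fragment's order within its subsection is available via getLayoutOrder():

void noteLinkerRelaxable(MCSection &Sec, const MCFragment &F) {
  // Only the first linker-relaxable fragment needs to be remembered; offsets
  // across it may change at link time.
  if (!Sec.isLinkerRelaxable())
    Sec.setFirstLinkerRelaxable(F.getLayoutOrder());
}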
diff --git a/llvm/include/llvm/ProfileData/InstrProfWriter.h b/llvm/include/llvm/ProfileData/InstrProfWriter.h
index f339fe2..1b24425 100644
--- a/llvm/include/llvm/ProfileData/InstrProfWriter.h
+++ b/llvm/include/llvm/ProfileData/InstrProfWriter.h
@@ -226,8 +226,6 @@ private:
void addRecord(StringRef Name, uint64_t Hash, InstrProfRecord &&I,
uint64_t Weight, function_ref<void(Error)> Warn);
bool shouldEncodeData(const ProfilingData &PD);
- /// Add \p Trace using reservoir sampling.
- void addTemporalProfileTrace(TemporalProfTraceTy Trace);
/// Add a memprof record for a function identified by its \p Id.
void addMemProfRecord(const GlobalValue::GUID Id,
diff --git a/llvm/include/llvm/SandboxIR/Instruction.h b/llvm/include/llvm/SandboxIR/Instruction.h
index 4e3ff19..e1c1ca0 100644
--- a/llvm/include/llvm/SandboxIR/Instruction.h
+++ b/llvm/include/llvm/SandboxIR/Instruction.h
@@ -2278,6 +2278,8 @@ class CastInst : public UnaryInstruction {
return Opcode::FPToSI;
case llvm::Instruction::FPExt:
return Opcode::FPExt;
+ case llvm::Instruction::PtrToAddr:
+ return Opcode::PtrToAddr;
case llvm::Instruction::PtrToInt:
return Opcode::PtrToInt;
case llvm::Instruction::IntToPtr:
@@ -2364,6 +2366,8 @@ class FPToUIInst final : public CastInstImpl<Instruction::Opcode::FPToUI> {};
class FPToSIInst final : public CastInstImpl<Instruction::Opcode::FPToSI> {};
class IntToPtrInst final : public CastInstImpl<Instruction::Opcode::IntToPtr> {
};
+class PtrToAddrInst final
+ : public CastInstImpl<Instruction::Opcode::PtrToAddr> {};
class PtrToIntInst final : public CastInstImpl<Instruction::Opcode::PtrToInt> {
};
class BitCastInst final : public CastInstImpl<Instruction::Opcode::BitCast> {};
diff --git a/llvm/include/llvm/SandboxIR/Values.def b/llvm/include/llvm/SandboxIR/Values.def
index a55abbd..72683e4 100644
--- a/llvm/include/llvm/SandboxIR/Values.def
+++ b/llvm/include/llvm/SandboxIR/Values.def
@@ -118,6 +118,7 @@ DEF_INSTR(Cast, OPCODES(\
OP(FPToUI) \
OP(FPToSI) \
OP(FPExt) \
+ OP(PtrToAddr) \
OP(PtrToInt) \
OP(IntToPtr) \
OP(SIToFP) \
diff --git a/llvm/include/llvm/Support/Atomic.h b/llvm/include/llvm/Support/Atomic.h
index a8445fd..c2d9ae2 100644
--- a/llvm/include/llvm/Support/Atomic.h
+++ b/llvm/include/llvm/Support/Atomic.h
@@ -17,6 +17,7 @@
#ifndef LLVM_SUPPORT_ATOMIC_H
#define LLVM_SUPPORT_ATOMIC_H
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/DataTypes.h"
// Windows will at times define MemoryFence.
@@ -26,16 +27,15 @@
namespace llvm {
namespace sys {
- void MemoryFence();
+ LLVM_ABI void MemoryFence();
#ifdef _MSC_VER
- typedef long cas_flag;
+ typedef long cas_flag;
#else
- typedef uint32_t cas_flag;
+ typedef uint32_t cas_flag;
#endif
- cas_flag CompareAndSwap(volatile cas_flag* ptr,
- cas_flag new_value,
- cas_flag old_value);
+ LLVM_ABI cas_flag CompareAndSwap(volatile cas_flag *ptr, cas_flag new_value,
+ cas_flag old_value);
}
}
diff --git a/llvm/include/llvm/Support/DebugLog.h b/llvm/include/llvm/Support/DebugLog.h
index a331295..a94e578 100644
--- a/llvm/include/llvm/Support/DebugLog.h
+++ b/llvm/include/llvm/Support/DebugLog.h
@@ -56,6 +56,16 @@ namespace llvm {
DEBUGLOG_WITH_STREAM_AND_TYPE(llvm::dbgs(), LEVEL, DEBUG_TYPE)
#define LDBG_LOG_LEVEL_1() LDBG_LOG_LEVEL(1)
+// We want the filename without the full path. We are using the __FILE__ macro
+// and a constexpr function to strip the path prefix. We can avoid the
+// frontend's repeated evaluation of __FILE__ by using __FILE_NAME__ when it is
+// defined (gcc and clang define it), since it already contains the file name.
+#if defined(__FILE_NAME__)
+#define __LLVM_FILE_NAME__ __FILE_NAME__
+#else
+#define __LLVM_FILE_NAME__ ::llvm::impl::getShortFileName(__FILE__)
+#endif
+
#define DEBUGLOG_WITH_STREAM_TYPE_FILE_AND_LINE(STREAM, LEVEL, TYPE, FILE, \
LINE) \
for (bool _c = \
@@ -69,17 +79,8 @@ namespace llvm {
#define DEBUGLOG_WITH_STREAM_TYPE_AND_FILE(STREAM, LEVEL, TYPE, FILE) \
DEBUGLOG_WITH_STREAM_TYPE_FILE_AND_LINE(STREAM, LEVEL, TYPE, FILE, __LINE__)
-// When __SHORT_FILE__ is not defined, the File is the full path,
-// otherwise __SHORT_FILE__ is defined in CMake to provide the file name
-// without the path prefix.
-#if defined(__SHORT_FILE__)
#define DEBUGLOG_WITH_STREAM_AND_TYPE(STREAM, LEVEL, TYPE) \
- DEBUGLOG_WITH_STREAM_TYPE_AND_FILE(STREAM, LEVEL, TYPE, __SHORT_FILE__)
-#else
-#define DEBUGLOG_WITH_STREAM_AND_TYPE(STREAM, LEVEL, TYPE) \
- DEBUGLOG_WITH_STREAM_TYPE_AND_FILE(STREAM, LEVEL, TYPE, \
- ::llvm::impl::getShortFileName(__FILE__))
-#endif
+ DEBUGLOG_WITH_STREAM_TYPE_AND_FILE(STREAM, LEVEL, TYPE, __LLVM_FILE_NAME__)
namespace impl {
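For compilers without __FILE_NAME__, the fallback goes through the constexpr helper in llvm::impl; an illustrative path-stripping function (not necessarily identical to the in-tree implementation) looks like this:

constexpr const char *getShortFileNameSketch(const char *Path) {
  const char *Short = Path;
  for (const char *P = Path; *P != '\0'; ++P)
    if (*P == '/' || *P == '\\')
      Short = P + 1;
  return Short;
}
// getShortFileNameSketch("llvm/lib/Support/Debug.cpp") yields "Debug.cpp".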
diff --git a/llvm/include/llvm/Support/GraphWriter.h b/llvm/include/llvm/Support/GraphWriter.h
index 39a4c0b..a8784ed 100644
--- a/llvm/include/llvm/Support/GraphWriter.h
+++ b/llvm/include/llvm/Support/GraphWriter.h
@@ -61,8 +61,8 @@ enum Name {
LLVM_ABI bool DisplayGraph(StringRef Filename, bool wait = true,
GraphProgram::Name program = GraphProgram::DOT);
-template<typename GraphType>
-class GraphWriter {
+template <typename GraphType, typename Derived> class GraphWriterBase {
+protected:
raw_ostream &O;
const GraphType &G;
bool RenderUsingHTML = false;
@@ -75,9 +75,15 @@ class GraphWriter {
DOTTraits DTraits;
static_assert(std::is_pointer_v<NodeRef>,
- "FIXME: Currently GraphWriter requires the NodeRef type to be "
- "a pointer.\nThe pointer usage should be moved to "
- "DOTGraphTraits, and removed from GraphWriter itself.");
+ "FIXME: Currently GraphWriterBase requires the NodeRef type to "
+ "be a pointer.\nThe pointer usage should be moved to "
+ "DOTGraphTraits, and removed from GraphWriterBase itself.");
+
+ // Cast the 'this' pointer to the derived type and return a reference.
+ Derived &getDerived() { return *static_cast<Derived *>(this); }
+ const Derived &getDerived() const {
+ return *static_cast<const Derived *>(this);
+ }
// Writes the edge labels of the node to O and returns true if there are any
// edge labels not equal to the empty string "".
@@ -118,23 +124,24 @@ class GraphWriter {
}
public:
- GraphWriter(raw_ostream &o, const GraphType &g, bool SN) : O(o), G(g) {
+ GraphWriterBase(raw_ostream &o, const GraphType &g, bool SN) : O(o), G(g) {
DTraits = DOTTraits(SN);
RenderUsingHTML = DTraits.renderNodesUsingHTML();
}
+ virtual ~GraphWriterBase() {}
void writeGraph(const std::string &Title = "") {
// Output the header for the graph...
- writeHeader(Title);
+ getDerived().writeHeader(Title);
// Emit all of the nodes in the graph...
- writeNodes();
+ getDerived().writeNodes();
// Output any customizations on the graph
- DOTGraphTraits<GraphType>::addCustomGraphFeatures(G, *this);
+ DOTGraphTraits<GraphType>::addCustomGraphFeatures(G, getDerived());
// Output the end of the graph
- writeFooter();
+ getDerived().writeFooter();
}
void writeHeader(const std::string &Title) {
@@ -166,8 +173,8 @@ public:
void writeNodes() {
// Loop over the graph, printing it out...
for (const auto Node : nodes<GraphType>(G))
- if (!isNodeHidden(Node))
- writeNode(Node);
+ if (!getDerived().isNodeHidden(Node))
+ getDerived().writeNode(Node);
}
bool isNodeHidden(NodeRef Node) { return DTraits.isNodeHidden(Node, G); }
@@ -302,9 +309,9 @@ public:
if (DTraits.getEdgeSourceLabel(Node, EI).empty())
edgeidx = -1;
- emitEdge(static_cast<const void*>(Node), edgeidx,
- static_cast<const void*>(TargetNode), DestPort,
- DTraits.getEdgeAttributes(Node, EI, G));
+ getDerived().emitEdge(static_cast<const void *>(Node), edgeidx,
+ static_cast<const void *>(TargetNode), DestPort,
+ DTraits.getEdgeAttributes(Node, EI, G));
}
}
@@ -357,10 +364,17 @@ public:
}
};
-template<typename GraphType>
+template <typename GraphType>
+class GraphWriter : public GraphWriterBase<GraphType, GraphWriter<GraphType>> {
+public:
+ GraphWriter(raw_ostream &o, const GraphType &g, bool SN)
+ : GraphWriterBase<GraphType, GraphWriter<GraphType>>(o, g, SN) {}
+ ~GraphWriter() override {}
+};
+
+template <typename GraphType>
raw_ostream &WriteGraph(raw_ostream &O, const GraphType &G,
- bool ShortNames = false,
- const Twine &Title = "") {
+ bool ShortNames = false, const Twine &Title = "") {
// Start the graph emission process...
GraphWriter<GraphType> W(O, G, ShortNames);
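The CRTP split lets callers override individual steps without changing the base; a hedged sketch of a derived writer that customizes node filtering (the class and predicate are illustrative):

template <typename GraphType>
class FilteringGraphWriter
    : public GraphWriterBase<GraphType, FilteringGraphWriter<GraphType>> {
  using Base = GraphWriterBase<GraphType, FilteringGraphWriter<GraphType>>;

public:
  FilteringGraphWriter(raw_ostream &O, const GraphType &G, bool ShortNames)
      : Base(O, G, ShortNames) {}

  // Reached through getDerived() from writeNodes().
  bool isNodeHidden(typename GraphTraits<GraphType>::NodeRef Node) {
    return false; // e.g. consult a user-provided filter here
  }
};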
diff --git a/llvm/include/llvm/Target/TargetCallingConv.td b/llvm/include/llvm/Target/TargetCallingConv.td
index d0533ca..75d8ae4 100644
--- a/llvm/include/llvm/Target/TargetCallingConv.td
+++ b/llvm/include/llvm/Target/TargetCallingConv.td
@@ -97,6 +97,12 @@ class CCIfVarArg<CCAction A> : CCIf<"State.isVarArg()", A> {}
/// CCIfNotVarArg - If the current function is not vararg - apply the action
class CCIfNotVarArg<CCAction A> : CCIf<"!State.isVarArg()", A> {}
+/// Apply the action if argument is fixed (not vararg).
+class CCIfArgFixed<CCAction A> : CCIf<"!ArgFlags.isVarArg()", A>;
+
+/// Apply the action if argument is vararg (not fixed).
+class CCIfArgVarArg<CCAction A> : CCIf<"ArgFlags.isVarArg()", A>;
+
/// CCIfPtrAddrSpace - If the top-level parent of the current argument has
/// pointer type in the specified address-space.
class CCIfPtrAddrSpace<int AS, CCAction A>
diff --git a/llvm/include/llvm/TextAPI/Architecture.def b/llvm/include/llvm/TextAPI/Architecture.def
index 5d310c7..53877cf 100644
--- a/llvm/include/llvm/TextAPI/Architecture.def
+++ b/llvm/include/llvm/TextAPI/Architecture.def
@@ -43,3 +43,8 @@ ARCHINFO(arm64e, arm64e, MachO::CPU_TYPE_ARM64, MachO::CPU_SUBTYPE_ARM64E, 64)
/// ARM64_32 architectures sorted by cpu sub type id
///
ARCHINFO(arm64_32, arm64_32, MachO::CPU_TYPE_ARM64_32, MachO::CPU_SUBTYPE_ARM64_32_V8, 32)
+
+///
+/// RISCV32 architectures sorted by cpu sub type id
+///
+ARCHINFO(riscv32, riscv32, MachO::CPU_TYPE_RISCV, MachO::CPU_SUBTYPE_RISCV_ALL, 32)
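For reference, a minimal sketch (not from the patch) of how the new entry becomes reachable through the existing TextAPI helpers declared in llvm/TextAPI/Architecture.h; the AK_riscv32 enumerator is generated from this ARCHINFO line, and the cpu type/subtype constants are the ones named above:

using namespace llvm::MachO;
// Maps the Mach-O cpu type/subtype pair added above back to the enum value.
Architecture Arch =
    getArchitectureFromCpuType(CPU_TYPE_RISCV, CPU_SUBTYPE_RISCV_ALL);
assert(Arch == AK_riscv32 && getArchitectureName(Arch) == "riscv32");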
diff --git a/llvm/include/llvm/Transforms/Utils/Cloning.h b/llvm/include/llvm/Transforms/Utils/Cloning.h
index 6b56230..cfa06a5 100644
--- a/llvm/include/llvm/Transforms/Utils/Cloning.h
+++ b/llvm/include/llvm/Transforms/Utils/Cloning.h
@@ -279,6 +279,9 @@ public:
/// `InlinedCalls` above is used.
SmallVector<CallBase *, 8> InlinedCallSites;
+ Value *ConvergenceControlToken = nullptr;
+ Instruction *CallSiteEHPad = nullptr;
+
/// Update profile for callee as well as cloned version. We need to do this
/// for regular inlining, but not for inlining from sample profile loader.
bool UpdateProfile;
@@ -287,9 +290,36 @@ public:
StaticAllocas.clear();
InlinedCalls.clear();
InlinedCallSites.clear();
+ ConvergenceControlToken = nullptr;
+ CallSiteEHPad = nullptr;
}
};
+/// Check if it is legal to perform inlining of the function called by \p CB
+/// into the caller at this particular use, and set fields in \p IFI.
+///
+/// This does not consider whether it is possible for the function callee itself
+/// to be inlined; for that see isInlineViable.
+LLVM_ABI InlineResult CanInlineCallSite(const CallBase &CB,
+ InlineFunctionInfo &IFI);
+
+/// This should generally not be used; use InlineFunction instead.
+///
+/// Perform mechanical inlining of \p CB into the caller.
+///
+/// This does not perform any legality or profitability checks for the
+/// inlining. This assumes that CanInlineCallSite was already called, populated
+/// \p IFI, and returned InlineResult::success.
+///
+/// Also assumes that isInlineViable returned InlineResult::success for the
+/// called function.
+LLVM_ABI void InlineFunctionImpl(CallBase &CB, InlineFunctionInfo &IFI,
+ bool MergeAttributes = false,
+ AAResults *CalleeAAR = nullptr,
+ bool InsertLifetime = true,
+ Function *ForwardVarArgsTo = nullptr,
+ OptimizationRemarkEmitter *ORE = nullptr);
+
/// This function inlines the called function into the basic
/// block of the caller. This returns false if it is not possible to inline
/// this call. The program is still in a well defined state if this occurs
@@ -328,7 +358,8 @@ LLVM_ABI InlineResult InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
bool MergeAttributes = false,
AAResults *CalleeAAR = nullptr,
bool InsertLifetime = true,
- Function *ForwardVarArgsTo = nullptr);
+ Function *ForwardVarArgsTo = nullptr,
+ OptimizationRemarkEmitter *ORE = nullptr);
/// Clones a loop \p OrigLoop. Returns the loop and the blocks in \p
/// Blocks.
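To make the intended pairing of the two new entry points concrete, a minimal usage sketch (not part of the patch; CB and the surrounding pass code are hypothetical), following the contract described in the comments above:

InlineFunctionInfo IFI;
// First phase: a pure legality check for this particular call site; it also
// records per-site state (e.g. ConvergenceControlToken, CallSiteEHPad) in IFI.
InlineResult CanInline = CanInlineCallSite(CB, IFI);
if (CanInline.isSuccess()) {
  // Second phase: mechanical inlining, assuming the check above succeeded and
  // isInlineViable() already approved the callee itself.
  InlineFunctionImpl(CB, IFI);
}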
diff --git a/llvm/include/llvm/Transforms/Utils/LoopUtils.h b/llvm/include/llvm/Transforms/Utils/LoopUtils.h
index e4d2f9d..723f6ae 100644
--- a/llvm/include/llvm/Transforms/Utils/LoopUtils.h
+++ b/llvm/include/llvm/Transforms/Utils/LoopUtils.h
@@ -185,7 +185,8 @@ LLVM_ABI bool hoistRegion(DomTreeNode *, AAResults *, LoopInfo *,
TargetLibraryInfo *, Loop *, MemorySSAUpdater &,
ScalarEvolution *, ICFLoopSafetyInfo *,
SinkAndHoistLICMFlags &, OptimizationRemarkEmitter *,
- bool, bool AllowSpeculation);
+ bool, bool AllowSpeculation,
+ bool HasCoroSuspendInst = false);
/// Return true if the induction variable \p IV in a Loop whose latch is
/// \p LatchBlock would become dead if the exit test \p Cond were removed.
diff --git a/llvm/lib/Analysis/CaptureTracking.cpp b/llvm/lib/Analysis/CaptureTracking.cpp
index 076f417..bd0d417 100644
--- a/llvm/lib/Analysis/CaptureTracking.cpp
+++ b/llvm/lib/Analysis/CaptureTracking.cpp
@@ -359,6 +359,12 @@ UseCaptureInfo llvm::DetermineUseCaptureKind(const Use &U, const Value *Base) {
case Instruction::AddrSpaceCast:
// The original value is not captured via this if the new value isn't.
return UseCaptureInfo::passthrough();
+ case Instruction::PtrToAddr:
+ // We treat ptrtoaddr as a location-independent capture of the address even
+ // if it is ultimately not used. Continuing recursive analysis after
+ // ptrtoaddr would be possible, but we'd need logic to do that correctly,
+ // which is not the same as the current pointer following logic.
+ return CaptureComponents::Address;
case Instruction::ICmp: {
unsigned Idx = U.getOperandNo();
unsigned OtherIdx = 1 - Idx;
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
index dd98b62..c14cb9e 100644
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -1485,6 +1485,9 @@ Constant *llvm::ConstantFoldCastOperand(unsigned Opcode, Constant *C,
switch (Opcode) {
default:
llvm_unreachable("Missing case");
+ case Instruction::PtrToAddr:
+ // TODO: Add some of the ptrtoint folds here as well.
+ break;
case Instruction::PtrToInt:
if (auto *CE = dyn_cast<ConstantExpr>(C)) {
Constant *FoldedValue = nullptr;
diff --git a/llvm/lib/Analysis/Delinearization.cpp b/llvm/lib/Analysis/Delinearization.cpp
index 329bd35..761c566 100644
--- a/llvm/lib/Analysis/Delinearization.cpp
+++ b/llvm/lib/Analysis/Delinearization.cpp
@@ -24,6 +24,7 @@
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/PassManager.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@@ -32,6 +33,11 @@ using namespace llvm;
#define DL_NAME "delinearize"
#define DEBUG_TYPE DL_NAME
+static cl::opt<bool> UseFixedSizeArrayHeuristic(
+ "delinearize-use-fixed-size-array-heuristic", cl::init(false), cl::Hidden,
+ cl::desc("When printing analysis, use the heuristic for fixed-size arrays "
+ "if the default delinearizetion fails."));
+
// Return true when S contains at least an undef value.
static inline bool containsUndefs(const SCEV *S) {
return SCEVExprContains(S, [](const SCEV *S) {
@@ -480,6 +486,184 @@ void llvm::delinearize(ScalarEvolution &SE, const SCEV *Expr,
});
}
+static std::optional<APInt> tryIntoAPInt(const SCEV *S) {
+ if (const auto *Const = dyn_cast<SCEVConstant>(S))
+ return Const->getAPInt();
+ return std::nullopt;
+}
+
+/// Collects the absolute values of constant steps for all induction variables.
+/// Returns true if we can prove that all step recurrences are constants and \p
+/// Expr is divisible by \p ElementSize. Each step recurrence is stored in \p
+/// Steps after being divided by \p ElementSize.
+static bool collectConstantAbsSteps(ScalarEvolution &SE, const SCEV *Expr,
+ SmallVectorImpl<uint64_t> &Steps,
+ uint64_t ElementSize) {
+ // End of recursion. The constant value also must be a multiple of
+ // ElementSize.
+ if (const auto *Const = dyn_cast<SCEVConstant>(Expr)) {
+ const uint64_t Mod = Const->getAPInt().urem(ElementSize);
+ return Mod == 0;
+ }
+
+ const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Expr);
+ if (!AR || !AR->isAffine())
+ return false;
+
+ const SCEV *Step = AR->getStepRecurrence(SE);
+ std::optional<APInt> StepAPInt = tryIntoAPInt(Step);
+ if (!StepAPInt)
+ return false;
+
+ APInt Q;
+ uint64_t R;
+ APInt::udivrem(StepAPInt->abs(), ElementSize, Q, R);
+ if (R != 0)
+ return false;
+
+ // Bail out when the step is too large.
+ std::optional<uint64_t> StepVal = Q.tryZExtValue();
+ if (!StepVal)
+ return false;
+
+ Steps.push_back(*StepVal);
+ return collectConstantAbsSteps(SE, AR->getStart(), Steps, ElementSize);
+}
+
+bool llvm::findFixedSizeArrayDimensions(ScalarEvolution &SE, const SCEV *Expr,
+ SmallVectorImpl<uint64_t> &Sizes,
+ const SCEV *ElementSize) {
+ if (!ElementSize)
+ return false;
+
+ std::optional<APInt> ElementSizeAPInt = tryIntoAPInt(ElementSize);
+ if (!ElementSizeAPInt || *ElementSizeAPInt == 0)
+ return false;
+
+ std::optional<uint64_t> ElementSizeConst = ElementSizeAPInt->tryZExtValue();
+
+ // Early exit when ElementSize is not a positive constant.
+ if (!ElementSizeConst)
+ return false;
+
+ if (!collectConstantAbsSteps(SE, Expr, Sizes, *ElementSizeConst) ||
+ Sizes.empty()) {
+ Sizes.clear();
+ return false;
+ }
+
+ // At this point, Sizes contains the absolute step recurrences for all
+ // induction variables. Each step recurrence must be a multiple of the size of
+ // the array element. Assuming that each value represents the size of an
+ // array for each dimension, this function attempts to restore the length of
+ // each dimension by dividing the step recurrence by the next smaller value.
+ // For example, if
+ // we have the following AddRec SCEV:
+ //
+ // AddRec: {{{0,+,2048}<%for.i>,+,256}<%for.j>,+,8}<%for.k> (ElementSize=8)
+ //
+ // Then Sizes will become [256, 32, 1] after sorting. We don't know the size of
+ // the outermost dimension, the next dimension will be computed as 256 / 32 =
+ // 8, and the last dimension will be computed as 32 / 1 = 32. This results in
+ // Arr[UnknownSize][8][32] with elements of size 8 bytes, where Arr is
+ // a base pointer.
+ //
+ // TODO: Catch more cases, e.g., when a step recurrence is not divisible by
+ // the next smaller one, like A[i][3*j].
+ llvm::sort(Sizes.rbegin(), Sizes.rend());
+ Sizes.erase(llvm::unique(Sizes), Sizes.end());
+
+ // The last element in Sizes will eventually hold ElementSize (restored
+ // below). At this point, all values in Sizes have already been divided by
+ // ElementSize, so replace the last element with 1 for now.
+ assert(Sizes.back() != 0 && "Unexpected zero size in Sizes.");
+ Sizes.back() = 1;
+
+ for (unsigned I = 0; I + 1 < Sizes.size(); I++) {
+ uint64_t PrevSize = Sizes[I + 1];
+ if (Sizes[I] % PrevSize) {
+ Sizes.clear();
+ return false;
+ }
+ Sizes[I] /= PrevSize;
+ }
+
+ // Finally, the last element in Sizes should be ElementSize.
+ Sizes.back() = *ElementSizeConst;
+ return true;
+}
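Spelling out the arithmetic for the AddRec used in the comments above (this merely restates the documented example; it is not additional code from the patch):

Steps / ElementSize : 2048/8 = 256, 256/8 = 32, 8/8 = 1
After sort + unique : [256, 32, 1]
Dimension lengths   : 256/32 = 8, 32/1 = 32
Result              : ArrayDecl[UnknownSize][8][32], elements of 8 bytes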
+
+/// Splits the SCEV into two vectors of SCEVs representing the subscripts and
+/// sizes of an array access, assuming that the array is a fixed size array.
+///
+/// E.g., if we have the code like as follows:
+///
+/// double A[42][8][32];
+/// for i
+/// for j
+/// for k
+/// use A[i][j][k]
+///
+/// The access function will be represented as an AddRec SCEV like:
+///
+/// AddRec: {{{0,+,2048}<%for.i>,+,256}<%for.j>,+,8}<%for.k> (ElementSize=8)
+///
+/// Then findFixedSizeArrayDimensions infers the size of each dimension of the
+/// array based on the fact that the value of the step recurrence is a multiple
+/// of the size of the corresponding array element. In the above example, it
+/// results in the following:
+///
+/// CHECK: ArrayDecl[UnknownSize][8][32] with elements of 8 bytes.
+///
+/// Finally each subscript will be computed as follows:
+///
+/// CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>]
+///
+/// Note that this function doesn't check the range of possible values for each
+/// subscript, so the caller should perform additional boundary checks if
+/// necessary.
+///
+/// Also note that this function doesn't guarantee that the original array size
+/// is restored "correctly". For example, in the following case:
+///
+/// double A[42][4][64];
+/// double B[42][8][32];
+/// for i
+/// for j
+/// for k
+/// use A[i][j][k]
+/// use B[i][2*j][k]
+///
+/// The access function for both accesses will be the same:
+///
+/// AddRec: {{{0,+,2048}<%for.i>,+,512}<%for.j>,+,8}<%for.k> (ElementSize=8)
+///
+/// The array sizes for both A and B will be computed as
+/// ArrayDecl[UnknownSize][4][64], which matches for A, but not for B.
+///
+/// TODO: At the moment, this function can handle only simple cases. For
+/// example, we cannot handle a case where a step recurrence is not divisible
+/// by the next smaller step recurrence, e.g., A[i][3*j].
+bool llvm::delinearizeFixedSizeArray(ScalarEvolution &SE, const SCEV *Expr,
+ SmallVectorImpl<const SCEV *> &Subscripts,
+ SmallVectorImpl<const SCEV *> &Sizes,
+ const SCEV *ElementSize) {
+
+ // First step: find the fixed array size.
+ SmallVector<uint64_t, 4> ConstSizes;
+ if (!findFixedSizeArrayDimensions(SE, Expr, ConstSizes, ElementSize)) {
+ Sizes.clear();
+ return false;
+ }
+
+ // Convert the constant size to SCEV.
+ for (uint64_t Size : ConstSizes)
+ Sizes.push_back(SE.getConstant(Expr->getType(), Size));
+
+ // Second step: compute the access functions for each subscript.
+ computeAccessFunctions(SE, Expr, Subscripts, Sizes);
+
+ return !Subscripts.empty();
+}
+
bool llvm::getIndexExpressionsFromGEP(ScalarEvolution &SE,
const GetElementPtrInst *GEP,
SmallVectorImpl<const SCEV *> &Subscripts,
@@ -586,9 +770,21 @@ void printDelinearization(raw_ostream &O, Function *F, LoopInfo *LI,
O << "AccessFunction: " << *AccessFn << "\n";
SmallVector<const SCEV *, 3> Subscripts, Sizes;
+
+ auto IsDelinearizationFailed = [&]() {
+ return Subscripts.size() == 0 || Sizes.size() == 0 ||
+ Subscripts.size() != Sizes.size();
+ };
+
delinearize(*SE, AccessFn, Subscripts, Sizes, SE->getElementSize(&Inst));
- if (Subscripts.size() == 0 || Sizes.size() == 0 ||
- Subscripts.size() != Sizes.size()) {
+ if (UseFixedSizeArrayHeuristic && IsDelinearizationFailed()) {
+ Subscripts.clear();
+ Sizes.clear();
+ delinearizeFixedSizeArray(*SE, AccessFn, Subscripts, Sizes,
+ SE->getElementSize(&Inst));
+ }
+
+ if (IsDelinearizationFailed()) {
O << "failed to delinearize\n";
continue;
}
diff --git a/llvm/lib/Analysis/DependenceAnalysis.cpp b/llvm/lib/Analysis/DependenceAnalysis.cpp
index f1473b2..835e270 100644
--- a/llvm/lib/Analysis/DependenceAnalysis.cpp
+++ b/llvm/lib/Analysis/DependenceAnalysis.cpp
@@ -180,8 +180,8 @@ static void dumpExampleDependence(raw_ostream &OS, DependenceInfo *DA,
for (inst_iterator SrcI = inst_begin(F), SrcE = inst_end(F); SrcI != SrcE;
++SrcI) {
if (SrcI->mayReadOrWriteMemory()) {
- for (inst_iterator DstI = SrcI, DstE = inst_end(F);
- DstI != DstE; ++DstI) {
+ for (inst_iterator DstI = SrcI, DstE = inst_end(F); DstI != DstE;
+ ++DstI) {
if (DstI->mayReadOrWriteMemory()) {
OS << "Src:" << *SrcI << " --> Dst:" << *DstI << "\n";
OS << " da analyze - ";
@@ -203,7 +203,7 @@ static void dumpExampleDependence(raw_ostream &OS, DependenceInfo *DA,
// Normalize negative direction vectors if required by clients.
if (NormalizeResults && D->normalize(&SE))
- OS << "normalized - ";
+ OS << "normalized - ";
D->dump(OS);
for (unsigned Level = 1; Level <= D->getLevels(); Level++) {
if (D->isSplitable(Level)) {
@@ -227,8 +227,8 @@ static void dumpExampleDependence(raw_ostream &OS, DependenceInfo *DA,
void DependenceAnalysisWrapperPass::print(raw_ostream &OS,
const Module *) const {
- dumpExampleDependence(OS, info.get(),
- getAnalysis<ScalarEvolutionWrapperPass>().getSE(), false);
+ dumpExampleDependence(
+ OS, info.get(), getAnalysis<ScalarEvolutionWrapperPass>().getSE(), false);
}
PreservedAnalyses
@@ -249,33 +249,26 @@ bool Dependence::isInput() const {
return Src->mayReadFromMemory() && Dst->mayReadFromMemory();
}
-
// Returns true if this is an output dependence.
bool Dependence::isOutput() const {
return Src->mayWriteToMemory() && Dst->mayWriteToMemory();
}
-
// Returns true if this is an flow (aka true) dependence.
bool Dependence::isFlow() const {
return Src->mayWriteToMemory() && Dst->mayReadFromMemory();
}
-
// Returns true if this is an anti dependence.
bool Dependence::isAnti() const {
return Src->mayReadFromMemory() && Dst->mayWriteToMemory();
}
-
// Returns true if a particular level is scalar; that is,
// if no subscript in the source or destination mention the induction
// variable associated with the loop at this level.
// Leave this out of line, so it will serve as a virtual method anchor
-bool Dependence::isScalar(unsigned level) const {
- return false;
-}
-
+bool Dependence::isScalar(unsigned level) const { return false; }
//===----------------------------------------------------------------------===//
// FullDependence methods
@@ -338,8 +331,7 @@ bool FullDependence::normalize(ScalarEvolution *SE) {
DV[Level - 1].Direction = RevDirection;
// Reverse the dependence distance as well.
if (DV[Level - 1].Distance != nullptr)
- DV[Level - 1].Distance =
- SE->getNegativeSCEV(DV[Level - 1].Distance);
+ DV[Level - 1].Distance = SE->getNegativeSCEV(DV[Level - 1].Distance);
}
LLVM_DEBUG(dbgs() << "After normalizing negative direction vectors:\n";
@@ -355,14 +347,12 @@ unsigned FullDependence::getDirection(unsigned Level) const {
return DV[Level - 1].Direction;
}
-
// Returns the distance (or NULL) associated with a particular level.
const SCEV *FullDependence::getDistance(unsigned Level) const {
assert(0 < Level && Level <= Levels && "Level out of range");
return DV[Level - 1].Distance;
}
-
// Returns true if a particular level is scalar; that is,
// if no subscript in the source or destination mention the induction
// variable associated with the loop at this level.
@@ -371,7 +361,6 @@ bool FullDependence::isScalar(unsigned Level) const {
return DV[Level - 1].Scalar;
}
-
// Returns true if peeling the first iteration from this loop
// will break this dependence.
bool FullDependence::isPeelFirst(unsigned Level) const {
@@ -379,7 +368,6 @@ bool FullDependence::isPeelFirst(unsigned Level) const {
return DV[Level - 1].PeelFirst;
}
-
// Returns true if peeling the last iteration from this loop
// will break this dependence.
bool FullDependence::isPeelLast(unsigned Level) const {
@@ -387,14 +375,12 @@ bool FullDependence::isPeelLast(unsigned Level) const {
return DV[Level - 1].PeelLast;
}
-
// Returns true if splitting this loop will break the dependence.
bool FullDependence::isSplitable(unsigned Level) const {
assert(0 < Level && Level <= Levels && "Level out of range");
return DV[Level - 1].Splitable;
}
-
//===----------------------------------------------------------------------===//
// DependenceInfo::Constraint methods
@@ -405,7 +391,6 @@ const SCEV *DependenceInfo::Constraint::getX() const {
return A;
}
-
// If constraint is a point <X, Y>, returns Y.
// Otherwise assert.
const SCEV *DependenceInfo::Constraint::getY() const {
@@ -413,7 +398,6 @@ const SCEV *DependenceInfo::Constraint::getY() const {
return B;
}
-
// If constraint is a line AX + BY = C, returns A.
// Otherwise assert.
const SCEV *DependenceInfo::Constraint::getA() const {
@@ -422,7 +406,6 @@ const SCEV *DependenceInfo::Constraint::getA() const {
return A;
}
-
// If constraint is a line AX + BY = C, returns B.
// Otherwise assert.
const SCEV *DependenceInfo::Constraint::getB() const {
@@ -431,7 +414,6 @@ const SCEV *DependenceInfo::Constraint::getB() const {
return B;
}
-
// If constraint is a line AX + BY = C, returns C.
// Otherwise assert.
const SCEV *DependenceInfo::Constraint::getC() const {
@@ -440,7 +422,6 @@ const SCEV *DependenceInfo::Constraint::getC() const {
return C;
}
-
// If constraint is a distance, returns D.
// Otherwise assert.
const SCEV *DependenceInfo::Constraint::getD() const {
@@ -448,7 +429,6 @@ const SCEV *DependenceInfo::Constraint::getD() const {
return SE->getNegativeSCEV(C);
}
-
// Returns the loop associated with this constraint.
const Loop *DependenceInfo::Constraint::getAssociatedLoop() const {
assert((Kind == Distance || Kind == Line || Kind == Point) &&
@@ -499,17 +479,16 @@ LLVM_DUMP_METHOD void DependenceInfo::Constraint::dump(raw_ostream &OS) const {
else if (isPoint())
OS << " Point is <" << *getX() << ", " << *getY() << ">\n";
else if (isDistance())
- OS << " Distance is " << *getD() <<
- " (" << *getA() << "*X + " << *getB() << "*Y = " << *getC() << ")\n";
+ OS << " Distance is " << *getD() << " (" << *getA() << "*X + " << *getB()
+ << "*Y = " << *getC() << ")\n";
else if (isLine())
- OS << " Line is " << *getA() << "*X + " <<
- *getB() << "*Y = " << *getC() << "\n";
+ OS << " Line is " << *getA() << "*X + " << *getB() << "*Y = " << *getC()
+ << "\n";
else
llvm_unreachable("unknown constraint type in Constraint::dump");
}
#endif
-
// Updates X with the intersection
// of the Constraints X and Y. Returns true if X has changed.
// Corresponds to Figure 4 from the paper
@@ -591,15 +570,14 @@ bool DependenceInfo::intersectConstraints(Constraint *X, const Constraint *Y) {
const SCEV *A1B2 = SE->getMulExpr(X->getA(), Y->getB());
const SCEV *A2B1 = SE->getMulExpr(Y->getA(), X->getB());
const SCEVConstant *C1A2_C2A1 =
- dyn_cast<SCEVConstant>(SE->getMinusSCEV(C1A2, C2A1));
+ dyn_cast<SCEVConstant>(SE->getMinusSCEV(C1A2, C2A1));
const SCEVConstant *C1B2_C2B1 =
- dyn_cast<SCEVConstant>(SE->getMinusSCEV(C1B2, C2B1));
+ dyn_cast<SCEVConstant>(SE->getMinusSCEV(C1B2, C2B1));
const SCEVConstant *A1B2_A2B1 =
- dyn_cast<SCEVConstant>(SE->getMinusSCEV(A1B2, A2B1));
+ dyn_cast<SCEVConstant>(SE->getMinusSCEV(A1B2, A2B1));
const SCEVConstant *A2B1_A1B2 =
- dyn_cast<SCEVConstant>(SE->getMinusSCEV(A2B1, A1B2));
- if (!C1B2_C2B1 || !C1A2_C2A1 ||
- !A1B2_A2B1 || !A2B1_A1B2)
+ dyn_cast<SCEVConstant>(SE->getMinusSCEV(A2B1, A1B2));
+ if (!C1B2_C2B1 || !C1A2_C2A1 || !A1B2_A2B1 || !A2B1_A1B2)
return false;
APInt Xtop = C1B2_C2B1->getAPInt();
APInt Xbot = A1B2_A2B1->getAPInt();
@@ -626,8 +604,8 @@ bool DependenceInfo::intersectConstraints(Constraint *X, const Constraint *Y) {
++DeltaSuccesses;
return true;
}
- if (const SCEVConstant *CUB =
- collectConstantUpperBound(X->getAssociatedLoop(), Prod1->getType())) {
+ if (const SCEVConstant *CUB = collectConstantUpperBound(
+ X->getAssociatedLoop(), Prod1->getType())) {
const APInt &UpperBound = CUB->getAPInt();
LLVM_DEBUG(dbgs() << "\t\tupper bound = " << UpperBound << "\n");
if (Xq.sgt(UpperBound) || Yq.sgt(UpperBound)) {
@@ -636,8 +614,7 @@ bool DependenceInfo::intersectConstraints(Constraint *X, const Constraint *Y) {
return true;
}
}
- X->setPoint(SE->getConstant(Xq),
- SE->getConstant(Yq),
+ X->setPoint(SE->getConstant(Xq), SE->getConstant(Yq),
X->getAssociatedLoop());
++DeltaSuccesses;
return true;
@@ -667,7 +644,6 @@ bool DependenceInfo::intersectConstraints(Constraint *X, const Constraint *Y) {
return false;
}
-
//===----------------------------------------------------------------------===//
// DependenceInfo methods
@@ -737,8 +713,7 @@ void Dependence::dump(raw_ostream &OS) const {
// tbaa, non-overlapping regions etc), then it is known there is no dependency.
// Otherwise the underlying objects are checked to see if they point to
// different identifiable objects.
-static AliasResult underlyingObjectsAlias(AAResults *AA,
- const DataLayout &DL,
+static AliasResult underlyingObjectsAlias(AAResults *AA, const DataLayout &DL,
const MemoryLocation &LocA,
const MemoryLocation &LocB) {
// Check the original locations (minus size) for noalias, which can happen for
@@ -773,8 +748,7 @@ static AliasResult underlyingObjectsAlias(AAResults *AA,
// Returns true if the load or store can be analyzed. Atomic and volatile
// operations have properties which this analysis does not understand.
-static
-bool isLoadOrStore(const Instruction *I) {
+static bool isLoadOrStore(const Instruction *I) {
if (const LoadInst *LI = dyn_cast<LoadInst>(I))
return LI->isUnordered();
else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
@@ -782,7 +756,6 @@ bool isLoadOrStore(const Instruction *I) {
return false;
}
-
// Examines the loop nesting of the Src and Dst
// instructions and establishes their shared loops. Sets the variables
// CommonLevels, SrcLevels, and MaxLevels.
@@ -860,14 +833,12 @@ void DependenceInfo::establishNestingLevels(const Instruction *Src,
MaxLevels -= CommonLevels;
}
-
// Given one of the loops containing the source, return
// its level index in our numbering scheme.
unsigned DependenceInfo::mapSrcLoop(const Loop *SrcLoop) const {
return SrcLoop->getLoopDepth();
}
-
// Given one of the loops containing the destination,
// return its level index in our numbering scheme.
unsigned DependenceInfo::mapDstLoop(const Loop *DstLoop) const {
@@ -880,7 +851,6 @@ unsigned DependenceInfo::mapDstLoop(const Loop *DstLoop) const {
return D;
}
-
// Returns true if Expression is loop invariant in LoopNest.
bool DependenceInfo::isLoopInvariant(const SCEV *Expression,
const Loop *LoopNest) const {
@@ -896,8 +866,6 @@ bool DependenceInfo::isLoopInvariant(const SCEV *Expression,
return SE->isLoopInvariant(Expression, LoopNest->getOutermostLoop());
}
-
-
// Finds the set of loops from the LoopNest that
// have a level <= CommonLevels and are referred to by the SCEV Expression.
void DependenceInfo::collectCommonLoops(const SCEV *Expression,
@@ -924,9 +892,9 @@ void DependenceInfo::unifySubscriptType(ArrayRef<Subscript *> Pairs) {
IntegerType *SrcTy = dyn_cast<IntegerType>(Src->getType());
IntegerType *DstTy = dyn_cast<IntegerType>(Dst->getType());
if (SrcTy == nullptr || DstTy == nullptr) {
- assert(SrcTy == DstTy && "This function only unify integer types and "
- "expect Src and Dst share the same type "
- "otherwise.");
+ assert(SrcTy == DstTy &&
+ "This function only unify integer types and "
+ "expect Src and Dst share the same type otherwise.");
continue;
}
if (SrcTy->getBitWidth() > widestWidthSeen) {
@@ -939,7 +907,6 @@ void DependenceInfo::unifySubscriptType(ArrayRef<Subscript *> Pairs) {
}
}
-
assert(widestWidthSeen > 0);
// Now extend each pair to the widest seen.
@@ -949,9 +916,9 @@ void DependenceInfo::unifySubscriptType(ArrayRef<Subscript *> Pairs) {
IntegerType *SrcTy = dyn_cast<IntegerType>(Src->getType());
IntegerType *DstTy = dyn_cast<IntegerType>(Dst->getType());
if (SrcTy == nullptr || DstTy == nullptr) {
- assert(SrcTy == DstTy && "This function only unify integer types and "
- "expect Src and Dst share the same type "
- "otherwise.");
+ assert(SrcTy == DstTy &&
+ "This function only unify integer types and "
+ "expect Src and Dst share the same type otherwise.");
continue;
}
if (SrcTy->getBitWidth() < widestWidthSeen)
@@ -1028,7 +995,6 @@ bool DependenceInfo::checkDstSubscript(const SCEV *Dst, const Loop *LoopNest,
return checkSubscript(Dst, LoopNest, Loops, false);
}
-
// Examines the subscript pair (the Src and Dst SCEVs)
// and classifies it as either ZIV, SIV, RDIV, MIV, or Nonlinear.
// Collects the associated loops in a set.
@@ -1049,14 +1015,12 @@ DependenceInfo::classifyPair(const SCEV *Src, const Loop *SrcLoopNest,
return Subscript::ZIV;
if (N == 1)
return Subscript::SIV;
- if (N == 2 && (SrcLoops.count() == 0 ||
- DstLoops.count() == 0 ||
+ if (N == 2 && (SrcLoops.count() == 0 || DstLoops.count() == 0 ||
(SrcLoops.count() == 1 && DstLoops.count() == 1)))
return Subscript::RDIV;
return Subscript::MIV;
}
-
// A wrapper around SCEV::isKnownPredicate.
// Looks for cases where we're interested in comparing for equality.
// If both X and Y have been identically sign or zero extended,
@@ -1069,12 +1033,9 @@ DependenceInfo::classifyPair(const SCEV *Src, const Loop *SrcLoopNest,
// involving symbolics.
bool DependenceInfo::isKnownPredicate(ICmpInst::Predicate Pred, const SCEV *X,
const SCEV *Y) const {
- if (Pred == CmpInst::ICMP_EQ ||
- Pred == CmpInst::ICMP_NE) {
- if ((isa<SCEVSignExtendExpr>(X) &&
- isa<SCEVSignExtendExpr>(Y)) ||
- (isa<SCEVZeroExtendExpr>(X) &&
- isa<SCEVZeroExtendExpr>(Y))) {
+ if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE) {
+ if ((isa<SCEVSignExtendExpr>(X) && isa<SCEVSignExtendExpr>(Y)) ||
+ (isa<SCEVZeroExtendExpr>(X) && isa<SCEVZeroExtendExpr>(Y))) {
const SCEVIntegralCastExpr *CX = cast<SCEVIntegralCastExpr>(X);
const SCEVIntegralCastExpr *CY = cast<SCEVIntegralCastExpr>(Y);
const SCEV *Xop = CX->getOperand();
@@ -1111,7 +1072,10 @@ bool DependenceInfo::isKnownPredicate(ICmpInst::Predicate Pred, const SCEV *X,
}
}
-/// Compare to see if S is less than Size, using isKnownNegative(S - max(Size, 1))
+/// Compare to see if S is less than Size, using
+///
+/// isKnownNegative(S - Size)
+///
/// with some extra checking if S is an AddRec and we can prove less-than using
/// the loop bounds.
bool DependenceInfo::isKnownLessThan(const SCEV *S, const SCEV *Size) const {
@@ -1126,21 +1090,34 @@ bool DependenceInfo::isKnownLessThan(const SCEV *S, const SCEV *Size) const {
Size = SE->getTruncateOrZeroExtend(Size, MaxType);
// Special check for addrecs using BE taken count
- const SCEV *Bound = SE->getMinusSCEV(S, Size);
- if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Bound)) {
- if (AddRec->isAffine()) {
+ if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S))
+ if (AddRec->isAffine() && AddRec->hasNoSignedWrap()) {
const SCEV *BECount = SE->getBackedgeTakenCount(AddRec->getLoop());
- if (!isa<SCEVCouldNotCompute>(BECount)) {
- const SCEV *Limit = AddRec->evaluateAtIteration(BECount, *SE);
- if (SE->isKnownNegative(Limit))
- return true;
- }
+ const SCEV *Start = AddRec->getStart();
+ const SCEV *Step = AddRec->getStepRecurrence(*SE);
+ const SCEV *End = AddRec->evaluateAtIteration(BECount, *SE);
+ const SCEV *Diff0 = SE->getMinusSCEV(Start, Size);
+ const SCEV *Diff1 = SE->getMinusSCEV(End, Size);
+
+ // If the value of Step is non-negative and the AddRec is non-wrap, it
+ // reaches its maximum at the last iteration. So it's enouth to check
+ // whether End - Size is negative.
+ if (SE->isKnownNonNegative(Step) && SE->isKnownNegative(Diff1))
+ return true;
+
+ // If the value of Step is non-positive and the AddRec is non-wrap, the
+ // initial value is its maximum.
+ if (SE->isKnownNonPositive(Step) && SE->isKnownNegative(Diff0))
+ return true;
+
+ // Even if we don't know the sign of Step, either Start or End must be
+ // the maximum value of the AddRec since it is non-wrap.
+ if (SE->isKnownNegative(Diff0) && SE->isKnownNegative(Diff1))
+ return true;
}
- }
// Check using normal isKnownNegative
- const SCEV *LimitedBound =
- SE->getMinusSCEV(S, SE->getSMaxExpr(Size, SE->getOne(Size->getType())));
+ const SCEV *LimitedBound = SE->getMinusSCEV(S, Size);
return SE->isKnownNegative(LimitedBound);
}
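Restating the new AddRec reasoning above as a compact case analysis (not part of the patch): for a non-wrapping affine AddRec {Start,+,Step}, the maximum value over the iteration space is attained at one of the endpoints, so S < Size can be established from the endpoint differences Diff0 = Start - Size and Diff1 = End - Size:

Step >= 0            : maximum is End,   so Diff1 < 0 suffices
Step <= 0            : maximum is Start, so Diff0 < 0 suffices
sign of Step unknown : maximum is Start or End, so require Diff0 < 0 and Diff1 < 0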
@@ -1178,7 +1155,6 @@ const SCEV *DependenceInfo::collectUpperBound(const Loop *L, Type *T) const {
return nullptr;
}
-
// Calls collectUpperBound(), then attempts to cast it to SCEVConstant.
// If the cast fails, returns NULL.
const SCEVConstant *DependenceInfo::collectConstantUpperBound(const Loop *L,
@@ -1188,7 +1164,6 @@ const SCEVConstant *DependenceInfo::collectConstantUpperBound(const Loop *L,
return nullptr;
}
-
// testZIV -
// When we have a pair of subscripts of the form [c1] and [c2],
// where c1 and c2 are both loop invariant, we attack it using
@@ -1218,7 +1193,6 @@ bool DependenceInfo::testZIV(const SCEV *Src, const SCEV *Dst,
return false; // possibly dependent
}
-
// strongSIVtest -
// From the paper, Practical Dependence Testing, Section 4.2.1
//
@@ -1270,9 +1244,9 @@ bool DependenceInfo::strongSIVtest(const SCEV *Coeff, const SCEV *SrcConst,
LLVM_DEBUG(dbgs() << "\t UpperBound = " << *UpperBound);
LLVM_DEBUG(dbgs() << ", " << *UpperBound->getType() << "\n");
const SCEV *AbsDelta =
- SE->isKnownNonNegative(Delta) ? Delta : SE->getNegativeSCEV(Delta);
+ SE->isKnownNonNegative(Delta) ? Delta : SE->getNegativeSCEV(Delta);
const SCEV *AbsCoeff =
- SE->isKnownNonNegative(Coeff) ? Coeff : SE->getNegativeSCEV(Coeff);
+ SE->isKnownNonNegative(Coeff) ? Coeff : SE->getNegativeSCEV(Coeff);
const SCEV *Product = SE->getMulExpr(UpperBound, AbsCoeff);
if (isKnownPredicate(CmpInst::ICMP_SGT, AbsDelta, Product)) {
// Distance greater than trip count - no dependence
@@ -1286,7 +1260,7 @@ bool DependenceInfo::strongSIVtest(const SCEV *Coeff, const SCEV *SrcConst,
if (isa<SCEVConstant>(Delta) && isa<SCEVConstant>(Coeff)) {
APInt ConstDelta = cast<SCEVConstant>(Delta)->getAPInt();
APInt ConstCoeff = cast<SCEVConstant>(Coeff)->getAPInt();
- APInt Distance = ConstDelta; // these need to be initialized
+ APInt Distance = ConstDelta; // these need to be initialized
APInt Remainder = ConstDelta;
APInt::sdivrem(ConstDelta, ConstCoeff, Distance, Remainder);
LLVM_DEBUG(dbgs() << "\t Distance = " << Distance << "\n");
@@ -1307,29 +1281,25 @@ bool DependenceInfo::strongSIVtest(const SCEV *Coeff, const SCEV *SrcConst,
else
Result.DV[Level].Direction &= Dependence::DVEntry::EQ;
++StrongSIVsuccesses;
- }
- else if (Delta->isZero()) {
+ } else if (Delta->isZero()) {
// since 0/X == 0
Result.DV[Level].Distance = Delta;
NewConstraint.setDistance(Delta, CurLoop);
Result.DV[Level].Direction &= Dependence::DVEntry::EQ;
++StrongSIVsuccesses;
- }
- else {
+ } else {
if (Coeff->isOne()) {
LLVM_DEBUG(dbgs() << "\t Distance = " << *Delta << "\n");
Result.DV[Level].Distance = Delta; // since X/1 == X
NewConstraint.setDistance(Delta, CurLoop);
- }
- else {
+ } else {
Result.Consistent = false;
- NewConstraint.setLine(Coeff,
- SE->getNegativeSCEV(Coeff),
+ NewConstraint.setLine(Coeff, SE->getNegativeSCEV(Coeff),
SE->getNegativeSCEV(Delta), CurLoop);
}
// maybe we can get a useful direction
- bool DeltaMaybeZero = !SE->isKnownNonZero(Delta);
+ bool DeltaMaybeZero = !SE->isKnownNonZero(Delta);
bool DeltaMaybePositive = !SE->isKnownNonPositive(Delta);
bool DeltaMaybeNegative = !SE->isKnownNonNegative(Delta);
bool CoeffMaybePositive = !SE->isKnownNonPositive(Coeff);
@@ -1353,7 +1323,6 @@ bool DependenceInfo::strongSIVtest(const SCEV *Coeff, const SCEV *SrcConst,
return false;
}
-
// weakCrossingSIVtest -
// From the paper, Practical Dependence Testing, Section 4.2.2
//
@@ -1447,8 +1416,8 @@ bool DependenceInfo::weakCrossingSIVtest(
if (const SCEV *UpperBound = collectUpperBound(CurLoop, Delta->getType())) {
LLVM_DEBUG(dbgs() << "\t UpperBound = " << *UpperBound << "\n");
const SCEV *ConstantTwo = SE->getConstant(UpperBound->getType(), 2);
- const SCEV *ML = SE->getMulExpr(SE->getMulExpr(ConstCoeff, UpperBound),
- ConstantTwo);
+ const SCEV *ML =
+ SE->getMulExpr(SE->getMulExpr(ConstCoeff, UpperBound), ConstantTwo);
LLVM_DEBUG(dbgs() << "\t ML = " << *ML << "\n");
if (isKnownPredicate(CmpInst::ICMP_SGT, Delta, ML)) {
// Delta too big, no dependence
@@ -1498,7 +1467,6 @@ bool DependenceInfo::weakCrossingSIVtest(
return false;
}
-
// Kirch's algorithm, from
//
// Optimizing Supercompilers for Supercomputers
@@ -1519,9 +1487,11 @@ static bool findGCD(unsigned Bits, const APInt &AM, const APInt &BM,
APInt R = G0;
APInt::sdivrem(G0, G1, Q, R);
while (R != 0) {
+ // clang-format off
APInt A2 = A0 - Q*A1; A0 = A1; A1 = A2;
APInt B2 = B0 - Q*B1; B0 = B1; B1 = B2;
G0 = G1; G1 = R;
+ // clang-format on
APInt::sdivrem(G0, G1, Q, R);
}
G = G1;
@@ -1543,8 +1513,7 @@ static APInt floorOfQuotient(const APInt &A, const APInt &B) {
APInt::sdivrem(A, B, Q, R);
if (R == 0)
return Q;
- if ((A.sgt(0) && B.sgt(0)) ||
- (A.slt(0) && B.slt(0)))
+ if ((A.sgt(0) && B.sgt(0)) || (A.slt(0) && B.slt(0)))
return Q;
else
return Q - 1;
@@ -1556,8 +1525,7 @@ static APInt ceilingOfQuotient(const APInt &A, const APInt &B) {
APInt::sdivrem(A, B, Q, R);
if (R == 0)
return Q;
- if ((A.sgt(0) && B.sgt(0)) ||
- (A.slt(0) && B.slt(0)))
+ if ((A.sgt(0) && B.sgt(0)) || (A.slt(0) && B.slt(0)))
return Q + 1;
else
return Q;
@@ -1733,17 +1701,14 @@ bool DependenceInfo::exactSIVtest(const SCEV *SrcCoeff, const SCEV *DstCoeff,
return Result.DV[Level].Direction == Dependence::DVEntry::NONE;
}
-
// Return true if the divisor evenly divides the dividend.
-static
-bool isRemainderZero(const SCEVConstant *Dividend,
- const SCEVConstant *Divisor) {
+static bool isRemainderZero(const SCEVConstant *Dividend,
+ const SCEVConstant *Divisor) {
const APInt &ConstDividend = Dividend->getAPInt();
const APInt &ConstDivisor = Divisor->getAPInt();
return ConstDividend.srem(ConstDivisor) == 0;
}
-
// weakZeroSrcSIVtest -
// From the paper, Practical Dependence Testing, Section 4.2.2
//
@@ -1807,11 +1772,11 @@ bool DependenceInfo::weakZeroSrcSIVtest(const SCEV *DstCoeff,
const SCEVConstant *ConstCoeff = dyn_cast<SCEVConstant>(DstCoeff);
if (!ConstCoeff)
return false;
- const SCEV *AbsCoeff =
- SE->isKnownNegative(ConstCoeff) ?
- SE->getNegativeSCEV(ConstCoeff) : ConstCoeff;
+ const SCEV *AbsCoeff = SE->isKnownNegative(ConstCoeff)
+ ? SE->getNegativeSCEV(ConstCoeff)
+ : ConstCoeff;
const SCEV *NewDelta =
- SE->isKnownNegative(ConstCoeff) ? SE->getNegativeSCEV(Delta) : Delta;
+ SE->isKnownNegative(ConstCoeff) ? SE->getNegativeSCEV(Delta) : Delta;
// check that Delta/SrcCoeff < iteration count
// really check NewDelta < count*AbsCoeff
@@ -1853,7 +1818,6 @@ bool DependenceInfo::weakZeroSrcSIVtest(const SCEV *DstCoeff,
return false;
}
-
// weakZeroDstSIVtest -
// From the paper, Practical Dependence Testing, Section 4.2.2
//
@@ -1916,11 +1880,11 @@ bool DependenceInfo::weakZeroDstSIVtest(const SCEV *SrcCoeff,
const SCEVConstant *ConstCoeff = dyn_cast<SCEVConstant>(SrcCoeff);
if (!ConstCoeff)
return false;
- const SCEV *AbsCoeff =
- SE->isKnownNegative(ConstCoeff) ?
- SE->getNegativeSCEV(ConstCoeff) : ConstCoeff;
+ const SCEV *AbsCoeff = SE->isKnownNegative(ConstCoeff)
+ ? SE->getNegativeSCEV(ConstCoeff)
+ : ConstCoeff;
const SCEV *NewDelta =
- SE->isKnownNegative(ConstCoeff) ? SE->getNegativeSCEV(Delta) : Delta;
+ SE->isKnownNegative(ConstCoeff) ? SE->getNegativeSCEV(Delta) : Delta;
// check that Delta/SrcCoeff < iteration count
// really check NewDelta < count*AbsCoeff
@@ -1962,7 +1926,6 @@ bool DependenceInfo::weakZeroDstSIVtest(const SCEV *SrcCoeff,
return false;
}
-
// exactRDIVtest - Tests the RDIV subscript pair for dependence.
// Things of the form [c1 + a*i] and [c2 + b*j],
// where i and j are induction variable, c1 and c2 are loop invariant,
@@ -2084,7 +2047,6 @@ bool DependenceInfo::exactRDIVtest(const SCEV *SrcCoeff, const SCEV *DstCoeff,
return TL.sgt(TU);
}
-
// symbolicRDIVtest -
// In Section 4.5 of the Practical Dependence Testing paper,the authors
// introduce a special case of Banerjee's Inequalities (also called the
@@ -2167,8 +2129,7 @@ bool DependenceInfo::symbolicRDIVtest(const SCEV *A1, const SCEV *A2,
return true;
}
}
- }
- else if (SE->isKnownNonPositive(A2)) {
+ } else if (SE->isKnownNonPositive(A2)) {
// a1 >= 0 && a2 <= 0
if (N1 && N2) {
// make sure that c2 - c1 <= a1*N1 - a2*N2
@@ -2187,8 +2148,7 @@ bool DependenceInfo::symbolicRDIVtest(const SCEV *A1, const SCEV *A2,
return true;
}
}
- }
- else if (SE->isKnownNonPositive(A1)) {
+ } else if (SE->isKnownNonPositive(A1)) {
if (SE->isKnownNonNegative(A2)) {
// a1 <= 0 && a2 >= 0
if (N1 && N2) {
@@ -2207,8 +2167,7 @@ bool DependenceInfo::symbolicRDIVtest(const SCEV *A1, const SCEV *A2,
++SymbolicRDIVindependence;
return true;
}
- }
- else if (SE->isKnownNonPositive(A2)) {
+ } else if (SE->isKnownNonPositive(A2)) {
// a1 <= 0 && a2 <= 0
if (N1) {
// make sure that a1*N1 <= c2 - c1
@@ -2233,7 +2192,6 @@ bool DependenceInfo::symbolicRDIVtest(const SCEV *A1, const SCEV *A2,
return false;
}
-
// testSIV -
// When we have a pair of subscripts of the form [c1 + a1*i] and [c2 - a2*i]
// where i is an induction variable, c1 and c2 are loop invariant, and a1 and
@@ -2260,17 +2218,17 @@ bool DependenceInfo::testSIV(const SCEV *Src, const SCEV *Dst, unsigned &Level,
Level = mapSrcLoop(CurLoop);
bool disproven;
if (SrcCoeff == DstCoeff)
- disproven = strongSIVtest(SrcCoeff, SrcConst, DstConst, CurLoop,
- Level, Result, NewConstraint);
+ disproven = strongSIVtest(SrcCoeff, SrcConst, DstConst, CurLoop, Level,
+ Result, NewConstraint);
else if (SrcCoeff == SE->getNegativeSCEV(DstCoeff))
disproven = weakCrossingSIVtest(SrcCoeff, SrcConst, DstConst, CurLoop,
Level, Result, NewConstraint, SplitIter);
else
disproven = exactSIVtest(SrcCoeff, DstCoeff, SrcConst, DstConst, CurLoop,
Level, Result, NewConstraint);
- return disproven ||
- gcdMIVtest(Src, Dst, Result) ||
- symbolicRDIVtest(SrcCoeff, DstCoeff, SrcConst, DstConst, CurLoop, CurLoop);
+ return disproven || gcdMIVtest(Src, Dst, Result) ||
+ symbolicRDIVtest(SrcCoeff, DstCoeff, SrcConst, DstConst, CurLoop,
+ CurLoop);
}
if (SrcAddRec) {
const SCEV *SrcConst = SrcAddRec->getStart();
@@ -2278,9 +2236,9 @@ bool DependenceInfo::testSIV(const SCEV *Src, const SCEV *Dst, unsigned &Level,
const SCEV *DstConst = Dst;
const Loop *CurLoop = SrcAddRec->getLoop();
Level = mapSrcLoop(CurLoop);
- return weakZeroDstSIVtest(SrcCoeff, SrcConst, DstConst, CurLoop,
- Level, Result, NewConstraint) ||
- gcdMIVtest(Src, Dst, Result);
+ return weakZeroDstSIVtest(SrcCoeff, SrcConst, DstConst, CurLoop, Level,
+ Result, NewConstraint) ||
+ gcdMIVtest(Src, Dst, Result);
}
if (DstAddRec) {
const SCEV *DstConst = DstAddRec->getStart();
@@ -2288,15 +2246,14 @@ bool DependenceInfo::testSIV(const SCEV *Src, const SCEV *Dst, unsigned &Level,
const SCEV *SrcConst = Src;
const Loop *CurLoop = DstAddRec->getLoop();
Level = mapDstLoop(CurLoop);
- return weakZeroSrcSIVtest(DstCoeff, SrcConst, DstConst,
- CurLoop, Level, Result, NewConstraint) ||
- gcdMIVtest(Src, Dst, Result);
+ return weakZeroSrcSIVtest(DstCoeff, SrcConst, DstConst, CurLoop, Level,
+ Result, NewConstraint) ||
+ gcdMIVtest(Src, Dst, Result);
}
llvm_unreachable("SIV test expected at least one AddRec");
return false;
}
-
// testRDIV -
// When we have a pair of subscripts of the form [c1 + a1*i] and [c2 + a2*j]
// where i and j are induction variables, c1 and c2 are loop invariant,
@@ -2333,46 +2290,37 @@ bool DependenceInfo::testRDIV(const SCEV *Src, const SCEV *Dst,
DstConst = DstAddRec->getStart();
DstCoeff = DstAddRec->getStepRecurrence(*SE);
DstLoop = DstAddRec->getLoop();
- }
- else if (SrcAddRec) {
+ } else if (SrcAddRec) {
if (const SCEVAddRecExpr *tmpAddRec =
- dyn_cast<SCEVAddRecExpr>(SrcAddRec->getStart())) {
+ dyn_cast<SCEVAddRecExpr>(SrcAddRec->getStart())) {
SrcConst = tmpAddRec->getStart();
SrcCoeff = tmpAddRec->getStepRecurrence(*SE);
SrcLoop = tmpAddRec->getLoop();
DstConst = Dst;
DstCoeff = SE->getNegativeSCEV(SrcAddRec->getStepRecurrence(*SE));
DstLoop = SrcAddRec->getLoop();
- }
- else
+ } else
llvm_unreachable("RDIV reached by surprising SCEVs");
- }
- else if (DstAddRec) {
+ } else if (DstAddRec) {
if (const SCEVAddRecExpr *tmpAddRec =
- dyn_cast<SCEVAddRecExpr>(DstAddRec->getStart())) {
+ dyn_cast<SCEVAddRecExpr>(DstAddRec->getStart())) {
DstConst = tmpAddRec->getStart();
DstCoeff = tmpAddRec->getStepRecurrence(*SE);
DstLoop = tmpAddRec->getLoop();
SrcConst = Src;
SrcCoeff = SE->getNegativeSCEV(DstAddRec->getStepRecurrence(*SE));
SrcLoop = DstAddRec->getLoop();
- }
- else
+ } else
llvm_unreachable("RDIV reached by surprising SCEVs");
- }
- else
+ } else
llvm_unreachable("RDIV expected at least one AddRec");
- return exactRDIVtest(SrcCoeff, DstCoeff,
- SrcConst, DstConst,
- SrcLoop, DstLoop,
+ return exactRDIVtest(SrcCoeff, DstCoeff, SrcConst, DstConst, SrcLoop, DstLoop,
Result) ||
- gcdMIVtest(Src, Dst, Result) ||
- symbolicRDIVtest(SrcCoeff, DstCoeff,
- SrcConst, DstConst,
- SrcLoop, DstLoop);
+ gcdMIVtest(Src, Dst, Result) ||
+ symbolicRDIVtest(SrcCoeff, DstCoeff, SrcConst, DstConst, SrcLoop,
+ DstLoop);
}
-
// Tests the single-subscript MIV pair (Src and Dst) for dependence.
// Return true if dependence disproved.
// Can sometimes refine direction vectors.
@@ -2383,7 +2331,7 @@ bool DependenceInfo::testMIV(const SCEV *Src, const SCEV *Dst,
LLVM_DEBUG(dbgs() << " dst = " << *Dst << "\n");
Result.Consistent = false;
return gcdMIVtest(Src, Dst, Result) ||
- banerjeeMIVtest(Src, Dst, Loops, Result);
+ banerjeeMIVtest(Src, Dst, Loops, Result);
}
// Given a product, e.g., 10*X*Y, returns the first constant operand,
@@ -2428,7 +2376,7 @@ bool DependenceInfo::gcdMIVtest(const SCEV *Src, const SCEV *Dst,
// we can't quit the loop just because the GCD == 1.
const SCEV *Coefficients = Src;
while (const SCEVAddRecExpr *AddRec =
- dyn_cast<SCEVAddRecExpr>(Coefficients)) {
+ dyn_cast<SCEVAddRecExpr>(Coefficients)) {
const SCEV *Coeff = AddRec->getStepRecurrence(*SE);
// If the coefficient is the product of a constant and other stuff,
// we can use the constant in the GCD computation.
@@ -2446,7 +2394,7 @@ bool DependenceInfo::gcdMIVtest(const SCEV *Src, const SCEV *Dst,
// we can't quit the loop just because the GCD == 1.
Coefficients = Dst;
while (const SCEVAddRecExpr *AddRec =
- dyn_cast<SCEVAddRecExpr>(Coefficients)) {
+ dyn_cast<SCEVAddRecExpr>(Coefficients)) {
const SCEV *Coeff = AddRec->getStepRecurrence(*SE);
// If the coefficient is the product of a constant and other stuff,
// we can use the constant in the GCD computation.
@@ -2468,16 +2416,14 @@ bool DependenceInfo::gcdMIVtest(const SCEV *Src, const SCEV *Dst,
if (isa<SCEVConstant>(Operand)) {
assert(!Constant && "Surprised to find multiple constants");
Constant = cast<SCEVConstant>(Operand);
- }
- else if (const SCEVMulExpr *Product = dyn_cast<SCEVMulExpr>(Operand)) {
+ } else if (const SCEVMulExpr *Product = dyn_cast<SCEVMulExpr>(Operand)) {
// Search for constant operand to participate in GCD;
// If none found; return false.
std::optional<APInt> ConstOp = getConstantPart(Product);
if (!ConstOp)
return false;
ExtraGCD = APIntOps::GreatestCommonDivisor(ExtraGCD, ConstOp->abs());
- }
- else
+ } else
return false;
}
}
@@ -2512,7 +2458,7 @@ bool DependenceInfo::gcdMIVtest(const SCEV *Src, const SCEV *Dst,
bool Improved = false;
Coefficients = Src;
while (const SCEVAddRecExpr *AddRec =
- dyn_cast<SCEVAddRecExpr>(Coefficients)) {
+ dyn_cast<SCEVAddRecExpr>(Coefficients)) {
Coefficients = AddRec->getStart();
const Loop *CurLoop = AddRec->getLoop();
RunningGCD = ExtraGCD;
@@ -2578,7 +2524,6 @@ bool DependenceInfo::gcdMIVtest(const SCEV *Src, const SCEV *Dst,
return false;
}
-
//===----------------------------------------------------------------------===//
// banerjeeMIVtest -
// Use Banerjee's Inequalities to test an MIV subscript pair.
@@ -2652,8 +2597,8 @@ bool DependenceInfo::banerjeeMIVtest(const SCEV *Src, const SCEV *Dst,
if (testBounds(Dependence::DVEntry::ALL, 0, Bound, Delta)) {
// Explore the direction vector hierarchy.
unsigned DepthExpanded = 0;
- unsigned NewDeps = exploreDirections(1, A, B, Bound,
- Loops, DepthExpanded, Delta);
+ unsigned NewDeps =
+ exploreDirections(1, A, B, Bound, Loops, DepthExpanded, Delta);
if (NewDeps > 0) {
bool Improved = false;
for (unsigned K = 1; K <= CommonLevels; ++K) {
@@ -2670,23 +2615,20 @@ bool DependenceInfo::banerjeeMIVtest(const SCEV *Src, const SCEV *Dst,
}
if (Improved)
++BanerjeeSuccesses;
- }
- else {
+ } else {
++BanerjeeIndependence;
Disproved = true;
}
- }
- else {
+ } else {
++BanerjeeIndependence;
Disproved = true;
}
- delete [] Bound;
- delete [] A;
- delete [] B;
+ delete[] Bound;
+ delete[] A;
+ delete[] B;
return Disproved;
}
-
// Hierarchically expands the direction vector
// search space, combining the directions of discovered dependences
// in the DirSet field of Bound. Returns the number of distinct
@@ -2788,27 +2730,26 @@ unsigned DependenceInfo::exploreDirections(unsigned Level, CoefficientInfo *A,
// test bounds for <, *, *, ...
if (testBounds(Dependence::DVEntry::LT, Level, Bound, Delta))
- NewDeps += exploreDirections(Level + 1, A, B, Bound,
- Loops, DepthExpanded, Delta);
+ NewDeps += exploreDirections(Level + 1, A, B, Bound, Loops, DepthExpanded,
+ Delta);
// Test bounds for =, *, *, ...
if (testBounds(Dependence::DVEntry::EQ, Level, Bound, Delta))
- NewDeps += exploreDirections(Level + 1, A, B, Bound,
- Loops, DepthExpanded, Delta);
+ NewDeps += exploreDirections(Level + 1, A, B, Bound, Loops, DepthExpanded,
+ Delta);
// test bounds for >, *, *, ...
if (testBounds(Dependence::DVEntry::GT, Level, Bound, Delta))
- NewDeps += exploreDirections(Level + 1, A, B, Bound,
- Loops, DepthExpanded, Delta);
+ NewDeps += exploreDirections(Level + 1, A, B, Bound, Loops, DepthExpanded,
+ Delta);
Bound[Level].Direction = Dependence::DVEntry::ALL;
return NewDeps;
- }
- else
- return exploreDirections(Level + 1, A, B, Bound, Loops, DepthExpanded, Delta);
+ } else
+ return exploreDirections(Level + 1, A, B, Bound, Loops, DepthExpanded,
+ Delta);
}
-
// Returns true iff the current bounds are plausible.
bool DependenceInfo::testBounds(unsigned char DirKind, unsigned Level,
BoundInfo *Bound, const SCEV *Delta) const {
@@ -2822,7 +2763,6 @@ bool DependenceInfo::testBounds(unsigned char DirKind, unsigned Level,
return true;
}
-
// Computes the upper and lower bounds for level K
// using the * direction. Records them in Bound.
// Wolfe gives the equations
@@ -2840,17 +2780,16 @@ bool DependenceInfo::testBounds(unsigned char DirKind, unsigned Level,
// and the upper bound is always >= 0.
void DependenceInfo::findBoundsALL(CoefficientInfo *A, CoefficientInfo *B,
BoundInfo *Bound, unsigned K) const {
- Bound[K].Lower[Dependence::DVEntry::ALL] = nullptr; // Default value = -infinity.
- Bound[K].Upper[Dependence::DVEntry::ALL] = nullptr; // Default value = +infinity.
+ Bound[K].Lower[Dependence::DVEntry::ALL] =
+ nullptr; // Default value = -infinity.
+ Bound[K].Upper[Dependence::DVEntry::ALL] =
+ nullptr; // Default value = +infinity.
if (Bound[K].Iterations) {
- Bound[K].Lower[Dependence::DVEntry::ALL] =
- SE->getMulExpr(SE->getMinusSCEV(A[K].NegPart, B[K].PosPart),
- Bound[K].Iterations);
- Bound[K].Upper[Dependence::DVEntry::ALL] =
- SE->getMulExpr(SE->getMinusSCEV(A[K].PosPart, B[K].NegPart),
- Bound[K].Iterations);
- }
- else {
+ Bound[K].Lower[Dependence::DVEntry::ALL] = SE->getMulExpr(
+ SE->getMinusSCEV(A[K].NegPart, B[K].PosPart), Bound[K].Iterations);
+ Bound[K].Upper[Dependence::DVEntry::ALL] = SE->getMulExpr(
+ SE->getMinusSCEV(A[K].PosPart, B[K].NegPart), Bound[K].Iterations);
+ } else {
// If the difference is 0, we won't need to know the number of iterations.
if (isKnownPredicate(CmpInst::ICMP_EQ, A[K].NegPart, B[K].PosPart))
Bound[K].Lower[Dependence::DVEntry::ALL] =
@@ -2861,7 +2800,6 @@ void DependenceInfo::findBoundsALL(CoefficientInfo *A, CoefficientInfo *B,
}
}
-
// Computes the upper and lower bounds for level K
// using the = direction. Records them in Bound.
// Wolfe gives the equations
@@ -2879,18 +2817,19 @@ void DependenceInfo::findBoundsALL(CoefficientInfo *A, CoefficientInfo *B,
// and the upper bound is always >= 0.
void DependenceInfo::findBoundsEQ(CoefficientInfo *A, CoefficientInfo *B,
BoundInfo *Bound, unsigned K) const {
- Bound[K].Lower[Dependence::DVEntry::EQ] = nullptr; // Default value = -infinity.
- Bound[K].Upper[Dependence::DVEntry::EQ] = nullptr; // Default value = +infinity.
+ Bound[K].Lower[Dependence::DVEntry::EQ] =
+ nullptr; // Default value = -infinity.
+ Bound[K].Upper[Dependence::DVEntry::EQ] =
+ nullptr; // Default value = +infinity.
if (Bound[K].Iterations) {
const SCEV *Delta = SE->getMinusSCEV(A[K].Coeff, B[K].Coeff);
const SCEV *NegativePart = getNegativePart(Delta);
Bound[K].Lower[Dependence::DVEntry::EQ] =
- SE->getMulExpr(NegativePart, Bound[K].Iterations);
+ SE->getMulExpr(NegativePart, Bound[K].Iterations);
const SCEV *PositivePart = getPositivePart(Delta);
Bound[K].Upper[Dependence::DVEntry::EQ] =
- SE->getMulExpr(PositivePart, Bound[K].Iterations);
- }
- else {
+ SE->getMulExpr(PositivePart, Bound[K].Iterations);
+ } else {
// If the positive/negative part of the difference is 0,
// we won't need to know the number of iterations.
const SCEV *Delta = SE->getMinusSCEV(A[K].Coeff, B[K].Coeff);
@@ -2903,7 +2842,6 @@ void DependenceInfo::findBoundsEQ(CoefficientInfo *A, CoefficientInfo *B,
}
}
-
// Computes the upper and lower bounds for level K
// using the < direction. Records them in Bound.
// Wolfe gives the equations
@@ -2919,35 +2857,35 @@ void DependenceInfo::findBoundsEQ(CoefficientInfo *A, CoefficientInfo *B,
// We must be careful to handle the case where the upper bound is unknown.
void DependenceInfo::findBoundsLT(CoefficientInfo *A, CoefficientInfo *B,
BoundInfo *Bound, unsigned K) const {
- Bound[K].Lower[Dependence::DVEntry::LT] = nullptr; // Default value = -infinity.
- Bound[K].Upper[Dependence::DVEntry::LT] = nullptr; // Default value = +infinity.
+ Bound[K].Lower[Dependence::DVEntry::LT] =
+ nullptr; // Default value = -infinity.
+ Bound[K].Upper[Dependence::DVEntry::LT] =
+ nullptr; // Default value = +infinity.
if (Bound[K].Iterations) {
const SCEV *Iter_1 = SE->getMinusSCEV(
Bound[K].Iterations, SE->getOne(Bound[K].Iterations->getType()));
const SCEV *NegPart =
- getNegativePart(SE->getMinusSCEV(A[K].NegPart, B[K].Coeff));
+ getNegativePart(SE->getMinusSCEV(A[K].NegPart, B[K].Coeff));
Bound[K].Lower[Dependence::DVEntry::LT] =
- SE->getMinusSCEV(SE->getMulExpr(NegPart, Iter_1), B[K].Coeff);
+ SE->getMinusSCEV(SE->getMulExpr(NegPart, Iter_1), B[K].Coeff);
const SCEV *PosPart =
- getPositivePart(SE->getMinusSCEV(A[K].PosPart, B[K].Coeff));
+ getPositivePart(SE->getMinusSCEV(A[K].PosPart, B[K].Coeff));
Bound[K].Upper[Dependence::DVEntry::LT] =
- SE->getMinusSCEV(SE->getMulExpr(PosPart, Iter_1), B[K].Coeff);
- }
- else {
+ SE->getMinusSCEV(SE->getMulExpr(PosPart, Iter_1), B[K].Coeff);
+ } else {
// If the positive/negative part of the difference is 0,
// we won't need to know the number of iterations.
const SCEV *NegPart =
- getNegativePart(SE->getMinusSCEV(A[K].NegPart, B[K].Coeff));
+ getNegativePart(SE->getMinusSCEV(A[K].NegPart, B[K].Coeff));
if (NegPart->isZero())
Bound[K].Lower[Dependence::DVEntry::LT] = SE->getNegativeSCEV(B[K].Coeff);
const SCEV *PosPart =
- getPositivePart(SE->getMinusSCEV(A[K].PosPart, B[K].Coeff));
+ getPositivePart(SE->getMinusSCEV(A[K].PosPart, B[K].Coeff));
if (PosPart->isZero())
Bound[K].Upper[Dependence::DVEntry::LT] = SE->getNegativeSCEV(B[K].Coeff);
}
}
-
// Computes the upper and lower bounds for level K
// using the > direction. Records them in Bound.
// Wolfe gives the equations
@@ -2963,45 +2901,45 @@ void DependenceInfo::findBoundsLT(CoefficientInfo *A, CoefficientInfo *B,
// We must be careful to handle the case where the upper bound is unknown.
void DependenceInfo::findBoundsGT(CoefficientInfo *A, CoefficientInfo *B,
BoundInfo *Bound, unsigned K) const {
- Bound[K].Lower[Dependence::DVEntry::GT] = nullptr; // Default value = -infinity.
- Bound[K].Upper[Dependence::DVEntry::GT] = nullptr; // Default value = +infinity.
+ Bound[K].Lower[Dependence::DVEntry::GT] =
+ nullptr; // Default value = -infinity.
+ Bound[K].Upper[Dependence::DVEntry::GT] =
+ nullptr; // Default value = +infinity.
if (Bound[K].Iterations) {
const SCEV *Iter_1 = SE->getMinusSCEV(
Bound[K].Iterations, SE->getOne(Bound[K].Iterations->getType()));
const SCEV *NegPart =
- getNegativePart(SE->getMinusSCEV(A[K].Coeff, B[K].PosPart));
+ getNegativePart(SE->getMinusSCEV(A[K].Coeff, B[K].PosPart));
Bound[K].Lower[Dependence::DVEntry::GT] =
- SE->getAddExpr(SE->getMulExpr(NegPart, Iter_1), A[K].Coeff);
+ SE->getAddExpr(SE->getMulExpr(NegPart, Iter_1), A[K].Coeff);
const SCEV *PosPart =
- getPositivePart(SE->getMinusSCEV(A[K].Coeff, B[K].NegPart));
+ getPositivePart(SE->getMinusSCEV(A[K].Coeff, B[K].NegPart));
Bound[K].Upper[Dependence::DVEntry::GT] =
- SE->getAddExpr(SE->getMulExpr(PosPart, Iter_1), A[K].Coeff);
- }
- else {
+ SE->getAddExpr(SE->getMulExpr(PosPart, Iter_1), A[K].Coeff);
+ } else {
// If the positive/negative part of the difference is 0,
// we won't need to know the number of iterations.
- const SCEV *NegPart = getNegativePart(SE->getMinusSCEV(A[K].Coeff, B[K].PosPart));
+ const SCEV *NegPart =
+ getNegativePart(SE->getMinusSCEV(A[K].Coeff, B[K].PosPart));
if (NegPart->isZero())
Bound[K].Lower[Dependence::DVEntry::GT] = A[K].Coeff;
- const SCEV *PosPart = getPositivePart(SE->getMinusSCEV(A[K].Coeff, B[K].NegPart));
+ const SCEV *PosPart =
+ getPositivePart(SE->getMinusSCEV(A[K].Coeff, B[K].NegPart));
if (PosPart->isZero())
Bound[K].Upper[Dependence::DVEntry::GT] = A[K].Coeff;
}
}
-
// X^+ = max(X, 0)
const SCEV *DependenceInfo::getPositivePart(const SCEV *X) const {
return SE->getSMaxExpr(X, SE->getZero(X->getType()));
}
-
// X^- = min(X, 0)
const SCEV *DependenceInfo::getNegativePart(const SCEV *X) const {
return SE->getSMinExpr(X, SE->getZero(X->getType()));
}
-
// Walks through the subscript,
// collecting each coefficient, the associated loop bounds,
// and recording its positive and negative parts for later use.
@@ -3046,7 +2984,6 @@ DependenceInfo::collectCoeffInfo(const SCEV *Subscript, bool SrcFlag,
return CI;
}
-
// Looks through all the bounds info and
// computes the lower bound given the current direction settings
// at each level. If the lower bound for any level is -inf,
@@ -3062,7 +2999,6 @@ const SCEV *DependenceInfo::getLowerBound(BoundInfo *Bound) const {
return Sum;
}
-
// Looks through all the bounds info and
// computes the upper bound given the current direction settings
// at each level. If the upper bound at any level is +inf,
@@ -3078,7 +3014,6 @@ const SCEV *DependenceInfo::getUpperBound(BoundInfo *Bound) const {
return Sum;
}
-
//===----------------------------------------------------------------------===//
// Constraint manipulation for Delta test.
@@ -3098,7 +3033,6 @@ const SCEV *DependenceInfo::findCoefficient(const SCEV *Expr,
return findCoefficient(AddRec->getStart(), TargetLoop);
}
-
// Given a linear SCEV,
// return the SCEV given by zeroing out the coefficient
// corresponding to the specified loop.
@@ -3112,12 +3046,10 @@ const SCEV *DependenceInfo::zeroCoefficient(const SCEV *Expr,
if (AddRec->getLoop() == TargetLoop)
return AddRec->getStart();
return SE->getAddRecExpr(zeroCoefficient(AddRec->getStart(), TargetLoop),
- AddRec->getStepRecurrence(*SE),
- AddRec->getLoop(),
+ AddRec->getStepRecurrence(*SE), AddRec->getLoop(),
AddRec->getNoWrapFlags());
}
-
// Given a linear SCEV Expr,
// return the SCEV given by adding some Value to the
// coefficient corresponding to the specified TargetLoop.
@@ -3128,17 +3060,13 @@ const SCEV *DependenceInfo::addToCoefficient(const SCEV *Expr,
const SCEV *Value) const {
const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Expr);
if (!AddRec) // create a new addRec
- return SE->getAddRecExpr(Expr,
- Value,
- TargetLoop,
+ return SE->getAddRecExpr(Expr, Value, TargetLoop,
SCEV::FlagAnyWrap); // Worst case, with no info.
if (AddRec->getLoop() == TargetLoop) {
const SCEV *Sum = SE->getAddExpr(AddRec->getStepRecurrence(*SE), Value);
if (Sum->isZero())
return AddRec->getStart();
- return SE->getAddRecExpr(AddRec->getStart(),
- Sum,
- AddRec->getLoop(),
+ return SE->getAddRecExpr(AddRec->getStart(), Sum, AddRec->getLoop(),
AddRec->getNoWrapFlags());
}
if (SE->isLoopInvariant(AddRec, TargetLoop))
@@ -3149,7 +3077,6 @@ const SCEV *DependenceInfo::addToCoefficient(const SCEV *Expr,
AddRec->getNoWrapFlags());
}
-
// Review the constraints, looking for opportunities
// to simplify a subscript pair (Src and Dst).
// Return true if some simplification occurs.
@@ -3178,7 +3105,6 @@ bool DependenceInfo::propagate(const SCEV *&Src, const SCEV *&Dst,
return Result;
}
-
// Attempt to propagate a distance
// constraint into a subscript pair (Src and Dst).
// Return true if some simplification occurs.
@@ -3204,7 +3130,6 @@ bool DependenceInfo::propagateDistance(const SCEV *&Src, const SCEV *&Dst,
return true;
}
-
// Attempt to propagate a line
// constraint into a subscript pair (Src and Dst).
// Return true if some simplification occurs.
@@ -3224,22 +3149,22 @@ bool DependenceInfo::propagateLine(const SCEV *&Src, const SCEV *&Dst,
if (A->isZero()) {
const SCEVConstant *Bconst = dyn_cast<SCEVConstant>(B);
const SCEVConstant *Cconst = dyn_cast<SCEVConstant>(C);
- if (!Bconst || !Cconst) return false;
+ if (!Bconst || !Cconst)
+ return false;
APInt Beta = Bconst->getAPInt();
APInt Charlie = Cconst->getAPInt();
APInt CdivB = Charlie.sdiv(Beta);
assert(Charlie.srem(Beta) == 0 && "C should be evenly divisible by B");
const SCEV *AP_K = findCoefficient(Dst, CurLoop);
- // Src = SE->getAddExpr(Src, SE->getMulExpr(AP_K, SE->getConstant(CdivB)));
Src = SE->getMinusSCEV(Src, SE->getMulExpr(AP_K, SE->getConstant(CdivB)));
Dst = zeroCoefficient(Dst, CurLoop);
if (!findCoefficient(Src, CurLoop)->isZero())
Consistent = false;
- }
- else if (B->isZero()) {
+ } else if (B->isZero()) {
const SCEVConstant *Aconst = dyn_cast<SCEVConstant>(A);
const SCEVConstant *Cconst = dyn_cast<SCEVConstant>(C);
- if (!Aconst || !Cconst) return false;
+ if (!Aconst || !Cconst)
+ return false;
APInt Alpha = Aconst->getAPInt();
APInt Charlie = Cconst->getAPInt();
APInt CdivA = Charlie.sdiv(Alpha);
@@ -3249,11 +3174,11 @@ bool DependenceInfo::propagateLine(const SCEV *&Src, const SCEV *&Dst,
Src = zeroCoefficient(Src, CurLoop);
if (!findCoefficient(Dst, CurLoop)->isZero())
Consistent = false;
- }
- else if (isKnownPredicate(CmpInst::ICMP_EQ, A, B)) {
+ } else if (isKnownPredicate(CmpInst::ICMP_EQ, A, B)) {
const SCEVConstant *Aconst = dyn_cast<SCEVConstant>(A);
const SCEVConstant *Cconst = dyn_cast<SCEVConstant>(C);
- if (!Aconst || !Cconst) return false;
+ if (!Aconst || !Cconst)
+ return false;
APInt Alpha = Aconst->getAPInt();
APInt Charlie = Cconst->getAPInt();
APInt CdivA = Charlie.sdiv(Alpha);
@@ -3264,8 +3189,7 @@ bool DependenceInfo::propagateLine(const SCEV *&Src, const SCEV *&Dst,
Dst = addToCoefficient(Dst, CurLoop, A_K);
if (!findCoefficient(Dst, CurLoop)->isZero())
Consistent = false;
- }
- else {
+ } else {
// paper is incorrect here, or perhaps just misleading
const SCEV *A_K = findCoefficient(Src, CurLoop);
Src = SE->getMulExpr(Src, A);
@@ -3281,7 +3205,6 @@ bool DependenceInfo::propagateLine(const SCEV *&Src, const SCEV *&Dst,
return true;
}
-
// Attempt to propagate a point
// constraint into a subscript pair (Src and Dst).
// Return true if some simplification occurs.
@@ -3302,7 +3225,6 @@ bool DependenceInfo::propagatePoint(const SCEV *&Src, const SCEV *&Dst,
return true;
}
-
// Update direction vector entry based on the current constraint.
void DependenceInfo::updateDirection(Dependence::DVEntry &Level,
const Constraint &CurConstraint) const {
@@ -3322,34 +3244,28 @@ void DependenceInfo::updateDirection(Dependence::DVEntry &Level,
if (!SE->isKnownNonNegative(Level.Distance)) // if may be negative
NewDirection |= Dependence::DVEntry::GT;
Level.Direction &= NewDirection;
- }
- else if (CurConstraint.isLine()) {
+ } else if (CurConstraint.isLine()) {
Level.Scalar = false;
Level.Distance = nullptr;
// direction should be accurate
- }
- else if (CurConstraint.isPoint()) {
+ } else if (CurConstraint.isPoint()) {
Level.Scalar = false;
Level.Distance = nullptr;
unsigned NewDirection = Dependence::DVEntry::NONE;
- if (!isKnownPredicate(CmpInst::ICMP_NE,
- CurConstraint.getY(),
+ if (!isKnownPredicate(CmpInst::ICMP_NE, CurConstraint.getY(),
CurConstraint.getX()))
// if X may be = Y
NewDirection |= Dependence::DVEntry::EQ;
- if (!isKnownPredicate(CmpInst::ICMP_SLE,
- CurConstraint.getY(),
+ if (!isKnownPredicate(CmpInst::ICMP_SLE, CurConstraint.getY(),
CurConstraint.getX()))
// if Y may be > X
NewDirection |= Dependence::DVEntry::LT;
- if (!isKnownPredicate(CmpInst::ICMP_SGE,
- CurConstraint.getY(),
+ if (!isKnownPredicate(CmpInst::ICMP_SGE, CurConstraint.getY(),
CurConstraint.getX()))
// if Y may be < X
NewDirection |= Dependence::DVEntry::GT;
Level.Direction &= NewDirection;
- }
- else
+ } else
llvm_unreachable("constraint has unexpected kind");
}
@@ -3425,7 +3341,7 @@ bool DependenceInfo::tryDelinearizeFixedSize(
dyn_cast<SCEVUnknown>(SE->getPointerBase(DstAccessFn));
assert(SrcBase && DstBase && SrcBase == DstBase &&
"expected src and dst scev unknowns to be equal");
- });
+ });
SmallVector<int, 4> SrcSizes;
SmallVector<int, 4> DstSizes;
@@ -3737,9 +3653,8 @@ DependenceInfo::depends(Instruction *Src, Instruction *Dst,
Pair[P].Group.resize(Pairs);
removeMatchingExtensions(&Pair[P]);
Pair[P].Classification =
- classifyPair(Pair[P].Src, LI->getLoopFor(Src->getParent()),
- Pair[P].Dst, LI->getLoopFor(Dst->getParent()),
- Pair[P].Loops);
+ classifyPair(Pair[P].Src, LI->getLoopFor(Src->getParent()), Pair[P].Dst,
+ LI->getLoopFor(Dst->getParent()), Pair[P].Loops);
Pair[P].GroupLoops = Pair[P].Loops;
Pair[P].Group.set(P);
LLVM_DEBUG(dbgs() << " subscript " << P << "\n");
@@ -3814,18 +3729,15 @@ DependenceInfo::depends(Instruction *Src, Instruction *Dst,
if (Pair[SI].Classification == Subscript::NonLinear) {
// ignore these, but collect loops for later
++NonlinearSubscriptPairs;
- collectCommonLoops(Pair[SI].Src,
- LI->getLoopFor(Src->getParent()),
+ collectCommonLoops(Pair[SI].Src, LI->getLoopFor(Src->getParent()),
Pair[SI].Loops);
- collectCommonLoops(Pair[SI].Dst,
- LI->getLoopFor(Dst->getParent()),
+ collectCommonLoops(Pair[SI].Dst, LI->getLoopFor(Dst->getParent()),
Pair[SI].Loops);
Result.Consistent = false;
} else if (Pair[SI].Classification == Subscript::ZIV) {
// always separable
Separable.set(SI);
- }
- else {
+ } else {
// SIV, RDIV, or MIV, so check for coupled group
bool Done = true;
for (unsigned SJ = SI + 1; SJ < Pairs; ++SJ) {
@@ -3843,8 +3755,7 @@ DependenceInfo::depends(Instruction *Src, Instruction *Dst,
if (Pair[SI].Group.count() == 1) {
Separable.set(SI);
++SeparableSubscriptPairs;
- }
- else {
+ } else {
Coupled.set(SI);
++CoupledSubscriptPairs;
}
@@ -3950,10 +3861,9 @@ DependenceInfo::depends(Instruction *Src, Instruction *Dst,
Constraints, Result.Consistent)) {
LLVM_DEBUG(dbgs() << "\t Changed\n");
++DeltaPropagations;
- Pair[SJ].Classification =
- classifyPair(Pair[SJ].Src, LI->getLoopFor(Src->getParent()),
- Pair[SJ].Dst, LI->getLoopFor(Dst->getParent()),
- Pair[SJ].Loops);
+ Pair[SJ].Classification = classifyPair(
+ Pair[SJ].Src, LI->getLoopFor(Src->getParent()), Pair[SJ].Dst,
+ LI->getLoopFor(Dst->getParent()), Pair[SJ].Loops);
switch (Pair[SJ].Classification) {
case Subscript::ZIV:
LLVM_DEBUG(dbgs() << "ZIV\n");
@@ -3995,8 +3905,7 @@ DependenceInfo::depends(Instruction *Src, Instruction *Dst,
LLVM_DEBUG(dbgs() << "MIV test\n");
if (testMIV(Pair[SJ].Src, Pair[SJ].Dst, Pair[SJ].Loops, Result))
return nullptr;
- }
- else
+ } else
llvm_unreachable("expected only MIV subscripts at this point");
}
@@ -4052,8 +3961,7 @@ DependenceInfo::depends(Instruction *Src, Instruction *Dst,
break;
}
}
- }
- else {
+ } else {
// On the other hand, if all directions are equal and there's no
// loop-independent dependence possible, then no dependence exists.
bool AllEqual = true;
@@ -4158,9 +4066,8 @@ const SCEV *DependenceInfo::getSplitIteration(const Dependence &Dep,
Pair[P].Group.resize(Pairs);
removeMatchingExtensions(&Pair[P]);
Pair[P].Classification =
- classifyPair(Pair[P].Src, LI->getLoopFor(Src->getParent()),
- Pair[P].Dst, LI->getLoopFor(Dst->getParent()),
- Pair[P].Loops);
+ classifyPair(Pair[P].Src, LI->getLoopFor(Src->getParent()), Pair[P].Dst,
+ LI->getLoopFor(Dst->getParent()), Pair[P].Loops);
Pair[P].GroupLoops = Pair[P].Loops;
Pair[P].Group.set(P);
}
@@ -4172,15 +4079,12 @@ const SCEV *DependenceInfo::getSplitIteration(const Dependence &Dep,
for (unsigned SI = 0; SI < Pairs; ++SI) {
if (Pair[SI].Classification == Subscript::NonLinear) {
// ignore these, but collect loops for later
- collectCommonLoops(Pair[SI].Src,
- LI->getLoopFor(Src->getParent()),
+ collectCommonLoops(Pair[SI].Src, LI->getLoopFor(Src->getParent()),
Pair[SI].Loops);
- collectCommonLoops(Pair[SI].Dst,
- LI->getLoopFor(Dst->getParent()),
+ collectCommonLoops(Pair[SI].Dst, LI->getLoopFor(Dst->getParent()),
Pair[SI].Loops);
Result.Consistent = false;
- }
- else if (Pair[SI].Classification == Subscript::ZIV)
+ } else if (Pair[SI].Classification == Subscript::ZIV)
Separable.set(SI);
else {
// SIV, RDIV, or MIV, so check for coupled group
@@ -4214,8 +4118,8 @@ const SCEV *DependenceInfo::getSplitIteration(const Dependence &Dep,
case Subscript::SIV: {
unsigned Level;
const SCEV *SplitIter = nullptr;
- (void) testSIV(Pair[SI].Src, Pair[SI].Dst, Level,
- Result, NewConstraint, SplitIter);
+ (void)testSIV(Pair[SI].Src, Pair[SI].Dst, Level, Result, NewConstraint,
+ SplitIter);
if (Level == SplitLevel) {
assert(SplitIter != nullptr);
return SplitIter;
diff --git a/llvm/lib/Analysis/Loads.cpp b/llvm/lib/Analysis/Loads.cpp
index 78d0887..9a2c9ba 100644
--- a/llvm/lib/Analysis/Loads.cpp
+++ b/llvm/lib/Analysis/Loads.cpp
@@ -276,8 +276,7 @@ static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
// this function is only used when one address use dominates the
// other, which means that they'll always either have the same
// value or one of them will have an undefined value.
- if (isa<BinaryOperator>(A) || isa<CastInst>(A) || isa<PHINode>(A) ||
- isa<GetElementPtrInst>(A))
+ if (isa<CastInst>(A) || isa<PHINode>(A) || isa<GetElementPtrInst>(A))
if (const Instruction *BI = dyn_cast<Instruction>(B))
if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
return true;
diff --git a/llvm/lib/Analysis/LoopInfo.cpp b/llvm/lib/Analysis/LoopInfo.cpp
index 518a634..6ba6073 100644
--- a/llvm/lib/Analysis/LoopInfo.cpp
+++ b/llvm/lib/Analysis/LoopInfo.cpp
@@ -58,14 +58,26 @@ static cl::opt<bool, true>
// Loop implementation
//
-bool Loop::isLoopInvariant(const Value *V) const {
- if (const Instruction *I = dyn_cast<Instruction>(V))
- return !contains(I);
+bool Loop::isLoopInvariant(const Value *V, bool HasCoroSuspendInst) const {
+ if (const Instruction *I = dyn_cast<Instruction>(V)) {
+ // FIXME: this is semantically inconsistent. We're tracking a proper fix in
+ // issue #149604.
+ // If V is a pointer to a stack object and L contains a coro.suspend function
+ // call, then V may not be loop invariant, because the ramp function and the
+ // resume function have different stack frames.
+ if (HasCoroSuspendInst && isa<AllocaInst>(I))
+ return false;
+ else
+ return !contains(I);
+ }
return true; // All non-instructions are loop invariant
}
-bool Loop::hasLoopInvariantOperands(const Instruction *I) const {
- return all_of(I->operands(), [this](Value *V) { return isLoopInvariant(V); });
+bool Loop::hasLoopInvariantOperands(const Instruction *I,
+ bool HasCoroSuspendInst) const {
+ return all_of(I->operands(), [&](Value *V) {
+ return isLoopInvariant(V, HasCoroSuspendInst);
+ });
}
bool Loop::makeLoopInvariant(Value *V, bool &Changed, Instruction *InsertPt,
diff --git a/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp b/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
index 2b0f212..67c2cfa 100644
--- a/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -150,6 +150,10 @@ static ModRefInfo GetLocation(const Instruction *Inst, MemoryLocation &Loc,
switch (II->getIntrinsicID()) {
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end:
+ Loc = MemoryLocation::getForArgument(II, 0, TLI);
+ // These intrinsics don't really modify the memory, but returning Mod
+ // will allow them to be handled conservatively.
+ return ModRefInfo::Mod;
case Intrinsic::invariant_start:
Loc = MemoryLocation::getForArgument(II, 1, TLI);
// These intrinsics don't really modify the memory, but returning Mod
@@ -441,11 +445,7 @@ MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom(
Intrinsic::ID ID = II->getIntrinsicID();
switch (ID) {
case Intrinsic::lifetime_start: {
- // FIXME: This only considers queries directly on the invariant-tagged
- // pointer, not on query pointers that are indexed off of them. It'd
- // be nice to handle that at some point (the right approach is to use
- // GetPointerBaseWithConstantOffset).
- MemoryLocation ArgLoc = MemoryLocation::getAfter(II->getArgOperand(1));
+ MemoryLocation ArgLoc = MemoryLocation::getAfter(II->getArgOperand(0));
if (BatchAA.isMustAlias(ArgLoc, MemLoc))
return MemDepResult::getDef(II);
continue;
diff --git a/llvm/lib/Analysis/MemoryLocation.cpp b/llvm/lib/Analysis/MemoryLocation.cpp
index 28a2640..72b643c 100644
--- a/llvm/lib/Analysis/MemoryLocation.cpp
+++ b/llvm/lib/Analysis/MemoryLocation.cpp
@@ -191,7 +191,7 @@ MemoryLocation MemoryLocation::getForArgument(const CallBase *Call,
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end: {
- assert(ArgIdx == 1 && "Invalid argument index");
+ assert(ArgIdx == 0 && "Invalid argument index");
auto *AI = dyn_cast<AllocaInst>(Arg);
if (!AI)
// lifetime of poison value.
diff --git a/llvm/lib/Analysis/StackLifetime.cpp b/llvm/lib/Analysis/StackLifetime.cpp
index abe4985..1e20fca 100644
--- a/llvm/lib/Analysis/StackLifetime.cpp
+++ b/llvm/lib/Analysis/StackLifetime.cpp
@@ -70,7 +70,7 @@ void StackLifetime::collectMarkers() {
const IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
if (!II || !II->isLifetimeStartOrEnd())
continue;
- const AllocaInst *AI = dyn_cast<AllocaInst>(II->getArgOperand(1));
+ const AllocaInst *AI = dyn_cast<AllocaInst>(II->getArgOperand(0));
if (!AI)
continue;
auto It = AllocaNumbering.find(AI);
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 1e70228..b0e4b00 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -9147,7 +9147,8 @@ static bool matchTwoInputRecurrence(const PHINode *PN, InstTy *&Inst,
return false;
for (unsigned I = 0; I != 2; ++I) {
- if (auto *Operation = dyn_cast<InstTy>(PN->getIncomingValue(I))) {
+ if (auto *Operation = dyn_cast<InstTy>(PN->getIncomingValue(I));
+ Operation && Operation->getNumOperands() >= 2) {
Value *LHS = Operation->getOperand(0);
Value *RHS = Operation->getOperand(1);
if (LHS != PN && RHS != PN)
diff --git a/llvm/lib/Analysis/VectorUtils.cpp b/llvm/lib/Analysis/VectorUtils.cpp
index b3b4c37..425ea31 100644
--- a/llvm/lib/Analysis/VectorUtils.cpp
+++ b/llvm/lib/Analysis/VectorUtils.cpp
@@ -81,6 +81,7 @@ bool llvm::isTriviallyVectorizable(Intrinsic::ID ID) {
case Intrinsic::exp:
case Intrinsic::exp10:
case Intrinsic::exp2:
+ case Intrinsic::ldexp:
case Intrinsic::log:
case Intrinsic::log10:
case Intrinsic::log2:
@@ -108,6 +109,8 @@ bool llvm::isTriviallyVectorizable(Intrinsic::ID ID) {
case Intrinsic::canonicalize:
case Intrinsic::fptosi_sat:
case Intrinsic::fptoui_sat:
+ case Intrinsic::lround:
+ case Intrinsic::llround:
case Intrinsic::lrint:
case Intrinsic::llrint:
case Intrinsic::ucmp:
@@ -189,6 +192,8 @@ bool llvm::isVectorIntrinsicWithOverloadTypeAtArg(
switch (ID) {
case Intrinsic::fptosi_sat:
case Intrinsic::fptoui_sat:
+ case Intrinsic::lround:
+ case Intrinsic::llround:
case Intrinsic::lrint:
case Intrinsic::llrint:
case Intrinsic::vp_lrint:
@@ -203,6 +208,7 @@ bool llvm::isVectorIntrinsicWithOverloadTypeAtArg(
case Intrinsic::vp_is_fpclass:
return OpdIdx == 0;
case Intrinsic::powi:
+ case Intrinsic::ldexp:
return OpdIdx == -1 || OpdIdx == 1;
default:
return OpdIdx == -1;
diff --git a/llvm/lib/AsmParser/LLLexer.cpp b/llvm/lib/AsmParser/LLLexer.cpp
index 520c6a0..3d5bd61 100644
--- a/llvm/lib/AsmParser/LLLexer.cpp
+++ b/llvm/lib/AsmParser/LLLexer.cpp
@@ -928,6 +928,7 @@ lltok::Kind LLLexer::LexIdentifier() {
INSTKEYWORD(fptoui, FPToUI);
INSTKEYWORD(fptosi, FPToSI);
INSTKEYWORD(inttoptr, IntToPtr);
+ INSTKEYWORD(ptrtoaddr, PtrToAddr);
INSTKEYWORD(ptrtoint, PtrToInt);
INSTKEYWORD(bitcast, BitCast);
INSTKEYWORD(addrspacecast, AddrSpaceCast);
diff --git a/llvm/lib/AsmParser/LLParser.cpp b/llvm/lib/AsmParser/LLParser.cpp
index 13bef1f..1bc2906 100644
--- a/llvm/lib/AsmParser/LLParser.cpp
+++ b/llvm/lib/AsmParser/LLParser.cpp
@@ -4273,6 +4273,7 @@ bool LLParser::parseValID(ValID &ID, PerFunctionState *PFS, Type *ExpectedTy) {
case lltok::kw_bitcast:
case lltok::kw_addrspacecast:
case lltok::kw_inttoptr:
+ case lltok::kw_ptrtoaddr:
case lltok::kw_ptrtoint: {
unsigned Opc = Lex.getUIntVal();
Type *DestTy = nullptr;
@@ -7310,6 +7311,7 @@ int LLParser::parseInstruction(Instruction *&Inst, BasicBlock *BB,
case lltok::kw_fptoui:
case lltok::kw_fptosi:
case lltok::kw_inttoptr:
+ case lltok::kw_ptrtoaddr:
case lltok::kw_ptrtoint:
return parseCast(Inst, PFS, KeywordVal);
case lltok::kw_fptrunc:
diff --git a/llvm/lib/BinaryFormat/DXContainer.cpp b/llvm/lib/BinaryFormat/DXContainer.cpp
index 36d10d0..eb83945 100644
--- a/llvm/lib/BinaryFormat/DXContainer.cpp
+++ b/llvm/lib/BinaryFormat/DXContainer.cpp
@@ -60,6 +60,17 @@ ArrayRef<EnumEntry<SigComponentType>> dxbc::getSigComponentTypes() {
return ArrayRef(SigComponentTypes);
}
+static const EnumEntry<dxil::ResourceClass> ResourceClassNames[] = {
+ {"SRV", llvm::dxil::ResourceClass::SRV},
+ {"UAV", llvm::dxil::ResourceClass::UAV},
+ {"CBV", llvm::dxil::ResourceClass::CBuffer},
+ {"Sampler", llvm::dxil::ResourceClass::Sampler},
+};
+
+ArrayRef<EnumEntry<llvm::dxil::ResourceClass>> dxbc::getResourceClasses() {
+ return ArrayRef(ResourceClassNames);
+}
+
static const EnumEntry<RootFlags> RootFlagNames[] = {
#define ROOT_SIGNATURE_FLAG(Val, Enum) {#Enum, RootFlags::Enum},
#include "llvm/BinaryFormat/DXContainerConstants.def"
diff --git a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
index 290d873..22a0d0f 100644
--- a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -1283,6 +1283,7 @@ static int getDecodedCastOpcode(unsigned Val) {
case bitc::CAST_SITOFP : return Instruction::SIToFP;
case bitc::CAST_FPTRUNC : return Instruction::FPTrunc;
case bitc::CAST_FPEXT : return Instruction::FPExt;
+ case bitc::CAST_PTRTOADDR: return Instruction::PtrToAddr;
case bitc::CAST_PTRTOINT: return Instruction::PtrToInt;
case bitc::CAST_INTTOPTR: return Instruction::IntToPtr;
case bitc::CAST_BITCAST : return Instruction::BitCast;
diff --git a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
index 05680fa..a3f8254 100644
--- a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -647,6 +647,7 @@ static unsigned getEncodedCastOpcode(unsigned Opcode) {
case Instruction::SIToFP : return bitc::CAST_SITOFP;
case Instruction::FPTrunc : return bitc::CAST_FPTRUNC;
case Instruction::FPExt : return bitc::CAST_FPEXT;
+ case Instruction::PtrToAddr: return bitc::CAST_PTRTOADDR;
case Instruction::PtrToInt: return bitc::CAST_PTRTOINT;
case Instruction::IntToPtr: return bitc::CAST_INTTOPTR;
case Instruction::BitCast : return bitc::CAST_BITCAST;
diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index c72b6e8..23a3543 100644
--- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -3657,6 +3657,7 @@ const MCExpr *AsmPrinter::lowerConstant(const Constant *CV,
break; // Error
}
+ case Instruction::PtrToAddr:
case Instruction::PtrToInt: {
const DataLayout &DL = getDataLayout();
diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp
index 3f3d5dc9..278dd65 100644
--- a/llvm/lib/CodeGen/AtomicExpandPass.cpp
+++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp
@@ -1915,7 +1915,6 @@ bool AtomicExpandImpl::expandAtomicOpToLibcall(
// TODO: the "order" argument type is "int", not int32. So
// getInt32Ty may be wrong if the arch uses e.g. 16-bit ints.
- ConstantInt *SizeVal64 = ConstantInt::get(Type::getInt64Ty(Ctx), Size);
assert(Ordering != AtomicOrdering::NotAtomic && "expect atomic MO");
Constant *OrderingVal =
ConstantInt::get(Type::getInt32Ty(Ctx), (int)toCABI(Ordering));
@@ -2012,7 +2011,7 @@ bool AtomicExpandImpl::expandAtomicOpToLibcall(
if (CASExpected) {
AllocaCASExpected = AllocaBuilder.CreateAlloca(CASExpected->getType());
AllocaCASExpected->setAlignment(AllocaAlignment);
- Builder.CreateLifetimeStart(AllocaCASExpected, SizeVal64);
+ Builder.CreateLifetimeStart(AllocaCASExpected);
Builder.CreateAlignedStore(CASExpected, AllocaCASExpected, AllocaAlignment);
Args.push_back(AllocaCASExpected);
}
@@ -2026,7 +2025,7 @@ bool AtomicExpandImpl::expandAtomicOpToLibcall(
} else {
AllocaValue = AllocaBuilder.CreateAlloca(ValueOperand->getType());
AllocaValue->setAlignment(AllocaAlignment);
- Builder.CreateLifetimeStart(AllocaValue, SizeVal64);
+ Builder.CreateLifetimeStart(AllocaValue);
Builder.CreateAlignedStore(ValueOperand, AllocaValue, AllocaAlignment);
Args.push_back(AllocaValue);
}
@@ -2036,7 +2035,7 @@ bool AtomicExpandImpl::expandAtomicOpToLibcall(
if (!CASExpected && HasResult && !UseSizedLibcall) {
AllocaResult = AllocaBuilder.CreateAlloca(I->getType());
AllocaResult->setAlignment(AllocaAlignment);
- Builder.CreateLifetimeStart(AllocaResult, SizeVal64);
+ Builder.CreateLifetimeStart(AllocaResult);
Args.push_back(AllocaResult);
}
@@ -2069,7 +2068,7 @@ bool AtomicExpandImpl::expandAtomicOpToLibcall(
// And then, extract the results...
if (ValueOperand && !UseSizedLibcall)
- Builder.CreateLifetimeEnd(AllocaValue, SizeVal64);
+ Builder.CreateLifetimeEnd(AllocaValue);
if (CASExpected) {
// The final result from the CAS is {load of 'expected' alloca, bool result
@@ -2078,7 +2077,7 @@ bool AtomicExpandImpl::expandAtomicOpToLibcall(
Value *V = PoisonValue::get(FinalResultTy);
Value *ExpectedOut = Builder.CreateAlignedLoad(
CASExpected->getType(), AllocaCASExpected, AllocaAlignment);
- Builder.CreateLifetimeEnd(AllocaCASExpected, SizeVal64);
+ Builder.CreateLifetimeEnd(AllocaCASExpected);
V = Builder.CreateInsertValue(V, ExpectedOut, 0);
V = Builder.CreateInsertValue(V, Result, 1);
I->replaceAllUsesWith(V);
@@ -2089,7 +2088,7 @@ bool AtomicExpandImpl::expandAtomicOpToLibcall(
else {
V = Builder.CreateAlignedLoad(I->getType(), AllocaResult,
AllocaAlignment);
- Builder.CreateLifetimeEnd(AllocaResult, SizeVal64);
+ Builder.CreateLifetimeEnd(AllocaResult);
}
I->replaceAllUsesWith(V);
}
diff --git a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
index 9ba1782..0f3ec8b 100644
--- a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
@@ -132,9 +132,10 @@ bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
unsigned i = 0;
unsigned NumFixedArgs = CB.getFunctionType()->getNumParams();
for (const auto &Arg : CB.args()) {
- ArgInfo OrigArg{ArgRegs[i], *Arg.get(), i, getAttributesForArgIdx(CB, i),
- i < NumFixedArgs};
+ ArgInfo OrigArg{ArgRegs[i], *Arg.get(), i, getAttributesForArgIdx(CB, i)};
setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CB);
+ if (i >= NumFixedArgs)
+ OrigArg.Flags[0].setVarArg();
// If we have an explicit sret argument that is an Instruction, (i.e., it
// might point to function-local memory), we can't meaningfully tail-call.
@@ -301,7 +302,7 @@ void CallLowering::splitToValueTypes(const ArgInfo &OrigArg,
// double] -> double).
SplitArgs.emplace_back(OrigArg.Regs[0], SplitVTs[0].getTypeForEVT(Ctx),
OrigArg.OrigArgIndex, OrigArg.Flags[0],
- OrigArg.IsFixed, OrigArg.OrigValue);
+ OrigArg.OrigValue);
return;
}
@@ -313,7 +314,7 @@ void CallLowering::splitToValueTypes(const ArgInfo &OrigArg,
for (unsigned i = 0, e = SplitVTs.size(); i < e; ++i) {
Type *SplitTy = SplitVTs[i].getTypeForEVT(Ctx);
SplitArgs.emplace_back(OrigArg.Regs[i], SplitTy, OrigArg.OrigArgIndex,
- OrigArg.Flags[0], OrigArg.IsFixed);
+ OrigArg.Flags[0]);
if (NeedsRegBlock)
SplitArgs.back().Flags[0].setInConsecutiveRegs();
}
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index bbfae57..d30dfa7 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -2209,7 +2209,7 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
: TargetOpcode::LIFETIME_END;
- const AllocaInst *AI = dyn_cast<AllocaInst>(CI.getArgOperand(1));
+ const AllocaInst *AI = dyn_cast<AllocaInst>(CI.getArgOperand(0));
if (!AI || !AI->isStaticAlloca())
return true;
diff --git a/llvm/lib/CodeGen/MIRParser/MIRParser.cpp b/llvm/lib/CodeGen/MIRParser/MIRParser.cpp
index 3e99e57..2936d66 100644
--- a/llvm/lib/CodeGen/MIRParser/MIRParser.cpp
+++ b/llvm/lib/CodeGen/MIRParser/MIRParser.cpp
@@ -529,7 +529,7 @@ void MIRParserImpl::setupDebugValueTracking(
unsigned MaxInstrNum = 0;
for (auto &MBB : MF)
for (auto &MI : MBB)
- MaxInstrNum = std::max((unsigned)MI.peekDebugInstrNum(), MaxInstrNum);
+ MaxInstrNum = std::max(MI.peekDebugInstrNum(), MaxInstrNum);
MF.setDebugInstrNumberingCount(MaxInstrNum);
// Load any substitutions.
diff --git a/llvm/lib/CodeGen/RegisterPressure.cpp b/llvm/lib/CodeGen/RegisterPressure.cpp
index ca51b67..5f37890 100644
--- a/llvm/lib/CodeGen/RegisterPressure.cpp
+++ b/llvm/lib/CodeGen/RegisterPressure.cpp
@@ -1001,7 +1001,7 @@ static void computeMaxPressureDelta(ArrayRef<unsigned> OldMaxPressureVec,
++CritIdx;
if (CritIdx != CritEnd && CriticalPSets[CritIdx].getPSet() == i) {
- int PDiff = (int)PNew - (int)CriticalPSets[CritIdx].getUnitInc();
+ int PDiff = (int)PNew - CriticalPSets[CritIdx].getUnitInc();
if (PDiff > 0) {
Delta.CriticalMax = PressureChange(i);
Delta.CriticalMax.setUnitInc(PDiff);
@@ -1191,7 +1191,7 @@ getUpwardPressureDelta(const MachineInstr *MI, /*const*/ PressureDiff &PDiff,
++CritIdx;
if (CritIdx != CritEnd && CriticalPSets[CritIdx].getPSet() == PSetID) {
- int CritInc = (int)MNew - (int)CriticalPSets[CritIdx].getUnitInc();
+ int CritInc = (int)MNew - CriticalPSets[CritIdx].getUnitInc();
if (CritInc > 0 && CritInc <= std::numeric_limits<int16_t>::max()) {
Delta.CriticalMax = PressureChange(PSetID);
Delta.CriticalMax.setUnitInc(CritInc);
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 7341914..17703f5 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -12843,22 +12843,21 @@ SDValue DAGCombiner::visitMHISTOGRAM(SDNode *N) {
SDLoc DL(HG);
EVT MemVT = HG->getMemoryVT();
+ EVT DataVT = Index.getValueType();
MachineMemOperand *MMO = HG->getMemOperand();
ISD::MemIndexType IndexType = HG->getIndexType();
if (ISD::isConstantSplatVectorAllZeros(Mask.getNode()))
return Chain;
- SDValue Ops[] = {Chain, Inc, Mask, BasePtr, Index,
- HG->getScale(), HG->getIntID()};
- if (refineUniformBase(BasePtr, Index, HG->isIndexScaled(), DAG, DL))
+ if (refineUniformBase(BasePtr, Index, HG->isIndexScaled(), DAG, DL) ||
+ refineIndexType(Index, IndexType, DataVT, DAG)) {
+ SDValue Ops[] = {Chain, Inc, Mask, BasePtr, Index,
+ HG->getScale(), HG->getIntID()};
return DAG.getMaskedHistogram(DAG.getVTList(MVT::Other), MemVT, DL, Ops,
MMO, IndexType);
+ }
- EVT DataVT = Index.getValueType();
- if (refineIndexType(Index, IndexType, DataVT, DAG))
- return DAG.getMaskedHistogram(DAG.getVTList(MVT::Other), MemVT, DL, Ops,
- MMO, IndexType);
return SDValue();
}
@@ -16343,6 +16342,38 @@ SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
DAG, DL);
}
break;
+ case ISD::ABDU:
+ case ISD::ABDS:
+ // (trunc (abdu/abds a, b)) → (abdu/abds (trunc a), (trunc b))
+ if (!LegalOperations || N0.hasOneUse()) {
+ EVT SrcVT = N0.getValueType();
+ EVT TruncVT = VT;
+ unsigned SrcBits = SrcVT.getScalarSizeInBits();
+ unsigned TruncBits = TruncVT.getScalarSizeInBits();
+ unsigned NeededBits = SrcBits - TruncBits;
+
+ SDValue A = N0.getOperand(0);
+ SDValue B = N0.getOperand(1);
+ bool CanFold = false;
+
+ if (N0.getOpcode() == ISD::ABDU) {
+ KnownBits KnownA = DAG.computeKnownBits(A);
+ KnownBits KnownB = DAG.computeKnownBits(B);
+ CanFold = KnownA.countMinLeadingZeros() >= NeededBits &&
+ KnownB.countMinLeadingZeros() >= NeededBits;
+ } else {
+ unsigned SignBitsA = DAG.ComputeNumSignBits(A);
+ unsigned SignBitsB = DAG.ComputeNumSignBits(B);
+ CanFold = SignBitsA > NeededBits && SignBitsB > NeededBits;
+ }
+
+ if (CanFold && TLI.isOperationLegal(N0.getOpcode(), VT)) {
+ SDValue NewA = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, A);
+ SDValue NewB = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, B);
+ return DAG.getNode(N0.getOpcode(), DL, TruncVT, NewA, NewB);
+ }
+ }
+ break;
}
return SDValue();
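The new ABDU/ABDS case above narrows an absolute difference through a truncate when the operands are known to already fit in the narrow type. A minimal standalone sketch of why the unsigned case is sound, assuming plain C++ and 32-to-16-bit widths (illustrative only, not part of the patch):

#include <cstdint>
#include <cstdio>

// Unsigned absolute difference at two widths. If a and b both fit in 16 bits,
// truncating before or after the abdu gives the same value, which is the
// property the fold checks via countMinLeadingZeros (sign bits for abds).
static uint32_t abdu32(uint32_t a, uint32_t b) { return a > b ? a - b : b - a; }
static uint16_t abdu16(uint16_t a, uint16_t b) { return a > b ? a - b : b - a; }

int main() {
  for (uint32_t a = 0; a <= 0xFFFF; a += 37)
    for (uint32_t b = 0; b <= 0xFFFF; b += 41) {
      uint16_t Wide = static_cast<uint16_t>(abdu32(a, b));      // trunc(abdu(a, b))
      uint16_t Narrow = abdu16(static_cast<uint16_t>(a),
                               static_cast<uint16_t>(b));       // abdu(trunc a, trunc b)
      if (Wide != Narrow) {
        std::printf("mismatch at a=%u b=%u\n", (unsigned)a, (unsigned)b);
        return 1;
      }
    }
  std::puts("trunc(abdu(a, b)) == abdu(trunc(a), trunc(b)) on all sampled inputs");
  return 0;
}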
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 649a310..5ef1746 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -1371,7 +1371,7 @@ void SelectionDAG::init(MachineFunction &NewMF,
const TargetLibraryInfo *LibraryInfo,
UniformityInfo *NewUA, ProfileSummaryInfo *PSIin,
BlockFrequencyInfo *BFIin, MachineModuleInfo &MMIin,
- FunctionVarLocs const *VarLocs, bool HasDivergency) {
+ FunctionVarLocs const *VarLocs) {
MF = &NewMF;
SDAGISelPass = PassPtr;
ORE = &NewORE;
@@ -1384,7 +1384,6 @@ void SelectionDAG::init(MachineFunction &NewMF,
BFI = BFIin;
MMI = &MMIin;
FnVarLocs = VarLocs;
- DivergentTarget = HasDivergency;
}
SelectionDAG::~SelectionDAG() {
@@ -2331,8 +2330,7 @@ SDValue SelectionDAG::getRegister(Register Reg, EVT VT) {
return SDValue(E, 0);
auto *N = newSDNode<RegisterSDNode>(Reg, VTs);
- N->SDNodeBits.IsDivergent =
- DivergentTarget && TLI->isSDNodeSourceOfDivergence(N, FLI, UA);
+ N->SDNodeBits.IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, UA);
CSEMap.InsertNode(N, IP);
InsertNode(N);
return SDValue(N, 0);
@@ -5630,6 +5628,7 @@ bool SelectionDAG::canCreateUndefOrPoison(SDValue Op, const APInt &DemandedElts,
case ISD::FDIV:
case ISD::FREM:
case ISD::FCOPYSIGN:
+ case ISD::FP_EXTEND:
// No poison except from flags (which is handled above)
return false;
@@ -8888,6 +8887,44 @@ static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI,
}
}
+std::pair<SDValue, SDValue>
+SelectionDAG::getMemcmp(SDValue Chain, const SDLoc &dl, SDValue Mem0,
+ SDValue Mem1, SDValue Size, const CallInst *CI) {
+ const char *LibCallName = TLI->getLibcallName(RTLIB::MEMCMP);
+ if (!LibCallName)
+ return {};
+
+ // Emit a library call.
+ auto GetEntry = [](Type *Ty, SDValue &SDV) {
+ TargetLowering::ArgListEntry E;
+ E.Ty = Ty;
+ E.Node = SDV;
+ return E;
+ };
+
+ PointerType *PT = PointerType::getUnqual(*getContext());
+ TargetLowering::ArgListTy Args = {
+ GetEntry(PT, Mem0), GetEntry(PT, Mem1),
+ GetEntry(getDataLayout().getIntPtrType(*getContext()), Size)};
+
+ TargetLowering::CallLoweringInfo CLI(*this);
+ bool IsTailCall = false;
+ bool ReturnsFirstArg = CI && funcReturnsFirstArgOfCall(*CI);
+ IsTailCall = CI && CI->isTailCall() &&
+ isInTailCallPosition(*CI, getTarget(), ReturnsFirstArg);
+
+ CLI.setDebugLoc(dl)
+ .setChain(Chain)
+ .setLibCallee(
+ TLI->getLibcallCallingConv(RTLIB::MEMCMP),
+ Type::getInt32Ty(*getContext()),
+ getExternalSymbol(LibCallName, TLI->getPointerTy(getDataLayout())),
+ std::move(Args))
+ .setTailCall(IsTailCall);
+
+ return TLI->LowerCallTo(CLI);
+}
+
SDValue SelectionDAG::getMemcpy(
SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size,
Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI,
@@ -12225,8 +12262,6 @@ static bool gluePropagatesDivergence(const SDNode *Node) {
}
bool SelectionDAG::calculateDivergence(SDNode *N) {
- if (!DivergentTarget)
- return false;
if (TLI->isSDNodeAlwaysUniform(N)) {
assert(!TLI->isSDNodeSourceOfDivergence(N, FLI, UA) &&
"Conflicting divergence information!");
@@ -12246,8 +12281,6 @@ bool SelectionDAG::calculateDivergence(SDNode *N) {
}
void SelectionDAG::updateDivergence(SDNode *N) {
- if (!DivergentTarget)
- return;
SmallVector<SDNode *, 16> Worklist(1, N);
do {
N = Worklist.pop_back_val();
@@ -13808,20 +13841,16 @@ void SelectionDAG::createOperands(SDNode *Node, ArrayRef<SDValue> Vals) {
Ops[I].setInitial(Vals[I]);
EVT VT = Ops[I].getValueType();
- // Take care of the Node's operands iff target has divergence
// Skip Chain. It does not carry divergence.
- if (DivergentTarget && VT != MVT::Other &&
+ if (VT != MVT::Other &&
(VT != MVT::Glue || gluePropagatesDivergence(Ops[I].getNode())) &&
Ops[I].getNode()->isDivergent()) {
- // Node is going to be divergent if at least one of its operand is
- // divergent, unless it belongs to the "AlwaysUniform" exemptions.
IsDivergent = true;
}
}
Node->NumOperands = Vals.size();
Node->OperandList = Ops;
- // Check the divergence of the Node itself.
- if (DivergentTarget && !TLI->isSDNodeAlwaysUniform(Node)) {
+ if (!TLI->isSDNodeAlwaysUniform(Node)) {
IsDivergent |= TLI->isSDNodeSourceOfDivergence(Node, FLI, UA);
Node->SDNodeBits.IsDivergent = IsDivergent;
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index d0815e9..48ab797 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -2273,9 +2273,8 @@ void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
Flags.setNoExt();
for (unsigned i = 0; i < NumParts; ++i) {
- Outs.push_back(ISD::OutputArg(Flags,
- Parts[i].getValueType().getSimpleVT(),
- VT, /*isfixed=*/true, 0, 0));
+ Outs.push_back(ISD::OutputArg(
+ Flags, Parts[i].getValueType().getSimpleVT(), VT, 0, 0));
OutVals.push_back(Parts[i]);
}
}
@@ -2291,9 +2290,9 @@ void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
assert(SwiftError.getFunctionArg() && "Need a swift error argument");
ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
Flags.setSwiftError();
- Outs.push_back(ISD::OutputArg(
- Flags, /*vt=*/TLI.getPointerTy(DL), /*argvt=*/EVT(TLI.getPointerTy(DL)),
- /*isfixed=*/true, /*origidx=*/1, /*partOffs=*/0));
+ Outs.push_back(ISD::OutputArg(Flags, /*vt=*/TLI.getPointerTy(DL),
+ /*argvt=*/EVT(TLI.getPointerTy(DL)),
+ /*origidx=*/1, /*partOffs=*/0));
// Create SDNode for the swifterror virtual register.
OutVals.push_back(
DAG.getRegister(SwiftError.getOrCreateVRegUseAt(
@@ -3978,6 +3977,11 @@ void SelectionDAGBuilder::visitSIToFP(const User &I) {
setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurSDLoc(), DestVT, N));
}
+void SelectionDAGBuilder::visitPtrToAddr(const User &I) {
+ // FIXME: this is not correct for pointers with addr width != pointer width
+ visitPtrToInt(I);
+}
+
void SelectionDAGBuilder::visitPtrToInt(const User &I) {
// What to do depends on the size of the integer and the size of the pointer.
// We can either truncate, zero extend, or no-op, accordingly.
@@ -7598,7 +7602,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
if (TM.getOptLevel() == CodeGenOptLevel::None)
return;
- const AllocaInst *LifetimeObject = dyn_cast<AllocaInst>(I.getArgOperand(1));
+ const AllocaInst *LifetimeObject = dyn_cast<AllocaInst>(I.getArgOperand(0));
if (!LifetimeObject)
return;
@@ -9091,7 +9095,7 @@ bool SelectionDAGBuilder::visitMemCmpBCmpCall(const CallInst &I) {
const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
std::pair<SDValue, SDValue> Res = TSI.EmitTargetCodeForMemcmp(
DAG, getCurSDLoc(), DAG.getRoot(), getValue(LHS), getValue(RHS),
- getValue(Size), MachinePointerInfo(LHS), MachinePointerInfo(RHS));
+ getValue(Size), &I);
if (Res.first.getNode()) {
processIntegerCallValue(I, Res.first, true);
PendingLoads.push_back(Res.second);
@@ -11124,6 +11128,8 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
const Align OriginalAlignment(getABIAlignmentForCallingConv(ArgTy, DL));
Flags.setOrigAlign(OriginalAlignment);
+ if (i >= CLI.NumFixedArgs)
+ Flags.setVarArg();
if (Args[i].Ty->isPointerTy()) {
Flags.setPointer();
Flags.setPointerAddrSpace(
@@ -11246,8 +11252,7 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
// For scalable vectors the scalable part is currently handled
// by individual targets, so we just use the known minimum size here.
ISD::OutputArg MyFlags(
- Flags, Parts[j].getValueType().getSimpleVT(), VT,
- i < CLI.NumFixedArgs, i,
+ Flags, Parts[j].getValueType().getSimpleVT(), VT, i,
j * Parts[j].getValueType().getStoreSize().getKnownMinValue());
if (NumParts > 1 && j == 0)
MyFlags.Flags.setSplit();
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
index c251755..e0835e6 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
@@ -574,6 +574,7 @@ private:
void visitFPToSI(const User &I);
void visitUIToFP(const User &I);
void visitSIToFP(const User &I);
+ void visitPtrToAddr(const User &I);
void visitPtrToInt(const User &I);
void visitIntToPtr(const User &I);
void visitBitCast(const User &I);
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index 26071ed..ece50ed 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -480,10 +480,7 @@ void SelectionDAGISel::initializeAnalysisResults(
MachineModuleInfo &MMI =
MAMP.getCachedResult<MachineModuleAnalysis>(*Fn.getParent())->getMMI();
- TTI = &FAM.getResult<TargetIRAnalysis>(Fn);
-
- CurDAG->init(*MF, *ORE, MFAM, LibInfo, UA, PSI, BFI, MMI, FnVarLocs,
- TTI->hasBranchDivergence(&Fn));
+ CurDAG->init(*MF, *ORE, MFAM, LibInfo, UA, PSI, BFI, MMI, FnVarLocs);
// Now get the optional analyzes if we want to.
// This is based on the possibly changed OptLevel (after optnone is taken
@@ -501,6 +498,10 @@ void SelectionDAGISel::initializeAnalysisResults(
BatchAA = std::nullopt;
SP = &FAM.getResult<SSPLayoutAnalysis>(Fn);
+
+#if !defined(NDEBUG) && LLVM_ENABLE_ABI_BREAKING_CHECKS
+ TTI = &FAM.getResult<TargetIRAnalysis>(Fn);
+#endif
}
void SelectionDAGISel::initializeAnalysisResults(MachineFunctionPass &MFP) {
@@ -536,10 +537,7 @@ void SelectionDAGISel::initializeAnalysisResults(MachineFunctionPass &MFP) {
MachineModuleInfo &MMI =
MFP.getAnalysis<MachineModuleInfoWrapperPass>().getMMI();
- TTI = &MFP.getAnalysis<TargetTransformInfoWrapperPass>().getTTI(Fn);
-
- CurDAG->init(*MF, *ORE, &MFP, LibInfo, UA, PSI, BFI, MMI, FnVarLocs,
- TTI->hasBranchDivergence(&Fn));
+ CurDAG->init(*MF, *ORE, &MFP, LibInfo, UA, PSI, BFI, MMI, FnVarLocs);
// Now get the optional analyzes if we want to.
// This is based on the possibly changed OptLevel (after optnone is taken
@@ -558,6 +556,10 @@ void SelectionDAGISel::initializeAnalysisResults(MachineFunctionPass &MFP) {
BatchAA = std::nullopt;
SP = &MFP.getAnalysis<StackProtector>().getLayoutInfo();
+
+#if !defined(NDEBUG) && LLVM_ENABLE_ABI_BREAKING_CHECKS
+ TTI = &MFP.getAnalysis<TargetTransformInfoWrapperPass>().getTTI(Fn);
+#endif
}
bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index e235d14..8fbabfa 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -5125,10 +5125,11 @@ SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
!ISD::isUnsignedIntSetCC(Cond))) &&
isTypeDesirableForOp(ISD::SETCC, N0.getOperand(0).getValueType())) {
EVT NewVT = N0.getOperand(0).getValueType();
- SDValue NewConst = DAG.getConstant(ISD::isSignedIntSetCC(Cond)
- ? C1.sext(NewVT.getSizeInBits())
- : C1.zext(NewVT.getSizeInBits()),
- dl, NewVT);
+ SDValue NewConst = DAG.getConstant(
+ (N0->getFlags().hasNoSignedWrap() && !ISD::isUnsignedIntSetCC(Cond))
+ ? C1.sext(NewVT.getSizeInBits())
+ : C1.zext(NewVT.getSizeInBits()),
+ dl, NewVT);
return DAG.getSetCC(dl, VT, N0.getOperand(0), NewConst, Cond);
}
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index 9f525ea..d80a229 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -1008,7 +1008,7 @@ unsigned TargetLoweringBase::getBitWidthForCttzElements(
CR = CR.subtract(APInt(64, 1));
unsigned EltWidth = RetTy->getScalarSizeInBits();
- EltWidth = std::min(EltWidth, (unsigned)CR.getActiveBits());
+ EltWidth = std::min(EltWidth, CR.getActiveBits());
EltWidth = std::max(llvm::bit_ceil(EltWidth), (unsigned)8);
return EltWidth;
@@ -1772,7 +1772,7 @@ void llvm::GetReturnInfo(CallingConv::ID CC, Type *ReturnType,
Flags.setZExt();
for (unsigned i = 0; i < NumParts; ++i)
- Outs.push_back(ISD::OutputArg(Flags, PartVT, VT, /*isfixed=*/true, 0, 0));
+ Outs.push_back(ISD::OutputArg(Flags, PartVT, VT, 0, 0));
}
}
@@ -1893,6 +1893,7 @@ int TargetLoweringBase::InstructionOpcodeToISD(unsigned Opcode) const {
case SIToFP: return ISD::SINT_TO_FP;
case FPTrunc: return ISD::FP_ROUND;
case FPExt: return ISD::FP_EXTEND;
+ case PtrToAddr: return ISD::BITCAST;
case PtrToInt: return ISD::BITCAST;
case IntToPtr: return ISD::BITCAST;
case BitCast: return ISD::BITCAST;
diff --git a/llvm/lib/DebugInfo/DWARF/DWARFGdbIndex.cpp b/llvm/lib/DebugInfo/DWARF/DWARFGdbIndex.cpp
index 987e639..a201fae 100644
--- a/llvm/lib/DebugInfo/DWARF/DWARFGdbIndex.cpp
+++ b/llvm/lib/DebugInfo/DWARF/DWARFGdbIndex.cpp
@@ -60,6 +60,20 @@ void DWARFGdbIndex::dumpSymbolTable(raw_ostream &OS) const {
", filled slots:",
SymbolTableOffset, (uint64_t)SymbolTable.size())
<< '\n';
+
+ const auto FindCuVectorId = [&](uint32_t VecOffset) {
+ // Entries in ConstantPoolVectors are sorted by their offset into the
+ // constant pool; see how ConstantPoolVectors is populated in parseImpl.
+ const auto *It =
+ llvm::lower_bound(ConstantPoolVectors, VecOffset,
+ [](const auto &ConstantPoolEntry, uint32_t Offset) {
+ return ConstantPoolEntry.first < Offset;
+ });
+ assert(It != ConstantPoolVectors.end() && It->first == VecOffset &&
+ "Invalid symbol table");
+ return It - ConstantPoolVectors.begin();
+ };
+
uint32_t I = -1;
for (const SymTableEntry &E : SymbolTable) {
++I;
@@ -72,13 +86,7 @@ void DWARFGdbIndex::dumpSymbolTable(raw_ostream &OS) const {
StringRef Name = ConstantPoolStrings.substr(
ConstantPoolOffset - StringPoolOffset + E.NameOffset);
- auto CuVector = llvm::find_if(
- ConstantPoolVectors,
- [&](const std::pair<uint32_t, SmallVector<uint32_t, 0>> &V) {
- return V.first == E.VecOffset;
- });
- assert(CuVector != ConstantPoolVectors.end() && "Invalid symbol table");
- uint32_t CuVectorId = CuVector - ConstantPoolVectors.begin();
+ const uint32_t CuVectorId = FindCuVectorId(E.VecOffset);
OS << format(" String name: %s, CU vector index: %d\n", Name.data(),
CuVectorId);
}
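The hunk above replaces a linear find_if with a binary search, relying on ConstantPoolVectors being built in increasing offset order. A small standalone sketch of the same lookup, with std::lower_bound standing in for llvm::lower_bound and illustrative container and function names (not part of the patch):

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <utility>
#include <vector>

using CuVec = std::pair<uint32_t, std::vector<uint32_t>>; // (offset, CU indices)

// Binary search for the entry whose offset equals VecOffset. The pool must be
// sorted by offset, which holds because entries are appended in parse order.
static std::size_t findCuVectorId(const std::vector<CuVec> &Pool,
                                  uint32_t VecOffset) {
  auto It = std::lower_bound(Pool.begin(), Pool.end(), VecOffset,
                             [](const CuVec &Entry, uint32_t Offset) {
                               return Entry.first < Offset;
                             });
  assert(It != Pool.end() && It->first == VecOffset && "Invalid symbol table");
  return static_cast<std::size_t>(It - Pool.begin());
}

int main() {
  std::vector<CuVec> Pool = {{0, {1}}, {8, {2, 3}}, {20, {4}}};
  std::printf("vector id for offset 8: %zu\n", findCuVectorId(Pool, 8)); // prints 1
  return 0;
}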
diff --git a/llvm/lib/ExecutionEngine/Orc/EPCIndirectionUtils.cpp b/llvm/lib/ExecutionEngine/Orc/EPCIndirectionUtils.cpp
index 8e4937d..91a3115 100644
--- a/llvm/lib/ExecutionEngine/Orc/EPCIndirectionUtils.cpp
+++ b/llvm/lib/ExecutionEngine/Orc/EPCIndirectionUtils.cpp
@@ -169,7 +169,7 @@ Error EPCIndirectStubsManager::createStubs(const StubInitsMap &StubInits) {
std::vector<tpctypes::UInt64Write> PtrUpdates;
for (auto &SI : StubInits)
PtrUpdates.push_back({(*AvailableStubInfos)[ASIdx++].PointerAddress,
- static_cast<uint64_t>(SI.second.first.getValue())});
+ SI.second.first.getValue()});
return MemAccess.writeUInt64s(PtrUpdates);
}
default:
diff --git a/llvm/lib/Frontend/HLSL/HLSLRootSignature.cpp b/llvm/lib/Frontend/HLSL/HLSLRootSignature.cpp
index 79904fc..574883e 100644
--- a/llvm/lib/Frontend/HLSL/HLSLRootSignature.cpp
+++ b/llvm/lib/Frontend/HLSL/HLSLRootSignature.cpp
@@ -92,16 +92,9 @@ static raw_ostream &operator<<(raw_ostream &OS,
return OS;
}
-static const EnumEntry<dxil::ResourceClass> ResourceClassNames[] = {
- {"CBV", dxil::ResourceClass::CBuffer},
- {"SRV", dxil::ResourceClass::SRV},
- {"UAV", dxil::ResourceClass::UAV},
- {"Sampler", dxil::ResourceClass::Sampler},
-};
-
static raw_ostream &operator<<(raw_ostream &OS, const ClauseType &Type) {
OS << enumToStringRef(dxil::ResourceClass(llvm::to_underlying(Type)),
- ArrayRef(ResourceClassNames));
+ dxbc::getResourceClasses());
return OS;
}
diff --git a/llvm/lib/Frontend/HLSL/RootSignatureMetadata.cpp b/llvm/lib/Frontend/HLSL/RootSignatureMetadata.cpp
index 9cf4ed1..1cda308 100644
--- a/llvm/lib/Frontend/HLSL/RootSignatureMetadata.cpp
+++ b/llvm/lib/Frontend/HLSL/RootSignatureMetadata.cpp
@@ -51,13 +51,6 @@ static std::optional<StringRef> extractMdStringValue(MDNode *Node,
return NodeText->getString();
}
-static const EnumEntry<dxil::ResourceClass> ResourceClassNames[] = {
- {"CBV", dxil::ResourceClass::CBuffer},
- {"SRV", dxil::ResourceClass::SRV},
- {"UAV", dxil::ResourceClass::UAV},
- {"Sampler", dxil::ResourceClass::Sampler},
-};
-
namespace {
// We use the OverloadVisit with std::visit to ensure the compiler catches if a
@@ -128,7 +121,7 @@ MDNode *MetadataBuilder::BuildRootDescriptor(const RootDescriptor &Descriptor) {
IRBuilder<> Builder(Ctx);
StringRef ResName =
enumToStringRef(dxil::ResourceClass(to_underlying(Descriptor.Type)),
- ArrayRef(ResourceClassNames));
+ dxbc::getResourceClasses());
assert(!ResName.empty() && "Provided an invalid Resource Class");
SmallString<7> Name({"Root", ResName});
Metadata *Operands[] = {
@@ -170,7 +163,7 @@ MDNode *MetadataBuilder::BuildDescriptorTableClause(
IRBuilder<> Builder(Ctx);
StringRef ResName =
enumToStringRef(dxil::ResourceClass(to_underlying(Clause.Type)),
- ArrayRef(ResourceClassNames));
+ dxbc::getResourceClasses());
assert(!ResName.empty() && "Provided an invalid Resource Class");
Metadata *Operands[] = {
MDString::get(Ctx, ResName),
diff --git a/llvm/lib/Frontend/HLSL/RootSignatureValidations.cpp b/llvm/lib/Frontend/HLSL/RootSignatureValidations.cpp
index 9d84aa8..72308a3d 100644
--- a/llvm/lib/Frontend/HLSL/RootSignatureValidations.cpp
+++ b/llvm/lib/Frontend/HLSL/RootSignatureValidations.cpp
@@ -29,7 +29,7 @@ bool verifyRegisterValue(uint32_t RegisterValue) {
// This range is reserved, therefore invalid, according to the spec
// https://github.com/llvm/wg-hlsl/blob/main/proposals/0002-root-signature-in-clang.md#all-the-values-should-be-legal
bool verifyRegisterSpace(uint32_t RegisterSpace) {
- return !(RegisterSpace >= 0xFFFFFFF0 && RegisterSpace <= 0xFFFFFFFF);
+ return !(RegisterSpace >= 0xFFFFFFF0);
}
bool verifyRootDescriptorFlag(uint32_t Version, uint32_t FlagsVal) {
diff --git a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
index 3aa4f7a..ea027e4 100644
--- a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
+++ b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
@@ -4014,6 +4014,340 @@ OpenMPIRBuilder::createMasked(const LocationDescription &Loc,
/*Conditional*/ true, /*hasFinalize*/ true);
}
+static llvm::CallInst *emitNoUnwindRuntimeCall(IRBuilder<> &Builder,
+ llvm::FunctionCallee Callee,
+ ArrayRef<llvm::Value *> Args,
+ const llvm::Twine &Name) {
+ llvm::CallInst *Call = Builder.CreateCall(
+ Callee, Args, SmallVector<llvm::OperandBundleDef, 1>(), Name);
+ Call->setDoesNotThrow();
+ return Call;
+}
+
+// Expects the input basic block to be dominated by BeforeScanBB.
+// Once the scan directive is encountered, the code after it should be
+// dominated by AfterScanBB. The scan directive splits the code sequence into
+// an input phase and a scan phase. Based on whether the inclusive or exclusive
+// clause is used and whether the input loop or the scan loop is being lowered,
+// it adds jumps to the input and scan phases. The first scan loop is the input
+// loop and the second is the scan loop. The generated code currently handles
+// only inclusive scans.
+OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::createScan(
+ const LocationDescription &Loc, InsertPointTy AllocaIP,
+ ArrayRef<llvm::Value *> ScanVars, ArrayRef<llvm::Type *> ScanVarsType,
+ bool IsInclusive, ScanInfo *ScanRedInfo) {
+ if (ScanRedInfo->OMPFirstScanLoop) {
+ llvm::Error Err = emitScanBasedDirectiveDeclsIR(AllocaIP, ScanVars,
+ ScanVarsType, ScanRedInfo);
+ if (Err)
+ return Err;
+ }
+ if (!updateToLocation(Loc))
+ return Loc.IP;
+
+ llvm::Value *IV = ScanRedInfo->IV;
+
+ if (ScanRedInfo->OMPFirstScanLoop) {
+ // Emit buffer[i] = red; at the end of the input phase.
+ for (size_t i = 0; i < ScanVars.size(); i++) {
+ Value *BuffPtr = (*(ScanRedInfo->ScanBuffPtrs))[ScanVars[i]];
+ Value *Buff = Builder.CreateLoad(Builder.getPtrTy(), BuffPtr);
+ Type *DestTy = ScanVarsType[i];
+ Value *Val = Builder.CreateInBoundsGEP(DestTy, Buff, IV, "arrayOffset");
+ Value *Src = Builder.CreateLoad(DestTy, ScanVars[i]);
+
+ Builder.CreateStore(Src, Val);
+ }
+ }
+ Builder.CreateBr(ScanRedInfo->OMPScanLoopExit);
+ emitBlock(ScanRedInfo->OMPScanDispatch,
+ Builder.GetInsertBlock()->getParent());
+
+ if (!ScanRedInfo->OMPFirstScanLoop) {
+ IV = ScanRedInfo->IV;
+ // Emit red = buffer[i]; at the entrance to the scan phase.
+ // TODO: if exclusive scan, the red = buffer[i-1] needs to be updated.
+ for (size_t i = 0; i < ScanVars.size(); i++) {
+ Value *BuffPtr = (*(ScanRedInfo->ScanBuffPtrs))[ScanVars[i]];
+ Value *Buff = Builder.CreateLoad(Builder.getPtrTy(), BuffPtr);
+ Type *DestTy = ScanVarsType[i];
+ Value *SrcPtr =
+ Builder.CreateInBoundsGEP(DestTy, Buff, IV, "arrayOffset");
+ Value *Src = Builder.CreateLoad(DestTy, SrcPtr);
+ Builder.CreateStore(Src, ScanVars[i]);
+ }
+ }
+
+ // TODO: Update it to CreateBr and remove dead blocks
+ llvm::Value *CmpI = Builder.getInt1(true);
+ if (ScanRedInfo->OMPFirstScanLoop == IsInclusive) {
+ Builder.CreateCondBr(CmpI, ScanRedInfo->OMPBeforeScanBlock,
+ ScanRedInfo->OMPAfterScanBlock);
+ } else {
+ Builder.CreateCondBr(CmpI, ScanRedInfo->OMPAfterScanBlock,
+ ScanRedInfo->OMPBeforeScanBlock);
+ }
+ emitBlock(ScanRedInfo->OMPAfterScanBlock,
+ Builder.GetInsertBlock()->getParent());
+ Builder.SetInsertPoint(ScanRedInfo->OMPAfterScanBlock);
+ return Builder.saveIP();
+}
+
+Error OpenMPIRBuilder::emitScanBasedDirectiveDeclsIR(
+ InsertPointTy AllocaIP, ArrayRef<Value *> ScanVars,
+ ArrayRef<Type *> ScanVarsType, ScanInfo *ScanRedInfo) {
+
+ Builder.restoreIP(AllocaIP);
+ // Create the shared pointer at alloca IP.
+ for (size_t i = 0; i < ScanVars.size(); i++) {
+ llvm::Value *BuffPtr =
+ Builder.CreateAlloca(Builder.getPtrTy(), nullptr, "vla");
+ (*(ScanRedInfo->ScanBuffPtrs))[ScanVars[i]] = BuffPtr;
+ }
+
+ // Allocate the temporary buffer on the master thread.
+ auto BodyGenCB = [&](InsertPointTy AllocaIP,
+ InsertPointTy CodeGenIP) -> Error {
+ Builder.restoreIP(CodeGenIP);
+ Value *AllocSpan =
+ Builder.CreateAdd(ScanRedInfo->Span, Builder.getInt32(1));
+ for (size_t i = 0; i < ScanVars.size(); i++) {
+ Type *IntPtrTy = Builder.getInt32Ty();
+ Constant *Allocsize = ConstantExpr::getSizeOf(ScanVarsType[i]);
+ Allocsize = ConstantExpr::getTruncOrBitCast(Allocsize, IntPtrTy);
+ Value *Buff = Builder.CreateMalloc(IntPtrTy, ScanVarsType[i], Allocsize,
+ AllocSpan, nullptr, "arr");
+ Builder.CreateStore(Buff, (*(ScanRedInfo->ScanBuffPtrs))[ScanVars[i]]);
+ }
+ return Error::success();
+ };
+ // TODO: Perform finalization actions for variables. This has to be
+ // called for variables which have destructors/finalizers.
+ auto FiniCB = [&](InsertPointTy CodeGenIP) { return llvm::Error::success(); };
+
+ Builder.SetInsertPoint(ScanRedInfo->OMPScanInit->getTerminator());
+ llvm::Value *FilterVal = Builder.getInt32(0);
+ llvm::OpenMPIRBuilder::InsertPointOrErrorTy AfterIP =
+ createMasked(Builder.saveIP(), BodyGenCB, FiniCB, FilterVal);
+
+ if (!AfterIP)
+ return AfterIP.takeError();
+ Builder.restoreIP(*AfterIP);
+ BasicBlock *InputBB = Builder.GetInsertBlock();
+ if (InputBB->getTerminator())
+ Builder.SetInsertPoint(Builder.GetInsertBlock()->getTerminator());
+ AfterIP = createBarrier(Builder.saveIP(), llvm::omp::OMPD_barrier);
+ if (!AfterIP)
+ return AfterIP.takeError();
+ Builder.restoreIP(*AfterIP);
+
+ return Error::success();
+}
+
+Error OpenMPIRBuilder::emitScanBasedDirectiveFinalsIR(
+ ArrayRef<ReductionInfo> ReductionInfos, ScanInfo *ScanRedInfo) {
+ auto BodyGenCB = [&](InsertPointTy AllocaIP,
+ InsertPointTy CodeGenIP) -> Error {
+ Builder.restoreIP(CodeGenIP);
+ for (ReductionInfo RedInfo : ReductionInfos) {
+ Value *PrivateVar = RedInfo.PrivateVariable;
+ Value *OrigVar = RedInfo.Variable;
+ Value *BuffPtr = (*(ScanRedInfo->ScanBuffPtrs))[PrivateVar];
+ Value *Buff = Builder.CreateLoad(Builder.getPtrTy(), BuffPtr);
+
+ Type *SrcTy = RedInfo.ElementType;
+ Value *Val = Builder.CreateInBoundsGEP(SrcTy, Buff, ScanRedInfo->Span,
+ "arrayOffset");
+ Value *Src = Builder.CreateLoad(SrcTy, Val);
+
+ Builder.CreateStore(Src, OrigVar);
+ Builder.CreateFree(Buff);
+ }
+ return Error::success();
+ };
+ // TODO: Perform finalization actions for variables. This has to be
+ // called for variables which have destructors/finalizers.
+ auto FiniCB = [&](InsertPointTy CodeGenIP) { return llvm::Error::success(); };
+
+ if (ScanRedInfo->OMPScanFinish->getTerminator())
+ Builder.SetInsertPoint(ScanRedInfo->OMPScanFinish->getTerminator());
+ else
+ Builder.SetInsertPoint(ScanRedInfo->OMPScanFinish);
+
+ llvm::Value *FilterVal = Builder.getInt32(0);
+ llvm::OpenMPIRBuilder::InsertPointOrErrorTy AfterIP =
+ createMasked(Builder.saveIP(), BodyGenCB, FiniCB, FilterVal);
+
+ if (!AfterIP)
+ return AfterIP.takeError();
+ Builder.restoreIP(*AfterIP);
+ BasicBlock *InputBB = Builder.GetInsertBlock();
+ if (InputBB->getTerminator())
+ Builder.SetInsertPoint(Builder.GetInsertBlock()->getTerminator());
+ AfterIP = createBarrier(Builder.saveIP(), llvm::omp::OMPD_barrier);
+ if (!AfterIP)
+ return AfterIP.takeError();
+ Builder.restoreIP(*AfterIP);
+ return Error::success();
+}
+
+OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::emitScanReduction(
+ const LocationDescription &Loc,
+ ArrayRef<llvm::OpenMPIRBuilder::ReductionInfo> ReductionInfos,
+ ScanInfo *ScanRedInfo) {
+
+ if (!updateToLocation(Loc))
+ return Loc.IP;
+ auto BodyGenCB = [&](InsertPointTy AllocaIP,
+ InsertPointTy CodeGenIP) -> Error {
+ Builder.restoreIP(CodeGenIP);
+ Function *CurFn = Builder.GetInsertBlock()->getParent();
+ // for (int k = 0; k <= ceil(log2(n)); ++k)
+ llvm::BasicBlock *LoopBB =
+ BasicBlock::Create(CurFn->getContext(), "omp.outer.log.scan.body");
+ llvm::BasicBlock *ExitBB =
+ splitBB(Builder, false, "omp.outer.log.scan.exit");
+ llvm::Function *F = llvm::Intrinsic::getOrInsertDeclaration(
+ Builder.GetInsertBlock()->getModule(),
+ (llvm::Intrinsic::ID)llvm::Intrinsic::log2, Builder.getDoubleTy());
+ llvm::BasicBlock *InputBB = Builder.GetInsertBlock();
+ llvm::Value *Arg =
+ Builder.CreateUIToFP(ScanRedInfo->Span, Builder.getDoubleTy());
+ llvm::Value *LogVal = emitNoUnwindRuntimeCall(Builder, F, Arg, "");
+ F = llvm::Intrinsic::getOrInsertDeclaration(
+ Builder.GetInsertBlock()->getModule(),
+ (llvm::Intrinsic::ID)llvm::Intrinsic::ceil, Builder.getDoubleTy());
+ LogVal = emitNoUnwindRuntimeCall(Builder, F, LogVal, "");
+ LogVal = Builder.CreateFPToUI(LogVal, Builder.getInt32Ty());
+ llvm::Value *NMin1 = Builder.CreateNUWSub(
+ ScanRedInfo->Span,
+ llvm::ConstantInt::get(ScanRedInfo->Span->getType(), 1));
+ Builder.SetInsertPoint(InputBB);
+ Builder.CreateBr(LoopBB);
+ emitBlock(LoopBB, CurFn);
+ Builder.SetInsertPoint(LoopBB);
+
+ PHINode *Counter = Builder.CreatePHI(Builder.getInt32Ty(), 2);
+ // size pow2k = 1;
+ PHINode *Pow2K = Builder.CreatePHI(Builder.getInt32Ty(), 2);
+ Counter->addIncoming(llvm::ConstantInt::get(Builder.getInt32Ty(), 0),
+ InputBB);
+ Pow2K->addIncoming(llvm::ConstantInt::get(Builder.getInt32Ty(), 1),
+ InputBB);
+ // for (size i = n - 1; i >= 2 ^ k; --i)
+ // tmp[i] op= tmp[i-pow2k];
+ llvm::BasicBlock *InnerLoopBB =
+ BasicBlock::Create(CurFn->getContext(), "omp.inner.log.scan.body");
+ llvm::BasicBlock *InnerExitBB =
+ BasicBlock::Create(CurFn->getContext(), "omp.inner.log.scan.exit");
+ llvm::Value *CmpI = Builder.CreateICmpUGE(NMin1, Pow2K);
+ Builder.CreateCondBr(CmpI, InnerLoopBB, InnerExitBB);
+ emitBlock(InnerLoopBB, CurFn);
+ Builder.SetInsertPoint(InnerLoopBB);
+ PHINode *IVal = Builder.CreatePHI(Builder.getInt32Ty(), 2);
+ IVal->addIncoming(NMin1, LoopBB);
+ for (ReductionInfo RedInfo : ReductionInfos) {
+ Value *ReductionVal = RedInfo.PrivateVariable;
+ Value *BuffPtr = (*(ScanRedInfo->ScanBuffPtrs))[ReductionVal];
+ Value *Buff = Builder.CreateLoad(Builder.getPtrTy(), BuffPtr);
+ Type *DestTy = RedInfo.ElementType;
+ Value *IV = Builder.CreateAdd(IVal, Builder.getInt32(1));
+ Value *LHSPtr =
+ Builder.CreateInBoundsGEP(DestTy, Buff, IV, "arrayOffset");
+ Value *OffsetIval = Builder.CreateNUWSub(IV, Pow2K);
+ Value *RHSPtr =
+ Builder.CreateInBoundsGEP(DestTy, Buff, OffsetIval, "arrayOffset");
+ Value *LHS = Builder.CreateLoad(DestTy, LHSPtr);
+ Value *RHS = Builder.CreateLoad(DestTy, RHSPtr);
+ llvm::Value *Result;
+ InsertPointOrErrorTy AfterIP =
+ RedInfo.ReductionGen(Builder.saveIP(), LHS, RHS, Result);
+ if (!AfterIP)
+ return AfterIP.takeError();
+ Builder.CreateStore(Result, LHSPtr);
+ }
+ llvm::Value *NextIVal = Builder.CreateNUWSub(
+ IVal, llvm::ConstantInt::get(Builder.getInt32Ty(), 1));
+ IVal->addIncoming(NextIVal, Builder.GetInsertBlock());
+ CmpI = Builder.CreateICmpUGE(NextIVal, Pow2K);
+ Builder.CreateCondBr(CmpI, InnerLoopBB, InnerExitBB);
+ emitBlock(InnerExitBB, CurFn);
+ llvm::Value *Next = Builder.CreateNUWAdd(
+ Counter, llvm::ConstantInt::get(Counter->getType(), 1));
+ Counter->addIncoming(Next, Builder.GetInsertBlock());
+ // pow2k <<= 1;
+ llvm::Value *NextPow2K = Builder.CreateShl(Pow2K, 1, "", /*HasNUW=*/true);
+ Pow2K->addIncoming(NextPow2K, Builder.GetInsertBlock());
+ llvm::Value *Cmp = Builder.CreateICmpNE(Next, LogVal);
+ Builder.CreateCondBr(Cmp, LoopBB, ExitBB);
+ Builder.SetInsertPoint(ExitBB->getFirstInsertionPt());
+ return Error::success();
+ };
+
+ // TODO: Perform finalization actions for variables. This has to be
+ // called for variables which have destructors/finalizers.
+ auto FiniCB = [&](InsertPointTy CodeGenIP) { return llvm::Error::success(); };
+
+ llvm::Value *FilterVal = Builder.getInt32(0);
+ llvm::OpenMPIRBuilder::InsertPointOrErrorTy AfterIP =
+ createMasked(Builder.saveIP(), BodyGenCB, FiniCB, FilterVal);
+
+ if (!AfterIP)
+ return AfterIP.takeError();
+ Builder.restoreIP(*AfterIP);
+ AfterIP = createBarrier(Builder.saveIP(), llvm::omp::OMPD_barrier);
+
+ if (!AfterIP)
+ return AfterIP.takeError();
+ Builder.restoreIP(*AfterIP);
+ Error Err = emitScanBasedDirectiveFinalsIR(ReductionInfos, ScanRedInfo);
+ if (Err)
+ return Err;
+
+ return AfterIP;
+}
+
+Error OpenMPIRBuilder::emitScanBasedDirectiveIR(
+ llvm::function_ref<Error()> InputLoopGen,
+ llvm::function_ref<Error(LocationDescription Loc)> ScanLoopGen,
+ ScanInfo *ScanRedInfo) {
+
+ {
+ // Emit loop with input phase:
+ // for (i: 0..<num_iters>) {
+ // <input phase>;
+ // buffer[i] = red;
+ // }
+ ScanRedInfo->OMPFirstScanLoop = true;
+ Error Err = InputLoopGen();
+ if (Err)
+ return Err;
+ }
+ {
+ // Emit loop with scan phase:
+ // for (i: 0..<num_iters>) {
+ // red = buffer[i];
+ // <scan phase>;
+ // }
+ ScanRedInfo->OMPFirstScanLoop = false;
+ Error Err = ScanLoopGen(Builder.saveIP());
+ if (Err)
+ return Err;
+ }
+ return Error::success();
+}
+
+void OpenMPIRBuilder::createScanBBs(ScanInfo *ScanRedInfo) {
+ Function *Fun = Builder.GetInsertBlock()->getParent();
+ ScanRedInfo->OMPScanDispatch =
+ BasicBlock::Create(Fun->getContext(), "omp.inscan.dispatch");
+ ScanRedInfo->OMPAfterScanBlock =
+ BasicBlock::Create(Fun->getContext(), "omp.after.scan.bb");
+ ScanRedInfo->OMPBeforeScanBlock =
+ BasicBlock::Create(Fun->getContext(), "omp.before.scan.bb");
+ ScanRedInfo->OMPScanLoopExit =
+ BasicBlock::Create(Fun->getContext(), "omp.scan.loop.exit");
+}
CanonicalLoopInfo *OpenMPIRBuilder::createLoopSkeleton(
DebugLoc DL, Value *TripCount, Function *F, BasicBlock *PreInsertBefore,
BasicBlock *PostInsertBefore, const Twine &Name) {
@@ -4111,6 +4445,76 @@ OpenMPIRBuilder::createCanonicalLoop(const LocationDescription &Loc,
return CL;
}
+Expected<ScanInfo *> OpenMPIRBuilder::scanInfoInitialize() {
+ ScanInfos.emplace_front();
+ ScanInfo *Result = &ScanInfos.front();
+ return Result;
+}
+
+Expected<SmallVector<llvm::CanonicalLoopInfo *>>
+OpenMPIRBuilder::createCanonicalScanLoops(
+ const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB,
+ Value *Start, Value *Stop, Value *Step, bool IsSigned, bool InclusiveStop,
+ InsertPointTy ComputeIP, const Twine &Name, ScanInfo *ScanRedInfo) {
+ LocationDescription ComputeLoc =
+ ComputeIP.isSet() ? LocationDescription(ComputeIP, Loc.DL) : Loc;
+ updateToLocation(ComputeLoc);
+
+ SmallVector<CanonicalLoopInfo *> Result;
+
+ Value *TripCount = calculateCanonicalLoopTripCount(
+ ComputeLoc, Start, Stop, Step, IsSigned, InclusiveStop, Name);
+ ScanRedInfo->Span = TripCount;
+ ScanRedInfo->OMPScanInit = splitBB(Builder, true, "scan.init");
+ Builder.SetInsertPoint(ScanRedInfo->OMPScanInit);
+
+ auto BodyGen = [=](InsertPointTy CodeGenIP, Value *IV) {
+ Builder.restoreIP(CodeGenIP);
+ ScanRedInfo->IV = IV;
+ createScanBBs(ScanRedInfo);
+ BasicBlock *InputBlock = Builder.GetInsertBlock();
+ Instruction *Terminator = InputBlock->getTerminator();
+ assert(Terminator->getNumSuccessors() == 1);
+ BasicBlock *ContinueBlock = Terminator->getSuccessor(0);
+ Terminator->setSuccessor(0, ScanRedInfo->OMPScanDispatch);
+ emitBlock(ScanRedInfo->OMPBeforeScanBlock,
+ Builder.GetInsertBlock()->getParent());
+ Builder.CreateBr(ScanRedInfo->OMPScanLoopExit);
+ emitBlock(ScanRedInfo->OMPScanLoopExit,
+ Builder.GetInsertBlock()->getParent());
+ Builder.CreateBr(ContinueBlock);
+ Builder.SetInsertPoint(
+ ScanRedInfo->OMPBeforeScanBlock->getFirstInsertionPt());
+ return BodyGenCB(Builder.saveIP(), IV);
+ };
+
+ const auto &&InputLoopGen = [&]() -> Error {
+ Expected<CanonicalLoopInfo *> LoopInfo = createCanonicalLoop(
+ Builder.saveIP(), BodyGen, Start, Stop, Step, IsSigned, InclusiveStop,
+ ComputeIP, Name, true, ScanRedInfo);
+ if (!LoopInfo)
+ return LoopInfo.takeError();
+ Result.push_back(*LoopInfo);
+ Builder.restoreIP((*LoopInfo)->getAfterIP());
+ return Error::success();
+ };
+ const auto &&ScanLoopGen = [&](LocationDescription Loc) -> Error {
+ Expected<CanonicalLoopInfo *> LoopInfo =
+ createCanonicalLoop(Loc, BodyGen, Start, Stop, Step, IsSigned,
+ InclusiveStop, ComputeIP, Name, true, ScanRedInfo);
+ if (!LoopInfo)
+ return LoopInfo.takeError();
+ Result.push_back(*LoopInfo);
+ Builder.restoreIP((*LoopInfo)->getAfterIP());
+ ScanRedInfo->OMPScanFinish = Builder.GetInsertBlock();
+ return Error::success();
+ };
+ Error Err = emitScanBasedDirectiveIR(InputLoopGen, ScanLoopGen, ScanRedInfo);
+ if (Err)
+ return Err;
+ return Result;
+}
+
Value *OpenMPIRBuilder::calculateCanonicalLoopTripCount(
const LocationDescription &Loc, Value *Start, Value *Stop, Value *Step,
bool IsSigned, bool InclusiveStop, const Twine &Name) {
@@ -4174,7 +4578,8 @@ Value *OpenMPIRBuilder::calculateCanonicalLoopTripCount(
Expected<CanonicalLoopInfo *> OpenMPIRBuilder::createCanonicalLoop(
const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB,
Value *Start, Value *Stop, Value *Step, bool IsSigned, bool InclusiveStop,
- InsertPointTy ComputeIP, const Twine &Name) {
+ InsertPointTy ComputeIP, const Twine &Name, bool InScan,
+ ScanInfo *ScanRedInfo) {
LocationDescription ComputeLoc =
ComputeIP.isSet() ? LocationDescription(ComputeIP, Loc.DL) : Loc;
@@ -4185,6 +4590,8 @@ Expected<CanonicalLoopInfo *> OpenMPIRBuilder::createCanonicalLoop(
Builder.restoreIP(CodeGenIP);
Value *Span = Builder.CreateMul(IV, Step);
Value *IndVar = Builder.CreateAdd(Span, Start);
+ if (InScan)
+ ScanRedInfo->IV = IndVar;
return BodyGenCB(Builder.saveIP(), IndVar);
};
LocationDescription LoopLoc =
@@ -8956,7 +9363,8 @@ OpenMPIRBuilder::createAtomicWrite(const LocationDescription &Loc,
OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::createAtomicUpdate(
const LocationDescription &Loc, InsertPointTy AllocaIP, AtomicOpValue &X,
Value *Expr, AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
- AtomicUpdateCallbackTy &UpdateOp, bool IsXBinopExpr) {
+ AtomicUpdateCallbackTy &UpdateOp, bool IsXBinopExpr,
+ bool IsIgnoreDenormalMode, bool IsFineGrainedMemory, bool IsRemoteMemory) {
assert(!isConflictIP(Loc.IP, AllocaIP) && "IPs must not be ambiguous");
if (!updateToLocation(Loc))
return Loc.IP;
@@ -8974,9 +9382,9 @@ OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::createAtomicUpdate(
"OpenMP atomic does not support LT or GT operations");
});
- Expected<std::pair<Value *, Value *>> AtomicResult =
- emitAtomicUpdate(AllocaIP, X.Var, X.ElemTy, Expr, AO, RMWOp, UpdateOp,
- X.IsVolatile, IsXBinopExpr);
+ Expected<std::pair<Value *, Value *>> AtomicResult = emitAtomicUpdate(
+ AllocaIP, X.Var, X.ElemTy, Expr, AO, RMWOp, UpdateOp, X.IsVolatile,
+ IsXBinopExpr, IsIgnoreDenormalMode, IsFineGrainedMemory, IsRemoteMemory);
if (!AtomicResult)
return AtomicResult.takeError();
checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Update);
@@ -9023,7 +9431,8 @@ Value *OpenMPIRBuilder::emitRMWOpAsInstruction(Value *Src1, Value *Src2,
Expected<std::pair<Value *, Value *>> OpenMPIRBuilder::emitAtomicUpdate(
InsertPointTy AllocaIP, Value *X, Type *XElemTy, Value *Expr,
AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
- AtomicUpdateCallbackTy &UpdateOp, bool VolatileX, bool IsXBinopExpr) {
+ AtomicUpdateCallbackTy &UpdateOp, bool VolatileX, bool IsXBinopExpr,
+ bool IsIgnoreDenormalMode, bool IsFineGrainedMemory, bool IsRemoteMemory) {
// TODO: handle the case where XElemTy is not byte-sized or not a power of 2
// or a complex datatype.
bool emitRMWOp = false;
@@ -9046,7 +9455,20 @@ Expected<std::pair<Value *, Value *>> OpenMPIRBuilder::emitAtomicUpdate(
std::pair<Value *, Value *> Res;
if (emitRMWOp) {
- Res.first = Builder.CreateAtomicRMW(RMWOp, X, Expr, llvm::MaybeAlign(), AO);
+ AtomicRMWInst *RMWInst =
+ Builder.CreateAtomicRMW(RMWOp, X, Expr, llvm::MaybeAlign(), AO);
+ if (T.isAMDGPU()) {
+ if (IsIgnoreDenormalMode)
+ RMWInst->setMetadata("amdgpu.ignore.denormal.mode",
+ llvm::MDNode::get(Builder.getContext(), {}));
+ if (!IsFineGrainedMemory)
+ RMWInst->setMetadata("amdgpu.no.fine.grained.memory",
+ llvm::MDNode::get(Builder.getContext(), {}));
+ if (!IsRemoteMemory)
+ RMWInst->setMetadata("amdgpu.no.remote.memory",
+ llvm::MDNode::get(Builder.getContext(), {}));
+ }
+ Res.first = RMWInst;
// not needed except in case of postfix captures. Generate anyway for
// consistency with the else part. Will be removed with any DCE pass.
// AtomicRMWInst::Xchg does not have a corresponding instruction.
@@ -9178,7 +9600,8 @@ OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::createAtomicCapture(
const LocationDescription &Loc, InsertPointTy AllocaIP, AtomicOpValue &X,
AtomicOpValue &V, Value *Expr, AtomicOrdering AO,
AtomicRMWInst::BinOp RMWOp, AtomicUpdateCallbackTy &UpdateOp,
- bool UpdateExpr, bool IsPostfixUpdate, bool IsXBinopExpr) {
+ bool UpdateExpr, bool IsPostfixUpdate, bool IsXBinopExpr,
+ bool IsIgnoreDenormalMode, bool IsFineGrainedMemory, bool IsRemoteMemory) {
if (!updateToLocation(Loc))
return Loc.IP;
@@ -9197,9 +9620,9 @@ OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::createAtomicCapture(
// If UpdateExpr is 'x' updated with some `expr` not based on 'x',
// 'x' is simply atomically rewritten with 'expr'.
AtomicRMWInst::BinOp AtomicOp = (UpdateExpr ? RMWOp : AtomicRMWInst::Xchg);
- Expected<std::pair<Value *, Value *>> AtomicResult =
- emitAtomicUpdate(AllocaIP, X.Var, X.ElemTy, Expr, AO, AtomicOp, UpdateOp,
- X.IsVolatile, IsXBinopExpr);
+ Expected<std::pair<Value *, Value *>> AtomicResult = emitAtomicUpdate(
+ AllocaIP, X.Var, X.ElemTy, Expr, AO, AtomicOp, UpdateOp, X.IsVolatile,
+ IsXBinopExpr, IsIgnoreDenormalMode, IsFineGrainedMemory, IsRemoteMemory);
if (!AtomicResult)
return AtomicResult.takeError();
Value *CapturedVal =
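
For readers following the new OpenMPIRBuilder scan support above: emitScanReduction lowers the log-step inclusive scan its comments spell out (for (k = 0; k <= ceil(log2(n)); ++k) { for (i = n-1; i >= 2^k; --i) tmp[i] op= tmp[i - 2^k]; }). Below is a minimal, hypothetical C++ model of the computation the emitted IR performs on the per-reduction buffer; the names are illustrative only and not part of the patch, and Op is assumed to be associative.

#include <cmath>
#include <cstddef>
#include <vector>

// Sequential model of the scan the generated IR performs in-place on Buf.
// After the call, Buf[i] == Buf[0] op Buf[1] op ... op Buf[i].
template <typename T, typename BinaryOp>
void logStepInclusiveScan(std::vector<T> &Buf, BinaryOp Op) {
  std::size_t N = Buf.size();
  if (N < 2)
    return;
  unsigned Steps =
      static_cast<unsigned>(std::ceil(std::log2(static_cast<double>(N))));
  std::size_t Pow2K = 1; // pow2k, doubled after each outer step
  for (unsigned K = 0; K < Steps; ++K) {
    // tmp[i] op= tmp[i - pow2k] for i = n-1 down to pow2k.
    for (std::size_t I = N - 1; I >= Pow2K; --I)
      Buf[I] = Op(Buf[I - Pow2K], Buf[I]);
    Pow2K <<= 1;
  }
}

In the emitted IR this combine runs inside a masked region (filter value 0) followed by a barrier, so a single thread updates the shared buffer before the scan-phase loop reads it back.
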
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index 7159107..b91fd70 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -1311,14 +1311,15 @@ static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn,
}
break;
case 'l':
- if (Name.starts_with("lifetime.start") ||
- Name.starts_with("lifetime.end")) {
- // Unless remangling is required, do not upgrade the function declaration,
- // but do upgrade the calls.
- if (auto Result = llvm::Intrinsic::remangleIntrinsicFunction(F))
- NewFn = *Result;
- else
- NewFn = F;
+ if ((Name.starts_with("lifetime.start") ||
+ Name.starts_with("lifetime.end")) &&
+ F->arg_size() == 2) {
+ Intrinsic::ID IID = Name.starts_with("lifetime.start")
+ ? Intrinsic::lifetime_start
+ : Intrinsic::lifetime_end;
+ rename(F);
+ NewFn = Intrinsic::getOrInsertDeclaration(F->getParent(), IID,
+ F->getArg(0)->getType());
return true;
}
break;
@@ -5133,21 +5134,20 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end: {
- Value *Size = CI->getArgOperand(0);
- Value *Ptr = CI->getArgOperand(1);
- if (isa<AllocaInst>(Ptr)) {
+ if (CI->arg_size() != 2) {
DefaultCase();
return;
}
+ Value *Ptr = CI->getArgOperand(1);
// Try to strip pointer casts, such that the lifetime works on an alloca.
Ptr = Ptr->stripPointerCasts();
if (isa<AllocaInst>(Ptr)) {
// Don't use NewFn, as we might have looked through an addrspacecast.
if (NewFn->getIntrinsicID() == Intrinsic::lifetime_start)
- NewCall = Builder.CreateLifetimeStart(Ptr, cast<ConstantInt>(Size));
+ NewCall = Builder.CreateLifetimeStart(Ptr);
else
- NewCall = Builder.CreateLifetimeEnd(Ptr, cast<ConstantInt>(Size));
+ NewCall = Builder.CreateLifetimeEnd(Ptr);
break;
}
diff --git a/llvm/lib/IR/ConstantFold.cpp b/llvm/lib/IR/ConstantFold.cpp
index d4ad21e..6b202ba 100644
--- a/llvm/lib/IR/ConstantFold.cpp
+++ b/llvm/lib/IR/ConstantFold.cpp
@@ -254,6 +254,7 @@ Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V,
return FoldBitCast(V, DestTy);
case Instruction::AddrSpaceCast:
case Instruction::IntToPtr:
+ case Instruction::PtrToAddr:
case Instruction::PtrToInt:
return nullptr;
}
diff --git a/llvm/lib/IR/ConstantRange.cpp b/llvm/lib/IR/ConstantRange.cpp
index e09c139..2fcdbcc6 100644
--- a/llvm/lib/IR/ConstantRange.cpp
+++ b/llvm/lib/IR/ConstantRange.cpp
@@ -829,6 +829,7 @@ ConstantRange ConstantRange::castOp(Instruction::CastOps CastOp,
case Instruction::FPTrunc:
case Instruction::FPExt:
case Instruction::IntToPtr:
+ case Instruction::PtrToAddr:
case Instruction::PtrToInt:
case Instruction::AddrSpaceCast:
// Conservatively return getFull set.
diff --git a/llvm/lib/IR/Constants.cpp b/llvm/lib/IR/Constants.cpp
index a3c725b..c7e3113a 100644
--- a/llvm/lib/IR/Constants.cpp
+++ b/llvm/lib/IR/Constants.cpp
@@ -1567,6 +1567,7 @@ Constant *ConstantExpr::getWithOperands(ArrayRef<Constant *> Ops, Type *Ty,
case Instruction::SIToFP:
case Instruction::FPToUI:
case Instruction::FPToSI:
+ case Instruction::PtrToAddr:
case Instruction::PtrToInt:
case Instruction::IntToPtr:
case Instruction::BitCast:
@@ -2223,6 +2224,8 @@ Constant *ConstantExpr::getCast(unsigned oc, Constant *C, Type *Ty,
llvm_unreachable("Invalid cast opcode");
case Instruction::Trunc:
return getTrunc(C, Ty, OnlyIfReduced);
+ case Instruction::PtrToAddr:
+ return getPtrToAddr(C, Ty, OnlyIfReduced);
case Instruction::PtrToInt:
return getPtrToInt(C, Ty, OnlyIfReduced);
case Instruction::IntToPtr:
@@ -2280,6 +2283,20 @@ Constant *ConstantExpr::getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced) {
return getFoldedCast(Instruction::Trunc, C, Ty, OnlyIfReduced);
}
+Constant *ConstantExpr::getPtrToAddr(Constant *C, Type *DstTy,
+ bool OnlyIfReduced) {
+ assert(C->getType()->isPtrOrPtrVectorTy() &&
+ "PtrToAddr source must be pointer or pointer vector");
+ assert(DstTy->isIntOrIntVectorTy() &&
+ "PtrToAddr destination must be integer or integer vector");
+ assert(isa<VectorType>(C->getType()) == isa<VectorType>(DstTy));
+ if (isa<VectorType>(C->getType()))
+ assert(cast<VectorType>(C->getType())->getElementCount() ==
+ cast<VectorType>(DstTy)->getElementCount() &&
+ "Invalid cast between a different number of vector elements");
+ return getFoldedCast(Instruction::PtrToAddr, C, DstTy, OnlyIfReduced);
+}
+
Constant *ConstantExpr::getPtrToInt(Constant *C, Type *DstTy,
bool OnlyIfReduced) {
assert(C->getType()->isPtrOrPtrVectorTy() &&
@@ -2435,6 +2452,7 @@ bool ConstantExpr::isDesirableCastOp(unsigned Opcode) {
case Instruction::FPToSI:
return false;
case Instruction::Trunc:
+ case Instruction::PtrToAddr:
case Instruction::PtrToInt:
case Instruction::IntToPtr:
case Instruction::BitCast:
@@ -2457,6 +2475,7 @@ bool ConstantExpr::isSupportedCastOp(unsigned Opcode) {
case Instruction::FPToSI:
return false;
case Instruction::Trunc:
+ case Instruction::PtrToAddr:
case Instruction::PtrToInt:
case Instruction::IntToPtr:
case Instruction::BitCast:
@@ -3401,6 +3420,7 @@ Instruction *ConstantExpr::getAsInstruction() const {
switch (getOpcode()) {
case Instruction::Trunc:
+ case Instruction::PtrToAddr:
case Instruction::PtrToInt:
case Instruction::IntToPtr:
case Instruction::BitCast:
diff --git a/llvm/lib/IR/DebugInfoMetadata.cpp b/llvm/lib/IR/DebugInfoMetadata.cpp
index f1d4549..96065ed 100644
--- a/llvm/lib/IR/DebugInfoMetadata.cpp
+++ b/llvm/lib/IR/DebugInfoMetadata.cpp
@@ -57,15 +57,9 @@ DebugVariable::DebugVariable(const DbgVariableRecord *DVR)
DILocation::DILocation(LLVMContext &C, StorageType Storage, unsigned Line,
unsigned Column, uint64_t AtomGroup, uint8_t AtomRank,
ArrayRef<Metadata *> MDs, bool ImplicitCode)
- : MDNode(C, DILocationKind, Storage, MDs)
-#ifdef EXPERIMENTAL_KEY_INSTRUCTIONS
- ,
- AtomGroup(AtomGroup), AtomRank(AtomRank)
-#endif
-{
-#ifdef EXPERIMENTAL_KEY_INSTRUCTIONS
+ : MDNode(C, DILocationKind, Storage, MDs), AtomGroup(AtomGroup),
+ AtomRank(AtomRank) {
assert(AtomRank <= 7 && "AtomRank number should fit in 3 bits");
-#endif
if (AtomGroup)
C.updateDILocationAtomGroupWaterline(AtomGroup + 1);
diff --git a/llvm/lib/IR/Globals.cpp b/llvm/lib/IR/Globals.cpp
index 7b799c7..11d33e2 100644
--- a/llvm/lib/IR/Globals.cpp
+++ b/llvm/lib/IR/Globals.cpp
@@ -404,6 +404,7 @@ findBaseObject(const Constant *C, DenseSet<const GlobalAlias *> &Aliases,
return findBaseObject(CE->getOperand(0), Aliases, Op);
}
case Instruction::IntToPtr:
+ case Instruction::PtrToAddr:
case Instruction::PtrToInt:
case Instruction::BitCast:
case Instruction::GetElementPtr:
diff --git a/llvm/lib/IR/IRBuilder.cpp b/llvm/lib/IR/IRBuilder.cpp
index 49c6dc7..614c3a9 100644
--- a/llvm/lib/IR/IRBuilder.cpp
+++ b/llvm/lib/IR/IRBuilder.cpp
@@ -411,28 +411,16 @@ CallInst *IRBuilderBase::CreateFPMinimumReduce(Value *Src) {
return getReductionIntrinsic(Intrinsic::vector_reduce_fminimum, Src);
}
-CallInst *IRBuilderBase::CreateLifetimeStart(Value *Ptr, ConstantInt *Size) {
+CallInst *IRBuilderBase::CreateLifetimeStart(Value *Ptr) {
assert(isa<PointerType>(Ptr->getType()) &&
"lifetime.start only applies to pointers.");
- if (!Size)
- Size = getInt64(-1);
- else
- assert(Size->getType() == getInt64Ty() &&
- "lifetime.start requires the size to be an i64");
- Value *Ops[] = { Size, Ptr };
- return CreateIntrinsic(Intrinsic::lifetime_start, {Ptr->getType()}, Ops);
+ return CreateIntrinsic(Intrinsic::lifetime_start, {Ptr->getType()}, {Ptr});
}
-CallInst *IRBuilderBase::CreateLifetimeEnd(Value *Ptr, ConstantInt *Size) {
+CallInst *IRBuilderBase::CreateLifetimeEnd(Value *Ptr) {
assert(isa<PointerType>(Ptr->getType()) &&
"lifetime.end only applies to pointers.");
- if (!Size)
- Size = getInt64(-1);
- else
- assert(Size->getType() == getInt64Ty() &&
- "lifetime.end requires the size to be an i64");
- Value *Ops[] = { Size, Ptr };
- return CreateIntrinsic(Intrinsic::lifetime_end, {Ptr->getType()}, Ops);
+ return CreateIntrinsic(Intrinsic::lifetime_end, {Ptr->getType()}, {Ptr});
}
CallInst *IRBuilderBase::CreateInvariantStart(Value *Ptr, ConstantInt *Size) {
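
With the size operand gone from llvm.lifetime.start/end, the IRBuilder helpers above now take only the pointer. A minimal, hypothetical caller updated for the new signatures is sketched below; the function and variable names are illustrative, and only the CreateLifetimeStart/CreateLifetimeEnd signatures come from this patch.

#include "llvm/IR/IRBuilder.h"

// Before: Builder.CreateLifetimeStart(Slot, Builder.getInt64(Size));
// After:  the lifetime extent is implied by the alloca itself.
static void emitScopedSlot(llvm::IRBuilder<> &Builder, llvm::Type *Ty) {
  llvm::AllocaInst *Slot = Builder.CreateAlloca(Ty, nullptr, "slot");
  Builder.CreateLifetimeStart(Slot); // no ConstantInt *Size argument
  // ... use Slot ...
  Builder.CreateLifetimeEnd(Slot);
}
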
diff --git a/llvm/lib/IR/Instruction.cpp b/llvm/lib/IR/Instruction.cpp
index b7cd12a..4540268 100644
--- a/llvm/lib/IR/Instruction.cpp
+++ b/llvm/lib/IR/Instruction.cpp
@@ -817,6 +817,7 @@ const char *Instruction::getOpcodeName(unsigned OpCode) {
case UIToFP: return "uitofp";
case SIToFP: return "sitofp";
case IntToPtr: return "inttoptr";
+ case PtrToAddr: return "ptrtoaddr";
case PtrToInt: return "ptrtoint";
case BitCast: return "bitcast";
case AddrSpaceCast: return "addrspacecast";
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
index b896382..a1751c0 100644
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -2798,6 +2798,7 @@ bool CastInst::isNoopCast(Instruction::CastOps Opcode,
return false;
case Instruction::BitCast:
return true; // BitCast never modifies bits.
+ case Instruction::PtrToAddr:
case Instruction::PtrToInt:
return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
DestTy->getScalarSizeInBits();
@@ -2855,26 +2856,29 @@ unsigned CastInst::isEliminableCastPair(
// same reason.
const unsigned numCastOps =
Instruction::CastOpsEnd - Instruction::CastOpsBegin;
+ // clang-format off
static const uint8_t CastResults[numCastOps][numCastOps] = {
- // T F F U S F F P I B A -+
- // R Z S P P I I T P 2 N T S |
- // U E E 2 2 2 2 R E I T C C +- secondOp
- // N X X U S F F N X N 2 V V |
- // C T T I I P P C T T P T T -+
- { 1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // Trunc -+
- { 8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0}, // ZExt |
- { 8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0}, // SExt |
- { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToUI |
- { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToSI |
- { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // UIToFP +- firstOp
- { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // SIToFP |
- { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // FPTrunc |
- { 99,99,99, 2, 2,99,99, 8, 2,99,99, 4, 0}, // FPExt |
- { 1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0}, // PtrToInt |
- { 99,99,99,99,99,99,99,99,99,11,99,15, 0}, // IntToPtr |
- { 5, 5, 5, 0, 0, 5, 5, 0, 0,16, 5, 1,14}, // BitCast |
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
+ // T F F U S F F P P I B A -+
+ // R Z S P P I I T P 2 2 N T S |
+ // U E E 2 2 2 2 R E I A T C C +- secondOp
+ // N X X U S F F N X N D 2 V V |
+ // C T T I I P P C T T R P T T -+
+ { 1, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // Trunc -+
+ { 8, 1, 9,99,99, 2,17,99,99,99,99, 2, 3, 0}, // ZExt |
+ { 8, 0, 1,99,99, 0, 2,99,99,99,99, 0, 3, 0}, // SExt |
+ { 0, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // FPToUI |
+ { 0, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // FPToSI |
+ { 99,99,99, 0, 0,99,99, 0, 0,99,99,99, 4, 0}, // UIToFP +- firstOp
+ { 99,99,99, 0, 0,99,99, 0, 0,99,99,99, 4, 0}, // SIToFP |
+ { 99,99,99, 0, 0,99,99, 0, 0,99,99,99, 4, 0}, // FPTrunc |
+ { 99,99,99, 2, 2,99,99, 8, 2,99,99,99, 4, 0}, // FPExt |
+ { 1, 0, 0,99,99, 0, 0,99,99,99,99, 7, 3, 0}, // PtrToInt |
+ { 1, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // PtrToAddr |
+ { 99,99,99,99,99,99,99,99,99,11,99,99,15, 0}, // IntToPtr |
+ { 5, 5, 5, 0, 0, 5, 5, 0, 0,16,16, 5, 1,14}, // BitCast |
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
};
+ // clang-format on
// TODO: This logic could be encoded into the table above and handled in the
// switch below.
@@ -3046,6 +3050,7 @@ CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
case SIToFP: return new SIToFPInst (S, Ty, Name, InsertBefore);
case FPToUI: return new FPToUIInst (S, Ty, Name, InsertBefore);
case FPToSI: return new FPToSIInst (S, Ty, Name, InsertBefore);
+ case PtrToAddr: return new PtrToAddrInst (S, Ty, Name, InsertBefore);
case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertBefore);
case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertBefore);
case BitCast:
@@ -3347,6 +3352,7 @@ CastInst::castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy) {
case Instruction::FPToSI:
return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
SrcEC == DstEC;
+ case Instruction::PtrToAddr:
case Instruction::PtrToInt:
if (SrcEC != DstEC)
return false;
@@ -3460,6 +3466,12 @@ PtrToIntInst::PtrToIntInst(Value *S, Type *Ty, const Twine &Name,
assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
}
+PtrToAddrInst::PtrToAddrInst(Value *S, Type *Ty, const Twine &Name,
+ InsertPosition InsertBefore)
+ : CastInst(Ty, PtrToAddr, S, Name, InsertBefore) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToAddr");
+}
+
IntToPtrInst::IntToPtrInst(Value *S, Type *Ty, const Twine &Name,
InsertPosition InsertBefore)
: CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
@@ -4427,6 +4439,10 @@ PtrToIntInst *PtrToIntInst::cloneImpl() const {
return new PtrToIntInst(getOperand(0), getType());
}
+PtrToAddrInst *PtrToAddrInst::cloneImpl() const {
+ return new PtrToAddrInst(getOperand(0), getType());
+}
+
IntToPtrInst *IntToPtrInst::cloneImpl() const {
return new IntToPtrInst(getOperand(0), getType());
}
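
The new ptrtoaddr opcode is wired through the usual CastInst machinery above. As a hypothetical illustration (not part of the patch), a pass could create one through the generic factory; this sketch assumes the existing CastInst::Create overload and the DataLayout::getAddressType helper referenced in the Verifier changes below.

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"

// Illustrative helper: cast Ptr to its address-width integer type.
static llvm::Value *emitPtrToAddr(llvm::Value *Ptr, const llvm::DataLayout &DL,
                                  llvm::InsertPosition InsertBefore) {
  llvm::Type *AddrTy = DL.getAddressType(Ptr->getType());
  return llvm::CastInst::Create(llvm::Instruction::PtrToAddr, Ptr, AddrTy,
                                "addr", InsertBefore);
}
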
diff --git a/llvm/lib/IR/LLVMContextImpl.h b/llvm/lib/IR/LLVMContextImpl.h
index aa2a60e..e03f993 100644
--- a/llvm/lib/IR/LLVMContextImpl.h
+++ b/llvm/lib/IR/LLVMContextImpl.h
@@ -312,10 +312,8 @@ template <> struct MDNodeKeyImpl<MDTuple> : MDNodeOpsKey {
template <> struct MDNodeKeyImpl<DILocation> {
Metadata *Scope;
Metadata *InlinedAt;
-#ifdef EXPERIMENTAL_KEY_INSTRUCTIONS
uint64_t AtomGroup : 61;
uint64_t AtomRank : 3;
-#endif
unsigned Line;
uint16_t Column;
bool ImplicitCode;
@@ -323,36 +321,24 @@ template <> struct MDNodeKeyImpl<DILocation> {
MDNodeKeyImpl(unsigned Line, uint16_t Column, Metadata *Scope,
Metadata *InlinedAt, bool ImplicitCode, uint64_t AtomGroup,
uint8_t AtomRank)
- : Scope(Scope), InlinedAt(InlinedAt),
-#ifdef EXPERIMENTAL_KEY_INSTRUCTIONS
- AtomGroup(AtomGroup), AtomRank(AtomRank),
-#endif
- Line(Line), Column(Column), ImplicitCode(ImplicitCode) {
- }
+ : Scope(Scope), InlinedAt(InlinedAt), AtomGroup(AtomGroup),
+ AtomRank(AtomRank), Line(Line), Column(Column),
+ ImplicitCode(ImplicitCode) {}
MDNodeKeyImpl(const DILocation *L)
: Scope(L->getRawScope()), InlinedAt(L->getRawInlinedAt()),
-#ifdef EXPERIMENTAL_KEY_INSTRUCTIONS
AtomGroup(L->getAtomGroup()), AtomRank(L->getAtomRank()),
-#endif
Line(L->getLine()), Column(L->getColumn()),
- ImplicitCode(L->isImplicitCode()) {
- }
+ ImplicitCode(L->isImplicitCode()) {}
bool isKeyOf(const DILocation *RHS) const {
return Line == RHS->getLine() && Column == RHS->getColumn() &&
Scope == RHS->getRawScope() && InlinedAt == RHS->getRawInlinedAt() &&
- ImplicitCode == RHS->isImplicitCode()
-#ifdef EXPERIMENTAL_KEY_INSTRUCTIONS
- && AtomGroup == RHS->getAtomGroup() &&
- AtomRank == RHS->getAtomRank();
-#else
- ;
-#endif
+ ImplicitCode == RHS->isImplicitCode() &&
+ AtomGroup == RHS->getAtomGroup() && AtomRank == RHS->getAtomRank();
}
unsigned getHashValue() const {
-#ifdef EXPERIMENTAL_KEY_INSTRUCTIONS
// Hashing AtomGroup and AtomRank substantially impacts performance whether
// Key Instructions is enabled or not. We can't detect whether it's enabled
// here cheaply; avoiding hashing zero values is a good approximation. This
@@ -363,7 +349,6 @@ template <> struct MDNodeKeyImpl<DILocation> {
if (AtomGroup || AtomRank)
return hash_combine(Line, Column, Scope, InlinedAt, ImplicitCode,
AtomGroup, (uint8_t)AtomRank);
-#endif
return hash_combine(Line, Column, Scope, InlinedAt, ImplicitCode);
}
};
diff --git a/llvm/lib/IR/Value.cpp b/llvm/lib/IR/Value.cpp
index 129ca4a..5928c89 100644
--- a/llvm/lib/IR/Value.cpp
+++ b/llvm/lib/IR/Value.cpp
@@ -747,34 +747,28 @@ const Value *Value::stripAndAccumulateConstantOffsets(
// means when we construct GEPOffset, we need to use the size
// of GEP's pointer type rather than the size of the original
// pointer type.
- unsigned CurBitWidth = DL.getIndexTypeSizeInBits(V->getType());
- if (CurBitWidth == BitWidth) {
- if (!GEP->accumulateConstantOffset(DL, Offset, ExternalAnalysis))
- return V;
- } else {
- APInt GEPOffset(CurBitWidth, 0);
- if (!GEP->accumulateConstantOffset(DL, GEPOffset, ExternalAnalysis))
- return V;
+ APInt GEPOffset(DL.getIndexTypeSizeInBits(V->getType()), 0);
+ if (!GEP->accumulateConstantOffset(DL, GEPOffset, ExternalAnalysis))
+ return V;
- // Stop traversal if the pointer offset wouldn't fit in the bit-width
- // provided by the Offset argument. This can happen due to AddrSpaceCast
- // stripping.
- if (GEPOffset.getSignificantBits() > BitWidth)
- return V;
+ // Stop traversal if the pointer offset wouldn't fit in the bit-width
+ // provided by the Offset argument. This can happen due to AddrSpaceCast
+ // stripping.
+ if (GEPOffset.getSignificantBits() > BitWidth)
+ return V;
- // External Analysis can return a result higher/lower than the value
- // represents. We need to detect overflow/underflow.
- APInt GEPOffsetST = GEPOffset.sextOrTrunc(BitWidth);
- if (!ExternalAnalysis) {
- Offset += GEPOffsetST;
- } else {
- bool Overflow = false;
- APInt OldOffset = Offset;
- Offset = Offset.sadd_ov(GEPOffsetST, Overflow);
- if (Overflow) {
- Offset = OldOffset;
- return V;
- }
+ // External Analysis can return a result higher/lower than the value
+ // represents. We need to detect overflow/underflow.
+ APInt GEPOffsetST = GEPOffset.sextOrTrunc(BitWidth);
+ if (!ExternalAnalysis) {
+ Offset += GEPOffsetST;
+ } else {
+ bool Overflow = false;
+ APInt OldOffset = Offset;
+ Offset = Offset.sadd_ov(GEPOffsetST, Overflow);
+ if (Overflow) {
+ Offset = OldOffset;
+ return V;
}
}
V = GEP->getPointerOperand();
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index ca3f148..1d3c379 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -566,6 +566,8 @@ private:
void visitUIToFPInst(UIToFPInst &I);
void visitSIToFPInst(SIToFPInst &I);
void visitIntToPtrInst(IntToPtrInst &I);
+ void checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V);
+ void visitPtrToAddrInst(PtrToAddrInst &I);
void visitPtrToIntInst(PtrToIntInst &I);
void visitBitCastInst(BitCastInst &I);
void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
@@ -834,6 +836,7 @@ void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
&GV);
Check(GV.getInitializer()->getType()->isSized(),
"Global variable initializer must be sized", &GV);
+ visitConstantExprsRecursively(GV.getInitializer());
// If the global has common linkage, it must have a zero initializer and
// cannot be constant.
if (GV.hasCommonLinkage()) {
@@ -2610,6 +2613,8 @@ void Verifier::visitConstantExpr(const ConstantExpr *CE) {
Check(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
CE->getType()),
"Invalid bitcast", CE);
+ else if (CE->getOpcode() == Instruction::PtrToAddr)
+ checkPtrToAddr(CE->getOperand(0)->getType(), CE->getType(), *CE);
}
void Verifier::visitConstantPtrAuth(const ConstantPtrAuth *CPA) {
@@ -3532,6 +3537,28 @@ void Verifier::visitFPToSIInst(FPToSIInst &I) {
visitInstruction(I);
}
+void Verifier::checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V) {
+ Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToAddr source must be pointer", V);
+ Check(DestTy->isIntOrIntVectorTy(), "PtrToAddr result must be integral", V);
+ Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToAddr type mismatch",
+ V);
+
+ if (SrcTy->isVectorTy()) {
+ auto *VSrc = cast<VectorType>(SrcTy);
+ auto *VDest = cast<VectorType>(DestTy);
+ Check(VSrc->getElementCount() == VDest->getElementCount(),
+ "PtrToAddr vector length mismatch", V);
+ }
+
+ Type *AddrTy = DL.getAddressType(SrcTy);
+ Check(AddrTy == DestTy, "PtrToAddr result must be address width", V);
+}
+
+void Verifier::visitPtrToAddrInst(PtrToAddrInst &I) {
+ checkPtrToAddr(I.getOperand(0)->getType(), I.getType(), I);
+ visitInstruction(I);
+}
+
void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
// Get the source and destination types
Type *SrcTy = I.getOperand(0)->getType();
@@ -3547,7 +3574,7 @@ void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
auto *VSrc = cast<VectorType>(SrcTy);
auto *VDest = cast<VectorType>(DestTy);
Check(VSrc->getElementCount() == VDest->getElementCount(),
- "PtrToInt Vector width mismatch", &I);
+ "PtrToInt Vector length mismatch", &I);
}
visitInstruction(I);
@@ -3567,7 +3594,7 @@ void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
auto *VSrc = cast<VectorType>(SrcTy);
auto *VDest = cast<VectorType>(DestTy);
Check(VSrc->getElementCount() == VDest->getElementCount(),
- "IntToPtr Vector width mismatch", &I);
+ "IntToPtr Vector length mismatch", &I);
}
visitInstruction(I);
}
@@ -6770,7 +6797,7 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
}
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end: {
- Value *Ptr = Call.getArgOperand(1);
+ Value *Ptr = Call.getArgOperand(0);
Check(isa<AllocaInst>(Ptr) || isa<PoisonValue>(Ptr),
"llvm.lifetime.start/end can only be used on alloca or poison",
&Call);
diff --git a/llvm/lib/MC/MCObjectStreamer.cpp b/llvm/lib/MC/MCObjectStreamer.cpp
index 8c27958..d0c6144 100644
--- a/llvm/lib/MC/MCObjectStreamer.cpp
+++ b/llvm/lib/MC/MCObjectStreamer.cpp
@@ -443,7 +443,7 @@ void MCObjectStreamer::emitInstToData(const MCInst &Inst,
// MCAssembler::relaxAlign.
auto *Sec = F->getParent();
if (!Sec->isLinkerRelaxable())
- Sec->setLinkerRelaxable();
+ Sec->setFirstLinkerRelaxable(F->getLayoutOrder());
// Do not add data after a linker-relaxable instruction. The difference
// between a new label and a label at or before the linker-relaxable
// instruction cannot be resolved at assemble-time.
diff --git a/llvm/lib/MC/MCSection.cpp b/llvm/lib/MC/MCSection.cpp
index 27ca131..9ed6fd1 100644
--- a/llvm/lib/MC/MCSection.cpp
+++ b/llvm/lib/MC/MCSection.cpp
@@ -20,7 +20,7 @@ using namespace llvm;
MCSection::MCSection(StringRef Name, bool IsText, bool IsBss, MCSymbol *Begin)
: Begin(Begin), HasInstructions(false), IsRegistered(false), IsText(IsText),
- IsBss(IsBss), LinkerRelaxable(false), Name(Name) {
+ IsBss(IsBss), Name(Name) {
DummyFragment.setParent(this);
}
diff --git a/llvm/lib/ProfileData/InstrProfWriter.cpp b/llvm/lib/ProfileData/InstrProfWriter.cpp
index 7ca26aa..df807fc 100644
--- a/llvm/lib/ProfileData/InstrProfWriter.cpp
+++ b/llvm/lib/ProfileData/InstrProfWriter.cpp
@@ -331,61 +331,34 @@ void InstrProfWriter::addDataAccessProfData(
DataAccessProfileData = std::move(DataAccessProfDataIn);
}
-void InstrProfWriter::addTemporalProfileTrace(TemporalProfTraceTy Trace) {
- assert(Trace.FunctionNameRefs.size() <= MaxTemporalProfTraceLength);
- assert(!Trace.FunctionNameRefs.empty());
- if (TemporalProfTraceStreamSize < TemporalProfTraceReservoirSize) {
- // Simply append the trace if we have not yet hit our reservoir size limit.
- TemporalProfTraces.push_back(std::move(Trace));
- } else {
- // Otherwise, replace a random trace in the stream.
- std::uniform_int_distribution<uint64_t> Distribution(
- 0, TemporalProfTraceStreamSize);
- uint64_t RandomIndex = Distribution(RNG);
- if (RandomIndex < TemporalProfTraces.size())
- TemporalProfTraces[RandomIndex] = std::move(Trace);
- }
- ++TemporalProfTraceStreamSize;
-}
-
void InstrProfWriter::addTemporalProfileTraces(
SmallVectorImpl<TemporalProfTraceTy> &SrcTraces, uint64_t SrcStreamSize) {
+ if (TemporalProfTraces.size() > TemporalProfTraceReservoirSize)
+ TemporalProfTraces.truncate(TemporalProfTraceReservoirSize);
for (auto &Trace : SrcTraces)
if (Trace.FunctionNameRefs.size() > MaxTemporalProfTraceLength)
Trace.FunctionNameRefs.resize(MaxTemporalProfTraceLength);
llvm::erase_if(SrcTraces, [](auto &T) { return T.FunctionNameRefs.empty(); });
- // Assume that the source has the same reservoir size as the destination to
- // avoid needing to record it in the indexed profile format.
- bool IsDestSampled =
- (TemporalProfTraceStreamSize > TemporalProfTraceReservoirSize);
- bool IsSrcSampled = (SrcStreamSize > TemporalProfTraceReservoirSize);
- if (!IsDestSampled && IsSrcSampled) {
- // If one of the traces are sampled, ensure that it belongs to Dest.
- std::swap(TemporalProfTraces, SrcTraces);
- std::swap(TemporalProfTraceStreamSize, SrcStreamSize);
- std::swap(IsDestSampled, IsSrcSampled);
- }
- if (!IsSrcSampled) {
- // If the source stream is not sampled, we add each source trace normally.
- for (auto &Trace : SrcTraces)
- addTemporalProfileTrace(std::move(Trace));
+ // If there are no source traces, it is probably because
+ // --temporal-profile-max-trace-length=0 was set to deliberately remove all
+ // traces. In that case, we do not want to increase the stream size.
+ if (SrcTraces.empty())
return;
- }
- // Otherwise, we find the traces that would have been removed if we added
- // the whole source stream.
- SmallSetVector<uint64_t, 8> IndicesToReplace;
- for (uint64_t I = 0; I < SrcStreamSize; I++) {
- std::uniform_int_distribution<uint64_t> Distribution(
- 0, TemporalProfTraceStreamSize);
+ // Add traces until our reservoir is full or we run out of source traces
+ auto SrcTraceIt = SrcTraces.begin();
+ while (TemporalProfTraces.size() < TemporalProfTraceReservoirSize &&
+ SrcTraceIt < SrcTraces.end())
+ TemporalProfTraces.push_back(*SrcTraceIt++);
+ // Our reservoir is full, so we need to sample the source stream.
+ llvm::shuffle(SrcTraceIt, SrcTraces.end(), RNG);
+ for (uint64_t I = TemporalProfTraces.size();
+ I < SrcStreamSize && SrcTraceIt < SrcTraces.end(); I++) {
+ std::uniform_int_distribution<uint64_t> Distribution(0, I);
uint64_t RandomIndex = Distribution(RNG);
if (RandomIndex < TemporalProfTraces.size())
- IndicesToReplace.insert(RandomIndex);
- ++TemporalProfTraceStreamSize;
+ TemporalProfTraces[RandomIndex] = *SrcTraceIt++;
}
- // Then we insert a random sample of the source traces.
- llvm::shuffle(SrcTraces.begin(), SrcTraces.end(), RNG);
- for (const auto &[Index, Trace] : llvm::zip(IndicesToReplace, SrcTraces))
- TemporalProfTraces[Index] = std::move(Trace);
+ TemporalProfTraceStreamSize += SrcStreamSize;
}
void InstrProfWriter::mergeRecordsFromWriter(InstrProfWriter &&IPW,
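
The rewritten addTemporalProfileTraces above first fills the reservoir directly and then replaces random slots with probability reservoir-size / stream-index, which is the classic reservoir-sampling recipe. A generic, hypothetical C++ sketch of that shape follows; it is a single-stream illustration rather than a line-for-line model of the trace-merging code, and all names are illustrative.

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <random>
#include <vector>

template <typename T>
void reservoirMerge(std::vector<T> &Reservoir, std::vector<T> Src,
                    std::size_t Capacity, std::mt19937_64 &RNG) {
  auto It = Src.begin();
  // Phase 1: append until the reservoir reaches its capacity.
  while (Reservoir.size() < Capacity && It != Src.end())
    Reservoir.push_back(*It++);
  // Phase 2: element I of the stream replaces a random slot with probability
  // Capacity / (I + 1), so every element is retained with equal probability.
  std::shuffle(It, Src.end(), RNG);
  for (std::uint64_t I = Reservoir.size(); It != Src.end(); ++I, ++It) {
    std::uniform_int_distribution<std::uint64_t> Dist(0, I);
    std::uint64_t Idx = Dist(RNG);
    if (Idx < Reservoir.size())
      Reservoir[Idx] = *It;
  }
}
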
diff --git a/llvm/lib/SandboxIR/Context.cpp b/llvm/lib/SandboxIR/Context.cpp
index fe34037..70ac68a 100644
--- a/llvm/lib/SandboxIR/Context.cpp
+++ b/llvm/lib/SandboxIR/Context.cpp
@@ -256,6 +256,7 @@ Value *Context::getOrCreateValueInternal(llvm::Value *LLVMV, llvm::User *U) {
case llvm::Instruction::FPToUI:
case llvm::Instruction::FPToSI:
case llvm::Instruction::FPExt:
+ case llvm::Instruction::PtrToAddr:
case llvm::Instruction::PtrToInt:
case llvm::Instruction::IntToPtr:
case llvm::Instruction::SIToFP:
diff --git a/llvm/lib/SandboxIR/Instruction.cpp b/llvm/lib/SandboxIR/Instruction.cpp
index 956047c..1a81d18 100644
--- a/llvm/lib/SandboxIR/Instruction.cpp
+++ b/llvm/lib/SandboxIR/Instruction.cpp
@@ -1007,6 +1007,9 @@ static llvm::Instruction::CastOps getLLVMCastOp(Instruction::Opcode Opc) {
return static_cast<llvm::Instruction::CastOps>(llvm::Instruction::FPToSI);
case Instruction::Opcode::FPExt:
return static_cast<llvm::Instruction::CastOps>(llvm::Instruction::FPExt);
+ case Instruction::Opcode::PtrToAddr:
+ return static_cast<llvm::Instruction::CastOps>(
+ llvm::Instruction::PtrToAddr);
case Instruction::Opcode::PtrToInt:
return static_cast<llvm::Instruction::CastOps>(llvm::Instruction::PtrToInt);
case Instruction::Opcode::IntToPtr:
diff --git a/llvm/lib/Support/SmallPtrSet.cpp b/llvm/lib/Support/SmallPtrSet.cpp
index 83143a7..0c22697 100644
--- a/llvm/lib/Support/SmallPtrSet.cpp
+++ b/llvm/lib/Support/SmallPtrSet.cpp
@@ -110,8 +110,7 @@ const void *const *SmallPtrSetImplBase::FindBucketFor(const void *Ptr) const {
/// Grow - Allocate a larger backing store for the buckets and move it over.
///
void SmallPtrSetImplBase::Grow(unsigned NewSize) {
- const void **OldBuckets = CurArray;
- const void **OldEnd = EndPointer();
+ auto OldBuckets = buckets();
bool WasSmall = isSmall();
// Install the new array. Clear all the buckets to empty.
@@ -123,15 +122,14 @@ void SmallPtrSetImplBase::Grow(unsigned NewSize) {
memset(CurArray, -1, NewSize*sizeof(void*));
// Copy over all valid entries.
- for (const void **BucketPtr = OldBuckets; BucketPtr != OldEnd; ++BucketPtr) {
+ for (const void *&Bucket : OldBuckets) {
// Copy over the element if it is valid.
- const void *Elt = *BucketPtr;
- if (Elt != getTombstoneMarker() && Elt != getEmptyMarker())
- *const_cast<void**>(FindBucketFor(Elt)) = const_cast<void*>(Elt);
+ if (Bucket != getTombstoneMarker() && Bucket != getEmptyMarker())
+ *const_cast<void **>(FindBucketFor(Bucket)) = const_cast<void *>(Bucket);
}
if (!WasSmall)
- free(OldBuckets);
+ free(OldBuckets.begin());
NumNonEmpty -= NumTombstones;
NumTombstones = 0;
IsSmall = false;
diff --git a/llvm/lib/Support/StringRef.cpp b/llvm/lib/Support/StringRef.cpp
index dc75878..b6a2f8a 100644
--- a/llvm/lib/Support/StringRef.cpp
+++ b/llvm/lib/Support/StringRef.cpp
@@ -385,7 +385,7 @@ size_t StringRef::count(StringRef Str) const {
return Count;
}
-static unsigned GetAutoSenseRadix(StringRef &Str) {
+unsigned llvm::getAutoSenseRadix(StringRef &Str) {
if (Str.empty())
return 10;
@@ -410,7 +410,7 @@ bool llvm::consumeUnsignedInteger(StringRef &Str, unsigned Radix,
unsigned long long &Result) {
// Autosense radix if not specified.
if (Radix == 0)
- Radix = GetAutoSenseRadix(Str);
+ Radix = getAutoSenseRadix(Str);
// Empty strings (after the radix autosense) are invalid.
if (Str.empty()) return true;
@@ -509,7 +509,7 @@ bool StringRef::consumeInteger(unsigned Radix, APInt &Result) {
// Autosense radix if not specified.
if (Radix == 0)
- Radix = GetAutoSenseRadix(Str);
+ Radix = getAutoSenseRadix(Str);
assert(Radix > 1 && Radix <= 36);
diff --git a/llvm/lib/Support/regcomp.c b/llvm/lib/Support/regcomp.c
index 4ed5982..f5c4778 100644
--- a/llvm/lib/Support/regcomp.c
+++ b/llvm/lib/Support/regcomp.c
@@ -305,7 +305,7 @@ llvm_regcomp(llvm_regex_t *preg, const char *pattern, int cflags) {
return (REG_INVARG);
len = preg->re_endp - pattern;
} else {
- len = strlen((const char *)pattern);
+ len = strlen(pattern);
}
/* do the mallocs early so failure handling is easy */
diff --git a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
index 201bfe0..d6a3d59 100644
--- a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
@@ -1236,14 +1236,20 @@ bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB,
.add(MI.getOperand(3));
transferImpOps(MI, I, I);
} else {
+ unsigned RegState =
+ getRenamableRegState(MI.getOperand(1).isRenamable()) |
+ getKillRegState(
+ MI.getOperand(1).isKill() &&
+ MI.getOperand(1).getReg() != MI.getOperand(2).getReg() &&
+ MI.getOperand(1).getReg() != MI.getOperand(3).getReg());
BuildMI(MBB, MBBI, MI.getDebugLoc(),
TII->get(Opcode == AArch64::BSPv8i8 ? AArch64::ORRv8i8
: AArch64::ORRv16i8))
.addReg(DstReg,
RegState::Define |
getRenamableRegState(MI.getOperand(0).isRenamable()))
- .add(MI.getOperand(1))
- .add(MI.getOperand(1));
+ .addReg(MI.getOperand(1).getReg(), RegState)
+ .addReg(MI.getOperand(1).getReg(), RegState);
auto I2 =
BuildMI(MBB, MBBI, MI.getDebugLoc(),
TII->get(Opcode == AArch64::BSPv8i8 ? AArch64::BSLv8i8
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 1f5ff4e..3c06c6a 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -8537,7 +8537,7 @@ static void analyzeCallOperands(const AArch64TargetLowering &TLI,
if (IsCalleeWin64) {
UseVarArgCC = true;
} else {
- UseVarArgCC = !Outs[i].IsFixed;
+ UseVarArgCC = ArgFlags.isVarArg();
}
}
@@ -8982,7 +8982,7 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
unsigned NumArgs = Outs.size();
for (unsigned i = 0; i != NumArgs; ++i) {
- if (!Outs[i].IsFixed && Outs[i].VT.isScalableVector())
+ if (Outs[i].Flags.isVarArg() && Outs[i].VT.isScalableVector())
report_fatal_error("Passing SVE types to variadic functions is "
"currently not supported");
}
@@ -14742,6 +14742,106 @@ static SDValue tryLowerToSLI(SDNode *N, SelectionDAG &DAG) {
return ResultSLI;
}
+static SDValue tryCombineToBSL(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
+ const AArch64TargetLowering &TLI) {
+ EVT VT = N->getValueType(0);
+ SelectionDAG &DAG = DCI.DAG;
+ SDLoc DL(N);
+ const auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
+
+ if (!VT.isVector())
+ return SDValue();
+
+ if (VT.isScalableVector() && !Subtarget.hasSVE2())
+ return SDValue();
+
+ if (VT.isFixedLengthVector() &&
+ (!Subtarget.isNeonAvailable() || TLI.useSVEForFixedLengthVectorVT(VT)))
+ return SDValue();
+
+ SDValue N0 = N->getOperand(0);
+ if (N0.getOpcode() != ISD::AND)
+ return SDValue();
+
+ SDValue N1 = N->getOperand(1);
+ if (N1.getOpcode() != ISD::AND)
+ return SDValue();
+
+ // InstCombine does (not (neg a)) => (add a -1).
+ // Try: (or (and (neg a) b) (and (add a -1) c)) => (bsl (neg a) b c)
+ // Loop over all combinations of AND operands.
+ for (int i = 1; i >= 0; --i) {
+ for (int j = 1; j >= 0; --j) {
+ SDValue O0 = N0->getOperand(i);
+ SDValue O1 = N1->getOperand(j);
+ SDValue Sub, Add, SubSibling, AddSibling;
+
+ // Find a SUB and an ADD operand, one from each AND.
+ if (O0.getOpcode() == ISD::SUB && O1.getOpcode() == ISD::ADD) {
+ Sub = O0;
+ Add = O1;
+ SubSibling = N0->getOperand(1 - i);
+ AddSibling = N1->getOperand(1 - j);
+ } else if (O0.getOpcode() == ISD::ADD && O1.getOpcode() == ISD::SUB) {
+ Add = O0;
+ Sub = O1;
+ AddSibling = N0->getOperand(1 - i);
+ SubSibling = N1->getOperand(1 - j);
+ } else
+ continue;
+
+ if (!ISD::isConstantSplatVectorAllZeros(Sub.getOperand(0).getNode()))
+ continue;
+
+ // The all-ones constant is always the right-hand operand of the Add.
+ if (!ISD::isConstantSplatVectorAllOnes(Add.getOperand(1).getNode()))
+ continue;
+
+ if (Sub.getOperand(1) != Add.getOperand(0))
+ continue;
+
+ return DAG.getNode(AArch64ISD::BSP, DL, VT, Sub, SubSibling, AddSibling);
+ }
+ }
+
+ // (or (and a b) (and (not a) c)) => (bsl a b c)
+ // We only have to look for constant vectors here since the general, variable
+ // case can be handled in TableGen.
+ unsigned Bits = VT.getScalarSizeInBits();
+ uint64_t BitMask = Bits == 64 ? -1ULL : ((1ULL << Bits) - 1);
+ for (int i = 1; i >= 0; --i)
+ for (int j = 1; j >= 0; --j) {
+ APInt Val1, Val2;
+
+ if (ISD::isConstantSplatVector(N0->getOperand(i).getNode(), Val1) &&
+ ISD::isConstantSplatVector(N1->getOperand(j).getNode(), Val2) &&
+ (BitMask & ~Val1.getZExtValue()) == Val2.getZExtValue()) {
+ return DAG.getNode(AArch64ISD::BSP, DL, VT, N0->getOperand(i),
+ N0->getOperand(1 - i), N1->getOperand(1 - j));
+ }
+ BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(i));
+ BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(j));
+ if (!BVN0 || !BVN1)
+ continue;
+
+ bool FoundMatch = true;
+ for (unsigned k = 0; k < VT.getVectorNumElements(); ++k) {
+ ConstantSDNode *CN0 = dyn_cast<ConstantSDNode>(BVN0->getOperand(k));
+ ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(BVN1->getOperand(k));
+ if (!CN0 || !CN1 ||
+ CN0->getZExtValue() != (BitMask & ~CN1->getZExtValue())) {
+ FoundMatch = false;
+ break;
+ }
+ }
+ if (FoundMatch)
+ return DAG.getNode(AArch64ISD::BSP, DL, VT, N0->getOperand(i),
+ N0->getOperand(1 - i), N1->getOperand(1 - j));
+ }
+
+ return SDValue();
+}
+
SDValue AArch64TargetLowering::LowerVectorOR(SDValue Op,
SelectionDAG &DAG) const {
if (useSVEForFixedLengthVectorVT(Op.getValueType(),
@@ -19419,106 +19519,6 @@ static SDValue performFpToIntCombine(SDNode *N, SelectionDAG &DAG,
return FixConv;
}
-static SDValue tryCombineToBSL(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
- const AArch64TargetLowering &TLI) {
- EVT VT = N->getValueType(0);
- SelectionDAG &DAG = DCI.DAG;
- SDLoc DL(N);
- const auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
-
- if (!VT.isVector())
- return SDValue();
-
- if (VT.isScalableVector() && !Subtarget.hasSVE2())
- return SDValue();
-
- if (VT.isFixedLengthVector() &&
- (!Subtarget.isNeonAvailable() || TLI.useSVEForFixedLengthVectorVT(VT)))
- return SDValue();
-
- SDValue N0 = N->getOperand(0);
- if (N0.getOpcode() != ISD::AND)
- return SDValue();
-
- SDValue N1 = N->getOperand(1);
- if (N1.getOpcode() != ISD::AND)
- return SDValue();
-
- // InstCombine does (not (neg a)) => (add a -1).
- // Try: (or (and (neg a) b) (and (add a -1) c)) => (bsl (neg a) b c)
- // Loop over all combinations of AND operands.
- for (int i = 1; i >= 0; --i) {
- for (int j = 1; j >= 0; --j) {
- SDValue O0 = N0->getOperand(i);
- SDValue O1 = N1->getOperand(j);
- SDValue Sub, Add, SubSibling, AddSibling;
-
- // Find a SUB and an ADD operand, one from each AND.
- if (O0.getOpcode() == ISD::SUB && O1.getOpcode() == ISD::ADD) {
- Sub = O0;
- Add = O1;
- SubSibling = N0->getOperand(1 - i);
- AddSibling = N1->getOperand(1 - j);
- } else if (O0.getOpcode() == ISD::ADD && O1.getOpcode() == ISD::SUB) {
- Add = O0;
- Sub = O1;
- AddSibling = N0->getOperand(1 - i);
- SubSibling = N1->getOperand(1 - j);
- } else
- continue;
-
- if (!ISD::isConstantSplatVectorAllZeros(Sub.getOperand(0).getNode()))
- continue;
-
- // Constant ones is always righthand operand of the Add.
- if (!ISD::isConstantSplatVectorAllOnes(Add.getOperand(1).getNode()))
- continue;
-
- if (Sub.getOperand(1) != Add.getOperand(0))
- continue;
-
- return DAG.getNode(AArch64ISD::BSP, DL, VT, Sub, SubSibling, AddSibling);
- }
- }
-
- // (or (and a b) (and (not a) c)) => (bsl a b c)
- // We only have to look for constant vectors here since the general, variable
- // case can be handled in TableGen.
- unsigned Bits = VT.getScalarSizeInBits();
- uint64_t BitMask = Bits == 64 ? -1ULL : ((1ULL << Bits) - 1);
- for (int i = 1; i >= 0; --i)
- for (int j = 1; j >= 0; --j) {
- APInt Val1, Val2;
-
- if (ISD::isConstantSplatVector(N0->getOperand(i).getNode(), Val1) &&
- ISD::isConstantSplatVector(N1->getOperand(j).getNode(), Val2) &&
- (BitMask & ~Val1.getZExtValue()) == Val2.getZExtValue()) {
- return DAG.getNode(AArch64ISD::BSP, DL, VT, N0->getOperand(i),
- N0->getOperand(1 - i), N1->getOperand(1 - j));
- }
- BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(i));
- BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(j));
- if (!BVN0 || !BVN1)
- continue;
-
- bool FoundMatch = true;
- for (unsigned k = 0; k < VT.getVectorNumElements(); ++k) {
- ConstantSDNode *CN0 = dyn_cast<ConstantSDNode>(BVN0->getOperand(k));
- ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(BVN1->getOperand(k));
- if (!CN0 || !CN1 ||
- CN0->getZExtValue() != (BitMask & ~CN1->getZExtValue())) {
- FoundMatch = false;
- break;
- }
- }
- if (FoundMatch)
- return DAG.getNode(AArch64ISD::BSP, DL, VT, N0->getOperand(i),
- N0->getOperand(1 - i), N1->getOperand(1 - j));
- }
-
- return SDValue();
-}
-
// Given a tree of and/or(csel(0, 1, cc0), csel(0, 1, cc1)), we may be able to
// convert to csel(ccmp(.., cc0)), depending on cc1:
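
The relocated tryCombineToBSL recognizes the per-bit select pattern its comments describe: (or (and a b) (and (not a) c)) takes each bit from b where a is set and from c where it is clear, which is what the BSL/BSP instruction computes. A tiny, hypothetical scalar model of that identity (not part of the patch):

#include <cstdint>

// bitwiseSelect(Mask, B, C): bit i comes from B if Mask bit i is 1, else from C.
constexpr std::uint64_t bitwiseSelect(std::uint64_t Mask, std::uint64_t B,
                                      std::uint64_t C) {
  return (Mask & B) | (~Mask & C);
}
static_assert(bitwiseSelect(0xFF00, 0x1234, 0xABCD) == 0x12CD,
              "high byte from B, low byte from C");
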
diff --git a/llvm/lib/Target/AArch64/AArch64InstrFormats.td b/llvm/lib/Target/AArch64/AArch64InstrFormats.td
index d068a12..b033f88 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrFormats.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrFormats.td
@@ -7362,7 +7362,9 @@ multiclass SIMDDifferentThreeVectorBD<bit U, bits<4> opc, string asm,
[(set (v8i16 V128:$Rd), (OpNode (v8i8 V64:$Rn), (v8i8 V64:$Rm)))]>;
def v16i8 : BaseSIMDDifferentThreeVector<U, 0b001, opc,
V128, V128, V128,
- asm#"2", ".8h", ".16b", ".16b", []>;
+ asm#"2", ".8h", ".16b", ".16b",
+ [(set (v8i16 V128:$Rd), (OpNode (v8i8 (extract_high_v16i8 (v16i8 V128:$Rn))),
+ (v8i8 (extract_high_v16i8 (v16i8 V128:$Rm)))))]>;
let Predicates = [HasAES] in {
def v1i64 : BaseSIMDDifferentThreeVector<U, 0b110, opc,
V128, V64, V64,
@@ -7374,10 +7376,6 @@ multiclass SIMDDifferentThreeVectorBD<bit U, bits<4> opc, string asm,
[(set (v16i8 V128:$Rd), (OpNode (extract_high_v2i64 (v2i64 V128:$Rn)),
(extract_high_v2i64 (v2i64 V128:$Rm))))]>;
}
-
- def : Pat<(v8i16 (OpNode (v8i8 (extract_high_v16i8 (v16i8 V128:$Rn))),
- (v8i8 (extract_high_v16i8 (v16i8 V128:$Rm))))),
- (!cast<Instruction>(NAME#"v16i8") V128:$Rn, V128:$Rm)>;
}
multiclass SIMDLongThreeVectorHS<bit U, bits<4> opc, string asm,
@@ -7402,6 +7400,7 @@ multiclass SIMDLongThreeVectorHS<bit U, bits<4> opc, string asm,
(extract_high_v4i32 (v4i32 V128:$Rm))))]>;
}
+let isCommutable = 1 in
multiclass SIMDLongThreeVectorBHSabdl<bit U, bits<4> opc, string asm,
SDPatternOperator OpNode = null_frag> {
def v8i8_v8i16 : BaseSIMDDifferentThreeVector<U, 0b000, opc,
@@ -7483,6 +7482,7 @@ multiclass SIMDLongThreeVectorTiedBHSabal<bit U, bits<4> opc,
(extract_high_v4i32 (v4i32 V128:$Rm)))))))]>;
}
+let isCommutable = 1 in
multiclass SIMDLongThreeVectorBHS<bit U, bits<4> opc, string asm,
SDPatternOperator OpNode = null_frag> {
def v8i8_v8i16 : BaseSIMDDifferentThreeVector<U, 0b000, opc,
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index ac31236..8cfbff9 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -6055,6 +6055,7 @@ defm MLA : SIMDThreeSameVectorBHSTied<0, 0b10010, "mla", null_frag>;
defm MLS : SIMDThreeSameVectorBHSTied<1, 0b10010, "mls", null_frag>;
defm MUL : SIMDThreeSameVectorBHS<0, 0b10011, "mul", mul>;
+let isCommutable = 1 in
defm PMUL : SIMDThreeSameVectorB<1, 0b10011, "pmul", int_aarch64_neon_pmul>;
defm SABA : SIMDThreeSameVectorBHSTied<0, 0b01111, "saba",
TriOpFrag<(add node:$LHS, (abds node:$MHS, node:$RHS))> >;
@@ -6806,6 +6807,7 @@ defm ADDHN : SIMDNarrowThreeVectorBHS<0,0b0100,"addhn", int_aarch64_neon_addhn>
defm SUBHN : SIMDNarrowThreeVectorBHS<0,0b0110,"subhn", int_aarch64_neon_subhn>;
defm RADDHN : SIMDNarrowThreeVectorBHS<1,0b0100,"raddhn",int_aarch64_neon_raddhn>;
defm RSUBHN : SIMDNarrowThreeVectorBHS<1,0b0110,"rsubhn",int_aarch64_neon_rsubhn>;
+let isCommutable = 1 in
defm PMULL : SIMDDifferentThreeVectorBD<0,0b1110,"pmull", AArch64pmull>;
defm SABAL : SIMDLongThreeVectorTiedBHSabal<0,0b0101,"sabal", abds>;
defm SABDL : SIMDLongThreeVectorBHSabdl<0, 0b0111, "sabdl", abds>;
@@ -6822,6 +6824,7 @@ defm SQDMLAL : SIMDLongThreeVectorSQDMLXTiedHS<0, 0b1001, "sqdmlal", saddsat>;
defm SQDMLSL : SIMDLongThreeVectorSQDMLXTiedHS<0, 0b1011, "sqdmlsl", ssubsat>;
defm SQDMULL : SIMDLongThreeVectorHS<0, 0b1101, "sqdmull",
int_aarch64_neon_sqdmull>;
+let isCommutable = 0 in
defm SSUBL : SIMDLongThreeVectorBHS<0, 0b0010, "ssubl",
BinOpFrag<(sub (sext node:$LHS), (sext node:$RHS))>>;
defm SSUBW : SIMDWideThreeVectorBHS<0, 0b0011, "ssubw",
@@ -6836,6 +6839,7 @@ defm UMLAL : SIMDLongThreeVectorTiedBHS<1, 0b1000, "umlal",
defm UMLSL : SIMDLongThreeVectorTiedBHS<1, 0b1010, "umlsl",
TriOpFrag<(sub node:$LHS, (AArch64umull node:$MHS, node:$RHS))>>;
defm UMULL : SIMDLongThreeVectorBHS<1, 0b1100, "umull", AArch64umull>;
+let isCommutable = 0 in
defm USUBL : SIMDLongThreeVectorBHS<1, 0b0010, "usubl",
BinOpFrag<(sub (zanyext node:$LHS), (zanyext node:$RHS))>>;
defm USUBW : SIMDWideThreeVectorBHS< 1, 0b0011, "usubw",
diff --git a/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp b/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp
index b97d622..fd4ef2a 100644
--- a/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp
+++ b/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp
@@ -8,8 +8,8 @@
//
// This pass performs the peephole optimizations listed below at the MIR level.
//
-// 1. MOVi32imm + ANDS?Wrr ==> ANDWri + ANDS?Wri
-// MOVi64imm + ANDS?Xrr ==> ANDXri + ANDS?Xri
+// 1. MOVi32imm + (ANDS?|EOR|ORR)Wrr ==> (AND|EOR|ORR)Wri + (ANDS?|EOR|ORR)Wri
+// MOVi64imm + (ANDS?|EOR|ORR)Xrr ==> (AND|EOR|ORR)Xri + (ANDS?|EOR|ORR)Xri
//
// 2. MOVi32imm + ADDWrr ==> ADDWRi + ADDWRi
// MOVi64imm + ADDXrr ==> ADDXri + ADDXri
@@ -128,6 +128,7 @@ struct AArch64MIPeepholeOpt : public MachineFunctionPass {
// Strategy used to split logical immediate bitmasks.
enum class SplitStrategy {
Intersect,
+ Disjoint,
};
template <typename T>
bool trySplitLogicalImm(unsigned Opc, MachineInstr &MI,
@@ -163,6 +164,7 @@ INITIALIZE_PASS(AArch64MIPeepholeOpt, "aarch64-mi-peephole-opt",
template <typename T>
static bool splitBitmaskImm(T Imm, unsigned RegSize, T &Imm1Enc, T &Imm2Enc) {
T UImm = static_cast<T>(Imm);
+ assert(UImm && (UImm != ~static_cast<T>(0)) && "Invalid immediate!");
// The bitmask immediate consists of consecutive ones. Let's say there is
// constant 0b00000000001000000000010000000000 which does not consist of
@@ -191,18 +193,47 @@ static bool splitBitmaskImm(T Imm, unsigned RegSize, T &Imm1Enc, T &Imm2Enc) {
}
template <typename T>
+static bool splitDisjointBitmaskImm(T Imm, unsigned RegSize, T &Imm1Enc,
+ T &Imm2Enc) {
+ assert(Imm && (Imm != ~static_cast<T>(0)) && "Invalid immediate!");
+
+ // Try to split a bitmask of the form 0b00000000011000000000011110000000 into
+ // two disjoint masks such as 0b00000000011000000000000000000000 and
+ // 0b00000000000000000000011110000000 where the inclusive/exclusive OR of the
+ // new masks matches the original mask.
+ unsigned LowestBitSet = llvm::countr_zero(Imm);
+ unsigned LowestGapBitUnset =
+ LowestBitSet + llvm::countr_one(Imm >> LowestBitSet);
+
+ // Create a mask for the least significant group of consecutive ones.
+ assert(LowestGapBitUnset < sizeof(T) * CHAR_BIT && "Undefined behaviour!");
+ T NewImm1 = (static_cast<T>(1) << LowestGapBitUnset) -
+ (static_cast<T>(1) << LowestBitSet);
+ // Create a disjoint mask for the remaining ones.
+ T NewImm2 = Imm & ~NewImm1;
+
+ // Do not split if NewImm2 is not a valid bitmask immediate.
+ if (!AArch64_AM::isLogicalImmediate(NewImm2, RegSize))
+ return false;
+
+ Imm1Enc = AArch64_AM::encodeLogicalImmediate(NewImm1, RegSize);
+ Imm2Enc = AArch64_AM::encodeLogicalImmediate(NewImm2, RegSize);
+ return true;
+}
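As a sanity check of the splitting logic above — a standalone sketch, not part of the patch — the following C++20 snippet reproduces the disjoint split on the 32-bit example from the comment (0b00000000011000000000011110000000, i.e. 0x600780), using std::countr_zero/std::countr_one as stand-ins for the llvm::countr_* helpers:

#include <bit>
#include <cstdint>
#include <cstdio>

int main() {
  uint32_t Imm = 0b00000000011000000000011110000000u; // 0x600780
  unsigned LowestBitSet = std::countr_zero(Imm);                                    // 7
  unsigned LowestGapBitUnset = LowestBitSet + std::countr_one(Imm >> LowestBitSet); // 11
  uint32_t NewImm1 = (1u << LowestGapBitUnset) - (1u << LowestBitSet);              // 0x00000780
  uint32_t NewImm2 = Imm & ~NewImm1;                                                // 0x00600000
  std::printf("%#x -> %#x | %#x\n", Imm, NewImm2, NewImm1);
}

Both halves are single runs of consecutive ones, so each encodes as an AArch64 logical immediate even though the original constant does not; a multi-instruction mov of 0x600780 feeding an eor/orr can then be replaced by two *ri instructions.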
+
+template <typename T>
bool AArch64MIPeepholeOpt::trySplitLogicalImm(unsigned Opc, MachineInstr &MI,
SplitStrategy Strategy,
unsigned OtherOpc) {
- // Try below transformation.
+ // Try below transformations.
//
- // MOVi32imm + ANDS?Wrr ==> ANDWri + ANDS?Wri
- // MOVi64imm + ANDS?Xrr ==> ANDXri + ANDS?Xri
+ // MOVi32imm + (ANDS?|EOR|ORR)Wrr ==> (AND|EOR|ORR)Wri + (ANDS?|EOR|ORR)Wri
+ // MOVi64imm + (ANDS?|EOR|ORR)Xrr ==> (AND|EOR|ORR)Xri + (ANDS?|EOR|ORR)Xri
//
// The mov pseudo instruction could be expanded to multiple mov instructions
// later. Let's try to split the constant operand of mov instruction into two
- // bitmask immediates. It makes only two AND instructions instead of multiple
- // mov + and instructions.
+ // bitmask immediates based on the given split strategy. It makes only two
+ // logical instructions instead of multiple mov + logic instructions.
return splitTwoPartImm<T>(
MI,
@@ -224,6 +255,9 @@ bool AArch64MIPeepholeOpt::trySplitLogicalImm(unsigned Opc, MachineInstr &MI,
case SplitStrategy::Intersect:
SplitSucc = splitBitmaskImm(Imm, RegSize, Imm0, Imm1);
break;
+ case SplitStrategy::Disjoint:
+ SplitSucc = splitDisjointBitmaskImm(Imm, RegSize, Imm0, Imm1);
+ break;
}
if (SplitSucc)
return std::make_pair(Opc, !OtherOpc ? Opc : OtherOpc);
@@ -889,6 +923,22 @@ bool AArch64MIPeepholeOpt::runOnMachineFunction(MachineFunction &MF) {
Changed |= trySplitLogicalImm<uint64_t>(
AArch64::ANDXri, MI, SplitStrategy::Intersect, AArch64::ANDSXri);
break;
+ case AArch64::EORWrr:
+ Changed |= trySplitLogicalImm<uint32_t>(AArch64::EORWri, MI,
+ SplitStrategy::Disjoint);
+ break;
+ case AArch64::EORXrr:
+ Changed |= trySplitLogicalImm<uint64_t>(AArch64::EORXri, MI,
+ SplitStrategy::Disjoint);
+ break;
+ case AArch64::ORRWrr:
+ Changed |= trySplitLogicalImm<uint32_t>(AArch64::ORRWri, MI,
+ SplitStrategy::Disjoint);
+ break;
+ case AArch64::ORRXrr:
+ Changed |= trySplitLogicalImm<uint64_t>(AArch64::ORRXri, MI,
+ SplitStrategy::Disjoint);
+ break;
case AArch64::ORRWrs:
Changed |= visitORR(MI);
break;
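The new Disjoint strategy is only sound because XOR and OR distribute over a split of the immediate into disjoint parts. A small host-side check of that identity (illustrative constants, not taken from the patch or its tests):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t M1 = 0x600000u, M2 = 0x780u; // disjoint logical immediates
  const uint32_t Vals[] = {0u, 0x12345678u, 0x600780u, 0xFFFFFFFFu};
  for (uint32_t X : Vals) {
    assert(((X ^ M1) ^ M2) == (X ^ (M1 | M2))); // two EORWri == one EOR with the full mask
    assert(((X | M1) | M2) == (X | (M1 | M2))); // two ORRWri == one ORR with the full mask
  }
}

AND-type opcodes keep using the pre-existing Intersect strategy, where the two masks must intersect to the original value rather than union to it.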
diff --git a/llvm/lib/Target/AArch64/AArch64Processors.td b/llvm/lib/Target/AArch64/AArch64Processors.td
index adc984a..1bc1d98 100644
--- a/llvm/lib/Target/AArch64/AArch64Processors.td
+++ b/llvm/lib/Target/AArch64/AArch64Processors.td
@@ -22,7 +22,8 @@ def TuneA320 : SubtargetFeature<"a320", "ARMProcFamily", "CortexA320",
FeatureFuseAES,
FeatureFuseAdrpAdd,
FeaturePostRAScheduler,
- FeatureUseWzrToVecMove]>;
+ FeatureUseWzrToVecMove,
+ FeatureUseFixedOverScalableIfEqualCost]>;
def TuneA53 : SubtargetFeature<"a53", "ARMProcFamily", "CortexA53",
"Cortex-A53 ARM processors", [
@@ -45,7 +46,8 @@ def TuneA510 : SubtargetFeature<"a510", "ARMProcFamily", "CortexA510",
FeatureFuseAES,
FeatureFuseAdrpAdd,
FeaturePostRAScheduler,
- FeatureUseWzrToVecMove
+ FeatureUseWzrToVecMove,
+ FeatureUseFixedOverScalableIfEqualCost
]>;
def TuneA520 : SubtargetFeature<"a520", "ARMProcFamily", "CortexA520",
@@ -53,7 +55,8 @@ def TuneA520 : SubtargetFeature<"a520", "ARMProcFamily", "CortexA520",
FeatureFuseAES,
FeatureFuseAdrpAdd,
FeaturePostRAScheduler,
- FeatureUseWzrToVecMove]>;
+ FeatureUseWzrToVecMove,
+ FeatureUseFixedOverScalableIfEqualCost]>;
def TuneA520AE : SubtargetFeature<"a520ae", "ARMProcFamily", "CortexA520",
"Cortex-A520AE ARM processors", [
@@ -756,7 +759,6 @@ def ProcessorFeatures {
FeatureSB, FeaturePAuth, FeatureSSBS, FeatureSVE, FeatureSVE2,
FeatureComplxNum, FeatureCRC, FeatureDotProd,
FeatureFPARMv8,FeatureFullFP16, FeatureJS, FeatureLSE,
- FeatureUseFixedOverScalableIfEqualCost,
FeatureRAS, FeatureRCPC, FeatureRDM, FeatureFPAC];
list<SubtargetFeature> A520 = [HasV9_2aOps, FeaturePerfMon, FeatureAM,
FeatureMTE, FeatureETE, FeatureSVEBitPerm,
@@ -766,7 +768,6 @@ def ProcessorFeatures {
FeatureSVE, FeatureSVE2, FeatureBF16, FeatureComplxNum, FeatureCRC,
FeatureFPARMv8, FeatureFullFP16, FeatureMatMulInt8, FeatureJS,
FeatureNEON, FeatureLSE, FeatureRAS, FeatureRCPC, FeatureRDM,
- FeatureUseFixedOverScalableIfEqualCost,
FeatureDotProd, FeatureFPAC];
list<SubtargetFeature> A520AE = [HasV9_2aOps, FeaturePerfMon, FeatureAM,
FeatureMTE, FeatureETE, FeatureSVEBitPerm,
diff --git a/llvm/lib/Target/AArch64/AArch64StackTagging.cpp b/llvm/lib/Target/AArch64/AArch64StackTagging.cpp
index f136a184..a67bd42 100644
--- a/llvm/lib/Target/AArch64/AArch64StackTagging.cpp
+++ b/llvm/lib/Target/AArch64/AArch64StackTagging.cpp
@@ -585,8 +585,7 @@ bool AArch64StackTagging::runOnFunction(Function &Fn) {
ClMaxLifetimes);
if (StandardLifetime) {
IntrinsicInst *Start = Info.LifetimeStart[0];
- uint64_t Size =
- cast<ConstantInt>(Start->getArgOperand(0))->getZExtValue();
+ uint64_t Size = *Info.AI->getAllocationSize(*DL);
Size = alignTo(Size, kTagGranuleSize);
tagAlloca(AI, Start->getNextNode(), TagPCall, Size);
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
index 9f05add..5c94aeb 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -554,7 +554,17 @@ static bool isUnpackedVectorVT(EVT VecVT) {
VecVT.getSizeInBits().getKnownMinValue() < AArch64::SVEBitsPerBlock;
}
-static InstructionCost getHistogramCost(const IntrinsicCostAttributes &ICA) {
+static InstructionCost getHistogramCost(const AArch64Subtarget *ST,
+ const IntrinsicCostAttributes &ICA) {
+ // We need to know at least the number of elements in the vector of buckets
+ // and the size of each element to update.
+ if (ICA.getArgTypes().size() < 2)
+ return InstructionCost::getInvalid();
+
+ // Only interested in costing for the hardware instruction from SVE2.
+ if (!ST->hasSVE2())
+ return InstructionCost::getInvalid();
+
Type *BucketPtrsTy = ICA.getArgTypes()[0]; // Type of vector of pointers
Type *EltTy = ICA.getArgTypes()[1]; // Type of bucket elements
unsigned TotalHistCnts = 1;
@@ -579,9 +589,11 @@ static InstructionCost getHistogramCost(const IntrinsicCostAttributes &ICA) {
unsigned NaturalVectorWidth = AArch64::SVEBitsPerBlock / LegalEltSize;
TotalHistCnts = EC / NaturalVectorWidth;
+
+ return InstructionCost(BaseHistCntCost * TotalHistCnts);
}
- return InstructionCost(BaseHistCntCost * TotalHistCnts);
+ return InstructionCost::getInvalid();
}
InstructionCost
@@ -597,10 +609,13 @@ AArch64TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
return InstructionCost::getInvalid();
switch (ICA.getID()) {
- case Intrinsic::experimental_vector_histogram_add:
- if (!ST->hasSVE2())
- return InstructionCost::getInvalid();
- return getHistogramCost(ICA);
+ case Intrinsic::experimental_vector_histogram_add: {
+ InstructionCost HistCost = getHistogramCost(ST, ICA);
+ // If the cost isn't valid, we may still be able to scalarize.
+ if (HistCost.isValid())
+ return HistCost;
+ break;
+ }
case Intrinsic::umin:
case Intrinsic::umax:
case Intrinsic::smin:
@@ -3975,6 +3990,27 @@ InstructionCost AArch64TTIImpl::getScalarizationOverhead(
return DemandedElts.popcount() * (Insert + Extract) * VecInstCost;
}
+std::optional<InstructionCost> AArch64TTIImpl::getFP16BF16PromoteCost(
+ Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info,
+ TTI::OperandValueInfo Op2Info, bool IncludeTrunc,
+ std::function<InstructionCost(Type *)> InstCost) const {
+ if (!Ty->getScalarType()->isHalfTy() && !Ty->getScalarType()->isBFloatTy())
+ return std::nullopt;
+ if (Ty->getScalarType()->isHalfTy() && ST->hasFullFP16())
+ return std::nullopt;
+
+ Type *PromotedTy = Ty->getWithNewType(Type::getFloatTy(Ty->getContext()));
+ InstructionCost Cost = getCastInstrCost(Instruction::FPExt, PromotedTy, Ty,
+ TTI::CastContextHint::None, CostKind);
+ if (!Op1Info.isConstant() && !Op2Info.isConstant())
+ Cost *= 2;
+ Cost += InstCost(PromotedTy);
+ if (IncludeTrunc)
+ Cost += getCastInstrCost(Instruction::FPTrunc, Ty, PromotedTy,
+ TTI::CastContextHint::None, CostKind);
+ return Cost;
+}
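To make the composition concrete — purely illustrative arithmetic, assuming each cast and the promoted operation cost 1, which real subtarget cost tables need not match — the promoted cost of an fadd on <4 x half> without +fullfp16 would be assembled as follows:

#include <cstdio>

int main() {
  // Mirrors the structure of the helper above; not actual AArch64 TTI numbers.
  unsigned FPExtCost = 1, PromotedOpCost = 1, FPTruncCost = 1;
  bool BothOperandsVariable = true; // neither operand is a constant

  unsigned Cost = FPExtCost;
  if (BothOperandsVariable)
    Cost *= 2;            // one fpext per non-constant source
  Cost += PromotedOpCost; // fadd on the promoted <4 x float>
  Cost += FPTruncCost;    // IncludeTrunc: fptrunc back to <4 x half>

  std::printf("promoted fadd cost under these assumptions: %u\n", Cost); // 4
}

The FCmp path below reuses the same helper with IncludeTrunc=false and instead adds, for vectors, the cost of truncating the promoted integer compare result.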
+
InstructionCost AArch64TTIImpl::getArithmeticInstrCost(
unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info,
@@ -3997,6 +4033,18 @@ InstructionCost AArch64TTIImpl::getArithmeticInstrCost(
std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
int ISD = TLI->InstructionOpcodeToISD(Opcode);
+ // Increase the cost for half and bfloat types if not architecturally
+ // supported.
+ if (ISD == ISD::FADD || ISD == ISD::FSUB || ISD == ISD::FMUL ||
+ ISD == ISD::FDIV || ISD == ISD::FREM)
+ if (auto PromotedCost = getFP16BF16PromoteCost(
+ Ty, CostKind, Op1Info, Op2Info, /*IncludeTrunc=*/true,
+ [&](Type *PromotedTy) {
+ return getArithmeticInstrCost(Opcode, PromotedTy, CostKind,
+ Op1Info, Op2Info);
+ }))
+ return *PromotedCost;
+
switch (ISD) {
default:
return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
@@ -4265,11 +4313,6 @@ InstructionCost AArch64TTIImpl::getArithmeticInstrCost(
[[fallthrough]];
case ISD::FADD:
case ISD::FSUB:
- // Increase the cost for half and bfloat types if not architecturally
- // supported.
- if ((Ty->getScalarType()->isHalfTy() && !ST->hasFullFP16()) ||
- (Ty->getScalarType()->isBFloatTy() && !ST->hasBF16()))
- return 2 * LT.first;
if (!Ty->getScalarType()->isFP128Ty())
return LT.first;
[[fallthrough]];
@@ -4371,25 +4414,21 @@ InstructionCost AArch64TTIImpl::getCmpSelInstrCost(
}
if (Opcode == Instruction::FCmp) {
- // Without dedicated instructions we promote f16 + bf16 compares to f32.
- if ((!ST->hasFullFP16() && ValTy->getScalarType()->isHalfTy()) ||
- ValTy->getScalarType()->isBFloatTy()) {
- Type *PromotedTy =
- ValTy->getWithNewType(Type::getFloatTy(ValTy->getContext()));
- InstructionCost Cost =
- getCastInstrCost(Instruction::FPExt, PromotedTy, ValTy,
- TTI::CastContextHint::None, CostKind);
- if (!Op1Info.isConstant() && !Op2Info.isConstant())
- Cost *= 2;
- Cost += getCmpSelInstrCost(Opcode, PromotedTy, CondTy, VecPred, CostKind,
- Op1Info, Op2Info);
- if (ValTy->isVectorTy())
- Cost += getCastInstrCost(
- Instruction::Trunc, VectorType::getInteger(cast<VectorType>(ValTy)),
- VectorType::getInteger(cast<VectorType>(PromotedTy)),
- TTI::CastContextHint::None, CostKind);
- return Cost;
- }
+ if (auto PromotedCost = getFP16BF16PromoteCost(
+ ValTy, CostKind, Op1Info, Op2Info, /*IncludeTrunc=*/false,
+ [&](Type *PromotedTy) {
+ InstructionCost Cost =
+ getCmpSelInstrCost(Opcode, PromotedTy, CondTy, VecPred,
+ CostKind, Op1Info, Op2Info);
+ if (isa<VectorType>(PromotedTy))
+ Cost += getCastInstrCost(
+ Instruction::Trunc,
+ VectorType::getInteger(cast<VectorType>(ValTy)),
+ VectorType::getInteger(cast<VectorType>(PromotedTy)),
+ TTI::CastContextHint::None, CostKind);
+ return Cost;
+ }))
+ return *PromotedCost;
auto LT = getTypeLegalizationCost(ValTy);
// Model unknown fp compares as a libcall.
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
index 7f45177..fa9b25a 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
@@ -435,6 +435,14 @@ public:
bool preferPredicatedReductionSelect() const override { return ST->hasSVE(); }
+ /// FP16 and BF16 operations are lowered to fptrunc(op(fpext, fpext)) if the
+ /// architecture features are not present.
+ std::optional<InstructionCost>
+ getFP16BF16PromoteCost(Type *Ty, TTI::TargetCostKind CostKind,
+ TTI::OperandValueInfo Op1Info,
+ TTI::OperandValueInfo Op2Info, bool IncludeTrunc,
+ std::function<InstructionCost(Type *)> InstCost) const;
+
InstructionCost
getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
std::optional<FastMathFlags> FMF,
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp b/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
index 010d0aaa..2155ace 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
@@ -125,7 +125,7 @@ struct AArch64OutgoingValueAssigner
bool UseVarArgsCCForFixed = IsCalleeWin && State.isVarArg();
bool Res;
- if (Info.IsFixed && !UseVarArgsCCForFixed) {
+ if (!Flags.isVarArg() && !UseVarArgsCCForFixed) {
if (!IsReturn)
applyStackPassedSmallTypeDAGHack(OrigVT, ValVT, LocVT);
Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);
@@ -361,7 +361,7 @@ struct OutgoingArgHandler : public CallLowering::OutgoingValueHandler {
unsigned MaxSize = MemTy.getSizeInBytes() * 8;
// For varargs, we always want to extend them to 8 bytes, in which case
// we disable setting a max.
- if (!Arg.IsFixed)
+ if (Arg.Flags[0].isVarArg())
MaxSize = 0;
Register ValVReg = Arg.Regs[RegIndex];
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
index 2a324e5..626734a 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
@@ -997,89 +997,24 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
const Function &F = MF.getFunction();
// Ensure there are enough SGPRs and VGPRs for wave dispatch, where wave
- // dispatch registers are function args.
- unsigned WaveDispatchNumSGPR = 0, WaveDispatchNumVGPR = 0;
-
- if (isShader(F.getCallingConv())) {
- bool IsPixelShader =
- F.getCallingConv() == CallingConv::AMDGPU_PS && !STM.isAmdHsaOS();
-
- // Calculate the number of VGPR registers based on the SPI input registers
- uint32_t InputEna = 0;
- uint32_t InputAddr = 0;
- unsigned LastEna = 0;
-
- if (IsPixelShader) {
- // Note for IsPixelShader:
- // By this stage, all enabled inputs are tagged in InputAddr as well.
- // We will use InputAddr to determine whether the input counts against the
- // vgpr total and only use the InputEnable to determine the last input
- // that is relevant - if extra arguments are used, then we have to honour
- // the InputAddr for any intermediate non-enabled inputs.
- InputEna = MFI->getPSInputEnable();
- InputAddr = MFI->getPSInputAddr();
-
- // We only need to consider input args up to the last used arg.
- assert((InputEna || InputAddr) &&
- "PSInputAddr and PSInputEnable should "
- "never both be 0 for AMDGPU_PS shaders");
- // There are some rare circumstances where InputAddr is non-zero and
- // InputEna can be set to 0. In this case we default to setting LastEna
- // to 1.
- LastEna = InputEna ? llvm::Log2_32(InputEna) + 1 : 1;
- }
+ // dispatch registers as function args.
+ unsigned WaveDispatchNumSGPR = MFI->getNumWaveDispatchSGPRs(),
+ WaveDispatchNumVGPR = MFI->getNumWaveDispatchVGPRs();
- // FIXME: We should be using the number of registers determined during
- // calling convention lowering to legalize the types.
- const DataLayout &DL = F.getDataLayout();
- unsigned PSArgCount = 0;
- unsigned IntermediateVGPR = 0;
- for (auto &Arg : F.args()) {
- unsigned NumRegs = (DL.getTypeSizeInBits(Arg.getType()) + 31) / 32;
- if (Arg.hasAttribute(Attribute::InReg)) {
- WaveDispatchNumSGPR += NumRegs;
- } else {
- // If this is a PS shader and we're processing the PS Input args (first
- // 16 VGPR), use the InputEna and InputAddr bits to define how many
- // VGPRs are actually used.
- // Any extra VGPR arguments are handled as normal arguments (and
- // contribute to the VGPR count whether they're used or not).
- if (IsPixelShader && PSArgCount < 16) {
- if ((1 << PSArgCount) & InputAddr) {
- if (PSArgCount < LastEna)
- WaveDispatchNumVGPR += NumRegs;
- else
- IntermediateVGPR += NumRegs;
- }
- PSArgCount++;
- } else {
- // If there are extra arguments we have to include the allocation for
- // the non-used (but enabled with InputAddr) input arguments
- if (IntermediateVGPR) {
- WaveDispatchNumVGPR += IntermediateVGPR;
- IntermediateVGPR = 0;
- }
- WaveDispatchNumVGPR += NumRegs;
- }
- }
- }
+ if (WaveDispatchNumSGPR) {
ProgInfo.NumSGPR = AMDGPUMCExpr::createMax(
- {ProgInfo.NumSGPR, CreateExpr(WaveDispatchNumSGPR)}, Ctx);
+ {ProgInfo.NumSGPR,
+ MCBinaryExpr::createAdd(CreateExpr(WaveDispatchNumSGPR), ExtraSGPRs,
+ Ctx)},
+ Ctx);
+ }
+ if (WaveDispatchNumVGPR) {
ProgInfo.NumArchVGPR = AMDGPUMCExpr::createMax(
{ProgInfo.NumVGPR, CreateExpr(WaveDispatchNumVGPR)}, Ctx);
ProgInfo.NumVGPR = AMDGPUMCExpr::createTotalNumVGPR(
ProgInfo.NumAccVGPR, ProgInfo.NumArchVGPR, Ctx);
- } else if (isKernel(F.getCallingConv()) &&
- MFI->getNumKernargPreloadedSGPRs()) {
- // Consider cases where the total number of UserSGPRs with trailing
- // allocated preload SGPRs, is greater than the number of explicitly
- // referenced SGPRs.
- const MCExpr *UserPlusExtraSGPRs = MCBinaryExpr::createAdd(
- CreateExpr(MFI->getNumUserSGPRs()), ExtraSGPRs, Ctx);
- ProgInfo.NumSGPR =
- AMDGPUMCExpr::createMax({ProgInfo.NumSGPR, UserPlusExtraSGPRs}, Ctx);
}
// Adjust number of registers used to meet default/requested minimum/maximum
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
index 3d8d274..64a9bde 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
@@ -580,6 +580,9 @@ bool AMDGPUCallLowering::lowerFormalArgumentsKernel(
++i;
}
+ if (Info->getNumKernargPreloadedSGPRs())
+ Info->setNumWaveDispatchSGPRs(Info->getNumUserSGPRs());
+
TLI.allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info);
TLI.allocateSystemSGPRs(CCInfo, MF, *Info, F.getCallingConv(), false);
return true;
@@ -743,6 +746,15 @@ bool AMDGPUCallLowering::lowerFormalArguments(
if (!determineAssignments(Assigner, SplitArgs, CCInfo))
return false;
+ if (IsEntryFunc) {
+ // This assumes the registers are allocated by CCInfo in ascending order
+ // with no gaps.
+ Info->setNumWaveDispatchSGPRs(
+ CCInfo.getFirstUnallocated(AMDGPU::SGPR_32RegClass.getRegisters()));
+ Info->setNumWaveDispatchVGPRs(
+ CCInfo.getFirstUnallocated(AMDGPU::VGPR_32RegClass.getRegisters()));
+ }
+
FormalArgHandler Handler(B, MRI);
if (!handleAssignments(Handler, SplitArgs, CCInfo, ArgLocs, B))
return false;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
index fb83388..9d6584a 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -3212,6 +3212,44 @@ bool AMDGPUDAGToDAGISel::SelectVOP3ModsImpl(SDValue In, SDValue &Src,
Src = Src.getOperand(0);
}
+ if (Mods != SISrcMods::NONE)
+ return true;
+
+ // Convert various sign-bit masks on integers to src mods. Currently disabled
+ // for 16-bit types as the codegen replaces the operand without adding a
+ // srcmod. This is intentionally finding the cases where we are performing
+ // float neg and abs on int types; the goal is not to obtain two's complement
+ // neg or abs. Limit the conversion to select operands via the
+ // non-canonicalizing pattern.
+ // TODO: Add 16-bit support.
+ if (IsCanonicalizing)
+ return true;
+
+ unsigned Opc = Src->getOpcode();
+ EVT VT = Src.getValueType();
+ if ((Opc != ISD::AND && Opc != ISD::OR && Opc != ISD::XOR) ||
+ (VT != MVT::i32 && VT != MVT::i64))
+ return true;
+
+ ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Src->getOperand(1));
+ if (!CRHS)
+ return true;
+
+ // Recognise (xor a, 0x80000000) as NEG SrcMod.
+ // Recognise (and a, 0x7fffffff) as ABS SrcMod.
+ // Recognise (or a, 0x80000000) as NEG+ABS SrcModifiers.
+ if (Opc == ISD::XOR && CRHS->getAPIntValue().isSignMask()) {
+ Mods |= SISrcMods::NEG;
+ Src = Src.getOperand(0);
+ } else if (Opc == ISD::AND && AllowAbs &&
+ CRHS->getAPIntValue().isMaxSignedValue()) {
+ Mods |= SISrcMods::ABS;
+ Src = Src.getOperand(0);
+ } else if (Opc == ISD::OR && AllowAbs && CRHS->getAPIntValue().isSignMask()) {
+ Mods |= SISrcMods::ABS | SISrcMods::NEG;
+ Src = Src.getOperand(0);
+ }
+
return true;
}
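The matches above rely on the usual IEEE-754 sign-bit identities; a quick host-side check of them (standalone C++20, not part of the patch):

#include <bit>
#include <cassert>
#include <cmath>
#include <cstdint>

int main() {
  float X = -3.5f;
  uint32_t Bits = std::bit_cast<uint32_t>(X);
  assert(std::bit_cast<float>(Bits ^ 0x80000000u) == -X);            // xor sign bit   -> NEG
  assert(std::bit_cast<float>(Bits & 0x7fffffffu) == std::fabs(X));  // clear sign bit -> ABS
  assert(std::bit_cast<float>(Bits | 0x80000000u) == -std::fabs(X)); // set sign bit   -> NEG+ABS
}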
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index a6e4a63..40d960e 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -5879,8 +5879,12 @@ AMDGPULegalizerInfo::splitBufferOffsets(MachineIRBuilder &B,
const LLT S32 = LLT::scalar(32);
MachineRegisterInfo &MRI = *B.getMRI();
- std::tie(BaseReg, ImmOffset) =
- AMDGPU::getBaseWithConstantOffset(MRI, OrigOffset);
+ // On GFX1250+, voffset and immoffset are zero-extended from 32 bits before
+ // being added, so we can only safely match a 32-bit addition with no unsigned
+ // overflow.
+ bool CheckNUW = AMDGPU::isGFX1250(ST);
+ std::tie(BaseReg, ImmOffset) = AMDGPU::getBaseWithConstantOffset(
+ MRI, OrigOffset, /*KnownBits=*/nullptr, CheckNUW);
// If BaseReg is a pointer, convert it to int.
if (MRI.getType(BaseReg).isPointer())
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp
index 304e91e..4398ef7 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp
@@ -1361,6 +1361,7 @@ public:
PtrParts visitAtomicCmpXchgInst(AtomicCmpXchgInst &AI);
PtrParts visitGetElementPtrInst(GetElementPtrInst &GEP);
+ PtrParts visitPtrToAddrInst(PtrToAddrInst &PA);
PtrParts visitPtrToIntInst(PtrToIntInst &PI);
PtrParts visitIntToPtrInst(IntToPtrInst &IP);
PtrParts visitAddrSpaceCastInst(AddrSpaceCastInst &I);
@@ -1954,6 +1955,21 @@ PtrParts SplitPtrStructs::visitPtrToIntInst(PtrToIntInst &PI) {
return {nullptr, nullptr};
}
+PtrParts SplitPtrStructs::visitPtrToAddrInst(PtrToAddrInst &PA) {
+ Value *Ptr = PA.getPointerOperand();
+ if (!isSplitFatPtr(Ptr->getType()))
+ return {nullptr, nullptr};
+ IRB.SetInsertPoint(&PA);
+
+ auto [Rsrc, Off] = getPtrParts(Ptr);
+ Value *Res = IRB.CreateIntCast(Off, PA.getType(), /*isSigned=*/false);
+ copyMetadata(Res, &PA);
+ Res->takeName(&PA);
+ SplitUsers.insert(&PA);
+ PA.replaceAllUsesWith(Res);
+ return {nullptr, nullptr};
+}
+
PtrParts SplitPtrStructs::visitIntToPtrInst(IntToPtrInst &IP) {
if (!isSplitFatPtr(IP.getType()))
return {nullptr, nullptr};
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURewriteAGPRCopyMFMA.cpp b/llvm/lib/Target/AMDGPU/AMDGPURewriteAGPRCopyMFMA.cpp
index f580f43..c21a9a1 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURewriteAGPRCopyMFMA.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURewriteAGPRCopyMFMA.cpp
@@ -109,12 +109,17 @@ bool AMDGPURewriteAGPRCopyMFMAImpl::run(MachineFunction &MF) const {
// Find AV_* registers assigned to AGPRs.
const TargetRegisterClass *VirtRegRC = MRI.getRegClass(VReg);
- if (!TRI.isVectorSuperClass(VirtRegRC))
+ if (!TRI.hasAGPRs(VirtRegRC))
continue;
- const TargetRegisterClass *AssignedRC = TRI.getPhysRegBaseClass(PhysReg);
- if (!TRI.isAGPRClass(AssignedRC))
- continue;
+ const TargetRegisterClass *AssignedRC = VirtRegRC;
+ if (TRI.hasVGPRs(VirtRegRC)) {
+ // If this is an AV register, we have to check if the actual assignment is
+ // to an AGPR.
+ AssignedRC = TRI.getPhysRegBaseClass(PhysReg);
+ if (!TRI.isAGPRClass(AssignedRC))
+ continue;
+ }
LiveInterval &LI = LIS.getInterval(VReg);
diff --git a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
index ff8efd2..0d2feeb 100644
--- a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
+++ b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -4933,6 +4933,43 @@ bool AMDGPUAsmParser::validateOpSel(const MCInst &Inst) {
return false;
}
+ // Packed math FP32 instructions typically accept SGPRs or VGPRs as source
+ // operands. On gfx12+, if a source operand uses SGPRs, the HW can only read
+ // the first SGPR and use it for both the low and high operations.
+ if (isPackedFP32Inst(Opc) && isGFX12Plus()) {
+ int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
+ int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
+ int OpSelIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel);
+ int OpSelHiIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel_hi);
+
+ const MCOperand &Src0 = Inst.getOperand(Src0Idx);
+ const MCOperand &Src1 = Inst.getOperand(Src1Idx);
+ unsigned OpSel = Inst.getOperand(OpSelIdx).getImm();
+ unsigned OpSelHi = Inst.getOperand(OpSelHiIdx).getImm();
+
+ const MCRegisterInfo *TRI = getContext().getRegisterInfo();
+
+ auto VerifyOneSGPR = [OpSel, OpSelHi](unsigned Index) -> bool {
+ unsigned Mask = 1U << Index;
+ return ((OpSel & Mask) == 0) && ((OpSelHi & Mask) == 0);
+ };
+
+ if (Src0.isReg() && isSGPR(Src0.getReg(), TRI) &&
+ !VerifyOneSGPR(/*Index=*/0))
+ return false;
+ if (Src1.isReg() && isSGPR(Src1.getReg(), TRI) &&
+ !VerifyOneSGPR(/*Index=*/1))
+ return false;
+
+ int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
+ if (Src2Idx != -1) {
+ const MCOperand &Src2 = Inst.getOperand(Src2Idx);
+ if (Src2.isReg() && isSGPR(Src2.getReg(), TRI) &&
+ !VerifyOneSGPR(/*Index=*/2))
+ return false;
+ }
+ }
+
return true;
}
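A toy model of the rule enforced above (not the parser code itself): when a packed-FP32 source is an SGPR on gfx12+, neither its op_sel nor its op_sel_hi bit may be set, since the HW reads only the first SGPR for both halves.

#include <cassert>

static bool sgprSourceIsLegal(unsigned OpSel, unsigned OpSelHi, unsigned Index) {
  unsigned Mask = 1u << Index;
  return (OpSel & Mask) == 0 && (OpSelHi & Mask) == 0;
}

int main() {
  assert(sgprSourceIsLegal(/*OpSel=*/0b000, /*OpSelHi=*/0b000, /*Index=*/0));  // passes
  assert(!sgprSourceIsLegal(/*OpSel=*/0b001, /*OpSelHi=*/0b000, /*Index=*/0)); // rejected: op_sel bit set
  assert(!sgprSourceIsLegal(/*OpSel=*/0b000, /*OpSelHi=*/0b010, /*Index=*/1)); // rejected: op_sel_hi bit set
}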
diff --git a/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp b/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp
index 334afd3..ef63acc 100644
--- a/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp
@@ -368,46 +368,45 @@ static LaneBitmask findUseBetween(unsigned Reg, LaneBitmask LastUseMask,
////////////////////////////////////////////////////////////////////////////////
// GCNRPTarget
-GCNRPTarget::GCNRPTarget(const MachineFunction &MF, const GCNRegPressure &RP,
- bool CombineVGPRSavings)
- : RP(RP), CombineVGPRSavings(CombineVGPRSavings) {
+GCNRPTarget::GCNRPTarget(const MachineFunction &MF, const GCNRegPressure &RP)
+ : GCNRPTarget(RP, MF) {
const Function &F = MF.getFunction();
const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
- setRegLimits(ST.getMaxNumSGPRs(F), ST.getMaxNumVGPRs(F), MF);
+ setTarget(ST.getMaxNumSGPRs(F), ST.getMaxNumVGPRs(F));
}
GCNRPTarget::GCNRPTarget(unsigned NumSGPRs, unsigned NumVGPRs,
- const MachineFunction &MF, const GCNRegPressure &RP,
- bool CombineVGPRSavings)
- : RP(RP), CombineVGPRSavings(CombineVGPRSavings) {
- setRegLimits(NumSGPRs, NumVGPRs, MF);
+ const MachineFunction &MF, const GCNRegPressure &RP)
+ : GCNRPTarget(RP, MF) {
+ setTarget(NumSGPRs, NumVGPRs);
}
GCNRPTarget::GCNRPTarget(unsigned Occupancy, const MachineFunction &MF,
- const GCNRegPressure &RP, bool CombineVGPRSavings)
- : RP(RP), CombineVGPRSavings(CombineVGPRSavings) {
+ const GCNRegPressure &RP)
+ : GCNRPTarget(RP, MF) {
const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
unsigned DynamicVGPRBlockSize =
MF.getInfo<SIMachineFunctionInfo>()->getDynamicVGPRBlockSize();
- setRegLimits(ST.getMaxNumSGPRs(Occupancy, /*Addressable=*/false),
- ST.getMaxNumVGPRs(Occupancy, DynamicVGPRBlockSize), MF);
+ setTarget(ST.getMaxNumSGPRs(Occupancy, /*Addressable=*/false),
+ ST.getMaxNumVGPRs(Occupancy, DynamicVGPRBlockSize));
}
-void GCNRPTarget::setRegLimits(unsigned NumSGPRs, unsigned NumVGPRs,
- const MachineFunction &MF) {
+void GCNRPTarget::setTarget(unsigned NumSGPRs, unsigned NumVGPRs) {
const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
- unsigned DynamicVGPRBlockSize =
- MF.getInfo<SIMachineFunctionInfo>()->getDynamicVGPRBlockSize();
MaxSGPRs = std::min(ST.getAddressableNumSGPRs(), NumSGPRs);
MaxVGPRs = std::min(ST.getAddressableNumArchVGPRs(), NumVGPRs);
- MaxUnifiedVGPRs =
- ST.hasGFX90AInsts()
- ? std::min(ST.getAddressableNumVGPRs(DynamicVGPRBlockSize), NumVGPRs)
- : 0;
+ if (UnifiedRF) {
+ unsigned DynamicVGPRBlockSize =
+ MF.getInfo<SIMachineFunctionInfo>()->getDynamicVGPRBlockSize();
+ MaxUnifiedVGPRs =
+ std::min(ST.getAddressableNumVGPRs(DynamicVGPRBlockSize), NumVGPRs);
+ } else {
+ MaxUnifiedVGPRs = 0;
+ }
}
-bool GCNRPTarget::isSaveBeneficial(Register Reg,
- const MachineRegisterInfo &MRI) const {
+bool GCNRPTarget::isSaveBeneficial(Register Reg) const {
+ const MachineRegisterInfo &MRI = MF.getRegInfo();
const TargetRegisterClass *RC = MRI.getRegClass(Reg);
const TargetRegisterInfo *TRI = MRI.getTargetRegisterInfo();
const SIRegisterInfo *SRI = static_cast<const SIRegisterInfo *>(TRI);
@@ -416,16 +415,19 @@ bool GCNRPTarget::isSaveBeneficial(Register Reg,
return RP.getSGPRNum() > MaxSGPRs;
unsigned NumVGPRs =
SRI->isAGPRClass(RC) ? RP.getAGPRNum() : RP.getArchVGPRNum();
- return isVGPRBankSaveBeneficial(NumVGPRs);
+ // The addressable limit must always be respected.
+ if (NumVGPRs > MaxVGPRs)
+ return true;
+ // For unified RFs, the combined VGPR usage limit must be respected as well.
+ return UnifiedRF && RP.getVGPRNum(true) > MaxUnifiedVGPRs;
}
bool GCNRPTarget::satisfied() const {
- if (RP.getSGPRNum() > MaxSGPRs)
+ if (RP.getSGPRNum() > MaxSGPRs || RP.getVGPRNum(false) > MaxVGPRs)
return false;
- if (RP.getVGPRNum(false) > MaxVGPRs &&
- (!CombineVGPRSavings || !satisifiesVGPRBanksTarget()))
+ if (UnifiedRF && RP.getVGPRNum(true) > MaxUnifiedVGPRs)
return false;
- return satisfiesUnifiedTarget();
+ return true;
}
///////////////////////////////////////////////////////////////////////////////
diff --git a/llvm/lib/Target/AMDGPU/GCNRegPressure.h b/llvm/lib/Target/AMDGPU/GCNRegPressure.h
index ea33a22..a9c58bb 100644
--- a/llvm/lib/Target/AMDGPU/GCNRegPressure.h
+++ b/llvm/lib/Target/AMDGPU/GCNRegPressure.h
@@ -186,20 +186,22 @@ public:
/// Sets up the target such that the register pressure starting at \p RP does
/// not show register spilling on function \p MF (w.r.t. the function's
/// mininum target occupancy).
- GCNRPTarget(const MachineFunction &MF, const GCNRegPressure &RP,
- bool CombineVGPRSavings = false);
+ GCNRPTarget(const MachineFunction &MF, const GCNRegPressure &RP);
/// Sets up the target such that the register pressure starting at \p RP does
/// not use more than \p NumSGPRs SGPRs and \p NumVGPRs VGPRs on function \p
/// MF.
GCNRPTarget(unsigned NumSGPRs, unsigned NumVGPRs, const MachineFunction &MF,
- const GCNRegPressure &RP, bool CombineVGPRSavings = false);
+ const GCNRegPressure &RP);
/// Sets up the target such that the register pressure starting at \p RP does
/// not prevent achieving an occupancy of at least \p Occupancy on function
/// \p MF.
GCNRPTarget(unsigned Occupancy, const MachineFunction &MF,
- const GCNRegPressure &RP, bool CombineVGPRSavings = false);
+ const GCNRegPressure &RP);
+
+ /// Changes the target (same semantics as constructor).
+ void setTarget(unsigned NumSGPRs, unsigned NumVGPRs);
const GCNRegPressure &getCurrentRP() const { return RP; }
@@ -207,7 +209,7 @@ public:
/// Determines whether saving virtual register \p Reg will be beneficial
/// towards achieving the RP target.
- bool isSaveBeneficial(Register Reg, const MachineRegisterInfo &MRI) const;
+ bool isSaveBeneficial(Register Reg) const;
/// Saves virtual register \p Reg with lanemask \p Mask.
void saveReg(Register Reg, LaneBitmask Mask, const MachineRegisterInfo &MRI) {
@@ -227,15 +229,15 @@ public:
if (Target.MaxUnifiedVGPRs) {
OS << ", " << Target.RP.getVGPRNum(true) << '/' << Target.MaxUnifiedVGPRs
<< " VGPRs (unified)";
- } else if (Target.CombineVGPRSavings) {
- OS << ", " << Target.RP.getArchVGPRNum() + Target.RP.getAGPRNum() << '/'
- << 2 * Target.MaxVGPRs << " VGPRs (combined target)";
}
return OS;
}
#endif
private:
+ const MachineFunction &MF;
+ const bool UnifiedRF;
+
/// Current register pressure.
GCNRegPressure RP;
@@ -246,29 +248,10 @@ private:
/// Target number of overall VGPRs for subtargets with unified RFs. Always 0
/// for subtargets with non-unified RFs.
unsigned MaxUnifiedVGPRs;
- /// Whether we consider that the register allocator will be able to swap
- /// between ArchVGPRs and AGPRs by copying them to a super register class.
- /// Concretely, this allows savings in one of the VGPR banks to help toward
- /// savings in the other VGPR bank.
- bool CombineVGPRSavings;
-
- inline bool satisifiesVGPRBanksTarget() const {
- assert(CombineVGPRSavings && "only makes sense with combined savings");
- return RP.getArchVGPRNum() + RP.getAGPRNum() <= 2 * MaxVGPRs;
- }
-
- /// Always satisified when the subtarget doesn't have a unified RF.
- inline bool satisfiesUnifiedTarget() const {
- return !MaxUnifiedVGPRs || RP.getVGPRNum(true) <= MaxUnifiedVGPRs;
- }
-
- inline bool isVGPRBankSaveBeneficial(unsigned NumVGPRs) const {
- return NumVGPRs > MaxVGPRs || !satisfiesUnifiedTarget() ||
- (CombineVGPRSavings && !satisifiesVGPRBanksTarget());
- }
- void setRegLimits(unsigned MaxSGPRs, unsigned MaxVGPRs,
- const MachineFunction &MF);
+ GCNRPTarget(const GCNRegPressure &RP, const MachineFunction &MF)
+ : MF(MF), UnifiedRF(MF.getSubtarget<GCNSubtarget>().hasGFX90AInsts()),
+ RP(RP) {}
};
///////////////////////////////////////////////////////////////////////////////
diff --git a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
index 96d5668..254b75b 100644
--- a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
@@ -1086,7 +1086,8 @@ bool ClusteredLowOccStage::initGCNSchedStage() {
}
/// Allows to easily filter for this stage's debug output.
-#define REMAT_DEBUG(X) LLVM_DEBUG(dbgs() << "[PreRARemat] "; X;)
+#define REMAT_PREFIX "[PreRARemat] "
+#define REMAT_DEBUG(X) LLVM_DEBUG(dbgs() << REMAT_PREFIX; X;)
bool PreRARematStage::initGCNSchedStage() {
// FIXME: This pass will invalidate cached BBLiveInMap and MBBLiveIns for
@@ -1115,10 +1116,15 @@ bool PreRARematStage::initGCNSchedStage() {
rematerialize();
if (GCNTrackers)
DAG.RegionLiveOuts.buildLiveRegMap();
- REMAT_DEBUG(
- dbgs() << "Retrying function scheduling with new min. occupancy of "
- << AchievedOcc << " from rematerializing (original was "
- << DAG.MinOccupancy << ", target was " << TargetOcc << ")\n");
+ REMAT_DEBUG({
+ dbgs() << "Retrying function scheduling with new min. occupancy of "
+ << AchievedOcc << " from rematerializing (original was "
+ << DAG.MinOccupancy;
+ if (TargetOcc)
+ dbgs() << ", target was " << *TargetOcc;
+ dbgs() << ")\n";
+ });
+
if (AchievedOcc > DAG.MinOccupancy) {
DAG.MinOccupancy = AchievedOcc;
SIMachineFunctionInfo &MFI = *MF.getInfo<SIMachineFunctionInfo>();
@@ -1540,8 +1546,7 @@ bool ClusteredLowOccStage::shouldRevertScheduling(unsigned WavesAfter) {
bool PreRARematStage::shouldRevertScheduling(unsigned WavesAfter) {
return GCNSchedStage::shouldRevertScheduling(WavesAfter) ||
- mayCauseSpilling(WavesAfter) ||
- (IncreaseOccupancy && WavesAfter < TargetOcc);
+ mayCauseSpilling(WavesAfter) || (TargetOcc && WavesAfter < TargetOcc);
}
bool ILPInitialScheduleStage::shouldRevertScheduling(unsigned WavesAfter) {
@@ -1687,78 +1692,63 @@ bool PreRARematStage::allUsesAvailableAt(const MachineInstr *InstToRemat,
}
bool PreRARematStage::canIncreaseOccupancyOrReduceSpill() {
- REMAT_DEBUG({
- dbgs() << "Collecting rematerializable instructions in ";
- MF.getFunction().printAsOperand(dbgs(), false);
- dbgs() << '\n';
- });
+ const Function &F = MF.getFunction();
// Maps optimizable regions (i.e., regions at minimum and register-limited
// occupancy, or regions with spilling) to the target RP we would like to
// reach.
DenseMap<unsigned, GCNRPTarget> OptRegions;
- const Function &F = MF.getFunction();
- unsigned DynamicVGPRBlockSize =
- MF.getInfo<SIMachineFunctionInfo>()->getDynamicVGPRBlockSize();
-
- std::pair<unsigned, unsigned> WavesPerEU = ST.getWavesPerEU(F);
- const unsigned MaxSGPRsNoSpill = ST.getMaxNumSGPRs(F);
- const unsigned MaxVGPRsNoSpill = ST.getMaxNumVGPRs(F);
- const unsigned MaxSGPRsIncOcc =
- ST.getMaxNumSGPRs(DAG.MinOccupancy + 1, false);
- const unsigned MaxVGPRsIncOcc =
- ST.getMaxNumVGPRs(DAG.MinOccupancy + 1, DynamicVGPRBlockSize);
- IncreaseOccupancy = WavesPerEU.second > DAG.MinOccupancy;
-
- // Collect optimizable regions. If there is spilling in any region we will
- // just try to reduce spilling. Otherwise we will try to increase occupancy by
- // one in the whole function.
- for (unsigned I = 0, E = DAG.Regions.size(); I != E; ++I) {
- GCNRegPressure &RP = DAG.Pressure[I];
- // We allow ArchVGPR or AGPR savings to count as savings of the other kind
- // of VGPR only when trying to eliminate spilling. We cannot do this when
- // trying to increase occupancy since VGPR class swaps only occur later in
- // the register allocator i.e., the scheduler will not be able to reason
- // about these savings and will not report an increase in the achievable
- // occupancy, triggering rollbacks.
- GCNRPTarget Target(MaxSGPRsNoSpill, MaxVGPRsNoSpill, MF, RP,
- /*CombineVGPRSavings=*/true);
- if (!Target.satisfied() && IncreaseOccupancy) {
- // There is spilling in the region and we were so far trying to increase
- // occupancy. Strop trying that and focus on reducing spilling.
- IncreaseOccupancy = false;
- OptRegions.clear();
- } else if (IncreaseOccupancy) {
- // There is no spilling in the region, try to increase occupancy.
- Target = GCNRPTarget(MaxSGPRsIncOcc, MaxVGPRsIncOcc, MF, RP,
- /*CombineVGPRSavings=*/false);
+ unsigned MaxSGPRs = ST.getMaxNumSGPRs(F);
+ unsigned MaxVGPRs = ST.getMaxNumVGPRs(F);
+ auto ResetTargetRegions = [&]() {
+ OptRegions.clear();
+ for (unsigned I = 0, E = DAG.Regions.size(); I != E; ++I) {
+ const GCNRegPressure &RP = DAG.Pressure[I];
+ GCNRPTarget Target(MaxSGPRs, MaxVGPRs, MF, RP);
+ if (!Target.satisfied())
+ OptRegions.insert({I, Target});
}
- if (!Target.satisfied())
- OptRegions.insert({I, Target});
- }
- if (OptRegions.empty())
- return false;
+ };
-#ifndef NDEBUG
- if (IncreaseOccupancy) {
- REMAT_DEBUG(dbgs() << "Occupancy minimal (" << DAG.MinOccupancy
- << ") in regions:\n");
+ ResetTargetRegions();
+ if (!OptRegions.empty() || DAG.MinOccupancy >= MFI.getMaxWavesPerEU()) {
+ // In addition to register usage being above addressable limits, occupancy
+ // below the minimum is treated as "spilling" as well.
+ TargetOcc = std::nullopt;
} else {
- REMAT_DEBUG(dbgs() << "Spilling w.r.t. minimum target occupancy ("
- << WavesPerEU.first << ") in regions:\n");
- }
- for (unsigned I = 0, E = DAG.Regions.size(); I != E; ++I) {
- if (auto OptIt = OptRegions.find(I); OptIt != OptRegions.end())
- REMAT_DEBUG(dbgs() << " [" << I << "] " << OptIt->getSecond() << '\n');
+ // There is no spilling and room to improve occupancy; set up "increased
+ // occupancy targets" for all regions.
+ TargetOcc = DAG.MinOccupancy + 1;
+ unsigned VGPRBlockSize =
+ MF.getInfo<SIMachineFunctionInfo>()->getDynamicVGPRBlockSize();
+ MaxSGPRs = ST.getMaxNumSGPRs(*TargetOcc, false);
+ MaxVGPRs = ST.getMaxNumVGPRs(*TargetOcc, VGPRBlockSize);
+ ResetTargetRegions();
}
-#endif
-
- // When we are reducing spilling, the target is the minimum target number of
- // waves/EU determined by the subtarget. In cases where either one of
- // "amdgpu-num-sgpr" or "amdgpu-num-vgpr" are set on the function, the current
- // minimum region occupancy may be higher than the latter.
- TargetOcc = IncreaseOccupancy ? DAG.MinOccupancy + 1
- : std::max(DAG.MinOccupancy, WavesPerEU.first);
+ REMAT_DEBUG({
+ dbgs() << "Analyzing ";
+ MF.getFunction().printAsOperand(dbgs(), false);
+ dbgs() << ": ";
+ if (OptRegions.empty()) {
+ dbgs() << "no objective to achieve, occupancy is maximal at "
+ << MFI.getMaxWavesPerEU();
+ } else if (!TargetOcc) {
+ dbgs() << "reduce spilling (minimum target occupancy is "
+ << MFI.getMinWavesPerEU() << ')';
+ } else {
+ dbgs() << "increase occupancy from " << DAG.MinOccupancy << " to "
+ << TargetOcc;
+ }
+ dbgs() << '\n';
+ for (unsigned I = 0, E = DAG.Regions.size(); I != E; ++I) {
+ if (auto OptIt = OptRegions.find(I); OptIt != OptRegions.end()) {
+ dbgs() << REMAT_PREFIX << " [" << I << "] " << OptIt->getSecond()
+ << '\n';
+ }
+ }
+ });
+ if (OptRegions.empty())
+ return false;
// Accounts for a reduction in RP in an optimizable region. Returns whether we
// estimate that we have identified enough rematerialization opportunities to
@@ -1767,7 +1757,7 @@ bool PreRARematStage::canIncreaseOccupancyOrReduceSpill() {
auto ReduceRPInRegion = [&](auto OptIt, Register Reg, LaneBitmask Mask,
bool &Progress) -> bool {
GCNRPTarget &Target = OptIt->getSecond();
- if (!Target.isSaveBeneficial(Reg, DAG.MRI))
+ if (!Target.isSaveBeneficial(Reg))
return false;
Progress = true;
Target.saveReg(Reg, Mask, DAG.MRI);
@@ -1876,7 +1866,7 @@ bool PreRARematStage::canIncreaseOccupancyOrReduceSpill() {
}
}
- if (IncreaseOccupancy) {
+ if (TargetOcc) {
// We were trying to increase occupancy but failed, abort the stage.
REMAT_DEBUG(dbgs() << "Cannot increase occupancy\n");
Rematerializations.clear();
@@ -1979,7 +1969,9 @@ void PreRARematStage::rematerialize() {
// All regions impacted by at least one rematerialization must be rescheduled.
// Maximum pressure must also be recomputed for all regions where it changed
// non-predictably and checked against the target occupancy.
- AchievedOcc = TargetOcc;
+ unsigned DynamicVGPRBlockSize =
+ MF.getInfo<SIMachineFunctionInfo>()->getDynamicVGPRBlockSize();
+ AchievedOcc = MFI.getMaxWavesPerEU();
for (auto &[I, OriginalRP] : ImpactedRegions) {
bool IsEmptyRegion = DAG.Regions[I].first == DAG.Regions[I].second;
RescheduleRegions[I] = !IsEmptyRegion;
@@ -2003,9 +1995,8 @@ void PreRARematStage::rematerialize() {
}
}
DAG.Pressure[I] = RP;
- AchievedOcc = std::min(
- AchievedOcc, RP.getOccupancy(ST, MF.getInfo<SIMachineFunctionInfo>()
- ->getDynamicVGPRBlockSize()));
+ AchievedOcc =
+ std::min(AchievedOcc, RP.getOccupancy(ST, DynamicVGPRBlockSize));
}
REMAT_DEBUG(dbgs() << "Achieved occupancy " << AchievedOcc << "\n");
}
@@ -2035,7 +2026,7 @@ void PreRARematStage::finalizeGCNSchedStage() {
// which case we do not want to rollback either (the rescheduling was already
// reverted in PreRARematStage::shouldRevertScheduling in such cases).
unsigned MaxOcc = std::max(AchievedOcc, DAG.MinOccupancy);
- if (!IncreaseOccupancy || MaxOcc >= TargetOcc)
+ if (!TargetOcc || MaxOcc >= *TargetOcc)
return;
REMAT_DEBUG(dbgs() << "Rolling back all rematerializations\n");
diff --git a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h
index 32139a9..790370f 100644
--- a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h
+++ b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h
@@ -470,15 +470,12 @@ private:
/// After successful stage initialization, indicates which regions should be
/// rescheduled.
BitVector RescheduleRegions;
- /// Target occupancy the stage estimates is reachable through
- /// rematerialization. Greater than or equal to the pre-stage min occupancy.
- unsigned TargetOcc;
+ /// The target occupancy the stage is trying to achieve. Empty when the
+ /// objective is spilling reduction.
+ std::optional<unsigned> TargetOcc;
/// Achieved occupancy *only* through rematerializations (pre-rescheduling).
/// Smaller than or equal to the target occupancy.
unsigned AchievedOcc;
- /// Whether the stage is attempting to increase occupancy in the abscence of
- /// spilling.
- bool IncreaseOccupancy;
/// Returns whether remat can reduce spilling or increase function occupancy
/// by 1 through rematerialization. If it can do one, collects instructions in
diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index 0c653b1..962c276 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -2081,7 +2081,9 @@ SIFoldOperandsImpl::isClamp(const MachineInstr &MI) const {
case AMDGPU::V_MAX_F16_fake16_e64:
case AMDGPU::V_MAX_F64_e64:
case AMDGPU::V_MAX_NUM_F64_e64:
- case AMDGPU::V_PK_MAX_F16: {
+ case AMDGPU::V_PK_MAX_F16:
+ case AMDGPU::V_MAX_BF16_PSEUDO_e64:
+ case AMDGPU::V_PK_MAX_NUM_BF16: {
if (MI.mayRaiseFPException())
return nullptr;
@@ -2108,8 +2110,10 @@ SIFoldOperandsImpl::isClamp(const MachineInstr &MI) const {
// Having a 0 op_sel_hi would require swizzling the output in the source
// instruction, which we can't do.
- unsigned UnsetMods = (Op == AMDGPU::V_PK_MAX_F16) ? SISrcMods::OP_SEL_1
- : 0u;
+ unsigned UnsetMods =
+ (Op == AMDGPU::V_PK_MAX_F16 || Op == AMDGPU::V_PK_MAX_NUM_BF16)
+ ? SISrcMods::OP_SEL_1
+ : 0u;
if (Src0Mods != UnsetMods && Src1Mods != UnsetMods)
return nullptr;
return Src0;
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 8f44c03..e866bd4 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -3106,6 +3106,15 @@ SDValue SITargetLowering::LowerFormalArguments(
if (!IsKernel) {
CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, isVarArg);
CCInfo.AnalyzeFormalArguments(Splits, AssignFn);
+
+ // This assumes the registers are allocated by CCInfo in ascending order
+ // with no gaps.
+ Info->setNumWaveDispatchSGPRs(
+ CCInfo.getFirstUnallocated(AMDGPU::SGPR_32RegClass.getRegisters()));
+ Info->setNumWaveDispatchVGPRs(
+ CCInfo.getFirstUnallocated(AMDGPU::VGPR_32RegClass.getRegisters()));
+ } else if (Info->getNumKernargPreloadedSGPRs()) {
+ Info->setNumWaveDispatchSGPRs(Info->getNumUserSGPRs());
}
SmallVector<SDValue, 16> Chains;
@@ -6106,6 +6115,7 @@ bool SITargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
case MVT::f64:
return true;
case MVT::f16:
+ case MVT::bf16:
return Subtarget->has16BitInsts() && !denormalModeIsFlushAllF64F16(MF);
default:
break;
@@ -10877,6 +10887,13 @@ SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
}
}
+// Return whether the operation has the NoUnsignedWrap property.
+static bool isNoUnsignedWrap(SDValue Addr) {
+ return (Addr.getOpcode() == ISD::ADD &&
+ Addr->getFlags().hasNoUnsignedWrap()) ||
+ Addr->getOpcode() == ISD::OR;
+}
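Why nuw matters for the buffer-offset split below: on a GFX1250-style path the hardware zero-extends voffset and immoffset to 64 bits before adding them, so a wrapping 32-bit add cannot be split into the two fields. A tiny numeric illustration (made-up values):

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t Base = 0xFFFFFFF0u; // would-be voffset
  uint32_t Imm = 0x20u;        // would-be immoffset
  uint32_t Wrapped32 = Base + Imm;                 // 0x10: the 32-bit add wrapped
  uint64_t HwSum = uint64_t(Base) + uint64_t(Imm); // 0x100000010: what split fields would address
  std::printf("%#x vs %#llx\n", Wrapped32, (unsigned long long)HwSum);
  // The two disagree, so the offset is only split when the add carries nuw
  // (or is an 'or' of disjoint bits, which cannot wrap).
}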
+
bool SITargetLowering::shouldPreservePtrArith(const Function &F,
EVT PtrVT) const {
return UseSelectionDAGPTRADD && PtrVT == MVT::i64;
@@ -10898,8 +10915,14 @@ SITargetLowering::splitBufferOffsets(SDValue Offset, SelectionDAG &DAG) const {
if ((C1 = dyn_cast<ConstantSDNode>(N0)))
N0 = SDValue();
else if (DAG.isBaseWithConstantOffset(N0)) {
- C1 = cast<ConstantSDNode>(N0.getOperand(1));
- N0 = N0.getOperand(0);
+ // On GFX1250+, voffset and immoffset are zero-extended from 32 bits before
+ // being added, so we can only safely match a 32-bit addition with no
+ // unsigned overflow.
+ bool CheckNUW = AMDGPU::isGFX1250(*Subtarget);
+ if (!CheckNUW || isNoUnsignedWrap(N0)) {
+ C1 = cast<ConstantSDNode>(N0.getOperand(1));
+ N0 = N0.getOperand(0);
+ }
}
if (C1) {
@@ -16902,7 +16925,7 @@ SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI_,
if (RC) {
if (NumRegs > 1) {
- if (Idx >= RC->getNumRegs() || Idx + NumRegs - 1 > RC->getNumRegs())
+ if (Idx >= RC->getNumRegs() || Idx + NumRegs - 1 >= RC->getNumRegs())
return std::pair(0U, nullptr);
uint32_t Width = NumRegs * 32;
diff --git a/llvm/lib/Target/AMDGPU/SIInsertHardClauses.cpp b/llvm/lib/Target/AMDGPU/SIInsertHardClauses.cpp
index d8fe850..0a68512 100644
--- a/llvm/lib/Target/AMDGPU/SIInsertHardClauses.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInsertHardClauses.cpp
@@ -51,7 +51,7 @@ static cl::opt<unsigned>
namespace {
enum HardClauseType {
- // For GFX10:
+ // For GFX10 and GFX1250:
// Texture, buffer, global or scratch memory instructions.
HARDCLAUSE_VMEM,
@@ -102,7 +102,8 @@ public:
HardClauseType getHardClauseType(const MachineInstr &MI) {
if (MI.mayLoad() || (MI.mayStore() && ST->shouldClusterStores())) {
- if (ST->getGeneration() == AMDGPUSubtarget::GFX10) {
+ if (ST->getGeneration() == AMDGPUSubtarget::GFX10 ||
+ ST->hasGFX1250Insts()) {
if ((SIInstrInfo::isVMEM(MI) && !SIInstrInfo::isFLAT(MI)) ||
SIInstrInfo::isSegmentSpecificFLAT(MI)) {
if (ST->hasNSAClauseBug()) {
@@ -115,7 +116,6 @@ public:
if (SIInstrInfo::isFLAT(MI))
return HARDCLAUSE_FLAT;
} else {
- assert(ST->getGeneration() >= AMDGPUSubtarget::GFX11);
if (SIInstrInfo::isMIMG(MI)) {
const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(MI.getOpcode());
const AMDGPU::MIMGBaseOpcodeInfo *BaseInfo =
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index f20b22d..19e6bcf 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -18,6 +18,7 @@
#include "GCNSubtarget.h"
#include "SIMachineFunctionInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/LiveIntervals.h"
@@ -5534,6 +5535,15 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
}
}
+ // See SIInstrInfo::isLegalGFX12PlusPackedMathFP32Operand for more
+ // information.
+ if (AMDGPU::isPackedFP32Inst(Opcode) && AMDGPU::isGFX12Plus(ST)) {
+ for (unsigned I = 0; I < 3; ++I) {
+ if (!isLegalGFX12PlusPackedMathFP32Operand(MRI, MI, I))
+ return false;
+ }
+ }
+
return true;
}
@@ -6005,6 +6015,21 @@ bool SIInstrInfo::isLegalRegOperand(const MachineInstr &MI, unsigned OpIdx,
const MCOperandInfo OpInfo = MI.getDesc().operands()[OpIdx];
unsigned Opc = MI.getOpcode();
+ // See SIInstrInfo::isLegalGFX12PlusPackedMathFP32Operand for more
+ // information.
+ if (AMDGPU::isPackedFP32Inst(MI.getOpcode()) && AMDGPU::isGFX12Plus(ST) &&
+ MO.isReg() && RI.isSGPRReg(MRI, MO.getReg())) {
+ constexpr const AMDGPU::OpName OpNames[] = {
+ AMDGPU::OpName::src0, AMDGPU::OpName::src1, AMDGPU::OpName::src2};
+
+ for (auto [I, OpName] : enumerate(OpNames)) {
+ int SrcIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OpNames[I]);
+ if (static_cast<unsigned>(SrcIdx) == OpIdx &&
+ !isLegalGFX12PlusPackedMathFP32Operand(MRI, MI, I, &MO))
+ return false;
+ }
+ }
+
if (!isLegalRegOperand(MRI, OpInfo, MO))
return false;
@@ -6053,6 +6078,39 @@ bool SIInstrInfo::isLegalVSrcOperand(const MachineRegisterInfo &MRI,
return true;
}
+bool SIInstrInfo::isLegalGFX12PlusPackedMathFP32Operand(
+ const MachineRegisterInfo &MRI, const MachineInstr &MI, unsigned SrcN,
+ const MachineOperand *MO) const {
+ constexpr const unsigned NumOps = 3;
+ constexpr const AMDGPU::OpName OpNames[NumOps * 2] = {
+ AMDGPU::OpName::src0, AMDGPU::OpName::src1,
+ AMDGPU::OpName::src2, AMDGPU::OpName::src0_modifiers,
+ AMDGPU::OpName::src1_modifiers, AMDGPU::OpName::src2_modifiers};
+
+ assert(SrcN < NumOps);
+
+ if (!MO) {
+ int SrcIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OpNames[SrcN]);
+ if (SrcIdx == -1)
+ return true;
+ MO = &MI.getOperand(SrcIdx);
+ }
+
+ if (!MO->isReg() || !RI.isSGPRReg(MRI, MO->getReg()))
+ return true;
+
+ int ModsIdx =
+ AMDGPU::getNamedOperandIdx(MI.getOpcode(), OpNames[NumOps + SrcN]);
+ if (ModsIdx == -1)
+ return true;
+
+ unsigned Mods = MI.getOperand(ModsIdx).getImm();
+ bool OpSel = Mods & SISrcMods::OP_SEL_0;
+ bool OpSelHi = Mods & SISrcMods::OP_SEL_1;
+
+ return !OpSel && !OpSelHi;
+}
+
bool SIInstrInfo::isOperandLegal(const MachineInstr &MI, unsigned OpIdx,
const MachineOperand *MO) const {
const MachineFunction &MF = *MI.getParent()->getParent();
@@ -6390,6 +6448,15 @@ void SIInstrInfo::legalizeOperandsVOP3(MachineRegisterInfo &MRI,
if ((Opc == AMDGPU::V_FMAC_F32_e64 || Opc == AMDGPU::V_FMAC_F16_e64) &&
!RI.isVGPR(MRI, MI.getOperand(VOP3Idx[2]).getReg()))
legalizeOpWithMove(MI, VOP3Idx[2]);
+
+ // Fix the register class of packed FP32 instructions on gfx12+. See
+ // SIInstrInfo::isLegalGFX12PlusPackedMathFP32Operand for more information.
+ if (AMDGPU::isPackedFP32Inst(Opc) && AMDGPU::isGFX12Plus(ST)) {
+ for (unsigned I = 0; I < 3; ++I) {
+ if (!isLegalGFX12PlusPackedMathFP32Operand(MRI, MI, /*SrcN=*/I))
+ legalizeOpWithMove(MI, VOP3Idx[I]);
+ }
+ }
}
Register SIInstrInfo::readlaneVGPRToSGPR(
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
index e042b59..6b9403f 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -1287,6 +1287,19 @@ public:
const MachineOperand &MO) const;
bool isLegalRegOperand(const MachineInstr &MI, unsigned OpIdx,
const MachineOperand &MO) const;
+
+ /// Check if \p MO would be a legal operand for gfx12+ packed math FP32
+ /// instructions. Packed math FP32 instructions typically accept SGPRs or
+ /// VGPRs as source operands. On gfx12+, if a source operand uses SGPRs, the
+ /// HW can only read the first SGPR and use it for both the low and high
+ /// operations.
+ /// \p SrcN can be 0, 1, or 2, representing src0, src1, and src2,
+ /// respectively. If \p MO is nullptr, the operand corresponding to SrcN will
+ /// be used.
+ bool isLegalGFX12PlusPackedMathFP32Operand(
+ const MachineRegisterInfo &MRI, const MachineInstr &MI, unsigned SrcN,
+ const MachineOperand *MO = nullptr) const;
+
/// Legalize operands in \p MI by either commuting it or inserting a
/// copy of src1.
void legalizeOperandsVOP2(MachineRegisterInfo &MRI, MachineInstr &MI) const;
diff --git a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
index 9a1448f..8a11203 100644
--- a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
@@ -33,7 +33,7 @@ using namespace llvm;
// optimal RC for Opc and Dest of MFMA. In particular, there are high RP cases
// where it is better to produce the VGPR form (e.g. if there are VGPR users
// of the MFMA result).
-cl::opt<bool> MFMAVGPRForm(
+static cl::opt<bool> MFMAVGPRForm(
"amdgpu-mfma-vgpr-form", cl::Hidden,
cl::desc("Whether to force use VGPR for Opc and Dest of MFMA. If "
"unspecified, default to compiler heuristics"),
@@ -728,6 +728,8 @@ yaml::SIMachineFunctionInfo::SIMachineFunctionInfo(
MemoryBound(MFI.isMemoryBound()), WaveLimiter(MFI.needsWaveLimiter()),
HasSpilledSGPRs(MFI.hasSpilledSGPRs()),
HasSpilledVGPRs(MFI.hasSpilledVGPRs()),
+ NumWaveDispatchSGPRs(MFI.getNumWaveDispatchSGPRs()),
+ NumWaveDispatchVGPRs(MFI.getNumWaveDispatchVGPRs()),
HighBitsOf32BitAddress(MFI.get32BitAddressHighBits()),
Occupancy(MFI.getOccupancy()),
ScratchRSrcReg(regToString(MFI.getScratchRSrcReg(), TRI)),
@@ -784,6 +786,8 @@ bool SIMachineFunctionInfo::initializeBaseYamlFields(
WaveLimiter = YamlMFI.WaveLimiter;
HasSpilledSGPRs = YamlMFI.HasSpilledSGPRs;
HasSpilledVGPRs = YamlMFI.HasSpilledVGPRs;
+ NumWaveDispatchSGPRs = YamlMFI.NumWaveDispatchSGPRs;
+ NumWaveDispatchVGPRs = YamlMFI.NumWaveDispatchVGPRs;
BytesInStackArgArea = YamlMFI.BytesInStackArgArea;
ReturnsVoid = YamlMFI.ReturnsVoid;
IsWholeWaveFunction = YamlMFI.IsWholeWaveFunction;
diff --git a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h
index 08b0206..ca8f803 100644
--- a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h
@@ -270,6 +270,8 @@ struct SIMachineFunctionInfo final : public yaml::MachineFunctionInfo {
bool WaveLimiter = false;
bool HasSpilledSGPRs = false;
bool HasSpilledVGPRs = false;
+ uint16_t NumWaveDispatchSGPRs = 0;
+ uint16_t NumWaveDispatchVGPRs = 0;
uint32_t HighBitsOf32BitAddress = 0;
// TODO: 10 may be a better default since it's the maximum.
@@ -327,6 +329,8 @@ template <> struct MappingTraits<SIMachineFunctionInfo> {
YamlIO.mapOptional("waveLimiter", MFI.WaveLimiter, false);
YamlIO.mapOptional("hasSpilledSGPRs", MFI.HasSpilledSGPRs, false);
YamlIO.mapOptional("hasSpilledVGPRs", MFI.HasSpilledVGPRs, false);
+ YamlIO.mapOptional("numWaveDispatchSGPRs", MFI.NumWaveDispatchSGPRs, false);
+ YamlIO.mapOptional("numWaveDispatchVGPRs", MFI.NumWaveDispatchVGPRs, false);
YamlIO.mapOptional("scratchRSrcReg", MFI.ScratchRSrcReg,
StringValue("$private_rsrc_reg"));
YamlIO.mapOptional("frameOffsetReg", MFI.FrameOffsetReg,
@@ -465,6 +469,9 @@ private:
unsigned NumUserSGPRs = 0;
unsigned NumSystemSGPRs = 0;
+ unsigned NumWaveDispatchSGPRs = 0;
+ unsigned NumWaveDispatchVGPRs = 0;
+
bool HasSpilledSGPRs = false;
bool HasSpilledVGPRs = false;
bool HasNonSpillStackObjects = false;
@@ -991,6 +998,14 @@ public:
return UserSGPRInfo.getNumKernargPreloadSGPRs();
}
+ unsigned getNumWaveDispatchSGPRs() const { return NumWaveDispatchSGPRs; }
+
+ void setNumWaveDispatchSGPRs(unsigned Count) { NumWaveDispatchSGPRs = Count; }
+
+ unsigned getNumWaveDispatchVGPRs() const { return NumWaveDispatchVGPRs; }
+
+ void setNumWaveDispatchVGPRs(unsigned Count) { NumWaveDispatchVGPRs = Count; }
+
Register getPrivateSegmentWaveByteOffsetSystemSGPR() const {
return ArgInfo.PrivateSegmentWaveByteOffset.getRegister();
}
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
index 00dcb9b..1e3e9a2 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
@@ -3318,6 +3318,20 @@ unsigned getLdsDwGranularity(const MCSubtargetInfo &ST) {
return 128;
}
+bool isPackedFP32Inst(unsigned Opc) {
+ switch (Opc) {
+ case AMDGPU::V_PK_ADD_F32:
+ case AMDGPU::V_PK_ADD_F32_gfx12:
+ case AMDGPU::V_PK_MUL_F32:
+ case AMDGPU::V_PK_MUL_F32_gfx12:
+ case AMDGPU::V_PK_FMA_F32:
+ case AMDGPU::V_PK_FMA_F32_gfx12:
+ return true;
+ default:
+ return false;
+ }
+}
+
} // namespace AMDGPU
raw_ostream &operator<<(raw_ostream &OS,
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
index 1252e35..1bcd36c 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
@@ -1709,6 +1709,8 @@ bool isArgPassedInSGPR(const Argument *Arg);
bool isArgPassedInSGPR(const CallBase *CB, unsigned ArgNo);
+LLVM_READONLY bool isPackedFP32Inst(unsigned Opc);
+
LLVM_READONLY
bool isLegalSMRDEncodedUnsignedOffset(const MCSubtargetInfo &ST,
int64_t EncodedOffset);
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index ea99cc4..75d3cfa 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -802,6 +802,12 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM_,
setOperationAction(ISD::BSWAP, VT, Expand);
}
+ if (!Subtarget->isThumb1Only() && !Subtarget->hasV8_1MMainlineOps())
+ setOperationAction(ISD::SCMP, MVT::i32, Custom);
+
+ if (!Subtarget->hasV8_1MMainlineOps())
+ setOperationAction(ISD::UCMP, MVT::i32, Custom);
+
setOperationAction(ISD::ConstantFP, MVT::f32, Custom);
setOperationAction(ISD::ConstantFP, MVT::f64, Custom);
@@ -1634,6 +1640,10 @@ bool ARMTargetLowering::useSoftFloat() const {
return Subtarget->useSoftFloat();
}
+bool ARMTargetLowering::shouldExpandCmpUsingSelects(EVT VT) const {
+ return !Subtarget->isThumb1Only() && VT.getSizeInBits() <= 32;
+}
+
// FIXME: It might make sense to define the representative register class as the
// nearest super-register that has a non-null superset. For example, DPR_VFP2 is
// a super-register of SPR, and DPR is a superset if DPR_VFP2. Consequently,
@@ -10612,6 +10622,133 @@ SDValue ARMTargetLowering::LowerFP_TO_BF16(SDValue Op,
return DAG.getBitcast(MVT::i32, Res);
}
+SDValue ARMTargetLowering::LowerCMP(SDValue Op, SelectionDAG &DAG) const {
+ SDLoc dl(Op);
+ SDValue LHS = Op.getOperand(0);
+ SDValue RHS = Op.getOperand(1);
+
+  // Determine whether this is a signed or an unsigned comparison.
+ bool IsSigned = (Op.getOpcode() == ISD::SCMP);
+
+ // Special case for Thumb1 UCMP only
+ if (!IsSigned && Subtarget->isThumb1Only()) {
+ // For Thumb unsigned comparison, use this sequence:
+ // subs r2, r0, r1 ; r2 = LHS - RHS, sets flags
+ // sbc r2, r2 ; r2 = r2 - r2 - !carry
+ // cmp r1, r0 ; compare RHS with LHS
+ // sbc r1, r1 ; r1 = r1 - r1 - !carry
+ // subs r0, r2, r1 ; r0 = r2 - r1 (final result)
+
+ // First subtraction: LHS - RHS
+ SDValue Sub1WithFlags = DAG.getNode(
+ ARMISD::SUBC, dl, DAG.getVTList(MVT::i32, FlagsVT), LHS, RHS);
+ SDValue Sub1Result = Sub1WithFlags.getValue(0);
+ SDValue Flags1 = Sub1WithFlags.getValue(1);
+
+ // SUBE: Sub1Result - Sub1Result - !carry
+ // This gives 0 if LHS >= RHS (unsigned), -1 if LHS < RHS (unsigned)
+ SDValue Sbc1 =
+ DAG.getNode(ARMISD::SUBE, dl, DAG.getVTList(MVT::i32, FlagsVT),
+ Sub1Result, Sub1Result, Flags1);
+ SDValue Sbc1Result = Sbc1.getValue(0);
+
+ // Second comparison: RHS vs LHS (reverse comparison)
+ SDValue CmpFlags = DAG.getNode(ARMISD::CMP, dl, FlagsVT, RHS, LHS);
+
+ // SUBE: RHS - RHS - !carry
+ // This gives 0 if RHS <= LHS (unsigned), -1 if RHS > LHS (unsigned)
+ SDValue Sbc2 = DAG.getNode(
+ ARMISD::SUBE, dl, DAG.getVTList(MVT::i32, FlagsVT), RHS, RHS, CmpFlags);
+ SDValue Sbc2Result = Sbc2.getValue(0);
+
+ // Final subtraction: Sbc1Result - Sbc2Result (no flags needed)
+ SDValue Result =
+ DAG.getNode(ISD::SUB, dl, MVT::i32, Sbc1Result, Sbc2Result);
+ if (Op.getValueType() != MVT::i32)
+ Result = DAG.getSExtOrTrunc(Result, dl, Op.getValueType());
+
+ return Result;
+ }
+
+  // For the ARM assembly pattern:
+  //   subs r0, r0, r1   ; subtract RHS from LHS and set flags
+  //   movgt r0, #1      ; if LHS > RHS, set result to 1
+  //                     ;   (GT for signed, HI for unsigned)
+  //   mvnlt r0, #0      ; if LHS < RHS, set result to -1
+  //                     ;   (LT for signed, LO for unsigned)
+  //                     ; if LHS == RHS, result remains 0 from the subs
+
+ // Optimization: if RHS is a subtraction against 0, use ADDC instead of SUBC
+ unsigned Opcode = ARMISD::SUBC;
+
+ // Check if RHS is a subtraction against 0: (0 - X)
+ if (RHS.getOpcode() == ISD::SUB) {
+ SDValue SubLHS = RHS.getOperand(0);
+ SDValue SubRHS = RHS.getOperand(1);
+
+ // Check if it's 0 - X
+ if (isNullConstant(SubLHS)) {
+ bool CanUseAdd = false;
+ if (IsSigned) {
+ // For SCMP: only if X is known to never be INT_MIN (to avoid overflow)
+ if (RHS->getFlags().hasNoSignedWrap() || !DAG.computeKnownBits(SubRHS)
+ .getSignedMinValue()
+ .isMinSignedValue()) {
+ CanUseAdd = true;
+ }
+ } else {
+ // For UCMP: only if X is known to never be zero
+ if (DAG.isKnownNeverZero(SubRHS)) {
+ CanUseAdd = true;
+ }
+ }
+
+ if (CanUseAdd) {
+ Opcode = ARMISD::ADDC;
+ RHS = SubRHS; // Replace RHS with X, so we do LHS + X instead of
+ // LHS - (0 - X)
+ }
+ }
+ }
+
+ // Generate the operation with flags
+ SDValue OpWithFlags;
+ if (Opcode == ARMISD::ADDC) {
+ // Use ADDC: LHS + RHS (where RHS was 0 - X, now X)
+ OpWithFlags = DAG.getNode(ARMISD::ADDC, dl,
+ DAG.getVTList(MVT::i32, FlagsVT), LHS, RHS);
+ } else {
+ // Use ARMISD::SUBC to generate SUBS instruction (subtract with flags)
+ OpWithFlags = DAG.getNode(ARMISD::SUBC, dl,
+ DAG.getVTList(MVT::i32, FlagsVT), LHS, RHS);
+ }
+
+ SDValue OpResult = OpWithFlags.getValue(0); // The operation result
+ SDValue Flags = OpWithFlags.getValue(1); // The flags
+
+ // Constants for conditional moves
+ SDValue One = DAG.getConstant(1, dl, MVT::i32);
+ SDValue MinusOne = DAG.getAllOnesConstant(dl, MVT::i32);
+
+ // Select condition codes based on signed vs unsigned
+ ARMCC::CondCodes GTCond = IsSigned ? ARMCC::GT : ARMCC::HI;
+ ARMCC::CondCodes LTCond = IsSigned ? ARMCC::LT : ARMCC::LO;
+
+ // First conditional move: if greater than, set to 1
+ SDValue GTCondValue = DAG.getConstant(GTCond, dl, MVT::i32);
+ SDValue Result1 = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, OpResult, One,
+ GTCondValue, Flags);
+
+ // Second conditional move: if less than, set to -1
+ SDValue LTCondValue = DAG.getConstant(LTCond, dl, MVT::i32);
+ SDValue Result2 = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, Result1, MinusOne,
+ LTCondValue, Flags);
+
+ if (Op.getValueType() != MVT::i32)
+ Result2 = DAG.getSExtOrTrunc(Result2, dl, Op.getValueType());
+
+ return Result2;
+}
+
SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
LLVM_DEBUG(dbgs() << "Lowering node: "; Op.dump());
switch (Op.getOpcode()) {
@@ -10740,6 +10877,9 @@ SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::FP_TO_BF16:
return LowerFP_TO_BF16(Op, DAG);
case ARMISD::WIN__DBZCHK: return SDValue();
+ case ISD::UCMP:
+ case ISD::SCMP:
+ return LowerCMP(Op, DAG);
}
}
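For reference, the value this lowering materializes matches the generic three-way compare semantics of ISD::SCMP/ISD::UCMP. A minimal C++ model is sketched below; scmpRef and ucmpRef are illustrative names, not part of the patch, and the subs + movgt/mvnlt (or HI/LO) sequence above computes the same result without branches.

#include <cstdint>

// Reference semantics only: the cmov-based lowering produces the same -1/0/+1.
static int32_t scmpRef(int32_t LHS, int32_t RHS) {
  if (LHS > RHS) return 1;   // GT condition after the subs/cmp
  if (LHS < RHS) return -1;  // LT condition
  return 0;                  // equal: the subs result is already 0
}

static int32_t ucmpRef(uint32_t LHS, uint32_t RHS) {
  if (LHS > RHS) return 1;   // HI condition
  if (LHS < RHS) return -1;  // LO condition
  return 0;
}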
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h
index 825145d..a84a3cb 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.h
+++ b/llvm/lib/Target/ARM/ARMISelLowering.h
@@ -607,6 +607,8 @@ class VectorType;
bool preferZeroCompareBranch() const override { return true; }
+ bool shouldExpandCmpUsingSelects(EVT VT) const override;
+
bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;
bool hasAndNotCompare(SDValue V) const override {
@@ -904,6 +906,7 @@ class VectorType;
void LowerLOAD(SDNode *N, SmallVectorImpl<SDValue> &Results,
SelectionDAG &DAG) const;
SDValue LowerFP_TO_BF16(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerCMP(SDValue Op, SelectionDAG &DAG) const;
Register getRegisterByName(const char* RegName, LLT VT,
const MachineFunction &MF) const override;
diff --git a/llvm/lib/Target/AVR/AVRISelLowering.cpp b/llvm/lib/Target/AVR/AVRISelLowering.cpp
index 3955f2a..25ad9ec 100644
--- a/llvm/lib/Target/AVR/AVRISelLowering.cpp
+++ b/llvm/lib/Target/AVR/AVRISelLowering.cpp
@@ -669,7 +669,7 @@ SDValue AVRTargetLowering::getAVRCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
default: {
// Turn lhs < rhs with lhs constant into rhs >= lhs+1, this allows
// us to fold the constant into the cmp instruction.
- RHS = DAG.getConstant(C->getSExtValue() + 1, DL, VT);
+ RHS = DAG.getSignedConstant(C->getSExtValue() + 1, DL, VT);
CC = ISD::SETGE;
break;
}
@@ -713,7 +713,10 @@ SDValue AVRTargetLowering::getAVRCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
// Turn lhs < rhs with lhs constant into rhs >= lhs+1, this allows us to
// fold the constant into the cmp instruction.
if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
- RHS = DAG.getConstant(C->getSExtValue() + 1, DL, VT);
+      // An "icmp ugt i16 65535, %0" comparison should already have been
+      // converted to something else. Assert to make sure this assumption holds.
+ assert((!C->isAllOnes()) && "integer overflow in comparison transform");
+ RHS = DAG.getConstant(C->getZExtValue() + 1, DL, VT);
CC = ISD::SETUGE;
break;
}
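A short worked example of the rewrite used in both hunks above: a compare against a constant C is replaced by a compare against C + 1 with the relation flipped to >=, so the constant can fold into the cmp. The helper below is illustrative, not from the patch; the assert mirrors the overflow guard added for the unsigned case.

#include <cassert>
#include <cstdint>

// For unsigned 16-bit values, (C u< X) holds exactly when (X u>= C + 1),
// provided C + 1 does not wrap; C == 0xFFFF must have been handled earlier,
// which is what the isAllOnes() assert guarantees.
static bool ultViaUge(uint16_t C, uint16_t X) {
  assert(C != UINT16_MAX && "C u< X can never be true for C == 0xFFFF");
  return X >= static_cast<uint16_t>(C + 1); // e.g. (3 u< X) becomes (X u>= 4)
}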
diff --git a/llvm/lib/Target/AVR/AVRISelLowering.h b/llvm/lib/Target/AVR/AVRISelLowering.h
index 2ae22b2..301ce9c 100644
--- a/llvm/lib/Target/AVR/AVRISelLowering.h
+++ b/llvm/lib/Target/AVR/AVRISelLowering.h
@@ -94,6 +94,8 @@ public:
return ShiftLegalizationStrategy::LowerToLibcall;
}
+ bool softPromoteHalfType() const override { return true; }
+
private:
SDValue getAVRCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, SDValue &AVRcc,
SelectionDAG &DAG, SDLoc dl) const;
diff --git a/llvm/lib/Target/AVR/AVRTargetMachine.cpp b/llvm/lib/Target/AVR/AVRTargetMachine.cpp
index b75417a..02212d2 100644
--- a/llvm/lib/Target/AVR/AVRTargetMachine.cpp
+++ b/llvm/lib/Target/AVR/AVRTargetMachine.cpp
@@ -20,6 +20,7 @@
#include "AVR.h"
#include "AVRMachineFunctionInfo.h"
#include "AVRTargetObjectFile.h"
+#include "AVRTargetTransformInfo.h"
#include "MCTargetDesc/AVRMCTargetDesc.h"
#include "TargetInfo/AVRTargetInfo.h"
@@ -28,7 +29,7 @@
namespace llvm {
static const char *AVRDataLayout =
- "e-P1-p:16:8-i8:8-i16:8-i32:8-i64:8-f32:8-f64:8-n8-a:8";
+ "e-P1-p:16:8-i8:8-i16:8-i32:8-i64:8-f32:8-f64:8-n8-n16:8-a:8";
/// Processes a CPU name.
static StringRef getCPU(StringRef CPU) {
@@ -62,7 +63,9 @@ namespace {
class AVRPassConfig : public TargetPassConfig {
public:
AVRPassConfig(AVRTargetMachine &TM, PassManagerBase &PM)
- : TargetPassConfig(TM, PM) {}
+ : TargetPassConfig(TM, PM) {
+ EnableLoopTermFold = true;
+ }
AVRTargetMachine &getAVRTargetMachine() const {
return getTM<AVRTargetMachine>();
@@ -107,6 +110,11 @@ const AVRSubtarget *AVRTargetMachine::getSubtargetImpl(const Function &) const {
return &SubTarget;
}
+TargetTransformInfo
+AVRTargetMachine::getTargetTransformInfo(const Function &F) const {
+ return TargetTransformInfo(std::make_unique<AVRTTIImpl>(this, F));
+}
+
MachineFunctionInfo *AVRTargetMachine::createMachineFunctionInfo(
BumpPtrAllocator &Allocator, const Function &F,
const TargetSubtargetInfo *STI) const {
diff --git a/llvm/lib/Target/AVR/AVRTargetMachine.h b/llvm/lib/Target/AVR/AVRTargetMachine.h
index 167d007..9452b3d 100644
--- a/llvm/lib/Target/AVR/AVRTargetMachine.h
+++ b/llvm/lib/Target/AVR/AVRTargetMachine.h
@@ -48,6 +48,8 @@ public:
createMachineFunctionInfo(BumpPtrAllocator &Allocator, const Function &F,
const TargetSubtargetInfo *STI) const override;
+ TargetTransformInfo getTargetTransformInfo(const Function &F) const override;
+
bool isNoopAddrSpaceCast(unsigned SrcAs, unsigned DestAs) const override {
// While AVR has different address spaces, they are all represented by
// 16-bit pointers that can be freely casted between (of course, a pointer
diff --git a/llvm/lib/Target/AVR/AVRTargetTransformInfo.cpp b/llvm/lib/Target/AVR/AVRTargetTransformInfo.cpp
new file mode 100644
index 0000000..4dd8660
--- /dev/null
+++ b/llvm/lib/Target/AVR/AVRTargetTransformInfo.cpp
@@ -0,0 +1,25 @@
+//===-- AVRTargetTransformInfo.cpp - AVR specific TTI ---------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "AVRTargetTransformInfo.h"
+#include "llvm/CodeGen/CostTable.h"
+
+using namespace llvm;
+
+bool AVRTTIImpl::isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
+ const TargetTransformInfo::LSRCost &C2) const {
+ // AVR specific here are "instruction number 1st priority".
+ // If we need to emit adds inside the loop to add up base registers, then
+ // we need at least one extra temporary register.
+  // AVR-specific here: instruction count has first priority.
+ unsigned C2NumRegs = C2.NumRegs + (C2.NumBaseAdds != 0);
+ return std::tie(C1.Insns, C1NumRegs, C1.AddRecCost, C1.NumIVMuls,
+ C1.NumBaseAdds, C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
+ std::tie(C2.Insns, C2NumRegs, C2.AddRecCost, C2.NumIVMuls,
+ C2.NumBaseAdds, C2.ScaleCost, C2.ImmCost, C2.SetupCost);
+}
diff --git a/llvm/lib/Target/AVR/AVRTargetTransformInfo.h b/llvm/lib/Target/AVR/AVRTargetTransformInfo.h
new file mode 100644
index 0000000..0daeeb8
--- /dev/null
+++ b/llvm/lib/Target/AVR/AVRTargetTransformInfo.h
@@ -0,0 +1,51 @@
+//===- AVRTargetTransformInfo.h - AVR specific TTI --------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file defines a TargetTransformInfoImplBase conforming object specific
+/// to the AVR target machine. It uses the target's detailed information to
+/// provide more precise answers to certain TTI queries, while letting the
+/// target independent and default TTI implementations handle the rest.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_AVR_AVRTARGETTRANSFORMINFO_H
+#define LLVM_LIB_TARGET_AVR_AVRTARGETTRANSFORMINFO_H
+
+#include "AVRSubtarget.h"
+#include "AVRTargetMachine.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/CodeGen/BasicTTIImpl.h"
+#include "llvm/IR/Function.h"
+#include <optional>
+
+namespace llvm {
+
+class AVRTTIImpl final : public BasicTTIImplBase<AVRTTIImpl> {
+ using BaseT = BasicTTIImplBase<AVRTTIImpl>;
+ using TTI = TargetTransformInfo;
+
+ friend BaseT;
+
+ const AVRSubtarget *ST;
+ const AVRTargetLowering *TLI;
+
+ const AVRSubtarget *getST() const { return ST; }
+ const AVRTargetLowering *getTLI() const { return TLI; }
+
+public:
+ explicit AVRTTIImpl(const AVRTargetMachine *TM, const Function &F)
+ : BaseT(TM, F.getDataLayout()), ST(TM->getSubtargetImpl(F)),
+ TLI(ST->getTargetLowering()) {}
+
+ bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
+ const TargetTransformInfo::LSRCost &C2) const override;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_LIB_TARGET_AVR_AVRTARGETTRANSFORMINFO_H
diff --git a/llvm/lib/Target/AVR/CMakeLists.txt b/llvm/lib/Target/AVR/CMakeLists.txt
index 781dac0..a31c545 100644
--- a/llvm/lib/Target/AVR/CMakeLists.txt
+++ b/llvm/lib/Target/AVR/CMakeLists.txt
@@ -29,11 +29,13 @@ add_llvm_target(AVRCodeGen
AVRSubtarget.cpp
AVRTargetMachine.cpp
AVRTargetObjectFile.cpp
+ AVRTargetTransformInfo.cpp
DEPENDS
intrinsics_gen
LINK_COMPONENTS
+ Analysis
AVRDesc
AVRInfo
AsmPrinter
@@ -44,6 +46,8 @@ add_llvm_target(AVRCodeGen
SelectionDAG
Support
Target
+ TargetParser
+ TransformUtils
ADD_TO_COMPONENT
AVR
diff --git a/llvm/lib/Target/DirectX/DXILOpLowering.cpp b/llvm/lib/Target/DirectX/DXILOpLowering.cpp
index 0ec15a6..c10a1f5 100644
--- a/llvm/lib/Target/DirectX/DXILOpLowering.cpp
+++ b/llvm/lib/Target/DirectX/DXILOpLowering.cpp
@@ -746,7 +746,7 @@ public:
IRBuilder<> &IRB = OpBuilder.getIRB();
return replaceFunction(F, [&](CallInst *CI) -> Error {
IRB.SetInsertPoint(CI);
- Value *Ptr = CI->getArgOperand(1);
+ Value *Ptr = CI->getArgOperand(0);
assert(Ptr->getType()->isPointerTy() &&
"Expected operand of lifetime intrinsic to be a pointer");
diff --git a/llvm/lib/Target/DirectX/DXILWriter/DXILWriterPass.cpp b/llvm/lib/Target/DirectX/DXILWriter/DXILWriterPass.cpp
index 1bd5dd7..1eb03bf 100644
--- a/llvm/lib/Target/DirectX/DXILWriter/DXILWriterPass.cpp
+++ b/llvm/lib/Target/DirectX/DXILWriter/DXILWriterPass.cpp
@@ -13,11 +13,15 @@
#include "DXILWriterPass.h"
#include "DXILBitcodeWriter.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/ModuleSummaryAnalysis.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/InitializePasses.h"
@@ -54,49 +58,81 @@ public:
};
static void legalizeLifetimeIntrinsics(Module &M) {
- for (Function &F : M) {
- Intrinsic::ID IID = F.getIntrinsicID();
- if (IID != Intrinsic::lifetime_start && IID != Intrinsic::lifetime_end)
+ LLVMContext &Ctx = M.getContext();
+ Type *I64Ty = IntegerType::get(Ctx, 64);
+ Type *PtrTy = PointerType::get(Ctx, 0);
+ Intrinsic::ID LifetimeIIDs[2] = {Intrinsic::lifetime_start,
+ Intrinsic::lifetime_end};
+ for (Intrinsic::ID &IID : LifetimeIIDs) {
+ Function *F = M.getFunction(Intrinsic::getName(IID, {PtrTy}, &M));
+ if (!F)
continue;
- // Lifetime intrinsics in LLVM 3.7 do not have the memory FnAttr
- F.removeFnAttr(Attribute::Memory);
-
- // Lifetime intrinsics in LLVM 3.7 do not have mangled names
- F.setName(Intrinsic::getBaseName(IID));
-
- // LLVM 3.7 Lifetime intrinics require an i8* operand, so we insert bitcasts
- // to ensure that is the case
- for (auto *User : make_early_inc_range(F.users())) {
- CallInst *CI = dyn_cast<CallInst>(User);
- assert(CI && "Expected user of a lifetime intrinsic function to be a "
- "lifetime intrinsic call");
- Value *PtrOperand = CI->getArgOperand(1);
- PointerType *PtrTy = cast<PointerType>(PtrOperand->getType());
+ // Get or insert an LLVM 3.7-compliant lifetime intrinsic function of the
+ // form `void @llvm.lifetime.[start/end](i64, ptr)` with the NoUnwind
+ // attribute
+ AttributeList Attr;
+ Attr = Attr.addFnAttribute(Ctx, Attribute::NoUnwind);
+ FunctionCallee LifetimeCallee = M.getOrInsertFunction(
+ Intrinsic::getBaseName(IID), Attr, Type::getVoidTy(Ctx), I64Ty, PtrTy);
+
+ // Replace all calls to lifetime intrinsics with calls to the
+ // LLVM 3.7-compliant version of the lifetime intrinsic
+ for (User *U : make_early_inc_range(F->users())) {
+ CallInst *CI = dyn_cast<CallInst>(U);
+ assert(CI &&
+ "Expected user of a lifetime intrinsic function to be a CallInst");
+
+      // LLVM 3.7 lifetime intrinsics require an i8* operand, so we insert
+ // a bitcast to ensure that is the case
+ Value *PtrOperand = CI->getArgOperand(0);
+ PointerType *PtrOpPtrTy = cast<PointerType>(PtrOperand->getType());
Value *NoOpBitCast = CastInst::Create(Instruction::BitCast, PtrOperand,
- PtrTy, "", CI->getIterator());
- CI->setArgOperand(1, NoOpBitCast);
+ PtrOpPtrTy, "", CI->getIterator());
+
+ // LLVM 3.7 lifetime intrinsics have an explicit size operand, whose value
+ // we can obtain from the pointer operand which must be an AllocaInst (as
+ // of https://github.com/llvm/llvm-project/pull/149310)
+ AllocaInst *AI = dyn_cast<AllocaInst>(PtrOperand);
+ assert(AI &&
+ "The pointer operand of a lifetime intrinsic call must be an "
+ "AllocaInst");
+ std::optional<TypeSize> AllocSize =
+ AI->getAllocationSize(CI->getDataLayout());
+ assert(AllocSize.has_value() &&
+ "Expected the allocation size of AllocaInst to be known");
+ CallInst *NewCI = CallInst::Create(
+ LifetimeCallee,
+ {ConstantInt::get(I64Ty, AllocSize.value().getFixedValue()),
+ NoOpBitCast},
+ "", CI->getIterator());
+ for (Attribute ParamAttr : CI->getParamAttributes(0))
+ NewCI->addParamAttr(1, ParamAttr);
+
+ CI->eraseFromParent();
}
+
+ F->eraseFromParent();
}
}
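In isolation, the call this legalization builds has the shape sketched below. This is a hedged sketch, not the pass itself: M, B, and AI are assumed to be an existing Module, IRBuilder, and statically sized AllocaInst, and emitLegacyLifetimeStart is an illustrative name. It produces a 3.7-style `call void @llvm.lifetime.start(i64 <size>, ptr)` in place of the modern single-pointer form.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
using namespace llvm;

// Sketch only: build the legacy two-operand lifetime.start call for an alloca.
static CallInst *emitLegacyLifetimeStart(Module &M, IRBuilder<> &B,
                                         AllocaInst *AI) {
  LLVMContext &Ctx = M.getContext();
  Type *I64Ty = Type::getInt64Ty(Ctx);
  Type *PtrTy = PointerType::get(Ctx, /*AddressSpace=*/0);
  FunctionCallee Callee = M.getOrInsertFunction(
      "llvm.lifetime.start", Type::getVoidTy(Ctx), I64Ty, PtrTy);
  // Assumes a statically sized alloca, as the pass itself asserts.
  uint64_t Size = AI->getAllocationSize(M.getDataLayout())->getFixedValue();
  return B.CreateCall(Callee, {B.getInt64(Size), AI});
}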
static void removeLifetimeIntrinsics(Module &M) {
- for (Function &F : make_early_inc_range(M)) {
- if (Intrinsic::ID IID = F.getIntrinsicID();
- IID != Intrinsic::lifetime_start && IID != Intrinsic::lifetime_end)
+ Intrinsic::ID LifetimeIIDs[2] = {Intrinsic::lifetime_start,
+ Intrinsic::lifetime_end};
+ for (Intrinsic::ID &IID : LifetimeIIDs) {
+ Function *F = M.getFunction(Intrinsic::getBaseName(IID));
+ if (!F)
continue;
- for (User *U : make_early_inc_range(F.users())) {
- LifetimeIntrinsic *LI = dyn_cast<LifetimeIntrinsic>(U);
- assert(LI && "Expected user of lifetime intrinsic function to be "
- "a LifetimeIntrinsic instruction");
- BitCastInst *BCI = dyn_cast<BitCastInst>(LI->getArgOperand(1));
- assert(BCI && "Expected pointer operand of LifetimeIntrinsic to be a "
- "BitCastInst");
- LI->eraseFromParent();
+ for (User *U : make_early_inc_range(F->users())) {
+ CallInst *CI = dyn_cast<CallInst>(U);
+ assert(CI && "Expected user of lifetime function to be a CallInst");
+ BitCastInst *BCI = dyn_cast<BitCastInst>(CI->getArgOperand(1));
+ assert(BCI && "Expected pointer operand of CallInst to be a BitCastInst");
+ CI->eraseFromParent();
BCI->eraseFromParent();
}
- F.eraseFromParent();
+ F->eraseFromParent();
}
}
diff --git a/llvm/lib/Target/DirectX/DirectXTargetTransformInfo.cpp b/llvm/lib/Target/DirectX/DirectXTargetTransformInfo.cpp
index ffd900c..5153d24 100644
--- a/llvm/lib/Target/DirectX/DirectXTargetTransformInfo.cpp
+++ b/llvm/lib/Target/DirectX/DirectXTargetTransformInfo.cpp
@@ -56,6 +56,8 @@ bool DirectXTTIImpl::isTargetIntrinsicTriviallyScalarizable(
case Intrinsic::dx_wave_reduce_sum:
case Intrinsic::dx_wave_reduce_umax:
case Intrinsic::dx_wave_reduce_usum:
+ case Intrinsic::dx_imad:
+ case Intrinsic::dx_umad:
return true;
default:
return false;
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index a5bf0e5..6583a0f 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -6729,8 +6729,7 @@ static bool CC_LoongArchAssign2GRLen(unsigned GRLen, CCState &State,
static bool CC_LoongArch(const DataLayout &DL, LoongArchABI::ABI ABI,
unsigned ValNo, MVT ValVT,
CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
- CCState &State, bool IsFixed, bool IsRet,
- Type *OrigTy) {
+ CCState &State, bool IsRet, Type *OrigTy) {
unsigned GRLen = DL.getLargestLegalIntTypeSizeInBits();
assert((GRLen == 32 || GRLen == 64) && "Unspport GRLen");
MVT GRLenVT = GRLen == 32 ? MVT::i32 : MVT::i64;
@@ -6752,7 +6751,7 @@ static bool CC_LoongArch(const DataLayout &DL, LoongArchABI::ABI ABI,
case LoongArchABI::ABI_LP64F:
case LoongArchABI::ABI_ILP32D:
case LoongArchABI::ABI_LP64D:
- UseGPRForFloat = !IsFixed;
+ UseGPRForFloat = ArgFlags.isVarArg();
break;
case LoongArchABI::ABI_ILP32S:
case LoongArchABI::ABI_LP64S:
@@ -6766,7 +6765,8 @@ static bool CC_LoongArch(const DataLayout &DL, LoongArchABI::ABI ABI,
// will not be passed by registers if the original type is larger than
// 2*GRLen, so the register alignment rule does not apply.
unsigned TwoGRLenInBytes = (2 * GRLen) / 8;
- if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoGRLenInBytes &&
+ if (ArgFlags.isVarArg() &&
+ ArgFlags.getNonZeroOrigAlign() == TwoGRLenInBytes &&
DL.getTypeAllocSize(OrigTy) == TwoGRLenInBytes) {
unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
// Skip 'odd' register if necessary.
@@ -6916,7 +6916,7 @@ void LoongArchTargetLowering::analyzeInputArgs(
LoongArchABI::ABI ABI =
MF.getSubtarget<LoongArchSubtarget>().getTargetABI();
if (Fn(MF.getDataLayout(), ABI, i, ArgVT, CCValAssign::Full, Ins[i].Flags,
- CCInfo, /*IsFixed=*/true, IsRet, ArgTy)) {
+ CCInfo, IsRet, ArgTy)) {
LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type " << ArgVT
<< '\n');
llvm_unreachable("");
@@ -6934,7 +6934,7 @@ void LoongArchTargetLowering::analyzeOutputArgs(
LoongArchABI::ABI ABI =
MF.getSubtarget<LoongArchSubtarget>().getTargetABI();
if (Fn(MF.getDataLayout(), ABI, i, ArgVT, CCValAssign::Full, Outs[i].Flags,
- CCInfo, Outs[i].IsFixed, IsRet, OrigTy)) {
+ CCInfo, IsRet, OrigTy)) {
LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type " << ArgVT
<< "\n");
llvm_unreachable("");
@@ -7647,8 +7647,7 @@ bool LoongArchTargetLowering::CanLowerReturn(
LoongArchABI::ABI ABI =
MF.getSubtarget<LoongArchSubtarget>().getTargetABI();
if (CC_LoongArch(MF.getDataLayout(), ABI, i, Outs[i].VT, CCValAssign::Full,
- Outs[i].Flags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true,
- nullptr))
+ Outs[i].Flags, CCInfo, /*IsRet=*/true, nullptr))
return false;
}
return true;
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
index 6b49a98f..f79ba74 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
@@ -330,7 +330,7 @@ private:
unsigned ValNo, MVT ValVT,
CCValAssign::LocInfo LocInfo,
ISD::ArgFlagsTy ArgFlags, CCState &State,
- bool IsFixed, bool IsRet, Type *OrigTy);
+ bool IsRet, Type *OrigTy);
void analyzeInputArgs(MachineFunction &MF, CCState &CCInfo,
const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
diff --git a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp
index fda9d97..ca5d27d 100644
--- a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp
+++ b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp
@@ -254,7 +254,8 @@ bool LoongArchAsmBackend::relaxAlign(MCFragment &F, unsigned &Size) {
MCFixup::create(0, Expr, FirstLiteralRelocationKind + ELF::R_LARCH_ALIGN);
F.setVarFixups({Fixup});
F.setLinkerRelaxable();
- F.getParent()->setLinkerRelaxable();
+ if (!F.getParent()->isLinkerRelaxable())
+ F.getParent()->setFirstLinkerRelaxable(F.getLayoutOrder());
return true;
}
diff --git a/llvm/lib/Target/Mips/MipsCCState.cpp b/llvm/lib/Target/Mips/MipsCCState.cpp
index 9e8cd2e..13237c5 100644
--- a/llvm/lib/Target/Mips/MipsCCState.cpp
+++ b/llvm/lib/Target/Mips/MipsCCState.cpp
@@ -128,12 +128,10 @@ void MipsCCState::PreAnalyzeReturnValue(EVT ArgVT) {
OriginalRetWasFloatVector.push_back(originalEVTTypeIsVectorFloat(ArgVT));
}
-void MipsCCState::PreAnalyzeCallOperand(const Type *ArgTy, bool IsFixed,
- const char *Func) {
+void MipsCCState::PreAnalyzeCallOperand(const Type *ArgTy, const char *Func) {
OriginalArgWasF128.push_back(originalTypeIsF128(ArgTy, Func));
OriginalArgWasFloat.push_back(ArgTy->isFloatingPointTy());
OriginalArgWasFloatVector.push_back(ArgTy->isVectorTy());
- CallOperandIsFixed.push_back(IsFixed);
}
/// Identify lowered values that originated from f128, float and sret to vXfXX
@@ -148,7 +146,6 @@ void MipsCCState::PreAnalyzeCallOperands(
OriginalArgWasF128.push_back(originalTypeIsF128(FuncArg.Ty, Func));
OriginalArgWasFloat.push_back(FuncArg.Ty->isFloatingPointTy());
OriginalArgWasFloatVector.push_back(FuncArg.Ty->isVectorTy());
- CallOperandIsFixed.push_back(Outs[i].IsFixed);
}
}
diff --git a/llvm/lib/Target/Mips/MipsCCState.h b/llvm/lib/Target/Mips/MipsCCState.h
index 4229da5..30b68e8 100644
--- a/llvm/lib/Target/Mips/MipsCCState.h
+++ b/llvm/lib/Target/Mips/MipsCCState.h
@@ -36,7 +36,7 @@ public:
static bool originalEVTTypeIsVectorFloat(EVT Ty);
static bool originalTypeIsVectorFloat(const Type *Ty);
- void PreAnalyzeCallOperand(const Type *ArgTy, bool IsFixed, const char *Func);
+ void PreAnalyzeCallOperand(const Type *ArgTy, const char *Func);
void PreAnalyzeFormalArgument(const Type *ArgTy, ISD::ArgFlagsTy Flags);
void PreAnalyzeReturnValue(EVT ArgVT);
@@ -86,10 +86,6 @@ private:
/// vector.
SmallVector<bool, 4> OriginalRetWasFloatVector;
- /// Records whether the value was a fixed argument.
- /// See ISD::OutputArg::IsFixed,
- SmallVector<bool, 4> CallOperandIsFixed;
-
// Used to handle MIPS16-specific calling convention tweaks.
// FIXME: This should probably be a fully fledged calling convention.
SpecialCallingConvType SpecialCallingConv;
@@ -106,7 +102,6 @@ public:
OriginalArgWasF128.clear();
OriginalArgWasFloat.clear();
OriginalArgWasFloatVector.clear();
- CallOperandIsFixed.clear();
PreAnalyzeCallOperands(Outs, FuncArgs, Func);
}
@@ -213,7 +208,6 @@ public:
bool WasOriginalRetVectorFloat(unsigned ValNo) const {
return OriginalRetWasFloatVector[ValNo];
}
- bool IsCallOperandFixed(unsigned ValNo) { return CallOperandIsFixed[ValNo]; }
SpecialCallingConvType getSpecialCallingConv() { return SpecialCallingConv; }
};
}
diff --git a/llvm/lib/Target/Mips/MipsCallLowering.cpp b/llvm/lib/Target/Mips/MipsCallLowering.cpp
index 555773a..fa49108 100644
--- a/llvm/lib/Target/Mips/MipsCallLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsCallLowering.cpp
@@ -47,7 +47,7 @@ struct MipsOutgoingValueAssigner : public CallLowering::OutgoingValueAssigner {
if (IsReturn)
State.PreAnalyzeReturnValue(EVT::getEVT(Info.Ty));
else
- State.PreAnalyzeCallOperand(Info.Ty, Info.IsFixed, Func);
+ State.PreAnalyzeCallOperand(Info.Ty, Func);
return CallLowering::OutgoingValueAssigner::assignArg(
ValNo, OrigVT, ValVT, LocVT, LocInfo, Info, Flags, State);
diff --git a/llvm/lib/Target/Mips/MipsCallingConv.td b/llvm/lib/Target/Mips/MipsCallingConv.td
index 39e184a..0e5c16c 100644
--- a/llvm/lib/Target/Mips/MipsCallingConv.td
+++ b/llvm/lib/Target/Mips/MipsCallingConv.td
@@ -29,12 +29,6 @@ class CCIfOrigArgWasFloat<CCAction A>
class CCIfOrigArgWasF128<CCAction A>
: CCIf<"static_cast<MipsCCState *>(&State)->WasOriginalArgF128(ValNo)", A>;
-/// Match if this specific argument is a vararg.
-/// This is slightly different fro CCIfIsVarArg which matches if any argument is
-/// a vararg.
-class CCIfArgIsVarArg<CCAction A>
- : CCIf<"!static_cast<MipsCCState *>(&State)->IsCallOperandFixed(ValNo)", A>;
-
/// Match if the return was a floating point vector.
class CCIfOrigArgWasNotVectorFloat<CCAction A>
: CCIf<"!static_cast<MipsCCState *>(&State)"
@@ -344,7 +338,7 @@ def CC_Mips_VarArg : CallingConv<[
]>;
def CC_Mips : CallingConv<[
- CCIfVarArg<CCIfArgIsVarArg<CCDelegateTo<CC_Mips_VarArg>>>,
+ CCIfVarArg<CCIfArgVarArg<CCDelegateTo<CC_Mips_VarArg>>>,
CCDelegateTo<CC_Mips_FixedArg>
]>;
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp
index 881ba8e..7b875a8 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -4408,7 +4408,7 @@ void MipsTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
case 'K': // unsigned 16 bit immediate
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
EVT Type = Op.getValueType();
- uint64_t Val = (uint64_t)C->getZExtValue();
+ uint64_t Val = C->getZExtValue();
if (isUInt<16>(Val)) {
Result = DAG.getTargetConstant(Val, DL, Type);
break;
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 15f45a1..d4f0cc9 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -900,6 +900,17 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
if (STI.allowFP16Math() || STI.hasBF16Math())
setTargetDAGCombine(ISD::SETCC);
+ // Vector reduction operations. These may be turned into shuffle or tree
+ // reductions depending on what instructions are available for each type.
+ for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
+ MVT EltVT = VT.getVectorElementType();
+ if (EltVT == MVT::f32 || EltVT == MVT::f64) {
+ setOperationAction({ISD::VECREDUCE_FMAX, ISD::VECREDUCE_FMIN,
+ ISD::VECREDUCE_FMAXIMUM, ISD::VECREDUCE_FMINIMUM},
+ VT, Custom);
+ }
+ }
+
// Promote fp16 arithmetic if fp16 hardware isn't available or the
// user passed --nvptx-no-fp16-math. The flag is useful because,
// although sm_53+ GPUs have some sort of FP16 support in
@@ -1143,6 +1154,10 @@ const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const {
MAKE_CASE(NVPTXISD::BFI)
MAKE_CASE(NVPTXISD::PRMT)
MAKE_CASE(NVPTXISD::FCOPYSIGN)
+ MAKE_CASE(NVPTXISD::FMAXNUM3)
+ MAKE_CASE(NVPTXISD::FMINNUM3)
+ MAKE_CASE(NVPTXISD::FMAXIMUM3)
+ MAKE_CASE(NVPTXISD::FMINIMUM3)
MAKE_CASE(NVPTXISD::DYNAMIC_STACKALLOC)
MAKE_CASE(NVPTXISD::STACKRESTORE)
MAKE_CASE(NVPTXISD::STACKSAVE)
@@ -1929,6 +1944,124 @@ static SDValue getPRMT(SDValue A, SDValue B, uint64_t Selector, SDLoc DL,
return getPRMT(A, B, DAG.getConstant(Selector, DL, MVT::i32), DL, DAG, Mode);
}
+/// Reduces the elements using the scalar operations provided. The operations
+/// are sorted descending in number of inputs they take. The flags on the
+/// original reduction operation will be propagated to each scalar operation.
+/// Nearby elements are grouped in tree reduction, unlike the shuffle reduction
+/// used in ExpandReductions and SelectionDAG.
+static SDValue buildTreeReduction(
+ const SmallVector<SDValue> &Elements, EVT EltTy,
+ ArrayRef<std::pair<unsigned /*NodeType*/, unsigned /*NumInputs*/>> Ops,
+ const SDLoc &DL, const SDNodeFlags Flags, SelectionDAG &DAG) {
+ // Build the reduction tree at each level, starting with all the elements.
+ SmallVector<SDValue> Level = Elements;
+
+ unsigned OpIdx = 0;
+ while (Level.size() > 1) {
+ // Try to reduce this level using the current operator.
+ const auto [Op, NumInputs] = Ops[OpIdx];
+
+ // Build the next level by partially reducing all elements.
+ SmallVector<SDValue> ReducedLevel;
+ unsigned I = 0, E = Level.size();
+ for (; I + NumInputs <= E; I += NumInputs) {
+ // Reduce elements in groups of [NumInputs], as much as possible.
+ ReducedLevel.push_back(DAG.getNode(
+ Op, DL, EltTy, ArrayRef<SDValue>(Level).slice(I, NumInputs), Flags));
+ }
+
+ if (I < E) {
+ // Handle leftover elements.
+
+ if (ReducedLevel.empty()) {
+ // We didn't reduce anything at this level. We need to pick a smaller
+ // operator.
+ ++OpIdx;
+ assert(OpIdx < Ops.size() && "no smaller operators for reduction");
+ continue;
+ }
+
+ // We reduced some things but there's still more left, meaning the
+ // operator's number of inputs doesn't evenly divide this level size. Move
+ // these elements to the next level.
+ for (; I < E; ++I)
+ ReducedLevel.push_back(Level[I]);
+ }
+
+ // Process the next level.
+ Level = ReducedLevel;
+ }
+
+ return *Level.begin();
+}
+
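The grouping order is easiest to see on a small example. The standalone model below is illustrative only (reduceTreeModel is not part of the patch); it mirrors the loop structure above with strings instead of SDValues. For eight elements and the operator list {max3/3, max/2} it produces max(max3(max3(a,b,c),max3(d,e,f),g),h).

#include <cassert>
#include <string>
#include <utility>
#include <vector>

// Standalone model of the grouping: try the widest operator first and carry
// leftovers into the next level, switching to a narrower operator only when
// nothing could be grouped at the current level.
static std::string reduceTreeModel(
    std::vector<std::string> Level,
    const std::vector<std::pair<std::string, unsigned>> &Ops) {
  unsigned OpIdx = 0;
  while (Level.size() > 1) {
    const auto &[Name, NumInputs] = Ops[OpIdx];
    std::vector<std::string> Next;
    unsigned I = 0, E = Level.size();
    for (; I + NumInputs <= E; I += NumInputs) {
      std::string Group = Name + "(";
      for (unsigned J = 0; J < NumInputs; ++J) {
        if (J)
          Group += ",";
        Group += Level[I + J];
      }
      Next.push_back(Group + ")");
    }
    if (I < E) {
      if (Next.empty()) { // operator too wide for what is left
        ++OpIdx;
        assert(OpIdx < Ops.size() && "no narrower operator available");
        continue;
      }
      for (; I < E; ++I) // carry leftovers to the next level
        Next.push_back(Level[I]);
    }
    Level = std::move(Next);
  }
  return Level.front();
}
// reduceTreeModel({"a","b","c","d","e","f","g","h"}, {{"max3", 3}, {"max", 2}})
// returns "max(max3(max3(a,b,c),max3(d,e,f),g),h)".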
+// Get scalar reduction opcode
+static ISD::NodeType getScalarOpcodeForReduction(unsigned ReductionOpcode) {
+ switch (ReductionOpcode) {
+ case ISD::VECREDUCE_FMAX:
+ return ISD::FMAXNUM;
+ case ISD::VECREDUCE_FMIN:
+ return ISD::FMINNUM;
+ case ISD::VECREDUCE_FMAXIMUM:
+ return ISD::FMAXIMUM;
+ case ISD::VECREDUCE_FMINIMUM:
+ return ISD::FMINIMUM;
+ default:
+ llvm_unreachable("unhandled reduction opcode");
+ }
+}
+
+/// Get 3-input scalar reduction opcode
+static std::optional<NVPTXISD::NodeType>
+getScalar3OpcodeForReduction(unsigned ReductionOpcode) {
+ switch (ReductionOpcode) {
+ case ISD::VECREDUCE_FMAX:
+ return NVPTXISD::FMAXNUM3;
+ case ISD::VECREDUCE_FMIN:
+ return NVPTXISD::FMINNUM3;
+ case ISD::VECREDUCE_FMAXIMUM:
+ return NVPTXISD::FMAXIMUM3;
+ case ISD::VECREDUCE_FMINIMUM:
+ return NVPTXISD::FMINIMUM3;
+ default:
+ return std::nullopt;
+ }
+}
+
+/// Lower reductions to either a sequence of operations or a tree if
+/// reassociations are allowed. This method will use larger operations like
+/// max3/min3 when the target supports them.
+SDValue NVPTXTargetLowering::LowerVECREDUCE(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ const SDNodeFlags Flags = Op->getFlags();
+ SDValue Vector = Op.getOperand(0);
+
+ const unsigned Opcode = Op->getOpcode();
+ const EVT EltTy = Vector.getValueType().getVectorElementType();
+
+ // Whether we can use 3-input min/max when expanding the reduction.
+ const bool CanUseMinMax3 =
+ EltTy == MVT::f32 && STI.getSmVersion() >= 100 &&
+ STI.getPTXVersion() >= 88 &&
+ (Opcode == ISD::VECREDUCE_FMAX || Opcode == ISD::VECREDUCE_FMIN ||
+ Opcode == ISD::VECREDUCE_FMAXIMUM || Opcode == ISD::VECREDUCE_FMINIMUM);
+
+ // A list of SDNode opcodes with equivalent semantics, sorted descending by
+ // number of inputs they take.
+ SmallVector<std::pair<unsigned /*Op*/, unsigned /*NumIn*/>, 2> ScalarOps;
+
+ if (auto Opcode3Elem = getScalar3OpcodeForReduction(Opcode);
+ CanUseMinMax3 && Opcode3Elem)
+ ScalarOps.push_back({*Opcode3Elem, 3});
+ ScalarOps.push_back({getScalarOpcodeForReduction(Opcode), 2});
+
+ SmallVector<SDValue> Elements;
+ DAG.ExtractVectorElements(Vector, Elements);
+
+ return buildTreeReduction(Elements, EltTy, ScalarOps, DL, Flags, DAG);
+}
+
SDValue NVPTXTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {
// Handle bitcasting from v2i8 without hitting the default promotion
// strategy which goes through stack memory.
@@ -2808,6 +2941,11 @@ NVPTXTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
return LowerVECTOR_SHUFFLE(Op, DAG);
case ISD::CONCAT_VECTORS:
return LowerCONCAT_VECTORS(Op, DAG);
+ case ISD::VECREDUCE_FMAX:
+ case ISD::VECREDUCE_FMIN:
+ case ISD::VECREDUCE_FMAXIMUM:
+ case ISD::VECREDUCE_FMINIMUM:
+ return LowerVECREDUCE(Op, DAG);
case ISD::STORE:
return LowerSTORE(Op, DAG);
case ISD::LOAD:
@@ -3908,6 +4046,18 @@ bool NVPTXTargetLowering::getTgtMemIntrinsic(
return true;
}
+ case Intrinsic::nvvm_prefetch_tensormap: {
+ auto &DL = I.getDataLayout();
+ Info.opc = ISD::INTRINSIC_VOID;
+ Info.memVT = getPointerTy(DL);
+ Info.ptrVal = I.getArgOperand(0);
+ Info.offset = 0;
+ Info.flags =
+ MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable;
+ Info.align.reset();
+ return true;
+ }
+
case Intrinsic::nvvm_ldu_global_i:
case Intrinsic::nvvm_ldu_global_f:
case Intrinsic::nvvm_ldu_global_p: {
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.h b/llvm/lib/Target/NVPTX/NVPTXISelLowering.h
index cf72a1e..43e721a 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.h
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.h
@@ -64,6 +64,11 @@ enum NodeType : unsigned {
UNPACK_VECTOR,
FCOPYSIGN,
+ FMAXNUM3,
+ FMINNUM3,
+ FMAXIMUM3,
+ FMINIMUM3,
+
DYNAMIC_STACKALLOC,
STACKRESTORE,
STACKSAVE,
@@ -286,6 +291,7 @@ private:
SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
index aac611d..1ab41bf 100644
--- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
+++ b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
@@ -347,6 +347,36 @@ multiclass FMINIMUMMAXIMUM<string OpcStr, bit NaN, SDNode OpNode> {
Requires<[hasBF16Math, hasSM<80>, hasPTX<70>]>;
}
+// Template for 3-input minimum/maximum instructions
+// (sm_100+/PTX 8.8 and f32 only)
+//
+// Also defines ftz (flush subnormal inputs and results to sign-preserving
+// zero) variants for fp32 functions.
+multiclass FMINIMUMMAXIMUM3<string OpcStr, bit NaN, SDNode OpNode> {
+ defvar nan_str = !if(NaN, ".NaN", "");
+ def f32rrr :
+ BasicFlagsNVPTXInst<(outs B32:$dst),
+ (ins B32:$a, B32:$b, B32:$c),
+ (ins FTZFlag:$ftz),
+ OpcStr # "$ftz" # nan_str # ".f32",
+ [(set f32:$dst, (OpNode f32:$a, f32:$b, f32:$c))]>,
+ Requires<[hasPTX<88>, hasSM<100>]>;
+ def f32rri :
+ BasicFlagsNVPTXInst<(outs B32:$dst),
+ (ins B32:$a, B32:$b, f32imm:$c),
+ (ins FTZFlag:$ftz),
+ OpcStr # "$ftz" # nan_str # ".f32",
+ [(set f32:$dst, (OpNode f32:$a, f32:$b, fpimm:$c))]>,
+ Requires<[hasPTX<88>, hasSM<100>]>;
+ def f32rii :
+ BasicFlagsNVPTXInst<(outs B32:$dst),
+ (ins B32:$a, f32imm:$b, f32imm:$c),
+ (ins FTZFlag:$ftz),
+ OpcStr # "$ftz" # nan_str # ".f32",
+ [(set f32:$dst, (OpNode f32:$a, fpimm:$b, fpimm:$c))]>,
+ Requires<[hasPTX<88>, hasSM<100>]>;
+}
+
// Template for instructions which take three FP args. The
// instructions are named "<OpcStr>.f<Width>" (e.g. "add.f64").
//
@@ -900,6 +930,20 @@ defm MAX : FMINIMUMMAXIMUM<"max", /* NaN */ false, fmaxnum>;
defm MIN_NAN : FMINIMUMMAXIMUM<"min", /* NaN */ true, fminimum>;
defm MAX_NAN : FMINIMUMMAXIMUM<"max", /* NaN */ true, fmaximum>;
+def nvptx_fminnum3 : SDNode<"NVPTXISD::FMINNUM3", SDTFPTernaryOp,
+ [SDNPCommutative]>;
+def nvptx_fmaxnum3 : SDNode<"NVPTXISD::FMAXNUM3", SDTFPTernaryOp,
+ [SDNPCommutative]>;
+def nvptx_fminimum3 : SDNode<"NVPTXISD::FMINIMUM3", SDTFPTernaryOp,
+ [SDNPCommutative]>;
+def nvptx_fmaximum3 : SDNode<"NVPTXISD::FMAXIMUM3", SDTFPTernaryOp,
+ [SDNPCommutative]>;
+
+defm FMIN3 : FMINIMUMMAXIMUM3<"min", /* NaN */ false, nvptx_fminnum3>;
+defm FMAX3 : FMINIMUMMAXIMUM3<"max", /* NaN */ false, nvptx_fmaxnum3>;
+defm FMINNAN3 : FMINIMUMMAXIMUM3<"min", /* NaN */ true, nvptx_fminimum3>;
+defm FMAXNAN3 : FMINIMUMMAXIMUM3<"max", /* NaN */ true, nvptx_fmaximum3>;
+
defm FABS : F2<"abs", fabs>;
defm FNEG : F2<"neg", fneg>;
defm FABS_H: F2_Support_Half<"abs", fabs>;
diff --git a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
index d337192..d4a0ca7 100644
--- a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
+++ b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
@@ -39,6 +39,12 @@ def AS_match {
code global = [{
return ChkMemSDNodeAddressSpace(N, llvm::ADDRESS_SPACE_GLOBAL);
}];
+ code const = [{
+ return ChkMemSDNodeAddressSpace(N, llvm::ADDRESS_SPACE_CONST);
+ }];
+ code param = [{
+ return ChkMemSDNodeAddressSpace(N, llvm::ADDRESS_SPACE_PARAM);
+ }];
}
@@ -950,33 +956,47 @@ foreach dim = 3...5 in {
defm TMA_TENSOR_PF_TILE_GATHER4_2D : TMA_TENSOR_PREFETCH_INTR<5, "tile_gather4",
[hasTMACTAGroupSupport]>;
-//Prefetch and Prefetchu
-
-let Predicates = [hasPTX<80>, hasSM<90>] in {
- class PREFETCH_INTRS<string InstName> :
- BasicNVPTXInst<(outs), (ins ADDR:$addr),
- InstName,
- [(!cast<Intrinsic>(!strconcat("int_nvvm_",
- !subst(".", "_", InstName))) addr:$addr)]>;
+// Prefetchu and Prefetch
- def PREFETCH_L1 : PREFETCH_INTRS<"prefetch.L1">;
- def PREFETCH_L2 : PREFETCH_INTRS<"prefetch.L2">;
- def PREFETCH_GLOBAL_L1 : PREFETCH_INTRS<"prefetch.global.L1">;
- def PREFETCH_LOCAL_L1 : PREFETCH_INTRS<"prefetch.local.L1">;
- def PREFETCH_GLOBAL_L2 : PREFETCH_INTRS<"prefetch.global.L2">;
- def PREFETCH_LOCAL_L2 : PREFETCH_INTRS<"prefetch.local.L2">;
+defvar frag_pat = (int_nvvm_prefetch_tensormap node:$addr);
- def PREFETCH_GLOBAL_L2_EVICT_NORMAL : BasicNVPTXInst<(outs), (ins ADDR:$addr),
- "prefetch.global.L2::evict_normal",
- [(int_nvvm_prefetch_global_L2_evict_normal addr:$addr)]>;
+multiclass PREFETCH_TENSORMAP_PATFRAG<string suffix, code predicate> {
+ def !tolower(suffix) : PatFrag<!setdagop(frag_pat, ops), frag_pat, predicate>;
+}
- def PREFETCH_GLOBAL_L2_EVICT_LAST : BasicNVPTXInst<(outs), (ins ADDR:$addr),
- "prefetch.global.L2::evict_last",
- [(int_nvvm_prefetch_global_L2_evict_last addr:$addr)]>;
+defm prefetch_tensormap_ : PREFETCH_TENSORMAP_PATFRAG<"CONST", AS_match.const>;
+defm prefetch_tensormap_ : PREFETCH_TENSORMAP_PATFRAG<"GENERIC", AS_match.generic>;
+defm prefetch_tensormap_ : PREFETCH_TENSORMAP_PATFRAG<"PARAM", AS_match.param>;
- def PREFETCHU_L1 : PREFETCH_INTRS<"prefetchu.L1">;
+multiclass PREFETCH_TENSORMAP_INST<string addrspace_name, PatFrag pattern_frag> {
+ def "" : BasicNVPTXInst<(outs), (ins ADDR:$addr),
+ "prefetch" # addrspace_name # ".tensormap",
+ [(pattern_frag addr:$addr)]>,
+ Requires<[hasPTX<80>, hasSM<90>]>;
}
+defm PREFETCH_CONST_TENSORMAP : PREFETCH_TENSORMAP_INST<".const", prefetch_tensormap_const>;
+defm PREFETCH_GENERIC_TENSORMAP : PREFETCH_TENSORMAP_INST<"", prefetch_tensormap_generic>;
+defm PREFETCH_PARAM_TENSORMAP : PREFETCH_TENSORMAP_INST<".param", prefetch_tensormap_param>;
+
+class PREFETCH_INTRS<string InstName, Intrinsic Intr> :
+ BasicNVPTXInst<(outs), (ins ADDR:$addr),
+ InstName,
+ [(Intr addr:$addr)]>,
+ Requires<[hasPTX<80>, hasSM<90>]>;
+
+def PREFETCHU_L1 : PREFETCH_INTRS<"prefetchu.L1", int_nvvm_prefetchu_L1>;
+def PREFETCH_L1 : PREFETCH_INTRS<"prefetch.L1", int_nvvm_prefetch_L1>;
+def PREFETCH_L2 : PREFETCH_INTRS<"prefetch.L2", int_nvvm_prefetch_L2>;
+def PREFETCH_GLOBAL_L1 : PREFETCH_INTRS<"prefetch.global.L1", int_nvvm_prefetch_global_L1>;
+def PREFETCH_LOCAL_L1 : PREFETCH_INTRS<"prefetch.local.L1", int_nvvm_prefetch_local_L1>;
+def PREFETCH_GLOBAL_L2 : PREFETCH_INTRS<"prefetch.global.L2", int_nvvm_prefetch_global_L2>;
+def PREFETCH_LOCAL_L2 : PREFETCH_INTRS<"prefetch.local.L2", int_nvvm_prefetch_local_L2>;
+def PREFETCH_GLOBAL_L2_EVICT_NORMAL : PREFETCH_INTRS<"prefetch.global.L2::evict_normal",
+ int_nvvm_prefetch_global_L2_evict_normal>;
+def PREFETCH_GLOBAL_L2_EVICT_LAST : PREFETCH_INTRS<"prefetch.global.L2::evict_last",
+ int_nvvm_prefetch_global_L2_evict_last>;
+
//Applypriority intrinsics
class APPLYPRIORITY_L2_INTRS<string addrspace> :
BasicNVPTXInst<(outs), (ins ADDR:$addr, B64:$size),
diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
index 3ae2d9d..f4f8961 100644
--- a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
@@ -564,7 +564,8 @@ bool NVPTXTTIImpl::collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
case Intrinsic::nvvm_isspacep_global:
case Intrinsic::nvvm_isspacep_local:
case Intrinsic::nvvm_isspacep_shared:
- case Intrinsic::nvvm_isspacep_shared_cluster: {
+ case Intrinsic::nvvm_isspacep_shared_cluster:
+ case Intrinsic::nvvm_prefetch_tensormap: {
OpIndexes.push_back(0);
return true;
}
@@ -587,6 +588,11 @@ Value *NVPTXTTIImpl::rewriteIntrinsicWithAddressSpace(IntrinsicInst *II,
return ConstantInt::get(II->getType(), *R);
return nullptr;
}
+ case Intrinsic::nvvm_prefetch_tensormap: {
+ IRBuilder<> Builder(II);
+ return Builder.CreateUnaryIntrinsic(Intrinsic::nvvm_prefetch_tensormap,
+ NewV);
+ }
}
return nullptr;
}
diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h
index 9a6e261..b32d931b 100644
--- a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h
+++ b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h
@@ -87,6 +87,13 @@ public:
}
unsigned getMinVectorRegisterBitWidth() const override { return 32; }
+ bool shouldExpandReduction(const IntrinsicInst *II) const override {
+    // Turn off the ExpandReductions pass for NVPTX, which doesn't have advanced
+    // swizzling operations. Our backend/SelectionDAG can expand these
+    // reductions with fewer movs.
+ return false;
+ }
+
// We don't want to prevent inlining because of target-cpu and -features
// attributes that were added to newer versions of LLVM/Clang: There are
// no incompatible functions in PTX, ptxas will throw errors in such cases.
diff --git a/llvm/lib/Target/PowerPC/PPCCCState.h b/llvm/lib/Target/PowerPC/PPCCCState.h
index b0e50b2..feab9c5 100644
--- a/llvm/lib/Target/PowerPC/PPCCCState.h
+++ b/llvm/lib/Target/PowerPC/PPCCCState.h
@@ -38,36 +38,6 @@ public:
void clearWasPPCF128() { OriginalArgWasPPCF128.clear(); }
};
-class AIXCCState : public CCState {
-private:
- BitVector IsFixed;
-
-public:
- AIXCCState(CallingConv::ID CC, bool IsVarArg, MachineFunction &MF,
- SmallVectorImpl<CCValAssign> &Locs, LLVMContext &C)
- : CCState(CC, IsVarArg, MF, Locs, C) {}
-
- void AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
- CCAssignFn Fn) {
- // All formal arguments are fixed.
- IsFixed.resize(Ins.size(), true);
- CCState::AnalyzeFormalArguments(Ins, Fn);
- }
-
- void AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
- CCAssignFn Fn) {
- // Record whether the call operand was a fixed argument.
- IsFixed.resize(Outs.size(), false);
- for (unsigned ValNo = 0, E = Outs.size(); ValNo != E; ++ValNo)
- if (Outs[ValNo].IsFixed)
- IsFixed.set(ValNo);
-
- CCState::AnalyzeCallOperands(Outs, Fn);
- }
-
- bool isFixed(unsigned ValNo) const { return IsFixed.test(ValNo); }
-};
-
} // end namespace llvm
#endif
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 196574e..2698bd6 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -6089,7 +6089,7 @@ SDValue PPCTargetLowering::LowerCall_32SVR4(
ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
bool Result;
- if (Outs[i].IsFixed) {
+ if (!ArgFlags.isVarArg()) {
Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
CCInfo);
} else {
@@ -6905,8 +6905,7 @@ static bool isGPRShadowAligned(MCPhysReg Reg, Align RequiredAlign) {
static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
- CCState &S) {
- AIXCCState &State = static_cast<AIXCCState &>(S);
+ CCState &State) {
const PPCSubtarget &Subtarget = static_cast<const PPCSubtarget &>(
State.getMachineFunction().getSubtarget());
const bool IsPPC64 = Subtarget.isPPC64();
@@ -7090,7 +7089,7 @@ static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
// They are passed in VRs if any are available (unlike arguments passed
// through ellipses) and shadow GPRs (unlike arguments to non-vaarg
// functions)
- if (State.isFixed(ValNo)) {
+ if (!ArgFlags.isVarArg()) {
if (MCRegister VReg = State.AllocateReg(VR)) {
State.addLoc(CCValAssign::getReg(ValNo, ValVT, VReg, LocVT, LocInfo));
// Shadow allocate GPRs and stack space even though we pass in a VR.
@@ -7278,7 +7277,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_AIX(
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo &MFI = MF.getFrameInfo();
PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
- AIXCCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
+ CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
const EVT PtrVT = getPointerTy(MF.getDataLayout());
// Reserve space for the linkage area on the stack.
@@ -7625,8 +7624,8 @@ SDValue PPCTargetLowering::LowerCall_AIX(
MachineFunction &MF = DAG.getMachineFunction();
SmallVector<CCValAssign, 16> ArgLocs;
- AIXCCState CCInfo(CFlags.CallConv, CFlags.IsVarArg, MF, ArgLocs,
- *DAG.getContext());
+ CCState CCInfo(CFlags.CallConv, CFlags.IsVarArg, MF, ArgLocs,
+ *DAG.getContext());
// Reserve space for the linkage save area (LSA) on the stack.
// In both PPC32 and PPC64 there are 6 reserved slots in the LSA:
diff --git a/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp b/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
index 76dca47..f123040 100644
--- a/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
@@ -1102,13 +1102,20 @@ void PPCRegisterInfo::lowerCRBitSpilling(MachineBasicBlock::iterator II,
SpillsKnownBit = true;
break;
default:
+ // When spilling a CR bit, the super register may not be explicitly defined
+ // (i.e. it can be defined by a CR-logical that only defines the subreg) so
+ // we state that the CR field is undef. Also, in order to preserve the kill
+ // flag on the CR bit, we add it as an implicit use.
+
// On Power10, we can use SETNBC to spill all CR bits. SETNBC will set all
// bits (specifically, it produces a -1 if the CR bit is set). Ultimately,
// the bit that is of importance to us is bit 32 (bit 0 of a 32-bit
// register), and SETNBC will set this.
if (Subtarget.isISA3_1()) {
BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::SETNBC8 : PPC::SETNBC), Reg)
- .addReg(SrcReg, RegState::Undef);
+ .addReg(SrcReg, RegState::Undef)
+ .addReg(SrcReg, RegState::Implicit |
+ getKillRegState(MI.getOperand(0).isKill()));
break;
}
@@ -1122,16 +1129,14 @@ void PPCRegisterInfo::lowerCRBitSpilling(MachineBasicBlock::iterator II,
SrcReg == PPC::CR4LT || SrcReg == PPC::CR5LT ||
SrcReg == PPC::CR6LT || SrcReg == PPC::CR7LT) {
BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::SETB8 : PPC::SETB), Reg)
- .addReg(getCRFromCRBit(SrcReg), RegState::Undef);
+ .addReg(getCRFromCRBit(SrcReg), RegState::Undef)
+ .addReg(SrcReg, RegState::Implicit |
+ getKillRegState(MI.getOperand(0).isKill()));
break;
}
}
// We need to move the CR field that contains the CR bit we are spilling.
- // The super register may not be explicitly defined (i.e. it can be defined
- // by a CR-logical that only defines the subreg) so we state that the CR
- // field is undef. Also, in order to preserve the kill flag on the CR bit,
- // we add it as an implicit use.
BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MFOCRF8 : PPC::MFOCRF), Reg)
.addReg(getCRFromCRBit(SrcReg), RegState::Undef)
.addReg(SrcReg,
diff --git a/llvm/lib/Target/PowerPC/PPCSelectionDAGInfo.cpp b/llvm/lib/Target/PowerPC/PPCSelectionDAGInfo.cpp
index 95de9f3..4039fed 100644
--- a/llvm/lib/Target/PowerPC/PPCSelectionDAGInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCSelectionDAGInfo.cpp
@@ -22,3 +22,9 @@ bool PPCSelectionDAGInfo::isTargetStrictFPOpcode(unsigned Opcode) const {
return Opcode >= PPCISD::FIRST_STRICTFP_OPCODE &&
Opcode <= PPCISD::LAST_STRICTFP_OPCODE;
}
+
+std::pair<SDValue, SDValue> PPCSelectionDAGInfo::EmitTargetCodeForMemcmp(
+ SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2,
+ SDValue Op3, const CallInst *CI) const {
+ return DAG.getMemcmp(Chain, dl, Op1, Op2, Op3, CI);
+}
diff --git a/llvm/lib/Target/PowerPC/PPCSelectionDAGInfo.h b/llvm/lib/Target/PowerPC/PPCSelectionDAGInfo.h
index 08e2ddb..1537851 100644
--- a/llvm/lib/Target/PowerPC/PPCSelectionDAGInfo.h
+++ b/llvm/lib/Target/PowerPC/PPCSelectionDAGInfo.h
@@ -20,6 +20,11 @@ public:
bool isTargetMemoryOpcode(unsigned Opcode) const override;
bool isTargetStrictFPOpcode(unsigned Opcode) const override;
+
+ std::pair<SDValue, SDValue>
+ EmitTargetCodeForMemcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain,
+ SDValue Op1, SDValue Op2, SDValue Op3,
+ const CallInst *CI) const override;
};
} // namespace llvm
diff --git a/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp b/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
index 67cc01e..e0ac591 100644
--- a/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
+++ b/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
@@ -674,6 +674,9 @@ static constexpr FeatureBitset XAndesGroup = {
static constexpr DecoderListEntry DecoderList32[]{
// Vendor Extensions
+ {DecoderTableXCV32, XCVFeatureGroup, "CORE-V extensions"},
+ {DecoderTableXRivos32, XRivosFeatureGroup, "Rivos"},
+ {DecoderTableXqci32, XqciFeatureGroup, "Qualcomm uC Extensions"},
{DecoderTableXVentana32,
{RISCV::FeatureVendorXVentanaCondOps},
"XVentanaCondOps"},
@@ -690,9 +693,6 @@ static constexpr DecoderListEntry DecoderList32[]{
"MIPS mips.pref"},
{DecoderTableXAndes32, XAndesGroup, "Andes extensions"},
// Standard Extensions
- {DecoderTableXCV32, XCVFeatureGroup, "CORE-V extensions"},
- {DecoderTableXqci32, XqciFeatureGroup, "Qualcomm uC Extensions"},
- {DecoderTableXRivos32, XRivosFeatureGroup, "Rivos"},
{DecoderTable32, {}, "standard 32-bit instructions"},
{DecoderTableRV32Only32, {}, "RV32-only standard 32-bit instructions"},
{DecoderTableZfinx32, {}, "Zfinx (Float in Integer)"},
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
index d2b75a6..34026ed 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
@@ -45,8 +45,8 @@ public:
CCValAssign::LocInfo LocInfo,
const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
CCState &State) override {
- if (RISCVAssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State, Info.IsFixed,
- IsRet, Info.Ty))
+ if (RISCVAssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State, IsRet,
+ Info.Ty))
return true;
StackSize = State.getStackSize();
@@ -196,8 +196,8 @@ public:
if (LocVT.isScalableVector())
MF.getInfo<RISCVMachineFunctionInfo>()->setIsVectorCall();
- if (RISCVAssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State,
- /*IsFixed=*/true, IsRet, Info.Ty))
+ if (RISCVAssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State, IsRet,
+ Info.Ty))
return true;
StackSize = State.getStackSize();
@@ -454,7 +454,7 @@ bool RISCVCallLowering::canLowerReturn(MachineFunction &MF,
for (unsigned I = 0, E = Outs.size(); I < E; ++I) {
MVT VT = MVT::getVT(Outs[I].Ty);
if (CC_RISCV(I, VT, VT, CCValAssign::Full, Outs[I].Flags[0], CCInfo,
- /*IsFixed=*/true, /*isRet=*/true, nullptr))
+ /*isRet=*/true, nullptr))
return false;
}
return true;
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
index 95ec42f..8d956ce 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
@@ -32,6 +32,11 @@ static cl::opt<bool> ULEB128Reloc(
"riscv-uleb128-reloc", cl::init(true), cl::Hidden,
cl::desc("Emit R_RISCV_SET_ULEB128/E_RISCV_SUB_ULEB128 if appropriate"));
+static cl::opt<bool>
+ AlignRvc("riscv-align-rvc", cl::init(true), cl::Hidden,
+ cl::desc("When generating R_RISCV_ALIGN, insert $alignment-2 "
+ "bytes of NOPs even in norvc code"));
+
RISCVAsmBackend::RISCVAsmBackend(const MCSubtargetInfo &STI, uint8_t OSABI,
bool Is64Bit, const MCTargetOptions &Options)
: MCAsmBackend(llvm::endianness::little), STI(STI), OSABI(OSABI),
@@ -306,12 +311,21 @@ void RISCVAsmBackend::relaxInstruction(MCInst &Inst,
// If conditions are met, compute the padding size and create a fixup encoding
// the padding size in the addend.
bool RISCVAsmBackend::relaxAlign(MCFragment &F, unsigned &Size) {
- // Use default handling unless linker relaxation is enabled and the alignment
- // is larger than the nop size.
- const MCSubtargetInfo *STI = F.getSubtargetInfo();
- if (!STI->hasFeature(RISCV::FeatureRelax))
+ // Alignments before the first linker-relaxable instruction have fixed sizes
+ // and do not require relocations. Alignments after a linker-relaxable
+ // instruction require a relocation, even if the STI specifies norelax.
+ //
+ // firstLinkerRelaxable is the layout order within the subsection, which may
+ // be smaller than the section's order. Therefore, alignments in a
+ // lower-numbered subsection may be unnecessarily treated as linker-relaxable.
+ auto *Sec = F.getParent();
+ if (F.getLayoutOrder() <= Sec->firstLinkerRelaxable())
return false;
- unsigned MinNopLen = STI->hasFeature(RISCV::FeatureStdExtZca) ? 2 : 4;
+
+ // Use default handling unless the alignment is larger than the nop size.
+ const MCSubtargetInfo *STI = F.getSubtargetInfo();
+ unsigned MinNopLen =
+ AlignRvc || STI->hasFeature(RISCV::FeatureStdExtZca) ? 2 : 4;
if (F.getAlignment() <= MinNopLen)
return false;
@@ -321,7 +335,6 @@ bool RISCVAsmBackend::relaxAlign(MCFragment &F, unsigned &Size) {
MCFixup::create(0, Expr, FirstLiteralRelocationKind + ELF::R_RISCV_ALIGN);
F.setVarFixups({Fixup});
F.setLinkerRelaxable();
- F.getParent()->setLinkerRelaxable();
return true;
}
@@ -471,9 +484,12 @@ bool RISCVAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
Count -= 1;
}
+ // TODO: emit a mapping symbol right here
+
if (Count % 4 == 2) {
- // The canonical nop with Zca is c.nop.
- OS.write(STI->hasFeature(RISCV::FeatureStdExtZca) ? "\x01\0" : "\0\0", 2);
+ // The canonical nop with Zca is c.nop. For .balign 4, we generate a 2-byte
+ // c.nop even in a norvc region.
+ OS.write("\x01\0", 2);
Count -= 2;
}
diff --git a/llvm/lib/Target/RISCV/RISCVCallingConv.cpp b/llvm/lib/Target/RISCV/RISCVCallingConv.cpp
index cb6117e..70127e3 100644
--- a/llvm/lib/Target/RISCV/RISCVCallingConv.cpp
+++ b/llvm/lib/Target/RISCV/RISCVCallingConv.cpp
@@ -324,7 +324,7 @@ static MCRegister allocateRVVReg(MVT ValVT, unsigned ValNo, CCState &State,
// Implements the RISC-V calling convention. Returns true upon failure.
bool llvm::CC_RISCV(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
- CCState &State, bool IsFixed, bool IsRet, Type *OrigTy) {
+ CCState &State, bool IsRet, Type *OrigTy) {
const MachineFunction &MF = State.getMachineFunction();
const DataLayout &DL = MF.getDataLayout();
const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
@@ -379,12 +379,12 @@ bool llvm::CC_RISCV(unsigned ValNo, MVT ValVT, MVT LocVT,
break;
case RISCVABI::ABI_ILP32F:
case RISCVABI::ABI_LP64F:
- UseGPRForF16_F32 = !IsFixed;
+ UseGPRForF16_F32 = ArgFlags.isVarArg();
break;
case RISCVABI::ABI_ILP32D:
case RISCVABI::ABI_LP64D:
- UseGPRForF16_F32 = !IsFixed;
- UseGPRForF64 = !IsFixed;
+ UseGPRForF16_F32 = ArgFlags.isVarArg();
+ UseGPRForF64 = ArgFlags.isVarArg();
break;
}
@@ -465,7 +465,7 @@ bool llvm::CC_RISCV(unsigned ValNo, MVT ValVT, MVT LocVT,
// currently if we are using ILP32E calling convention. This behavior may be
// changed when RV32E/ILP32E is ratified.
unsigned TwoXLenInBytes = (2 * XLen) / 8;
- if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
+ if (ArgFlags.isVarArg() && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes &&
ABI != RISCVABI::ABI_ILP32E) {
unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
@@ -620,8 +620,8 @@ bool llvm::CC_RISCV(unsigned ValNo, MVT ValVT, MVT LocVT,
// benchmark. But theoretically, it may have benefit for some cases.
bool llvm::CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags, CCState &State,
- bool IsFixed, bool IsRet, Type *OrigTy) {
+ ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsRet,
+ Type *OrigTy) {
const MachineFunction &MF = State.getMachineFunction();
const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
const RISCVTargetLowering &TLI = *Subtarget.getTargetLowering();
diff --git a/llvm/lib/Target/RISCV/RISCVCallingConv.h b/llvm/lib/Target/RISCV/RISCVCallingConv.h
index bf823b7..2030ce1 100644
--- a/llvm/lib/Target/RISCV/RISCVCallingConv.h
+++ b/llvm/lib/Target/RISCV/RISCVCallingConv.h
@@ -21,15 +21,15 @@ namespace llvm {
typedef bool RISCVCCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo,
ISD::ArgFlagsTy ArgFlags, CCState &State,
- bool IsFixed, bool IsRet, Type *OrigTy);
+ bool IsRet, Type *OrigTy);
bool CC_RISCV(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
- CCState &State, bool IsFixed, bool IsRet, Type *OrigTy);
+ CCState &State, bool IsRet, Type *OrigTy);
bool CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
- CCState &State, bool IsFixed, bool IsRet, Type *OrigTy);
+ CCState &State, bool IsRet, Type *OrigTy);
bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 03e54b3..e63b937 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1844,6 +1844,17 @@ bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 3,
/*IsStore*/ true,
/*IsUnitStrided*/ false, /*UsePtrVal*/ true);
+ case Intrinsic::riscv_sseg2_store_mask:
+ case Intrinsic::riscv_sseg3_store_mask:
+ case Intrinsic::riscv_sseg4_store_mask:
+ case Intrinsic::riscv_sseg5_store_mask:
+ case Intrinsic::riscv_sseg6_store_mask:
+ case Intrinsic::riscv_sseg7_store_mask:
+ case Intrinsic::riscv_sseg8_store_mask:
+ // Operands are (vec, ..., vec, ptr, stride, mask, vl)
+ return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 4,
+ /*IsStore*/ true,
+ /*IsUnitStrided*/ false, /*UsePtrVal*/ true);
case Intrinsic::riscv_vlm:
return SetRVVLoadStoreInfo(/*PtrOp*/ 0,
/*IsStore*/ false,
@@ -11084,69 +11095,118 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
return lowerVectorIntrinsicScalars(Op, DAG, Subtarget);
}
-SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
- SelectionDAG &DAG) const {
- unsigned IntNo = Op.getConstantOperandVal(1);
+static SDValue
+lowerFixedVectorSegStoreIntrinsics(unsigned IntNo, SDValue Op,
+ const RISCVSubtarget &Subtarget,
+ SelectionDAG &DAG) {
+ bool IsStrided;
switch (IntNo) {
- default:
- break;
case Intrinsic::riscv_seg2_store_mask:
case Intrinsic::riscv_seg3_store_mask:
case Intrinsic::riscv_seg4_store_mask:
case Intrinsic::riscv_seg5_store_mask:
case Intrinsic::riscv_seg6_store_mask:
case Intrinsic::riscv_seg7_store_mask:
- case Intrinsic::riscv_seg8_store_mask: {
- SDLoc DL(Op);
- static const Intrinsic::ID VssegInts[] = {
- Intrinsic::riscv_vsseg2_mask, Intrinsic::riscv_vsseg3_mask,
- Intrinsic::riscv_vsseg4_mask, Intrinsic::riscv_vsseg5_mask,
- Intrinsic::riscv_vsseg6_mask, Intrinsic::riscv_vsseg7_mask,
- Intrinsic::riscv_vsseg8_mask};
+ case Intrinsic::riscv_seg8_store_mask:
+ IsStrided = false;
+ break;
+ case Intrinsic::riscv_sseg2_store_mask:
+ case Intrinsic::riscv_sseg3_store_mask:
+ case Intrinsic::riscv_sseg4_store_mask:
+ case Intrinsic::riscv_sseg5_store_mask:
+ case Intrinsic::riscv_sseg6_store_mask:
+ case Intrinsic::riscv_sseg7_store_mask:
+ case Intrinsic::riscv_sseg8_store_mask:
+ IsStrided = true;
+ break;
+ default:
+ llvm_unreachable("unexpected intrinsic ID");
+ }
- // Operands: (chain, int_id, vec*, ptr, mask, vl)
- unsigned NF = Op->getNumOperands() - 5;
- assert(NF >= 2 && NF <= 8 && "Unexpected seg number");
- MVT XLenVT = Subtarget.getXLenVT();
- MVT VT = Op->getOperand(2).getSimpleValueType();
- MVT ContainerVT = getContainerForFixedLengthVector(VT);
- unsigned Sz = NF * ContainerVT.getVectorMinNumElements() *
- ContainerVT.getScalarSizeInBits();
- EVT VecTupTy = MVT::getRISCVVectorTupleVT(Sz, NF);
+ SDLoc DL(Op);
+ static const Intrinsic::ID VssegInts[] = {
+ Intrinsic::riscv_vsseg2_mask, Intrinsic::riscv_vsseg3_mask,
+ Intrinsic::riscv_vsseg4_mask, Intrinsic::riscv_vsseg5_mask,
+ Intrinsic::riscv_vsseg6_mask, Intrinsic::riscv_vsseg7_mask,
+ Intrinsic::riscv_vsseg8_mask};
+ static const Intrinsic::ID VsssegInts[] = {
+ Intrinsic::riscv_vssseg2_mask, Intrinsic::riscv_vssseg3_mask,
+ Intrinsic::riscv_vssseg4_mask, Intrinsic::riscv_vssseg5_mask,
+ Intrinsic::riscv_vssseg6_mask, Intrinsic::riscv_vssseg7_mask,
+ Intrinsic::riscv_vssseg8_mask};
+
+ // Operands: (chain, int_id, vec*, ptr, mask, vl) or
+ // (chain, int_id, vec*, ptr, stride, mask, vl)
+ unsigned NF = Op->getNumOperands() - (IsStrided ? 6 : 5);
+ assert(NF >= 2 && NF <= 8 && "Unexpected seg number");
+ MVT XLenVT = Subtarget.getXLenVT();
+ MVT VT = Op->getOperand(2).getSimpleValueType();
+ MVT ContainerVT = ::getContainerForFixedLengthVector(DAG, VT, Subtarget);
+ unsigned Sz = NF * ContainerVT.getVectorMinNumElements() *
+ ContainerVT.getScalarSizeInBits();
+ EVT VecTupTy = MVT::getRISCVVectorTupleVT(Sz, NF);
- SDValue VL = Op.getOperand(Op.getNumOperands() - 1);
- SDValue Mask = Op.getOperand(Op.getNumOperands() - 2);
- MVT MaskVT = Mask.getSimpleValueType();
- MVT MaskContainerVT =
- ::getContainerForFixedLengthVector(DAG, MaskVT, Subtarget);
- Mask = convertToScalableVector(MaskContainerVT, Mask, DAG, Subtarget);
+ SDValue VL = Op.getOperand(Op.getNumOperands() - 1);
+ SDValue Mask = Op.getOperand(Op.getNumOperands() - 2);
+ MVT MaskVT = Mask.getSimpleValueType();
+ MVT MaskContainerVT =
+ ::getContainerForFixedLengthVector(DAG, MaskVT, Subtarget);
+ Mask = convertToScalableVector(MaskContainerVT, Mask, DAG, Subtarget);
- SDValue IntID = DAG.getTargetConstant(VssegInts[NF - 2], DL, XLenVT);
- SDValue Ptr = Op->getOperand(NF + 2);
+ SDValue IntID = DAG.getTargetConstant(
+ IsStrided ? VsssegInts[NF - 2] : VssegInts[NF - 2], DL, XLenVT);
+ SDValue Ptr = Op->getOperand(NF + 2);
- auto *FixedIntrinsic = cast<MemIntrinsicSDNode>(Op);
+ auto *FixedIntrinsic = cast<MemIntrinsicSDNode>(Op);
- SDValue StoredVal = DAG.getUNDEF(VecTupTy);
- for (unsigned i = 0; i < NF; i++)
- StoredVal = DAG.getNode(
- RISCVISD::TUPLE_INSERT, DL, VecTupTy, StoredVal,
- convertToScalableVector(
- ContainerVT, FixedIntrinsic->getOperand(2 + i), DAG, Subtarget),
- DAG.getTargetConstant(i, DL, MVT::i32));
+ SDValue StoredVal = DAG.getUNDEF(VecTupTy);
+ for (unsigned i = 0; i < NF; i++)
+ StoredVal = DAG.getNode(
+ RISCVISD::TUPLE_INSERT, DL, VecTupTy, StoredVal,
+ convertToScalableVector(ContainerVT, FixedIntrinsic->getOperand(2 + i),
+ DAG, Subtarget),
+ DAG.getTargetConstant(i, DL, MVT::i32));
+
+ SmallVector<SDValue, 10> Ops = {
+ FixedIntrinsic->getChain(),
+ IntID,
+ StoredVal,
+ Ptr,
+ Mask,
+ VL,
+ DAG.getTargetConstant(Log2_64(VT.getScalarSizeInBits()), DL, XLenVT)};
+ // Insert the stride operand.
+ if (IsStrided)
+ Ops.insert(std::next(Ops.begin(), 4),
+ Op.getOperand(Op.getNumOperands() - 3));
+
+ return DAG.getMemIntrinsicNode(
+ ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other), Ops,
+ FixedIntrinsic->getMemoryVT(), FixedIntrinsic->getMemOperand());
+}
+
+SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
+ SelectionDAG &DAG) const {
+ unsigned IntNo = Op.getConstantOperandVal(1);
+ switch (IntNo) {
+ default:
+ break;
+ case Intrinsic::riscv_seg2_store_mask:
+ case Intrinsic::riscv_seg3_store_mask:
+ case Intrinsic::riscv_seg4_store_mask:
+ case Intrinsic::riscv_seg5_store_mask:
+ case Intrinsic::riscv_seg6_store_mask:
+ case Intrinsic::riscv_seg7_store_mask:
+ case Intrinsic::riscv_seg8_store_mask:
+ case Intrinsic::riscv_sseg2_store_mask:
+ case Intrinsic::riscv_sseg3_store_mask:
+ case Intrinsic::riscv_sseg4_store_mask:
+ case Intrinsic::riscv_sseg5_store_mask:
+ case Intrinsic::riscv_sseg6_store_mask:
+ case Intrinsic::riscv_sseg7_store_mask:
+ case Intrinsic::riscv_sseg8_store_mask:
+ return lowerFixedVectorSegStoreIntrinsics(IntNo, Op, Subtarget, DAG);
- SDValue Ops[] = {
- FixedIntrinsic->getChain(),
- IntID,
- StoredVal,
- Ptr,
- Mask,
- VL,
- DAG.getTargetConstant(Log2_64(VT.getScalarSizeInBits()), DL, XLenVT)};
-
- return DAG.getMemIntrinsicNode(
- ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other), Ops,
- FixedIntrinsic->getMemoryVT(), FixedIntrinsic->getMemOperand());
- }
case Intrinsic::riscv_sf_vc_xv_se:
return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_XV_SE);
case Intrinsic::riscv_sf_vc_iv_se:
@@ -22282,8 +22342,8 @@ void RISCVTargetLowering::analyzeInputArgs(
else if (In.isOrigArg())
ArgTy = FType->getParamType(In.getOrigArgIndex());
- if (Fn(Idx, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo,
- /*IsFixed=*/true, IsRet, ArgTy)) {
+ if (Fn(Idx, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo, IsRet,
+ ArgTy)) {
LLVM_DEBUG(dbgs() << "InputArg #" << Idx << " has unhandled type "
<< ArgVT << '\n');
llvm_unreachable(nullptr);
@@ -22300,8 +22360,8 @@ void RISCVTargetLowering::analyzeOutputArgs(
ISD::ArgFlagsTy ArgFlags = Out.Flags;
Type *OrigTy = CLI ? CLI->getArgs()[Out.OrigArgIndex].Ty : nullptr;
- if (Fn(Idx, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo, Out.IsFixed,
- IsRet, OrigTy)) {
+ if (Fn(Idx, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo, IsRet,
+ OrigTy)) {
LLVM_DEBUG(dbgs() << "OutputArg #" << Idx << " has unhandled type "
<< ArgVT << "\n");
llvm_unreachable(nullptr);
@@ -23083,7 +23143,7 @@ bool RISCVTargetLowering::CanLowerReturn(
MVT VT = Outs[i].VT;
ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
if (CC_RISCV(i, VT, VT, CCValAssign::Full, ArgFlags, CCInfo,
- /*IsFixed=*/true, /*IsRet=*/true, nullptr))
+ /*IsRet=*/true, nullptr))
return false;
}
return true;
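
A hedged sketch of the operand counting used by the new segment-store helper (illustrative names, not the SelectionDAG API): the segment count NF falls directly out of the intrinsic's operand layout.

  // Operands of the fixed-vector segment-store intrinsics are
  //   (chain, int_id, vec_0 ... vec_{NF-1}, ptr[, stride], mask, vl)
  // so NF is the operand count minus the non-vector operands.
  unsigned segmentCount(unsigned NumOperands, bool IsStrided) {
    unsigned NonVectorOps = IsStrided ? 6 : 5; // chain, id, ptr, (stride,) mask, vl
    return NumOperands - NonVectorOps;
  }
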
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
index 413ad8b..ee623d3a 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
@@ -692,6 +692,11 @@ def : Pat<(binop_allwusers<or>
(shl GPR:$op1rs1, (XLenVT 24))),
(shl (zexti8 (XLenVT GPR:$op1rs2)), (XLenVT 16))),
(PACKW GPR:$rs1, (XLenVT (PACKH GPR:$op1rs1, GPR:$op1rs2)))>;
+
+def : Pat<(i64 (or (or (zexti16 (XLenVT GPR:$rs1)),
+ (shl (zexti8 (XLenVT GPR:$op1rs2)), (XLenVT 16))),
+ (sext_inreg (shl GPR:$op1rs1, (XLenVT 24)), i32))),
+ (PACKW GPR:$rs1, (XLenVT (PACKH GPR:$op1rs1, GPR:$op1rs2)))>;
} // Predicates = [HasStdExtZbkb, IsRV64]
let Predicates = [HasStdExtZbb, IsRV32] in
diff --git a/llvm/lib/Target/RISCV/RISCVSchedSpacemitX60.td b/llvm/lib/Target/RISCV/RISCVSchedSpacemitX60.td
index bf23812..24ebbc3 100644
--- a/llvm/lib/Target/RISCV/RISCVSchedSpacemitX60.td
+++ b/llvm/lib/Target/RISCV/RISCVSchedSpacemitX60.td
@@ -13,78 +13,113 @@
//
//===----------------------------------------------------------------------===//
-class SMX60IsWorstCaseMX<string mx, list<string> MxList> {
- string LLMUL = LargestLMUL<MxList>.r;
- bit c = !eq(mx, LLMUL);
-}
+//===----------------------------------------------------------------------===//
+// Helpers
+
+// Maps LMUL string to corresponding value from the Values array
+// LMUL values map to array indices as follows:
+// MF8 -> Values[0], MF4 -> Values[1], MF2 -> Values[2], M1 -> Values[3],
+// M2 -> Values[4], M4 -> Values[5], M8 -> Values[6]
+// Shorter lists are allowed, e.g., widening instructions don't work on M8
+class GetLMULValue<list<int> Values, string LMUL> {
+ defvar Index = !cond(
+ !eq(LMUL, "MF8"): 0,
+ !eq(LMUL, "MF4"): 1,
+ !eq(LMUL, "MF2"): 2,
+ !eq(LMUL, "M1"): 3,
+ !eq(LMUL, "M2"): 4,
+ !eq(LMUL, "M4"): 5,
+ !eq(LMUL, "M8"): 6,
+ );
-class SMX60IsWorstCaseMXSEW<string mx, int sew, list<string> MxList, bit isF = 0> {
- string LLMUL = LargestLMUL<MxList>.r;
- int SSEW = SmallestSEW<mx, isF>.r;
- bit c = !and(!eq(mx, LLMUL), !eq(sew, SSEW));
+ assert !lt(Index, !size(Values)),
+ "Missing LMUL value for '" # LMUL # "'. " #
+ "Expected at least " # !add(Index, 1) # " elements, but got " #
+ !size(Values) # ".";
+
+ int c = Values[Index];
}
-defvar SMX60VLEN = 256;
-defvar SMX60DLEN = !div(SMX60VLEN, 2);
+// Returns BaseValue for LMUL values before startLMUL, Value for startLMUL,
+// then doubles Value for each subsequent LMUL
+// Example: ConstValueUntilLMULThenDoubleBase<"M1", 2, 4, mx> returns:
+// MF8->2, MF4->2, MF2->2, M1->4, M2->8, M4->16, M8->32
+// This is useful for modeling scheduling parameters that scale with LMUL.
+class ConstValueUntilLMULThenDoubleBase<string startLMUL, int BaseValue, int Value, string currentLMUL> {
+ assert !le(BaseValue, Value), "BaseValue must be less than or equal to Value";
+ defvar startPos = GetLMULValue<[0, 1, 2, 3, 4, 5, 6], startLMUL>.c;
+ defvar currentPos = GetLMULValue<[0, 1, 2, 3, 4, 5, 6], currentLMUL>.c;
+
+ // Calculate the difference in positions
+ defvar posDiff = !sub(currentPos, startPos);
-class Get1248Latency<string mx> {
+ // Calculate Value * (2^posDiff)
int c = !cond(
- !eq(mx, "M2") : 2,
- !eq(mx, "M4") : 4,
- !eq(mx, "M8") : 8,
- true: 1
+ !eq(posDiff, 0) : Value,
+ !eq(posDiff, 1) : !mul(Value, 2),
+ !eq(posDiff, 2) : !mul(Value, 4),
+ !eq(posDiff, 3) : !mul(Value, 8),
+ !eq(posDiff, 4) : !mul(Value, 16),
+ !eq(posDiff, 5) : !mul(Value, 32),
+ !eq(posDiff, 6) : !mul(Value, 64),
+ true : BaseValue
);
}
-// Used for: logical opsz, shifts, sign ext, merge/move, FP sign/recip/convert, mask ops, slides
-class Get4816Latency<string mx> {
- int c = !cond(
- !eq(mx, "M4") : 8,
- !eq(mx, "M8") : 16,
- true: 4
- );
+// Same as the previous function but BaseValue == Value
+class ConstValueUntilLMULThenDouble<string startLMUL, int Value, string currentLMUL> {
+ int c = ConstValueUntilLMULThenDoubleBase<startLMUL, Value, Value, currentLMUL>.c;
+}
+
+// Returns MF8->1, MF4->1, MF2->2, M1->4, M2->8, M4->16, M8->32
+class ConstOneUntilMF4ThenDouble<string mx> {
+ int c = ConstValueUntilLMULThenDouble<"MF4", 1, mx>.c;
}
+// Returns MF8->1, MF4->1, MF2->1, M1->2, M2->4, M4->8, M8->16
+class ConstOneUntilMF2ThenDouble<string mx> {
+ int c = ConstValueUntilLMULThenDouble<"MF2", 1, mx>.c;
+}
+
+// Returns MF8->1, MF4->1, MF2->1, M1->1, M2->2, M4->4, M8->8
+class ConstOneUntilM1ThenDouble<string mx> {
+ int c = ConstValueUntilLMULThenDouble<"M1", 1, mx>.c;
+}
+
+//===----------------------------------------------------------------------===//
+// Latency helper classes
+
// Used for: arithmetic (add/sub/min/max), saturating/averaging, FP add/sub/min/max
-class Get458Latency<string mx> {
- int c = !cond(
- !eq(mx, "M4") : 5,
- !eq(mx, "M8") : 8,
- true: 4
- );
+class Get4458Latency<string mx> {
+ int c = GetLMULValue<[/*MF8=*/4, /*MF4=*/4, /*MF2=*/4, /*M1=*/4, /*M2=*/4, /*M4=*/5, /*M8=*/8], mx>.c;
}
-// Widening scaling pattern (4,4,4,4,5,8,8): plateaus at higher LMULs
-// Used for: widening operations
+// Used for: widening operations (no M8)
class Get4588Latency<string mx> {
- int c = !cond(
- !eq(mx, "M2") : 5,
- !eq(mx, "M4") : 8,
- !eq(mx, "M8") : 8, // M8 not supported for most widening, fallback
- true: 4
- );
+ int c = GetLMULValue<[/*MF8=*/4, /*MF4=*/4, /*MF2=*/4, /*M1=*/4, /*M2=*/5, /*M4=*/8], mx>.c;
}
// Used for: mask-producing comparisons, carry ops with mask, FP comparisons
class Get461018Latency<string mx> {
- int c = !cond(
- !eq(mx, "M2") : 6,
- !eq(mx, "M4") : 10,
- !eq(mx, "M8") : 18,
- true: 4
- );
+ int c = GetLMULValue<[/*MF8=*/4, /*MF4=*/4, /*MF2=*/4, /*M1=*/4, /*M2=*/6, /*M4=*/10, /*M8=*/18], mx>.c;
}
-// Used for: e64 multiply pattern, complex ops
-class Get781632Latency<string mx> {
- int c = !cond(
- !eq(mx, "M2") : 8,
- !eq(mx, "M4") : 16,
- !eq(mx, "M8") : 32,
- true: 7
- );
+//===----------------------------------------------------------------------===//
+
+class SMX60IsWorstCaseMX<string mx, list<string> MxList> {
+ string LLMUL = LargestLMUL<MxList>.r;
+ bit c = !eq(mx, LLMUL);
}
+class SMX60IsWorstCaseMXSEW<string mx, int sew, list<string> MxList, bit isF = 0> {
+ string LLMUL = LargestLMUL<MxList>.r;
+ int SSEW = SmallestSEW<mx, isF>.r;
+ bit c = !and(!eq(mx, LLMUL), !eq(sew, SSEW));
+}
+
+defvar SMX60VLEN = 256;
+defvar SMX60DLEN = !div(SMX60VLEN, 2);
+
def SpacemitX60Model : SchedMachineModel {
let IssueWidth = 2; // dual-issue
let MicroOpBufferSize = 0; // in-order
@@ -383,12 +418,13 @@ foreach LMul = [1, 2, 4, 8] in {
foreach mx = SchedMxList in {
defvar IsWorstCase = SMX60IsWorstCaseMX<mx, SchedMxList>.c;
- let Latency = Get458Latency<mx>.c, ReleaseAtCycles = [4] in {
+ let Latency = Get4458Latency<mx>.c, ReleaseAtCycles = [4] in {
defm "" : LMULWriteResMX<"WriteVIMinMaxV", [SMX60_VIEU], mx, IsWorstCase>;
defm "" : LMULWriteResMX<"WriteVIMinMaxX", [SMX60_VIEU], mx, IsWorstCase>;
}
- let Latency = Get4816Latency<mx>.c, ReleaseAtCycles = [4] in {
+ defvar VIALULat = ConstValueUntilLMULThenDouble<"M2", 4, mx>.c;
+ let Latency = VIALULat, ReleaseAtCycles = [4] in {
// Pattern of vadd, vsub, vrsub: 4/4/5/8
// Pattern of vand, vor, vxor: 4/4/8/16
// They are grouped together, so we used the worst case 4/4/8/16
@@ -425,7 +461,7 @@ foreach mx = SchedMxList in {
// Pattern of vmacc, vmadd, vmul, vmulh, etc.: e8/e16 = 4/4/5/8, e32 = 5,5,5,8,
// e64 = 7,8,16,32. We use the worst-case until we can split the SEW.
// TODO: change WriteVIMulV, etc to be defined with LMULSEWSchedWrites
- let Latency = Get781632Latency<mx>.c, ReleaseAtCycles = [7] in {
+ let Latency = ConstValueUntilLMULThenDoubleBase<"M2", 7, 8, mx>.c, ReleaseAtCycles = [7] in {
defm "" : LMULWriteResMX<"WriteVIMulV", [SMX60_VIEU], mx, IsWorstCase>;
defm "" : LMULWriteResMX<"WriteVIMulX", [SMX60_VIEU], mx, IsWorstCase>;
defm "" : LMULWriteResMX<"WriteVIMulAddV", [SMX60_VIEU], mx, IsWorstCase>;
@@ -461,15 +497,8 @@ foreach mx = SchedMxList in {
foreach sew = SchedSEWSet<mx>.val in {
defvar IsWorstCase = SMX60IsWorstCaseMXSEW<mx, sew, SchedMxList>.c;
- // Slightly reduced for fractional LMULs
- defvar Multiplier = !cond(
- !eq(mx, "MF8") : 12,
- !eq(mx, "MF4") : 12,
- !eq(mx, "MF2") : 12,
- true: 24
- );
-
- let Latency = !mul(Get1248Latency<mx>.c, Multiplier), ReleaseAtCycles = [12] in {
+ defvar VIDivLat = ConstValueUntilLMULThenDouble<"MF2", 12, mx>.c;
+ let Latency = VIDivLat, ReleaseAtCycles = [12] in {
defm "" : LMULSEWWriteResMXSEW<"WriteVIDivV", [SMX60_VIEU], mx, sew, IsWorstCase>;
defm "" : LMULSEWWriteResMXSEW<"WriteVIDivX", [SMX60_VIEU], mx, sew, IsWorstCase>;
}
@@ -480,14 +509,8 @@ foreach mx = SchedMxList in {
foreach mx = SchedMxListW in {
defvar IsWorstCase = SMX60IsWorstCaseMX<mx, SchedMxListW>.c;
- // Slightly increased for integer LMULs
- defvar Multiplier = !cond(
- !eq(mx, "M2") : 2,
- !eq(mx, "M4") : 2,
- true: 1
- );
-
- let Latency = !mul(Get4816Latency<mx>.c, Multiplier), ReleaseAtCycles = [4] in {
+ defvar VNarrowingLat = ConstValueUntilLMULThenDouble<"M1", 4, mx>.c;
+ let Latency = VNarrowingLat, ReleaseAtCycles = [4] in {
defm "" : LMULWriteResMX<"WriteVNShiftV", [SMX60_VIEU], mx, IsWorstCase>;
defm "" : LMULWriteResMX<"WriteVNShiftX", [SMX60_VIEU], mx, IsWorstCase>;
defm "" : LMULWriteResMX<"WriteVNShiftI", [SMX60_VIEU], mx, IsWorstCase>;
@@ -501,16 +524,33 @@ foreach mx = SchedMxListW in {
foreach mx = SchedMxList in {
defvar IsWorstCase = SMX60IsWorstCaseMX<mx, SchedMxList>.c;
- defm "" : LMULWriteResMX<"WriteVSALUV", [SMX60_VIEU], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVSALUX", [SMX60_VIEU], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVSALUI", [SMX60_VIEU], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVAALUV", [SMX60_VIEU], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVAALUX", [SMX60_VIEU], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVSMulV", [SMX60_VIEU], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVSMulX", [SMX60_VIEU], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVSShiftV", [SMX60_VIEU], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVSShiftX", [SMX60_VIEU], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVSShiftI", [SMX60_VIEU], mx, IsWorstCase>;
+ let Latency = Get4458Latency<mx>.c, ReleaseAtCycles = [ConstOneUntilM1ThenDouble<mx>.c] in {
+ defm "" : LMULWriteResMX<"WriteVSALUV", [SMX60_VIEU], mx, IsWorstCase>;
+ defm "" : LMULWriteResMX<"WriteVSALUX", [SMX60_VIEU], mx, IsWorstCase>;
+ defm "" : LMULWriteResMX<"WriteVSALUI", [SMX60_VIEU], mx, IsWorstCase>;
+ defm "" : LMULWriteResMX<"WriteVAALUV", [SMX60_VIEU], mx, IsWorstCase>;
+ defm "" : LMULWriteResMX<"WriteVAALUX", [SMX60_VIEU], mx, IsWorstCase>;
+ }
+
+ // Latency of vsmul: e8/e16 = 4/4/5/8, e32 = 5/5/5/8, e64 = 7/8/16/32
+ // We use the worst-case until we can split the SEW.
+ defvar VSMulLat = ConstValueUntilLMULThenDoubleBase<"M2", 7, 8, mx>.c;
+ // Occupancy of vsmul: e8/e16/e32 = 1/2/4/8, e64 = 4/8/16/32
+ // We use the worst-case until we can split the SEW.
+ defvar VSMulOcc = ConstValueUntilLMULThenDoubleBase<"M1", 1, 4, mx>.c;
+ // TODO: change WriteVSMulV/X to be defined with LMULSEWSchedWrites
+ let Latency = VSMulLat, ReleaseAtCycles = [VSMulOcc] in {
+ defm "" : LMULWriteResMX<"WriteVSMulV", [SMX60_VIEU], mx, IsWorstCase>;
+ defm "" : LMULWriteResMX<"WriteVSMulX", [SMX60_VIEU], mx, IsWorstCase>;
+ }
+
+ defvar VSShiftLat = ConstValueUntilLMULThenDouble<"M2", 4, mx>.c;
+ defvar VSShiftOcc = ConstOneUntilMF2ThenDouble<mx>.c;
+ let Latency = VSShiftLat, ReleaseAtCycles = [VSShiftOcc] in {
+ defm "" : LMULWriteResMX<"WriteVSShiftV", [SMX60_VIEU], mx, IsWorstCase>;
+ defm "" : LMULWriteResMX<"WriteVSShiftX", [SMX60_VIEU], mx, IsWorstCase>;
+ defm "" : LMULWriteResMX<"WriteVSShiftI", [SMX60_VIEU], mx, IsWorstCase>;
+ }
}
// 13. Vector Floating-Point Instructions
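
The TableGen helpers introduced above reduce to simple index arithmetic over the LMUL ladder; as a sanity check, here is a small standalone C++ rendering of the same rule (names are illustrative, not part of the patch): the value is BaseValue below the starting LMUL and doubles once per LMUL step at and above it.

  #include <array>
  #include <cassert>
  #include <string>

  // LMUL order: MF8, MF4, MF2, M1, M2, M4, M8 -> indices 0..6.
  int lmulIndex(const std::string &LMUL) {
    static const std::array<std::string, 7> Order = {"MF8", "MF4", "MF2",
                                                     "M1",  "M2",  "M4",  "M8"};
    for (int I = 0; I < 7; ++I)
      if (Order[I] == LMUL)
        return I;
    assert(false && "unknown LMUL");
    return -1;
  }

  // Editorial sketch of ConstValueUntilLMULThenDoubleBase.
  int constUntilLMULThenDoubleBase(const std::string &StartLMUL, int BaseValue,
                                   int Value, const std::string &CurrentLMUL) {
    int Diff = lmulIndex(CurrentLMUL) - lmulIndex(StartLMUL);
    return Diff < 0 ? BaseValue : Value << Diff;
  }
  // e.g. constUntilLMULThenDoubleBase("M1", 2, 4, "M8") == 32, matching the
  // MF8->2, MF4->2, MF2->2, M1->4, M2->8, M4->16, M8->32 mapping above.
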
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
index 05d504c..6a1f4b3 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
@@ -114,6 +114,9 @@ public:
bool enableScalableVectorization() const override {
return ST->hasVInstructions();
}
+ bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) const override {
+ return ST->hasVInstructions();
+ }
TailFoldingStyle
getPreferredTailFoldingStyle(bool IVUpdateMayOverflow) const override {
return ST->hasVInstructions() ? TailFoldingStyle::DataWithEVL
diff --git a/llvm/lib/Target/SPIRV/SPIRVPrepareFunctions.cpp b/llvm/lib/Target/SPIRV/SPIRVPrepareFunctions.cpp
index 74aec4f..2b34f61 100644
--- a/llvm/lib/Target/SPIRV/SPIRVPrepareFunctions.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVPrepareFunctions.cpp
@@ -359,18 +359,15 @@ static void lowerExpectAssume(IntrinsicInst *II) {
}
}
-static bool toSpvOverloadedIntrinsic(IntrinsicInst *II, Intrinsic::ID NewID,
- ArrayRef<unsigned> OpNos) {
- Function *F = nullptr;
- if (OpNos.empty()) {
- F = Intrinsic::getOrInsertDeclaration(II->getModule(), NewID);
- } else {
- SmallVector<Type *, 4> Tys;
- for (unsigned OpNo : OpNos)
- Tys.push_back(II->getOperand(OpNo)->getType());
- F = Intrinsic::getOrInsertDeclaration(II->getModule(), NewID, Tys);
- }
- II->setCalledFunction(F);
+static bool toSpvLifetimeIntrinsic(IntrinsicInst *II, Intrinsic::ID NewID) {
+ IRBuilder<> Builder(II);
+ auto *Alloca = cast<AllocaInst>(II->getArgOperand(0));
+ std::optional<TypeSize> Size =
+ Alloca->getAllocationSize(Alloca->getDataLayout());
+ Value *SizeVal = Builder.getInt64(Size ? *Size : -1);
+ Builder.CreateIntrinsic(NewID, Alloca->getType(),
+ {SizeVal, II->getArgOperand(0)});
+ II->eraseFromParent();
return true;
}
@@ -406,8 +403,8 @@ bool SPIRVPrepareFunctions::substituteIntrinsicCalls(Function *F) {
break;
case Intrinsic::lifetime_start:
if (!STI.isShader()) {
- Changed |= toSpvOverloadedIntrinsic(
- II, Intrinsic::SPVIntrinsics::spv_lifetime_start, {1});
+ Changed |= toSpvLifetimeIntrinsic(
+ II, Intrinsic::SPVIntrinsics::spv_lifetime_start);
} else {
II->eraseFromParent();
Changed = true;
@@ -415,8 +412,8 @@ bool SPIRVPrepareFunctions::substituteIntrinsicCalls(Function *F) {
break;
case Intrinsic::lifetime_end:
if (!STI.isShader()) {
- Changed |= toSpvOverloadedIntrinsic(
- II, Intrinsic::SPVIntrinsics::spv_lifetime_end, {1});
+ Changed |= toSpvLifetimeIntrinsic(
+ II, Intrinsic::SPVIntrinsics::spv_lifetime_end);
} else {
II->eraseFromParent();
Changed = true;
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
index 1aa8efe..c0fc3a6 100644
--- a/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -1179,7 +1179,7 @@ static void fixupVariableFloatArgs(SmallVectorImpl<CCValAssign> &ArgLocs,
if (!VA.isRegLoc() || (ValTy != MVT::f64 && ValTy != MVT::f128))
continue;
// The fixed arguments to a varargs function still go in FP registers.
- if (Outs[VA.getValNo()].IsFixed)
+ if (!Outs[VA.getValNo()].Flags.isVarArg())
continue;
// This floating point argument should be reassigned.
diff --git a/llvm/lib/Target/SystemZ/SystemZCallingConv.h b/llvm/lib/Target/SystemZ/SystemZCallingConv.h
index 25f4aac..fbb98ff 100644
--- a/llvm/lib/Target/SystemZ/SystemZCallingConv.h
+++ b/llvm/lib/Target/SystemZ/SystemZCallingConv.h
@@ -31,10 +31,6 @@ namespace SystemZ {
class SystemZCCState : public CCState {
private:
- /// Records whether the value was a fixed argument.
- /// See ISD::OutputArg::IsFixed.
- SmallVector<bool, 4> ArgIsFixed;
-
/// Records whether the value was widened from a short vector type.
SmallVector<bool, 4> ArgIsShortVector;
@@ -50,10 +46,6 @@ public:
void AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
CCAssignFn Fn) {
- // Formal arguments are always fixed.
- ArgIsFixed.clear();
- for (unsigned i = 0; i < Ins.size(); ++i)
- ArgIsFixed.push_back(true);
// Record whether the call operand was a short vector.
ArgIsShortVector.clear();
for (unsigned i = 0; i < Ins.size(); ++i)
@@ -64,10 +56,6 @@ public:
void AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
CCAssignFn Fn) {
- // Record whether the call operand was a fixed argument.
- ArgIsFixed.clear();
- for (unsigned i = 0; i < Outs.size(); ++i)
- ArgIsFixed.push_back(Outs[i].IsFixed);
// Record whether the call operand was a short vector.
ArgIsShortVector.clear();
for (unsigned i = 0; i < Outs.size(); ++i)
@@ -77,12 +65,11 @@ public:
}
// This version of AnalyzeCallOperands in the base class is not usable
- // since we must provide a means of accessing ISD::OutputArg::IsFixed.
+ // since we must provide a means of accessing ISD::OutputArg::IsShortVector.
void AnalyzeCallOperands(const SmallVectorImpl<MVT> &Outs,
SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
CCAssignFn Fn) = delete;
- bool IsFixed(unsigned ValNo) { return ArgIsFixed[ValNo]; }
bool IsShortVector(unsigned ValNo) { return ArgIsShortVector[ValNo]; }
};
diff --git a/llvm/lib/Target/SystemZ/SystemZCallingConv.td b/llvm/lib/Target/SystemZ/SystemZCallingConv.td
index 0ad872b..059f31f 100644
--- a/llvm/lib/Target/SystemZ/SystemZCallingConv.td
+++ b/llvm/lib/Target/SystemZ/SystemZCallingConv.td
@@ -16,14 +16,6 @@ class CCIfSubtarget<string F, CCAction A>
"getSubtarget<SystemZSubtarget>().", F),
A>;
-// Match if this specific argument is a fixed (i.e. named) argument.
-class CCIfFixed<CCAction A>
- : CCIf<"static_cast<SystemZCCState *>(&State)->IsFixed(ValNo)", A>;
-
-// Match if this specific argument is not a fixed (i.e. vararg) argument.
-class CCIfNotFixed<CCAction A>
- : CCIf<"!(static_cast<SystemZCCState *>(&State)->IsFixed(ValNo))", A>;
-
// Match if this specific argument was widened from a short vector type.
class CCIfShortVector<CCAction A>
: CCIf<"static_cast<SystemZCCState *>(&State)->IsShortVector(ValNo)", A>;
@@ -79,7 +71,7 @@ def CC_SystemZ_GHC : CallingConv<[
// Pass in STG registers: XMM1, ..., XMM6
CCIfSubtarget<"hasVector()",
CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
- CCIfFixed<CCAssignToReg<[V16, V17, V18, V19, V20, V21]>>>>,
+ CCIfArgFixed<CCAssignToReg<[V16, V17, V18, V19, V20, V21]>>>>,
// Fail otherwise
CCCustom<"CC_SystemZ_GHC_Error">
@@ -125,8 +117,8 @@ def CC_SystemZ_ELF : CallingConv<[
// during type legalization.
CCIfSubtarget<"hasVector()",
CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
- CCIfFixed<CCAssignToReg<[V24, V26, V28, V30,
- V25, V27, V29, V31]>>>>,
+ CCIfArgFixed<CCAssignToReg<[V24, V26, V28, V30,
+ V25, V27, V29, V31]>>>>,
// However, sub-128 vectors which need to go on the stack occupy just a
// single 8-byte-aligned 8-byte stack slot. Pass as i64.
@@ -227,17 +219,17 @@ def CC_SystemZ_XPLINK64 : CallingConv<[
// Promote f32 to f64 and bitcast to i64, if it needs to be passed in GPRs.
// Although we assign the f32 vararg to be bitcast, it will first be promoted
// to an f64 within convertValVTToLocVT().
- CCIfType<[f32, f64], CCIfNotFixed<CCBitConvertToType<i64>>>,
+ CCIfType<[f32, f64], CCIfArgVarArg<CCBitConvertToType<i64>>>,
// Pointers are always passed in full 64-bit registers.
CCIfPtr<CCCustom<"CC_XPLINK64_Pointer">>,
// long double, can only be passed in GPR2 and GPR3, if available,
// hence R2Q
- CCIfType<[f128], CCIfNotFixed<CCCustom<"CC_XPLINK64_Allocate128BitVararg">>>,
+ CCIfType<[f128], CCIfArgVarArg<CCCustom<"CC_XPLINK64_Allocate128BitVararg">>>,
// Non fixed vector arguments are treated in the same way as long
// doubles.
CCIfSubtarget<"hasVector()",
CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
- CCIfNotFixed<CCCustom<"CC_XPLINK64_Allocate128BitVararg">>>>,
+ CCIfArgVarArg<CCCustom<"CC_XPLINK64_Allocate128BitVararg">>>>,
// A SwiftSelf is passed in callee-saved R10.
CCIfSwiftSelf<CCIfType<[i64], CCAssignToReg<[R10D]>>>,
@@ -260,22 +252,24 @@ def CC_SystemZ_XPLINK64 : CallingConv<[
// during type legalization.
CCIfSubtarget<"hasVector()",
CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
- CCIfFixed<CCCustom<"CC_XPLINK64_Shadow_Reg">>>>,
+ CCIfArgFixed<CCCustom<"CC_XPLINK64_Shadow_Reg">>>>,
CCIfSubtarget<"hasVector()",
CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
- CCIfFixed<CCAssignToRegAndStack<[V24, V25, V26, V27,
- V28, V29, V30, V31], 16, 8>>>>,
+ CCIfArgFixed<CCAssignToRegAndStack<[V24, V25, V26, V27,
+ V28, V29, V30, V31], 16, 8>>>>,
// The first 4 named float and double arguments are passed in registers
// FPR0-FPR6. The rest will be passed in the user area.
- CCIfType<[f32, f64], CCIfFixed<CCCustom<"CC_XPLINK64_Shadow_Reg">>>,
- CCIfType<[f32], CCIfFixed<CCAssignToRegAndStack<[F0S, F2S, F4S, F6S], 4, 8>>>,
- CCIfType<[f64], CCIfFixed<CCAssignToRegAndStack<[F0D, F2D, F4D, F6D], 8, 8>>>,
+ CCIfType<[f32, f64], CCIfArgFixed<CCCustom<"CC_XPLINK64_Shadow_Reg">>>,
+ CCIfType<[f32],
+ CCIfArgFixed<CCAssignToRegAndStack<[F0S, F2S, F4S, F6S], 4, 8>>>,
+ CCIfType<[f64],
+ CCIfArgFixed<CCAssignToRegAndStack<[F0D, F2D, F4D, F6D], 8, 8>>>,
// The first 2 long double arguments are passed in register FPR0/FPR2
// and FPR4/FPR6. The rest will be passed in the user area.
- CCIfType<[f128], CCIfFixed<CCCustom<"CC_XPLINK64_Shadow_Reg">>>,
- CCIfType<[f128], CCIfFixed<CCAssignToRegAndStack<[F0Q, F4Q], 16, 8>>>,
+ CCIfType<[f128], CCIfArgFixed<CCCustom<"CC_XPLINK64_Shadow_Reg">>>,
+ CCIfType<[f128], CCIfArgFixed<CCAssignToRegAndStack<[F0Q, F4Q], 16, 8>>>,
// Other arguments are passed in 8-byte-aligned 8-byte stack slots.
CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>,
diff --git a/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp b/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp
index d76babe..afe838a 100644
--- a/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp
@@ -181,8 +181,7 @@ static SDValue addIPMSequence(const SDLoc &DL, SDValue CCReg,
std::pair<SDValue, SDValue> SystemZSelectionDAGInfo::EmitTargetCodeForMemcmp(
SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src1,
- SDValue Src2, SDValue Size, MachinePointerInfo Op1PtrInfo,
- MachinePointerInfo Op2PtrInfo) const {
+ SDValue Src2, SDValue Size, const CallInst *CI) const {
SDValue CCReg;
// Swap operands to invert CC == 1 vs. CC == 2 cases.
if (auto *CSize = dyn_cast<ConstantSDNode>(Size)) {
diff --git a/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.h b/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.h
index c928f34..5a1e0cd 100644
--- a/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.h
+++ b/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.h
@@ -41,8 +41,7 @@ public:
std::pair<SDValue, SDValue>
EmitTargetCodeForMemcmp(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain,
SDValue Src1, SDValue Src2, SDValue Size,
- MachinePointerInfo Op1PtrInfo,
- MachinePointerInfo Op2PtrInfo) const override;
+ const CallInst *CI) const override;
std::pair<SDValue, SDValue>
EmitTargetCodeForMemchr(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain,
diff --git a/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp b/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp
index f32c9bd..2611c29 100644
--- a/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp
@@ -436,20 +436,6 @@ bool SystemZTTIImpl::isLSRCostLess(
C2.ScaleCost, C2.SetupCost);
}
-bool SystemZTTIImpl::areInlineCompatible(const Function *Caller,
- const Function *Callee) const {
- const TargetMachine &TM = getTLI()->getTargetMachine();
-
- const FeatureBitset &CallerBits =
- TM.getSubtargetImpl(*Caller)->getFeatureBits();
- const FeatureBitset &CalleeBits =
- TM.getSubtargetImpl(*Callee)->getFeatureBits();
-
- // Support only equal feature bitsets. Restriction should be relaxed in the
- // future to allow inlining when callee's bits are subset of the caller's.
- return CallerBits == CalleeBits;
-}
-
unsigned SystemZTTIImpl::getNumberOfRegisters(unsigned ClassID) const {
bool Vector = (ClassID == 1);
if (!Vector)
diff --git a/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.h b/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.h
index dc5736e..fc681de 100644
--- a/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.h
+++ b/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.h
@@ -65,9 +65,6 @@ public:
bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
const TargetTransformInfo::LSRCost &C2) const override;
- bool areInlineCompatible(const Function *Caller,
- const Function *Callee) const override;
-
/// @}
/// \name Vector TTI Implementations
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp
index b03b350..fc852d0 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp
@@ -136,6 +136,15 @@ static APInt encodeFunctionSignature(SelectionDAG *DAG, SDLoc &DL,
if (VT == MVT::f64) {
return wasm::ValType::F64;
}
+ if (VT == MVT::externref) {
+ return wasm::ValType::EXTERNREF;
+ }
+ if (VT == MVT::funcref) {
+ return wasm::ValType::FUNCREF;
+ }
+ if (VT == MVT::exnref) {
+ return wasm::ValType::EXNREF;
+ }
LLVM_DEBUG(errs() << "Unhandled type for llvm.wasm.ref.test.func: " << VT
<< "\n");
llvm_unreachable("Unhandled type for llvm.wasm.ref.test.func");
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
index 3f80b2a..f9eba4b 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
@@ -1309,7 +1309,7 @@ WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
OutVal = FINode;
}
// Count the number of fixed args *after* legalization.
- NumFixedArgs += Out.IsFixed;
+ NumFixedArgs += !Out.Flags.isVarArg();
}
bool IsVarArg = CLI.IsVarArg;
@@ -1503,7 +1503,7 @@ SDValue WebAssemblyTargetLowering::LowerReturn(
for (const ISD::OutputArg &Out : Outs) {
assert(!Out.Flags.isByVal() && "byval is not valid for return values");
assert(!Out.Flags.isNest() && "nest is not valid for return values");
- assert(Out.IsFixed && "non-fixed return value is not valid");
+ assert(!Out.Flags.isVarArg() && "non-fixed return value is not valid");
if (Out.Flags.isInAlloca())
fail(DL, DAG, "WebAssembly hasn't implemented inalloca results");
if (Out.Flags.isInConsecutiveRegs())
diff --git a/llvm/lib/Target/X86/GISel/X86CallLowering.cpp b/llvm/lib/Target/X86/GISel/X86CallLowering.cpp
index c0a6035..d9f4405 100644
--- a/llvm/lib/Target/X86/GISel/X86CallLowering.cpp
+++ b/llvm/lib/Target/X86/GISel/X86CallLowering.cpp
@@ -75,7 +75,7 @@ public:
static const MCPhysReg XMMArgRegs[] = {X86::XMM0, X86::XMM1, X86::XMM2,
X86::XMM3, X86::XMM4, X86::XMM5,
X86::XMM6, X86::XMM7};
- if (!Info.IsFixed)
+ if (Flags.isVarArg())
NumXMMRegs = State.getFirstUnallocated(XMMArgRegs);
return Res;
@@ -363,7 +363,8 @@ bool X86CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
Info.CallConv, Info.IsVarArg))
return false;
- bool IsFixed = Info.OrigArgs.empty() ? true : Info.OrigArgs.back().IsFixed;
+ bool IsFixed =
+ Info.OrigArgs.empty() ? true : !Info.OrigArgs.back().Flags[0].isVarArg();
if (STI.is64Bit() && !IsFixed && !STI.isCallingConvWin64(Info.CallConv)) {
// From AMD64 ABI document:
// For calls that may call functions that use varargs or stdargs
diff --git a/llvm/lib/Transforms/Coroutines/CoroFrame.cpp b/llvm/lib/Transforms/Coroutines/CoroFrame.cpp
index 3320508..b775c43 100644
--- a/llvm/lib/Transforms/Coroutines/CoroFrame.cpp
+++ b/llvm/lib/Transforms/Coroutines/CoroFrame.cpp
@@ -1821,7 +1821,7 @@ static void sinkLifetimeStartMarkers(Function &F, coro::Shape &Shape,
// only used outside the region.
if (Valid && Lifetimes.size() != 0) {
auto *NewLifetime = Lifetimes[0]->clone();
- NewLifetime->replaceUsesOfWith(NewLifetime->getOperand(1), AI);
+ NewLifetime->replaceUsesOfWith(NewLifetime->getOperand(0), AI);
NewLifetime->insertBefore(DomBB->getTerminator()->getIterator());
// All the outsided lifetime.start markers are no longer necessary.
diff --git a/llvm/lib/Transforms/IPO/ExpandVariadics.cpp b/llvm/lib/Transforms/IPO/ExpandVariadics.cpp
index da60f52..6ed3b62 100644
--- a/llvm/lib/Transforms/IPO/ExpandVariadics.cpp
+++ b/llvm/lib/Transforms/IPO/ExpandVariadics.cpp
@@ -226,13 +226,6 @@ public:
/*IsVarArgs=*/false);
}
- static ConstantInt *sizeOfAlloca(LLVMContext &Ctx, const DataLayout &DL,
- AllocaInst *Alloced) {
- std::optional<TypeSize> AllocaTypeSize = Alloced->getAllocationSize(DL);
- uint64_t AsInt = AllocaTypeSize ? AllocaTypeSize->getFixedValue() : 0;
- return ConstantInt::get(Type::getInt64Ty(Ctx), AsInt);
- }
-
bool expansionApplicableToFunction(Module &M, Function *F) {
if (F->isIntrinsic() || !F->isVarArg() ||
F->hasFnAttribute(Attribute::Naked))
@@ -577,8 +570,7 @@ ExpandVariadics::defineVariadicWrapper(Module &M, IRBuilder<> &Builder,
AllocaInst *VaListInstance =
Builder.CreateAlloca(VaListTy, nullptr, "va_start");
- Builder.CreateLifetimeStart(VaListInstance,
- sizeOfAlloca(Ctx, DL, VaListInstance));
+ Builder.CreateLifetimeStart(VaListInstance);
Builder.CreateIntrinsic(Intrinsic::vastart, {DL.getAllocaPtrType(Ctx)},
{VaListInstance});
@@ -595,8 +587,7 @@ ExpandVariadics::defineVariadicWrapper(Module &M, IRBuilder<> &Builder,
Builder.CreateIntrinsic(Intrinsic::vaend, {DL.getAllocaPtrType(Ctx)},
{VaListInstance});
- Builder.CreateLifetimeEnd(VaListInstance,
- sizeOfAlloca(Ctx, DL, VaListInstance));
+ Builder.CreateLifetimeEnd(VaListInstance);
if (Result->getType()->isVoidTy())
Builder.CreateRetVoid();
@@ -746,7 +737,7 @@ bool ExpandVariadics::expandCall(Module &M, IRBuilder<> &Builder, CallBase *CB,
// Initialize the fields in the struct
Builder.SetInsertPoint(CB);
- Builder.CreateLifetimeStart(Alloced, sizeOfAlloca(Ctx, DL, Alloced));
+ Builder.CreateLifetimeStart(Alloced);
Frame.initializeStructAlloca(DL, Builder, Alloced);
const unsigned NumArgs = FuncType->getNumParams();
@@ -762,7 +753,7 @@ bool ExpandVariadics::expandCall(Module &M, IRBuilder<> &Builder, CallBase *CB,
Builder.SetCurrentDebugLocation(CB->getStableDebugLoc());
VaList = Builder.CreateAlloca(VaListTy, nullptr, "va_argument");
Builder.SetInsertPoint(CB);
- Builder.CreateLifetimeStart(VaList, sizeOfAlloca(Ctx, DL, VaList));
+ Builder.CreateLifetimeStart(VaList);
}
Builder.SetInsertPoint(CB);
Args.push_back(ABI->initializeVaList(M, Ctx, Builder, VaList, Alloced));
@@ -802,9 +793,9 @@ bool ExpandVariadics::expandCall(Module &M, IRBuilder<> &Builder, CallBase *CB,
}
if (VaList)
- Builder.CreateLifetimeEnd(VaList, sizeOfAlloca(Ctx, DL, VaList));
+ Builder.CreateLifetimeEnd(VaList);
- Builder.CreateLifetimeEnd(Alloced, sizeOfAlloca(Ctx, DL, Alloced));
+ Builder.CreateLifetimeEnd(Alloced);
NewCB->setAttributes(PAL);
NewCB->takeName(CB);
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 47e017e..d7a2ef7 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -1532,6 +1532,51 @@ static Instruction *foldBitOrderCrossLogicOp(Value *V,
return nullptr;
}
+/// Helper to match idempotent binary intrinsics, namely, intrinsics where
+/// `f(f(x, y), y) == f(x, y)` holds.
+static bool isIdempotentBinaryIntrinsic(Intrinsic::ID IID) {
+ switch (IID) {
+ case Intrinsic::smax:
+ case Intrinsic::smin:
+ case Intrinsic::umax:
+ case Intrinsic::umin:
+ case Intrinsic::maximum:
+ case Intrinsic::minimum:
+ case Intrinsic::maximumnum:
+ case Intrinsic::minimumnum:
+ case Intrinsic::maxnum:
+ case Intrinsic::minnum:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/// Attempt to simplify value-accumulating recurrences of kind:
+/// %umax.acc = phi i8 [ %umax, %backedge ], [ %a, %entry ]
+/// %umax = call i8 @llvm.umax.i8(i8 %umax.acc, i8 %b)
+/// so that the idempotent binary intrinsic can be hoisted when the operands
+/// are known to be loop-invariant.
+static Value *foldIdempotentBinaryIntrinsicRecurrence(InstCombinerImpl &IC,
+ IntrinsicInst *II) {
+ PHINode *PN;
+ Value *Init, *OtherOp;
+
+ // A binary intrinsic recurrence with loop-invariant operands is equivalent to
+ // `call @llvm.binary.intrinsic(Init, OtherOp)`.
+ auto IID = II->getIntrinsicID();
+ if (!isIdempotentBinaryIntrinsic(IID) ||
+ !matchSimpleBinaryIntrinsicRecurrence(II, PN, Init, OtherOp) ||
+ !IC.getDominatorTree().dominates(OtherOp, PN))
+ return nullptr;
+
+ auto *InvariantBinaryInst =
+ IC.Builder.CreateBinaryIntrinsic(IID, Init, OtherOp);
+ if (isa<FPMathOperator>(InvariantBinaryInst))
+ cast<Instruction>(InvariantBinaryInst)->copyFastMathFlags(II);
+ return InvariantBinaryInst;
+}
+
static Value *simplifyReductionOperand(Value *Arg, bool CanReorderLanes) {
if (!CanReorderLanes)
return nullptr;
@@ -3912,6 +3957,9 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
if (Value *Reverse = foldReversedIntrinsicOperands(II))
return replaceInstUsesWith(*II, Reverse);
+ if (Value *Res = foldIdempotentBinaryIntrinsicRecurrence(*this, II))
+ return replaceInstUsesWith(*II, Res);
+
// Some intrinsics (like experimental_gc_statepoint) can be used in invoke
// context, so it is handled in visitCallBase and we should trigger it.
return visitCallBase(*II);
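
A scalar analogue of the new fold, under the assumption of at least one loop iteration (illustrative C++, not the IR-level transform itself): because f(f(x, y), y) == f(x, y), the value produced inside the loop collapses to one loop-invariant call.

  #include <algorithm>

  // Loop form: the accumulator starts at Init and is repeatedly combined with a
  // loop-invariant operand via an idempotent binary op (here umax).
  unsigned loopForm(unsigned Init, unsigned Other, unsigned Trips) {
    unsigned Acc = Init;
    unsigned Last = 0;
    for (unsigned I = 0; I < Trips; ++I) {
      Last = std::max(Acc, Other); // value the intrinsic produces each iteration
      Acc = Last;
    }
    return Last; // meaningful only for Trips >= 1
  }

  // Folded form the patch produces for the in-loop value.
  unsigned foldedForm(unsigned Init, unsigned Other) {
    return std::max(Init, Other);
  }
  // For any trip count >= 1, loopForm(a, b, n) == foldedForm(a, b).
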
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
index fe0f308..b17cf17 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
@@ -3042,7 +3042,7 @@ Instruction *InstCombinerImpl::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
Value *V = LHS;
unsigned MaskElems = Mask.size();
auto *SrcTy = cast<FixedVectorType>(V->getType());
- unsigned VecBitWidth = SrcTy->getPrimitiveSizeInBits().getFixedValue();
+ unsigned VecBitWidth = DL.getTypeSizeInBits(SrcTy);
unsigned SrcElemBitWidth = DL.getTypeSizeInBits(SrcTy->getElementType());
assert(SrcElemBitWidth && "vector elements must have a bitwidth");
unsigned SrcNumElems = SrcTy->getNumElements();
diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 8da65c5..50258af 100644
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -1211,23 +1211,19 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
return;
if (!II.isLifetimeStartOrEnd())
return;
- // Found lifetime intrinsic, add ASan instrumentation if necessary.
- auto *Size = cast<ConstantInt>(II.getArgOperand(0));
- // If size argument is undefined, don't do anything.
- if (Size->isMinusOne()) return;
- // Check that size doesn't saturate uint64_t and can
- // be stored in IntptrTy.
- const uint64_t SizeValue = Size->getValue().getLimitedValue();
- if (SizeValue == ~0ULL ||
- !ConstantInt::isValueValidForType(IntptrTy, SizeValue))
- return;
// Find alloca instruction that corresponds to llvm.lifetime argument.
- AllocaInst *AI = dyn_cast<AllocaInst>(II.getArgOperand(1));
+ AllocaInst *AI = dyn_cast<AllocaInst>(II.getArgOperand(0));
// We're interested only in allocas we can handle.
if (!AI || !ASan.isInterestingAlloca(*AI))
return;
+
+ std::optional<TypeSize> Size = AI->getAllocationSize(AI->getDataLayout());
+ // Check that size is known and can be stored in IntptrTy.
+ if (!Size || !ConstantInt::isValueValidForType(IntptrTy, *Size))
+ return;
+
bool DoPoison = (ID == Intrinsic::lifetime_end);
- AllocaPoisonCall APC = {&II, AI, SizeValue, DoPoison};
+ AllocaPoisonCall APC = {&II, AI, *Size, DoPoison};
if (AI->isStaticAlloca())
StaticAllocaPoisonCallVec.push_back(APC);
else if (ClInstrumentDynamicAllocas)
diff --git a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
index bcb90d6..fc34d14 100644
--- a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
@@ -1469,22 +1469,6 @@ void HWAddressSanitizer::instrumentStack(memtag::StackInfo &SInfo,
size_t Size = memtag::getAllocaSizeInBytes(*AI);
size_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());
- auto HandleLifetime = [&](IntrinsicInst *II) {
- // Set the lifetime intrinsic to cover the whole alloca. This reduces the
- // set of assumptions we need to make about the lifetime. Without this we
- // would need to ensure that we can track the lifetime pointer to a
- // constant offset from the alloca, and would still need to change the
- // size to include the extra alignment we use for the untagging to make
- // the size consistent.
- //
- // The check for standard lifetime below makes sure that we have exactly
- // one set of start / end in any execution (i.e. the ends are not
- // reachable from each other), so this will not cause any problems.
- II->setArgOperand(0, ConstantInt::get(Int64Ty, AlignedSize));
- };
- llvm::for_each(Info.LifetimeStart, HandleLifetime);
- llvm::for_each(Info.LifetimeEnd, HandleLifetime);
-
AI->replaceUsesWithIf(Replacement, [AILong](const Use &U) {
auto *User = U.getUser();
return User != AILong && !isa<LifetimeIntrinsic>(User);
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 7d3c940..6e81387 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -3301,7 +3301,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
void handleLifetimeStart(IntrinsicInst &I) {
if (!PoisonStack)
return;
- AllocaInst *AI = dyn_cast<AllocaInst>(I.getArgOperand(1));
+ AllocaInst *AI = dyn_cast<AllocaInst>(I.getArgOperand(0));
if (AI)
LifetimeStartList.push_back(std::make_pair(&I, AI));
}
diff --git a/llvm/lib/Transforms/Instrumentation/TypeSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/TypeSanitizer.cpp
index 46b5673..9471ae3 100644
--- a/llvm/lib/Transforms/Instrumentation/TypeSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/TypeSanitizer.cpp
@@ -789,6 +789,13 @@ bool TypeSanitizer::instrumentMemInst(Value *V, Instruction *ShadowBase,
bool NeedsMemMove = false;
IRBuilder<> IRB(BB, IP);
+ auto GetAllocaSize = [&](AllocaInst *AI) {
+ return IRB.CreateMul(
+ IRB.CreateZExtOrTrunc(AI->getArraySize(), IntptrTy),
+ ConstantInt::get(IntptrTy,
+ DL.getTypeAllocSize(AI->getAllocatedType())));
+ };
+
if (auto *A = dyn_cast<Argument>(V)) {
assert(A->hasByValAttr() && "Type reset for non-byval argument?");
@@ -811,8 +818,12 @@ bool TypeSanitizer::instrumentMemInst(Value *V, Instruction *ShadowBase,
}
}
} else if (auto *II = dyn_cast<LifetimeIntrinsic>(I)) {
- Size = II->getArgOperand(0);
- Dest = II->getArgOperand(1);
+ auto *AI = dyn_cast<AllocaInst>(II->getArgOperand(0));
+ if (!AI)
+ return false;
+
+ Size = GetAllocaSize(AI);
+ Dest = II->getArgOperand(0);
} else if (auto *AI = dyn_cast<AllocaInst>(I)) {
// We need to clear the types for new stack allocations (or else we might
// read stale type information from a previous function execution).
@@ -820,10 +831,7 @@ bool TypeSanitizer::instrumentMemInst(Value *V, Instruction *ShadowBase,
IRB.SetInsertPoint(&*std::next(BasicBlock::iterator(I)));
IRB.SetInstDebugLocation(I);
- Size = IRB.CreateMul(
- IRB.CreateZExtOrTrunc(AI->getArraySize(), IntptrTy),
- ConstantInt::get(IntptrTy,
- DL.getTypeAllocSize(AI->getAllocatedType())));
+ Size = GetAllocaSize(AI);
Dest = I;
} else {
return false;
diff --git a/llvm/lib/Transforms/Scalar/DFAJumpThreading.cpp b/llvm/lib/Transforms/Scalar/DFAJumpThreading.cpp
index 938aab5..a7ba54f 100644
--- a/llvm/lib/Transforms/Scalar/DFAJumpThreading.cpp
+++ b/llvm/lib/Transforms/Scalar/DFAJumpThreading.cpp
@@ -582,15 +582,17 @@ struct AllSwitchPaths {
VisitedBlocks VB;
// Get paths from the determinator BBs to SwitchPhiDefBB
std::vector<ThreadingPath> PathsToPhiDef =
- getPathsFromStateDefMap(StateDef, SwitchPhi, VB);
+ getPathsFromStateDefMap(StateDef, SwitchPhi, VB, MaxNumPaths);
if (SwitchPhiDefBB == SwitchBlock) {
TPaths = std::move(PathsToPhiDef);
return;
}
+ assert(MaxNumPaths >= PathsToPhiDef.size());
+ auto PathsLimit = MaxNumPaths / PathsToPhiDef.size();
// Find and append paths from SwitchPhiDefBB to SwitchBlock.
PathsType PathsToSwitchBB =
- paths(SwitchPhiDefBB, SwitchBlock, VB, /* PathDepth = */ 1);
+ paths(SwitchPhiDefBB, SwitchBlock, VB, /* PathDepth = */ 1, PathsLimit);
if (PathsToSwitchBB.empty())
return;
@@ -611,13 +613,16 @@ private:
typedef DenseMap<const BasicBlock *, const PHINode *> StateDefMap;
std::vector<ThreadingPath> getPathsFromStateDefMap(StateDefMap &StateDef,
PHINode *Phi,
- VisitedBlocks &VB) {
+ VisitedBlocks &VB,
+ unsigned PathsLimit) {
std::vector<ThreadingPath> Res;
auto *PhiBB = Phi->getParent();
VB.insert(PhiBB);
VisitedBlocks UniqueBlocks;
for (auto *IncomingBB : Phi->blocks()) {
+ if (Res.size() >= PathsLimit)
+ break;
if (!UniqueBlocks.insert(IncomingBB).second)
continue;
if (!SwitchOuterLoop->contains(IncomingBB))
@@ -653,8 +658,9 @@ private:
// Direct predecessor, just add to the path.
if (IncomingPhiDefBB == IncomingBB) {
- std::vector<ThreadingPath> PredPaths =
- getPathsFromStateDefMap(StateDef, IncomingPhi, VB);
+ assert(PathsLimit > Res.size());
+ std::vector<ThreadingPath> PredPaths = getPathsFromStateDefMap(
+ StateDef, IncomingPhi, VB, PathsLimit - Res.size());
for (ThreadingPath &Path : PredPaths) {
Path.push_back(PhiBB);
Res.push_back(std::move(Path));
@@ -667,13 +673,17 @@ private:
continue;
PathsType IntermediatePaths;
- IntermediatePaths =
- paths(IncomingPhiDefBB, IncomingBB, VB, /* PathDepth = */ 1);
+ assert(PathsLimit > Res.size());
+ auto InterPathLimit = PathsLimit - Res.size();
+ IntermediatePaths = paths(IncomingPhiDefBB, IncomingBB, VB,
+ /* PathDepth = */ 1, InterPathLimit);
if (IntermediatePaths.empty())
continue;
+ assert(InterPathLimit >= IntermediatePaths.size());
+ auto PredPathLimit = InterPathLimit / IntermediatePaths.size();
std::vector<ThreadingPath> PredPaths =
- getPathsFromStateDefMap(StateDef, IncomingPhi, VB);
+ getPathsFromStateDefMap(StateDef, IncomingPhi, VB, PredPathLimit);
for (const ThreadingPath &Path : PredPaths) {
for (const PathType &IPath : IntermediatePaths) {
ThreadingPath NewPath(Path);
@@ -688,7 +698,7 @@ private:
}
PathsType paths(BasicBlock *BB, BasicBlock *ToBB, VisitedBlocks &Visited,
- unsigned PathDepth) {
+ unsigned PathDepth, unsigned PathsLimit) {
PathsType Res;
// Stop exploring paths after visiting MaxPathLength blocks
@@ -715,6 +725,8 @@ private:
// is used to prevent a duplicate path from being generated
SmallSet<BasicBlock *, 4> Successors;
for (BasicBlock *Succ : successors(BB)) {
+ if (Res.size() >= PathsLimit)
+ break;
if (!Successors.insert(Succ).second)
continue;
@@ -736,14 +748,12 @@ private:
// coverage and compile time.
if (LI->getLoopFor(Succ) != CurrLoop)
continue;
-
- PathsType SuccPaths = paths(Succ, ToBB, Visited, PathDepth + 1);
+ assert(PathsLimit > Res.size());
+ PathsType SuccPaths =
+ paths(Succ, ToBB, Visited, PathDepth + 1, PathsLimit - Res.size());
for (PathType &Path : SuccPaths) {
Path.push_front(BB);
Res.push_back(Path);
- if (Res.size() >= MaxNumPaths) {
- return Res;
- }
}
}
// This block could now be visited again from a different predecessor. Note
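To make the new PathsLimit plumbing concrete: each recursive search is handed the budget that remains after the results already collected (PathsLimit - Res.size()), and when two independent path sets are later combined pairwise, the budget is divided by the size of the first set so the cross product still fits under MaxNumPaths. A minimal standalone sketch of the budget threading (illustrative names, not the DFAJumpThreading code):

    #include <cstdio>
    #include <vector>

    // Toy DAG: Adj[v] lists the successors of node v.
    static std::vector<std::vector<int>> Adj;

    // Enumerate at most Limit paths from From to To; the remaining budget is
    // re-checked before every branch, mirroring the patch's early breaks.
    static void collectPaths(int From, int To, unsigned Limit,
                             std::vector<int> &Cur,
                             std::vector<std::vector<int>> &Res) {
      if (Res.size() >= Limit)
        return;
      Cur.push_back(From);
      if (From == To) {
        Res.push_back(Cur);
      } else {
        for (int Succ : Adj[From]) {
          if (Res.size() >= Limit)
            break;
          collectPaths(Succ, To, Limit, Cur, Res);
        }
      }
      Cur.pop_back();
    }

    int main() {
      Adj = {{1, 2}, {3}, {3}, {}};  // two paths exist from node 0 to node 3
      std::vector<int> Cur;
      std::vector<std::vector<int>> Res;
      collectPaths(0, 3, /*Limit=*/1, Cur, Res);  // budget of 1 stops the search early
      std::printf("found %zu path(s)\n", Res.size());
      return 0;
    }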
diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index 9b87180..f46d54b 100644
--- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -1363,7 +1363,7 @@ struct DSEState {
if (auto *CB = dyn_cast<CallBase>(I)) {
if (CB->getIntrinsicID() == Intrinsic::lifetime_end)
return {
- std::make_pair(MemoryLocation::getForArgument(CB, 1, &TLI), false)};
+ std::make_pair(MemoryLocation::getForArgument(CB, 0, &TLI), false)};
if (Value *FreedOp = getFreedOperand(CB, &TLI))
return {std::make_pair(MemoryLocation::getAfter(FreedOp), true)};
}
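The recurring getArgOperand(1) -> getArgOperand(0) and getForArgument(CB, 1, ...) -> getForArgument(CB, 0, ...) updates in this patch all follow from llvm.lifetime.start/end losing their leading i64 size argument, leaving the pointer as the sole operand. A hedged sketch of the access pattern the updated passes now rely on (assumes LLVM headers; not taken verbatim from any of the files above):

    #include "llvm/IR/IntrinsicInst.h"

    using namespace llvm;

    // Return the alloca a lifetime marker refers to, or null if the operand is
    // not an alloca. With the size-less intrinsic form, the pointer is operand 0.
    static AllocaInst *getLifetimeAlloca(IntrinsicInst *II) {
      if (!II->isLifetimeStartOrEnd())
        return nullptr;
      return dyn_cast<AllocaInst>(II->getArgOperand(0));
    }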
diff --git a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
index 85ee824..a097d33 100644
--- a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
+++ b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
@@ -434,7 +434,7 @@ bool InferAddressSpacesImpl::rewriteIntrinsicOperands(IntrinsicInst *II,
NewV = NewV->stripPointerCasts();
Function *NewDecl = Intrinsic::getOrInsertDeclaration(
M, II->getIntrinsicID(), {NewV->getType()});
- II->setArgOperand(1, NewV);
+ II->setArgOperand(0, NewV);
II->setCalledFunction(NewDecl);
return true;
}
@@ -491,7 +491,7 @@ void InferAddressSpacesImpl::collectRewritableIntrinsicOperands(
}
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end: {
- appendsFlatAddressExpressionToPostorderStack(II->getArgOperand(1),
+ appendsFlatAddressExpressionToPostorderStack(II->getArgOperand(0),
PostorderStack, Visited);
break;
}
diff --git a/llvm/lib/Transforms/Scalar/InferAlignment.cpp b/llvm/lib/Transforms/Scalar/InferAlignment.cpp
index 0ddc231..e9bf59c 100644
--- a/llvm/lib/Transforms/Scalar/InferAlignment.cpp
+++ b/llvm/lib/Transforms/Scalar/InferAlignment.cpp
@@ -58,14 +58,55 @@ bool inferAlignment(Function &F, AssumptionCache &AC, DominatorTree &DT) {
}
// Compute alignment from known bits.
+ auto InferFromKnownBits = [&](Instruction &I, Value *PtrOp) {
+ KnownBits Known = computeKnownBits(PtrOp, DL, &AC, &I, &DT);
+ unsigned TrailZ =
+ std::min(Known.countMinTrailingZeros(), +Value::MaxAlignmentExponent);
+ return Align(1ull << std::min(Known.getBitWidth() - 1, TrailZ));
+ };
+
+ // Propagate alignment between loads and stores that originate from the
+ // same base pointer.
+ DenseMap<Value *, Align> BestBasePointerAligns;
+ auto InferFromBasePointer = [&](Value *PtrOp, Align LoadStoreAlign) {
+ APInt OffsetFromBase(DL.getIndexTypeSizeInBits(PtrOp->getType()), 0);
+ PtrOp = PtrOp->stripAndAccumulateConstantOffsets(DL, OffsetFromBase, true);
+ // Derive the base pointer alignment from the load/store alignment
+ // and the offset from the base pointer.
+ Align BasePointerAlign =
+ commonAlignment(LoadStoreAlign, OffsetFromBase.getLimitedValue());
+
+ auto [It, Inserted] =
+ BestBasePointerAligns.try_emplace(PtrOp, BasePointerAlign);
+ if (!Inserted) {
+ // If the stored base pointer alignment is better than the
+ // base pointer alignment we derived, we may be able to use it
+ // to improve the load/store alignment. If not, store the
+ // improved base pointer alignment for future iterations.
+ if (It->second > BasePointerAlign) {
+ Align BetterLoadStoreAlign =
+ commonAlignment(It->second, OffsetFromBase.getLimitedValue());
+ return BetterLoadStoreAlign;
+ }
+ It->second = BasePointerAlign;
+ }
+ return LoadStoreAlign;
+ };
+
for (BasicBlock &BB : F) {
+ // We need to reset the map for each block because alignment information
+ // can only be propagated from instruction A to B if A dominates B.
+ // This is because control flow (and exception throwing) could be dependent
+ // on the address (and its alignment) at runtime. Some sort of dominator
+ // tree approach could be better, but doing a simple forward pass through a
+ // single basic block is correct too.
+ BestBasePointerAligns.clear();
+
for (Instruction &I : BB) {
Changed |= tryToImproveAlign(
DL, &I, [&](Value *PtrOp, Align OldAlign, Align PrefAlign) {
- KnownBits Known = computeKnownBits(PtrOp, DL, &AC, &I, &DT);
- unsigned TrailZ = std::min(Known.countMinTrailingZeros(),
- +Value::MaxAlignmentExponent);
- return Align(1ull << std::min(Known.getBitWidth() - 1, TrailZ));
+ return std::max(InferFromKnownBits(I, PtrOp),
+ InferFromBasePointer(PtrOp, OldAlign));
});
}
}
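The InferFromBasePointer lambda above is plain alignment arithmetic: an access with alignment A at constant offset Off from a base proves the base is aligned to commonAlignment(A, Off), and a later access at offset Off2 from the same base may then claim commonAlignment(BaseAlign, Off2). A small self-contained sketch of that arithmetic with concrete numbers (illustrative only, not the pass itself):

    #include <cstdint>
    #include <cstdio>

    // Largest power of two dividing both Align (itself a power of two) and Offset.
    static uint64_t commonAlign(uint64_t Align, uint64_t Offset) {
      if (Offset == 0)
        return Align;
      uint64_t LowBit = Offset & -Offset;  // largest power of two dividing Offset
      return Align < LowBit ? Align : LowBit;
    }

    int main() {
      // A 16-aligned store at offset 16 from %base proves %base is 16-aligned.
      uint64_t BaseAlign = commonAlign(/*Align=*/16, /*Offset=*/16);
      // A load at offset 4 from the same base can then be raised to align 4,
      // even if it was originally marked align 1.
      uint64_t LoadAlign = commonAlign(BaseAlign, /*Offset=*/4);
      std::printf("base align %llu, load align %llu\n",
                  (unsigned long long)BaseAlign, (unsigned long long)LoadAlign);
      return 0;
    }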
diff --git a/llvm/lib/Transforms/Scalar/LICM.cpp b/llvm/lib/Transforms/Scalar/LICM.cpp
index c3f80f9..f8a18ea 100644
--- a/llvm/lib/Transforms/Scalar/LICM.cpp
+++ b/llvm/lib/Transforms/Scalar/LICM.cpp
@@ -472,7 +472,7 @@ bool LoopInvariantCodeMotion::runOnLoop(Loop *L, AAResults *AA, LoopInfo *LI,
if (Preheader)
Changed |= hoistRegion(DT->getNode(L->getHeader()), AA, LI, DT, AC, TLI, L,
MSSAU, SE, &SafetyInfo, Flags, ORE, LoopNestMode,
- LicmAllowSpeculation);
+ LicmAllowSpeculation, HasCoroSuspendInst);
// Now that all loop invariants have been removed from the loop, promote any
// memory references to scalars that we can.
@@ -881,7 +881,7 @@ bool llvm::hoistRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
ICFLoopSafetyInfo *SafetyInfo,
SinkAndHoistLICMFlags &Flags,
OptimizationRemarkEmitter *ORE, bool LoopNestMode,
- bool AllowSpeculation) {
+ bool AllowSpeculation, bool HasCoroSuspendInst) {
// Verify inputs.
assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
CurLoop != nullptr && SafetyInfo != nullptr &&
@@ -914,11 +914,11 @@ bool llvm::hoistRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
// TODO: It may be safe to hoist if we are hoisting to a conditional block
// and we have accurately duplicated the control flow from the loop header
// to that block.
- if (CurLoop->hasLoopInvariantOperands(&I) &&
+ if (CurLoop->hasLoopInvariantOperands(&I, HasCoroSuspendInst) &&
canSinkOrHoistInst(I, AA, DT, CurLoop, MSSAU, true, Flags, ORE) &&
- isSafeToExecuteUnconditionally(
- I, DT, TLI, CurLoop, SafetyInfo, ORE,
- Preheader->getTerminator(), AC, AllowSpeculation)) {
+ isSafeToExecuteUnconditionally(I, DT, TLI, CurLoop, SafetyInfo, ORE,
+ Preheader->getTerminator(), AC,
+ AllowSpeculation)) {
hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
MSSAU, SE, ORE);
HoistedInstructions.push_back(&I);
@@ -964,7 +964,7 @@ bool llvm::hoistRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
SafetyInfo->doesNotWriteMemoryBefore(I, CurLoop);
};
if ((IsInvariantStart(I) || isGuard(&I)) &&
- CurLoop->hasLoopInvariantOperands(&I) &&
+ CurLoop->hasLoopInvariantOperands(&I, HasCoroSuspendInst) &&
MustExecuteWithoutWritesBefore(I)) {
hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
MSSAU, SE, ORE);
diff --git a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
index b3bffeb..c68149b 100644
--- a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
+++ b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
@@ -263,6 +263,7 @@ static bool isUniformShape(Value *V) {
case llvm::Instruction::FPExt:
return true;
case llvm::Instruction::AddrSpaceCast:
+ case CastInst::PtrToAddr:
case CastInst::PtrToInt:
case CastInst::IntToPtr:
return false;
@@ -2166,7 +2167,7 @@ public:
// If the loads don't alias the lifetime.end, it won't interfere with
// fusion.
- MemoryLocation EndLoc = MemoryLocation::getForArgument(End, 1, nullptr);
+ MemoryLocation EndLoc = MemoryLocation::getForArgument(End, 0, nullptr);
if (!EndLoc.Ptr)
continue;
if (AA->isNoAlias(Load0Loc, EndLoc) && AA->isNoAlias(Load1Loc, EndLoc))
diff --git a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 79721dc..f237322 100644
--- a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -915,7 +915,7 @@ bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpyLoad,
// move the bitcast as well, which we don't handle.
if (SkippedLifetimeStart) {
auto *LifetimeArg =
- dyn_cast<Instruction>(SkippedLifetimeStart->getOperand(1));
+ dyn_cast<Instruction>(SkippedLifetimeStart->getOperand(0));
if (LifetimeArg && LifetimeArg->getParent() == C->getParent() &&
C->comesBefore(LifetimeArg))
return false;
@@ -1010,7 +1010,7 @@ bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpyLoad,
// Lifetime of srcAlloca ends at lifetime.end.
if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
if (II->getIntrinsicID() == Intrinsic::lifetime_end &&
- II->getArgOperand(1) == srcAlloca)
+ II->getArgOperand(0) == srcAlloca)
break;
}
@@ -1393,7 +1393,7 @@ static bool hasUndefContents(MemorySSA *MSSA, BatchAAResults &AA, Value *V,
if (auto *II = dyn_cast_or_null<IntrinsicInst>(Def->getMemoryInst()))
if (II->getIntrinsicID() == Intrinsic::lifetime_start)
if (auto *Alloca = dyn_cast<AllocaInst>(getUnderlyingObject(V)))
- return II->getArgOperand(1) == Alloca;
+ return II->getArgOperand(0) == Alloca;
return false;
}
diff --git a/llvm/lib/Transforms/Scalar/NewGVN.cpp b/llvm/lib/Transforms/Scalar/NewGVN.cpp
index 1a52af1..40eeeb2 100644
--- a/llvm/lib/Transforms/Scalar/NewGVN.cpp
+++ b/llvm/lib/Transforms/Scalar/NewGVN.cpp
@@ -1535,7 +1535,7 @@ NewGVN::performSymbolicLoadCoercion(Type *LoadType, Value *LoadPtr,
if (auto *II = dyn_cast<IntrinsicInst>(DepInst)) {
if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
- auto *LifetimePtr = II->getOperand(1);
+ auto *LifetimePtr = II->getOperand(0);
if (LoadPtr == lookupOperandLeader(LifetimePtr) ||
AA->isMustAlias(LoadPtr, LifetimePtr))
return createConstantExpression(UndefValue::get(LoadType));
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index 03d9f32..d6e27aa 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -1260,10 +1260,7 @@ private:
return PI.setAborted(&II);
if (II.isLifetimeStartOrEnd()) {
- ConstantInt *Length = cast<ConstantInt>(II.getArgOperand(0));
- uint64_t Size = std::min(AllocSize - Offset.getLimitedValue(),
- Length->getLimitedValue());
- insertUse(II, Offset, Size, true);
+ insertUse(II, Offset, AllocSize, true);
return;
}
@@ -3614,30 +3611,14 @@ private:
return true;
}
- assert(II.getArgOperand(1) == OldPtr);
- // Lifetime intrinsics are only promotable if they cover the whole alloca.
- // Therefore, we drop lifetime intrinsics which don't cover the whole
- // alloca.
- // (In theory, intrinsics which partially cover an alloca could be
- // promoted, but PromoteMemToReg doesn't handle that case.)
- // FIXME: Check whether the alloca is promotable before dropping the
- // lifetime intrinsics?
- if (NewBeginOffset != NewAllocaBeginOffset ||
- NewEndOffset != NewAllocaEndOffset)
- return true;
-
- ConstantInt *Size =
- ConstantInt::get(cast<IntegerType>(II.getArgOperand(0)->getType()),
- NewEndOffset - NewBeginOffset);
- // Lifetime intrinsics always expect an i8* so directly get such a pointer
- // for the new alloca slice.
+ assert(II.getArgOperand(0) == OldPtr);
Type *PointerTy = IRB.getPtrTy(OldPtr->getType()->getPointerAddressSpace());
Value *Ptr = getNewAllocaSlicePtr(IRB, PointerTy);
Value *New;
if (II.getIntrinsicID() == Intrinsic::lifetime_start)
- New = IRB.CreateLifetimeStart(Ptr, Size);
+ New = IRB.CreateLifetimeStart(Ptr);
else
- New = IRB.CreateLifetimeEnd(Ptr, Size);
+ New = IRB.CreateLifetimeEnd(Ptr);
(void)New;
LLVM_DEBUG(dbgs() << " to: " << *New << "\n");
diff --git a/llvm/lib/Transforms/Utils/CodeExtractor.cpp b/llvm/lib/Transforms/Utils/CodeExtractor.cpp
index 7a9dd37..bbd1ed6 100644
--- a/llvm/lib/Transforms/Utils/CodeExtractor.cpp
+++ b/llvm/lib/Transforms/Utils/CodeExtractor.cpp
@@ -1099,7 +1099,7 @@ static void eraseLifetimeMarkersOnInputs(const SetVector<BasicBlock *> &Blocks,
// Get the memory operand of the lifetime marker. If the underlying
// object is a sunk alloca, or is otherwise defined in the extraction
// region, the lifetime marker must not be erased.
- Value *Mem = II->getOperand(1)->stripInBoundsOffsets();
+ Value *Mem = II->getOperand(0);
if (SunkAllocas.count(Mem) || definedInRegion(Blocks, Mem))
continue;
@@ -1115,8 +1115,6 @@ static void eraseLifetimeMarkersOnInputs(const SetVector<BasicBlock *> &Blocks,
static void insertLifetimeMarkersSurroundingCall(
Module *M, ArrayRef<Value *> LifetimesStart, ArrayRef<Value *> LifetimesEnd,
CallInst *TheCall) {
- LLVMContext &Ctx = M->getContext();
- auto NegativeOne = ConstantInt::getSigned(Type::getInt64Ty(Ctx), -1);
Instruction *Term = TheCall->getParent()->getTerminator();
// Emit lifetime markers for the pointers given in \p Objects. Insert the
@@ -1130,7 +1128,7 @@ static void insertLifetimeMarkersSurroundingCall(
Function *Func =
Intrinsic::getOrInsertDeclaration(M, MarkerFunc, Mem->getType());
- auto Marker = CallInst::Create(Func, {NegativeOne, Mem});
+ auto Marker = CallInst::Create(Func, Mem);
if (InsertBefore)
Marker->insertBefore(TheCall->getIterator());
else
diff --git a/llvm/lib/Transforms/Utils/InlineFunction.cpp b/llvm/lib/Transforms/Utils/InlineFunction.cpp
index ed3dca2..fa3c467 100644
--- a/llvm/lib/Transforms/Utils/InlineFunction.cpp
+++ b/llvm/lib/Transforms/Utils/InlineFunction.cpp
@@ -2361,15 +2361,13 @@ remapIndices(Function &Caller, BasicBlock *StartBB,
// Updating the contextual profile after an inlining means, at a high level,
// copying over the data of the callee, **intentionally without any value
// scaling**, and copying over the callees of the inlined callee.
-llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
- PGOContextualProfile &CtxProf,
- bool MergeAttributes,
- AAResults *CalleeAAR,
- bool InsertLifetime,
- Function *ForwardVarArgsTo) {
+llvm::InlineResult llvm::InlineFunction(
+ CallBase &CB, InlineFunctionInfo &IFI, PGOContextualProfile &CtxProf,
+ bool MergeAttributes, AAResults *CalleeAAR, bool InsertLifetime,
+ Function *ForwardVarArgsTo, OptimizationRemarkEmitter *ORE) {
if (!CtxProf.isInSpecializedModule())
return InlineFunction(CB, IFI, MergeAttributes, CalleeAAR, InsertLifetime,
- ForwardVarArgsTo);
+ ForwardVarArgsTo, ORE);
auto &Caller = *CB.getCaller();
auto &Callee = *CB.getCalledFunction();
@@ -2387,7 +2385,7 @@ llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
const auto NumCalleeCallsites = CtxProf.getNumCallsites(Callee);
auto Ret = InlineFunction(CB, IFI, MergeAttributes, CalleeAAR, InsertLifetime,
- ForwardVarArgsTo);
+ ForwardVarArgsTo, ORE);
if (!Ret.isSuccess())
return Ret;
@@ -2457,20 +2455,8 @@ llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
return Ret;
}
-/// This function inlines the called function into the basic block of the
-/// caller. This returns false if it is not possible to inline this call.
-/// The program is still in a well defined state if this occurs though.
-///
-/// Note that this only does one level of inlining. For example, if the
-/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
-/// exists in the instruction stream. Similarly this will inline a recursive
-/// function by one level.
-llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
- bool MergeAttributes,
- AAResults *CalleeAAR,
- bool InsertLifetime,
- Function *ForwardVarArgsTo,
- OptimizationRemarkEmitter *ORE) {
+llvm::InlineResult llvm::CanInlineCallSite(const CallBase &CB,
+ InlineFunctionInfo &IFI) {
assert(CB.getParent() && CB.getFunction() && "Instruction not in function!");
// FIXME: we don't inline callbr yet.
@@ -2487,7 +2473,6 @@ llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
// The inliner does not know how to inline through calls with operand bundles
// in general ...
- Value *ConvergenceControlToken = nullptr;
if (CB.hasOperandBundles()) {
for (int i = 0, e = CB.getNumOperandBundles(); i != e; ++i) {
auto OBUse = CB.getOperandBundleAt(i);
@@ -2503,7 +2488,7 @@ llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
if (Tag == LLVMContext::OB_kcfi)
continue;
if (Tag == LLVMContext::OB_convergencectrl) {
- ConvergenceControlToken = OBUse.Inputs[0].get();
+ IFI.ConvergenceControlToken = OBUse.Inputs[0].get();
continue;
}
@@ -2521,28 +2506,22 @@ llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
// fully implements convergence control tokens, there is no mixing of
// controlled and uncontrolled convergent operations in the whole program.
if (CB.isConvergent()) {
- if (!ConvergenceControlToken &&
+ if (!IFI.ConvergenceControlToken &&
getConvergenceEntry(CalledFunc->getEntryBlock())) {
return InlineResult::failure(
"convergent call needs convergencectrl operand");
}
}
- // If the call to the callee cannot throw, set the 'nounwind' flag on any
- // calls that we inline.
- bool MarkNoUnwind = CB.doesNotThrow();
-
- BasicBlock *OrigBB = CB.getParent();
- Function *Caller = OrigBB->getParent();
+ const BasicBlock *OrigBB = CB.getParent();
+ const Function *Caller = OrigBB->getParent();
// GC poses two hazards to inlining, which only occur when the callee has GC:
// 1. If the caller has no GC, then the callee's GC must be propagated to the
// caller.
// 2. If the caller has a differing GC, it is invalid to inline.
if (CalledFunc->hasGC()) {
- if (!Caller->hasGC())
- Caller->setGC(CalledFunc->getGC());
- else if (CalledFunc->getGC() != Caller->getGC())
+ if (Caller->hasGC() && CalledFunc->getGC() != Caller->getGC())
return InlineResult::failure("incompatible GC");
}
@@ -2560,34 +2539,31 @@ llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
? Caller->getPersonalityFn()->stripPointerCasts()
: nullptr;
if (CalledPersonality) {
- if (!CallerPersonality)
- Caller->setPersonalityFn(CalledPersonality);
// If the personality functions match, then we can perform the
// inlining. Otherwise, we can't inline.
// TODO: This isn't 100% true. Some personality functions are proper
// supersets of others and can be used in place of the other.
- else if (CalledPersonality != CallerPersonality)
+ if (CallerPersonality && CalledPersonality != CallerPersonality)
return InlineResult::failure("incompatible personality");
}
// We need to figure out which funclet the callsite was in so that we may
// properly nest the callee.
- Instruction *CallSiteEHPad = nullptr;
if (CallerPersonality) {
EHPersonality Personality = classifyEHPersonality(CallerPersonality);
if (isScopedEHPersonality(Personality)) {
std::optional<OperandBundleUse> ParentFunclet =
CB.getOperandBundle(LLVMContext::OB_funclet);
if (ParentFunclet)
- CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());
+ IFI.CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());
// OK, the inlining site is legal. What about the target function?
- if (CallSiteEHPad) {
+ if (IFI.CallSiteEHPad) {
if (Personality == EHPersonality::MSVC_CXX) {
// The MSVC personality cannot tolerate catches getting inlined into
// cleanup funclets.
- if (isa<CleanupPadInst>(CallSiteEHPad)) {
+ if (isa<CleanupPadInst>(IFI.CallSiteEHPad)) {
// Ok, the call site is within a cleanuppad. Let's check the callee
// for catchpads.
for (const BasicBlock &CalledBB : *CalledFunc) {
@@ -2607,13 +2583,34 @@ llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
}
}
+ return InlineResult::success();
+}
+
+/// This function inlines the called function into the basic block of the
+/// caller. This returns false if it is not possible to inline this call.
+/// The program is still in a well defined state if this occurs though.
+///
+/// Note that this only does one level of inlining. For example, if the
+/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
+/// exists in the instruction stream. Similarly this will inline a recursive
+/// function by one level.
+void llvm::InlineFunctionImpl(CallBase &CB, InlineFunctionInfo &IFI,
+ bool MergeAttributes, AAResults *CalleeAAR,
+ bool InsertLifetime, Function *ForwardVarArgsTo,
+ OptimizationRemarkEmitter *ORE) {
+ BasicBlock *OrigBB = CB.getParent();
+ Function *Caller = OrigBB->getParent();
+ Function *CalledFunc = CB.getCalledFunction();
+ assert(CalledFunc && !CalledFunc->isDeclaration() &&
+ "CanInlineCallSite should have verified direct call to definition");
+
// Determine if we are dealing with a call in an EHPad which does not unwind
// to caller.
bool EHPadForCallUnwindsLocally = false;
- if (CallSiteEHPad && isa<CallInst>(CB)) {
+ if (IFI.CallSiteEHPad && isa<CallInst>(CB)) {
UnwindDestMemoTy FuncletUnwindMap;
Value *CallSiteUnwindDestToken =
- getUnwindDestToken(CallSiteEHPad, FuncletUnwindMap);
+ getUnwindDestToken(IFI.CallSiteEHPad, FuncletUnwindMap);
EHPadForCallUnwindsLocally =
CallSiteUnwindDestToken &&
@@ -2630,6 +2627,30 @@ llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
ClonedCodeInfo InlinedFunctionInfo;
Function::iterator FirstNewBlock;
+ // GC poses two hazards to inlining, which only occur when the callee has GC:
+ // 1. If the caller has no GC, then the callee's GC must be propagated to the
+ // caller.
+ // 2. If the caller has a differing GC, it is invalid to inline.
+ if (CalledFunc->hasGC()) {
+ if (!Caller->hasGC())
+ Caller->setGC(CalledFunc->getGC());
+ else {
+ assert(CalledFunc->getGC() == Caller->getGC() &&
+ "CanInlineCallSite should have verified compatible GCs");
+ }
+ }
+
+ if (CalledFunc->hasPersonalityFn()) {
+ Constant *CalledPersonality =
+ CalledFunc->getPersonalityFn()->stripPointerCasts();
+ if (!Caller->hasPersonalityFn()) {
+ Caller->setPersonalityFn(CalledPersonality);
+ } else
+ assert(Caller->getPersonalityFn()->stripPointerCasts() ==
+ CalledPersonality &&
+ "CanInlineCallSite should have verified compatible personality");
+ }
+
{ // Scope to destroy VMap after cloning.
ValueToValueMapTy VMap;
struct ByValInit {
@@ -2819,10 +2840,10 @@ llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
IFI.GetAssumptionCache(*Caller).registerAssumption(II);
}
- if (ConvergenceControlToken) {
+ if (IFI.ConvergenceControlToken) {
IntrinsicInst *IntrinsicCall = getConvergenceEntry(*FirstNewBlock);
if (IntrinsicCall) {
- IntrinsicCall->replaceAllUsesWith(ConvergenceControlToken);
+ IntrinsicCall->replaceAllUsesWith(IFI.ConvergenceControlToken);
IntrinsicCall->eraseFromParent();
}
}
@@ -2869,6 +2890,10 @@ llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
}
}
+ // If the call to the callee cannot throw, set the 'nounwind' flag on any
+ // calls that we inline.
+ bool MarkNoUnwind = CB.doesNotThrow();
+
SmallVector<Value*,4> VarArgsToForward;
SmallVector<AttributeSet, 4> VarArgsAttrs;
for (unsigned i = CalledFunc->getFunctionType()->getNumParams();
@@ -2979,31 +3004,11 @@ llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
if (hasLifetimeMarkers(AI))
continue;
- // Try to determine the size of the allocation.
- ConstantInt *AllocaSize = nullptr;
- if (ConstantInt *AIArraySize =
- dyn_cast<ConstantInt>(AI->getArraySize())) {
- auto &DL = Caller->getDataLayout();
- Type *AllocaType = AI->getAllocatedType();
- TypeSize AllocaTypeSize = DL.getTypeAllocSize(AllocaType);
- uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
-
- // Don't add markers for zero-sized allocas.
- if (AllocaArraySize == 0)
- continue;
-
- // Check that array size doesn't saturate uint64_t and doesn't
- // overflow when it's multiplied by type size.
- if (!AllocaTypeSize.isScalable() &&
- AllocaArraySize != std::numeric_limits<uint64_t>::max() &&
- std::numeric_limits<uint64_t>::max() / AllocaArraySize >=
- AllocaTypeSize.getFixedValue()) {
- AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
- AllocaArraySize * AllocaTypeSize);
- }
- }
+ std::optional<TypeSize> Size = AI->getAllocationSize(AI->getDataLayout());
+ if (Size && Size->isZero())
+ continue;
- builder.CreateLifetimeStart(AI, AllocaSize);
+ builder.CreateLifetimeStart(AI);
for (ReturnInst *RI : Returns) {
// Don't insert llvm.lifetime.end calls between a musttail or deoptimize
// call and a return. The return kills all local allocas.
@@ -3013,7 +3018,7 @@ llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
if (InlinedDeoptimizeCalls &&
RI->getParent()->getTerminatingDeoptimizeCall())
continue;
- IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
+ IRBuilder<>(RI).CreateLifetimeEnd(AI);
}
}
}
@@ -3055,12 +3060,12 @@ llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
// Update the lexical scopes of the new funclets and callsites.
// Anything that had 'none' as its parent is now nested inside the callsite's
// EHPad.
- if (CallSiteEHPad) {
+ if (IFI.CallSiteEHPad) {
for (Function::iterator BB = FirstNewBlock->getIterator(),
E = Caller->end();
BB != E; ++BB) {
// Add bundle operands to inlined call sites.
- PropagateOperandBundles(BB, CallSiteEHPad);
+ PropagateOperandBundles(BB, IFI.CallSiteEHPad);
// It is problematic if the inlinee has a cleanupret which unwinds to
// caller and we inline it into a call site which doesn't unwind but into
@@ -3076,11 +3081,11 @@ llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
if (isa<ConstantTokenNone>(CatchSwitch->getParentPad()))
- CatchSwitch->setParentPad(CallSiteEHPad);
+ CatchSwitch->setParentPad(IFI.CallSiteEHPad);
} else {
auto *FPI = cast<FuncletPadInst>(I);
if (isa<ConstantTokenNone>(FPI->getParentPad()))
- FPI->setParentPad(CallSiteEHPad);
+ FPI->setParentPad(IFI.CallSiteEHPad);
}
}
}
@@ -3236,7 +3241,7 @@ llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
AttributeFuncs::mergeAttributesForInlining(*Caller, *CalledFunc);
// We are now done with the inlining.
- return InlineResult::success();
+ return;
}
// Otherwise, we have the normal case, of more than one block to inline or
@@ -3404,6 +3409,19 @@ llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
if (MergeAttributes)
AttributeFuncs::mergeAttributesForInlining(*Caller, *CalledFunc);
+}
- return InlineResult::success();
+llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
+ bool MergeAttributes,
+ AAResults *CalleeAAR,
+ bool InsertLifetime,
+ Function *ForwardVarArgsTo,
+ OptimizationRemarkEmitter *ORE) {
+ llvm::InlineResult Result = CanInlineCallSite(CB, IFI);
+ if (Result.isSuccess()) {
+ InlineFunctionImpl(CB, IFI, MergeAttributes, CalleeAAR, InsertLifetime,
+ ForwardVarArgsTo, ORE);
+ }
+
+ return Result;
}
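The restructuring above splits InlineFunction into a side-effect-free legality check (CanInlineCallSite) and the actual transformation (InlineFunctionImpl), with state such as the convergence token and the call-site EH pad carried in InlineFunctionInfo; the re-added InlineFunction wrapper shows the intended pairing. A hedged usage sketch (it assumes the two new functions are declared next to InlineFunction in llvm/Transforms/Utils/Cloning.h, which this .cpp change implies but this excerpt does not show):

    #include "llvm/Transforms/Utils/Cloning.h"

    using namespace llvm;

    // Check first, then commit: no IR is modified unless the call site is legal.
    static bool tryInline(CallBase &CB, InlineFunctionInfo &IFI) {
      InlineResult Legal = CanInlineCallSite(CB, IFI);
      if (!Legal.isSuccess())
        return false;  // CB is untouched; Legal carries the failure reason.
      InlineFunctionImpl(CB, IFI, /*MergeAttributes=*/false, /*CalleeAAR=*/nullptr,
                         /*InsertLifetime=*/true, /*ForwardVarArgsTo=*/nullptr,
                         /*ORE=*/nullptr);
      return true;
    }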
diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp
index 2619e73..b559212 100644
--- a/llvm/lib/Transforms/Utils/Local.cpp
+++ b/llvm/lib/Transforms/Utils/Local.cpp
@@ -481,7 +481,7 @@ bool llvm::wouldInstructionBeTriviallyDead(const Instruction *I,
return true;
if (II->isLifetimeStartOrEnd()) {
- auto *Arg = II->getArgOperand(1);
+ auto *Arg = II->getArgOperand(0);
if (isa<PoisonValue>(Arg))
return true;
diff --git a/llvm/lib/Transforms/Utils/MemoryTaggingSupport.cpp b/llvm/lib/Transforms/Utils/MemoryTaggingSupport.cpp
index 472c03f..1f59b17 100644
--- a/llvm/lib/Transforms/Utils/MemoryTaggingSupport.cpp
+++ b/llvm/lib/Transforms/Utils/MemoryTaggingSupport.cpp
@@ -155,7 +155,7 @@ void StackInfoBuilder::visit(OptimizationRemarkEmitter &ORE,
return;
}
if (auto *II = dyn_cast<LifetimeIntrinsic>(&Inst)) {
- AllocaInst *AI = dyn_cast<AllocaInst>(II->getArgOperand(1));
+ AllocaInst *AI = dyn_cast<AllocaInst>(II->getArgOperand(0));
if (!AI ||
getAllocaInterestingness(*AI) != AllocaInterestingness::kInteresting)
return;
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 323e422..0591c22 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -548,9 +548,6 @@ public:
protected:
friend class LoopVectorizationPlanner;
- /// Returns (and creates if needed) the trip count of the widened loop.
- Value *getOrCreateVectorTripCount(BasicBlock *InsertBlock);
-
// Create a check to see if the vector loop should be executed
Value *createIterationCountCheck(ElementCount VF, unsigned UF) const;
@@ -2272,56 +2269,6 @@ static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
return TTI.enableMaskedInterleavedAccessVectorization();
}
-Value *
-InnerLoopVectorizer::getOrCreateVectorTripCount(BasicBlock *InsertBlock) {
- if (VectorTripCount)
- return VectorTripCount;
-
- Value *TC = getTripCount();
- IRBuilder<> Builder(InsertBlock->getTerminator());
-
- Type *Ty = TC->getType();
- // This is where we can make the step a runtime constant.
- Value *Step = createStepForVF(Builder, Ty, VF, UF);
-
- // If the tail is to be folded by masking, round the number of iterations N
- // up to a multiple of Step instead of rounding down. This is done by first
- // adding Step-1 and then rounding down. Note that it's ok if this addition
- // overflows: the vector induction variable will eventually wrap to zero given
- // that it starts at zero and its Step is a power of two; the loop will then
- // exit, with the last early-exit vector comparison also producing all-true.
- // For scalable vectors the VF is not guaranteed to be a power of 2, but this
- // is accounted for in emitIterationCountCheck that adds an overflow check.
- if (Cost->foldTailByMasking()) {
- assert(isPowerOf2_32(VF.getKnownMinValue() * UF) &&
- "VF*UF must be a power of 2 when folding tail by masking");
- TC = Builder.CreateAdd(TC, Builder.CreateSub(Step, ConstantInt::get(Ty, 1)),
- "n.rnd.up");
- }
-
- // Now we need to generate the expression for the part of the loop that the
- // vectorized body will execute. This is equal to N - (N % Step) if scalar
- // iterations are not required for correctness, or N - Step, otherwise. Step
- // is equal to the vectorization factor (number of SIMD elements) times the
- // unroll factor (number of SIMD instructions).
- Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
-
- // There are cases where we *must* run at least one iteration in the remainder
- // loop. See the cost model for when this can happen. If the step evenly
- // divides the trip count, we set the remainder to be equal to the step. If
- // the step does not evenly divide the trip count, no adjustment is necessary
- // since there will already be scalar iterations. Note that the minimum
- // iterations check ensures that N >= Step.
- if (Cost->requiresScalarEpilogue(VF.isVector())) {
- auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
- R = Builder.CreateSelect(IsZero, Step, R);
- }
-
- VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
-
- return VectorTripCount;
-}
-
void InnerLoopVectorizer::introduceCheckBlockInVPlan(BasicBlock *CheckIRBB) {
// Note: The block with the minimum trip-count check is already connected
// during earlier VPlan construction.
@@ -7354,6 +7301,9 @@ DenseMap<const SCEV *, Value *> LoopVectorizationPlanner::executePlan(
// Canonicalize EVL loops after regions are dissolved.
VPlanTransforms::canonicalizeEVLLoops(BestVPlan);
VPlanTransforms::materializeBackedgeTakenCount(BestVPlan, VectorPH);
+ VPlanTransforms::materializeVectorTripCount(
+ BestVPlan, VectorPH, CM.foldTailByMasking(),
+ CM.requiresScalarEpilogue(BestVF.isVector()));
// Perform the actual loop transformation.
VPTransformState State(&TTI, BestVF, LI, DT, ILV.AC, ILV.Builder, &BestVPlan,
@@ -7410,8 +7360,7 @@ DenseMap<const SCEV *, Value *> LoopVectorizationPlanner::executePlan(
//===------------------------------------------------===//
// 2. Copy and widen instructions from the old loop into the new loop.
- BestVPlan.prepareToExecute(
- ILV.getOrCreateVectorTripCount(ILV.LoopVectorPreHeader), State);
+ BestVPlan.prepareToExecute(State);
replaceVPBBWithIRVPBB(VectorPH, State.CFG.PrevBB);
// Move check blocks to their final position.
@@ -7530,9 +7479,6 @@ BasicBlock *EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() {
EPI.MainLoopIterationCountCheck =
emitIterationCountCheck(LoopScalarPreHeader, false);
- // Generate the induction variable.
- EPI.VectorTripCount = getOrCreateVectorTripCount(LoopVectorPreHeader);
-
replaceVPBBWithIRVPBB(Plan.getScalarPreheader(), LoopScalarPreHeader);
return LoopVectorPreHeader;
}
@@ -8438,8 +8384,13 @@ void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
LVer.prepareNoAliasMetadata();
}
+ // Create initial base VPlan0, to serve as common starting point for all
+ // candidates built later for specific VF ranges.
+ auto VPlan0 = VPlanTransforms::buildVPlan0(
+ OrigLoop, *LI, Legal->getWidestInductionType(),
+ getDebugLocFromInstOrOperands(Legal->getPrimaryInduction()), PSE);
+
auto MaxVFTimes2 = MaxVF * 2;
- auto VPlan0 = VPlanTransforms::buildPlainCFG(OrigLoop, *LI);
for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFTimes2);) {
VFRange SubRange = {VF, MaxVFTimes2};
if (auto Plan = tryToBuildVPlanWithVPRecipes(
@@ -8678,23 +8629,17 @@ VPlanPtr LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(
// visit each basic block after having visited its predecessor basic blocks.
// ---------------------------------------------------------------------------
- // Create initial VPlan skeleton, having a basic block for the pre-header
- // which contains SCEV expansions that need to happen before the CFG is
- // modified; a basic block for the vector pre-header, followed by a region for
- // the vector loop, followed by the middle basic block. The skeleton vector
- // loop region contains a header and latch basic blocks.
-
bool RequiresScalarEpilogueCheck =
LoopVectorizationPlanner::getDecisionAndClampRange(
[this](ElementCount VF) {
return !CM.requiresScalarEpilogue(VF.isVector());
},
Range);
- VPlanTransforms::prepareForVectorization(
- *Plan, Legal->getWidestInductionType(), PSE, RequiresScalarEpilogueCheck,
- CM.foldTailByMasking(), OrigLoop,
- getDebugLocFromInstOrOperands(Legal->getPrimaryInduction()),
- Legal->hasUncountableEarlyExit(), Range);
+ VPlanTransforms::handleEarlyExits(*Plan, Legal->hasUncountableEarlyExit(),
+ Range);
+ VPlanTransforms::addMiddleCheck(*Plan, RequiresScalarEpilogueCheck,
+ CM.foldTailByMasking());
+
VPlanTransforms::createLoopRegions(*Plan);
VPlanTransforms::createExtractsForLiveOuts(*Plan);
@@ -8980,11 +8925,14 @@ VPlanPtr LoopVectorizationPlanner::tryToBuildVPlan(VFRange &Range) {
assert(!OrigLoop->isInnermost());
assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
- auto Plan = VPlanTransforms::buildPlainCFG(OrigLoop, *LI);
- VPlanTransforms::prepareForVectorization(
- *Plan, Legal->getWidestInductionType(), PSE, true, false, OrigLoop,
- getDebugLocFromInstOrOperands(Legal->getPrimaryInduction()), false,
- Range);
+ auto Plan = VPlanTransforms::buildVPlan0(
+ OrigLoop, *LI, Legal->getWidestInductionType(),
+ getDebugLocFromInstOrOperands(Legal->getPrimaryInduction()), PSE);
+ VPlanTransforms::handleEarlyExits(*Plan,
+ /*HasUncountableExit*/ false, Range);
+ VPlanTransforms::addMiddleCheck(*Plan, /*RequiresScalarEpilogue*/ true,
+ /*TailFolded*/ false);
+
VPlanTransforms::createLoopRegions(*Plan);
for (ElementCount VF : Range)
@@ -9410,13 +9358,6 @@ void VPDerivedIVRecipe::execute(VPTransformState &State) {
State.Builder, Index, getStartValue()->getLiveInIRValue(), Step, Kind,
cast_if_present<BinaryOperator>(FPBinOp));
DerivedIV->setName(Name);
- // If index is the vector trip count, the concrete value will only be set in
- // prepareToExecute, leading to missed simplifications, e.g. if it is 0.
- // TODO: Remove the special case for the vector trip count once it is computed
- // in VPlan and can be used during VPlan simplification.
- assert((DerivedIV != Index ||
- getOperand(1) == &getParent()->getPlan()->getVectorTripCount()) &&
- "IV didn't need transforming?");
State.set(this, DerivedIV, VPLane(0));
}
@@ -9808,7 +9749,7 @@ static void preparePlanForMainVectorLoop(VPlan &MainPlan, VPlan &EpiPlan) {
static void
preparePlanForEpilogueVectorLoop(VPlan &Plan, Loop *L,
const SCEV2ValueTy &ExpandedSCEVs,
- const EpilogueLoopVectorizationInfo &EPI) {
+ EpilogueLoopVectorizationInfo &EPI) {
VPRegionBlock *VectorLoop = Plan.getVectorLoopRegion();
VPBasicBlock *Header = VectorLoop->getEntryBasicBlock();
Header->setName("vec.epilog.vector.body");
@@ -9826,30 +9767,13 @@ preparePlanForEpilogueVectorLoop(VPlan &Plan, Loop *L,
// loop.
using namespace llvm::PatternMatch;
PHINode *EPResumeVal = &*L->getLoopPreheader()->phis().begin();
- assert(EPResumeVal->getType() == IV->getScalarType() &&
- match(EPResumeVal->getIncomingValueForBlock(
- EPI.MainLoopIterationCountCheck),
- m_SpecificInt(0)) &&
- EPResumeVal ==
- find_singleton<PHINode>(
- L->getLoopPreheader()->phis(),
- [&EPI, IV](PHINode &P, bool) -> PHINode * {
- if (P.getType() == IV->getScalarType() &&
- match(P.getIncomingValueForBlock(
- EPI.MainLoopIterationCountCheck),
- m_SpecificInt(0)) &&
- any_of(P.incoming_values(),
- [&EPI](Value *Inc) {
- return Inc == EPI.VectorTripCount;
- }) &&
- all_of(P.incoming_values(), [&EPI](Value *Inc) {
- return Inc == EPI.VectorTripCount ||
- match(Inc, m_SpecificInt(0));
- }))
- return &P;
- return nullptr;
- }) &&
- "Epilogue resume phis do not match!");
+ for (Value *Inc : EPResumeVal->incoming_values()) {
+ if (match(Inc, m_SpecificInt(0)))
+ continue;
+ assert(!EPI.VectorTripCount &&
+ "Must only have a single non-zero incoming value");
+ EPI.VectorTripCount = Inc;
+ }
VPValue *VPV = Plan.getOrAddLiveIn(EPResumeVal);
assert(all_of(IV->users(),
[](const VPUser *U) {
diff --git a/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Legality.cpp b/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Legality.cpp
index f32d57f..e414c12 100644
--- a/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Legality.cpp
+++ b/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Legality.cpp
@@ -81,6 +81,7 @@ LegalityAnalysis::notVectorizableBasedOnOpcodesAndTypes(
case Instruction::Opcode::FPToUI:
case Instruction::Opcode::FPToSI:
case Instruction::Opcode::FPExt:
+ case Instruction::Opcode::PtrToAddr:
case Instruction::Opcode::PtrToInt:
case Instruction::Opcode::IntToPtr:
case Instruction::Opcode::SIToFP:
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.cpp b/llvm/lib/Transforms/Vectorize/VPlan.cpp
index 8052e31..a820b524 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlan.cpp
@@ -951,15 +951,9 @@ VPlan::~VPlan() {
delete BackedgeTakenCount;
}
-void VPlan::prepareToExecute(Value *VectorTripCountV, VPTransformState &State) {
- if (!VectorTripCount.getUnderlyingValue())
- VectorTripCount.setUnderlyingValue(VectorTripCountV);
- else
- assert(VectorTripCount.getUnderlyingValue() == VectorTripCountV &&
- "VectorTripCount set earlier must much VectorTripCountV");
-
+void VPlan::prepareToExecute(VPTransformState &State) {
IRBuilder<> Builder(State.CFG.PrevBB->getTerminator());
- Type *TCTy = VectorTripCountV->getType();
+ Type *TCTy = VPTypeAnalysis(*this).inferScalarType(getTripCount());
// FIXME: Model VF * UF computation completely in VPlan.
unsigned UF = getUF();
if (VF.getNumUsers()) {
@@ -1054,12 +1048,17 @@ void VPlan::execute(VPTransformState *State) {
InstructionCost VPlan::cost(ElementCount VF, VPCostContext &Ctx) {
// For now only return the cost of the vector loop region, ignoring any other
- // blocks, like the preheader or middle blocks.
+ // blocks, like the preheader or middle blocks, except for checking them for
+ // recipes with invalid costs.
InstructionCost Cost = getVectorLoopRegion()->cost(VF, Ctx);
- // If any instructions in the middle block are invalid return invalid.
- // TODO: Remove once no VPlans with VF == vscale x 1 and first-order recurrences are created.
- if (!getMiddleBlock()->cost(VF, Ctx).isValid())
+ // If the cost of the loop region is invalid, or any recipe in the skeleton
+ // outside loop regions is invalid, return an invalid cost.
+ if (!Cost.isValid() || any_of(VPBlockUtils::blocksOnly<VPBasicBlock>(
+ vp_depth_first_shallow(getEntry())),
+ [&VF, &Ctx](VPBasicBlock *VPBB) {
+ return !VPBB->cost(VF, Ctx).isValid();
+ }))
return InstructionCost::getInvalid();
return Cost;
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h
index c42cdd5..6f098351 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -2408,11 +2408,11 @@ public:
// TODO: extend the masked interleaved-group support to reversed access.
assert((!Mask || !IG->isReverse()) &&
"Reversed masked interleave-group not supported.");
- for (unsigned i = 0; i < IG->getFactor(); ++i)
- if (Instruction *I = IG->getMember(i)) {
- if (I->getType()->isVoidTy())
+ for (unsigned I = 0; I < IG->getFactor(); ++I)
+ if (Instruction *Inst = IG->getMember(I)) {
+ if (Inst->getType()->isVoidTy())
continue;
- new VPValue(I, this);
+ new VPValue(Inst, this);
}
for (auto *SV : StoredValues)
@@ -3969,7 +3969,7 @@ public:
}
/// Prepare the plan for execution, setting up the required live-in values.
- void prepareToExecute(Value *VectorTripCount, VPTransformState &State);
+ void prepareToExecute(VPTransformState &State);
/// Generate the IR code for this VPlan.
void execute(VPTransformState *State);
diff --git a/llvm/lib/Transforms/Vectorize/VPlanConstruction.cpp b/llvm/lib/Transforms/Vectorize/VPlanConstruction.cpp
index 7e8eff31..b231a84 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanConstruction.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanConstruction.cpp
@@ -336,12 +336,6 @@ std::unique_ptr<VPlan> PlainCFGBuilder::buildPlainCFG() {
return std::move(Plan);
}
-std::unique_ptr<VPlan> VPlanTransforms::buildPlainCFG(Loop *TheLoop,
- LoopInfo &LI) {
- PlainCFGBuilder Builder(TheLoop, &LI);
- return Builder.buildPlainCFG();
-}
-
/// Checks if \p HeaderVPB is a loop header block in the plain CFG; that is, it
/// has exactly 2 predecessors (preheader and latch), where the block
/// dominates the latch and the preheader dominates the block. If it is a
@@ -457,10 +451,8 @@ static void addCanonicalIVRecipes(VPlan &Plan, VPBasicBlock *HeaderVPBB,
LatchDL);
}
-void VPlanTransforms::prepareForVectorization(
- VPlan &Plan, Type *InductionTy, PredicatedScalarEvolution &PSE,
- bool RequiresScalarEpilogueCheck, bool TailFolded, Loop *TheLoop,
- DebugLoc IVDL, bool HasUncountableEarlyExit, VFRange &Range) {
+static void addInitialSkeleton(VPlan &Plan, Type *InductionTy, DebugLoc IVDL,
+ PredicatedScalarEvolution &PSE, Loop *TheLoop) {
VPDominatorTree VPDT;
VPDT.recalculate(Plan);
@@ -486,12 +478,54 @@ void VPlanTransforms::prepareForVectorization(
addCanonicalIVRecipes(Plan, HeaderVPBB, LatchVPBB, InductionTy, IVDL);
- [[maybe_unused]] bool HandledUncountableEarlyExit = false;
+ // Create SCEV and VPValue for the trip count.
+ // We use the symbolic max backedge-taken-count, which works also when
+ // vectorizing loops with uncountable early exits.
+ const SCEV *BackedgeTakenCountSCEV = PSE.getSymbolicMaxBackedgeTakenCount();
+ assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCountSCEV) &&
+ "Invalid backedge-taken count");
+ ScalarEvolution &SE = *PSE.getSE();
+ const SCEV *TripCount = SE.getTripCountFromExitCount(BackedgeTakenCountSCEV,
+ InductionTy, TheLoop);
+ Plan.setTripCount(
+ vputils::getOrCreateVPValueForSCEVExpr(Plan, TripCount, SE));
+
+ VPBasicBlock *ScalarPH = Plan.createVPBasicBlock("scalar.ph");
+ VPBlockUtils::connectBlocks(ScalarPH, Plan.getScalarHeader());
+
+ // The connection order corresponds to the operands of the conditional branch,
+ // with the middle block already connected to the exit block.
+ VPBlockUtils::connectBlocks(MiddleVPBB, ScalarPH);
+ // Also connect the entry block to the scalar preheader.
+ // TODO: Also introduce a branch recipe together with the minimum trip count
+ // check.
+ VPBlockUtils::connectBlocks(Plan.getEntry(), ScalarPH);
+ Plan.getEntry()->swapSuccessors();
+}
+
+std::unique_ptr<VPlan>
+VPlanTransforms::buildVPlan0(Loop *TheLoop, LoopInfo &LI, Type *InductionTy,
+ DebugLoc IVDL, PredicatedScalarEvolution &PSE) {
+ PlainCFGBuilder Builder(TheLoop, &LI);
+ std::unique_ptr<VPlan> VPlan0 = Builder.buildPlainCFG();
+ addInitialSkeleton(*VPlan0, InductionTy, IVDL, PSE, TheLoop);
+ return VPlan0;
+}
+
+void VPlanTransforms::handleEarlyExits(VPlan &Plan,
+ bool HasUncountableEarlyExit,
+ VFRange &Range) {
+ auto *MiddleVPBB = cast<VPBasicBlock>(
+ Plan.getScalarHeader()->getSinglePredecessor()->getPredecessors()[0]);
+ auto *LatchVPBB = cast<VPBasicBlock>(MiddleVPBB->getSinglePredecessor());
+ VPBlockBase *HeaderVPB = cast<VPBasicBlock>(LatchVPBB->getSuccessors()[1]);
+
// Disconnect all early exits from the loop leaving it with a single exit from
// the latch. Early exits that are countable are left for a scalar epilog. The
// condition of uncountable early exits (currently at most one is supported)
// is fused into the latch exit, and used to branch from middle block to the
// early exit destination.
+ [[maybe_unused]] bool HandledUncountableEarlyExit = false;
for (VPIRBasicBlock *EB : Plan.getExitBlocks()) {
for (VPBlockBase *Pred : to_vector(EB->getPredecessors())) {
if (Pred == MiddleVPBB)
@@ -500,7 +534,8 @@ void VPlanTransforms::prepareForVectorization(
assert(!HandledUncountableEarlyExit &&
"can handle exactly one uncountable early exit");
handleUncountableEarlyExit(cast<VPBasicBlock>(Pred), EB, Plan,
- HeaderVPBB, LatchVPBB, Range);
+ cast<VPBasicBlock>(HeaderVPB), LatchVPBB,
+ Range);
HandledUncountableEarlyExit = true;
} else {
for (VPRecipeBase &R : EB->phis())
@@ -513,36 +548,18 @@ void VPlanTransforms::prepareForVectorization(
assert((!HasUncountableEarlyExit || HandledUncountableEarlyExit) &&
"missed an uncountable exit that must be handled");
+}
- // Create SCEV and VPValue for the trip count.
- // We use the symbolic max backedge-taken-count, which works also when
- // vectorizing loops with uncountable early exits.
- const SCEV *BackedgeTakenCountSCEV = PSE.getSymbolicMaxBackedgeTakenCount();
- assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCountSCEV) &&
- "Invalid loop count");
- ScalarEvolution &SE = *PSE.getSE();
- const SCEV *TripCount = SE.getTripCountFromExitCount(BackedgeTakenCountSCEV,
- InductionTy, TheLoop);
- Plan.setTripCount(
- vputils::getOrCreateVPValueForSCEVExpr(Plan, TripCount, SE));
-
- VPBasicBlock *ScalarPH = Plan.createVPBasicBlock("scalar.ph");
- VPBlockUtils::connectBlocks(ScalarPH, Plan.getScalarHeader());
-
- // The connection order corresponds to the operands of the conditional branch,
- // with the middle block already connected to the exit block.
- VPBlockUtils::connectBlocks(MiddleVPBB, ScalarPH);
- // Also connect the entry block to the scalar preheader.
- // TODO: Also introduce a branch recipe together with the minimum trip count
- // check.
- VPBlockUtils::connectBlocks(Plan.getEntry(), ScalarPH);
- Plan.getEntry()->swapSuccessors();
-
+void VPlanTransforms::addMiddleCheck(VPlan &Plan,
+ bool RequiresScalarEpilogueCheck,
+ bool TailFolded) {
+ auto *MiddleVPBB = cast<VPBasicBlock>(
+ Plan.getScalarHeader()->getSinglePredecessor()->getPredecessors()[0]);
// If MiddleVPBB has a single successor then the original loop does not exit
// via the latch and the single successor must be the scalar preheader.
// There's no need to add a runtime check to MiddleVPBB.
if (MiddleVPBB->getNumSuccessors() == 1) {
- assert(MiddleVPBB->getSingleSuccessor() == ScalarPH &&
+ assert(MiddleVPBB->getSingleSuccessor() == Plan.getScalarPreheader() &&
"must have ScalarPH as single successor");
return;
}
@@ -564,6 +581,7 @@ void VPlanTransforms::prepareForVectorization(
// the corresponding compare because they may have ended up with different
// line numbers and we want to avoid awkward line stepping while debugging.
// E.g., if the compare has got a line number inside the loop.
+ auto *LatchVPBB = cast<VPBasicBlock>(MiddleVPBB->getSinglePredecessor());
DebugLoc LatchDL = LatchVPBB->getTerminator()->getDebugLoc();
VPBuilder Builder(MiddleVPBB);
VPValue *Cmp;
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index 1c8bd6c..34b2abf 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -3278,6 +3278,67 @@ void VPlanTransforms::materializeBackedgeTakenCount(VPlan &Plan,
BTC->replaceAllUsesWith(TCMO);
}
+void VPlanTransforms::materializeVectorTripCount(VPlan &Plan,
+ VPBasicBlock *VectorPHVPBB,
+ bool TailByMasking,
+ bool RequiresScalarEpilogue) {
+ VPValue &VectorTC = Plan.getVectorTripCount();
+ assert(VectorTC.isLiveIn() && "vector-trip-count must be a live-in");
+ // There's nothing to do if there are no users of the vector trip count or its
+ // IR value has already been set.
+ if (VectorTC.getNumUsers() == 0 || VectorTC.getLiveInIRValue())
+ return;
+
+ VPValue *TC = Plan.getTripCount();
+ Type *TCTy = VPTypeAnalysis(Plan).inferScalarType(TC);
+ VPBuilder Builder(VectorPHVPBB, VectorPHVPBB->begin());
+ VPValue *Step = &Plan.getVFxUF();
+
+ // If the tail is to be folded by masking, round the number of iterations N
+ // up to a multiple of Step instead of rounding down. This is done by first
+ // adding Step-1 and then rounding down. Note that it's ok if this addition
+ // overflows: the vector induction variable will eventually wrap to zero given
+ // that it starts at zero and its Step is a power of two; the loop will then
+ // exit, with the last early-exit vector comparison also producing all-true.
+ // For scalable vectors the VF is not guaranteed to be a power of 2, but this
+ // is accounted for in emitIterationCountCheck that adds an overflow check.
+ if (TailByMasking) {
+ TC = Builder.createNaryOp(
+ Instruction::Add,
+ {TC, Builder.createNaryOp(
+ Instruction::Sub,
+ {Step, Plan.getOrAddLiveIn(ConstantInt::get(TCTy, 1))})},
+ DebugLoc::getCompilerGenerated(), "n.rnd.up");
+ }
+
+ // Now we need to generate the expression for the part of the loop that the
+ // vectorized body will execute. This is equal to N - (N % Step) if scalar
+ // iterations are not required for correctness, or N - Step, otherwise. Step
+ // is equal to the vectorization factor (number of SIMD elements) times the
+ // unroll factor (number of SIMD instructions).
+ VPValue *R =
+ Builder.createNaryOp(Instruction::URem, {TC, Step},
+ DebugLoc::getCompilerGenerated(), "n.mod.vf");
+
+ // There are cases where we *must* run at least one iteration in the remainder
+ // loop. See the cost model for when this can happen. If the step evenly
+ // divides the trip count, we set the remainder to be equal to the step. If
+ // the step does not evenly divide the trip count, no adjustment is necessary
+ // since there will already be scalar iterations. Note that the minimum
+ // iterations check ensures that N >= Step.
+ if (RequiresScalarEpilogue) {
+ assert(!TailByMasking &&
+ "requiring scalar epilogue is not supported with fail folding");
+ VPValue *IsZero = Builder.createICmp(
+ CmpInst::ICMP_EQ, R, Plan.getOrAddLiveIn(ConstantInt::get(TCTy, 0)));
+ R = Builder.createSelect(IsZero, Step, R);
+ }
+
+ VPValue *Res = Builder.createNaryOp(
+ Instruction::Sub, {TC, R}, DebugLoc::getCompilerGenerated(), "n.vec");
+ VectorTC.replaceAllUsesWith(Res);
+}
+
/// Returns true if \p V is VPWidenLoadRecipe or VPInterleaveRecipe that can be
/// converted to a narrower recipe. \p V is used by a wide recipe that feeds a
/// store interleave group at index \p Idx, \p WideMember0 is the recipe feeding
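materializeVectorTripCount above re-expresses the arithmetic that getOrCreateVectorTripCount used to emit directly as IR, now as VPInstructions in the vector preheader. The computation itself is unchanged; a small standalone sketch with concrete numbers (illustrative, not VPlan code):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // N = original trip count, Step = VF * UF.
    static uint64_t vectorTripCount(uint64_t N, uint64_t Step, bool TailByMasking,
                                    bool RequiresScalarEpilogue) {
      assert(!(TailByMasking && RequiresScalarEpilogue));
      if (TailByMasking)
        N += Step - 1;          // n.rnd.up: round N up to a multiple of Step
      uint64_t R = N % Step;    // n.mod.vf
      if (RequiresScalarEpilogue && R == 0)
        R = Step;               // force at least one remainder (scalar) iteration
      return N - R;             // n.vec
    }

    int main() {
      std::printf("%llu\n", (unsigned long long)vectorTripCount(10, 4, false, false));  // 8
      std::printf("%llu\n", (unsigned long long)vectorTripCount(10, 4, true, false));   // 12
      std::printf("%llu\n", (unsigned long long)vectorTripCount(8, 4, false, true));    // 4
      return 0;
    }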
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h
index cc50c75..e49137c 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h
@@ -54,21 +54,30 @@ struct VPlanTransforms {
verifyVPlanIsValid(Plan);
}
- LLVM_ABI_FOR_TEST static std::unique_ptr<VPlan> buildPlainCFG(Loop *TheLoop,
- LoopInfo &LI);
-
- /// Prepare the plan for vectorization. It will introduce a dedicated
- /// VPBasicBlock for the vector pre-header as well as a VPBasicBlock as exit
- /// block of the main vector loop (middle.block). If a check is needed to
- /// guard executing the scalar epilogue loop, it will be added to the middle
- /// block, together with VPBasicBlocks for the scalar preheader and exit
- /// blocks. \p InductionTy is the type of the canonical induction and used for
- /// related values, like the trip count expression. It also creates a VPValue
- /// expression for the original trip count.
- LLVM_ABI_FOR_TEST static void prepareForVectorization(
- VPlan &Plan, Type *InductionTy, PredicatedScalarEvolution &PSE,
- bool RequiresScalarEpilogueCheck, bool TailFolded, Loop *TheLoop,
- DebugLoc IVDL, bool HasUncountableExit, VFRange &Range);
+ /// Create a base VPlan0, serving as the common starting point for all later
+ /// candidates. It consists of an initial plain CFG loop with loop blocks from
+ /// \p TheLoop being directly translated to VPBasicBlocks with VPInstructions
+ /// corresponding to the input IR.
+ ///
+ /// The created loop is wrapped in an initial skeleton to facilitate
+ /// vectorization, consisting of a vector pre-header, an exit block for the
+ /// main vector loop (middle.block) and a new block as preheader of the scalar
+ /// loop (scalar.ph). It also adds a canonical IV and its increment, using \p
+ /// InductionTy and \p IVDL, and creates a VPValue expression for the original
+ /// trip count.
+ LLVM_ABI_FOR_TEST static std::unique_ptr<VPlan>
+ buildVPlan0(Loop *TheLoop, LoopInfo &LI, Type *InductionTy, DebugLoc IVDL,
+ PredicatedScalarEvolution &PSE);
+
+ /// Update \p Plan to account for all early exits.
+ LLVM_ABI_FOR_TEST static void
+ handleEarlyExits(VPlan &Plan, bool HasUncountableExit, VFRange &Range);
+
+ /// If a check is needed to guard executing the scalar epilogue loop, it will
+ /// be added to the middle block.
+ LLVM_ABI_FOR_TEST static void addMiddleCheck(VPlan &Plan,
+ bool RequiresScalarEpilogueCheck,
+ bool TailFolded);
/// Replace loops in \p Plan's flat CFG with VPRegionBlocks, turning \p Plan's
/// flat CFG into a hierarchical CFG.
@@ -256,6 +265,12 @@ struct VPlanTransforms {
unsigned BestUF,
PredicatedScalarEvolution &PSE);
+ /// Materialize vector trip count computations to a set of VPInstructions.
+ static void materializeVectorTripCount(VPlan &Plan,
+ VPBasicBlock *VectorPHVPBB,
+ bool TailByMasking,
+ bool RequiresScalarEpilogue);
+
/// Materialize the backedge-taken count to be computed explicitly using
/// VPInstructions.
static void materializeBackedgeTakenCount(VPlan &Plan,
diff --git a/llvm/test/Analysis/BasicAA/modref.ll b/llvm/test/Analysis/BasicAA/modref.ll
index 1aab28f3..4a91fee 100644
--- a/llvm/test/Analysis/BasicAA/modref.ll
+++ b/llvm/test/Analysis/BasicAA/modref.ll
@@ -2,7 +2,7 @@
; RUN: opt < %s -aa-pipeline=basic-aa -passes=gvn,dse -S | FileCheck %s
target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @external(ptr)
@@ -71,7 +71,7 @@ define void @test3(i8 %X) {
; CHECK-LABEL: @test3(
; CHECK-NEXT: [[P:%.*]] = alloca i64, align 8
; CHECK-NEXT: [[P2:%.*]] = getelementptr i8, ptr [[P]], i32 2
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[P]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[P]])
; CHECK-NEXT: store i8 2, ptr [[P2]], align 1
; CHECK-NEXT: call void @external(ptr [[P]])
; CHECK-NEXT: ret void
@@ -81,7 +81,7 @@ define void @test3(i8 %X) {
%P2 = getelementptr i8, ptr %P, i32 2
store i8 %Y, ptr %P2 ;; Not read by lifetime.end, should be removed.
- call void @llvm.lifetime.end.p0(i64 1, ptr %P)
+ call void @llvm.lifetime.end.p0(ptr %P)
store i8 2, ptr %P2
call void @external(ptr %P)
ret void
@@ -90,7 +90,7 @@ define void @test3(i8 %X) {
define void @test3a(i8 %X) {
; CHECK-LABEL: @test3a(
; CHECK-NEXT: [[P:%.*]] = alloca i64, align 8
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 10, ptr [[P]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[P]])
; CHECK-NEXT: ret void
;
%P = alloca i64
@@ -98,7 +98,7 @@ define void @test3a(i8 %X) {
%P2 = getelementptr i8, ptr %P, i32 2
store i8 %Y, ptr %P2
- call void @llvm.lifetime.end.p0(i64 10, ptr %P)
+ call void @llvm.lifetime.end.p0(ptr %P)
ret void
}
diff --git a/llvm/test/Analysis/BasicAA/phi-values-usage.ll b/llvm/test/Analysis/BasicAA/phi-values-usage.ll
index 43df41c..680e1df 100644
--- a/llvm/test/Analysis/BasicAA/phi-values-usage.ll
+++ b/llvm/test/Analysis/BasicAA/phi-values-usage.ll
@@ -14,7 +14,7 @@ target datalayout = "p:8:8-n8"
declare void @otherfn(ptr)
declare i32 @__gxx_personality_v0(...)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
@c = external global ptr, align 1
; This function is one where if we didn't free basicaa after memcpyopt then the
@@ -65,7 +65,7 @@ for.body: ; preds = %for.cond
br label %for.cond
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 1, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
%1 = load ptr, ptr %d.0, align 1
store ptr %1, ptr @c, align 1
ret void
diff --git a/llvm/test/Analysis/CallGraph/ignore-assumelike-calls.ll b/llvm/test/Analysis/CallGraph/ignore-assumelike-calls.ll
index 1c9d201..b93a2a0 100644
--- a/llvm/test/Analysis/CallGraph/ignore-assumelike-calls.ll
+++ b/llvm/test/Analysis/CallGraph/ignore-assumelike-calls.ll
@@ -29,7 +29,7 @@
define internal void @used_by_lifetime() {
entry:
%a = alloca i8
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
ret void
}
@@ -55,6 +55,6 @@ define internal void @other_cast_intrinsic_use() {
ret void
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @llvm.memset.p0.i64(ptr, i8, i64, i1 immarg)
declare void @llvm.memset.p1.i64(ptr addrspace(1), i8, i64, i1 immarg)
diff --git a/llvm/test/Analysis/CostModel/AArch64/arith-fp.ll b/llvm/test/Analysis/CostModel/AArch64/arith-fp.ll
index de1b39d..0a154d09 100644
--- a/llvm/test/Analysis/CostModel/AArch64/arith-fp.ll
+++ b/llvm/test/Analysis/CostModel/AArch64/arith-fp.ll
@@ -34,10 +34,10 @@ define void @fadd() {
define void @fadd_fp16() {
; CHECK-BASE-LABEL: 'fadd_fp16'
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %F16 = fadd half undef, undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V4F16 = fadd <4 x half> undef, undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V8F16 = fadd <8 x half> undef, undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:1 Lat:3 SizeLat:1 for: %V16F16 = fadd <16 x half> undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:1 Lat:3 SizeLat:1 for: %F16 = fadd half undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:1 Lat:3 SizeLat:1 for: %V4F16 = fadd <4 x half> undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:8 CodeSize:1 Lat:3 SizeLat:1 for: %V8F16 = fadd <8 x half> undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:16 CodeSize:1 Lat:3 SizeLat:1 for: %V16F16 = fadd <16 x half> undef, undef
; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
; CHECK-FP16-LABEL: 'fadd_fp16'
@@ -84,10 +84,10 @@ define void @fsub() {
define void @fsub_fp16() {
; CHECK-BASE-LABEL: 'fsub_fp16'
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %F16 = fsub half undef, undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V4F16 = fsub <4 x half> undef, undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V8F16 = fsub <8 x half> undef, undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:1 Lat:3 SizeLat:1 for: %V16F16 = fsub <16 x half> undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:1 Lat:3 SizeLat:1 for: %F16 = fsub half undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:1 Lat:3 SizeLat:1 for: %V4F16 = fsub <4 x half> undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:8 CodeSize:1 Lat:3 SizeLat:1 for: %V8F16 = fsub <8 x half> undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:16 CodeSize:1 Lat:3 SizeLat:1 for: %V16F16 = fsub <16 x half> undef, undef
; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
; CHECK-FP16-LABEL: 'fsub_fp16'
@@ -134,9 +134,9 @@ define void @fneg_idiom() {
define void @fneg_idiom_fp16() {
; CHECK-BASE-LABEL: 'fneg_idiom_fp16'
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %F16 = fsub half 0xH8000, undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V4F16 = fsub <4 x half> splat (half 0xH8000), undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V8F16 = fsub <8 x half> splat (half 0xH8000), undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:3 CodeSize:1 Lat:3 SizeLat:1 for: %F16 = fsub half 0xH8000, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:3 CodeSize:1 Lat:3 SizeLat:1 for: %V4F16 = fsub <4 x half> splat (half 0xH8000), undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:6 CodeSize:1 Lat:3 SizeLat:1 for: %V8F16 = fsub <8 x half> splat (half 0xH8000), undef
; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
; CHECK-FP16-LABEL: 'fneg_idiom_fp16'
@@ -180,21 +180,13 @@ define void @fneg() {
}
define void @fneg_fp16() {
-; CHECK-BASE-LABEL: 'fneg_fp16'
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %F16 = fneg half undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V2F16 = fneg <2 x half> undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V4F16 = fneg <4 x half> undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V8F16 = fneg <8 x half> undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:1 Lat:3 SizeLat:1 for: %V16F16 = fneg <16 x half> undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
-;
-; CHECK-FP16-LABEL: 'fneg_fp16'
-; CHECK-FP16-NEXT: Cost Model: Found costs of RThru:1 CodeSize:1 Lat:3 SizeLat:1 for: %F16 = fneg half undef
-; CHECK-FP16-NEXT: Cost Model: Found costs of RThru:1 CodeSize:1 Lat:3 SizeLat:1 for: %V2F16 = fneg <2 x half> undef
-; CHECK-FP16-NEXT: Cost Model: Found costs of RThru:1 CodeSize:1 Lat:3 SizeLat:1 for: %V4F16 = fneg <4 x half> undef
-; CHECK-FP16-NEXT: Cost Model: Found costs of RThru:1 CodeSize:1 Lat:3 SizeLat:1 for: %V8F16 = fneg <8 x half> undef
-; CHECK-FP16-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V16F16 = fneg <16 x half> undef
-; CHECK-FP16-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+; CHECK-LABEL: 'fneg_fp16'
+; CHECK-NEXT: Cost Model: Found costs of RThru:1 CodeSize:1 Lat:3 SizeLat:1 for: %F16 = fneg half undef
+; CHECK-NEXT: Cost Model: Found costs of RThru:1 CodeSize:1 Lat:3 SizeLat:1 for: %V2F16 = fneg <2 x half> undef
+; CHECK-NEXT: Cost Model: Found costs of RThru:1 CodeSize:1 Lat:3 SizeLat:1 for: %V4F16 = fneg <4 x half> undef
+; CHECK-NEXT: Cost Model: Found costs of RThru:1 CodeSize:1 Lat:3 SizeLat:1 for: %V8F16 = fneg <8 x half> undef
+; CHECK-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V16F16 = fneg <16 x half> undef
+; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
%F16 = fneg half undef
%V2F16 = fneg <2 x half> undef
@@ -252,16 +244,16 @@ define void @fmulfneg() {
define void @fmulneg_fp16() {
; CHECK-BASE-LABEL: 'fmulneg_fp16'
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %F16 = fneg half undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %F16M = fmul half %F16, undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V2F16 = fneg <2 x half> undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V2F16M = fmul <2 x half> %V2F16, undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V4F16 = fneg <4 x half> undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V4F16M = fmul <4 x half> %V4F16, undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V8F16 = fneg <8 x half> undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V8F16M = fmul <8 x half> %V8F16, undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:1 Lat:3 SizeLat:1 for: %V16F16 = fneg <16 x half> undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:1 Lat:3 SizeLat:1 for: %V16F16M = fmul <16 x half> %V16F16, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:1 CodeSize:1 Lat:3 SizeLat:1 for: %F16 = fneg half undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:5 CodeSize:1 Lat:3 SizeLat:1 for: %F16M = fmul half %F16, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:1 CodeSize:1 Lat:3 SizeLat:1 for: %V2F16 = fneg <2 x half> undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:5 CodeSize:1 Lat:3 SizeLat:1 for: %V2F16M = fmul <2 x half> %V2F16, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:1 CodeSize:1 Lat:3 SizeLat:1 for: %V4F16 = fneg <4 x half> undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:5 CodeSize:1 Lat:3 SizeLat:1 for: %V4F16M = fmul <4 x half> %V4F16, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:1 CodeSize:1 Lat:3 SizeLat:1 for: %V8F16 = fneg <8 x half> undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:10 CodeSize:1 Lat:3 SizeLat:1 for: %V8F16M = fmul <8 x half> %V8F16, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V16F16 = fneg <16 x half> undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:20 CodeSize:1 Lat:3 SizeLat:1 for: %V16F16M = fmul <16 x half> %V16F16, undef
; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
; CHECK-FP16-LABEL: 'fmulneg_fp16'
@@ -338,16 +330,16 @@ define void @fnegfmul() {
define void @fnegfmul_fp16() {
; CHECK-BASE-LABEL: 'fnegfmul_fp16'
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %F16M = fmul half undef, undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %F16 = fneg half %F16M
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V2F16M = fmul <2 x half> undef, undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V2F16 = fneg <2 x half> %V2F16M
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V4F16M = fmul <4 x half> undef, undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V4F16 = fneg <4 x half> %V4F16M
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V8F16M = fmul <8 x half> undef, undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V8F16 = fneg <8 x half> %V8F16M
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:1 Lat:3 SizeLat:1 for: %V16F16M = fmul <16 x half> undef, undef
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:1 Lat:3 SizeLat:1 for: %V16F16 = fneg <16 x half> %V16F16M
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:5 CodeSize:1 Lat:3 SizeLat:1 for: %F16M = fmul half undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:1 CodeSize:1 Lat:3 SizeLat:1 for: %F16 = fneg half %F16M
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:5 CodeSize:1 Lat:3 SizeLat:1 for: %V2F16M = fmul <2 x half> undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:1 CodeSize:1 Lat:3 SizeLat:1 for: %V2F16 = fneg <2 x half> %V2F16M
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:5 CodeSize:1 Lat:3 SizeLat:1 for: %V4F16M = fmul <4 x half> undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:1 CodeSize:1 Lat:3 SizeLat:1 for: %V4F16 = fneg <4 x half> %V4F16M
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:10 CodeSize:1 Lat:3 SizeLat:1 for: %V8F16M = fmul <8 x half> undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:1 CodeSize:1 Lat:3 SizeLat:1 for: %V8F16 = fneg <8 x half> %V8F16M
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:20 CodeSize:1 Lat:3 SizeLat:1 for: %V16F16M = fmul <16 x half> undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V16F16 = fneg <16 x half> %V16F16M
; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
; CHECK-FP16-LABEL: 'fnegfmul_fp16'
@@ -405,12 +397,19 @@ define void @fmul() {
}
define void @fmul_fp16() {
-; CHECK-LABEL: 'fmul_fp16'
-; CHECK-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %F16 = fmul half undef, undef
-; CHECK-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V4F16 = fmul <4 x half> undef, undef
-; CHECK-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V8F16 = fmul <8 x half> undef, undef
-; CHECK-NEXT: Cost Model: Found costs of RThru:4 CodeSize:1 Lat:3 SizeLat:1 for: %V16F16 = fmul <16 x half> undef, undef
-; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+; CHECK-BASE-LABEL: 'fmul_fp16'
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:5 CodeSize:1 Lat:3 SizeLat:1 for: %F16 = fmul half undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:5 CodeSize:1 Lat:3 SizeLat:1 for: %V4F16 = fmul <4 x half> undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:10 CodeSize:1 Lat:3 SizeLat:1 for: %V8F16 = fmul <8 x half> undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:20 CodeSize:1 Lat:3 SizeLat:1 for: %V16F16 = fmul <16 x half> undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+;
+; CHECK-FP16-LABEL: 'fmul_fp16'
+; CHECK-FP16-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %F16 = fmul half undef, undef
+; CHECK-FP16-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V4F16 = fmul <4 x half> undef, undef
+; CHECK-FP16-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %V8F16 = fmul <8 x half> undef, undef
+; CHECK-FP16-NEXT: Cost Model: Found costs of RThru:4 CodeSize:1 Lat:3 SizeLat:1 for: %V16F16 = fmul <16 x half> undef, undef
+; CHECK-FP16-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
%F16 = fmul half undef, undef
%V4F16 = fmul <4 x half> undef, undef
@@ -448,12 +447,19 @@ define void @fdiv() {
}
define void @fdiv_fp16() {
-; CHECK-LABEL: 'fdiv_fp16'
-; CHECK-NEXT: Cost Model: Found costs of RThru:2 CodeSize:4 Lat:4 SizeLat:4 for: %F16 = fdiv half undef, undef
-; CHECK-NEXT: Cost Model: Found costs of RThru:2 CodeSize:4 Lat:4 SizeLat:4 for: %V4F16 = fdiv <4 x half> undef, undef
-; CHECK-NEXT: Cost Model: Found costs of RThru:2 CodeSize:4 Lat:4 SizeLat:4 for: %V8F16 = fdiv <8 x half> undef, undef
-; CHECK-NEXT: Cost Model: Found costs of 4 for: %V16F16 = fdiv <16 x half> undef, undef
-; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+; CHECK-BASE-LABEL: 'fdiv_fp16'
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:5 CodeSize:4 Lat:4 SizeLat:4 for: %F16 = fdiv half undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:5 CodeSize:4 Lat:4 SizeLat:4 for: %V4F16 = fdiv <4 x half> undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:10 CodeSize:4 Lat:4 SizeLat:4 for: %V8F16 = fdiv <8 x half> undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:20 CodeSize:4 Lat:4 SizeLat:4 for: %V16F16 = fdiv <16 x half> undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+;
+; CHECK-FP16-LABEL: 'fdiv_fp16'
+; CHECK-FP16-NEXT: Cost Model: Found costs of RThru:2 CodeSize:4 Lat:4 SizeLat:4 for: %F16 = fdiv half undef, undef
+; CHECK-FP16-NEXT: Cost Model: Found costs of RThru:2 CodeSize:4 Lat:4 SizeLat:4 for: %V4F16 = fdiv <4 x half> undef, undef
+; CHECK-FP16-NEXT: Cost Model: Found costs of RThru:2 CodeSize:4 Lat:4 SizeLat:4 for: %V8F16 = fdiv <8 x half> undef, undef
+; CHECK-FP16-NEXT: Cost Model: Found costs of 4 for: %V16F16 = fdiv <16 x half> undef, undef
+; CHECK-FP16-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
%F16 = fdiv half undef, undef
%V4F16 = fdiv <4 x half> undef, undef
@@ -491,12 +497,19 @@ define void @frem() {
}
define void @frem_fp16() {
-; CHECK-LABEL: 'frem_fp16'
-; CHECK-NEXT: Cost Model: Found costs of RThru:10 CodeSize:4 Lat:4 SizeLat:4 for: %F16 = frem half undef, undef
-; CHECK-NEXT: Cost Model: Found costs of RThru:52 CodeSize:4 Lat:4 SizeLat:4 for: %V4F16 = frem <4 x half> undef, undef
-; CHECK-NEXT: Cost Model: Found costs of RThru:108 CodeSize:4 Lat:4 SizeLat:4 for: %V8F16 = frem <8 x half> undef, undef
-; CHECK-NEXT: Cost Model: Found costs of RThru:216 CodeSize:4 Lat:4 SizeLat:4 for: %V16F16 = frem <16 x half> undef, undef
-; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+; CHECK-BASE-LABEL: 'frem_fp16'
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:13 CodeSize:4 Lat:4 SizeLat:4 for: %F16 = frem half undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:55 CodeSize:4 Lat:4 SizeLat:4 for: %V4F16 = frem <4 x half> undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:110 CodeSize:4 Lat:4 SizeLat:4 for: %V8F16 = frem <8 x half> undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:220 CodeSize:4 Lat:4 SizeLat:4 for: %V16F16 = frem <16 x half> undef, undef
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+;
+; CHECK-FP16-LABEL: 'frem_fp16'
+; CHECK-FP16-NEXT: Cost Model: Found costs of RThru:10 CodeSize:4 Lat:4 SizeLat:4 for: %F16 = frem half undef, undef
+; CHECK-FP16-NEXT: Cost Model: Found costs of RThru:52 CodeSize:4 Lat:4 SizeLat:4 for: %V4F16 = frem <4 x half> undef, undef
+; CHECK-FP16-NEXT: Cost Model: Found costs of RThru:108 CodeSize:4 Lat:4 SizeLat:4 for: %V8F16 = frem <8 x half> undef, undef
+; CHECK-FP16-NEXT: Cost Model: Found costs of RThru:216 CodeSize:4 Lat:4 SizeLat:4 for: %V16F16 = frem <16 x half> undef, undef
+; CHECK-FP16-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
%F16 = frem half undef, undef
%V4F16 = frem <4 x half> undef, undef
@@ -729,9 +742,9 @@ define void @fmuladd() {
define void @fmuladd_fp16() {
; CHECK-BASE-LABEL: 'fmuladd_fp16'
; CHECK-BASE-NEXT: Cost Model: Found costs of 1 for: %F16 = call half @llvm.fmuladd.f16(half undef, half undef, half undef)
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:2 Lat:6 SizeLat:2 for: %V4F16 = call <4 x half> @llvm.fmuladd.v4f16(<4 x half> undef, <4 x half> undef, <4 x half> undef)
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:2 Lat:6 SizeLat:2 for: %V8F16 = call <8 x half> @llvm.fmuladd.v8f16(<8 x half> undef, <8 x half> undef, <8 x half> undef)
-; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:8 CodeSize:2 Lat:6 SizeLat:2 for: %V16F16 = call <16 x half> @llvm.fmuladd.v16f16(<16 x half> undef, <16 x half> undef, <16 x half> undef)
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:9 CodeSize:2 Lat:6 SizeLat:2 for: %V4F16 = call <4 x half> @llvm.fmuladd.v4f16(<4 x half> undef, <4 x half> undef, <4 x half> undef)
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:18 CodeSize:2 Lat:6 SizeLat:2 for: %V8F16 = call <8 x half> @llvm.fmuladd.v8f16(<8 x half> undef, <8 x half> undef, <8 x half> undef)
+; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:36 CodeSize:2 Lat:6 SizeLat:2 for: %V16F16 = call <16 x half> @llvm.fmuladd.v16f16(<16 x half> undef, <16 x half> undef, <16 x half> undef)
; CHECK-BASE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
; CHECK-FP16-LABEL: 'fmuladd_fp16'
diff --git a/llvm/test/Analysis/CostModel/AArch64/extract_float.ll b/llvm/test/Analysis/CostModel/AArch64/extract_float.ll
index d2b75faa..c214021 100644
--- a/llvm/test/Analysis/CostModel/AArch64/extract_float.ll
+++ b/llvm/test/Analysis/CostModel/AArch64/extract_float.ll
@@ -11,6 +11,7 @@ define double @extract_case1(<2 x double> %a) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %1 = extractelement <2 x double> %a, i32 1
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %res = fmul double %0, %1
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret double %res
+;
entry:
%1 = extractelement <2 x double> %a, i32 0
%2 = extractelement <2 x double> %a, i32 1
@@ -24,6 +25,7 @@ define double @extract_case2(<2 x double> %a) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %0 = extractelement <2 x double> %a, i32 1
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %res = fmul double %0, %0
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret double %res
+;
entry:
%1 = extractelement <2 x double> %a, i32 1
%res = fmul double %1, %1
@@ -36,6 +38,7 @@ define double @extract_case3(<2 x double> %a) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %0 = extractelement <2 x double> %a, i32 0
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %res = fmul double %0, %0
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret double %res
+;
entry:
%1 = extractelement <2 x double> %a, i32 0
%res = fmul double %1, %1
@@ -48,6 +51,7 @@ define double @extract_case4(<2 x double> %a, double %b) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %0 = extractelement <2 x double> %a, i32 0
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %res = fmul double %0, %b
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret double %res
+;
entry:
%1 = extractelement <2 x double> %a, i32 0
%res = fmul double %1, %b
@@ -60,6 +64,7 @@ define double @extract_case5(<2 x double> %a, double %b) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %0 = extractelement <2 x double> %a, i32 1
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %res = fmul double %0, %b
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret double %res
+;
entry:
%1 = extractelement <2 x double> %a, i32 1
%res = fmul double %1, %b
@@ -74,6 +79,7 @@ define double @extract_case6(<3 x double> %a) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %1 = extractelement <3 x double> %a, i32 1
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %res = fmul double %0, %1
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret double %res
+;
entry:
%1 = extractelement <3 x double> %a, i32 0
%2 = extractelement <3 x double> %a, i32 1
@@ -90,6 +96,7 @@ define double @extract_case7(<4 x double> %a) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %1 = extractelement <4 x double> %a, i32 2
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %res = fmul double %0, %1
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret double %res
+;
entry:
%1 = extractelement <4 x double> %a, i32 1
%2 = extractelement <4 x double> %a, i32 2
@@ -108,6 +115,7 @@ define double @extract_case8(<2 x double> %a) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %4 = fmul double %0, %1
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %5 = fmul double %3, %4
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret double %5
+;
entry:
%1 = extractelement <2 x double> %a, i32 0
%2 = extractelement <2 x double> %a, i32 1
@@ -129,6 +137,7 @@ define double @extract_case9(<2 x double> %a) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %4 = fmul double %0, %1
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %5 = fmul double %3, %4
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret double %5
+;
entry:
%1 = extractelement <2 x double> %a, i32 0
%2 = extractelement <2 x double> %a, i32 1
@@ -148,6 +157,7 @@ define double @extract_case10(<4 x double> %a) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: call void @foo(double %1)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %2 = fmul double %0, %1
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret double %2
+;
entry:
%1 = extractelement <4 x double> %a, i32 0
%2 = extractelement <4 x double> %a, i32 1
@@ -161,7 +171,7 @@ define half @extract_case11(<2 x half> %a) {
; NOFP16-LABEL: 'extract_case11'
; NOFP16-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %0 = extractelement <2 x half> %a, i32 0
; NOFP16-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %1 = extractelement <2 x half> %a, i32 1
-; NOFP16-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %res = fmul half %0, %1
+; NOFP16-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %res = fmul half %0, %1
; NOFP16-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret half %res
;
; FULLFP16-LABEL: 'extract_case11'
@@ -169,6 +179,7 @@ define half @extract_case11(<2 x half> %a) {
; FULLFP16-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %1 = extractelement <2 x half> %a, i32 1
; FULLFP16-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %res = fmul half %0, %1
; FULLFP16-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret half %res
+;
entry:
%1 = extractelement <2 x half> %a, i32 0
%2 = extractelement <2 x half> %a, i32 1
@@ -183,6 +194,7 @@ define float @extract_case12(<2 x float> %a) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %1 = extractelement <2 x float> %a, i32 1
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %res = fmul float %0, %1
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret float %res
+;
entry:
%1 = extractelement <2 x float> %a, i32 0
%2 = extractelement <2 x float> %a, i32 1
@@ -198,6 +210,7 @@ define double @extract_case13(<2 x double> %a) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %1 = extractelement <2 x double> %a, i32 1
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %res = fadd double %0, %1
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret double %res
+;
entry:
%1 = extractelement <2 x double> %a, i32 0
%2 = extractelement <2 x double> %a, i32 1
diff --git a/llvm/test/Analysis/CostModel/AArch64/histograms.ll b/llvm/test/Analysis/CostModel/AArch64/histograms.ll
new file mode 100644
index 0000000..c048958
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/AArch64/histograms.ll
@@ -0,0 +1,178 @@
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 5
+; RUN: opt < %s -passes="print<cost-model>" -cost-kind=throughput 2>&1 -disable-output -S -mtriple=aarch64--linux-gnu | FileCheck %s --check-prefix=CHECK-NEON
+; RUN: opt < %s -passes="print<cost-model>" -cost-kind=throughput 2>&1 -disable-output -S -mtriple=aarch64--linux-gnu -mattr=+sve | FileCheck %s --check-prefix=CHECK-SVE
+; RUN: opt < %s -passes="print<cost-model>" -cost-kind=throughput 2>&1 -disable-output -S -mtriple=aarch64--linux-gnu -mattr=+sve2 | FileCheck %s --check-prefix=CHECK-SVE2
+
+define void @histograms() {
+; CHECK-NEON-LABEL: 'histograms'
+; CHECK-NEON-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.nxv2p0.i64(<vscale x 2 x ptr> poison, i64 1, <vscale x 2 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.nxv4p0.i32(<vscale x 4 x ptr> poison, i32 1, <vscale x 4 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.nxv8p0.i16(<vscale x 8 x ptr> poison, i16 1, <vscale x 8 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.nxv16p0.i8(<vscale x 16 x ptr> poison, i8 1, <vscale x 16 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Found an estimated cost of 10 for instruction: call void @llvm.experimental.vector.histogram.add.v2p0.i64(<2 x ptr> poison, i64 1, <2 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Found an estimated cost of 20 for instruction: call void @llvm.experimental.vector.histogram.add.v4p0.i32(<4 x ptr> poison, i32 1, <4 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Found an estimated cost of 40 for instruction: call void @llvm.experimental.vector.histogram.add.v8p0.i16(<8 x ptr> poison, i16 1, <8 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Found an estimated cost of 80 for instruction: call void @llvm.experimental.vector.histogram.add.v16p0.i8(<16 x ptr> poison, i8 1, <16 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.nxv2p0.i64(<vscale x 2 x ptr> poison, i64 1, <vscale x 2 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.nxv4p0.i32(<vscale x 4 x ptr> poison, i32 1, <vscale x 4 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.nxv8p0.i16(<vscale x 8 x ptr> poison, i16 1, <vscale x 8 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.nxv16p0.i8(<vscale x 16 x ptr> poison, i8 1, <vscale x 16 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Found an estimated cost of 12 for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.v2p0.i64(<2 x ptr> poison, i64 1, <2 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Found an estimated cost of 24 for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.v4p0.i32(<4 x ptr> poison, i32 1, <4 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Found an estimated cost of 64 for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.v8p0.i16(<8 x ptr> poison, i16 1, <8 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Found an estimated cost of 128 for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.v16p0.i8(<16 x ptr> poison, i8 1, <16 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umax.nxv2p0.i64(<vscale x 2 x ptr> poison, i64 1, <vscale x 2 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umax.nxv4p0.i32(<vscale x 4 x ptr> poison, i32 1, <vscale x 4 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umax.nxv8p0.i16(<vscale x 8 x ptr> poison, i16 1, <vscale x 8 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umax.nxv16p0.i8(<vscale x 16 x ptr> poison, i8 1, <vscale x 16 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Found an estimated cost of 12 for instruction: call void @llvm.experimental.vector.histogram.umax.v2p0.i64(<2 x ptr> poison, i64 1, <2 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Found an estimated cost of 24 for instruction: call void @llvm.experimental.vector.histogram.umax.v4p0.i32(<4 x ptr> poison, i32 1, <4 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Found an estimated cost of 48 for instruction: call void @llvm.experimental.vector.histogram.umax.v8p0.i16(<8 x ptr> poison, i16 1, <8 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Found an estimated cost of 96 for instruction: call void @llvm.experimental.vector.histogram.umax.v16p0.i8(<16 x ptr> poison, i8 1, <16 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umin.nxv2p0.i64(<vscale x 2 x ptr> poison, i64 1, <vscale x 2 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umin.nxv4p0.i32(<vscale x 4 x ptr> poison, i32 1, <vscale x 4 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umin.nxv8p0.i16(<vscale x 8 x ptr> poison, i16 1, <vscale x 8 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umin.nxv16p0.i8(<vscale x 16 x ptr> poison, i8 1, <vscale x 16 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Found an estimated cost of 12 for instruction: call void @llvm.experimental.vector.histogram.umin.v2p0.i64(<2 x ptr> poison, i64 1, <2 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Found an estimated cost of 24 for instruction: call void @llvm.experimental.vector.histogram.umin.v4p0.i32(<4 x ptr> poison, i32 1, <4 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Found an estimated cost of 48 for instruction: call void @llvm.experimental.vector.histogram.umin.v8p0.i16(<8 x ptr> poison, i16 1, <8 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Found an estimated cost of 96 for instruction: call void @llvm.experimental.vector.histogram.umin.v16p0.i8(<16 x ptr> poison, i8 1, <16 x i1> poison)
+; CHECK-NEON-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; CHECK-SVE-LABEL: 'histograms'
+; CHECK-SVE-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.nxv2p0.i64(<vscale x 2 x ptr> poison, i64 1, <vscale x 2 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.nxv4p0.i32(<vscale x 4 x ptr> poison, i32 1, <vscale x 4 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.nxv8p0.i16(<vscale x 8 x ptr> poison, i16 1, <vscale x 8 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.nxv16p0.i8(<vscale x 16 x ptr> poison, i8 1, <vscale x 16 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Found an estimated cost of 10 for instruction: call void @llvm.experimental.vector.histogram.add.v2p0.i64(<2 x ptr> poison, i64 1, <2 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Found an estimated cost of 20 for instruction: call void @llvm.experimental.vector.histogram.add.v4p0.i32(<4 x ptr> poison, i32 1, <4 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Found an estimated cost of 40 for instruction: call void @llvm.experimental.vector.histogram.add.v8p0.i16(<8 x ptr> poison, i16 1, <8 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Found an estimated cost of 80 for instruction: call void @llvm.experimental.vector.histogram.add.v16p0.i8(<16 x ptr> poison, i8 1, <16 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.nxv2p0.i64(<vscale x 2 x ptr> poison, i64 1, <vscale x 2 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.nxv4p0.i32(<vscale x 4 x ptr> poison, i32 1, <vscale x 4 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.nxv8p0.i16(<vscale x 8 x ptr> poison, i16 1, <vscale x 8 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.nxv16p0.i8(<vscale x 16 x ptr> poison, i8 1, <vscale x 16 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Found an estimated cost of 12 for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.v2p0.i64(<2 x ptr> poison, i64 1, <2 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Found an estimated cost of 24 for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.v4p0.i32(<4 x ptr> poison, i32 1, <4 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Found an estimated cost of 64 for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.v8p0.i16(<8 x ptr> poison, i16 1, <8 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Found an estimated cost of 128 for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.v16p0.i8(<16 x ptr> poison, i8 1, <16 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umax.nxv2p0.i64(<vscale x 2 x ptr> poison, i64 1, <vscale x 2 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umax.nxv4p0.i32(<vscale x 4 x ptr> poison, i32 1, <vscale x 4 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umax.nxv8p0.i16(<vscale x 8 x ptr> poison, i16 1, <vscale x 8 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umax.nxv16p0.i8(<vscale x 16 x ptr> poison, i8 1, <vscale x 16 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Found an estimated cost of 12 for instruction: call void @llvm.experimental.vector.histogram.umax.v2p0.i64(<2 x ptr> poison, i64 1, <2 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Found an estimated cost of 24 for instruction: call void @llvm.experimental.vector.histogram.umax.v4p0.i32(<4 x ptr> poison, i32 1, <4 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Found an estimated cost of 48 for instruction: call void @llvm.experimental.vector.histogram.umax.v8p0.i16(<8 x ptr> poison, i16 1, <8 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Found an estimated cost of 96 for instruction: call void @llvm.experimental.vector.histogram.umax.v16p0.i8(<16 x ptr> poison, i8 1, <16 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umin.nxv2p0.i64(<vscale x 2 x ptr> poison, i64 1, <vscale x 2 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umin.nxv4p0.i32(<vscale x 4 x ptr> poison, i32 1, <vscale x 4 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umin.nxv8p0.i16(<vscale x 8 x ptr> poison, i16 1, <vscale x 8 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umin.nxv16p0.i8(<vscale x 16 x ptr> poison, i8 1, <vscale x 16 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Found an estimated cost of 12 for instruction: call void @llvm.experimental.vector.histogram.umin.v2p0.i64(<2 x ptr> poison, i64 1, <2 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Found an estimated cost of 24 for instruction: call void @llvm.experimental.vector.histogram.umin.v4p0.i32(<4 x ptr> poison, i32 1, <4 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Found an estimated cost of 48 for instruction: call void @llvm.experimental.vector.histogram.umin.v8p0.i16(<8 x ptr> poison, i16 1, <8 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Found an estimated cost of 96 for instruction: call void @llvm.experimental.vector.histogram.umin.v16p0.i8(<16 x ptr> poison, i8 1, <16 x i1> poison)
+; CHECK-SVE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; CHECK-SVE2-LABEL: 'histograms'
+; CHECK-SVE2-NEXT: Cost Model: Found an estimated cost of 8 for instruction: call void @llvm.experimental.vector.histogram.add.nxv2p0.i64(<vscale x 2 x ptr> poison, i64 1, <vscale x 2 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Found an estimated cost of 8 for instruction: call void @llvm.experimental.vector.histogram.add.nxv4p0.i32(<vscale x 4 x ptr> poison, i32 1, <vscale x 4 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Found an estimated cost of 16 for instruction: call void @llvm.experimental.vector.histogram.add.nxv8p0.i16(<vscale x 8 x ptr> poison, i16 1, <vscale x 8 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Found an estimated cost of 32 for instruction: call void @llvm.experimental.vector.histogram.add.nxv16p0.i8(<vscale x 16 x ptr> poison, i8 1, <vscale x 16 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Found an estimated cost of 10 for instruction: call void @llvm.experimental.vector.histogram.add.v2p0.i64(<2 x ptr> poison, i64 1, <2 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Found an estimated cost of 20 for instruction: call void @llvm.experimental.vector.histogram.add.v4p0.i32(<4 x ptr> poison, i32 1, <4 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Found an estimated cost of 40 for instruction: call void @llvm.experimental.vector.histogram.add.v8p0.i16(<8 x ptr> poison, i16 1, <8 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Found an estimated cost of 80 for instruction: call void @llvm.experimental.vector.histogram.add.v16p0.i8(<16 x ptr> poison, i8 1, <16 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.nxv2p0.i64(<vscale x 2 x ptr> poison, i64 1, <vscale x 2 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.nxv4p0.i32(<vscale x 4 x ptr> poison, i32 1, <vscale x 4 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.nxv8p0.i16(<vscale x 8 x ptr> poison, i16 1, <vscale x 8 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.nxv16p0.i8(<vscale x 16 x ptr> poison, i8 1, <vscale x 16 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Found an estimated cost of 12 for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.v2p0.i64(<2 x ptr> poison, i64 1, <2 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Found an estimated cost of 24 for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.v4p0.i32(<4 x ptr> poison, i32 1, <4 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Found an estimated cost of 64 for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.v8p0.i16(<8 x ptr> poison, i16 1, <8 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Found an estimated cost of 128 for instruction: call void @llvm.experimental.vector.histogram.uadd.sat.v16p0.i8(<16 x ptr> poison, i8 1, <16 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umax.nxv2p0.i64(<vscale x 2 x ptr> poison, i64 1, <vscale x 2 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umax.nxv4p0.i32(<vscale x 4 x ptr> poison, i32 1, <vscale x 4 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umax.nxv8p0.i16(<vscale x 8 x ptr> poison, i16 1, <vscale x 8 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umax.nxv16p0.i8(<vscale x 16 x ptr> poison, i8 1, <vscale x 16 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Found an estimated cost of 12 for instruction: call void @llvm.experimental.vector.histogram.umax.v2p0.i64(<2 x ptr> poison, i64 1, <2 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Found an estimated cost of 24 for instruction: call void @llvm.experimental.vector.histogram.umax.v4p0.i32(<4 x ptr> poison, i32 1, <4 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Found an estimated cost of 48 for instruction: call void @llvm.experimental.vector.histogram.umax.v8p0.i16(<8 x ptr> poison, i16 1, <8 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Found an estimated cost of 96 for instruction: call void @llvm.experimental.vector.histogram.umax.v16p0.i8(<16 x ptr> poison, i8 1, <16 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umin.nxv2p0.i64(<vscale x 2 x ptr> poison, i64 1, <vscale x 2 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umin.nxv4p0.i32(<vscale x 4 x ptr> poison, i32 1, <vscale x 4 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umin.nxv8p0.i16(<vscale x 8 x ptr> poison, i16 1, <vscale x 8 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.umin.nxv16p0.i8(<vscale x 16 x ptr> poison, i8 1, <vscale x 16 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Found an estimated cost of 12 for instruction: call void @llvm.experimental.vector.histogram.umin.v2p0.i64(<2 x ptr> poison, i64 1, <2 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Found an estimated cost of 24 for instruction: call void @llvm.experimental.vector.histogram.umin.v4p0.i32(<4 x ptr> poison, i32 1, <4 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Found an estimated cost of 48 for instruction: call void @llvm.experimental.vector.histogram.umin.v8p0.i16(<8 x ptr> poison, i16 1, <8 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Found an estimated cost of 96 for instruction: call void @llvm.experimental.vector.histogram.umin.v16p0.i8(<16 x ptr> poison, i8 1, <16 x i1> poison)
+; CHECK-SVE2-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+ call void @llvm.experimental.vector.histogram.add.nxv2p0.i64(<vscale x 2 x ptr> poison, i64 1, <vscale x 2 x i1> poison)
+ call void @llvm.experimental.vector.histogram.add.nxv4p0.i32(<vscale x 4 x ptr> poison, i32 1, <vscale x 4 x i1> poison)
+ call void @llvm.experimental.vector.histogram.add.nxv8p0.i16(<vscale x 8 x ptr> poison, i16 1, <vscale x 8 x i1> poison)
+ call void @llvm.experimental.vector.histogram.add.nxv16p0.i8(<vscale x 16 x ptr> poison, i8 1, <vscale x 16 x i1> poison)
+ call void @llvm.experimental.vector.histogram.add.v2p0.i64(<2 x ptr> poison, i64 1, <2 x i1> poison)
+ call void @llvm.experimental.vector.histogram.add.v4p0.i32(<4 x ptr> poison, i32 1, <4 x i1> poison)
+ call void @llvm.experimental.vector.histogram.add.v8p0.i16(<8 x ptr> poison, i16 1, <8 x i1> poison)
+ call void @llvm.experimental.vector.histogram.add.v16p0.i8(<16 x ptr> poison, i8 1, <16 x i1> poison)
+ call void @llvm.experimental.vector.histogram.uadd.sat.nxv2p0.i64(<vscale x 2 x ptr> poison, i64 1, <vscale x 2 x i1> poison)
+ call void @llvm.experimental.vector.histogram.uadd.sat.nxv4p0.i32(<vscale x 4 x ptr> poison, i32 1, <vscale x 4 x i1> poison)
+ call void @llvm.experimental.vector.histogram.uadd.sat.nxv8p0.i16(<vscale x 8 x ptr> poison, i16 1, <vscale x 8 x i1> poison)
+ call void @llvm.experimental.vector.histogram.uadd.sat.nxv16p0.i8(<vscale x 16 x ptr> poison, i8 1, <vscale x 16 x i1> poison)
+ call void @llvm.experimental.vector.histogram.uadd.sat.v2p0.i64(<2 x ptr> poison, i64 1, <2 x i1> poison)
+ call void @llvm.experimental.vector.histogram.uadd.sat.v4p0.i32(<4 x ptr> poison, i32 1, <4 x i1> poison)
+ call void @llvm.experimental.vector.histogram.uadd.sat.v8p0.i16(<8 x ptr> poison, i16 1, <8 x i1> poison)
+ call void @llvm.experimental.vector.histogram.uadd.sat.v16p0.i8(<16 x ptr> poison, i8 1, <16 x i1> poison)
+ call void @llvm.experimental.vector.histogram.umax.nxv2p0.i64(<vscale x 2 x ptr> poison, i64 1, <vscale x 2 x i1> poison)
+ call void @llvm.experimental.vector.histogram.umax.nxv4p0.i32(<vscale x 4 x ptr> poison, i32 1, <vscale x 4 x i1> poison)
+ call void @llvm.experimental.vector.histogram.umax.nxv8p0.i16(<vscale x 8 x ptr> poison, i16 1, <vscale x 8 x i1> poison)
+ call void @llvm.experimental.vector.histogram.umax.nxv16p0.i8(<vscale x 16 x ptr> poison, i8 1, <vscale x 16 x i1> poison)
+ call void @llvm.experimental.vector.histogram.umax.v2p0.i64(<2 x ptr> poison, i64 1, <2 x i1> poison)
+ call void @llvm.experimental.vector.histogram.umax.v4p0.i32(<4 x ptr> poison, i32 1, <4 x i1> poison)
+ call void @llvm.experimental.vector.histogram.umax.v8p0.i16(<8 x ptr> poison, i16 1, <8 x i1> poison)
+ call void @llvm.experimental.vector.histogram.umax.v16p0.i8(<16 x ptr> poison, i8 1, <16 x i1> poison)
+ call void @llvm.experimental.vector.histogram.umin.nxv2p0.i64(<vscale x 2 x ptr> poison, i64 1, <vscale x 2 x i1> poison)
+ call void @llvm.experimental.vector.histogram.umin.nxv4p0.i32(<vscale x 4 x ptr> poison, i32 1, <vscale x 4 x i1> poison)
+ call void @llvm.experimental.vector.histogram.umin.nxv8p0.i16(<vscale x 8 x ptr> poison, i16 1, <vscale x 8 x i1> poison)
+ call void @llvm.experimental.vector.histogram.umin.nxv16p0.i8(<vscale x 16 x ptr> poison, i8 1, <vscale x 16 x i1> poison)
+ call void @llvm.experimental.vector.histogram.umin.v2p0.i64(<2 x ptr> poison, i64 1, <2 x i1> poison)
+ call void @llvm.experimental.vector.histogram.umin.v4p0.i32(<4 x ptr> poison, i32 1, <4 x i1> poison)
+ call void @llvm.experimental.vector.histogram.umin.v8p0.i16(<8 x ptr> poison, i16 1, <8 x i1> poison)
+ call void @llvm.experimental.vector.histogram.umin.v16p0.i8(<16 x ptr> poison, i8 1, <16 x i1> poison)
+ ret void
+}
+
+declare void @llvm.experimental.vector.histogram.add.nxv2p0.i64(<vscale x 2 x ptr>, i64, <vscale x 2 x i1>)
+declare void @llvm.experimental.vector.histogram.add.nxv4p0.i32(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
+declare void @llvm.experimental.vector.histogram.add.nxv8p0.i16(<vscale x 8 x ptr>, i16, <vscale x 8 x i1>)
+declare void @llvm.experimental.vector.histogram.add.nxv16p0.i8(<vscale x 16 x ptr>, i8, <vscale x 16 x i1>)
+declare void @llvm.experimental.vector.histogram.add.v2p0.i64(<2 x ptr>, i64, <2 x i1>)
+declare void @llvm.experimental.vector.histogram.add.v4p0.i32(<4 x ptr>, i32, <4 x i1>)
+declare void @llvm.experimental.vector.histogram.add.v8p0.i16(<8 x ptr>, i16, <8 x i1>)
+declare void @llvm.experimental.vector.histogram.add.v16p0.i8(<16 x ptr>, i8, <16 x i1>)
+declare void @llvm.experimental.vector.histogram.uadd.sat.nxv2p0.i64(<vscale x 2 x ptr>, i64, <vscale x 2 x i1>)
+declare void @llvm.experimental.vector.histogram.uadd.sat.nxv4p0.i32(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
+declare void @llvm.experimental.vector.histogram.uadd.sat.nxv8p0.i16(<vscale x 8 x ptr>, i16, <vscale x 8 x i1>)
+declare void @llvm.experimental.vector.histogram.uadd.sat.nxv16p0.i8(<vscale x 16 x ptr>, i8, <vscale x 16 x i1>)
+declare void @llvm.experimental.vector.histogram.uadd.sat.v2p0.i64(<2 x ptr>, i64, <2 x i1>)
+declare void @llvm.experimental.vector.histogram.uadd.sat.v4p0.i32(<4 x ptr>, i32, <4 x i1>)
+declare void @llvm.experimental.vector.histogram.uadd.sat.v8p0.i16(<8 x ptr>, i16, <8 x i1>)
+declare void @llvm.experimental.vector.histogram.uadd.sat.v16p0.i8(<16 x ptr>, i8, <16 x i1>)
+declare void @llvm.experimental.vector.histogram.umax.nxv2p0.i64(<vscale x 2 x ptr>, i64, <vscale x 2 x i1>)
+declare void @llvm.experimental.vector.histogram.umax.nxv4p0.i32(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
+declare void @llvm.experimental.vector.histogram.umax.nxv8p0.i16(<vscale x 8 x ptr>, i16, <vscale x 8 x i1>)
+declare void @llvm.experimental.vector.histogram.umax.nxv16p0.i8(<vscale x 16 x ptr>, i8, <vscale x 16 x i1>)
+declare void @llvm.experimental.vector.histogram.umax.v2p0.i64(<2 x ptr>, i64, <2 x i1>)
+declare void @llvm.experimental.vector.histogram.umax.v4p0.i32(<4 x ptr>, i32, <4 x i1>)
+declare void @llvm.experimental.vector.histogram.umax.v8p0.i16(<8 x ptr>, i16, <8 x i1>)
+declare void @llvm.experimental.vector.histogram.umax.v16p0.i8(<16 x ptr>, i8, <16 x i1>)
+declare void @llvm.experimental.vector.histogram.umin.nxv2p0.i64(<vscale x 2 x ptr>, i64, <vscale x 2 x i1>)
+declare void @llvm.experimental.vector.histogram.umin.nxv4p0.i32(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
+declare void @llvm.experimental.vector.histogram.umin.nxv8p0.i16(<vscale x 8 x ptr>, i16, <vscale x 8 x i1>)
+declare void @llvm.experimental.vector.histogram.umin.nxv16p0.i8(<vscale x 16 x ptr>, i8, <vscale x 16 x i1>)
+declare void @llvm.experimental.vector.histogram.umin.v2p0.i64(<2 x ptr>, i64, <2 x i1>)
+declare void @llvm.experimental.vector.histogram.umin.v4p0.i32(<4 x ptr>, i32, <4 x i1>)
+declare void @llvm.experimental.vector.histogram.umin.v8p0.i16(<8 x ptr>, i16, <8 x i1>)
+declare void @llvm.experimental.vector.histogram.umin.v16p0.i8(<16 x ptr>, i8, <16 x i1>)
diff --git a/llvm/test/Analysis/CostModel/AArch64/reduce-fadd.ll b/llvm/test/Analysis/CostModel/AArch64/reduce-fadd.ll
index f565924..c4236d2 100644
--- a/llvm/test/Analysis/CostModel/AArch64/reduce-fadd.ll
+++ b/llvm/test/Analysis/CostModel/AArch64/reduce-fadd.ll
@@ -26,10 +26,10 @@ define void @strict_fp_reductions() {
define void @strict_fp_reductions_fp16() {
; CHECK-NOFP16-LABEL: 'strict_fp_reductions_fp16'
-; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:8 CodeSize:5 Lat:10 SizeLat:6 for: %fadd_v2f16 = call half @llvm.vector.reduce.fadd.v2f16(half 0xH0000, <2 x half> undef)
-; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:18 CodeSize:11 Lat:22 SizeLat:14 for: %fadd_v4f16 = call half @llvm.vector.reduce.fadd.v4f16(half 0xH0000, <4 x half> undef)
-; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:38 CodeSize:23 Lat:46 SizeLat:30 for: %fadd_v8f16 = call half @llvm.vector.reduce.fadd.v8f16(half 0xH0000, <8 x half> undef)
-; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:76 CodeSize:46 Lat:92 SizeLat:60 for: %fadd_v16f16 = call half @llvm.vector.reduce.fadd.v16f16(half 0xH0000, <16 x half> undef)
+; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:12 CodeSize:5 Lat:10 SizeLat:6 for: %fadd_v2f16 = call half @llvm.vector.reduce.fadd.v2f16(half 0xH0000, <2 x half> undef)
+; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:26 CodeSize:11 Lat:22 SizeLat:14 for: %fadd_v4f16 = call half @llvm.vector.reduce.fadd.v4f16(half 0xH0000, <4 x half> undef)
+; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:54 CodeSize:23 Lat:46 SizeLat:30 for: %fadd_v8f16 = call half @llvm.vector.reduce.fadd.v8f16(half 0xH0000, <8 x half> undef)
+; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:108 CodeSize:46 Lat:92 SizeLat:60 for: %fadd_v16f16 = call half @llvm.vector.reduce.fadd.v16f16(half 0xH0000, <16 x half> undef)
; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
; CHECK-F16-LABEL: 'strict_fp_reductions_fp16'
@@ -40,10 +40,10 @@ define void @strict_fp_reductions_fp16() {
; CHECK-F16-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
; CHECK-BF16-LABEL: 'strict_fp_reductions_fp16'
-; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:8 CodeSize:5 Lat:10 SizeLat:6 for: %fadd_v2f16 = call half @llvm.vector.reduce.fadd.v2f16(half 0xH0000, <2 x half> undef)
-; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:18 CodeSize:11 Lat:22 SizeLat:14 for: %fadd_v4f16 = call half @llvm.vector.reduce.fadd.v4f16(half 0xH0000, <4 x half> undef)
-; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:38 CodeSize:23 Lat:46 SizeLat:30 for: %fadd_v8f16 = call half @llvm.vector.reduce.fadd.v8f16(half 0xH0000, <8 x half> undef)
-; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:76 CodeSize:46 Lat:92 SizeLat:60 for: %fadd_v16f16 = call half @llvm.vector.reduce.fadd.v16f16(half 0xH0000, <16 x half> undef)
+; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:12 CodeSize:5 Lat:10 SizeLat:6 for: %fadd_v2f16 = call half @llvm.vector.reduce.fadd.v2f16(half 0xH0000, <2 x half> undef)
+; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:26 CodeSize:11 Lat:22 SizeLat:14 for: %fadd_v4f16 = call half @llvm.vector.reduce.fadd.v4f16(half 0xH0000, <4 x half> undef)
+; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:54 CodeSize:23 Lat:46 SizeLat:30 for: %fadd_v8f16 = call half @llvm.vector.reduce.fadd.v8f16(half 0xH0000, <8 x half> undef)
+; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:108 CodeSize:46 Lat:92 SizeLat:60 for: %fadd_v16f16 = call half @llvm.vector.reduce.fadd.v16f16(half 0xH0000, <16 x half> undef)
; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
%fadd_v2f16 = call half @llvm.vector.reduce.fadd.v2f16(half 0.0, <2 x half> undef)
@@ -55,15 +55,15 @@ define void @strict_fp_reductions_fp16() {
define void @strict_fp_reductions_bf16() {
; CHECK-NOFP16-LABEL: 'strict_fp_reductions_bf16'
-; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:18 CodeSize:11 Lat:22 SizeLat:14 for: %fadd_v4f8 = call bfloat @llvm.vector.reduce.fadd.v4bf16(bfloat 0xR0000, <4 x bfloat> undef)
+; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:54 CodeSize:11 Lat:22 SizeLat:14 for: %fadd_v4f8 = call bfloat @llvm.vector.reduce.fadd.v4bf16(bfloat 0xR0000, <4 x bfloat> undef)
; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
; CHECK-F16-LABEL: 'strict_fp_reductions_bf16'
-; CHECK-F16-NEXT: Cost Model: Found costs of RThru:18 CodeSize:11 Lat:22 SizeLat:14 for: %fadd_v4f8 = call bfloat @llvm.vector.reduce.fadd.v4bf16(bfloat 0xR0000, <4 x bfloat> undef)
+; CHECK-F16-NEXT: Cost Model: Found costs of RThru:54 CodeSize:11 Lat:22 SizeLat:14 for: %fadd_v4f8 = call bfloat @llvm.vector.reduce.fadd.v4bf16(bfloat 0xR0000, <4 x bfloat> undef)
; CHECK-F16-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
; CHECK-BF16-LABEL: 'strict_fp_reductions_bf16'
-; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:14 CodeSize:11 Lat:22 SizeLat:14 for: %fadd_v4f8 = call bfloat @llvm.vector.reduce.fadd.v4bf16(bfloat 0xR0000, <4 x bfloat> undef)
+; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:26 CodeSize:11 Lat:22 SizeLat:14 for: %fadd_v4f8 = call bfloat @llvm.vector.reduce.fadd.v4bf16(bfloat 0xR0000, <4 x bfloat> undef)
; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
%fadd_v4f8 = call bfloat @llvm.vector.reduce.fadd.v4f8(bfloat 0.0, <4 x bfloat> undef)
@@ -117,16 +117,16 @@ define void @fast_fp_reductions() {
define void @fast_fp_reductions_fp16() {
; CHECK-NOFP16-LABEL: 'fast_fp_reductions_fp16'
-; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:5 CodeSize:4 Lat:6 SizeLat:4 for: %fadd_v2f16_fast = call fast half @llvm.vector.reduce.fadd.v2f16(half 0xH0000, <2 x half> undef)
-; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:5 CodeSize:4 Lat:6 SizeLat:4 for: %fadd_v2f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v2f16(half 0xH0000, <2 x half> undef)
-; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:10 CodeSize:8 Lat:12 SizeLat:8 for: %fadd_v4f16_fast = call fast half @llvm.vector.reduce.fadd.v4f16(half 0xH0000, <4 x half> undef)
-; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:10 CodeSize:8 Lat:12 SizeLat:8 for: %fadd_v4f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v4f16(half 0xH0000, <4 x half> undef)
-; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:30 CodeSize:27 Lat:33 SizeLat:27 for: %fadd_v8f16 = call fast half @llvm.vector.reduce.fadd.v8f16(half 0xH0000, <8 x half> undef)
-; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:30 CodeSize:27 Lat:33 SizeLat:27 for: %fadd_v8f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v8f16(half 0xH0000, <8 x half> undef)
-; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:48 CodeSize:44 Lat:52 SizeLat:44 for: %fadd_v16f16 = call fast half @llvm.vector.reduce.fadd.v16f16(half 0xH0000, <16 x half> undef)
-; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:48 CodeSize:44 Lat:52 SizeLat:44 for: %fadd_v16f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v16f16(half 0xH0000, <16 x half> undef)
-; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:38 CodeSize:35 Lat:41 SizeLat:35 for: %fadd_v11f16 = call fast half @llvm.vector.reduce.fadd.v11f16(half 0xH0000, <11 x half> undef)
-; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:38 CodeSize:35 Lat:41 SizeLat:35 for: %fadd_v13f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v13f16(half 0xH0000, <13 x half> undef)
+; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:7 CodeSize:4 Lat:6 SizeLat:4 for: %fadd_v2f16_fast = call fast half @llvm.vector.reduce.fadd.v2f16(half 0xH0000, <2 x half> undef)
+; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:7 CodeSize:4 Lat:6 SizeLat:4 for: %fadd_v2f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v2f16(half 0xH0000, <2 x half> undef)
+; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:14 CodeSize:8 Lat:12 SizeLat:8 for: %fadd_v4f16_fast = call fast half @llvm.vector.reduce.fadd.v4f16(half 0xH0000, <4 x half> undef)
+; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:14 CodeSize:8 Lat:12 SizeLat:8 for: %fadd_v4f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v4f16(half 0xH0000, <4 x half> undef)
+; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:48 CodeSize:27 Lat:33 SizeLat:27 for: %fadd_v8f16 = call fast half @llvm.vector.reduce.fadd.v8f16(half 0xH0000, <8 x half> undef)
+; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:48 CodeSize:27 Lat:33 SizeLat:27 for: %fadd_v8f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v8f16(half 0xH0000, <8 x half> undef)
+; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:72 CodeSize:44 Lat:52 SizeLat:44 for: %fadd_v16f16 = call fast half @llvm.vector.reduce.fadd.v16f16(half 0xH0000, <16 x half> undef)
+; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:72 CodeSize:44 Lat:52 SizeLat:44 for: %fadd_v16f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v16f16(half 0xH0000, <16 x half> undef)
+; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:203 CodeSize:35 Lat:41 SizeLat:35 for: %fadd_v11f16 = call fast half @llvm.vector.reduce.fadd.v11f16(half 0xH0000, <11 x half> undef)
+; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:248 CodeSize:35 Lat:41 SizeLat:35 for: %fadd_v13f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v13f16(half 0xH0000, <13 x half> undef)
; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
; CHECK-F16-LABEL: 'fast_fp_reductions_fp16'
@@ -143,16 +143,16 @@ define void @fast_fp_reductions_fp16() {
; CHECK-F16-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
; CHECK-BF16-LABEL: 'fast_fp_reductions_fp16'
-; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:5 CodeSize:4 Lat:6 SizeLat:4 for: %fadd_v2f16_fast = call fast half @llvm.vector.reduce.fadd.v2f16(half 0xH0000, <2 x half> undef)
-; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:5 CodeSize:4 Lat:6 SizeLat:4 for: %fadd_v2f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v2f16(half 0xH0000, <2 x half> undef)
-; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:10 CodeSize:8 Lat:12 SizeLat:8 for: %fadd_v4f16_fast = call fast half @llvm.vector.reduce.fadd.v4f16(half 0xH0000, <4 x half> undef)
-; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:10 CodeSize:8 Lat:12 SizeLat:8 for: %fadd_v4f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v4f16(half 0xH0000, <4 x half> undef)
-; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:30 CodeSize:27 Lat:33 SizeLat:27 for: %fadd_v8f16 = call fast half @llvm.vector.reduce.fadd.v8f16(half 0xH0000, <8 x half> undef)
-; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:30 CodeSize:27 Lat:33 SizeLat:27 for: %fadd_v8f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v8f16(half 0xH0000, <8 x half> undef)
-; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:48 CodeSize:44 Lat:52 SizeLat:44 for: %fadd_v16f16 = call fast half @llvm.vector.reduce.fadd.v16f16(half 0xH0000, <16 x half> undef)
-; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:48 CodeSize:44 Lat:52 SizeLat:44 for: %fadd_v16f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v16f16(half 0xH0000, <16 x half> undef)
-; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:38 CodeSize:35 Lat:41 SizeLat:35 for: %fadd_v11f16 = call fast half @llvm.vector.reduce.fadd.v11f16(half 0xH0000, <11 x half> undef)
-; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:38 CodeSize:35 Lat:41 SizeLat:35 for: %fadd_v13f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v13f16(half 0xH0000, <13 x half> undef)
+; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:7 CodeSize:4 Lat:6 SizeLat:4 for: %fadd_v2f16_fast = call fast half @llvm.vector.reduce.fadd.v2f16(half 0xH0000, <2 x half> undef)
+; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:7 CodeSize:4 Lat:6 SizeLat:4 for: %fadd_v2f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v2f16(half 0xH0000, <2 x half> undef)
+; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:14 CodeSize:8 Lat:12 SizeLat:8 for: %fadd_v4f16_fast = call fast half @llvm.vector.reduce.fadd.v4f16(half 0xH0000, <4 x half> undef)
+; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:14 CodeSize:8 Lat:12 SizeLat:8 for: %fadd_v4f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v4f16(half 0xH0000, <4 x half> undef)
+; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:48 CodeSize:27 Lat:33 SizeLat:27 for: %fadd_v8f16 = call fast half @llvm.vector.reduce.fadd.v8f16(half 0xH0000, <8 x half> undef)
+; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:48 CodeSize:27 Lat:33 SizeLat:27 for: %fadd_v8f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v8f16(half 0xH0000, <8 x half> undef)
+; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:72 CodeSize:44 Lat:52 SizeLat:44 for: %fadd_v16f16 = call fast half @llvm.vector.reduce.fadd.v16f16(half 0xH0000, <16 x half> undef)
+; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:72 CodeSize:44 Lat:52 SizeLat:44 for: %fadd_v16f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v16f16(half 0xH0000, <16 x half> undef)
+; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:203 CodeSize:35 Lat:41 SizeLat:35 for: %fadd_v11f16 = call fast half @llvm.vector.reduce.fadd.v11f16(half 0xH0000, <11 x half> undef)
+; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:248 CodeSize:35 Lat:41 SizeLat:35 for: %fadd_v13f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v13f16(half 0xH0000, <13 x half> undef)
; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
%fadd_v2f16_fast = call fast half @llvm.vector.reduce.fadd.v2f16(half 0.0, <2 x half> undef)
@@ -175,15 +175,15 @@ define void @fast_fp_reductions_fp16() {
define void @fast_fp_reductions_bf16() {
; CHECK-NOFP16-LABEL: 'fast_fp_reductions_bf16'
-; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:10 CodeSize:8 Lat:12 SizeLat:8 for: %fadd_v4f8 = call reassoc bfloat @llvm.vector.reduce.fadd.v4bf16(bfloat 0xR8000, <4 x bfloat> undef)
+; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:28 CodeSize:8 Lat:12 SizeLat:8 for: %fadd_v4f8 = call reassoc bfloat @llvm.vector.reduce.fadd.v4bf16(bfloat 0xR8000, <4 x bfloat> undef)
; CHECK-NOFP16-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
; CHECK-F16-LABEL: 'fast_fp_reductions_bf16'
-; CHECK-F16-NEXT: Cost Model: Found costs of RThru:10 CodeSize:8 Lat:12 SizeLat:8 for: %fadd_v4f8 = call reassoc bfloat @llvm.vector.reduce.fadd.v4bf16(bfloat 0xR8000, <4 x bfloat> undef)
+; CHECK-F16-NEXT: Cost Model: Found costs of RThru:28 CodeSize:8 Lat:12 SizeLat:8 for: %fadd_v4f8 = call reassoc bfloat @llvm.vector.reduce.fadd.v4bf16(bfloat 0xR8000, <4 x bfloat> undef)
; CHECK-F16-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
; CHECK-BF16-LABEL: 'fast_fp_reductions_bf16'
-; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:8 CodeSize:8 Lat:12 SizeLat:8 for: %fadd_v4f8 = call reassoc bfloat @llvm.vector.reduce.fadd.v4bf16(bfloat 0xR8000, <4 x bfloat> undef)
+; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:14 CodeSize:8 Lat:12 SizeLat:8 for: %fadd_v4f8 = call reassoc bfloat @llvm.vector.reduce.fadd.v4bf16(bfloat 0xR8000, <4 x bfloat> undef)
; CHECK-BF16-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
%fadd_v4f8 = call reassoc bfloat @llvm.vector.reduce.fadd.v4f8(bfloat -0.0, <4 x bfloat> undef)
diff --git a/llvm/test/Analysis/CostModel/AArch64/sve-arith-fp.ll b/llvm/test/Analysis/CostModel/AArch64/sve-arith-fp.ll
index dc95eac..f7ebd40 100644
--- a/llvm/test/Analysis/CostModel/AArch64/sve-arith-fp.ll
+++ b/llvm/test/Analysis/CostModel/AArch64/sve-arith-fp.ll
@@ -164,3 +164,55 @@ define void @frem() {
ret void
}
+
+define void @fma() {
+; CHECK-LABEL: 'fma'
+; CHECK-NEXT: Cost Model: Found costs of 2 for: %V4F16 = call <vscale x 4 x half> @llvm.fma.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef)
+; CHECK-NEXT: Cost Model: Found costs of 2 for: %V8F16 = call <vscale x 8 x half> @llvm.fma.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef)
+; CHECK-NEXT: Cost Model: Found costs of 4 for: %V16F16 = call <vscale x 16 x half> @llvm.fma.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, <vscale x 16 x half> undef)
+; CHECK-NEXT: Cost Model: Found costs of 2 for: %V2F32 = call <vscale x 2 x float> @llvm.fma.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef)
+; CHECK-NEXT: Cost Model: Found costs of 2 for: %V4F32 = call <vscale x 4 x float> @llvm.fma.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef)
+; CHECK-NEXT: Cost Model: Found costs of 4 for: %V8F32 = call <vscale x 8 x float> @llvm.fma.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, <vscale x 8 x float> undef)
+; CHECK-NEXT: Cost Model: Found costs of 2 for: %V2F64 = call <vscale x 2 x double> @llvm.fma.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef)
+; CHECK-NEXT: Cost Model: Found costs of 4 for: %V4F64 = call <vscale x 4 x double> @llvm.fma.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, <vscale x 4 x double> undef)
+; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+;
+ %V4F16 = call <vscale x 4 x half> @llvm.fma.v4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef)
+ %V8F16 = call <vscale x 8 x half> @llvm.fma.v8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef)
+ %V16F16 = call <vscale x 16 x half> @llvm.fma.v16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, <vscale x 16 x half> undef)
+
+ %V2F32 = call <vscale x 2 x float> @llvm.fma.v2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef)
+ %V4F32 = call <vscale x 4 x float> @llvm.fma.v4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef)
+ %V8F32 = call <vscale x 8 x float> @llvm.fma.v8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, <vscale x 8 x float> undef)
+
+ %V2F64 = call <vscale x 2 x double> @llvm.fma.v2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef)
+ %V4F64 = call <vscale x 4 x double> @llvm.fma.v4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, <vscale x 4 x double> undef)
+
+ ret void
+}
+
+define void @fmuladd() {
+; CHECK-LABEL: 'fmuladd'
+; CHECK-NEXT: Cost Model: Found costs of 2 for: %V4F16 = call <vscale x 4 x half> @llvm.fmuladd.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef)
+; CHECK-NEXT: Cost Model: Found costs of 2 for: %V8F16 = call <vscale x 8 x half> @llvm.fmuladd.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef)
+; CHECK-NEXT: Cost Model: Found costs of 4 for: %V16F16 = call <vscale x 16 x half> @llvm.fmuladd.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, <vscale x 16 x half> undef)
+; CHECK-NEXT: Cost Model: Found costs of 2 for: %V2F32 = call <vscale x 2 x float> @llvm.fmuladd.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef)
+; CHECK-NEXT: Cost Model: Found costs of 2 for: %V4F32 = call <vscale x 4 x float> @llvm.fmuladd.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef)
+; CHECK-NEXT: Cost Model: Found costs of 4 for: %V8F32 = call <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, <vscale x 8 x float> undef)
+; CHECK-NEXT: Cost Model: Found costs of 2 for: %V2F64 = call <vscale x 2 x double> @llvm.fmuladd.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef)
+; CHECK-NEXT: Cost Model: Found costs of 4 for: %V4F64 = call <vscale x 4 x double> @llvm.fmuladd.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, <vscale x 4 x double> undef)
+; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+;
+ %V4F16 = call <vscale x 4 x half> @llvm.fmuladd.v4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef)
+ %V8F16 = call <vscale x 8 x half> @llvm.fmuladd.v8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef)
+ %V16F16 = call <vscale x 16 x half> @llvm.fmuladd.v16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, <vscale x 16 x half> undef)
+
+ %V2F32 = call <vscale x 2 x float> @llvm.fmuladd.v2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef)
+ %V4F32 = call <vscale x 4 x float> @llvm.fmuladd.v4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef)
+ %V8F32 = call <vscale x 8 x float> @llvm.fmuladd.v8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, <vscale x 8 x float> undef)
+
+ %V2F64 = call <vscale x 2 x double> @llvm.fmuladd.v2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef)
+ %V4F64 = call <vscale x 4 x double> @llvm.fmuladd.v4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, <vscale x 4 x double> undef)
+
+ ret void
+}
diff --git a/llvm/test/Analysis/CostModel/AArch64/sve-intrinsics.ll b/llvm/test/Analysis/CostModel/AArch64/sve-intrinsics.ll
index 7e8d957..609a23b 100644
--- a/llvm/test/Analysis/CostModel/AArch64/sve-intrinsics.ll
+++ b/llvm/test/Analysis/CostModel/AArch64/sve-intrinsics.ll
@@ -1277,15 +1277,15 @@ define void @histogram_nxv16i8(<vscale x 16 x ptr> %buckets, <vscale x 16 x i1>
define void @histogram_v2i64(<2 x ptr> %buckets, <2 x i1> %mask) {
; CHECK-VSCALE-1-LABEL: 'histogram_v2i64'
-; CHECK-VSCALE-1-NEXT: Cost Model: Found costs of Invalid for: call void @llvm.experimental.vector.histogram.add.v2p0.i64(<2 x ptr> %buckets, i64 1, <2 x i1> %mask)
+; CHECK-VSCALE-1-NEXT: Cost Model: Found costs of RThru:10 CodeSize:8 Lat:10 SizeLat:10 for: call void @llvm.experimental.vector.histogram.add.v2p0.i64(<2 x ptr> %buckets, i64 1, <2 x i1> %mask)
; CHECK-VSCALE-1-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
; CHECK-VSCALE-2-LABEL: 'histogram_v2i64'
-; CHECK-VSCALE-2-NEXT: Cost Model: Found costs of Invalid for: call void @llvm.experimental.vector.histogram.add.v2p0.i64(<2 x ptr> %buckets, i64 1, <2 x i1> %mask)
+; CHECK-VSCALE-2-NEXT: Cost Model: Found costs of RThru:10 CodeSize:8 Lat:10 SizeLat:10 for: call void @llvm.experimental.vector.histogram.add.v2p0.i64(<2 x ptr> %buckets, i64 1, <2 x i1> %mask)
; CHECK-VSCALE-2-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
; TYPE_BASED_ONLY-LABEL: 'histogram_v2i64'
-; TYPE_BASED_ONLY-NEXT: Cost Model: Found costs of Invalid for: call void @llvm.experimental.vector.histogram.add.v2p0.i64(<2 x ptr> %buckets, i64 1, <2 x i1> %mask)
+; TYPE_BASED_ONLY-NEXT: Cost Model: Found costs of RThru:10 CodeSize:8 Lat:10 SizeLat:10 for: call void @llvm.experimental.vector.histogram.add.v2p0.i64(<2 x ptr> %buckets, i64 1, <2 x i1> %mask)
; TYPE_BASED_ONLY-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
call void @llvm.experimental.vector.histogram.add.v2p0.i64(<2 x ptr> %buckets, i64 1, <2 x i1> %mask)
@@ -1294,15 +1294,15 @@ define void @histogram_v2i64(<2 x ptr> %buckets, <2 x i1> %mask) {
define void @histogram_v4i32(<4 x ptr> %buckets, <4 x i1> %mask) {
; CHECK-VSCALE-1-LABEL: 'histogram_v4i32'
-; CHECK-VSCALE-1-NEXT: Cost Model: Found costs of Invalid for: call void @llvm.experimental.vector.histogram.add.v4p0.i32(<4 x ptr> %buckets, i32 1, <4 x i1> %mask)
+; CHECK-VSCALE-1-NEXT: Cost Model: Found costs of RThru:20 CodeSize:16 Lat:20 SizeLat:20 for: call void @llvm.experimental.vector.histogram.add.v4p0.i32(<4 x ptr> %buckets, i32 1, <4 x i1> %mask)
; CHECK-VSCALE-1-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
; CHECK-VSCALE-2-LABEL: 'histogram_v4i32'
-; CHECK-VSCALE-2-NEXT: Cost Model: Found costs of Invalid for: call void @llvm.experimental.vector.histogram.add.v4p0.i32(<4 x ptr> %buckets, i32 1, <4 x i1> %mask)
+; CHECK-VSCALE-2-NEXT: Cost Model: Found costs of RThru:20 CodeSize:16 Lat:20 SizeLat:20 for: call void @llvm.experimental.vector.histogram.add.v4p0.i32(<4 x ptr> %buckets, i32 1, <4 x i1> %mask)
; CHECK-VSCALE-2-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
; TYPE_BASED_ONLY-LABEL: 'histogram_v4i32'
-; TYPE_BASED_ONLY-NEXT: Cost Model: Found costs of Invalid for: call void @llvm.experimental.vector.histogram.add.v4p0.i32(<4 x ptr> %buckets, i32 1, <4 x i1> %mask)
+; TYPE_BASED_ONLY-NEXT: Cost Model: Found costs of RThru:20 CodeSize:16 Lat:20 SizeLat:20 for: call void @llvm.experimental.vector.histogram.add.v4p0.i32(<4 x ptr> %buckets, i32 1, <4 x i1> %mask)
; TYPE_BASED_ONLY-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
call void @llvm.experimental.vector.histogram.add.v4p0.i32(<4 x ptr> %buckets, i32 1, <4 x i1> %mask)
@@ -1311,15 +1311,15 @@ define void @histogram_v4i32(<4 x ptr> %buckets, <4 x i1> %mask) {
define void @histogram_v8i16(<8 x ptr> %buckets, <8 x i1> %mask) {
; CHECK-VSCALE-1-LABEL: 'histogram_v8i16'
-; CHECK-VSCALE-1-NEXT: Cost Model: Found costs of Invalid for: call void @llvm.experimental.vector.histogram.add.v8p0.i16(<8 x ptr> %buckets, i16 1, <8 x i1> %mask)
+; CHECK-VSCALE-1-NEXT: Cost Model: Found costs of RThru:40 CodeSize:32 Lat:40 SizeLat:40 for: call void @llvm.experimental.vector.histogram.add.v8p0.i16(<8 x ptr> %buckets, i16 1, <8 x i1> %mask)
; CHECK-VSCALE-1-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
; CHECK-VSCALE-2-LABEL: 'histogram_v8i16'
-; CHECK-VSCALE-2-NEXT: Cost Model: Found costs of Invalid for: call void @llvm.experimental.vector.histogram.add.v8p0.i16(<8 x ptr> %buckets, i16 1, <8 x i1> %mask)
+; CHECK-VSCALE-2-NEXT: Cost Model: Found costs of RThru:40 CodeSize:32 Lat:40 SizeLat:40 for: call void @llvm.experimental.vector.histogram.add.v8p0.i16(<8 x ptr> %buckets, i16 1, <8 x i1> %mask)
; CHECK-VSCALE-2-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
; TYPE_BASED_ONLY-LABEL: 'histogram_v8i16'
-; TYPE_BASED_ONLY-NEXT: Cost Model: Found costs of Invalid for: call void @llvm.experimental.vector.histogram.add.v8p0.i16(<8 x ptr> %buckets, i16 1, <8 x i1> %mask)
+; TYPE_BASED_ONLY-NEXT: Cost Model: Found costs of RThru:40 CodeSize:32 Lat:40 SizeLat:40 for: call void @llvm.experimental.vector.histogram.add.v8p0.i16(<8 x ptr> %buckets, i16 1, <8 x i1> %mask)
; TYPE_BASED_ONLY-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
call void @llvm.experimental.vector.histogram.add.v8p0.i16(<8 x ptr> %buckets, i16 1, <8 x i1> %mask)
@@ -1328,15 +1328,15 @@ define void @histogram_v8i16(<8 x ptr> %buckets, <8 x i1> %mask) {
define void @histogram_v16i8(<16 x ptr> %buckets, <16 x i1> %mask) {
; CHECK-VSCALE-1-LABEL: 'histogram_v16i8'
-; CHECK-VSCALE-1-NEXT: Cost Model: Found costs of Invalid for: call void @llvm.experimental.vector.histogram.add.v16p0.i8(<16 x ptr> %buckets, i8 1, <16 x i1> %mask)
+; CHECK-VSCALE-1-NEXT: Cost Model: Found costs of RThru:80 CodeSize:64 Lat:80 SizeLat:80 for: call void @llvm.experimental.vector.histogram.add.v16p0.i8(<16 x ptr> %buckets, i8 1, <16 x i1> %mask)
; CHECK-VSCALE-1-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
; CHECK-VSCALE-2-LABEL: 'histogram_v16i8'
-; CHECK-VSCALE-2-NEXT: Cost Model: Found costs of Invalid for: call void @llvm.experimental.vector.histogram.add.v16p0.i8(<16 x ptr> %buckets, i8 1, <16 x i1> %mask)
+; CHECK-VSCALE-2-NEXT: Cost Model: Found costs of RThru:80 CodeSize:64 Lat:80 SizeLat:80 for: call void @llvm.experimental.vector.histogram.add.v16p0.i8(<16 x ptr> %buckets, i8 1, <16 x i1> %mask)
; CHECK-VSCALE-2-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
; TYPE_BASED_ONLY-LABEL: 'histogram_v16i8'
-; TYPE_BASED_ONLY-NEXT: Cost Model: Found costs of Invalid for: call void @llvm.experimental.vector.histogram.add.v16p0.i8(<16 x ptr> %buckets, i8 1, <16 x i1> %mask)
+; TYPE_BASED_ONLY-NEXT: Cost Model: Found costs of RThru:80 CodeSize:64 Lat:80 SizeLat:80 for: call void @llvm.experimental.vector.histogram.add.v16p0.i8(<16 x ptr> %buckets, i8 1, <16 x i1> %mask)
; TYPE_BASED_ONLY-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
call void @llvm.experimental.vector.histogram.add.v16p0.i64(<16 x ptr> %buckets, i8 1, <16 x i1> %mask)
diff --git a/llvm/test/Analysis/CostModel/AArch64/vec3-ops.ll b/llvm/test/Analysis/CostModel/AArch64/vec3-ops.ll
index 6bcf3c7..f234341 100644
--- a/llvm/test/Analysis/CostModel/AArch64/vec3-ops.ll
+++ b/llvm/test/Analysis/CostModel/AArch64/vec3-ops.ll
@@ -206,8 +206,8 @@ define void @vec3_float(<3 x float> %a, <3 x float> %b, ptr %src, ptr %dst) {
define void @vec3_half(<3 x half> %a, <3 x half> %b, ptr %src, ptr %dst) {
; CHECK-LABEL: 'vec3_half'
; CHECK-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:4 SizeLat:1 for: %l = load <3 x half>, ptr %src, align 1
-; CHECK-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %add = fadd <3 x half> %l, %b
-; CHECK-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:3 SizeLat:1 for: %sub = fsub <3 x half> %add, %a
+; CHECK-NEXT: Cost Model: Found costs of RThru:4 CodeSize:1 Lat:3 SizeLat:1 for: %add = fadd <3 x half> %l, %b
+; CHECK-NEXT: Cost Model: Found costs of RThru:4 CodeSize:1 Lat:3 SizeLat:1 for: %sub = fsub <3 x half> %add, %a
; CHECK-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:1 SizeLat:1 for: store <3 x half> %sub, ptr %dst, align 1
; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
diff --git a/llvm/test/Analysis/CostModel/SystemZ/intrinsic-cost-crash.ll b/llvm/test/Analysis/CostModel/SystemZ/intrinsic-cost-crash.ll
index ecc7fc8..245e8f7 100644
--- a/llvm/test/Analysis/CostModel/SystemZ/intrinsic-cost-crash.ll
+++ b/llvm/test/Analysis/CostModel/SystemZ/intrinsic-cost-crash.ll
@@ -23,7 +23,7 @@
%"class.llvm::Metadata.306.1758.9986.10470.10954.11438.11922.12406.12890.13374.13858.15310.15794.16278.17730.19182.21118.25958.26926.29346.29830.30314.30798.31282.31766.32250.32734.33702.36606.38058.41638" = type { i8, i8, i16, i32 }
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end(i64, ptr nocapture) #0
+declare void @llvm.lifetime.end(ptr nocapture) #0
; Function Attrs: nounwind ssp uwtable
define hidden void @fun(ptr %N, i1 %arg) #1 align 2 {
@@ -42,7 +42,6 @@ for.cond.cleanup: ; preds = %for.body, %entry
for.body: ; preds = %for.body, %for.body.lr.ph
%indvars.iv190 = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next191, %for.body ]
- call void @llvm.lifetime.end(i64 16, ptr nonnull null)
%indvars.iv.next191 = add nuw nsw i64 %indvars.iv190, 1
%exitcond193 = icmp eq i64 %indvars.iv.next191, %wide.trip.count192
br i1 %exitcond193, label %for.cond.cleanup, label %for.body
diff --git a/llvm/test/Analysis/CostModel/X86/free-intrinsics.ll b/llvm/test/Analysis/CostModel/X86/free-intrinsics.ll
index 3a54428..cef960d 100644
--- a/llvm/test/Analysis/CostModel/X86/free-intrinsics.ll
+++ b/llvm/test/Analysis/CostModel/X86/free-intrinsics.ll
@@ -14,8 +14,8 @@ define i32 @trivially_free() {
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a2 = call ptr @llvm.launder.invariant.group.p0(ptr undef)
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a3 = call ptr @llvm.strip.invariant.group.p0(ptr undef)
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a4 = call i1 @llvm.is.constant.i32(i32 undef)
-; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.start.p0(i64 1, ptr %alloca)
-; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.end.p0(i64 1, ptr %alloca)
+; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.start.p0(ptr %alloca)
+; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.end.p0(ptr %alloca)
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a5 = call i64 @llvm.objectsize.i64.p0(ptr undef, i1 true, i1 true, i1 true)
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a6 = call ptr @llvm.ptr.annotation.p0.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef)
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.var.annotation.p0.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef)
@@ -32,8 +32,8 @@ define i32 @trivially_free() {
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a2 = call ptr @llvm.launder.invariant.group.p0(ptr undef)
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a3 = call ptr @llvm.strip.invariant.group.p0(ptr undef)
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a4 = call i1 @llvm.is.constant.i32(i32 undef)
-; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.start.p0(i64 1, ptr %alloca)
-; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.end.p0(i64 1, ptr %alloca)
+; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.start.p0(ptr %alloca)
+; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.end.p0(ptr %alloca)
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a5 = call i64 @llvm.objectsize.i64.p0(ptr undef, i1 true, i1 true, i1 true)
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a6 = call ptr @llvm.ptr.annotation.p0.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef)
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.var.annotation.p0.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef)
@@ -49,8 +49,8 @@ define i32 @trivially_free() {
%a2 = call ptr @llvm.launder.invariant.group.p0(ptr undef)
%a3 = call ptr @llvm.strip.invariant.group.p0(ptr undef)
%a4 = call i1 @llvm.is.constant.i32(i32 undef)
- call void @llvm.lifetime.start.p0(i64 1, ptr %alloca)
- call void @llvm.lifetime.end.p0(i64 1, ptr %alloca)
+ call void @llvm.lifetime.start.p0(ptr %alloca)
+ call void @llvm.lifetime.end.p0(ptr %alloca)
%a5 = call i64 @llvm.objectsize.i64.p0(ptr undef, i1 1, i1 1, i1 1)
%a6 = call ptr @llvm.ptr.annotation.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef)
call void @llvm.var.annotation(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef)
@@ -66,8 +66,8 @@ declare void @llvm.invariant.end.p0(ptr, i64, ptr)
declare ptr @llvm.launder.invariant.group.p0(ptr)
declare ptr @llvm.strip.invariant.group.p0(ptr)
declare i1 @llvm.is.constant.i32(i32)
-declare void @llvm.lifetime.start.p0(i64, ptr)
-declare void @llvm.lifetime.end.p0(i64, ptr)
+declare void @llvm.lifetime.start.p0(ptr)
+declare void @llvm.lifetime.end.p0(ptr)
declare i64 @llvm.objectsize.i64.p0(ptr, i1, i1, i1)
declare ptr @llvm.ptr.annotation.p0(ptr, ptr, ptr, i32, ptr)
declare void @llvm.var.annotation(ptr, ptr, ptr, i32, ptr)
diff --git a/llvm/test/Analysis/CostModel/free-intrinsics-datalayout.ll b/llvm/test/Analysis/CostModel/free-intrinsics-datalayout.ll
index 96064dc..2acc8e8 100644
--- a/llvm/test/Analysis/CostModel/free-intrinsics-datalayout.ll
+++ b/llvm/test/Analysis/CostModel/free-intrinsics-datalayout.ll
@@ -16,8 +16,8 @@ define i32 @trivially_free() {
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a2 = call ptr @llvm.launder.invariant.group.p0(ptr undef)
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a3 = call ptr @llvm.strip.invariant.group.p0(ptr undef)
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a4 = call i1 @llvm.is.constant.i32(i32 undef)
-; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.start.p0(i64 1, ptr %alloca)
-; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.end.p0(i64 1, ptr %alloca)
+; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.start.p0(ptr %alloca)
+; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.end.p0(ptr %alloca)
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a5 = call i64 @llvm.objectsize.i64.p0(ptr undef, i1 true, i1 true, i1 true)
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a6 = call ptr @llvm.ptr.annotation.p0.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef)
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a7 = call i1 @llvm.allow.ubsan.check(i8 123)
@@ -36,8 +36,8 @@ define i32 @trivially_free() {
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a2 = call ptr @llvm.launder.invariant.group.p0(ptr undef)
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a3 = call ptr @llvm.strip.invariant.group.p0(ptr undef)
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a4 = call i1 @llvm.is.constant.i32(i32 undef)
-; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.start.p0(i64 1, ptr %alloca)
-; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.end.p0(i64 1, ptr %alloca)
+; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.start.p0(ptr %alloca)
+; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.end.p0(ptr %alloca)
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a5 = call i64 @llvm.objectsize.i64.p0(ptr undef, i1 true, i1 true, i1 true)
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a6 = call ptr @llvm.ptr.annotation.p0.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef)
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a7 = call i1 @llvm.allow.ubsan.check(i8 123)
@@ -55,8 +55,8 @@ define i32 @trivially_free() {
%a2 = call ptr @llvm.launder.invariant.group.p0(ptr undef)
%a3 = call ptr @llvm.strip.invariant.group.p0(ptr undef)
%a4 = call i1 @llvm.is.constant.i32(i32 undef)
- call void @llvm.lifetime.start.p0(i64 1, ptr %alloca)
- call void @llvm.lifetime.end.p0(i64 1, ptr %alloca)
+ call void @llvm.lifetime.start.p0(ptr %alloca)
+ call void @llvm.lifetime.end.p0(ptr %alloca)
%a5 = call i64 @llvm.objectsize.i64.p0(ptr undef, i1 1, i1 1, i1 1)
%a6 = call ptr @llvm.ptr.annotation.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef)
%a7 = call i1 @llvm.allow.ubsan.check(i8 123)
@@ -74,8 +74,8 @@ declare void @llvm.invariant.end.p0(ptr, i64, ptr)
declare ptr @llvm.launder.invariant.group.p0(ptr)
declare ptr @llvm.strip.invariant.group.p0(ptr)
declare i1 @llvm.is.constant.i32(i32)
-declare void @llvm.lifetime.start.p0(i64, ptr)
-declare void @llvm.lifetime.end.p0(i64, ptr)
+declare void @llvm.lifetime.start.p0(ptr)
+declare void @llvm.lifetime.end.p0(ptr)
declare i64 @llvm.objectsize.i64.p0(ptr, i1, i1, i1)
declare ptr @llvm.ptr.annotation.p0(ptr, ptr, ptr, i32, ptr)
declare void @llvm.var.annotation(ptr, ptr, ptr, i32, ptr)
diff --git a/llvm/test/Analysis/CostModel/free-intrinsics-no_info.ll b/llvm/test/Analysis/CostModel/free-intrinsics-no_info.ll
index f989ebe..7f002d0 100644
--- a/llvm/test/Analysis/CostModel/free-intrinsics-no_info.ll
+++ b/llvm/test/Analysis/CostModel/free-intrinsics-no_info.ll
@@ -14,8 +14,8 @@ define i32 @trivially_free() {
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a2 = call ptr @llvm.launder.invariant.group.p0(ptr undef)
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a3 = call ptr @llvm.strip.invariant.group.p0(ptr undef)
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a4 = call i1 @llvm.is.constant.i32(i32 undef)
-; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.start.p0(i64 1, ptr %alloca)
-; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.end.p0(i64 1, ptr %alloca)
+; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.start.p0(ptr %alloca)
+; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.end.p0(ptr %alloca)
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a5 = call i64 @llvm.objectsize.i64.p0(ptr undef, i1 true, i1 true, i1 true)
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a6 = call ptr @llvm.ptr.annotation.p0.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef)
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.var.annotation.p0.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef)
@@ -34,8 +34,8 @@ define i32 @trivially_free() {
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a2 = call ptr @llvm.launder.invariant.group.p0(ptr undef)
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a3 = call ptr @llvm.strip.invariant.group.p0(ptr undef)
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a4 = call i1 @llvm.is.constant.i32(i32 undef)
-; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.start.p0(i64 1, ptr %alloca)
-; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.end.p0(i64 1, ptr %alloca)
+; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.start.p0(ptr %alloca)
+; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.end.p0(ptr %alloca)
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a5 = call i64 @llvm.objectsize.i64.p0(ptr undef, i1 true, i1 true, i1 true)
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a6 = call ptr @llvm.ptr.annotation.p0.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef)
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.var.annotation.p0.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef)
@@ -53,8 +53,8 @@ define i32 @trivially_free() {
%a2 = call ptr @llvm.launder.invariant.group.p0(ptr undef)
%a3 = call ptr @llvm.strip.invariant.group.p0(ptr undef)
%a4 = call i1 @llvm.is.constant.i32(i32 undef)
- call void @llvm.lifetime.start.p0(i64 1, ptr %alloca)
- call void @llvm.lifetime.end.p0(i64 1, ptr %alloca)
+ call void @llvm.lifetime.start.p0(ptr %alloca)
+ call void @llvm.lifetime.end.p0(ptr %alloca)
%a5 = call i64 @llvm.objectsize.i64.p0(ptr undef, i1 1, i1 1, i1 1)
%a6 = call ptr @llvm.ptr.annotation.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef)
call void @llvm.var.annotation(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef)
@@ -72,8 +72,8 @@ declare void @llvm.invariant.end.p0(ptr, i64, ptr)
declare ptr @llvm.launder.invariant.group.p0(ptr)
declare ptr @llvm.strip.invariant.group.p0(ptr)
declare i1 @llvm.is.constant.i32(i32)
-declare void @llvm.lifetime.start.p0(i64, ptr)
-declare void @llvm.lifetime.end.p0(i64, ptr)
+declare void @llvm.lifetime.start.p0(ptr)
+declare void @llvm.lifetime.end.p0(ptr)
declare i64 @llvm.objectsize.i64.p0(ptr, i1, i1, i1)
declare ptr @llvm.ptr.annotation.p0(ptr, ptr, ptr, i32, ptr)
declare void @llvm.var.annotation(ptr, ptr, ptr, i32, ptr)
diff --git a/llvm/test/Analysis/DDG/basic-loopnest.ll b/llvm/test/Analysis/DDG/basic-loopnest.ll
index 325428c..75efff5 100644
--- a/llvm/test/Analysis/DDG/basic-loopnest.ll
+++ b/llvm/test/Analysis/DDG/basic-loopnest.ll
@@ -1,5 +1,8 @@
; RUN: opt < %s -disable-output "-passes=print<ddg>" 2>&1 | FileCheck %s
+; XFAIL: *
+; At the moment, DependenceAnalysis cannot infer `n` to be positive.
+
; CHECK-LABEL: 'DDG' for loop 'test1.for.cond1.preheader':
@@ -378,4 +381,4 @@ for.inc12: ; preds = %for.body4, %test2.f
for.end14: ; preds = %for.inc12, %entry
ret void
-} \ No newline at end of file
+}
diff --git a/llvm/test/Analysis/Delinearization/fixed_size_array.ll b/llvm/test/Analysis/Delinearization/fixed_size_array.ll
new file mode 100644
index 0000000..0512044
--- /dev/null
+++ b/llvm/test/Analysis/Delinearization/fixed_size_array.ll
@@ -0,0 +1,499 @@
+; RUN: opt < %s -passes='print<delinearization>' -disable-output -delinearize-use-fixed-size-array-heuristic 2>&1 | FileCheck %s
+
+; void f(int A[][8][32]) {
+; for (i = 0; i < 42; i++)
+; for (j = 0; j < 8; j++)
+; for (k = 0; k < 32; k++)
+; A[i][j][k] = 1;
+; }
+
+; CHECK: Delinearization on function a_i_j_k:
+; CHECK: Base offset: %a
+; CHECK-NEXT: ArrayDecl[UnknownSize][8][32] with elements of 4 bytes.
+; CHECK-NEXT: ArrayRef[{0,+,1}<nuw><nsw><%for.i.header>][{0,+,1}<nuw><nsw><%for.j.header>][{0,+,1}<nuw><nsw><%for.k>]
+define void @a_i_j_k(ptr %a) {
+entry:
+ br label %for.i.header
+
+for.i.header:
+ %i = phi i32 [ 0, %entry ], [ %i.inc, %for.i.latch ]
+ br label %for.j.header
+
+for.j.header:
+ %j = phi i32 [ 0, %for.i.header ], [ %j.inc, %for.j.latch ]
+ br label %for.k
+
+for.k:
+ %k = phi i32 [ 0, %for.j.header ], [ %k.inc, %for.k ]
+ %idx = getelementptr [8 x [32 x i32]], ptr %a, i32 %i, i32 %j, i32 %k
+ store i32 1, ptr %idx
+ %k.inc = add i32 %k, 1
+ %cmp.k = icmp slt i32 %k.inc, 32
+ br i1 %cmp.k, label %for.k, label %for.j.latch
+
+for.j.latch:
+ %j.inc = add i32 %j, 1
+ %cmp.j = icmp slt i32 %j.inc, 8
+ br i1 %cmp.j, label %for.j.header, label %for.i.latch
+
+for.i.latch:
+ %i.inc = add i32 %i, 1
+ %cmp.i = icmp slt i32 %i.inc, 42
+ br i1 %cmp.i, label %for.i.header, label %exit
+
+exit:
+ ret void
+}
+
+; void f(int A[][8][32]) {
+; for (i = 0; i < 42; i++)
+; for (j = 0; j < 8; j++)
+; for (k = 0; k < 32; k++)
+; A[i][7-j][k] = 1;
+; }
+
+; CHECK: Delinearization on function a_i_nj_k:
+; CHECK: Base offset: %a
+; CHECK-NEXT: ArrayDecl[UnknownSize][8][32] with elements of 4 bytes.
+; CHECK-NEXT: ArrayRef[{0,+,1}<nuw><nsw><%for.i.header>][{7,+,-1}<nsw><%for.j.header>][{0,+,1}<nuw><nsw><%for.k>]
+define void @a_i_nj_k(ptr %a) {
+entry:
+ br label %for.i.header
+
+for.i.header:
+ %i = phi i32 [ 0, %entry ], [ %i.inc, %for.i.latch ]
+ br label %for.j.header
+
+for.j.header:
+ %j = phi i32 [ 0, %for.i.header ], [ %j.inc, %for.j.latch ]
+ %j.subscript = sub i32 7, %j
+ br label %for.k
+
+for.k:
+ %k = phi i32 [ 0, %for.j.header ], [ %k.inc, %for.k ]
+ %idx = getelementptr [8 x [32 x i32]], ptr %a, i32 %i, i32 %j.subscript, i32 %k
+ store i32 1, ptr %idx
+ %k.inc = add i32 %k, 1
+ %cmp.k = icmp slt i32 %k.inc, 32
+ br i1 %cmp.k, label %for.k, label %for.j.latch
+
+for.j.latch:
+ %j.inc = add i32 %j, 1
+ %cmp.j = icmp slt i32 %j.inc, 8
+ br i1 %cmp.j, label %for.j.header, label %for.i.latch
+
+for.i.latch:
+ %i.inc = add i32 %i, 1
+ %cmp.i = icmp slt i32 %i.inc, 42
+ br i1 %cmp.i, label %for.i.header, label %exit
+
+exit:
+ ret void
+}
+
+; In the following code, the access functions for both stores are represented
+; in the same way in SCEV, so the delinearization results are also the same. We
+; don't have any type information about the underlying objects.
+;
+; void f(int A[][4][64], int B[][8][32]) {
+; for (i = 0; i < 42; i++)
+; for (j = 0; j < 4; j++)
+; for (k = 0; k < 32; k++) {
+; A[i][j][k] = 1;
+; B[i][2*j][k] = 1;
+; }
+; }
+
+; CHECK: Delinearization on function a_ijk_b_i2jk:
+; CHECK: Base offset: %a
+; CHECK-NEXT: ArrayDecl[UnknownSize][4][64] with elements of 4 bytes.
+; CHECK-NEXT: ArrayRef[{0,+,1}<nuw><nsw><%for.i.header>][{0,+,1}<nuw><nsw><%for.j.header>][{0,+,1}<nuw><nsw><%for.k>]
+; CHECK: Base offset: %b
+; CHECK-NEXT: ArrayDecl[UnknownSize][4][64] with elements of 4 bytes.
+; CHECK-NEXT: ArrayRef[{0,+,1}<nuw><nsw><%for.i.header>][{0,+,1}<nuw><nsw><%for.j.header>][{0,+,1}<nuw><nsw><%for.k>]
+define void @a_ijk_b_i2jk(ptr %a, ptr %b) {
+entry:
+ br label %for.i.header
+
+for.i.header:
+ %i = phi i32 [ 0, %entry ], [ %i.inc, %for.i.latch ]
+ br label %for.j.header
+
+for.j.header:
+ %j = phi i32 [ 0, %for.i.header ], [ %j.inc, %for.j.latch ]
+ %j2 = shl i32 %j, 1
+ br label %for.k
+
+for.k:
+ %k = phi i32 [ 0, %for.j.header ], [ %k.inc, %for.k ]
+ %a.idx = getelementptr [4 x [64 x i32]], ptr %a, i32 %i, i32 %j, i32 %k
+ %b.idx = getelementptr [8 x [32 x i32]], ptr %b, i32 %i, i32 %j2, i32 %k
+ store i32 1, ptr %a.idx
+ store i32 1, ptr %b.idx
+ %k.inc = add i32 %k, 1
+ %cmp.k = icmp slt i32 %k.inc, 32
+ br i1 %cmp.k, label %for.k, label %for.j.latch
+
+for.j.latch:
+ %j.inc = add i32 %j, 1
+ %cmp.j = icmp slt i32 %j.inc, 4
+ br i1 %cmp.j, label %for.j.header, label %for.i.latch
+
+for.i.latch:
+ %i.inc = add i32 %i, 1
+ %cmp.i = icmp slt i32 %i.inc, 42
+ br i1 %cmp.i, label %for.i.header, label %exit
+
+exit:
+ ret void
+}
+
+; The type information of the underlying object is not available, so the
+; delinearization result differs from the original array size. In this case,
+; the underlying object has type int[][8][32], but the delinearization
+; result looks like int[][4][64].
+;
+; void f(int A[][8][32]) {
+; for (i = 0; i < 42; i++)
+; for (j = 0; j < 3; j++)
+; for (k = 0; k < 32; k++)
+; A[i][2*j+1][k] = 1;
+; }
+
+; CHECK: Delinearization on function a_i_2j1_k:
+; CHECK: Base offset: %a
+; CHECK-NEXT: ArrayDecl[UnknownSize][4][64] with elements of 4 bytes.
+; CHECK-NEXT: ArrayRef[{0,+,1}<nuw><nsw><%for.i.header>][{0,+,1}<nuw><%for.j.header>][{32,+,1}<nw><%for.k>]
+define void @a_i_2j1_k(ptr %a) {
+entry:
+ br label %for.i.header
+
+for.i.header:
+ %i = phi i32 [ 0, %entry ], [ %i.inc, %for.i.latch ]
+ br label %for.j.header
+
+for.j.header:
+ %j = phi i32 [ 0, %for.i.header ], [ %j.inc, %for.j.latch ]
+ %j2 = shl i32 %j, 1
+ %j.subscript = add i32 %j2, 1
+ br label %for.k
+
+for.k:
+ %k = phi i32 [ 0, %for.j.header ], [ %k.inc, %for.k ]
+ %idx = getelementptr [8 x [32 x i32]], ptr %a, i32 %i, i32 %j.subscript, i32 %k
+ store i32 1, ptr %idx
+ %k.inc = add i32 %k, 1
+ %cmp.k = icmp slt i32 %k.inc, 32
+ br i1 %cmp.k, label %for.k, label %for.j.latch
+
+for.j.latch:
+ %j.inc = add i32 %j, 1
+ %cmp.j = icmp slt i32 %j.inc, 3
+ br i1 %cmp.j, label %for.j.header, label %for.i.latch
+
+for.i.latch:
+ %i.inc = add i32 %i, 1
+ %cmp.i = icmp slt i32 %i.inc, 42
+ br i1 %cmp.i, label %for.i.header, label %exit
+
+exit:
+ ret void
+}
+
+; Fail to delinearize because the step recurrence value of the i-loop is not
+; divisible by that of the j-loop.
+;
+; void f(int A[][8][32]) {
+; for (i = 0; i < 42; i++)
+; for (j = 0; j < 2; j++)
+; for (k = 0; k < 42; k++)
+; A[i][3*j][k] = 1;
+; }
+
+; CHECK: Delinearization on function a_i_3j_k:
+; CHECK: AccessFunction: {{...}}0,+,1024}<nuw><nsw><%for.i.header>,+,384}<nw><%for.j.header>,+,4}<nw><%for.k>
+; CHECK-NEXT: failed to delinearize
+define void @a_i_3j_k(ptr %a) {
+entry:
+ br label %for.i.header
+
+for.i.header:
+ %i = phi i32 [ 0, %entry ], [ %i.inc, %for.i.latch ]
+ br label %for.j.header
+
+for.j.header:
+ %j = phi i32 [ 0, %for.i.header ], [ %j.inc, %for.j.latch ]
+ %j.subscript = mul i32 %j, 3
+ br label %for.k
+
+for.k:
+ %k = phi i32 [ 0, %for.j.header ], [ %k.inc, %for.k ]
+ %idx = getelementptr [8 x [32 x i32]], ptr %a, i32 %i, i32 %j.subscript, i32 %k
+ store i32 1, ptr %idx
+ %k.inc = add i32 %k, 1
+ %cmp.k = icmp slt i32 %k.inc, 42
+ br i1 %cmp.k, label %for.k, label %for.j.latch
+
+for.j.latch:
+ %j.inc = add i32 %j, 1
+ %cmp.j = icmp slt i32 %j.inc, 2
+ br i1 %cmp.j, label %for.j.header, label %for.i.latch
+
+for.i.latch:
+ %i.inc = add i32 %i, 1
+ %cmp.i = icmp slt i32 %i.inc, 42
+ br i1 %cmp.i, label %for.i.header, label %exit
+
+exit:
+ ret void
+}
+
+; Although the step recurrence value of the j-loop is not divisible by that of
+; the k-loop, delinearization is possible because we know that the "actual"
+; stride width for the last dimension is 4 instead of 12.
+;
+; void f(int A[][8][32]) {
+; for (i = 0; i < 42; i++)
+; for (j = 0; j < 8; j++)
+; for (k = 0; k < 10; k++)
+; A[i][j][3*k] = 1;
+; }
+
+; CHECK: Delinearization on function a_i_j_3k:
+; CHECK: Base offset: %a
+; CHECK-NEXT: ArrayDecl[UnknownSize][8][32] with elements of 4 bytes.
+; CHECK-NEXT: ArrayRef[{0,+,1}<nuw><nsw><%for.i.header>][{0,+,1}<nuw><nsw><%for.j.header>][{0,+,3}<nuw><nsw><%for.k>]
+define void @a_i_j_3k(ptr %a) {
+entry:
+ br label %for.i.header
+
+for.i.header:
+ %i = phi i32 [ 0, %entry ], [ %i.inc, %for.i.latch ]
+ br label %for.j.header
+
+for.j.header:
+ %j = phi i32 [ 0, %for.i.header ], [ %j.inc, %for.j.latch ]
+ br label %for.k
+
+for.k:
+ %k = phi i32 [ 0, %for.j.header ], [ %k.inc, %for.k ]
+ %k.subscript = mul i32 %k, 3
+ %idx = getelementptr [8 x [32 x i32]], ptr %a, i32 %i, i32 %j, i32 %k.subscript
+ store i32 1, ptr %idx
+ %k.inc = add i32 %k, 1
+ %cmp.k = icmp slt i32 %k.inc, 10
+ br i1 %cmp.k, label %for.k, label %for.j.latch
+
+for.j.latch:
+ %j.inc = add i32 %j, 1
+ %cmp.j = icmp slt i32 %j.inc, 8
+ br i1 %cmp.j, label %for.j.header, label %for.i.latch
+
+for.i.latch:
+ %i.inc = add i32 %i, 1
+ %cmp.i = icmp slt i32 %i.inc, 42
+ br i1 %cmp.i, label %for.i.header, label %exit
+
+exit:
+ ret void
+}
+
+; Fail to delinearize because i is used in multiple subscripts that are not adjacent.
+;
+; void f(int A[][8][32]) {
+; for (i = 0; i < 32; i++)
+; for (j = 0; j < 2; j++)
+; for (k = 0; k < 4; k++)
+; A[i][2*j+k][i] = 1;
+; }
+
+; CHECK: Delinearization on function a_i_j2k_i:
+; CHECK: AccessFunction: {{...}}0,+,1028}<%for.i.header>,+,256}<nw><%for.j.header>,+,128}<nw><%for.k>
+; CHECK-NEXT: failed to delinearize
+define void @a_i_j2k_i(ptr %a) {
+entry:
+ br label %for.i.header
+
+for.i.header:
+ %i = phi i32 [ 0, %entry ], [ %i.inc, %for.i.latch ]
+ br label %for.j.header
+
+for.j.header:
+ %j = phi i32 [ 0, %for.i.header ], [ %j.inc, %for.j.latch ]
+ br label %for.k
+
+for.k:
+ %k = phi i32 [ 0, %for.j.header ], [ %k.inc, %for.k ]
+ %j2 = shl i32 %j, 1
+ %j2.k = add i32 %j2, %k
+ %idx = getelementptr [8 x [32 x i32]], ptr %a, i32 %i, i32 %j2.k, i32 %i
+ store i32 1, ptr %idx
+ %k.inc = add i32 %k, 1
+ %cmp.k = icmp slt i32 %k.inc, 4
+ br i1 %cmp.k, label %for.k, label %for.j.latch
+
+for.j.latch:
+ %j.inc = add i32 %j, 1
+ %cmp.j = icmp slt i32 %j.inc, 2
+ br i1 %cmp.j, label %for.j.header, label %for.i.latch
+
+for.i.latch:
+ %i.inc = add i32 %i, 1
+ %cmp.i = icmp slt i32 %i.inc, 32
+ br i1 %cmp.i, label %for.i.header, label %exit
+
+exit:
+ ret void
+}
+
+; Can delinearize, but the result is different from the original array size. In
+; this case, the outermost two dimensions are melded into one.
+;
+; void f(int A[][8][32]) {
+; for (i = 0; i < 8; i++)
+; for (j = 0; j < 10; j++)
+; for (k = 0; k < 10; k++)
+; A[i][i][j+k] = 1;
+; }
+
+; CHECK: Delinearization on function a_i_i_jk:
+; CHECK: Base offset: %a
+; CHECK-NEXT: ArrayDecl[UnknownSize][288] with elements of 4 bytes.
+; CHECK-NEXT: ArrayRef[{0,+,1}<nuw><nsw><%for.i.header>][{{..}}0,+,1}<nuw><nsw><%for.j.header>,+,1}<nuw><nsw><%for.k>]
+define void @a_i_i_jk(ptr %a) {
+entry:
+ br label %for.i.header
+
+for.i.header:
+ %i = phi i32 [ 0, %entry ], [ %i.inc, %for.i.latch ]
+ br label %for.j.header
+
+for.j.header:
+ %j = phi i32 [ 0, %for.i.header ], [ %j.inc, %for.j.latch ]
+ br label %for.k
+
+for.k:
+ %k = phi i32 [ 0, %for.j.header ], [ %k.inc, %for.k ]
+ %jk = add i32 %j, %k
+ %idx = getelementptr [8 x [32 x i32]], ptr %a, i32 %i, i32 %i, i32 %jk
+ store i32 1, ptr %idx
+ %k.inc = add i32 %k, 1
+ %cmp.k = icmp slt i32 %k.inc, 10
+ br i1 %cmp.k, label %for.k, label %for.j.latch
+
+for.j.latch:
+ %j.inc = add i32 %j, 1
+ %cmp.j = icmp slt i32 %j.inc, 10
+ br i1 %cmp.j, label %for.j.header, label %for.i.latch
+
+for.i.latch:
+ %i.inc = add i32 %i, 1
+ %cmp.i = icmp slt i32 %i.inc, 8
+ br i1 %cmp.i, label %for.i.header, label %exit
+
+exit:
+ ret void
+}
+
+; void f(int A[][8][32]) {
+; for (i = 0; i < 8; i++)
+; for (j = 0; j < 4; j++)
+; for (k = 0; k < 4; k++)
+; for (l = 0; l < 32; l++)
+; A[i][j+k][l] = 1;
+; }
+
+; CHECK: Delinearization on function a_i_jk_l:
+; CHECK: Base offset: %a
+; CHECK-NEXT: ArrayDecl[UnknownSize][8][32] with elements of 4 bytes.
+; CHECK-NEXT: ArrayRef[{0,+,1}<nuw><nsw><%for.i.header>][{{..}}0,+,1}<nuw><nsw><%for.j.header>,+,1}<nuw><nsw><%for.k.header>][{0,+,1}<nuw><nsw><%for.l>]
+
+define void @a_i_jk_l(ptr %a) {
+entry:
+ br label %for.i.header
+
+for.i.header:
+ %i = phi i32 [ 0, %entry ], [ %i.inc, %for.i.latch ]
+ br label %for.j.header
+
+for.j.header:
+ %j = phi i32 [ 0, %for.i.header ], [ %j.inc, %for.j.latch ]
+ br label %for.k.header
+
+for.k.header:
+ %k = phi i32 [ 0, %for.j.header ], [ %k.inc, %for.k.latch ]
+ %jk = add i32 %j, %k
+ br label %for.l
+
+for.l:
+ %l = phi i32 [ 0, %for.k.header ], [ %l.inc, %for.l ]
+ %idx = getelementptr [8 x [32 x i32]], ptr %a, i32 %i, i32 %jk, i32 %l
+ store i32 1, ptr %idx
+ %l.inc = add i32 %l, 1
+ %cmp.l = icmp slt i32 %l.inc, 32
+ br i1 %cmp.l, label %for.l, label %for.k.latch
+
+for.k.latch:
+ %k.inc = add i32 %k, 1
+ %cmp.k = icmp slt i32 %k.inc, 4
+ br i1 %cmp.k, label %for.k.header, label %for.j.latch
+
+for.j.latch:
+ %j.inc = add i32 %j, 1
+ %cmp.j = icmp slt i32 %j.inc, 4
+ br i1 %cmp.j, label %for.j.header, label %for.i.latch
+
+for.i.latch:
+ %i.inc = add i32 %i, 1
+ %cmp.i = icmp slt i32 %i.inc, 8
+ br i1 %cmp.i, label %for.i.header, label %exit
+
+exit:
+ ret void
+}
+
+; Reject if the address is not a multiple of the element size.
+;
+; void f(int *A) {
+; for (i = 0; i < 42; i++)
+; for (j = 0; j < 8; j++)
+; for (k = 0; k < 32; k++)
+; *((int *)((char *)A + i*256 + j*32 + k)) = 1;
+; }
+
+; CHECK: Delinearization on function non_divisible_by_element_size:
+; CHECK: AccessFunction: {{...}}0,+,256}<nuw><nsw><%for.i.header>,+,32}<nw><%for.j.header>,+,1}<nw><%for.k>
+; CHECK-NEXT: failed to delinearize
+define void @non_divisible_by_element_size(ptr %a) {
+entry:
+ br label %for.i.header
+
+for.i.header:
+ %i = phi i32 [ 0, %entry ], [ %i.inc, %for.i.latch ]
+ br label %for.j.header
+
+for.j.header:
+ %j = phi i32 [ 0, %for.i.header ], [ %j.inc, %for.j.latch ]
+ br label %for.k
+
+for.k:
+ %k = phi i32 [ 0, %for.j.header ], [ %k.inc, %for.k ]
+ %idx = getelementptr [8 x [32 x i8]], ptr %a, i32 %i, i32 %j, i32 %k
+ store i32 1, ptr %idx
+ %k.inc = add i32 %k, 1
+ %cmp.k = icmp slt i32 %k.inc, 32
+ br i1 %cmp.k, label %for.k, label %for.j.latch
+
+for.j.latch:
+ %j.inc = add i32 %j, 1
+ %cmp.j = icmp slt i32 %j.inc, 8
+ br i1 %cmp.j, label %for.j.header, label %for.i.latch
+
+for.i.latch:
+ %i.inc = add i32 %i, 1
+ %cmp.i = icmp slt i32 %i.inc, 42
+ br i1 %cmp.i, label %for.i.header, label %exit
+
+exit:
+ ret void
+}
diff --git a/llvm/test/Analysis/DependenceAnalysis/Coupled.ll b/llvm/test/Analysis/DependenceAnalysis/Coupled.ll
index 06bfc5d..1d45134 100644
--- a/llvm/test/Analysis/DependenceAnalysis/Coupled.ll
+++ b/llvm/test/Analysis/DependenceAnalysis/Coupled.ll
@@ -719,12 +719,14 @@ for.end: ; preds = %for.body
;; for(int j = 0; j < M; j+=1)
;; A[M*N + M*i + j] = 2;
+; FIXME: Currently failing to infer that %M is positive.
+
define void @couple_weakzerosiv(ptr noalias nocapture %A, i64 %N, i64 %M) {
; CHECK-LABEL: 'couple_weakzerosiv'
; CHECK-NEXT: Src: store i32 1, ptr %arrayidx.us, align 4 --> Dst: store i32 1, ptr %arrayidx.us, align 4
; CHECK-NEXT: da analyze - none!
; CHECK-NEXT: Src: store i32 1, ptr %arrayidx.us, align 4 --> Dst: store i32 2, ptr %arrayidx9.us, align 4
-; CHECK-NEXT: da analyze - output [p>]!
+; CHECK-NEXT: da analyze - output [*|<]!
; CHECK-NEXT: Src: store i32 2, ptr %arrayidx9.us, align 4 --> Dst: store i32 2, ptr %arrayidx9.us, align 4
; CHECK-NEXT: da analyze - none!
;
diff --git a/llvm/test/Analysis/DependenceAnalysis/DADelin.ll b/llvm/test/Analysis/DependenceAnalysis/DADelin.ll
index b2e4959..8f94a45 100644
--- a/llvm/test/Analysis/DependenceAnalysis/DADelin.ll
+++ b/llvm/test/Analysis/DependenceAnalysis/DADelin.ll
@@ -594,14 +594,15 @@ for.end12: ; preds = %for.inc10, %entry
}
+; FIXME: It seems that we cannot prove that %N is non-negative.
define void @nonnegative(ptr nocapture %A, i32 %N) {
; CHECK-LABEL: 'nonnegative'
; CHECK-NEXT: Src: store i32 1, ptr %arrayidx, align 4 --> Dst: store i32 1, ptr %arrayidx, align 4
-; CHECK-NEXT: da analyze - none!
+; CHECK-NEXT: da analyze - output [* *]!
; CHECK-NEXT: Src: store i32 1, ptr %arrayidx, align 4 --> Dst: store i32 2, ptr %arrayidx, align 4
-; CHECK-NEXT: da analyze - consistent output [0 0|<]!
+; CHECK-NEXT: da analyze - output [* *|<]!
; CHECK-NEXT: Src: store i32 2, ptr %arrayidx, align 4 --> Dst: store i32 2, ptr %arrayidx, align 4
-; CHECK-NEXT: da analyze - none!
+; CHECK-NEXT: da analyze - output [* *]!
;
entry:
%cmp44 = icmp eq i32 %N, 0
@@ -630,3 +631,81 @@ for.latch:
exit:
ret void
}
+
+; i = 0;
+; do {
+; a[k * i] = 42;
+; a[k * (i + 1)] = 42;
+; i++;
+; } while (i != k);
+;
+; The dependency direction between the two stores depends on the sign of k.
+; Note that the loop guard is omitted intentionally.
+; FIXME: Each store has loop-carried dependencies on itself if k is zero.
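+; For example, with k == 2, iteration i writes a[2*i] and a[2*i + 2], so
+; %idx.1 in iteration i and %idx.0 in iteration i+1 store to the same
+; element; with k == 0, every store writes a[0].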
+;
+define void @coeff_may_negative(ptr %a, i32 %k) {
+; CHECK-LABEL: 'coeff_may_negative'
+; CHECK-NEXT: Src: store i8 42, ptr %idx.0, align 1 --> Dst: store i8 42, ptr %idx.0, align 1
+; CHECK-NEXT: da analyze - none!
+; CHECK-NEXT: Src: store i8 42, ptr %idx.0, align 1 --> Dst: store i8 42, ptr %idx.1, align 1
+; CHECK-NEXT: da analyze - output [*|<]!
+; CHECK-NEXT: Src: store i8 42, ptr %idx.1, align 1 --> Dst: store i8 42, ptr %idx.1, align 1
+; CHECK-NEXT: da analyze - none!
+;
+entry:
+ br label %loop
+
+loop:
+ %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
+ %i.next = add i32 %i, 1
+ %subscript.0 = mul i32 %i, %k
+ %subscript.1 = mul i32 %i.next, %k
+ %idx.0 = getelementptr i8, ptr %a, i32 %subscript.0
+ %idx.1 = getelementptr i8, ptr %a, i32 %subscript.1
+ store i8 42, ptr %idx.0
+ store i8 42, ptr %idx.1
+ %cond.exit = icmp eq i32 %i.next, %k
+ br i1 %cond.exit, label %exit, label %loop
+
+exit:
+ ret void
+}
+
+; i = 0;
+; do {
+; a[k * i] = 42;
+; a[k * (i + 1)] = 42;
+; i++;
+; } while (i != k);
+;
+; Note that the loop guard is omitted intentionally.
+; FIXME: In principle, we can infer that the value of k is non-negative from
+; the nsw flag.
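+; Since %i starts at 0 and is incremented with nsw, the exit condition
+; %i.next == %k can only be reached without signed overflow when k >= 1.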
+;
+define void @coeff_positive(ptr %a, i32 %k) {
+; CHECK-LABEL: 'coeff_positive'
+; CHECK-NEXT: Src: store i8 42, ptr %idx.0, align 1 --> Dst: store i8 42, ptr %idx.0, align 1
+; CHECK-NEXT: da analyze - none!
+; CHECK-NEXT: Src: store i8 42, ptr %idx.0, align 1 --> Dst: store i8 42, ptr %idx.1, align 1
+; CHECK-NEXT: da analyze - output [*|<]!
+; CHECK-NEXT: Src: store i8 42, ptr %idx.1, align 1 --> Dst: store i8 42, ptr %idx.1, align 1
+; CHECK-NEXT: da analyze - none!
+;
+entry:
+ br label %loop
+
+loop:
+ %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
+ %i.next = add nsw i32 %i, 1
+ %subscript.0 = mul i32 %i, %k
+ %subscript.1 = mul i32 %i.next, %k
+ %idx.0 = getelementptr i8, ptr %a, i32 %subscript.0
+ %idx.1 = getelementptr i8, ptr %a, i32 %subscript.1
+ store i8 42, ptr %idx.0
+ store i8 42, ptr %idx.1
+ %cond.exit = icmp eq i32 %i.next, %k
+ br i1 %cond.exit, label %exit, label %loop
+
+exit:
+ ret void
+}
diff --git a/llvm/test/Analysis/IR2Vec/Inputs/dummy_2D_vocab.json b/llvm/test/Analysis/IR2Vec/Inputs/dummy_2D_vocab.json
index 9b38f2e..07fde84 100644
--- a/llvm/test/Analysis/IR2Vec/Inputs/dummy_2D_vocab.json
+++ b/llvm/test/Analysis/IR2Vec/Inputs/dummy_2D_vocab.json
@@ -47,6 +47,7 @@
"FPTrunc": [89, 90],
"FPExt": [91, 92],
"PtrToInt": [93, 94],
+ "PtrToAddr": [135, 136],
"IntToPtr": [95, 96],
"BitCast": [97, 98],
"AddrSpaceCast": [99, 100],
diff --git a/llvm/test/Analysis/IR2Vec/Inputs/reference_default_vocab_print.txt b/llvm/test/Analysis/IR2Vec/Inputs/reference_default_vocab_print.txt
index 79fcf82..1b9b3c2 100644
--- a/llvm/test/Analysis/IR2Vec/Inputs/reference_default_vocab_print.txt
+++ b/llvm/test/Analysis/IR2Vec/Inputs/reference_default_vocab_print.txt
@@ -45,6 +45,7 @@ Key: SIToFP: [ 87.00 88.00 ]
Key: FPTrunc: [ 89.00 90.00 ]
Key: FPExt: [ 91.00 92.00 ]
Key: PtrToInt: [ 93.00 94.00 ]
+Key: PtrToAddr: [ 135.00 136.00 ]
Key: IntToPtr: [ 95.00 96.00 ]
Key: BitCast: [ 97.00 98.00 ]
Key: AddrSpaceCast: [ 99.00 100.00 ]
diff --git a/llvm/test/Analysis/IR2Vec/Inputs/reference_wtd1_vocab_print.txt b/llvm/test/Analysis/IR2Vec/Inputs/reference_wtd1_vocab_print.txt
index 584bd31..9673e7f 100644
--- a/llvm/test/Analysis/IR2Vec/Inputs/reference_wtd1_vocab_print.txt
+++ b/llvm/test/Analysis/IR2Vec/Inputs/reference_wtd1_vocab_print.txt
@@ -45,6 +45,7 @@ Key: SIToFP: [ 43.50 44.00 ]
Key: FPTrunc: [ 44.50 45.00 ]
Key: FPExt: [ 45.50 46.00 ]
Key: PtrToInt: [ 46.50 47.00 ]
+Key: PtrToAddr: [ 67.50 68.00 ]
Key: IntToPtr: [ 47.50 48.00 ]
Key: BitCast: [ 48.50 49.00 ]
Key: AddrSpaceCast: [ 49.50 50.00 ]
diff --git a/llvm/test/Analysis/IR2Vec/Inputs/reference_wtd2_vocab_print.txt b/llvm/test/Analysis/IR2Vec/Inputs/reference_wtd2_vocab_print.txt
index 2727c85..1f575d2 100644
--- a/llvm/test/Analysis/IR2Vec/Inputs/reference_wtd2_vocab_print.txt
+++ b/llvm/test/Analysis/IR2Vec/Inputs/reference_wtd2_vocab_print.txt
@@ -45,6 +45,7 @@ Key: SIToFP: [ 8.70 8.80 ]
Key: FPTrunc: [ 8.90 9.00 ]
Key: FPExt: [ 9.10 9.20 ]
Key: PtrToInt: [ 9.30 9.40 ]
+Key: PtrToAddr: [ 13.50 13.60 ]
Key: IntToPtr: [ 9.50 9.60 ]
Key: BitCast: [ 9.70 9.80 ]
Key: AddrSpaceCast: [ 9.90 10.00 ]
diff --git a/llvm/test/Analysis/KernelInfo/openmp/nvptx.ll b/llvm/test/Analysis/KernelInfo/openmp/nvptx.ll
index bd46741..da5a898 100644
--- a/llvm/test/Analysis/KernelInfo/openmp/nvptx.ll
+++ b/llvm/test/Analysis/KernelInfo/openmp/nvptx.ll
@@ -417,7 +417,7 @@ define internal noundef range(i32 -1, 1024) i32 @__kmpc_target_init(ptr nofree n
br label %116
116: ; preds = %110, %128
- call void @llvm.lifetime.start.p0(i64 noundef 8, ptr noundef nonnull align 8 dereferenceable(8) %3) #20
+ call void @llvm.lifetime.start.p0(ptr noundef nonnull align 8 dereferenceable(8) %3) #20
tail call void @llvm.nvvm.barrier.sync(i32 noundef 8)
%117 = call zeroext i1 @__kmpc_kernel_parallel(ptr noalias nocapture nofree noundef nonnull writeonly align 8 dereferenceable(8) %3) #20
%118 = load ptr, ptr %3, align 8, !tbaa !93
@@ -446,11 +446,11 @@ define internal noundef range(i32 -1, 1024) i32 @__kmpc_target_init(ptr nofree n
128: ; preds = %126, %120
tail call void @llvm.nvvm.barrier.sync(i32 noundef 8)
- call void @llvm.lifetime.end.p0(i64 noundef 8, ptr noundef nonnull %3) #20
+ call void @llvm.lifetime.end.p0(ptr noundef nonnull %3) #20
br label %116, !llvm.loop !94
129: ; preds = %116
- call void @llvm.lifetime.end.p0(i64 noundef 8, ptr noundef nonnull %3) #20
+ call void @llvm.lifetime.end.p0(ptr noundef nonnull %3) #20
br label %130
130: ; preds = %106, %129, %100, %98
@@ -495,7 +495,7 @@ define internal fastcc void @__assert_fail_internal(ptr noundef nonnull derefere
declare void @llvm.assume(i1 noundef) #9
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #10
+declare void @llvm.lifetime.start.p0(ptr nocapture) #10
; Function Attrs: convergent nocallback nounwind
declare void @llvm.nvvm.barrier.sync(i32) #11
@@ -587,7 +587,7 @@ define internal void @__kmpc_kernel_end_parallel() local_unnamed_addr #13 {
}
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #10
+declare void @llvm.lifetime.end.p0(ptr nocapture) #10
; Function Attrs: convergent mustprogress nounwind willreturn allockind("free") memory(argmem: readwrite, inaccessiblemem: readwrite)
declare extern_weak void @free(ptr allocptr nocapture noundef) local_unnamed_addr #14
@@ -595,11 +595,11 @@ declare extern_weak void @free(ptr allocptr nocapture noundef) local_unnamed_add
; Function Attrs: convergent mustprogress nounwind
define internal noundef i32 @_ZN4ompx6printfEPKcz(ptr noundef %0, ...) local_unnamed_addr #15 {
%2 = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 noundef 8, ptr noundef nonnull align 8 %2) #29
+ call void @llvm.lifetime.start.p0(ptr noundef nonnull align 8 %2) #29
call void @llvm.va_start.p0(ptr noundef nonnull align 8 %2) #27
%3 = load ptr, ptr %2, align 8, !tbaa !101
%4 = call i32 @vprintf(ptr noundef %0, ptr noundef %3) #24
- call void @llvm.lifetime.end.p0(i64 noundef 8, ptr noundef nonnull %2) #20
+ call void @llvm.lifetime.end.p0(ptr noundef nonnull %2) #20
ret i32 %4
}
@@ -641,7 +641,7 @@ define internal void @__kmpc_target_deinit() #4 {
br i1 %14, label %15, label %27
15: ; preds = %11
- call void @llvm.lifetime.start.p0(i64 noundef 8, ptr noundef nonnull align 8 dereferenceable(8) %1) #29
+ call void @llvm.lifetime.start.p0(ptr noundef nonnull align 8 dereferenceable(8) %1) #29
%16 = call zeroext i1 @__kmpc_kernel_parallel(ptr noalias nocapture nofree noundef nonnull writeonly align 8 dereferenceable(8) %1) #20
%17 = load i32, ptr @__omp_rtl_debug_kind, align 4, !tbaa !62
%18 = load i32, ptr addrspace(4) @__omp_rtl_device_environment, align 8, !tbaa !83
@@ -659,7 +659,7 @@ define internal void @__kmpc_target_deinit() #4 {
26: ; preds = %15
tail call void @llvm.assume(i1 noundef %23) #23
- call void @llvm.lifetime.end.p0(i64 noundef 8, ptr noundef nonnull %1) #20
+ call void @llvm.lifetime.end.p0(ptr noundef nonnull %1) #20
br label %27
27: ; preds = %26, %11, %10, %0
diff --git a/llvm/test/Analysis/LazyValueAnalysis/invalidation.ll b/llvm/test/Analysis/LazyValueAnalysis/invalidation.ll
index 71ea5d2..0ad1a33 100644
--- a/llvm/test/Analysis/LazyValueAnalysis/invalidation.ll
+++ b/llvm/test/Analysis/LazyValueAnalysis/invalidation.ll
@@ -17,13 +17,13 @@ target triple = "x86_64-unknown-linux-gnu"
@.str = private unnamed_addr constant [8 x i8] c"a = %l\0A\00", align 1
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @hoo(ptr)
declare i32 @printf(ptr nocapture readonly, ...)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
define void @goo(i32 %N, ptr %b) {
entry:
@@ -38,12 +38,12 @@ for.cond: ; preds = %for.body, %entry
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- call void @llvm.lifetime.start.p0(i64 8, ptr %tmp)
+ call void @llvm.lifetime.start.p0(ptr %tmp)
call void @hoo(ptr %a.i)
call void @hoo(ptr %c)
%tmp1 = load volatile i64, ptr %a.i, align 8
%call.i = call i32 (ptr, ...) @printf(ptr @.str, i64 %tmp1)
- call void @llvm.lifetime.end.p0(i64 8, ptr %tmp)
+ call void @llvm.lifetime.end.p0(ptr %tmp)
%inc = add nsw i32 %i.0, 1
br label %for.cond
diff --git a/llvm/test/Analysis/MemorySSA/lifetime-simple.ll b/llvm/test/Analysis/MemorySSA/lifetime-simple.ll
index 18d2459..03b6768 100644
--- a/llvm/test/Analysis/MemorySSA/lifetime-simple.ll
+++ b/llvm/test/Analysis/MemorySSA/lifetime-simple.ll
@@ -9,8 +9,8 @@ entry:
%P = alloca [32 x i8]
%Q = call ptr @obscure(ptr %P)
; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr %P)
- call void @llvm.lifetime.start.p0(i64 32, ptr %P)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %P)
+ call void @llvm.lifetime.start.p0(ptr %P)
; CHECK: MemoryUse(1)
; CHECK-NEXT: %0 = load i8, ptr %P
%0 = load i8, ptr %P
@@ -18,8 +18,8 @@ entry:
; CHECK-NEXT: store i8 1, ptr %P
store i8 1, ptr %P
; CHECK: 3 = MemoryDef(2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr %P)
- call void @llvm.lifetime.end.p0(i64 32, ptr %P)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %P)
+ call void @llvm.lifetime.end.p0(ptr %P)
; CHECK: MemoryUse(3)
; CHECK-NEXT: %1 = load i8, ptr %P
%1 = load i8, ptr %P
@@ -28,5 +28,5 @@ entry:
%2 = load i8, ptr %Q
ret i8 %1
}
-declare void @llvm.lifetime.start.p0(i64 %S, ptr nocapture %P) readonly
-declare void @llvm.lifetime.end.p0(i64 %S, ptr nocapture %P)
+declare void @llvm.lifetime.start.p0(ptr nocapture %P) readonly
+declare void @llvm.lifetime.end.p0(ptr nocapture %P)
diff --git a/llvm/test/Analysis/MemorySSA/phi-translation.ll b/llvm/test/Analysis/MemorySSA/phi-translation.ll
index b824481..22bbead 100644
--- a/llvm/test/Analysis/MemorySSA/phi-translation.ll
+++ b/llvm/test/Analysis/MemorySSA/phi-translation.ll
@@ -465,7 +465,7 @@ end: ; preds = %for.body
define void @use_clobbered_by_def_in_loop() {
entry:
%nodeStack = alloca [12 x i32], align 4
- call void @llvm.lifetime.start.p0(i64 48, ptr nonnull %nodeStack)
+ call void @llvm.lifetime.start.p0(ptr nonnull %nodeStack)
br i1 false, label %cleanup, label %while.cond
; CHECK-LABEL: while.cond:
@@ -502,12 +502,12 @@ while.end: ; preds = %while.cond, %land.r
br i1 true, label %cleanup, label %while.cond.backedge
cleanup: ; preds = %while.body, %while.end, %entry
- call void @llvm.lifetime.end.p0(i64 48, ptr nonnull %nodeStack)
+ call void @llvm.lifetime.end.p0(ptr nonnull %nodeStack)
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
define void @another_loop_clobber_inc() {
; CHECK-LABEL: void @another_loop_clobber_inc
diff --git a/llvm/test/Analysis/MemorySSA/pr43044.ll b/llvm/test/Analysis/MemorySSA/pr43044.ll
index bd767d3..7ae02f3 100644
--- a/llvm/test/Analysis/MemorySSA/pr43044.ll
+++ b/llvm/test/Analysis/MemorySSA/pr43044.ll
@@ -4,7 +4,7 @@
target datalayout = "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-v128:64-a:8:16-n32:64"
target triple = "s390x-ibm-linux"
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
; CHECK-LABEL: @func_42()
define void @func_42() {
diff --git a/llvm/test/Analysis/MemorySSA/pr49859.ll b/llvm/test/Analysis/MemorySSA/pr49859.ll
index 25ef586..0e97f57 100644
--- a/llvm/test/Analysis/MemorySSA/pr49859.ll
+++ b/llvm/test/Analysis/MemorySSA/pr49859.ll
@@ -11,12 +11,12 @@ entry:
%n = alloca i8, align 1
%i = alloca i8, align 1
%cleanup.dest.slot = alloca i32, align 1
- call void @llvm.lifetime.start.p0(i64 1, ptr %sum) #3
+ call void @llvm.lifetime.start.p0(ptr %sum) #3
store i8 0, ptr %sum, align 1
- call void @llvm.lifetime.start.p0(i64 1, ptr %n) #3
+ call void @llvm.lifetime.start.p0(ptr %n) #3
%call = call i8 @idi(i8 10)
store i8 %call, ptr %n, align 1
- call void @llvm.lifetime.start.p0(i64 1, ptr %i) #3
+ call void @llvm.lifetime.start.p0(ptr %i) #3
store i8 0, ptr %i, align 1
br label %for.cond
@@ -61,9 +61,9 @@ for.inc: ; preds = %if.end
; CHECK: final.cleanup:
; CHECK-NEXT: ; [[NO20:.*]] = MemoryPhi({if.then,[[NO9:.*]]},{for.cond.cleanup,[[NO8:.*]]})
; CHECK-NEXT: ; [[NO12:.*]] = MemoryDef([[NO20]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr %i)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %i)
final.cleanup: ; preds = %if.then, %for.cond.cleanup
- call void @llvm.lifetime.end.p0(i64 1, ptr %i) #3
+ call void @llvm.lifetime.end.p0(ptr %i) #3
br label %for.end
; CHECK: for.end:
@@ -71,23 +71,23 @@ final.cleanup: ; preds = %if.then, %for
; CHECK-NEXT: %3 = load i8, ptr %sum, align 1
for.end: ; preds = %final.cleanup
%8 = load i8, ptr %sum, align 1
- call void @llvm.lifetime.start.p0(i64 1, ptr %res.addr.i)
+ call void @llvm.lifetime.start.p0(ptr %res.addr.i)
store i8 %8, ptr %res.addr.i, align 1
%9 = load i8, ptr %res.addr.i, align 1
call void @foo(i8 %9) #3
- call void @llvm.lifetime.end.p0(i64 1, ptr %res.addr.i)
- call void @llvm.lifetime.end.p0(i64 1, ptr %n) #3
- call void @llvm.lifetime.end.p0(i64 1, ptr %sum) #3
+ call void @llvm.lifetime.end.p0(ptr %res.addr.i)
+ call void @llvm.lifetime.end.p0(ptr %n) #3
+ call void @llvm.lifetime.end.p0(ptr %sum) #3
ret void
}
; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare i8 @idi(i8)
; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
; Function Attrs: nounwind
declare void @foo(i8)
diff --git a/llvm/test/Analysis/MemorySSA/renamephis.ll b/llvm/test/Analysis/MemorySSA/renamephis.ll
index e297b99..a731ef1 100644
--- a/llvm/test/Analysis/MemorySSA/renamephis.ll
+++ b/llvm/test/Analysis/MemorySSA/renamephis.ll
@@ -8,7 +8,7 @@ target triple = "x86_64-unknown-linux-gnu"
declare void @g()
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #0
+declare void @llvm.lifetime.end.p0(ptr nocapture) #0
; CHECK-LABEL: @f
define void @f(i1 %arg) align 2 {
diff --git a/llvm/test/Analysis/ScalarEvolution/add-expr-pointer-operand-sorting.ll b/llvm/test/Analysis/ScalarEvolution/add-expr-pointer-operand-sorting.ll
index 39b475d..7120eec 100644
--- a/llvm/test/Analysis/ScalarEvolution/add-expr-pointer-operand-sorting.ll
+++ b/llvm/test/Analysis/ScalarEvolution/add-expr-pointer-operand-sorting.ll
@@ -50,7 +50,7 @@ define i32 @d(i32 %base) {
;
entry:
%e = alloca [1 x [1 x i8]], align 1
- call void @llvm.lifetime.start.p0(i64 1, ptr %e) #2
+ call void @llvm.lifetime.start.p0(ptr %e) #2
br label %for.cond
for.cond: ; preds = %for.cond, %entry
@@ -69,4 +69,4 @@ for.cond: ; preds = %for.cond, %entry
br label %for.cond
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
diff --git a/llvm/test/Analysis/ScalarEvolution/sdiv.ll b/llvm/test/Analysis/ScalarEvolution/sdiv.ll
index 9eaaf8b..acc6ab0 100644
--- a/llvm/test/Analysis/ScalarEvolution/sdiv.ll
+++ b/llvm/test/Analysis/ScalarEvolution/sdiv.ll
@@ -38,7 +38,7 @@ define dso_local void @_Z4loopi(i32 %width) local_unnamed_addr #0 {
entry:
%storage = alloca [2 x i32], align 4
%0 = bitcast ptr %storage to ptr
- call void @llvm.lifetime.start.p0(i64 8, ptr %storage) #4
+ call void @llvm.lifetime.start.p0(ptr %storage) #4
call void @llvm.memset.p0.i64(ptr align 4 %0, i8 0, i64 8, i1 false)
br label %for.cond
@@ -48,7 +48,7 @@ for.cond:
br i1 %cmp, label %for.body, label %for.cond.cleanup
for.cond.cleanup:
- call void @llvm.lifetime.end.p0(i64 8, ptr %storage) #4
+ call void @llvm.lifetime.end.p0(ptr %storage) #4
ret void
for.body:
@@ -64,10 +64,10 @@ for.body:
br label %for.cond
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) #2
declare dso_local i32 @_Z3adji(i32) local_unnamed_addr #3
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
diff --git a/llvm/test/Analysis/ScalarEvolution/srem.ll b/llvm/test/Analysis/ScalarEvolution/srem.ll
index 377e58a..9d4538f 100644
--- a/llvm/test/Analysis/ScalarEvolution/srem.ll
+++ b/llvm/test/Analysis/ScalarEvolution/srem.ll
@@ -38,7 +38,7 @@ define dso_local void @_Z4loopi(i32 %width) local_unnamed_addr #0 {
entry:
%storage = alloca [2 x i32], align 4
%0 = bitcast ptr %storage to ptr
- call void @llvm.lifetime.start.p0(i64 8, ptr %storage) #4
+ call void @llvm.lifetime.start.p0(ptr %storage) #4
call void @llvm.memset.p0.i64(ptr align 4 %0, i8 0, i64 8, i1 false)
br label %for.cond
@@ -48,7 +48,7 @@ for.cond:
br i1 %cmp, label %for.body, label %for.cond.cleanup
for.cond.cleanup:
- call void @llvm.lifetime.end.p0(i64 8, ptr %storage) #4
+ call void @llvm.lifetime.end.p0(ptr %storage) #4
ret void
for.body:
@@ -64,10 +64,10 @@ for.body:
br label %for.cond
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) #2
declare dso_local i32 @_Z3adji(i32) local_unnamed_addr #3
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
diff --git a/llvm/test/Analysis/ScopedNoAliasAA/alias-scope-merging.ll b/llvm/test/Analysis/ScopedNoAliasAA/alias-scope-merging.ll
index 840a517..36d79f9 100644
--- a/llvm/test/Analysis/ScopedNoAliasAA/alias-scope-merging.ll
+++ b/llvm/test/Analysis/ScopedNoAliasAA/alias-scope-merging.ll
@@ -8,10 +8,10 @@ define i8 @test(i8 %input) {
%dst = alloca i8
%src = alloca i8
; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 %dst, ptr align 8 %src, i64 1, i1 false), !alias.scope ![[SCOPE:[0-9]+]]
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %src), !noalias !4
+ call void @llvm.lifetime.start.p0(ptr nonnull %src), !noalias !4
store i8 %input, ptr %src
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %tmp, ptr align 8 %src, i64 1, i1 false), !alias.scope !0
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %src), !noalias !4
+ call void @llvm.lifetime.end.p0(ptr nonnull %src), !noalias !4
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %dst, ptr align 8 %tmp, i64 1, i1 false), !alias.scope !4
%ret_value = load i8, ptr %dst
call void @use(ptr %src)
@@ -23,8 +23,8 @@ define i8 @test(i8 %input) {
; CHECK-DAG: ![[CALLEE0_B:[0-9]+]] = distinct !{!{{[0-9]+}}, !{{[0-9]+}}, !"callee0: %b"}
; CHECK-DAG: ![[SCOPE]] = !{![[CALLEE0_A]], ![[CALLEE0_B]]}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @llvm.memcpy.p0.p0.i64(ptr, ptr, i64, i1)
!0 = !{!1, !7}
diff --git a/llvm/test/Analysis/StackSafetyAnalysis/lifetime.ll b/llvm/test/Analysis/StackSafetyAnalysis/lifetime.ll
index 6c3dec9..51bfa15 100644
--- a/llvm/test/Analysis/StackSafetyAnalysis/lifetime.ll
+++ b/llvm/test/Analysis/StackSafetyAnalysis/lifetime.ll
@@ -11,31 +11,31 @@ entry:
; CHECK: %y = alloca i32, align 4
; CHECK-NEXT: Alive: <>
%z = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %z)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %z)
+ call void @llvm.lifetime.start.p0(ptr %z)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %z)
; CHECK-NEXT: Alive: <z>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %x)
; CHECK-NEXT: Alive: <x z>
call void @capture32(ptr %x)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %x)
; CHECK-NEXT: Alive: <z>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %y)
; CHECK-NEXT: Alive: <y z>
call void @capture32(ptr %y)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %y)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.end.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %y)
; CHECK-NEXT: Alive: <z>
call void @capture32(ptr %z)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %z)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %z)
+ call void @llvm.lifetime.end.p0(ptr %z)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %z)
; CHECK-NEXT: Alive: <>
ret void
@@ -48,13 +48,13 @@ entry:
; CHECK-NEXT: Alive: <y>
%x = alloca i32, align 4
%y = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %x)
; CHECK-NEXT: Alive: <x y>
call void @capture32(ptr %x)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %x)
; CHECK-NEXT: Alive: <y>
call void @capture32(ptr %y)
@@ -69,31 +69,31 @@ entry:
%x = alloca i32, align 4
%y = alloca i32, align 4
%z = alloca i64, align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %x)
; CHECK-NEXT: Alive: <x>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %y)
; CHECK-NEXT: Alive: <x y>
call void @capture32(ptr %x)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %x)
; CHECK-NEXT: Alive: <y>
call void @capture32(ptr %y)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %y)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.end.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %y)
; CHECK-NEXT: Alive: <>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %z)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %z)
+ call void @llvm.lifetime.start.p0(ptr %z)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %z)
; CHECK-NEXT: Alive: <z>
call void @capture64(ptr %z)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %z)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %z)
+ call void @llvm.lifetime.end.p0(ptr %z)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %z)
; CHECK-NEXT: Alive: <>
ret void
@@ -111,31 +111,31 @@ entry:
; CHECK-NEXT: Alive: <>
%z = alloca i64, align 4
%y = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %x)
; CHECK-NEXT: Alive: <x>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %y)
; CHECK-NEXT: Alive: <x y>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %z)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %z)
+ call void @llvm.lifetime.start.p0(ptr %z)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %z)
; CHECK-NEXT: Alive: <x y z>
call void @capture32(ptr %x)
call void @capture32(ptr %y)
call void @capture64(ptr %z)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %x)
; CHECK-NEXT: Alive: <y z>
- call void @llvm.lifetime.end.p0(i64 -1, ptr %y)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.end.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %y)
; CHECK-NEXT: Alive: <z>
- call void @llvm.lifetime.end.p0(i64 -1, ptr %z)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %z)
+ call void @llvm.lifetime.end.p0(ptr %z)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %z)
; CHECK-NEXT: Alive: <>
ret void
@@ -154,12 +154,12 @@ entry:
%z = alloca i64, align 8
%z1 = alloca i64, align 8
%z2 = alloca i64, align 8
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x1)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %x1)
+ call void @llvm.lifetime.start.p0(ptr %x1)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %x1)
; CHECK-NEXT: Alive: <x1>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x2)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %x2)
+ call void @llvm.lifetime.start.p0(ptr %x2)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %x2)
; CHECK-NEXT: Alive: <x1 x2>
call void @capture64(ptr nonnull %x1)
@@ -171,8 +171,8 @@ entry:
if.then: ; preds = %entry
; CHECK: if.then:
; CHECK-NEXT: Alive: <x1 x2>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %y)
; CHECK-NEXT: Alive: <x1 x2 y>
call void @capture64(ptr nonnull %y)
@@ -181,13 +181,13 @@ if.then: ; preds = %entry
if.then3: ; preds = %if.then
; CHECK: if.then3:
; CHECK-NEXT: Alive: <x1 x2 y>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y1)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %y1)
+ call void @llvm.lifetime.start.p0(ptr %y1)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %y1)
; CHECK-NEXT: Alive: <x1 x2 y y1>
call void @capture64(ptr nonnull %y1)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %y1)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %y1)
+ call void @llvm.lifetime.end.p0(ptr %y1)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %y1)
; CHECK-NEXT: Alive: <x1 x2 y>
br label %if.end
@@ -195,13 +195,13 @@ if.then3: ; preds = %if.then
if.else: ; preds = %if.then
; CHECK: if.else:
; CHECK-NEXT: Alive: <x1 x2 y>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y2)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %y2)
+ call void @llvm.lifetime.start.p0(ptr %y2)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %y2)
; CHECK-NEXT: Alive: <x1 x2 y y2>
call void @capture64(ptr nonnull %y2)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %y2)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %y2)
+ call void @llvm.lifetime.end.p0(ptr %y2)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %y2)
; CHECK-NEXT: Alive: <x1 x2 y>
br label %if.end
@@ -209,8 +209,8 @@ if.else: ; preds = %if.then
if.end: ; preds = %if.else, %if.then3
; CHECK: if.end:
; CHECK-NEXT: Alive: <x1 x2 y>
- call void @llvm.lifetime.end.p0(i64 -1, ptr %y)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.end.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %y)
; CHECK-NEXT: Alive: <x1 x2>
br label %if.end9
@@ -222,8 +222,8 @@ if.else4: ; preds = %entry
; CHECK: %z.cast = bitcast ptr %z to ptr
; CHECK-NEXT: Alive: <x1 x2>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %z)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %z)
+ call void @llvm.lifetime.start.p0(ptr %z)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %z)
; CHECK-NEXT: Alive: <x1 x2 z>
call void @capture64(ptr nonnull %z)
@@ -232,13 +232,13 @@ if.else4: ; preds = %entry
if.then6: ; preds = %if.else4
; CHECK: if.then6:
; CHECK-NEXT: Alive: <x1 x2 z>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %z1)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %z1)
+ call void @llvm.lifetime.start.p0(ptr %z1)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %z1)
; CHECK-NEXT: Alive: <x1 x2 z z1>
call void @capture64(ptr nonnull %z1)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %z1)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %z1)
+ call void @llvm.lifetime.end.p0(ptr %z1)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %z1)
; CHECK-NEXT: Alive: <x1 x2 z>
br label %if.end8
@@ -246,13 +246,13 @@ if.then6: ; preds = %if.else4
if.else7: ; preds = %if.else4
; CHECK: if.else7:
; CHECK-NEXT: Alive: <x1 x2 z>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %z2)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %z2)
+ call void @llvm.lifetime.start.p0(ptr %z2)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %z2)
; CHECK-NEXT: Alive: <x1 x2 z z2>
call void @capture64(ptr nonnull %z2)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %z2)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %z2)
+ call void @llvm.lifetime.end.p0(ptr %z2)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %z2)
; CHECK-NEXT: Alive: <x1 x2 z>
br label %if.end8
@@ -260,8 +260,8 @@ if.else7: ; preds = %if.else4
if.end8: ; preds = %if.else7, %if.then6
; CHECK: if.end8:
; CHECK-NEXT: Alive: <x1 x2 z>
- call void @llvm.lifetime.end.p0(i64 -1, ptr %z)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %z)
+ call void @llvm.lifetime.end.p0(ptr %z)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %z)
; CHECK-NEXT: Alive: <x1 x2>
br label %if.end9
@@ -269,12 +269,12 @@ if.end8: ; preds = %if.else7, %if.then6
if.end9: ; preds = %if.end8, %if.end
; CHECK: if.end9:
; CHECK-NEXT: Alive: <x1 x2>
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x2)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %x2)
+ call void @llvm.lifetime.end.p0(ptr %x2)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %x2)
; CHECK-NEXT: Alive: <x1>
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x1)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %x1)
+ call void @llvm.lifetime.end.p0(ptr %x1)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %x1)
; CHECK-NEXT: Alive: <>
ret void
@@ -287,8 +287,8 @@ entry:
; CHECK-NEXT: Alive: <>
%x = alloca i32, align 4
%y = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %x)
; CHECK-NEXT: Alive: <x>
call void @capture32(ptr %x)
@@ -297,17 +297,17 @@ entry:
bb2: ; preds = %entry
; CHECK: bb2:
; CHECK-NEXT: Alive: <x>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %y)
; CHECK-NEXT: Alive: <x y>
call void @capture32(ptr %y)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %y)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.end.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %y)
; CHECK-NEXT: Alive: <x>
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %x)
; CHECK-NEXT: Alive: <>
ret void
@@ -315,8 +315,8 @@ bb2: ; preds = %entry
bb3: ; preds = %entry
; CHECK: bb3:
; CHECK-NEXT: Alive: <x>
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %x)
; CHECK-NEXT: Alive: <>
ret void
@@ -329,13 +329,13 @@ entry:
; CHECK-NEXT: Alive: <>
%x = alloca i32, align 4
%y = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %x)
; CHECK-NEXT: Alive: <x>
call void @capture32(ptr %x)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %x)
; CHECK-NEXT: Alive: <>
br i1 %d, label %bb2, label %bb3
@@ -343,13 +343,13 @@ entry:
bb2: ; preds = %entry
; CHECK: bb2:
; CHECK-NEXT: Alive: <>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %y)
; CHECK-NEXT: Alive: <y>
call void @capture32(ptr %y)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %y)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.end.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %y)
; CHECK-NEXT: Alive: <>
ret void
@@ -367,13 +367,13 @@ entry:
; CHECK-NEXT: Alive: <>
%x = alloca i32, align 4
%y = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %x)
; CHECK-NEXT: Alive: <x>
call void @capture32(ptr %x)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %x)
; CHECK-NEXT: Alive: <>
br i1 %d, label %bb2, label %bb3
@@ -381,8 +381,8 @@ entry:
bb2: ; preds = %entry
; CHECK: bb2:
; CHECK-NEXT: Alive: <>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %y)
; CHECK-NEXT: Alive: <y>
call void @capture32(ptr %y)
@@ -401,8 +401,8 @@ entry:
; CHECK-NEXT: Alive: <>
%x = alloca i32, align 4
%y = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %x)
; CHECK-NEXT: Alive: <x>
call void @capture32(ptr %x)
@@ -411,12 +411,12 @@ entry:
bb2: ; preds = %entry
; CHECK: bb2:
; CHECK-NEXT: Alive: <x>
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %x)
; CHECK-NEXT: Alive: <>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %y)
; CHECK-NEXT: Alive: <y>
call void @capture32(ptr %y)
@@ -436,8 +436,8 @@ entry:
%x = alloca i32, align 4
%y = alloca i32, align 4
call void @capture32(ptr %x)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %x)
; CHECK-NEXT: Alive: <x>
br i1 %d, label %bb2, label %bb3
@@ -445,8 +445,8 @@ entry:
bb2: ; preds = %entry
; CHECK: bb2:
; CHECK-NEXT: Alive: <x>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %y)
; CHECK-NEXT: Alive: <x y>
call void @capture32(ptr %y)
@@ -467,12 +467,12 @@ entry:
%B.i2 = alloca [100 x i32], align 4
%A.i = alloca [100 x i32], align 4
%B.i = alloca [100 x i32], align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %A.i)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %A.i)
+ call void @llvm.lifetime.start.p0(ptr %A.i)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %A.i)
; CHECK-NEXT: Alive: <A.i>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %B.i)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %B.i)
+ call void @llvm.lifetime.start.p0(ptr %B.i)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %B.i)
; CHECK-NEXT: Alive: <A.i B.i>
call void @capture100x32(ptr %A.i)
@@ -480,30 +480,30 @@ entry:
; CHECK-NEXT: Alive: <A.i B.i>
call void @capture100x32(ptr %B.i)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %A.i)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %A.i)
+ call void @llvm.lifetime.end.p0(ptr %A.i)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %A.i)
; CHECK-NEXT: Alive: <B.i>
- call void @llvm.lifetime.end.p0(i64 -1, ptr %B.i)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %B.i)
+ call void @llvm.lifetime.end.p0(ptr %B.i)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %B.i)
; CHECK-NEXT: Alive: <>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %A.i1)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %A.i1)
+ call void @llvm.lifetime.start.p0(ptr %A.i1)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %A.i1)
; CHECK-NEXT: Alive: <A.i1>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %B.i2)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %B.i2)
+ call void @llvm.lifetime.start.p0(ptr %B.i2)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %B.i2)
; CHECK-NEXT: Alive: <A.i1 B.i2>
call void @capture100x32(ptr %A.i1)
call void @capture100x32(ptr %B.i2)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %A.i1)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %A.i1)
+ call void @llvm.lifetime.end.p0(ptr %A.i1)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %A.i1)
; CHECK-NEXT: Alive: <B.i2>
- call void @llvm.lifetime.end.p0(i64 -1, ptr %B.i2)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %B.i2)
+ call void @llvm.lifetime.end.p0(ptr %B.i2)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %B.i2)
; CHECK-NEXT: Alive: <>
ret void
@@ -516,20 +516,20 @@ entry:
; CHECK-NEXT: Alive: <>
%buf1 = alloca i8, i32 100000, align 16
%buf2 = alloca i8, i32 100000, align 16
- call void @llvm.lifetime.start.p0(i64 -1, ptr %buf1)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %buf1)
+ call void @llvm.lifetime.start.p0(ptr %buf1)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %buf1)
; CHECK-NEXT: Alive: <buf1>
- call void @llvm.lifetime.end.p0(i64 -1, ptr %buf1)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %buf1)
+ call void @llvm.lifetime.end.p0(ptr %buf1)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %buf1)
; CHECK-NEXT: Alive: <>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %buf1)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %buf1)
+ call void @llvm.lifetime.start.p0(ptr %buf1)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %buf1)
; CHECK-NEXT: Alive: <buf1>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %buf2)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %buf2)
+ call void @llvm.lifetime.start.p0(ptr %buf2)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %buf2)
; CHECK-NEXT: Alive: <buf1 buf2>
call void @capture8(ptr %buf1)
@@ -546,22 +546,22 @@ entry:
%B.i2 = alloca [100 x i32], align 4
%A.i = alloca [100 x i32], align 4
%B.i = alloca [100 x i32], align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %A.i)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %A.i)
+ call void @llvm.lifetime.start.p0(ptr %A.i)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %A.i)
; CHECK-NEXT: Alive: <A.i A.i1 B.i2>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %B.i)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %B.i)
+ call void @llvm.lifetime.start.p0(ptr %B.i)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %B.i)
; CHECK-NEXT: Alive: <A.i A.i1 B.i B.i2>
call void @capture100x32(ptr %A.i)
call void @capture100x32(ptr %B.i)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %A.i)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %A.i)
+ call void @llvm.lifetime.end.p0(ptr %A.i)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %A.i)
; CHECK-NEXT: Alive: <A.i1 B.i B.i2>
- call void @llvm.lifetime.end.p0(i64 -1, ptr %B.i)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %B.i)
+ call void @llvm.lifetime.end.p0(ptr %B.i)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %B.i)
; CHECK-NEXT: Alive: <A.i1 B.i2>
br label %block2
@@ -583,23 +583,23 @@ entry:
; CHECK-NEXT: Alive: <>
%a.i = alloca [4 x %struct.Klass], align 16
%b.i = alloca [4 x %struct.Klass], align 16
- call void @llvm.lifetime.start.p0(i64 -1, ptr %a.i)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %a.i)
+ call void @llvm.lifetime.start.p0(ptr %a.i)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %a.i)
; CHECK-NEXT: Alive: <a.i>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %b.i)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %b.i)
+ call void @llvm.lifetime.start.p0(ptr %b.i)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %b.i)
; CHECK-NEXT: Alive: <a.i b.i>
call void @capture8(ptr %a.i)
call void @capture8(ptr %b.i)
%z3 = load i32, ptr %a.i, align 16
- call void @llvm.lifetime.end.p0(i64 -1, ptr %a.i)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %a.i)
+ call void @llvm.lifetime.end.p0(ptr %a.i)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %a.i)
; CHECK-NEXT: Alive: <b.i>
- call void @llvm.lifetime.end.p0(i64 -1, ptr %b.i)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %b.i)
+ call void @llvm.lifetime.end.p0(ptr %b.i)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %b.i)
; CHECK-NEXT: Alive: <>
ret i32 %z3
@@ -611,8 +611,8 @@ entry:
; CHECK: entry:
; CHECK-NEXT: Alive: <>
%x = alloca i8, align 4
- call void @llvm.lifetime.start.p0(i64 1, ptr %x)
-; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %x)
; CHECK-NEXT: Alive: <x>
br label %l2
@@ -622,8 +622,8 @@ l2: ; preds = %l2, %entry
; MAY-NEXT: Alive: <x>
; MUST-NEXT: Alive: <>
call void @capture8(ptr %x)
- call void @llvm.lifetime.end.p0(i64 1, ptr %x)
-; CHECK: call void @llvm.lifetime.end.p0(i64 1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %x)
; CHECK-NEXT: Alive: <>
br label %l2
@@ -636,8 +636,8 @@ entry:
; CHECK-NEXT: Alive: <>
%x = alloca i8, align 4
%y = alloca i8, align 4
- call void @llvm.lifetime.start.p0(i64 1, ptr %x)
-; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %x)
; CHECK-NEXT: Alive: <x>
br label %l2
@@ -645,17 +645,17 @@ entry:
l2: ; preds = %l2, %entry
; CHECK: l2:
; CHECK-NEXT: Alive: <x>
- call void @llvm.lifetime.start.p0(i64 1, ptr %y)
-; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %y)
; CHECK-NEXT: Alive: <x y>
call void @capture8(ptr %y)
- call void @llvm.lifetime.end.p0(i64 1, ptr %y)
-; CHECK: call void @llvm.lifetime.end.p0(i64 1, ptr %y)
+ call void @llvm.lifetime.end.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %y)
; CHECK-NEXT: Alive: <x>
- call void @llvm.lifetime.start.p0(i64 1, ptr %x)
-; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %x)
; CHECK-NEXT: Alive: <x>
call void @capture8(ptr %x)
@@ -677,24 +677,24 @@ entry:
if.then: ; preds = %entry
; CHECK: if.then:
; CHECK-NEXT: Alive: <>
- call void @llvm.lifetime.start.p0(i64 500, ptr nonnull %a)
-; CHECK: call void @llvm.lifetime.start.p0(i64 500, ptr nonnull %a)
+ call void @llvm.lifetime.start.p0(ptr nonnull %a)
+; CHECK: call void @llvm.lifetime.start.p0(ptr nonnull %a)
; CHECK-NEXT: Alive: <a>
tail call void @capture8(ptr %a)
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %a)
-; CHECK: call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
+; CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %a)
; CHECK-NEXT: Alive: <>
br label %if.end
if.else: ; preds = %entry
; CHECK: if.else:
; CHECK-NEXT: Alive: <>
- call void @llvm.lifetime.start.p0(i64 500, ptr nonnull %b)
-; CHECK: call void @llvm.lifetime.start.p0(i64 500, ptr nonnull %b)
+ call void @llvm.lifetime.start.p0(ptr nonnull %b)
+; CHECK: call void @llvm.lifetime.start.p0(ptr nonnull %b)
; CHECK-NEXT: Alive: <b>
tail call void @capture8(ptr %b)
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %b)
-; CHECK: call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %b)
+ call void @llvm.lifetime.end.p0(ptr nonnull %b)
+; CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %b)
; CHECK-NEXT: Alive: <>
br label %if.end
@@ -719,8 +719,8 @@ entry:
if.then:
; CHECK: if.then:
; CHECK-NEXT: Alive: <>
- call void @llvm.lifetime.start.p0(i64 1, ptr %y)
-; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %y)
; CHECK-NEXT: Alive: <y>
br label %if.end
@@ -730,12 +730,12 @@ if.then:
if.else:
; CHECK: if.else:
; CHECK-NEXT: Alive: <>
- call void @llvm.lifetime.start.p0(i64 1, ptr %y)
-; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %y)
; CHECK-NEXT: Alive: <y>
- call void @llvm.lifetime.start.p0(i64 1, ptr %x)
-; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %x)
; CHECK-NEXT: Alive: <x y>
br label %if.end
@@ -758,12 +758,12 @@ entry:
%x = alloca i8, align 4
%y = alloca i8, align 4
- call void @llvm.lifetime.start.p0(i64 1, ptr %y)
-; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %y)
; CHECK-NEXT: Alive: <y>
- call void @llvm.lifetime.start.p0(i64 1, ptr %x)
-; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %x)
; CHECK-NEXT: Alive: <x y>
br label %end
@@ -773,7 +773,7 @@ entry:
dead:
; CHECK: dead:
; CHECK-NOT: Alive:
- call void @llvm.lifetime.start.p0(i64 4, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
br label %end
; CHECK: br label %end
@@ -792,20 +792,20 @@ entry:
; CHECK: entry:
; CHECK-NEXT: Alive: <>
%x = alloca i8
- call void @llvm.lifetime.start.p0(i64 1, ptr %x)
-; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %x)
; CHECK-NEXT: Alive: <x>
- call void @llvm.lifetime.start.p0(i64 1, ptr %x)
-; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %x)
; CHECK-NEXT: Alive: <x>
- call void @llvm.lifetime.end.p0(i64 1, ptr %x)
-; CHECK: call void @llvm.lifetime.end.p0(i64 1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %x)
; CHECK-NEXT: Alive: <>
- call void @llvm.lifetime.end.p0(i64 1, ptr %x)
-; CHECK: call void @llvm.lifetime.end.p0(i64 1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %x)
; CHECK-NEXT: Alive: <>
ret void
@@ -827,8 +827,8 @@ if.then:
; CHECK: if.then:
; MAY-NEXT: Alive: <x y>
; MUST-NEXT: Alive: <>
- call void @llvm.lifetime.end.p0(i64 1, ptr %y)
-; CHECK: call void @llvm.lifetime.end.p0(i64 1, ptr %y)
+ call void @llvm.lifetime.end.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %y)
; MAY-NEXT: Alive: <x>
; MUST-NEXT: Alive: <>
@@ -840,12 +840,12 @@ if.then:
if.else:
; CHECK: if.else:
; CHECK-NEXT: Alive: <>
- call void @llvm.lifetime.start.p0(i64 1, ptr %y)
-; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %y)
; CHECK-NEXT: Alive: <y>
- call void @llvm.lifetime.start.p0(i64 1, ptr %x)
-; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %x)
; CHECK-NEXT: Alive: <x y>
br label %if.then
@@ -868,8 +868,8 @@ entry:
%x = alloca i8, align 4
%y = alloca i8, align 4
- call void @llvm.lifetime.start.p0(i64 1, ptr %x)
-; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %x)
; CHECK-NEXT: Alive: <x>
br i1 %a, label %if.then, label %if.else
@@ -880,8 +880,8 @@ if.then:
; CHECK: if.then:
; MAY-NEXT: Alive: <x>
; MUST-NEXT: Alive: <>
- call void @llvm.lifetime.start.p0(i64 1, ptr %y)
-; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %y)
; MAY-NEXT: Alive: <x y>
; MUST-NEXT: Alive: <y>
@@ -893,12 +893,12 @@ if.then:
if.else:
; CHECK: if.else:
; CHECK-NEXT: Alive: <x>
- call void @llvm.lifetime.end.p0(i64 1, ptr %y)
-; CHECK: call void @llvm.lifetime.end.p0(i64 1, ptr %y)
+ call void @llvm.lifetime.end.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %y)
; CHECK-NEXT: Alive: <x>
- call void @llvm.lifetime.end.p0(i64 1, ptr %x)
-; CHECK: call void @llvm.lifetime.end.p0(i64 1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %x)
; CHECK-NEXT: Alive: <>
br label %if.then
@@ -921,8 +921,8 @@ entry:
%x = alloca i8, align 4
%y = alloca i8, align 4
- call void @llvm.lifetime.start.p0(i64 1, ptr %x)
-; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %x)
; CHECK-NEXT: Alive: <x>
br i1 %a, label %if.then, label %if.end
@@ -933,8 +933,8 @@ if.then:
; CHECK: if.then:
; MAY-NEXT: Alive: <x y>
; MUST-NEXT: Alive: <x>
- call void @llvm.lifetime.start.p0(i64 1, ptr %y)
-; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %y)
; CHECK-NEXT: Alive: <x y>
br i1 %a, label %if.then, label %if.end
@@ -949,8 +949,8 @@ if.end:
ret void
}
-declare void @llvm.lifetime.start.p0(i64, ptr captures(none))
-declare void @llvm.lifetime.end.p0(i64, ptr captures(none))
+declare void @llvm.lifetime.start.p0(ptr captures(none))
+declare void @llvm.lifetime.end.p0(ptr captures(none))
declare void @capture8(ptr)
declare void @capture32(ptr)
declare void @capture64(ptr)
diff --git a/llvm/test/Analysis/StackSafetyAnalysis/local.ll b/llvm/test/Analysis/StackSafetyAnalysis/local.ll
index 02d46c8..6944f38 100644
--- a/llvm/test/Analysis/StackSafetyAnalysis/local.ll
+++ b/llvm/test/Analysis/StackSafetyAnalysis/local.ll
@@ -707,9 +707,9 @@ entry:
%n = load i8, ptr %y
call void @llvm.memset.p0.i32(ptr nonnull %z, i8 0, i32 1, i1 false)
- call void @llvm.lifetime.start.p0(i64 1, ptr %x)
- call void @llvm.lifetime.start.p0(i64 1, ptr %y)
- call void @llvm.lifetime.start.p0(i64 1, ptr %z)
+ call void @llvm.lifetime.start.p0(ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %z)
ret void
}
@@ -731,9 +731,9 @@ entry:
%y = alloca i8, align 4
%z = alloca i8, align 4
- call void @llvm.lifetime.start.p0(i64 1, ptr %x)
- call void @llvm.lifetime.start.p0(i64 1, ptr %y)
- call void @llvm.lifetime.start.p0(i64 1, ptr %z)
+ call void @llvm.lifetime.start.p0(ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %z)
store i8 5, ptr %x
%n = load i8, ptr %y
@@ -756,13 +756,13 @@ entry:
%y = alloca i8, align 4
%z = alloca i8, align 4
- call void @llvm.lifetime.start.p0(i64 1, ptr %x)
- call void @llvm.lifetime.start.p0(i64 1, ptr %y)
- call void @llvm.lifetime.start.p0(i64 1, ptr %z)
+ call void @llvm.lifetime.start.p0(ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %z)
- call void @llvm.lifetime.end.p0(i64 1, ptr %x)
- call void @llvm.lifetime.end.p0(i64 1, ptr %y)
- call void @llvm.lifetime.end.p0(i64 1, ptr %z)
+ call void @llvm.lifetime.end.p0(ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %y)
+ call void @llvm.lifetime.end.p0(ptr %z)
store i8 5, ptr %x
%n = load i8, ptr %y
@@ -973,13 +973,13 @@ define void @DoubleLifetime() {
; CHECK-EMPTY:
entry:
%a = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
- call void @llvm.lifetime.end.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
call void @llvm.memset.p0.i32(ptr %a, i8 1, i32 4, i1 true)
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
call void @llvm.memset.p0.i32(ptr %a, i8 1, i32 4, i1 false)
- call void @llvm.lifetime.end.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
ret void
}
@@ -993,13 +993,13 @@ define void @DoubleLifetime2() {
; CHECK-EMPTY:
entry:
%a = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
- call void @llvm.lifetime.end.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
%n = load i32, ptr %a
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
call void @llvm.memset.p0.i32(ptr %a, i8 1, i32 4, i1 false)
- call void @llvm.lifetime.end.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
ret void
}
@@ -1013,13 +1013,13 @@ define void @DoubleLifetime3() {
; CHECK-EMPTY:
entry:
%a = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
- call void @llvm.lifetime.end.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
store i32 5, ptr %a
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
call void @llvm.memset.p0.i32(ptr %a, i8 1, i32 4, i1 false)
- call void @llvm.lifetime.end.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
ret void
}
@@ -1033,9 +1033,9 @@ define void @DoubleLifetime4() {
; CHECK-EMPTY:
entry:
%a = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
call void @llvm.memset.p0.i32(ptr %a, i8 1, i32 4, i1 false)
- call void @llvm.lifetime.end.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
call void @unknown_call(ptr %a)
ret void
}
@@ -1136,5 +1136,5 @@ entry:
ret ptr null
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
diff --git a/llvm/test/Analysis/ValueTracking/pr152700.ll b/llvm/test/Analysis/ValueTracking/pr152700.ll
new file mode 100644
index 0000000..91644c5
--- /dev/null
+++ b/llvm/test/Analysis/ValueTracking/pr152700.ll
@@ -0,0 +1,28 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt < %s -passes=instcombine -S | FileCheck %s
+
+declare noundef i32 @llvm.nvvm.read.ptx.sreg.nctaid.x()
+declare i32 @llvm.umin.i32(i32, i32)
+define i32 @foo(i1 %c, i32 %arg) {
+; CHECK-LABEL: define i32 @foo(
+; CHECK-SAME: i1 [[C:%.*]], i32 [[ARG:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[I:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.nctaid.x()
+; CHECK-NEXT: br i1 [[C]], label %[[BB_1:.*]], label %[[BB_2:.*]]
+; CHECK: [[BB_1]]:
+; CHECK-NEXT: br label %[[BB_2]]
+; CHECK: [[BB_2]]:
+; CHECK-NEXT: [[PHI:%.*]] = phi i32 [ [[I]], %[[ENTRY]] ], [ 0, %[[BB_1]] ]
+; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.umin.i32(i32 [[PHI]], i32 [[ARG]])
+; CHECK-NEXT: ret i32 [[RES]]
+;
+entry:
+ %i = call i32 @llvm.nvvm.read.ptx.sreg.nctaid.x()
+ br i1 %c, label %bb.1, label %bb.2
+bb.1:
+ br label %bb.2
+bb.2:
+ %phi = phi i32 [ %i, %entry ], [ 0, %bb.1 ]
+ %res = call i32 @llvm.umin.i32(i32 %phi, i32 %arg)
+ ret i32 %res
+}
diff --git a/llvm/test/Assembler/auto_upgrade_intrinsics.ll b/llvm/test/Assembler/auto_upgrade_intrinsics.ll
index d1b535b..37cb496 100644
--- a/llvm/test/Assembler/auto_upgrade_intrinsics.ll
+++ b/llvm/test/Assembler/auto_upgrade_intrinsics.ll
@@ -171,10 +171,10 @@ define void @tests.lifetime.start.end() {
; CHECK-LABEL: @tests.lifetime.start.end(
%a = alloca i8
call void @llvm.lifetime.start(i64 1, ptr %a)
- ; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %a)
+ ; CHECK: call void @llvm.lifetime.start.p0(ptr %a)
store i8 0, ptr %a
call void @llvm.lifetime.end(i64 1, ptr %a)
- ; CHECK: call void @llvm.lifetime.end.p0(i64 1, ptr %a)
+ ; CHECK: call void @llvm.lifetime.end.p0(ptr %a)
ret void
}
@@ -185,10 +185,10 @@ define void @tests.lifetime.start.end.unnamed() {
; CHECK-LABEL: @tests.lifetime.start.end.unnamed(
%a = alloca ptr
call void @llvm.lifetime.start.unnamed(i64 1, ptr %a)
- ; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %a)
+ ; CHECK: call void @llvm.lifetime.start.p0(ptr %a)
store ptr null, ptr %a
call void @llvm.lifetime.end.unnamed(i64 1, ptr %a)
- ; CHECK: call void @llvm.lifetime.end.p0(i64 1, ptr %a)
+ ; CHECK: call void @llvm.lifetime.end.p0(ptr %a)
ret void
}
@@ -220,5 +220,5 @@ define void @test.prefetch.unnamed(ptr %ptr) {
; emitted at the end.
; CHECK: declare i32 @llvm.objectsize.i32.p0
-; CHECK: declare void @llvm.lifetime.start.p0(i64 immarg, ptr captures(none))
-; CHECK: declare void @llvm.lifetime.end.p0(i64 immarg, ptr captures(none))
+; CHECK: declare void @llvm.lifetime.start.p0(ptr captures(none))
+; CHECK: declare void @llvm.lifetime.end.p0(ptr captures(none))
diff --git a/llvm/test/Assembler/autoupgrade-lifetime-intrinsics.ll b/llvm/test/Assembler/autoupgrade-lifetime-intrinsics.ll
index 00ab934..377c002 100644
--- a/llvm/test/Assembler/autoupgrade-lifetime-intrinsics.ll
+++ b/llvm/test/Assembler/autoupgrade-lifetime-intrinsics.ll
@@ -5,8 +5,8 @@ define void @strip_bitcast() {
; CHECK-LABEL: define void @strip_bitcast() {
; CHECK-NEXT: [[A:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[B:%.*]] = bitcast ptr [[A]] to ptr
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[A]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
; CHECK-NEXT: ret void
;
%a = alloca i8
@@ -20,8 +20,8 @@ define void @strip_addrspacecast() {
; CHECK-LABEL: define void @strip_addrspacecast() {
; CHECK-NEXT: [[A:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[B:%.*]] = addrspacecast ptr [[A]] to ptr addrspace(1)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[A]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
; CHECK-NEXT: ret void
;
%a = alloca i8
@@ -35,8 +35,8 @@ define void @strip_gep() {
; CHECK-LABEL: define void @strip_gep() {
; CHECK-NEXT: [[A:%.*]] = alloca [2 x i8], align 1
; CHECK-NEXT: [[B:%.*]] = getelementptr [2 x i8], ptr [[A]], i64 0, i64 0
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[A]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
; CHECK-NEXT: ret void
;
%a = alloca [2 x i8]
@@ -55,3 +55,8 @@ define void @remove_unanalyzable(ptr %p) {
call void @llvm.lifetime.end.p0(i64 1, ptr %p)
ret void
}
+
+declare void @llvm.lifetime.start.p0(i64, ptr)
+declare void @llvm.lifetime.end.p0(i64, ptr)
+declare void @llvm.lifetime.start.p1(i64, ptr addrspace(1))
+declare void @llvm.lifetime.end.p1(i64, ptr addrspace(1))
diff --git a/llvm/test/Assembler/ptrtoaddr-invalid-constexpr.ll b/llvm/test/Assembler/ptrtoaddr-invalid-constexpr.ll
new file mode 100644
index 0000000..665deff
--- /dev/null
+++ b/llvm/test/Assembler/ptrtoaddr-invalid-constexpr.ll
@@ -0,0 +1,56 @@
+;; Check all requirements on the ptrtoaddr constant expression operands
+;; Most of these invalid cases are detected at parse time but some are only
+;; detected at verification time (see Verifier::visitPtrToAddrInst())
+; RUN: rm -rf %t && split-file --leading-lines %s %t
+
+;--- src_vec_dst_no_vec.ll
+; RUN: not llvm-as %t/src_vec_dst_no_vec.ll -o /dev/null 2>&1 | FileCheck -check-prefix=SRC_VEC_DST_NO_VEC %s --implicit-check-not="error:"
+@g = global i64 ptrtoaddr (<2 x ptr> <ptr @g, ptr @g> to i64)
+; SRC_VEC_DST_NO_VEC: [[#@LINE-1]]:17: error: invalid cast opcode for cast from '<2 x ptr>' to 'i64'
+
+;--- src_no_vec_dst_vec.ll
+; RUN: not llvm-as %t/src_no_vec_dst_vec.ll -o /dev/null 2>&1 | FileCheck -check-prefix=SRC_NO_VEC_DST_VEC %s --implicit-check-not="error:"
+@g = global <2 x i64> ptrtoaddr (ptr @g to <2 x i64>)
+; SRC_NO_VEC_DST_VEC: [[#@LINE-1]]:23: error: invalid cast opcode for cast from 'ptr' to '<2 x i64>'
+
+;--- dst_not_int.ll
+; RUN: not llvm-as %t/dst_not_int.ll -o /dev/null 2>&1 | FileCheck -check-prefix=DST_NOT_INT %s --implicit-check-not="error:"
+@g = global float ptrtoaddr (ptr @g to float)
+; DST_NOT_INT: [[#@LINE-1]]:19: error: invalid cast opcode for cast from 'ptr' to 'float'
+
+;--- dst_not_int_vec.ll
+; RUN: not llvm-as %t/dst_not_int_vec.ll -o /dev/null 2>&1 | FileCheck -check-prefix=DST_NOT_INT_VEC %s --implicit-check-not="error:"
+@g = global <2 x float> ptrtoaddr (<2 x ptr> <ptr @g, ptr @g> to <2 x float>)
+; DST_NOT_INT_VEC: [[#@LINE-1]]:25: error: invalid cast opcode for cast from '<2 x ptr>' to '<2 x float>'
+
+;--- src_not_ptr.ll
+; RUN: not llvm-as %t/src_not_ptr.ll -o /dev/null 2>&1 | FileCheck -check-prefix=SRC_NOT_PTR %s --implicit-check-not="error:"
+@g = global i64 ptrtoaddr (i32 1 to i64)
+; SRC_NOT_PTR: [[#@LINE-1]]:17: error: invalid cast opcode for cast from 'i32' to 'i64'
+
+;--- src_not_ptr_vec.ll
+; RUN: not llvm-as %t/src_not_ptr_vec.ll -o /dev/null 2>&1 | FileCheck -check-prefix=SRC_NOT_PTR_VEC %s --implicit-check-not="error:"
+@g = global <2 x i64> ptrtoaddr (<2 x i32> <i32 1, i32 2> to <2 x i64>)
+; SRC_NOT_PTR_VEC: [[#@LINE-1]]:23: error: invalid cast opcode for cast from '<2 x i32>' to '<2 x i64>'
+
+;--- vec_src_fewer_elems.ll
+; RUN: not llvm-as %t/vec_src_fewer_elems.ll -o /dev/null 2>&1 | FileCheck -check-prefix=VEC_SRC_FEWER_ELEMS %s --implicit-check-not="error:"
+@g = global <4 x i64> ptrtoaddr (<2 x ptr> <ptr @g, ptr @g> to <4 x i64>)
+; VEC_SRC_FEWER_ELEMS: [[#@LINE-1]]:23: error: invalid cast opcode for cast from '<2 x ptr>' to '<4 x i64>'
+
+;--- vec_dst_fewer_elems.ll
+; RUN: not llvm-as %t/vec_dst_fewer_elems.ll -o /dev/null 2>&1 | FileCheck -check-prefix=VEC_DST_FEWER_ELEMS %s --implicit-check-not="error:"
+@g = global <2 x i64> ptrtoaddr (<4 x ptr> <ptr @g, ptr @g, ptr @g, ptr @g> to <2 x i64>)
+; VEC_DST_FEWER_ELEMS: [[#@LINE-1]]:23: error: invalid cast opcode for cast from '<4 x ptr>' to '<2 x i64>'
+
+;--- dst_not_addr_size.ll
+; The following invalid IR is caught by the verifier, not the parser:
+; RUN: llvm-as %t/dst_not_addr_size.ll --disable-output --disable-verify
+; RUN: not llvm-as %t/dst_not_addr_size.ll -o /dev/null 2>&1 | FileCheck -check-prefix=DST_NOT_ADDR_SIZE %s --implicit-check-not="error:"
+; DST_NOT_ADDR_SIZE: assembly parsed, but does not verify as correct!
+@g = global i32 ptrtoaddr (ptr @g to i32)
+; DST_NOT_ADDR_SIZE-NEXT: PtrToAddr result must be address width
+; DST_NOT_ADDR_SIZE-NEXT: i32 ptrtoaddr (ptr @g to i32)
+@g_vec = global <4 x i32> ptrtoaddr (<4 x ptr> <ptr @g, ptr @g, ptr @g, ptr @g> to <4 x i32>)
+; TODO: Verifier.cpp does not visit ConstantVector/ConstantStruct values
+; TODO-DST_NOT_ADDR_SIZE: PtrToAddr result must be address width
diff --git a/llvm/test/Assembler/ptrtoaddr-invalid.ll b/llvm/test/Assembler/ptrtoaddr-invalid.ll
new file mode 100644
index 0000000..dff787b
--- /dev/null
+++ b/llvm/test/Assembler/ptrtoaddr-invalid.ll
@@ -0,0 +1,84 @@
+;; Check all requirements on the ptrtoaddr instruction operands
+;; Most of these invalid cases are detected at parse time but some are only
+;; detected at verification time (see Verifier::visitPtrToAddrInst())
+; RUN: rm -rf %t && split-file --leading-lines %s %t
+
+;--- src_vec_dst_no_vec.ll
+; RUN: not llvm-as %t/src_vec_dst_no_vec.ll -o /dev/null 2>&1 | FileCheck -check-prefix=SRC_VEC_DST_NO_VEC %s --implicit-check-not="error:"
+define i64 @bad(<2 x ptr> %p) {
+ %addr = ptrtoaddr <2 x ptr> %p to i64
+ ; SRC_VEC_DST_NO_VEC: [[#@LINE-1]]:21: error: invalid cast opcode for cast from '<2 x ptr>' to 'i64'
+ ret i64 %addr
+}
+
+;--- src_no_vec_dst_vec.ll
+; RUN: not llvm-as %t/src_no_vec_dst_vec.ll -o /dev/null 2>&1 | FileCheck -check-prefix=SRC_NO_VEC_DST_VEC %s --implicit-check-not="error:"
+define <2 x i64> @bad(ptr %p) {
+ %addr = ptrtoaddr ptr %p to <2 x i64>
+ ; SRC_NO_VEC_DST_VEC: [[#@LINE-1]]:21: error: invalid cast opcode for cast from 'ptr' to '<2 x i64>'
+ ret <2 x i64> %addr
+}
+
+;--- dst_not_int.ll
+; RUN: not llvm-as %t/dst_not_int.ll -o /dev/null 2>&1 | FileCheck -check-prefix=DST_NOT_INT %s --implicit-check-not="error:"
+define float @bad(ptr %p) {
+ %addr = ptrtoaddr ptr %p to float
+ ; DST_NOT_INT: [[#@LINE-1]]:21: error: invalid cast opcode for cast from 'ptr' to 'float'
+ ret float %addr
+}
+
+;--- dst_not_int_vec.ll
+; RUN: not llvm-as %t/dst_not_int_vec.ll -o /dev/null 2>&1 | FileCheck -check-prefix=DST_NOT_INT_VEC %s --implicit-check-not="error:"
+define <2 x float> @bad(<2 x ptr> %p) {
+ %addr = ptrtoaddr <2 x ptr> %p to <2 x float>
+ ; DST_NOT_INT_VEC: [[#@LINE-1]]:21: error: invalid cast opcode for cast from '<2 x ptr>' to '<2 x float>'
+ ret <2 x float> %addr
+}
+
+;--- src_not_ptr.ll
+; RUN: not llvm-as %t/src_not_ptr.ll -o /dev/null 2>&1 | FileCheck -check-prefix=SRC_NOT_PTR %s --implicit-check-not="error:"
+define i64 @bad(i32 %p) {
+ %addr = ptrtoaddr i32 %p to i64
+ ; SRC_NOT_PTR: [[#@LINE-1]]:21: error: invalid cast opcode for cast from 'i32' to 'i64'
+ ret i64 %addr
+}
+
+;--- src_not_ptr_vec.ll
+; RUN: not llvm-as %t/src_not_ptr_vec.ll -o /dev/null 2>&1 | FileCheck -check-prefix=SRC_NOT_PTR_VEC %s --implicit-check-not="error:"
+define <2 x i64> @bad(<2 x i32> %p) {
+ %addr = ptrtoaddr <2 x i32> %p to <2 x i64>
+ ; SRC_NOT_PTR_VEC: [[#@LINE-1]]:21: error: invalid cast opcode for cast from '<2 x i32>' to '<2 x i64>'
+ ret <2 x i64> %addr
+}
+
+;--- vec_src_fewer_elems.ll
+; RUN: not llvm-as %t/vec_src_fewer_elems.ll -o /dev/null 2>&1 | FileCheck -check-prefix=VEC_SRC_FEWER_ELEMS %s --implicit-check-not="error:"
+define <4 x i64> @bad(<2 x ptr> %p) {
+ %addr = ptrtoaddr <2 x ptr> %p to <4 x i64>
+ ; VEC_SRC_FEWER_ELEMS: [[#@LINE-1]]:21: error: invalid cast opcode for cast from '<2 x ptr>' to '<4 x i64>'
+ ret <4 x i64> %addr
+}
+
+;--- vec_dst_fewer_elems.ll
+; RUN: not llvm-as %t/vec_dst_fewer_elems.ll -o /dev/null 2>&1 | FileCheck -check-prefix=VEC_DST_FEWER_ELEMS %s --implicit-check-not="error:"
+define <2 x i64> @bad(<4 x ptr> %p) {
+ %addr = ptrtoaddr <4 x ptr> %p to <2 x i64>
+ ; VEC_DST_FEWER_ELEMS: [[#@LINE-1]]:21: error: invalid cast opcode for cast from '<4 x ptr>' to '<2 x i64>'
+ ret <2 x i64> %addr
+}
+
+;--- dst_not_addr_size.ll
+; The following invalid IR is caught by the verifier, not the parser:
+; RUN: llvm-as %t/dst_not_addr_size.ll --disable-output --disable-verify
+; RUN: not llvm-as %t/dst_not_addr_size.ll -o /dev/null 2>&1 | FileCheck -check-prefix=DST_NOT_ADDR_SIZE %s --implicit-check-not="error:"
+; DST_NOT_ADDR_SIZE: assembly parsed, but does not verify as correct!
+define i32 @bad(ptr %p) {
+ %addr = ptrtoaddr ptr %p to i32
+ ; DST_NOT_ADDR_SIZE: PtrToAddr result must be address width
+ ret i32 %addr
+}
+define <4 x i32> @bad_vec(<4 x ptr> %p) {
+ %addr = ptrtoaddr <4 x ptr> %p to <4 x i32>
+ ; DST_NOT_ADDR_SIZE: PtrToAddr result must be address width
+ ret <4 x i32> %addr
+}
diff --git a/llvm/test/Assembler/ptrtoaddr.ll b/llvm/test/Assembler/ptrtoaddr.ll
new file mode 100644
index 0000000..f21410b
--- /dev/null
+++ b/llvm/test/Assembler/ptrtoaddr.ll
@@ -0,0 +1,27 @@
+; RUN: llvm-as < %s | llvm-dis | FileCheck %s
+target datalayout = "p1:64:64:64:32"
+
+@i_as0 = global i32 0
+@global_cast_as0 = global i64 ptrtoaddr (ptr @i_as0 to i64)
+; CHECK: @global_cast_as0 = global i64 ptrtoaddr (ptr @i_as0 to i64)
+@i_as1 = addrspace(1) global i32 0
+@global_cast_as1 = global i32 ptrtoaddr (ptr addrspace(1) @i_as1 to i32)
+; CHECK: @global_cast_as1 = global i32 ptrtoaddr (ptr addrspace(1) @i_as1 to i32)
+
+define i64 @test_as0(ptr %p) {
+ %addr = ptrtoaddr ptr %p to i64
+ ; CHECK: %addr = ptrtoaddr ptr %p to i64
+ ret i64 %addr
+}
+
+define i32 @test_as1(ptr addrspace(1) %p) {
+ %addr = ptrtoaddr ptr addrspace(1) %p to i32
+ ; CHECK: %addr = ptrtoaddr ptr addrspace(1) %p to i32
+ ret i32 %addr
+}
+
+define <2 x i32> @test_vec_as1(<2 x ptr addrspace(1)> %p) {
+ %addr = ptrtoaddr <2 x ptr addrspace(1)> %p to <2 x i32>
+ ; CHECK: %addr = ptrtoaddr <2 x ptr addrspace(1)> %p to <2 x i32>
+ ret <2 x i32> %addr
+}
diff --git a/llvm/test/Bitcode/ptrtoaddr.ll b/llvm/test/Bitcode/ptrtoaddr.ll
new file mode 100644
index 0000000..6c5fed2
--- /dev/null
+++ b/llvm/test/Bitcode/ptrtoaddr.ll
@@ -0,0 +1,27 @@
+; RUN: llvm-as < %s | llvm-dis | llvm-as | llvm-dis | FileCheck %s
+target datalayout = "p1:64:64:64:32"
+
+@i_as0 = global i32 0
+@global_cast_as0 = global i64 ptrtoaddr (ptr @i_as0 to i64)
+; CHECK: @global_cast_as0 = global i64 ptrtoaddr (ptr @i_as0 to i64)
+@i_as1 = addrspace(1) global i32 0
+@global_cast_as1 = global i32 ptrtoaddr (ptr addrspace(1) @i_as1 to i32)
+; CHECK: @global_cast_as1 = global i32 ptrtoaddr (ptr addrspace(1) @i_as1 to i32)
+
+define i64 @test_as0(ptr %p) {
+ %addr = ptrtoaddr ptr %p to i64
+ ; CHECK: %addr = ptrtoaddr ptr %p to i64
+ ret i64 %addr
+}
+
+define i32 @test_as1(ptr addrspace(1) %p) {
+ %addr = ptrtoaddr ptr addrspace(1) %p to i32
+ ; CHECK: %addr = ptrtoaddr ptr addrspace(1) %p to i32
+ ret i32 %addr
+}
+
+define <2 x i32> @test_vec_as1(<2 x ptr addrspace(1)> %p) {
+ %addr = ptrtoaddr <2 x ptr addrspace(1)> %p to <2 x i32>
+ ; CHECK: %addr = ptrtoaddr <2 x ptr addrspace(1)> %p to <2 x i32>
+ ret <2 x i32> %addr
+}
diff --git a/llvm/test/CMakeLists.txt b/llvm/test/CMakeLists.txt
index 3042b8f..b46f482 100644
--- a/llvm/test/CMakeLists.txt
+++ b/llvm/test/CMakeLists.txt
@@ -30,7 +30,6 @@ llvm_canonicalize_cmake_booleans(
LLVM_INCLUDE_SPIRV_TOOLS_TESTS
LLVM_APPEND_VC_REV
LLVM_HAS_LOGF128
- LLVM_EXPERIMENTAL_KEY_INSTRUCTIONS
)
configure_lit_site_cfg(
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/vararg.mir b/llvm/test/CodeGen/AArch64/GlobalISel/vararg.mir
index 437a9e6..3f14162 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/vararg.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/vararg.mir
@@ -10,7 +10,7 @@
define i32 @va_start(ptr %a, ...) {
entry:
%ap = alloca %struct.__va_list, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %ap)
+ call void @llvm.lifetime.start.p0(ptr %ap)
call void @llvm.va_start.p0(ptr %ap)
%vr_offs_p = getelementptr inbounds i8, ptr %ap, i64 28
%vr_offs = load i32, ptr %vr_offs_p, align 4
diff --git a/llvm/test/CodeGen/AArch64/aarch64-histcnt-dag-combine-hang.ll b/llvm/test/CodeGen/AArch64/aarch64-histcnt-dag-combine-hang.ll
new file mode 100644
index 0000000..da04c67
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/aarch64-histcnt-dag-combine-hang.ll
@@ -0,0 +1,70 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mattr=+sve2 -verify-machineinstrs < %s -o - | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+; This test is reduced from a real-world example that would cause the DAGCombiner to hang.
+
+define void @histcnt_loop(ptr %0, i64 %1, ptr %2, i64 %3, i64 %4) {
+; CHECK-LABEL: histcnt_loop:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov z0.d, #1 // =0x1
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: mov x8, xzr
+; CHECK-NEXT: add x9, x0, x1
+; CHECK-NEXT: .LBB0_1: // %loop
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: ld1h { z1.d }, p0/z, [x0, x8, lsl #1]
+; CHECK-NEXT: lsl x10, x8, #1
+; CHECK-NEXT: add x11, x0, x10
+; CHECK-NEXT: add x10, x9, x10
+; CHECK-NEXT: lsl z1.d, z1.d, #1
+; CHECK-NEXT: ld1h { z4.d }, p0/z, [x11, #1, mul vl]
+; CHECK-NEXT: ld1h { z5.d }, p0/z, [x10, #1, mul vl]
+; CHECK-NEXT: histcnt z2.d, p0/z, z1.d, z1.d
+; CHECK-NEXT: ld1h { z3.d }, p0/z, [x2, z1.d]
+; CHECK-NEXT: mad z2.d, p0/m, z0.d, z3.d
+; CHECK-NEXT: ld1h { z3.d }, p0/z, [x9, x8, lsl #1]
+; CHECK-NEXT: add x8, x8, x3
+; CHECK-NEXT: cmp x4, x8
+; CHECK-NEXT: st1h { z2.d }, p0, [x2, z1.d]
+; CHECK-NEXT: lsl z1.d, z4.d, #1
+; CHECK-NEXT: histcnt z2.d, p0/z, z1.d, z1.d
+; CHECK-NEXT: ld1h { z4.d }, p0/z, [x2, z1.d]
+; CHECK-NEXT: mad z2.d, p0/m, z0.d, z4.d
+; CHECK-NEXT: st1h { z2.d }, p0, [x2, z1.d]
+; CHECK-NEXT: lsl z1.d, z3.d, #1
+; CHECK-NEXT: histcnt z2.d, p0/z, z1.d, z1.d
+; CHECK-NEXT: ld1h { z3.d }, p0/z, [x2, z1.d]
+; CHECK-NEXT: mad z2.d, p0/m, z0.d, z3.d
+; CHECK-NEXT: st1h { z2.d }, p0, [x2, z1.d]
+; CHECK-NEXT: lsl z1.d, z5.d, #1
+; CHECK-NEXT: histcnt z2.d, p0/z, z1.d, z1.d
+; CHECK-NEXT: ld1h { z3.d }, p0/z, [x2, z1.d]
+; CHECK-NEXT: mad z2.d, p0/m, z0.d, z3.d
+; CHECK-NEXT: st1h { z2.d }, p0, [x2, z1.d]
+; CHECK-NEXT: b.ne .LBB0_1
+; CHECK-NEXT: // %bb.2: // %exit
+; CHECK-NEXT: ret
+entry:
+ br label %loop
+
+loop:
+ %6 = phi i64 [ 0, %entry ], [ %15, %loop ]
+ %7 = getelementptr inbounds nuw i16, ptr %0, i64 %6
+ %8 = getelementptr inbounds nuw i8, ptr %7, i64 %1
+ %9 = load <vscale x 4 x i16>, ptr %7, align 2
+ %10 = load <vscale x 4 x i16>, ptr %8, align 2
+ %11 = zext <vscale x 4 x i16> %9 to <vscale x 4 x i64>
+ %12 = zext <vscale x 4 x i16> %10 to <vscale x 4 x i64>
+ %13 = getelementptr inbounds nuw [16 x i16], ptr %2, i64 0, <vscale x 4 x i64> %11
+ %14 = getelementptr inbounds nuw [16 x i16], ptr %2, i64 0, <vscale x 4 x i64> %12
+ call void @llvm.experimental.vector.histogram.add.nxv4p0.i16(<vscale x 4 x ptr> %13, i16 1, <vscale x 4 x i1> splat (i1 true))
+ call void @llvm.experimental.vector.histogram.add.nxv4p0.i16(<vscale x 4 x ptr> %14, i16 1, <vscale x 4 x i1> splat (i1 true))
+ %15 = add nuw i64 %6, %3
+ %16 = icmp eq i64 %15, %4
+ br i1 %16, label %exit, label %loop
+
+exit:
+ ret void
+}
diff --git a/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll b/llvm/test/CodeGen/AArch64/aarch64-split-logic-bitmask-immediate.ll
index 113eb14..4db9db9 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-split-logic-bitmask-immediate.ll
@@ -370,3 +370,175 @@ entry:
%r = select i1 %c, i64 %a, i64 %ands
ret i64 %r
}
+
+; Test EOR.
+define i32 @test1_eor(i32 %a) {
+; CHECK-LABEL: test1_eor:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: eor w8, w0, #0x400
+; CHECK-NEXT: eor w0, w8, #0x200000
+; CHECK-NEXT: ret
+entry:
+ %eor = xor i32 %a, 2098176
+ ret i32 %eor
+}
+
+; This constant should not be split because it can be handled by one mov.
+define i32 @test2_eor(i32 %a) {
+; CHECK-LABEL: test2_eor:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #135 // =0x87
+; CHECK-NEXT: eor w0, w0, w8
+; CHECK-NEXT: ret
+entry:
+ %eor = xor i32 %a, 135
+ ret i32 %eor
+}
+
+; This constant should not be split because the split immediate is not a valid
+; bitmask immediate.
+define i32 @test3_eor(i32 %a) {
+; CHECK-LABEL: test3_eor:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1024 // =0x400
+; CHECK-NEXT: movk w8, #33, lsl #16
+; CHECK-NEXT: eor w0, w0, w8
+; CHECK-NEXT: ret
+entry:
+ %eor = xor i32 %a, 2163712
+ ret i32 %eor
+}
+
+define i64 @test4_eor(i64 %a) {
+; CHECK-LABEL: test4_eor:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: eor x8, x0, #0x400
+; CHECK-NEXT: eor x0, x8, #0x200000
+; CHECK-NEXT: ret
+entry:
+ %eor = xor i64 %a, 2098176
+ ret i64 %eor
+}
+
+define i64 @test5_eor(i64 %a) {
+; CHECK-LABEL: test5_eor:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: eor x8, x0, #0x4000
+; CHECK-NEXT: eor x0, x8, #0x200000000
+; CHECK-NEXT: ret
+entry:
+ %eor = xor i64 %a, 8589950976
+ ret i64 %eor
+}
+
+; This constant should not be split because it can be handled by one mov.
+define i64 @test6_eor(i64 %a) {
+; CHECK-LABEL: test6_eor:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #135 // =0x87
+; CHECK-NEXT: eor x0, x0, x8
+; CHECK-NEXT: ret
+entry:
+ %eor = xor i64 %a, 135
+ ret i64 %eor
+}
+
+; This constant should not be split because the split immediate is not a valid
+; bitmask immediate.
+define i64 @test7_eor(i64 %a) {
+; CHECK-LABEL: test7_eor:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1024 // =0x400
+; CHECK-NEXT: movk w8, #33, lsl #16
+; CHECK-NEXT: eor x0, x0, x8
+; CHECK-NEXT: ret
+entry:
+ %eor = xor i64 %a, 2163712
+ ret i64 %eor
+}
+
+; Test ORR.
+define i32 @test1_orr(i32 %a) {
+; CHECK-LABEL: test1_orr:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: orr w8, w0, #0x400
+; CHECK-NEXT: orr w0, w8, #0x200000
+; CHECK-NEXT: ret
+entry:
+ %orr = or i32 %a, 2098176
+ ret i32 %orr
+}
+
+; This constant should not be split because it can be handled by one mov.
+define i32 @test2_orr(i32 %a) {
+; CHECK-LABEL: test2_orr:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #135 // =0x87
+; CHECK-NEXT: orr w0, w0, w8
+; CHECK-NEXT: ret
+entry:
+ %orr = or i32 %a, 135
+ ret i32 %orr
+}
+
+; This constant should not be split because the split immediate is not a valid
+; bitmask immediate.
+define i32 @test3_orr(i32 %a) {
+; CHECK-LABEL: test3_orr:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1024 // =0x400
+; CHECK-NEXT: movk w8, #33, lsl #16
+; CHECK-NEXT: orr w0, w0, w8
+; CHECK-NEXT: ret
+entry:
+ %orr = or i32 %a, 2163712
+ ret i32 %orr
+}
+
+define i64 @test4_orr(i64 %a) {
+; CHECK-LABEL: test4_orr:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: orr x8, x0, #0x400
+; CHECK-NEXT: orr x0, x8, #0x200000
+; CHECK-NEXT: ret
+entry:
+ %orr = or i64 %a, 2098176
+ ret i64 %orr
+}
+
+define i64 @test5_orr(i64 %a) {
+; CHECK-LABEL: test5_orr:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: orr x8, x0, #0x4000
+; CHECK-NEXT: orr x0, x8, #0x200000000
+; CHECK-NEXT: ret
+entry:
+ %orr = or i64 %a, 8589950976
+ ret i64 %orr
+}
+
+; This constant should not be split because it can be handled by one mov.
+define i64 @test6_orr(i64 %a) {
+; CHECK-LABEL: test6_orr:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #135 // =0x87
+; CHECK-NEXT: orr x0, x0, x8
+; CHECK-NEXT: ret
+entry:
+ %orr = or i64 %a, 135
+ ret i64 %orr
+}
+
+; This constant should not be split because the split immediate is not a valid
+; bitmask immediate.
+define i64 @test7_orr(i64 %a) {
+; CHECK-LABEL: test7_orr:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1024 // =0x400
+; CHECK-NEXT: movk w8, #33, lsl #16
+; CHECK-NEXT: orr x0, x0, x8
+; CHECK-NEXT: ret
+entry:
+ %orr = or i64 %a, 2163712
+ ret i64 %orr
+}
diff --git a/llvm/test/CodeGen/AArch64/abd-combine.ll b/llvm/test/CodeGen/AArch64/abd-combine.ll
index d025789..cdb40ce 100644
--- a/llvm/test/CodeGen/AArch64/abd-combine.ll
+++ b/llvm/test/CodeGen/AArch64/abd-combine.ll
@@ -17,12 +17,9 @@ define <8 x i16> @abdu_base(<8 x i16> %src1, <8 x i16> %src2) {
define <8 x i16> @abdu_const(<8 x i16> %src1) {
; CHECK-LABEL: abdu_const:
; CHECK: // %bb.0:
-; CHECK-NEXT: movi v1.4s, #1
-; CHECK-NEXT: ushll v2.4s, v0.4h, #0
-; CHECK-NEXT: ushll2 v0.4s, v0.8h, #0
-; CHECK-NEXT: uabd v0.4s, v0.4s, v1.4s
-; CHECK-NEXT: uabd v1.4s, v2.4s, v1.4s
-; CHECK-NEXT: uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NEXT: movi v1.4h, #1
+; CHECK-NEXT: mov v1.d[1], v1.d[0]
+; CHECK-NEXT: uabd v0.8h, v0.8h, v1.8h
; CHECK-NEXT: ret
%zextsrc1 = zext <8 x i16> %src1 to <8 x i32>
%sub = sub <8 x i32> %zextsrc1, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
@@ -34,12 +31,9 @@ define <8 x i16> @abdu_const(<8 x i16> %src1) {
define <8 x i16> @abdu_const_lhs(<8 x i16> %src1) {
; CHECK-LABEL: abdu_const_lhs:
; CHECK: // %bb.0:
-; CHECK-NEXT: movi v1.4s, #1
-; CHECK-NEXT: ushll v2.4s, v0.4h, #0
-; CHECK-NEXT: ushll2 v0.4s, v0.8h, #0
-; CHECK-NEXT: uabd v0.4s, v0.4s, v1.4s
-; CHECK-NEXT: uabd v1.4s, v2.4s, v1.4s
-; CHECK-NEXT: uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NEXT: movi v1.4h, #1
+; CHECK-NEXT: mov v1.d[1], v1.d[0]
+; CHECK-NEXT: uabd v0.8h, v0.8h, v1.8h
; CHECK-NEXT: ret
%zextsrc1 = zext <8 x i16> %src1 to <8 x i32>
%sub = sub <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>, %zextsrc1
@@ -318,12 +312,9 @@ define <8 x i16> @abds_base(<8 x i16> %src1, <8 x i16> %src2) {
define <8 x i16> @abds_const(<8 x i16> %src1) {
; CHECK-LABEL: abds_const:
; CHECK: // %bb.0:
-; CHECK-NEXT: movi v1.4s, #1
-; CHECK-NEXT: sshll v2.4s, v0.4h, #0
-; CHECK-NEXT: sshll2 v0.4s, v0.8h, #0
-; CHECK-NEXT: sabd v0.4s, v0.4s, v1.4s
-; CHECK-NEXT: sabd v1.4s, v2.4s, v1.4s
-; CHECK-NEXT: uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NEXT: movi v1.4h, #1
+; CHECK-NEXT: mov v1.d[1], v1.d[0]
+; CHECK-NEXT: sabd v0.8h, v0.8h, v1.8h
; CHECK-NEXT: ret
%zextsrc1 = sext <8 x i16> %src1 to <8 x i32>
%sub = sub <8 x i32> %zextsrc1, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
@@ -335,12 +326,9 @@ define <8 x i16> @abds_const(<8 x i16> %src1) {
define <8 x i16> @abds_const_lhs(<8 x i16> %src1) {
; CHECK-LABEL: abds_const_lhs:
; CHECK: // %bb.0:
-; CHECK-NEXT: movi v1.4s, #1
-; CHECK-NEXT: sshll v2.4s, v0.4h, #0
-; CHECK-NEXT: sshll2 v0.4s, v0.8h, #0
-; CHECK-NEXT: sabd v0.4s, v0.4s, v1.4s
-; CHECK-NEXT: sabd v1.4s, v2.4s, v1.4s
-; CHECK-NEXT: uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NEXT: movi v1.4h, #1
+; CHECK-NEXT: mov v1.d[1], v1.d[0]
+; CHECK-NEXT: sabd v0.8h, v0.8h, v1.8h
; CHECK-NEXT: ret
%zextsrc1 = sext <8 x i16> %src1 to <8 x i32>
%sub = sub <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>, %zextsrc1
@@ -352,11 +340,10 @@ define <8 x i16> @abds_const_lhs(<8 x i16> %src1) {
define <8 x i16> @abds_const_zero(<8 x i16> %src1) {
; CHECK-LABEL: abds_const_zero:
; CHECK: // %bb.0:
-; CHECK-NEXT: sshll v1.4s, v0.4h, #0
-; CHECK-NEXT: sshll2 v0.4s, v0.8h, #0
-; CHECK-NEXT: abs v0.4s, v0.4s
-; CHECK-NEXT: abs v1.4s, v1.4s
-; CHECK-NEXT: uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT: abs v0.4h, v0.4h
+; CHECK-NEXT: abs v1.4h, v1.4h
+; CHECK-NEXT: mov v0.d[1], v1.d[0]
; CHECK-NEXT: ret
%zextsrc1 = sext <8 x i16> %src1 to <8 x i32>
%sub = sub <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>, %zextsrc1
diff --git a/llvm/test/CodeGen/AArch64/arm64-neon-3vdiff.ll b/llvm/test/CodeGen/AArch64/arm64-neon-3vdiff.ll
index 256ff94..9a1b6a0 100644
--- a/llvm/test/CodeGen/AArch64/arm64-neon-3vdiff.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-neon-3vdiff.ll
@@ -70,6 +70,23 @@ entry:
ret <2 x i64> %add.i
}
+define void @test_commutable_vaddl_s8(<8 x i8> %a, <8 x i8> %b, ptr %c) {
+; CHECK-LABEL: test_commutable_vaddl_s8:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: saddl v0.8h, v0.8b, v1.8b
+; CHECK-NEXT: stp q0, q0, [x0]
+; CHECK-NEXT: ret
+entry:
+ %vmovl.i.i = sext <8 x i8> %a to <8 x i16>
+ %vmovl.i2.i = sext <8 x i8> %b to <8 x i16>
+ %add.i = add <8 x i16> %vmovl.i.i, %vmovl.i2.i
+ store <8 x i16> %add.i, ptr %c
+ %add.i2 = add <8 x i16> %vmovl.i2.i, %vmovl.i.i
+ %c.gep.1 = getelementptr i8, ptr %c, i64 16
+ store <8 x i16> %add.i2, ptr %c.gep.1
+ ret void
+}
+
define <8 x i16> @test_vaddl_u8(<8 x i8> %a, <8 x i8> %b) {
; CHECK-LABEL: test_vaddl_u8:
; CHECK: // %bb.0: // %entry
@@ -106,6 +123,23 @@ entry:
ret <2 x i64> %add.i
}
+define void @test_commutable_vaddl_u8(<8 x i8> %a, <8 x i8> %b, ptr %c) {
+; CHECK-LABEL: test_commutable_vaddl_u8:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: uaddl v0.8h, v0.8b, v1.8b
+; CHECK-NEXT: stp q0, q0, [x0]
+; CHECK-NEXT: ret
+entry:
+ %vmovl.i.i = zext <8 x i8> %a to <8 x i16>
+ %vmovl.i2.i = zext <8 x i8> %b to <8 x i16>
+ %add.i = add <8 x i16> %vmovl.i.i, %vmovl.i2.i
+ store <8 x i16> %add.i, ptr %c
+ %add.i2 = add <8 x i16> %vmovl.i2.i, %vmovl.i.i
+ %c.gep.1 = getelementptr i8, ptr %c, i64 16
+ store <8 x i16> %add.i2, ptr %c.gep.1
+ ret void
+}
+
define <8 x i16> @test_vaddl_a8(<8 x i8> %a, <8 x i8> %b) {
; CHECK-SD-LABEL: test_vaddl_a8:
; CHECK-SD: // %bb.0: // %entry
@@ -2892,9 +2926,9 @@ define <8 x i16> @cmplx_mul_combined_re_im(<8 x i16> noundef %a, i64 %scale.coer
; CHECK-GI-LABEL: cmplx_mul_combined_re_im:
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: lsr x9, x0, #16
-; CHECK-GI-NEXT: adrp x8, .LCPI196_0
+; CHECK-GI-NEXT: adrp x8, .LCPI198_0
; CHECK-GI-NEXT: rev32 v4.8h, v0.8h
-; CHECK-GI-NEXT: ldr q3, [x8, :lo12:.LCPI196_0]
+; CHECK-GI-NEXT: ldr q3, [x8, :lo12:.LCPI198_0]
; CHECK-GI-NEXT: fmov d1, x9
; CHECK-GI-NEXT: dup v2.8h, v1.h[0]
; CHECK-GI-NEXT: sqneg v1.8h, v2.8h
diff --git a/llvm/test/CodeGen/AArch64/arm64-neon-aba-abd.ll b/llvm/test/CodeGen/AArch64/arm64-neon-aba-abd.ll
index 6c7ddd9..ccd1917 100644
--- a/llvm/test/CodeGen/AArch64/arm64-neon-aba-abd.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-neon-aba-abd.ll
@@ -575,3 +575,69 @@ define <4 x i32> @knownbits_sabd_and_mul_mask(<4 x i32> %a0, <4 x i32> %a1) {
%6 = shufflevector <4 x i32> %5, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 3, i32 3>
ret <4 x i32> %6
}
+
+define <4 x i16> @trunc_abdu_foldable(<4 x i16> %a, <4 x i16> %b) {
+; CHECK-SD-LABEL: trunc_abdu_foldable:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: uabd v0.4h, v0.4h, v1.4h
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: trunc_abdu_foldable:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-GI-NEXT: ushll v1.4s, v1.4h, #0
+; CHECK-GI-NEXT: uabd v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT: xtn v0.4h, v0.4s
+; CHECK-GI-NEXT: ret
+ %ext_a = zext <4 x i16> %a to <4 x i32>
+ %ext_b = zext <4 x i16> %b to <4 x i32>
+ %abd = call <4 x i32> @llvm.aarch64.neon.uabd.v4i32(<4 x i32> %ext_a, <4 x i32> %ext_b)
+ %trunc = trunc <4 x i32> %abd to <4 x i16>
+ ret <4 x i16> %trunc
+}
+
+define <4 x i16> @trunc_abds_foldable(<4 x i16> %a, <4 x i16> %b) {
+; CHECK-SD-LABEL: trunc_abds_foldable:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sabd v0.4h, v0.4h, v1.4h
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: trunc_abds_foldable:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: sshll v0.4s, v0.4h, #0
+; CHECK-GI-NEXT: sshll v1.4s, v1.4h, #0
+; CHECK-GI-NEXT: sabd v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT: xtn v0.4h, v0.4s
+; CHECK-GI-NEXT: ret
+ %a32 = sext <4 x i16> %a to <4 x i32>
+ %b32 = sext <4 x i16> %b to <4 x i32>
+ %abd32 = call <4 x i32> @llvm.aarch64.neon.sabd.v4i32(<4 x i32> %a32, <4 x i32> %b32)
+ %res16 = trunc <4 x i32> %abd32 to <4 x i16>
+ ret <4 x i16> %res16
+}
+
+define <4 x i16> @trunc_abdu_not_foldable(<4 x i16> %a, <4 x i32> %b) {
+; CHECK-LABEL: trunc_abdu_not_foldable:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-NEXT: uabd v0.4s, v0.4s, v1.4s
+; CHECK-NEXT: xtn v0.4h, v0.4s
+; CHECK-NEXT: ret
+ %ext_a = zext <4 x i16> %a to <4 x i32>
+ %abd = call <4 x i32> @llvm.aarch64.neon.uabd.v4i32(<4 x i32> %ext_a, <4 x i32> %b)
+ %trunc = trunc <4 x i32> %abd to <4 x i16>
+ ret <4 x i16> %trunc
+}
+
+define <4 x i16> @truncate_abds_testcase1(<4 x i16> %a, <4 x i32> %b) {
+; CHECK-LABEL: truncate_abds_testcase1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sshll v0.4s, v0.4h, #0
+; CHECK-NEXT: sabd v0.4s, v0.4s, v1.4s
+; CHECK-NEXT: xtn v0.4h, v0.4s
+; CHECK-NEXT: ret
+ %a32 = sext <4 x i16> %a to <4 x i32>
+ %abd32 = call <4 x i32> @llvm.aarch64.neon.sabd.v4i32(<4 x i32> %a32, <4 x i32> %b)
+ %res16 = trunc <4 x i32> %abd32 to <4 x i16>
+ ret <4 x i16> %res16
+}
diff --git a/llvm/test/CodeGen/AArch64/arm64-neon-mul-div.ll b/llvm/test/CodeGen/AArch64/arm64-neon-mul-div.ll
index ecf3f69..0d427c0 100644
--- a/llvm/test/CodeGen/AArch64/arm64-neon-mul-div.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-neon-mul-div.ll
@@ -1608,6 +1608,18 @@ define <16 x i8> @poly_mulv16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
ret <16 x i8> %prod
}
+define <16 x i8> @commutable_poly_mul(<16 x i8> %lhs, <16 x i8> %rhs) {
+; CHECK-LABEL: commutable_poly_mul:
+; CHECK: // %bb.0:
+; CHECK-NEXT: pmul v0.16b, v0.16b, v1.16b
+; CHECK-NEXT: add v0.16b, v0.16b, v0.16b
+; CHECK-NEXT: ret
+ %1 = call <16 x i8> @llvm.aarch64.neon.pmul.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
+ %2 = call <16 x i8> @llvm.aarch64.neon.pmul.v16i8(<16 x i8> %rhs, <16 x i8> %lhs)
+ %3 = add <16 x i8> %1, %2
+ ret <16 x i8> %3
+}
+
declare <4 x i16> @llvm.aarch64.neon.sqdmulh.v4i16(<4 x i16>, <4 x i16>)
declare <8 x i16> @llvm.aarch64.neon.sqdmulh.v8i16(<8 x i16>, <8 x i16>)
declare <2 x i32> @llvm.aarch64.neon.sqdmulh.v2i32(<2 x i32>, <2 x i32>)
diff --git a/llvm/test/CodeGen/AArch64/arm64-vabs.ll b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
index 78881c8..ede5a7c 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vabs.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
@@ -44,6 +44,35 @@ define <2 x i64> @sabdl2d(ptr %A, ptr %B) nounwind {
ret <2 x i64> %tmp4
}
+define void @commutable_sabdl(ptr %A, ptr %B, ptr %C) nounwind {
+; CHECK-SD-LABEL: commutable_sabdl:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: ldr d0, [x0]
+; CHECK-SD-NEXT: ldr d1, [x1]
+; CHECK-SD-NEXT: sabdl.8h v0, v1, v0
+; CHECK-SD-NEXT: str q0, [x2]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: commutable_sabdl:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: ldr d0, [x0]
+; CHECK-GI-NEXT: ldr d1, [x1]
+; CHECK-GI-NEXT: sabdl.8h v0, v0, v1
+; CHECK-GI-NEXT: str q0, [x2]
+; CHECK-GI-NEXT: str q0, [x2]
+; CHECK-GI-NEXT: ret
+ %tmp1 = load <8 x i8>, ptr %A
+ %tmp2 = load <8 x i8>, ptr %B
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ %tmp4 = zext <8 x i8> %tmp3 to <8 x i16>
+ store <8 x i16> %tmp4, ptr %C
+ %tmp5 = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %tmp2, <8 x i8> %tmp1)
+ %tmp6 = zext <8 x i8> %tmp5 to <8 x i16>
+ %tmp7 = getelementptr i8, ptr %C, i64 16
+ store <8 x i16> %tmp6, ptr %C
+ ret void
+}
+
define <8 x i16> @sabdl2_8h(ptr %A, ptr %B) nounwind {
; CHECK-SD-LABEL: sabdl2_8h:
; CHECK-SD: // %bb.0:
@@ -155,6 +184,35 @@ define <2 x i64> @uabdl2d(ptr %A, ptr %B) nounwind {
ret <2 x i64> %tmp4
}
+define void @commutable_uabdl(ptr %A, ptr %B, ptr %C) nounwind {
+; CHECK-SD-LABEL: commutable_uabdl:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: ldr d0, [x0]
+; CHECK-SD-NEXT: ldr d1, [x1]
+; CHECK-SD-NEXT: uabdl.8h v0, v1, v0
+; CHECK-SD-NEXT: str q0, [x2]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: commutable_uabdl:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: ldr d0, [x0]
+; CHECK-GI-NEXT: ldr d1, [x1]
+; CHECK-GI-NEXT: uabdl.8h v0, v0, v1
+; CHECK-GI-NEXT: str q0, [x2]
+; CHECK-GI-NEXT: str q0, [x2]
+; CHECK-GI-NEXT: ret
+ %tmp1 = load <8 x i8>, ptr %A
+ %tmp2 = load <8 x i8>, ptr %B
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ %tmp4 = zext <8 x i8> %tmp3 to <8 x i16>
+ store <8 x i16> %tmp4, ptr %C
+ %tmp5 = call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %tmp2, <8 x i8> %tmp1)
+ %tmp6 = zext <8 x i8> %tmp5 to <8 x i16>
+ %tmp7 = getelementptr i8, ptr %C, i64 16
+ store <8 x i16> %tmp6, ptr %C
+ ret void
+}
+
define <8 x i16> @uabdl2_8h(ptr %A, ptr %B) nounwind {
; CHECK-SD-LABEL: uabdl2_8h:
; CHECK-SD: // %bb.0:
diff --git a/llvm/test/CodeGen/AArch64/arm64-vmul.ll b/llvm/test/CodeGen/AArch64/arm64-vmul.ll
index 07400bb..d12f7ce 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vmul.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vmul.ll
@@ -3,6 +3,7 @@
; RUN: llc -mtriple=aarch64-none-elf -mattr=+aes -global-isel -global-isel-abort=2 2>&1 < %s | FileCheck %s --check-prefixes=CHECK,CHECK-GI
; CHECK-GI: warning: Instruction selection used fallback path for pmull8h
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for commutable_pmull8h
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sqdmulh_1s
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_2s
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_4s
@@ -78,6 +79,20 @@ define <2 x i64> @smull2d(ptr %A, ptr %B) nounwind {
ret <2 x i64> %tmp3
}
+define void @commutable_smull(<2 x i32> %A, <2 x i32> %B, ptr %C) {
+; CHECK-LABEL: commutable_smull:
+; CHECK: // %bb.0:
+; CHECK-NEXT: smull v0.2d, v0.2s, v1.2s
+; CHECK-NEXT: stp q0, q0, [x0]
+; CHECK-NEXT: ret
+ %1 = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %A, <2 x i32> %B)
+ %2 = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %B, <2 x i32> %A)
+ store <2 x i64> %1, ptr %C
+ %3 = getelementptr i8, ptr %C, i64 16
+ store <2 x i64> %2, ptr %3
+ ret void
+}
+
declare <8 x i16> @llvm.aarch64.neon.smull.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
@@ -121,6 +136,20 @@ define <2 x i64> @umull2d(ptr %A, ptr %B) nounwind {
ret <2 x i64> %tmp3
}
+define void @commutable_umull(<2 x i32> %A, <2 x i32> %B, ptr %C) {
+; CHECK-LABEL: commutable_umull:
+; CHECK: // %bb.0:
+; CHECK-NEXT: umull v0.2d, v0.2s, v1.2s
+; CHECK-NEXT: stp q0, q0, [x0]
+; CHECK-NEXT: ret
+ %1 = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %A, <2 x i32> %B)
+ %2 = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %B, <2 x i32> %A)
+ store <2 x i64> %1, ptr %C
+ %3 = getelementptr i8, ptr %C, i64 16
+ store <2 x i64> %2, ptr %3
+ ret void
+}
+
declare <8 x i16> @llvm.aarch64.neon.umull.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
@@ -212,6 +241,20 @@ define <8 x i16> @pmull8h(ptr %A, ptr %B) nounwind {
ret <8 x i16> %tmp3
}
+define void @commutable_pmull8h(<8 x i8> %A, <8 x i8> %B, ptr %C) {
+; CHECK-LABEL: commutable_pmull8h:
+; CHECK: // %bb.0:
+; CHECK-NEXT: pmull v0.8h, v0.8b, v1.8b
+; CHECK-NEXT: stp q0, q0, [x0]
+; CHECK-NEXT: ret
+ %1 = call <8 x i16> @llvm.aarch64.neon.pmull.v8i16(<8 x i8> %A, <8 x i8> %B)
+ %2 = call <8 x i16> @llvm.aarch64.neon.pmull.v8i16(<8 x i8> %B, <8 x i8> %A)
+ store <8 x i16> %1, ptr %C
+ %3 = getelementptr i8, ptr %C, i8 16
+ store <8 x i16> %2, ptr %3
+ ret void
+}
+
declare <8 x i16> @llvm.aarch64.neon.pmull.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
define <4 x i16> @sqdmulh_4h(ptr %A, ptr %B) nounwind {
@@ -487,10 +530,10 @@ define void @smlal2d_chain_with_constant(ptr %dst, <2 x i32> %v1, <2 x i32> %v2,
; CHECK-GI-LABEL: smlal2d_chain_with_constant:
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: mvn v3.8b, v2.8b
-; CHECK-GI-NEXT: adrp x8, .LCPI27_0
+; CHECK-GI-NEXT: adrp x8, .LCPI30_0
; CHECK-GI-NEXT: smull v1.2d, v1.2s, v3.2s
; CHECK-GI-NEXT: smlal v1.2d, v0.2s, v2.2s
-; CHECK-GI-NEXT: ldr q0, [x8, :lo12:.LCPI27_0]
+; CHECK-GI-NEXT: ldr q0, [x8, :lo12:.LCPI30_0]
; CHECK-GI-NEXT: add v0.2d, v1.2d, v0.2d
; CHECK-GI-NEXT: str q0, [x0]
; CHECK-GI-NEXT: ret
@@ -566,8 +609,8 @@ define void @smlsl2d_chain_with_constant(ptr %dst, <2 x i32> %v1, <2 x i32> %v2,
;
; CHECK-GI-LABEL: smlsl2d_chain_with_constant:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: adrp x8, .LCPI31_0
-; CHECK-GI-NEXT: ldr q3, [x8, :lo12:.LCPI31_0]
+; CHECK-GI-NEXT: adrp x8, .LCPI34_0
+; CHECK-GI-NEXT: ldr q3, [x8, :lo12:.LCPI34_0]
; CHECK-GI-NEXT: smlsl v3.2d, v0.2s, v2.2s
; CHECK-GI-NEXT: mvn v0.8b, v2.8b
; CHECK-GI-NEXT: smlsl v3.2d, v1.2s, v0.2s
@@ -829,10 +872,10 @@ define void @umlal2d_chain_with_constant(ptr %dst, <2 x i32> %v1, <2 x i32> %v2,
; CHECK-GI-LABEL: umlal2d_chain_with_constant:
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: mvn v3.8b, v2.8b
-; CHECK-GI-NEXT: adrp x8, .LCPI43_0
+; CHECK-GI-NEXT: adrp x8, .LCPI46_0
; CHECK-GI-NEXT: umull v1.2d, v1.2s, v3.2s
; CHECK-GI-NEXT: umlal v1.2d, v0.2s, v2.2s
-; CHECK-GI-NEXT: ldr q0, [x8, :lo12:.LCPI43_0]
+; CHECK-GI-NEXT: ldr q0, [x8, :lo12:.LCPI46_0]
; CHECK-GI-NEXT: add v0.2d, v1.2d, v0.2d
; CHECK-GI-NEXT: str q0, [x0]
; CHECK-GI-NEXT: ret
@@ -908,8 +951,8 @@ define void @umlsl2d_chain_with_constant(ptr %dst, <2 x i32> %v1, <2 x i32> %v2,
;
; CHECK-GI-LABEL: umlsl2d_chain_with_constant:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: adrp x8, .LCPI47_0
-; CHECK-GI-NEXT: ldr q3, [x8, :lo12:.LCPI47_0]
+; CHECK-GI-NEXT: adrp x8, .LCPI50_0
+; CHECK-GI-NEXT: ldr q3, [x8, :lo12:.LCPI50_0]
; CHECK-GI-NEXT: umlsl v3.2d, v0.2s, v2.2s
; CHECK-GI-NEXT: mvn v0.8b, v2.8b
; CHECK-GI-NEXT: umlsl v3.2d, v1.2s, v0.2s
@@ -3222,6 +3265,20 @@ define <16 x i8> @test_pmull_high_64(<2 x i64> %l, <2 x i64> %r) nounwind {
ret <16 x i8> %val
}
+define <16 x i8> @test_commutable_pmull_64(i64 %l, i64 %r) nounwind {
+; CHECK-LABEL: test_commutable_pmull_64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fmov d0, x1
+; CHECK-NEXT: fmov d1, x0
+; CHECK-NEXT: pmull v0.1q, v1.1d, v0.1d
+; CHECK-NEXT: add v0.16b, v0.16b, v0.16b
+; CHECK-NEXT: ret
+ %1 = call <16 x i8> @llvm.aarch64.neon.pmull64(i64 %l, i64 %r)
+ %2 = call <16 x i8> @llvm.aarch64.neon.pmull64(i64 %r, i64 %l)
+ %3 = add <16 x i8> %1, %2
+ ret <16 x i8> %3
+}
+
declare <16 x i8> @llvm.aarch64.neon.pmull64(i64, i64)
define <1 x i64> @test_mul_v1i64(<1 x i64> %lhs, <1 x i64> %rhs) nounwind {
diff --git a/llvm/test/CodeGen/AArch64/bsp_implicit_ops.mir b/llvm/test/CodeGen/AArch64/bsp_implicit_ops.mir
index 23ac67c..805d244 100644
--- a/llvm/test/CodeGen/AArch64/bsp_implicit_ops.mir
+++ b/llvm/test/CodeGen/AArch64/bsp_implicit_ops.mir
@@ -96,3 +96,23 @@ body: |
$q25 = ORRv16i8 $q3, killed $q3
RET_ReallyLR implicit $q22
...
+---
+name: DoubleOp
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $q2
+
+ ; CHECK-LABEL: name: DoubleOp
+ ; CHECK: liveins: $q2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $q0 = MOVIv8i16 1, 0
+ ; CHECK-NEXT: renamable $q1 = ORRv16i8 renamable $q2, renamable $q2
+ ; CHECK-NEXT: renamable $q1 = BSLv16i8 killed renamable $q1, renamable $q2, renamable $q0
+ ; CHECK-NEXT: renamable $q0 = SQADDv8i16 killed renamable $q1, killed renamable $q0
+ ; CHECK-NEXT: RET undef $lr, implicit $q0
+ renamable $q0 = MOVIv8i16 1, 0
+ renamable $q1 = BSPv16i8 killed renamable $q2, renamable $q2, renamable $q0
+ renamable $q0 = SQADDv8i16 killed renamable $q1, killed renamable $q0
+ RET_ReallyLR implicit $q0
+...
diff --git a/llvm/test/CodeGen/AArch64/lifetime-poison.ll b/llvm/test/CodeGen/AArch64/lifetime-poison.ll
index e04530d..dfb76d1 100644
--- a/llvm/test/CodeGen/AArch64/lifetime-poison.ll
+++ b/llvm/test/CodeGen/AArch64/lifetime-poison.ll
@@ -8,7 +8,7 @@ define void @test() {
; CHECK-LABEL: test:
; CHECK: // %bb.0:
; CHECK-NEXT: ret
- call void @llvm.lifetime.start.p0(i64 4, ptr poison)
- call void @llvm.lifetime.end.p0(i64 4, ptr poison)
+ call void @llvm.lifetime.start.p0(ptr poison)
+ call void @llvm.lifetime.end.p0(ptr poison)
ret void
}
diff --git a/llvm/test/CodeGen/AArch64/stack-tagging-initializer-merge.ll b/llvm/test/CodeGen/AArch64/stack-tagging-initializer-merge.ll
index 0711f69..df83762 100644
--- a/llvm/test/CodeGen/AArch64/stack-tagging-initializer-merge.ll
+++ b/llvm/test/CodeGen/AArch64/stack-tagging-initializer-merge.ll
@@ -5,8 +5,8 @@ target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
target triple = "aarch64--linux-android"
declare void @use(ptr)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg)
define void @OneVarNoInit() sanitize_memtag {
@@ -16,18 +16,18 @@ define void @OneVarNoInit() sanitize_memtag {
; CHECK-NEXT: [[BASETAG:%.*]] = call ptr @llvm.aarch64.irg.sp(i64 0)
; CHECK-NEXT: [[X:%.*]] = alloca { i32, [12 x i8] }, align 16
; CHECK-NEXT: [[TX:%.*]] = call ptr @llvm.aarch64.tagp.p0(ptr [[X]], ptr [[BASETAG]], i64 0)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]])
; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[TX]], i64 16)
; CHECK-NEXT: call void @use(ptr nonnull [[TX]])
; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[X]], i64 16)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]])
; CHECK-NEXT: ret void
;
entry:
%x = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %x)
+ call void @llvm.lifetime.start.p0(ptr nonnull %x)
call void @use(ptr nonnull %x)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %x)
+ call void @llvm.lifetime.end.p0(ptr nonnull %x)
ret void
}
@@ -39,19 +39,19 @@ define void @OneVarInitConst() sanitize_memtag {
; CHECK-NEXT: [[BASETAG:%.*]] = call ptr @llvm.aarch64.irg.sp(i64 0)
; CHECK-NEXT: [[X:%.*]] = alloca { i32, [12 x i8] }, align 16
; CHECK-NEXT: [[TX:%.*]] = call ptr @llvm.aarch64.tagp.p0(ptr [[X]], ptr [[BASETAG]], i64 0)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]])
; CHECK-NEXT: call void @llvm.aarch64.stgp(ptr [[TX]], i64 42, i64 0)
; CHECK-NEXT: call void @use(ptr nonnull [[TX]])
; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[X]], i64 16)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]])
; CHECK-NEXT: ret void
;
entry:
%x = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %x)
+ call void @llvm.lifetime.start.p0(ptr nonnull %x)
store i32 42, ptr %x, align 4
call void @use(ptr nonnull %x)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %x)
+ call void @llvm.lifetime.end.p0(ptr nonnull %x)
ret void
}
@@ -64,21 +64,21 @@ define void @ArrayInitConst() sanitize_memtag {
; CHECK-NEXT: [[BASETAG:%.*]] = call ptr @llvm.aarch64.irg.sp(i64 0)
; CHECK-NEXT: [[X:%.*]] = alloca i32, i32 16, align 16
; CHECK-NEXT: [[TX:%.*]] = call ptr @llvm.aarch64.tagp.p0(ptr [[X]], ptr [[BASETAG]], i64 0)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 64, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]])
; CHECK-NEXT: call void @llvm.aarch64.stgp(ptr [[TX]], i64 42, i64 0)
; CHECK-NEXT: [[TX8_16:%.*]] = getelementptr i8, ptr [[TX]], i32 16
; CHECK-NEXT: call void @llvm.aarch64.settag.zero(ptr [[TX8_16]], i64 48)
; CHECK-NEXT: call void @use(ptr nonnull [[TX]])
; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[X]], i64 64)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 64, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]])
; CHECK-NEXT: ret void
;
entry:
%x = alloca i32, i32 16, align 4
- call void @llvm.lifetime.start.p0(i64 64, ptr nonnull %x)
+ call void @llvm.lifetime.start.p0(ptr nonnull %x)
store i32 42, ptr %x, align 4
call void @use(ptr nonnull %x)
- call void @llvm.lifetime.end.p0(i64 64, ptr nonnull %x)
+ call void @llvm.lifetime.end.p0(ptr nonnull %x)
ret void
}
@@ -90,7 +90,7 @@ define void @ArrayInitConst2() sanitize_memtag {
; CHECK-NEXT: [[BASETAG:%.*]] = call ptr @llvm.aarch64.irg.sp(i64 0)
; CHECK-NEXT: [[X:%.*]] = alloca i32, i32 16, align 16
; CHECK-NEXT: [[TX:%.*]] = call ptr @llvm.aarch64.tagp.p0(ptr [[X]], ptr [[BASETAG]], i64 0)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 64, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]])
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i32, ptr [[TX]], i32 1
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i32, ptr [[TX]], i32 2
; CHECK-NEXT: call void @llvm.aarch64.stgp(ptr [[TX]], i64 184683593770, i64 -1)
@@ -98,19 +98,19 @@ define void @ArrayInitConst2() sanitize_memtag {
; CHECK-NEXT: call void @llvm.aarch64.settag.zero(ptr [[TX8_16]], i64 48)
; CHECK-NEXT: call void @use(ptr nonnull [[TX]])
; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[X]], i64 64)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 64, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]])
; CHECK-NEXT: ret void
;
entry:
%x = alloca i32, i32 16, align 4
- call void @llvm.lifetime.start.p0(i64 64, ptr nonnull %x)
+ call void @llvm.lifetime.start.p0(ptr nonnull %x)
store i32 42, ptr %x, align 4
%0 = getelementptr i32, ptr %x, i32 1
store i32 43, ptr %0, align 4
%1 = getelementptr i32, ptr %x, i32 2
store i64 -1, ptr %1, align 4
call void @use(ptr nonnull %x)
- call void @llvm.lifetime.end.p0(i64 64, ptr nonnull %x)
+ call void @llvm.lifetime.end.p0(ptr nonnull %x)
ret void
}
@@ -122,23 +122,23 @@ define void @ArrayInitConstSplit() sanitize_memtag {
; CHECK-NEXT: [[BASETAG:%.*]] = call ptr @llvm.aarch64.irg.sp(i64 0)
; CHECK-NEXT: [[X:%.*]] = alloca i32, i32 16, align 16
; CHECK-NEXT: [[TX:%.*]] = call ptr @llvm.aarch64.tagp.p0(ptr [[X]], ptr [[BASETAG]], i64 0)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 64, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]])
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i32, ptr [[TX]], i32 1
; CHECK-NEXT: call void @llvm.aarch64.stgp(ptr [[TX]], i64 -4294967296, i64 4294967295)
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TX]], i32 16
; CHECK-NEXT: call void @llvm.aarch64.settag.zero(ptr [[TMP1]], i64 48)
; CHECK-NEXT: call void @use(ptr nonnull [[TX]])
; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[X]], i64 64)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 64, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]])
; CHECK-NEXT: ret void
;
entry:
%x = alloca i32, i32 16, align 4
- call void @llvm.lifetime.start.p0(i64 64, ptr nonnull %x)
+ call void @llvm.lifetime.start.p0(ptr nonnull %x)
%0 = getelementptr i32, ptr %x, i32 1
store i64 -1, ptr %0, align 4
call void @use(ptr nonnull %x)
- call void @llvm.lifetime.end.p0(i64 64, ptr nonnull %x)
+ call void @llvm.lifetime.end.p0(ptr nonnull %x)
ret void
}
@@ -150,7 +150,7 @@ define void @ArrayInitConstWithHoles() sanitize_memtag {
; CHECK-NEXT: [[BASETAG:%.*]] = call ptr @llvm.aarch64.irg.sp(i64 0)
; CHECK-NEXT: [[X:%.*]] = alloca i32, i32 32, align 16
; CHECK-NEXT: [[TX:%.*]] = call ptr @llvm.aarch64.tagp.p0(ptr [[X]], ptr [[BASETAG]], i64 0)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 128, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]])
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i32, ptr [[TX]], i32 5
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i32, ptr [[TX]], i32 14
; CHECK-NEXT: call void @llvm.aarch64.settag.zero(ptr [[TX]], i64 16)
@@ -164,18 +164,18 @@ define void @ArrayInitConstWithHoles() sanitize_memtag {
; CHECK-NEXT: call void @llvm.aarch64.settag.zero(ptr [[TX8_64]], i64 64)
; CHECK-NEXT: call void @use(ptr nonnull [[TX]])
; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[X]], i64 128)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 128, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]])
; CHECK-NEXT: ret void
;
entry:
%x = alloca i32, i32 32, align 4
- call void @llvm.lifetime.start.p0(i64 128, ptr nonnull %x)
+ call void @llvm.lifetime.start.p0(ptr nonnull %x)
%0 = getelementptr i32, ptr %x, i32 5
store i32 42, ptr %0, align 4
%1 = getelementptr i32, ptr %x, i32 14
store i32 43, ptr %1, align 4
call void @use(ptr nonnull %x)
- call void @llvm.lifetime.end.p0(i64 128, ptr nonnull %x)
+ call void @llvm.lifetime.end.p0(ptr nonnull %x)
ret void
}
@@ -187,20 +187,20 @@ define void @InitNonConst(i32 %v) sanitize_memtag {
; CHECK-NEXT: [[BASETAG:%.*]] = call ptr @llvm.aarch64.irg.sp(i64 0)
; CHECK-NEXT: [[X:%.*]] = alloca { i32, [12 x i8] }, align 16
; CHECK-NEXT: [[X_TAG:%.*]] = call ptr @llvm.aarch64.tagp.p0(ptr [[X]], ptr [[BASETAG]], i64 0)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]])
; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[V]] to i64
; CHECK-NEXT: call void @llvm.aarch64.stgp(ptr [[X_TAG]], i64 [[TMP0]], i64 0)
; CHECK-NEXT: call void @use(ptr nonnull [[X_TAG]])
; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[X]], i64 16)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]])
; CHECK-NEXT: ret void
;
entry:
%x = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %x)
+ call void @llvm.lifetime.start.p0(ptr nonnull %x)
store i32 %v, ptr %x, align 4
call void @use(ptr nonnull %x)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %x)
+ call void @llvm.lifetime.end.p0(ptr nonnull %x)
ret void
}
@@ -212,7 +212,7 @@ define void @InitNonConst2(i32 %v, i32 %w) sanitize_memtag {
; CHECK-NEXT: [[BASETAG:%.*]] = call ptr @llvm.aarch64.irg.sp(i64 0)
; CHECK-NEXT: [[X:%.*]] = alloca i32, i32 4, align 16
; CHECK-NEXT: [[TX:%.*]] = call ptr @llvm.aarch64.tagp.p0(ptr [[X]], ptr [[BASETAG]], i64 0)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]])
; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[V]] to i64
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i32, ptr [[TX]], i32 1
; CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[W]] to i64
@@ -221,17 +221,17 @@ define void @InitNonConst2(i32 %v, i32 %w) sanitize_memtag {
; CHECK-NEXT: call void @llvm.aarch64.stgp(ptr [[TX]], i64 [[VW]], i64 0)
; CHECK-NEXT: call void @use(ptr nonnull [[TX]])
; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[X]], i64 16)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]])
; CHECK-NEXT: ret void
;
entry:
%x = alloca i32, i32 4, align 4
- call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %x)
+ call void @llvm.lifetime.start.p0(ptr nonnull %x)
store i32 %v, ptr %x, align 4
%0 = getelementptr i32, ptr %x, i32 1
store i32 %w, ptr %0, align 4
call void @use(ptr nonnull %x)
- call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %x)
+ call void @llvm.lifetime.end.p0(ptr nonnull %x)
ret void
}
@@ -243,19 +243,19 @@ define void @InitVector() sanitize_memtag {
; CHECK-NEXT: [[BASETAG:%.*]] = call ptr @llvm.aarch64.irg.sp(i64 0)
; CHECK-NEXT: [[X:%.*]] = alloca i32, i32 4, align 16
; CHECK-NEXT: [[TX:%.*]] = call ptr @llvm.aarch64.tagp.p0(ptr [[X]], ptr [[BASETAG]], i64 0)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]])
; CHECK-NEXT: call void @llvm.aarch64.stgp(ptr [[TX]], i64 bitcast (<2 x i32> <i32 1, i32 2> to i64), i64 0)
; CHECK-NEXT: call void @use(ptr nonnull [[TX]])
; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[X]], i64 16)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]])
; CHECK-NEXT: ret void
;
entry:
%x = alloca i32, i32 4, align 4
- call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %x)
+ call void @llvm.lifetime.start.p0(ptr nonnull %x)
store <2 x i32> <i32 1, i32 2>, ptr %x, align 4
call void @use(ptr nonnull %x)
- call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %x)
+ call void @llvm.lifetime.end.p0(ptr nonnull %x)
ret void
}
@@ -302,23 +302,23 @@ define void @InitVectorSplit() sanitize_memtag {
; CHECK-NEXT: [[BASETAG:%.*]] = call ptr @llvm.aarch64.irg.sp(i64 0)
; CHECK-NEXT: [[X:%.*]] = alloca i32, i32 4, align 16
; CHECK-NEXT: [[TX:%.*]] = call ptr @llvm.aarch64.tagp.p0(ptr [[X]], ptr [[BASETAG]], i64 0)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]])
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i32, ptr [[TX]], i32 1
; CHECK-NEXT: [[TMP1:%.*]] = shl i64 bitcast (<2 x i32> <i32 1, i32 2> to i64), 32
; CHECK-NEXT: [[LSHR:%.*]] = lshr i64 bitcast (<2 x i32> <i32 1, i32 2> to i64), 32
; CHECK-NEXT: call void @llvm.aarch64.stgp(ptr [[TX]], i64 [[TMP1]], i64 [[LSHR]])
; CHECK-NEXT: call void @use(ptr nonnull [[TX]])
; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[X]], i64 16)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]])
; CHECK-NEXT: ret void
;
entry:
%x = alloca i32, i32 4, align 4
- call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %x)
+ call void @llvm.lifetime.start.p0(ptr nonnull %x)
%0 = getelementptr i32, ptr %x, i32 1
store <2 x i32> <i32 1, i32 2>, ptr %0, align 4
call void @use(ptr nonnull %x)
- call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %x)
+ call void @llvm.lifetime.end.p0(ptr nonnull %x)
ret void
}
diff --git a/llvm/test/CodeGen/AArch64/stack-tagging-merge-past-memcpy.mir b/llvm/test/CodeGen/AArch64/stack-tagging-merge-past-memcpy.mir
index 45f6bfe..0fa5103 100644
--- a/llvm/test/CodeGen/AArch64/stack-tagging-merge-past-memcpy.mir
+++ b/llvm/test/CodeGen/AArch64/stack-tagging-merge-past-memcpy.mir
@@ -18,15 +18,15 @@
%C.tag = call ptr @llvm.aarch64.tagp.p0(ptr %C, ptr %basetag, i64 1)
call void @llvm.aarch64.settag(ptr %C.tag, i64 32)
call void @F56(ptr %C.tag)
- call void @llvm.lifetime.start.p0(i64 32, ptr %A)
+ call void @llvm.lifetime.start.p0(ptr %A)
call void @llvm.aarch64.settag(ptr %A.tag, i64 32)
call void @F56(ptr %A.tag)
call void @llvm.aarch64.settag(ptr %A, i64 32)
- call void @llvm.lifetime.end.p0(i64 32, ptr %A)
- call void @llvm.lifetime.start.p0(i64 32, ptr %A)
+ call void @llvm.lifetime.end.p0(ptr %A)
+ call void @llvm.lifetime.start.p0(ptr %A)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %A, ptr align 4 @glob, i64 32, i1 false)
call void @F78(ptr %A)
- call void @llvm.lifetime.end.p0(i64 32, ptr %A)
+ call void @llvm.lifetime.end.p0(ptr %A)
call void @llvm.aarch64.settag(ptr %C, i64 32)
ret void
}
diff --git a/llvm/test/CodeGen/AArch64/stack-tagging-untag-placement.ll b/llvm/test/CodeGen/AArch64/stack-tagging-untag-placement.ll
index aa9cccc..91adf82 100644
--- a/llvm/test/CodeGen/AArch64/stack-tagging-untag-placement.ll
+++ b/llvm/test/CodeGen/AArch64/stack-tagging-untag-placement.ll
@@ -17,17 +17,17 @@ S0:
S1:
; CHECK-LABEL: S1:
- call void @llvm.lifetime.start.p0(i64 48, ptr nonnull %v) #1
+ call void @llvm.lifetime.start.p0(ptr nonnull %v) #1
; CHECK: call void @llvm.aarch64.settag(ptr %v.tag, i64 48)
- call void @llvm.lifetime.start.p0(i64 48, ptr nonnull %w) #1
+ call void @llvm.lifetime.start.p0(ptr nonnull %w) #1
; CHECK: call void @llvm.aarch64.settag(ptr %w.tag, i64 48)
%t1 = call i32 @g1(ptr nonnull %v, ptr nonnull %w) #1
; CHECK: call i32 @g1
; CHECK-NOT: settag{{.*}}%v
; CHECK: call void @llvm.aarch64.settag(ptr %w, i64 48)
; CHECK-NOT: settag{{.*}}%v
- call void @llvm.lifetime.end.p0(i64 48, ptr nonnull %w) #1
-; CHECK: call void @llvm.lifetime.end.p0(i64 48, ptr nonnull %w)
+ call void @llvm.lifetime.end.p0(ptr nonnull %w) #1
+; CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %w)
%b1 = icmp eq i32 %t1, 0
br i1 %b1, label %S2, label %S3
; CHECK-NOT: settag
@@ -40,7 +40,7 @@ S2:
S3:
; CHECK-LABEL: S3:
- call void @llvm.lifetime.end.p0(i64 48, ptr nonnull %v) #1
+ call void @llvm.lifetime.end.p0(ptr nonnull %v) #1
tail call void @z1() #1
br label %exit2
; CHECK-NOT: settag
@@ -73,9 +73,9 @@ declare void @z1() #0
declare void @z2() #0
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
attributes #0 = { sanitize_memtag "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+mte,+neon,+v8.5a" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.atomic.add.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.atomic.add.ll
index 62f8f89..79a9291 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.atomic.add.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.atomic.add.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=instruction-select -o - %s | FileCheck -check-prefixes=GFX8 %s
-; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck --check-prefixes=GFX12 %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck --check-prefixes=GFX12,GFX1200 %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 -stop-after=instruction-select -o - %s | FileCheck --check-prefixes=GFX12,GFX1250 %s
; Natural mapping
define amdgpu_ps float @raw_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(i32 %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
@@ -99,26 +100,47 @@ define amdgpu_ps <2 x float> @raw_buffer_atomic_add_i64__vgpr_val__sgpr_rsrc__vg
; GFX8-NEXT: $vgpr1 = COPY [[COPY9]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
;
- ; GFX12-LABEL: name: raw_buffer_atomic_add_i64__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE]], [[COPY6]], [[REG_SEQUENCE1]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN]].sub0
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN]].sub1
- ; GFX12-NEXT: $vgpr0 = COPY [[COPY8]]
- ; GFX12-NEXT: $vgpr1 = COPY [[COPY9]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
+ ; GFX1200-LABEL: name: raw_buffer_atomic_add_i64__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE]], [[COPY6]], [[REG_SEQUENCE1]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN]].sub0
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN]].sub1
+ ; GFX1200-NEXT: $vgpr0 = COPY [[COPY8]]
+ ; GFX1200-NEXT: $vgpr1 = COPY [[COPY9]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_atomic_add_i64__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64_align2 = BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE]], [[COPY6]], [[REG_SEQUENCE1]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN]].sub0
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN]].sub1
+ ; GFX1250-NEXT: $vgpr0 = COPY [[COPY8]]
+ ; GFX1250-NEXT: $vgpr1 = COPY [[COPY9]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
%ret = call i64 @llvm.amdgcn.raw.buffer.atomic.add.i64(i64 %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
%cast = bitcast i64 %ret to <2 x float>
ret <2 x float> %cast
@@ -142,22 +164,39 @@ define amdgpu_ps void @raw_buffer_atomic_add_i64_noret__vgpr_val__sgpr_rsrc__vgp
; GFX8-NEXT: BUFFER_ATOMIC_ADD_X2_OFFEN [[REG_SEQUENCE]], [[COPY6]], [[REG_SEQUENCE1]], [[COPY7]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_atomic_add_i64_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN [[REG_SEQUENCE]], [[COPY6]], [[REG_SEQUENCE1]], [[COPY7]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_atomic_add_i64_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN [[REG_SEQUENCE]], [[COPY6]], [[REG_SEQUENCE1]], [[COPY7]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_atomic_add_i64_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN [[REG_SEQUENCE]], [[COPY6]], [[REG_SEQUENCE1]], [[COPY7]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
%ret = call i64 @llvm.amdgcn.raw.buffer.atomic.add.i64(i64 %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -217,58 +256,111 @@ define amdgpu_ps float @raw_buffer_atomic_add_i32__sgpr_val__vgpr_rsrc__sgpr_vof
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_OFFEN_RTN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: raw_buffer_atomic_add_i32__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY12]], [[COPY10]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN [[COPY7]], [[COPY8]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: raw_buffer_atomic_add_i32__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY12]], [[COPY10]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN [[COPY7]], [[COPY8]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_atomic_add_i32__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY12]], [[COPY10]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN [[COPY7]], [[COPY8]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%ret = call i32 @llvm.amdgcn.raw.buffer.atomic.add.i32(i32 %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
%cast = bitcast i32 %ret to float
ret float %cast
@@ -328,57 +420,109 @@ define amdgpu_ps void @raw_buffer_atomic_add_i32_noret__sgpr_val__vgpr_rsrc__sgp
; GFX8-NEXT: bb.5:
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_atomic_add_i32_noret__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY12]], [[COPY10]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: BUFFER_ATOMIC_ADD_VBUFFER_OFFEN [[COPY7]], [[COPY8]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_atomic_add_i32_noret__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY12]], [[COPY10]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: BUFFER_ATOMIC_ADD_VBUFFER_OFFEN [[COPY7]], [[COPY8]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_atomic_add_i32_noret__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY12]], [[COPY10]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: BUFFER_ATOMIC_ADD_VBUFFER_OFFEN [[COPY7]], [[COPY8]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: S_ENDPGM 0
%ret = call i32 @llvm.amdgcn.raw.buffer.atomic.add.i32(i32 %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -400,21 +544,40 @@ define amdgpu_ps float @raw_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_vof
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_OFFEN_RTN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: raw_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add4095
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4095, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: raw_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add4095
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4095, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add4095
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec
+ ; GFX1250-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN [[COPY]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY6]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%voffset = add i32 %voffset.base, 4095
%ret = call i32 @llvm.amdgcn.raw.buffer.atomic.add.i32(i32 %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
%cast = bitcast i32 %ret to float
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.atomic.cmpswap.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.atomic.cmpswap.ll
index 364ed62..9f1b7a6 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.atomic.cmpswap.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.atomic.cmpswap.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=instruction-select -o - %s | FileCheck --check-prefixes=GFX8 %s
-; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck --check-prefixes=GFX12 %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck --check-prefix=GFX1200 %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 -stop-after=instruction-select -o - %s | FileCheck --check-prefix=GFX1250 %s
; Natural mapping
@@ -24,24 +25,43 @@ define amdgpu_ps float @raw_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_
; GFX8-NEXT: $vgpr0 = COPY [[COPY8]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: raw_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN]].sub0
- ; GFX12-NEXT: $vgpr0 = COPY [[COPY8]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: raw_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN]].sub0
+ ; GFX1200-NEXT: $vgpr0 = COPY [[COPY8]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64_align2 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN]].sub0
+ ; GFX1250-NEXT: $vgpr0 = COPY [[COPY8]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%ret = call i32 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
%cast = bitcast i32 %ret to float
ret float %cast
@@ -66,22 +86,39 @@ define amdgpu_ps void @raw_buffer_atomic_cmpswap_i32_noret__vgpr_val__vgpr_cmp__
; GFX8-NEXT: BUFFER_ATOMIC_CMPSWAP_OFFEN [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_atomic_cmpswap_i32_noret__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_atomic_cmpswap_i32_noret__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_atomic_cmpswap_i32_noret__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
%ret = call i32 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -145,62 +182,119 @@ define amdgpu_ps float @raw_buffer_atomic_cmpswap_i32__sgpr_val__sgpr_cmp__vgpr_
; GFX8-NEXT: $vgpr0 = COPY [[COPY15]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: raw_buffer_atomic_cmpswap_i32__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY1]]
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
- ; GFX12-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
- ; GFX12-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN [[REG_SEQUENCE2]], [[COPY10]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY15:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN]].sub0
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: $vgpr0 = COPY [[COPY15]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: raw_buffer_atomic_cmpswap_i32__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY1]]
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN [[REG_SEQUENCE2]], [[COPY10]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY15:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN]].sub0
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: $vgpr0 = COPY [[COPY15]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_atomic_cmpswap_i32__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY1]]
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64_align2 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN [[REG_SEQUENCE2]], [[COPY10]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY15:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN]].sub0
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: $vgpr0 = COPY [[COPY15]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%ret = call i32 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
%cast = bitcast i32 %ret to float
ret float %cast
@@ -263,60 +357,115 @@ define amdgpu_ps void @raw_buffer_atomic_cmpswap_i32_noret__sgpr_val__sgpr_cmp__
; GFX8-NEXT: bb.5:
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_atomic_cmpswap_i32_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY1]]
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
- ; GFX12-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
- ; GFX12-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
- ; GFX12-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN [[REG_SEQUENCE2]], [[COPY10]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_atomic_cmpswap_i32_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY1]]
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
+ ; GFX1200-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN [[REG_SEQUENCE2]], [[COPY10]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_atomic_cmpswap_i32_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY1]]
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
+ ; GFX1250-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN [[REG_SEQUENCE2]], [[COPY10]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: S_ENDPGM 0
%ret = call i32 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -341,24 +490,46 @@ define amdgpu_ps float @raw_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_
; GFX8-NEXT: $vgpr0 = COPY [[COPY8]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: raw_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 4095, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN]].sub0
- ; GFX12-NEXT: $vgpr0 = COPY [[COPY8]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: raw_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 4095, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN]].sub0
+ ; GFX1200-NEXT: $vgpr0 = COPY [[COPY8]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY6]], [[COPY8]], 0, implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64_align2 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN [[REG_SEQUENCE1]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN]].sub0
+ ; GFX1250-NEXT: $vgpr0 = COPY [[COPY9]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%voffset = add i32 %voffset.base, 4095
%ret = call i32 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
%cast = bitcast i32 %ret to float
@@ -395,33 +566,61 @@ define amdgpu_ps double @raw_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr
; GFX8-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
;
- ; GFX12-LABEL: name: raw_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3
- ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE3]], [[COPY8]], [[REG_SEQUENCE2]], [[COPY9]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN]].sub0_sub1
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY10]].sub0
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY10]].sub1
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY11]], implicit $exec
- ; GFX12-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec
- ; GFX12-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
+ ; GFX1200-LABEL: name: raw_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3
+ ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE3]], [[COPY8]], [[REG_SEQUENCE2]], [[COPY9]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY10]].sub0
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY10]].sub1
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY11]], implicit $exec
+ ; GFX1200-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec
+ ; GFX1200-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3
+ ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_128_align2 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE3]], [[COPY8]], [[REG_SEQUENCE2]], [[COPY9]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vreg_64_align2 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY10]].sub0
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY10]].sub1
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY11]], implicit $exec
+ ; GFX1250-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec
+ ; GFX1250-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
%ret = call i64 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
%cast = bitcast i64 %ret to double
ret double %cast
@@ -450,26 +649,47 @@ define amdgpu_ps void @raw_buffer_atomic_cmpswap_i64_noret__vgpr_val__vgpr_cmp__
; GFX8-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_OFFEN [[REG_SEQUENCE3]], [[COPY8]], [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_atomic_cmpswap_i64_noret__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3
- ; GFX12-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN [[REG_SEQUENCE3]], [[COPY8]], [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_atomic_cmpswap_i64_noret__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3
+ ; GFX1200-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN [[REG_SEQUENCE3]], [[COPY8]], [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_atomic_cmpswap_i64_noret__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3
+ ; GFX1250-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN [[REG_SEQUENCE3]], [[COPY8]], [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
%ret = call i64 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -542,71 +762,137 @@ define amdgpu_ps double @raw_buffer_atomic_cmpswap_i64__sgpr_val__sgpr_cmp__vgpr
; GFX8-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_6]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
;
- ; GFX12-LABEL: name: raw_buffer_atomic_cmpswap_i64__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]]
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]]
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY8]]
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1
- ; GFX12-NEXT: [[COPY14:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3
- ; GFX12-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1
- ; GFX12-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY9]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY10]], %subreg.sub0_sub1, [[COPY11]], %subreg.sub2_sub3
- ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE4]], [[COPY12]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY17:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN]].sub0_sub1
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: [[COPY18:%[0-9]+]]:vgpr_32 = COPY [[COPY17]].sub0
- ; GFX12-NEXT: [[COPY19:%[0-9]+]]:vgpr_32 = COPY [[COPY17]].sub1
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_5:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY18]], implicit $exec
- ; GFX12-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_5]]
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_6:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY19]], implicit $exec
- ; GFX12-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_6]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
+ ; GFX1200-LABEL: name: raw_buffer_atomic_cmpswap_i64__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]]
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]]
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY8]]
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY9]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY10]], %subreg.sub0_sub1, [[COPY11]], %subreg.sub2_sub3
+ ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE4]], [[COPY12]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY17:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN]].sub0_sub1
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: [[COPY18:%[0-9]+]]:vgpr_32 = COPY [[COPY17]].sub0
+ ; GFX1200-NEXT: [[COPY19:%[0-9]+]]:vgpr_32 = COPY [[COPY17]].sub1
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_5:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY18]], implicit $exec
+ ; GFX1200-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_5]]
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_6:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY19]], implicit $exec
+ ; GFX1200-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_6]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_atomic_cmpswap_i64__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]]
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE1]]
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY8]]
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE2]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE2]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY9]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY10]], %subreg.sub0_sub1, [[COPY11]], %subreg.sub2_sub3
+ ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_128_align2 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE4]], [[COPY12]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY17:%[0-9]+]]:vreg_64_align2 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN]].sub0_sub1
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: [[COPY18:%[0-9]+]]:vgpr_32 = COPY [[COPY17]].sub0
+ ; GFX1250-NEXT: [[COPY19:%[0-9]+]]:vgpr_32 = COPY [[COPY17]].sub1
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_5:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY18]], implicit $exec
+ ; GFX1250-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_5]]
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_6:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY19]], implicit $exec
+ ; GFX1250-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_6]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
%ret = call i64 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
%cast = bitcast i64 %ret to double
ret double %cast
@@ -673,64 +959,123 @@ define amdgpu_ps void @raw_buffer_atomic_cmpswap_i64_noret__sgpr_val__sgpr_cmp__
; GFX8-NEXT: bb.5:
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_atomic_cmpswap_i64_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]]
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]]
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY8]]
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1
- ; GFX12-NEXT: [[COPY14:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3
- ; GFX12-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1
- ; GFX12-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY9]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY10]], %subreg.sub0_sub1, [[COPY11]], %subreg.sub2_sub3
- ; GFX12-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN [[REG_SEQUENCE4]], [[COPY12]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_atomic_cmpswap_i64_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]]
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]]
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY8]]
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY9]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY10]], %subreg.sub0_sub1, [[COPY11]], %subreg.sub2_sub3
+ ; GFX1200-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN [[REG_SEQUENCE4]], [[COPY12]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_atomic_cmpswap_i64_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]]
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE1]]
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY8]]
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE2]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE2]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY9]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY10]], %subreg.sub0_sub1, [[COPY11]], %subreg.sub2_sub3
+ ; GFX1250-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN [[REG_SEQUENCE4]], [[COPY12]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: S_ENDPGM 0
%ret = call i64 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -764,33 +1109,64 @@ define amdgpu_ps double @raw_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr
; GFX8-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
;
- ; GFX12-LABEL: name: raw_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3
- ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE3]], [[COPY8]], [[REG_SEQUENCE2]], [[COPY9]], 4095, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN]].sub0_sub1
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY10]].sub0
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY10]].sub1
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY11]], implicit $exec
- ; GFX12-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec
- ; GFX12-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
+ ; GFX1200-LABEL: name: raw_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3
+ ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE3]], [[COPY8]], [[REG_SEQUENCE2]], [[COPY9]], 4095, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY10]].sub0
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY10]].sub1
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY11]], implicit $exec
+ ; GFX1200-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec
+ ; GFX1200-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY8]], [[COPY10]], 0, implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3
+ ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_128_align2 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE3]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE2]], [[COPY9]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vreg_64_align2 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub0
+ ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub1
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec
+ ; GFX1250-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY13]], implicit $exec
+ ; GFX1250-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
%voffset = add i32 %voffset.base, 4095
%ret = call i64 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
%cast = bitcast i64 %ret to double
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.ll
index 46ca43b..7003bb1 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=instruction-select -o - %s | FileCheck -check-prefixes=GFX8 %s
-; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefixes=GFX12 %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefixes=GFX12,GFX1200 %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 -stop-after=instruction-select -o - %s | FileCheck -check-prefixes=GFX12,GFX1250 %s
; FIXME: Test with SI when argument lowering not broken for f16
; Natural mapping
@@ -124,52 +125,99 @@ define amdgpu_ps float @raw_buffer_load_f32__vgpr_rsrc__vgpr_voffset__sgpr_soffs
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: raw_buffer_load_f32__vgpr_rsrc__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: raw_buffer_load_f32__vgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_f32__vgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret float %val
}
@@ -226,55 +274,105 @@ define amdgpu_ps float @raw_buffer_load_f32__vgpr_rsrc__vgpr_voffset__vgpr_soffs
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: raw_buffer_load_f32__vgpr_rsrc__vgpr_voffset__vgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY5]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: raw_buffer_load_f32__vgpr_rsrc__vgpr_voffset__vgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY5]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_f32__vgpr_rsrc__vgpr_voffset__vgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY5]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret float %val
}
@@ -509,23 +607,41 @@ define amdgpu_ps <2 x float> @raw_buffer_load_v2f32__sgpr_rsrc__vgpr_voffset__sg
; GFX8-NEXT: $vgpr1 = COPY [[COPY7]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
;
- ; GFX12-LABEL: name: raw_buffer_load_v2f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub0
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub1
- ; GFX12-NEXT: $vgpr0 = COPY [[COPY6]]
- ; GFX12-NEXT: $vgpr1 = COPY [[COPY7]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
+ ; GFX1200-LABEL: name: raw_buffer_load_v2f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub0
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub1
+ ; GFX1200-NEXT: $vgpr0 = COPY [[COPY6]]
+ ; GFX1200-NEXT: $vgpr1 = COPY [[COPY7]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_v2f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub0
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub1
+ ; GFX1250-NEXT: $vgpr0 = COPY [[COPY6]]
+ ; GFX1250-NEXT: $vgpr1 = COPY [[COPY7]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
%val = call <2 x float> @llvm.amdgcn.raw.buffer.load.v2f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret <2 x float> %val
}
@@ -551,25 +667,45 @@ define amdgpu_ps <3 x float> @raw_buffer_load_v3f32__sgpr_rsrc__vgpr_voffset__sg
; GFX8-NEXT: $vgpr2 = COPY [[COPY8]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
;
- ; GFX12-LABEL: name: raw_buffer_load_v3f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN]].sub0
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN]].sub1
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN]].sub2
- ; GFX12-NEXT: $vgpr0 = COPY [[COPY6]]
- ; GFX12-NEXT: $vgpr1 = COPY [[COPY7]]
- ; GFX12-NEXT: $vgpr2 = COPY [[COPY8]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
+ ; GFX1200-LABEL: name: raw_buffer_load_v3f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN]].sub0
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN]].sub1
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN]].sub2
+ ; GFX1200-NEXT: $vgpr0 = COPY [[COPY6]]
+ ; GFX1200-NEXT: $vgpr1 = COPY [[COPY7]]
+ ; GFX1200-NEXT: $vgpr2 = COPY [[COPY8]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_v3f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN:%[0-9]+]]:vreg_96_align2 = BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN]].sub0
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN]].sub1
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN]].sub2
+ ; GFX1250-NEXT: $vgpr0 = COPY [[COPY6]]
+ ; GFX1250-NEXT: $vgpr1 = COPY [[COPY7]]
+ ; GFX1250-NEXT: $vgpr2 = COPY [[COPY8]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
%val = call <3 x float> @llvm.amdgcn.raw.buffer.load.v3f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret <3 x float> %val
}
@@ -597,27 +733,49 @@ define amdgpu_ps <4 x float> @raw_buffer_load_v4f32__sgpr_rsrc__vgpr_voffset__sg
; GFX8-NEXT: $vgpr3 = COPY [[COPY9]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
- ; GFX12-LABEL: name: raw_buffer_load_v4f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub0
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub1
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub2
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub3
- ; GFX12-NEXT: $vgpr0 = COPY [[COPY6]]
- ; GFX12-NEXT: $vgpr1 = COPY [[COPY7]]
- ; GFX12-NEXT: $vgpr2 = COPY [[COPY8]]
- ; GFX12-NEXT: $vgpr3 = COPY [[COPY9]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+ ; GFX1200-LABEL: name: raw_buffer_load_v4f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub0
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub1
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub2
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub3
+ ; GFX1200-NEXT: $vgpr0 = COPY [[COPY6]]
+ ; GFX1200-NEXT: $vgpr1 = COPY [[COPY7]]
+ ; GFX1200-NEXT: $vgpr2 = COPY [[COPY8]]
+ ; GFX1200-NEXT: $vgpr3 = COPY [[COPY9]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_v4f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN:%[0-9]+]]:vreg_128_align2 = BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub0
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub1
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub2
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub3
+ ; GFX1250-NEXT: $vgpr0 = COPY [[COPY6]]
+ ; GFX1250-NEXT: $vgpr1 = COPY [[COPY7]]
+ ; GFX1250-NEXT: $vgpr2 = COPY [[COPY8]]
+ ; GFX1250-NEXT: $vgpr3 = COPY [[COPY9]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
%val = call <4 x float> @llvm.amdgcn.raw.buffer.load.v4f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret <4 x float> %val
}
@@ -715,23 +873,41 @@ define amdgpu_ps <4 x half> @raw_buffer_load_v4f16__sgpr_rsrc__vgpr_voffset__sgp
; GFX8-NEXT: $vgpr1 = COPY [[COPY7]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
;
- ; GFX12-LABEL: name: raw_buffer_load_v4f16__sgpr_rsrc__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s16>), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub0
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub1
- ; GFX12-NEXT: $vgpr0 = COPY [[COPY6]]
- ; GFX12-NEXT: $vgpr1 = COPY [[COPY7]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
+ ; GFX1200-LABEL: name: raw_buffer_load_v4f16__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s16>), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub0
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub1
+ ; GFX1200-NEXT: $vgpr0 = COPY [[COPY6]]
+ ; GFX1200-NEXT: $vgpr1 = COPY [[COPY7]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_v4f16__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s16>), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub0
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub1
+ ; GFX1250-NEXT: $vgpr0 = COPY [[COPY6]]
+ ; GFX1250-NEXT: $vgpr1 = COPY [[COPY7]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
%val = call <4 x half> @llvm.amdgcn.raw.buffer.load.v4f16(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret <4 x half> %val
}
@@ -929,52 +1105,99 @@ define amdgpu_ps half @raw_buffer_load_f16__vgpr_rsrc__vgpr_voffset__sgpr_soffse
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_OFFEN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: raw_buffer_load_f16__vgpr_rsrc__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_VBUFFER_OFFEN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: raw_buffer_load_f16__vgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_VBUFFER_OFFEN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_f16__vgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_VBUFFER_OFFEN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%val = call half @llvm.amdgcn.raw.buffer.load.f16(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret half %val
}
@@ -1028,52 +1251,99 @@ define amdgpu_ps float @raw_buffer_load_i8__vgpr_rsrc__vgpr_voffset__sgpr_soffse
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_OFFEN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: raw_buffer_load_i8__vgpr_rsrc__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[BUFFER_LOAD_UBYTE_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_UBYTE_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_VBUFFER_OFFEN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: raw_buffer_load_i8__vgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[BUFFER_LOAD_UBYTE_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_UBYTE_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_VBUFFER_OFFEN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_i8__vgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[BUFFER_LOAD_UBYTE_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_UBYTE_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_VBUFFER_OFFEN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%val = call i8 @llvm.amdgcn.raw.buffer.load.i8(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
%zext = zext i8 %val to i32
%cast = bitcast i32 %zext to float
@@ -1194,20 +1464,38 @@ define amdgpu_ps float @raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffs
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add16
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 16, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add16
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 16, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add16
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 16
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY4]], [[COPY6]], 0, implicit $exec
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%voffset = add i32 %voffset.base, 16
%val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret float %val
@@ -1229,20 +1517,38 @@ define amdgpu_ps float @raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffs
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 4095, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 4095, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY4]], [[COPY6]], 0, implicit $exec
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%voffset = add i32 %voffset.base, 4095
%val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret float %val
@@ -1267,20 +1573,38 @@ define amdgpu_ps float @raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffs
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4096
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 4096, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4096
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 4096, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4096
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4096
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY4]], [[COPY6]], 0, implicit $exec
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%voffset = add i32 %voffset.base, 4096
%val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret float %val
@@ -1522,54 +1846,103 @@ define amdgpu_ps float @raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffs
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset_add5000
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 5000
- ; GFX12-NEXT: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[COPY5]], [[S_MOV_B32_]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[S_ADD_I32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_1]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset_add5000
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 5000
+ ; GFX1200-NEXT: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[COPY5]], [[S_MOV_B32_]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[S_ADD_I32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_1]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset_add5000
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 5000
+ ; GFX1250-NEXT: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[COPY5]], [[S_MOV_B32_]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[S_ADD_I32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_1]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%soffset = add i32 %soffset.base, 5000
%val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret float %val
@@ -1627,52 +2000,102 @@ define amdgpu_ps float @raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffs
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add5000
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 5000, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add5000
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 5000, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add5000
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 5000
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY4]], [[COPY6]], 0, implicit $exec
+ ; GFX1250-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[V_ADD_U32_e64_]], [[REG_SEQUENCE1]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_1]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%voffset = add i32 %voffset.base, 5000
%val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret float %val
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.tfe.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.tfe.ll
index 3fbfb63..4784ac5 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.tfe.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.tfe.ll
@@ -5,7 +5,8 @@
; RUN: llc -global-isel -mcpu=gfx900 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefix=GFX910
; RUN: llc -global-isel -mcpu=gfx1010 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefix=GFX910
; RUN: llc -global-isel -mcpu=gfx1100 -mattr=-real-true16 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefixes=GFX11
-; RUN: llc -global-isel -mcpu=gfx1200 -mattr=-real-true16 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefixes=GFX12
+; RUN: llc -global-isel -mcpu=gfx1200 -mattr=-real-true16 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefixes=GFX1200
+; RUN: llc -global-isel -mcpu=gfx1250 -mattr=-real-true16 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefixes=GFX1250
define amdgpu_ps void @raw_buffer_load_i8_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
; GFX67-LABEL: name: raw_buffer_load_i8_tfe
@@ -110,27 +111,49 @@ define amdgpu_ps void @raw_buffer_load_i8_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
; GFX11-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_load_i8_tfe
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET]].sub0
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET]].sub1
- ; GFX12-NEXT: GLOBAL_STORE_BYTE [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s8) into %ir.data_addr, addrspace 1)
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_load_i8_tfe
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX1200-NEXT: GLOBAL_STORE_BYTE [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s8) into %ir.data_addr, addrspace 1)
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_i8_tfe
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX1250-NEXT: GLOBAL_STORE_BYTE [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s8) into %ir.data_addr, addrspace 1)
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1250-NEXT: S_ENDPGM 0
%res = call { i8, i32 } @llvm.amdgcn.raw.buffer.load.sl_i8i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0)
%data = extractvalue { i8, i32 } %res, 0
store i8 %data, ptr addrspace(1) %data_addr
@@ -242,27 +265,49 @@ define amdgpu_ps void @raw_buffer_load_i16_tfe(<4 x i32> inreg %rsrc, ptr addrsp
; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
; GFX11-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_load_i16_tfe
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub0
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub1
- ; GFX12-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1)
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_load_i16_tfe
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX1200-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1)
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_i16_tfe
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX1250-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1)
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1250-NEXT: S_ENDPGM 0
%res = call { i16, i32 } @llvm.amdgcn.raw.buffer.load.sl_i16i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0)
%data = extractvalue { i16, i32 } %res, 0
store i16 %data, ptr addrspace(1) %data_addr
@@ -374,27 +419,49 @@ define amdgpu_ps void @raw_buffer_load_f16_tfe(<4 x i32> inreg %rsrc, ptr addrsp
; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
; GFX11-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_load_f16_tfe
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub0
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub1
- ; GFX12-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1)
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_load_f16_tfe
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX1200-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1)
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_f16_tfe
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX1250-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1)
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1250-NEXT: S_ENDPGM 0
%res = call { half, i32 } @llvm.amdgcn.raw.buffer.load.sl_f16i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0)
%data = extractvalue { half, i32 } %res, 0
store half %data, ptr addrspace(1) %data_addr
@@ -506,27 +573,49 @@ define amdgpu_ps void @raw_buffer_load_i32_tfe(<4 x i32> inreg %rsrc, ptr addrsp
; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
; GFX11-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_load_i32_tfe
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET]].sub0
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET]].sub1
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s32) into %ir.data_addr, addrspace 1)
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_load_i32_tfe
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s32) into %ir.data_addr, addrspace 1)
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_i32_tfe
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s32) into %ir.data_addr, addrspace 1)
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1250-NEXT: S_ENDPGM 0
%res = call { i32, i32 } @llvm.amdgcn.raw.buffer.load.sl_i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0)
%data = extractvalue { i32, i32 } %res, 0
store i32 %data, ptr addrspace(1) %data_addr
@@ -646,29 +735,53 @@ define amdgpu_ps void @raw_buffer_load_v2i32_tfe(<4 x i32> inreg %rsrc, ptr addr
; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
; GFX11-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_load_v2i32_tfe
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub0
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub1
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub2
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
- ; GFX12-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_load_v2i32_tfe
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub2
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_v2i32_tfe
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_96_align2 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub2
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1250-NEXT: S_ENDPGM 0
%res = call { <2 x i32>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v2i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0)
%data = extractvalue { <2 x i32>, i32 } %res, 0
store <2 x i32> %data, ptr addrspace(1) %data_addr
@@ -788,29 +901,53 @@ define amdgpu_ps void @raw_buffer_load_v2f32_tfe(<4 x i32> inreg %rsrc, ptr addr
; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
; GFX11-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_load_v2f32_tfe
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub0
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub1
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub2
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
- ; GFX12-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_load_v2f32_tfe
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub2
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_v2f32_tfe
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_96_align2 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub2
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1250-NEXT: S_ENDPGM 0
%res = call { <2 x float>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v2f32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0)
%data = extractvalue { <2 x float>, i32 } %res, 0
store <2 x float> %data, ptr addrspace(1) %data_addr
@@ -977,30 +1114,55 @@ define amdgpu_ps void @raw_buffer_load_v3i32_tfe(<4 x i32> inreg %rsrc, ptr addr
; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
; GFX11-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_load_v3i32_tfe
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub0
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub1
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub2
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub3
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2
- ; GFX12-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_load_v3i32_tfe
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub2
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub3
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_v3i32_tfe
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_128_align2 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub2
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub3
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1250-NEXT: S_ENDPGM 0
%res = call { <3 x i32>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v3i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0)
%data = extractvalue { <3 x i32>, i32 } %res, 0
store <3 x i32> %data, ptr addrspace(1) %data_addr
@@ -1167,30 +1329,55 @@ define amdgpu_ps void @raw_buffer_load_v3f32_tfe(<4 x i32> inreg %rsrc, ptr addr
; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
; GFX11-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_load_v3f32_tfe
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub0
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub1
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub2
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub3
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2
- ; GFX12-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_load_v3f32_tfe
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub2
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub3
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_v3f32_tfe
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_128_align2 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub2
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub3
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1250-NEXT: S_ENDPGM 0
%res = call { <3 x float>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v3f32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0)
%data = extractvalue { <3 x float>, i32 } %res, 0
store <3 x float> %data, ptr addrspace(1) %data_addr
@@ -1318,31 +1505,57 @@ define amdgpu_ps void @raw_buffer_load_v4i32_tfe(<4 x i32> inreg %rsrc, ptr addr
; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
; GFX11-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_load_v4i32_tfe
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub0
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub1
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub2
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub3
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub4
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2, [[COPY11]], %subreg.sub3
- ; GFX12-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_load_v4i32_tfe
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub2
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub3
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub4
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2, [[COPY11]], %subreg.sub3
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_v4i32_tfe
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_160_align2 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub2
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub3
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub4
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2, [[COPY11]], %subreg.sub3
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1250-NEXT: S_ENDPGM 0
%res = call { <4 x i32>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v4i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0)
%data = extractvalue { <4 x i32>, i32 } %res, 0
store <4 x i32> %data, ptr addrspace(1) %data_addr
@@ -1470,31 +1683,57 @@ define amdgpu_ps void @raw_buffer_load_v4f32_tfe(<4 x i32> inreg %rsrc, ptr addr
; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
; GFX11-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_load_v4f32_tfe
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub0
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub1
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub2
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub3
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub4
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2, [[COPY11]], %subreg.sub3
- ; GFX12-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_load_v4f32_tfe
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub2
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub3
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub4
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2, [[COPY11]], %subreg.sub3
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_v4f32_tfe
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_160_align2 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub2
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub3
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub4
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2, [[COPY11]], %subreg.sub3
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1250-NEXT: S_ENDPGM 0
%res = call { <4 x float>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v4f32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0)
%data = extractvalue { <4 x float>, i32 } %res, 0
store <4 x float> %data, ptr addrspace(1) %data_addr
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.store.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.store.ll
index 63ca7be..c365d57 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.store.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.store.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX8 %s
-; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefixes=GFX12,GFX1200 %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 -stop-after=instruction-select -o - %s | FileCheck -check-prefixes=GFX12,GFX1250 %s
; FIXME: Test with SI when argument lowering not broken for f16
; Natural mapping
@@ -126,52 +127,99 @@ define amdgpu_ps void @raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
; GFX8-NEXT: bb.5:
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE1]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE1]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE1]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: S_ENDPGM 0
call void @llvm.amdgcn.raw.buffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -309,55 +357,105 @@ define amdgpu_ps void @raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__vgpr
; GFX8-NEXT: bb.5:
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__vgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr6
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__vgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr6
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__vgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr6
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: S_ENDPGM 0
call void @llvm.amdgcn.raw.buffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -618,22 +716,39 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
; GFX8-NEXT: BUFFER_STORE_DWORDX2_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s32>), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f32
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f32
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f32
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
call void @llvm.amdgcn.raw.buffer.store.v2f32(<2 x float> %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -657,23 +772,41 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
; GFX8-NEXT: BUFFER_STORE_DWORDX3_OFFEN_exact [[REG_SEQUENCE1]], [[COPY7]], [[REG_SEQUENCE]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<3 x s32>), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v3f32
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: BUFFER_STORE_DWORDX3_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY7]], [[REG_SEQUENCE]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<3 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v3f32
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: BUFFER_STORE_DWORDX3_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY7]], [[REG_SEQUENCE]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<3 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v3f32
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: BUFFER_STORE_DWORDX3_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY7]], [[REG_SEQUENCE]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<3 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
call void @llvm.amdgcn.raw.buffer.store.v3f32(<3 x float> %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -698,24 +831,43 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
; GFX8-NEXT: BUFFER_STORE_DWORDX4_OFFEN_exact [[REG_SEQUENCE1]], [[COPY8]], [[REG_SEQUENCE]], [[COPY9]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v4f32
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY8]], [[REG_SEQUENCE]], [[COPY9]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v4f32
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY8]], [[REG_SEQUENCE]], [[COPY9]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v4f32
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY8]], [[REG_SEQUENCE]], [[COPY9]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
call void @llvm.amdgcn.raw.buffer.store.v4f32(<4 x float> %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -876,22 +1028,39 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
; GFX8-NEXT: BUFFER_STORE_DWORDX2_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v4f16
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v4f16
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v4f16
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
call void @llvm.amdgcn.raw.buffer.store.v4f16(<4 x half> %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -946,54 +1115,103 @@ define amdgpu_ps void @raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
; GFX8-NEXT: bb.5:
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v4f16
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr6
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE2]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8)
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v4f16
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr6
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE2]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8)
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v4f16
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr6
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE2]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8)
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: S_ENDPGM 0
call void @llvm.amdgcn.raw.buffer.store.v4f16(<4 x half> %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -1080,20 +1298,38 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
; GFX8-NEXT: BUFFER_STORE_DWORD_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 16, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_f32_voffset_add_16
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 16, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_f32_voffset_add_16
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 16, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_f32_voffset_add_16
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 16
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec
+ ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
%voffset.add = add i32 %voffset, 16
call void @llvm.amdgcn.raw.buffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset.add, i32 %soffset, i32 0)
ret void
@@ -1115,20 +1351,38 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
; GFX8-NEXT: BUFFER_STORE_DWORD_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4095, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_f32_voffset_add_4095
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4095, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_f32_voffset_add_4095
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4095, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_f32_voffset_add_4095
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec
+ ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
%voffset.add = add i32 %voffset, 4095
call void @llvm.amdgcn.raw.buffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset.add, i32 %soffset, i32 0)
ret void
@@ -1153,20 +1407,38 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
; GFX8-NEXT: BUFFER_STORE_DWORD_OFFEN_exact [[COPY4]], [[V_ADD_CO_U32_e64_]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_f32_voffset_add_4096
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4096, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_f32_voffset_add_4096
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4096, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_f32_voffset_add_4096
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4096
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec
+ ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
%voffset.add = add i32 %voffset, 4096
call void @llvm.amdgcn.raw.buffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset.add, i32 %soffset, i32 0)
ret void
@@ -1256,20 +1528,38 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
; GFX8-NEXT: BUFFER_STORE_DWORD_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 16, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset_add_16
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 16, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset_add_16
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 16, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset_add_16
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 16
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec
+ ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
%voffset.add = add i32 %voffset, 16
call void @llvm.amdgcn.raw.buffer.store.v2f16(<2 x half> %val, <4 x i32> %rsrc, i32 %voffset.add, i32 %soffset, i32 0)
ret void
@@ -1291,20 +1581,38 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
; GFX8-NEXT: BUFFER_STORE_DWORD_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4095, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset_add_4095
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4095, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset_add_4095
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4095, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset_add_4095
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec
+ ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
%voffset.add = add i32 %voffset, 4095
call void @llvm.amdgcn.raw.buffer.store.v2f16(<2 x half> %val, <4 x i32> %rsrc, i32 %voffset.add, i32 %soffset, i32 0)
ret void
@@ -1329,20 +1637,38 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
; GFX8-NEXT: BUFFER_STORE_DWORD_OFFEN_exact [[COPY4]], [[V_ADD_CO_U32_e64_]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset_add_4096
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4096, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset_add_4096
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4096, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset_add_4096
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4096
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec
+ ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
%voffset.add = add i32 %voffset, 4096
call void @llvm.amdgcn.raw.buffer.store.v2f16(<2 x half> %val, <4 x i32> %rsrc, i32 %voffset.add, i32 %soffset, i32 0)
ret void
@@ -1400,52 +1726,102 @@ define amdgpu_ps void @raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
; GFX8-NEXT: bb.5:
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_offset_add_5000
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE1]], [[COPY6]], 5000, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_offset_add_5000
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE1]], [[COPY6]], 5000, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_offset_add_5000
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 5000
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec
+ ; GFX1250-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE1]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_1]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: S_ENDPGM 0
%voffset.add = add i32 %voffset, 5000
call void @llvm.amdgcn.raw.buffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset.add, i32 %soffset, i32 0)
ret void
@@ -1501,51 +1877,97 @@ define amdgpu_ps void @raw_buffer_store__vgpr_rsrc__vgpr_val__5000_voffset__sgpr
; GFX8-NEXT: bb.5:
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__5000_voffset__sgpr_soffset_offset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 5000, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__5000_voffset__sgpr_soffset_offset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 5000, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__5000_voffset__sgpr_soffset_offset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 5000, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: S_ENDPGM 0
call void @llvm.amdgcn.raw.buffer.store.f32(float %val, <4 x i32> %rsrc, i32 5000, i32 %soffset, i32 0)
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.atomic.add.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.atomic.add.ll
index 75d6c59..484639a 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.atomic.add.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.atomic.add.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX8 %s
-; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX1200 %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX1250 %s
; Natural mapping
define amdgpu_ps float @struct_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(i32 %val, <4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
@@ -22,23 +23,41 @@ define amdgpu_ps float @struct_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_BOTHEN_RTN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: struct_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: struct_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%ret = call i32 @llvm.amdgcn.struct.buffer.atomic.add.i32(i32 %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
%cast = bitcast i32 %ret to float
ret float %cast
@@ -63,23 +82,41 @@ define amdgpu_ps float @struct_buffer_atomic_add_i32_noret__vgpr_val__sgpr_rsrc_
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_BOTHEN_RTN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: struct_buffer_atomic_add_i32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: struct_buffer_atomic_add_i32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_atomic_add_i32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%ret = call i32 @llvm.amdgcn.struct.buffer.atomic.add.i32(i32 %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
%cast = bitcast i32 %ret to float
ret float %cast
@@ -109,28 +146,51 @@ define amdgpu_ps <2 x float> @struct_buffer_atomic_add_i64__vgpr_val__sgpr_rsrc_
; GFX8-NEXT: $vgpr1 = COPY [[COPY10]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
;
- ; GFX12-LABEL: name: struct_buffer_atomic_add_i64__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN]].sub0
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN]].sub1
- ; GFX12-NEXT: $vgpr0 = COPY [[COPY9]]
- ; GFX12-NEXT: $vgpr1 = COPY [[COPY10]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
+ ; GFX1200-LABEL: name: struct_buffer_atomic_add_i64__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN]].sub0
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN]].sub1
+ ; GFX1200-NEXT: $vgpr0 = COPY [[COPY9]]
+ ; GFX1200-NEXT: $vgpr1 = COPY [[COPY10]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_atomic_add_i64__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64_align2 = BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN]].sub0
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN]].sub1
+ ; GFX1250-NEXT: $vgpr0 = COPY [[COPY9]]
+ ; GFX1250-NEXT: $vgpr1 = COPY [[COPY10]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
%ret = call i64 @llvm.amdgcn.struct.buffer.atomic.add.i64(i64 %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
%cast = bitcast i64 %ret to <2 x float>
ret <2 x float> %cast
@@ -156,24 +216,43 @@ define amdgpu_ps void @struct_buffer_atomic_add_i64_noret__vgpr_val__sgpr_rsrc__
; GFX8-NEXT: BUFFER_ATOMIC_ADD_X2_BOTHEN [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: struct_buffer_atomic_add_i64_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: struct_buffer_atomic_add_i64_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_atomic_add_i64_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
%ret = call i64 @llvm.amdgcn.struct.buffer.atomic.add.i64(i64 %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -236,61 +315,117 @@ define amdgpu_ps float @struct_buffer_atomic_add_i32__sgpr_val__vgpr_rsrc__sgpr_
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_BOTHEN_RTN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: struct_buffer_atomic_add_i32__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
- ; GFX12-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
- ; GFX12-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY8]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: struct_buffer_atomic_add_i32__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY8]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_atomic_add_i32__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY8]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%ret = call i32 @llvm.amdgcn.struct.buffer.atomic.add.i32(i32 %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
%cast = bitcast i32 %ret to float
ret float %cast
@@ -353,60 +488,115 @@ define amdgpu_ps void @struct_buffer_atomic_add_i32_noret__sgpr_val__vgpr_rsrc__
; GFX8-NEXT: bb.5:
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: struct_buffer_atomic_add_i32_noret__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
- ; GFX12-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
- ; GFX12-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
- ; GFX12-NEXT: BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN [[COPY8]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: struct_buffer_atomic_add_i32_noret__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
+ ; GFX1200-NEXT: BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN [[COPY8]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_atomic_add_i32_noret__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
+ ; GFX1250-NEXT: BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN [[COPY8]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: S_ENDPGM 0
%ret = call i32 @llvm.amdgcn.struct.buffer.atomic.add.i32(i32 %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -431,23 +621,41 @@ define amdgpu_ps float @struct_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_BOTHEN_RTN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: struct_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 3, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: struct_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 3, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 3, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%ret = call i32 @llvm.amdgcn.struct.buffer.atomic.add.i32(i32 %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 2)
%cast = bitcast i32 %ret to float
ret float %cast
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.atomic.cmpswap.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.atomic.cmpswap.ll
index c9d1227..7dab257 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.atomic.cmpswap.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.atomic.cmpswap.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX8 %s
-; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX1200 %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX1250 %s
; Natural mapping
define amdgpu_ps float @struct_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset(i32 %val, i32 %cmp, <4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
@@ -25,26 +26,47 @@ define amdgpu_ps float @struct_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sg
; GFX8-NEXT: $vgpr0 = COPY [[COPY9]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: struct_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY8]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN]].sub0
- ; GFX12-NEXT: $vgpr0 = COPY [[COPY9]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: struct_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY8]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN]].sub0
+ ; GFX1200-NEXT: $vgpr0 = COPY [[COPY9]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64_align2 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY8]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN]].sub0
+ ; GFX1250-NEXT: $vgpr0 = COPY [[COPY9]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%ret = call i32 @llvm.amdgcn.struct.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
%cast = bitcast i32 %ret to float
ret float %cast
@@ -71,24 +93,43 @@ define amdgpu_ps void @struct_buffer_atomic_cmpswap_noret_i32__vgpr_val__vgpr_cm
; GFX8-NEXT: BUFFER_ATOMIC_CMPSWAP_BOTHEN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY8]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: struct_buffer_atomic_cmpswap_noret_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY8]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: struct_buffer_atomic_cmpswap_noret_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY8]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_atomic_cmpswap_noret_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY8]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
%ret = call i32 @llvm.amdgcn.struct.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -155,65 +196,125 @@ define amdgpu_ps float @struct_buffer_atomic_cmpswap_i32__sgpr_val__sgpr_cmp__vg
; GFX8-NEXT: $vgpr0 = COPY [[COPY17]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: struct_buffer_atomic_cmpswap_i32__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY1]]
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY7]]
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
- ; GFX12-NEXT: [[COPY14:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
- ; GFX12-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
- ; GFX12-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY8]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY11]], %subreg.sub0, [[COPY12]], %subreg.sub1
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY17:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN]].sub0
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: $vgpr0 = COPY [[COPY17]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: struct_buffer_atomic_cmpswap_i32__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY1]]
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY7]]
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY8]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY11]], %subreg.sub0, [[COPY12]], %subreg.sub1
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY17:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN]].sub0
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: $vgpr0 = COPY [[COPY17]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_atomic_cmpswap_i32__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY1]]
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY7]]
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY8]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY11]], %subreg.sub0, [[COPY12]], %subreg.sub1
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64_align2 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY17:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN]].sub0
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: $vgpr0 = COPY [[COPY17]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%ret = call i32 @llvm.amdgcn.struct.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
%cast = bitcast i32 %ret to float
ret float %cast
@@ -279,63 +380,121 @@ define amdgpu_ps void @struct_buffer_atomic_cmpswap_i32_noret__sgpr_val__sgpr_cm
; GFX8-NEXT: bb.5:
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: struct_buffer_atomic_cmpswap_i32_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY1]]
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY7]]
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
- ; GFX12-NEXT: [[COPY14:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
- ; GFX12-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
- ; GFX12-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY8]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY11]], %subreg.sub0, [[COPY12]], %subreg.sub1
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
- ; GFX12-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: struct_buffer_atomic_cmpswap_i32_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY1]]
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY7]]
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY8]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY11]], %subreg.sub0, [[COPY12]], %subreg.sub1
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
+ ; GFX1200-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_atomic_cmpswap_i32_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY1]]
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY7]]
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY8]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY11]], %subreg.sub0, [[COPY12]], %subreg.sub1
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
+ ; GFX1250-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: S_ENDPGM 0
%ret = call i32 @llvm.amdgcn.struct.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -362,26 +521,50 @@ define amdgpu_ps float @struct_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sg
; GFX8-NEXT: $vgpr0 = COPY [[COPY9]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: struct_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add4095
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY8]], 4095, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN]].sub0
- ; GFX12-NEXT: $vgpr0 = COPY [[COPY9]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: struct_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add4095
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY8]], 4095, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN]].sub0
+ ; GFX1200-NEXT: $vgpr0 = COPY [[COPY9]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add4095
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY7]], [[COPY9]], 0, implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[V_ADD_U32_e64_]], %subreg.sub1
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64_align2 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY8]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN]].sub0
+ ; GFX1250-NEXT: $vgpr0 = COPY [[COPY10]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%voffset = add i32 %voffset.base, 4095
%ret = call i32 @llvm.amdgcn.struct.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
%cast = bitcast i32 %ret to float
@@ -420,35 +603,65 @@ define amdgpu_ps double @struct_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__s
; GFX8-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
;
- ; GFX12-LABEL: name: struct_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
- ; GFX12-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3
- ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[COPY10]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN]].sub0_sub1
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub0
- ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub1
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec
- ; GFX12-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY13]], implicit $exec
- ; GFX12-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
+ ; GFX1200-LABEL: name: struct_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
+ ; GFX1200-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3
+ ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[COPY10]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub0
+ ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub1
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec
+ ; GFX1200-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY13]], implicit $exec
+ ; GFX1200-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
+ ; GFX1250-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3
+ ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_128_align2 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[COPY10]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vreg_64_align2 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub0
+ ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub1
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec
+ ; GFX1250-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY13]], implicit $exec
+ ; GFX1250-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
%ret = call i64 @llvm.amdgcn.struct.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
%cast = bitcast i64 %ret to double
ret double %cast
@@ -479,28 +692,51 @@ define amdgpu_ps void @struct_buffer_atomic_cmpswap_noret_i64__vgpr_val__vgpr_cm
; GFX8-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_BOTHEN [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: struct_buffer_atomic_cmpswap_noret_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
- ; GFX12-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3
- ; GFX12-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: struct_buffer_atomic_cmpswap_noret_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
+ ; GFX1200-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3
+ ; GFX1200-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_atomic_cmpswap_noret_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
+ ; GFX1250-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3
+ ; GFX1250-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
%ret = call i64 @llvm.amdgcn.struct.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -576,74 +812,143 @@ define amdgpu_ps double @struct_buffer_atomic_cmpswap_i64__sgpr_val__sgpr_cmp__v
; GFX8-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_6]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
;
- ; GFX12-LABEL: name: struct_buffer_atomic_cmpswap_i64__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr7
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]]
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]]
- ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY8]]
- ; GFX12-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[COPY9]]
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY15:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1
- ; GFX12-NEXT: [[COPY16:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3
- ; GFX12-NEXT: [[COPY17:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1
- ; GFX12-NEXT: [[COPY18:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY17]], [[COPY15]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY18]], [[COPY16]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY10]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY13]], %subreg.sub0, [[COPY14]], %subreg.sub1
- ; GFX12-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY11]], %subreg.sub0_sub1, [[COPY12]], %subreg.sub2_sub3
- ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE5]], [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY19:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN]].sub0_sub1
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: [[COPY20:%[0-9]+]]:vgpr_32 = COPY [[COPY19]].sub0
- ; GFX12-NEXT: [[COPY21:%[0-9]+]]:vgpr_32 = COPY [[COPY19]].sub1
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_5:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY20]], implicit $exec
- ; GFX12-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_5]]
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_6:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY21]], implicit $exec
- ; GFX12-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_6]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
+ ; GFX1200-LABEL: name: struct_buffer_atomic_cmpswap_i64__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr7
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]]
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]]
+ ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY8]]
+ ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[COPY9]]
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY15:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY16:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY17:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY18:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY17]], [[COPY15]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY18]], [[COPY16]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY10]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY13]], %subreg.sub0, [[COPY14]], %subreg.sub1
+ ; GFX1200-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY11]], %subreg.sub0_sub1, [[COPY12]], %subreg.sub2_sub3
+ ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE5]], [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY19:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN]].sub0_sub1
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: [[COPY20:%[0-9]+]]:vgpr_32 = COPY [[COPY19]].sub0
+ ; GFX1200-NEXT: [[COPY21:%[0-9]+]]:vgpr_32 = COPY [[COPY19]].sub1
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_5:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY20]], implicit $exec
+ ; GFX1200-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_5]]
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_6:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY21]], implicit $exec
+ ; GFX1200-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_6]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_atomic_cmpswap_i64__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr7
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]]
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE1]]
+ ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY8]]
+ ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[COPY9]]
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY15:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE2]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY16:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE2]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY17:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY18:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY17]], [[COPY15]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY18]], [[COPY16]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY10]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY13]], %subreg.sub0, [[COPY14]], %subreg.sub1
+ ; GFX1250-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY11]], %subreg.sub0_sub1, [[COPY12]], %subreg.sub2_sub3
+ ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_128_align2 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE5]], [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY19:%[0-9]+]]:vreg_64_align2 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN]].sub0_sub1
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: [[COPY20:%[0-9]+]]:vgpr_32 = COPY [[COPY19]].sub0
+ ; GFX1250-NEXT: [[COPY21:%[0-9]+]]:vgpr_32 = COPY [[COPY19]].sub1
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_5:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY20]], implicit $exec
+ ; GFX1250-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_5]]
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_6:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY21]], implicit $exec
+ ; GFX1250-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_6]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
%ret = call i64 @llvm.amdgcn.struct.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
%cast = bitcast i64 %ret to double
ret double %cast
@@ -713,67 +1018,129 @@ define amdgpu_ps void @struct_buffer_atomic_cmpswap_i64_noret__sgpr_val__sgpr_cm
; GFX8-NEXT: bb.5:
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: struct_buffer_atomic_cmpswap_i64_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr7
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]]
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]]
- ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY8]]
- ; GFX12-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[COPY9]]
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY15:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1
- ; GFX12-NEXT: [[COPY16:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3
- ; GFX12-NEXT: [[COPY17:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1
- ; GFX12-NEXT: [[COPY18:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY17]], [[COPY15]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY18]], [[COPY16]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY10]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY13]], %subreg.sub0, [[COPY14]], %subreg.sub1
- ; GFX12-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY11]], %subreg.sub0_sub1, [[COPY12]], %subreg.sub2_sub3
- ; GFX12-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN [[REG_SEQUENCE5]], [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: struct_buffer_atomic_cmpswap_i64_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr7
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]]
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]]
+ ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY8]]
+ ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[COPY9]]
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY15:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY16:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY17:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY18:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY17]], [[COPY15]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY18]], [[COPY16]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY10]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY13]], %subreg.sub0, [[COPY14]], %subreg.sub1
+ ; GFX1200-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY11]], %subreg.sub0_sub1, [[COPY12]], %subreg.sub2_sub3
+ ; GFX1200-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN [[REG_SEQUENCE5]], [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_atomic_cmpswap_i64_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr7
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]]
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE1]]
+ ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY8]]
+ ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[COPY9]]
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY15:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE2]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY16:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE2]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY17:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY18:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY17]], [[COPY15]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY18]], [[COPY16]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY10]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY13]], %subreg.sub0, [[COPY14]], %subreg.sub1
+ ; GFX1250-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY11]], %subreg.sub0_sub1, [[COPY12]], %subreg.sub2_sub3
+ ; GFX1250-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN [[REG_SEQUENCE5]], [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: S_ENDPGM 0
%ret = call i64 @llvm.amdgcn.struct.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -809,35 +1176,68 @@ define amdgpu_ps double @struct_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__s
; GFX8-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
;
- ; GFX12-LABEL: name: struct_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add4095
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
- ; GFX12-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3
- ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[COPY10]], 4095, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN]].sub0_sub1
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub0
- ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub1
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec
- ; GFX12-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY13]], implicit $exec
- ; GFX12-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
+ ; GFX1200-LABEL: name: struct_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add4095
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
+ ; GFX1200-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3
+ ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[COPY10]], 4095, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub0
+ ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub1
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec
+ ; GFX1200-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY13]], implicit $exec
+ ; GFX1200-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add4095
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY9]], [[COPY11]], 0, implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[V_ADD_U32_e64_]], %subreg.sub1
+ ; GFX1250-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3
+ ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_128_align2 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[COPY10]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vreg_64_align2 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY12]].sub0
+ ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[COPY12]].sub1
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY13]], implicit $exec
+ ; GFX1250-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY14]], implicit $exec
+ ; GFX1250-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
%voffset = add i32 %voffset.base, 4095
%ret = call i64 @llvm.amdgcn.struct.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
%cast = bitcast i64 %ret to double
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.ll
index 9b5e46b3..dbef90f 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX8 %s
-; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX1200 %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX1250 %s
; Natural mapping
define amdgpu_ps float @struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset(<4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
@@ -21,22 +22,39 @@ define amdgpu_ps float @struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_vof
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_BOTHEN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%val = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret float %val
}
@@ -63,25 +81,45 @@ define amdgpu_ps <2 x float> @struct_buffer_load_v2f32__sgpr_rsrc__vgpr_vindex__
; GFX8-NEXT: $vgpr1 = COPY [[COPY8]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
;
- ; GFX12-LABEL: name: struct_buffer_load_v2f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub0
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub1
- ; GFX12-NEXT: $vgpr0 = COPY [[COPY7]]
- ; GFX12-NEXT: $vgpr1 = COPY [[COPY8]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
+ ; GFX1200-LABEL: name: struct_buffer_load_v2f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub0
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub1
+ ; GFX1200-NEXT: $vgpr0 = COPY [[COPY7]]
+ ; GFX1200-NEXT: $vgpr1 = COPY [[COPY8]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_load_v2f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub0
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub1
+ ; GFX1250-NEXT: $vgpr0 = COPY [[COPY7]]
+ ; GFX1250-NEXT: $vgpr1 = COPY [[COPY8]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
%val = call <2 x float> @llvm.amdgcn.struct.buffer.load.v2f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret <2 x float> %val
}
@@ -110,27 +148,49 @@ define amdgpu_ps <3 x float> @struct_buffer_load_v3f32__sgpr_rsrc__vgpr_vindex__
; GFX8-NEXT: $vgpr2 = COPY [[COPY9]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
;
- ; GFX12-LABEL: name: struct_buffer_load_v3f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN]].sub0
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN]].sub1
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN]].sub2
- ; GFX12-NEXT: $vgpr0 = COPY [[COPY7]]
- ; GFX12-NEXT: $vgpr1 = COPY [[COPY8]]
- ; GFX12-NEXT: $vgpr2 = COPY [[COPY9]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
+ ; GFX1200-LABEL: name: struct_buffer_load_v3f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN]].sub0
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN]].sub1
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN]].sub2
+ ; GFX1200-NEXT: $vgpr0 = COPY [[COPY7]]
+ ; GFX1200-NEXT: $vgpr1 = COPY [[COPY8]]
+ ; GFX1200-NEXT: $vgpr2 = COPY [[COPY9]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_load_v3f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN:%[0-9]+]]:vreg_96_align2 = BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN]].sub0
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN]].sub1
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN]].sub2
+ ; GFX1250-NEXT: $vgpr0 = COPY [[COPY7]]
+ ; GFX1250-NEXT: $vgpr1 = COPY [[COPY8]]
+ ; GFX1250-NEXT: $vgpr2 = COPY [[COPY9]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
%val = call <3 x float> @llvm.amdgcn.struct.buffer.load.v3f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret <3 x float> %val
}
@@ -161,29 +221,53 @@ define amdgpu_ps <4 x float> @struct_buffer_load_v4f32__sgpr_rsrc__vgpr_vindex__
; GFX8-NEXT: $vgpr3 = COPY [[COPY10]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
- ; GFX12-LABEL: name: struct_buffer_load_v4f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub0
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub1
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub2
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub3
- ; GFX12-NEXT: $vgpr0 = COPY [[COPY7]]
- ; GFX12-NEXT: $vgpr1 = COPY [[COPY8]]
- ; GFX12-NEXT: $vgpr2 = COPY [[COPY9]]
- ; GFX12-NEXT: $vgpr3 = COPY [[COPY10]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+ ; GFX1200-LABEL: name: struct_buffer_load_v4f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub0
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub1
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub2
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub3
+ ; GFX1200-NEXT: $vgpr0 = COPY [[COPY7]]
+ ; GFX1200-NEXT: $vgpr1 = COPY [[COPY8]]
+ ; GFX1200-NEXT: $vgpr2 = COPY [[COPY9]]
+ ; GFX1200-NEXT: $vgpr3 = COPY [[COPY10]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_load_v4f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN:%[0-9]+]]:vreg_128_align2 = BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub0
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub1
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub2
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub3
+ ; GFX1250-NEXT: $vgpr0 = COPY [[COPY7]]
+ ; GFX1250-NEXT: $vgpr1 = COPY [[COPY8]]
+ ; GFX1250-NEXT: $vgpr2 = COPY [[COPY9]]
+ ; GFX1250-NEXT: $vgpr3 = COPY [[COPY10]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
%val = call <4 x float> @llvm.amdgcn.struct.buffer.load.v4f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret <4 x float> %val
}
@@ -208,23 +292,41 @@ define amdgpu_ps float @struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_vof
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_BOTHEN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_vindex0
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY4]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_vindex0
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY4]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_vindex0
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY4]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%val = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 0, i32 %voffset, i32 %soffset, i32 0)
ret float %val
}
@@ -248,22 +350,42 @@ define amdgpu_ps float @struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_vof
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_BOTHEN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_voffset_add4095
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 4095, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_voffset_add4095
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 4095, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_voffset_add4095
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[V_ADD_U32_e64_]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%voffset = add i32 %voffset.base, 4095
%val = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret float %val
@@ -287,22 +409,39 @@ define amdgpu_ps float @struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_vof
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_BOTHEN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_soffset_64
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 64
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_soffset_64
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 64
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_soffset_64
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 64
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%val = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 64, i32 0)
ret float %val
}
@@ -363,59 +502,113 @@ define amdgpu_ps float @struct_buffer_load_f32__vgpr_rsrc__sgpr_vindex__sgpr_vof
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_BOTHEN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: struct_buffer_load_f32__vgpr_rsrc__sgpr_vindex__sgpr_voffset__vgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY4]]
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY12]], [[COPY10]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY8]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: struct_buffer_load_f32__vgpr_rsrc__sgpr_vindex__sgpr_voffset__vgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY4]]
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY12]], [[COPY10]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY8]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_load_f32__vgpr_rsrc__sgpr_vindex__sgpr_voffset__vgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY4]]
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY12]], [[COPY10]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY8]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%val = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret float %val
}
@@ -438,22 +631,39 @@ define amdgpu_ps float @struct_buffer_load_i8_zext__sgpr_rsrc__vgpr_vindex__vgpr
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_BOTHEN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: struct_buffer_load_i8_zext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: struct_buffer_load_i8_zext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_load_i8_zext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%val = call i8 @llvm.amdgcn.struct.buffer.load.i8(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
%ext = zext i8 %val to i32
%cast = bitcast i32 %ext to float
@@ -478,22 +688,39 @@ define amdgpu_ps float @struct_buffer_load_i8_sext__sgpr_rsrc__vgpr_vindex__vgpr
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_SBYTE_BOTHEN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: struct_buffer_load_i8_sext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_SBYTE_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_SBYTE_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_SBYTE_VBUFFER_BOTHEN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: struct_buffer_load_i8_sext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_SBYTE_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_SBYTE_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_SBYTE_VBUFFER_BOTHEN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_load_i8_sext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_SBYTE_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_SBYTE_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_SBYTE_VBUFFER_BOTHEN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%val = call i8 @llvm.amdgcn.struct.buffer.load.i8(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
%ext = sext i8 %val to i32
%cast = bitcast i32 %ext to float
@@ -519,23 +746,41 @@ define amdgpu_ps float @struct_buffer_load_i8_sext_wrong_width(<4 x i32> inreg %
; GFX8-NEXT: $vgpr0 = COPY [[V_BFE_I32_e64_]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: struct_buffer_load_i8_sext_wrong_width
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
- ; GFX12-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN]], 0, 4, implicit $exec
- ; GFX12-NEXT: $vgpr0 = COPY [[V_BFE_I32_e64_]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: struct_buffer_load_i8_sext_wrong_width
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
+ ; GFX1200-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN]], 0, 4, implicit $exec
+ ; GFX1200-NEXT: $vgpr0 = COPY [[V_BFE_I32_e64_]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_load_i8_sext_wrong_width
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
+ ; GFX1250-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN]], 0, 4, implicit $exec
+ ; GFX1250-NEXT: $vgpr0 = COPY [[V_BFE_I32_e64_]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%val = call i8 @llvm.amdgcn.struct.buffer.load.i8(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
%trunc = trunc i8 %val to i4
%ext = sext i4 %trunc to i32
@@ -561,22 +806,39 @@ define amdgpu_ps float @struct_buffer_load_i16_zext__sgpr_rsrc__vgpr_vindex__vgp
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_BOTHEN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: struct_buffer_load_i16_zext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: struct_buffer_load_i16_zext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_load_i16_zext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%val = call i16 @llvm.amdgcn.struct.buffer.load.i16(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
%ext = zext i16 %val to i32
%cast = bitcast i32 %ext to float
@@ -601,22 +863,39 @@ define amdgpu_ps float @struct_buffer_load_i16_sext__sgpr_rsrc__vgpr_vindex__vgp
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_SSHORT_BOTHEN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: struct_buffer_load_i16_sext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_SSHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_SSHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_SSHORT_VBUFFER_BOTHEN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: struct_buffer_load_i16_sext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_SSHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_SSHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_SSHORT_VBUFFER_BOTHEN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_load_i16_sext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_SSHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_SSHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_SSHORT_VBUFFER_BOTHEN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%val = call i16 @llvm.amdgcn.struct.buffer.load.i16(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
%ext = sext i16 %val to i32
%cast = bitcast i32 %ext to float
@@ -642,23 +921,41 @@ define amdgpu_ps float @struct_buffer_load_i16_sext_wrong_width(<4 x i32> inreg
; GFX8-NEXT: $vgpr0 = COPY [[V_BFE_I32_e64_]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: struct_buffer_load_i16_sext_wrong_width
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
- ; GFX12-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN]], 0, 8, implicit $exec
- ; GFX12-NEXT: $vgpr0 = COPY [[V_BFE_I32_e64_]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: struct_buffer_load_i16_sext_wrong_width
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN]], 0, 8, implicit $exec
+ ; GFX1200-NEXT: $vgpr0 = COPY [[V_BFE_I32_e64_]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_load_i16_sext_wrong_width
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN]], 0, 8, implicit $exec
+ ; GFX1250-NEXT: $vgpr0 = COPY [[V_BFE_I32_e64_]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%val = call i16 @llvm.amdgcn.struct.buffer.load.i16(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
%trunc = trunc i16 %val to i8
%ext = sext i8 %trunc to i32
@@ -685,22 +982,39 @@ define amdgpu_ps half @struct_buffer_load_f16__sgpr_rsrc__vgpr_vindex__vgpr_voff
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_BOTHEN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: struct_buffer_load_f16__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: struct_buffer_load_f16__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_load_f16__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%val = call half @llvm.amdgcn.struct.buffer.load.f16(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret half %val
}
@@ -724,22 +1038,39 @@ define amdgpu_ps <2 x half> @struct_buffer_load_v2f16__sgpr_rsrc__vgpr_vindex__v
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_BOTHEN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: struct_buffer_load_v2f16__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s16>), align 1, addrspace 8)
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: struct_buffer_load_v2f16__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s16>), align 1, addrspace 8)
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_load_v2f16__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s16>), align 1, addrspace 8)
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%val = call <2 x half> @llvm.amdgcn.struct.buffer.load.v2f16(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret <2 x half> %val
}
@@ -772,25 +1103,45 @@ define amdgpu_ps <4 x half> @struct_buffer_load_v4f16__sgpr_rsrc__vgpr_vindex__v
; GFX8-NEXT: $vgpr1 = COPY [[COPY8]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
;
- ; GFX12-LABEL: name: struct_buffer_load_v4f16__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s16>), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub0
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub1
- ; GFX12-NEXT: $vgpr0 = COPY [[COPY7]]
- ; GFX12-NEXT: $vgpr1 = COPY [[COPY8]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
+ ; GFX1200-LABEL: name: struct_buffer_load_v4f16__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s16>), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub0
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub1
+ ; GFX1200-NEXT: $vgpr0 = COPY [[COPY7]]
+ ; GFX1200-NEXT: $vgpr1 = COPY [[COPY8]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_load_v4f16__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s16>), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub0
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub1
+ ; GFX1250-NEXT: $vgpr0 = COPY [[COPY7]]
+ ; GFX1250-NEXT: $vgpr1 = COPY [[COPY8]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
%val = call <4 x half> @llvm.amdgcn.struct.buffer.load.v4f16(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret <4 x half> %val
}
@@ -814,22 +1165,39 @@ define amdgpu_ps float @struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_vof
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_BOTHEN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_glc
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 1, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_glc
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 1, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_glc
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 1, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%val = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 1)
ret float %val
}
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.tfe.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.tfe.ll
index 674fe1c..39cce20 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.tfe.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.tfe.ll
@@ -5,7 +5,8 @@
; RUN: llc -global-isel -mcpu=gfx900 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefix=GFX910
; RUN: llc -global-isel -mcpu=gfx1010 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefix=GFX910
; RUN: llc -global-isel -mcpu=gfx1100 -mattr=-real-true16 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefixes=GFX11
-; RUN: llc -global-isel -mcpu=gfx1200 -mattr=-real-true16 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefixes=GFX12
+; RUN: llc -global-isel -mcpu=gfx1200 -mattr=-real-true16 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefixes=GFX1200
+; RUN: llc -global-isel -mcpu=gfx1250 -mattr=-real-true16 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefixes=GFX1250
define amdgpu_ps void @raw_buffer_load_i8_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
; GFX67-LABEL: name: raw_buffer_load_i8_tfe
@@ -114,29 +115,53 @@ define amdgpu_ps void @raw_buffer_load_i8_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
; GFX11-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_load_i8_tfe
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
- ; GFX12-NEXT: [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN]].sub0
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN]].sub1
- ; GFX12-NEXT: GLOBAL_STORE_BYTE [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s8) into %ir.data_addr, addrspace 1)
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_load_i8_tfe
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1200-NEXT: [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX1200-NEXT: GLOBAL_STORE_BYTE [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s8) into %ir.data_addr, addrspace 1)
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_i8_tfe
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX1250-NEXT: GLOBAL_STORE_BYTE [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s8) into %ir.data_addr, addrspace 1)
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1250-NEXT: S_ENDPGM 0
%res = call { i8, i32 } @llvm.amdgcn.struct.buffer.load.sl_i8i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { i8, i32 } %res, 0
store i8 %data, ptr addrspace(1) %data_addr
@@ -252,29 +277,53 @@ define amdgpu_ps void @raw_buffer_load_i16_tfe(<4 x i32> inreg %rsrc, ptr addrsp
; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
; GFX11-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_load_i16_tfe
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
- ; GFX12-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub0
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub1
- ; GFX12-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1)
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_load_i16_tfe
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1200-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX1200-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1)
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_i16_tfe
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX1250-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1)
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1250-NEXT: S_ENDPGM 0
%res = call { i16, i32 } @llvm.amdgcn.struct.buffer.load.sl_i16i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { i16, i32 } %res, 0
store i16 %data, ptr addrspace(1) %data_addr
@@ -390,29 +439,53 @@ define amdgpu_ps void @raw_buffer_load_f16_tfe(<4 x i32> inreg %rsrc, ptr addrsp
; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
; GFX11-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_load_f16_tfe
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
- ; GFX12-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub0
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub1
- ; GFX12-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1)
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_load_f16_tfe
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1200-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX1200-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1)
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_f16_tfe
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX1250-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1)
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1250-NEXT: S_ENDPGM 0
%res = call { half, i32 } @llvm.amdgcn.struct.buffer.load.sl_f16i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { half, i32 } %res, 0
store half %data, ptr addrspace(1) %data_addr
@@ -528,29 +601,53 @@ define amdgpu_ps void @raw_buffer_load_i32_tfe(<4 x i32> inreg %rsrc, ptr addrsp
; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
; GFX11-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_load_i32_tfe
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN]].sub0
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN]].sub1
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.data_addr, addrspace 1)
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_load_i32_tfe
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.data_addr, addrspace 1)
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_i32_tfe
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.data_addr, addrspace 1)
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1250-NEXT: S_ENDPGM 0
%res = call { i32, i32 } @llvm.amdgcn.struct.buffer.load.sl_i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { i32, i32 } %res, 0
store i32 %data, ptr addrspace(1) %data_addr
@@ -674,31 +771,57 @@ define amdgpu_ps void @raw_buffer_load_v2i32_tfe(<4 x i32> inreg %rsrc, ptr addr
; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
; GFX11-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_load_v2i32_tfe
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub0
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub1
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub2
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
- ; GFX12-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_load_v2i32_tfe
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub2
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_v2i32_tfe
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_96_align2 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub2
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1250-NEXT: S_ENDPGM 0
%res = call { <2 x i32>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v2i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { <2 x i32>, i32 } %res, 0
store <2 x i32> %data, ptr addrspace(1) %data_addr
@@ -822,31 +945,57 @@ define amdgpu_ps void @raw_buffer_load_v2f32_tfe(<4 x i32> inreg %rsrc, ptr addr
; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
; GFX11-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_load_v2f32_tfe
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub0
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub1
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub2
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
- ; GFX12-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_load_v2f32_tfe
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub2
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_v2f32_tfe
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_96_align2 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub2
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1250-NEXT: S_ENDPGM 0
%res = call { <2 x float>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v2f32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { <2 x float>, i32 } %res, 0
store <2 x float> %data, ptr addrspace(1) %data_addr
@@ -1018,32 +1167,59 @@ define amdgpu_ps void @raw_buffer_load_v3i32_tfe(<4 x i32> inreg %rsrc, ptr addr
; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
; GFX11-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_load_v3i32_tfe
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub0
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub1
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub2
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub3
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2
- ; GFX12-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_load_v3i32_tfe
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub2
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub3
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_v3i32_tfe
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_128_align2 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub2
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub3
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1250-NEXT: S_ENDPGM 0
%res = call { <3 x i32>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v3i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { <3 x i32>, i32 } %res, 0
store <3 x i32> %data, ptr addrspace(1) %data_addr
@@ -1215,32 +1391,59 @@ define amdgpu_ps void @raw_buffer_load_v3f32_tfe(<4 x i32> inreg %rsrc, ptr addr
; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
; GFX11-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_load_v3f32_tfe
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub0
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub1
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub2
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub3
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2
- ; GFX12-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_load_v3f32_tfe
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub2
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub3
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_v3f32_tfe
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_128_align2 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub2
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub3
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1250-NEXT: S_ENDPGM 0
%res = call { <3 x float>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v3f32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { <3 x float>, i32 } %res, 0
store <3 x float> %data, ptr addrspace(1) %data_addr
@@ -1372,33 +1575,61 @@ define amdgpu_ps void @raw_buffer_load_v4i32_tfe(<4 x i32> inreg %rsrc, ptr addr
; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY13]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
; GFX11-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_load_v4i32_tfe
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub0
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub1
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub2
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub3
- ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub4
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2, [[COPY12]], %subreg.sub3
- ; GFX12-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY13]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_load_v4i32_tfe
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub2
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub3
+ ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub4
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2, [[COPY12]], %subreg.sub3
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY13]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_v4i32_tfe
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_160_align2 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub2
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub3
+ ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub4
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2, [[COPY12]], %subreg.sub3
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY13]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1250-NEXT: S_ENDPGM 0
%res = call { <4 x i32>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v4i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { <4 x i32>, i32 } %res, 0
store <4 x i32> %data, ptr addrspace(1) %data_addr
@@ -1530,33 +1761,61 @@ define amdgpu_ps void @raw_buffer_load_v4f32_tfe(<4 x i32> inreg %rsrc, ptr addr
; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY13]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
; GFX11-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_load_v4f32_tfe
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub0
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub1
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub2
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub3
- ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub4
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2, [[COPY12]], %subreg.sub3
- ; GFX12-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY13]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_load_v4f32_tfe
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub2
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub3
+ ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub4
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2, [[COPY12]], %subreg.sub3
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY13]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_v4f32_tfe
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_160_align2 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub2
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub3
+ ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub4
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2, [[COPY12]], %subreg.sub3
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY13]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1250-NEXT: S_ENDPGM 0
%res = call { <4 x float>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v4f32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { <4 x float>, i32 } %res, 0
store <4 x float> %data, ptr addrspace(1) %data_addr
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.store.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.store.ll
index 8183d85..c9771b5 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.store.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.store.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx810 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX8 %s
-; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX1200 %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX1250 %s
; Natural mapping
define amdgpu_ps void @struct_buffer_store_f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset(float %val, <4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
@@ -21,22 +22,39 @@ define amdgpu_ps void @struct_buffer_store_f32_sgpr_rsrc__vgpr_val__vgpr_vindex_
; GFX8-NEXT: BUFFER_STORE_DWORD_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: struct_buffer_store_f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
- ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: struct_buffer_store_f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
+ ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_store_f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
+ ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
call void @llvm.amdgcn.struct.buffer.store.f32(float %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -61,24 +79,43 @@ define amdgpu_ps void @struct_buffer_store_v2f32_sgpr_rsrc__vgpr_val__vgpr_vinde
; GFX8-NEXT: BUFFER_STORE_DWORDX2_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s32>), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: struct_buffer_store_v2f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: struct_buffer_store_v2f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_store_v2f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
call void @llvm.amdgcn.struct.buffer.store.v2f32(<2 x float> %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -104,25 +141,45 @@ define amdgpu_ps void @struct_buffer_store_v3f32_sgpr_rsrc__vgpr_val__vgpr_vinde
; GFX8-NEXT: BUFFER_STORE_DWORDX3_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY9]], 0, 0, 0, implicit $exec :: (dereferenceable store (<3 x s32>), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: struct_buffer_store_v3f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY4]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY6]], %subreg.sub3
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY8]], %subreg.sub1
- ; GFX12-NEXT: BUFFER_STORE_DWORDX3_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY9]], 0, 0, 0, implicit $exec :: (dereferenceable store (<3 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: struct_buffer_store_v3f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY4]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY6]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY8]], %subreg.sub1
+ ; GFX1200-NEXT: BUFFER_STORE_DWORDX3_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY9]], 0, 0, 0, implicit $exec :: (dereferenceable store (<3 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_store_v3f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY4]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY6]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY8]], %subreg.sub1
+ ; GFX1250-NEXT: BUFFER_STORE_DWORDX3_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY9]], 0, 0, 0, implicit $exec :: (dereferenceable store (<3 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
call void @llvm.amdgcn.struct.buffer.store.v3f32(<3 x float> %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -149,26 +206,47 @@ define amdgpu_ps void @struct_buffer_store_v4f32_sgpr_rsrc__vgpr_val__vgpr_vinde
; GFX8-NEXT: BUFFER_STORE_DWORDX4_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY10]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: struct_buffer_store_v4f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
- ; GFX12-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY10]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: struct_buffer_store_v4f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
+ ; GFX1200-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY10]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_store_v4f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
+ ; GFX1250-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY10]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
call void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float> %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -233,64 +311,123 @@ define amdgpu_ps void @struct_buffer_store_v4f32_vgpr_rsrc__sgpr_val__sgpr_vinde
; GFX8-NEXT: bb.5:
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: struct_buffer_store_v4f32_vgpr_rsrc__sgpr_val__sgpr_vindex__sgpr_voffset__vgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr7
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_128 = COPY [[REG_SEQUENCE]]
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY8]]
- ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY9]]
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY14:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
- ; GFX12-NEXT: [[COPY15:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
- ; GFX12-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1
- ; GFX12-NEXT: [[COPY17:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY17]], [[COPY15]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY10]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY12]], %subreg.sub0, [[COPY13]], %subreg.sub1
- ; GFX12-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_BOTHEN_exact [[COPY11]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: struct_buffer_store_v4f32_vgpr_rsrc__sgpr_val__sgpr_vindex__sgpr_voffset__vgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr7
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vreg_128 = COPY [[REG_SEQUENCE]]
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY8]]
+ ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY9]]
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY15:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY17:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY17]], [[COPY15]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY10]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY12]], %subreg.sub0, [[COPY13]], %subreg.sub1
+ ; GFX1200-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_BOTHEN_exact [[COPY11]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_store_v4f32_vgpr_rsrc__sgpr_val__sgpr_vindex__sgpr_voffset__vgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr7
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vreg_128_align2 = COPY [[REG_SEQUENCE]]
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY8]]
+ ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY9]]
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY15:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY17:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY17]], [[COPY15]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY10]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY12]], %subreg.sub0, [[COPY13]], %subreg.sub1
+ ; GFX1250-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_BOTHEN_exact [[COPY11]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: S_ENDPGM 0
call void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float> %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -313,22 +450,39 @@ define amdgpu_ps void @struct_buffer_store_i8_sgpr_rsrc__vgpr_val__vgpr_vindex__
; GFX8-NEXT: BUFFER_STORE_BYTE_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s8), addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: struct_buffer_store_i8_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
- ; GFX12-NEXT: BUFFER_STORE_BYTE_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s8), addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: struct_buffer_store_i8_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
+ ; GFX1200-NEXT: BUFFER_STORE_BYTE_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s8), addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_store_i8_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
+ ; GFX1250-NEXT: BUFFER_STORE_BYTE_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s8), addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
%val.trunc = trunc i32 %val to i8
call void @llvm.amdgcn.struct.buffer.store.i8(i8 %val.trunc, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret void
@@ -352,22 +506,39 @@ define amdgpu_ps void @struct_buffer_store_i16_sgpr_rsrc__vgpr_val__vgpr_vindex_
; GFX8-NEXT: BUFFER_STORE_SHORT_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s16), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: struct_buffer_store_i16_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
- ; GFX12-NEXT: BUFFER_STORE_SHORT_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s16), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: struct_buffer_store_i16_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
+ ; GFX1200-NEXT: BUFFER_STORE_SHORT_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s16), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_store_i16_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
+ ; GFX1250-NEXT: BUFFER_STORE_SHORT_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s16), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
%val.trunc = trunc i32 %val to i16
call void @llvm.amdgcn.struct.buffer.store.i16(i16 %val.trunc, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret void
@@ -391,22 +562,39 @@ define amdgpu_ps void @struct_buffer_store_f32_sgpr_rsrc__vgpr_val__vgpr_vindex_
; GFX8-NEXT: BUFFER_STORE_DWORD_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: struct_buffer_store_f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset_glc
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
- ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: struct_buffer_store_f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset_glc
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
+ ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_store_f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset_glc
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
+ ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
call void @llvm.amdgcn.struct.buffer.store.f32(float %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 1)
ret void
}
@@ -429,22 +617,39 @@ define amdgpu_ps void @struct_buffer_store_v2f16_sgpr_rsrc__vgpr_val__vgpr_vinde
; GFX8-NEXT: BUFFER_STORE_DWORD_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: struct_buffer_store_v2f16_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
- ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: struct_buffer_store_v2f16_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
+ ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_store_v2f16_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
+ ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
call void @llvm.amdgcn.struct.buffer.store.v2f16(<2 x half> %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -475,24 +680,43 @@ define amdgpu_ps void @struct_buffer_store_v4f16_sgpr_rsrc__vgpr_val__vgpr_vinde
; GFX8-NEXT: BUFFER_STORE_DWORDX2_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: struct_buffer_store_v4f16_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: struct_buffer_store_v4f16_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_store_v4f16_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
call void @llvm.amdgcn.struct.buffer.store.v4f16(<4 x half> %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/bf16-math.ll b/llvm/test/CodeGen/AMDGPU/bf16-math.ll
index 9979e83..30a7864 100644
--- a/llvm/test/CodeGen/AMDGPU/bf16-math.ll
+++ b/llvm/test/CodeGen/AMDGPU/bf16-math.ll
@@ -368,10 +368,7 @@ define amdgpu_ps float @test_clamp_v2bf16_s(<2 x bfloat> inreg %src) {
define amdgpu_ps bfloat @test_clamp_bf16_folding(bfloat %src) {
; GCN-LABEL: test_clamp_bf16_folding:
; GCN: ; %bb.0:
-; GCN-NEXT: v_exp_bf16_e32 v0, v0
-; GCN-NEXT: v_nop
-; GCN-NEXT: s_delay_alu instid0(TRANS32_DEP_1)
-; GCN-NEXT: v_pk_max_num_bf16 v0, v0, v0 clamp
+; GCN-NEXT: v_exp_bf16_e64 v0, v0 clamp
; GCN-NEXT: ; return to shader part epilog
%exp = call bfloat @llvm.exp2.bf16(bfloat %src)
%max = call bfloat @llvm.maxnum.bf16(bfloat %exp, bfloat 0.0)
@@ -382,9 +379,7 @@ define amdgpu_ps bfloat @test_clamp_bf16_folding(bfloat %src) {
define amdgpu_ps float @test_clamp_v2bf16_folding(<2 x bfloat> %src0, <2 x bfloat> %src1) {
; GCN-LABEL: test_clamp_v2bf16_folding:
; GCN: ; %bb.0:
-; GCN-NEXT: v_pk_mul_bf16 v0, v0, v1
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_pk_max_num_bf16 v0, v0, v0 clamp
+; GCN-NEXT: v_pk_mul_bf16 v0, v0, v1 clamp
; GCN-NEXT: ; return to shader part epilog
%mul = fmul <2 x bfloat> %src0, %src1
%max = call <2 x bfloat> @llvm.maxnum.v2bf16(<2 x bfloat> %mul, <2 x bfloat> <bfloat 0.0, bfloat 0.0>)
@@ -396,9 +391,7 @@ define amdgpu_ps float @test_clamp_v2bf16_folding(<2 x bfloat> %src0, <2 x bfloa
define amdgpu_ps void @v_test_mul_add_v2bf16_vvv(ptr addrspace(1) %out, <2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat> %c) {
; GCN-LABEL: v_test_mul_add_v2bf16_vvv:
; GCN: ; %bb.0:
-; GCN-NEXT: v_pk_mul_bf16 v2, v2, v3
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_pk_add_bf16 v2, v2, v4
+; GCN-NEXT: v_pk_fma_bf16 v2, v2, v3, v4
; GCN-NEXT: global_store_b32 v[0:1], v2, off
; GCN-NEXT: s_endpgm
%mul = fmul contract <2 x bfloat> %a, %b
@@ -410,9 +403,7 @@ define amdgpu_ps void @v_test_mul_add_v2bf16_vvv(ptr addrspace(1) %out, <2 x bfl
define amdgpu_ps void @v_test_mul_add_v2bf16_vss(ptr addrspace(1) %out, <2 x bfloat> %a, <2 x bfloat> inreg %b, <2 x bfloat> inreg %c) {
; GCN-LABEL: v_test_mul_add_v2bf16_vss:
; GCN: ; %bb.0:
-; GCN-NEXT: v_pk_mul_bf16 v2, v2, s0
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_pk_add_bf16 v2, v2, s1
+; GCN-NEXT: v_pk_fma_bf16 v2, v2, s0, s1
; GCN-NEXT: global_store_b32 v[0:1], v2, off
; GCN-NEXT: s_endpgm
%mul = fmul contract <2 x bfloat> %a, %b
@@ -424,9 +415,9 @@ define amdgpu_ps void @v_test_mul_add_v2bf16_vss(ptr addrspace(1) %out, <2 x bfl
define amdgpu_ps void @v_test_mul_add_v2bf16_sss(ptr addrspace(1) %out, <2 x bfloat> inreg %a, <2 x bfloat> inreg %b, <2 x bfloat> inreg %c) {
; GCN-LABEL: v_test_mul_add_v2bf16_sss:
; GCN: ; %bb.0:
-; GCN-NEXT: v_pk_mul_bf16 v2, s0, s1
+; GCN-NEXT: v_mov_b32_e32 v2, s2
; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_pk_add_bf16 v2, v2, s2
+; GCN-NEXT: v_pk_fma_bf16 v2, s0, s1, v2
; GCN-NEXT: global_store_b32 v[0:1], v2, off
; GCN-NEXT: s_endpgm
%mul = fmul contract <2 x bfloat> %a, %b
@@ -438,9 +429,7 @@ define amdgpu_ps void @v_test_mul_add_v2bf16_sss(ptr addrspace(1) %out, <2 x bfl
define amdgpu_ps void @v_test_mul_add_v2bf16_vsc(ptr addrspace(1) %out, <2 x bfloat> %a, <2 x bfloat> inreg %b) {
; GCN-LABEL: v_test_mul_add_v2bf16_vsc:
; GCN: ; %bb.0:
-; GCN-NEXT: v_pk_mul_bf16 v2, v2, s0
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_pk_add_bf16 v2, v2, 0.5 op_sel_hi:[1,0]
+; GCN-NEXT: v_pk_fma_bf16 v2, v2, s0, 0.5 op_sel_hi:[1,1,0]
; GCN-NEXT: global_store_b32 v[0:1], v2, off
; GCN-NEXT: s_endpgm
%mul = fmul contract <2 x bfloat> %a, %b
@@ -452,9 +441,9 @@ define amdgpu_ps void @v_test_mul_add_v2bf16_vsc(ptr addrspace(1) %out, <2 x bfl
define amdgpu_ps void @v_test_mul_add_v2bf16_vll(ptr addrspace(1) %out, <2 x bfloat> %a) {
; GCN-LABEL: v_test_mul_add_v2bf16_vll:
; GCN: ; %bb.0:
-; GCN-NEXT: v_pk_mul_bf16 v2, 0x42c83f80, v2
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_pk_add_bf16 v2, 0x43484000, v2
+; GCN-NEXT: s_mov_b32 s0, 0x43484000
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_pk_fma_bf16 v2, 0x42c83f80, v2, s0
; GCN-NEXT: global_store_b32 v[0:1], v2, off
; GCN-NEXT: s_endpgm
%mul = fmul contract <2 x bfloat> %a, <bfloat 1.0, bfloat 100.0>
diff --git a/llvm/test/CodeGen/AMDGPU/bf16.ll b/llvm/test/CodeGen/AMDGPU/bf16.ll
index 52e697c..505ddc8 100644
--- a/llvm/test/CodeGen/AMDGPU/bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/bf16.ll
@@ -24671,7 +24671,6 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
ret <32 x bfloat> %op
}
-
declare bfloat @llvm.maxnum.bf16(bfloat, bfloat)
declare <2 x bfloat> @llvm.maxnum.v2bf16(<2 x bfloat>, <2 x bfloat>)
declare <3 x bfloat> @llvm.maxnum.v3bf16(<3 x bfloat>, <3 x bfloat>)
@@ -29673,7 +29672,6 @@ define { bfloat, i16 } @v_frexp_bf16_i16(bfloat %a) {
ret { bfloat, i16 } %op
}
-
declare bfloat @llvm.log.bf16(bfloat)
declare bfloat @llvm.log2.bf16(bfloat)
declare bfloat @llvm.log10.bf16(bfloat)
@@ -47043,18 +47041,10 @@ define bfloat @v_fmuladd_bf16(bfloat %a, bfloat %b, bfloat %c) {
; GFX8-LABEL: v_fmuladd_bf16:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX8-NEXT: v_mul_f32_e32 v0, v0, v1
-; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
-; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v0
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
-; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v3, vcc
-; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v2
-; GFX8-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX8-NEXT: v_fma_f32 v0, v0, v1, v2
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
@@ -47067,20 +47057,13 @@ define bfloat @v_fmuladd_bf16(bfloat %a, bfloat %b, bfloat %c) {
; GFX900-LABEL: v_fmuladd_bf16:
; GFX900: ; %bb.0:
; GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX900-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; GFX900-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX900-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX900-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX900-NEXT: v_fma_f32 v0, v0, v1, v2
; GFX900-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX900-NEXT: s_movk_i32 s4, 0x7fff
; GFX900-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX900-NEXT: v_or_b32_e32 v3, 0x400000, v0
-; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
-; GFX900-NEXT: v_cndmask_b32_e32 v0, v1, v3, vcc
-; GFX900-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX900-NEXT: v_lshlrev_b32_e32 v1, 16, v2
-; GFX900-NEXT: v_add_f32_e32 v0, v0, v1
-; GFX900-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX900-NEXT: v_add3_u32 v1, v1, v0, s4
; GFX900-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX900-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
@@ -47090,35 +47073,25 @@ define bfloat @v_fmuladd_bf16(bfloat %a, bfloat %b, bfloat %c) {
; GFX950-LABEL: v_fmuladd_bf16:
; GFX950: ; %bb.0:
; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX950-NEXT: v_mul_f32_e32 v0, v0, v1
-; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0
-; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v2
-; GFX950-NEXT: v_add_f32_e32 v0, v0, v1
-; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0
+; GFX950-NEXT: v_fmac_f32_e32 v2, v0, v1
+; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v2, s0
; GFX950-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_fmuladd_bf16:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT: v_mul_f32_e32 v0, v0, v1
-; GFX10-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v3, 0x400000, v0
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX10-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v3, vcc_lo
-; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v2
-; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT: v_add_f32_e32 v0, v0, v1
-; GFX10-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v0
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX10-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX10-NEXT: v_fmac_f32_e32 v2, v0, v1
+; GFX10-NEXT: v_bfe_u32 v0, v2, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v1, 0x400000, v2
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX10-NEXT: v_add3_u32 v0, v0, v2, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc_lo
; GFX10-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
@@ -47126,55 +47099,38 @@ define bfloat @v_fmuladd_bf16(bfloat %a, bfloat %b, bfloat %c) {
; GFX11TRUE16: ; %bb.0:
; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.l, 0
-; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.h, v1.l
+; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.h, v2.l
+; GFX11TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.h, v0.l
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11TRUE16-NEXT: v_mov_b16_e32 v2.l, v3.l
; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, v3.l
-; GFX11TRUE16-NEXT: v_mul_f32_e32 v0, v1, v3
-; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.h, v2.l
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX11TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v0
-; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11TRUE16-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v4, vcc_lo
-; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX11TRUE16-NEXT: v_fmac_f32_e32 v3, v1, v2
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11TRUE16-NEXT: v_bfe_u32 v0, v3, 16, 1
+; GFX11TRUE16-NEXT: v_or_b32_e32 v1, 0x400000, v3
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11TRUE16-NEXT: v_add3_u32 v0, v0, v3, 0x7fff
; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11TRUE16-NEXT: v_add_f32_e32 v0, v0, v3
-; GFX11TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX11TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
-; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11TRUE16-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
-; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc_lo
; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11FAKE16-LABEL: v_fmuladd_bf16:
; GFX11FAKE16: ; %bb.0:
; GFX11FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11FAKE16-NEXT: v_mul_f32_e32 v0, v0, v1
-; GFX11FAKE16-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX11FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v0
-; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v2, v0, v1
+; GFX11FAKE16-NEXT: v_bfe_u32 v0, v2, 16, 1
+; GFX11FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v2
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11FAKE16-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
-; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v0, v1, v3 :: v_dual_lshlrev_b32 v1, 16, v2
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11FAKE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11FAKE16-NEXT: v_add_f32_e32 v0, v0, v1
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11FAKE16-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX11FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
-; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11FAKE16-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11FAKE16-NEXT: v_add3_u32 v0, v0, v2, 0x7fff
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc_lo
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31]
%op = call bfloat @llvm.fmuladd.bf16(bfloat %a, bfloat %b, bfloat %c)
@@ -47235,39 +47191,22 @@ define <2 x bfloat> @v_fmuladd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfl
; GFX8-LABEL: v_fmuladd_v2bf16:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v1
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v0
-; GFX8-NEXT: v_mul_f32_e32 v3, v4, v3
-; GFX8-NEXT: v_bfe_u32 v4, v3, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v3
-; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0x7fff, v4
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v3
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
-; GFX8-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc
-; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX8-NEXT: v_add_f32_e32 v3, v3, v4
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v2
+; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v1
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v0
+; GFX8-NEXT: v_fma_f32 v3, v5, v4, v3
; GFX8-NEXT: v_bfe_u32 v4, v3, 16, 1
-; GFX8-NEXT: s_movk_i32 s4, 0x7fff
; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v3
+; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX8-NEXT: v_add_u32_e32 v4, vcc, s4, v4
-; GFX8-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0x7fff, v4
+; GFX8-NEXT: v_fma_f32 v0, v0, v1, v2
; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v3
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
-; GFX8-NEXT: v_add_u32_e32 v1, vcc, s4, v1
-; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v0
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
-; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v4, vcc
-; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v2
-; GFX8-NEXT: v_add_f32_e32 v0, v0, v1
-; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
@@ -47279,36 +47218,22 @@ define <2 x bfloat> @v_fmuladd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfl
; GFX900-LABEL: v_fmuladd_v2bf16:
; GFX900: ; %bb.0:
; GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX900-NEXT: v_lshlrev_b32_e32 v3, 16, v1
-; GFX900-NEXT: v_lshlrev_b32_e32 v4, 16, v0
-; GFX900-NEXT: v_mul_f32_e32 v3, v4, v3
-; GFX900-NEXT: v_bfe_u32 v4, v3, 16, 1
-; GFX900-NEXT: s_movk_i32 s4, 0x7fff
-; GFX900-NEXT: v_add3_u32 v4, v4, v3, s4
-; GFX900-NEXT: v_or_b32_e32 v5, 0x400000, v3
-; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
-; GFX900-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc
-; GFX900-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX900-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX900-NEXT: v_add_f32_e32 v3, v3, v4
+; GFX900-NEXT: v_lshlrev_b32_e32 v3, 16, v2
+; GFX900-NEXT: v_lshlrev_b32_e32 v4, 16, v1
+; GFX900-NEXT: v_lshlrev_b32_e32 v5, 16, v0
+; GFX900-NEXT: v_fma_f32 v3, v5, v4, v3
+; GFX900-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX900-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX900-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX900-NEXT: v_bfe_u32 v4, v3, 16, 1
-; GFX900-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX900-NEXT: s_movk_i32 s4, 0x7fff
+; GFX900-NEXT: v_fma_f32 v0, v0, v1, v2
; GFX900-NEXT: v_add3_u32 v4, v4, v3, s4
; GFX900-NEXT: v_or_b32_e32 v5, 0x400000, v3
; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX900-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX900-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc
; GFX900-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX900-NEXT: v_or_b32_e32 v4, 0x400000, v0
-; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
-; GFX900-NEXT: v_cndmask_b32_e32 v0, v1, v4, vcc
-; GFX900-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX900-NEXT: v_and_b32_e32 v1, 0xffff0000, v2
-; GFX900-NEXT: v_add_f32_e32 v0, v0, v1
-; GFX900-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX900-NEXT: v_add3_u32 v1, v1, v0, s4
; GFX900-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX900-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
@@ -47319,150 +47244,94 @@ define <2 x bfloat> @v_fmuladd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfl
; GFX950-LABEL: v_fmuladd_v2bf16:
; GFX950: ; %bb.0:
; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX950-NEXT: v_and_b32_e32 v4, 0xffff0000, v0
+; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX950-NEXT: v_and_b32_e32 v4, 0xffff0000, v1
+; GFX950-NEXT: v_and_b32_e32 v5, 0xffff0000, v0
+; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX950-NEXT: v_mul_f32_e32 v3, v4, v3
-; GFX950-NEXT: v_mul_f32_e32 v0, v0, v1
-; GFX950-NEXT: v_cvt_pk_bf16_f32 v3, v3, s0
-; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0
-; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX950-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
-; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v2
-; GFX950-NEXT: v_add_f32_e32 v3, v3, v4
-; GFX950-NEXT: v_add_f32_e32 v0, v0, v1
-; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v0, v3
+; GFX950-NEXT: v_fmac_f32_e32 v3, v5, v4
+; GFX950-NEXT: v_fmac_f32_e32 v2, v0, v1
+; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v2, v3
; GFX950-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_fmuladd_v2bf16:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v1
-; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v0
-; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT: v_mul_f32_e32 v3, v4, v3
-; GFX10-NEXT: v_mul_f32_e32 v0, v0, v1
-; GFX10-NEXT: v_bfe_u32 v1, v3, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v3
-; GFX10-NEXT: v_bfe_u32 v4, v0, 16, 1
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v0
-; GFX10-NEXT: v_add3_u32 v1, v1, v3, 0x7fff
; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v2
-; GFX10-NEXT: v_add3_u32 v4, v4, v0, 0x7fff
+; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v1
+; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v0
; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v6, vcc_lo
-; GFX10-NEXT: v_add_f32_e32 v1, v1, v3
; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v1
-; GFX10-NEXT: v_add_f32_e32 v0, v0, v2
-; GFX10-NEXT: v_bfe_u32 v2, v1, 16, 1
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX10-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX10-NEXT: v_add3_u32 v2, v2, v1, 0x7fff
-; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v0
-; GFX10-NEXT: v_add3_u32 v3, v3, v0, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v1, v2, v4, vcc_lo
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX10-NEXT: v_cndmask_b32_e32 v0, v3, v5, vcc_lo
-; GFX10-NEXT: v_perm_b32 v0, v0, v1, 0x7060302
+; GFX10-NEXT: v_fmac_f32_e32 v3, v5, v4
+; GFX10-NEXT: v_fmac_f32_e32 v2, v0, v1
+; GFX10-NEXT: v_bfe_u32 v0, v3, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX10-NEXT: v_bfe_u32 v1, v2, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v2
+; GFX10-NEXT: v_add3_u32 v0, v0, v3, 0x7fff
+; GFX10-NEXT: v_add3_u32 v1, v1, v2, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo
+; GFX10-NEXT: v_perm_b32 v0, v1, v0, 0x7060302
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11TRUE16-LABEL: v_fmuladd_v2bf16:
; GFX11TRUE16: ; %bb.0:
; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v1
-; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v0
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v1
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v0
; GFX11TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11TRUE16-NEXT: v_dual_mul_f32 v3, v4, v3 :: v_dual_and_b32 v0, 0xffff0000, v0
-; GFX11TRUE16-NEXT: v_mul_f32_e32 v0, v0, v1
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11TRUE16-NEXT: v_bfe_u32 v1, v3, 16, 1
-; GFX11TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v3
-; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11TRUE16-NEXT: v_add3_u32 v1, v1, v3, 0x7fff
+; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v2
-; GFX11TRUE16-NEXT: v_bfe_u32 v4, v0, 16, 1
-; GFX11TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v0
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo
-; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11TRUE16-NEXT: v_add3_u32 v4, v4, v0, 0x7fff
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX11TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11TRUE16-NEXT: v_dual_cndmask_b32 v0, v4, v6 :: v_dual_add_f32 v1, v1, v3
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
-; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11TRUE16-NEXT: v_add_f32_e32 v0, v0, v2
-; GFX11TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
-; GFX11TRUE16-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11TRUE16-NEXT: v_add3_u32 v2, v2, v1, 0x7fff
-; GFX11TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v0
-; GFX11TRUE16-NEXT: v_add3_u32 v3, v3, v0, 0x7fff
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v4, vcc_lo
-; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v3, v5, vcc_lo
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, v1.h
-; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0xffff, v1, v0
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11TRUE16-NEXT: v_dual_fmac_f32 v2, v0, v1 :: v_dual_fmac_f32 v3, v5, v4
+; GFX11TRUE16-NEXT: v_bfe_u32 v1, v2, 16, 1
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11TRUE16-NEXT: v_bfe_u32 v0, v3, 16, 1
+; GFX11TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v2
+; GFX11TRUE16-NEXT: v_add3_u32 v1, v1, v2, 0x7fff
+; GFX11TRUE16-NEXT: v_add3_u32 v0, v0, v3, 0x7fff
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0xffff, v0, v1
; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11FAKE16-LABEL: v_fmuladd_v2bf16:
; GFX11FAKE16: ; %bb.0:
; GFX11FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v1
-; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v0
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v1
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v0
; GFX11FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11FAKE16-NEXT: v_dual_mul_f32 v3, v4, v3 :: v_dual_and_b32 v0, 0xffff0000, v0
-; GFX11FAKE16-NEXT: v_mul_f32_e32 v0, v0, v1
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11FAKE16-NEXT: v_bfe_u32 v1, v3, 16, 1
-; GFX11FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v3
-; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11FAKE16-NEXT: v_add3_u32 v1, v1, v3, 0x7fff
+; GFX11FAKE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v2
-; GFX11FAKE16-NEXT: v_bfe_u32 v4, v0, 16, 1
-; GFX11FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v0
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo
-; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11FAKE16-NEXT: v_add3_u32 v4, v4, v0, 0x7fff
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX11FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v0, v4, v6 :: v_dual_add_f32 v1, v1, v3
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11FAKE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
-; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11FAKE16-NEXT: v_add_f32_e32 v0, v0, v2
-; GFX11FAKE16-NEXT: v_bfe_u32 v2, v1, 16, 1
-; GFX11FAKE16-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11FAKE16-NEXT: v_add3_u32 v2, v2, v1, 0x7fff
-; GFX11FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v0
-; GFX11FAKE16-NEXT: v_add3_u32 v3, v3, v0, 0x7fff
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v1, v2, v4, vcc_lo
-; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, v3, v5, vcc_lo
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v2, v0, v1 :: v_dual_fmac_f32 v3, v5, v4
+; GFX11FAKE16-NEXT: v_bfe_u32 v1, v2, 16, 1
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11FAKE16-NEXT: v_bfe_u32 v0, v3, 16, 1
+; GFX11FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v2
+; GFX11FAKE16-NEXT: v_add3_u32 v1, v1, v2, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v0, v0, v3, 0x7fff
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo
; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11FAKE16-NEXT: v_perm_b32 v0, v0, v1, 0x7060302
+; GFX11FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x7060302
; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31]
%op = call <2 x bfloat> @llvm.fmuladd.v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat> %c)
ret <2 x bfloat> %op
@@ -47542,57 +47411,33 @@ define <3 x bfloat> @v_fmuladd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfl
; GFX8-LABEL: v_fmuladd_v3bf16:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX8-NEXT: v_mul_f32_e32 v1, v1, v3
+; GFX8-NEXT: v_fma_f32 v1, v1, v3, v5
; GFX8-NEXT: v_bfe_u32 v3, v1, 16, 1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, 0x7fff, v3
-; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v1
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
-; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v6, vcc
-; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v5
-; GFX8-NEXT: v_add_f32_e32 v1, v1, v3
-; GFX8-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX8-NEXT: s_movk_i32 s4, 0x7fff
-; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v1
-; GFX8-NEXT: v_add_u32_e32 v3, vcc, s4, v3
; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v0
-; GFX8-NEXT: v_mul_f32_e32 v3, v5, v3
-; GFX8-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v3
-; GFX8-NEXT: v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
-; GFX8-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
-; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v4
-; GFX8-NEXT: v_add_f32_e32 v3, v3, v5
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX8-NEXT: v_lshlrev_b32_e32 v6, 16, v0
+; GFX8-NEXT: v_fma_f32 v3, v6, v5, v3
; GFX8-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX8-NEXT: s_movk_i32 s4, 0x7fff
; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v3
+; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX8-NEXT: v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT: v_mul_f32_e32 v0, v0, v2
+; GFX8-NEXT: v_fma_f32 v0, v0, v2, v4
; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v3
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX8-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v0
-; GFX8-NEXT: v_add_u32_e32 v2, vcc, s4, v2
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v0
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
-; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v5, vcc
-; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v4
-; GFX8-NEXT: v_add_f32_e32 v0, v0, v2
-; GFX8-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v0
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 0x7fff, v2
; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
@@ -47605,52 +47450,31 @@ define <3 x bfloat> @v_fmuladd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfl
; GFX900-LABEL: v_fmuladd_v3bf16:
; GFX900: ; %bb.0:
; GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX900-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; GFX900-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX900-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX900-NEXT: v_mul_f32_e32 v1, v1, v3
+; GFX900-NEXT: v_fma_f32 v1, v1, v3, v5
; GFX900-NEXT: v_bfe_u32 v3, v1, 16, 1
; GFX900-NEXT: s_movk_i32 s4, 0x7fff
; GFX900-NEXT: v_add3_u32 v3, v3, v1, s4
-; GFX900-NEXT: v_or_b32_e32 v6, 0x400000, v1
-; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
-; GFX900-NEXT: v_cndmask_b32_e32 v1, v3, v6, vcc
-; GFX900-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX900-NEXT: v_lshlrev_b32_e32 v3, 16, v5
-; GFX900-NEXT: v_add_f32_e32 v1, v1, v3
-; GFX900-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX900-NEXT: v_add3_u32 v3, v3, v1, s4
; GFX900-NEXT: v_or_b32_e32 v5, 0x400000, v1
; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX900-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc
-; GFX900-NEXT: v_lshlrev_b32_e32 v3, 16, v2
-; GFX900-NEXT: v_lshlrev_b32_e32 v5, 16, v0
-; GFX900-NEXT: v_mul_f32_e32 v3, v5, v3
-; GFX900-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX900-NEXT: v_add3_u32 v5, v5, v3, s4
-; GFX900-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
-; GFX900-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
-; GFX900-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX900-NEXT: v_lshlrev_b32_e32 v5, 16, v4
-; GFX900-NEXT: v_add_f32_e32 v3, v3, v5
+; GFX900-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX900-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX900-NEXT: v_lshlrev_b32_e32 v6, 16, v0
+; GFX900-NEXT: v_fma_f32 v3, v6, v5, v3
+; GFX900-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX900-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX900-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX900-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX900-NEXT: v_mul_f32_e32 v0, v0, v2
+; GFX900-NEXT: v_fma_f32 v0, v0, v2, v4
; GFX900-NEXT: v_add3_u32 v5, v5, v3, s4
; GFX900-NEXT: v_or_b32_e32 v6, 0x400000, v3
; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX900-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX900-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
; GFX900-NEXT: v_add3_u32 v2, v2, v0, s4
-; GFX900-NEXT: v_or_b32_e32 v5, 0x400000, v0
-; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
-; GFX900-NEXT: v_cndmask_b32_e32 v0, v2, v5, vcc
-; GFX900-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX900-NEXT: v_and_b32_e32 v2, 0xffff0000, v4
-; GFX900-NEXT: v_add_f32_e32 v0, v0, v2
-; GFX900-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX900-NEXT: v_add3_u32 v2, v2, v0, s4
; GFX900-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX900-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
@@ -47662,211 +47486,132 @@ define <3 x bfloat> @v_fmuladd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfl
; GFX950-LABEL: v_fmuladd_v3bf16:
; GFX950: ; %bb.0:
; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX950-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX950-NEXT: v_mul_f32_e32 v1, v1, v3
-; GFX950-NEXT: v_cvt_pk_bf16_f32 v1, v1, s0
-; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v5
-; GFX950-NEXT: v_add_f32_e32 v1, v1, v3
-; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
-; GFX950-NEXT: v_and_b32_e32 v5, 0xffff0000, v0
+; GFX950-NEXT: v_fmac_f32_e32 v5, v1, v3
+; GFX950-NEXT: v_cvt_pk_bf16_f32 v1, v5, s0
+; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v4
+; GFX950-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX950-NEXT: v_and_b32_e32 v6, 0xffff0000, v0
+; GFX950-NEXT: v_lshlrev_b32_e32 v4, 16, v4
; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX950-NEXT: v_mul_f32_e32 v3, v5, v3
-; GFX950-NEXT: v_mul_f32_e32 v0, v0, v2
-; GFX950-NEXT: v_cvt_pk_bf16_f32 v3, v3, s0
-; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0
-; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX950-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
-; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v4
-; GFX950-NEXT: v_add_f32_e32 v3, v3, v5
-; GFX950-NEXT: v_add_f32_e32 v0, v0, v2
-; GFX950-NEXT: v_cvt_pk_bf16_f32 v1, v1, s0
-; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v0, v3
+; GFX950-NEXT: v_fmac_f32_e32 v3, v6, v5
+; GFX950-NEXT: v_fmac_f32_e32 v4, v0, v2
+; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v4, v3
; GFX950-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_fmuladd_v3bf16:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v4
+; GFX10-NEXT: v_lshlrev_b32_e32 v7, 16, v2
+; GFX10-NEXT: v_lshlrev_b32_e32 v8, 16, v0
+; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX10-NEXT: v_lshlrev_b32_e32 v7, 16, v0
-; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT: v_mul_f32_e32 v1, v1, v3
-; GFX10-NEXT: v_mul_f32_e32 v3, v7, v6
-; GFX10-NEXT: v_mul_f32_e32 v0, v0, v2
-; GFX10-NEXT: v_bfe_u32 v2, v1, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v1
-; GFX10-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX10-NEXT: v_bfe_u32 v8, v0, 16, 1
-; GFX10-NEXT: v_add3_u32 v2, v2, v1, 0x7fff
-; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX10-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX10-NEXT: v_or_b32_e32 v10, 0x400000, v0
-; GFX10-NEXT: v_add3_u32 v8, v8, v0, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v1, v2, v6, vcc_lo
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v5
-; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v4
; GFX10-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX10-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX10-NEXT: v_add_f32_e32 v1, v1, v3
; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX10-NEXT: v_cndmask_b32_e32 v0, v8, v10, vcc_lo
-; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v1
-; GFX10-NEXT: v_add_f32_e32 v2, v2, v5
; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX10-NEXT: v_add_f32_e32 v0, v0, v4
-; GFX10-NEXT: v_or_b32_e32 v7, 0x400000, v2
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX10-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX10-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX10-NEXT: v_bfe_u32 v5, v0, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX10-NEXT: v_add3_u32 v4, v4, v1, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v2, v3, v7, vcc_lo
-; GFX10-NEXT: v_add3_u32 v5, v5, v0, 0x7fff
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX10-NEXT: v_cndmask_b32_e32 v0, v5, v8, vcc_lo
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX10-NEXT: v_perm_b32 v0, v0, v2, 0x7060302
-; GFX10-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
-; GFX10-NEXT: v_alignbit_b32 v1, s4, v1, 16
+; GFX10-NEXT: v_fmac_f32_e32 v6, v8, v7
+; GFX10-NEXT: v_fmac_f32_e32 v5, v1, v3
+; GFX10-NEXT: v_fmac_f32_e32 v4, v0, v2
+; GFX10-NEXT: v_bfe_u32 v1, v6, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v3, 0x400000, v6
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX10-NEXT: v_bfe_u32 v0, v5, 16, 1
+; GFX10-NEXT: v_bfe_u32 v2, v4, 16, 1
+; GFX10-NEXT: v_add3_u32 v1, v1, v6, 0x7fff
+; GFX10-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX10-NEXT: v_add3_u32 v0, v0, v5, 0x7fff
+; GFX10-NEXT: v_add3_u32 v2, v2, v4, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v0, v8, vcc_lo
+; GFX10-NEXT: v_perm_b32 v0, v2, v1, 0x7060302
+; GFX10-NEXT: v_alignbit_b32 v1, s4, v3, 16
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11TRUE16-LABEL: v_fmuladd_v3bf16:
; GFX11TRUE16: ; %bb.0:
; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v7, 16, v2
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v8, 16, v0
+; GFX11TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX11TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v7, 16, v0
-; GFX11TRUE16-NEXT: v_dual_mul_f32 v1, v1, v3 :: v_dual_and_b32 v0, 0xffff0000, v0
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v4
+; GFX11TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11TRUE16-NEXT: v_mul_f32_e32 v0, v0, v2
-; GFX11TRUE16-NEXT: v_mul_f32_e32 v6, v7, v6
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11TRUE16-NEXT: v_bfe_u32 v9, v1, 16, 1
-; GFX11TRUE16-NEXT: v_bfe_u32 v7, v0, 16, 1
+; GFX11TRUE16-NEXT: v_dual_fmac_f32 v4, v0, v2 :: v_dual_fmac_f32 v5, v1, v3
+; GFX11TRUE16-NEXT: v_fmac_f32_e32 v6, v8, v7
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11TRUE16-NEXT: v_bfe_u32 v1, v4, 16, 1
+; GFX11TRUE16-NEXT: v_bfe_u32 v3, v5, 16, 1
; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11TRUE16-NEXT: v_bfe_u32 v2, v6, 16, 1
-; GFX11TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v6
+; GFX11TRUE16-NEXT: v_bfe_u32 v0, v6, 16, 1
+; GFX11TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v6
; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v0
-; GFX11TRUE16-NEXT: v_add3_u32 v7, v7, v0, 0x7fff
-; GFX11TRUE16-NEXT: v_add3_u32 v2, v2, v6, 0x7fff
-; GFX11TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v1
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc_lo
-; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
-; GFX11TRUE16-NEXT: v_add3_u32 v8, v9, v1, 0x7fff
-; GFX11TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GFX11TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v7, v10, vcc_lo
-; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11TRUE16-NEXT: v_add_f32_e32 v2, v2, v3
-; GFX11TRUE16-NEXT: v_dual_cndmask_b32 v1, v8, v6 :: v_dual_and_b32 v0, 0xffff0000, v0
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX11TRUE16-NEXT: v_dual_add_f32 v0, v0, v4 :: v_dual_and_b32 v1, 0xffff0000, v1
-; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11TRUE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX11TRUE16-NEXT: v_bfe_u32 v4, v0, 16, 1
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11TRUE16-NEXT: v_add_f32_e32 v1, v1, v5
-; GFX11TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v2
-; GFX11TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
-; GFX11TRUE16-NEXT: v_add3_u32 v4, v4, v0, 0x7fff
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11TRUE16-NEXT: v_bfe_u32 v6, v1, 16, 1
-; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v5, vcc_lo
-; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11TRUE16-NEXT: v_add3_u32 v5, v6, v1, 0x7fff
-; GFX11TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
-; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v4, v7, vcc_lo
-; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11TRUE16-NEXT: v_add3_u32 v1, v1, v4, 0x7fff
+; GFX11TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX11TRUE16-NEXT: v_add3_u32 v0, v0, v6, 0x7fff
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11TRUE16-NEXT: v_add3_u32 v2, v3, v5, 0x7fff
+; GFX11TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0xffff, v2, v0
-; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v5, v3, vcc_lo
-; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, v1.h
+; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0xffff, v0, v1
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.h
; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11FAKE16-LABEL: v_fmuladd_v3bf16:
; GFX11FAKE16: ; %bb.0:
; GFX11FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v7, 16, v0
-; GFX11FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11FAKE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11FAKE16-NEXT: v_dual_mul_f32 v0, v0, v2 :: v_dual_lshlrev_b32 v3, 16, v3
-; GFX11FAKE16-NEXT: v_bfe_u32 v8, v0, 16, 1
-; GFX11FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v0
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11FAKE16-NEXT: v_add3_u32 v8, v8, v0, 0x7fff
-; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX11FAKE16-NEXT: v_mul_f32_e32 v1, v1, v3
-; GFX11FAKE16-NEXT: v_mul_f32_e32 v3, v7, v6
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11FAKE16-NEXT: v_bfe_u32 v2, v1, 16, 1
-; GFX11FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v1
-; GFX11FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11FAKE16-NEXT: v_add3_u32 v2, v2, v1, 0x7fff
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v1, v2, v6, vcc_lo
-; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v5
-; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v4
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v4
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v7, 16, v2
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v8, 16, v0
; GFX11FAKE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
-; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX11FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, v8, v10, vcc_lo
-; GFX11FAKE16-NEXT: v_add_f32_e32 v2, v2, v5
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
; GFX11FAKE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v2
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11FAKE16-NEXT: v_dual_add_f32 v0, v0, v4 :: v_dual_add_f32 v1, v1, v3
-; GFX11FAKE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11FAKE16-NEXT: v_bfe_u32 v5, v0, 16, 1
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11FAKE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX11FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v1
-; GFX11FAKE16-NEXT: v_add3_u32 v5, v5, v0, 0x7fff
-; GFX11FAKE16-NEXT: v_add3_u32 v4, v4, v1, 0x7fff
-; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v7, vcc_lo
-; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, v5, v8, vcc_lo
-; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11FAKE16-NEXT: v_perm_b32 v0, v0, v2, 0x7060302
-; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11FAKE16-NEXT: v_alignbit_b32 v1, s0, v1, 16
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v6, v8, v7 :: v_dual_lshlrev_b32 v5, 16, v5
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v4, v0, v2
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11FAKE16-NEXT: v_bfe_u32 v2, v4, 16, 1
+; GFX11FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX11FAKE16-NEXT: v_add3_u32 v2, v2, v4, 0x7fff
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v5, v1, v3
+; GFX11FAKE16-NEXT: v_bfe_u32 v1, v6, 16, 1
+; GFX11FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v6
+; GFX11FAKE16-NEXT: v_bfe_u32 v0, v5, 16, 1
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11FAKE16-NEXT: v_add3_u32 v1, v1, v6, 0x7fff
+; GFX11FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11FAKE16-NEXT: v_add3_u32 v0, v0, v5, 0x7fff
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_3)
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v3, v0, v8, vcc_lo
+; GFX11FAKE16-NEXT: v_perm_b32 v0, v2, v1, 0x7060302
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11FAKE16-NEXT: v_alignbit_b32 v1, s0, v3, 16
; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31]
%op = call <3 x bfloat> @llvm.fmuladd.v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfloat> %c)
ret <3 x bfloat> %op
@@ -47966,75 +47711,43 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl
; GFX8-LABEL: v_fmuladd_v4bf16:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v6, 16, v3
-; GFX8-NEXT: v_lshlrev_b32_e32 v7, 16, v1
-; GFX8-NEXT: v_mul_f32_e32 v6, v7, v6
-; GFX8-NEXT: v_bfe_u32 v7, v6, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v6
-; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0x7fff, v7
-; GFX8-NEXT: v_or_b32_e32 v8, 0x400000, v6
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
-; GFX8-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc
-; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX8-NEXT: v_lshlrev_b32_e32 v7, 16, v5
-; GFX8-NEXT: v_add_f32_e32 v6, v6, v7
+; GFX8-NEXT: v_lshlrev_b32_e32 v6, 16, v5
+; GFX8-NEXT: v_lshlrev_b32_e32 v7, 16, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v8, 16, v1
+; GFX8-NEXT: v_fma_f32 v6, v8, v7, v6
; GFX8-NEXT: v_bfe_u32 v7, v6, 16, 1
-; GFX8-NEXT: s_movk_i32 s4, 0x7fff
; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v6
+; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX8-NEXT: v_add_u32_e32 v7, vcc, s4, v7
-; GFX8-NEXT: v_mul_f32_e32 v1, v1, v3
+; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0x7fff, v7
+; GFX8-NEXT: v_fma_f32 v1, v1, v3, v5
; GFX8-NEXT: v_or_b32_e32 v8, 0x400000, v6
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
; GFX8-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX8-NEXT: s_movk_i32 s4, 0x7fff
; GFX8-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, s4, v3
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v1
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
-; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v7, vcc
-; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v5
-; GFX8-NEXT: v_add_f32_e32 v1, v1, v3
-; GFX8-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v1
-; GFX8-NEXT: v_add_u32_e32 v3, vcc, s4, v3
; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v0
-; GFX8-NEXT: v_mul_f32_e32 v3, v5, v3
-; GFX8-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v3
-; GFX8-NEXT: v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
-; GFX8-NEXT: v_cndmask_b32_e32 v3, v5, v7, vcc
-; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v4
-; GFX8-NEXT: v_add_f32_e32 v3, v3, v5
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX8-NEXT: v_lshlrev_b32_e32 v7, 16, v0
+; GFX8-NEXT: v_fma_f32 v3, v7, v5, v3
; GFX8-NEXT: v_bfe_u32 v5, v3, 16, 1
; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v3
+; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX8-NEXT: v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT: v_mul_f32_e32 v0, v0, v2
+; GFX8-NEXT: v_fma_f32 v0, v0, v2, v4
; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX8-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v3, v5, v7, vcc
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v0
-; GFX8-NEXT: v_add_u32_e32 v2, vcc, s4, v2
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v0
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
-; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v5, vcc
-; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v4
-; GFX8-NEXT: v_add_f32_e32 v0, v0, v2
-; GFX8-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v0
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 0x7fff, v2
; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
@@ -48048,68 +47761,40 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl
; GFX900-LABEL: v_fmuladd_v4bf16:
; GFX900: ; %bb.0:
; GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX900-NEXT: v_lshlrev_b32_e32 v6, 16, v3
-; GFX900-NEXT: v_lshlrev_b32_e32 v7, 16, v1
-; GFX900-NEXT: v_mul_f32_e32 v6, v7, v6
-; GFX900-NEXT: v_bfe_u32 v7, v6, 16, 1
-; GFX900-NEXT: s_movk_i32 s4, 0x7fff
-; GFX900-NEXT: v_add3_u32 v7, v7, v6, s4
-; GFX900-NEXT: v_or_b32_e32 v8, 0x400000, v6
-; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
-; GFX900-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc
-; GFX900-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX900-NEXT: v_lshlrev_b32_e32 v7, 16, v5
-; GFX900-NEXT: v_add_f32_e32 v6, v6, v7
+; GFX900-NEXT: v_lshlrev_b32_e32 v6, 16, v5
+; GFX900-NEXT: v_lshlrev_b32_e32 v7, 16, v3
+; GFX900-NEXT: v_lshlrev_b32_e32 v8, 16, v1
+; GFX900-NEXT: v_fma_f32 v6, v8, v7, v6
+; GFX900-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX900-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX900-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX900-NEXT: v_bfe_u32 v7, v6, 16, 1
-; GFX900-NEXT: v_mul_f32_e32 v1, v1, v3
+; GFX900-NEXT: s_movk_i32 s4, 0x7fff
+; GFX900-NEXT: v_fma_f32 v1, v1, v3, v5
; GFX900-NEXT: v_add3_u32 v7, v7, v6, s4
; GFX900-NEXT: v_or_b32_e32 v8, 0x400000, v6
; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
; GFX900-NEXT: v_bfe_u32 v3, v1, 16, 1
; GFX900-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc
; GFX900-NEXT: v_add3_u32 v3, v3, v1, s4
-; GFX900-NEXT: v_or_b32_e32 v7, 0x400000, v1
-; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
-; GFX900-NEXT: v_cndmask_b32_e32 v1, v3, v7, vcc
-; GFX900-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX900-NEXT: v_and_b32_e32 v3, 0xffff0000, v5
-; GFX900-NEXT: v_add_f32_e32 v1, v1, v3
-; GFX900-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX900-NEXT: v_add3_u32 v3, v3, v1, s4
; GFX900-NEXT: v_or_b32_e32 v5, 0x400000, v1
; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX900-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc
-; GFX900-NEXT: v_lshlrev_b32_e32 v3, 16, v2
-; GFX900-NEXT: v_lshlrev_b32_e32 v5, 16, v0
-; GFX900-NEXT: v_mul_f32_e32 v3, v5, v3
-; GFX900-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX900-NEXT: v_add3_u32 v5, v5, v3, s4
-; GFX900-NEXT: v_or_b32_e32 v7, 0x400000, v3
-; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
-; GFX900-NEXT: v_cndmask_b32_e32 v3, v5, v7, vcc
-; GFX900-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX900-NEXT: v_lshlrev_b32_e32 v5, 16, v4
-; GFX900-NEXT: v_add_f32_e32 v3, v3, v5
+; GFX900-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX900-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX900-NEXT: v_lshlrev_b32_e32 v7, 16, v0
+; GFX900-NEXT: v_fma_f32 v3, v7, v5, v3
+; GFX900-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX900-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX900-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX900-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX900-NEXT: v_mul_f32_e32 v0, v0, v2
+; GFX900-NEXT: v_fma_f32 v0, v0, v2, v4
; GFX900-NEXT: v_add3_u32 v5, v5, v3, s4
; GFX900-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX900-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX900-NEXT: v_cndmask_b32_e32 v3, v5, v7, vcc
; GFX900-NEXT: v_add3_u32 v2, v2, v0, s4
-; GFX900-NEXT: v_or_b32_e32 v5, 0x400000, v0
-; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
-; GFX900-NEXT: v_cndmask_b32_e32 v0, v2, v5, vcc
-; GFX900-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX900-NEXT: v_and_b32_e32 v2, 0xffff0000, v4
-; GFX900-NEXT: v_add_f32_e32 v0, v0, v2
-; GFX900-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX900-NEXT: v_add3_u32 v2, v2, v0, s4
; GFX900-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX900-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
@@ -48121,264 +47806,162 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl
; GFX950-LABEL: v_fmuladd_v4bf16:
; GFX950: ; %bb.0:
; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX950-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX950-NEXT: v_and_b32_e32 v7, 0xffff0000, v1
+; GFX950-NEXT: v_and_b32_e32 v6, 0xffff0000, v5
+; GFX950-NEXT: v_and_b32_e32 v7, 0xffff0000, v3
+; GFX950-NEXT: v_and_b32_e32 v8, 0xffff0000, v1
+; GFX950-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX950-NEXT: v_mul_f32_e32 v1, v1, v3
-; GFX950-NEXT: v_cvt_pk_bf16_f32 v1, v1, s0
-; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v5
-; GFX950-NEXT: v_mul_f32_e32 v6, v7, v6
-; GFX950-NEXT: v_and_b32_e32 v7, 0xffff0000, v5
-; GFX950-NEXT: v_add_f32_e32 v1, v1, v3
+; GFX950-NEXT: v_fmac_f32_e32 v6, v8, v7
+; GFX950-NEXT: v_fmac_f32_e32 v5, v1, v3
+; GFX950-NEXT: v_and_b32_e32 v1, 0xffff0000, v4
; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
-; GFX950-NEXT: v_and_b32_e32 v5, 0xffff0000, v0
+; GFX950-NEXT: v_and_b32_e32 v7, 0xffff0000, v0
+; GFX950-NEXT: v_fmac_f32_e32 v1, v7, v3
+; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX950-NEXT: v_mul_f32_e32 v3, v5, v3
-; GFX950-NEXT: v_mul_f32_e32 v0, v0, v2
-; GFX950-NEXT: v_cvt_pk_bf16_f32 v6, v6, s0
-; GFX950-NEXT: v_cvt_pk_bf16_f32 v3, v3, s0
-; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0
-; GFX950-NEXT: v_lshlrev_b32_e32 v6, 16, v6
-; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX950-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
-; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v4
-; GFX950-NEXT: v_add_f32_e32 v6, v6, v7
-; GFX950-NEXT: v_add_f32_e32 v3, v3, v5
-; GFX950-NEXT: v_add_f32_e32 v0, v0, v2
-; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v0, v3
-; GFX950-NEXT: v_cvt_pk_bf16_f32 v1, v1, v6
+; GFX950-NEXT: v_fmac_f32_e32 v3, v0, v2
+; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v3, v1
+; GFX950-NEXT: v_cvt_pk_bf16_f32 v1, v5, v6
; GFX950-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_fmuladd_v4bf16:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v3
-; GFX10-NEXT: v_lshlrev_b32_e32 v7, 16, v1
+; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v5
+; GFX10-NEXT: v_lshlrev_b32_e32 v7, 16, v3
+; GFX10-NEXT: v_lshlrev_b32_e32 v8, 16, v1
+; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX10-NEXT: v_lshlrev_b32_e32 v9, 16, v0
-; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT: v_mul_f32_e32 v6, v7, v6
-; GFX10-NEXT: v_lshlrev_b32_e32 v7, 16, v2
+; GFX10-NEXT: v_fmac_f32_e32 v6, v8, v7
+; GFX10-NEXT: v_lshlrev_b32_e32 v7, 16, v4
+; GFX10-NEXT: v_lshlrev_b32_e32 v8, 16, v2
+; GFX10-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX10-NEXT: v_mul_f32_e32 v1, v1, v3
-; GFX10-NEXT: v_lshlrev_b32_e32 v8, 16, v5
+; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX10-NEXT: v_bfe_u32 v10, v6, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v3, 0x400000, v6
-; GFX10-NEXT: v_mul_f32_e32 v7, v9, v7
-; GFX10-NEXT: v_mul_f32_e32 v0, v0, v2
-; GFX10-NEXT: v_bfe_u32 v2, v1, 16, 1
-; GFX10-NEXT: v_add3_u32 v10, v10, v6, 0x7fff
+; GFX10-NEXT: v_fmac_f32_e32 v5, v1, v3
+; GFX10-NEXT: v_fmac_f32_e32 v7, v9, v8
+; GFX10-NEXT: v_or_b32_e32 v1, 0x400000, v6
+; GFX10-NEXT: v_fmac_f32_e32 v4, v0, v2
+; GFX10-NEXT: v_add3_u32 v0, v10, v6, 0x7fff
+; GFX10-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX10-NEXT: v_bfe_u32 v3, v7, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v1
-; GFX10-NEXT: v_bfe_u32 v9, v7, 16, 1
-; GFX10-NEXT: v_add3_u32 v2, v2, v1, 0x7fff
-; GFX10-NEXT: v_bfe_u32 v11, v0, 16, 1
-; GFX10-NEXT: v_cndmask_b32_e32 v3, v10, v3, vcc_lo
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX10-NEXT: v_or_b32_e32 v10, 0x400000, v7
-; GFX10-NEXT: v_add3_u32 v9, v9, v7, 0x7fff
-; GFX10-NEXT: v_or_b32_e32 v12, 0x400000, v0
-; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX10-NEXT: v_cndmask_b32_e32 v1, v2, v6, vcc_lo
+; GFX10-NEXT: v_bfe_u32 v8, v4, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v0, v1, vcc_lo
+; GFX10-NEXT: v_add3_u32 v0, v2, v5, 0x7fff
+; GFX10-NEXT: v_add3_u32 v2, v3, v7, 0x7fff
+; GFX10-NEXT: v_or_b32_e32 v3, 0x400000, v7
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX10-NEXT: v_add3_u32 v11, v11, v0, 0x7fff
-; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
-; GFX10-NEXT: v_add_f32_e32 v3, v3, v8
-; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX10-NEXT: v_cndmask_b32_e32 v2, v9, v10, vcc_lo
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v4
-; GFX10-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GFX10-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX10-NEXT: v_cndmask_b32_e32 v0, v11, v12, vcc_lo
-; GFX10-NEXT: v_add_f32_e32 v1, v1, v5
-; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v3
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX10-NEXT: v_add_f32_e32 v2, v2, v6
-; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT: v_bfe_u32 v6, v1, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX10-NEXT: v_add_f32_e32 v0, v0, v4
-; GFX10-NEXT: v_add3_u32 v4, v7, v3, 0x7fff
-; GFX10-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX10-NEXT: v_bfe_u32 v8, v0, 16, 1
-; GFX10-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc_lo
-; GFX10-NEXT: v_add3_u32 v4, v6, v1, 0x7fff
-; GFX10-NEXT: v_add3_u32 v5, v7, v2, 0x7fff
-; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v2
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX10-NEXT: v_add3_u32 v7, v8, v0, 0x7fff
-; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX10-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX10-NEXT: v_cndmask_b32_e32 v0, v7, v8, vcc_lo
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX10-NEXT: v_perm_b32 v0, v0, v2, 0x7060302
-; GFX10-NEXT: v_cndmask_b32_e32 v1, v4, v9, vcc_lo
-; GFX10-NEXT: v_perm_b32 v1, v1, v3, 0x7060302
+; GFX10-NEXT: v_add3_u32 v6, v8, v4, 0x7fff
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX10-NEXT: v_cndmask_b32_e32 v4, v0, v9, vcc_lo
+; GFX10-NEXT: v_perm_b32 v0, v3, v2, 0x7060302
+; GFX10-NEXT: v_perm_b32 v1, v4, v1, 0x7060302
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11TRUE16-LABEL: v_fmuladd_v4bf16:
; GFX11TRUE16: ; %bb.0:
; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX11TRUE16-NEXT: v_and_b32_e32 v9, 0xffff0000, v0
-; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX11TRUE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v1
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11TRUE16-NEXT: v_dual_mul_f32 v6, v7, v6 :: v_dual_lshlrev_b32 v3, 16, v3
-; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11TRUE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v3
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v10, 16, v0
+; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX11TRUE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v1
; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX11TRUE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v5
-; GFX11TRUE16-NEXT: v_bfe_u32 v10, v6, 16, 1
+; GFX11TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v5
; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11TRUE16-NEXT: v_mul_f32_e32 v1, v1, v3
-; GFX11TRUE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v2
-; GFX11TRUE16-NEXT: v_dual_mul_f32 v3, v9, v7 :: v_dual_lshlrev_b32 v2, 16, v2
-; GFX11TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
-; GFX11TRUE16-NEXT: v_add3_u32 v9, v10, v6, 0x7fff
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11TRUE16-NEXT: v_mul_f32_e32 v0, v0, v2
-; GFX11TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
-; GFX11TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v1
-; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v6, v9, v7, vcc_lo
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11TRUE16-NEXT: v_bfe_u32 v9, v0, 16, 1
-; GFX11TRUE16-NEXT: v_add3_u32 v2, v2, v1, 0x7fff
-; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v0
-; GFX11TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11TRUE16-NEXT: v_add3_u32 v9, v9, v0, 0x7fff
-; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v10, vcc_lo
-; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11TRUE16-NEXT: v_dual_cndmask_b32 v0, v9, v11 :: v_dual_and_b32 v1, 0xffff0000, v1
-; GFX11TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11TRUE16-NEXT: v_dual_add_f32 v1, v1, v5 :: v_dual_and_b32 v0, 0xffff0000, v0
-; GFX11TRUE16-NEXT: v_add_f32_e32 v2, v6, v8
-; GFX11TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11TRUE16-NEXT: v_bfe_u32 v5, v1, 16, 1
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11TRUE16-NEXT: v_fmac_f32_e32 v5, v1, v3
+; GFX11TRUE16-NEXT: v_dual_fmac_f32 v6, v8, v7 :: v_dual_lshlrev_b32 v7, 16, v4
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v8, 16, v2
+; GFX11TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v4
+; GFX11TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11TRUE16-NEXT: v_bfe_u32 v3, v5, 16, 1
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11TRUE16-NEXT: v_fmac_f32_e32 v7, v10, v8
+; GFX11TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11TRUE16-NEXT: v_fmac_f32_e32 v1, v0, v2
+; GFX11TRUE16-NEXT: v_add3_u32 v3, v3, v5, 0x7fff
+; GFX11TRUE16-NEXT: v_bfe_u32 v9, v6, 16, 1
+; GFX11TRUE16-NEXT: v_bfe_u32 v0, v7, 16, 1
+; GFX11TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v7
+; GFX11TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v6
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11TRUE16-NEXT: v_add3_u32 v0, v0, v7, 0x7fff
+; GFX11TRUE16-NEXT: v_add3_u32 v4, v9, v6, 0x7fff
+; GFX11TRUE16-NEXT: v_bfe_u32 v9, v1, 16, 1
; GFX11TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
-; GFX11TRUE16-NEXT: v_dual_cndmask_b32 v3, v7, v6 :: v_dual_lshlrev_b32 v6, 16, v4
-; GFX11TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GFX11TRUE16-NEXT: v_add3_u32 v5, v5, v1, 0x7fff
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11TRUE16-NEXT: v_dual_add_f32 v0, v0, v6 :: v_dual_and_b32 v3, 0xffff0000, v3
-; GFX11TRUE16-NEXT: v_add3_u32 v6, v7, v2, 0x7fff
-; GFX11TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v2
+; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v10, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11TRUE16-NEXT: v_add3_u32 v5, v9, v1, 0x7fff
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc_lo
; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11TRUE16-NEXT: v_add_f32_e32 v3, v3, v4
-; GFX11TRUE16-NEXT: v_bfe_u32 v4, v0, 16, 1
-; GFX11TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v0
; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v5, v8, vcc_lo
-; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11TRUE16-NEXT: v_add3_u32 v4, v4, v0, 0x7fff
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, v1.h
-; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v4, v10, vcc_lo
-; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
-; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v2, v6, v7, vcc_lo
-; GFX11TRUE16-NEXT: v_bfe_u32 v9, v3, 16, 1
-; GFX11TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
-; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11TRUE16-NEXT: v_bfi_b32 v1, 0xffff, v1, v2
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11TRUE16-NEXT: v_add3_u32 v5, v9, v3, 0x7fff
-; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v8, vcc_lo
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0xffff, v0, v3
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0xffff, v0, v1
+; GFX11TRUE16-NEXT: v_bfi_b32 v1, 0xffff, v3, v2
; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11FAKE16-LABEL: v_fmuladd_v4bf16:
; GFX11FAKE16: ; %bb.0:
; GFX11FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v8, 16, v1
+; GFX11FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v9, 16, v0
; GFX11FAKE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v7, 16, v1
-; GFX11FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v8, 16, v5
-; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v3
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v7, 16, v3
; GFX11FAKE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11FAKE16-NEXT: v_dual_mul_f32 v6, v7, v6 :: v_dual_and_b32 v5, 0xffff0000, v5
-; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v7, 16, v2
-; GFX11FAKE16-NEXT: v_dual_mul_f32 v1, v1, v3 :: v_dual_and_b32 v2, 0xffff0000, v2
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v5
+; GFX11FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v5, v1, v3
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v6, v8, v7 :: v_dual_lshlrev_b32 v7, 16, v4
+; GFX11FAKE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_1)
; GFX11FAKE16-NEXT: v_bfe_u32 v10, v6, 16, 1
-; GFX11FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v6
+; GFX11FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v6
; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11FAKE16-NEXT: v_mul_f32_e32 v7, v9, v7
-; GFX11FAKE16-NEXT: v_add3_u32 v10, v10, v6, 0x7fff
-; GFX11FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v1
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11FAKE16-NEXT: v_bfe_u32 v9, v7, 16, 1
-; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v3, v10, v3 :: v_dual_mul_f32 v0, v0, v2
-; GFX11FAKE16-NEXT: v_bfe_u32 v2, v1, 16, 1
-; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v7
-; GFX11FAKE16-NEXT: v_add3_u32 v9, v9, v7, 0x7fff
-; GFX11FAKE16-NEXT: v_bfe_u32 v11, v0, 16, 1
-; GFX11FAKE16-NEXT: v_add3_u32 v2, v2, v1, 0x7fff
-; GFX11FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v0
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11FAKE16-NEXT: v_add3_u32 v11, v11, v0, 0x7fff
-; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v1, v2, v6 :: v_dual_lshlrev_b32 v6, 16, v4
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v8, 16, v2
+; GFX11FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v4, v0, v2
+; GFX11FAKE16-NEXT: v_add3_u32 v0, v10, v6, 0x7fff
+; GFX11FAKE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v1, v0, v1, vcc_lo
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v7, v9, v8
+; GFX11FAKE16-NEXT: v_bfe_u32 v8, v4, 16, 1
+; GFX11FAKE16-NEXT: v_add3_u32 v0, v2, v5, 0x7fff
+; GFX11FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11FAKE16-NEXT: v_bfe_u32 v3, v7, 16, 1
; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11FAKE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v2, v9, v10 :: v_dual_and_b32 v1, 0xffff0000, v1
-; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11FAKE16-NEXT: v_dual_add_f32 v1, v1, v5 :: v_dual_and_b32 v2, 0xffff0000, v2
-; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, v11, v12, vcc_lo
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11FAKE16-NEXT: v_add_f32_e32 v2, v2, v6
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11FAKE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11FAKE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX11FAKE16-NEXT: v_bfe_u32 v6, v1, 16, 1
-; GFX11FAKE16-NEXT: v_add_f32_e32 v0, v0, v4
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11FAKE16-NEXT: v_add_f32_e32 v3, v3, v8
-; GFX11FAKE16-NEXT: v_bfe_u32 v8, v0, 16, 1
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v3
-; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11FAKE16-NEXT: v_add3_u32 v4, v7, v3, 0x7fff
-; GFX11FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc_lo
-; GFX11FAKE16-NEXT: v_add3_u32 v4, v6, v1, 0x7fff
-; GFX11FAKE16-NEXT: v_add3_u32 v5, v7, v2, 0x7fff
-; GFX11FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
-; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11FAKE16-NEXT: v_add3_u32 v7, v8, v0, 0x7fff
-; GFX11FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
-; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, v7, v8, vcc_lo
-; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11FAKE16-NEXT: v_perm_b32 v0, v0, v2, 0x7060302
-; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v1, v4, v9, vcc_lo
-; GFX11FAKE16-NEXT: v_perm_b32 v1, v1, v3, 0x7060302
+; GFX11FAKE16-NEXT: v_add3_u32 v6, v8, v4, 0x7fff
+; GFX11FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11FAKE16-NEXT: v_add3_u32 v2, v3, v7, 0x7fff
+; GFX11FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v7
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_3)
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v4, v0, v9, vcc_lo
+; GFX11FAKE16-NEXT: v_perm_b32 v0, v3, v2, 0x7060302
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11FAKE16-NEXT: v_perm_b32 v1, v4, v1, 0x7060302
; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31]
%op = call <4 x bfloat> @llvm.fmuladd.v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfloat> %c)
ret <4 x bfloat> %op
diff --git a/llvm/test/CodeGen/AMDGPU/expand-variadic-call.ll b/llvm/test/CodeGen/AMDGPU/expand-variadic-call.ll
index f58cb84..839d0ba 100644
--- a/llvm/test/CodeGen/AMDGPU/expand-variadic-call.ll
+++ b/llvm/test/CodeGen/AMDGPU/expand-variadic-call.ll
@@ -38,11 +38,11 @@ define hidden void @copy(ptr noundef %va) {
; CHECK-NEXT: %va.addr.ascast = addrspacecast ptr addrspace(5) %va.addr to ptr
; CHECK-NEXT: %cp.ascast = addrspacecast ptr addrspace(5) %cp to ptr
; CHECK-NEXT: store ptr %va, ptr addrspace(5) %va.addr, align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %cp)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %cp)
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr %cp.ascast, ptr %va.addr.ascast, i32 8, i1 false)
; CHECK-NEXT: %0 = load ptr, ptr addrspace(5) %cp, align 8
; CHECK-NEXT: call void @valist(ptr noundef %0)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %cp)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %cp)
; CHECK-NEXT: ret void
;
entry:
@@ -51,43 +51,43 @@ entry:
%va.addr.ascast = addrspacecast ptr addrspace(5) %va.addr to ptr
%cp.ascast = addrspacecast ptr addrspace(5) %cp to ptr
store ptr %va, ptr addrspace(5) %va.addr, align 8
- call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %cp)
+ call void @llvm.lifetime.start.p5(ptr addrspace(5) %cp)
call void @llvm.va_copy.p0(ptr %cp.ascast, ptr nonnull %va.addr.ascast)
%0 = load ptr, ptr addrspace(5) %cp, align 8
call void @valist(ptr noundef %0)
- call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %cp)
+ call void @llvm.lifetime.end.p5(ptr addrspace(5) %cp)
ret void
}
-declare void @llvm.lifetime.start.p5(i64 immarg, ptr addrspace(5) nocapture)
+declare void @llvm.lifetime.start.p5(ptr addrspace(5) nocapture)
declare void @llvm.va_copy.p0(ptr, ptr)
declare hidden void @valist(ptr noundef)
-declare void @llvm.lifetime.end.p5(i64 immarg, ptr addrspace(5) nocapture)
+declare void @llvm.lifetime.end.p5(ptr addrspace(5) nocapture)
define hidden void @start_once(...) {
; CHECK-LABEL: define {{[^@]+}}@start_once(ptr %varargs) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %s = alloca ptr, align 8, addrspace(5)
; CHECK-NEXT: %s.ascast = addrspacecast ptr addrspace(5) %s to ptr
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %s)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %s)
; CHECK-NEXT: store ptr %varargs, ptr %s.ascast, align 8
; CHECK-NEXT: %0 = load ptr, ptr addrspace(5) %s, align 8
; CHECK-NEXT: call void @valist(ptr noundef %0)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %s)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %s)
; CHECK-NEXT: ret void
;
entry:
%s = alloca ptr, align 8, addrspace(5)
%s.ascast = addrspacecast ptr addrspace(5) %s to ptr
- call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %s)
+ call void @llvm.lifetime.start.p5(ptr addrspace(5) %s)
call void @llvm.va_start.p0(ptr %s.ascast)
%0 = load ptr, ptr addrspace(5) %s, align 8
call void @valist(ptr noundef %0)
call void @llvm.va_end.p0(ptr %s.ascast)
- call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %s)
+ call void @llvm.lifetime.end.p5(ptr addrspace(5) %s)
ret void
}
@@ -102,16 +102,16 @@ define hidden void @start_twice(...) {
; CHECK-NEXT: %s1 = alloca ptr, align 8, addrspace(5)
; CHECK-NEXT: %s0.ascast = addrspacecast ptr addrspace(5) %s0 to ptr
; CHECK-NEXT: %s1.ascast = addrspacecast ptr addrspace(5) %s1 to ptr
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %s0)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %s1)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %s0)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %s1)
; CHECK-NEXT: store ptr %varargs, ptr %s0.ascast, align 8
; CHECK-NEXT: %0 = load ptr, ptr addrspace(5) %s0, align 8
; CHECK-NEXT: call void @valist(ptr noundef %0)
; CHECK-NEXT: store ptr %varargs, ptr %s1.ascast, align 8
; CHECK-NEXT: %1 = load ptr, ptr addrspace(5) %s1, align 8
; CHECK-NEXT: call void @valist(ptr noundef %1)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %s1)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %s0)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %s1)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %s0)
; CHECK-NEXT: ret void
;
entry:
@@ -119,8 +119,8 @@ entry:
%s1 = alloca ptr, align 8, addrspace(5)
%s0.ascast = addrspacecast ptr addrspace(5) %s0 to ptr
%s1.ascast = addrspacecast ptr addrspace(5) %s1 to ptr
- call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %s0)
- call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %s1)
+ call void @llvm.lifetime.start.p5(ptr addrspace(5) %s0)
+ call void @llvm.lifetime.start.p5(ptr addrspace(5) %s1)
call void @llvm.va_start.p0(ptr %s0.ascast)
%0 = load ptr, ptr addrspace(5) %s0, align 8
call void @valist(ptr noundef %0)
@@ -129,8 +129,8 @@ entry:
%1 = load ptr, ptr addrspace(5) %s1, align 8
call void @valist(ptr noundef %1)
call void @llvm.va_end.p0(ptr %s1.ascast)
- call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %s1)
- call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %s0)
+ call void @llvm.lifetime.end.p5(ptr addrspace(5) %s1)
+ call void @llvm.lifetime.end.p5(ptr addrspace(5) %s0)
ret void
}
@@ -138,12 +138,12 @@ define hidden void @single_i32(i32 noundef %x) {
; CHECK-LABEL: define {{[^@]+}}@single_i32(i32 noundef %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %single_i32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store i32 %x, ptr addrspace(5) %0, align 4
; CHECK-NEXT: %1 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
; CHECK-NEXT: call void @vararg(ptr %1)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -157,12 +157,12 @@ define hidden void @single_double(double noundef %x) {
; CHECK-LABEL: define {{[^@]+}}@single_double(double noundef %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %single_double.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_double.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store double %x, ptr addrspace(5) %0, align 8
; CHECK-NEXT: %1 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
; CHECK-NEXT: call void @vararg(ptr %1)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -174,12 +174,12 @@ define hidden void @single_v4f32(<4 x float> noundef %x) {
; CHECK-LABEL: define {{[^@]+}}@single_v4f32(<4 x float> noundef %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %single_v4f32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 16, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_v4f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store <4 x float> %x, ptr addrspace(5) %0, align 16
; CHECK-NEXT: %1 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
; CHECK-NEXT: call void @vararg(ptr %1)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 16, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -191,12 +191,12 @@ define hidden void @single_v8f32(<8 x float> noundef %x) {
; CHECK-LABEL: define {{[^@]+}}@single_v8f32(<8 x float> noundef %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %single_v8f32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 32, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_v8f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store <8 x float> %x, ptr addrspace(5) %0, align 32
; CHECK-NEXT: %1 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
; CHECK-NEXT: call void @vararg(ptr %1)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 32, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -208,12 +208,12 @@ define hidden void @single_v16f32(<16 x float> noundef %x) {
; CHECK-LABEL: define {{[^@]+}}@single_v16f32(<16 x float> noundef %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %single_v16f32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 64, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_v16f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store <16 x float> %x, ptr addrspace(5) %0, align 64
; CHECK-NEXT: %1 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
; CHECK-NEXT: call void @vararg(ptr %1)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 64, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -225,12 +225,12 @@ define hidden void @single_v32f32(<32 x float> noundef %x) {
; CHECK-LABEL: define {{[^@]+}}@single_v32f32(<32 x float> noundef %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %single_v32f32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 128, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_v32f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store <32 x float> %x, ptr addrspace(5) %0, align 128
; CHECK-NEXT: %1 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
; CHECK-NEXT: call void @vararg(ptr %1)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 128, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -242,14 +242,14 @@ define hidden void @i32_double(i32 noundef %x, double noundef %y) {
; CHECK-LABEL: define {{[^@]+}}@i32_double(i32 noundef %x, double noundef %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %i32_double.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 12, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_double.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store i32 %x, ptr addrspace(5) %0, align 4
; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_double.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1
; CHECK-NEXT: store double %y, ptr addrspace(5) %1, align 8
; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
; CHECK-NEXT: call void @vararg(ptr %2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 12, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -261,14 +261,14 @@ define hidden void @double_i32(double noundef %x, i32 noundef %y) {
; CHECK-LABEL: define {{[^@]+}}@double_i32(double noundef %x, i32 noundef %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %double_i32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 12, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %double_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store double %x, ptr addrspace(5) %0, align 8
; CHECK-NEXT: %1 = getelementptr inbounds nuw %double_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1
; CHECK-NEXT: store i32 %y, ptr addrspace(5) %1, align 4
; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
; CHECK-NEXT: call void @vararg(ptr %2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 12, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -286,14 +286,14 @@ define hidden void @i32_libcS(i32 noundef %x, i8 %y.coerce0, i16 %y.coerce1, i32
; CHECK-NEXT: %.fca.3.insert = insertvalue %struct.libcS %.fca.2.insert, i64 %y.coerce3, 3
; CHECK-NEXT: %.fca.4.insert = insertvalue %struct.libcS %.fca.3.insert, float %y.coerce4, 4
; CHECK-NEXT: %.fca.5.insert = insertvalue %struct.libcS %.fca.4.insert, double %y.coerce5, 5
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 36, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_libcS.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store i32 %x, ptr addrspace(5) %0, align 4
; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_libcS.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1
; CHECK-NEXT: store %struct.libcS %.fca.5.insert, ptr addrspace(5) %1, align 8
; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
; CHECK-NEXT: call void @vararg(ptr %2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 36, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -317,14 +317,14 @@ define hidden void @libcS_i32(i8 %x.coerce0, i16 %x.coerce1, i32 %x.coerce2, i64
; CHECK-NEXT: %.fca.3.insert = insertvalue %struct.libcS %.fca.2.insert, i64 %x.coerce3, 3
; CHECK-NEXT: %.fca.4.insert = insertvalue %struct.libcS %.fca.3.insert, float %x.coerce4, 4
; CHECK-NEXT: %.fca.5.insert = insertvalue %struct.libcS %.fca.4.insert, double %x.coerce5, 5
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 36, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %libcS_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store %struct.libcS %.fca.5.insert, ptr addrspace(5) %0, align 8
; CHECK-NEXT: %1 = getelementptr inbounds nuw %libcS_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1
; CHECK-NEXT: store i32 %y, ptr addrspace(5) %1, align 4
; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
; CHECK-NEXT: call void @vararg(ptr %2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 36, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -342,14 +342,14 @@ define hidden void @i32_v4f32(i32 noundef %x, <4 x float> noundef %y) {
; CHECK-LABEL: define {{[^@]+}}@i32_v4f32(i32 noundef %x, <4 x float> noundef %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %i32_v4f32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 20, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_v4f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store i32 %x, ptr addrspace(5) %0, align 4
; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_v4f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1
; CHECK-NEXT: store <4 x float> %y, ptr addrspace(5) %1, align 16
; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
; CHECK-NEXT: call void @vararg(ptr %2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 20, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -361,14 +361,14 @@ define hidden void @v4f32_i32(<4 x float> noundef %x, i32 noundef %y) {
; CHECK-LABEL: define {{[^@]+}}@v4f32_i32(<4 x float> noundef %x, i32 noundef %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %v4f32_i32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 20, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %v4f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store <4 x float> %x, ptr addrspace(5) %0, align 16
; CHECK-NEXT: %1 = getelementptr inbounds nuw %v4f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1
; CHECK-NEXT: store i32 %y, ptr addrspace(5) %1, align 4
; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
; CHECK-NEXT: call void @vararg(ptr %2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 20, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -380,14 +380,14 @@ define hidden void @i32_v8f32(i32 noundef %x, <8 x float> noundef %y) {
; CHECK-LABEL: define {{[^@]+}}@i32_v8f32(i32 noundef %x, <8 x float> noundef %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %i32_v8f32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 36, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_v8f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store i32 %x, ptr addrspace(5) %0, align 4
; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_v8f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1
; CHECK-NEXT: store <8 x float> %y, ptr addrspace(5) %1, align 32
; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
; CHECK-NEXT: call void @vararg(ptr %2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 36, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -399,14 +399,14 @@ define hidden void @v8f32_i32(<8 x float> noundef %x, i32 noundef %y) {
; CHECK-LABEL: define {{[^@]+}}@v8f32_i32(<8 x float> noundef %x, i32 noundef %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %v8f32_i32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 36, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %v8f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store <8 x float> %x, ptr addrspace(5) %0, align 32
; CHECK-NEXT: %1 = getelementptr inbounds nuw %v8f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1
; CHECK-NEXT: store i32 %y, ptr addrspace(5) %1, align 4
; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
; CHECK-NEXT: call void @vararg(ptr %2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 36, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -418,14 +418,14 @@ define hidden void @i32_v16f32(i32 noundef %x, <16 x float> noundef %y) {
; CHECK-LABEL: define {{[^@]+}}@i32_v16f32(i32 noundef %x, <16 x float> noundef %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %i32_v16f32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 68, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_v16f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store i32 %x, ptr addrspace(5) %0, align 4
; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_v16f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1
; CHECK-NEXT: store <16 x float> %y, ptr addrspace(5) %1, align 64
; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
; CHECK-NEXT: call void @vararg(ptr %2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 68, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -437,14 +437,14 @@ define hidden void @v16f32_i32(<16 x float> noundef %x, i32 noundef %y) {
; CHECK-LABEL: define {{[^@]+}}@v16f32_i32(<16 x float> noundef %x, i32 noundef %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %v16f32_i32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 68, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %v16f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store <16 x float> %x, ptr addrspace(5) %0, align 64
; CHECK-NEXT: %1 = getelementptr inbounds nuw %v16f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1
; CHECK-NEXT: store i32 %y, ptr addrspace(5) %1, align 4
; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
; CHECK-NEXT: call void @vararg(ptr %2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 68, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -456,14 +456,14 @@ define hidden void @i32_v32f32(i32 noundef %x, <32 x float> noundef %y) {
; CHECK-LABEL: define {{[^@]+}}@i32_v32f32(i32 noundef %x, <32 x float> noundef %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %i32_v32f32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 132, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_v32f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store i32 %x, ptr addrspace(5) %0, align 4
; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_v32f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1
; CHECK-NEXT: store <32 x float> %y, ptr addrspace(5) %1, align 128
; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
; CHECK-NEXT: call void @vararg(ptr %2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 132, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -475,14 +475,14 @@ define hidden void @v32f32_i32(<32 x float> noundef %x, i32 noundef %y) {
; CHECK-LABEL: define {{[^@]+}}@v32f32_i32(<32 x float> noundef %x, i32 noundef %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %v32f32_i32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 132, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %v32f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store <32 x float> %x, ptr addrspace(5) %0, align 128
; CHECK-NEXT: %1 = getelementptr inbounds nuw %v32f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1
; CHECK-NEXT: store i32 %y, ptr addrspace(5) %1, align 4
; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
; CHECK-NEXT: call void @vararg(ptr %2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 132, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -495,12 +495,12 @@ define hidden void @fptr_single_i32(i32 noundef %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %fptr_single_i32.vararg, align 4, addrspace(5)
; CHECK-NEXT: %0 = load volatile ptr, ptr addrspacecast (ptr addrspace(1) @vararg_ptr to ptr), align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: %1 = getelementptr inbounds nuw %fptr_single_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store i32 %x, ptr addrspace(5) %1, align 4
; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
; CHECK-NEXT: call void %0(ptr %2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -520,12 +520,12 @@ define hidden void @fptr_libcS(i8 %x.coerce0, i16 %x.coerce1, i32 %x.coerce2, i6
; CHECK-NEXT: %.fca.3.insert = insertvalue %struct.libcS %.fca.2.insert, i64 %x.coerce3, 3
; CHECK-NEXT: %.fca.4.insert = insertvalue %struct.libcS %.fca.3.insert, float %x.coerce4, 4
; CHECK-NEXT: %.fca.5.insert = insertvalue %struct.libcS %.fca.4.insert, double %x.coerce5, 5
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 32, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: %1 = getelementptr inbounds nuw %fptr_libcS.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store %struct.libcS %.fca.5.insert, ptr addrspace(5) %1, align 8
; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
; CHECK-NEXT: call void %0(ptr %2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 32, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
diff --git a/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll b/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll
index 7d36c9f..004d3c0 100644
--- a/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll
@@ -284,6 +284,7 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn(ptr inreg %sbase, i32 %vof
; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: s_clause 0x1
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
@@ -329,6 +330,7 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn(ptr inreg %sbase, i32 %vof
; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
+; GFX1250-GISEL-NEXT: s_clause 0x1
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[4:5], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
@@ -382,6 +384,7 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn_neg128(ptr inreg %sbase, i
; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: s_clause 0x1
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
@@ -430,6 +433,7 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn_neg128(ptr inreg %sbase, i
; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
+; GFX1250-GISEL-NEXT: s_clause 0x1
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[4:5], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
diff --git a/llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll b/llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll
index 1b092b2..5674ae3 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll
@@ -349,29 +349,24 @@ define i32 @select_fneg_xor_select_i32(i1 %cond0, i1 %cond1, i32 %arg0, i32 %arg
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_and_b32_e32 v0, 1, v0
-; GCN-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
-; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
; GCN-NEXT: v_and_b32_e32 v1, 1, v1
-; GCN-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
-; GCN-NEXT: v_xor_b32_e32 v2, 0x80000000, v0
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GCN-NEXT: v_cndmask_b32_e64 v0, -v2, v3, vcc
; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v1
-; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GCN-NEXT: v_cndmask_b32_e64 v0, v0, -v0, vcc
; GCN-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: select_fneg_xor_select_i32:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX11-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
; GFX11-NEXT: v_and_b32_e32 v1, 1, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_cndmask_b32_e64 v0, -v2, v3, vcc_lo
; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v1
-; GFX11-NEXT: v_xor_b32_e32 v2, 0x80000000, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, -v0, vcc_lo
; GFX11-NEXT: s_setpc_b64 s[30:31]
%fneg0 = xor i32 %arg0, -2147483648
%select0 = select i1 %cond0, i32 %arg1, i32 %fneg0
@@ -550,31 +545,25 @@ define i64 @select_fneg_xor_select_i64(i1 %cond0, i1 %cond1, i64 %arg0, i64 %arg
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_and_b32_e32 v0, 1, v0
-; GCN-NEXT: v_xor_b32_e32 v3, 0x80000000, v3
-; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
; GCN-NEXT: v_and_b32_e32 v1, 1, v1
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
; GCN-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
-; GCN-NEXT: v_cndmask_b32_e32 v2, v3, v5, vcc
-; GCN-NEXT: v_xor_b32_e32 v3, 0x80000000, v2
+; GCN-NEXT: v_cndmask_b32_e64 v2, -v3, v5, vcc
; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v1
-; GCN-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc
+; GCN-NEXT: v_cndmask_b32_e64 v1, v2, -v2, vcc
; GCN-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: select_fneg_xor_select_i64:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX11-NEXT: v_xor_b32_e32 v3, 0x80000000, v3
-; GFX11-NEXT: v_and_b32_e32 v1, 1, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v3, v5, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v4 :: v_dual_and_b32 v1, 1, v1
+; GFX11-NEXT: v_cndmask_b32_e64 v2, -v3, v5, vcc_lo
; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v1
-; GFX11-NEXT: v_xor_b32_e32 v3, 0x80000000, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v2, -v2, vcc_lo
; GFX11-NEXT: s_setpc_b64 s[30:31]
%fneg0 = xor i64 %arg0, 9223372036854775808
%select0 = select i1 %cond0, i64 %arg1, i64 %fneg0
diff --git a/llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll b/llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll
index 3a898a9..f0db321 100644
--- a/llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll
@@ -244,8 +244,9 @@ define i32 @test_v64i32_load_store(ptr addrspace(1) %ptr, i32 %idx, ptr addrspac
; GCN-GISEL-NEXT: global_load_b128 v[60:63], v[0:1], off offset:16
; GCN-GISEL-NEXT: global_load_b128 v[0:3], v[0:1], off offset:240
; GCN-GISEL-NEXT: s_wait_loadcnt 0x0
-; GCN-GISEL-NEXT: scratch_store_b128 off, v[0:3], s32 offset:64 scope:SCOPE_SE ; 16-byte Folded Spill
-; GCN-GISEL-NEXT: scratch_load_b128 v[0:3], off, s32 offset:80 th:TH_LOAD_LU ; 16-byte Folded Reload
+; GCN-GISEL-NEXT: s_clause 0x1
+; GCN-GISEL-NEXT: scratch_store_b128 off, v[0:3], s32 offset:64 scope:SCOPE_SE
+; GCN-GISEL-NEXT: scratch_load_b128 v[0:3], off, s32 offset:80 th:TH_LOAD_LU
; GCN-GISEL-NEXT: s_wait_loadcnt 0x0
; GCN-GISEL-NEXT: s_clause 0xe
; GCN-GISEL-NEXT: global_store_b128 v[46:47], v[0:3], off offset:32
diff --git a/llvm/test/CodeGen/AMDGPU/hard-clauses-gfx1250.mir b/llvm/test/CodeGen/AMDGPU/hard-clauses-gfx1250.mir
index 8007597..492753b 100644
--- a/llvm/test/CodeGen/AMDGPU/hard-clauses-gfx1250.mir
+++ b/llvm/test/CodeGen/AMDGPU/hard-clauses-gfx1250.mir
@@ -1,6 +1,507 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -run-pass si-insert-hard-clauses %s -o - | FileCheck %s -check-prefixes=GFX12
-# RUN: llc -mtriple=amdgcn -mcpu=gfx1250 -run-pass si-insert-hard-clauses %s -o - | FileCheck %s -check-prefixes=GFX12
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -run-pass si-insert-hard-clauses %s -o - | FileCheck %s -check-prefixes=GFX12,GFX1200
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1250 -run-pass si-insert-hard-clauses %s -o - | FileCheck %s -check-prefixes=GFX12,GFX1250
+
+---
+name: long_clause
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $vgpr0
+ ; GFX1200-LABEL: name: long_clause
+ ; GFX1200: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $vgpr0
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: BUNDLE implicit-def $vgpr1, implicit-def $vgpr1_lo16, implicit-def $vgpr1_hi16, implicit-def $vgpr2, implicit-def $vgpr2_lo16, implicit-def $vgpr2_hi16, implicit-def $vgpr3, implicit-def $vgpr3_lo16, implicit-def $vgpr3_hi16, implicit-def $vgpr4, implicit-def $vgpr4_lo16, implicit-def $vgpr4_hi16, implicit-def $vgpr5, implicit-def $vgpr5_lo16, implicit-def $vgpr5_hi16, implicit-def $vgpr6, implicit-def $vgpr6_lo16, implicit-def $vgpr6_hi16, implicit-def $vgpr7, implicit-def $vgpr7_lo16, implicit-def $vgpr7_hi16, implicit-def $vgpr8, implicit-def $vgpr8_lo16, implicit-def $vgpr8_hi16, implicit-def $vgpr9, implicit-def $vgpr9_lo16, implicit-def $vgpr9_hi16, implicit-def $vgpr10, implicit-def $vgpr10_lo16, implicit-def $vgpr10_hi16, implicit-def $vgpr11, implicit-def $vgpr11_lo16, implicit-def $vgpr11_hi16, implicit-def $vgpr12, implicit-def $vgpr12_lo16, implicit-def $vgpr12_hi16, implicit-def $vgpr13, implicit-def $vgpr13_lo16, implicit-def $vgpr13_hi16, implicit-def $vgpr14, implicit-def $vgpr14_lo16, implicit-def $vgpr14_hi16, implicit-def $vgpr15, implicit-def $vgpr15_lo16, implicit-def $vgpr15_hi16, implicit-def $vgpr16, implicit-def $vgpr16_lo16, implicit-def $vgpr16_hi16, implicit-def $vgpr17, implicit-def $vgpr17_lo16, implicit-def $vgpr17_hi16, implicit-def $vgpr18, implicit-def $vgpr18_lo16, implicit-def $vgpr18_hi16, implicit-def $vgpr19, implicit-def $vgpr19_lo16, implicit-def $vgpr19_hi16, implicit-def $vgpr20, implicit-def $vgpr20_lo16, implicit-def $vgpr20_hi16, implicit-def $vgpr21, implicit-def $vgpr21_lo16, implicit-def $vgpr21_hi16, implicit-def $vgpr22, implicit-def $vgpr22_lo16, implicit-def $vgpr22_hi16, implicit-def $vgpr23, implicit-def $vgpr23_lo16, implicit-def $vgpr23_hi16, implicit-def $vgpr24, implicit-def $vgpr24_lo16, implicit-def $vgpr24_hi16, implicit-def $vgpr25, implicit-def $vgpr25_lo16, implicit-def $vgpr25_hi16, implicit-def $vgpr26, implicit-def $vgpr26_lo16, implicit-def $vgpr26_hi16, implicit-def $vgpr27, implicit-def $vgpr27_lo16, implicit-def $vgpr27_hi16, implicit-def $vgpr28, implicit-def $vgpr28_lo16, implicit-def $vgpr28_hi16, implicit-def $vgpr29, implicit-def $vgpr29_lo16, implicit-def $vgpr29_hi16, implicit-def $vgpr30, implicit-def $vgpr30_lo16, implicit-def $vgpr30_hi16, implicit-def $vgpr31, implicit-def $vgpr31_lo16, implicit-def $vgpr31_hi16, implicit-def $vgpr32, implicit-def $vgpr32_lo16, implicit-def $vgpr32_hi16, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $exec {
+ ; GFX1200-NEXT: S_CLAUSE 31
+ ; GFX1200-NEXT: $vgpr1 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 4, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr2 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 8, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr3 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 12, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr4 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 16, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr5 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 20, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr6 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 24, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr7 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 28, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr8 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 32, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr9 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 36, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr10 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 40, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr11 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 44, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr12 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 48, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr13 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 52, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr14 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 56, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr15 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 60, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr16 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 64, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr17 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 68, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr18 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 72, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr19 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 76, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr20 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 80, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr21 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 84, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr22 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 88, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr23 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 92, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr24 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 96, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr25 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 100, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr26 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 104, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr27 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 108, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr28 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 112, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr29 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 116, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr30 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 120, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr31 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 124, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr32 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 128, 0, 0, implicit $exec
+ ; GFX1200-NEXT: }
+ ; GFX1200-NEXT: BUNDLE implicit-def $vgpr33, implicit-def $vgpr33_lo16, implicit-def $vgpr33_hi16, implicit-def $vgpr34, implicit-def $vgpr34_lo16, implicit-def $vgpr34_hi16, implicit-def $vgpr35, implicit-def $vgpr35_lo16, implicit-def $vgpr35_hi16, implicit-def $vgpr36, implicit-def $vgpr36_lo16, implicit-def $vgpr36_hi16, implicit-def $vgpr37, implicit-def $vgpr37_lo16, implicit-def $vgpr37_hi16, implicit-def $vgpr38, implicit-def $vgpr38_lo16, implicit-def $vgpr38_hi16, implicit-def $vgpr39, implicit-def $vgpr39_lo16, implicit-def $vgpr39_hi16, implicit-def $vgpr40, implicit-def $vgpr40_lo16, implicit-def $vgpr40_hi16, implicit-def $vgpr41, implicit-def $vgpr41_lo16, implicit-def $vgpr41_hi16, implicit-def $vgpr42, implicit-def $vgpr42_lo16, implicit-def $vgpr42_hi16, implicit-def $vgpr43, implicit-def $vgpr43_lo16, implicit-def $vgpr43_hi16, implicit-def $vgpr44, implicit-def $vgpr44_lo16, implicit-def $vgpr44_hi16, implicit-def $vgpr45, implicit-def $vgpr45_lo16, implicit-def $vgpr45_hi16, implicit-def $vgpr46, implicit-def $vgpr46_lo16, implicit-def $vgpr46_hi16, implicit-def $vgpr47, implicit-def $vgpr47_lo16, implicit-def $vgpr47_hi16, implicit-def $vgpr48, implicit-def $vgpr48_lo16, implicit-def $vgpr48_hi16, implicit-def $vgpr49, implicit-def $vgpr49_lo16, implicit-def $vgpr49_hi16, implicit-def $vgpr50, implicit-def $vgpr50_lo16, implicit-def $vgpr50_hi16, implicit-def $vgpr51, implicit-def $vgpr51_lo16, implicit-def $vgpr51_hi16, implicit-def $vgpr52, implicit-def $vgpr52_lo16, implicit-def $vgpr52_hi16, implicit-def $vgpr53, implicit-def $vgpr53_lo16, implicit-def $vgpr53_hi16, implicit-def $vgpr54, implicit-def $vgpr54_lo16, implicit-def $vgpr54_hi16, implicit-def $vgpr55, implicit-def $vgpr55_lo16, implicit-def $vgpr55_hi16, implicit-def $vgpr56, implicit-def $vgpr56_lo16, implicit-def $vgpr56_hi16, implicit-def $vgpr57, implicit-def $vgpr57_lo16, implicit-def $vgpr57_hi16, implicit-def $vgpr58, implicit-def $vgpr58_lo16, implicit-def $vgpr58_hi16, implicit-def $vgpr59, implicit-def $vgpr59_lo16, implicit-def $vgpr59_hi16, implicit-def $vgpr60, implicit-def $vgpr60_lo16, implicit-def $vgpr60_hi16, implicit-def $vgpr61, implicit-def $vgpr61_lo16, implicit-def $vgpr61_hi16, implicit-def $vgpr62, implicit-def $vgpr62_lo16, implicit-def $vgpr62_hi16, implicit-def $vgpr63, implicit-def $vgpr63_lo16, implicit-def $vgpr63_hi16, implicit-def $vgpr64, implicit-def $vgpr64_lo16, implicit-def $vgpr64_hi16, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $exec {
+ ; GFX1200-NEXT: S_CLAUSE 31
+ ; GFX1200-NEXT: $vgpr33 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 132, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr34 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 136, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr35 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 140, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr36 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 144, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr37 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 148, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr38 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 152, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr39 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 156, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr40 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 160, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr41 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 164, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr42 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 168, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr43 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 172, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr44 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 176, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr45 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 180, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr46 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 184, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr47 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 188, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr48 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 192, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr49 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 196, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr50 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 200, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr51 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 204, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr52 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 208, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr53 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 212, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr54 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 216, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr55 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 220, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr56 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 224, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr57 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 228, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr58 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 232, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr59 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 236, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr60 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 240, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr61 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 244, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr62 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 248, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr63 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 252, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr64 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 256, 0, 0, implicit $exec
+ ; GFX1200-NEXT: }
+ ; GFX1200-NEXT: BUNDLE implicit-def $vgpr65, implicit-def $vgpr65_lo16, implicit-def $vgpr65_hi16, implicit-def $vgpr66, implicit-def $vgpr66_lo16, implicit-def $vgpr66_hi16, implicit-def $vgpr67, implicit-def $vgpr67_lo16, implicit-def $vgpr67_hi16, implicit-def $vgpr68, implicit-def $vgpr68_lo16, implicit-def $vgpr68_hi16, implicit-def $vgpr69, implicit-def $vgpr69_lo16, implicit-def $vgpr69_hi16, implicit-def $vgpr70, implicit-def $vgpr70_lo16, implicit-def $vgpr70_hi16, implicit-def $vgpr71, implicit-def $vgpr71_lo16, implicit-def $vgpr71_hi16, implicit-def $vgpr72, implicit-def $vgpr72_lo16, implicit-def $vgpr72_hi16, implicit-def $vgpr73, implicit-def $vgpr73_lo16, implicit-def $vgpr73_hi16, implicit-def $vgpr74, implicit-def $vgpr74_lo16, implicit-def $vgpr74_hi16, implicit-def $vgpr75, implicit-def $vgpr75_lo16, implicit-def $vgpr75_hi16, implicit-def $vgpr76, implicit-def $vgpr76_lo16, implicit-def $vgpr76_hi16, implicit-def $vgpr77, implicit-def $vgpr77_lo16, implicit-def $vgpr77_hi16, implicit-def $vgpr78, implicit-def $vgpr78_lo16, implicit-def $vgpr78_hi16, implicit-def $vgpr79, implicit-def $vgpr79_lo16, implicit-def $vgpr79_hi16, implicit-def $vgpr80, implicit-def $vgpr80_lo16, implicit-def $vgpr80_hi16, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $exec {
+ ; GFX1200-NEXT: S_CLAUSE 15
+ ; GFX1200-NEXT: $vgpr65 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 260, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr66 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 264, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr67 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 268, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr68 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 272, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr69 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 276, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr70 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 280, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr71 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 284, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr72 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 288, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr73 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 292, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr74 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 296, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr75 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 300, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr76 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 304, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr77 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 308, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr78 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 312, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr79 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 316, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr80 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 320, 0, 0, implicit $exec
+ ; GFX1200-NEXT: }
+ ;
+ ; GFX1250-LABEL: name: long_clause
+ ; GFX1250: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $vgpr0
+ ; GFX1250-NEXT: {{ $}}
+  ; GFX1250-NEXT: BUNDLE implicit-def $vgpr1, implicit-def $vgpr1_lo16, implicit-def $vgpr1_hi16, implicit-def $vgpr2, implicit-def $vgpr2_lo16, implicit-def $vgpr2_hi16, implicit-def $vgpr3, implicit-def $vgpr3_lo16, implicit-def $vgpr3_hi16, implicit-def $vgpr4, implicit-def $vgpr4_lo16, implicit-def $vgpr4_hi16, implicit-def $vgpr5, implicit-def $vgpr5_lo16, implicit-def $vgpr5_hi16, implicit-def $vgpr6, implicit-def $vgpr6_lo16, implicit-def $vgpr6_hi16, implicit-def $vgpr7, implicit-def $vgpr7_lo16, implicit-def $vgpr7_hi16, implicit-def $vgpr8, implicit-def $vgpr8_lo16, implicit-def $vgpr8_hi16, implicit-def $vgpr9, implicit-def $vgpr9_lo16, implicit-def $vgpr9_hi16, implicit-def $vgpr10, implicit-def $vgpr10_lo16, implicit-def $vgpr10_hi16, implicit-def $vgpr11, implicit-def $vgpr11_lo16, implicit-def $vgpr11_hi16, implicit-def $vgpr12, implicit-def $vgpr12_lo16, implicit-def $vgpr12_hi16, implicit-def $vgpr13, implicit-def $vgpr13_lo16, implicit-def $vgpr13_hi16, implicit-def $vgpr14, implicit-def $vgpr14_lo16, implicit-def $vgpr14_hi16, implicit-def $vgpr15, implicit-def $vgpr15_lo16, implicit-def $vgpr15_hi16, implicit-def $vgpr16, implicit-def $vgpr16_lo16, implicit-def $vgpr16_hi16, implicit-def $vgpr17, implicit-def $vgpr17_lo16, implicit-def $vgpr17_hi16, implicit-def $vgpr18, implicit-def $vgpr18_lo16, implicit-def $vgpr18_hi16, implicit-def $vgpr19, implicit-def $vgpr19_lo16, implicit-def $vgpr19_hi16, implicit-def $vgpr20, implicit-def $vgpr20_lo16, implicit-def $vgpr20_hi16, implicit-def $vgpr21, implicit-def $vgpr21_lo16, implicit-def $vgpr21_hi16, implicit-def $vgpr22, implicit-def $vgpr22_lo16, implicit-def $vgpr22_hi16, implicit-def $vgpr23, implicit-def $vgpr23_lo16, implicit-def $vgpr23_hi16, implicit-def $vgpr24, implicit-def $vgpr24_lo16, implicit-def $vgpr24_hi16, implicit-def $vgpr25, implicit-def $vgpr25_lo16, implicit-def $vgpr25_hi16, implicit-def $vgpr26, implicit-def $vgpr26_lo16, implicit-def $vgpr26_hi16, implicit-def $vgpr27, implicit-def $vgpr27_lo16, implicit-def $vgpr27_hi16, implicit-def $vgpr28, implicit-def $vgpr28_lo16, implicit-def $vgpr28_hi16, implicit-def $vgpr29, implicit-def $vgpr29_lo16, implicit-def $vgpr29_hi16, implicit-def $vgpr30, implicit-def $vgpr30_lo16, implicit-def $vgpr30_hi16, implicit-def $vgpr31, implicit-def $vgpr31_lo16, implicit-def $vgpr31_hi16, implicit-def $vgpr32, implicit-def $vgpr32_lo16, implicit-def $vgpr32_hi16, implicit-def $vgpr33, implicit-def $vgpr33_lo16, implicit-def $vgpr33_hi16, implicit-def $vgpr34, implicit-def $vgpr34_lo16, implicit-def $vgpr34_hi16, implicit-def $vgpr35, implicit-def $vgpr35_lo16, implicit-def $vgpr35_hi16, implicit-def $vgpr36, implicit-def $vgpr36_lo16, implicit-def $vgpr36_hi16, implicit-def $vgpr37, implicit-def $vgpr37_lo16, implicit-def $vgpr37_hi16, implicit-def $vgpr38, implicit-def $vgpr38_lo16, implicit-def $vgpr38_hi16, implicit-def $vgpr39, implicit-def $vgpr39_lo16, implicit-def $vgpr39_hi16, implicit-def $vgpr40, implicit-def $vgpr40_lo16, implicit-def $vgpr40_hi16, implicit-def $vgpr41, implicit-def $vgpr41_lo16, implicit-def $vgpr41_hi16, implicit-def $vgpr42, implicit-def $vgpr42_lo16, implicit-def $vgpr42_hi16, implicit-def $vgpr43, implicit-def $vgpr43_lo16, implicit-def $vgpr43_hi16, implicit-def $vgpr44, implicit-def $vgpr44_lo16, implicit-def $vgpr44_hi16, implicit-def $vgpr45, implicit-def $vgpr45_lo16, implicit-def $vgpr45_hi16, implicit-def $vgpr46, implicit-def $vgpr46_lo16, implicit-def $vgpr46_hi16, implicit-def $vgpr47, implicit-def $vgpr47_lo16, implicit-def $vgpr47_hi16, implicit-def $vgpr48, implicit-def $vgpr48_lo16, implicit-def $vgpr48_hi16, implicit-def $vgpr49, implicit-def $vgpr49_lo16, implicit-def $vgpr49_hi16, implicit-def $vgpr50, implicit-def $vgpr50_lo16, implicit-def $vgpr50_hi16, implicit-def $vgpr51, implicit-def $vgpr51_lo16, implicit-def $vgpr51_hi16, implicit-def $vgpr52, implicit-def $vgpr52_lo16, implicit-def $vgpr52_hi16, implicit-def $vgpr53, implicit-def $vgpr53_lo16, implicit-def $vgpr53_hi16, implicit-def $vgpr54, implicit-def $vgpr54_lo16, implicit-def $vgpr54_hi16, implicit-def $vgpr55, implicit-def $vgpr55_lo16, implicit-def $vgpr55_hi16, implicit-def $vgpr56, implicit-def $vgpr56_lo16, implicit-def $vgpr56_hi16, implicit-def $vgpr57, implicit-def $vgpr57_lo16, implicit-def $vgpr57_hi16, implicit-def $vgpr58, implicit-def $vgpr58_lo16, implicit-def $vgpr58_hi16, implicit-def $vgpr59, implicit-def $vgpr59_lo16, implicit-def $vgpr59_hi16, implicit-def $vgpr60, implicit-def $vgpr60_lo16, implicit-def $vgpr60_hi16, implicit-def $vgpr61, implicit-def $vgpr61_lo16, implicit-def $vgpr61_hi16, implicit-def $vgpr62, implicit-def $vgpr62_lo16, implicit-def $vgpr62_hi16, implicit-def $vgpr63, implicit-def $vgpr63_lo16, implicit-def $vgpr63_hi16, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $exec {
+ ; GFX1250-NEXT: S_CLAUSE 62
+ ; GFX1250-NEXT: $vgpr1 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 4, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr2 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 8, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr3 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 12, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr4 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 16, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr5 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 20, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr6 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 24, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr7 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 28, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr8 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 32, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr9 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 36, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr10 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 40, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr11 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 44, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr12 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 48, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr13 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 52, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr14 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 56, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr15 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 60, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr16 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 64, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr17 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 68, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr18 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 72, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr19 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 76, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr20 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 80, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr21 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 84, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr22 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 88, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr23 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 92, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr24 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 96, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr25 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 100, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr26 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 104, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr27 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 108, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr28 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 112, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr29 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 116, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr30 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 120, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr31 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 124, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr32 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 128, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr33 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 132, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr34 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 136, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr35 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 140, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr36 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 144, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr37 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 148, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr38 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 152, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr39 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 156, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr40 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 160, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr41 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 164, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr42 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 168, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr43 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 172, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr44 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 176, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr45 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 180, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr46 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 184, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr47 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 188, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr48 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 192, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr49 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 196, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr50 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 200, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr51 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 204, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr52 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 208, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr53 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 212, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr54 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 216, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr55 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 220, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr56 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 224, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr57 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 228, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr58 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 232, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr59 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 236, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr60 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 240, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr61 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 244, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr62 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 248, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr63 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 252, 0, 0, implicit $exec
+ ; GFX1250-NEXT: }
+ ; GFX1250-NEXT: BUNDLE implicit-def $vgpr64, implicit-def $vgpr64_lo16, implicit-def $vgpr64_hi16, implicit-def $vgpr65, implicit-def $vgpr65_lo16, implicit-def $vgpr65_hi16, implicit-def $vgpr66, implicit-def $vgpr66_lo16, implicit-def $vgpr66_hi16, implicit-def $vgpr67, implicit-def $vgpr67_lo16, implicit-def $vgpr67_hi16, implicit-def $vgpr68, implicit-def $vgpr68_lo16, implicit-def $vgpr68_hi16, implicit-def $vgpr69, implicit-def $vgpr69_lo16, implicit-def $vgpr69_hi16, implicit-def $vgpr70, implicit-def $vgpr70_lo16, implicit-def $vgpr70_hi16, implicit-def $vgpr71, implicit-def $vgpr71_lo16, implicit-def $vgpr71_hi16, implicit-def $vgpr72, implicit-def $vgpr72_lo16, implicit-def $vgpr72_hi16, implicit-def $vgpr73, implicit-def $vgpr73_lo16, implicit-def $vgpr73_hi16, implicit-def $vgpr74, implicit-def $vgpr74_lo16, implicit-def $vgpr74_hi16, implicit-def $vgpr75, implicit-def $vgpr75_lo16, implicit-def $vgpr75_hi16, implicit-def $vgpr76, implicit-def $vgpr76_lo16, implicit-def $vgpr76_hi16, implicit-def $vgpr77, implicit-def $vgpr77_lo16, implicit-def $vgpr77_hi16, implicit-def $vgpr78, implicit-def $vgpr78_lo16, implicit-def $vgpr78_hi16, implicit-def $vgpr79, implicit-def $vgpr79_lo16, implicit-def $vgpr79_hi16, implicit-def $vgpr80, implicit-def $vgpr80_lo16, implicit-def $vgpr80_hi16, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $exec {
+ ; GFX1250-NEXT: S_CLAUSE 16
+ ; GFX1250-NEXT: $vgpr64 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 256, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr65 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 260, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr66 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 264, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr67 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 268, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr68 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 272, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr69 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 276, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr70 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 280, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr71 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 284, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr72 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 288, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr73 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 292, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr74 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 296, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr75 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 300, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr76 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 304, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr77 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 308, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr78 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 312, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr79 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 316, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr80 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 320, 0, 0, implicit $exec
+ ; GFX1250-NEXT: }
+ $vgpr1 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 4, 0, 0, implicit $exec
+ $vgpr2 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 8, 0, 0, implicit $exec
+ $vgpr3 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 12, 0, 0, implicit $exec
+ $vgpr4 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 16, 0, 0, implicit $exec
+ $vgpr5 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 20, 0, 0, implicit $exec
+ $vgpr6 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 24, 0, 0, implicit $exec
+ $vgpr7 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 28, 0, 0, implicit $exec
+ $vgpr8 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 32, 0, 0, implicit $exec
+ $vgpr9 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 36, 0, 0, implicit $exec
+ $vgpr10 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 40, 0, 0, implicit $exec
+ $vgpr11 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 44, 0, 0, implicit $exec
+ $vgpr12 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 48, 0, 0, implicit $exec
+ $vgpr13 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 52, 0, 0, implicit $exec
+ $vgpr14 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 56, 0, 0, implicit $exec
+ $vgpr15 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 60, 0, 0, implicit $exec
+ $vgpr16 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 64, 0, 0, implicit $exec
+ $vgpr17 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 68, 0, 0, implicit $exec
+ $vgpr18 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 72, 0, 0, implicit $exec
+ $vgpr19 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 76, 0, 0, implicit $exec
+ $vgpr20 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 80, 0, 0, implicit $exec
+ $vgpr21 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 84, 0, 0, implicit $exec
+ $vgpr22 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 88, 0, 0, implicit $exec
+ $vgpr23 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 92, 0, 0, implicit $exec
+ $vgpr24 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 96, 0, 0, implicit $exec
+ $vgpr25 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 100, 0, 0, implicit $exec
+ $vgpr26 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 104, 0, 0, implicit $exec
+ $vgpr27 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 108, 0, 0, implicit $exec
+ $vgpr28 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 112, 0, 0, implicit $exec
+ $vgpr29 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 116, 0, 0, implicit $exec
+ $vgpr30 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 120, 0, 0, implicit $exec
+ $vgpr31 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 124, 0, 0, implicit $exec
+ $vgpr32 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 128, 0, 0, implicit $exec
+ $vgpr33 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 132, 0, 0, implicit $exec
+ $vgpr34 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 136, 0, 0, implicit $exec
+ $vgpr35 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 140, 0, 0, implicit $exec
+ $vgpr36 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 144, 0, 0, implicit $exec
+ $vgpr37 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 148, 0, 0, implicit $exec
+ $vgpr38 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 152, 0, 0, implicit $exec
+ $vgpr39 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 156, 0, 0, implicit $exec
+ $vgpr40 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 160, 0, 0, implicit $exec
+ $vgpr41 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 164, 0, 0, implicit $exec
+ $vgpr42 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 168, 0, 0, implicit $exec
+ $vgpr43 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 172, 0, 0, implicit $exec
+ $vgpr44 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 176, 0, 0, implicit $exec
+ $vgpr45 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 180, 0, 0, implicit $exec
+ $vgpr46 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 184, 0, 0, implicit $exec
+ $vgpr47 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 188, 0, 0, implicit $exec
+ $vgpr48 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 192, 0, 0, implicit $exec
+ $vgpr49 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 196, 0, 0, implicit $exec
+ $vgpr50 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 200, 0, 0, implicit $exec
+ $vgpr51 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 204, 0, 0, implicit $exec
+ $vgpr52 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 208, 0, 0, implicit $exec
+ $vgpr53 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 212, 0, 0, implicit $exec
+ $vgpr54 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 216, 0, 0, implicit $exec
+ $vgpr55 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 220, 0, 0, implicit $exec
+ $vgpr56 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 224, 0, 0, implicit $exec
+ $vgpr57 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 228, 0, 0, implicit $exec
+ $vgpr58 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 232, 0, 0, implicit $exec
+ $vgpr59 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 236, 0, 0, implicit $exec
+ $vgpr60 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 240, 0, 0, implicit $exec
+ $vgpr61 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 244, 0, 0, implicit $exec
+ $vgpr62 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 248, 0, 0, implicit $exec
+ $vgpr63 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 252, 0, 0, implicit $exec
+ $vgpr64 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 256, 0, 0, implicit $exec
+ $vgpr65 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 260, 0, 0, implicit $exec
+ $vgpr66 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 264, 0, 0, implicit $exec
+ $vgpr67 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 268, 0, 0, implicit $exec
+ $vgpr68 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 272, 0, 0, implicit $exec
+ $vgpr69 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 276, 0, 0, implicit $exec
+ $vgpr70 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 280, 0, 0, implicit $exec
+ $vgpr71 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 284, 0, 0, implicit $exec
+ $vgpr72 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 288, 0, 0, implicit $exec
+ $vgpr73 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 292, 0, 0, implicit $exec
+ $vgpr74 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 296, 0, 0, implicit $exec
+ $vgpr75 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 300, 0, 0, implicit $exec
+ $vgpr76 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 304, 0, 0, implicit $exec
+ $vgpr77 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 308, 0, 0, implicit $exec
+ $vgpr78 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 312, 0, 0, implicit $exec
+ $vgpr79 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 316, 0, 0, implicit $exec
+ $vgpr80 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 320, 0, 0, implicit $exec
+...
+
+---
+name: kill
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1, $sgpr4
+ ; GFX12-LABEL: name: kill
+ ; GFX12: liveins: $sgpr0_sgpr1, $sgpr4
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: BUNDLE implicit-def $sgpr2, implicit-def $sgpr2_lo16, implicit-def $sgpr2_hi16, implicit-def $sgpr3, implicit-def $sgpr3_lo16, implicit-def $sgpr3_hi16, implicit $sgpr0_sgpr1, implicit undef $sgpr4 {
+ ; GFX12-NEXT: S_CLAUSE 1
+ ; GFX12-NEXT: $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0
+ ; GFX12-NEXT: KILL undef renamable $sgpr4
+ ; GFX12-NEXT: $sgpr3 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 4, 0
+ ; GFX12-NEXT: }
+ $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0
+ KILL undef renamable $sgpr4
+ $sgpr3 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 4, 0
+...
+
+---
+name: kill2
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1, $sgpr4, $sgpr5
+ ; GFX12-LABEL: name: kill2
+ ; GFX12: liveins: $sgpr0_sgpr1, $sgpr4, $sgpr5
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: BUNDLE implicit-def $sgpr2, implicit-def $sgpr2_lo16, implicit-def $sgpr2_hi16, implicit-def $sgpr3, implicit-def $sgpr3_lo16, implicit-def $sgpr3_hi16, implicit $sgpr0_sgpr1, implicit undef $sgpr4 {
+ ; GFX12-NEXT: S_CLAUSE 1
+ ; GFX12-NEXT: $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0
+ ; GFX12-NEXT: KILL undef renamable $sgpr4
+ ; GFX12-NEXT: $sgpr3 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 4, 0
+ ; GFX12-NEXT: }
+ ; GFX12-NEXT: KILL undef renamable $sgpr5
+ $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0
+ KILL undef renamable $sgpr4
+ $sgpr3 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 4, 0
+ KILL undef renamable $sgpr5
+...
+
+---
+name: flat_load_atomic
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1, $vgpr2
+ ; GFX1200-LABEL: name: flat_load_atomic
+ ; GFX1200: liveins: $vgpr0_vgpr1, $vgpr2
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
+ ; GFX1200-NEXT: $vgpr4 = FLAT_ATOMIC_ADD_RTN $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec, implicit $flat_scr
+ ;
+ ; GFX1250-LABEL: name: flat_load_atomic
+ ; GFX1250: liveins: $vgpr0_vgpr1, $vgpr2
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: BUNDLE implicit-def $vgpr3, implicit-def $vgpr3_lo16, implicit-def $vgpr3_hi16, implicit-def $vgpr4, implicit-def $vgpr4_lo16, implicit-def $vgpr4_hi16, implicit $vgpr0_vgpr1, implicit $exec, implicit $flat_scr, implicit $vgpr2 {
+ ; GFX1250-NEXT: S_CLAUSE 1
+ ; GFX1250-NEXT: $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
+ ; GFX1250-NEXT: $vgpr4 = FLAT_ATOMIC_ADD_RTN $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec, implicit $flat_scr
+ ; GFX1250-NEXT: }
+ $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
+ $vgpr4 = FLAT_ATOMIC_ADD_RTN $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec, implicit $flat_scr
+...
+
+---
+name: global_load_atomic
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1, $vgpr2
+ ; GFX1200-LABEL: name: global_load_atomic
+ ; GFX1200: liveins: $vgpr0_vgpr1, $vgpr2
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $vgpr3 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr4 = GLOBAL_ATOMIC_ADD_RTN $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec
+ ;
+ ; GFX1250-LABEL: name: global_load_atomic
+ ; GFX1250: liveins: $vgpr0_vgpr1, $vgpr2
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: BUNDLE implicit-def $vgpr3, implicit-def $vgpr3_lo16, implicit-def $vgpr3_hi16, implicit-def $vgpr4, implicit-def $vgpr4_lo16, implicit-def $vgpr4_hi16, implicit $vgpr0_vgpr1, implicit $exec, implicit $vgpr2 {
+ ; GFX1250-NEXT: S_CLAUSE 1
+ ; GFX1250-NEXT: $vgpr3 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr4 = GLOBAL_ATOMIC_ADD_RTN $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec
+ ; GFX1250-NEXT: }
+ $vgpr3 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec
+ $vgpr4 = GLOBAL_ATOMIC_ADD_RTN $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec
+...
+
+---
+name: flat_global_load
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1
+ ; GFX12-LABEL: name: flat_global_load
+ ; GFX12: liveins: $vgpr0_vgpr1
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: $vgpr2 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
+ ; GFX12-NEXT: $vgpr3 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 4, 0, implicit $exec, implicit $flat_scr
+ $vgpr2 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
+ $vgpr3 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 4, 0, implicit $exec, implicit $flat_scr
+...
+
+---
+name: buffer_load_atomic
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, $vgpr0
+ ; GFX1200-LABEL: name: buffer_load_atomic
+ ; GFX1200: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, $vgpr0
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr0 = BUFFER_ATOMIC_ADD_OFFSET_RTN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 4, 0, 0, implicit $exec
+ ;
+ ; GFX1250-LABEL: name: buffer_load_atomic
+ ; GFX1250: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, $vgpr0
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: BUNDLE implicit-def $vgpr1, implicit-def $vgpr1_lo16, implicit-def $vgpr1_hi16, implicit-def $vgpr0, implicit-def $vgpr0_lo16, implicit-def $vgpr0_hi16, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4, implicit $exec, implicit $vgpr0 {
+ ; GFX1250-NEXT: S_CLAUSE 1
+ ; GFX1250-NEXT: $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr0 = BUFFER_ATOMIC_ADD_OFFSET_RTN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 4, 0, 0, implicit $exec
+ ; GFX1250-NEXT: }
+ $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, implicit $exec
+ $vgpr0 = BUFFER_ATOMIC_ADD_OFFSET_RTN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 4, 0, 0, implicit $exec
+...
+
+---
+name: flat_load_store
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1, $vgpr2
+ ; GFX1200-LABEL: name: flat_load_store
+ ; GFX1200: liveins: $vgpr0_vgpr1, $vgpr2
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
+ ; GFX1200-NEXT: FLAT_STORE_DWORD $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec, implicit $flat_scr
+ ;
+ ; GFX1250-LABEL: name: flat_load_store
+ ; GFX1250: liveins: $vgpr0_vgpr1, $vgpr2
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: BUNDLE implicit-def $vgpr3, implicit-def $vgpr3_lo16, implicit-def $vgpr3_hi16, implicit $vgpr0_vgpr1, implicit $exec, implicit $flat_scr, implicit $vgpr2 {
+ ; GFX1250-NEXT: S_CLAUSE 1
+ ; GFX1250-NEXT: $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
+ ; GFX1250-NEXT: FLAT_STORE_DWORD $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec, implicit $flat_scr
+ ; GFX1250-NEXT: }
+ $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
+ FLAT_STORE_DWORD $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec, implicit $flat_scr
+...
+
+---
+name: global_load_store
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1, $vgpr2
+ ; GFX1200-LABEL: name: global_load_store
+ ; GFX1200: liveins: $vgpr0_vgpr1, $vgpr2
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $vgpr3 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec
+ ;
+ ; GFX1250-LABEL: name: global_load_store
+ ; GFX1250: liveins: $vgpr0_vgpr1, $vgpr2
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: BUNDLE implicit-def $vgpr3, implicit-def $vgpr3_lo16, implicit-def $vgpr3_hi16, implicit $vgpr0_vgpr1, implicit $exec, implicit $vgpr2 {
+ ; GFX1250-NEXT: S_CLAUSE 1
+ ; GFX1250-NEXT: $vgpr3 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec
+ ; GFX1250-NEXT: }
+ $vgpr3 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec
+ GLOBAL_STORE_DWORD $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec
+...
+
+---
+name: buffer_load_store
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, $vgpr0
+ ; GFX1200-LABEL: name: buffer_load_store
+ ; GFX1200: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, $vgpr0
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, implicit $exec
+ ; GFX1200-NEXT: BUFFER_STORE_DWORD_OFFSET $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec
+ ;
+ ; GFX1250-LABEL: name: buffer_load_store
+ ; GFX1250: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, $vgpr0
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: BUNDLE implicit-def $vgpr1, implicit-def $vgpr1_lo16, implicit-def $vgpr1_hi16, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4, implicit $exec, implicit $vgpr0 {
+ ; GFX1250-NEXT: S_CLAUSE 1
+ ; GFX1250-NEXT: $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, implicit $exec
+ ; GFX1250-NEXT: BUFFER_STORE_DWORD_OFFSET $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec
+ ; GFX1250-NEXT: }
+ $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, implicit $exec
+ BUFFER_STORE_DWORD_OFFSET $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec
+...
+
+---
+name: flat_load_global_load
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1, $vgpr2
+ ; GFX12-LABEL: name: flat_load_global_load
+ ; GFX12: liveins: $vgpr0_vgpr1, $vgpr2
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
+ ; GFX12-NEXT: $vgpr4 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec
+ $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
+ $vgpr4 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec
+...
+
+---
+name: global_load_buffer_store
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1, $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4
+ ; GFX12-LABEL: name: global_load_buffer_store
+ ; GFX12: liveins: $vgpr0_vgpr1, $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: $vgpr4 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec
+ ; GFX12-NEXT: BUFFER_STORE_DWORD_OFFSET $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec
+ $vgpr4 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec
+ BUFFER_STORE_DWORD_OFFSET $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec
+...
---
name: flat_prefetch_flat_load
@@ -31,3 +532,106 @@ body: |
GLOBAL_PREFETCH_B8 $vgpr0_vgpr1, 0, 0, implicit $exec
$vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
...
+
+---
+name: async_load_async_store
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1, $vgpr2
+ ; GFX12-LABEL: name: async_load_async_store
+ ; GFX12: liveins: $vgpr0, $vgpr1, $vgpr2
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: BUNDLE implicit-def $asynccnt, implicit $vgpr2, implicit $vgpr0_vgpr1, implicit $exec, implicit $asynccnt {
+ ; GFX12-NEXT: S_CLAUSE 1
+ ; GFX12-NEXT: GLOBAL_LOAD_ASYNC_TO_LDS_B32 $vgpr2, $vgpr0_vgpr1, 0, 0, implicit-def $asynccnt, implicit $exec, implicit $asynccnt
+ ; GFX12-NEXT: GLOBAL_STORE_ASYNC_FROM_LDS_B32 $vgpr0_vgpr1, $vgpr2, 32, 0, implicit-def $asynccnt, implicit $exec, implicit internal $asynccnt
+ ; GFX12-NEXT: }
+ GLOBAL_LOAD_ASYNC_TO_LDS_B32 $vgpr2, $vgpr0_vgpr1, 0, 0, implicit-def $asynccnt, implicit $exec, implicit $asynccnt
+ GLOBAL_STORE_ASYNC_FROM_LDS_B32 $vgpr0_vgpr1, $vgpr2, 32, 0, implicit-def $asynccnt, implicit $exec, implicit $asynccnt
+...
+
+---
+name: async_load_ds_load_tr
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1, $vgpr2
+ ; GFX12-LABEL: name: async_load_ds_load_tr
+ ; GFX12: liveins: $vgpr0, $vgpr1, $vgpr2
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: GLOBAL_LOAD_ASYNC_TO_LDS_B32 $vgpr2, $vgpr0_vgpr1, 0, 0, implicit-def $asynccnt, implicit $exec, implicit $asynccnt
+ ; GFX12-NEXT: $vgpr0_vgpr1 = DS_LOAD_TR8_B64 $vgpr2, 8, 0, implicit $exec
+ GLOBAL_LOAD_ASYNC_TO_LDS_B32 $vgpr2, $vgpr0_vgpr1, 0, 0, implicit-def $asynccnt, implicit $exec, implicit $asynccnt
+ $vgpr0_vgpr1 = DS_LOAD_TR8_B64 $vgpr2, 8, 0, implicit $exec
+...
+
+---
+name: ds_load_trs_ds_load
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+ ; GFX12-LABEL: name: ds_load_trs_ds_load
+ ; GFX12: liveins: $vgpr0
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: $vgpr4_vgpr5 = DS_LOAD_TR8_B64 $vgpr0, 0, 0, implicit $exec
+ ; GFX12-NEXT: $vgpr0_vgpr1 = DS_LOAD_TR8_B64 $vgpr0, 8, 0, implicit $exec
+ ; GFX12-NEXT: $vgpr2_vgpr3 = DS_READ_B64_gfx9 $vgpr0, 16, 0, implicit $exec
+ $vgpr4_vgpr5 = DS_LOAD_TR8_B64 $vgpr0, 0, 0, implicit $exec
+ $vgpr0_vgpr1 = DS_LOAD_TR8_B64 $vgpr0, 8, 0, implicit $exec
+ $vgpr2_vgpr3 = DS_READ_B64_gfx9 $vgpr0, 16, 0, implicit $exec
+...
+
+# Make sure we do not clause DS_ATOMIC_ASYNC_BARRIER_ARRIVE_B64 with anything
+---
+name: ds_atomic_async_barrier_arrive_b64_ds_read
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1
+ ; GFX12-LABEL: name: ds_atomic_async_barrier_arrive_b64_ds_read
+ ; GFX12: liveins: $vgpr0, $vgpr1
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: $vgpr2 = DS_READ_B32_gfx9 $vgpr0, 0, 0, implicit $exec
+ ; GFX12-NEXT: DS_ATOMIC_ASYNC_BARRIER_ARRIVE_B64 $vgpr1, 0, 0, implicit-def $asynccnt, implicit $asynccnt, implicit $exec
+ ; GFX12-NEXT: $vgpr3 = DS_READ_B32_gfx9 $vgpr0, 16, 0, implicit $exec
+ $vgpr2 = DS_READ_B32_gfx9 $vgpr0, 0, 0, implicit $exec
+ DS_ATOMIC_ASYNC_BARRIER_ARRIVE_B64 $vgpr1, 0, 0, implicit-def $asynccnt, implicit $asynccnt, implicit $exec
+ $vgpr3 = DS_READ_B32_gfx9 $vgpr0, 16, 0, implicit $exec
+...
+
+---
+name: ds_atomic_async_barrier_arrive_b64_flat_load
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1
+ ; GFX12-LABEL: name: ds_atomic_async_barrier_arrive_b64_flat_load
+ ; GFX12: liveins: $vgpr0, $vgpr1
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: $vgpr2 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
+ ; GFX12-NEXT: DS_ATOMIC_ASYNC_BARRIER_ARRIVE_B64 $vgpr1, 0, 0, implicit-def $asynccnt, implicit $asynccnt, implicit $exec
+ ; GFX12-NEXT: $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 16, 0, implicit $exec, implicit $flat_scr
+ $vgpr2 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
+ DS_ATOMIC_ASYNC_BARRIER_ARRIVE_B64 $vgpr1, 0, 0, implicit-def $asynccnt, implicit $asynccnt, implicit $exec
+ $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 16, 0, implicit $exec, implicit $flat_scr
+...
+
+---
+name: global_load_switching_scope
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1
+ ; GFX12-LABEL: name: global_load_switching_scope
+ ; GFX12: liveins: $vgpr0_vgpr1
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: BUNDLE implicit-def $vgpr2, implicit-def $vgpr2_lo16, implicit-def $vgpr2_hi16, implicit-def $vgpr3, implicit-def $vgpr3_lo16, implicit-def $vgpr3_hi16, implicit $vgpr0_vgpr1, implicit $exec, implicit $flat_scr {
+ ; GFX12-NEXT: S_CLAUSE 1
+ ; GFX12-NEXT: $vgpr2 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
+ ; GFX12-NEXT: $vgpr3 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 4, 24, implicit $exec, implicit $flat_scr
+ ; GFX12-NEXT: }
+ $vgpr2 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
+ $vgpr3 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 4, 24, implicit $exec, implicit $flat_scr
+...
diff --git a/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-agpr-negative-tests.mir b/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-agpr-negative-tests.mir
index c7767cb8..b53bde6 100644
--- a/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-agpr-negative-tests.mir
+++ b/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-agpr-negative-tests.mir
@@ -20,11 +20,32 @@
ret void
}
+ define amdgpu_kernel void @inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_physreg_src2() #0 {
+ ret void
+ }
+
define amdgpu_kernel void @inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_src2_different_subreg() #0 {
ret void
}
+ define amdgpu_kernel void @inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_first() #1 {
+ ret void
+ }
+
+ define amdgpu_kernel void @inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_second() #1 {
+ ret void
+ }
+
+ define amdgpu_kernel void @inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_first_physreg() #1 {
+ ret void
+ }
+
+ define amdgpu_kernel void @inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_second_physreg() #1 {
+ ret void
+ }
+
attributes #0 = { "amdgpu-wave-limiter"="true" "amdgpu-waves-per-eu"="8,8" }
+ attributes #1 = { "amdgpu-wave-limiter"="true" "amdgpu-waves-per-eu"="10,10" }
...
# Inflate pattern, except the defining instruction isn't an MFMA.
@@ -403,6 +424,89 @@ body: |
...
+# Non-mac variant, src2 is a physical register
+---
+name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_physreg_src2
+tracksRegLiveness: true
+machineFunctionInfo:
+ isEntryFunction: true
+ stackPtrOffsetReg: '$sgpr32'
+ occupancy: 10
+ sgprForEXECCopy: '$sgpr100_sgpr101'
+body: |
+ ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_physreg_src2
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0
+ ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0
+ ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0
+ ; CHECK-NEXT: renamable $vgpr0_vgpr1 = COPY killed renamable $sgpr0_sgpr1
+ ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: liveins: $vcc, $vgpr0_vgpr1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: early-clobber renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr0_vgpr1, $vgpr0_vgpr1, undef $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ ; CHECK-NEXT: S_BRANCH %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: liveins: $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17:0x00000000FFFFFFFF
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: S_ENDPGM 0
+ bb.0:
+ S_NOP 0, implicit-def $agpr0
+ renamable $sgpr0 = S_MOV_B32 0
+ undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec
+ renamable $sgpr1 = COPY renamable $sgpr0
+ %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1
+ renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ %0.sub9:vreg_512_align2 = COPY %0.sub8
+
+ bb.1:
+ liveins: $vcc
+
+ %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, undef $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
+ S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ S_BRANCH %bb.2
+
+ bb.2:
+ ; No VGPRs available for %0
+ S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ %2:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ S_ENDPGM 0
+
+...
+
# Non-mac variant, src2 is the same VGPR, but a different subregister.
---
name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_src2_different_subreg
@@ -489,3 +593,423 @@ body: |
S_ENDPGM 0
...
+
+# There isn't an assignable AGPR around the first MFMA.
+---
+name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_first
+tracksRegLiveness: true
+machineFunctionInfo:
+ isEntryFunction: true
+ stackPtrOffsetReg: '$sgpr32'
+ occupancy: 10
+ sgprForEXECCopy: '$sgpr100_sgpr101'
+body: |
+ ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_first
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0
+ ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0
+ ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0
+ ; CHECK-NEXT: renamable $vgpr18_vgpr19 = COPY killed renamable $sgpr0_sgpr1
+ ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: liveins: $vcc, $vgpr18_vgpr19
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vgpr16_vgpr17 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ ; CHECK-NEXT: S_NOP 0, implicit-def renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, implicit-def renamable $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31, implicit-def renamable $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39_agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47, implicit-def renamable $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55_agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63
+ ; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: S_NOP 0, implicit killed renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, implicit killed renamable $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31, implicit killed renamable $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39_agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47, implicit killed renamable $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55_agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63
+ ; CHECK-NEXT: early-clobber renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ ; CHECK-NEXT: S_BRANCH %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: liveins: $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35:0x00000000FFFFFFFF
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: S_ENDPGM 0
+ bb.0:
+ S_NOP 0, implicit-def $agpr0
+ renamable $sgpr0 = S_MOV_B32 0
+ undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec
+ renamable $sgpr1 = COPY renamable $sgpr0
+ %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1
+ renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ %0.sub9:vreg_512_align2 = COPY %0.sub8
+
+ bb.1:
+ liveins: $vcc
+
+ undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %2:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ S_NOP 0, implicit-def %6:areg_512_align2, implicit-def %7:areg_512_align2, implicit-def %8:areg_512_align2, implicit-def %9:areg_512_align2
+ %3:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
+ S_NOP 0, implicit %6, implicit %7, implicit %8, implicit %9
+ %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %3, 0, 0, 0, implicit $mode, implicit $exec
+ S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ S_BRANCH %bb.2
+
+ bb.2:
+ ; No VGPRs available for %0 or %4
+ S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ S_ENDPGM 0
+
+...
+
+# There isn't an assignable AGPR around the second MFMA.
+---
+name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_second
+tracksRegLiveness: true
+machineFunctionInfo:
+ isEntryFunction: true
+ stackPtrOffsetReg: '$sgpr32'
+ occupancy: 10
+ sgprForEXECCopy: '$sgpr100_sgpr101'
+body: |
+ ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_second
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0
+ ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0
+ ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0
+ ; CHECK-NEXT: renamable $vgpr18_vgpr19 = COPY killed renamable $sgpr0_sgpr1
+ ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: liveins: $vcc, $vgpr18_vgpr19
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vgpr16_vgpr17 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ ; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: S_NOP 0, implicit-def renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, implicit-def renamable $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31, implicit-def renamable $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39_agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47, implicit-def renamable $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55_agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63
+ ; CHECK-NEXT: early-clobber renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: S_NOP 0, implicit killed renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, implicit killed renamable $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31, implicit killed renamable $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39_agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47, implicit killed renamable $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55_agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63
+ ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ ; CHECK-NEXT: S_BRANCH %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: liveins: $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35:0x00000000FFFFFFFF
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: S_ENDPGM 0
+ bb.0:
+ S_NOP 0, implicit-def $agpr0
+ renamable $sgpr0 = S_MOV_B32 0
+ undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec
+ renamable $sgpr1 = COPY renamable $sgpr0
+ %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1
+ renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ %0.sub9:vreg_512_align2 = COPY %0.sub8
+
+ bb.1:
+ liveins: $vcc
+
+ undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %2:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ %3:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
+ S_NOP 0, implicit-def %6:areg_512_align2, implicit-def %7:areg_512_align2, implicit-def %8:areg_512_align2, implicit-def %9:areg_512_align2
+ %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %3, 0, 0, 0, implicit $mode, implicit $exec
+ S_NOP 0, implicit %6, implicit %7, implicit %8, implicit %9
+ S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ S_BRANCH %bb.2
+
+ bb.2:
+ ; No VGPRs available for %0 or %4
+ S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ S_ENDPGM 0
+
+...
+
+# There isn't an assignable AGPR around the first MFMA, with physreg interference.
+---
+name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_first_physreg
+tracksRegLiveness: true
+machineFunctionInfo:
+ isEntryFunction: true
+ stackPtrOffsetReg: '$sgpr32'
+ occupancy: 10
+ sgprForEXECCopy: '$sgpr100_sgpr101'
+body: |
+ ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_first_physreg
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0
+ ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0
+ ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0
+ ; CHECK-NEXT: renamable $vgpr18_vgpr19 = COPY killed renamable $sgpr0_sgpr1
+ ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: liveins: $vcc, $vgpr18_vgpr19
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vgpr16_vgpr17 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63
+ ; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: S_NOP 0, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7
+ ; CHECK-NEXT: S_NOP 0, implicit $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ ; CHECK-NEXT: S_NOP 0, implicit $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23
+ ; CHECK-NEXT: S_NOP 0, implicit $agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31
+ ; CHECK-NEXT: S_NOP 0, implicit $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39
+ ; CHECK-NEXT: S_NOP 0, implicit $agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47
+ ; CHECK-NEXT: S_NOP 0, implicit $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55
+ ; CHECK-NEXT: S_NOP 0, implicit $agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63
+ ; CHECK-NEXT: early-clobber renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ ; CHECK-NEXT: S_BRANCH %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: liveins: $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35:0x00000000FFFFFFFF
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: S_ENDPGM 0
+ bb.0:
+ S_NOP 0, implicit-def $agpr0
+ renamable $sgpr0 = S_MOV_B32 0
+ undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec
+ renamable $sgpr1 = COPY renamable $sgpr0
+ %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1
+ renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ %0.sub9:vreg_512_align2 = COPY %0.sub8
+
+ bb.1:
+ liveins: $vcc
+
+ undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %2:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ S_NOP 0, implicit-def $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7
+ S_NOP 0, implicit-def $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ S_NOP 0, implicit-def $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23
+ S_NOP 0, implicit-def $agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31
+ S_NOP 0, implicit-def $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39
+ S_NOP 0, implicit-def $agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47
+ S_NOP 0, implicit-def $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55
+ S_NOP 0, implicit-def $agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63
+ %3:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
+ S_NOP 0, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7
+ S_NOP 0, implicit $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ S_NOP 0, implicit $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23
+ S_NOP 0, implicit $agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31
+ S_NOP 0, implicit $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39
+ S_NOP 0, implicit $agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47
+ S_NOP 0, implicit $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55
+ S_NOP 0, implicit $agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63
+ %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %3, 0, 0, 0, implicit $mode, implicit $exec
+ S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ S_BRANCH %bb.2
+
+ bb.2:
+ ; No VGPRs available for %0 or %4
+ S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ S_ENDPGM 0
+
+...
+
+# There isn't an assignable AGPR around the second MFMA, with physreg interference.
+---
+name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_second_physreg
+tracksRegLiveness: true
+machineFunctionInfo:
+ isEntryFunction: true
+ stackPtrOffsetReg: '$sgpr32'
+ occupancy: 10
+ sgprForEXECCopy: '$sgpr100_sgpr101'
+body: |
+ ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_second_physreg
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0
+ ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0
+ ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0
+ ; CHECK-NEXT: renamable $vgpr18_vgpr19 = COPY killed renamable $sgpr0_sgpr1
+ ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: liveins: $vcc, $vgpr18_vgpr19
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vgpr16_vgpr17 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ ; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63
+ ; CHECK-NEXT: early-clobber renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: S_NOP 0, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7
+ ; CHECK-NEXT: S_NOP 0, implicit $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ ; CHECK-NEXT: S_NOP 0, implicit $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23
+ ; CHECK-NEXT: S_NOP 0, implicit $agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31
+ ; CHECK-NEXT: S_NOP 0, implicit $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39
+ ; CHECK-NEXT: S_NOP 0, implicit $agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47
+ ; CHECK-NEXT: S_NOP 0, implicit $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55
+ ; CHECK-NEXT: S_NOP 0, implicit $agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63
+ ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ ; CHECK-NEXT: S_BRANCH %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: liveins: $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35:0x00000000FFFFFFFF
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: S_ENDPGM 0
+ bb.0:
+ S_NOP 0, implicit-def $agpr0
+ renamable $sgpr0 = S_MOV_B32 0
+ undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec
+ renamable $sgpr1 = COPY renamable $sgpr0
+ %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1
+ renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ %0.sub9:vreg_512_align2 = COPY %0.sub8
+
+ bb.1:
+ liveins: $vcc
+
+ undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %2:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ %3:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
+ S_NOP 0, implicit-def $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7
+ S_NOP 0, implicit-def $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ S_NOP 0, implicit-def $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23
+ S_NOP 0, implicit-def $agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31
+ S_NOP 0, implicit-def $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39
+ S_NOP 0, implicit-def $agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47
+ S_NOP 0, implicit-def $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55
+ S_NOP 0, implicit-def $agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63
+ %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %3, 0, 0, 0, implicit $mode, implicit $exec
+ S_NOP 0, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7
+ S_NOP 0, implicit $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ S_NOP 0, implicit $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23
+ S_NOP 0, implicit $agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31
+ S_NOP 0, implicit $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39
+ S_NOP 0, implicit $agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47
+ S_NOP 0, implicit $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55
+ S_NOP 0, implicit $agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63
+ S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ S_BRANCH %bb.2
+
+ bb.2:
+ ; No VGPRs available for %0 or %4
+ S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ S_ENDPGM 0
+
+...
diff --git a/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-av-with-load-source.mir b/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-av-with-load-source.mir
index b907c13..b59f2de 100644
--- a/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-av-with-load-source.mir
+++ b/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-av-with-load-source.mir
@@ -445,6 +445,86 @@ body: |
...
+
+---
+name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_two_chained_uses_cannot_rewrite_final_use
+tracksRegLiveness: true
+machineFunctionInfo:
+ isEntryFunction: true
+ stackPtrOffsetReg: '$sgpr32'
+ occupancy: 10
+ sgprForEXECCopy: '$sgpr100_sgpr101'
+body: |
+ ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_two_chained_uses_cannot_rewrite_final_use
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0
+ ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0
+ ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0
+ ; CHECK-NEXT: renamable $vgpr0_vgpr1 = COPY killed renamable $sgpr0_sgpr1
+ ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: liveins: $vcc, $vgpr0_vgpr1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vgpr2_vgpr3 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 $vgpr0_vgpr1, $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 $vgpr0_vgpr1, $vgpr0_vgpr1, killed $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ ; CHECK-NEXT: S_BRANCH %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: liveins: $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17:0x00000000FFFFFFFF
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ ; CHECK-NEXT: renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY killed renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ ; CHECK-NEXT: INLINEASM &"; use $0 ", 1 /* sideeffect attdialect */, 27983881 /* reguse:VReg_512_Align2 */, killed renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: S_ENDPGM 0
+ bb.0:
+ S_NOP 0, implicit-def $agpr0
+ renamable $sgpr0 = S_MOV_B32 0
+ undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec
+ renamable $sgpr1 = COPY renamable $sgpr0
+ %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1
+ renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ %0.sub9:vreg_512_align2 = COPY %0.sub8
+
+ bb.1:
+ liveins: $vcc
+
+ undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
+ %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
+ S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ S_BRANCH %bb.2
+
+ bb.2:
+ ; No VGPRs available for %0
+ S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ INLINEASM &"; use $0 ", 1 /* sideeffect attdialect */, 27983881 /* reguse:VReg_512_Align2 */, %0:vreg_512_align2
+ S_ENDPGM 0
+
+...
+
# There is a rewrite candidate, but it is used by another MFMA which
# does not have a tied result.
---
@@ -619,10 +699,9 @@ body: |
S_ENDPGM 0
...
-
-# There isn't an assignable AGPR around the first MFMA.
+# Chain of 2 untied cases, but the use isn't in src2.
---
-name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_first
+name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_non_src2
tracksRegLiveness: true
machineFunctionInfo:
isEntryFunction: true
@@ -630,7 +709,7 @@ machineFunctionInfo:
occupancy: 10
sgprForEXECCopy: '$sgpr100_sgpr101'
body: |
- ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_first
+ ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_non_src2
; CHECK: bb.0:
; CHECK-NEXT: successors: %bb.1(0x80000000)
; CHECK-NEXT: {{ $}}
@@ -647,10 +726,8 @@ body: |
; CHECK-NEXT: liveins: $vcc, $vgpr18_vgpr19
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: renamable $vgpr16_vgpr17 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1)
- ; CHECK-NEXT: S_NOP 0, implicit-def renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, implicit-def renamable $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31, implicit-def renamable $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39_agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47, implicit-def renamable $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55_agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63
; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 0, 0, 0, implicit $mode, implicit $exec
- ; CHECK-NEXT: S_NOP 0, implicit killed renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, implicit killed renamable $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31, implicit killed renamable $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39_agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47, implicit killed renamable $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55_agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63
- ; CHECK-NEXT: early-clobber renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: early-clobber renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 = V_MFMA_F32_32X32X8F16_vgprcd_e64 killed $vgpr4_vgpr5, $vgpr8_vgpr9, undef $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc
; CHECK-NEXT: S_BRANCH %bb.2
; CHECK-NEXT: {{ $}}
@@ -685,10 +762,8 @@ body: |
liveins: $vcc
undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %2:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
- S_NOP 0, implicit-def %6:areg_512_align2, implicit-def %7:areg_512_align2, implicit-def %8:areg_512_align2, implicit-def %9:areg_512_align2
%3:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
- S_NOP 0, implicit %6, implicit %7, implicit %8, implicit %9
- %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %3, 0, 0, 0, implicit $mode, implicit $exec
+ %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %3.sub4_sub5, %3.sub8_sub9, undef %6:vreg_512_align2, 0, 0, 0, implicit $mode, implicit $exec
S_CBRANCH_VCCNZ %bb.1, implicit $vcc
S_BRANCH %bb.2
@@ -711,9 +786,10 @@ body: |
...
-# There isn't an assignable AGPR around the second MFMA.
+# Chain of 2 untied cases, but the second MFMA is a different size and
+# uses a subregister.
---
-name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_second
+name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_subreg
tracksRegLiveness: true
machineFunctionInfo:
isEntryFunction: true
@@ -721,7 +797,7 @@ machineFunctionInfo:
occupancy: 10
sgprForEXECCopy: '$sgpr100_sgpr101'
body: |
- ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_second
+ ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_subreg
; CHECK: bb.0:
; CHECK-NEXT: successors: %bb.1(0x80000000)
; CHECK-NEXT: {{ $}}
@@ -739,18 +815,16 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: renamable $vgpr16_vgpr17 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1)
; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 0, 0, 0, implicit $mode, implicit $exec
- ; CHECK-NEXT: S_NOP 0, implicit-def renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, implicit-def renamable $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31, implicit-def renamable $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39_agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47, implicit-def renamable $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55_agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63
- ; CHECK-NEXT: early-clobber renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
- ; CHECK-NEXT: S_NOP 0, implicit killed renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, implicit killed renamable $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31, implicit killed renamable $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39_agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47, implicit killed renamable $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55_agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63
+ ; CHECK-NEXT: renamable $vgpr0_vgpr1_vgpr2_vgpr3 = V_MFMA_F32_16X16X16F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, killed $vgpr2_vgpr3_vgpr4_vgpr5, 0, 0, 0, implicit $mode, implicit $exec
; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc
; CHECK-NEXT: S_BRANCH %bb.2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
- ; CHECK-NEXT: liveins: $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35:0x00000000FFFFFFFF
+ ; CHECK-NEXT: liveins: $vgpr0_vgpr1_vgpr2_vgpr3
; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3 = COPY killed renamable $vgpr0_vgpr1_vgpr2_vgpr3
; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
- ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35
; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
@@ -758,10 +832,7 @@ body: |
; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec
- ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
- ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
- ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
- ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
; CHECK-NEXT: S_ENDPGM 0
bb.0:
S_NOP 0, implicit-def $agpr0
@@ -777,9 +848,7 @@ body: |
undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %2:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
%3:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
- S_NOP 0, implicit-def %6:areg_512_align2, implicit-def %7:areg_512_align2, implicit-def %8:areg_512_align2, implicit-def %9:areg_512_align2
- %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %3, 0, 0, 0, implicit $mode, implicit $exec
- S_NOP 0, implicit %6, implicit %7, implicit %8, implicit %9
+ %4:vreg_128_align2 = V_MFMA_F32_16X16X16F16_vgprcd_e64 %1, %1, %3.sub2_sub3_sub4_sub5, 0, 0, 0, implicit $mode, implicit $exec
S_CBRANCH_VCCNZ %bb.1, implicit $vcc
S_BRANCH %bb.2
@@ -794,6 +863,229 @@ body: |
S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
%5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ S_ENDPGM 0
+
+...
+
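+# Single-block case: with no VGPRs available for the result, the tied
+# MFMA is rewritten to the AGPR (mac_e64) form and the result is copied
+# back to VGPRs for the stores.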
+---
+name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_local_split
+tracksRegLiveness: true
+machineFunctionInfo:
+ isEntryFunction: true
+ stackPtrOffsetReg: '$sgpr32'
+ occupancy: 10
+ sgprForEXECCopy: '$sgpr100_sgpr101'
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_local_split
+ ; CHECK: S_NOP 0, implicit-def $agpr0
+ ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0
+ ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0
+ ; CHECK-NEXT: renamable $vgpr0_vgpr1 = COPY killed renamable $sgpr0_sgpr1
+ ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8
+ ; CHECK-NEXT: renamable $agpr0_agpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X8F16_mac_e64 killed $vgpr0_vgpr1, $vgpr0_vgpr1, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = COPY killed renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr10_vgpr11_vgpr12_vgpr13, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr14_vgpr15_vgpr16_vgpr17, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr2_vgpr3_vgpr4_vgpr5, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $vgpr6_vgpr7_vgpr8_vgpr9, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: S_ENDPGM 0
+ S_NOP 0, implicit-def $agpr0
+ renamable $sgpr0 = S_MOV_B32 0
+ undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec
+ renamable $sgpr1 = COPY renamable $sgpr0
+ %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1
+ renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ %0.sub9:vreg_512_align2 = COPY %0.sub8
+ undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
+ ; No VGPRs available for %0
+ S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ %2:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ S_ENDPGM 0
+
+...
+
+# Performs a split and inflate around the single instruction.
+---
+name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_instruction_split
+tracksRegLiveness: true
+machineFunctionInfo:
+ isEntryFunction: true
+ stackPtrOffsetReg: '$sgpr32'
+ occupancy: 10
+ sgprForEXECCopy: '$sgpr100_sgpr101'
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_instruction_split
+ ; CHECK: S_NOP 0, implicit-def $agpr0
+ ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0
+ ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0
+ ; CHECK-NEXT: renamable $vgpr0_vgpr1 = COPY killed renamable $sgpr0_sgpr1
+ ; CHECK-NEXT: SI_SPILL_AV64_SAVE killed $vgpr0_vgpr1, %stack.0, $sgpr32, 0, implicit $exec :: (store (s64) into %stack.0, align 4, addrspace 5)
+ ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8
+ ; CHECK-NEXT: renamable $agpr0_agpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ ; CHECK-NEXT: renamable $vgpr2_vgpr3 = SI_SPILL_AV64_RESTORE %stack.0, $sgpr32, 0, implicit $exec :: (load (s64) from %stack.0, align 4, addrspace 5)
+ ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X8F16_mac_e64 killed $vgpr2_vgpr3, $vgpr2_vgpr3, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = COPY killed renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr10_vgpr11_vgpr12_vgpr13, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr14_vgpr15_vgpr16_vgpr17, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr2_vgpr3_vgpr4_vgpr5, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $vgpr6_vgpr7_vgpr8_vgpr9, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: S_ENDPGM 0
+ S_NOP 0, implicit-def $agpr0
+ renamable $sgpr0 = S_MOV_B32 0
+ undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec
+ renamable $sgpr1 = COPY renamable $sgpr0
+ %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1
+ renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ %0.sub9:vreg_512_align2 = COPY %0.sub8
+ undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
+ ; No VGPRs available for %0
+ S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ %2:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ S_ENDPGM 0
+
+...
+
+# Performs a split and inflate around the single instruction, non-tied case.
+---
+name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_instruction_split
+tracksRegLiveness: true
+machineFunctionInfo:
+ isEntryFunction: true
+ stackPtrOffsetReg: '$sgpr32'
+ occupancy: 10
+ sgprForEXECCopy: '$sgpr100_sgpr101'
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_instruction_split
+ ; CHECK: S_NOP 0, implicit-def $agpr0
+ ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0
+ ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0
+ ; CHECK-NEXT: renamable $vgpr0_vgpr1 = COPY killed renamable $sgpr0_sgpr1
+ ; CHECK-NEXT: SI_SPILL_AV64_SAVE killed $vgpr0_vgpr1, %stack.0, $sgpr32, 0, implicit $exec :: (store (s64) into %stack.0, align 4, addrspace 5)
+ ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8
+ ; CHECK-NEXT: renamable $agpr0_agpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ ; CHECK-NEXT: renamable $vgpr0_vgpr1 = SI_SPILL_AV64_RESTORE %stack.0, $sgpr32, 0, implicit $exec :: (load (s64) from %stack.0, align 4, addrspace 5)
+ ; CHECK-NEXT: renamable $vgpr18_vgpr19 = COPY killed renamable $agpr0_agpr1
+ ; CHECK-NEXT: early-clobber renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = V_MFMA_F32_32X32X8F16_vgprcd_e64 killed $vgpr0_vgpr1, $vgpr0_vgpr1, $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = COPY killed renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr10_vgpr11_vgpr12_vgpr13, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr14_vgpr15_vgpr16_vgpr17, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr2_vgpr3_vgpr4_vgpr5, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $vgpr6_vgpr7_vgpr8_vgpr9, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: S_ENDPGM 0
+ S_NOP 0, implicit-def $agpr0
+ renamable $sgpr0 = S_MOV_B32 0
+ undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec
+ renamable $sgpr1 = COPY renamable $sgpr0
+ %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1
+ renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ %0.sub9:vreg_512_align2 = COPY %0.sub8
+ undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
+ ; No VGPRs available for %4
+ S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
@@ -802,9 +1094,11 @@ body: |
...
-# Chain of 2 untied cases, but the use isn't in src2.
+# This case does not fully use %0 after the MFMA. As a result,
+# SplitKit inserts a copy bundle for the subset of used lanes instead
+# of a simple copy.
---
-name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_non_src2
+name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_instruction_split_partial_uses_only
tracksRegLiveness: true
machineFunctionInfo:
isEntryFunction: true
@@ -812,7 +1106,447 @@ machineFunctionInfo:
occupancy: 10
sgprForEXECCopy: '$sgpr100_sgpr101'
body: |
- ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_non_src2
+ bb.0:
+ ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_instruction_split_partial_uses_only
+ ; CHECK: S_NOP 0, implicit-def $agpr0
+ ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0
+ ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0
+ ; CHECK-NEXT: renamable $vgpr0_vgpr1 = COPY killed renamable $sgpr0_sgpr1
+ ; CHECK-NEXT: SI_SPILL_AV64_SAVE killed $vgpr0_vgpr1, %stack.0, $sgpr32, 0, implicit $exec :: (store (s64) into %stack.0, align 4, addrspace 5)
+ ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8
+ ; CHECK-NEXT: renamable $agpr0_agpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ ; CHECK-NEXT: renamable $vgpr0_vgpr1 = COPY killed renamable $agpr0_agpr1
+ ; CHECK-NEXT: renamable $vgpr2_vgpr3 = SI_SPILL_AV64_RESTORE %stack.0, $sgpr32, 0, implicit $exec :: (load (s64) from %stack.0, align 4, addrspace 5)
+ ; CHECK-NEXT: renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 killed $vgpr2_vgpr3, $vgpr2_vgpr3, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: renamable $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3 = COPY renamable $vgpr0_vgpr1_vgpr2_vgpr3
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = COPY killed renamable $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5 = COPY renamable $agpr0_agpr1_agpr2_agpr3
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr10_vgpr11_vgpr12_vgpr13, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr14_vgpr15_vgpr16_vgpr17, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $vgpr2_vgpr3_vgpr4_vgpr5, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ ; CHECK-NEXT: S_ENDPGM 0
+ S_NOP 0, implicit-def $agpr0
+ renamable $sgpr0 = S_MOV_B32 0
+ undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec
+ renamable $sgpr1 = COPY renamable $sgpr0
+ %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1
+ renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ %0.sub9:vreg_512_align2 = COPY %0.sub8
+ undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
+ ; No VGPRs available for %0
+ S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ %2:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ S_ENDPGM 0
+
+...
+
+# Untied version of the previous case. This case does not fully use %4
+# after the MFMA. As a result, SplitKit inserts a copy bundle for the
+# subset of used lanes instead of a simple copy.
+---
+name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_instruction_split_partial_uses_only
+tracksRegLiveness: true
+machineFunctionInfo:
+ isEntryFunction: true
+ stackPtrOffsetReg: '$sgpr32'
+ occupancy: 10
+ sgprForEXECCopy: '$sgpr100_sgpr101'
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_instruction_split_partial_uses_only
+ ; CHECK: S_NOP 0, implicit-def $agpr0
+ ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0
+ ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0
+ ; CHECK-NEXT: renamable $vgpr0_vgpr1 = COPY killed renamable $sgpr0_sgpr1
+ ; CHECK-NEXT: SI_SPILL_AV64_SAVE killed $vgpr0_vgpr1, %stack.0, $sgpr32, 0, implicit $exec :: (store (s64) into %stack.0, align 4, addrspace 5)
+ ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8
+ ; CHECK-NEXT: renamable $agpr0_agpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ ; CHECK-NEXT: renamable $vgpr0_vgpr1 = SI_SPILL_AV64_RESTORE %stack.0, $sgpr32, 0, implicit $exec :: (load (s64) from %stack.0, align 4, addrspace 5)
+ ; CHECK-NEXT: renamable $vgpr18_vgpr19 = COPY killed renamable $agpr0_agpr1
+ ; CHECK-NEXT: early-clobber renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = V_MFMA_F32_32X32X8F16_vgprcd_e64 killed $vgpr0_vgpr1, $vgpr0_vgpr1, $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: renamable $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17
+ ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3 = COPY renamable $vgpr2_vgpr3_vgpr4_vgpr5
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = COPY killed renamable $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5 = COPY renamable $agpr0_agpr1_agpr2_agpr3
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr10_vgpr11_vgpr12_vgpr13, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr14_vgpr15_vgpr16_vgpr17, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $vgpr2_vgpr3_vgpr4_vgpr5, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ ; CHECK-NEXT: S_ENDPGM 0
+ S_NOP 0, implicit-def $agpr0
+ renamable $sgpr0 = S_MOV_B32 0
+ undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec
+ renamable $sgpr1 = COPY renamable $sgpr0
+ %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1
+ renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ %0.sub9:vreg_512_align2 = COPY %0.sub8
+ undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
+ ; No VGPRs available for %4
+ S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ S_ENDPGM 0
+
+...
+
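+# The tied MFMA reads and writes the same 512-bit subregister of a
+# 1024-bit virtual register.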
+---
+name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_same_subreg
+tracksRegLiveness: true
+machineFunctionInfo:
+ isEntryFunction: true
+ stackPtrOffsetReg: '$sgpr32'
+ occupancy: 10
+ sgprForEXECCopy: '$sgpr100_sgpr101'
+body: |
+ ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_same_subreg
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0
+ ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0
+ ; CHECK-NEXT: renamable $vgpr10 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0
+ ; CHECK-NEXT: renamable $vgpr0_vgpr1 = COPY killed renamable $sgpr0_sgpr1
+ ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ ; CHECK-NEXT: renamable $vgpr11 = COPY renamable $vgpr10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: liveins: $vcc, $vgpr0_vgpr1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5 = GLOBAL_LOAD_DWORDX4 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s128), addrspace 1)
+ ; CHECK-NEXT: renamable $vgpr6_vgpr7_vgpr8_vgpr9 = GLOBAL_LOAD_DWORDX4 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s128), addrspace 1)
+ ; CHECK-NEXT: renamable $vgpr10_vgpr11_vgpr12_vgpr13 = GLOBAL_LOAD_DWORDX4 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s128), addrspace 1)
+ ; CHECK-NEXT: renamable $vgpr14_vgpr15_vgpr16_vgpr17 = GLOBAL_LOAD_DWORDX4 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s128), addrspace 1)
+ ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 $vgpr0_vgpr1, $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ ; CHECK-NEXT: S_BRANCH %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: liveins: $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33:0x00000000FFFFFFFF
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: S_ENDPGM 0
+ bb.0:
+ S_NOP 0, implicit-def $agpr0
+ renamable $sgpr0 = S_MOV_B32 0
+ undef %0.sub8:vreg_1024_align2 = V_MOV_B32_e32 0, implicit $exec
+ renamable $sgpr1 = COPY renamable $sgpr0
+ %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1
+ renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ %0.sub9:vreg_1024_align2 = COPY %0.sub8
+
+ bb.1:
+ liveins: $vcc
+
+ %0.sub0_sub1_sub2_sub3:vreg_1024_align2 = GLOBAL_LOAD_DWORDX4 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s128), addrspace 1)
+ %0.sub4_sub5_sub6_sub7:vreg_1024_align2 = GLOBAL_LOAD_DWORDX4 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s128), addrspace 1)
+ %0.sub8_sub9_sub10_sub11:vreg_1024_align2 = GLOBAL_LOAD_DWORDX4 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s128), addrspace 1)
+ %0.sub12_sub13_sub14_sub15:vreg_1024_align2 = GLOBAL_LOAD_DWORDX4 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s128), addrspace 1)
+ %0.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7_sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15:vreg_1024_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %0.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7_sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15, 0, 0, 0, implicit $mode, implicit $exec
+ S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ S_BRANCH %bb.2
+
+ bb.2:
+ ; No VGPRs available for %0
+ S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ %2:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ S_ENDPGM 0
+
+...
+
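+# Chained MFMAs where the destination of the second MFMA is consumed by
+# an inline asm VGPR (VReg_512_Align2) use inside the loop.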
+---
+name: chained_mfma_dst_user_is_vgpr
+tracksRegLiveness: true
+machineFunctionInfo:
+ isEntryFunction: true
+ stackPtrOffsetReg: '$sgpr32'
+ occupancy: 10
+ sgprForEXECCopy: '$sgpr100_sgpr101'
+body: |
+ ; CHECK-LABEL: name: chained_mfma_dst_user_is_vgpr
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0
+ ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0
+ ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0
+ ; CHECK-NEXT: renamable $vgpr16_vgpr17 = COPY killed renamable $sgpr0_sgpr1
+ ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: liveins: $vcc, $vgpr16_vgpr17
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vgpr0_vgpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ ; CHECK-NEXT: early-clobber renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 27983881 /* reguse:VReg_512_Align2 */, killed renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ ; CHECK-NEXT: S_BRANCH %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: liveins: $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33:0x00000000FFFFFFFF
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: S_ENDPGM 0
+ bb.0:
+ successors: %bb.1(0x80000000)
+
+ S_NOP 0, implicit-def $agpr0
+ renamable $sgpr0 = S_MOV_B32 0
+ undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec
+ renamable $sgpr1 = COPY renamable $sgpr0
+ %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1
+ renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ %0.sub9:vreg_512_align2 = COPY %0.sub8
+
+ bb.1:
+ successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ liveins: $vcc
+
+ undef %2.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ early-clobber %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %2, 0, 0, 0, implicit $mode, implicit $exec
+ early-clobber %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
+ INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 27983881 /* reguse:VReg_512_Align2 */, %4
+ S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ S_BRANCH %bb.2
+
+ bb.2:
+ S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ %6:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ GLOBAL_STORE_DWORDX4_SADDR %6, %0.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %6, %0.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %6, %0.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %6, %0.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ S_ENDPGM 0
+
+...
+
+# TODO: In this trivial case, the single copy required is cheaper than
+# the tuple copy.
+---
+name: chained_mfma_dst_user_is_vgpr_small_subreg
+tracksRegLiveness: true
+machineFunctionInfo:
+ isEntryFunction: true
+ stackPtrOffsetReg: '$sgpr32'
+ occupancy: 10
+ sgprForEXECCopy: '$sgpr100_sgpr101'
+body: |
+ ; CHECK-LABEL: name: chained_mfma_dst_user_is_vgpr_small_subreg
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0
+ ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0
+ ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0
+ ; CHECK-NEXT: renamable $vgpr16_vgpr17 = COPY killed renamable $sgpr0_sgpr1
+ ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: liveins: $vcc, $vgpr16_vgpr17
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vgpr0_vgpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ ; CHECK-NEXT: early-clobber renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: dead renamable $vgpr0 = nofpexcept V_CVT_F16_F32_e32 killed $vgpr0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ ; CHECK-NEXT: S_BRANCH %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: liveins: $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33:0x00000000FFFFFFFF
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: S_ENDPGM 0
+ bb.0:
+ successors: %bb.1(0x80000000)
+
+ S_NOP 0, implicit-def $agpr0
+ renamable $sgpr0 = S_MOV_B32 0
+ undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec
+ renamable $sgpr1 = COPY renamable $sgpr0
+ %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1
+ renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ %0.sub9:vreg_512_align2 = COPY %0.sub8
+
+ bb.1:
+ successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ liveins: $vcc
+
+ undef %2.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ early-clobber %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %2, 0, 0, 0, implicit $mode, implicit $exec
+ early-clobber %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
+ %5:vgpr_32 = nofpexcept V_CVT_F16_F32_e32 %4.sub0, implicit $mode, implicit $exec
+ S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ S_BRANCH %bb.2
+
+ bb.2:
+ S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ %6:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ GLOBAL_STORE_DWORDX4_SADDR %6, %0.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %6, %0.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %6, %0.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %6, %0.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ S_ENDPGM 0
+
+...
+
+# Transitive user of the register is an MFMA with non-register src2
+---
+name: chained_mfma_dst_user_has_imm_src2
+tracksRegLiveness: true
+machineFunctionInfo:
+ isEntryFunction: true
+ stackPtrOffsetReg: '$sgpr32'
+ occupancy: 10
+ sgprForEXECCopy: '$sgpr100_sgpr101'
+body: |
+ ; CHECK-LABEL: name: chained_mfma_dst_user_has_imm_src2
; CHECK: bb.0:
; CHECK-NEXT: successors: %bb.1(0x80000000)
; CHECK-NEXT: {{ $}}
@@ -830,7 +1564,8 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: renamable $vgpr16_vgpr17 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1)
; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 0, 0, 0, implicit $mode, implicit $exec
- ; CHECK-NEXT: early-clobber renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 = V_MFMA_F32_32X32X8F16_vgprcd_e64 killed $vgpr4_vgpr5, $vgpr8_vgpr9, undef $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: early-clobber renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: renamable $vgpr20_vgpr21_vgpr22_vgpr23 = V_MFMA_F32_4X4X4F16_vgprcd_e64 $vgpr20_vgpr21, $vgpr18_vgpr19, 0, 0, 0, 0, implicit $mode, implicit $exec
; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc
; CHECK-NEXT: S_BRANCH %bb.2
; CHECK-NEXT: {{ $}}
@@ -853,6 +1588,8 @@ body: |
; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
; CHECK-NEXT: S_ENDPGM 0
bb.0:
+ successors: %bb.1(0x80000000)
+
S_NOP 0, implicit-def $agpr0
renamable $sgpr0 = S_MOV_B32 0
undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec
@@ -862,16 +1599,104 @@ body: |
%0.sub9:vreg_512_align2 = COPY %0.sub8
bb.1:
+ successors: %bb.1(0x40000000), %bb.2(0x40000000)
liveins: $vcc
- undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %2:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
- %3:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
- %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %3.sub4_sub5, %3.sub8_sub9, undef %6:vreg_512_align2, 0, 0, 0, implicit $mode, implicit $exec
+ undef %2.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ early-clobber %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %2, 0, 0, 0, implicit $mode, implicit $exec
+ early-clobber %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
+ %4.sub0_sub1_sub2_sub3:vreg_512_align2 = V_MFMA_F32_4X4X4F16_vgprcd_e64 %4.sub0_sub1, %1, 0, 0, 0, 0, implicit $mode, implicit $exec
+
S_CBRANCH_VCCNZ %bb.1, implicit $vcc
S_BRANCH %bb.2
bb.2:
- ; No VGPRs available for %0 or %4
+ S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ %6:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ GLOBAL_STORE_DWORDX4_SADDR %6, %4.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %6, %4.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %6, %4.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %6, %4.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ S_ENDPGM 0
+
+...
+
+---
+name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_has_untied_user
+tracksRegLiveness: true
+machineFunctionInfo:
+ isEntryFunction: true
+ stackPtrOffsetReg: '$sgpr32'
+ occupancy: 10
+ sgprForEXECCopy: '$sgpr100_sgpr101'
+body: |
+ ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_has_untied_user
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0
+ ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0
+ ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0
+ ; CHECK-NEXT: renamable $vgpr16_vgpr17 = COPY killed renamable $sgpr0_sgpr1
+ ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: liveins: $vcc, $vgpr16_vgpr17
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vgpr0_vgpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ ; CHECK-NEXT: renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: early-clobber renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ ; CHECK-NEXT: S_BRANCH %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: liveins: $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33:0x00000000FFFFFFFF
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: S_ENDPGM 0
+ bb.0:
+ S_NOP 0, implicit-def $agpr0
+ renamable $sgpr0 = S_MOV_B32 0
+ undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec
+ renamable $sgpr1 = COPY renamable $sgpr0
+ %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1
+ renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ %0.sub9:vreg_512_align2 = COPY %0.sub8
+
+ bb.1:
+ liveins: $vcc
+
+ undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
+ %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
+ S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ S_BRANCH %bb.2
+
+ bb.2:
+ ; No VGPRs available for %0
S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
@@ -889,10 +1714,8 @@ body: |
...
-# Chain of 2 untied cases, but the second mfma is a different size and
-# uses a subregister.
---
-name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_subreg
+name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_has_untied_user_with_vgpr_use
tracksRegLiveness: true
machineFunctionInfo:
isEntryFunction: true
@@ -900,7 +1723,7 @@ machineFunctionInfo:
occupancy: 10
sgprForEXECCopy: '$sgpr100_sgpr101'
body: |
- ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_subreg
+ ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_has_untied_user_with_vgpr_use
; CHECK: bb.0:
; CHECK-NEXT: successors: %bb.1(0x80000000)
; CHECK-NEXT: {{ $}}
@@ -908,26 +1731,27 @@ body: |
; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0
; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec
; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0
- ; CHECK-NEXT: renamable $vgpr18_vgpr19 = COPY killed renamable $sgpr0_sgpr1
+ ; CHECK-NEXT: renamable $vgpr16_vgpr17 = COPY killed renamable $sgpr0_sgpr1
; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; CHECK-NEXT: liveins: $vcc, $vgpr18_vgpr19
+ ; CHECK-NEXT: liveins: $vcc, $vgpr16_vgpr17
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $vgpr16_vgpr17 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1)
- ; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 0, 0, 0, implicit $mode, implicit $exec
- ; CHECK-NEXT: renamable $vgpr0_vgpr1_vgpr2_vgpr3 = V_MFMA_F32_16X16X16F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, killed $vgpr2_vgpr3_vgpr4_vgpr5, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: renamable $vgpr0_vgpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ ; CHECK-NEXT: renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: early-clobber renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 27983881 /* reguse:VReg_512_Align2 */, renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33
; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc
; CHECK-NEXT: S_BRANCH %bb.2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
- ; CHECK-NEXT: liveins: $vgpr0_vgpr1_vgpr2_vgpr3
+ ; CHECK-NEXT: liveins: $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33:0x00000000FFFFFFFF
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3 = COPY killed renamable $vgpr0_vgpr1_vgpr2_vgpr3
; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33
; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
@@ -935,7 +1759,10 @@ body: |
; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec
- ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
; CHECK-NEXT: S_ENDPGM 0
bb.0:
S_NOP 0, implicit-def $agpr0
@@ -949,14 +1776,15 @@ body: |
bb.1:
liveins: $vcc
- undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %2:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
- %3:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
- %4:vreg_128_align2 = V_MFMA_F32_16X16X16F16_vgprcd_e64 %1, %1, %3.sub2_sub3_sub4_sub5, 0, 0, 0, implicit $mode, implicit $exec
+ undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
+ %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
+ INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 27983881 /* reguse:VReg_512_Align2 */, %4
S_CBRANCH_VCCNZ %bb.1, implicit $vcc
S_BRANCH %bb.2
bb.2:
- ; No VGPRs available for %0 or %4
+ ; No VGPRs available for %0
S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
@@ -966,13 +1794,16 @@ body: |
S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
%5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
- GLOBAL_STORE_DWORDX4_SADDR %5, %4, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
S_ENDPGM 0
...
---
-name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_same_subreg
+name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_has_tied_user
tracksRegLiveness: true
machineFunctionInfo:
isEntryFunction: true
@@ -980,32 +1811,115 @@ machineFunctionInfo:
occupancy: 10
sgprForEXECCopy: '$sgpr100_sgpr101'
body: |
- ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_same_subreg
+ ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_has_tied_user
; CHECK: bb.0:
; CHECK-NEXT: successors: %bb.1(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: S_NOP 0, implicit-def $agpr0
; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0
- ; CHECK-NEXT: renamable $vgpr10 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0
+ ; CHECK-NEXT: renamable $vgpr2_vgpr3 = COPY killed renamable $sgpr0_sgpr1
+ ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: liveins: $vcc, $vgpr2_vgpr3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vgpr0_vgpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ ; CHECK-NEXT: early-clobber renamable $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr2_vgpr3, $vgpr2_vgpr3, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: renamable $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 $vgpr2_vgpr3, $vgpr2_vgpr3, killed $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ ; CHECK-NEXT: S_BRANCH %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: liveins: $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19:0x00000000FFFFFFFF
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: S_ENDPGM 0
+ bb.0:
+ S_NOP 0, implicit-def $agpr0
+ renamable $sgpr0 = S_MOV_B32 0
+ undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec
+ renamable $sgpr1 = COPY renamable $sgpr0
+ %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1
+ renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ %0.sub9:vreg_512_align2 = COPY %0.sub8
+
+ bb.1:
+ liveins: $vcc
+
+ undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
+ %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %4, 0, 0, 0, implicit $mode, implicit $exec
+ S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ S_BRANCH %bb.2
+
+ bb.2:
+ ; No VGPRs available for %0
+ S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ S_ENDPGM 0
+
+...
+
+# Non-mac variant, src2 is an immediate.
+---
+name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_imm_src2
+tracksRegLiveness: true
+machineFunctionInfo:
+ isEntryFunction: true
+ stackPtrOffsetReg: '$sgpr32'
+ occupancy: 10
+ sgprForEXECCopy: '$sgpr100_sgpr101'
+body: |
+ ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_imm_src2
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0
+ ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0
+ ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec
; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0
; CHECK-NEXT: renamable $vgpr0_vgpr1 = COPY killed renamable $sgpr0_sgpr1
; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
- ; CHECK-NEXT: renamable $vgpr11 = COPY renamable $vgpr10
+ ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
; CHECK-NEXT: liveins: $vcc, $vgpr0_vgpr1
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5 = GLOBAL_LOAD_DWORDX4 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s128), addrspace 1)
- ; CHECK-NEXT: renamable $vgpr6_vgpr7_vgpr8_vgpr9 = GLOBAL_LOAD_DWORDX4 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s128), addrspace 1)
- ; CHECK-NEXT: renamable $vgpr10_vgpr11_vgpr12_vgpr13 = GLOBAL_LOAD_DWORDX4 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s128), addrspace 1)
- ; CHECK-NEXT: renamable $vgpr14_vgpr15_vgpr16_vgpr17 = GLOBAL_LOAD_DWORDX4 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s128), addrspace 1)
- ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 $vgpr0_vgpr1, $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: early-clobber renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr0_vgpr1, $vgpr0_vgpr1, 0, 0, 0, 0, implicit $mode, implicit $exec
; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc
; CHECK-NEXT: S_BRANCH %bb.2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
- ; CHECK-NEXT: liveins: $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33:0x00000000FFFFFFFF
+ ; CHECK-NEXT: liveins: $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17:0x00000000FFFFFFFF
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17
; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
@@ -1025,20 +1939,16 @@ body: |
bb.0:
S_NOP 0, implicit-def $agpr0
renamable $sgpr0 = S_MOV_B32 0
- undef %0.sub8:vreg_1024_align2 = V_MOV_B32_e32 0, implicit $exec
+ undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec
renamable $sgpr1 = COPY renamable $sgpr0
%1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1
renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
- %0.sub9:vreg_1024_align2 = COPY %0.sub8
+ %0.sub9:vreg_512_align2 = COPY %0.sub8
bb.1:
liveins: $vcc
- %0.sub0_sub1_sub2_sub3:vreg_1024_align2 = GLOBAL_LOAD_DWORDX4 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s128), addrspace 1)
- %0.sub4_sub5_sub6_sub7:vreg_1024_align2 = GLOBAL_LOAD_DWORDX4 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s128), addrspace 1)
- %0.sub8_sub9_sub10_sub11:vreg_1024_align2 = GLOBAL_LOAD_DWORDX4 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s128), addrspace 1)
- %0.sub12_sub13_sub14_sub15:vreg_1024_align2 = GLOBAL_LOAD_DWORDX4 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s128), addrspace 1)
- %0.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7_sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15:vreg_1024_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %0.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7_sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15, 0, 0, 0, implicit $mode, implicit $exec
+ %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, 0, 0, 0, 0, implicit $mode, implicit $exec
S_CBRANCH_VCCNZ %bb.1, implicit $vcc
S_BRANCH %bb.2
diff --git a/llvm/test/CodeGen/AMDGPU/integer-canonicalizing-src-modifiers.ll b/llvm/test/CodeGen/AMDGPU/integer-canonicalizing-src-modifiers.ll
new file mode 100644
index 0000000..7b356d2
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/integer-canonicalizing-src-modifiers.ll
@@ -0,0 +1,237 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx700 < %s | FileCheck -check-prefixes=GCN,GFX7 %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck -check-prefixes=GCN,GFX9 %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-TRUE16 %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-FAKE16 %s
+
+; Demonstrate that the fold which converts integer bitmasks affecting the sign bit into
+; source modifiers (srcmods) is not applied to canonicalizing instructions.
+
+define double @v_uitofp_i32_to_f64_abs(i32 %arg0) nounwind {
+; GCN-LABEL: v_uitofp_i32_to_f64_abs:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
+; GCN-NEXT: v_cvt_f64_u32_e32 v[0:1], v0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: v_uitofp_i32_to_f64_abs:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_cvt_f64_u32_e32 v[0:1], v0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %arg0.abs = and i32 %arg0, u0x7fffffff
+ %cvt = uitofp i32 %arg0.abs to double
+ ret double %cvt
+}
+
+define double @v_uitofp_i32_to_f64_neg(i32 %arg0) nounwind {
+; GCN-LABEL: v_uitofp_i32_to_f64_neg:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_and_b32_e32 v0, 0x80000000, v0
+; GCN-NEXT: v_cvt_f64_u32_e32 v[0:1], v0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: v_uitofp_i32_to_f64_neg:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v0, 0x80000000, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_cvt_f64_u32_e32 v[0:1], v0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %arg0.neg = and i32 %arg0, u0x80000000
+ %cvt = uitofp i32 %arg0.neg to double
+ ret double %cvt
+}
+
+define double @s_uitofp_i32_to_f64_abs(i32 inreg %arg0) nounwind {
+; GCN-LABEL: s_uitofp_i32_to_f64_abs:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_bitset0_b32 s16, 31
+; GCN-NEXT: v_cvt_f64_u32_e32 v[0:1], s16
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_uitofp_i32_to_f64_abs:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_bitset0_b32 s0, 31
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %arg0.abs = and i32 %arg0, u0x7fffffff
+ %cvt = uitofp i32 %arg0.abs to double
+ ret double %cvt
+}
+
+define double @s_uitofp_i32_to_f64_neg(i32 inreg %arg0) nounwind {
+; GCN-LABEL: s_uitofp_i32_to_f64_neg:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_and_b32 s4, s16, 0x80000000
+; GCN-NEXT: v_cvt_f64_u32_e32 v[0:1], s4
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_uitofp_i32_to_f64_neg:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_and_b32 s0, s0, 0x80000000
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %arg0.neg = and i32 %arg0, u0x80000000
+ %cvt = uitofp i32 %arg0.neg to double
+ ret double %cvt
+}
+
+define half @v_uitofp_i16_to_f16_abs(i16 %arg0) nounwind {
+; GFX7-LABEL: v_uitofp_i16_to_f16_abs:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v0, 0x7fff, v0
+; GFX7-NEXT: v_cvt_f32_u32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_uitofp_i16_to_f16_abs:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_and_b32_e32 v0, 0x7fff, v0
+; GFX9-NEXT: v_cvt_f16_u16_e32 v0, v0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-TRUE16-LABEL: v_uitofp_i16_to_f16_abs:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0x7fff, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cvt_f16_u16_e32 v0.l, v0.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: v_uitofp_i16_to_f16_abs:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cvt_f16_u16_e32 v0, v0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+ %arg0.abs = and i16 %arg0, u0x7fff
+ %cvt = uitofp i16 %arg0.abs to half
+ ret half %cvt
+}
+
+define half @v_uitofp_i16_to_f16_neg(i16 %arg0) nounwind {
+; GFX7-LABEL: v_uitofp_i16_to_f16_neg:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v0, 0x8000, v0
+; GFX7-NEXT: v_cvt_f32_u32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_uitofp_i16_to_f16_neg:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff8000, v0
+; GFX9-NEXT: v_cvt_f16_u16_e32 v0, v0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-TRUE16-LABEL: v_uitofp_i16_to_f16_neg:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0x8000, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cvt_f16_u16_e32 v0.l, v0.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: v_uitofp_i16_to_f16_neg:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff8000, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cvt_f16_u16_e32 v0, v0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+ %arg0.neg = and i16 %arg0, u0x8000
+ %cvt = uitofp i16 %arg0.neg to half
+ ret half %cvt
+}
+
+define half @s_uitofp_i16_to_f16_abs(i16 inreg %arg0) nounwind {
+; GFX7-LABEL: s_uitofp_i16_to_f16_abs:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: s_and_b32 s4, s16, 0x7fff
+; GFX7-NEXT: v_cvt_f32_u32_e32 v0, s4
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_uitofp_i16_to_f16_abs:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_and_b32 s4, s16, 0x7fff
+; GFX9-NEXT: v_cvt_f16_u16_e32 v0, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-TRUE16-LABEL: s_uitofp_i16_to_f16_abs:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s0, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_cvt_f16_u16_e32 v0.l, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: s_uitofp_i16_to_f16_abs:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: v_cvt_f16_u16_e32 v0, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+ %arg0.abs = and i16 %arg0, u0x7fff
+ %cvt = uitofp i16 %arg0.abs to half
+ ret half %cvt
+}
+
+define half @s_uitofp_i16_to_f16_neg(i16 inreg %arg0) nounwind {
+; GFX7-LABEL: s_uitofp_i16_to_f16_neg:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: s_and_b32 s4, s16, 0x8000
+; GFX7-NEXT: v_cvt_f32_u32_e32 v0, s4
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_uitofp_i16_to_f16_neg:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_and_b32 s4, s16, 0x8000
+; GFX9-NEXT: v_cvt_f16_u16_e32 v0, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-TRUE16-LABEL: s_uitofp_i16_to_f16_neg:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s0, 0x8000
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_cvt_f16_u16_e32 v0.l, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: s_uitofp_i16_to_f16_neg:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0x8000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: v_cvt_f16_u16_e32 v0, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+ %arg0.neg = and i16 %arg0, u0x8000
+ %cvt = uitofp i16 %arg0.neg to half
+ ret half %cvt
+}
+
diff --git a/llvm/test/CodeGen/AMDGPU/integer-select-src-modifiers.ll b/llvm/test/CodeGen/AMDGPU/integer-select-src-modifiers.ll
new file mode 100644
index 0000000..b3c7ac8
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/integer-select-src-modifiers.ll
@@ -0,0 +1,1011 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx700 < %s | FileCheck -check-prefixes=GCN,GFX7 %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck -check-prefixes=GCN,GFX9 %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-TRUE16 %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-FAKE16 %s
+
+define i32 @fneg_select_i32_1(i32 %cond, i32 %a, i32 %b) {
+; GCN-LABEL: fneg_select_i32_1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT: v_cndmask_b32_e64 v0, v2, -v1, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: fneg_select_i32_1:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v2, -v1, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = xor i32 %a, u0x80000000
+ %cmp = icmp eq i32 %cond, zeroinitializer
+ %select = select i1 %cmp, i32 %neg.a, i32 %b
+ ret i32 %select
+}
+
+define i32 @fneg_select_i32_2(i32 %cond, i32 %a, i32 %b) {
+; GCN-LABEL: fneg_select_i32_2:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT: v_cndmask_b32_e64 v0, -v1, v2, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: fneg_select_i32_2:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX11-NEXT: v_cndmask_b32_e64 v0, -v1, v2, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = xor i32 %a, u0x80000000
+ %cmp = icmp eq i32 %cond, zeroinitializer
+ %select = select i1 %cmp, i32 %b, i32 %neg.a
+ ret i32 %select
+}
+
+define i32 @fneg_select_i32_both(i32 %cond, i32 %a, i32 %b) {
+; GCN-LABEL: fneg_select_i32_both:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT: v_cndmask_b32_e64 v0, -v2, -v1, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: fneg_select_i32_both:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX11-NEXT: v_cndmask_b32_e64 v0, -v2, -v1, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = xor i32 %a, u0x80000000
+ %neg.b = xor i32 %b, u0x80000000
+ %cmp = icmp eq i32 %cond, zeroinitializer
+ %select = select i1 %cmp, i32 %neg.a, i32 %neg.b
+ ret i32 %select
+}
+
+define i32 @fneg_1_fabs_2_select_i32(i32 %cond, i32 %a, i32 %b) {
+; GCN-LABEL: fneg_1_fabs_2_select_i32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT: v_cndmask_b32_e64 v0, |v1|, -v1, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: fneg_1_fabs_2_select_i32:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX11-NEXT: v_cndmask_b32_e64 v0, |v1|, -v1, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = xor i32 %a, u0x80000000
+ %abs.b = and i32 %a, u0x7fffffff
+ %cmp = icmp eq i32 %cond, zeroinitializer
+ %select = select i1 %cmp, i32 %neg.a, i32 %abs.b
+ ret i32 %select
+}
+
+define i32 @s_fneg_select_i32_1(i32 inreg %cond, i32 inreg %a, i32 inreg %b) {
+; GCN-LABEL: s_fneg_select_i32_1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s4, s17, 0x80000000
+; GCN-NEXT: s_cmp_eq_u32 s16, 0
+; GCN-NEXT: s_cselect_b32 s4, s4, s18
+; GCN-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_fneg_select_i32_1:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_xor_b32 s1, s1, 0x80000000
+; GFX11-NEXT: s_cmp_eq_u32 s0, 0
+; GFX11-NEXT: s_cselect_b32 s0, s1, s2
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = xor i32 %a, u0x80000000
+ %cmp = icmp eq i32 %cond, zeroinitializer
+ %select = select i1 %cmp, i32 %neg.a, i32 %b
+ ret i32 %select
+}
+
+define i32 @s_fneg_1_fabs_2_select_i32(i32 inreg %cond, i32 %a, i32 %b) {
+; GCN-LABEL: s_fneg_1_fabs_2_select_i32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_cmp_eq_u32 s16, 0
+; GCN-NEXT: s_cselect_b64 s[4:5], -1, 0
+; GCN-NEXT: v_cndmask_b32_e64 v0, |v0|, -v0, s[4:5]
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_fneg_1_fabs_2_select_i32:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_cmp_eq_u32 s0, 0
+; GFX11-NEXT: s_cselect_b32 s0, -1, 0
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: v_cndmask_b32_e64 v0, |v0|, -v0, s0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = xor i32 %a, u0x80000000
+ %abs.b = and i32 %a, u0x7fffffff
+ %cmp = icmp eq i32 %cond, zeroinitializer
+ %select = select i1 %cmp, i32 %neg.a, i32 %abs.b
+ ret i32 %select
+}
+
+define <2 x i32> @fneg_select_v2i32_1(<2 x i32> %cond, <2 x i32> %a, <2 x i32> %b) {
+; GCN-LABEL: fneg_select_v2i32_1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT: v_cndmask_b32_e64 v0, v4, -v2, vcc
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
+; GCN-NEXT: v_cndmask_b32_e64 v1, v5, -v3, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: fneg_select_v2i32_1:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v4, -v2, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v5, -v3, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = xor <2 x i32> %a, splat (i32 u0x80000000)
+ %cmp = icmp eq <2 x i32> %cond, zeroinitializer
+ %select = select <2 x i1> %cmp, <2 x i32> %neg.a, <2 x i32> %b
+ ret <2 x i32> %select
+}
+
+define <2 x i32> @fneg_select_v2i32_2(<2 x i32> %cond, <2 x i32> %a, <2 x i32> %b) {
+; GCN-LABEL: fneg_select_v2i32_2:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT: v_cndmask_b32_e64 v0, -v2, v4, vcc
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
+; GCN-NEXT: v_cndmask_b32_e64 v1, -v3, v5, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: fneg_select_v2i32_2:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX11-NEXT: v_cndmask_b32_e64 v0, -v2, v4, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1
+; GFX11-NEXT: v_cndmask_b32_e64 v1, -v3, v5, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = xor <2 x i32> %a, splat (i32 u0x80000000)
+ %cmp = icmp eq <2 x i32> %cond, zeroinitializer
+ %select = select <2 x i1> %cmp, <2 x i32> %b, <2 x i32> %neg.a
+ ret <2 x i32> %select
+}
+
+define i32 @fabs_select_i32_1(i32 %cond, i32 %a, i32 %b) {
+; GCN-LABEL: fabs_select_i32_1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT: v_cndmask_b32_e64 v0, v2, |v1|, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: fabs_select_i32_1:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v2, |v1|, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = and i32 %a, u0x7fffffff
+ %cmp = icmp eq i32 %cond, zeroinitializer
+ %select = select i1 %cmp, i32 %neg.a, i32 %b
+ ret i32 %select
+}
+
+define i32 @fabs_select_i32_2(i32 %cond, i32 %a, i32 %b) {
+; GCN-LABEL: fabs_select_i32_2:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT: v_cndmask_b32_e64 v0, |v1|, v2, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: fabs_select_i32_2:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX11-NEXT: v_cndmask_b32_e64 v0, |v1|, v2, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = and i32 %a, u0x7fffffff
+ %cmp = icmp eq i32 %cond, zeroinitializer
+ %select = select i1 %cmp, i32 %b, i32 %neg.a
+ ret i32 %select
+}
+
+define <2 x i32> @fneg_1_fabs_2_select_v2i32(<2 x i32> %cond, <2 x i32> %a, <2 x i32> %b) {
+; GCN-LABEL: fneg_1_fabs_2_select_v2i32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT: v_cndmask_b32_e64 v0, -v2, |v2|, vcc
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
+; GCN-NEXT: v_cndmask_b32_e64 v1, -v3, |v3|, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: fneg_1_fabs_2_select_v2i32:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX11-NEXT: v_cndmask_b32_e64 v0, -v2, |v2|, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1
+; GFX11-NEXT: v_cndmask_b32_e64 v1, -v3, |v3|, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = xor <2 x i32> %a, splat (i32 u0x80000000)
+ %abs.b = and <2 x i32> %a, splat (i32 u0x7fffffff)
+ %cmp = icmp eq <2 x i32> %cond, zeroinitializer
+ %select = select <2 x i1> %cmp, <2 x i32> %abs.b, <2 x i32> %neg.a
+ ret <2 x i32> %select
+}
+
+define i32 @fneg_fabs_select_i32_1(i32 %cond, i32 %a, i32 %b) {
+; GCN-LABEL: fneg_fabs_select_i32_1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT: v_cndmask_b32_e64 v0, v2, -|v1|, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: fneg_fabs_select_i32_1:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v2, -|v1|, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = or i32 %a, u0x80000000
+ %cmp = icmp eq i32 %cond, zeroinitializer
+ %select = select i1 %cmp, i32 %neg.a, i32 %b
+ ret i32 %select
+}
+
+define i32 @fneg_fabs_select_i32_2(i32 %cond, i32 %a, i32 %b) {
+; GCN-LABEL: fneg_fabs_select_i32_2:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT: v_cndmask_b32_e64 v0, -|v1|, v2, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: fneg_fabs_select_i32_2:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX11-NEXT: v_cndmask_b32_e64 v0, -|v1|, v2, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = or i32 %a, u0x80000000
+ %cmp = icmp eq i32 %cond, zeroinitializer
+ %select = select i1 %cmp, i32 %b, i32 %neg.a
+ ret i32 %select
+}
+
+define <2 x i32> @fneg_fabs_select_v2i32_1(<2 x i32> %cond, <2 x i32> %a, <2 x i32> %b) {
+; GCN-LABEL: fneg_fabs_select_v2i32_1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT: v_cndmask_b32_e64 v0, v4, -|v2|, vcc
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
+; GCN-NEXT: v_cndmask_b32_e64 v1, v5, -|v3|, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: fneg_fabs_select_v2i32_1:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v4, -|v2|, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v5, -|v3|, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = or <2 x i32> %a, splat (i32 u0x80000000)
+ %cmp = icmp eq <2 x i32> %cond, zeroinitializer
+ %select = select <2 x i1> %cmp, <2 x i32> %neg.a, <2 x i32> %b
+ ret <2 x i32> %select
+}
+
+define <2 x i32> @fneg_fabs_select_v2i32_2(<2 x i32> %cond, <2 x i32> %a, <2 x i32> %b) {
+; GCN-LABEL: fneg_fabs_select_v2i32_2:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT: v_cndmask_b32_e64 v0, -|v2|, v4, vcc
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
+; GCN-NEXT: v_cndmask_b32_e64 v1, -|v3|, v5, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: fneg_fabs_select_v2i32_2:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX11-NEXT: v_cndmask_b32_e64 v0, -|v2|, v4, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1
+; GFX11-NEXT: v_cndmask_b32_e64 v1, -|v3|, v5, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = or <2 x i32> %a, splat (i32 u0x80000000)
+ %cmp = icmp eq <2 x i32> %cond, zeroinitializer
+ %select = select <2 x i1> %cmp, <2 x i32> %b, <2 x i32> %neg.a
+ ret <2 x i32> %select
+}
+
+define <2 x i32> @s_fneg_select_v2i32_1(<2 x i32> inreg %cond, <2 x i32> inreg %a, <2 x i32> inreg %b) {
+; GCN-LABEL: s_fneg_select_v2i32_1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s4, s19, 0x80000000
+; GCN-NEXT: s_xor_b32 s5, s18, 0x80000000
+; GCN-NEXT: s_cmp_eq_u32 s16, 0
+; GCN-NEXT: s_cselect_b32 s5, s5, s20
+; GCN-NEXT: s_cmp_eq_u32 s17, 0
+; GCN-NEXT: s_cselect_b32 s4, s4, s21
+; GCN-NEXT: v_mov_b32_e32 v0, s5
+; GCN-NEXT: v_mov_b32_e32 v1, s4
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_fneg_select_v2i32_1:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_xor_b32 s3, s3, 0x80000000
+; GFX11-NEXT: s_xor_b32 s2, s2, 0x80000000
+; GFX11-NEXT: s_cmp_eq_u32 s0, 0
+; GFX11-NEXT: s_cselect_b32 s0, s2, s16
+; GFX11-NEXT: s_cmp_eq_u32 s1, 0
+; GFX11-NEXT: s_cselect_b32 s1, s3, s17
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = xor <2 x i32> %a, splat (i32 u0x80000000)
+ %cmp = icmp eq <2 x i32> %cond, zeroinitializer
+ %select = select <2 x i1> %cmp, <2 x i32> %neg.a, <2 x i32> %b
+ ret <2 x i32> %select
+}
+
+define <2 x i32> @s_fneg_fabs_select_v2i32_2(<2 x i32> inreg %cond, <2 x i32> inreg %a, <2 x i32> inreg %b) {
+; GCN-LABEL: s_fneg_fabs_select_v2i32_2:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_bitset1_b32 s19, 31
+; GCN-NEXT: s_bitset1_b32 s18, 31
+; GCN-NEXT: s_cmp_eq_u32 s16, 0
+; GCN-NEXT: s_cselect_b32 s4, s20, s18
+; GCN-NEXT: s_cmp_eq_u32 s17, 0
+; GCN-NEXT: s_cselect_b32 s5, s21, s19
+; GCN-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NEXT: v_mov_b32_e32 v1, s5
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_fneg_fabs_select_v2i32_2:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_bitset1_b32 s3, 31
+; GFX11-NEXT: s_bitset1_b32 s2, 31
+; GFX11-NEXT: s_cmp_eq_u32 s0, 0
+; GFX11-NEXT: s_cselect_b32 s0, s16, s2
+; GFX11-NEXT: s_cmp_eq_u32 s1, 0
+; GFX11-NEXT: s_cselect_b32 s1, s17, s3
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = or <2 x i32> %a, splat (i32 u0x80000000)
+ %cmp = icmp eq <2 x i32> %cond, zeroinitializer
+ %select = select <2 x i1> %cmp, <2 x i32> %b, <2 x i32> %neg.a
+ ret <2 x i32> %select
+}
+
+define i64 @fneg_select_i64_1(i64 %cond, i64 %a, i64 %b) {
+; GCN-LABEL: fneg_select_i64_1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GCN-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; GCN-NEXT: v_cndmask_b32_e64 v1, v5, -v3, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: fneg_select_i64_1:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1]
+; GFX11-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v5, -v3, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = xor i64 %a, u0x8000000000000000
+ %cmp = icmp eq i64 %cond, zeroinitializer
+ %select = select i1 %cmp, i64 %neg.a, i64 %b
+ ret i64 %select
+}
+
+define i64 @fneg_select_i64_2(i64 %cond, i64 %a, i64 %b) {
+; GCN-LABEL: fneg_select_i64_2:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GCN-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
+; GCN-NEXT: v_cndmask_b32_e64 v1, -v3, v5, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: fneg_select_i64_2:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1]
+; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v1, -v3, v5, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = xor i64 %a, u0x8000000000000000
+ %cmp = icmp eq i64 %cond, zeroinitializer
+ %select = select i1 %cmp, i64 %b, i64 %neg.a
+ ret i64 %select
+}
+
+define i64 @fneg_1_fabs_2_select_i64(i64 %cond, i64 %a, i64 %b) {
+; GCN-LABEL: fneg_1_fabs_2_select_i64:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GCN-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; GCN-NEXT: v_cndmask_b32_e64 v1, |v5|, -v3, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: fneg_1_fabs_2_select_i64:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1]
+; GFX11-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v1, |v5|, -v3, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = xor i64 %a, u0x8000000000000000
+ %abs.b = and i64 %b, u0x7fffffffffffffff
+ %cmp = icmp eq i64 %cond, zeroinitializer
+ %select = select i1 %cmp, i64 %neg.a, i64 %abs.b
+ ret i64 %select
+}
+
+define i64 @fabs_select_i64_1(i64 %cond, i64 %a, i64 %b) {
+; GCN-LABEL: fabs_select_i64_1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GCN-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; GCN-NEXT: v_cndmask_b32_e64 v1, v5, |v3|, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: fabs_select_i64_1:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1]
+; GFX11-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v5, |v3|, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = and i64 %a, u0x7fffffffffffffff
+ %cmp = icmp eq i64 %cond, zeroinitializer
+ %select = select i1 %cmp, i64 %neg.a, i64 %b
+ ret i64 %select
+}
+
+define i64 @fabs_select_i64_2(i64 %cond, i64 %a, i64 %b) {
+; GCN-LABEL: fabs_select_i64_2:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GCN-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
+; GCN-NEXT: v_cndmask_b32_e64 v1, |v3|, v5, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: fabs_select_i64_2:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1]
+; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v1, |v3|, v5, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = and i64 %a, u0x7fffffffffffffff
+ %cmp = icmp eq i64 %cond, zeroinitializer
+ %select = select i1 %cmp, i64 %b, i64 %neg.a
+ ret i64 %select
+}
+
+define i64 @fneg_fabs_select_i64_1(i64 %cond, i64 %a, i64 %b) {
+; GCN-LABEL: fneg_fabs_select_i64_1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GCN-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; GCN-NEXT: v_cndmask_b32_e64 v1, v5, -|v3|, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: fneg_fabs_select_i64_1:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1]
+; GFX11-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v5, -|v3|, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = or i64 %a, u0x8000000000000000
+ %cmp = icmp eq i64 %cond, zeroinitializer
+ %select = select i1 %cmp, i64 %neg.a, i64 %b
+ ret i64 %select
+}
+
+define i64 @fneg_fabs_select_i64_2(i64 %cond, i64 %a, i64 %b) {
+; GCN-LABEL: fneg_fabs_select_i64_2:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GCN-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
+; GCN-NEXT: v_cndmask_b32_e64 v1, -|v3|, v5, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: fneg_fabs_select_i64_2:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1]
+; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v1, -|v3|, v5, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = or i64 %a, u0x8000000000000000
+ %cmp = icmp eq i64 %cond, zeroinitializer
+ %select = select i1 %cmp, i64 %b, i64 %neg.a
+ ret i64 %select
+}
+
+define i64 @s_fneg_select_i64_1(i64 inreg %cond, i64 inreg %a, i64 inreg %b) {
+; GFX7-LABEL: s_fneg_select_i64_1:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[16:17], 0
+; GFX7-NEXT: s_xor_b32 s6, s19, 0x80000000
+; GFX7-NEXT: s_and_b64 s[4:5], s[4:5], exec
+; GFX7-NEXT: s_cselect_b32 s4, s18, s20
+; GFX7-NEXT: s_cselect_b32 s5, s6, s21
+; GFX7-NEXT: v_mov_b32_e32 v0, s4
+; GFX7-NEXT: v_mov_b32_e32 v1, s5
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_fneg_select_i64_1:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_xor_b32 s4, s19, 0x80000000
+; GFX9-NEXT: s_cmp_eq_u64 s[16:17], 0
+; GFX9-NEXT: s_cselect_b32 s5, s18, s20
+; GFX9-NEXT: s_cselect_b32 s4, s4, s21
+; GFX9-NEXT: v_mov_b32_e32 v0, s5
+; GFX9-NEXT: v_mov_b32_e32 v1, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_fneg_select_i64_1:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_xor_b32 s3, s3, 0x80000000
+; GFX11-NEXT: s_cmp_eq_u64 s[0:1], 0
+; GFX11-NEXT: s_cselect_b32 s0, s2, s16
+; GFX11-NEXT: s_cselect_b32 s1, s3, s17
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = xor i64 %a, u0x8000000000000000
+ %cmp = icmp eq i64 %cond, zeroinitializer
+ %select = select i1 %cmp, i64 %neg.a, i64 %b
+ ret i64 %select
+}
+
+define i64 @s_fneg_select_i64_2(i64 inreg %cond, i64 inreg %a, i64 inreg %b) {
+; GFX7-LABEL: s_fneg_select_i64_2:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[16:17], 0
+; GFX7-NEXT: s_xor_b32 s6, s19, 0x80000000
+; GFX7-NEXT: s_and_b64 s[4:5], s[4:5], exec
+; GFX7-NEXT: s_cselect_b32 s4, s20, s18
+; GFX7-NEXT: s_cselect_b32 s5, s21, s6
+; GFX7-NEXT: v_mov_b32_e32 v0, s4
+; GFX7-NEXT: v_mov_b32_e32 v1, s5
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_fneg_select_i64_2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_xor_b32 s4, s19, 0x80000000
+; GFX9-NEXT: s_cmp_eq_u64 s[16:17], 0
+; GFX9-NEXT: s_cselect_b32 s5, s20, s18
+; GFX9-NEXT: s_cselect_b32 s4, s21, s4
+; GFX9-NEXT: v_mov_b32_e32 v0, s5
+; GFX9-NEXT: v_mov_b32_e32 v1, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_fneg_select_i64_2:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_xor_b32 s3, s3, 0x80000000
+; GFX11-NEXT: s_cmp_eq_u64 s[0:1], 0
+; GFX11-NEXT: s_cselect_b32 s0, s16, s2
+; GFX11-NEXT: s_cselect_b32 s1, s17, s3
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = xor i64 %a, u0x8000000000000000
+ %cmp = icmp eq i64 %cond, zeroinitializer
+ %select = select i1 %cmp, i64 %b, i64 %neg.a
+ ret i64 %select
+}
+
+define i64 @s_fneg_1_fabs_2_select_i64(i64 inreg %cond, i64 inreg %a, i64 inreg %b) {
+; GFX7-LABEL: s_fneg_1_fabs_2_select_i64:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[16:17], 0
+; GFX7-NEXT: s_xor_b32 s6, s19, 0x80000000
+; GFX7-NEXT: s_bitset0_b32 s21, 31
+; GFX7-NEXT: s_and_b64 s[4:5], s[4:5], exec
+; GFX7-NEXT: s_cselect_b32 s4, s18, s20
+; GFX7-NEXT: s_cselect_b32 s5, s6, s21
+; GFX7-NEXT: v_mov_b32_e32 v0, s4
+; GFX7-NEXT: v_mov_b32_e32 v1, s5
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_fneg_1_fabs_2_select_i64:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_xor_b32 s4, s19, 0x80000000
+; GFX9-NEXT: s_bitset0_b32 s21, 31
+; GFX9-NEXT: s_cmp_eq_u64 s[16:17], 0
+; GFX9-NEXT: s_cselect_b32 s5, s18, s20
+; GFX9-NEXT: s_cselect_b32 s4, s4, s21
+; GFX9-NEXT: v_mov_b32_e32 v0, s5
+; GFX9-NEXT: v_mov_b32_e32 v1, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_fneg_1_fabs_2_select_i64:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_xor_b32 s3, s3, 0x80000000
+; GFX11-NEXT: s_bitset0_b32 s17, 31
+; GFX11-NEXT: s_cmp_eq_u64 s[0:1], 0
+; GFX11-NEXT: s_cselect_b32 s0, s2, s16
+; GFX11-NEXT: s_cselect_b32 s1, s3, s17
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = xor i64 %a, u0x8000000000000000
+ %abs.b = and i64 %b, u0x7fffffffffffffff
+ %cmp = icmp eq i64 %cond, zeroinitializer
+ %select = select i1 %cmp, i64 %neg.a, i64 %abs.b
+ ret i64 %select
+}
+
+define i64 @s_fabs_select_i64_1(i64 inreg %cond, i64 inreg %a, i64 inreg %b) {
+; GFX7-LABEL: s_fabs_select_i64_1:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[16:17], 0
+; GFX7-NEXT: s_bitset0_b32 s19, 31
+; GFX7-NEXT: s_and_b64 s[4:5], s[4:5], exec
+; GFX7-NEXT: s_cselect_b32 s4, s18, s20
+; GFX7-NEXT: s_cselect_b32 s5, s19, s21
+; GFX7-NEXT: v_mov_b32_e32 v0, s4
+; GFX7-NEXT: v_mov_b32_e32 v1, s5
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_fabs_select_i64_1:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_bitset0_b32 s19, 31
+; GFX9-NEXT: s_cmp_eq_u64 s[16:17], 0
+; GFX9-NEXT: s_cselect_b32 s4, s18, s20
+; GFX9-NEXT: s_cselect_b32 s5, s19, s21
+; GFX9-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_fabs_select_i64_1:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_bitset0_b32 s3, 31
+; GFX11-NEXT: s_cmp_eq_u64 s[0:1], 0
+; GFX11-NEXT: s_cselect_b32 s0, s2, s16
+; GFX11-NEXT: s_cselect_b32 s1, s3, s17
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = and i64 %a, u0x7fffffffffffffff
+ %cmp = icmp eq i64 %cond, zeroinitializer
+ %select = select i1 %cmp, i64 %neg.a, i64 %b
+ ret i64 %select
+}
+
+define i64 @s_fabs_select_i64_2(i64 inreg %cond, i64 inreg %a, i64 inreg %b) {
+; GFX7-LABEL: s_fabs_select_i64_2:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[16:17], 0
+; GFX7-NEXT: s_bitset0_b32 s19, 31
+; GFX7-NEXT: s_and_b64 s[4:5], s[4:5], exec
+; GFX7-NEXT: s_cselect_b32 s4, s20, s18
+; GFX7-NEXT: s_cselect_b32 s5, s21, s19
+; GFX7-NEXT: v_mov_b32_e32 v0, s4
+; GFX7-NEXT: v_mov_b32_e32 v1, s5
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_fabs_select_i64_2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_bitset0_b32 s19, 31
+; GFX9-NEXT: s_cmp_eq_u64 s[16:17], 0
+; GFX9-NEXT: s_cselect_b32 s4, s20, s18
+; GFX9-NEXT: s_cselect_b32 s5, s21, s19
+; GFX9-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_fabs_select_i64_2:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_bitset0_b32 s3, 31
+; GFX11-NEXT: s_cmp_eq_u64 s[0:1], 0
+; GFX11-NEXT: s_cselect_b32 s0, s16, s2
+; GFX11-NEXT: s_cselect_b32 s1, s17, s3
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = and i64 %a, u0x7fffffffffffffff
+ %cmp = icmp eq i64 %cond, zeroinitializer
+ %select = select i1 %cmp, i64 %b, i64 %neg.a
+ ret i64 %select
+}
+
+define i64 @s_fneg_fabs_select_i64_1(i64 inreg %cond, i64 inreg %a, i64 inreg %b) {
+; GFX7-LABEL: s_fneg_fabs_select_i64_1:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[16:17], 0
+; GFX7-NEXT: s_bitset1_b32 s19, 31
+; GFX7-NEXT: s_and_b64 s[4:5], s[4:5], exec
+; GFX7-NEXT: s_cselect_b32 s4, s18, s20
+; GFX7-NEXT: s_cselect_b32 s5, s19, s21
+; GFX7-NEXT: v_mov_b32_e32 v0, s4
+; GFX7-NEXT: v_mov_b32_e32 v1, s5
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_fneg_fabs_select_i64_1:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_bitset1_b32 s19, 31
+; GFX9-NEXT: s_cmp_eq_u64 s[16:17], 0
+; GFX9-NEXT: s_cselect_b32 s4, s18, s20
+; GFX9-NEXT: s_cselect_b32 s5, s19, s21
+; GFX9-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_fneg_fabs_select_i64_1:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_bitset1_b32 s3, 31
+; GFX11-NEXT: s_cmp_eq_u64 s[0:1], 0
+; GFX11-NEXT: s_cselect_b32 s0, s2, s16
+; GFX11-NEXT: s_cselect_b32 s1, s3, s17
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = or i64 %a, u0x8000000000000000
+ %cmp = icmp eq i64 %cond, zeroinitializer
+ %select = select i1 %cmp, i64 %neg.a, i64 %b
+ ret i64 %select
+}
+
+define i64 @s_fneg_fabs_select_i64_2(i64 inreg %cond, i64 inreg %a, i64 inreg %b) {
+; GFX7-LABEL: s_fneg_fabs_select_i64_2:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[16:17], 0
+; GFX7-NEXT: s_bitset1_b32 s19, 31
+; GFX7-NEXT: s_and_b64 s[4:5], s[4:5], exec
+; GFX7-NEXT: s_cselect_b32 s4, s20, s18
+; GFX7-NEXT: s_cselect_b32 s5, s21, s19
+; GFX7-NEXT: v_mov_b32_e32 v0, s4
+; GFX7-NEXT: v_mov_b32_e32 v1, s5
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_fneg_fabs_select_i64_2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_bitset1_b32 s19, 31
+; GFX9-NEXT: s_cmp_eq_u64 s[16:17], 0
+; GFX9-NEXT: s_cselect_b32 s4, s20, s18
+; GFX9-NEXT: s_cselect_b32 s5, s21, s19
+; GFX9-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_fneg_fabs_select_i64_2:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_bitset1_b32 s3, 31
+; GFX11-NEXT: s_cmp_eq_u64 s[0:1], 0
+; GFX11-NEXT: s_cselect_b32 s0, s16, s2
+; GFX11-NEXT: s_cselect_b32 s1, s17, s3
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = or i64 %a, u0x8000000000000000
+ %cmp = icmp eq i64 %cond, zeroinitializer
+ %select = select i1 %cmp, i64 %b, i64 %neg.a
+ ret i64 %select
+}
+
+define i16 @fneg_select_i16_1(i16 %cond, i16 %a, i16 %b) {
+; GFX7-LABEL: fneg_select_i16_1:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_xor_b32_e32 v1, 0xffff8000, v1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: fneg_select_i16_1:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_xor_b32_e32 v1, 0xffff8000, v1
+; GFX9-NEXT: v_cmp_eq_u16_e32 vcc, 0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-TRUE16-LABEL: fneg_select_i16_1:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_xor_b16 v0.h, 0x8000, v1.l
+; GFX11-TRUE16-NEXT: v_cmp_eq_u16_e32 vcc_lo, 0, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.h, vcc_lo
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: fneg_select_i16_1:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_xor_b32_e32 v1, 0xffff8000, v1
+; GFX11-FAKE16-NEXT: v_cmp_eq_u16_e32 vcc_lo, 0, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc_lo
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = xor i16 %a, u0x8000
+ %cmp = icmp eq i16 %cond, zeroinitializer
+ %select = select i1 %cmp, i16 %neg.a, i16 %b
+ ret i16 %select
+}
+
+define i16 @fneg_select_i16_2(i16 %cond, i16 %a, i16 %b) {
+; GFX7-LABEL: fneg_select_i16_2:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_xor_b32_e32 v1, 0xffff8000, v1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: fneg_select_i16_2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_xor_b32_e32 v1, 0xffff8000, v1
+; GFX9-NEXT: v_cmp_eq_u16_e32 vcc, 0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-TRUE16-LABEL: fneg_select_i16_2:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_xor_b16 v0.h, 0x8000, v1.l
+; GFX11-TRUE16-NEXT: v_cmp_eq_u16_e32 vcc_lo, 0, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v0.h, v2.l, vcc_lo
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: fneg_select_i16_2:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_xor_b32_e32 v1, 0xffff8000, v1
+; GFX11-FAKE16-NEXT: v_cmp_eq_u16_e32 vcc_lo, 0, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = xor i16 %a, u0x8000
+ %cmp = icmp eq i16 %cond, zeroinitializer
+ %select = select i1 %cmp, i16 %b, i16 %neg.a
+ ret i16 %select
+}
+
+define i16 @fneg_select_i16_both(i16 %cond, i16 %a, i16 %b) {
+; GFX7-LABEL: fneg_select_i16_both:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
+; GFX7-NEXT: v_xor_b32_e32 v0, 0xffff8000, v0
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: fneg_select_i16_both:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u16_e32 vcc, 0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
+; GFX9-NEXT: v_xor_b32_e32 v0, 0xffff8000, v0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-TRUE16-LABEL: fneg_select_i16_both:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_eq_u16_e32 vcc_lo, 0, v0.l
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v1.l, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_xor_b16 v0.l, 0x8000, v0.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: fneg_select_i16_both:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_eq_u16_e32 vcc_lo, 0, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_xor_b32_e32 v0, 0xffff8000, v0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = xor i16 %a, u0x8000
+ %neg.b = xor i16 %b, u0x8000
+ %cmp = icmp eq i16 %cond, zeroinitializer
+ %select = select i1 %cmp, i16 %neg.a, i16 %neg.b
+ ret i16 %select
+}
+
+define i16 @fneg_1_fabs_2_select_i16(i16 %cond, i16 %a, i16 %b) {
+; GFX7-LABEL: fneg_1_fabs_2_select_i16:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_xor_b32_e32 v2, 0xffff8000, v1
+; GFX7-NEXT: v_and_b32_e32 v1, 0x7fff, v1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: fneg_1_fabs_2_select_i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_xor_b32_e32 v2, 0xffff8000, v1
+; GFX9-NEXT: v_and_b32_e32 v1, 0x7fff, v1
+; GFX9-NEXT: v_cmp_eq_u16_e32 vcc, 0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-TRUE16-LABEL: fneg_1_fabs_2_select_i16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_xor_b16 v0.h, 0x8000, v1.l
+; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0x7fff, v1.l
+; GFX11-TRUE16-NEXT: v_cmp_eq_u16_e32 vcc_lo, 0, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v1.l, v0.h, vcc_lo
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: fneg_1_fabs_2_select_i16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_xor_b32_e32 v2, 0xffff8000, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_cmp_eq_u16_e32 vcc_lo, 0, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = xor i16 %a, u0x8000
+ %abs.b = and i16 %a, u0x7fff
+ %cmp = icmp eq i16 %cond, zeroinitializer
+ %select = select i1 %cmp, i16 %neg.a, i16 %abs.b
+ ret i16 %select
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll
index 462090c..0a2e7af 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll
@@ -1,12 +1,46 @@
-; RUN: llc -mtriple=amdgcn -mcpu=verde < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SI %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=amdgcn -mcpu=tonga < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SI %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx1010 -mattr=+wavefrontsize64 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX10 %s
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize64 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX10 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize64 -amdgpu-enable-delay-alu=0 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX11-12,GFX11 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64 -amdgpu-enable-delay-alu=0 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX11-12,GFX12 %s
-; GCN-LABEL: {{^}}gs_const:
-; GCN-NOT: v_cmpx
-; GCN: s_mov_b64 exec, 0
define amdgpu_gs void @gs_const() {
+; SI-LABEL: gs_const:
+; SI: ; %bb.0:
+; SI-NEXT: s_mov_b64 s[0:1], exec
+; SI-NEXT: s_andn2_b64 s[0:1], s[0:1], exec
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: s_mov_b32 m0, 0
+; SI-NEXT: s_nop 0
+; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; SI-NEXT: s_endpgm
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: gs_const:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_mov_b64 s[0:1], exec
+; GFX10-NEXT: s_andn2_b64 s[0:1], s[0:1], exec
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: s_mov_b32 m0, 0
+; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-12-LABEL: gs_const:
+; GFX11-12: ; %bb.0:
+; GFX11-12-NEXT: s_mov_b64 s[0:1], exec
+; GFX11-12-NEXT: s_and_not1_b64 s[0:1], s[0:1], exec
+; GFX11-12-NEXT: s_mov_b64 exec, 0
+; GFX11-12-NEXT: s_mov_b32 m0, 0
+; GFX11-12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-12-NEXT: s_endpgm
+; GFX11-12-NEXT: ; %bb.1:
+; GFX11-12-NEXT: s_mov_b64 exec, 0
+; GFX11-12-NEXT: s_endpgm
%tmp = icmp ule i32 0, 3
%tmp1 = select i1 %tmp, float 1.000000e+00, float -1.000000e+00
%c1 = fcmp oge float %tmp1, 0.0
@@ -19,12 +53,81 @@ define amdgpu_gs void @gs_const() {
ret void
}
-; GCN-LABEL: {{^}}vcc_implicit_def:
-; GCN: v_cmp_nle_f32_e32 vcc, 0, v{{[0-9]+}}
-; GCN: v_cmp_gt_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], 0, v{{[0-9]+}}
-; GCN: s_and{{n2|_not1}}_b64 exec, exec, vcc
-; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1.0, [[CMP]]
define amdgpu_ps void @vcc_implicit_def(float %arg13, float %arg14) {
+; SI-LABEL: vcc_implicit_def:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmp_nle_f32_e32 vcc, 0, v1
+; SI-NEXT: v_cmp_gt_f32_e64 s[0:1], 0, v0
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1.0, s[0:1]
+; SI-NEXT: exp mrt1 v0, v0, v0, v0 done vm
+; SI-NEXT: s_mov_b32 m0, 0
+; SI-NEXT: s_nop 0
+; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; SI-NEXT: s_endpgm
+; SI-NEXT: .LBB1_2:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: exp null off, off, off, off done vm
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: vcc_implicit_def:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_nle_f32_e32 vcc, 0, v1
+; GFX10-NEXT: v_cmp_gt_f32_e64 s[0:1], 0, v0
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_cbranch_scc0 .LBB1_2
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1.0, s[0:1]
+; GFX10-NEXT: s_mov_b32 m0, 0
+; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; GFX10-NEXT: exp mrt1 v0, v0, v0, v0 done vm
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: .LBB1_2:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: exp null off, off, off, off done vm
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: vcc_implicit_def:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_cmp_nle_f32_e32 vcc, 0, v1
+; GFX11-NEXT: v_cmp_gt_f32_e64 s[0:1], 0, v0
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
+; GFX11-NEXT: ; %bb.1:
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1.0, s[0:1]
+; GFX11-NEXT: s_mov_b32 m0, 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: exp mrt1 v0, v0, v0, v0 done
+; GFX11-NEXT: s_endpgm
+; GFX11-NEXT: .LBB1_2:
+; GFX11-NEXT: s_mov_b64 exec, 0
+; GFX11-NEXT: exp mrt0 off, off, off, off done
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: vcc_implicit_def:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_cmp_le_f32_e64 s[0:1], 0, v1
+; GFX12-NEXT: s_mov_b64 s[2:3], exec
+; GFX12-NEXT: v_cmp_gt_f32_e32 vcc, 0, v0
+; GFX12-NEXT: s_and_not1_b64 s[0:1], exec, s[0:1]
+; GFX12-NEXT: s_and_not1_b64 s[2:3], s[2:3], s[0:1]
+; GFX12-NEXT: s_cbranch_scc0 .LBB1_2
+; GFX12-NEXT: ; %bb.1:
+; GFX12-NEXT: s_and_b64 exec, exec, s[2:3]
+; GFX12-NEXT: v_cndmask_b32_e64 v0, 0, 1.0, vcc
+; GFX12-NEXT: s_mov_b32 m0, 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: export mrt1 v0, v0, v0, v0 done
+; GFX12-NEXT: s_endpgm
+; GFX12-NEXT: .LBB1_2:
+; GFX12-NEXT: s_mov_b64 exec, 0
+; GFX12-NEXT: export mrt0 off, off, off, off done
+; GFX12-NEXT: s_endpgm
%tmp0 = fcmp olt float %arg13, 0.000000e+00
%c1 = fcmp oge float %arg14, 0.0
call void @llvm.amdgcn.kill(i1 %c1)
@@ -34,31 +137,102 @@ define amdgpu_ps void @vcc_implicit_def(float %arg13, float %arg14) {
ret void
}
-; GCN-LABEL: {{^}}true:
-; GCN-NEXT: %bb.
-; GCN-NEXT: s_endpgm
define amdgpu_gs void @true() {
+; GCN-LABEL: true:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_endpgm
call void @llvm.amdgcn.kill(i1 true)
ret void
}
-; GCN-LABEL: {{^}}false:
-; GCN-NOT: v_cmpx
-; GCN: s_mov_b64 exec, 0
define amdgpu_gs void @false() {
+; SI-LABEL: false:
+; SI: ; %bb.0:
+; SI-NEXT: s_andn2_b64 exec, exec, exec
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: s_mov_b32 m0, 0
+; SI-NEXT: s_nop 0
+; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; SI-NEXT: s_endpgm
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: false:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_andn2_b64 exec, exec, exec
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: s_mov_b32 m0, 0
+; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-12-LABEL: false:
+; GFX11-12: ; %bb.0:
+; GFX11-12-NEXT: s_and_not1_b64 exec, exec, exec
+; GFX11-12-NEXT: s_mov_b64 exec, 0
+; GFX11-12-NEXT: s_mov_b32 m0, 0
+; GFX11-12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-12-NEXT: s_endpgm
+; GFX11-12-NEXT: ; %bb.1:
+; GFX11-12-NEXT: s_mov_b64 exec, 0
+; GFX11-12-NEXT: s_endpgm
call void @llvm.amdgcn.kill(i1 false)
call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0)
ret void
}
-; GCN-LABEL: {{^}}and:
-; GCN: v_cmp_lt_i32
-; GCN: v_cmp_lt_i32
-; GCN: s_or_b64 s[0:1]
-; GCN: s_and{{n2|_not1}}_b64 s[0:1], exec, s[0:1]
-; GCN: s_and{{n2|_not1}}_b64 s[2:3], s[2:3], s[0:1]
-; GCN: s_and_b64 exec, exec, s[2:3]
define amdgpu_gs void @and(i32 %a, i32 %b, i32 %c, i32 %d) {
+; SI-LABEL: and:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmp_lt_i32_e32 vcc, v0, v1
+; SI-NEXT: v_cmp_lt_i32_e64 s[0:1], v2, v3
+; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; SI-NEXT: s_mov_b64 s[2:3], exec
+; SI-NEXT: s_andn2_b64 s[0:1], exec, s[0:1]
+; SI-NEXT: s_andn2_b64 s[2:3], s[2:3], s[0:1]
+; SI-NEXT: s_and_b64 exec, exec, s[2:3]
+; SI-NEXT: s_mov_b32 m0, 0
+; SI-NEXT: s_nop 0
+; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; SI-NEXT: s_endpgm
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: and:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_lt_i32_e32 vcc, v0, v1
+; GFX10-NEXT: v_cmp_lt_i32_e64 s[0:1], v2, v3
+; GFX10-NEXT: s_mov_b64 s[2:3], exec
+; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX10-NEXT: s_andn2_b64 s[0:1], exec, s[0:1]
+; GFX10-NEXT: s_andn2_b64 s[2:3], s[2:3], s[0:1]
+; GFX10-NEXT: s_and_b64 exec, exec, s[2:3]
+; GFX10-NEXT: s_mov_b32 m0, 0
+; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-12-LABEL: and:
+; GFX11-12: ; %bb.0:
+; GFX11-12-NEXT: v_cmp_lt_i32_e32 vcc, v0, v1
+; GFX11-12-NEXT: v_cmp_lt_i32_e64 s[0:1], v2, v3
+; GFX11-12-NEXT: s_mov_b64 s[2:3], exec
+; GFX11-12-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX11-12-NEXT: s_and_not1_b64 s[0:1], exec, s[0:1]
+; GFX11-12-NEXT: s_and_not1_b64 s[2:3], s[2:3], s[0:1]
+; GFX11-12-NEXT: s_and_b64 exec, exec, s[2:3]
+; GFX11-12-NEXT: s_mov_b32 m0, 0
+; GFX11-12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-12-NEXT: s_endpgm
+; GFX11-12-NEXT: ; %bb.1:
+; GFX11-12-NEXT: s_mov_b64 exec, 0
+; GFX11-12-NEXT: s_endpgm
%c1 = icmp slt i32 %a, %b
%c2 = icmp slt i32 %c, %d
%x = or i1 %c1, %c2
@@ -67,13 +241,52 @@ define amdgpu_gs void @and(i32 %a, i32 %b, i32 %c, i32 %d) {
ret void
}
-; GCN-LABEL: {{^}}andn2:
-; GCN: v_cmp_lt_i32
-; GCN: v_cmp_lt_i32
-; GCN: s_xor_b64 s[0:1]
-; GCN: s_and{{n2|_not1}}_b64 s[2:3], s[2:3], s[0:1]
-; GCN: s_and_b64 exec, exec, s[2:3]
define amdgpu_gs void @andn2(i32 %a, i32 %b, i32 %c, i32 %d) {
+; SI-LABEL: andn2:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmp_lt_i32_e32 vcc, v0, v1
+; SI-NEXT: v_cmp_lt_i32_e64 s[0:1], v2, v3
+; SI-NEXT: s_mov_b64 s[2:3], exec
+; SI-NEXT: s_xor_b64 s[0:1], vcc, s[0:1]
+; SI-NEXT: s_andn2_b64 s[2:3], s[2:3], s[0:1]
+; SI-NEXT: s_and_b64 exec, exec, s[2:3]
+; SI-NEXT: s_mov_b32 m0, 0
+; SI-NEXT: s_nop 0
+; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; SI-NEXT: s_endpgm
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: andn2:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_lt_i32_e32 vcc, v0, v1
+; GFX10-NEXT: v_cmp_lt_i32_e64 s[0:1], v2, v3
+; GFX10-NEXT: s_mov_b64 s[2:3], exec
+; GFX10-NEXT: s_xor_b64 s[0:1], vcc, s[0:1]
+; GFX10-NEXT: s_andn2_b64 s[2:3], s[2:3], s[0:1]
+; GFX10-NEXT: s_and_b64 exec, exec, s[2:3]
+; GFX10-NEXT: s_mov_b32 m0, 0
+; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-12-LABEL: andn2:
+; GFX11-12: ; %bb.0:
+; GFX11-12-NEXT: v_cmp_lt_i32_e32 vcc, v0, v1
+; GFX11-12-NEXT: v_cmp_lt_i32_e64 s[0:1], v2, v3
+; GFX11-12-NEXT: s_mov_b64 s[2:3], exec
+; GFX11-12-NEXT: s_xor_b64 s[0:1], vcc, s[0:1]
+; GFX11-12-NEXT: s_and_not1_b64 s[2:3], s[2:3], s[0:1]
+; GFX11-12-NEXT: s_and_b64 exec, exec, s[2:3]
+; GFX11-12-NEXT: s_mov_b32 m0, 0
+; GFX11-12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-12-NEXT: s_endpgm
+; GFX11-12-NEXT: ; %bb.1:
+; GFX11-12-NEXT: s_mov_b64 exec, 0
+; GFX11-12-NEXT: s_endpgm
%c1 = icmp slt i32 %a, %b
%c2 = icmp slt i32 %c, %d
%x = xor i1 %c1, %c2
@@ -83,135 +296,854 @@ define amdgpu_gs void @andn2(i32 %a, i32 %b, i32 %c, i32 %d) {
ret void
}
-; GCN-LABEL: {{^}}oeq:
-; GCN: v_cmp_neq_f32
+; Should use v_cmp_neq_f32
define amdgpu_gs void @oeq(float %a) {
+; SI-LABEL: oeq:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_mov_b32 m0, 0
+; SI-NEXT: s_nop 0
+; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; SI-NEXT: s_endpgm
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: oeq:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_mov_b32 m0, 0
+; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: oeq:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_mov_b32 m0, 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+; GFX11-NEXT: ; %bb.1:
+; GFX11-NEXT: s_mov_b64 exec, 0
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: oeq:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_cmp_eq_f32_e32 vcc, 0, v0
+; GFX12-NEXT: s_mov_b64 s[0:1], exec
+; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc
+; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX12-NEXT: s_mov_b32 m0, 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+; GFX12-NEXT: ; %bb.1:
+; GFX12-NEXT: s_mov_b64 exec, 0
+; GFX12-NEXT: s_endpgm
%c1 = fcmp oeq float %a, 0.0
call void @llvm.amdgcn.kill(i1 %c1)
call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0)
ret void
}
-; GCN-LABEL: {{^}}ogt:
-; GCN: v_cmp_nlt_f32
+; Should use v_cmp_nlt_f32
define amdgpu_gs void @ogt(float %a) {
+; SI-LABEL: ogt:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_mov_b32 m0, 0
+; SI-NEXT: s_nop 0
+; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; SI-NEXT: s_endpgm
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: ogt:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_mov_b32 m0, 0
+; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: ogt:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_mov_b32 m0, 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+; GFX11-NEXT: ; %bb.1:
+; GFX11-NEXT: s_mov_b64 exec, 0
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: ogt:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_cmp_lt_f32_e32 vcc, 0, v0
+; GFX12-NEXT: s_mov_b64 s[0:1], exec
+; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc
+; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX12-NEXT: s_mov_b32 m0, 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+; GFX12-NEXT: ; %bb.1:
+; GFX12-NEXT: s_mov_b64 exec, 0
+; GFX12-NEXT: s_endpgm
%c1 = fcmp ogt float %a, 0.0
call void @llvm.amdgcn.kill(i1 %c1)
call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0)
ret void
}
-; GCN-LABEL: {{^}}oge:
-; GCN: v_cmp_nle_f32
+; Should use v_cmp_nle_f32
define amdgpu_gs void @oge(float %a) {
+; SI-LABEL: oge:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmp_nle_f32_e32 vcc, 0, v0
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_mov_b32 m0, 0
+; SI-NEXT: s_nop 0
+; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; SI-NEXT: s_endpgm
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: oge:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_nle_f32_e32 vcc, 0, v0
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_mov_b32 m0, 0
+; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: oge:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_cmp_nle_f32_e32 vcc, 0, v0
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_mov_b32 m0, 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+; GFX11-NEXT: ; %bb.1:
+; GFX11-NEXT: s_mov_b64 exec, 0
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: oge:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_cmp_le_f32_e32 vcc, 0, v0
+; GFX12-NEXT: s_mov_b64 s[0:1], exec
+; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc
+; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX12-NEXT: s_mov_b32 m0, 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+; GFX12-NEXT: ; %bb.1:
+; GFX12-NEXT: s_mov_b64 exec, 0
+; GFX12-NEXT: s_endpgm
%c1 = fcmp oge float %a, 0.0
call void @llvm.amdgcn.kill(i1 %c1)
call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0)
ret void
}
-; GCN-LABEL: {{^}}olt:
-; GCN: v_cmp_ngt_f32
+; Should use v_cmp_ngt_f32
define amdgpu_gs void @olt(float %a) {
+; SI-LABEL: olt:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmp_ngt_f32_e32 vcc, 0, v0
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_mov_b32 m0, 0
+; SI-NEXT: s_nop 0
+; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; SI-NEXT: s_endpgm
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: olt:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_ngt_f32_e32 vcc, 0, v0
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_mov_b32 m0, 0
+; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: olt:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_cmp_ngt_f32_e32 vcc, 0, v0
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_mov_b32 m0, 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+; GFX11-NEXT: ; %bb.1:
+; GFX11-NEXT: s_mov_b64 exec, 0
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: olt:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_cmp_gt_f32_e32 vcc, 0, v0
+; GFX12-NEXT: s_mov_b64 s[0:1], exec
+; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc
+; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX12-NEXT: s_mov_b32 m0, 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+; GFX12-NEXT: ; %bb.1:
+; GFX12-NEXT: s_mov_b64 exec, 0
+; GFX12-NEXT: s_endpgm
%c1 = fcmp olt float %a, 0.0
call void @llvm.amdgcn.kill(i1 %c1)
call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0)
ret void
}
-; GCN-LABEL: {{^}}ole:
-; GCN: v_cmp_nge_f32
+; Should use v_cmp_nge_f32
define amdgpu_gs void @ole(float %a) {
+; SI-LABEL: ole:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmp_nge_f32_e32 vcc, 0, v0
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_mov_b32 m0, 0
+; SI-NEXT: s_nop 0
+; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; SI-NEXT: s_endpgm
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: ole:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_nge_f32_e32 vcc, 0, v0
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_mov_b32 m0, 0
+; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: ole:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_cmp_nge_f32_e32 vcc, 0, v0
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_mov_b32 m0, 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+; GFX11-NEXT: ; %bb.1:
+; GFX11-NEXT: s_mov_b64 exec, 0
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: ole:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_cmp_ge_f32_e32 vcc, 0, v0
+; GFX12-NEXT: s_mov_b64 s[0:1], exec
+; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc
+; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX12-NEXT: s_mov_b32 m0, 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+; GFX12-NEXT: ; %bb.1:
+; GFX12-NEXT: s_mov_b64 exec, 0
+; GFX12-NEXT: s_endpgm
%c1 = fcmp ole float %a, 0.0
call void @llvm.amdgcn.kill(i1 %c1)
call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0)
ret void
}
-; GCN-LABEL: {{^}}one:
-; GCN: v_cmp_nlg_f32
+; Should use v_cmp_nlg_f32
define amdgpu_gs void @one(float %a) {
+; SI-LABEL: one:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmp_nlg_f32_e32 vcc, 0, v0
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_mov_b32 m0, 0
+; SI-NEXT: s_nop 0
+; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; SI-NEXT: s_endpgm
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: one:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_nlg_f32_e32 vcc, 0, v0
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_mov_b32 m0, 0
+; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: one:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_cmp_nlg_f32_e32 vcc, 0, v0
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_mov_b32 m0, 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+; GFX11-NEXT: ; %bb.1:
+; GFX11-NEXT: s_mov_b64 exec, 0
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: one:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_cmp_lg_f32_e32 vcc, 0, v0
+; GFX12-NEXT: s_mov_b64 s[0:1], exec
+; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc
+; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX12-NEXT: s_mov_b32 m0, 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+; GFX12-NEXT: ; %bb.1:
+; GFX12-NEXT: s_mov_b64 exec, 0
+; GFX12-NEXT: s_endpgm
%c1 = fcmp one float %a, 0.0
call void @llvm.amdgcn.kill(i1 %c1)
call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0)
ret void
}
-; GCN-LABEL: {{^}}ord:
-; GCN: v_cmp_o_f32
+; Should use v_cmp_o_f32
define amdgpu_gs void @ord(float %a) {
+; SI-LABEL: ord:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmp_o_f32_e32 vcc, v0, v0
+; SI-NEXT: s_mov_b64 s[0:1], exec
+; SI-NEXT: s_andn2_b64 s[2:3], exec, vcc
+; SI-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
+; SI-NEXT: s_and_b64 exec, exec, s[0:1]
+; SI-NEXT: s_mov_b32 m0, 0
+; SI-NEXT: s_nop 0
+; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; SI-NEXT: s_endpgm
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: ord:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_o_f32_e32 vcc, v0, v0
+; GFX10-NEXT: s_mov_b64 s[0:1], exec
+; GFX10-NEXT: s_andn2_b64 s[2:3], exec, vcc
+; GFX10-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
+; GFX10-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX10-NEXT: s_mov_b32 m0, 0
+; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-12-LABEL: ord:
+; GFX11-12: ; %bb.0:
+; GFX11-12-NEXT: v_cmp_o_f32_e32 vcc, v0, v0
+; GFX11-12-NEXT: s_mov_b64 s[0:1], exec
+; GFX11-12-NEXT: s_and_not1_b64 s[2:3], exec, vcc
+; GFX11-12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
+; GFX11-12-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX11-12-NEXT: s_mov_b32 m0, 0
+; GFX11-12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-12-NEXT: s_endpgm
+; GFX11-12-NEXT: ; %bb.1:
+; GFX11-12-NEXT: s_mov_b64 exec, 0
+; GFX11-12-NEXT: s_endpgm
%c1 = fcmp ord float %a, 0.0
call void @llvm.amdgcn.kill(i1 %c1)
call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0)
ret void
}
-; GCN-LABEL: {{^}}uno:
-; GCN: v_cmp_u_f32
+; Should use v_cmp_u_f32
define amdgpu_gs void @uno(float %a) {
+; SI-LABEL: uno:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
+; SI-NEXT: s_mov_b64 s[0:1], exec
+; SI-NEXT: s_andn2_b64 s[2:3], exec, vcc
+; SI-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
+; SI-NEXT: s_and_b64 exec, exec, s[0:1]
+; SI-NEXT: s_mov_b32 m0, 0
+; SI-NEXT: s_nop 0
+; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; SI-NEXT: s_endpgm
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: uno:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
+; GFX10-NEXT: s_mov_b64 s[0:1], exec
+; GFX10-NEXT: s_andn2_b64 s[2:3], exec, vcc
+; GFX10-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
+; GFX10-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX10-NEXT: s_mov_b32 m0, 0
+; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-12-LABEL: uno:
+; GFX11-12: ; %bb.0:
+; GFX11-12-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
+; GFX11-12-NEXT: s_mov_b64 s[0:1], exec
+; GFX11-12-NEXT: s_and_not1_b64 s[2:3], exec, vcc
+; GFX11-12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
+; GFX11-12-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX11-12-NEXT: s_mov_b32 m0, 0
+; GFX11-12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-12-NEXT: s_endpgm
+; GFX11-12-NEXT: ; %bb.1:
+; GFX11-12-NEXT: s_mov_b64 exec, 0
+; GFX11-12-NEXT: s_endpgm
%c1 = fcmp uno float %a, 0.0
call void @llvm.amdgcn.kill(i1 %c1)
call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0)
ret void
}
-; GCN-LABEL: {{^}}ueq:
-; GCN: v_cmp_lg_f32
+; Should use v_cmp_lg_f32
define amdgpu_gs void @ueq(float %a) {
+; SI-LABEL: ueq:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmp_lg_f32_e32 vcc, 0, v0
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_mov_b32 m0, 0
+; SI-NEXT: s_nop 0
+; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; SI-NEXT: s_endpgm
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: ueq:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_lg_f32_e32 vcc, 0, v0
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_mov_b32 m0, 0
+; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: ueq:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_cmp_lg_f32_e32 vcc, 0, v0
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_mov_b32 m0, 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+; GFX11-NEXT: ; %bb.1:
+; GFX11-NEXT: s_mov_b64 exec, 0
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: ueq:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_cmp_nlg_f32_e32 vcc, 0, v0
+; GFX12-NEXT: s_mov_b64 s[0:1], exec
+; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc
+; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX12-NEXT: s_mov_b32 m0, 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+; GFX12-NEXT: ; %bb.1:
+; GFX12-NEXT: s_mov_b64 exec, 0
+; GFX12-NEXT: s_endpgm
%c1 = fcmp ueq float %a, 0.0
call void @llvm.amdgcn.kill(i1 %c1)
call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0)
ret void
}
-; GCN-LABEL: {{^}}ugt:
-; GCN: v_cmp_ge_f32
+; Should use v_cmp_ge_f32
define amdgpu_gs void @ugt(float %a) {
+; SI-LABEL: ugt:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmp_ge_f32_e32 vcc, 0, v0
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_mov_b32 m0, 0
+; SI-NEXT: s_nop 0
+; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; SI-NEXT: s_endpgm
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: ugt:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_ge_f32_e32 vcc, 0, v0
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_mov_b32 m0, 0
+; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: ugt:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_cmp_ge_f32_e32 vcc, 0, v0
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_mov_b32 m0, 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+; GFX11-NEXT: ; %bb.1:
+; GFX11-NEXT: s_mov_b64 exec, 0
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: ugt:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_cmp_nge_f32_e32 vcc, 0, v0
+; GFX12-NEXT: s_mov_b64 s[0:1], exec
+; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc
+; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX12-NEXT: s_mov_b32 m0, 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+; GFX12-NEXT: ; %bb.1:
+; GFX12-NEXT: s_mov_b64 exec, 0
+; GFX12-NEXT: s_endpgm
%c1 = fcmp ugt float %a, 0.0
call void @llvm.amdgcn.kill(i1 %c1)
call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0)
ret void
}
-; GCN-LABEL: {{^}}uge:
-; GCN: v_cmp_gt_f32_e32 vcc, -1.0
+; Should use v_cmp_gt_f32_e32 vcc, -1.0
define amdgpu_gs void @uge(float %a) {
+; SI-LABEL: uge:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmp_gt_f32_e32 vcc, -1.0, v0
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_mov_b32 m0, 0
+; SI-NEXT: s_nop 0
+; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; SI-NEXT: s_endpgm
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: uge:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_gt_f32_e32 vcc, -1.0, v0
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_mov_b32 m0, 0
+; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: uge:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_cmp_gt_f32_e32 vcc, -1.0, v0
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_mov_b32 m0, 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+; GFX11-NEXT: ; %bb.1:
+; GFX11-NEXT: s_mov_b64 exec, 0
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: uge:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_cmp_ngt_f32_e32 vcc, -1.0, v0
+; GFX12-NEXT: s_mov_b64 s[0:1], exec
+; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc
+; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX12-NEXT: s_mov_b32 m0, 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+; GFX12-NEXT: ; %bb.1:
+; GFX12-NEXT: s_mov_b64 exec, 0
+; GFX12-NEXT: s_endpgm
%c1 = fcmp uge float %a, -1.0
call void @llvm.amdgcn.kill(i1 %c1)
call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0)
ret void
}
-; GCN-LABEL: {{^}}ult:
-; GCN: v_cmp_le_f32_e32 vcc, -2.0
+; Should use v_cmp_le_f32_e32 vcc, -2.0
define amdgpu_gs void @ult(float %a) {
+; SI-LABEL: ult:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmp_le_f32_e32 vcc, -2.0, v0
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_mov_b32 m0, 0
+; SI-NEXT: s_nop 0
+; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; SI-NEXT: s_endpgm
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: ult:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_le_f32_e32 vcc, -2.0, v0
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_mov_b32 m0, 0
+; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: ult:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_cmp_le_f32_e32 vcc, -2.0, v0
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_mov_b32 m0, 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+; GFX11-NEXT: ; %bb.1:
+; GFX11-NEXT: s_mov_b64 exec, 0
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: ult:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_cmp_nle_f32_e32 vcc, -2.0, v0
+; GFX12-NEXT: s_mov_b64 s[0:1], exec
+; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc
+; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX12-NEXT: s_mov_b32 m0, 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+; GFX12-NEXT: ; %bb.1:
+; GFX12-NEXT: s_mov_b64 exec, 0
+; GFX12-NEXT: s_endpgm
%c1 = fcmp ult float %a, -2.0
call void @llvm.amdgcn.kill(i1 %c1)
call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0)
ret void
}
-; GCN-LABEL: {{^}}ule:
-; GCN: v_cmp_lt_f32_e32 vcc, 2.0
+; Should use v_cmp_lt_f32_e32 vcc, 2.0
define amdgpu_gs void @ule(float %a) {
+; SI-LABEL: ule:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmp_lt_f32_e32 vcc, 2.0, v0
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_mov_b32 m0, 0
+; SI-NEXT: s_nop 0
+; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; SI-NEXT: s_endpgm
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: ule:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_lt_f32_e32 vcc, 2.0, v0
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_mov_b32 m0, 0
+; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: ule:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_cmp_lt_f32_e32 vcc, 2.0, v0
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_mov_b32 m0, 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+; GFX11-NEXT: ; %bb.1:
+; GFX11-NEXT: s_mov_b64 exec, 0
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: ule:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_cmp_nlt_f32_e32 vcc, 2.0, v0
+; GFX12-NEXT: s_mov_b64 s[0:1], exec
+; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc
+; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX12-NEXT: s_mov_b32 m0, 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+; GFX12-NEXT: ; %bb.1:
+; GFX12-NEXT: s_mov_b64 exec, 0
+; GFX12-NEXT: s_endpgm
%c1 = fcmp ule float %a, 2.0
call void @llvm.amdgcn.kill(i1 %c1)
call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0)
ret void
}
-; GCN-LABEL: {{^}}une:
-; GCN: v_cmp_eq_f32_e32 vcc, 0
+; Should use v_cmp_eq_f32_e32 vcc, 0
define amdgpu_gs void @une(float %a) {
+; SI-LABEL: une:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmp_eq_f32_e32 vcc, 0, v0
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_mov_b32 m0, 0
+; SI-NEXT: s_nop 0
+; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; SI-NEXT: s_endpgm
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: une:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_eq_f32_e32 vcc, 0, v0
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_mov_b32 m0, 0
+; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: une:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_cmp_eq_f32_e32 vcc, 0, v0
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_mov_b32 m0, 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+; GFX11-NEXT: ; %bb.1:
+; GFX11-NEXT: s_mov_b64 exec, 0
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: une:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0
+; GFX12-NEXT: s_mov_b64 s[0:1], exec
+; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc
+; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX12-NEXT: s_mov_b32 m0, 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+; GFX12-NEXT: ; %bb.1:
+; GFX12-NEXT: s_mov_b64 exec, 0
+; GFX12-NEXT: s_endpgm
%c1 = fcmp une float %a, 0.0
call void @llvm.amdgcn.kill(i1 %c1)
call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0)
ret void
}
-; GCN-LABEL: {{^}}neg_olt:
-; GCN: v_cmp_gt_f32_e32 vcc, 1.0
+; Should use v_cmp_gt_f32_e32 vcc, 1.0
define amdgpu_gs void @neg_olt(float %a) {
+; SI-LABEL: neg_olt:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmp_gt_f32_e32 vcc, 1.0, v0
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_mov_b32 m0, 0
+; SI-NEXT: s_nop 0
+; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; SI-NEXT: s_endpgm
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: neg_olt:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_gt_f32_e32 vcc, 1.0, v0
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_mov_b32 m0, 0
+; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: neg_olt:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_cmp_gt_f32_e32 vcc, 1.0, v0
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_mov_b32 m0, 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+; GFX11-NEXT: ; %bb.1:
+; GFX11-NEXT: s_mov_b64 exec, 0
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: neg_olt:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_cmp_ngt_f32_e32 vcc, 1.0, v0
+; GFX12-NEXT: s_mov_b64 s[0:1], exec
+; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc
+; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX12-NEXT: s_mov_b32 m0, 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+; GFX12-NEXT: ; %bb.1:
+; GFX12-NEXT: s_mov_b64 exec, 0
+; GFX12-NEXT: s_endpgm
%c1 = fcmp olt float %a, 1.0
%c2 = xor i1 %c1, 1
call void @llvm.amdgcn.kill(i1 %c2)
@@ -219,13 +1151,61 @@ define amdgpu_gs void @neg_olt(float %a) {
ret void
}
-; GCN-LABEL: {{^}}fcmp_x2:
; FIXME: LLVM should be able to combine these fcmp opcodes.
-; SI: v_cmp_lt_f32_e32 vcc, s{{[0-9]+}}, v0
-; GFX10: v_cmp_lt_f32_e32 vcc, 0x3e800000, v0
-; GCN: v_cndmask_b32
-; GCN: v_cmp_nle_f32
define amdgpu_ps void @fcmp_x2(float %a) #0 {
+; SI-LABEL: fcmp_x2:
+; SI: ; %bb.0:
+; SI-NEXT: s_mov_b32 s0, 0x3e800000
+; SI-NEXT: v_cmp_lt_f32_e32 vcc, s0, v0
+; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1.0, vcc
+; SI-NEXT: v_cmp_nle_f32_e32 vcc, 0, v0
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_cbranch_scc0 .LBB21_1
+; SI-NEXT: s_endpgm
+; SI-NEXT: .LBB21_1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: exp null off, off, off, off done vm
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: fcmp_x2:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_lt_f32_e32 vcc, 0x3e800000, v0
+; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, -1.0, vcc
+; GFX10-NEXT: v_cmp_nle_f32_e32 vcc, 0, v0
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_cbranch_scc0 .LBB21_1
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: .LBB21_1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: exp null off, off, off, off done vm
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: fcmp_x2:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_cmp_lt_f32_e32 vcc, 0x3e800000, v0
+; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, -1.0, vcc
+; GFX11-NEXT: v_cmp_nle_f32_e32 vcc, 0, v0
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_1
+; GFX11-NEXT: s_endpgm
+; GFX11-NEXT: .LBB21_1:
+; GFX11-NEXT: s_mov_b64 exec, 0
+; GFX11-NEXT: exp mrt0 off, off, off, off done
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: fcmp_x2:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_cmp_lt_f32_e32 vcc, 0x3e800000, v0
+; GFX12-NEXT: v_cndmask_b32_e64 v0, 0, -1.0, vcc
+; GFX12-NEXT: v_cmp_le_f32_e32 vcc, 0, v0
+; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc
+; GFX12-NEXT: s_and_not1_b64 s[0:1], exec, s[2:3]
+; GFX12-NEXT: s_cbranch_scc0 .LBB21_1
+; GFX12-NEXT: s_endpgm
+; GFX12-NEXT: .LBB21_1:
+; GFX12-NEXT: s_mov_b64 exec, 0
+; GFX12-NEXT: export mrt0 off, off, off, off done
+; GFX12-NEXT: s_endpgm
%ogt = fcmp nsz ogt float %a, 2.500000e-01
%k = select i1 %ogt, float -1.000000e+00, float 0.000000e+00
%c = fcmp nsz oge float %k, 0.000000e+00
@@ -234,14 +1214,78 @@ define amdgpu_ps void @fcmp_x2(float %a) #0 {
}
; Note: an almost identical test for this exists in llvm.amdgcn.wqm.vote.ll
-; GCN-LABEL: {{^}}wqm:
-; GCN: v_cmp_neq_f32_e32 vcc, 0
-; GCN-DAG: s_wqm_b64 s[2:3], vcc
-; GCN-DAG: s_mov_b64 s[0:1], exec
-; GCN: s_and{{n2|_not1}}_b64 s[2:3], exec, s[2:3]
-; GCN: s_and{{n2|_not1}}_b64 s[0:1], s[0:1], s[2:3]
-; GCN: s_and_b64 exec, exec, s[0:1]
define amdgpu_ps float @wqm(float %a) {
+; SI-LABEL: wqm:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0
+; SI-NEXT: s_wqm_b64 s[2:3], vcc
+; SI-NEXT: s_mov_b64 s[0:1], exec
+; SI-NEXT: s_andn2_b64 s[2:3], exec, s[2:3]
+; SI-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
+; SI-NEXT: s_cbranch_scc0 .LBB22_2
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: s_and_b64 exec, exec, s[0:1]
+; SI-NEXT: v_mov_b32_e32 v0, 0
+; SI-NEXT: s_branch .LBB22_3
+; SI-NEXT: .LBB22_2:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: exp null off, off, off, off done vm
+; SI-NEXT: s_endpgm
+; SI-NEXT: .LBB22_3:
+;
+; GFX10-LABEL: wqm:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0
+; GFX10-NEXT: s_mov_b64 s[0:1], exec
+; GFX10-NEXT: s_wqm_b64 s[2:3], vcc
+; GFX10-NEXT: s_andn2_b64 s[2:3], exec, s[2:3]
+; GFX10-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
+; GFX10-NEXT: s_cbranch_scc0 .LBB22_2
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX10-NEXT: v_mov_b32_e32 v0, 0
+; GFX10-NEXT: s_branch .LBB22_3
+; GFX10-NEXT: .LBB22_2:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: exp null off, off, off, off done vm
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: .LBB22_3:
+;
+; GFX11-LABEL: wqm:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0
+; GFX11-NEXT: s_mov_b64 s[0:1], exec
+; GFX11-NEXT: s_wqm_b64 s[2:3], vcc
+; GFX11-NEXT: s_and_not1_b64 s[2:3], exec, s[2:3]
+; GFX11-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
+; GFX11-NEXT: s_cbranch_scc0 .LBB22_2
+; GFX11-NEXT: ; %bb.1:
+; GFX11-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX11-NEXT: v_mov_b32_e32 v0, 0
+; GFX11-NEXT: s_branch .LBB22_3
+; GFX11-NEXT: .LBB22_2:
+; GFX11-NEXT: s_mov_b64 exec, 0
+; GFX11-NEXT: exp mrt0 off, off, off, off done
+; GFX11-NEXT: s_endpgm
+; GFX11-NEXT: .LBB22_3:
+;
+; GFX12-LABEL: wqm:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0
+; GFX12-NEXT: s_mov_b64 s[0:1], exec
+; GFX12-NEXT: s_wqm_b64 s[2:3], vcc
+; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, s[2:3]
+; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: s_cbranch_scc0 .LBB22_2
+; GFX12-NEXT: ; %bb.1:
+; GFX12-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX12-NEXT: v_mov_b32_e32 v0, 0
+; GFX12-NEXT: s_branch .LBB22_3
+; GFX12-NEXT: .LBB22_2:
+; GFX12-NEXT: s_mov_b64 exec, 0
+; GFX12-NEXT: export mrt0 off, off, off, off done
+; GFX12-NEXT: s_endpgm
+; GFX12-NEXT: .LBB22_3:
%c1 = fcmp une float %a, 0.0
%c2 = call i1 @llvm.amdgcn.wqm.vote(i1 %c1)
call void @llvm.amdgcn.kill(i1 %c2)
@@ -249,28 +1293,212 @@ define amdgpu_ps float @wqm(float %a) {
}
; This checks that we use the 64-bit encoding when the operand is a SGPR.
-; GCN-LABEL: {{^}}test_sgpr:
-; GCN: v_cmp_nle_f32_e64
define amdgpu_ps void @test_sgpr(float inreg %a) #0 {
+; SI-LABEL: test_sgpr:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmp_nle_f32_e64 vcc, s0, 1.0
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_cbranch_scc0 .LBB23_1
+; SI-NEXT: s_endpgm
+; SI-NEXT: .LBB23_1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: exp null off, off, off, off done vm
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: test_sgpr:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_nle_f32_e64 vcc, s0, 1.0
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_cbranch_scc0 .LBB23_1
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: .LBB23_1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: exp null off, off, off, off done vm
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: test_sgpr:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_cmp_nle_f32_e64 vcc, s0, 1.0
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_cbranch_scc0 .LBB23_1
+; GFX11-NEXT: s_endpgm
+; GFX11-NEXT: .LBB23_1:
+; GFX11-NEXT: s_mov_b64 exec, 0
+; GFX11-NEXT: exp mrt0 off, off, off, off done
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: test_sgpr:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_cmp_le_f32 s0, 1.0
+; GFX12-NEXT: s_cselect_b64 s[0:1], -1, 0
+; GFX12-NEXT: s_and_not1_b64 s[0:1], exec, s[0:1]
+; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, s[0:1]
+; GFX12-NEXT: s_cbranch_scc0 .LBB23_1
+; GFX12-NEXT: s_endpgm
+; GFX12-NEXT: .LBB23_1:
+; GFX12-NEXT: s_mov_b64 exec, 0
+; GFX12-NEXT: export mrt0 off, off, off, off done
+; GFX12-NEXT: s_endpgm
%c = fcmp ole float %a, 1.000000e+00
call void @llvm.amdgcn.kill(i1 %c) #1
ret void
}
-; GCN-LABEL: {{^}}test_non_inline_imm_sgpr:
-; GCN-NOT: v_cmp_le_f32_e64
define amdgpu_ps void @test_non_inline_imm_sgpr(float inreg %a) #0 {
+; SI-LABEL: test_non_inline_imm_sgpr:
+; SI: ; %bb.0:
+; SI-NEXT: v_mov_b32_e32 v0, 0x3fc00000
+; SI-NEXT: v_cmp_le_f32_e32 vcc, s0, v0
+; SI-NEXT: s_andn2_b64 s[0:1], exec, vcc
+; SI-NEXT: s_andn2_b64 s[2:3], exec, s[0:1]
+; SI-NEXT: s_cbranch_scc0 .LBB24_1
+; SI-NEXT: s_endpgm
+; SI-NEXT: .LBB24_1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: exp null off, off, off, off done vm
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: test_non_inline_imm_sgpr:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_ge_f32_e64 s[0:1], 0x3fc00000, s0
+; GFX10-NEXT: s_andn2_b64 s[0:1], exec, s[0:1]
+; GFX10-NEXT: s_andn2_b64 s[2:3], exec, s[0:1]
+; GFX10-NEXT: s_cbranch_scc0 .LBB24_1
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: .LBB24_1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: exp null off, off, off, off done vm
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: test_non_inline_imm_sgpr:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_cmp_ge_f32_e64 s[0:1], 0x3fc00000, s0
+; GFX11-NEXT: s_and_not1_b64 s[0:1], exec, s[0:1]
+; GFX11-NEXT: s_and_not1_b64 s[2:3], exec, s[0:1]
+; GFX11-NEXT: s_cbranch_scc0 .LBB24_1
+; GFX11-NEXT: s_endpgm
+; GFX11-NEXT: .LBB24_1:
+; GFX11-NEXT: s_mov_b64 exec, 0
+; GFX11-NEXT: exp mrt0 off, off, off, off done
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: test_non_inline_imm_sgpr:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_cmp_le_f32 s0, 0x3fc00000
+; GFX12-NEXT: s_cselect_b64 s[0:1], -1, 0
+; GFX12-NEXT: s_and_not1_b64 s[0:1], exec, s[0:1]
+; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, s[0:1]
+; GFX12-NEXT: s_cbranch_scc0 .LBB24_1
+; GFX12-NEXT: s_endpgm
+; GFX12-NEXT: .LBB24_1:
+; GFX12-NEXT: s_mov_b64 exec, 0
+; GFX12-NEXT: export mrt0 off, off, off, off done
+; GFX12-NEXT: s_endpgm
%c = fcmp ole float %a, 1.500000e+00
call void @llvm.amdgcn.kill(i1 %c) #1
ret void
}
-; GCN-LABEL: {{^}}test_scc_liveness:
-; GCN: s_cmp
-; GCN: s_and_b64 exec
-; GCN: s_cmp
-; GCN: s_cbranch_scc
define amdgpu_ps void @test_scc_liveness() #0 {
+; SI-LABEL: test_scc_liveness:
+; SI: ; %bb.0: ; %main_body
+; SI-NEXT: s_mov_b64 s[0:1], exec
+; SI-NEXT: s_mov_b32 s2, 0
+; SI-NEXT: .LBB25_1: ; %loop3
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: s_cmp_gt_i32 s2, 0
+; SI-NEXT: s_cselect_b64 s[4:5], -1, 0
+; SI-NEXT: s_andn2_b64 s[4:5], exec, s[4:5]
+; SI-NEXT: s_andn2_b64 s[0:1], s[0:1], s[4:5]
+; SI-NEXT: s_cbranch_scc0 .LBB25_4
+; SI-NEXT: ; %bb.2: ; %loop3
+; SI-NEXT: ; in Loop: Header=BB25_1 Depth=1
+; SI-NEXT: s_and_b64 exec, exec, s[0:1]
+; SI-NEXT: s_add_i32 s3, s2, 1
+; SI-NEXT: s_cmp_lt_i32 s2, 1
+; SI-NEXT: s_mov_b32 s2, s3
+; SI-NEXT: s_cbranch_scc1 .LBB25_1
+; SI-NEXT: ; %bb.3: ; %endloop15
+; SI-NEXT: s_endpgm
+; SI-NEXT: .LBB25_4:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: exp null off, off, off, off done vm
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: test_scc_liveness:
+; GFX10: ; %bb.0: ; %main_body
+; GFX10-NEXT: s_mov_b64 s[0:1], exec
+; GFX10-NEXT: s_mov_b32 s2, 0
+; GFX10-NEXT: .LBB25_1: ; %loop3
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_cmp_gt_i32 s2, 0
+; GFX10-NEXT: s_cselect_b64 s[4:5], -1, 0
+; GFX10-NEXT: s_andn2_b64 s[4:5], exec, s[4:5]
+; GFX10-NEXT: s_andn2_b64 s[0:1], s[0:1], s[4:5]
+; GFX10-NEXT: s_cbranch_scc0 .LBB25_4
+; GFX10-NEXT: ; %bb.2: ; %loop3
+; GFX10-NEXT: ; in Loop: Header=BB25_1 Depth=1
+; GFX10-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX10-NEXT: s_add_i32 s3, s2, 1
+; GFX10-NEXT: s_cmp_lt_i32 s2, 1
+; GFX10-NEXT: s_mov_b32 s2, s3
+; GFX10-NEXT: s_cbranch_scc1 .LBB25_1
+; GFX10-NEXT: ; %bb.3: ; %endloop15
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: .LBB25_4:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: exp null off, off, off, off done vm
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: test_scc_liveness:
+; GFX11: ; %bb.0: ; %main_body
+; GFX11-NEXT: s_mov_b64 s[0:1], exec
+; GFX11-NEXT: s_mov_b32 s2, 0
+; GFX11-NEXT: .LBB25_1: ; %loop3
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_cmp_gt_i32 s2, 0
+; GFX11-NEXT: s_cselect_b64 s[4:5], -1, 0
+; GFX11-NEXT: s_and_not1_b64 s[4:5], exec, s[4:5]
+; GFX11-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[4:5]
+; GFX11-NEXT: s_cbranch_scc0 .LBB25_4
+; GFX11-NEXT: ; %bb.2: ; %loop3
+; GFX11-NEXT: ; in Loop: Header=BB25_1 Depth=1
+; GFX11-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX11-NEXT: s_add_i32 s3, s2, 1
+; GFX11-NEXT: s_cmp_lt_i32 s2, 1
+; GFX11-NEXT: s_mov_b32 s2, s3
+; GFX11-NEXT: s_cbranch_scc1 .LBB25_1
+; GFX11-NEXT: ; %bb.3: ; %endloop15
+; GFX11-NEXT: s_endpgm
+; GFX11-NEXT: .LBB25_4:
+; GFX11-NEXT: s_mov_b64 exec, 0
+; GFX11-NEXT: exp mrt0 off, off, off, off done
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: test_scc_liveness:
+; GFX12: ; %bb.0: ; %main_body
+; GFX12-NEXT: s_mov_b64 s[0:1], exec
+; GFX12-NEXT: s_mov_b32 s2, 0
+; GFX12-NEXT: .LBB25_1: ; %loop3
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_cmp_gt_i32 s2, 0
+; GFX12-NEXT: s_cselect_b64 s[4:5], -1, 0
+; GFX12-NEXT: s_and_not1_b64 s[4:5], exec, s[4:5]
+; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[4:5]
+; GFX12-NEXT: s_cbranch_scc0 .LBB25_4
+; GFX12-NEXT: ; %bb.2: ; %loop3
+; GFX12-NEXT: ; in Loop: Header=BB25_1 Depth=1
+; GFX12-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX12-NEXT: s_add_co_i32 s3, s2, 1
+; GFX12-NEXT: s_cmp_lt_i32 s2, 1
+; GFX12-NEXT: s_mov_b32 s2, s3
+; GFX12-NEXT: s_cbranch_scc1 .LBB25_1
+; GFX12-NEXT: ; %bb.3: ; %endloop15
+; GFX12-NEXT: s_endpgm
+; GFX12-NEXT: .LBB25_4:
+; GFX12-NEXT: s_mov_b64 exec, 0
+; GFX12-NEXT: export mrt0 off, off, off, off done
+; GFX12-NEXT: s_endpgm
main_body:
br label %loop3
@@ -287,11 +1515,139 @@ endloop15: ; preds = %loop3
; Check this compiles.
; If kill is marked as defining VCC then this will fail with live interval issues.
-; GCN-LABEL: {{^}}kill_with_loop_exit:
-; GCN: s_mov_b64 [[LIVE:s\[[0-9]+:[0-9]+\]]], exec
-; GCN: s_and{{n2|_not1}}_b64 [[LIVE]], [[LIVE]], exec
-; GCN-NEXT: s_cbranch_scc0
define amdgpu_ps void @kill_with_loop_exit(float inreg %inp0, float inreg %inp1, <4 x i32> inreg %inp2, float inreg %inp3) {
+; SI-LABEL: kill_with_loop_exit:
+; SI: ; %bb.0: ; %.entry
+; SI-NEXT: v_mov_b32_e32 v0, 0x43000000
+; SI-NEXT: v_cmp_lt_f32_e32 vcc, s0, v0
+; SI-NEXT: v_cmp_lt_f32_e64 s[0:1], s1, v0
+; SI-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
+; SI-NEXT: s_and_b64 vcc, exec, s[0:1]
+; SI-NEXT: v_mov_b32_e32 v0, 1.0
+; SI-NEXT: s_cbranch_vccnz .LBB26_5
+; SI-NEXT: ; %bb.1: ; %.preheader1.preheader
+; SI-NEXT: v_cmp_ngt_f32_e64 s[0:1], s6, 0
+; SI-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[0:1]
+; SI-NEXT: s_mov_b64 s[2:3], exec
+; SI-NEXT: v_mov_b32_e32 v0, 0x3fc00000
+; SI-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v1
+; SI-NEXT: .LBB26_2: ; %bb
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: s_and_b64 vcc, exec, s[0:1]
+; SI-NEXT: v_add_f32_e32 v0, 0x3e800000, v0
+; SI-NEXT: s_cbranch_vccnz .LBB26_2
+; SI-NEXT: ; %bb.3: ; %bb33
+; SI-NEXT: s_andn2_b64 s[2:3], s[2:3], exec
+; SI-NEXT: s_cbranch_scc0 .LBB26_6
+; SI-NEXT: ; %bb.4: ; %bb33
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: .LBB26_5: ; %bb35
+; SI-NEXT: exp mrt0 v0, v0, v0, v0 done vm
+; SI-NEXT: s_endpgm
+; SI-NEXT: .LBB26_6:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: exp null off, off, off, off done vm
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: kill_with_loop_exit:
+; GFX10: ; %bb.0: ; %.entry
+; GFX10-NEXT: v_cmp_gt_f32_e64 s[4:5], 0x43000000, s0
+; GFX10-NEXT: v_cmp_gt_f32_e64 s[0:1], 0x43000000, s1
+; GFX10-NEXT: v_mov_b32_e32 v0, 1.0
+; GFX10-NEXT: s_and_b64 s[0:1], s[4:5], s[0:1]
+; GFX10-NEXT: s_and_b64 vcc, exec, s[0:1]
+; GFX10-NEXT: s_cbranch_vccnz .LBB26_5
+; GFX10-NEXT: ; %bb.1: ; %.preheader1.preheader
+; GFX10-NEXT: v_cmp_ngt_f32_e64 s[0:1], s6, 0
+; GFX10-NEXT: v_mov_b32_e32 v0, 0x3fc00000
+; GFX10-NEXT: s_mov_b64 s[2:3], exec
+; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[0:1]
+; GFX10-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v1
+; GFX10-NEXT: .LBB26_2: ; %bb
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: v_add_f32_e32 v0, 0x3e800000, v0
+; GFX10-NEXT: s_and_b64 vcc, exec, s[0:1]
+; GFX10-NEXT: s_cbranch_vccnz .LBB26_2
+; GFX10-NEXT: ; %bb.3: ; %bb33
+; GFX10-NEXT: s_andn2_b64 s[2:3], s[2:3], exec
+; GFX10-NEXT: s_cbranch_scc0 .LBB26_6
+; GFX10-NEXT: ; %bb.4: ; %bb33
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: .LBB26_5: ; %bb35
+; GFX10-NEXT: exp mrt0 v0, v0, v0, v0 done vm
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: .LBB26_6:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: exp null off, off, off, off done vm
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: kill_with_loop_exit:
+; GFX11: ; %bb.0: ; %.entry
+; GFX11-NEXT: v_cmp_gt_f32_e64 s[4:5], 0x43000000, s0
+; GFX11-NEXT: v_cmp_gt_f32_e64 s[0:1], 0x43000000, s1
+; GFX11-NEXT: v_mov_b32_e32 v0, 1.0
+; GFX11-NEXT: s_and_b64 s[0:1], s[4:5], s[0:1]
+; GFX11-NEXT: s_and_b64 vcc, exec, s[0:1]
+; GFX11-NEXT: s_cbranch_vccnz .LBB26_5
+; GFX11-NEXT: ; %bb.1: ; %.preheader1.preheader
+; GFX11-NEXT: v_cmp_ngt_f32_e64 s[0:1], s6, 0
+; GFX11-NEXT: v_mov_b32_e32 v0, 0x3fc00000
+; GFX11-NEXT: s_mov_b64 s[2:3], exec
+; GFX11-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[0:1]
+; GFX11-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v1
+; GFX11-NEXT: .LBB26_2: ; %bb
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: v_add_f32_e32 v0, 0x3e800000, v0
+; GFX11-NEXT: s_and_b64 vcc, exec, s[0:1]
+; GFX11-NEXT: s_cbranch_vccnz .LBB26_2
+; GFX11-NEXT: ; %bb.3: ; %bb33
+; GFX11-NEXT: s_and_not1_b64 s[2:3], s[2:3], exec
+; GFX11-NEXT: s_cbranch_scc0 .LBB26_6
+; GFX11-NEXT: ; %bb.4: ; %bb33
+; GFX11-NEXT: s_mov_b64 exec, 0
+; GFX11-NEXT: .LBB26_5: ; %bb35
+; GFX11-NEXT: exp mrt0 v0, v0, v0, v0 done
+; GFX11-NEXT: s_endpgm
+; GFX11-NEXT: .LBB26_6:
+; GFX11-NEXT: s_mov_b64 exec, 0
+; GFX11-NEXT: exp mrt0 off, off, off, off done
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: kill_with_loop_exit:
+; GFX12: ; %bb.0: ; %.entry
+; GFX12-NEXT: s_cmp_lt_f32 s0, 0x43000000
+; GFX12-NEXT: s_cselect_b64 s[4:5], -1, 0
+; GFX12-NEXT: s_cmp_lt_f32 s1, 0x43000000
+; GFX12-NEXT: s_cselect_b64 s[0:1], -1, 0
+; GFX12-NEXT: s_and_b64 s[0:1], s[4:5], s[0:1]
+; GFX12-NEXT: s_mov_b32 s4, 1.0
+; GFX12-NEXT: s_and_b64 vcc, exec, s[0:1]
+; GFX12-NEXT: s_cbranch_vccnz .LBB26_5
+; GFX12-NEXT: ; %bb.1: ; %.preheader1.preheader
+; GFX12-NEXT: s_cmp_ngt_f32 s6, 0
+; GFX12-NEXT: s_mov_b64 s[2:3], exec
+; GFX12-NEXT: s_mov_b32 s4, 0x3fc00000
+; GFX12-NEXT: s_cselect_b64 s[0:1], -1, 0
+; GFX12-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; GFX12-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v0
+; GFX12-NEXT: .LBB26_2: ; %bb
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_add_f32 s4, s4, 0x3e800000
+; GFX12-NEXT: s_and_b64 vcc, exec, s[0:1]
+; GFX12-NEXT: s_cbranch_vccnz .LBB26_2
+; GFX12-NEXT: ; %bb.3: ; %bb33
+; GFX12-NEXT: s_and_not1_b64 s[2:3], s[2:3], exec
+; GFX12-NEXT: s_cbranch_scc0 .LBB26_6
+; GFX12-NEXT: ; %bb.4: ; %bb33
+; GFX12-NEXT: s_mov_b64 exec, 0
+; GFX12-NEXT: .LBB26_5: ; %bb35
+; GFX12-NEXT: v_mov_b32_e32 v0, s4
+; GFX12-NEXT: export mrt0 v0, v0, v0, v0 done
+; GFX12-NEXT: s_endpgm
+; GFX12-NEXT: .LBB26_6:
+; GFX12-NEXT: s_mov_b64 exec, 0
+; GFX12-NEXT: export mrt0 off, off, off, off done
+; GFX12-NEXT: s_endpgm
.entry:
%tmp24 = fcmp olt float %inp0, 1.280000e+02
%tmp25 = fcmp olt float %inp1, 1.280000e+02
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.buffer.load.ll
index 7a20b5c..a2c1545 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.buffer.load.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.buffer.load.ll
@@ -1,27 +1,52 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -global-isel=0 | FileCheck %s -check-prefixes=CHECK,CHECK-SDAG-TRUE16
-; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -global-isel=0 | FileCheck %s -check-prefixes=CHECK,CHECK-FAKE16
-; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -global-isel=1 -new-reg-bank-select | FileCheck %s -check-prefixes=CHECK,CHECK-GISEL
-; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -global-isel=1 -new-reg-bank-select | FileCheck %s -check-prefixes=CHECK,CHECK-GISEL
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -global-isel=0 | FileCheck %s -check-prefixes=GFX11,GFX11-SDAG-TRUE16
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -global-isel=0 | FileCheck %s -check-prefixes=GFX11,GFX11-FAKE16
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -global-isel=1 | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL-TRUE16
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -global-isel=1 | FileCheck %s -check-prefixes=GFX11,GFX11-FAKE16
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -global-isel=1 -new-reg-bank-select | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -global-isel=1 -new-reg-bank-select | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 -global-isel=0 | FileCheck %s -check-prefixes=GFX12,GFX12-SDAG-TRUE16
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 -global-isel=0 | FileCheck %s -check-prefixes=GFX12,GFX12-FAKE16
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 -global-isel=1 | FileCheck %s -check-prefixes=GFX12,GFX12-GISEL-TRUE16
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 -global-isel=1 | FileCheck %s -check-prefixes=GFX12,GFX12-FAKE16
define amdgpu_kernel void @raw_atomic_buffer_load_i32(<4 x i32> %addr) {
-; CHECK-LABEL: raw_atomic_buffer_load_i32:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: .LBB0_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB0_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: raw_atomic_buffer_load_i32:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: .LBB0_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB0_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_atomic_buffer_load_i32:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: .LBB0_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], null th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB0_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -34,23 +59,42 @@ bb2:
}
define amdgpu_kernel void @raw_atomic_buffer_load_i32_off(<4 x i32> %addr) {
-; CHECK-LABEL: raw_atomic_buffer_load_i32_off:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: .LBB1_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB1_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: raw_atomic_buffer_load_i32_off:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: .LBB1_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB1_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_atomic_buffer_load_i32_off:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: .LBB1_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], null th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB1_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -62,23 +106,43 @@ bb2:
ret void
}
define amdgpu_kernel void @raw_atomic_buffer_load_i32_soff(<4 x i32> %addr) {
-; CHECK-LABEL: raw_atomic_buffer_load_i32_soff:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: .LBB2_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 4 offset:4 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB2_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: raw_atomic_buffer_load_i32_soff:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: .LBB2_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 4 offset:4 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB2_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_atomic_buffer_load_i32_soff:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: s_mov_b32 s5, 4
+; GFX12-NEXT: .LBB2_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], s5 offset:4 th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB2_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -90,23 +154,42 @@ bb2:
ret void
}
define amdgpu_kernel void @raw_atomic_buffer_load_i32_dlc(<4 x i32> %addr) {
-; CHECK-LABEL: raw_atomic_buffer_load_i32_dlc:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: .LBB3_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 offset:4 dlc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB3_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: raw_atomic_buffer_load_i32_dlc:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: .LBB3_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 0 offset:4 dlc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB3_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_atomic_buffer_load_i32_dlc:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: .LBB3_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], null offset:4 th:TH_LOAD_NT_RT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB3_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -119,24 +202,44 @@ bb2:
}
define amdgpu_kernel void @raw_nonatomic_buffer_load_i32(<4 x i32> %addr) {
-; CHECK-LABEL: raw_nonatomic_buffer_load_i32:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 offset:4 glc
-; CHECK-NEXT: s_mov_b32 s0, 0
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
-; CHECK-NEXT: .LBB4_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_and_b32 s1, exec_lo, vcc_lo
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; CHECK-NEXT: s_or_b32 s0, s1, s0
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; CHECK-NEXT: s_cbranch_execnz .LBB4_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: raw_nonatomic_buffer_load_i32:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 0 offset:4 glc
+; GFX11-NEXT: s_mov_b32 s0, 0
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-NEXT: .LBB4_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_and_b32 s1, exec_lo, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: s_or_b32 s0, s1, s0
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_execnz .LBB4_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_nonatomic_buffer_load_i32:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], null offset:4 th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s0, 0
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-NEXT: .LBB4_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_and_b32 s1, exec_lo, vcc_lo
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12-NEXT: s_or_b32 s0, s1, s0
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT: s_cbranch_execnz .LBB4_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -149,23 +252,43 @@ bb2:
}
define amdgpu_kernel void @raw_atomic_buffer_load_i64(<4 x i32> %addr) {
-; CHECK-LABEL: raw_atomic_buffer_load_i64:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: .LBB5_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: buffer_load_b64 v[2:3], off, s[0:3], 0 offset:4 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[0:1]
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB5_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: raw_atomic_buffer_load_i64:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: .LBB5_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_load_b64 v[2:3], off, s[0:3], 0 offset:4 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[0:1]
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB5_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_atomic_buffer_load_i64:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: v_mov_b32_e32 v1, 0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: .LBB5_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[0:1]
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB5_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%id.zext = zext i32 %id to i64
@@ -179,23 +302,42 @@ bb2:
}
define amdgpu_kernel void @raw_atomic_buffer_load_v2i16(<4 x i32> %addr) {
-; CHECK-LABEL: raw_atomic_buffer_load_v2i16:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: .LBB6_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB6_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: raw_atomic_buffer_load_v2i16:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: .LBB6_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB6_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_atomic_buffer_load_v2i16:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: .LBB6_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], null th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB6_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -209,68 +351,151 @@ bb2:
}
define amdgpu_kernel void @raw_atomic_buffer_load_v4i16(<4 x i32> %addr) {
-; CHECK-SDAG-TRUE16-LABEL: raw_atomic_buffer_load_v4i16:
-; CHECK-SDAG-TRUE16: ; %bb.0: ; %bb
-; CHECK-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0
-; CHECK-SDAG-TRUE16-NEXT: .LBB7_1: ; %bb1
-; CHECK-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-SDAG-TRUE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
-; CHECK-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; CHECK-SDAG-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; CHECK-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; CHECK-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, v2, 16, v1
-; CHECK-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
-; CHECK-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB7_1
-; CHECK-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2
-; CHECK-SDAG-TRUE16-NEXT: s_endpgm
+; GFX11-SDAG-TRUE16-LABEL: raw_atomic_buffer_load_v4i16:
+; GFX11-SDAG-TRUE16: ; %bb.0: ; %bb
+; GFX11-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-SDAG-TRUE16-NEXT: .LBB7_1: ; %bb1
+; GFX11-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-TRUE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, v2, 16, v1
+; GFX11-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB7_1
+; GFX11-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2
+; GFX11-SDAG-TRUE16-NEXT: s_endpgm
;
-; CHECK-FAKE16-LABEL: raw_atomic_buffer_load_v4i16:
-; CHECK-FAKE16: ; %bb.0: ; %bb
-; CHECK-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-FAKE16-NEXT: s_mov_b32 s4, 0
-; CHECK-FAKE16-NEXT: .LBB7_1: ; %bb1
-; CHECK-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-FAKE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
-; CHECK-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; CHECK-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; CHECK-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; CHECK-FAKE16-NEXT: v_lshl_or_b32 v1, v2, 16, v1
-; CHECK-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
-; CHECK-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-FAKE16-NEXT: s_cbranch_execnz .LBB7_1
-; CHECK-FAKE16-NEXT: ; %bb.2: ; %bb2
-; CHECK-FAKE16-NEXT: s_endpgm
+; GFX11-FAKE16-LABEL: raw_atomic_buffer_load_v4i16:
+; GFX11-FAKE16: ; %bb.0: ; %bb
+; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: .LBB7_1: ; %bb1
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v2, 16, v1
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB7_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %bb2
+; GFX11-FAKE16-NEXT: s_endpgm
;
-; CHECK-GISEL-LABEL: raw_atomic_buffer_load_v4i16:
-; CHECK-GISEL: ; %bb.0: ; %bb
-; CHECK-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-GISEL-NEXT: s_mov_b32 s4, 0
-; CHECK-GISEL-NEXT: .LBB7_1: ; %bb1
-; CHECK-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-GISEL-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
-; CHECK-GISEL-NEXT: s_waitcnt vmcnt(0)
-; CHECK-GISEL-NEXT: v_readfirstlane_b32 s5, v1
-; CHECK-GISEL-NEXT: v_readfirstlane_b32 s6, v2
-; CHECK-GISEL-NEXT: s_pack_ll_b32_b16 s5, s5, s6
-; CHECK-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; CHECK-GISEL-NEXT: v_cmp_ne_u32_e32 vcc_lo, s5, v0
-; CHECK-GISEL-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-GISEL-NEXT: s_cbranch_execnz .LBB7_1
-; CHECK-GISEL-NEXT: ; %bb.2: ; %bb2
-; CHECK-GISEL-NEXT: s_endpgm
+; GFX11-GISEL-TRUE16-LABEL: raw_atomic_buffer_load_v4i16:
+; GFX11-GISEL-TRUE16: ; %bb.0: ; %bb
+; GFX11-GISEL-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-GISEL-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-GISEL-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-GISEL-TRUE16-NEXT: .LBB7_1: ; %bb1
+; GFX11-GISEL-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-GISEL-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-GISEL-TRUE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
+; GFX11-GISEL-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-TRUE16-NEXT: v_mov_b16_e32 v1.h, v2.l
+; GFX11-GISEL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-GISEL-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-GISEL-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-GISEL-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-GISEL-TRUE16-NEXT: s_cbranch_execnz .LBB7_1
+; GFX11-GISEL-TRUE16-NEXT: ; %bb.2: ; %bb2
+; GFX11-GISEL-TRUE16-NEXT: s_endpgm
+;
+; GFX11-GISEL-LABEL: raw_atomic_buffer_load_v4i16:
+; GFX11-GISEL: ; %bb.0: ; %bb
+; GFX11-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-GISEL-NEXT: s_mov_b32 s4, 0
+; GFX11-GISEL-NEXT: .LBB7_1: ; %bb1
+; GFX11-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-GISEL-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-GISEL-NEXT: v_readfirstlane_b32 s6, v2
+; GFX11-GISEL-NEXT: s_pack_ll_b32_b16 s5, s5, s6
+; GFX11-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-GISEL-NEXT: v_cmp_ne_u32_e32 vcc_lo, s5, v0
+; GFX11-GISEL-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-GISEL-NEXT: s_cbranch_execnz .LBB7_1
+; GFX11-GISEL-NEXT: ; %bb.2: ; %bb2
+; GFX11-GISEL-NEXT: s_endpgm
+;
+; GFX12-SDAG-TRUE16-LABEL: raw_atomic_buffer_load_v4i16:
+; GFX12-SDAG-TRUE16: ; %bb.0: ; %bb
+; GFX12-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-SDAG-TRUE16-NEXT: s_wait_xcnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX12-SDAG-TRUE16-NEXT: .LBB7_1: ; %bb1
+; GFX12-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT
+; GFX12-SDAG-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v2
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, v3, 16, v1
+; GFX12-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB7_1
+; GFX12-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2
+; GFX12-SDAG-TRUE16-NEXT: s_endpgm
+;
+; GFX12-FAKE16-LABEL: raw_atomic_buffer_load_v4i16:
+; GFX12-FAKE16: ; %bb.0: ; %bb
+; GFX12-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX12-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX12-FAKE16-NEXT: .LBB7_1: ; %bb1
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshl_or_b32 v1, v3, 16, v1
+; GFX12-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB7_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %bb2
+; GFX12-FAKE16-NEXT: s_endpgm
+;
+; GFX12-GISEL-TRUE16-LABEL: raw_atomic_buffer_load_v4i16:
+; GFX12-GISEL-TRUE16: ; %bb.0: ; %bb
+; GFX12-GISEL-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-GISEL-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-GISEL-TRUE16-NEXT: s_wait_xcnt 0x0
+; GFX12-GISEL-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX12-GISEL-TRUE16-NEXT: .LBB7_1: ; %bb1
+; GFX12-GISEL-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-GISEL-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-TRUE16-NEXT: buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT
+; GFX12-GISEL-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l
+; GFX12-GISEL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX12-GISEL-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-GISEL-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-GISEL-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-GISEL-TRUE16-NEXT: s_cbranch_execnz .LBB7_1
+; GFX12-GISEL-TRUE16-NEXT: ; %bb.2: ; %bb2
+; GFX12-GISEL-TRUE16-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -285,23 +510,42 @@ bb2:
}
define amdgpu_kernel void @raw_atomic_buffer_load_v4i32(<4 x i32> %addr) {
-; CHECK-LABEL: raw_atomic_buffer_load_v4i32:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: .LBB8_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: buffer_load_b128 v[1:4], off, s[0:3], 0 offset:4 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v4, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB8_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: raw_atomic_buffer_load_v4i32:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: .LBB8_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_load_b128 v[1:4], off, s[0:3], 0 offset:4 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v4, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB8_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_atomic_buffer_load_v4i32:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: .LBB8_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b128 v[2:5], off, s[0:3], null offset:4 th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v5, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB8_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -315,25 +559,46 @@ bb2:
}
define amdgpu_kernel void @raw_atomic_buffer_load_ptr(<4 x i32> %addr) {
-; CHECK-LABEL: raw_atomic_buffer_load_ptr:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: .LBB9_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: flat_load_b32 v1, v[1:2]
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB9_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: raw_atomic_buffer_load_ptr:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: .LBB9_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: flat_load_b32 v1, v[1:2]
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB9_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_atomic_buffer_load_ptr:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: .LBB9_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: flat_load_b32 v1, v[2:3]
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB9_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.atomic.fadd.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.atomic.fadd.ll
index 5c0e34c..d51e912 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.atomic.fadd.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.atomic.fadd.ll
@@ -1,58 +1,95 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx908 < %s | FileCheck -check-prefix=CHECK %s
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx908 < %s | FileCheck -check-prefix=GFX9 %s
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 < %s | FileCheck -check-prefix=GFX12 %s
define void @raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(float %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
-; CHECK-LABEL: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
-; CHECK: ; %bb.0:
-; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: buffer_atomic_add_f32 v0, v1, s[16:19], s20 offen offset:24
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: s_setpc_b64 s[30:31]
+; GFX9-LABEL: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: buffer_atomic_add_f32 v0, v1, s[16:19], s20 offen offset:24
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_add_nc_u32_e32 v1, 24, v1
+; GFX12-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen
+; GFX12-NEXT: s_set_pc_i64 s[30:31]
%voffset.add = add i32 %voffset, 24
%ret = call float @llvm.amdgcn.raw.buffer.atomic.fadd.f32(float %val, <4 x i32> %rsrc, i32 %voffset.add, i32 %soffset, i32 0)
ret void
}
define void @raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset(float %val, <4 x i32> inreg %rsrc, i32 inreg %soffset) {
-; CHECK-LABEL: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
-; CHECK: ; %bb.0:
-; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: buffer_atomic_add_f32 v0, off, s[16:19], s20
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: s_setpc_b64 s[30:31]
+; GFX9-LABEL: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: buffer_atomic_add_f32 v0, off, s[16:19], s20
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_atomic_add_f32 v0, off, s[0:3], s16
+; GFX12-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.raw.buffer.atomic.fadd.f32(float %val, <4 x i32> %rsrc, i32 0, i32 %soffset, i32 0)
ret void
}
define void @raw_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(<2 x half> %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
-; CHECK-LABEL: raw_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
-; CHECK: ; %bb.0:
-; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: buffer_atomic_pk_add_f16 v0, v1, s[16:19], s20 offen
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: s_setpc_b64 s[30:31]
+; GFX9-LABEL: raw_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: buffer_atomic_pk_add_f16 v0, v1, s[16:19], s20 offen
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: raw_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_atomic_pk_add_f16 v0, v1, s[0:3], s16 offen
+; GFX12-NEXT: s_set_pc_i64 s[30:31]
%ret = call <2 x half> @llvm.amdgcn.raw.buffer.atomic.fadd.v2f16(<2 x half> %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret void
}
define void @raw_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset(<2 x half> %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
-; CHECK-LABEL: raw_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
-; CHECK: ; %bb.0:
-; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: buffer_atomic_pk_add_f16 v0, off, s[16:19], s20 offset:92
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: s_setpc_b64 s[30:31]
+; GFX9-LABEL: raw_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: buffer_atomic_pk_add_f16 v0, off, s[16:19], s20 offset:92
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: raw_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_atomic_pk_add_f16 v0, off, s[0:3], s16 offset:92
+; GFX12-NEXT: s_set_pc_i64 s[30:31]
%ret = call <2 x half> @llvm.amdgcn.raw.buffer.atomic.fadd.v2f16(<2 x half> %val, <4 x i32> %rsrc, i32 92, i32 %soffset, i32 0)
ret void
}
define void @raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc(float %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
-; CHECK-LABEL: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
-; CHECK: ; %bb.0:
-; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: buffer_atomic_add_f32 v0, v1, s[16:19], s20 offen slc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: s_setpc_b64 s[30:31]
+; GFX9-LABEL: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: buffer_atomic_add_f32 v0, v1, s[16:19], s20 offen slc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_NT
+; GFX12-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.raw.buffer.atomic.fadd.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 2)
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.load.tfe.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.load.tfe.ll
index 8a6594f..1a1a1f7 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.load.tfe.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.load.tfe.ll
@@ -6,6 +6,7 @@
; RUN: llc -mcpu=gfx1010 -mtriple=amdgcn-- < %s | FileCheck %s -check-prefixes=GFX910,GFX10
; RUN: llc -mcpu=gfx1100 -mtriple=amdgcn-- < %s | FileCheck %s -check-prefix=GFX11
; RUN: llc -mcpu=gfx1200 -mtriple=amdgcn-- < %s | FileCheck %s -check-prefix=GFX12
+; RUN: llc -mcpu=gfx1250 -mtriple=amdgcn-- < %s | FileCheck %s -check-prefix=GFX12
define amdgpu_ps void @raw_buffer_load_i8_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
; GFX67-LABEL: raw_buffer_load_i8_tfe:
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.store.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.store.ll
index 89511de..eeea1456 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.store.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.store.ll
@@ -3,6 +3,7 @@
; RUN: llc < %s -mtriple=amdgcn -mcpu=tonga | FileCheck -check-prefixes=GFX68,GFX8 %s
; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 | FileCheck -check-prefixes=GFX11 %s
; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1200 | FileCheck -check-prefixes=GFX12 %s
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 | FileCheck -check-prefixes=GFX12 %s
define amdgpu_ps void @buffer_store(<4 x i32> inreg, <4 x float>, <4 x float>, <4 x float>) {
; GFX68-LABEL: buffer_store:
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.atomic.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.atomic.buffer.load.ll
index 561ec7d..6f7c001 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.atomic.buffer.load.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.atomic.buffer.load.ll
@@ -1,27 +1,52 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -global-isel=0 | FileCheck %s -check-prefixes=CHECK,CHECK-SDAG-TRUE16
-; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -global-isel=0 | FileCheck %s -check-prefixes=CHECK,CHECK-FAKE16
-; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -global-isel=1 -new-reg-bank-select | FileCheck %s -check-prefixes=CHECK,CHECK-GISEL
-; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -global-isel=1 -new-reg-bank-select | FileCheck %s -check-prefixes=CHECK,CHECK-GISEL
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -global-isel=0 | FileCheck %s -check-prefixes=GFX11,GFX11-SDAG-TRUE16
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -global-isel=0 | FileCheck %s -check-prefixes=GFX11,GFX11-FAKE16
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -global-isel=1 | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL-TRUE16
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -global-isel=1 | FileCheck %s -check-prefixes=GFX11,GFX11-FAKE16
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -global-isel=1 -new-reg-bank-select | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -global-isel=1 -new-reg-bank-select | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 -global-isel=0 | FileCheck %s -check-prefixes=GFX12,GFX12-SDAG-TRUE16
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 -global-isel=0 | FileCheck %s -check-prefixes=GFX12,GFX12-FAKE16
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 -global-isel=1 | FileCheck %s -check-prefixes=GFX12,GFX12-GISEL-TRUE16
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 -global-isel=1 | FileCheck %s -check-prefixes=GFX12,GFX12-FAKE16
define amdgpu_kernel void @raw_ptr_atomic_buffer_ptr_load_i32(ptr addrspace(8) %ptr) {
-; CHECK-LABEL: raw_ptr_atomic_buffer_ptr_load_i32:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: .LBB0_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB0_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: raw_ptr_atomic_buffer_ptr_load_i32:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: .LBB0_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB0_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_ptr_atomic_buffer_ptr_load_i32:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: .LBB0_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], null th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB0_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -34,23 +59,42 @@ bb2:
}
define amdgpu_kernel void @raw_ptr_atomic_buffer_load_i32_off(ptr addrspace(8) %ptr) {
-; CHECK-LABEL: raw_ptr_atomic_buffer_load_i32_off:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: .LBB1_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB1_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: raw_ptr_atomic_buffer_load_i32_off:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: .LBB1_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB1_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_ptr_atomic_buffer_load_i32_off:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: .LBB1_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], null th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB1_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -62,23 +106,43 @@ bb2:
ret void
}
define amdgpu_kernel void @raw_ptr_atomic_buffer_load_i32_soff(ptr addrspace(8) %ptr) {
-; CHECK-LABEL: raw_ptr_atomic_buffer_load_i32_soff:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: .LBB2_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 4 offset:4 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB2_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: raw_ptr_atomic_buffer_load_i32_soff:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: .LBB2_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 4 offset:4 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB2_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_ptr_atomic_buffer_load_i32_soff:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: s_mov_b32 s5, 4
+; GFX12-NEXT: .LBB2_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], s5 offset:4 th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB2_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -90,23 +154,42 @@ bb2:
ret void
}
define amdgpu_kernel void @raw_ptr_atomic_buffer_load_i32_dlc(ptr addrspace(8) %ptr) {
-; CHECK-LABEL: raw_ptr_atomic_buffer_load_i32_dlc:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: .LBB3_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 offset:4 dlc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB3_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: raw_ptr_atomic_buffer_load_i32_dlc:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: .LBB3_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 0 offset:4 dlc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB3_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_ptr_atomic_buffer_load_i32_dlc:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: .LBB3_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], null offset:4 th:TH_LOAD_NT_RT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB3_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -119,24 +202,44 @@ bb2:
}
define amdgpu_kernel void @raw_nonptr_atomic_buffer_load_i32(ptr addrspace(8) %ptr) {
-; CHECK-LABEL: raw_nonptr_atomic_buffer_load_i32:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 offset:4 glc
-; CHECK-NEXT: s_mov_b32 s0, 0
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
-; CHECK-NEXT: .LBB4_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_and_b32 s1, exec_lo, vcc_lo
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; CHECK-NEXT: s_or_b32 s0, s1, s0
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; CHECK-NEXT: s_cbranch_execnz .LBB4_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: raw_nonptr_atomic_buffer_load_i32:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 0 offset:4 glc
+; GFX11-NEXT: s_mov_b32 s0, 0
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-NEXT: .LBB4_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_and_b32 s1, exec_lo, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: s_or_b32 s0, s1, s0
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_execnz .LBB4_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_nonptr_atomic_buffer_load_i32:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], null offset:4 th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s0, 0
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-NEXT: .LBB4_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_and_b32 s1, exec_lo, vcc_lo
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12-NEXT: s_or_b32 s0, s1, s0
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT: s_cbranch_execnz .LBB4_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -149,23 +252,43 @@ bb2:
}
define amdgpu_kernel void @raw_ptr_atomic_buffer_load_i64(ptr addrspace(8) %ptr) {
-; CHECK-LABEL: raw_ptr_atomic_buffer_load_i64:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: .LBB5_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: buffer_load_b64 v[2:3], off, s[0:3], 0 offset:4 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[0:1]
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB5_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: raw_ptr_atomic_buffer_load_i64:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: .LBB5_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_load_b64 v[2:3], off, s[0:3], 0 offset:4 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[0:1]
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB5_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_ptr_atomic_buffer_load_i64:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: v_mov_b32_e32 v1, 0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: .LBB5_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[0:1]
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB5_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%id.zext = zext i32 %id to i64
@@ -179,23 +302,42 @@ bb2:
}
define amdgpu_kernel void @raw_ptr_atomic_buffer_load_v2i16(ptr addrspace(8) %ptr) {
-; CHECK-LABEL: raw_ptr_atomic_buffer_load_v2i16:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: .LBB6_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB6_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: raw_ptr_atomic_buffer_load_v2i16:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: .LBB6_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB6_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_ptr_atomic_buffer_load_v2i16:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: .LBB6_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], null th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB6_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -209,68 +351,151 @@ bb2:
}
define amdgpu_kernel void @raw_ptr_atomic_buffer_load_v4i16(ptr addrspace(8) %ptr) {
-; CHECK-SDAG-TRUE16-LABEL: raw_ptr_atomic_buffer_load_v4i16:
-; CHECK-SDAG-TRUE16: ; %bb.0: ; %bb
-; CHECK-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0
-; CHECK-SDAG-TRUE16-NEXT: .LBB7_1: ; %bb1
-; CHECK-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-SDAG-TRUE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
-; CHECK-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; CHECK-SDAG-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; CHECK-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; CHECK-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, v2, 16, v1
-; CHECK-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
-; CHECK-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB7_1
-; CHECK-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2
-; CHECK-SDAG-TRUE16-NEXT: s_endpgm
+; GFX11-SDAG-TRUE16-LABEL: raw_ptr_atomic_buffer_load_v4i16:
+; GFX11-SDAG-TRUE16: ; %bb.0: ; %bb
+; GFX11-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-SDAG-TRUE16-NEXT: .LBB7_1: ; %bb1
+; GFX11-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-TRUE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, v2, 16, v1
+; GFX11-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB7_1
+; GFX11-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2
+; GFX11-SDAG-TRUE16-NEXT: s_endpgm
;
-; CHECK-FAKE16-LABEL: raw_ptr_atomic_buffer_load_v4i16:
-; CHECK-FAKE16: ; %bb.0: ; %bb
-; CHECK-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-FAKE16-NEXT: s_mov_b32 s4, 0
-; CHECK-FAKE16-NEXT: .LBB7_1: ; %bb1
-; CHECK-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-FAKE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
-; CHECK-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; CHECK-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; CHECK-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; CHECK-FAKE16-NEXT: v_lshl_or_b32 v1, v2, 16, v1
-; CHECK-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
-; CHECK-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-FAKE16-NEXT: s_cbranch_execnz .LBB7_1
-; CHECK-FAKE16-NEXT: ; %bb.2: ; %bb2
-; CHECK-FAKE16-NEXT: s_endpgm
+; GFX11-FAKE16-LABEL: raw_ptr_atomic_buffer_load_v4i16:
+; GFX11-FAKE16: ; %bb.0: ; %bb
+; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: .LBB7_1: ; %bb1
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v2, 16, v1
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB7_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %bb2
+; GFX11-FAKE16-NEXT: s_endpgm
;
-; CHECK-GISEL-LABEL: raw_ptr_atomic_buffer_load_v4i16:
-; CHECK-GISEL: ; %bb.0: ; %bb
-; CHECK-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-GISEL-NEXT: s_mov_b32 s4, 0
-; CHECK-GISEL-NEXT: .LBB7_1: ; %bb1
-; CHECK-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-GISEL-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
-; CHECK-GISEL-NEXT: s_waitcnt vmcnt(0)
-; CHECK-GISEL-NEXT: v_readfirstlane_b32 s5, v1
-; CHECK-GISEL-NEXT: v_readfirstlane_b32 s6, v2
-; CHECK-GISEL-NEXT: s_pack_ll_b32_b16 s5, s5, s6
-; CHECK-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; CHECK-GISEL-NEXT: v_cmp_ne_u32_e32 vcc_lo, s5, v0
-; CHECK-GISEL-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-GISEL-NEXT: s_cbranch_execnz .LBB7_1
-; CHECK-GISEL-NEXT: ; %bb.2: ; %bb2
-; CHECK-GISEL-NEXT: s_endpgm
+; GFX11-GISEL-TRUE16-LABEL: raw_ptr_atomic_buffer_load_v4i16:
+; GFX11-GISEL-TRUE16: ; %bb.0: ; %bb
+; GFX11-GISEL-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-GISEL-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-GISEL-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-GISEL-TRUE16-NEXT: .LBB7_1: ; %bb1
+; GFX11-GISEL-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-GISEL-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-GISEL-TRUE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
+; GFX11-GISEL-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-TRUE16-NEXT: v_mov_b16_e32 v1.h, v2.l
+; GFX11-GISEL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-GISEL-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-GISEL-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-GISEL-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-GISEL-TRUE16-NEXT: s_cbranch_execnz .LBB7_1
+; GFX11-GISEL-TRUE16-NEXT: ; %bb.2: ; %bb2
+; GFX11-GISEL-TRUE16-NEXT: s_endpgm
+;
+; GFX11-GISEL-LABEL: raw_ptr_atomic_buffer_load_v4i16:
+; GFX11-GISEL: ; %bb.0: ; %bb
+; GFX11-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-GISEL-NEXT: s_mov_b32 s4, 0
+; GFX11-GISEL-NEXT: .LBB7_1: ; %bb1
+; GFX11-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-GISEL-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-GISEL-NEXT: v_readfirstlane_b32 s6, v2
+; GFX11-GISEL-NEXT: s_pack_ll_b32_b16 s5, s5, s6
+; GFX11-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-GISEL-NEXT: v_cmp_ne_u32_e32 vcc_lo, s5, v0
+; GFX11-GISEL-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-GISEL-NEXT: s_cbranch_execnz .LBB7_1
+; GFX11-GISEL-NEXT: ; %bb.2: ; %bb2
+; GFX11-GISEL-NEXT: s_endpgm
+;
+; GFX12-SDAG-TRUE16-LABEL: raw_ptr_atomic_buffer_load_v4i16:
+; GFX12-SDAG-TRUE16: ; %bb.0: ; %bb
+; GFX12-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-SDAG-TRUE16-NEXT: s_wait_xcnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX12-SDAG-TRUE16-NEXT: .LBB7_1: ; %bb1
+; GFX12-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT
+; GFX12-SDAG-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v2
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, v3, 16, v1
+; GFX12-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB7_1
+; GFX12-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2
+; GFX12-SDAG-TRUE16-NEXT: s_endpgm
+;
+; GFX12-FAKE16-LABEL: raw_ptr_atomic_buffer_load_v4i16:
+; GFX12-FAKE16: ; %bb.0: ; %bb
+; GFX12-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX12-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX12-FAKE16-NEXT: .LBB7_1: ; %bb1
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshl_or_b32 v1, v3, 16, v1
+; GFX12-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB7_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %bb2
+; GFX12-FAKE16-NEXT: s_endpgm
+;
+; GFX12-GISEL-TRUE16-LABEL: raw_ptr_atomic_buffer_load_v4i16:
+; GFX12-GISEL-TRUE16: ; %bb.0: ; %bb
+; GFX12-GISEL-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-GISEL-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-GISEL-TRUE16-NEXT: s_wait_xcnt 0x0
+; GFX12-GISEL-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX12-GISEL-TRUE16-NEXT: .LBB7_1: ; %bb1
+; GFX12-GISEL-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-GISEL-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-TRUE16-NEXT: buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT
+; GFX12-GISEL-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l
+; GFX12-GISEL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX12-GISEL-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-GISEL-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-GISEL-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-GISEL-TRUE16-NEXT: s_cbranch_execnz .LBB7_1
+; GFX12-GISEL-TRUE16-NEXT: ; %bb.2: ; %bb2
+; GFX12-GISEL-TRUE16-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -285,23 +510,42 @@ bb2:
}
define amdgpu_kernel void @raw_ptr_atomic_buffer_load_v4i32(ptr addrspace(8) %ptr) {
-; CHECK-LABEL: raw_ptr_atomic_buffer_load_v4i32:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: .LBB8_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: buffer_load_b128 v[1:4], off, s[0:3], 0 offset:4 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v4, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB8_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: raw_ptr_atomic_buffer_load_v4i32:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: .LBB8_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_load_b128 v[1:4], off, s[0:3], 0 offset:4 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v4, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB8_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_ptr_atomic_buffer_load_v4i32:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: .LBB8_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b128 v[2:5], off, s[0:3], null offset:4 th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v5, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB8_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -315,25 +559,46 @@ bb2:
}
define amdgpu_kernel void @raw_ptr_atomic_buffer_load_ptr(ptr addrspace(8) %ptr) {
-; CHECK-LABEL: raw_ptr_atomic_buffer_load_ptr:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: .LBB9_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: flat_load_b32 v1, v[1:2]
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB9_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: raw_ptr_atomic_buffer_load_ptr:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: .LBB9_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: flat_load_b32 v1, v[1:2]
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB9_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_ptr_atomic_buffer_load_ptr:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: .LBB9_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: flat_load_b32 v1, v[2:3]
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB9_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2bf16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2bf16.ll
index 8b6ba1a..2c3b521 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2bf16.ll
@@ -1,104 +1,174 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; FIXME: Test 90a, 940. 908 should fail to select.
-; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX1200 %s
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 < %s | FileCheck -check-prefix=GFX1250 %s
define <2 x bfloat> @raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset(<2 x bfloat> %val, ptr addrspace(8) inreg %rsrc, i32 %voffset, i32 inreg %soffset) #0 {
-; GFX12-LABEL: raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_pk_add_bf16 v0, v1, s[0:3], s16 offen offset:128 th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_pk_add_bf16 v0, v1, s[0:3], s16 offen offset:128 th:TH_ATOMIC_RETURN
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_add_nc_u32_e32 v1, 0x80, v1
+; GFX1250-NEXT: buffer_atomic_pk_add_bf16 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%voffset.add = add i32 %voffset, 128
%ret = call <2 x bfloat> @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2bf16(<2 x bfloat> %val, ptr addrspace(8) %rsrc, i32 %voffset.add, i32 %soffset, i32 0)
ret <2 x bfloat> %ret
}
define <2 x bfloat> @raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset__slc(<2 x bfloat> %val, ptr addrspace(8) inreg %rsrc, i32 %voffset, i32 inreg %soffset) #0 {
-; GFX12-LABEL: raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset__slc:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_pk_add_bf16 v0, off, s[0:3], s16 offset:92 th:TH_ATOMIC_NT_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset__slc:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_pk_add_bf16 v0, off, s[0:3], s16 offset:92 th:TH_ATOMIC_NT_RETURN
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset__slc:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: buffer_atomic_pk_add_bf16 v0, off, s[0:3], s16 offset:92 th:TH_ATOMIC_NT_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call <2 x bfloat> @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2bf16(<2 x bfloat> %val, ptr addrspace(8) %rsrc, i32 92, i32 %soffset, i32 2)
ret <2 x bfloat> %ret
}
define void @raw_ptr_buffer_atomic_add_v2bf16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset(<2 x bfloat> %val, ptr addrspace(8) inreg %rsrc, i32 %voffset, i32 inreg %soffset) #0 {
-; GFX12-LABEL: raw_ptr_buffer_atomic_add_v2bf16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_pk_add_bf16 v0, v1, s[0:3], s16 offen offset:128
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: raw_ptr_buffer_atomic_add_v2bf16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_pk_add_bf16 v0, v1, s[0:3], s16 offen offset:128
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_add_v2bf16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_add_nc_u32_e32 v1, 0x80, v1
+; GFX1250-NEXT: buffer_atomic_pk_add_bf16 v0, v1, s[0:3], s16 offen
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%voffset.add = add i32 %voffset, 128
%unused = call <2 x bfloat> @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2bf16(<2 x bfloat> %val, ptr addrspace(8) %rsrc, i32 %voffset.add, i32 %soffset, i32 0)
ret void
}
define void @raw_ptr_buffer_atomic_add_v2bf16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset__slc(<2 x bfloat> %val, ptr addrspace(8) inreg %rsrc, i32 %voffset, i32 inreg %soffset) #0 {
-; GFX12-LABEL: raw_ptr_buffer_atomic_add_v2bf16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset__slc:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_pk_add_bf16 v0, off, s[0:3], s16 offset:92 th:TH_ATOMIC_NT
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: raw_ptr_buffer_atomic_add_v2bf16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset__slc:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_pk_add_bf16 v0, off, s[0:3], s16 offset:92 th:TH_ATOMIC_NT
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_add_v2bf16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset__slc:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: buffer_atomic_pk_add_bf16 v0, off, s[0:3], s16 offset:92 th:TH_ATOMIC_NT
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%unused = call <2 x bfloat> @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2bf16(<2 x bfloat> %val, ptr addrspace(8) %rsrc, i32 92, i32 %soffset, i32 2)
ret void
}
; Test waterfall loop
define <2 x bfloat> @raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__vgpr_rsrc__vgpr_voffset_add__vgpr_soffset(<2 x bfloat> %val, ptr addrspace(8) %rsrc, i32 %voffset, i32 %soffset) #0 {
-; GFX12-LABEL: raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__vgpr_rsrc__vgpr_voffset_add__vgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: s_mov_b32 s2, exec_lo
-; GFX12-NEXT: .LBB4_1: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: v_readfirstlane_b32 s4, v1
-; GFX12-NEXT: v_readfirstlane_b32 s5, v2
-; GFX12-NEXT: v_readfirstlane_b32 s6, v3
-; GFX12-NEXT: v_readfirstlane_b32 s7, v4
-; GFX12-NEXT: v_readfirstlane_b32 s3, v6
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[1:2]
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[3:4]
-; GFX12-NEXT: v_cmp_eq_u32_e64 s1, s3, v6
-; GFX12-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_b32 s0, s0, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_saveexec_b32 s0, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: buffer_atomic_pk_add_bf16 v0, v5, s[4:7], s3 offen offset:128 th:TH_ATOMIC_RETURN
-; GFX12-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4
-; GFX12-NEXT: ; implicit-def: $vgpr6
-; GFX12-NEXT: ; implicit-def: $vgpr5
-; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB4_1
-; GFX12-NEXT: ; %bb.2:
-; GFX12-NEXT: s_mov_b32 exec_lo, s2
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__vgpr_rsrc__vgpr_voffset_add__vgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: s_mov_b32 s2, exec_lo
+; GFX1200-NEXT: .LBB4_1: ; =>This Inner Loop Header: Depth=1
+; GFX1200-NEXT: v_readfirstlane_b32 s4, v1
+; GFX1200-NEXT: v_readfirstlane_b32 s5, v2
+; GFX1200-NEXT: v_readfirstlane_b32 s6, v3
+; GFX1200-NEXT: v_readfirstlane_b32 s7, v4
+; GFX1200-NEXT: v_readfirstlane_b32 s3, v6
+; GFX1200-NEXT: s_wait_alu 0xf1ff
+; GFX1200-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[1:2]
+; GFX1200-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1200-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[3:4]
+; GFX1200-NEXT: v_cmp_eq_u32_e64 s1, s3, v6
+; GFX1200-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX1200-NEXT: s_wait_alu 0xfffe
+; GFX1200-NEXT: s_and_b32 s0, s0, s1
+; GFX1200-NEXT: s_wait_alu 0xfffe
+; GFX1200-NEXT: s_and_saveexec_b32 s0, s0
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: buffer_atomic_pk_add_bf16 v0, v5, s[4:7], s3 offen offset:128 th:TH_ATOMIC_RETURN
+; GFX1200-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4
+; GFX1200-NEXT: ; implicit-def: $vgpr6
+; GFX1200-NEXT: ; implicit-def: $vgpr5
+; GFX1200-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX1200-NEXT: s_cbranch_execnz .LBB4_1
+; GFX1200-NEXT: ; %bb.2:
+; GFX1200-NEXT: s_mov_b32 exec_lo, s2
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__vgpr_rsrc__vgpr_voffset_add__vgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v11, v4 :: v_dual_mov_b32 v10, v3
+; GFX1250-NEXT: v_dual_mov_b32 v9, v2 :: v_dual_mov_b32 v8, v1
+; GFX1250-NEXT: v_add_nc_u32_e32 v1, 0x80, v5
+; GFX1250-NEXT: s_mov_b32 s2, exec_lo
+; GFX1250-NEXT: .LBB4_1: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT: v_readfirstlane_b32 s4, v8
+; GFX1250-NEXT: v_readfirstlane_b32 s5, v9
+; GFX1250-NEXT: v_readfirstlane_b32 s6, v10
+; GFX1250-NEXT: v_readfirstlane_b32 s7, v11
+; GFX1250-NEXT: v_readfirstlane_b32 s3, v6
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[8:9]
+; GFX1250-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[10:11]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: v_cmp_eq_u32_e64 s1, s3, v6
+; GFX1250-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_and_b32 s0, s0, s1
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_and_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: buffer_atomic_pk_add_bf16 v0, v1, s[4:7], s3 offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: ; implicit-def: $vgpr8_vgpr9_vgpr10_vgpr11
+; GFX1250-NEXT: ; implicit-def: $vgpr6
+; GFX1250-NEXT: ; implicit-def: $vgpr1
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB4_1
+; GFX1250-NEXT: ; %bb.2:
+; GFX1250-NEXT: s_mov_b32 exec_lo, s2
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%voffset.add = add i32 %voffset, 128
%ret = call <2 x bfloat> @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2bf16(<2 x bfloat> %val, ptr addrspace(8) %rsrc, i32 %voffset.add, i32 %soffset, i32 0)
ret <2 x bfloat> %ret
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd_nortn.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd_nortn.ll
index 8141e0d..ea8f836 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd_nortn.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd_nortn.ll
@@ -2,7 +2,8 @@
; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx908 < %s | FileCheck -check-prefix=GFX908 %s
; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx90a < %s | FileCheck -check-prefix=GFX90A %s
; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx942 < %s | FileCheck -check-prefix=GFX942 %s
-; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX1200 %s
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 < %s | FileCheck -check-prefix=GFX1250 %s
define void @raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(float %val, ptr addrspace(8) inreg %rsrc, i32 %voffset, i32 inreg %soffset) #0 {
; GFX908-LABEL: raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
@@ -26,15 +27,22 @@ define void @raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voff
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen scope:SCOPE_SYS
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen scope:SCOPE_SYS
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen scope:SCOPE_SYS
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %voffset, i32 %soffset, i32 24)
ret void
}
@@ -61,15 +69,22 @@ define void @raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_add_f32 v0, off, s[0:3], s16
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_add_f32 v0, off, s[0:3], s16
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: buffer_atomic_add_f32 v0, off, s[0:3], s16
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 0, i32 %soffset, i32 0)
ret void
}
@@ -96,15 +111,22 @@ define void @raw_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_vo
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: raw_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_pk_add_f16 v0, v1, s[0:3], s16 offen
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: raw_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_pk_add_f16 v0, v1, s[0:3], s16 offen
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: buffer_atomic_pk_add_f16 v0, v1, s[0:3], s16 offen
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call <2 x half> @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2f16(<2 x half> %val, ptr addrspace(8) %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -131,15 +153,22 @@ define void @raw_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__0_voffs
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: raw_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_pk_add_f16 v0, off, s[0:3], s16 offset:92
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: raw_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_pk_add_f16 v0, off, s[0:3], s16 offset:92
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: buffer_atomic_pk_add_f16 v0, off, s[0:3], s16 offset:92
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call <2 x half> @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2f16(<2 x half> %val, ptr addrspace(8) %rsrc, i32 92, i32 %soffset, i32 0)
ret void
}
@@ -166,15 +195,22 @@ define void @raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voff
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_NT
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_NT
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_NT
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %voffset, i32 %soffset, i32 2)
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd_rtn.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd_rtn.ll
index 767117d..2838740 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd_rtn.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd_rtn.ll
@@ -1,7 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx90a < %s | FileCheck -check-prefix=GFX90A %s
; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx942 < %s | FileCheck -check-prefix=GFX942 %s
-; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX1200 %s
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 < %s | FileCheck -check-prefix=GFX1250 %s
define float @raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(float %val, ptr addrspace(8) inreg %rsrc, i32 %voffset, i32 inreg %soffset) #0 {
; GFX90A-LABEL: raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
@@ -18,16 +19,24 @@ define float @raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffs
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %voffset, i32 %soffset, i32 24)
ret float %ret
}
@@ -47,16 +56,24 @@ define float @raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__0_voffset_
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_add_f32 v0, off, s[0:3], s16 th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_add_f32 v0, off, s[0:3], s16 th:TH_ATOMIC_RETURN
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: buffer_atomic_add_f32 v0, off, s[0:3], s16 th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 0, i32 %soffset, i32 0)
ret float %ret
}
@@ -76,16 +93,24 @@ define <2 x half> @raw_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__sgpr_rsrc__vgp
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: raw_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_pk_add_f16 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: raw_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_pk_add_f16 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_RETURN
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: buffer_atomic_pk_add_f16 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call <2 x half> @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2f16(<2 x half> %val, ptr addrspace(8) %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret <2 x half> %ret
}
@@ -105,16 +130,24 @@ define <2 x half> @raw_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__sgpr_rsrc__0_v
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: raw_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_pk_add_f16 v0, off, s[0:3], s16 offset:92 th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: raw_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_pk_add_f16 v0, off, s[0:3], s16 offset:92 th:TH_ATOMIC_RETURN
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: buffer_atomic_pk_add_f16 v0, off, s[0:3], s16 offset:92 th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call <2 x half> @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2f16(<2 x half> %val, ptr addrspace(8) %rsrc, i32 92, i32 %soffset, i32 0)
ret <2 x half> %ret
}
@@ -134,16 +167,24 @@ define float @raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffs
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_NT_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_NT_RETURN
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_NT_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %voffset, i32 %soffset, i32 2)
ret float %ret
}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.load.bf16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.load.bf16.ll
index 3540468..4dd258b 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.load.bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.load.bf16.ll
@@ -3,7 +3,8 @@
; RUN: llc -mtriple=amdgcn -mcpu=tonga < %s | FileCheck --check-prefix=GFX8 %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck --check-prefix=GFX9 %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx1010 < %s | FileCheck --check-prefix=GFX10 %s
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 < %s | FileCheck --check-prefixes=GFX11 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 < %s | FileCheck --check-prefix=GFX11 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1250 -amdgpu-enable-delay-alu=0 < %s | FileCheck --check-prefix=GFX12 %s
define bfloat @raw_ptr_buffer_load_bf16(ptr addrspace(8) inreg %rsrc) {
; GFX7-LABEL: raw_ptr_buffer_load_bf16:
@@ -41,6 +42,14 @@ define bfloat @raw_ptr_buffer_load_bf16(ptr addrspace(8) inreg %rsrc) {
; GFX11-NEXT: buffer_load_u16 v0, off, s[0:3], 0
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: raw_ptr_buffer_load_bf16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_u16 v0, off, s[0:3], null
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: s_set_pc_i64 s[30:31]
%val = call bfloat @llvm.amdgcn.raw.ptr.buffer.load.v2bf16(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0)
ret bfloat %val
}
@@ -82,6 +91,14 @@ define <2 x bfloat> @raw_ptr_buffer_load_v2bf16(ptr addrspace(8) inreg %rsrc) {
; GFX11-NEXT: buffer_load_b32 v0, off, s[0:3], 0
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: raw_ptr_buffer_load_v2bf16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b32 v0, off, s[0:3], null
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: s_set_pc_i64 s[30:31]
%val = call <2 x bfloat> @llvm.amdgcn.raw.ptr.buffer.load.v2bf16(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0)
ret <2 x bfloat> %val
}
@@ -125,6 +142,14 @@ define <4 x bfloat> @raw_ptr_buffer_load_v4bf16(ptr addrspace(8) inreg %rsrc) {
; GFX11-NEXT: buffer_load_b64 v[0:1], off, s[0:3], 0
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: raw_ptr_buffer_load_v4bf16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b64 v[0:1], off, s[0:3], null
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: s_set_pc_i64 s[30:31]
%val = call <4 x bfloat> @llvm.amdgcn.raw.ptr.buffer.load.v4bf16(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0)
ret <4 x bfloat> %val
}
@@ -178,6 +203,14 @@ define <8 x bfloat> @raw_ptr_buffer_load_v8bf16(ptr addrspace(8) inreg %rsrc) {
; GFX11-NEXT: buffer_load_b128 v[0:3], off, s[0:3], 0
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: raw_ptr_buffer_load_v8bf16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b128 v[0:3], off, s[0:3], null
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: s_set_pc_i64 s[30:31]
%val = call <8 x bfloat> @llvm.amdgcn.raw.ptr.buffer.load.v8bf16(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0)
ret <8 x bfloat> %val
}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.store.bf16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.store.bf16.ll
index e1f84dc..ec7d7d4 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.store.bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.store.bf16.ll
@@ -3,7 +3,8 @@
; RUN: llc -mtriple=amdgcn -mcpu=tonga < %s | FileCheck --check-prefix=GFX8 %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck --check-prefix=GFX9 %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx1010 < %s | FileCheck --check-prefix=GFX10 %s
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 < %s | FileCheck --check-prefixes=GFX11 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 < %s | FileCheck --check-prefix=GFX11 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1250 -amdgpu-enable-delay-alu=0 < %s | FileCheck --check-prefix=GFX12 %s
define amdgpu_ps void @buffer_store_bf16(ptr addrspace(8) inreg %rsrc, bfloat %data, i32 %offset) {
; GFX7-LABEL: buffer_store_bf16:
@@ -32,6 +33,11 @@ define amdgpu_ps void @buffer_store_bf16(ptr addrspace(8) inreg %rsrc, bfloat %d
; GFX11: ; %bb.0:
; GFX11-NEXT: buffer_store_b16 v0, v1, s[0:3], 0 offen
; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: buffer_store_bf16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: buffer_store_b16 v0, v1, s[0:3], null offen
+; GFX12-NEXT: s_endpgm
call void @llvm.amdgcn.raw.ptr.buffer.store.bf16(bfloat %data, ptr addrspace(8) %rsrc, i32 %offset, i32 0, i32 0)
ret void
}
@@ -65,6 +71,11 @@ define amdgpu_ps void @buffer_store_v2bf16(ptr addrspace(8) inreg %rsrc, <2 x bf
; GFX11: ; %bb.0:
; GFX11-NEXT: buffer_store_b32 v0, v1, s[0:3], 0 offen
; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: buffer_store_v2bf16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: buffer_store_b32 v0, v1, s[0:3], null offen
+; GFX12-NEXT: s_endpgm
call void @llvm.amdgcn.raw.ptr.buffer.store.v2bf16(<2 x bfloat> %data, ptr addrspace(8) %rsrc, i32 %offset, i32 0, i32 0)
ret void
}
@@ -102,6 +113,11 @@ define amdgpu_ps void @buffer_store_v4bf16(ptr addrspace(8) inreg %rsrc, <4 x bf
; GFX11: ; %bb.0:
; GFX11-NEXT: buffer_store_b64 v[0:1], v2, s[0:3], 0 offen
; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: buffer_store_v4bf16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: buffer_store_b64 v[0:1], v2, s[0:3], null offen
+; GFX12-NEXT: s_endpgm
call void @llvm.amdgcn.raw.ptr.buffer.store.v4bf16(<4 x bfloat> %data, ptr addrspace(8) %rsrc, i32 %offset, i32 0, i32 0)
ret void
}
@@ -153,6 +169,11 @@ define amdgpu_ps void @buffer_store_v8bf16(ptr addrspace(8) inreg %rsrc, <8 x bf
; GFX11: ; %bb.0:
; GFX11-NEXT: buffer_store_b128 v[0:3], v4, s[0:3], 0 offen
; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: buffer_store_v8bf16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: buffer_store_b128 v[0:3], v4, s[0:3], null offen
+; GFX12-NEXT: s_endpgm
call void @llvm.amdgcn.raw.ptr.buffer.store.v8bf16(<8 x bfloat> %data, ptr addrspace(8) %rsrc, i32 %offset, i32 0, i32 0)
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.atomic.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.atomic.buffer.load.ll
index f6f614e..8896364 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.atomic.buffer.load.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.atomic.buffer.load.ll
@@ -1,30 +1,58 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=CHECK,CHECK-SDAG-TRUE16
-; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=CHECK,CHECK-FAKE16
-; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=CHECK,CHECK-GISEL
-; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=CHECK,CHECK-GISEL
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-SDAG-TRUE16
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-FAKE16
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL-TRUE16
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-FAKE16
+; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL
+; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX12,GFX12-SDAG-TRUE16
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX12,GFX12-FAKE16
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX12,GFX12-GISEL-TRUE16
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX12,GFX12-FAKE16
define amdgpu_kernel void @struct_atomic_buffer_load_i32(<4 x i32> %addr, i32 %index) {
-; CHECK-LABEL: struct_atomic_buffer_load_i32:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_clause 0x1
-; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-NEXT: .LBB0_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB0_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: struct_atomic_buffer_load_i32:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-NEXT: .LBB0_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB0_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_atomic_buffer_load_i32:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-NEXT: .LBB0_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], null idxen th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB0_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -37,23 +65,43 @@ bb2:
}
define amdgpu_kernel void @struct_atomic_buffer_load_i32_const_idx(<4 x i32> %addr) {
-; CHECK-LABEL: struct_atomic_buffer_load_i32_const_idx:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_and_b32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: .LBB1_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB1_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: struct_atomic_buffer_load_i32_const_idx:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: .LBB1_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB1_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_atomic_buffer_load_i32_const_idx:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: v_mov_b32_e32 v1, 15
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: .LBB1_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], null idxen th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB1_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -66,26 +114,48 @@ bb2:
}
define amdgpu_kernel void @struct_atomic_buffer_load_i32_off(<4 x i32> %addr, i32 %index) {
-; CHECK-LABEL: struct_atomic_buffer_load_i32_off:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_clause 0x1
-; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-NEXT: .LBB2_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB2_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: struct_atomic_buffer_load_i32_off:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-NEXT: .LBB2_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB2_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_atomic_buffer_load_i32_off:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-NEXT: .LBB2_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], null idxen th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB2_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -98,26 +168,49 @@ bb2:
}
define amdgpu_kernel void @struct_atomic_buffer_load_i32_soff(<4 x i32> %addr, i32 %index) {
-; CHECK-LABEL: struct_atomic_buffer_load_i32_soff:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_clause 0x1
-; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-NEXT: .LBB3_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 4 idxen offset:4 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB3_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: struct_atomic_buffer_load_i32_soff:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-NEXT: .LBB3_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 4 idxen offset:4 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB3_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_atomic_buffer_load_i32_soff:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: s_mov_b32 s5, 4
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-NEXT: .LBB3_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], s5 idxen offset:4 th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB3_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -129,26 +222,48 @@ bb2:
ret void
}
define amdgpu_kernel void @struct_atomic_buffer_load_i32_dlc(<4 x i32> %addr, i32 %index) {
-; CHECK-LABEL: struct_atomic_buffer_load_i32_dlc:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_clause 0x1
-; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-NEXT: .LBB4_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen offset:4 dlc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB4_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: struct_atomic_buffer_load_i32_dlc:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-NEXT: .LBB4_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen offset:4 dlc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB4_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_atomic_buffer_load_i32_dlc:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-NEXT: .LBB4_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT_RT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB4_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -161,26 +276,49 @@ bb2:
}
define amdgpu_kernel void @struct_nonatomic_buffer_load_i32(<4 x i32> %addr, i32 %index) {
-; CHECK-LABEL: struct_nonatomic_buffer_load_i32:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_clause 0x1
-; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_dual_mov_b32 v1, s6 :: v_dual_and_b32 v0, 0x3ff, v0
-; CHECK-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 idxen offset:4 glc
-; CHECK-NEXT: s_mov_b32 s0, 0
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
-; CHECK-NEXT: .LBB5_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_and_b32 s1, exec_lo, vcc_lo
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; CHECK-NEXT: s_or_b32 s0, s1, s0
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; CHECK-NEXT: s_cbranch_execnz .LBB5_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: struct_nonatomic_buffer_load_i32:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_dual_mov_b32 v1, s6 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX11-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 idxen offset:4 glc
+; GFX11-NEXT: s_mov_b32 s0, 0
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-NEXT: .LBB5_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_and_b32 s1, exec_lo, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: s_or_b32 s0, s1, s0
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_execnz .LBB5_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_nonatomic_buffer_load_i32:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-NEXT: buffer_load_b32 v1, v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s0, 0
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-NEXT: .LBB5_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_and_b32 s1, exec_lo, vcc_lo
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12-NEXT: s_or_b32 s0, s1, s0
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT: s_cbranch_execnz .LBB5_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -193,26 +331,49 @@ bb2:
}
define amdgpu_kernel void @struct_atomic_buffer_load_i64(<4 x i32> %addr, i32 %index) {
-; CHECK-LABEL: struct_atomic_buffer_load_i64:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_clause 0x1
-; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_mov_b32_e32 v2, s6
-; CHECK-NEXT: .LBB6_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: buffer_load_b64 v[3:4], v2, s[0:3], 0 idxen offset:4 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[3:4], v[0:1]
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB6_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: struct_atomic_buffer_load_i64:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v2, s6
+; GFX11-NEXT: .LBB6_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: buffer_load_b64 v[3:4], v2, s[0:3], 0 idxen offset:4 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[3:4], v[0:1]
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB6_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_atomic_buffer_load_i64:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: v_mov_b32_e32 v1, 0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v2, s6
+; GFX12-NEXT: .LBB6_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], null idxen offset:4 th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[4:5], v[0:1]
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB6_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%id.zext = zext i32 %id to i64
@@ -226,26 +387,48 @@ bb2:
}
define amdgpu_kernel void @struct_atomic_buffer_load_v2i16(<4 x i32> %addr, i32 %index) {
-; CHECK-LABEL: struct_atomic_buffer_load_v2i16:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_clause 0x1
-; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-NEXT: .LBB7_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB7_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: struct_atomic_buffer_load_v2i16:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-NEXT: .LBB7_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB7_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_atomic_buffer_load_v2i16:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-NEXT: .LBB7_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], null idxen th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB7_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -259,77 +442,172 @@ bb2:
}
define amdgpu_kernel void @struct_atomic_buffer_load_v4i16(<4 x i32> %addr, i32 %index) {
-; CHECK-SDAG-TRUE16-LABEL: struct_atomic_buffer_load_v4i16:
-; CHECK-SDAG-TRUE16: ; %bb.0: ; %bb
-; CHECK-SDAG-TRUE16-NEXT: s_clause 0x1
-; CHECK-SDAG-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0
-; CHECK-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-SDAG-TRUE16-NEXT: .LBB8_1: ; %bb1
-; CHECK-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
-; CHECK-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; CHECK-SDAG-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; CHECK-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; CHECK-SDAG-TRUE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2
-; CHECK-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
-; CHECK-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB8_1
-; CHECK-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2
-; CHECK-SDAG-TRUE16-NEXT: s_endpgm
+; GFX11-SDAG-TRUE16-LABEL: struct_atomic_buffer_load_v4i16:
+; GFX11-SDAG-TRUE16: ; %bb.0: ; %bb
+; GFX11-SDAG-TRUE16-NEXT: s_clause 0x1
+; GFX11-SDAG-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-SDAG-TRUE16-NEXT: .LBB8_1: ; %bb1
+; GFX11-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-SDAG-TRUE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2
+; GFX11-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX11-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2
+; GFX11-SDAG-TRUE16-NEXT: s_endpgm
+;
+; GFX11-FAKE16-LABEL: struct_atomic_buffer_load_v4i16:
+; GFX11-FAKE16: ; %bb.0: ; %bb
+; GFX11-FAKE16-NEXT: s_clause 0x1
+; GFX11-FAKE16-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-FAKE16-NEXT: .LBB8_1: ; %bb1
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX11-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %bb2
+; GFX11-FAKE16-NEXT: s_endpgm
+;
+; GFX11-GISEL-TRUE16-LABEL: struct_atomic_buffer_load_v4i16:
+; GFX11-GISEL-TRUE16: ; %bb.0: ; %bb
+; GFX11-GISEL-TRUE16-NEXT: s_clause 0x1
+; GFX11-GISEL-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-GISEL-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-GISEL-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-GISEL-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-GISEL-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-GISEL-TRUE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-GISEL-TRUE16-NEXT: .LBB8_1: ; %bb1
+; GFX11-GISEL-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-GISEL-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
+; GFX11-GISEL-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l
+; GFX11-GISEL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-GISEL-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX11-GISEL-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-GISEL-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-GISEL-TRUE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX11-GISEL-TRUE16-NEXT: ; %bb.2: ; %bb2
+; GFX11-GISEL-TRUE16-NEXT: s_endpgm
+;
+; GFX11-GISEL-LABEL: struct_atomic_buffer_load_v4i16:
+; GFX11-GISEL: ; %bb.0: ; %bb
+; GFX11-GISEL-NEXT: s_clause 0x1
+; GFX11-GISEL-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-GISEL-NEXT: s_mov_b32 s4, 0
+; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-GISEL-NEXT: .LBB8_1: ; %bb1
+; GFX11-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-GISEL-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-NEXT: v_readfirstlane_b32 s5, v2
+; GFX11-GISEL-NEXT: v_readfirstlane_b32 s6, v3
+; GFX11-GISEL-NEXT: s_pack_ll_b32_b16 s5, s5, s6
+; GFX11-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-GISEL-NEXT: v_cmp_ne_u32_e32 vcc_lo, s5, v0
+; GFX11-GISEL-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-GISEL-NEXT: s_cbranch_execnz .LBB8_1
+; GFX11-GISEL-NEXT: ; %bb.2: ; %bb2
+; GFX11-GISEL-NEXT: s_endpgm
;
-; CHECK-FAKE16-LABEL: struct_atomic_buffer_load_v4i16:
-; CHECK-FAKE16: ; %bb.0: ; %bb
-; CHECK-FAKE16-NEXT: s_clause 0x1
-; CHECK-FAKE16-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-FAKE16-NEXT: s_mov_b32 s4, 0
-; CHECK-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-FAKE16-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-FAKE16-NEXT: .LBB8_1: ; %bb1
-; CHECK-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-FAKE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
-; CHECK-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; CHECK-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; CHECK-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; CHECK-FAKE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2
-; CHECK-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
-; CHECK-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-FAKE16-NEXT: s_cbranch_execnz .LBB8_1
-; CHECK-FAKE16-NEXT: ; %bb.2: ; %bb2
-; CHECK-FAKE16-NEXT: s_endpgm
+; GFX12-SDAG-TRUE16-LABEL: struct_atomic_buffer_load_v4i16:
+; GFX12-SDAG-TRUE16: ; %bb.0: ; %bb
+; GFX12-SDAG-TRUE16-NEXT: s_clause 0x1
+; GFX12-SDAG-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-SDAG-TRUE16-NEXT: s_wait_xcnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-SDAG-TRUE16-NEXT: .LBB8_1: ; %bb1
+; GFX12-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT
+; GFX12-SDAG-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-SDAG-TRUE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2
+; GFX12-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX12-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2
+; GFX12-SDAG-TRUE16-NEXT: s_endpgm
;
-; CHECK-GISEL-LABEL: struct_atomic_buffer_load_v4i16:
-; CHECK-GISEL: ; %bb.0: ; %bb
-; CHECK-GISEL-NEXT: s_clause 0x1
-; CHECK-GISEL-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-GISEL-NEXT: s_mov_b32 s4, 0
-; CHECK-GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-GISEL-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-GISEL-NEXT: .LBB8_1: ; %bb1
-; CHECK-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-GISEL-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
-; CHECK-GISEL-NEXT: s_waitcnt vmcnt(0)
-; CHECK-GISEL-NEXT: v_readfirstlane_b32 s5, v2
-; CHECK-GISEL-NEXT: v_readfirstlane_b32 s6, v3
-; CHECK-GISEL-NEXT: s_pack_ll_b32_b16 s5, s5, s6
-; CHECK-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; CHECK-GISEL-NEXT: v_cmp_ne_u32_e32 vcc_lo, s5, v0
-; CHECK-GISEL-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-GISEL-NEXT: s_cbranch_execnz .LBB8_1
-; CHECK-GISEL-NEXT: ; %bb.2: ; %bb2
-; CHECK-GISEL-NEXT: s_endpgm
+; GFX12-FAKE16-LABEL: struct_atomic_buffer_load_v4i16:
+; GFX12-FAKE16: ; %bb.0: ; %bb
+; GFX12-FAKE16-NEXT: s_clause 0x1
+; GFX12-FAKE16-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX12-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-FAKE16-NEXT: .LBB8_1: ; %bb1
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2
+; GFX12-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %bb2
+; GFX12-FAKE16-NEXT: s_endpgm
+;
+; GFX12-GISEL-TRUE16-LABEL: struct_atomic_buffer_load_v4i16:
+; GFX12-GISEL-TRUE16: ; %bb.0: ; %bb
+; GFX12-GISEL-TRUE16-NEXT: s_clause 0x1
+; GFX12-GISEL-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-GISEL-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-GISEL-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-GISEL-TRUE16-NEXT: s_wait_xcnt 0x0
+; GFX12-GISEL-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX12-GISEL-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-TRUE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-GISEL-TRUE16-NEXT: .LBB8_1: ; %bb1
+; GFX12-GISEL-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-GISEL-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT
+; GFX12-GISEL-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l
+; GFX12-GISEL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX12-GISEL-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-GISEL-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-GISEL-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-GISEL-TRUE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX12-GISEL-TRUE16-NEXT: ; %bb.2: ; %bb2
+; GFX12-GISEL-TRUE16-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -344,26 +622,48 @@ bb2:
}
define amdgpu_kernel void @struct_atomic_buffer_load_v4i32(<4 x i32> %addr, i32 %index) {
-; CHECK-LABEL: struct_atomic_buffer_load_v4i32:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_clause 0x1
-; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-NEXT: .LBB9_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: buffer_load_b128 v[2:5], v1, s[0:3], 0 idxen offset:4 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v5, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB9_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: struct_atomic_buffer_load_v4i32:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-NEXT: .LBB9_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: buffer_load_b128 v[2:5], v1, s[0:3], 0 idxen offset:4 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v5, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB9_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_atomic_buffer_load_v4i32:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-NEXT: .LBB9_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: buffer_load_b128 v[2:5], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v5, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB9_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -377,28 +677,52 @@ bb2:
}
define amdgpu_kernel void @struct_atomic_buffer_load_ptr(<4 x i32> %addr, i32 %index) {
-; CHECK-LABEL: struct_atomic_buffer_load_ptr:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_clause 0x1
-; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-NEXT: .LBB10_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: flat_load_b32 v2, v[2:3]
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB10_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: struct_atomic_buffer_load_ptr:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-NEXT: .LBB10_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: flat_load_b32 v2, v[2:3]
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB10_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_atomic_buffer_load_ptr:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-NEXT: .LBB10_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: flat_load_b32 v2, v[2:3]
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB10_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.load.tfe.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.load.tfe.ll
index 13b28d4..9abbc06 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.load.tfe.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.load.tfe.ll
@@ -6,6 +6,7 @@
; RUN: llc -mcpu=gfx1010 -mtriple=amdgcn-- < %s | FileCheck %s -check-prefixes=GFX910,GFX10
; RUN: llc -mcpu=gfx1100 -mtriple=amdgcn-- < %s | FileCheck %s -check-prefix=GFX11
; RUN: llc -mcpu=gfx1200 -mtriple=amdgcn-- < %s | FileCheck %s -check-prefix=GFX12
+; RUN: llc -mcpu=gfx1250 -mtriple=amdgcn-- < %s | FileCheck %s -check-prefix=GFX12
define amdgpu_ps void @struct_buffer_load_i8_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
; GFX67-LABEL: struct_buffer_load_i8_tfe:
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.store.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.store.ll
index 9ce33c6..822016b 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.store.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.store.ll
@@ -3,6 +3,8 @@
; RUN: llc < %s -mtriple=amdgcn -mcpu=tonga | FileCheck -check-prefixes=GFX68,GFX8 %s
; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 | FileCheck -check-prefixes=GFX11,GFX11-TRUE16 %s
; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 | FileCheck -check-prefixes=GFX11,GFX11-FAKE16 %s
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 | FileCheck -check-prefixes=GFX12,GFX12-TRUE16 %s
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 | FileCheck -check-prefixes=GFX12,GFX12-FAKE16 %s
define amdgpu_ps void @buffer_store(<4 x i32> inreg, <4 x float>, <4 x float>, <4 x float>) {
; GFX68-LABEL: buffer_store:
@@ -21,6 +23,15 @@ define amdgpu_ps void @buffer_store(<4 x i32> inreg, <4 x float>, <4 x float>, <
; GFX11-NEXT: buffer_store_b128 v[4:7], v12, s[0:3], 0 idxen glc
; GFX11-NEXT: buffer_store_b128 v[8:11], v12, s[0:3], 0 idxen slc
; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: buffer_store:
+; GFX12: ; %bb.0: ; %main_body
+; GFX12-NEXT: v_mov_b32_e32 v12, 0
+; GFX12-NEXT: s_clause 0x2
+; GFX12-NEXT: buffer_store_b128 v[0:3], v12, s[0:3], null idxen
+; GFX12-NEXT: buffer_store_b128 v[4:7], v12, s[0:3], null idxen th:TH_STORE_NT
+; GFX12-NEXT: buffer_store_b128 v[8:11], v12, s[0:3], null idxen th:TH_STORE_HT
+; GFX12-NEXT: s_endpgm
main_body:
call void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float> %1, <4 x i32> %0, i32 0, i32 0, i32 0, i32 0)
call void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float> %2, <4 x i32> %0, i32 0, i32 0, i32 0, i32 1)
@@ -40,6 +51,12 @@ define amdgpu_ps void @buffer_store_immoffs(<4 x i32> inreg, <4 x float>) {
; GFX11-NEXT: v_mov_b32_e32 v4, 0
; GFX11-NEXT: buffer_store_b128 v[0:3], v4, s[0:3], 0 idxen offset:42
; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: buffer_store_immoffs:
+; GFX12: ; %bb.0: ; %main_body
+; GFX12-NEXT: v_mov_b32_e32 v4, 0
+; GFX12-NEXT: buffer_store_b128 v[0:3], v4, s[0:3], null idxen offset:42
+; GFX12-NEXT: s_endpgm
main_body:
call void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float> %1, <4 x i32> %0, i32 0, i32 42, i32 0, i32 0)
ret void
@@ -55,6 +72,11 @@ define amdgpu_ps void @buffer_store_idx(<4 x i32> inreg, <4 x float>, i32) {
; GFX11: ; %bb.0: ; %main_body
; GFX11-NEXT: buffer_store_b128 v[0:3], v4, s[0:3], 0 idxen
; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: buffer_store_idx:
+; GFX12: ; %bb.0: ; %main_body
+; GFX12-NEXT: buffer_store_b128 v[0:3], v4, s[0:3], null idxen
+; GFX12-NEXT: s_endpgm
main_body:
call void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float> %1, <4 x i32> %0, i32 %2, i32 0, i32 0, i32 0)
ret void
@@ -76,6 +98,12 @@ define amdgpu_ps void @buffer_store_ofs(<4 x i32> inreg, <4 x float>, i32) {
; GFX11-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, s4
; GFX11-NEXT: buffer_store_b128 v[0:3], v[4:5], s[0:3], 0 idxen offen
; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: buffer_store_ofs:
+; GFX12: ; %bb.0: ; %main_body
+; GFX12-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, 0
+; GFX12-NEXT: buffer_store_b128 v[0:3], v[4:5], s[0:3], null idxen offen
+; GFX12-NEXT: s_endpgm
main_body:
call void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float> %1, <4 x i32> %0, i32 0, i32 %2, i32 0, i32 0)
ret void
@@ -91,6 +119,11 @@ define amdgpu_ps void @buffer_store_both(<4 x i32> inreg, <4 x float>, i32, i32)
; GFX11: ; %bb.0: ; %main_body
; GFX11-NEXT: buffer_store_b128 v[0:3], v[4:5], s[0:3], 0 idxen offen
; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: buffer_store_both:
+; GFX12: ; %bb.0: ; %main_body
+; GFX12-NEXT: buffer_store_b128 v[0:3], v[4:5], s[0:3], null idxen offen
+; GFX12-NEXT: s_endpgm
main_body:
call void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float> %1, <4 x i32> %0, i32 %2, i32 %3, i32 0, i32 0)
ret void
@@ -108,6 +141,12 @@ define amdgpu_ps void @buffer_store_both_reversed(<4 x i32> inreg, <4 x float>,
; GFX11-NEXT: v_mov_b32_e32 v6, v4
; GFX11-NEXT: buffer_store_b128 v[0:3], v[5:6], s[0:3], 0 idxen offen
; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: buffer_store_both_reversed:
+; GFX12: ; %bb.0: ; %main_body
+; GFX12-NEXT: v_dual_mov_b32 v6, v5 :: v_dual_mov_b32 v7, v4
+; GFX12-NEXT: buffer_store_b128 v[0:3], v[6:7], s[0:3], null idxen offen
+; GFX12-NEXT: s_endpgm
main_body:
call void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float> %1, <4 x i32> %0, i32 %3, i32 %2, i32 0, i32 0)
ret void
@@ -139,6 +178,15 @@ define amdgpu_ps void @buffer_store_wait(<4 x i32> inreg, <4 x float>, i32, i32,
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_store_b128 v[0:3], v6, s[0:3], 0 idxen
; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: buffer_store_wait:
+; GFX12: ; %bb.0: ; %main_body
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: buffer_store_b128 v[0:3], v4, s[0:3], null idxen
+; GFX12-NEXT: buffer_load_b128 v[0:3], v5, s[0:3], null idxen
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: buffer_store_b128 v[0:3], v6, s[0:3], null idxen
+; GFX12-NEXT: s_endpgm
main_body:
call void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float> %1, <4 x i32> %0, i32 %2, i32 0, i32 0, i32 0)
%data = call <4 x float> @llvm.amdgcn.struct.buffer.load.v4f32(<4 x i32> %0, i32 %3, i32 0, i32 0, i32 0)
@@ -156,6 +204,11 @@ define amdgpu_ps void @buffer_store_x1(<4 x i32> inreg %rsrc, float %data, i32 %
; GFX11: ; %bb.0: ; %main_body
; GFX11-NEXT: buffer_store_b32 v0, v1, s[0:3], 0 idxen
; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: buffer_store_x1:
+; GFX12: ; %bb.0: ; %main_body
+; GFX12-NEXT: buffer_store_b32 v0, v1, s[0:3], null idxen
+; GFX12-NEXT: s_endpgm
main_body:
call void @llvm.amdgcn.struct.buffer.store.f32(float %data, <4 x i32> %rsrc, i32 %index, i32 0, i32 0, i32 0)
ret void
@@ -171,6 +224,11 @@ define amdgpu_ps void @buffer_store_x2(<4 x i32> inreg %rsrc, <2 x float> %data,
; GFX11: ; %bb.0: ; %main_body
; GFX11-NEXT: buffer_store_b64 v[0:1], v2, s[0:3], 0 idxen
; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: buffer_store_x2:
+; GFX12: ; %bb.0: ; %main_body
+; GFX12-NEXT: buffer_store_b64 v[0:1], v2, s[0:3], null idxen
+; GFX12-NEXT: s_endpgm
main_body:
call void @llvm.amdgcn.struct.buffer.store.v2f32(<2 x float> %data, <4 x i32> %rsrc, i32 %index, i32 0, i32 0, i32 0)
ret void
@@ -193,6 +251,15 @@ define amdgpu_ps void @buffer_store_int(<4 x i32> inreg, <4 x i32>, <2 x i32>, i
; GFX11-NEXT: buffer_store_b64 v[4:5], v7, s[0:3], 0 idxen glc
; GFX11-NEXT: buffer_store_b32 v6, v7, s[0:3], 0 idxen slc
; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: buffer_store_int:
+; GFX12: ; %bb.0: ; %main_body
+; GFX12-NEXT: v_mov_b32_e32 v7, 0
+; GFX12-NEXT: s_clause 0x2
+; GFX12-NEXT: buffer_store_b128 v[0:3], v7, s[0:3], null idxen
+; GFX12-NEXT: buffer_store_b64 v[4:5], v7, s[0:3], null idxen th:TH_STORE_NT
+; GFX12-NEXT: buffer_store_b32 v6, v7, s[0:3], null idxen th:TH_STORE_HT
+; GFX12-NEXT: s_endpgm
main_body:
call void @llvm.amdgcn.struct.buffer.store.v4i32(<4 x i32> %1, <4 x i32> %0, i32 0, i32 0, i32 0, i32 0)
call void @llvm.amdgcn.struct.buffer.store.v2i32(<2 x i32> %2, <4 x i32> %0, i32 0, i32 0, i32 0, i32 1)
@@ -212,6 +279,12 @@ define amdgpu_ps void @struct_buffer_store_byte(<4 x i32> inreg %rsrc, float %v1
; GFX11-NEXT: v_cvt_u32_f32_e32 v0, v0
; GFX11-NEXT: buffer_store_b8 v0, v1, s[0:3], 0 idxen
; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_buffer_store_byte:
+; GFX12: ; %bb.0: ; %main_body
+; GFX12-NEXT: v_cvt_u32_f32_e32 v0, v0
+; GFX12-NEXT: buffer_store_b8 v0, v1, s[0:3], null idxen
+; GFX12-NEXT: s_endpgm
main_body:
%v2 = fptoui float %v1 to i32
%v3 = trunc i32 %v2 to i8
@@ -237,6 +310,18 @@ define amdgpu_ps void @struct_buffer_store_f16(<4 x i32> inreg %rsrc, float %v1,
; GFX11-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
; GFX11-FAKE16-NEXT: buffer_store_b16 v0, v1, s[0:3], 0 idxen
; GFX11-FAKE16-NEXT: s_endpgm
+;
+; GFX12-TRUE16-LABEL: struct_buffer_store_f16:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: v_cvt_f16_f32_e32 v0.l, v0
+; GFX12-TRUE16-NEXT: buffer_store_b16 v0, v1, s[0:3], null idxen
+; GFX12-TRUE16-NEXT: s_endpgm
+;
+; GFX12-FAKE16-LABEL: struct_buffer_store_f16:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX12-FAKE16-NEXT: buffer_store_b16 v0, v1, s[0:3], null idxen
+; GFX12-FAKE16-NEXT: s_endpgm
%v2 = fptrunc float %v1 to half
call void @llvm.amdgcn.struct.buffer.store.f16(half %v2, <4 x i32> %rsrc, i32 %index, i32 0, i32 0, i32 0)
ret void
@@ -261,6 +346,11 @@ define amdgpu_ps void @struct_buffer_store_v2f16(<4 x i32> inreg %rsrc, <2 x hal
; GFX11: ; %bb.0:
; GFX11-NEXT: buffer_store_b32 v0, v1, s[0:3], 0 idxen
; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_buffer_store_v2f16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: buffer_store_b32 v0, v1, s[0:3], null idxen
+; GFX12-NEXT: s_endpgm
call void @llvm.amdgcn.struct.buffer.store.v2f16(<2 x half> %v1, <4 x i32> %rsrc, i32 %index, i32 0, i32 0, i32 0)
ret void
}
@@ -288,6 +378,11 @@ define amdgpu_ps void @struct_buffer_store_v4f16(<4 x i32> inreg %rsrc, <4 x hal
; GFX11: ; %bb.0:
; GFX11-NEXT: buffer_store_b64 v[0:1], v2, s[0:3], 0 idxen
; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_buffer_store_v4f16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: buffer_store_b64 v[0:1], v2, s[0:3], null idxen
+; GFX12-NEXT: s_endpgm
call void @llvm.amdgcn.struct.buffer.store.v4f16(<4 x half> %v1, <4 x i32> %rsrc, i32 %index, i32 0, i32 0, i32 0)
ret void
}
@@ -304,6 +399,12 @@ define amdgpu_ps void @struct_buffer_store_i16(<4 x i32> inreg %rsrc, float %v1,
; GFX11-NEXT: v_cvt_u32_f32_e32 v0, v0
; GFX11-NEXT: buffer_store_b16 v0, v1, s[0:3], 0 idxen
; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_buffer_store_i16:
+; GFX12: ; %bb.0: ; %main_body
+; GFX12-NEXT: v_cvt_u32_f32_e32 v0, v0
+; GFX12-NEXT: buffer_store_b16 v0, v1, s[0:3], null idxen
+; GFX12-NEXT: s_endpgm
main_body:
%v2 = fptoui float %v1 to i32
%v3 = trunc i32 %v2 to i16
@@ -329,6 +430,11 @@ define amdgpu_ps void @struct_buffer_store_vif16(<4 x i32> inreg %rsrc, <2 x i16
; GFX11: ; %bb.0:
; GFX11-NEXT: buffer_store_b32 v0, v1, s[0:3], 0 idxen
; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_buffer_store_vif16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: buffer_store_b32 v0, v1, s[0:3], null idxen
+; GFX12-NEXT: s_endpgm
call void @llvm.amdgcn.struct.buffer.store.v2i16(<2 x i16> %v1, <4 x i32> %rsrc, i32 %index, i32 0, i32 0, i32 0)
ret void
}
@@ -354,6 +460,11 @@ define amdgpu_ps void @struct_buffer_store_v4i16(<4 x i32> inreg %rsrc, <4 x i16
; GFX11: ; %bb.0:
; GFX11-NEXT: buffer_store_b64 v[0:1], v2, s[0:3], 0 idxen
; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_buffer_store_v4i16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: buffer_store_b64 v[0:1], v2, s[0:3], null idxen
+; GFX12-NEXT: s_endpgm
call void @llvm.amdgcn.struct.buffer.store.v4i16(<4 x i16> %v1, <4 x i32> %rsrc, i32 %index, i32 0, i32 0, i32 0)
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.atomic.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.atomic.buffer.load.ll
index 8f33dd6..23db247 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.atomic.buffer.load.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.atomic.buffer.load.ll
@@ -1,30 +1,58 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=CHECK,CHECK-SDAG-TRUE16
-; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=CHECK,CHECK-FAKE16
-; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=CHECK,CHECK-GISEL
-; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=CHECK,CHECK-GISEL
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-SDAG-TRUE16
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-FAKE16
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL-TRUE16
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-FAKE16
+; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL
+; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX12,GFX12-SDAG-TRUE16
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX12,GFX12-FAKE16
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX12,GFX12-GISEL-TRUE16
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX12,GFX12-FAKE16
define amdgpu_kernel void @struct_ptr_atomic_buffer_load_i32(ptr addrspace(8) %ptr, i32 %index) {
-; CHECK-LABEL: struct_ptr_atomic_buffer_load_i32:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_clause 0x1
-; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-NEXT: .LBB0_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB0_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: struct_ptr_atomic_buffer_load_i32:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-NEXT: .LBB0_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB0_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_ptr_atomic_buffer_load_i32:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-NEXT: .LBB0_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], null idxen th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB0_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -37,23 +65,43 @@ bb2:
}
define amdgpu_kernel void @struct_ptr_atomic_buffer_load_i32_const_idx(ptr addrspace(8) %ptr) {
-; CHECK-LABEL: struct_ptr_atomic_buffer_load_i32_const_idx:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_and_b32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: .LBB1_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB1_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: struct_ptr_atomic_buffer_load_i32_const_idx:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: .LBB1_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB1_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_ptr_atomic_buffer_load_i32_const_idx:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: v_mov_b32_e32 v1, 15
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: .LBB1_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], null idxen th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB1_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -66,26 +114,48 @@ bb2:
}
define amdgpu_kernel void @struct_ptr_atomic_buffer_load_i32_off(ptr addrspace(8) %ptr, i32 %index) {
-; CHECK-LABEL: struct_ptr_atomic_buffer_load_i32_off:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_clause 0x1
-; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-NEXT: .LBB2_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB2_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: struct_ptr_atomic_buffer_load_i32_off:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-NEXT: .LBB2_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB2_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_ptr_atomic_buffer_load_i32_off:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-NEXT: .LBB2_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], null idxen th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB2_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -98,26 +168,49 @@ bb2:
}
define amdgpu_kernel void @struct_ptr_atomic_buffer_load_i32_soff(ptr addrspace(8) %ptr, i32 %index) {
-; CHECK-LABEL: struct_ptr_atomic_buffer_load_i32_soff:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_clause 0x1
-; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-NEXT: .LBB3_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 4 idxen offset:4 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB3_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: struct_ptr_atomic_buffer_load_i32_soff:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-NEXT: .LBB3_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 4 idxen offset:4 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB3_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_ptr_atomic_buffer_load_i32_soff:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: s_mov_b32 s5, 4
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-NEXT: .LBB3_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], s5 idxen offset:4 th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB3_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -129,26 +222,48 @@ bb2:
ret void
}
define amdgpu_kernel void @struct_ptr_atomic_buffer_load_i32_dlc(ptr addrspace(8) %ptr, i32 %index) {
-; CHECK-LABEL: struct_ptr_atomic_buffer_load_i32_dlc:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_clause 0x1
-; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-NEXT: .LBB4_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen offset:4 dlc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB4_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: struct_ptr_atomic_buffer_load_i32_dlc:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-NEXT: .LBB4_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen offset:4 dlc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB4_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_ptr_atomic_buffer_load_i32_dlc:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-NEXT: .LBB4_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT_RT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB4_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -161,26 +276,49 @@ bb2:
}
define amdgpu_kernel void @struct_ptr_nonatomic_buffer_load_i32(ptr addrspace(8) %ptr, i32 %index) {
-; CHECK-LABEL: struct_ptr_nonatomic_buffer_load_i32:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_clause 0x1
-; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_dual_mov_b32 v1, s6 :: v_dual_and_b32 v0, 0x3ff, v0
-; CHECK-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 idxen offset:4 glc
-; CHECK-NEXT: s_mov_b32 s0, 0
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
-; CHECK-NEXT: .LBB5_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_and_b32 s1, exec_lo, vcc_lo
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; CHECK-NEXT: s_or_b32 s0, s1, s0
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; CHECK-NEXT: s_cbranch_execnz .LBB5_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: struct_ptr_nonatomic_buffer_load_i32:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_dual_mov_b32 v1, s6 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX11-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 idxen offset:4 glc
+; GFX11-NEXT: s_mov_b32 s0, 0
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-NEXT: .LBB5_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_and_b32 s1, exec_lo, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: s_or_b32 s0, s1, s0
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_execnz .LBB5_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_ptr_nonatomic_buffer_load_i32:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-NEXT: buffer_load_b32 v1, v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s0, 0
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-NEXT: .LBB5_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_and_b32 s1, exec_lo, vcc_lo
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12-NEXT: s_or_b32 s0, s1, s0
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT: s_cbranch_execnz .LBB5_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -193,26 +331,49 @@ bb2:
}
define amdgpu_kernel void @struct_ptr_atomic_buffer_load_i64(ptr addrspace(8) %ptr, i32 %index) {
-; CHECK-LABEL: struct_ptr_atomic_buffer_load_i64:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_clause 0x1
-; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_mov_b32_e32 v2, s6
-; CHECK-NEXT: .LBB6_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: buffer_load_b64 v[3:4], v2, s[0:3], 0 idxen offset:4 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[3:4], v[0:1]
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB6_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: struct_ptr_atomic_buffer_load_i64:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v2, s6
+; GFX11-NEXT: .LBB6_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: buffer_load_b64 v[3:4], v2, s[0:3], 0 idxen offset:4 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[3:4], v[0:1]
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB6_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_ptr_atomic_buffer_load_i64:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: v_mov_b32_e32 v1, 0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v2, s6
+; GFX12-NEXT: .LBB6_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], null idxen offset:4 th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[4:5], v[0:1]
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB6_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%id.zext = zext i32 %id to i64
@@ -226,26 +387,48 @@ bb2:
}
define amdgpu_kernel void @struct_ptr_atomic_buffer_load_v2i16(ptr addrspace(8) %ptr, i32 %index) {
-; CHECK-LABEL: struct_ptr_atomic_buffer_load_v2i16:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_clause 0x1
-; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-NEXT: .LBB7_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB7_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: struct_ptr_atomic_buffer_load_v2i16:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-NEXT: .LBB7_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB7_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_ptr_atomic_buffer_load_v2i16:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-NEXT: .LBB7_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], null idxen th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB7_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -259,77 +442,172 @@ bb2:
}
define amdgpu_kernel void @struct_ptr_atomic_buffer_load_v4i16(ptr addrspace(8) %ptr, i32 %index) {
-; CHECK-SDAG-TRUE16-LABEL: struct_ptr_atomic_buffer_load_v4i16:
-; CHECK-SDAG-TRUE16: ; %bb.0: ; %bb
-; CHECK-SDAG-TRUE16-NEXT: s_clause 0x1
-; CHECK-SDAG-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0
-; CHECK-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-SDAG-TRUE16-NEXT: .LBB8_1: ; %bb1
-; CHECK-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
-; CHECK-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; CHECK-SDAG-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; CHECK-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; CHECK-SDAG-TRUE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2
-; CHECK-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
-; CHECK-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB8_1
-; CHECK-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2
-; CHECK-SDAG-TRUE16-NEXT: s_endpgm
+; GFX11-SDAG-TRUE16-LABEL: struct_ptr_atomic_buffer_load_v4i16:
+; GFX11-SDAG-TRUE16: ; %bb.0: ; %bb
+; GFX11-SDAG-TRUE16-NEXT: s_clause 0x1
+; GFX11-SDAG-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-SDAG-TRUE16-NEXT: .LBB8_1: ; %bb1
+; GFX11-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-SDAG-TRUE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2
+; GFX11-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX11-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2
+; GFX11-SDAG-TRUE16-NEXT: s_endpgm
+;
+; GFX11-FAKE16-LABEL: struct_ptr_atomic_buffer_load_v4i16:
+; GFX11-FAKE16: ; %bb.0: ; %bb
+; GFX11-FAKE16-NEXT: s_clause 0x1
+; GFX11-FAKE16-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-FAKE16-NEXT: .LBB8_1: ; %bb1
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX11-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %bb2
+; GFX11-FAKE16-NEXT: s_endpgm
+;
+; GFX11-GISEL-TRUE16-LABEL: struct_ptr_atomic_buffer_load_v4i16:
+; GFX11-GISEL-TRUE16: ; %bb.0: ; %bb
+; GFX11-GISEL-TRUE16-NEXT: s_clause 0x1
+; GFX11-GISEL-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-GISEL-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-GISEL-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-GISEL-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-GISEL-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-GISEL-TRUE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-GISEL-TRUE16-NEXT: .LBB8_1: ; %bb1
+; GFX11-GISEL-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-GISEL-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
+; GFX11-GISEL-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l
+; GFX11-GISEL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-GISEL-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX11-GISEL-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-GISEL-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-GISEL-TRUE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX11-GISEL-TRUE16-NEXT: ; %bb.2: ; %bb2
+; GFX11-GISEL-TRUE16-NEXT: s_endpgm
+;
+; GFX11-GISEL-LABEL: struct_ptr_atomic_buffer_load_v4i16:
+; GFX11-GISEL: ; %bb.0: ; %bb
+; GFX11-GISEL-NEXT: s_clause 0x1
+; GFX11-GISEL-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-GISEL-NEXT: s_mov_b32 s4, 0
+; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-GISEL-NEXT: .LBB8_1: ; %bb1
+; GFX11-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-GISEL-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-NEXT: v_readfirstlane_b32 s5, v2
+; GFX11-GISEL-NEXT: v_readfirstlane_b32 s6, v3
+; GFX11-GISEL-NEXT: s_pack_ll_b32_b16 s5, s5, s6
+; GFX11-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-GISEL-NEXT: v_cmp_ne_u32_e32 vcc_lo, s5, v0
+; GFX11-GISEL-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-GISEL-NEXT: s_cbranch_execnz .LBB8_1
+; GFX11-GISEL-NEXT: ; %bb.2: ; %bb2
+; GFX11-GISEL-NEXT: s_endpgm
;
-; CHECK-FAKE16-LABEL: struct_ptr_atomic_buffer_load_v4i16:
-; CHECK-FAKE16: ; %bb.0: ; %bb
-; CHECK-FAKE16-NEXT: s_clause 0x1
-; CHECK-FAKE16-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-FAKE16-NEXT: s_mov_b32 s4, 0
-; CHECK-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-FAKE16-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-FAKE16-NEXT: .LBB8_1: ; %bb1
-; CHECK-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-FAKE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
-; CHECK-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; CHECK-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; CHECK-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; CHECK-FAKE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2
-; CHECK-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
-; CHECK-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-FAKE16-NEXT: s_cbranch_execnz .LBB8_1
-; CHECK-FAKE16-NEXT: ; %bb.2: ; %bb2
-; CHECK-FAKE16-NEXT: s_endpgm
+; GFX12-SDAG-TRUE16-LABEL: struct_ptr_atomic_buffer_load_v4i16:
+; GFX12-SDAG-TRUE16: ; %bb.0: ; %bb
+; GFX12-SDAG-TRUE16-NEXT: s_clause 0x1
+; GFX12-SDAG-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-SDAG-TRUE16-NEXT: s_wait_xcnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-SDAG-TRUE16-NEXT: .LBB8_1: ; %bb1
+; GFX12-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT
+; GFX12-SDAG-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-SDAG-TRUE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2
+; GFX12-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX12-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2
+; GFX12-SDAG-TRUE16-NEXT: s_endpgm
;
-; CHECK-GISEL-LABEL: struct_ptr_atomic_buffer_load_v4i16:
-; CHECK-GISEL: ; %bb.0: ; %bb
-; CHECK-GISEL-NEXT: s_clause 0x1
-; CHECK-GISEL-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-GISEL-NEXT: s_mov_b32 s4, 0
-; CHECK-GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-GISEL-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-GISEL-NEXT: .LBB8_1: ; %bb1
-; CHECK-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-GISEL-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
-; CHECK-GISEL-NEXT: s_waitcnt vmcnt(0)
-; CHECK-GISEL-NEXT: v_readfirstlane_b32 s5, v2
-; CHECK-GISEL-NEXT: v_readfirstlane_b32 s6, v3
-; CHECK-GISEL-NEXT: s_pack_ll_b32_b16 s5, s5, s6
-; CHECK-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; CHECK-GISEL-NEXT: v_cmp_ne_u32_e32 vcc_lo, s5, v0
-; CHECK-GISEL-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-GISEL-NEXT: s_cbranch_execnz .LBB8_1
-; CHECK-GISEL-NEXT: ; %bb.2: ; %bb2
-; CHECK-GISEL-NEXT: s_endpgm
+; GFX12-FAKE16-LABEL: struct_ptr_atomic_buffer_load_v4i16:
+; GFX12-FAKE16: ; %bb.0: ; %bb
+; GFX12-FAKE16-NEXT: s_clause 0x1
+; GFX12-FAKE16-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX12-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-FAKE16-NEXT: .LBB8_1: ; %bb1
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2
+; GFX12-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %bb2
+; GFX12-FAKE16-NEXT: s_endpgm
+;
+; GFX12-GISEL-TRUE16-LABEL: struct_ptr_atomic_buffer_load_v4i16:
+; GFX12-GISEL-TRUE16: ; %bb.0: ; %bb
+; GFX12-GISEL-TRUE16-NEXT: s_clause 0x1
+; GFX12-GISEL-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-GISEL-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-GISEL-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-GISEL-TRUE16-NEXT: s_wait_xcnt 0x0
+; GFX12-GISEL-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX12-GISEL-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-TRUE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-GISEL-TRUE16-NEXT: .LBB8_1: ; %bb1
+; GFX12-GISEL-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-GISEL-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT
+; GFX12-GISEL-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l
+; GFX12-GISEL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX12-GISEL-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-GISEL-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-GISEL-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-GISEL-TRUE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX12-GISEL-TRUE16-NEXT: ; %bb.2: ; %bb2
+; GFX12-GISEL-TRUE16-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -344,26 +622,48 @@ bb2:
}
define amdgpu_kernel void @struct_ptr_atomic_buffer_load_v4i32(ptr addrspace(8) %ptr, i32 %index) {
-; CHECK-LABEL: struct_ptr_atomic_buffer_load_v4i32:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_clause 0x1
-; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-NEXT: .LBB9_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: buffer_load_b128 v[2:5], v1, s[0:3], 0 idxen offset:4 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v5, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB9_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: struct_ptr_atomic_buffer_load_v4i32:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-NEXT: .LBB9_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: buffer_load_b128 v[2:5], v1, s[0:3], 0 idxen offset:4 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v5, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB9_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_ptr_atomic_buffer_load_v4i32:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-NEXT: .LBB9_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: buffer_load_b128 v[2:5], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v5, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB9_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -377,28 +677,52 @@ bb2:
}
define amdgpu_kernel void @struct_ptr_atomic_buffer_load_ptr(ptr addrspace(8) %ptr, i32 %index) {
-; CHECK-LABEL: struct_ptr_atomic_buffer_load_ptr:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_clause 0x1
-; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-NEXT: .LBB10_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: flat_load_b32 v2, v[2:3]
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB10_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: struct_ptr_atomic_buffer_load_ptr:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-NEXT: .LBB10_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: flat_load_b32 v2, v[2:3]
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB10_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_ptr_atomic_buffer_load_ptr:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-NEXT: .LBB10_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: flat_load_b32 v2, v[2:3]
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB10_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fadd_nortn.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fadd_nortn.ll
index 746b879..4366472 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fadd_nortn.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fadd_nortn.ll
@@ -3,6 +3,7 @@
; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx90a < %s | FileCheck -check-prefix=GFX90A %s
; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx942 < %s | FileCheck -check-prefix=GFX942 %s
; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX1200 %s
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 < %s | FileCheck -check-prefix=GFX1250 %s
define void @struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(float %val, ptr addrspace(8) inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) #0 {
; GFX908-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
@@ -39,6 +40,14 @@ define void @struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_v
; GFX1200-NEXT: s_wait_kmcnt 0x0
; GFX1200-NEXT: buffer_atomic_add_f32 v0, v[1:2], s[0:3], s16 idxen offen
; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: buffer_atomic_add_f32 v0, v[2:3], s[0:3], s16 idxen offen
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -75,6 +84,13 @@ define void @struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voff
; GFX1200-NEXT: s_wait_kmcnt 0x0
; GFX1200-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 idxen
; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 idxen
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 %soffset, i32 0)
ret void
}
@@ -114,6 +130,14 @@ define void @struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_v
; GFX1200-NEXT: s_wait_kmcnt 0x0
; GFX1200-NEXT: buffer_atomic_add_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_NT
; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: buffer_atomic_add_f32 v0, v[2:3], s[0:3], s16 idxen offen th:TH_ATOMIC_NT
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 2)
ret void
}
@@ -153,6 +177,14 @@ define void @struct_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr
; GFX1200-NEXT: s_wait_kmcnt 0x0
; GFX1200-NEXT: buffer_atomic_pk_add_f16 v0, v[1:2], s[0:3], s16 idxen offen
; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: buffer_atomic_pk_add_f16 v0, v[2:3], s[0:3], s16 idxen offen
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call <2 x half> @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.v2f16(<2 x half> %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -291,6 +323,42 @@ define void @struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__vgpr_rsrc__vgpr_v
; GFX1200-NEXT: ; %bb.2:
; GFX1200-NEXT: s_mov_b32 exec_lo, s2
; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__vgpr_rsrc__vgpr_voffset__vgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v9, v6 :: v_dual_mov_b32 v8, v5
+; GFX1250-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: s_mov_b32 s2, exec_lo
+; GFX1250-NEXT: .LBB4_1: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_readfirstlane_b32 s4, v2
+; GFX1250-NEXT: v_readfirstlane_b32 s5, v3
+; GFX1250-NEXT: v_readfirstlane_b32 s6, v4
+; GFX1250-NEXT: v_readfirstlane_b32 s7, v5
+; GFX1250-NEXT: v_readfirstlane_b32 s3, v7
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[2:3]
+; GFX1250-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[4:5]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: v_cmp_eq_u32_e64 s1, s3, v7
+; GFX1250-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_and_b32 s0, s0, s1
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_and_saveexec_b32 s0, s0
+; GFX1250-NEXT: buffer_atomic_add_f32 v0, v[8:9], s[4:7], s3 idxen offen
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5
+; GFX1250-NEXT: ; implicit-def: $vgpr7
+; GFX1250-NEXT: ; implicit-def: $vgpr0
+; GFX1250-NEXT: ; implicit-def: $vgpr8_vgpr9
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB4_1
+; GFX1250-NEXT: ; %bb.2:
+; GFX1250-NEXT: s_mov_b32 exec_lo, s2
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -429,6 +497,42 @@ define void @struct_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__vgpr_rsrc__vgpr
; GFX1200-NEXT: ; %bb.2:
; GFX1200-NEXT: s_mov_b32 exec_lo, s2
; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__vgpr_rsrc__vgpr_voffset__vgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v9, v6 :: v_dual_mov_b32 v8, v5
+; GFX1250-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: s_mov_b32 s2, exec_lo
+; GFX1250-NEXT: .LBB5_1: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_readfirstlane_b32 s4, v2
+; GFX1250-NEXT: v_readfirstlane_b32 s5, v3
+; GFX1250-NEXT: v_readfirstlane_b32 s6, v4
+; GFX1250-NEXT: v_readfirstlane_b32 s7, v5
+; GFX1250-NEXT: v_readfirstlane_b32 s3, v7
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[2:3]
+; GFX1250-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[4:5]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: v_cmp_eq_u32_e64 s1, s3, v7
+; GFX1250-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_and_b32 s0, s0, s1
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_and_saveexec_b32 s0, s0
+; GFX1250-NEXT: buffer_atomic_pk_add_f16 v0, v[8:9], s[4:7], s3 idxen offen
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5
+; GFX1250-NEXT: ; implicit-def: $vgpr7
+; GFX1250-NEXT: ; implicit-def: $vgpr0
+; GFX1250-NEXT: ; implicit-def: $vgpr8_vgpr9
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB5_1
+; GFX1250-NEXT: ; %bb.2:
+; GFX1250-NEXT: s_mov_b32 exec_lo, s2
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call <2 x half> @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.v2f16(<2 x half> %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fadd_rtn.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fadd_rtn.ll
index 71c63bf..0191a85 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fadd_rtn.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fadd_rtn.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx90a < %s | FileCheck -check-prefix=GFX90A %s
; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx942 < %s | FileCheck -check-prefix=GFX942 %s
; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX1200 %s
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 < %s | FileCheck -check-prefix=GFX1250 %s
define float @struct_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(float %val, ptr addrspace(8) inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) #0 {
; GFX90A-LABEL: struct_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
@@ -32,6 +33,15 @@ define float @struct_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_vo
; GFX1200-NEXT: buffer_atomic_add_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN
; GFX1200-NEXT: s_wait_loadcnt 0x0
; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: buffer_atomic_add_f32 v0, v[2:3], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret float %ret
}
@@ -62,6 +72,14 @@ define float @struct_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__0_voffs
; GFX1200-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 idxen th:TH_ATOMIC_RETURN
; GFX1200-NEXT: s_wait_loadcnt 0x0
; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 idxen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 %soffset, i32 0)
ret float %ret
}
@@ -95,6 +113,15 @@ define float @struct_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_vo
; GFX1200-NEXT: buffer_atomic_add_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_NT_RETURN
; GFX1200-NEXT: s_wait_loadcnt 0x0
; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: buffer_atomic_add_f32 v0, v[2:3], s[0:3], s16 idxen offen th:TH_ATOMIC_NT_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 2)
ret float %ret
}
@@ -128,6 +155,15 @@ define <2 x half> @struct_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__sgpr_rsrc__
; GFX1200-NEXT: buffer_atomic_pk_add_f16 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN
; GFX1200-NEXT: s_wait_loadcnt 0x0
; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: buffer_atomic_pk_add_f16 v0, v[2:3], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call <2 x half> @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.v2f16(<2 x half> %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret <2 x half> %ret
}
@@ -237,6 +273,43 @@ define float @struct_ptr_buffer_atomic_add_f32_rtn__vgpr_val__vgpr_rsrc__vgpr_vo
; GFX1200-NEXT: s_mov_b32 exec_lo, s2
; GFX1200-NEXT: s_wait_loadcnt 0x0
; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_rtn__vgpr_val__vgpr_rsrc__vgpr_voffset__vgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v9, v6 :: v_dual_mov_b32 v8, v5
+; GFX1250-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: s_mov_b32 s2, exec_lo
+; GFX1250-NEXT: .LBB4_1: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_readfirstlane_b32 s4, v2
+; GFX1250-NEXT: v_readfirstlane_b32 s5, v3
+; GFX1250-NEXT: v_readfirstlane_b32 s6, v4
+; GFX1250-NEXT: v_readfirstlane_b32 s7, v5
+; GFX1250-NEXT: v_readfirstlane_b32 s3, v7
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[2:3]
+; GFX1250-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[4:5]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: v_cmp_eq_u32_e64 s1, s3, v7
+; GFX1250-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_and_b32 s0, s0, s1
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_and_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: buffer_atomic_add_f32 v0, v[8:9], s[4:7], s3 idxen offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5
+; GFX1250-NEXT: ; implicit-def: $vgpr7
+; GFX1250-NEXT: ; implicit-def: $vgpr8_vgpr9
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB4_1
+; GFX1250-NEXT: ; %bb.2:
+; GFX1250-NEXT: s_mov_b32 exec_lo, s2
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret float %ret
}
@@ -346,6 +419,43 @@ define <2 x half> @struct_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__vgpr_rsrc__
; GFX1200-NEXT: s_mov_b32 exec_lo, s2
; GFX1200-NEXT: s_wait_loadcnt 0x0
; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__vgpr_rsrc__vgpr_voffset__vgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v9, v6 :: v_dual_mov_b32 v8, v5
+; GFX1250-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: s_mov_b32 s2, exec_lo
+; GFX1250-NEXT: .LBB5_1: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_readfirstlane_b32 s4, v2
+; GFX1250-NEXT: v_readfirstlane_b32 s5, v3
+; GFX1250-NEXT: v_readfirstlane_b32 s6, v4
+; GFX1250-NEXT: v_readfirstlane_b32 s7, v5
+; GFX1250-NEXT: v_readfirstlane_b32 s3, v7
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[2:3]
+; GFX1250-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[4:5]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: v_cmp_eq_u32_e64 s1, s3, v7
+; GFX1250-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_and_b32 s0, s0, s1
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_and_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: buffer_atomic_pk_add_f16 v0, v[8:9], s[4:7], s3 idxen offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5
+; GFX1250-NEXT: ; implicit-def: $vgpr7
+; GFX1250-NEXT: ; implicit-def: $vgpr8_vgpr9
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB5_1
+; GFX1250-NEXT: ; %bb.2:
+; GFX1250-NEXT: s_mov_b32 exec_lo, s2
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call <2 x half> @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.v2f16(<2 x half> %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret <2 x half> %ret
}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32.ll
index e3889ab..d551d91 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32.ll
@@ -4,7 +4,8 @@
; Not supported in gfx8 or gfx9
; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10 %s
; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 < %s | FileCheck -check-prefix=GFX11 %s
-; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX1200 %s
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 < %s | FileCheck -check-prefix=GFX1250 %s
define float @struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(float %val, ptr addrspace(8) inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
; GFX6-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
@@ -35,16 +36,25 @@ define float @struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_vo
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: buffer_atomic_max_num_f32 v0, v[2:3], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret float %ret
}
@@ -78,16 +88,25 @@ define float @struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_vo
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen offset:256 th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen offset:256 th:TH_ATOMIC_RETURN
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_add_nc_u32 v5, 0x100, v2
+; GFX1250-NEXT: buffer_atomic_max_num_f32 v0, v[4:5], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%voffset.add = add i32 %voffset, 256
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset.add, i32 %soffset, i32 0)
ret float %ret
@@ -122,16 +141,24 @@ define float @struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__0_voffs
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_max_num_f32 v0, v1, s[0:3], s16 idxen th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_max_num_f32 v0, v1, s[0:3], s16 idxen th:TH_ATOMIC_RETURN
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: buffer_atomic_max_num_f32 v0, v1, s[0:3], s16 idxen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 %soffset, i32 0)
ret float %ret
}
@@ -165,16 +192,25 @@ define float @struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_vo
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_NT_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_NT_RETURN
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: buffer_atomic_max_num_f32 v0, v[2:3], s[0:3], s16 idxen offen th:TH_ATOMIC_NT_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 2)
ret float %ret
}
@@ -206,15 +242,23 @@ define void @struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_v
; GFX11-NEXT: buffer_atomic_max_f32 v0, v[1:2], s[0:3], s16 idxen offen
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: buffer_atomic_max_num_f32 v0, v[2:3], s[0:3], s16 idxen offen
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -246,15 +290,23 @@ define void @struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_v
; GFX11-NEXT: buffer_atomic_max_f32 v0, v[1:2], s[0:3], s16 idxen offen offset:256
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen offset:256
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen offset:256
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_add_nc_u32 v5, 0x100, v2
+; GFX1250-NEXT: buffer_atomic_max_num_f32 v0, v[4:5], s[0:3], s16 idxen offen
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%voffset.add = add i32 %voffset, 256
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset.add, i32 %soffset, i32 0)
ret void
@@ -288,15 +340,22 @@ define void @struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voff
; GFX11-NEXT: buffer_atomic_max_f32 v0, v1, s[0:3], s16 idxen
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_max_num_f32 v0, v1, s[0:3], s16 idxen
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_max_num_f32 v0, v1, s[0:3], s16 idxen
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: buffer_atomic_max_num_f32 v0, v1, s[0:3], s16 idxen
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 %soffset, i32 0)
ret void
}
@@ -328,15 +387,23 @@ define void @struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_v
; GFX11-NEXT: buffer_atomic_max_f32 v0, v[1:2], s[0:3], s16 idxen offen slc
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_NT
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_NT
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: buffer_atomic_max_num_f32 v0, v[2:3], s[0:3], s16 idxen offen th:TH_ATOMIC_NT
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 2)
ret void
}
@@ -442,36 +509,68 @@ define float @struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__vgpr_rsrc__vgpr_vo
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__vgpr_rsrc__vgpr_voffset_add__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: s_mov_b32 s2, exec_lo
-; GFX12-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: v_readfirstlane_b32 s4, v1
-; GFX12-NEXT: v_readfirstlane_b32 s5, v2
-; GFX12-NEXT: v_readfirstlane_b32 s6, v3
-; GFX12-NEXT: v_readfirstlane_b32 s7, v4
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[1:2]
-; GFX12-NEXT: v_cmp_eq_u64_e64 s1, s[6:7], v[3:4]
-; GFX12-NEXT: s_and_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_saveexec_b32 s1, s1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: buffer_atomic_max_num_f32 v0, v[5:6], s[4:7], s0 idxen offen offset:256 th:TH_ATOMIC_RETURN
-; GFX12-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4
-; GFX12-NEXT: ; implicit-def: $vgpr5_vgpr6
-; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB8_1
-; GFX12-NEXT: ; %bb.2:
-; GFX12-NEXT: s_mov_b32 exec_lo, s2
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__vgpr_rsrc__vgpr_voffset_add__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: s_mov_b32 s2, exec_lo
+; GFX1200-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1200-NEXT: v_readfirstlane_b32 s4, v1
+; GFX1200-NEXT: v_readfirstlane_b32 s5, v2
+; GFX1200-NEXT: v_readfirstlane_b32 s6, v3
+; GFX1200-NEXT: v_readfirstlane_b32 s7, v4
+; GFX1200-NEXT: s_wait_alu 0xf1ff
+; GFX1200-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1200-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[1:2]
+; GFX1200-NEXT: v_cmp_eq_u64_e64 s1, s[6:7], v[3:4]
+; GFX1200-NEXT: s_and_b32 s1, vcc_lo, s1
+; GFX1200-NEXT: s_wait_alu 0xfffe
+; GFX1200-NEXT: s_and_saveexec_b32 s1, s1
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: buffer_atomic_max_num_f32 v0, v[5:6], s[4:7], s0 idxen offen offset:256 th:TH_ATOMIC_RETURN
+; GFX1200-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4
+; GFX1200-NEXT: ; implicit-def: $vgpr5_vgpr6
+; GFX1200-NEXT: s_xor_b32 exec_lo, exec_lo, s1
+; GFX1200-NEXT: s_cbranch_execnz .LBB8_1
+; GFX1200-NEXT: ; %bb.2:
+; GFX1200-NEXT: s_mov_b32 exec_lo, s2
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__vgpr_rsrc__vgpr_voffset_add__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v8, v5 :: v_dual_mov_b32 v5, v4
+; GFX1250-NEXT: v_dual_mov_b32 v4, v3 :: v_dual_mov_b32 v3, v2
+; GFX1250-NEXT: v_dual_mov_b32 v2, v1 :: v_dual_add_nc_u32 v9, 0x100, v6
+; GFX1250-NEXT: s_mov_b32 s2, exec_lo
+; GFX1250-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT: v_readfirstlane_b32 s4, v2
+; GFX1250-NEXT: v_readfirstlane_b32 s5, v3
+; GFX1250-NEXT: v_readfirstlane_b32 s6, v4
+; GFX1250-NEXT: v_readfirstlane_b32 s7, v5
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[2:3]
+; GFX1250-NEXT: v_cmp_eq_u64_e64 s1, s[6:7], v[4:5]
+; GFX1250-NEXT: s_and_b32 s1, vcc_lo, s1
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_and_saveexec_b32 s1, s1
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: buffer_atomic_max_num_f32 v0, v[8:9], s[4:7], s0 idxen offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5
+; GFX1250-NEXT: ; implicit-def: $vgpr8_vgpr9
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_xor_b32 exec_lo, exec_lo, s1
+; GFX1250-NEXT: s_cbranch_execnz .LBB8_1
+; GFX1250-NEXT: ; %bb.2:
+; GFX1250-NEXT: s_mov_b32 exec_lo, s2
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%voffset.add = add i32 %voffset, 256
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset.add, i32 %soffset, i32 0)
ret float %ret
@@ -595,41 +694,78 @@ define float @struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_vo
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__vgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: s_mov_b32 s2, exec_lo
-; GFX12-NEXT: .LBB9_1: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: v_readfirstlane_b32 s4, v1
-; GFX12-NEXT: v_readfirstlane_b32 s5, v2
-; GFX12-NEXT: v_readfirstlane_b32 s6, v3
-; GFX12-NEXT: v_readfirstlane_b32 s7, v4
-; GFX12-NEXT: v_readfirstlane_b32 s3, v7
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[1:2]
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[3:4]
-; GFX12-NEXT: v_cmp_eq_u32_e64 s1, s3, v7
-; GFX12-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_b32 s0, s0, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_saveexec_b32 s0, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: buffer_atomic_max_num_f32 v0, v[5:6], s[4:7], s3 idxen offen offset:256 th:TH_ATOMIC_RETURN
-; GFX12-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4
-; GFX12-NEXT: ; implicit-def: $vgpr7
-; GFX12-NEXT: ; implicit-def: $vgpr5_vgpr6
-; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB9_1
-; GFX12-NEXT: ; %bb.2:
-; GFX12-NEXT: s_mov_b32 exec_lo, s2
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__vgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: s_mov_b32 s2, exec_lo
+; GFX1200-NEXT: .LBB9_1: ; =>This Inner Loop Header: Depth=1
+; GFX1200-NEXT: v_readfirstlane_b32 s4, v1
+; GFX1200-NEXT: v_readfirstlane_b32 s5, v2
+; GFX1200-NEXT: v_readfirstlane_b32 s6, v3
+; GFX1200-NEXT: v_readfirstlane_b32 s7, v4
+; GFX1200-NEXT: v_readfirstlane_b32 s3, v7
+; GFX1200-NEXT: s_wait_alu 0xf1ff
+; GFX1200-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[1:2]
+; GFX1200-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1200-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[3:4]
+; GFX1200-NEXT: v_cmp_eq_u32_e64 s1, s3, v7
+; GFX1200-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX1200-NEXT: s_wait_alu 0xfffe
+; GFX1200-NEXT: s_and_b32 s0, s0, s1
+; GFX1200-NEXT: s_wait_alu 0xfffe
+; GFX1200-NEXT: s_and_saveexec_b32 s0, s0
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: buffer_atomic_max_num_f32 v0, v[5:6], s[4:7], s3 idxen offen offset:256 th:TH_ATOMIC_RETURN
+; GFX1200-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4
+; GFX1200-NEXT: ; implicit-def: $vgpr7
+; GFX1200-NEXT: ; implicit-def: $vgpr5_vgpr6
+; GFX1200-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX1200-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1200-NEXT: ; %bb.2:
+; GFX1200-NEXT: s_mov_b32 exec_lo, s2
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__vgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v8, v5 :: v_dual_mov_b32 v5, v4
+; GFX1250-NEXT: v_dual_mov_b32 v4, v3 :: v_dual_mov_b32 v3, v2
+; GFX1250-NEXT: v_dual_mov_b32 v2, v1 :: v_dual_add_nc_u32 v9, 0x100, v6
+; GFX1250-NEXT: s_mov_b32 s2, exec_lo
+; GFX1250-NEXT: .LBB9_1: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT: v_readfirstlane_b32 s4, v2
+; GFX1250-NEXT: v_readfirstlane_b32 s5, v3
+; GFX1250-NEXT: v_readfirstlane_b32 s6, v4
+; GFX1250-NEXT: v_readfirstlane_b32 s7, v5
+; GFX1250-NEXT: v_readfirstlane_b32 s3, v7
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[2:3]
+; GFX1250-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[4:5]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: v_cmp_eq_u32_e64 s1, s3, v7
+; GFX1250-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_and_b32 s0, s0, s1
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_and_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: buffer_atomic_max_num_f32 v0, v[8:9], s[4:7], s3 idxen offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5
+; GFX1250-NEXT: ; implicit-def: $vgpr7
+; GFX1250-NEXT: ; implicit-def: $vgpr8_vgpr9
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1250-NEXT: ; %bb.2:
+; GFX1250-NEXT: s_mov_b32 exec_lo, s2
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%voffset.add = add i32 %voffset, 256
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset.add, i32 %soffset, i32 0)
ret float %ret
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32.ll
index f001bf9..0096289 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32.ll
@@ -4,7 +4,8 @@
; Not supported in gfx8 or gfx9
; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10 %s
; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 < %s | FileCheck -check-prefix=GFX11 %s
-; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX1200 %s
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 < %s | FileCheck -check-prefix=GFX1250 %s
define float @struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(float %val, ptr addrspace(8) inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
; GFX6-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
@@ -35,16 +36,25 @@ define float @struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_v
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: buffer_atomic_min_num_f32 v0, v[2:3], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret float %ret
}
@@ -78,16 +88,25 @@ define float @struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_v
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_fmin__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen offset:256 th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_fmin__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen offset:256 th:TH_ATOMIC_RETURN
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_fmin__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_add_nc_u32 v5, 0x100, v2
+; GFX1250-NEXT: buffer_atomic_min_num_f32 v0, v[4:5], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%voffset.add = add i32 %voffset, 256
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset.add, i32 %soffset, i32 0)
ret float %ret
@@ -122,16 +141,24 @@ define float @struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__0_voff
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_min_num_f32 v0, v1, s[0:3], s16 idxen th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_min_num_f32 v0, v1, s[0:3], s16 idxen th:TH_ATOMIC_RETURN
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: buffer_atomic_min_num_f32 v0, v1, s[0:3], s16 idxen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 %soffset, i32 0)
ret float %ret
}
@@ -165,16 +192,25 @@ define float @struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_v
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_NT_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_NT_RETURN
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: buffer_atomic_min_num_f32 v0, v[2:3], s[0:3], s16 idxen offen th:TH_ATOMIC_NT_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 2)
ret float %ret
}
@@ -206,15 +242,23 @@ define void @struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_
; GFX11-NEXT: buffer_atomic_min_f32 v0, v[1:2], s[0:3], s16 idxen offen
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: buffer_atomic_min_num_f32 v0, v[2:3], s[0:3], s16 idxen offen
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -246,15 +290,23 @@ define void @struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_
; GFX11-NEXT: buffer_atomic_min_f32 v0, v[1:2], s[0:3], s16 idxen offen offset:256
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_fmin__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen offset:256
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_fmin__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen offset:256
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_fmin__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_add_nc_u32 v5, 0x100, v2
+; GFX1250-NEXT: buffer_atomic_min_num_f32 v0, v[4:5], s[0:3], s16 idxen offen
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%voffset.add = add i32 %voffset, 256
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset.add, i32 %soffset, i32 0)
ret void
@@ -288,15 +340,22 @@ define void @struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__0_vof
; GFX11-NEXT: buffer_atomic_min_f32 v0, v1, s[0:3], s16 idxen
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_min_num_f32 v0, v1, s[0:3], s16 idxen
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_min_num_f32 v0, v1, s[0:3], s16 idxen
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: buffer_atomic_min_num_f32 v0, v1, s[0:3], s16 idxen
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 %soffset, i32 0)
ret void
}
@@ -328,15 +387,23 @@ define void @struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_
; GFX11-NEXT: buffer_atomic_min_f32 v0, v[1:2], s[0:3], s16 idxen offen slc
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_NT
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_NT
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: buffer_atomic_min_num_f32 v0, v[2:3], s[0:3], s16 idxen offen th:TH_ATOMIC_NT
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 2)
ret void
}
@@ -442,36 +509,68 @@ define float @struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__vgpr_rsrc__vgpr_v
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__vgpr_rsrc__vgpr_voffset_fmin__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: s_mov_b32 s2, exec_lo
-; GFX12-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: v_readfirstlane_b32 s4, v1
-; GFX12-NEXT: v_readfirstlane_b32 s5, v2
-; GFX12-NEXT: v_readfirstlane_b32 s6, v3
-; GFX12-NEXT: v_readfirstlane_b32 s7, v4
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[1:2]
-; GFX12-NEXT: v_cmp_eq_u64_e64 s1, s[6:7], v[3:4]
-; GFX12-NEXT: s_and_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_saveexec_b32 s1, s1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: buffer_atomic_min_num_f32 v0, v[5:6], s[4:7], s0 idxen offen offset:256 th:TH_ATOMIC_RETURN
-; GFX12-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4
-; GFX12-NEXT: ; implicit-def: $vgpr5_vgpr6
-; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB8_1
-; GFX12-NEXT: ; %bb.2:
-; GFX12-NEXT: s_mov_b32 exec_lo, s2
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__vgpr_rsrc__vgpr_voffset_fmin__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: s_mov_b32 s2, exec_lo
+; GFX1200-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1200-NEXT: v_readfirstlane_b32 s4, v1
+; GFX1200-NEXT: v_readfirstlane_b32 s5, v2
+; GFX1200-NEXT: v_readfirstlane_b32 s6, v3
+; GFX1200-NEXT: v_readfirstlane_b32 s7, v4
+; GFX1200-NEXT: s_wait_alu 0xf1ff
+; GFX1200-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1200-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[1:2]
+; GFX1200-NEXT: v_cmp_eq_u64_e64 s1, s[6:7], v[3:4]
+; GFX1200-NEXT: s_and_b32 s1, vcc_lo, s1
+; GFX1200-NEXT: s_wait_alu 0xfffe
+; GFX1200-NEXT: s_and_saveexec_b32 s1, s1
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: buffer_atomic_min_num_f32 v0, v[5:6], s[4:7], s0 idxen offen offset:256 th:TH_ATOMIC_RETURN
+; GFX1200-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4
+; GFX1200-NEXT: ; implicit-def: $vgpr5_vgpr6
+; GFX1200-NEXT: s_xor_b32 exec_lo, exec_lo, s1
+; GFX1200-NEXT: s_cbranch_execnz .LBB8_1
+; GFX1200-NEXT: ; %bb.2:
+; GFX1200-NEXT: s_mov_b32 exec_lo, s2
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__vgpr_rsrc__vgpr_voffset_fmin__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v8, v5 :: v_dual_mov_b32 v5, v4
+; GFX1250-NEXT: v_dual_mov_b32 v4, v3 :: v_dual_mov_b32 v3, v2
+; GFX1250-NEXT: v_dual_mov_b32 v2, v1 :: v_dual_add_nc_u32 v9, 0x100, v6
+; GFX1250-NEXT: s_mov_b32 s2, exec_lo
+; GFX1250-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT: v_readfirstlane_b32 s4, v2
+; GFX1250-NEXT: v_readfirstlane_b32 s5, v3
+; GFX1250-NEXT: v_readfirstlane_b32 s6, v4
+; GFX1250-NEXT: v_readfirstlane_b32 s7, v5
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[2:3]
+; GFX1250-NEXT: v_cmp_eq_u64_e64 s1, s[6:7], v[4:5]
+; GFX1250-NEXT: s_and_b32 s1, vcc_lo, s1
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_and_saveexec_b32 s1, s1
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: buffer_atomic_min_num_f32 v0, v[8:9], s[4:7], s0 idxen offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5
+; GFX1250-NEXT: ; implicit-def: $vgpr8_vgpr9
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_xor_b32 exec_lo, exec_lo, s1
+; GFX1250-NEXT: s_cbranch_execnz .LBB8_1
+; GFX1250-NEXT: ; %bb.2:
+; GFX1250-NEXT: s_mov_b32 exec_lo, s2
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%voffset.add = add i32 %voffset, 256
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset.add, i32 %soffset, i32 0)
ret float %ret
@@ -595,41 +694,78 @@ define float @struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_v
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_fmin__vgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: s_mov_b32 s2, exec_lo
-; GFX12-NEXT: .LBB9_1: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: v_readfirstlane_b32 s4, v1
-; GFX12-NEXT: v_readfirstlane_b32 s5, v2
-; GFX12-NEXT: v_readfirstlane_b32 s6, v3
-; GFX12-NEXT: v_readfirstlane_b32 s7, v4
-; GFX12-NEXT: v_readfirstlane_b32 s3, v7
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[1:2]
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[3:4]
-; GFX12-NEXT: v_cmp_eq_u32_e64 s1, s3, v7
-; GFX12-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_b32 s0, s0, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_saveexec_b32 s0, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: buffer_atomic_min_num_f32 v0, v[5:6], s[4:7], s3 idxen offen offset:256 th:TH_ATOMIC_RETURN
-; GFX12-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4
-; GFX12-NEXT: ; implicit-def: $vgpr7
-; GFX12-NEXT: ; implicit-def: $vgpr5_vgpr6
-; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB9_1
-; GFX12-NEXT: ; %bb.2:
-; GFX12-NEXT: s_mov_b32 exec_lo, s2
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_fmin__vgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: s_mov_b32 s2, exec_lo
+; GFX1200-NEXT: .LBB9_1: ; =>This Inner Loop Header: Depth=1
+; GFX1200-NEXT: v_readfirstlane_b32 s4, v1
+; GFX1200-NEXT: v_readfirstlane_b32 s5, v2
+; GFX1200-NEXT: v_readfirstlane_b32 s6, v3
+; GFX1200-NEXT: v_readfirstlane_b32 s7, v4
+; GFX1200-NEXT: v_readfirstlane_b32 s3, v7
+; GFX1200-NEXT: s_wait_alu 0xf1ff
+; GFX1200-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[1:2]
+; GFX1200-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1200-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[3:4]
+; GFX1200-NEXT: v_cmp_eq_u32_e64 s1, s3, v7
+; GFX1200-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX1200-NEXT: s_wait_alu 0xfffe
+; GFX1200-NEXT: s_and_b32 s0, s0, s1
+; GFX1200-NEXT: s_wait_alu 0xfffe
+; GFX1200-NEXT: s_and_saveexec_b32 s0, s0
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: buffer_atomic_min_num_f32 v0, v[5:6], s[4:7], s3 idxen offen offset:256 th:TH_ATOMIC_RETURN
+; GFX1200-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4
+; GFX1200-NEXT: ; implicit-def: $vgpr7
+; GFX1200-NEXT: ; implicit-def: $vgpr5_vgpr6
+; GFX1200-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX1200-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1200-NEXT: ; %bb.2:
+; GFX1200-NEXT: s_mov_b32 exec_lo, s2
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_fmin__vgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v8, v5 :: v_dual_mov_b32 v5, v4
+; GFX1250-NEXT: v_dual_mov_b32 v4, v3 :: v_dual_mov_b32 v3, v2
+; GFX1250-NEXT: v_dual_mov_b32 v2, v1 :: v_dual_add_nc_u32 v9, 0x100, v6
+; GFX1250-NEXT: s_mov_b32 s2, exec_lo
+; GFX1250-NEXT: .LBB9_1: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT: v_readfirstlane_b32 s4, v2
+; GFX1250-NEXT: v_readfirstlane_b32 s5, v3
+; GFX1250-NEXT: v_readfirstlane_b32 s6, v4
+; GFX1250-NEXT: v_readfirstlane_b32 s7, v5
+; GFX1250-NEXT: v_readfirstlane_b32 s3, v7
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[2:3]
+; GFX1250-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[4:5]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: v_cmp_eq_u32_e64 s1, s3, v7
+; GFX1250-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_and_b32 s0, s0, s1
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_and_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: buffer_atomic_min_num_f32 v0, v[8:9], s[4:7], s3 idxen offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5
+; GFX1250-NEXT: ; implicit-def: $vgpr7
+; GFX1250-NEXT: ; implicit-def: $vgpr8_vgpr9
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1250-NEXT: ; %bb.2:
+; GFX1250-NEXT: s_mov_b32 exec_lo, s2
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%voffset.add = add i32 %voffset, 256
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset.add, i32 %soffset, i32 0)
ret float %ret
diff --git a/llvm/test/CodeGen/AMDGPU/load-constant-always-uniform.ll b/llvm/test/CodeGen/AMDGPU/load-constant-always-uniform.ll
index 91a8446..13ea8b0 100644
--- a/llvm/test/CodeGen/AMDGPU/load-constant-always-uniform.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-constant-always-uniform.ll
@@ -18,10 +18,9 @@ define amdgpu_cs void @test_uniform_load_b96(ptr addrspace(1) %ptr, i32 %arg) "a
; GFX11-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
; GFX11-NEXT: s_load_b32 s0, s[0:1], 0x8
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_or_b32 s1, s2, s3
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_or_b32 s0, s0, s1
-; GFX11-NEXT: v_mov_b32_e32 v2, s0
+; GFX11-NEXT: v_mov_b32_e32 v2, s3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_or3_b32 v2, s2, v2, s0
; GFX11-NEXT: global_store_b32 v[0:1], v2, off
; GFX11-NEXT: s_endpgm
;
@@ -34,14 +33,12 @@ define amdgpu_cs void @test_uniform_load_b96(ptr addrspace(1) %ptr, i32 %arg) "a
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX12-NEXT: v_add_co_ci_u32_e64 v3, null, v1, v3, vcc_lo
; GFX12-NEXT: v_readfirstlane_b32 s0, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(SALU_CYCLE_1)
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
; GFX12-NEXT: v_readfirstlane_b32 s1, v3
; GFX12-NEXT: s_load_b96 s[0:2], s[0:1], 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: s_or_b32 s0, s0, s1
-; GFX12-NEXT: s_or_b32 s0, s2, s0
-; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: v_mov_b32_e32 v2, s0
+; GFX12-NEXT: v_or3_b32 v2, v2, s1, s2
; GFX12-NEXT: global_store_b32 v[0:1], v2, off
; GFX12-NEXT: s_endpgm
bb:
diff --git a/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-pointer-ops.ll b/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-pointer-ops.ll
index 63c0463..66de953 100644
--- a/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-pointer-ops.ll
+++ b/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-pointer-ops.ll
@@ -255,6 +255,56 @@ define i32 @ptrtoint_offset(ptr addrspace(7) %ptr) {
ret i32 %ret
}
+define i32 @ptrtoaddr(ptr addrspace(7) %ptr) {
+; CHECK-LABEL: define i32 @ptrtoaddr
+; CHECK-SAME: ({ ptr addrspace(8), i32 } [[PTR:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[PTR_RSRC:%.*]] = extractvalue { ptr addrspace(8), i32 } [[PTR]], 0
+; CHECK-NEXT: [[RET:%.*]] = extractvalue { ptr addrspace(8), i32 } [[PTR]], 1
+; CHECK-NEXT: ret i32 [[RET]]
+;
+ %ret = ptrtoaddr ptr addrspace(7) %ptr to i32
+ ret i32 %ret
+}
+
+define <2 x i32> @ptrtoaddr_vec(<2 x ptr addrspace(7)> %ptr) {
+; CHECK-LABEL: define <2 x i32> @ptrtoaddr_vec
+; CHECK-SAME: ({ <2 x ptr addrspace(8)>, <2 x i32> } [[PTR:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[PTR_RSRC:%.*]] = extractvalue { <2 x ptr addrspace(8)>, <2 x i32> } [[PTR]], 0
+; CHECK-NEXT: [[RET:%.*]] = extractvalue { <2 x ptr addrspace(8)>, <2 x i32> } [[PTR]], 1
+; CHECK-NEXT: ret <2 x i32> [[RET]]
+;
+ %ret = ptrtoaddr <2 x ptr addrspace(7)> %ptr to <2 x i32>
+ ret <2 x i32> %ret
+}
+
+;; Check that we extend the offset to i160.
+define i160 @ptrtoaddr_ext(ptr addrspace(7) %ptr) {
+; CHECK-LABEL: define i160 @ptrtoaddr_ext
+; CHECK-SAME: ({ ptr addrspace(8), i32 } [[PTR:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[PTR_RSRC:%.*]] = extractvalue { ptr addrspace(8), i32 } [[PTR]], 0
+; CHECK-NEXT: [[PTR_OFF:%.*]] = extractvalue { ptr addrspace(8), i32 } [[PTR]], 1
+; CHECK-NEXT: [[RET:%.*]] = zext i32 [[PTR_OFF]] to i160
+; CHECK-NEXT: ret i160 [[RET]]
+;
+ %addr = ptrtoaddr ptr addrspace(7) %ptr to i32
+ %ext = zext i32 %addr to i160
+ ret i160 %ext
+}
+
+;; Check that we truncate the offset to i16.
+define i16 @ptrtoaddr_trunc(ptr addrspace(7) %ptr) {
+; CHECK-LABEL: define i16 @ptrtoaddr_trunc
+; CHECK-SAME: ({ ptr addrspace(8), i32 } [[PTR:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[PTR_RSRC:%.*]] = extractvalue { ptr addrspace(8), i32 } [[PTR]], 0
+; CHECK-NEXT: [[PTR_OFF:%.*]] = extractvalue { ptr addrspace(8), i32 } [[PTR]], 1
+; CHECK-NEXT: [[RET:%.*]] = trunc i32 [[PTR_OFF]] to i16
+; CHECK-NEXT: ret i16 [[RET]]
+;
+ %addr = ptrtoaddr ptr addrspace(7) %ptr to i32
+ %trunc = trunc i32 %addr to i16
+ ret i16 %trunc
+}
+
define ptr addrspace(7) @inttoptr(i160 %v) {
; CHECK-LABEL: define { ptr addrspace(8), i32 } @inttoptr
; CHECK-SAME: (i160 [[V:%.*]]) #[[ATTR0]] {
diff --git a/llvm/test/CodeGen/AMDGPU/machine-scheduler-sink-trivial-remats-attr.mir b/llvm/test/CodeGen/AMDGPU/machine-scheduler-sink-trivial-remats-attr.mir
index 23412aa..3b3ea3f 100644
--- a/llvm/test/CodeGen/AMDGPU/machine-scheduler-sink-trivial-remats-attr.mir
+++ b/llvm/test/CodeGen/AMDGPU/machine-scheduler-sink-trivial-remats-attr.mir
@@ -347,8 +347,10 @@ body: |
...
# The user-requested maximum number of VGPRs needs to be taken into account by
# the scheduler's rematerialization stage. Register usage above that number
-# is considered like spill; occupancy is "inadvertently" increased when
-# eliminating spill.
+# is considered like spill. On unified RF (gfx90a), the requested number is
+# understood "per-bank", effectively doubling its value, so no rematerialization
+# is necessary.
+---
name: small_num_vgprs_as_spill
tracksRegLiveness: true
machineFunctionInfo:
@@ -371,36 +373,15 @@ body: |
; GFX908-NEXT: [[V_CVT_I32_F64_e32_10:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 10, implicit $exec, implicit $mode, implicit-def $m0
; GFX908-NEXT: [[V_CVT_I32_F64_e32_11:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 11, implicit $exec, implicit $mode, implicit-def $m0
; GFX908-NEXT: [[V_CVT_I32_F64_e32_12:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 12, implicit $exec, implicit $mode, implicit-def $m0
- ; GFX908-NEXT: [[V_CVT_I32_F64_e32_13:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 13, implicit $exec, implicit $mode, implicit-def $m0
- ; GFX908-NEXT: [[V_CVT_I32_F64_e32_14:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 14, implicit $exec, implicit $mode, implicit-def $m0
- ; GFX908-NEXT: [[V_CVT_I32_F64_e32_15:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 15, implicit $exec, implicit $mode, implicit-def $m0
- ; GFX908-NEXT: [[V_CVT_I32_F64_e32_16:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 16, implicit $exec, implicit $mode, implicit-def $m0
- ; GFX908-NEXT: [[V_CVT_I32_F64_e32_17:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 17, implicit $exec, implicit $mode, implicit-def $m0
- ; GFX908-NEXT: [[V_CVT_I32_F64_e32_18:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 18, implicit $exec, implicit $mode, implicit-def $m0
- ; GFX908-NEXT: [[V_CVT_I32_F64_e32_19:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 19, implicit $exec, implicit $mode, implicit-def $m0
- ; GFX908-NEXT: [[V_CVT_I32_F64_e32_20:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 20, implicit $exec, implicit $mode, implicit-def $m0
- ; GFX908-NEXT: [[V_CVT_I32_F64_e32_21:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 21, implicit $exec, implicit $mode, implicit-def $m0
- ; GFX908-NEXT: [[V_CVT_I32_F64_e32_22:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 22, implicit $exec, implicit $mode, implicit-def $m0
- ; GFX908-NEXT: [[V_CVT_I32_F64_e32_23:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 23, implicit $exec, implicit $mode, implicit-def $m0
- ; GFX908-NEXT: [[V_CVT_I32_F64_e32_24:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 24, implicit $exec, implicit $mode, implicit-def $m0
- ; GFX908-NEXT: [[V_CVT_I32_F64_e32_25:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 25, implicit $exec, implicit $mode, implicit-def $m0
- ; GFX908-NEXT: [[V_CVT_I32_F64_e32_26:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 26, implicit $exec, implicit $mode, implicit-def $m0
- ; GFX908-NEXT: [[V_CVT_I32_F64_e32_27:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 33, implicit $exec, implicit $mode
+ ; GFX908-NEXT: [[V_CVT_I32_F64_e32_13:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 15, implicit $exec, implicit $mode
; GFX908-NEXT: {{ $}}
; GFX908-NEXT: bb.1:
; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_]], implicit [[V_CVT_I32_F64_e32_1]], implicit [[V_CVT_I32_F64_e32_2]], implicit [[V_CVT_I32_F64_e32_3]], implicit [[V_CVT_I32_F64_e32_4]]
; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_5]], implicit [[V_CVT_I32_F64_e32_6]], implicit [[V_CVT_I32_F64_e32_7]], implicit [[V_CVT_I32_F64_e32_8]], implicit [[V_CVT_I32_F64_e32_9]]
- ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_10]], implicit [[V_CVT_I32_F64_e32_11]], implicit [[V_CVT_I32_F64_e32_12]], implicit [[V_CVT_I32_F64_e32_13]], implicit [[V_CVT_I32_F64_e32_14]]
- ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_15]], implicit [[V_CVT_I32_F64_e32_16]], implicit [[V_CVT_I32_F64_e32_17]], implicit [[V_CVT_I32_F64_e32_18]], implicit [[V_CVT_I32_F64_e32_19]]
- ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_20]], implicit [[V_CVT_I32_F64_e32_21]], implicit [[V_CVT_I32_F64_e32_22]], implicit [[V_CVT_I32_F64_e32_23]], implicit [[V_CVT_I32_F64_e32_24]]
- ; GFX908-NEXT: [[V_CVT_I32_F64_e32_28:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 27, implicit $exec, implicit $mode
- ; GFX908-NEXT: [[V_CVT_I32_F64_e32_29:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 28, implicit $exec, implicit $mode
- ; GFX908-NEXT: [[V_CVT_I32_F64_e32_30:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 29, implicit $exec, implicit $mode
- ; GFX908-NEXT: [[V_CVT_I32_F64_e32_31:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 30, implicit $exec, implicit $mode
- ; GFX908-NEXT: [[V_CVT_I32_F64_e32_32:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 31, implicit $exec, implicit $mode
- ; GFX908-NEXT: [[V_CVT_I32_F64_e32_33:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 32, implicit $exec, implicit $mode
- ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_25]], implicit [[V_CVT_I32_F64_e32_26]], implicit [[V_CVT_I32_F64_e32_28]], implicit [[V_CVT_I32_F64_e32_29]], implicit [[V_CVT_I32_F64_e32_30]]
- ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_31]], implicit [[V_CVT_I32_F64_e32_32]], implicit [[V_CVT_I32_F64_e32_33]], implicit [[V_CVT_I32_F64_e32_27]]
+ ; GFX908-NEXT: [[V_CVT_I32_F64_e32_14:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 13, implicit $exec, implicit $mode
+ ; GFX908-NEXT: [[V_CVT_I32_F64_e32_15:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 14, implicit $exec, implicit $mode
+ ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_10]], implicit [[V_CVT_I32_F64_e32_11]], implicit [[V_CVT_I32_F64_e32_12]], implicit [[V_CVT_I32_F64_e32_14]], implicit [[V_CVT_I32_F64_e32_15]]
+ ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_13]]
; GFX908-NEXT: S_ENDPGM 0
;
; GFX90A-LABEL: name: small_num_vgprs_as_spill
@@ -420,36 +401,15 @@ body: |
; GFX90A-NEXT: [[V_CVT_I32_F64_e32_10:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 10, implicit $exec, implicit $mode, implicit-def $m0
; GFX90A-NEXT: [[V_CVT_I32_F64_e32_11:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 11, implicit $exec, implicit $mode, implicit-def $m0
; GFX90A-NEXT: [[V_CVT_I32_F64_e32_12:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 12, implicit $exec, implicit $mode, implicit-def $m0
- ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_13:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 13, implicit $exec, implicit $mode, implicit-def $m0
- ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_14:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 14, implicit $exec, implicit $mode, implicit-def $m0
- ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_15:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 15, implicit $exec, implicit $mode, implicit-def $m0
- ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_16:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 16, implicit $exec, implicit $mode, implicit-def $m0
- ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_17:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 17, implicit $exec, implicit $mode, implicit-def $m0
- ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_18:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 18, implicit $exec, implicit $mode, implicit-def $m0
- ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_19:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 19, implicit $exec, implicit $mode, implicit-def $m0
- ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_20:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 20, implicit $exec, implicit $mode, implicit-def $m0
- ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_21:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 21, implicit $exec, implicit $mode, implicit-def $m0
- ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_22:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 22, implicit $exec, implicit $mode, implicit-def $m0
- ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_23:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 23, implicit $exec, implicit $mode, implicit-def $m0
- ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_24:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 24, implicit $exec, implicit $mode, implicit-def $m0
- ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_25:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 25, implicit $exec, implicit $mode, implicit-def $m0
- ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_26:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 26, implicit $exec, implicit $mode, implicit-def $m0
- ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_27:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 33, implicit $exec, implicit $mode
+ ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_13:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 13, implicit $exec, implicit $mode
+ ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_14:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 14, implicit $exec, implicit $mode
+ ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_15:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 15, implicit $exec, implicit $mode
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: bb.1:
; GFX90A-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_]], implicit [[V_CVT_I32_F64_e32_1]], implicit [[V_CVT_I32_F64_e32_2]], implicit [[V_CVT_I32_F64_e32_3]], implicit [[V_CVT_I32_F64_e32_4]]
; GFX90A-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_5]], implicit [[V_CVT_I32_F64_e32_6]], implicit [[V_CVT_I32_F64_e32_7]], implicit [[V_CVT_I32_F64_e32_8]], implicit [[V_CVT_I32_F64_e32_9]]
; GFX90A-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_10]], implicit [[V_CVT_I32_F64_e32_11]], implicit [[V_CVT_I32_F64_e32_12]], implicit [[V_CVT_I32_F64_e32_13]], implicit [[V_CVT_I32_F64_e32_14]]
- ; GFX90A-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_15]], implicit [[V_CVT_I32_F64_e32_16]], implicit [[V_CVT_I32_F64_e32_17]], implicit [[V_CVT_I32_F64_e32_18]], implicit [[V_CVT_I32_F64_e32_19]]
- ; GFX90A-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_20]], implicit [[V_CVT_I32_F64_e32_21]], implicit [[V_CVT_I32_F64_e32_22]], implicit [[V_CVT_I32_F64_e32_23]], implicit [[V_CVT_I32_F64_e32_24]]
- ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_28:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 27, implicit $exec, implicit $mode
- ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_29:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 28, implicit $exec, implicit $mode
- ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_30:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 29, implicit $exec, implicit $mode
- ; GFX90A-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_25]], implicit [[V_CVT_I32_F64_e32_26]], implicit [[V_CVT_I32_F64_e32_28]], implicit [[V_CVT_I32_F64_e32_29]], implicit [[V_CVT_I32_F64_e32_30]]
- ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_31:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 30, implicit $exec, implicit $mode
- ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_32:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 31, implicit $exec, implicit $mode
- ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_33:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 32, implicit $exec, implicit $mode
- ; GFX90A-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_31]], implicit [[V_CVT_I32_F64_e32_32]], implicit [[V_CVT_I32_F64_e32_33]], implicit [[V_CVT_I32_F64_e32_27]]
+ ; GFX90A-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_15]]
; GFX90A-NEXT: S_ENDPGM 0
bb.0:
successors: %bb.1
@@ -467,38 +427,16 @@ body: |
%10:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 10, implicit $exec, implicit $mode, implicit-def $m0
%11:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 11, implicit $exec, implicit $mode, implicit-def $m0
%12:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 12, implicit $exec, implicit $mode, implicit-def $m0
- %13:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 13, implicit $exec, implicit $mode, implicit-def $m0
- %14:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 14, implicit $exec, implicit $mode, implicit-def $m0
- %15:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 15, implicit $exec, implicit $mode, implicit-def $m0
- %16:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 16, implicit $exec, implicit $mode, implicit-def $m0
- %17:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 17, implicit $exec, implicit $mode, implicit-def $m0
- %18:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 18, implicit $exec, implicit $mode, implicit-def $m0
- %19:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 19, implicit $exec, implicit $mode, implicit-def $m0
- %20:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 20, implicit $exec, implicit $mode, implicit-def $m0
- %21:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 21, implicit $exec, implicit $mode, implicit-def $m0
- %22:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 22, implicit $exec, implicit $mode, implicit-def $m0
- %23:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 23, implicit $exec, implicit $mode, implicit-def $m0
- %24:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 24, implicit $exec, implicit $mode, implicit-def $m0
- %25:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 25, implicit $exec, implicit $mode, implicit-def $m0
- %26:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 26, implicit $exec, implicit $mode, implicit-def $m0
- %27:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 27, implicit $exec, implicit $mode
- %28:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 28, implicit $exec, implicit $mode
- %29:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 29, implicit $exec, implicit $mode
- %30:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 30, implicit $exec, implicit $mode
- %31:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 31, implicit $exec, implicit $mode
- %32:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 32, implicit $exec, implicit $mode
- %33:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 33, implicit $exec, implicit $mode
+ %13:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 13, implicit $exec, implicit $mode
+ %14:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 14, implicit $exec, implicit $mode
+ %15:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 15, implicit $exec, implicit $mode
bb.1:
S_NOP 0, implicit %0, implicit %1, implicit %2, implicit %3, implicit %4
S_NOP 0, implicit %5, implicit %6, implicit %7, implicit %8, implicit %9
S_NOP 0, implicit %10, implicit %11, implicit %12, implicit %13, implicit %14
- S_NOP 0, implicit %15, implicit %16, implicit %17, implicit %18, implicit %19
- S_NOP 0, implicit %20, implicit %21, implicit %22, implicit %23, implicit %24
- S_NOP 0, implicit %25, implicit %26, implicit %27, implicit %28, implicit %29
- S_NOP 0, implicit %30, implicit %31, implicit %32, implicit %33
-
+ S_NOP 0, implicit %15
S_ENDPGM 0
...
# Min/Max occupancy is 8, but user requests 7, the scheduler's rematerialization
@@ -815,9 +753,9 @@ body: |
S_ENDPGM 0
...
# Min/Max waves/EU is 8. For targets with non-unified RF (gfx908) we are able to
-# eliminate both ArchVGPR and AGPR spilling by saving 2 VGPRs. In the unified RF
-# case (gfx90a) the ArchVGPR allocation granule forces us to remat more
-# ArchVGPRs to eliminate spilling.
+# eliminate both ArchVGPR and AGPR spilling by saving one of each. In the
+# unified RF case (gfx90a) the ArchVGPR allocation granule may force us to remat
+# more ArchVGPRs to eliminate spilling.
---
name: reduce_arch_and_acc_vgrp_spill
tracksRegLiveness: true
@@ -860,6 +798,7 @@ body: |
; GFX908-NEXT: [[DEF28:%[0-9]+]]:agpr_32 = IMPLICIT_DEF
; GFX908-NEXT: [[DEF29:%[0-9]+]]:agpr_32 = IMPLICIT_DEF
; GFX908-NEXT: [[DEF30:%[0-9]+]]:agpr_32 = IMPLICIT_DEF
+ ; GFX908-NEXT: [[DEF31:%[0-9]+]]:agpr_32 = IMPLICIT_DEF
; GFX908-NEXT: [[V_CVT_I32_F64_e32_1:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 1, implicit $exec, implicit $mode, implicit-def $m0
; GFX908-NEXT: [[V_CVT_I32_F64_e32_2:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 2, implicit $exec, implicit $mode, implicit-def $m0
; GFX908-NEXT: [[V_CVT_I32_F64_e32_3:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 3, implicit $exec, implicit $mode, implicit-def $m0
@@ -886,12 +825,11 @@ body: |
; GFX908-NEXT: [[V_CVT_I32_F64_e32_24:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 24, implicit $exec, implicit $mode, implicit-def $m0
; GFX908-NEXT: [[V_CVT_I32_F64_e32_25:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 25, implicit $exec, implicit $mode, implicit-def $m0
; GFX908-NEXT: [[V_CVT_I32_F64_e32_26:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 26, implicit $exec, implicit $mode, implicit-def $m0
- ; GFX908-NEXT: [[V_CVT_I32_F64_e32_27:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 27, implicit $exec, implicit $mode
- ; GFX908-NEXT: [[V_CVT_I32_F64_e32_28:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 28, implicit $exec, implicit $mode
- ; GFX908-NEXT: [[V_CVT_I32_F64_e32_29:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 29, implicit $exec, implicit $mode
- ; GFX908-NEXT: [[V_CVT_I32_F64_e32_30:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 30, implicit $exec, implicit $mode
- ; GFX908-NEXT: [[V_CVT_I32_F64_e32_31:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 31, implicit $exec, implicit $mode
- ; GFX908-NEXT: [[V_CVT_I32_F64_e32_32:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 64, implicit $exec, implicit $mode
+ ; GFX908-NEXT: [[V_CVT_I32_F64_e32_27:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 28, implicit $exec, implicit $mode
+ ; GFX908-NEXT: [[V_CVT_I32_F64_e32_28:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 29, implicit $exec, implicit $mode
+ ; GFX908-NEXT: [[V_CVT_I32_F64_e32_29:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 30, implicit $exec, implicit $mode
+ ; GFX908-NEXT: [[V_CVT_I32_F64_e32_30:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 31, implicit $exec, implicit $mode
+ ; GFX908-NEXT: [[V_CVT_I32_F64_e32_31:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 64, implicit $exec, implicit $mode
; GFX908-NEXT: {{ $}}
; GFX908-NEXT: bb.1:
; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_]], implicit [[V_CVT_I32_F64_e32_1]], implicit [[V_CVT_I32_F64_e32_2]], implicit [[V_CVT_I32_F64_e32_3]], implicit [[V_CVT_I32_F64_e32_4]]
@@ -899,17 +837,17 @@ body: |
; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_10]], implicit [[V_CVT_I32_F64_e32_11]], implicit [[V_CVT_I32_F64_e32_12]], implicit [[V_CVT_I32_F64_e32_13]], implicit [[V_CVT_I32_F64_e32_14]]
; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_15]], implicit [[V_CVT_I32_F64_e32_16]], implicit [[V_CVT_I32_F64_e32_17]], implicit [[V_CVT_I32_F64_e32_18]], implicit [[V_CVT_I32_F64_e32_19]]
; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_20]], implicit [[V_CVT_I32_F64_e32_21]], implicit [[V_CVT_I32_F64_e32_22]], implicit [[V_CVT_I32_F64_e32_23]], implicit [[V_CVT_I32_F64_e32_24]]
- ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_25]], implicit [[V_CVT_I32_F64_e32_26]], implicit [[V_CVT_I32_F64_e32_27]], implicit [[V_CVT_I32_F64_e32_28]], implicit [[V_CVT_I32_F64_e32_29]]
- ; GFX908-NEXT: [[DEF31:%[0-9]+]]:agpr_32 = IMPLICIT_DEF
+ ; GFX908-NEXT: [[V_CVT_I32_F64_e32_32:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 27, implicit $exec, implicit $mode
+ ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_25]], implicit [[V_CVT_I32_F64_e32_26]], implicit [[V_CVT_I32_F64_e32_32]], implicit [[V_CVT_I32_F64_e32_27]], implicit [[V_CVT_I32_F64_e32_28]]
; GFX908-NEXT: [[DEF32:%[0-9]+]]:agpr_32 = IMPLICIT_DEF
- ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_30]], implicit [[V_CVT_I32_F64_e32_31]], implicit [[DEF31]], implicit [[DEF32]], implicit [[DEF]]
- ; GFX908-NEXT: S_NOP 0, implicit [[DEF1]], implicit [[DEF2]], implicit [[DEF3]], implicit [[DEF4]], implicit [[DEF5]]
- ; GFX908-NEXT: S_NOP 0, implicit [[DEF6]], implicit [[DEF7]], implicit [[DEF8]], implicit [[DEF9]], implicit [[DEF10]]
- ; GFX908-NEXT: S_NOP 0, implicit [[DEF11]], implicit [[DEF12]], implicit [[DEF13]], implicit [[DEF14]], implicit [[DEF15]]
- ; GFX908-NEXT: S_NOP 0, implicit [[DEF16]], implicit [[DEF17]], implicit [[DEF18]], implicit [[DEF19]], implicit [[DEF20]]
- ; GFX908-NEXT: S_NOP 0, implicit [[DEF21]], implicit [[DEF22]], implicit [[DEF23]], implicit [[DEF24]], implicit [[DEF25]]
- ; GFX908-NEXT: S_NOP 0, implicit [[DEF26]], implicit [[DEF27]], implicit [[DEF28]], implicit [[DEF29]], implicit [[V_CVT_I32_F64_e32_32]]
- ; GFX908-NEXT: S_NOP 0, implicit [[DEF30]]
+ ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_29]], implicit [[V_CVT_I32_F64_e32_30]], implicit [[DEF32]], implicit [[DEF]], implicit [[DEF1]]
+ ; GFX908-NEXT: S_NOP 0, implicit [[DEF2]], implicit [[DEF3]], implicit [[DEF4]], implicit [[DEF5]], implicit [[DEF6]]
+ ; GFX908-NEXT: S_NOP 0, implicit [[DEF7]], implicit [[DEF8]], implicit [[DEF9]], implicit [[DEF10]], implicit [[DEF11]]
+ ; GFX908-NEXT: S_NOP 0, implicit [[DEF12]], implicit [[DEF13]], implicit [[DEF14]], implicit [[DEF15]], implicit [[DEF16]]
+ ; GFX908-NEXT: S_NOP 0, implicit [[DEF17]], implicit [[DEF18]], implicit [[DEF19]], implicit [[DEF20]], implicit [[DEF21]]
+ ; GFX908-NEXT: S_NOP 0, implicit [[DEF22]], implicit [[DEF23]], implicit [[DEF24]], implicit [[DEF25]], implicit [[DEF26]]
+ ; GFX908-NEXT: S_NOP 0, implicit [[DEF27]], implicit [[DEF28]], implicit [[DEF29]], implicit [[DEF30]], implicit [[V_CVT_I32_F64_e32_31]]
+ ; GFX908-NEXT: S_NOP 0, implicit [[DEF31]]
; GFX908-NEXT: S_ENDPGM 0
;
; GFX90A-LABEL: name: reduce_arch_and_acc_vgrp_spill
@@ -1358,8 +1296,7 @@ body: |
; GFX908-NEXT: [[V_CVT_I32_F64_e32_252:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 252, implicit $exec, implicit $mode, implicit-def $m0
; GFX908-NEXT: [[V_CVT_I32_F64_e32_253:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 253, implicit $exec, implicit $mode, implicit-def $m0
; GFX908-NEXT: [[V_CVT_I32_F64_e32_254:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 254, implicit $exec, implicit $mode, implicit-def $m0
- ; GFX908-NEXT: [[V_CVT_I32_F64_e32_255:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 255, implicit $exec, implicit $mode
- ; GFX908-NEXT: [[V_CVT_I32_F64_e32_256:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 256, implicit $exec, implicit $mode
+ ; GFX908-NEXT: [[V_CVT_I32_F64_e32_255:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 256, implicit $exec, implicit $mode
; GFX908-NEXT: {{ $}}
; GFX908-NEXT: bb.1:
; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_]], implicit [[V_CVT_I32_F64_e32_1]], implicit [[V_CVT_I32_F64_e32_2]], implicit [[V_CVT_I32_F64_e32_3]], implicit [[V_CVT_I32_F64_e32_4]], implicit [[V_CVT_I32_F64_e32_5]], implicit [[V_CVT_I32_F64_e32_6]], implicit [[V_CVT_I32_F64_e32_7]], implicit [[V_CVT_I32_F64_e32_8]], implicit [[V_CVT_I32_F64_e32_9]]
@@ -1387,7 +1324,8 @@ body: |
; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_220]], implicit [[V_CVT_I32_F64_e32_221]], implicit [[V_CVT_I32_F64_e32_222]], implicit [[V_CVT_I32_F64_e32_223]], implicit [[V_CVT_I32_F64_e32_224]], implicit [[V_CVT_I32_F64_e32_225]], implicit [[V_CVT_I32_F64_e32_226]], implicit [[V_CVT_I32_F64_e32_227]], implicit [[V_CVT_I32_F64_e32_228]], implicit [[V_CVT_I32_F64_e32_229]]
; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_230]], implicit [[V_CVT_I32_F64_e32_231]], implicit [[V_CVT_I32_F64_e32_232]], implicit [[V_CVT_I32_F64_e32_233]], implicit [[V_CVT_I32_F64_e32_234]], implicit [[V_CVT_I32_F64_e32_235]], implicit [[V_CVT_I32_F64_e32_236]], implicit [[V_CVT_I32_F64_e32_237]], implicit [[V_CVT_I32_F64_e32_238]], implicit [[V_CVT_I32_F64_e32_239]]
; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_240]], implicit [[V_CVT_I32_F64_e32_241]], implicit [[V_CVT_I32_F64_e32_242]], implicit [[V_CVT_I32_F64_e32_243]], implicit [[V_CVT_I32_F64_e32_244]], implicit [[V_CVT_I32_F64_e32_245]], implicit [[V_CVT_I32_F64_e32_246]], implicit [[V_CVT_I32_F64_e32_247]], implicit [[V_CVT_I32_F64_e32_248]], implicit [[V_CVT_I32_F64_e32_249]]
- ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_250]], implicit [[V_CVT_I32_F64_e32_251]], implicit [[V_CVT_I32_F64_e32_252]], implicit [[V_CVT_I32_F64_e32_253]], implicit [[V_CVT_I32_F64_e32_254]], implicit [[V_CVT_I32_F64_e32_255]], implicit [[V_CVT_I32_F64_e32_256]], implicit [[DEF]]
+ ; GFX908-NEXT: [[V_CVT_I32_F64_e32_256:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 255, implicit $exec, implicit $mode
+ ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_250]], implicit [[V_CVT_I32_F64_e32_251]], implicit [[V_CVT_I32_F64_e32_252]], implicit [[V_CVT_I32_F64_e32_253]], implicit [[V_CVT_I32_F64_e32_254]], implicit [[V_CVT_I32_F64_e32_256]], implicit [[V_CVT_I32_F64_e32_255]], implicit [[DEF]]
; GFX908-NEXT: S_ENDPGM 0
;
; GFX90A-LABEL: name: reduce_spill_archvgpr_above_addressable_limit
@@ -1395,6 +1333,7 @@ body: |
; GFX90A-NEXT: successors: %bb.1(0x80000000)
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: [[V_CVT_I32_F64_e32_:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 0, implicit $exec, implicit $mode, implicit-def $m0
+ ; GFX90A-NEXT: [[DEF:%[0-9]+]]:agpr_32 = IMPLICIT_DEF
; GFX90A-NEXT: [[V_CVT_I32_F64_e32_1:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 1, implicit $exec, implicit $mode, implicit-def $m0
; GFX90A-NEXT: [[V_CVT_I32_F64_e32_2:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 2, implicit $exec, implicit $mode, implicit-def $m0
; GFX90A-NEXT: [[V_CVT_I32_F64_e32_3:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 3, implicit $exec, implicit $mode, implicit-def $m0
@@ -1650,8 +1589,6 @@ body: |
; GFX90A-NEXT: [[V_CVT_I32_F64_e32_253:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 253, implicit $exec, implicit $mode, implicit-def $m0
; GFX90A-NEXT: [[V_CVT_I32_F64_e32_254:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 254, implicit $exec, implicit $mode, implicit-def $m0
; GFX90A-NEXT: [[V_CVT_I32_F64_e32_255:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 256, implicit $exec, implicit $mode
- ; GFX90A-NEXT: [[DEF:%[0-9]+]]:agpr_32 = IMPLICIT_DEF
- ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_256:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 255, implicit $exec, implicit $mode
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: bb.1:
; GFX90A-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_]], implicit [[V_CVT_I32_F64_e32_1]], implicit [[V_CVT_I32_F64_e32_2]], implicit [[V_CVT_I32_F64_e32_3]], implicit [[V_CVT_I32_F64_e32_4]], implicit [[V_CVT_I32_F64_e32_5]], implicit [[V_CVT_I32_F64_e32_6]], implicit [[V_CVT_I32_F64_e32_7]], implicit [[V_CVT_I32_F64_e32_8]], implicit [[V_CVT_I32_F64_e32_9]]
@@ -1679,6 +1616,7 @@ body: |
; GFX90A-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_220]], implicit [[V_CVT_I32_F64_e32_221]], implicit [[V_CVT_I32_F64_e32_222]], implicit [[V_CVT_I32_F64_e32_223]], implicit [[V_CVT_I32_F64_e32_224]], implicit [[V_CVT_I32_F64_e32_225]], implicit [[V_CVT_I32_F64_e32_226]], implicit [[V_CVT_I32_F64_e32_227]], implicit [[V_CVT_I32_F64_e32_228]], implicit [[V_CVT_I32_F64_e32_229]]
; GFX90A-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_230]], implicit [[V_CVT_I32_F64_e32_231]], implicit [[V_CVT_I32_F64_e32_232]], implicit [[V_CVT_I32_F64_e32_233]], implicit [[V_CVT_I32_F64_e32_234]], implicit [[V_CVT_I32_F64_e32_235]], implicit [[V_CVT_I32_F64_e32_236]], implicit [[V_CVT_I32_F64_e32_237]], implicit [[V_CVT_I32_F64_e32_238]], implicit [[V_CVT_I32_F64_e32_239]]
; GFX90A-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_240]], implicit [[V_CVT_I32_F64_e32_241]], implicit [[V_CVT_I32_F64_e32_242]], implicit [[V_CVT_I32_F64_e32_243]], implicit [[V_CVT_I32_F64_e32_244]], implicit [[V_CVT_I32_F64_e32_245]], implicit [[V_CVT_I32_F64_e32_246]], implicit [[V_CVT_I32_F64_e32_247]], implicit [[V_CVT_I32_F64_e32_248]], implicit [[V_CVT_I32_F64_e32_249]]
+ ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_256:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 255, implicit $exec, implicit $mode
; GFX90A-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_250]], implicit [[V_CVT_I32_F64_e32_251]], implicit [[V_CVT_I32_F64_e32_252]], implicit [[V_CVT_I32_F64_e32_253]], implicit [[V_CVT_I32_F64_e32_254]], implicit [[V_CVT_I32_F64_e32_256]], implicit [[V_CVT_I32_F64_e32_255]], implicit [[DEF]]
; GFX90A-NEXT: S_ENDPGM 0
bb.0:
@@ -2246,35 +2184,35 @@ body: |
; GFX908-NEXT: [[DEF253:%[0-9]+]]:agpr_32 = IMPLICIT_DEF
; GFX908-NEXT: [[DEF254:%[0-9]+]]:agpr_32 = IMPLICIT_DEF
; GFX908-NEXT: [[DEF255:%[0-9]+]]:agpr_32 = IMPLICIT_DEF
- ; GFX908-NEXT: [[DEF256:%[0-9]+]]:agpr_32 = IMPLICIT_DEF
; GFX908-NEXT: {{ $}}
; GFX908-NEXT: bb.1:
- ; GFX908-NEXT: S_NOP 0, implicit [[DEF128]], implicit [[DEF129]], implicit [[DEF130]], implicit [[DEF131]], implicit [[DEF132]], implicit [[DEF133]], implicit [[DEF134]], implicit [[DEF135]], implicit [[DEF136]], implicit [[DEF137]]
- ; GFX908-NEXT: S_NOP 0, implicit [[DEF138]], implicit [[DEF139]], implicit [[DEF140]], implicit [[DEF141]], implicit [[DEF142]], implicit [[DEF143]], implicit [[DEF144]], implicit [[DEF145]], implicit [[DEF146]], implicit [[DEF147]]
- ; GFX908-NEXT: S_NOP 0, implicit [[DEF148]], implicit [[DEF149]], implicit [[DEF150]], implicit [[DEF151]], implicit [[DEF152]], implicit [[DEF153]], implicit [[DEF154]], implicit [[DEF155]], implicit [[DEF156]], implicit [[DEF157]]
- ; GFX908-NEXT: S_NOP 0, implicit [[DEF158]], implicit [[DEF159]], implicit [[DEF160]], implicit [[DEF161]], implicit [[DEF162]], implicit [[DEF163]], implicit [[DEF164]], implicit [[DEF165]], implicit [[DEF166]], implicit [[DEF167]]
- ; GFX908-NEXT: S_NOP 0, implicit [[DEF168]], implicit [[DEF169]], implicit [[DEF170]], implicit [[DEF171]], implicit [[DEF172]], implicit [[DEF173]], implicit [[DEF174]], implicit [[DEF175]], implicit [[DEF176]], implicit [[DEF177]]
- ; GFX908-NEXT: S_NOP 0, implicit [[DEF178]], implicit [[DEF179]], implicit [[DEF180]], implicit [[DEF181]], implicit [[DEF182]], implicit [[DEF183]], implicit [[DEF184]], implicit [[DEF185]], implicit [[DEF186]], implicit [[DEF187]]
- ; GFX908-NEXT: S_NOP 0, implicit [[DEF188]], implicit [[DEF189]], implicit [[DEF190]], implicit [[DEF191]], implicit [[DEF192]], implicit [[DEF193]], implicit [[DEF194]], implicit [[DEF195]], implicit [[DEF196]], implicit [[DEF197]]
- ; GFX908-NEXT: S_NOP 0, implicit [[DEF198]], implicit [[DEF199]], implicit [[DEF200]], implicit [[DEF201]], implicit [[DEF202]], implicit [[DEF203]], implicit [[DEF204]], implicit [[DEF205]], implicit [[DEF206]], implicit [[DEF207]]
- ; GFX908-NEXT: S_NOP 0, implicit [[DEF208]], implicit [[DEF209]], implicit [[DEF210]], implicit [[DEF211]], implicit [[DEF212]], implicit [[DEF213]], implicit [[DEF214]], implicit [[DEF215]], implicit [[DEF216]], implicit [[DEF217]]
- ; GFX908-NEXT: S_NOP 0, implicit [[DEF218]], implicit [[DEF219]], implicit [[DEF220]], implicit [[DEF221]], implicit [[DEF222]], implicit [[DEF223]], implicit [[DEF224]], implicit [[DEF225]], implicit [[DEF226]], implicit [[DEF227]]
- ; GFX908-NEXT: S_NOP 0, implicit [[DEF228]], implicit [[DEF229]], implicit [[DEF230]], implicit [[DEF231]], implicit [[DEF232]], implicit [[DEF233]], implicit [[DEF234]], implicit [[DEF235]], implicit [[DEF236]], implicit [[DEF237]]
- ; GFX908-NEXT: S_NOP 0, implicit [[DEF238]], implicit [[DEF239]], implicit [[DEF240]], implicit [[DEF241]], implicit [[DEF242]], implicit [[DEF243]], implicit [[DEF244]], implicit [[DEF245]], implicit [[DEF246]], implicit [[DEF247]]
- ; GFX908-NEXT: S_NOP 0, implicit [[DEF248]], implicit [[DEF249]], implicit [[DEF250]], implicit [[DEF251]], implicit [[DEF252]], implicit [[DEF253]], implicit [[DEF254]], implicit [[DEF255]], implicit [[DEF256]], implicit [[DEF]]
- ; GFX908-NEXT: S_NOP 0, implicit [[DEF1]], implicit [[DEF2]], implicit [[DEF3]], implicit [[DEF4]], implicit [[DEF5]], implicit [[DEF6]], implicit [[DEF7]], implicit [[DEF8]], implicit [[DEF9]], implicit [[DEF10]]
- ; GFX908-NEXT: S_NOP 0, implicit [[DEF11]], implicit [[DEF12]], implicit [[DEF13]], implicit [[DEF14]], implicit [[DEF15]], implicit [[DEF16]], implicit [[DEF17]], implicit [[DEF18]], implicit [[DEF19]], implicit [[DEF20]]
- ; GFX908-NEXT: S_NOP 0, implicit [[DEF21]], implicit [[DEF22]], implicit [[DEF23]], implicit [[DEF24]], implicit [[DEF25]], implicit [[DEF26]], implicit [[DEF27]], implicit [[DEF28]], implicit [[DEF29]], implicit [[DEF30]]
- ; GFX908-NEXT: S_NOP 0, implicit [[DEF31]], implicit [[DEF32]], implicit [[DEF33]], implicit [[DEF34]], implicit [[DEF35]], implicit [[DEF36]], implicit [[DEF37]], implicit [[DEF38]], implicit [[DEF39]], implicit [[DEF40]]
- ; GFX908-NEXT: S_NOP 0, implicit [[DEF41]], implicit [[DEF42]], implicit [[DEF43]], implicit [[DEF44]], implicit [[DEF45]], implicit [[DEF46]], implicit [[DEF47]], implicit [[DEF48]], implicit [[DEF49]], implicit [[DEF50]]
- ; GFX908-NEXT: S_NOP 0, implicit [[DEF51]], implicit [[DEF52]], implicit [[DEF53]], implicit [[DEF54]], implicit [[DEF55]], implicit [[DEF56]], implicit [[DEF57]], implicit [[DEF58]], implicit [[DEF59]], implicit [[DEF60]]
- ; GFX908-NEXT: S_NOP 0, implicit [[DEF61]], implicit [[DEF62]], implicit [[DEF63]], implicit [[DEF64]], implicit [[DEF65]], implicit [[DEF66]], implicit [[DEF67]], implicit [[DEF68]], implicit [[DEF69]], implicit [[DEF70]]
- ; GFX908-NEXT: S_NOP 0, implicit [[DEF71]], implicit [[DEF72]], implicit [[DEF73]], implicit [[DEF74]], implicit [[DEF75]], implicit [[DEF76]], implicit [[DEF77]], implicit [[DEF78]], implicit [[DEF79]], implicit [[DEF80]]
- ; GFX908-NEXT: S_NOP 0, implicit [[DEF81]], implicit [[DEF82]], implicit [[DEF83]], implicit [[DEF84]], implicit [[DEF85]], implicit [[DEF86]], implicit [[DEF87]], implicit [[DEF88]], implicit [[DEF89]], implicit [[DEF90]]
- ; GFX908-NEXT: S_NOP 0, implicit [[DEF91]], implicit [[DEF92]], implicit [[DEF93]], implicit [[DEF94]], implicit [[DEF95]], implicit [[DEF96]], implicit [[DEF97]], implicit [[DEF98]], implicit [[DEF99]], implicit [[DEF100]]
- ; GFX908-NEXT: S_NOP 0, implicit [[DEF101]], implicit [[DEF102]], implicit [[DEF103]], implicit [[DEF104]], implicit [[DEF105]], implicit [[DEF106]], implicit [[DEF107]], implicit [[DEF108]], implicit [[DEF109]], implicit [[DEF110]]
- ; GFX908-NEXT: S_NOP 0, implicit [[DEF111]], implicit [[DEF112]], implicit [[DEF113]], implicit [[DEF114]], implicit [[DEF115]], implicit [[DEF116]], implicit [[DEF117]], implicit [[DEF118]], implicit [[DEF119]], implicit [[DEF120]]
- ; GFX908-NEXT: S_NOP 0, implicit [[DEF121]], implicit [[DEF122]], implicit [[DEF123]], implicit [[DEF124]], implicit [[DEF125]], implicit [[DEF126]], implicit [[DEF127]], implicit [[V_CVT_I32_F64_e32_]], implicit [[V_CVT_I32_F64_e32_1]]
+ ; GFX908-NEXT: [[DEF256:%[0-9]+]]:agpr_32 = IMPLICIT_DEF
+ ; GFX908-NEXT: S_NOP 0, implicit [[DEF256]], implicit [[DEF]], implicit [[DEF1]], implicit [[DEF2]], implicit [[DEF3]], implicit [[DEF4]], implicit [[DEF5]], implicit [[DEF6]], implicit [[DEF7]], implicit [[DEF8]]
+ ; GFX908-NEXT: S_NOP 0, implicit [[DEF9]], implicit [[DEF10]], implicit [[DEF11]], implicit [[DEF12]], implicit [[DEF13]], implicit [[DEF14]], implicit [[DEF15]], implicit [[DEF16]], implicit [[DEF17]], implicit [[DEF18]]
+ ; GFX908-NEXT: S_NOP 0, implicit [[DEF19]], implicit [[DEF20]], implicit [[DEF21]], implicit [[DEF22]], implicit [[DEF23]], implicit [[DEF24]], implicit [[DEF25]], implicit [[DEF26]], implicit [[DEF27]], implicit [[DEF28]]
+ ; GFX908-NEXT: S_NOP 0, implicit [[DEF29]], implicit [[DEF30]], implicit [[DEF31]], implicit [[DEF32]], implicit [[DEF33]], implicit [[DEF34]], implicit [[DEF35]], implicit [[DEF36]], implicit [[DEF37]], implicit [[DEF38]]
+ ; GFX908-NEXT: S_NOP 0, implicit [[DEF39]], implicit [[DEF40]], implicit [[DEF41]], implicit [[DEF42]], implicit [[DEF43]], implicit [[DEF44]], implicit [[DEF45]], implicit [[DEF46]], implicit [[DEF47]], implicit [[DEF48]]
+ ; GFX908-NEXT: S_NOP 0, implicit [[DEF49]], implicit [[DEF50]], implicit [[DEF51]], implicit [[DEF52]], implicit [[DEF53]], implicit [[DEF54]], implicit [[DEF55]], implicit [[DEF56]], implicit [[DEF57]], implicit [[DEF58]]
+ ; GFX908-NEXT: S_NOP 0, implicit [[DEF59]], implicit [[DEF60]], implicit [[DEF61]], implicit [[DEF62]], implicit [[DEF63]], implicit [[DEF64]], implicit [[DEF65]], implicit [[DEF66]], implicit [[DEF67]], implicit [[DEF68]]
+ ; GFX908-NEXT: S_NOP 0, implicit [[DEF69]], implicit [[DEF70]], implicit [[DEF71]], implicit [[DEF72]], implicit [[DEF73]], implicit [[DEF74]], implicit [[DEF75]], implicit [[DEF76]], implicit [[DEF77]], implicit [[DEF78]]
+ ; GFX908-NEXT: S_NOP 0, implicit [[DEF79]], implicit [[DEF80]], implicit [[DEF81]], implicit [[DEF82]], implicit [[DEF83]], implicit [[DEF84]], implicit [[DEF85]], implicit [[DEF86]], implicit [[DEF87]], implicit [[DEF88]]
+ ; GFX908-NEXT: S_NOP 0, implicit [[DEF89]], implicit [[DEF90]], implicit [[DEF91]], implicit [[DEF92]], implicit [[DEF93]], implicit [[DEF94]], implicit [[DEF95]], implicit [[DEF96]], implicit [[DEF97]], implicit [[DEF98]]
+ ; GFX908-NEXT: S_NOP 0, implicit [[DEF99]], implicit [[DEF100]], implicit [[DEF101]], implicit [[DEF102]], implicit [[DEF103]], implicit [[DEF104]], implicit [[DEF105]], implicit [[DEF106]], implicit [[DEF107]], implicit [[DEF108]]
+ ; GFX908-NEXT: S_NOP 0, implicit [[DEF109]], implicit [[DEF110]], implicit [[DEF111]], implicit [[DEF112]], implicit [[DEF113]], implicit [[DEF114]], implicit [[DEF115]], implicit [[DEF116]], implicit [[DEF117]], implicit [[DEF118]]
+ ; GFX908-NEXT: S_NOP 0, implicit [[DEF119]], implicit [[DEF120]], implicit [[DEF121]], implicit [[DEF122]], implicit [[DEF123]], implicit [[DEF124]], implicit [[DEF125]], implicit [[DEF126]], implicit [[DEF127]], implicit [[DEF128]]
+ ; GFX908-NEXT: S_NOP 0, implicit [[DEF129]], implicit [[DEF130]], implicit [[DEF131]], implicit [[DEF132]], implicit [[DEF133]], implicit [[DEF134]], implicit [[DEF135]], implicit [[DEF136]], implicit [[DEF137]], implicit [[DEF138]]
+ ; GFX908-NEXT: S_NOP 0, implicit [[DEF139]], implicit [[DEF140]], implicit [[DEF141]], implicit [[DEF142]], implicit [[DEF143]], implicit [[DEF144]], implicit [[DEF145]], implicit [[DEF146]], implicit [[DEF147]], implicit [[DEF148]]
+ ; GFX908-NEXT: S_NOP 0, implicit [[DEF149]], implicit [[DEF150]], implicit [[DEF151]], implicit [[DEF152]], implicit [[DEF153]], implicit [[DEF154]], implicit [[DEF155]], implicit [[DEF156]], implicit [[DEF157]], implicit [[DEF158]]
+ ; GFX908-NEXT: S_NOP 0, implicit [[DEF159]], implicit [[DEF160]], implicit [[DEF161]], implicit [[DEF162]], implicit [[DEF163]], implicit [[DEF164]], implicit [[DEF165]], implicit [[DEF166]], implicit [[DEF167]], implicit [[DEF168]]
+ ; GFX908-NEXT: S_NOP 0, implicit [[DEF169]], implicit [[DEF170]], implicit [[DEF171]], implicit [[DEF172]], implicit [[DEF173]], implicit [[DEF174]], implicit [[DEF175]], implicit [[DEF176]], implicit [[DEF177]], implicit [[DEF178]]
+ ; GFX908-NEXT: S_NOP 0, implicit [[DEF179]], implicit [[DEF180]], implicit [[DEF181]], implicit [[DEF182]], implicit [[DEF183]], implicit [[DEF184]], implicit [[DEF185]], implicit [[DEF186]], implicit [[DEF187]], implicit [[DEF188]]
+ ; GFX908-NEXT: S_NOP 0, implicit [[DEF189]], implicit [[DEF190]], implicit [[DEF191]], implicit [[DEF192]], implicit [[DEF193]], implicit [[DEF194]], implicit [[DEF195]], implicit [[DEF196]], implicit [[DEF197]], implicit [[DEF198]]
+ ; GFX908-NEXT: S_NOP 0, implicit [[DEF199]], implicit [[DEF200]], implicit [[DEF201]], implicit [[DEF202]], implicit [[DEF203]], implicit [[DEF204]], implicit [[DEF205]], implicit [[DEF206]], implicit [[DEF207]], implicit [[DEF208]]
+ ; GFX908-NEXT: S_NOP 0, implicit [[DEF209]], implicit [[DEF210]], implicit [[DEF211]], implicit [[DEF212]], implicit [[DEF213]], implicit [[DEF214]], implicit [[DEF215]], implicit [[DEF216]], implicit [[DEF217]], implicit [[DEF218]]
+ ; GFX908-NEXT: S_NOP 0, implicit [[DEF219]], implicit [[DEF220]], implicit [[DEF221]], implicit [[DEF222]], implicit [[DEF223]], implicit [[DEF224]], implicit [[DEF225]], implicit [[DEF226]], implicit [[DEF227]], implicit [[DEF228]]
+ ; GFX908-NEXT: S_NOP 0, implicit [[DEF229]], implicit [[DEF230]], implicit [[DEF231]], implicit [[DEF232]], implicit [[DEF233]], implicit [[DEF234]], implicit [[DEF235]], implicit [[DEF236]], implicit [[DEF237]], implicit [[DEF238]]
+ ; GFX908-NEXT: S_NOP 0, implicit [[DEF239]], implicit [[DEF240]], implicit [[DEF241]], implicit [[DEF242]], implicit [[DEF243]], implicit [[DEF244]], implicit [[DEF245]], implicit [[DEF246]], implicit [[DEF247]], implicit [[DEF248]]
+ ; GFX908-NEXT: S_NOP 0, implicit [[DEF249]], implicit [[DEF250]], implicit [[DEF251]], implicit [[DEF252]], implicit [[DEF253]], implicit [[DEF254]], implicit [[DEF255]], implicit [[V_CVT_I32_F64_e32_]], implicit [[V_CVT_I32_F64_e32_1]]
; GFX908-NEXT: S_ENDPGM 0
;
; GFX90A-LABEL: name: reduce_spill_agpr_above_addressable_limit
@@ -2533,41 +2471,41 @@ body: |
; GFX90A-NEXT: [[DEF249:%[0-9]+]]:agpr_32 = IMPLICIT_DEF
; GFX90A-NEXT: [[DEF250:%[0-9]+]]:agpr_32 = IMPLICIT_DEF
; GFX90A-NEXT: [[DEF251:%[0-9]+]]:agpr_32 = IMPLICIT_DEF
- ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 257, implicit $exec, implicit $mode
- ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_1:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 258, implicit $exec, implicit $mode
; GFX90A-NEXT: [[DEF252:%[0-9]+]]:agpr_32 = IMPLICIT_DEF
; GFX90A-NEXT: [[DEF253:%[0-9]+]]:agpr_32 = IMPLICIT_DEF
; GFX90A-NEXT: [[DEF254:%[0-9]+]]:agpr_32 = IMPLICIT_DEF
; GFX90A-NEXT: [[DEF255:%[0-9]+]]:agpr_32 = IMPLICIT_DEF
- ; GFX90A-NEXT: [[DEF256:%[0-9]+]]:agpr_32 = IMPLICIT_DEF
+ ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 257, implicit $exec, implicit $mode
+ ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_1:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 258, implicit $exec, implicit $mode
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: bb.1:
- ; GFX90A-NEXT: S_NOP 0, implicit [[DEF252]], implicit [[DEF253]], implicit [[DEF254]], implicit [[DEF255]], implicit [[DEF256]], implicit [[DEF]], implicit [[DEF1]], implicit [[DEF2]], implicit [[DEF3]], implicit [[DEF4]]
- ; GFX90A-NEXT: S_NOP 0, implicit [[DEF5]], implicit [[DEF6]], implicit [[DEF7]], implicit [[DEF8]], implicit [[DEF9]], implicit [[DEF10]], implicit [[DEF11]], implicit [[DEF12]], implicit [[DEF13]], implicit [[DEF14]]
- ; GFX90A-NEXT: S_NOP 0, implicit [[DEF15]], implicit [[DEF16]], implicit [[DEF17]], implicit [[DEF18]], implicit [[DEF19]], implicit [[DEF20]], implicit [[DEF21]], implicit [[DEF22]], implicit [[DEF23]], implicit [[DEF24]]
- ; GFX90A-NEXT: S_NOP 0, implicit [[DEF25]], implicit [[DEF26]], implicit [[DEF27]], implicit [[DEF28]], implicit [[DEF29]], implicit [[DEF30]], implicit [[DEF31]], implicit [[DEF32]], implicit [[DEF33]], implicit [[DEF34]]
- ; GFX90A-NEXT: S_NOP 0, implicit [[DEF35]], implicit [[DEF36]], implicit [[DEF37]], implicit [[DEF38]], implicit [[DEF39]], implicit [[DEF40]], implicit [[DEF41]], implicit [[DEF42]], implicit [[DEF43]], implicit [[DEF44]]
- ; GFX90A-NEXT: S_NOP 0, implicit [[DEF45]], implicit [[DEF46]], implicit [[DEF47]], implicit [[DEF48]], implicit [[DEF49]], implicit [[DEF50]], implicit [[DEF51]], implicit [[DEF52]], implicit [[DEF53]], implicit [[DEF54]]
- ; GFX90A-NEXT: S_NOP 0, implicit [[DEF55]], implicit [[DEF56]], implicit [[DEF57]], implicit [[DEF58]], implicit [[DEF59]], implicit [[DEF60]], implicit [[DEF61]], implicit [[DEF62]], implicit [[DEF63]], implicit [[DEF64]]
- ; GFX90A-NEXT: S_NOP 0, implicit [[DEF65]], implicit [[DEF66]], implicit [[DEF67]], implicit [[DEF68]], implicit [[DEF69]], implicit [[DEF70]], implicit [[DEF71]], implicit [[DEF72]], implicit [[DEF73]], implicit [[DEF74]]
- ; GFX90A-NEXT: S_NOP 0, implicit [[DEF75]], implicit [[DEF76]], implicit [[DEF77]], implicit [[DEF78]], implicit [[DEF79]], implicit [[DEF80]], implicit [[DEF81]], implicit [[DEF82]], implicit [[DEF83]], implicit [[DEF84]]
- ; GFX90A-NEXT: S_NOP 0, implicit [[DEF85]], implicit [[DEF86]], implicit [[DEF87]], implicit [[DEF88]], implicit [[DEF89]], implicit [[DEF90]], implicit [[DEF91]], implicit [[DEF92]], implicit [[DEF93]], implicit [[DEF94]]
- ; GFX90A-NEXT: S_NOP 0, implicit [[DEF95]], implicit [[DEF96]], implicit [[DEF97]], implicit [[DEF98]], implicit [[DEF99]], implicit [[DEF100]], implicit [[DEF101]], implicit [[DEF102]], implicit [[DEF103]], implicit [[DEF104]]
- ; GFX90A-NEXT: S_NOP 0, implicit [[DEF105]], implicit [[DEF106]], implicit [[DEF107]], implicit [[DEF108]], implicit [[DEF109]], implicit [[DEF110]], implicit [[DEF111]], implicit [[DEF112]], implicit [[DEF113]], implicit [[DEF114]]
- ; GFX90A-NEXT: S_NOP 0, implicit [[DEF115]], implicit [[DEF116]], implicit [[DEF117]], implicit [[DEF118]], implicit [[DEF119]], implicit [[DEF120]], implicit [[DEF121]], implicit [[DEF122]], implicit [[DEF123]], implicit [[DEF124]]
- ; GFX90A-NEXT: S_NOP 0, implicit [[DEF125]], implicit [[DEF126]], implicit [[DEF127]], implicit [[DEF128]], implicit [[DEF129]], implicit [[DEF130]], implicit [[DEF131]], implicit [[DEF132]], implicit [[DEF133]], implicit [[DEF134]]
- ; GFX90A-NEXT: S_NOP 0, implicit [[DEF135]], implicit [[DEF136]], implicit [[DEF137]], implicit [[DEF138]], implicit [[DEF139]], implicit [[DEF140]], implicit [[DEF141]], implicit [[DEF142]], implicit [[DEF143]], implicit [[DEF144]]
- ; GFX90A-NEXT: S_NOP 0, implicit [[DEF145]], implicit [[DEF146]], implicit [[DEF147]], implicit [[DEF148]], implicit [[DEF149]], implicit [[DEF150]], implicit [[DEF151]], implicit [[DEF152]], implicit [[DEF153]], implicit [[DEF154]]
- ; GFX90A-NEXT: S_NOP 0, implicit [[DEF155]], implicit [[DEF156]], implicit [[DEF157]], implicit [[DEF158]], implicit [[DEF159]], implicit [[DEF160]], implicit [[DEF161]], implicit [[DEF162]], implicit [[DEF163]], implicit [[DEF164]]
- ; GFX90A-NEXT: S_NOP 0, implicit [[DEF165]], implicit [[DEF166]], implicit [[DEF167]], implicit [[DEF168]], implicit [[DEF169]], implicit [[DEF170]], implicit [[DEF171]], implicit [[DEF172]], implicit [[DEF173]], implicit [[DEF174]]
- ; GFX90A-NEXT: S_NOP 0, implicit [[DEF175]], implicit [[DEF176]], implicit [[DEF177]], implicit [[DEF178]], implicit [[DEF179]], implicit [[DEF180]], implicit [[DEF181]], implicit [[DEF182]], implicit [[DEF183]], implicit [[DEF184]]
- ; GFX90A-NEXT: S_NOP 0, implicit [[DEF185]], implicit [[DEF186]], implicit [[DEF187]], implicit [[DEF188]], implicit [[DEF189]], implicit [[DEF190]], implicit [[DEF191]], implicit [[DEF192]], implicit [[DEF193]], implicit [[DEF194]]
- ; GFX90A-NEXT: S_NOP 0, implicit [[DEF195]], implicit [[DEF196]], implicit [[DEF197]], implicit [[DEF198]], implicit [[DEF199]], implicit [[DEF200]], implicit [[DEF201]], implicit [[DEF202]], implicit [[DEF203]], implicit [[DEF204]]
- ; GFX90A-NEXT: S_NOP 0, implicit [[DEF205]], implicit [[DEF206]], implicit [[DEF207]], implicit [[DEF208]], implicit [[DEF209]], implicit [[DEF210]], implicit [[DEF211]], implicit [[DEF212]], implicit [[DEF213]], implicit [[DEF214]]
- ; GFX90A-NEXT: S_NOP 0, implicit [[DEF215]], implicit [[DEF216]], implicit [[DEF217]], implicit [[DEF218]], implicit [[DEF219]], implicit [[DEF220]], implicit [[DEF221]], implicit [[DEF222]], implicit [[DEF223]], implicit [[DEF224]]
- ; GFX90A-NEXT: S_NOP 0, implicit [[DEF225]], implicit [[DEF226]], implicit [[DEF227]], implicit [[DEF228]], implicit [[DEF229]], implicit [[DEF230]], implicit [[DEF231]], implicit [[DEF232]], implicit [[DEF233]], implicit [[DEF234]]
- ; GFX90A-NEXT: S_NOP 0, implicit [[DEF235]], implicit [[DEF236]], implicit [[DEF237]], implicit [[DEF238]], implicit [[DEF239]], implicit [[DEF240]], implicit [[DEF241]], implicit [[DEF242]], implicit [[DEF243]], implicit [[DEF244]]
- ; GFX90A-NEXT: S_NOP 0, implicit [[DEF245]], implicit [[DEF246]], implicit [[DEF247]], implicit [[DEF248]], implicit [[DEF249]], implicit [[DEF250]], implicit [[DEF251]], implicit [[V_CVT_I32_F64_e32_]], implicit [[V_CVT_I32_F64_e32_1]]
+ ; GFX90A-NEXT: [[DEF256:%[0-9]+]]:agpr_32 = IMPLICIT_DEF
+ ; GFX90A-NEXT: S_NOP 0, implicit [[DEF256]], implicit [[DEF]], implicit [[DEF1]], implicit [[DEF2]], implicit [[DEF3]], implicit [[DEF4]], implicit [[DEF5]], implicit [[DEF6]], implicit [[DEF7]], implicit [[DEF8]]
+ ; GFX90A-NEXT: S_NOP 0, implicit [[DEF9]], implicit [[DEF10]], implicit [[DEF11]], implicit [[DEF12]], implicit [[DEF13]], implicit [[DEF14]], implicit [[DEF15]], implicit [[DEF16]], implicit [[DEF17]], implicit [[DEF18]]
+ ; GFX90A-NEXT: S_NOP 0, implicit [[DEF19]], implicit [[DEF20]], implicit [[DEF21]], implicit [[DEF22]], implicit [[DEF23]], implicit [[DEF24]], implicit [[DEF25]], implicit [[DEF26]], implicit [[DEF27]], implicit [[DEF28]]
+ ; GFX90A-NEXT: S_NOP 0, implicit [[DEF29]], implicit [[DEF30]], implicit [[DEF31]], implicit [[DEF32]], implicit [[DEF33]], implicit [[DEF34]], implicit [[DEF35]], implicit [[DEF36]], implicit [[DEF37]], implicit [[DEF38]]
+ ; GFX90A-NEXT: S_NOP 0, implicit [[DEF39]], implicit [[DEF40]], implicit [[DEF41]], implicit [[DEF42]], implicit [[DEF43]], implicit [[DEF44]], implicit [[DEF45]], implicit [[DEF46]], implicit [[DEF47]], implicit [[DEF48]]
+ ; GFX90A-NEXT: S_NOP 0, implicit [[DEF49]], implicit [[DEF50]], implicit [[DEF51]], implicit [[DEF52]], implicit [[DEF53]], implicit [[DEF54]], implicit [[DEF55]], implicit [[DEF56]], implicit [[DEF57]], implicit [[DEF58]]
+ ; GFX90A-NEXT: S_NOP 0, implicit [[DEF59]], implicit [[DEF60]], implicit [[DEF61]], implicit [[DEF62]], implicit [[DEF63]], implicit [[DEF64]], implicit [[DEF65]], implicit [[DEF66]], implicit [[DEF67]], implicit [[DEF68]]
+ ; GFX90A-NEXT: S_NOP 0, implicit [[DEF69]], implicit [[DEF70]], implicit [[DEF71]], implicit [[DEF72]], implicit [[DEF73]], implicit [[DEF74]], implicit [[DEF75]], implicit [[DEF76]], implicit [[DEF77]], implicit [[DEF78]]
+ ; GFX90A-NEXT: S_NOP 0, implicit [[DEF79]], implicit [[DEF80]], implicit [[DEF81]], implicit [[DEF82]], implicit [[DEF83]], implicit [[DEF84]], implicit [[DEF85]], implicit [[DEF86]], implicit [[DEF87]], implicit [[DEF88]]
+ ; GFX90A-NEXT: S_NOP 0, implicit [[DEF89]], implicit [[DEF90]], implicit [[DEF91]], implicit [[DEF92]], implicit [[DEF93]], implicit [[DEF94]], implicit [[DEF95]], implicit [[DEF96]], implicit [[DEF97]], implicit [[DEF98]]
+ ; GFX90A-NEXT: S_NOP 0, implicit [[DEF99]], implicit [[DEF100]], implicit [[DEF101]], implicit [[DEF102]], implicit [[DEF103]], implicit [[DEF104]], implicit [[DEF105]], implicit [[DEF106]], implicit [[DEF107]], implicit [[DEF108]]
+ ; GFX90A-NEXT: S_NOP 0, implicit [[DEF109]], implicit [[DEF110]], implicit [[DEF111]], implicit [[DEF112]], implicit [[DEF113]], implicit [[DEF114]], implicit [[DEF115]], implicit [[DEF116]], implicit [[DEF117]], implicit [[DEF118]]
+ ; GFX90A-NEXT: S_NOP 0, implicit [[DEF119]], implicit [[DEF120]], implicit [[DEF121]], implicit [[DEF122]], implicit [[DEF123]], implicit [[DEF124]], implicit [[DEF125]], implicit [[DEF126]], implicit [[DEF127]], implicit [[DEF128]]
+ ; GFX90A-NEXT: S_NOP 0, implicit [[DEF129]], implicit [[DEF130]], implicit [[DEF131]], implicit [[DEF132]], implicit [[DEF133]], implicit [[DEF134]], implicit [[DEF135]], implicit [[DEF136]], implicit [[DEF137]], implicit [[DEF138]]
+ ; GFX90A-NEXT: S_NOP 0, implicit [[DEF139]], implicit [[DEF140]], implicit [[DEF141]], implicit [[DEF142]], implicit [[DEF143]], implicit [[DEF144]], implicit [[DEF145]], implicit [[DEF146]], implicit [[DEF147]], implicit [[DEF148]]
+ ; GFX90A-NEXT: S_NOP 0, implicit [[DEF149]], implicit [[DEF150]], implicit [[DEF151]], implicit [[DEF152]], implicit [[DEF153]], implicit [[DEF154]], implicit [[DEF155]], implicit [[DEF156]], implicit [[DEF157]], implicit [[DEF158]]
+ ; GFX90A-NEXT: S_NOP 0, implicit [[DEF159]], implicit [[DEF160]], implicit [[DEF161]], implicit [[DEF162]], implicit [[DEF163]], implicit [[DEF164]], implicit [[DEF165]], implicit [[DEF166]], implicit [[DEF167]], implicit [[DEF168]]
+ ; GFX90A-NEXT: S_NOP 0, implicit [[DEF169]], implicit [[DEF170]], implicit [[DEF171]], implicit [[DEF172]], implicit [[DEF173]], implicit [[DEF174]], implicit [[DEF175]], implicit [[DEF176]], implicit [[DEF177]], implicit [[DEF178]]
+ ; GFX90A-NEXT: S_NOP 0, implicit [[DEF179]], implicit [[DEF180]], implicit [[DEF181]], implicit [[DEF182]], implicit [[DEF183]], implicit [[DEF184]], implicit [[DEF185]], implicit [[DEF186]], implicit [[DEF187]], implicit [[DEF188]]
+ ; GFX90A-NEXT: S_NOP 0, implicit [[DEF189]], implicit [[DEF190]], implicit [[DEF191]], implicit [[DEF192]], implicit [[DEF193]], implicit [[DEF194]], implicit [[DEF195]], implicit [[DEF196]], implicit [[DEF197]], implicit [[DEF198]]
+ ; GFX90A-NEXT: S_NOP 0, implicit [[DEF199]], implicit [[DEF200]], implicit [[DEF201]], implicit [[DEF202]], implicit [[DEF203]], implicit [[DEF204]], implicit [[DEF205]], implicit [[DEF206]], implicit [[DEF207]], implicit [[DEF208]]
+ ; GFX90A-NEXT: S_NOP 0, implicit [[DEF209]], implicit [[DEF210]], implicit [[DEF211]], implicit [[DEF212]], implicit [[DEF213]], implicit [[DEF214]], implicit [[DEF215]], implicit [[DEF216]], implicit [[DEF217]], implicit [[DEF218]]
+ ; GFX90A-NEXT: S_NOP 0, implicit [[DEF219]], implicit [[DEF220]], implicit [[DEF221]], implicit [[DEF222]], implicit [[DEF223]], implicit [[DEF224]], implicit [[DEF225]], implicit [[DEF226]], implicit [[DEF227]], implicit [[DEF228]]
+ ; GFX90A-NEXT: S_NOP 0, implicit [[DEF229]], implicit [[DEF230]], implicit [[DEF231]], implicit [[DEF232]], implicit [[DEF233]], implicit [[DEF234]], implicit [[DEF235]], implicit [[DEF236]], implicit [[DEF237]], implicit [[DEF238]]
+ ; GFX90A-NEXT: S_NOP 0, implicit [[DEF239]], implicit [[DEF240]], implicit [[DEF241]], implicit [[DEF242]], implicit [[DEF243]], implicit [[DEF244]], implicit [[DEF245]], implicit [[DEF246]], implicit [[DEF247]], implicit [[DEF248]]
+ ; GFX90A-NEXT: S_NOP 0, implicit [[DEF249]], implicit [[DEF250]], implicit [[DEF251]], implicit [[DEF252]], implicit [[DEF253]], implicit [[DEF254]], implicit [[DEF255]], implicit [[V_CVT_I32_F64_e32_]], implicit [[V_CVT_I32_F64_e32_1]]
; GFX90A-NEXT: S_ENDPGM 0
bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/machine-scheduler-sink-trivial-remats.mir b/llvm/test/CodeGen/AMDGPU/machine-scheduler-sink-trivial-remats.mir
index f69337e..06d8474 100644
--- a/llvm/test/CodeGen/AMDGPU/machine-scheduler-sink-trivial-remats.mir
+++ b/llvm/test/CodeGen/AMDGPU/machine-scheduler-sink-trivial-remats.mir
@@ -2104,13 +2104,9 @@ body: |
; GFX908-NEXT: [[S_MOV_B32_58:%[0-9]+]]:sgpr_32 = S_MOV_B32 69
; GFX908-NEXT: [[S_MOV_B32_59:%[0-9]+]]:sgpr_32 = S_MOV_B32 70
; GFX908-NEXT: [[S_MOV_B32_60:%[0-9]+]]:sgpr_32 = S_MOV_B32 71
- ; GFX908-NEXT: [[V_CVT_I32_F64_e32_20:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 20, implicit $exec, implicit $mode, implicit-def $m0
; GFX908-NEXT: [[S_MOV_B32_61:%[0-9]+]]:sgpr_32 = S_MOV_B32 72
- ; GFX908-NEXT: [[V_CVT_I32_F64_e32_21:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 21, implicit $exec, implicit $mode, implicit-def $m0
; GFX908-NEXT: [[S_MOV_B32_62:%[0-9]+]]:sgpr_32 = S_MOV_B32 73
- ; GFX908-NEXT: [[V_CVT_I32_F64_e32_22:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 22, implicit $exec, implicit $mode, implicit-def $m0
; GFX908-NEXT: [[S_MOV_B32_63:%[0-9]+]]:sgpr_32 = S_MOV_B32 74
- ; GFX908-NEXT: [[V_CVT_I32_F64_e32_23:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 23, implicit $exec, implicit $mode
; GFX908-NEXT: [[S_MOV_B32_64:%[0-9]+]]:sgpr_32 = S_MOV_B32 75
; GFX908-NEXT: [[S_MOV_B32_65:%[0-9]+]]:sgpr_32 = S_MOV_B32 76
; GFX908-NEXT: [[S_MOV_B32_66:%[0-9]+]]:sgpr_32 = S_MOV_B32 77
@@ -2120,7 +2116,11 @@ body: |
; GFX908-NEXT: [[S_MOV_B32_70:%[0-9]+]]:sgpr_32 = S_MOV_B32 81
; GFX908-NEXT: [[S_MOV_B32_71:%[0-9]+]]:sgpr_32 = S_MOV_B32 82
; GFX908-NEXT: [[S_MOV_B32_72:%[0-9]+]]:sgpr_32 = S_MOV_B32 83
+ ; GFX908-NEXT: [[V_CVT_I32_F64_e32_20:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 20, implicit $exec, implicit $mode, implicit-def $m0
; GFX908-NEXT: [[S_MOV_B32_73:%[0-9]+]]:sgpr_32 = S_MOV_B32 84
+ ; GFX908-NEXT: [[V_CVT_I32_F64_e32_21:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 21, implicit $exec, implicit $mode, implicit-def $m0
+ ; GFX908-NEXT: [[V_CVT_I32_F64_e32_22:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 22, implicit $exec, implicit $mode, implicit-def $m0
+ ; GFX908-NEXT: [[V_CVT_I32_F64_e32_23:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 23, implicit $exec, implicit $mode
; GFX908-NEXT: {{ $}}
; GFX908-NEXT: bb.1:
; GFX908-NEXT: successors: %bb.2(0x40000000), %bb.3(0x40000000)
diff --git a/llvm/test/CodeGen/AMDGPU/mad-mix-bf16.ll b/llvm/test/CodeGen/AMDGPU/mad-mix-bf16.ll
index 11cda2d..c96ba75 100644
--- a/llvm/test/CodeGen/AMDGPU/mad-mix-bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/mad-mix-bf16.ll
@@ -199,7 +199,6 @@ define float @v_mad_mix_f32_bf16lo_bf16lo_negabsf32(bfloat %src0, bfloat %src1,
ret float %result
}
-
define float @v_mad_mix_f32_bf16lo_bf16lo_f32imm1(bfloat %src0, bfloat %src1) #0 {
; GFX1250-LABEL: v_mad_mix_f32_bf16lo_bf16lo_f32imm1:
; GFX1250: ; %bb.0:
@@ -230,7 +229,6 @@ define float @v_mad_mix_f32_bf16lo_bf16lo_f32imminv2pi(bfloat %src0, bfloat %src
ret float %result
}
-
define float @v_mad_mix_f32_bf16lo_bf16lo_cvtbf16imminv2pi(bfloat %src0, bfloat %src1) #0 {
; GFX1250-LABEL: v_mad_mix_f32_bf16lo_bf16lo_cvtbf16imminv2pi:
; GFX1250: ; %bb.0:
@@ -247,7 +245,6 @@ define float @v_mad_mix_f32_bf16lo_bf16lo_cvtbf16imminv2pi(bfloat %src0, bfloat
ret float %result
}
-
define float @v_mad_mix_f32_bf16lo_bf16lo_cvtbf16imm63(bfloat %src0, bfloat %src1) #0 {
; GFX1250-LABEL: v_mad_mix_f32_bf16lo_bf16lo_cvtbf16imm63:
; GFX1250: ; %bb.0:
@@ -360,7 +357,6 @@ define float @no_mix_simple_fabs(float %src0, float %src1, float %src2) #0 {
ret float %result
}
-
define float @v_mad_mix_f32_bf16lo_bf16lo_bf16lo_f32_denormals(bfloat %src0, bfloat %src1, bfloat %src2) #1 {
; GFX1250-LABEL: v_mad_mix_f32_bf16lo_bf16lo_bf16lo_f32_denormals:
; GFX1250: ; %bb.0:
@@ -469,7 +465,6 @@ define float @v_mad_mix_f32_negprecvtbf16lo_bf16lo_bf16lo(i32 %src0.arg, bfloat
ret float %result
}
-
define float @v_mad_mix_f32_precvtnegbf16hi_abs_bf16lo_bf16lo(i32 %src0.arg, bfloat %src1, bfloat %src2) #0 {
; GFX1250-LABEL: v_mad_mix_f32_precvtnegbf16hi_abs_bf16lo_bf16lo:
; GFX1250: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/mad-mix-lo-bf16.ll b/llvm/test/CodeGen/AMDGPU/mad-mix-lo-bf16.ll
index 1b2eb83..03304ae 100644
--- a/llvm/test/CodeGen/AMDGPU/mad-mix-lo-bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/mad-mix-lo-bf16.ll
@@ -74,9 +74,7 @@ define bfloat @v_mad_mixlo_bf16_bf16lo_bf16lo_f32_clamp_post_cvt(bfloat %src0, b
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_fma_mixlo_bf16 v0, v0, v1, v2 op_sel_hi:[1,1,0]
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-NEXT: v_pk_max_num_bf16 v0, v0, v0 clamp
+; GFX1250-NEXT: v_fma_mixlo_bf16 v0, v0, v1, v2 op_sel_hi:[1,1,0] clamp
; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%src0.ext = fpext bfloat %src0 to float
%src1.ext = fpext bfloat %src1 to float
@@ -105,7 +103,6 @@ define bfloat @v_mad_mixlo_bf16_bf16lo_bf16lo_f32_clamp_pre_cvt(bfloat %src0, bf
ret bfloat %cvt.result
}
-
define <2 x bfloat> @v_mad_mix_v2f32(<2 x bfloat> %src0, <2 x bfloat> %src1, <2 x bfloat> %src2) #0 {
; GFX1250-LABEL: v_mad_mix_v2f32:
; GFX1250: ; %bb.0:
@@ -178,7 +175,6 @@ define <4 x bfloat> @v_mad_mix_v4f32(<4 x bfloat> %src0, <4 x bfloat> %src1, <4
ret <4 x bfloat> %cvt.result
}
-
define <2 x bfloat> @v_mad_mix_v2f32_clamp_postcvt(<2 x bfloat> %src0, <2 x bfloat> %src1, <2 x bfloat> %src2) #0 {
; GFX1250-LABEL: v_mad_mix_v2f32_clamp_postcvt:
; GFX1250: ; %bb.0:
@@ -191,9 +187,7 @@ define <2 x bfloat> @v_mad_mix_v2f32_clamp_postcvt(<2 x bfloat> %src0, <2 x bflo
; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 16, v2
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-NEXT: v_pk_fma_f32 v[0:1], v[4:5], v[6:7], v[0:1]
-; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v1
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-NEXT: v_pk_max_num_bf16 v0, v0, v0 clamp
+; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v1 clamp
; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%src0.ext = fpext <2 x bfloat> %src0 to <2 x float>
%src1.ext = fpext <2 x bfloat> %src1 to <2 x float>
@@ -205,7 +199,6 @@ define <2 x bfloat> @v_mad_mix_v2f32_clamp_postcvt(<2 x bfloat> %src0, <2 x bflo
ret <2 x bfloat> %clamp
}
-
define <3 x bfloat> @v_mad_mix_v3f32_clamp_postcvt(<3 x bfloat> %src0, <3 x bfloat> %src1, <3 x bfloat> %src2) #0 {
; GFX1250-LABEL: v_mad_mix_v3f32_clamp_postcvt:
; GFX1250: ; %bb.0:
@@ -247,11 +240,8 @@ define <4 x bfloat> @v_mad_mix_v4f32_clamp_postcvt(<4 x bfloat> %src0, <4 x bflo
; GFX1250-NEXT: v_pk_fma_f32 v[0:1], v[6:7], v[0:1], v[2:3]
; GFX1250-NEXT: v_pk_fma_f32 v[2:3], v[8:9], v[10:11], v[12:13]
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v1
-; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v2, v3
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX1250-NEXT: v_pk_max_num_bf16 v0, v0, v0 clamp
-; GFX1250-NEXT: v_pk_max_num_bf16 v1, v1, v1 clamp
+; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v1 clamp
+; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v2, v3 clamp
; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%src0.ext = fpext <4 x bfloat> %src0 to <4 x float>
%src1.ext = fpext <4 x bfloat> %src1 to <4 x float>
@@ -323,7 +313,6 @@ define <2 x bfloat> @v_mad_mix_v2f32_clamp_postcvt_hi(<2 x bfloat> %src0, <2 x b
ret <2 x bfloat> %insert
}
-
define <2 x bfloat> @v_mad_mix_v2f32_clamp_precvt(<2 x bfloat> %src0, <2 x bfloat> %src1, <2 x bfloat> %src2) #0 {
; GFX1250-LABEL: v_mad_mix_v2f32_clamp_precvt:
; GFX1250: ; %bb.0:
@@ -351,7 +340,6 @@ define <2 x bfloat> @v_mad_mix_v2f32_clamp_precvt(<2 x bfloat> %src0, <2 x bfloa
ret <2 x bfloat> %cvt.result
}
-
define <3 x bfloat> @v_mad_mix_v3f32_clamp_precvt(<3 x bfloat> %src0, <3 x bfloat> %src1, <3 x bfloat> %src2) #0 {
; GFX1250-LABEL: v_mad_mix_v3f32_clamp_precvt:
; GFX1250: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/packed-fp32.ll b/llvm/test/CodeGen/AMDGPU/packed-fp32.ll
index 42401af..8304be9 100644
--- a/llvm/test/CodeGen/AMDGPU/packed-fp32.ll
+++ b/llvm/test/CodeGen/AMDGPU/packed-fp32.ll
@@ -78,12 +78,14 @@ define amdgpu_kernel void @fadd_v2_vs(ptr addrspace(1) %a, <2 x float> %x) {
; GFX1250-LABEL: fadd_v2_vs:
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-NEXT: v_and_b32_e32 v2, 0x3ff, v0
+; GFX1250-NEXT: v_and_b32_e32 v4, 0x3ff, v0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset
+; GFX1250-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset
+; GFX1250-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[2:3]
-; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3]
+; GFX1250-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset
; GFX1250-NEXT: s_endpgm
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id
@@ -142,13 +144,16 @@ define amdgpu_kernel void @fadd_v4_vs(ptr addrspace(1) %a, <4 x float> %x) {
; GFX1250-SDAG-NEXT: s_clause 0x1
; GFX1250-SDAG-NEXT: s_load_b64 s[6:7], s[4:5], 0x24
; GFX1250-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x34
-; GFX1250-SDAG-NEXT: v_and_b32_e32 v4, 0x3ff, v0
+; GFX1250-SDAG-NEXT: v_and_b32_e32 v8, 0x3ff, v0
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v4, s[6:7] scale_offset
+; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v8, s[6:7] scale_offset
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[6:7], s[0:1]
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[2:3], v[2:3], s[2:3]
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[0:1]
-; GFX1250-SDAG-NEXT: global_store_b128 v4, v[0:3], s[6:7] scale_offset
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[2:3], v[2:3], v[4:5]
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[6:7]
+; GFX1250-SDAG-NEXT: global_store_b128 v8, v[0:3], s[6:7] scale_offset
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: fadd_v4_vs:
@@ -156,13 +161,16 @@ define amdgpu_kernel void @fadd_v4_vs(ptr addrspace(1) %a, <4 x float> %x) {
; GFX1250-GISEL-NEXT: s_clause 0x1
; GFX1250-GISEL-NEXT: s_load_b64 s[6:7], s[4:5], 0x24
; GFX1250-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x34
-; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v8, 0x3ff, v0
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v4, s[6:7] scale_offset
+; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v8, s[6:7] scale_offset
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[0:1]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[6:7], s[2:3]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[0:1]
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[2:3], v[2:3], s[2:3]
-; GFX1250-GISEL-NEXT: global_store_b128 v4, v[0:3], s[6:7] scale_offset
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[2:3], v[2:3], v[6:7]
+; GFX1250-GISEL-NEXT: global_store_b128 v8, v[0:3], s[6:7] scale_offset
; GFX1250-GISEL-NEXT: s_endpgm
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds <4 x float>, ptr addrspace(1) %a, i32 %id
@@ -332,56 +340,69 @@ define amdgpu_kernel void @fadd_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
;
; GFX1250-SDAG-LABEL: fadd_v32_vs:
; GFX1250-SDAG: ; %bb.0:
-; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-SDAG-NEXT: s_load_b64 s[34:35], s[4:5], 0x24
; GFX1250-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-SDAG-NEXT: v_lshlrev_b32_e32 v32, 7, v0
+; GFX1250-SDAG-NEXT: v_lshlrev_b32_e32 v40, 7, v0
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
; GFX1250-SDAG-NEXT: s_clause 0x7
-; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v32, s[0:1] offset:16
-; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v32, s[0:1]
-; GFX1250-SDAG-NEXT: global_load_b128 v[8:11], v32, s[0:1] offset:48
-; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v32, s[0:1] offset:32
-; GFX1250-SDAG-NEXT: global_load_b128 v[12:15], v32, s[0:1] offset:80
-; GFX1250-SDAG-NEXT: global_load_b128 v[16:19], v32, s[0:1] offset:64
-; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v32, s[0:1] offset:112
-; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v32, s[0:1] offset:96
-; GFX1250-SDAG-NEXT: s_clause 0x1
-; GFX1250-SDAG-NEXT: s_load_b512 s[8:23], s[4:5], 0xa4
-; GFX1250-SDAG-NEXT: s_load_b512 s[36:51], s[4:5], 0xe4
-; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x7
+; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v40, s[34:35] offset:16
+; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v40, s[34:35] offset:48
+; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v40, s[34:35] offset:32
+; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v40, s[34:35]
+; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v40, s[34:35] offset:80
+; GFX1250-SDAG-NEXT: global_load_b128 v[16:19], v40, s[34:35] offset:96
+; GFX1250-SDAG-NEXT: global_load_b128 v[8:11], v40, s[34:35] offset:64
+; GFX1250-SDAG-NEXT: global_load_b128 v[12:15], v40, s[34:35] offset:112
+; GFX1250-SDAG-NEXT: s_load_b512 s[16:31], s[4:5], 0xa4
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_load_b512 s[0:15], s[4:5], 0xe4
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[12:13]
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[2:3], v[2:3], s[14:15]
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v34, s20 :: v_dual_mov_b32 v35, s21
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v38, s22 :: v_dual_mov_b32 v39, s23
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v32, s18 :: v_dual_mov_b32 v37, s29
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v42, s30 :: v_dual_mov_b32 v43, s31
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v44, s24 :: v_dual_mov_b32 v33, s19
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v36, s28 :: v_dual_mov_b32 v57, s15
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v53, s3 :: v_dual_mov_b32 v54, s12
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v55, s13 :: v_dual_mov_b32 v56, s14
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v51, s7 :: v_dual_mov_b32 v52, s2
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v47, s27 :: v_dual_mov_b32 v48, s4
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v49, s5 :: v_dual_mov_b32 v50, s6
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v45, s25 :: v_dual_mov_b32 v46, s26
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x7
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[28:29], v[28:29], v[34:35]
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[30:31], v[30:31], v[38:39]
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v34, s8 :: v_dual_mov_b32 v35, s9
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v38, s10 :: v_dual_mov_b32 v39, s11
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x6
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[6:7], v[6:7], s[10:11]
-; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x4
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[20:21], v[20:21], s[16:17]
-; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x3
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[12:13], v[12:13], s[40:41]
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[26:27], v[26:27], v[42:43]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[42:43], s[0:1]
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[24:25], v[24:25], v[36:37]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[36:37], s[16:17]
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x2
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[18:19], v[18:19], s[38:39]
-; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x1
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[24:25], v[24:25], s[48:49]
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[16:17], v[16:17], v[34:35]
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[18:19], v[18:19], v[38:39]
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[28:29], v[28:29], s[44:45]
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[30:31], v[30:31], s[46:47]
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[26:27], v[26:27], s[50:51]
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[16:17], v[16:17], s[36:37]
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[14:15], v[14:15], s[42:43]
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[22:23], v[22:23], s[18:19]
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[8:9], v[8:9], s[20:21]
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[10:11], v[10:11], s[22:23]
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[4:5], v[4:5], s[8:9]
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[12:13], v[12:13], v[54:55]
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[14:15], v[14:15], v[56:57]
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[10:11], v[10:11], v[52:53]
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[8:9], v[8:9], v[42:43]
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[4:5], v[4:5], v[48:49]
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[6:7], v[6:7], v[50:51]
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[20:21], v[20:21], v[44:45]
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[22:23], v[22:23], v[46:47]
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[2:3], v[2:3], v[32:33]
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[36:37]
; GFX1250-SDAG-NEXT: s_clause 0x7
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[28:31], s[0:1] offset:96
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[24:27], s[0:1] offset:112
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[16:19], s[0:1] offset:64
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[12:15], s[0:1] offset:80
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[20:23], s[0:1] offset:32
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[8:11], s[0:1] offset:48
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[4:7], s[0:1]
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[0:3], s[0:1] offset:16
+; GFX1250-SDAG-NEXT: global_store_b128 v40, v[16:19], s[34:35] offset:96
+; GFX1250-SDAG-NEXT: global_store_b128 v40, v[12:15], s[34:35] offset:112
+; GFX1250-SDAG-NEXT: global_store_b128 v40, v[8:11], s[34:35] offset:64
+; GFX1250-SDAG-NEXT: global_store_b128 v40, v[4:7], s[34:35] offset:80
+; GFX1250-SDAG-NEXT: global_store_b128 v40, v[20:23], s[34:35] offset:32
+; GFX1250-SDAG-NEXT: global_store_b128 v40, v[24:27], s[34:35] offset:48
+; GFX1250-SDAG-NEXT: global_store_b128 v40, v[0:3], s[34:35]
+; GFX1250-SDAG-NEXT: global_store_b128 v40, v[28:31], s[34:35] offset:16
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: fadd_v32_vs:
@@ -389,54 +410,70 @@ define amdgpu_kernel void @fadd_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
; GFX1250-GISEL-NEXT: s_load_b64 s[34:35], s[4:5], 0x24
; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_lshlrev_b32_e32 v32, 7, v0
+; GFX1250-GISEL-NEXT: v_lshlrev_b32_e32 v56, 7, v0
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
; GFX1250-GISEL-NEXT: s_clause 0x7
-; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v32, s[34:35]
-; GFX1250-GISEL-NEXT: global_load_b128 v[4:7], v32, s[34:35] offset:16
-; GFX1250-GISEL-NEXT: global_load_b128 v[8:11], v32, s[34:35] offset:32
-; GFX1250-GISEL-NEXT: global_load_b128 v[12:15], v32, s[34:35] offset:48
-; GFX1250-GISEL-NEXT: global_load_b128 v[16:19], v32, s[34:35] offset:64
-; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v32, s[34:35] offset:80
-; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v32, s[34:35] offset:96
-; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v32, s[34:35] offset:112
+; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v56, s[34:35]
+; GFX1250-GISEL-NEXT: global_load_b128 v[4:7], v56, s[34:35] offset:16
+; GFX1250-GISEL-NEXT: global_load_b128 v[8:11], v56, s[34:35] offset:32
+; GFX1250-GISEL-NEXT: global_load_b128 v[12:15], v56, s[34:35] offset:48
+; GFX1250-GISEL-NEXT: global_load_b128 v[16:19], v56, s[34:35] offset:64
+; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v56, s[34:35] offset:80
+; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v56, s[34:35] offset:96
+; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v56, s[34:35] offset:112
; GFX1250-GISEL-NEXT: s_load_b512 s[16:31], s[4:5], 0xa4
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_load_b512 s[0:15], s[4:5], 0xe4
-; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x7
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[16:17]
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[2:3], v[2:3], s[18:19]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[16:17]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[18:19]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[20:21]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[22:23]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[40:41], s[24:25]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[42:43], s[26:27]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[44:45], s[28:29]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[46:47], s[30:31]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[48:49], s[0:1]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[50:51], s[2:3]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[52:53], s[4:5]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[54:55], s[6:7]
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x7
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[32:33]
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[2:3], v[2:3], v[34:35]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[8:9]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[10:11]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x6
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[4:5], v[4:5], s[20:21]
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[6:7], v[6:7], s[22:23]
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[4:5], v[4:5], v[36:37]
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[6:7], v[6:7], v[38:39]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[12:13]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[14:15]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x5
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[8:9], v[8:9], s[24:25]
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[10:11], v[10:11], s[26:27]
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[8:9], v[8:9], v[40:41]
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[10:11], v[10:11], v[42:43]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x4
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[12:13], v[12:13], s[28:29]
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[14:15], v[14:15], s[30:31]
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[12:13], v[12:13], v[44:45]
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[14:15], v[14:15], v[46:47]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x3
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[16:17], v[16:17], s[0:1]
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[18:19], v[18:19], s[2:3]
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[16:17], v[16:17], v[48:49]
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[18:19], v[18:19], v[50:51]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x2
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[20:21], v[20:21], s[4:5]
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[22:23], v[22:23], s[6:7]
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[20:21], v[20:21], v[52:53]
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[22:23], v[22:23], v[54:55]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x1
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[24:25], v[24:25], s[8:9]
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[26:27], v[26:27], s[10:11]
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[24:25], v[24:25], v[32:33]
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[26:27], v[26:27], v[34:35]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[28:29], v[28:29], s[12:13]
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[30:31], v[30:31], s[14:15]
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[28:29], v[28:29], v[36:37]
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[30:31], v[30:31], v[38:39]
; GFX1250-GISEL-NEXT: s_clause 0x7
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[0:3], s[34:35]
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[4:7], s[34:35] offset:16
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[8:11], s[34:35] offset:32
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[12:15], s[34:35] offset:48
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[16:19], s[34:35] offset:64
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[20:23], s[34:35] offset:80
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[24:27], s[34:35] offset:96
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[28:31], s[34:35] offset:112
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[0:3], s[34:35]
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[4:7], s[34:35] offset:16
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[8:11], s[34:35] offset:32
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[12:15], s[34:35] offset:48
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[16:19], s[34:35] offset:64
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[20:23], s[34:35] offset:80
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[24:27], s[34:35] offset:96
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[28:31], s[34:35] offset:112
; GFX1250-GISEL-NEXT: s_endpgm
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds <32 x float>, ptr addrspace(1) %a, i32 %id
@@ -502,15 +539,16 @@ define amdgpu_kernel void @fadd_v2_v_imm(ptr addrspace(1) %a) {
; GFX1250-GISEL-LABEL: fadd_v2_v_imm:
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; GFX1250-GISEL-NEXT: v_and_b32_e32 v2, 0x3ff, v0
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0
; GFX1250-GISEL-NEXT: s_mov_b32 s2, 0x42c80000
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1250-GISEL-NEXT: s_mov_b32 s3, s2
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[2:3]
-; GFX1250-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3]
+; GFX1250-GISEL-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset
; GFX1250-GISEL-NEXT: s_endpgm
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id
@@ -645,15 +683,16 @@ define amdgpu_kernel void @fadd_v2_v_lit_splat(ptr addrspace(1) %a) {
; GFX1250-GISEL-LABEL: fadd_v2_v_lit_splat:
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; GFX1250-GISEL-NEXT: v_and_b32_e32 v2, 0x3ff, v0
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0
; GFX1250-GISEL-NEXT: s_mov_b32 s2, 1.0
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1250-GISEL-NEXT: s_mov_b32 s3, s2
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[2:3]
-; GFX1250-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3]
+; GFX1250-GISEL-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset
; GFX1250-GISEL-NEXT: s_endpgm
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id
@@ -703,13 +742,15 @@ define amdgpu_kernel void @fadd_v2_v_lit_hi0(ptr addrspace(1) %a) {
; GFX1250-GISEL-LABEL: fadd_v2_v_lit_hi0:
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; GFX1250-GISEL-NEXT: v_and_b32_e32 v2, 0x3ff, v0
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0
; GFX1250-GISEL-NEXT: s_mov_b64 s[2:3], 0x3f800000
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[2:3]
-; GFX1250-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3]
+; GFX1250-GISEL-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset
; GFX1250-GISEL-NEXT: s_endpgm
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id
@@ -746,17 +787,31 @@ define amdgpu_kernel void @fadd_v2_v_lit_lo0(ptr addrspace(1) %a) {
; PACKED-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; PACKED-NEXT: s_endpgm
;
-; GFX1250-LABEL: fadd_v2_v_lit_lo0:
-; GFX1250: ; %bb.0:
-; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; GFX1250-NEXT: v_and_b32_e32 v2, 0x3ff, v0
-; GFX1250-NEXT: s_mov_b64 s[2:3], lit64(0x3f80000000000000)
-; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset
-; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[2:3]
-; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset
-; GFX1250-NEXT: s_endpgm
+; GFX1250-SDAG-LABEL: fadd_v2_v_lit_lo0:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-SDAG-NEXT: v_and_b32_e32 v4, 0x3ff, v0
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[2:3], lit64(0x3f80000000000000)
+; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: fadd_v2_v_lit_lo0:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0
+; GFX1250-GISEL-NEXT: s_mov_b64 s[2:3], lit64(0x3f80000000000000)
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3]
+; GFX1250-GISEL-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: s_endpgm
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id
%load = load <2 x float>, ptr addrspace(1) %gep, align 8
@@ -792,17 +847,31 @@ define amdgpu_kernel void @fadd_v2_v_unfoldable_lit(ptr addrspace(1) %a) {
; PACKED-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; PACKED-NEXT: s_endpgm
;
-; GFX1250-LABEL: fadd_v2_v_unfoldable_lit:
-; GFX1250: ; %bb.0:
-; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; GFX1250-NEXT: v_and_b32_e32 v2, 0x3ff, v0
-; GFX1250-NEXT: s_mov_b64 s[2:3], lit64(0x400000003f800000)
-; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset
-; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[2:3]
-; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset
-; GFX1250-NEXT: s_endpgm
+; GFX1250-SDAG-LABEL: fadd_v2_v_unfoldable_lit:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-SDAG-NEXT: v_and_b32_e32 v4, 0x3ff, v0
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[2:3], lit64(0x400000003f800000)
+; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: fadd_v2_v_unfoldable_lit:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0
+; GFX1250-GISEL-NEXT: s_mov_b64 s[2:3], lit64(0x400000003f800000)
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3]
+; GFX1250-GISEL-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: s_endpgm
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id
%load = load <2 x float>, ptr addrspace(1) %gep, align 8
@@ -1085,12 +1154,14 @@ define amdgpu_kernel void @fadd_v2_v_fneg_lo2(ptr addrspace(1) %a, float %x, flo
; GFX1250-SDAG-LABEL: fadd_v2_v_fneg_lo2:
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-SDAG-NEXT: v_and_b32_e32 v2, 0x3ff, v0
+; GFX1250-SDAG-NEXT: v_and_b32_e32 v4, 0x3ff, v0
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset
+; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[2:3] neg_lo:[0,1]
-; GFX1250-SDAG-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3] neg_lo:[0,1]
+; GFX1250-SDAG-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: fadd_v2_v_fneg_lo2:
@@ -1159,12 +1230,14 @@ define amdgpu_kernel void @fadd_v2_v_fneg_hi2(ptr addrspace(1) %a, float %x, flo
; GFX1250-SDAG-LABEL: fadd_v2_v_fneg_hi2:
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-SDAG-NEXT: v_and_b32_e32 v2, 0x3ff, v0
+; GFX1250-SDAG-NEXT: v_and_b32_e32 v4, 0x3ff, v0
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset
+; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[2:3] op_sel:[0,1] op_sel_hi:[1,0] neg_hi:[0,1]
-; GFX1250-SDAG-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3] op_sel:[0,1] op_sel_hi:[1,0] neg_hi:[0,1]
+; GFX1250-SDAG-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: fadd_v2_v_fneg_hi2:
@@ -1262,12 +1335,14 @@ define amdgpu_kernel void @fmul_v2_vs(ptr addrspace(1) %a, <2 x float> %x) {
; GFX1250-LABEL: fmul_v2_vs:
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-NEXT: v_and_b32_e32 v2, 0x3ff, v0
+; GFX1250-NEXT: v_and_b32_e32 v4, 0x3ff, v0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset
+; GFX1250-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset
+; GFX1250-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: v_pk_mul_f32 v[0:1], v[0:1], s[2:3]
-; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[2:3]
+; GFX1250-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset
; GFX1250-NEXT: s_endpgm
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id
@@ -1326,13 +1401,16 @@ define amdgpu_kernel void @fmul_v4_vs(ptr addrspace(1) %a, <4 x float> %x) {
; GFX1250-SDAG-NEXT: s_clause 0x1
; GFX1250-SDAG-NEXT: s_load_b64 s[6:7], s[4:5], 0x24
; GFX1250-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x34
-; GFX1250-SDAG-NEXT: v_and_b32_e32 v4, 0x3ff, v0
+; GFX1250-SDAG-NEXT: v_and_b32_e32 v8, 0x3ff, v0
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v4, s[6:7] scale_offset
+; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v8, s[6:7] scale_offset
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[6:7], s[0:1]
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
-; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[2:3], v[2:3], s[2:3]
-; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[0:1], v[0:1], s[0:1]
-; GFX1250-SDAG-NEXT: global_store_b128 v4, v[0:3], s[6:7] scale_offset
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[2:3], v[2:3], v[4:5]
+; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[6:7]
+; GFX1250-SDAG-NEXT: global_store_b128 v8, v[0:3], s[6:7] scale_offset
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: fmul_v4_vs:
@@ -1340,13 +1418,16 @@ define amdgpu_kernel void @fmul_v4_vs(ptr addrspace(1) %a, <4 x float> %x) {
; GFX1250-GISEL-NEXT: s_clause 0x1
; GFX1250-GISEL-NEXT: s_load_b64 s[6:7], s[4:5], 0x24
; GFX1250-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x34
-; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v8, 0x3ff, v0
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v4, s[6:7] scale_offset
+; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v8, s[6:7] scale_offset
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[0:1]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[6:7], s[2:3]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], v[0:1], s[0:1]
-; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[2:3], v[2:3], s[2:3]
-; GFX1250-GISEL-NEXT: global_store_b128 v4, v[0:3], s[6:7] scale_offset
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[2:3], v[2:3], v[6:7]
+; GFX1250-GISEL-NEXT: global_store_b128 v8, v[0:3], s[6:7] scale_offset
; GFX1250-GISEL-NEXT: s_endpgm
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds <4 x float>, ptr addrspace(1) %a, i32 %id
@@ -1516,56 +1597,69 @@ define amdgpu_kernel void @fmul_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
;
; GFX1250-SDAG-LABEL: fmul_v32_vs:
; GFX1250-SDAG: ; %bb.0:
-; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-SDAG-NEXT: s_load_b64 s[34:35], s[4:5], 0x24
; GFX1250-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-SDAG-NEXT: v_lshlrev_b32_e32 v32, 7, v0
+; GFX1250-SDAG-NEXT: v_lshlrev_b32_e32 v40, 7, v0
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
; GFX1250-SDAG-NEXT: s_clause 0x7
-; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v32, s[0:1] offset:16
-; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v32, s[0:1]
-; GFX1250-SDAG-NEXT: global_load_b128 v[8:11], v32, s[0:1] offset:48
-; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v32, s[0:1] offset:32
-; GFX1250-SDAG-NEXT: global_load_b128 v[12:15], v32, s[0:1] offset:80
-; GFX1250-SDAG-NEXT: global_load_b128 v[16:19], v32, s[0:1] offset:64
-; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v32, s[0:1] offset:112
-; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v32, s[0:1] offset:96
-; GFX1250-SDAG-NEXT: s_clause 0x1
-; GFX1250-SDAG-NEXT: s_load_b512 s[8:23], s[4:5], 0xa4
-; GFX1250-SDAG-NEXT: s_load_b512 s[36:51], s[4:5], 0xe4
-; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x7
+; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v40, s[34:35] offset:16
+; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v40, s[34:35] offset:48
+; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v40, s[34:35] offset:32
+; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v40, s[34:35]
+; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v40, s[34:35] offset:80
+; GFX1250-SDAG-NEXT: global_load_b128 v[16:19], v40, s[34:35] offset:96
+; GFX1250-SDAG-NEXT: global_load_b128 v[8:11], v40, s[34:35] offset:64
+; GFX1250-SDAG-NEXT: global_load_b128 v[12:15], v40, s[34:35] offset:112
+; GFX1250-SDAG-NEXT: s_load_b512 s[16:31], s[4:5], 0xa4
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_load_b512 s[0:15], s[4:5], 0xe4
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[0:1], v[0:1], s[12:13]
-; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[2:3], v[2:3], s[14:15]
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v34, s20 :: v_dual_mov_b32 v35, s21
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v38, s22 :: v_dual_mov_b32 v39, s23
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v32, s18 :: v_dual_mov_b32 v37, s29
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v42, s30 :: v_dual_mov_b32 v43, s31
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v44, s24 :: v_dual_mov_b32 v33, s19
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v36, s28 :: v_dual_mov_b32 v57, s15
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v53, s3 :: v_dual_mov_b32 v54, s12
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v55, s13 :: v_dual_mov_b32 v56, s14
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v51, s7 :: v_dual_mov_b32 v52, s2
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v47, s27 :: v_dual_mov_b32 v48, s4
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v49, s5 :: v_dual_mov_b32 v50, s6
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v45, s25 :: v_dual_mov_b32 v46, s26
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x7
+; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[28:29], v[28:29], v[34:35]
+; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[30:31], v[30:31], v[38:39]
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v34, s8 :: v_dual_mov_b32 v35, s9
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v38, s10 :: v_dual_mov_b32 v39, s11
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x6
-; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[6:7], v[6:7], s[10:11]
-; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x4
-; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[20:21], v[20:21], s[16:17]
-; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x3
-; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[12:13], v[12:13], s[40:41]
+; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[26:27], v[26:27], v[42:43]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[42:43], s[0:1]
+; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[24:25], v[24:25], v[36:37]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[36:37], s[16:17]
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x2
-; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[18:19], v[18:19], s[38:39]
-; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x1
-; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[24:25], v[24:25], s[48:49]
+; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[16:17], v[16:17], v[34:35]
+; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[18:19], v[18:19], v[38:39]
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
-; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[28:29], v[28:29], s[44:45]
-; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[30:31], v[30:31], s[46:47]
-; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[26:27], v[26:27], s[50:51]
-; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[16:17], v[16:17], s[36:37]
-; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[14:15], v[14:15], s[42:43]
-; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[22:23], v[22:23], s[18:19]
-; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[8:9], v[8:9], s[20:21]
-; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[10:11], v[10:11], s[22:23]
-; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[4:5], v[4:5], s[8:9]
+; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[12:13], v[12:13], v[54:55]
+; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[14:15], v[14:15], v[56:57]
+; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[10:11], v[10:11], v[52:53]
+; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[8:9], v[8:9], v[42:43]
+; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[4:5], v[4:5], v[48:49]
+; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[6:7], v[6:7], v[50:51]
+; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[20:21], v[20:21], v[44:45]
+; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[22:23], v[22:23], v[46:47]
+; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[2:3], v[2:3], v[32:33]
+; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[36:37]
; GFX1250-SDAG-NEXT: s_clause 0x7
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[28:31], s[0:1] offset:96
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[24:27], s[0:1] offset:112
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[16:19], s[0:1] offset:64
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[12:15], s[0:1] offset:80
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[20:23], s[0:1] offset:32
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[8:11], s[0:1] offset:48
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[4:7], s[0:1]
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[0:3], s[0:1] offset:16
+; GFX1250-SDAG-NEXT: global_store_b128 v40, v[16:19], s[34:35] offset:96
+; GFX1250-SDAG-NEXT: global_store_b128 v40, v[12:15], s[34:35] offset:112
+; GFX1250-SDAG-NEXT: global_store_b128 v40, v[8:11], s[34:35] offset:64
+; GFX1250-SDAG-NEXT: global_store_b128 v40, v[4:7], s[34:35] offset:80
+; GFX1250-SDAG-NEXT: global_store_b128 v40, v[20:23], s[34:35] offset:32
+; GFX1250-SDAG-NEXT: global_store_b128 v40, v[24:27], s[34:35] offset:48
+; GFX1250-SDAG-NEXT: global_store_b128 v40, v[0:3], s[34:35]
+; GFX1250-SDAG-NEXT: global_store_b128 v40, v[28:31], s[34:35] offset:16
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: fmul_v32_vs:
@@ -1573,54 +1667,70 @@ define amdgpu_kernel void @fmul_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
; GFX1250-GISEL-NEXT: s_load_b64 s[34:35], s[4:5], 0x24
; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_lshlrev_b32_e32 v32, 7, v0
+; GFX1250-GISEL-NEXT: v_lshlrev_b32_e32 v56, 7, v0
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
; GFX1250-GISEL-NEXT: s_clause 0x7
-; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v32, s[34:35]
-; GFX1250-GISEL-NEXT: global_load_b128 v[4:7], v32, s[34:35] offset:16
-; GFX1250-GISEL-NEXT: global_load_b128 v[8:11], v32, s[34:35] offset:32
-; GFX1250-GISEL-NEXT: global_load_b128 v[12:15], v32, s[34:35] offset:48
-; GFX1250-GISEL-NEXT: global_load_b128 v[16:19], v32, s[34:35] offset:64
-; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v32, s[34:35] offset:80
-; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v32, s[34:35] offset:96
-; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v32, s[34:35] offset:112
+; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v56, s[34:35]
+; GFX1250-GISEL-NEXT: global_load_b128 v[4:7], v56, s[34:35] offset:16
+; GFX1250-GISEL-NEXT: global_load_b128 v[8:11], v56, s[34:35] offset:32
+; GFX1250-GISEL-NEXT: global_load_b128 v[12:15], v56, s[34:35] offset:48
+; GFX1250-GISEL-NEXT: global_load_b128 v[16:19], v56, s[34:35] offset:64
+; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v56, s[34:35] offset:80
+; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v56, s[34:35] offset:96
+; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v56, s[34:35] offset:112
; GFX1250-GISEL-NEXT: s_load_b512 s[16:31], s[4:5], 0xa4
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_load_b512 s[0:15], s[4:5], 0xe4
-; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x7
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], v[0:1], s[16:17]
-; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[2:3], v[2:3], s[18:19]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[16:17]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[18:19]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[20:21]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[22:23]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[40:41], s[24:25]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[42:43], s[26:27]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[44:45], s[28:29]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[46:47], s[30:31]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[48:49], s[0:1]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[50:51], s[2:3]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[52:53], s[4:5]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[54:55], s[6:7]
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x7
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[32:33]
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[2:3], v[2:3], v[34:35]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[8:9]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[10:11]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x6
-; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[4:5], v[4:5], s[20:21]
-; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[6:7], v[6:7], s[22:23]
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[4:5], v[4:5], v[36:37]
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[6:7], v[6:7], v[38:39]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[12:13]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[14:15]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x5
-; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[8:9], v[8:9], s[24:25]
-; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[10:11], v[10:11], s[26:27]
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[8:9], v[8:9], v[40:41]
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[10:11], v[10:11], v[42:43]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x4
-; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[12:13], v[12:13], s[28:29]
-; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[14:15], v[14:15], s[30:31]
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[12:13], v[12:13], v[44:45]
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[14:15], v[14:15], v[46:47]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x3
-; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[16:17], v[16:17], s[0:1]
-; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[18:19], v[18:19], s[2:3]
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[16:17], v[16:17], v[48:49]
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[18:19], v[18:19], v[50:51]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x2
-; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[20:21], v[20:21], s[4:5]
-; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[22:23], v[22:23], s[6:7]
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[20:21], v[20:21], v[52:53]
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[22:23], v[22:23], v[54:55]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x1
-; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[24:25], v[24:25], s[8:9]
-; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[26:27], v[26:27], s[10:11]
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[24:25], v[24:25], v[32:33]
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[26:27], v[26:27], v[34:35]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[28:29], v[28:29], s[12:13]
-; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[30:31], v[30:31], s[14:15]
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[28:29], v[28:29], v[36:37]
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[30:31], v[30:31], v[38:39]
; GFX1250-GISEL-NEXT: s_clause 0x7
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[0:3], s[34:35]
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[4:7], s[34:35] offset:16
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[8:11], s[34:35] offset:32
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[12:15], s[34:35] offset:48
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[16:19], s[34:35] offset:64
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[20:23], s[34:35] offset:80
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[24:27], s[34:35] offset:96
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[28:31], s[34:35] offset:112
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[0:3], s[34:35]
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[4:7], s[34:35] offset:16
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[8:11], s[34:35] offset:32
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[12:15], s[34:35] offset:48
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[16:19], s[34:35] offset:64
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[20:23], s[34:35] offset:80
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[24:27], s[34:35] offset:96
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[28:31], s[34:35] offset:112
; GFX1250-GISEL-NEXT: s_endpgm
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds <32 x float>, ptr addrspace(1) %a, i32 %id
@@ -1685,15 +1795,16 @@ define amdgpu_kernel void @fmul_v2_v_imm(ptr addrspace(1) %a) {
; GFX1250-GISEL-LABEL: fmul_v2_v_imm:
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; GFX1250-GISEL-NEXT: v_and_b32_e32 v2, 0x3ff, v0
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0
; GFX1250-GISEL-NEXT: s_mov_b32 s2, 0x42c80000
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1250-GISEL-NEXT: s_mov_b32 s3, s2
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], v[0:1], s[2:3]
-; GFX1250-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[2:3]
+; GFX1250-GISEL-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset
; GFX1250-GISEL-NEXT: s_endpgm
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id
@@ -1828,15 +1939,16 @@ define amdgpu_kernel void @fmul_v2_v_lit_splat(ptr addrspace(1) %a) {
; GFX1250-GISEL-LABEL: fmul_v2_v_lit_splat:
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; GFX1250-GISEL-NEXT: v_and_b32_e32 v2, 0x3ff, v0
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0
; GFX1250-GISEL-NEXT: s_mov_b32 s2, 4.0
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1250-GISEL-NEXT: s_mov_b32 s3, s2
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], v[0:1], s[2:3]
-; GFX1250-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[2:3]
+; GFX1250-GISEL-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset
; GFX1250-GISEL-NEXT: s_endpgm
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id
@@ -1873,17 +1985,31 @@ define amdgpu_kernel void @fmul_v2_v_unfoldable_lit(ptr addrspace(1) %a) {
; PACKED-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; PACKED-NEXT: s_endpgm
;
-; GFX1250-LABEL: fmul_v2_v_unfoldable_lit:
-; GFX1250: ; %bb.0:
-; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; GFX1250-NEXT: v_and_b32_e32 v2, 0x3ff, v0
-; GFX1250-NEXT: s_mov_b64 s[2:3], lit64(0x4040000040800000)
-; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset
-; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: v_pk_mul_f32 v[0:1], v[0:1], s[2:3]
-; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset
-; GFX1250-NEXT: s_endpgm
+; GFX1250-SDAG-LABEL: fmul_v2_v_unfoldable_lit:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-SDAG-NEXT: v_and_b32_e32 v4, 0x3ff, v0
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[2:3], lit64(0x4040000040800000)
+; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: fmul_v2_v_unfoldable_lit:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0
+; GFX1250-GISEL-NEXT: s_mov_b64 s[2:3], lit64(0x4040000040800000)
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[2:3]
+; GFX1250-GISEL-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: s_endpgm
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id
%load = load <2 x float>, ptr addrspace(1) %gep, align 8
@@ -2040,12 +2166,14 @@ define amdgpu_kernel void @fma_v2_vs(ptr addrspace(1) %a, <2 x float> %x) {
; GFX1250-LABEL: fma_v2_vs:
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-NEXT: v_and_b32_e32 v2, 0x3ff, v0
+; GFX1250-NEXT: v_and_b32_e32 v4, 0x3ff, v0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset
+; GFX1250-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset
+; GFX1250-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: v_pk_fma_f32 v[0:1], v[0:1], s[2:3], s[2:3]
-; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_pk_fma_f32 v[0:1], v[0:1], v[2:3], v[2:3]
+; GFX1250-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset
; GFX1250-NEXT: s_endpgm
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id
@@ -2104,13 +2232,16 @@ define amdgpu_kernel void @fma_v4_vs(ptr addrspace(1) %a, <4 x float> %x) {
; GFX1250-SDAG-NEXT: s_clause 0x1
; GFX1250-SDAG-NEXT: s_load_b64 s[6:7], s[4:5], 0x24
; GFX1250-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x34
-; GFX1250-SDAG-NEXT: v_and_b32_e32 v4, 0x3ff, v0
+; GFX1250-SDAG-NEXT: v_and_b32_e32 v8, 0x3ff, v0
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v4, s[6:7] scale_offset
+; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v8, s[6:7] scale_offset
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[4:5], s[2:3]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[6:7], s[0:1]
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
-; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[2:3], v[2:3], s[2:3], s[2:3]
-; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[0:1], v[0:1], s[0:1], s[0:1]
-; GFX1250-SDAG-NEXT: global_store_b128 v4, v[0:3], s[6:7] scale_offset
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[2:3], v[2:3], v[4:5], v[4:5]
+; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[0:1], v[0:1], v[6:7], v[6:7]
+; GFX1250-SDAG-NEXT: global_store_b128 v8, v[0:3], s[6:7] scale_offset
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: fma_v4_vs:
@@ -2118,13 +2249,16 @@ define amdgpu_kernel void @fma_v4_vs(ptr addrspace(1) %a, <4 x float> %x) {
; GFX1250-GISEL-NEXT: s_clause 0x1
; GFX1250-GISEL-NEXT: s_load_b64 s[6:7], s[4:5], 0x24
; GFX1250-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x34
-; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v8, 0x3ff, v0
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v4, s[6:7] scale_offset
+; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v8, s[6:7] scale_offset
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[0:1]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[6:7], s[2:3]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], s[0:1], s[0:1]
-; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[2:3], v[2:3], s[2:3], s[2:3]
-; GFX1250-GISEL-NEXT: global_store_b128 v4, v[0:3], s[6:7] scale_offset
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], v[4:5], v[4:5]
+; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[2:3], v[2:3], v[6:7], v[6:7]
+; GFX1250-GISEL-NEXT: global_store_b128 v8, v[0:3], s[6:7] scale_offset
; GFX1250-GISEL-NEXT: s_endpgm
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds <4 x float>, ptr addrspace(1) %a, i32 %id
@@ -2294,56 +2428,68 @@ define amdgpu_kernel void @fma_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
;
; GFX1250-SDAG-LABEL: fma_v32_vs:
; GFX1250-SDAG: ; %bb.0:
-; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-SDAG-NEXT: s_load_b64 s[34:35], s[4:5], 0x24
; GFX1250-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-SDAG-NEXT: v_lshlrev_b32_e32 v32, 7, v0
+; GFX1250-SDAG-NEXT: v_lshlrev_b32_e32 v34, 7, v0
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
; GFX1250-SDAG-NEXT: s_clause 0x7
-; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v32, s[0:1] offset:16
-; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v32, s[0:1]
-; GFX1250-SDAG-NEXT: global_load_b128 v[8:11], v32, s[0:1] offset:48
-; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v32, s[0:1] offset:32
-; GFX1250-SDAG-NEXT: global_load_b128 v[12:15], v32, s[0:1] offset:80
-; GFX1250-SDAG-NEXT: global_load_b128 v[16:19], v32, s[0:1] offset:64
-; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v32, s[0:1] offset:112
-; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v32, s[0:1] offset:96
-; GFX1250-SDAG-NEXT: s_clause 0x1
-; GFX1250-SDAG-NEXT: s_load_b512 s[8:23], s[4:5], 0xa4
-; GFX1250-SDAG-NEXT: s_load_b512 s[36:51], s[4:5], 0xe4
-; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x7
+; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v34, s[34:35] offset:16
+; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v34, s[34:35] offset:48
+; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v34, s[34:35] offset:32
+; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v34, s[34:35]
+; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v34, s[34:35] offset:80
+; GFX1250-SDAG-NEXT: global_load_b128 v[16:19], v34, s[34:35] offset:96
+; GFX1250-SDAG-NEXT: global_load_b128 v[8:11], v34, s[34:35] offset:64
+; GFX1250-SDAG-NEXT: global_load_b128 v[12:15], v34, s[34:35] offset:112
+; GFX1250-SDAG-NEXT: s_load_b512 s[16:31], s[4:5], 0xa4
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_load_b512 s[0:15], s[4:5], 0xe4
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[0:1], v[0:1], s[12:13], s[12:13]
-; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[2:3], v[2:3], s[14:15], s[14:15]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[36:37], s[20:21]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[38:39], s[22:23]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[42:43], s[30:31]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[40:41], s[28:29]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[54:55], s[12:13]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[56:57], s[14:15]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[52:53], s[2:3]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[48:49], s[4:5]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[50:51], s[6:7]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[44:45], s[24:25]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[46:47], s[26:27]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[32:33], s[18:19]
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x7
+; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[28:29], v[28:29], v[36:37], v[36:37]
+; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[30:31], v[30:31], v[38:39], v[38:39]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[36:37], s[8:9]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[38:39], s[10:11]
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x6
-; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[6:7], v[6:7], s[10:11], s[10:11]
-; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x4
-; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[20:21], v[20:21], s[16:17], s[16:17]
-; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x3
-; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[12:13], v[12:13], s[40:41], s[40:41]
-; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x2
-; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[18:19], v[18:19], s[38:39], s[38:39]
-; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x1
-; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[24:25], v[24:25], s[48:49], s[48:49]
+; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[26:27], v[26:27], v[42:43], v[42:43]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[42:43], s[0:1]
+; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[24:25], v[24:25], v[40:41], v[40:41]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[40:41], s[16:17]
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
-; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[28:29], v[28:29], s[44:45], s[44:45]
-; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[30:31], v[30:31], s[46:47], s[46:47]
-; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[26:27], v[26:27], s[50:51], s[50:51]
-; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[16:17], v[16:17], s[36:37], s[36:37]
-; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[14:15], v[14:15], s[42:43], s[42:43]
-; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[22:23], v[22:23], s[18:19], s[18:19]
-; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[8:9], v[8:9], s[20:21], s[20:21]
-; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[10:11], v[10:11], s[22:23], s[22:23]
-; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[4:5], v[4:5], s[8:9], s[8:9]
+; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[12:13], v[12:13], v[54:55], v[54:55]
+; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[16:17], v[16:17], v[36:37], v[36:37]
+; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[18:19], v[18:19], v[38:39], v[38:39]
+; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[14:15], v[14:15], v[56:57], v[56:57]
+; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[10:11], v[10:11], v[52:53], v[52:53]
+; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[8:9], v[8:9], v[42:43], v[42:43]
+; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[4:5], v[4:5], v[48:49], v[48:49]
+; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[6:7], v[6:7], v[50:51], v[50:51]
+; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[20:21], v[20:21], v[44:45], v[44:45]
+; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[22:23], v[22:23], v[46:47], v[46:47]
+; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[2:3], v[2:3], v[32:33], v[32:33]
+; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[0:1], v[0:1], v[40:41], v[40:41]
; GFX1250-SDAG-NEXT: s_clause 0x7
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[28:31], s[0:1] offset:96
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[24:27], s[0:1] offset:112
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[16:19], s[0:1] offset:64
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[12:15], s[0:1] offset:80
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[20:23], s[0:1] offset:32
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[8:11], s[0:1] offset:48
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[4:7], s[0:1]
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[0:3], s[0:1] offset:16
+; GFX1250-SDAG-NEXT: global_store_b128 v34, v[16:19], s[34:35] offset:96
+; GFX1250-SDAG-NEXT: global_store_b128 v34, v[12:15], s[34:35] offset:112
+; GFX1250-SDAG-NEXT: global_store_b128 v34, v[8:11], s[34:35] offset:64
+; GFX1250-SDAG-NEXT: global_store_b128 v34, v[4:7], s[34:35] offset:80
+; GFX1250-SDAG-NEXT: global_store_b128 v34, v[20:23], s[34:35] offset:32
+; GFX1250-SDAG-NEXT: global_store_b128 v34, v[24:27], s[34:35] offset:48
+; GFX1250-SDAG-NEXT: global_store_b128 v34, v[0:3], s[34:35]
+; GFX1250-SDAG-NEXT: global_store_b128 v34, v[28:31], s[34:35] offset:16
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: fma_v32_vs:
@@ -2351,54 +2497,70 @@ define amdgpu_kernel void @fma_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
; GFX1250-GISEL-NEXT: s_load_b64 s[34:35], s[4:5], 0x24
; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_lshlrev_b32_e32 v32, 7, v0
+; GFX1250-GISEL-NEXT: v_lshlrev_b32_e32 v56, 7, v0
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
; GFX1250-GISEL-NEXT: s_clause 0x7
-; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v32, s[34:35]
-; GFX1250-GISEL-NEXT: global_load_b128 v[4:7], v32, s[34:35] offset:16
-; GFX1250-GISEL-NEXT: global_load_b128 v[8:11], v32, s[34:35] offset:32
-; GFX1250-GISEL-NEXT: global_load_b128 v[12:15], v32, s[34:35] offset:48
-; GFX1250-GISEL-NEXT: global_load_b128 v[16:19], v32, s[34:35] offset:64
-; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v32, s[34:35] offset:80
-; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v32, s[34:35] offset:96
-; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v32, s[34:35] offset:112
+; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v56, s[34:35]
+; GFX1250-GISEL-NEXT: global_load_b128 v[4:7], v56, s[34:35] offset:16
+; GFX1250-GISEL-NEXT: global_load_b128 v[8:11], v56, s[34:35] offset:32
+; GFX1250-GISEL-NEXT: global_load_b128 v[12:15], v56, s[34:35] offset:48
+; GFX1250-GISEL-NEXT: global_load_b128 v[16:19], v56, s[34:35] offset:64
+; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v56, s[34:35] offset:80
+; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v56, s[34:35] offset:96
+; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v56, s[34:35] offset:112
; GFX1250-GISEL-NEXT: s_load_b512 s[16:31], s[4:5], 0xa4
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_load_b512 s[0:15], s[4:5], 0xe4
-; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x7
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], s[16:17], s[16:17]
-; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[2:3], v[2:3], s[18:19], s[18:19]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[16:17]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[18:19]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[20:21]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[22:23]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[40:41], s[24:25]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[42:43], s[26:27]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[44:45], s[28:29]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[46:47], s[30:31]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[48:49], s[0:1]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[50:51], s[2:3]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[52:53], s[4:5]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[54:55], s[6:7]
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x7
+; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], v[32:33], v[32:33]
+; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[2:3], v[2:3], v[34:35], v[34:35]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[8:9]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[10:11]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x6
-; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[4:5], v[4:5], s[20:21], s[20:21]
-; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[6:7], v[6:7], s[22:23], s[22:23]
+; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[4:5], v[4:5], v[36:37], v[36:37]
+; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[6:7], v[6:7], v[38:39], v[38:39]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[12:13]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[14:15]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x5
-; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[8:9], v[8:9], s[24:25], s[24:25]
-; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[10:11], v[10:11], s[26:27], s[26:27]
+; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[8:9], v[8:9], v[40:41], v[40:41]
+; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[10:11], v[10:11], v[42:43], v[42:43]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x4
-; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[12:13], v[12:13], s[28:29], s[28:29]
-; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[14:15], v[14:15], s[30:31], s[30:31]
+; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[12:13], v[12:13], v[44:45], v[44:45]
+; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[14:15], v[14:15], v[46:47], v[46:47]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x3
-; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[16:17], v[16:17], s[0:1], s[0:1]
-; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[18:19], v[18:19], s[2:3], s[2:3]
+; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[16:17], v[16:17], v[48:49], v[48:49]
+; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[18:19], v[18:19], v[50:51], v[50:51]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x2
-; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[20:21], v[20:21], s[4:5], s[4:5]
-; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[22:23], v[22:23], s[6:7], s[6:7]
+; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[20:21], v[20:21], v[52:53], v[52:53]
+; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[22:23], v[22:23], v[54:55], v[54:55]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x1
-; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[24:25], v[24:25], s[8:9], s[8:9]
-; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[26:27], v[26:27], s[10:11], s[10:11]
+; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[24:25], v[24:25], v[32:33], v[32:33]
+; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[26:27], v[26:27], v[34:35], v[34:35]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[28:29], v[28:29], s[12:13], s[12:13]
-; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[30:31], v[30:31], s[14:15], s[14:15]
+; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[28:29], v[28:29], v[36:37], v[36:37]
+; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[30:31], v[30:31], v[38:39], v[38:39]
; GFX1250-GISEL-NEXT: s_clause 0x7
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[0:3], s[34:35]
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[4:7], s[34:35] offset:16
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[8:11], s[34:35] offset:32
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[12:15], s[34:35] offset:48
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[16:19], s[34:35] offset:64
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[20:23], s[34:35] offset:80
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[24:27], s[34:35] offset:96
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[28:31], s[34:35] offset:112
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[0:3], s[34:35]
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[4:7], s[34:35] offset:16
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[8:11], s[34:35] offset:32
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[12:15], s[34:35] offset:48
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[16:19], s[34:35] offset:64
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[20:23], s[34:35] offset:80
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[24:27], s[34:35] offset:96
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[28:31], s[34:35] offset:112
; GFX1250-GISEL-NEXT: s_endpgm
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds <32 x float>, ptr addrspace(1) %a, i32 %id
@@ -2488,17 +2650,19 @@ define amdgpu_kernel void @fma_v2_v_imm(ptr addrspace(1) %a) {
; GFX1250-GISEL-LABEL: fma_v2_v_imm:
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; GFX1250-GISEL-NEXT: v_and_b32_e32 v2, 0x3ff, v0
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v6, 0x3ff, v0
; GFX1250-GISEL-NEXT: s_mov_b32 s2, 0x42c80000
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_mov_b32 s4, 0x43480000
; GFX1250-GISEL-NEXT: s_mov_b32 s3, s2
; GFX1250-GISEL-NEXT: s_mov_b32 s5, s4
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[4:5]
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v6, s[0:1] scale_offset
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], s[2:3], s[4:5]
-; GFX1250-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], v[2:3], v[4:5]
+; GFX1250-GISEL-NEXT: global_store_b64 v6, v[0:1], s[0:1] scale_offset
; GFX1250-GISEL-NEXT: s_endpgm
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id
@@ -2653,17 +2817,19 @@ define amdgpu_kernel void @fma_v2_v_lit_splat(ptr addrspace(1) %a) {
; GFX1250-GISEL-LABEL: fma_v2_v_lit_splat:
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; GFX1250-GISEL-NEXT: v_and_b32_e32 v2, 0x3ff, v0
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v6, 0x3ff, v0
; GFX1250-GISEL-NEXT: s_mov_b32 s2, 4.0
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_mov_b32 s4, 1.0
; GFX1250-GISEL-NEXT: s_mov_b32 s3, s2
; GFX1250-GISEL-NEXT: s_mov_b32 s5, s4
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[4:5]
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v6, s[0:1] scale_offset
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], s[2:3], s[4:5]
-; GFX1250-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], v[2:3], v[4:5]
+; GFX1250-GISEL-NEXT: global_store_b64 v6, v[0:1], s[0:1] scale_offset
; GFX1250-GISEL-NEXT: s_endpgm
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id
@@ -2740,29 +2906,30 @@ define amdgpu_kernel void @fma_v2_v_unfoldable_lit(ptr addrspace(1) %a) {
; GFX1250-SDAG-LABEL: fma_v2_v_unfoldable_lit:
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; GFX1250-SDAG-NEXT: v_and_b32_e32 v2, 0x3ff, v0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[2:3], lit64(0x400000003f800000)
-; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[4:5], lit64(0x4040000040800000)
+; GFX1250-SDAG-NEXT: v_and_b32_e32 v6, 0x3ff, v0
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[2:3], lit64(0x4040000040800000)
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[4:5], lit64(0x400000003f800000)
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset
+; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v6, s[0:1] scale_offset
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
-; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[0:1], v[0:1], s[4:5], s[2:3]
-; GFX1250-SDAG-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset
+; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[0:1], v[0:1], v[2:3], v[4:5]
+; GFX1250-SDAG-NEXT: global_store_b64 v6, v[0:1], s[0:1] scale_offset
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: fma_v2_v_unfoldable_lit:
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; GFX1250-GISEL-NEXT: v_and_b32_e32 v2, 0x3ff, v0
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v6, 0x3ff, v0
; GFX1250-GISEL-NEXT: s_mov_b64 s[2:3], lit64(0x4040000040800000)
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_mov_b64 s[4:5], lit64(0x400000003f800000)
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[4:5]
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v6, s[0:1] scale_offset
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], s[2:3], s[4:5]
-; GFX1250-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], v[2:3], v[4:5]
+; GFX1250-GISEL-NEXT: global_store_b64 v6, v[0:1], s[0:1] scale_offset
; GFX1250-GISEL-NEXT: s_endpgm
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id
@@ -3268,20 +3435,22 @@ define amdgpu_kernel void @fadd_fadd_fsub_0(<2 x float> %arg) {
; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_3) | instskip(NEXT) | instid1(SALU_CYCLE_3)
; GFX1250-SDAG-NEXT: s_add_f32 s1, s1, 0
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX1250-SDAG-NEXT: flat_store_b64 v[0:1], v[0:1]
+; GFX1250-SDAG-NEXT: flat_store_b64 v[0:1], v[0:1] scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: fadd_fadd_fsub_0:
; GFX1250-GISEL: ; %bb.0: ; %bb
; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], s[0:1], 0
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], 0
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, v1
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v0, v1
; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], 0
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_mov_b32_e32 v3, v0
-; GFX1250-GISEL-NEXT: flat_store_b64 v[0:1], v[2:3]
+; GFX1250-GISEL-NEXT: flat_store_b64 v[0:1], v[2:3] scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_endpgm
bb:
%i12 = fadd <2 x float> zeroinitializer, %arg
@@ -3363,15 +3532,16 @@ define amdgpu_kernel void @fadd_fadd_fsub(<2 x float> %arg, <2 x float> %arg1, p
; GFX1250-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
; GFX1250-SDAG-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
-; GFX1250-SDAG-NEXT: v_mov_b32_e32 v4, 0
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX1250-SDAG-NEXT: s_add_f32 s6, s1, s3
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], s[2:3], s[6:7] op_sel_hi:[1,0]
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, v0
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[2:3], s[2:3] neg_lo:[0,1] neg_hi:[0,1]
-; GFX1250-SDAG-NEXT: global_store_b64 v4, v[0:1], s[4:5]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
+; GFX1250-SDAG-NEXT: s_add_f32 s2, s1, s3
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_3)
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[2:3], v[0:1], s[2:3] op_sel_hi:[1,0]
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, v2
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[4:5], v[0:1] neg_lo:[0,1] neg_hi:[0,1]
+; GFX1250-SDAG-NEXT: global_store_b64 v2, v[0:1], s[4:5]
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: fadd_fadd_fsub:
@@ -3380,13 +3550,16 @@ define amdgpu_kernel void @fadd_fadd_fsub(<2 x float> %arg, <2 x float> %arg1, p
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], s[0:1], s[2:3]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-GISEL-NEXT: s_sub_f32 s0, s0, s2
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_3)
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v0, v1 :: v_dual_mov_b32 v2, s0
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], s[2:3], v[0:1]
-; GFX1250-GISEL-NEXT: v_dual_subrev_f32 v3, s3, v0 :: v_dual_mov_b32 v0, 0
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3]
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, v1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[2:3], v[0:1]
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_subrev_f32 v3, s3, v0
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, 0
; GFX1250-GISEL-NEXT: global_store_b64 v0, v[2:3], s[4:5]
; GFX1250-GISEL-NEXT: s_endpgm
bb:
@@ -3593,7 +3766,9 @@ define amdgpu_kernel void @fneg_v2f32_scalar(ptr addrspace(1) %a, <2 x float> %x
; GFX1250-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX1250-GISEL-NEXT: v_mov_b32_e32 v2, 0
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], 1.0, s[2:3] op_sel_hi:[0,1] neg_lo:[0,1] neg_hi:[0,1]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], 1.0, v[0:1] op_sel_hi:[0,1] neg_lo:[0,1] neg_hi:[0,1]
; GFX1250-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX1250-GISEL-NEXT: s_endpgm
%fneg = fsub <2 x float> <float -0.0, float -0.0>, %x
diff --git a/llvm/test/CodeGen/AMDGPU/ps-shader-arg-count.ll b/llvm/test/CodeGen/AMDGPU/ps-shader-arg-count.ll
index 013b68a..99e5d00 100644
--- a/llvm/test/CodeGen/AMDGPU/ps-shader-arg-count.ll
+++ b/llvm/test/CodeGen/AMDGPU/ps-shader-arg-count.ll
@@ -1,5 +1,7 @@
-;RUN: llc < %s -mtriple=amdgcn-pal -mcpu=gfx1010 | FileCheck %s --check-prefixes=CHECK
-;RUN: llc < %s -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 | FileCheck %s --check-prefixes=CHECK
+;RUN: llc -global-isel=1 < %s -mtriple=amdgcn-pal -mcpu=gfx1010 | FileCheck %s --check-prefixes=CHECK
+;RUN: llc -global-isel=1 < %s -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 | FileCheck %s --check-prefixes=CHECK
+;RUN: llc -global-isel=0 < %s -mtriple=amdgcn-pal -mcpu=gfx1010 | FileCheck %s --check-prefixes=CHECK
+;RUN: llc -global-isel=0 < %s -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 | FileCheck %s --check-prefixes=CHECK
; ;CHECK-LABEL: {{^}}_amdgpu_ps_1_arg:
; ;CHECK: NumVgprs: 4
diff --git a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll
index 0c6339e..b35a74e 100644
--- a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll
+++ b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mcpu=gfx90a < %s | FileCheck %s
+; RUN: llc -mcpu=gfx942 -amdgpu-mfma-vgpr-form < %s | FileCheck %s
target triple = "amdgcn-amd-amdhsa"
@@ -7,7 +7,10 @@ define amdgpu_kernel void @test_mfma_f32_32x32x1f32_rewrite_vgpr_mfma(ptr addrsp
; CHECK-LABEL: test_mfma_f32_32x32x1f32_rewrite_vgpr_mfma:
; CHECK: ; %bb.0: ; %bb
; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; CHECK-NEXT: v_lshlrev_b32_e32 v0, 7, v0
+; CHECK-NEXT: v_mov_b32_e32 v32, 1.0
+; CHECK-NEXT: v_mov_b32_e32 v33, 2.0
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: global_load_dwordx4 v[28:31], v0, s[0:1] offset:112
; CHECK-NEXT: global_load_dwordx4 v[24:27], v0, s[0:1] offset:96
@@ -18,7 +21,229 @@ define amdgpu_kernel void @test_mfma_f32_32x32x1f32_rewrite_vgpr_mfma(ptr addrsp
; CHECK-NEXT: global_load_dwordx4 v[4:7], v0, s[0:1] offset:16
; CHECK-NEXT: s_nop 0
; CHECK-NEXT: global_load_dwordx4 v[0:3], v0, s[0:1]
+; CHECK-NEXT: v_accvgpr_write_b32 a0, 1.0
+; CHECK-NEXT: v_accvgpr_write_b32 a1, 2.0
; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v32, v33, v[0:31]
+; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[32:63], a0, a1, v[0:31]
+; CHECK-NEXT: s_nop 7
+; CHECK-NEXT: s_nop 7
+; CHECK-NEXT: s_nop 1
+; CHECK-NEXT: v_mov_b32_e32 v2, v32
+; CHECK-NEXT: v_mov_b32_e32 v3, v33
+; CHECK-NEXT: v_mov_b32_e32 v4, v34
+; CHECK-NEXT: v_mov_b32_e32 v5, v35
+; CHECK-NEXT: v_mov_b32_e32 v6, v36
+; CHECK-NEXT: v_mov_b32_e32 v7, v37
+; CHECK-NEXT: v_mov_b32_e32 v8, v38
+; CHECK-NEXT: v_mov_b32_e32 v9, v39
+; CHECK-NEXT: v_mov_b32_e32 v10, v40
+; CHECK-NEXT: v_mov_b32_e32 v11, v41
+; CHECK-NEXT: v_mov_b32_e32 v12, v42
+; CHECK-NEXT: v_mov_b32_e32 v13, v43
+; CHECK-NEXT: v_mov_b32_e32 v14, v44
+; CHECK-NEXT: v_mov_b32_e32 v15, v45
+; CHECK-NEXT: v_mov_b32_e32 v16, v46
+; CHECK-NEXT: v_mov_b32_e32 v17, v47
+; CHECK-NEXT: v_mov_b32_e32 v18, v48
+; CHECK-NEXT: v_mov_b32_e32 v19, v49
+; CHECK-NEXT: v_mov_b32_e32 v20, v50
+; CHECK-NEXT: v_mov_b32_e32 v21, v51
+; CHECK-NEXT: v_mov_b32_e32 v22, v52
+; CHECK-NEXT: v_mov_b32_e32 v23, v53
+; CHECK-NEXT: v_mov_b32_e32 v24, v54
+; CHECK-NEXT: v_mov_b32_e32 v25, v55
+; CHECK-NEXT: v_mov_b32_e32 v26, v56
+; CHECK-NEXT: v_mov_b32_e32 v27, v57
+; CHECK-NEXT: v_mov_b32_e32 v28, v58
+; CHECK-NEXT: v_mov_b32_e32 v29, v59
+; CHECK-NEXT: v_mov_b32_e32 v30, v60
+; CHECK-NEXT: v_mov_b32_e32 v31, v61
+; CHECK-NEXT: v_mov_b32_e32 v32, 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], a0, a1, v[0:31]
+; CHECK-NEXT: s_nop 7
+; CHECK-NEXT: s_nop 7
+; CHECK-NEXT: s_nop 1
+; CHECK-NEXT: global_store_dwordx4 v32, v[24:27], s[0:1] offset:96
+; CHECK-NEXT: global_store_dwordx4 v32, v[28:31], s[0:1] offset:112
+; CHECK-NEXT: global_store_dwordx4 v32, v[16:19], s[0:1] offset:64
+; CHECK-NEXT: global_store_dwordx4 v32, v[20:23], s[0:1] offset:80
+; CHECK-NEXT: global_store_dwordx4 v32, v[8:11], s[0:1] offset:32
+; CHECK-NEXT: global_store_dwordx4 v32, v[12:15], s[0:1] offset:48
+; CHECK-NEXT: global_store_dwordx4 v32, v[0:3], s[0:1]
+; CHECK-NEXT: global_store_dwordx4 v32, v[4:7], s[0:1] offset:16
+; CHECK-NEXT: s_endpgm
+bb:
+ %id = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr <32 x float>, ptr addrspace(1) %arg, i32 %id
+ %in.1 = load <32 x float>, ptr addrspace(1) %gep, align 128
+ %mai.1 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %in.1, i32 0, i32 0, i32 0)
+ %mai.2 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %mai.1, i32 0, i32 0, i32 0)
+ %tmp.1 = shufflevector <32 x float> %mai.2, <32 x float> %mai.1, <32 x i32> <i32 32, i32 33, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29>
+ %mai.3 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %tmp.1, i32 0, i32 0, i32 0)
+ store <32 x float> %mai.3, ptr addrspace(1) %arg, align 128
+ ret void
+}
+
+define amdgpu_kernel void @test_mfma_f32_32x32x1f32_rewrite_vgpr_mfma_noshuffle(ptr addrspace(1) %arg) #0 {
+; CHECK-LABEL: test_mfma_f32_32x32x1f32_rewrite_vgpr_mfma_noshuffle:
+; CHECK: ; %bb.0: ; %bb
+; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; CHECK-NEXT: v_lshlrev_b32_e32 v0, 7, v0
+; CHECK-NEXT: v_mov_b32_e32 v32, 1.0
+; CHECK-NEXT: v_mov_b32_e32 v33, 2.0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[28:31], v0, s[0:1] offset:112
+; CHECK-NEXT: global_load_dwordx4 v[24:27], v0, s[0:1] offset:96
+; CHECK-NEXT: global_load_dwordx4 v[20:23], v0, s[0:1] offset:80
+; CHECK-NEXT: global_load_dwordx4 v[16:19], v0, s[0:1] offset:64
+; CHECK-NEXT: global_load_dwordx4 v[12:15], v0, s[0:1] offset:48
+; CHECK-NEXT: global_load_dwordx4 v[8:11], v0, s[0:1] offset:32
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v0, s[0:1] offset:16
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: global_load_dwordx4 v[0:3], v0, s[0:1]
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v32, v33, v[0:31]
+; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v32, v33, v[0:31]
+; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v32, v33, v[0:31]
+; CHECK-NEXT: v_mov_b32_e32 v32, 0
+; CHECK-NEXT: s_nop 7
+; CHECK-NEXT: s_nop 7
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: global_store_dwordx4 v32, v[24:27], s[0:1] offset:96
+; CHECK-NEXT: global_store_dwordx4 v32, v[28:31], s[0:1] offset:112
+; CHECK-NEXT: global_store_dwordx4 v32, v[16:19], s[0:1] offset:64
+; CHECK-NEXT: global_store_dwordx4 v32, v[20:23], s[0:1] offset:80
+; CHECK-NEXT: global_store_dwordx4 v32, v[8:11], s[0:1] offset:32
+; CHECK-NEXT: global_store_dwordx4 v32, v[12:15], s[0:1] offset:48
+; CHECK-NEXT: global_store_dwordx4 v32, v[0:3], s[0:1]
+; CHECK-NEXT: global_store_dwordx4 v32, v[4:7], s[0:1] offset:16
+; CHECK-NEXT: s_endpgm
+bb:
+ %id = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr <32 x float>, ptr addrspace(1) %arg, i32 %id
+ %in.1 = load <32 x float>, ptr addrspace(1) %gep, align 128
+ %mai.1 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %in.1, i32 0, i32 0, i32 0)
+ %mai.2 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %mai.1, i32 0, i32 0, i32 0)
+ %mai.3 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %mai.2, i32 0, i32 0, i32 0)
+ store <32 x float> %mai.3, ptr addrspace(1) %arg, align 128
+ ret void
+}
+
+define amdgpu_kernel void @test_mfma_f32_32x32x1f32_rewrite_vgpr_mfma_imm0_src2(ptr addrspace(1) %arg) #0 {
+; CHECK-LABEL: test_mfma_f32_32x32x1f32_rewrite_vgpr_mfma_imm0_src2:
+; CHECK: ; %bb.0: ; %bb
+; CHECK-NEXT: v_mov_b32_e32 v32, 1.0
+; CHECK-NEXT: v_mov_b32_e32 v33, 2.0
+; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v32, v33, 0
+; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v32, v33, v[0:31]
+; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v32, v33, v[0:31]
+; CHECK-NEXT: v_mov_b32_e32 v32, 0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_nop 7
+; CHECK-NEXT: s_nop 7
+; CHECK-NEXT: global_store_dwordx4 v32, v[28:31], s[0:1] offset:112
+; CHECK-NEXT: global_store_dwordx4 v32, v[24:27], s[0:1] offset:96
+; CHECK-NEXT: global_store_dwordx4 v32, v[20:23], s[0:1] offset:80
+; CHECK-NEXT: global_store_dwordx4 v32, v[16:19], s[0:1] offset:64
+; CHECK-NEXT: global_store_dwordx4 v32, v[12:15], s[0:1] offset:48
+; CHECK-NEXT: global_store_dwordx4 v32, v[8:11], s[0:1] offset:32
+; CHECK-NEXT: global_store_dwordx4 v32, v[4:7], s[0:1] offset:16
+; CHECK-NEXT: global_store_dwordx4 v32, v[0:3], s[0:1]
+; CHECK-NEXT: s_endpgm
+bb:
+ %id = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr <32 x float>, ptr addrspace(1) %arg, i32 %id
+ %in.1 = load <32 x float>, ptr addrspace(1) %gep, align 128
+ %mai.1 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> zeroinitializer, i32 0, i32 0, i32 0)
+ %mai.2 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %mai.1, i32 0, i32 0, i32 0)
+ %mai.3 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %mai.2, i32 0, i32 0, i32 0)
+ store <32 x float> %mai.3, ptr addrspace(1) %arg, align 128
+ ret void
+}
+
+define amdgpu_kernel void @test_mfma_f32_32x32x1f32_rewrite_vgpr_mfma_imm1_src2(ptr addrspace(1) %arg) #0 {
+; CHECK-LABEL: test_mfma_f32_32x32x1f32_rewrite_vgpr_mfma_imm1_src2:
+; CHECK: ; %bb.0: ; %bb
+; CHECK-NEXT: v_mov_b32_e32 v32, 1.0
+; CHECK-NEXT: v_mov_b32_e32 v33, 2.0
+; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v32, v33, 1.0
+; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v32, v33, v[0:31]
+; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v32, v33, v[0:31]
+; CHECK-NEXT: v_mov_b32_e32 v32, 0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_nop 7
+; CHECK-NEXT: s_nop 7
+; CHECK-NEXT: global_store_dwordx4 v32, v[28:31], s[0:1] offset:112
+; CHECK-NEXT: global_store_dwordx4 v32, v[24:27], s[0:1] offset:96
+; CHECK-NEXT: global_store_dwordx4 v32, v[20:23], s[0:1] offset:80
+; CHECK-NEXT: global_store_dwordx4 v32, v[16:19], s[0:1] offset:64
+; CHECK-NEXT: global_store_dwordx4 v32, v[12:15], s[0:1] offset:48
+; CHECK-NEXT: global_store_dwordx4 v32, v[8:11], s[0:1] offset:32
+; CHECK-NEXT: global_store_dwordx4 v32, v[4:7], s[0:1] offset:16
+; CHECK-NEXT: global_store_dwordx4 v32, v[0:3], s[0:1]
+; CHECK-NEXT: s_endpgm
+bb:
+ %id = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr <32 x float>, ptr addrspace(1) %arg, i32 %id
+ %in.1 = load <32 x float>, ptr addrspace(1) %gep, align 128
+ %mai.1 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> splat (float 1.0), i32 0, i32 0, i32 0)
+ %mai.2 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %mai.1, i32 0, i32 0, i32 0)
+ %mai.3 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %mai.2, i32 0, i32 0, i32 0)
+ store <32 x float> %mai.3, ptr addrspace(1) %arg, align 128
+ ret void
+}
+
+; The inline asm requires the value be copied to an AGPR class, not
+; the AV_* pseudo we usually expect for register allocator live range
+; splits.
+define amdgpu_kernel void @test_rewrite_mfma_direct_copy_to_agpr_class(ptr addrspace(1) %arg) #0 {
+; CHECK-LABEL: test_rewrite_mfma_direct_copy_to_agpr_class:
+; CHECK: ; %bb.0: ; %bb
+; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; CHECK-NEXT: v_lshlrev_b32_e32 v0, 7, v0
+; CHECK-NEXT: v_mov_b32_e32 v32, 2.0
+; CHECK-NEXT: v_mov_b32_e32 v33, 4.0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 a[28:31], v0, s[0:1] offset:112
+; CHECK-NEXT: global_load_dwordx4 a[24:27], v0, s[0:1] offset:96
+; CHECK-NEXT: global_load_dwordx4 a[20:23], v0, s[0:1] offset:80
+; CHECK-NEXT: global_load_dwordx4 a[16:19], v0, s[0:1] offset:64
+; CHECK-NEXT: global_load_dwordx4 a[12:15], v0, s[0:1] offset:48
+; CHECK-NEXT: global_load_dwordx4 a[8:11], v0, s[0:1] offset:32
+; CHECK-NEXT: global_load_dwordx4 a[4:7], v0, s[0:1] offset:16
+; CHECK-NEXT: global_load_dwordx4 a[0:3], v0, s[0:1]
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 a[0:31], v32, v33, a[0:31]
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use a[0:31]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_endpgm
+bb:
+ %id = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr <32 x float>, ptr addrspace(1) %arg, i32 %id
+ %in = load <32 x float>, ptr addrspace(1) %gep, align 128
+ %mai = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 2.0, float 4.0, <32 x float> %in, i32 0, i32 0, i32 0)
+ call void asm sideeffect "; use $0", "a"(<32 x float> %mai)
+ ret void
+}
+
+; TODO: Handle rewriting this case
+define void @test_rewrite_mfma_imm_src2(float %arg0, float %arg1) #0 {
+; CHECK-LABEL: test_rewrite_mfma_imm_src2:
+; CHECK: ; %bb.0: ; %bb
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v0, v1, 2.0
+; CHECK-NEXT: s_nop 7
+; CHECK-NEXT: s_nop 7
+; CHECK-NEXT: s_nop 1
; CHECK-NEXT: v_accvgpr_write_b32 a0, v0
; CHECK-NEXT: v_accvgpr_write_b32 a1, v1
; CHECK-NEXT: v_accvgpr_write_b32 a2, v2
@@ -51,145 +276,124 @@ define amdgpu_kernel void @test_mfma_f32_32x32x1f32_rewrite_vgpr_mfma(ptr addrsp
; CHECK-NEXT: v_accvgpr_write_b32 a29, v29
; CHECK-NEXT: v_accvgpr_write_b32 a30, v30
; CHECK-NEXT: v_accvgpr_write_b32 a31, v31
-; CHECK-NEXT: v_mov_b32_e32 v0, 1.0
-; CHECK-NEXT: v_mov_b32_e32 v1, 2.0
-; CHECK-NEXT: s_nop 1
-; CHECK-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v0, v1, a[0:31]
-; CHECK-NEXT: v_mfma_f32_32x32x1f32 a[32:63], v0, v1, a[0:31]
-; CHECK-NEXT: s_nop 7
-; CHECK-NEXT: s_nop 7
-; CHECK-NEXT: s_nop 2
-; CHECK-NEXT: v_accvgpr_read_b32 v4, a59
-; CHECK-NEXT: v_accvgpr_read_b32 v5, a58
-; CHECK-NEXT: v_accvgpr_read_b32 v6, a57
-; CHECK-NEXT: v_accvgpr_read_b32 v7, a56
-; CHECK-NEXT: v_accvgpr_read_b32 v8, a55
-; CHECK-NEXT: v_accvgpr_read_b32 v9, a54
-; CHECK-NEXT: v_accvgpr_read_b32 v10, a53
-; CHECK-NEXT: v_accvgpr_read_b32 v11, a52
-; CHECK-NEXT: v_accvgpr_read_b32 v12, a51
-; CHECK-NEXT: v_accvgpr_read_b32 v13, a50
-; CHECK-NEXT: v_accvgpr_read_b32 v14, a49
-; CHECK-NEXT: v_accvgpr_read_b32 v15, a48
-; CHECK-NEXT: v_accvgpr_read_b32 v16, a47
-; CHECK-NEXT: v_accvgpr_read_b32 v17, a46
-; CHECK-NEXT: v_accvgpr_read_b32 v18, a45
-; CHECK-NEXT: v_accvgpr_read_b32 v19, a44
-; CHECK-NEXT: v_accvgpr_read_b32 v20, a43
-; CHECK-NEXT: v_accvgpr_read_b32 v21, a42
-; CHECK-NEXT: v_accvgpr_read_b32 v22, a41
-; CHECK-NEXT: v_accvgpr_read_b32 v23, a40
-; CHECK-NEXT: v_accvgpr_read_b32 v24, a39
-; CHECK-NEXT: v_accvgpr_read_b32 v25, a38
-; CHECK-NEXT: v_accvgpr_read_b32 v26, a37
-; CHECK-NEXT: v_accvgpr_read_b32 v27, a36
-; CHECK-NEXT: v_accvgpr_read_b32 v28, a35
-; CHECK-NEXT: v_accvgpr_read_b32 v29, a34
-; CHECK-NEXT: v_accvgpr_mov_b32 a2, a32
-; CHECK-NEXT: v_accvgpr_mov_b32 a3, a33
-; CHECK-NEXT: v_accvgpr_write_b32 a4, v29
-; CHECK-NEXT: v_accvgpr_write_b32 a5, v28
-; CHECK-NEXT: v_accvgpr_write_b32 a6, v27
-; CHECK-NEXT: v_accvgpr_write_b32 a7, v26
-; CHECK-NEXT: v_accvgpr_write_b32 a8, v25
-; CHECK-NEXT: v_accvgpr_write_b32 a9, v24
-; CHECK-NEXT: v_accvgpr_write_b32 a10, v23
-; CHECK-NEXT: v_accvgpr_write_b32 a11, v22
-; CHECK-NEXT: v_accvgpr_write_b32 a12, v21
-; CHECK-NEXT: v_accvgpr_write_b32 a13, v20
-; CHECK-NEXT: v_accvgpr_write_b32 a14, v19
-; CHECK-NEXT: v_accvgpr_write_b32 a15, v18
-; CHECK-NEXT: v_accvgpr_write_b32 a16, v17
-; CHECK-NEXT: v_accvgpr_write_b32 a17, v16
-; CHECK-NEXT: v_accvgpr_write_b32 a18, v15
-; CHECK-NEXT: v_accvgpr_write_b32 a19, v14
-; CHECK-NEXT: v_accvgpr_write_b32 a20, v13
-; CHECK-NEXT: v_accvgpr_write_b32 a21, v12
-; CHECK-NEXT: v_accvgpr_write_b32 a22, v11
-; CHECK-NEXT: v_accvgpr_write_b32 a23, v10
-; CHECK-NEXT: v_accvgpr_write_b32 a24, v9
-; CHECK-NEXT: v_accvgpr_write_b32 a25, v8
-; CHECK-NEXT: v_accvgpr_write_b32 a26, v7
-; CHECK-NEXT: v_accvgpr_write_b32 a27, v6
-; CHECK-NEXT: v_accvgpr_write_b32 a28, v5
-; CHECK-NEXT: v_accvgpr_write_b32 a29, v4
-; CHECK-NEXT: v_accvgpr_mov_b32 a30, a60
-; CHECK-NEXT: v_accvgpr_mov_b32 a31, a61
-; CHECK-NEXT: s_nop 1
-; CHECK-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v0, v1, a[0:31]
-; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use a[0:31]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+bb:
+ %mai = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float %arg0, float %arg1, <32 x float> splat (float 2.0), i32 0, i32 0, i32 0)
+ call void asm sideeffect "; use $0", "a"(<32 x float> %mai)
+ ret void
+}
+
+; TODO: Handle rewriting this case
+define void @test_rewrite_mfma_subreg_extract0(float %arg0, float %arg1, ptr addrspace(1) %ptr) #0 {
+; CHECK-LABEL: test_rewrite_mfma_subreg_extract0:
+; CHECK: ; %bb.0: ; %bb
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[30:33], v[2:3], off offset:112
+; CHECK-NEXT: global_load_dwordx4 v[26:29], v[2:3], off offset:96
+; CHECK-NEXT: global_load_dwordx4 v[22:25], v[2:3], off offset:80
+; CHECK-NEXT: global_load_dwordx4 v[18:21], v[2:3], off offset:64
+; CHECK-NEXT: global_load_dwordx4 v[14:17], v[2:3], off offset:48
+; CHECK-NEXT: global_load_dwordx4 v[10:13], v[2:3], off offset:32
+; CHECK-NEXT: global_load_dwordx4 v[6:9], v[2:3], off offset:16
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[2:33], v0, v1, v[2:33]
; CHECK-NEXT: s_nop 7
; CHECK-NEXT: s_nop 7
; CHECK-NEXT: s_nop 1
-; CHECK-NEXT: global_store_dwordx4 v0, a[24:27], s[0:1] offset:96
-; CHECK-NEXT: global_store_dwordx4 v0, a[28:31], s[0:1] offset:112
-; CHECK-NEXT: global_store_dwordx4 v0, a[16:19], s[0:1] offset:64
-; CHECK-NEXT: global_store_dwordx4 v0, a[20:23], s[0:1] offset:80
-; CHECK-NEXT: global_store_dwordx4 v0, a[8:11], s[0:1] offset:32
-; CHECK-NEXT: global_store_dwordx4 v0, a[12:15], s[0:1] offset:48
-; CHECK-NEXT: global_store_dwordx4 v0, a[0:3], s[0:1]
-; CHECK-NEXT: global_store_dwordx4 v0, a[4:7], s[0:1] offset:16
-; CHECK-NEXT: s_endpgm
+; CHECK-NEXT: v_accvgpr_write_b32 a0, v2
+; CHECK-NEXT: v_accvgpr_write_b32 a1, v3
+; CHECK-NEXT: v_accvgpr_write_b32 a2, v4
+; CHECK-NEXT: v_accvgpr_write_b32 a3, v5
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use a[0:3]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
bb:
- %id = call i32 @llvm.amdgcn.workitem.id.x()
- %gep = getelementptr <32 x float>, ptr addrspace(1) %arg, i32 %id
- %in.1 = load <32 x float>, ptr addrspace(1) %gep, align 128
- %mai.1 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %in.1, i32 0, i32 0, i32 0)
- %mai.2 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %mai.1, i32 0, i32 0, i32 0)
- %tmp.1 = shufflevector <32 x float> %mai.2, <32 x float> %mai.1, <32 x i32> <i32 32, i32 33, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29>
- %mai.3 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %tmp.1, i32 0, i32 0, i32 0)
- store <32 x float> %mai.3, ptr addrspace(1) %arg, align 128
+ %src2 = load <32 x float>, ptr addrspace(1) %ptr
+ %mai = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float %arg0, float %arg1, <32 x float> %src2, i32 0, i32 0, i32 0)
+ %extract.sub4 = shufflevector <32 x float> %mai, <32 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ call void asm sideeffect "; use $0", "a"(<4 x float> %extract.sub4)
ret void
}
-define amdgpu_kernel void @test_mfma_f32_32x32x1f32_rewrite_vgpr_mfma_noshuffle(ptr addrspace(1) %arg) #0 {
-; CHECK-LABEL: test_mfma_f32_32x32x1f32_rewrite_vgpr_mfma_noshuffle:
+define void @test_rewrite_mfma_subreg_extract1(float %arg0, float %arg1, ptr addrspace(1) %ptr) #0 {
+; CHECK-LABEL: test_rewrite_mfma_subreg_extract1:
; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
-; CHECK-NEXT: v_lshlrev_b32_e32 v0, 7, v0
-; CHECK-NEXT: v_mov_b32_e32 v1, 2.0
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: global_load_dwordx4 a[28:31], v0, s[0:1] offset:112
-; CHECK-NEXT: global_load_dwordx4 a[24:27], v0, s[0:1] offset:96
-; CHECK-NEXT: global_load_dwordx4 a[20:23], v0, s[0:1] offset:80
-; CHECK-NEXT: global_load_dwordx4 a[16:19], v0, s[0:1] offset:64
-; CHECK-NEXT: global_load_dwordx4 a[12:15], v0, s[0:1] offset:48
-; CHECK-NEXT: global_load_dwordx4 a[8:11], v0, s[0:1] offset:32
-; CHECK-NEXT: global_load_dwordx4 a[4:7], v0, s[0:1] offset:16
-; CHECK-NEXT: global_load_dwordx4 a[0:3], v0, s[0:1]
-; CHECK-NEXT: v_mov_b32_e32 v0, 1.0
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[30:33], v[2:3], off offset:112
+; CHECK-NEXT: global_load_dwordx4 v[26:29], v[2:3], off offset:96
+; CHECK-NEXT: global_load_dwordx4 v[22:25], v[2:3], off offset:80
+; CHECK-NEXT: global_load_dwordx4 v[18:21], v[2:3], off offset:64
+; CHECK-NEXT: global_load_dwordx4 v[14:17], v[2:3], off offset:48
+; CHECK-NEXT: global_load_dwordx4 v[10:13], v[2:3], off offset:32
+; CHECK-NEXT: global_load_dwordx4 v[6:9], v[2:3], off offset:16
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off
; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[2:33], v0, v1, v[2:33]
+; CHECK-NEXT: s_nop 7
+; CHECK-NEXT: s_nop 7
+; CHECK-NEXT: s_nop 1
+; CHECK-NEXT: v_accvgpr_write_b32 a0, v6
+; CHECK-NEXT: v_accvgpr_write_b32 a1, v7
+; CHECK-NEXT: v_accvgpr_write_b32 a2, v8
+; CHECK-NEXT: v_accvgpr_write_b32 a3, v9
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use a[0:3]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+bb:
+ %src2 = load <32 x float>, ptr addrspace(1) %ptr
+ %mai = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float %arg0, float %arg1, <32 x float> %src2, i32 0, i32 0, i32 0)
+ %extract.sub4 = shufflevector <32 x float> %mai, <32 x float> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ call void asm sideeffect "; use $0", "a"(<4 x float> %extract.sub4)
+ ret void
+}
+
+; Extract at an odd offset (not aligned to a subregister boundary)
+define void @test_rewrite_mfma_subreg_extract2(float %arg0, float %arg1, ptr addrspace(1) %ptr) #0 {
+; CHECK-LABEL: test_rewrite_mfma_subreg_extract2:
+; CHECK: ; %bb.0: ; %bb
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[30:33], v[2:3], off offset:112
+; CHECK-NEXT: global_load_dwordx4 v[26:29], v[2:3], off offset:96
+; CHECK-NEXT: global_load_dwordx4 v[22:25], v[2:3], off offset:80
+; CHECK-NEXT: global_load_dwordx4 v[18:21], v[2:3], off offset:64
+; CHECK-NEXT: global_load_dwordx4 v[14:17], v[2:3], off offset:48
+; CHECK-NEXT: global_load_dwordx4 v[10:13], v[2:3], off offset:32
+; CHECK-NEXT: global_load_dwordx4 v[6:9], v[2:3], off offset:16
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v0, v1, a[0:31]
-; CHECK-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v0, v1, a[0:31]
-; CHECK-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v0, v1, a[0:31]
-; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[2:33], v0, v1, v[2:33]
; CHECK-NEXT: s_nop 7
; CHECK-NEXT: s_nop 7
; CHECK-NEXT: s_nop 1
-; CHECK-NEXT: global_store_dwordx4 v0, a[24:27], s[0:1] offset:96
-; CHECK-NEXT: global_store_dwordx4 v0, a[28:31], s[0:1] offset:112
-; CHECK-NEXT: global_store_dwordx4 v0, a[16:19], s[0:1] offset:64
-; CHECK-NEXT: global_store_dwordx4 v0, a[20:23], s[0:1] offset:80
-; CHECK-NEXT: global_store_dwordx4 v0, a[8:11], s[0:1] offset:32
-; CHECK-NEXT: global_store_dwordx4 v0, a[12:15], s[0:1] offset:48
-; CHECK-NEXT: global_store_dwordx4 v0, a[0:3], s[0:1]
-; CHECK-NEXT: global_store_dwordx4 v0, a[4:7], s[0:1] offset:16
-; CHECK-NEXT: s_endpgm
+; CHECK-NEXT: v_accvgpr_write_b32 a0, v3
+; CHECK-NEXT: v_accvgpr_write_b32 a1, v4
+; CHECK-NEXT: v_accvgpr_write_b32 a2, v5
+; CHECK-NEXT: v_accvgpr_write_b32 a3, v6
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use a[0:3]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
bb:
- %id = call i32 @llvm.amdgcn.workitem.id.x()
- %gep = getelementptr <32 x float>, ptr addrspace(1) %arg, i32 %id
- %in.1 = load <32 x float>, ptr addrspace(1) %gep, align 128
- %mai.1 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %in.1, i32 0, i32 0, i32 0)
- %mai.2 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %mai.1, i32 0, i32 0, i32 0)
- %mai.3 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %mai.2, i32 0, i32 0, i32 0)
- store <32 x float> %mai.3, ptr addrspace(1) %arg, align 128
+ %src2 = load <32 x float>, ptr addrspace(1) %ptr
+ %mai = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float %arg0, float %arg1, <32 x float> %src2, i32 0, i32 0, i32 0)
+ %extract.sub4 = shufflevector <32 x float> %mai, <32 x float> poison, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
+ call void asm sideeffect "; use $0", "a"(<4 x float> %extract.sub4)
ret void
}
-declare <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float, float, <32 x float>, i32 immarg, i32 immarg, i32 immarg) #1
-declare noundef i32 @llvm.amdgcn.workitem.id.x() #2
+declare <4 x float> @llvm.amdgcn.mfma.f32.16x16x16f16(<4 x half>, <4 x half>, <4 x float>, i32 immarg, i32 immarg, i32 immarg) #2
+declare <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float, float, <32 x float>, i32 immarg, i32 immarg, i32 immarg) #2
+declare noundef range(i32 0, 1024) i32 @llvm.amdgcn.workitem.id.x() #3
-attributes #0 = { "amdgpu-flat-work-group-size"="1,256" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,4" }
+attributes #0 = { nounwind "amdgpu-flat-work-group-size"="1,256" "amdgpu-waves-per-eu"="4,4" }
attributes #1 = { convergent nocallback nofree nosync nounwind willreturn memory(none) }
attributes #2 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
diff --git a/llvm/test/CodeGen/AMDGPU/saddsat.ll b/llvm/test/CodeGen/AMDGPU/saddsat.ll
index 019eb2c..4995ce6 100644
--- a/llvm/test/CodeGen/AMDGPU/saddsat.ll
+++ b/llvm/test/CodeGen/AMDGPU/saddsat.ll
@@ -124,9 +124,8 @@ define i32 @v_saddsat_i32(i32 %lhs, i32 %rhs) {
; GFX6-NEXT: v_add_i32_e64 v1, s[4:5], v0, v1
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v1, v0
; GFX6-NEXT: v_ashrrev_i32_e32 v0, 31, v1
-; GFX6-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v1, -v0, s[4:5]
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_saddsat_i32:
@@ -136,9 +135,8 @@ define i32 @v_saddsat_i32(i32 %lhs, i32 %rhs) {
; GFX8-NEXT: v_add_u32_e64 v1, s[4:5], v0, v1
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v1, v0
; GFX8-NEXT: v_ashrrev_i32_e32 v0, 31, v1
-; GFX8-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v1, -v0, s[4:5]
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_saddsat_i32:
@@ -383,16 +381,14 @@ define <2 x i32> @v_saddsat_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
; GFX6-NEXT: v_add_i32_e64 v2, s[4:5], v0, v2
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v2, v0
; GFX6-NEXT: v_ashrrev_i32_e32 v0, 31, v2
-; GFX6-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v2, -v0, s[4:5]
; GFX6-NEXT: v_add_i32_e64 v2, s[4:5], v1, v3
; GFX6-NEXT: v_cmp_gt_i32_e32 vcc, 0, v3
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v2, v1
; GFX6-NEXT: v_ashrrev_i32_e32 v1, 31, v2
-; GFX6-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v2, -v1, s[4:5]
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_saddsat_v2i32:
@@ -402,16 +398,14 @@ define <2 x i32> @v_saddsat_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
; GFX8-NEXT: v_add_u32_e64 v2, s[4:5], v0, v2
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v2, v0
; GFX8-NEXT: v_ashrrev_i32_e32 v0, 31, v2
-; GFX8-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v2, -v0, s[4:5]
; GFX8-NEXT: v_add_u32_e64 v2, s[4:5], v1, v3
; GFX8-NEXT: v_cmp_gt_i32_e32 vcc, 0, v3
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v2, v1
; GFX8-NEXT: v_ashrrev_i32_e32 v1, 31, v2
-; GFX8-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v2, -v1, s[4:5]
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_saddsat_v2i32:
@@ -442,8 +436,7 @@ define i64 @v_saddsat_i64(i64 %lhs, i64 %rhs) {
; GFX6-NEXT: v_ashrrev_i32_e32 v1, 31, v5
; GFX6-NEXT: s_xor_b64 vcc, s[4:5], vcc
; GFX6-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
-; GFX6-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
-; GFX6-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v5, -v1, vcc
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_saddsat_i64:
@@ -456,8 +449,7 @@ define i64 @v_saddsat_i64(i64 %lhs, i64 %rhs) {
; GFX8-NEXT: v_ashrrev_i32_e32 v1, 31, v5
; GFX8-NEXT: s_xor_b64 vcc, s[4:5], vcc
; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
-; GFX8-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
-; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v5, -v1, vcc
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_saddsat_i64:
@@ -470,8 +462,7 @@ define i64 @v_saddsat_i64(i64 %lhs, i64 %rhs) {
; GFX9-NEXT: v_ashrrev_i32_e32 v1, 31, v5
; GFX9-NEXT: s_xor_b64 vcc, s[4:5], vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
-; GFX9-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
-; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v5, -v1, vcc
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_saddsat_i64:
@@ -480,12 +471,11 @@ define i64 @v_saddsat_i64(i64 %lhs, i64 %rhs) {
; GFX10-NEXT: v_add_co_u32 v4, vcc_lo, v0, v2
; GFX10-NEXT: v_add_co_ci_u32_e32 v5, vcc_lo, v1, v3, vcc_lo
; GFX10-NEXT: v_cmp_gt_i64_e64 s4, 0, v[2:3]
-; GFX10-NEXT: v_ashrrev_i32_e32 v6, 31, v5
; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[4:5], v[0:1]
-; GFX10-NEXT: v_xor_b32_e32 v1, 0x80000000, v6
+; GFX10-NEXT: v_ashrrev_i32_e32 v1, 31, v5
; GFX10-NEXT: s_xor_b32 vcc_lo, s4, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v6, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v5, -v1, vcc_lo
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_saddsat_i64:
@@ -494,11 +484,11 @@ define i64 @v_saddsat_i64(i64 %lhs, i64 %rhs) {
; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, v0, v2
; GFX11-NEXT: v_add_co_ci_u32_e64 v5, null, v1, v3, vcc_lo
; GFX11-NEXT: v_cmp_gt_i64_e64 s0, 0, v[2:3]
-; GFX11-NEXT: v_ashrrev_i32_e32 v6, 31, v5
; GFX11-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[4:5], v[0:1]
-; GFX11-NEXT: v_xor_b32_e32 v1, 0x80000000, v6
+; GFX11-NEXT: v_ashrrev_i32_e32 v1, 31, v5
; GFX11-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
-; GFX11-NEXT: v_dual_cndmask_b32 v0, v4, v6 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX11-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v5, -v1, vcc_lo
; GFX11-NEXT: s_setpc_b64 s[30:31]
%result = call i64 @llvm.sadd.sat.i64(i64 %lhs, i64 %rhs)
ret i64 %result
diff --git a/llvm/test/CodeGen/AMDGPU/sgpr-count-graphics.ll b/llvm/test/CodeGen/AMDGPU/sgpr-count-graphics.ll
new file mode 100644
index 0000000..3c7b5bf
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/sgpr-count-graphics.ll
@@ -0,0 +1,38 @@
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 < %s | FileCheck %s --check-prefixes=CHECK,PACKED16
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=tahiti < %s | FileCheck %s --check-prefixes=CHECK,SPLIT16
+
+@global = addrspace(1) global i32 poison, align 4
+
+; The hardware initializes the registers received as arguments by entry points,
+; so they will be counted even if unused.
+
+; Vectors of i1 are always unpacked
+
+; CHECK-LABEL: vec_of_i1:
+; CHECK: TotalNumSgprs: 8
+define amdgpu_ps void @vec_of_i1(<8 x i1> inreg %v8i1) {
+ ret void
+}
+
+; Vectors of i8 are always unpacked
+
+; CHECK-LABEL: vec_of_i8:
+; CHECK: TotalNumSgprs: 4
+define amdgpu_ps void @vec_of_i8(<4 x i8> inreg %v4i8) {
+ ret void
+}
+
+; Vectors of 16-bit types are packed for newer architectures and unpacked for older ones.
+
+; CHECK-LABEL: vec_of_16_bit_ty:
+; PACKED16: TotalNumSgprs: 3
+; SPLIT16: TotalNumSgprs: 6
+define amdgpu_ps void @vec_of_16_bit_ty(<2 x i16> inreg %v2i16, <4 x half> inreg %v4half) {
+ ret void
+}
+
+; CHECK-LABEL: buffer_fat_ptr:
+; CHECK: TotalNumSgprs: 5
+define amdgpu_ps void @buffer_fat_ptr(ptr addrspace(7) inreg %p) {
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/ssubsat.ll b/llvm/test/CodeGen/AMDGPU/ssubsat.ll
index 40d80f5..09c0e77 100644
--- a/llvm/test/CodeGen/AMDGPU/ssubsat.ll
+++ b/llvm/test/CodeGen/AMDGPU/ssubsat.ll
@@ -124,9 +124,8 @@ define i32 @v_ssubsat_i32(i32 %lhs, i32 %rhs) {
; GFX6-NEXT: v_sub_i32_e64 v1, s[4:5], v0, v1
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v1, v0
; GFX6-NEXT: v_ashrrev_i32_e32 v0, 31, v1
-; GFX6-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v1, -v0, s[4:5]
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_ssubsat_i32:
@@ -136,9 +135,8 @@ define i32 @v_ssubsat_i32(i32 %lhs, i32 %rhs) {
; GFX8-NEXT: v_sub_u32_e64 v1, s[4:5], v0, v1
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v1, v0
; GFX8-NEXT: v_ashrrev_i32_e32 v0, 31, v1
-; GFX8-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v1, -v0, s[4:5]
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_ssubsat_i32:
@@ -383,16 +381,14 @@ define <2 x i32> @v_ssubsat_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
; GFX6-NEXT: v_sub_i32_e64 v2, s[4:5], v0, v2
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v2, v0
; GFX6-NEXT: v_ashrrev_i32_e32 v0, 31, v2
-; GFX6-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v2, -v0, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v2, s[4:5], v1, v3
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v3
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v2, v1
; GFX6-NEXT: v_ashrrev_i32_e32 v1, 31, v2
-; GFX6-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v2, -v1, s[4:5]
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_ssubsat_v2i32:
@@ -402,16 +398,14 @@ define <2 x i32> @v_ssubsat_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
; GFX8-NEXT: v_sub_u32_e64 v2, s[4:5], v0, v2
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v2, v0
; GFX8-NEXT: v_ashrrev_i32_e32 v0, 31, v2
-; GFX8-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v2, -v0, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v2, s[4:5], v1, v3
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v3
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v2, v1
; GFX8-NEXT: v_ashrrev_i32_e32 v1, 31, v2
-; GFX8-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v2, -v1, s[4:5]
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_ssubsat_v2i32:
@@ -439,23 +433,20 @@ define <3 x i32> @v_ssubsat_v3i32(<3 x i32> %lhs, <3 x i32> %rhs) {
; GFX6-NEXT: v_sub_i32_e64 v3, s[4:5], v0, v3
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v3, v0
; GFX6-NEXT: v_ashrrev_i32_e32 v0, 31, v3
-; GFX6-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v3, -v0, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v3, s[4:5], v1, v4
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v4
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v3, v1
; GFX6-NEXT: v_ashrrev_i32_e32 v1, 31, v3
-; GFX6-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v3, -v1, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v3, s[4:5], v2, v5
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v5
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v3, v2
; GFX6-NEXT: v_ashrrev_i32_e32 v2, 31, v3
-; GFX6-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v2, v3, -v2, s[4:5]
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_ssubsat_v3i32:
@@ -465,23 +456,20 @@ define <3 x i32> @v_ssubsat_v3i32(<3 x i32> %lhs, <3 x i32> %rhs) {
; GFX8-NEXT: v_sub_u32_e64 v3, s[4:5], v0, v3
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v3, v0
; GFX8-NEXT: v_ashrrev_i32_e32 v0, 31, v3
-; GFX8-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v3, -v0, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v3, s[4:5], v1, v4
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v4
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v3, v1
; GFX8-NEXT: v_ashrrev_i32_e32 v1, 31, v3
-; GFX8-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v3, -v1, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v3, s[4:5], v2, v5
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v5
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v3, v2
; GFX8-NEXT: v_ashrrev_i32_e32 v2, 31, v3
-; GFX8-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v3, -v2, s[4:5]
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_ssubsat_v3i32:
@@ -511,30 +499,26 @@ define <4 x i32> @v_ssubsat_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
; GFX6-NEXT: v_sub_i32_e64 v4, s[4:5], v0, v4
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v4, v0
; GFX6-NEXT: v_ashrrev_i32_e32 v0, 31, v4
-; GFX6-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v4, -v0, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v4, s[4:5], v1, v5
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v5
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v4, v1
; GFX6-NEXT: v_ashrrev_i32_e32 v1, 31, v4
-; GFX6-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v1, v4, v1, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v4, -v1, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v4, s[4:5], v2, v6
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v6
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v4, v2
; GFX6-NEXT: v_ashrrev_i32_e32 v2, 31, v4
-; GFX6-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v2, v4, -v2, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v4, s[4:5], v3, v7
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v7
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v4, v3
; GFX6-NEXT: v_ashrrev_i32_e32 v3, 31, v4
-; GFX6-NEXT: v_xor_b32_e32 v3, 0x80000000, v3
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v3, v4, v3, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v3, v4, -v3, s[4:5]
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_ssubsat_v4i32:
@@ -544,30 +528,26 @@ define <4 x i32> @v_ssubsat_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
; GFX8-NEXT: v_sub_u32_e64 v4, s[4:5], v0, v4
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v4, v0
; GFX8-NEXT: v_ashrrev_i32_e32 v0, 31, v4
-; GFX8-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v4, -v0, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v4, s[4:5], v1, v5
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v5
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v4, v1
; GFX8-NEXT: v_ashrrev_i32_e32 v1, 31, v4
-; GFX8-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v1, v4, v1, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v4, -v1, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v4, s[4:5], v2, v6
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v6
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v4, v2
; GFX8-NEXT: v_ashrrev_i32_e32 v2, 31, v4
-; GFX8-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v4, -v2, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v4, s[4:5], v3, v7
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v7
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v4, v3
; GFX8-NEXT: v_ashrrev_i32_e32 v3, 31, v4
-; GFX8-NEXT: v_xor_b32_e32 v3, 0x80000000, v3
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v3, v4, v3, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v4, -v3, s[4:5]
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_ssubsat_v4i32:
@@ -599,58 +579,50 @@ define <8 x i32> @v_ssubsat_v8i32(<8 x i32> %lhs, <8 x i32> %rhs) {
; GFX6-NEXT: v_sub_i32_e64 v8, s[4:5], v0, v8
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v0
; GFX6-NEXT: v_ashrrev_i32_e32 v0, 31, v8
-; GFX6-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v8, -v0, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v8, s[4:5], v1, v9
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v9
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v1
; GFX6-NEXT: v_ashrrev_i32_e32 v1, 31, v8
-; GFX6-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v1, v8, v1, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v8, -v1, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v8, s[4:5], v2, v10
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v10
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v2
; GFX6-NEXT: v_ashrrev_i32_e32 v2, 31, v8
-; GFX6-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v2, v8, v2, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v2, v8, -v2, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v8, s[4:5], v3, v11
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v11
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v3
; GFX6-NEXT: v_ashrrev_i32_e32 v3, 31, v8
-; GFX6-NEXT: v_xor_b32_e32 v3, 0x80000000, v3
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v3, v8, v3, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v3, v8, -v3, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v8, s[4:5], v4, v12
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v12
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v4
; GFX6-NEXT: v_ashrrev_i32_e32 v4, 31, v8
-; GFX6-NEXT: v_xor_b32_e32 v4, 0x80000000, v4
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v4, v8, v4, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v4, v8, -v4, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v8, s[4:5], v5, v13
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v13
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v5
; GFX6-NEXT: v_ashrrev_i32_e32 v5, 31, v8
-; GFX6-NEXT: v_xor_b32_e32 v5, 0x80000000, v5
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v5, v8, v5, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v5, v8, -v5, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v8, s[4:5], v6, v14
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v14
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v6
; GFX6-NEXT: v_ashrrev_i32_e32 v6, 31, v8
-; GFX6-NEXT: v_xor_b32_e32 v6, 0x80000000, v6
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v6, v8, v6, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v6, v8, -v6, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v8, s[4:5], v7, v15
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v15
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v7
; GFX6-NEXT: v_ashrrev_i32_e32 v7, 31, v8
-; GFX6-NEXT: v_xor_b32_e32 v7, 0x80000000, v7
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v7, v8, v7, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v7, v8, -v7, s[4:5]
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_ssubsat_v8i32:
@@ -660,58 +632,50 @@ define <8 x i32> @v_ssubsat_v8i32(<8 x i32> %lhs, <8 x i32> %rhs) {
; GFX8-NEXT: v_sub_u32_e64 v8, s[4:5], v0, v8
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v0
; GFX8-NEXT: v_ashrrev_i32_e32 v0, 31, v8
-; GFX8-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v8, -v0, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v8, s[4:5], v1, v9
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v9
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v1
; GFX8-NEXT: v_ashrrev_i32_e32 v1, 31, v8
-; GFX8-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v1, v8, v1, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v8, -v1, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v8, s[4:5], v2, v10
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v10
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v2
; GFX8-NEXT: v_ashrrev_i32_e32 v2, 31, v8
-; GFX8-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v2, v8, v2, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v8, -v2, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v8, s[4:5], v3, v11
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v11
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v3
; GFX8-NEXT: v_ashrrev_i32_e32 v3, 31, v8
-; GFX8-NEXT: v_xor_b32_e32 v3, 0x80000000, v3
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v3, v8, v3, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v8, -v3, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v8, s[4:5], v4, v12
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v12
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v4
; GFX8-NEXT: v_ashrrev_i32_e32 v4, 31, v8
-; GFX8-NEXT: v_xor_b32_e32 v4, 0x80000000, v4
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v4, v8, v4, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v4, v8, -v4, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v8, s[4:5], v5, v13
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v13
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v5
; GFX8-NEXT: v_ashrrev_i32_e32 v5, 31, v8
-; GFX8-NEXT: v_xor_b32_e32 v5, 0x80000000, v5
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v5, v8, v5, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v5, v8, -v5, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v8, s[4:5], v6, v14
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v14
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v6
; GFX8-NEXT: v_ashrrev_i32_e32 v6, 31, v8
-; GFX8-NEXT: v_xor_b32_e32 v6, 0x80000000, v6
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v6, v8, v6, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v6, v8, -v6, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v8, s[4:5], v7, v15
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v15
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v7
; GFX8-NEXT: v_ashrrev_i32_e32 v7, 31, v8
-; GFX8-NEXT: v_xor_b32_e32 v7, 0x80000000, v7
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v7, v8, v7, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v7, v8, -v7, s[4:5]
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_ssubsat_v8i32:
@@ -751,116 +715,100 @@ define <16 x i32> @v_ssubsat_v16i32(<16 x i32> %lhs, <16 x i32> %rhs) {
; GFX6-NEXT: v_sub_i32_e64 v16, s[4:5], v0, v16
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v16, v0
; GFX6-NEXT: v_ashrrev_i32_e32 v0, 31, v16
-; GFX6-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v0, v16, v0, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v16, -v0, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v16, s[4:5], v1, v17
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v17
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v16, v1
; GFX6-NEXT: v_ashrrev_i32_e32 v1, 31, v16
-; GFX6-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v1, v16, v1, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v16, -v1, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v16, s[4:5], v2, v18
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v18
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v16, v2
; GFX6-NEXT: v_ashrrev_i32_e32 v2, 31, v16
-; GFX6-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v2, v16, v2, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v2, v16, -v2, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v16, s[4:5], v3, v19
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v19
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v16, v3
; GFX6-NEXT: v_ashrrev_i32_e32 v3, 31, v16
-; GFX6-NEXT: v_xor_b32_e32 v3, 0x80000000, v3
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v3, v16, v3, vcc
-; GFX6-NEXT: v_sub_i32_e64 v16, s[4:5], v4, v20
-; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v20
-; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v16, v4
-; GFX6-NEXT: v_ashrrev_i32_e32 v4, 31, v16
-; GFX6-NEXT: v_xor_b32_e32 v4, 0x80000000, v4
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v4, v16, v4, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v3, v16, -v3, s[4:5]
; GFX6-NEXT: buffer_load_dword v16, off, s[0:3], s32
+; GFX6-NEXT: v_sub_i32_e64 v17, s[4:5], v4, v20
+; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v20
+; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v4
+; GFX6-NEXT: v_ashrrev_i32_e32 v4, 31, v17
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v4, v17, -v4, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v17, s[4:5], v5, v21
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v21
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v5
; GFX6-NEXT: v_ashrrev_i32_e32 v5, 31, v17
-; GFX6-NEXT: v_xor_b32_e32 v5, 0x80000000, v5
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v5, v17, v5, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v5, v17, -v5, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v17, s[4:5], v6, v22
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v22
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v6
; GFX6-NEXT: v_ashrrev_i32_e32 v6, 31, v17
-; GFX6-NEXT: v_xor_b32_e32 v6, 0x80000000, v6
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v6, v17, v6, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v6, v17, -v6, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v17, s[4:5], v7, v23
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v23
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v7
; GFX6-NEXT: v_ashrrev_i32_e32 v7, 31, v17
-; GFX6-NEXT: v_xor_b32_e32 v7, 0x80000000, v7
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v7, v17, v7, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v7, v17, -v7, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v17, s[4:5], v8, v24
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v24
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v8
; GFX6-NEXT: v_ashrrev_i32_e32 v8, 31, v17
-; GFX6-NEXT: v_xor_b32_e32 v8, 0x80000000, v8
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v8, v17, v8, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v8, v17, -v8, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v17, s[4:5], v9, v25
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v25
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v9
; GFX6-NEXT: v_ashrrev_i32_e32 v9, 31, v17
-; GFX6-NEXT: v_xor_b32_e32 v9, 0x80000000, v9
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v9, v17, v9, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v9, v17, -v9, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v17, s[4:5], v10, v26
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v26
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v10
; GFX6-NEXT: v_ashrrev_i32_e32 v10, 31, v17
-; GFX6-NEXT: v_xor_b32_e32 v10, 0x80000000, v10
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v10, v17, v10, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v10, v17, -v10, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v17, s[4:5], v11, v27
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v27
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v11
; GFX6-NEXT: v_ashrrev_i32_e32 v11, 31, v17
-; GFX6-NEXT: v_xor_b32_e32 v11, 0x80000000, v11
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v11, v17, v11, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v11, v17, -v11, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v17, s[4:5], v12, v28
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v28
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v12
; GFX6-NEXT: v_ashrrev_i32_e32 v12, 31, v17
-; GFX6-NEXT: v_xor_b32_e32 v12, 0x80000000, v12
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v12, v17, v12, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v12, v17, -v12, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v17, s[4:5], v13, v29
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v29
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v13
; GFX6-NEXT: v_ashrrev_i32_e32 v13, 31, v17
-; GFX6-NEXT: v_xor_b32_e32 v13, 0x80000000, v13
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v13, v17, v13, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v13, v17, -v13, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v17, s[4:5], v14, v30
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v30
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v14
; GFX6-NEXT: v_ashrrev_i32_e32 v14, 31, v17
-; GFX6-NEXT: v_xor_b32_e32 v14, 0x80000000, v14
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v14, v17, v14, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v14, v17, -v14, s[4:5]
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v16
; GFX6-NEXT: v_sub_i32_e64 v16, s[4:5], v15, v16
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v16, v15
; GFX6-NEXT: v_ashrrev_i32_e32 v15, 31, v16
-; GFX6-NEXT: v_xor_b32_e32 v15, 0x80000000, v15
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v15, v16, v15, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v15, v16, -v15, s[4:5]
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_ssubsat_v16i32:
@@ -870,116 +818,100 @@ define <16 x i32> @v_ssubsat_v16i32(<16 x i32> %lhs, <16 x i32> %rhs) {
; GFX8-NEXT: v_sub_u32_e64 v16, s[4:5], v0, v16
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v16, v0
; GFX8-NEXT: v_ashrrev_i32_e32 v0, 31, v16
-; GFX8-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v0, v16, v0, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v16, -v0, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v16, s[4:5], v1, v17
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v17
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v16, v1
; GFX8-NEXT: v_ashrrev_i32_e32 v1, 31, v16
-; GFX8-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v1, v16, v1, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v16, -v1, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v16, s[4:5], v2, v18
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v18
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v16, v2
; GFX8-NEXT: v_ashrrev_i32_e32 v2, 31, v16
-; GFX8-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v2, v16, v2, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v16, -v2, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v16, s[4:5], v3, v19
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v19
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v16, v3
; GFX8-NEXT: v_ashrrev_i32_e32 v3, 31, v16
-; GFX8-NEXT: v_xor_b32_e32 v3, 0x80000000, v3
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v3, v16, v3, vcc
-; GFX8-NEXT: v_sub_u32_e64 v16, s[4:5], v4, v20
-; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v20
-; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v16, v4
-; GFX8-NEXT: v_ashrrev_i32_e32 v4, 31, v16
-; GFX8-NEXT: v_xor_b32_e32 v4, 0x80000000, v4
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v4, v16, v4, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v16, -v3, s[4:5]
; GFX8-NEXT: buffer_load_dword v16, off, s[0:3], s32
+; GFX8-NEXT: v_sub_u32_e64 v17, s[4:5], v4, v20
+; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v20
+; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v4
+; GFX8-NEXT: v_ashrrev_i32_e32 v4, 31, v17
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v4, v17, -v4, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v17, s[4:5], v5, v21
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v21
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v5
; GFX8-NEXT: v_ashrrev_i32_e32 v5, 31, v17
-; GFX8-NEXT: v_xor_b32_e32 v5, 0x80000000, v5
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v5, v17, v5, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v5, v17, -v5, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v17, s[4:5], v6, v22
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v22
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v6
; GFX8-NEXT: v_ashrrev_i32_e32 v6, 31, v17
-; GFX8-NEXT: v_xor_b32_e32 v6, 0x80000000, v6
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v6, v17, v6, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v6, v17, -v6, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v17, s[4:5], v7, v23
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v23
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v7
; GFX8-NEXT: v_ashrrev_i32_e32 v7, 31, v17
-; GFX8-NEXT: v_xor_b32_e32 v7, 0x80000000, v7
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v7, v17, v7, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v7, v17, -v7, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v17, s[4:5], v8, v24
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v24
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v8
; GFX8-NEXT: v_ashrrev_i32_e32 v8, 31, v17
-; GFX8-NEXT: v_xor_b32_e32 v8, 0x80000000, v8
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v8, v17, v8, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v8, v17, -v8, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v17, s[4:5], v9, v25
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v25
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v9
; GFX8-NEXT: v_ashrrev_i32_e32 v9, 31, v17
-; GFX8-NEXT: v_xor_b32_e32 v9, 0x80000000, v9
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v9, v17, v9, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v9, v17, -v9, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v17, s[4:5], v10, v26
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v26
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v10
; GFX8-NEXT: v_ashrrev_i32_e32 v10, 31, v17
-; GFX8-NEXT: v_xor_b32_e32 v10, 0x80000000, v10
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v10, v17, v10, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v10, v17, -v10, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v17, s[4:5], v11, v27
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v27
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v11
; GFX8-NEXT: v_ashrrev_i32_e32 v11, 31, v17
-; GFX8-NEXT: v_xor_b32_e32 v11, 0x80000000, v11
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v11, v17, v11, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v11, v17, -v11, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v17, s[4:5], v12, v28
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v28
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v12
; GFX8-NEXT: v_ashrrev_i32_e32 v12, 31, v17
-; GFX8-NEXT: v_xor_b32_e32 v12, 0x80000000, v12
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v12, v17, v12, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v12, v17, -v12, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v17, s[4:5], v13, v29
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v29
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v13
; GFX8-NEXT: v_ashrrev_i32_e32 v13, 31, v17
-; GFX8-NEXT: v_xor_b32_e32 v13, 0x80000000, v13
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v13, v17, v13, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v13, v17, -v13, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v17, s[4:5], v14, v30
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v30
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v14
; GFX8-NEXT: v_ashrrev_i32_e32 v14, 31, v17
-; GFX8-NEXT: v_xor_b32_e32 v14, 0x80000000, v14
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v14, v17, v14, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v14, v17, -v14, s[4:5]
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v16
; GFX8-NEXT: v_sub_u32_e64 v16, s[4:5], v15, v16
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v16, v15
; GFX8-NEXT: v_ashrrev_i32_e32 v15, 31, v16
-; GFX8-NEXT: v_xor_b32_e32 v15, 0x80000000, v15
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v15, v16, v15, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v15, v16, -v15, s[4:5]
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_ssubsat_v16i32:
@@ -1066,8 +998,7 @@ define i64 @v_ssubsat_i64(i64 %lhs, i64 %rhs) {
; GFX6-NEXT: v_ashrrev_i32_e32 v1, 31, v5
; GFX6-NEXT: s_xor_b64 vcc, s[4:5], vcc
; GFX6-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
-; GFX6-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
-; GFX6-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v5, -v1, vcc
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_ssubsat_i64:
@@ -1080,8 +1011,7 @@ define i64 @v_ssubsat_i64(i64 %lhs, i64 %rhs) {
; GFX8-NEXT: v_ashrrev_i32_e32 v1, 31, v5
; GFX8-NEXT: s_xor_b64 vcc, s[4:5], vcc
; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
-; GFX8-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
-; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v5, -v1, vcc
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_ssubsat_i64:
@@ -1094,8 +1024,7 @@ define i64 @v_ssubsat_i64(i64 %lhs, i64 %rhs) {
; GFX9-NEXT: v_ashrrev_i32_e32 v1, 31, v5
; GFX9-NEXT: s_xor_b64 vcc, s[4:5], vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
-; GFX9-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
-; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v5, -v1, vcc
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_ssubsat_i64:
@@ -1104,12 +1033,11 @@ define i64 @v_ssubsat_i64(i64 %lhs, i64 %rhs) {
; GFX10-NEXT: v_sub_co_u32 v4, vcc_lo, v0, v2
; GFX10-NEXT: v_sub_co_ci_u32_e32 v5, vcc_lo, v1, v3, vcc_lo
; GFX10-NEXT: v_cmp_lt_i64_e64 s4, 0, v[2:3]
-; GFX10-NEXT: v_ashrrev_i32_e32 v6, 31, v5
; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[4:5], v[0:1]
-; GFX10-NEXT: v_xor_b32_e32 v1, 0x80000000, v6
+; GFX10-NEXT: v_ashrrev_i32_e32 v1, 31, v5
; GFX10-NEXT: s_xor_b32 vcc_lo, s4, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v6, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v5, -v1, vcc_lo
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_ssubsat_i64:
@@ -1118,11 +1046,11 @@ define i64 @v_ssubsat_i64(i64 %lhs, i64 %rhs) {
; GFX11-NEXT: v_sub_co_u32 v4, vcc_lo, v0, v2
; GFX11-NEXT: v_sub_co_ci_u32_e64 v5, null, v1, v3, vcc_lo
; GFX11-NEXT: v_cmp_lt_i64_e64 s0, 0, v[2:3]
-; GFX11-NEXT: v_ashrrev_i32_e32 v6, 31, v5
; GFX11-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[4:5], v[0:1]
-; GFX11-NEXT: v_xor_b32_e32 v1, 0x80000000, v6
+; GFX11-NEXT: v_ashrrev_i32_e32 v1, 31, v5
; GFX11-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
-; GFX11-NEXT: v_dual_cndmask_b32 v0, v4, v6 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX11-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v5, -v1, vcc_lo
; GFX11-NEXT: s_setpc_b64 s[30:31]
%result = call i64 @llvm.ssub.sat.i64(i64 %lhs, i64 %rhs)
ret i64 %result
diff --git a/llvm/test/CodeGen/AMDGPU/test_isel_single_lane.ll b/llvm/test/CodeGen/AMDGPU/test_isel_single_lane.ll
deleted file mode 100644
index 726e35d..0000000
--- a/llvm/test/CodeGen/AMDGPU/test_isel_single_lane.ll
+++ /dev/null
@@ -1,47 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck -check-prefix=GCN %s
-
-declare i32 @llvm.amdgcn.atomic.cond.sub.u32.p1(ptr addrspace(1), i32)
-
-
-define amdgpu_kernel void @test_isel_single_lane(ptr addrspace(1) %in, ptr addrspace(1) %out) #0 {
-; GCN-LABEL: test_isel_single_lane:
-; GCN: ; %bb.0:
-; GCN-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: s_load_b32 s4, s[0:1], 0x58
-; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s4
-; GCN-NEXT: global_atomic_cond_sub_u32 v1, v0, v1, s[0:1] offset:16 th:TH_ATOMIC_RETURN
-; GCN-NEXT: s_wait_loadcnt 0x0
-; GCN-NEXT: v_readfirstlane_b32 s0, v1
-; GCN-NEXT: s_addk_co_i32 s0, 0xf4
-; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GCN-NEXT: s_lshl_b32 s1, s0, 4
-; GCN-NEXT: s_mul_i32 s0, s0, s1
-; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GCN-NEXT: s_lshl_b32 s0, s0, 12
-; GCN-NEXT: s_sub_co_i32 s0, s1, s0
-; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GCN-NEXT: v_mov_b32_e32 v1, s0
-; GCN-NEXT: global_store_b32 v0, v1, s[2:3]
-; GCN-NEXT: s_endpgm
- %gep0 = getelementptr i32, ptr addrspace(1) %in, i32 22
- %val0 = load i32, ptr addrspace(1) %gep0, align 4
- %gep1 = getelementptr i32, ptr addrspace(1) %in, i32 4
- %val1 = call i32 @llvm.amdgcn.atomic.cond.sub.u32.p0(ptr addrspace(1) %gep1, i32 %val0)
- %res0 = add i32 %val1, 244
- %res1 = shl i32 %res0, 4
- %res2 = mul i32 %res0, %res1
- %res3 = shl i32 %res2, 12
- %res4 = sub i32 %res1, %res3
- store i32 %res4, ptr addrspace(1) %out
- ret void
-}
-
-
-attributes #0 = {
- "amdgpu-flat-work-group-size"="1,1"
- "amdgpu-waves-per-eu"="1,1"
- "uniform-work-group-size"="true"
-}
diff --git a/llvm/test/CodeGen/AMDGPU/wave_dispatch_regs.ll b/llvm/test/CodeGen/AMDGPU/wave_dispatch_regs.ll
index 76c331c..e2ef60b 100644
--- a/llvm/test/CodeGen/AMDGPU/wave_dispatch_regs.ll
+++ b/llvm/test/CodeGen/AMDGPU/wave_dispatch_regs.ll
@@ -1,6 +1,9 @@
-; RUN: llc -mtriple=amdgcn--amdpal < %s | FileCheck -check-prefix=GCN -check-prefix=SI -enable-var-scope %s
-; RUN: llc -mtriple=amdgcn--amdpal -mcpu=tonga < %s | FileCheck -check-prefix=GCN -check-prefix=VI -enable-var-scope %s
-; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx900 < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 -enable-var-scope %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn--amdpal < %s | FileCheck -check-prefix=GCN -check-prefix=SI -enable-var-scope %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn--amdpal -mcpu=tonga < %s | FileCheck -check-prefix=GCN -check-prefix=VI -enable-var-scope %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn--amdpal -mcpu=gfx900 < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 -enable-var-scope %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn--amdpal < %s | FileCheck -check-prefix=GCN -check-prefix=SI -enable-var-scope %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn--amdpal -mcpu=tonga < %s | FileCheck -check-prefix=GCN -check-prefix=VI -enable-var-scope %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn--amdpal -mcpu=gfx900 < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 -enable-var-scope %s
; This compute shader has input args that claim that it has 17 sgprs and 5 vgprs
; in wave dispatch. Ensure that the sgpr and vgpr counts in COMPUTE_PGM_RSRC1
@@ -17,7 +20,7 @@
; GCN-NEXT: .scratch_memory_size: 0
; SI-NEXT: .sgpr_count: 0x11
; VI-NEXT: .sgpr_count: 0x60
-; GFX9-NEXT: .sgpr_count: 0x11
+; GFX9-NEXT: .sgpr_count: 0x15
; SI-NEXT: .vgpr_count: 0x5
; VI-NEXT: .vgpr_count: 0x5
; GFX9-NEXT: .vgpr_count: 0x5
diff --git a/llvm/test/CodeGen/AMDGPU/wqm.mir b/llvm/test/CodeGen/AMDGPU/wqm.mir
index 350b233..ceb1b3e 100644
--- a/llvm/test/CodeGen/AMDGPU/wqm.mir
+++ b/llvm/test/CodeGen/AMDGPU/wqm.mir
@@ -1,3 +1,4 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
# RUN: llc -mtriple=amdgcn -mcpu=fiji -verify-machineinstrs -run-pass si-wqm -o - %s | FileCheck %s
# RUN: llc -mtriple=amdgcn -mcpu=fiji -passes=si-wqm -o - %s | FileCheck %s
@@ -46,10 +47,6 @@
---
# Check for awareness that s_or_saveexec_b64 clobbers SCC
-#
-#CHECK: ENTER_STRICT_WWM
-#CHECK: S_CMP_LT_I32
-#CHECK: S_CSELECT_B32
name: test_strict_wwm_scc
alignment: 1
exposesReturnsTwice: false
@@ -80,6 +77,21 @@ body: |
bb.0:
liveins: $sgpr0, $sgpr1, $sgpr2, $vgpr0
+ ; CHECK-LABEL: name: test_strict_wwm_scc
+ ; CHECK: liveins: $sgpr0, $sgpr1, $sgpr2, $vgpr0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[ENTER_STRICT_WWM:%[0-9]+]]:sreg_64 = ENTER_STRICT_WWM -1, implicit-def $exec, implicit-def $scc, implicit $exec
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; CHECK-NEXT: S_CMP_LT_I32 0, [[COPY3]], implicit-def $scc
+ ; CHECK-NEXT: [[V_ADD_CO_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_CO_U32_e32 [[COPY]], [[COPY]], implicit-def $vcc, implicit $exec
+ ; CHECK-NEXT: [[S_CSELECT_B32_:%[0-9]+]]:sgpr_32 = S_CSELECT_B32 [[COPY1]], [[COPY2]], implicit $scc
+ ; CHECK-NEXT: [[V_ADD_CO_U32_e32_1:%[0-9]+]]:vgpr_32 = V_ADD_CO_U32_e32 [[S_CSELECT_B32_]], [[V_ADD_CO_U32_e32_]], implicit-def $vcc, implicit $exec
+ ; CHECK-NEXT: $exec = EXIT_STRICT_WWM [[ENTER_STRICT_WWM]]
+ ; CHECK-NEXT: early-clobber $vgpr0 = V_MOV_B32_e32 [[V_ADD_CO_U32_e32_1]], implicit $exec
+ ; CHECK-NEXT: SI_RETURN_TO_EPILOG $vgpr0
%3 = COPY $vgpr0
%2 = COPY $sgpr2
%1 = COPY $sgpr1
@@ -96,16 +108,35 @@ body: |
---
# Second test for awareness that s_or_saveexec_b64 clobbers SCC
# Because the entry block is treated differently.
-#
-#CHECK: %bb.1
-#CHECK: S_CMP_LT_I32
-#CHECK: COPY $scc
-#CHECK: ENTER_STRICT_WWM
-#CHECK: $scc = COPY
-#CHECK: S_CSELECT_B32
name: test_strict_wwm_scc2
tracksRegLiveness: true
body: |
+ ; CHECK-LABEL: name: test_strict_wwm_scc2
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $vgpr0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[ENTER_STRICT_WWM:%[0-9]+]]:sreg_64 = ENTER_STRICT_WWM -1, implicit-def $exec, implicit-def $scc, implicit $exec
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; CHECK-NEXT: $exec = EXIT_STRICT_WWM [[ENTER_STRICT_WWM]]
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: S_CMP_LT_I32 0, [[COPY3]], implicit-def $scc
+ ; CHECK-NEXT: [[BUFFER_LOAD_DWORD_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFEN [[COPY]], [[DEF]], 0, 0, 0, 0, implicit $exec
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:sreg_32_xm0 = COPY $scc
+ ; CHECK-NEXT: [[ENTER_STRICT_WWM1:%[0-9]+]]:sreg_64 = ENTER_STRICT_WWM -1, implicit-def $exec, implicit-def $scc, implicit $exec
+ ; CHECK-NEXT: $scc = COPY [[COPY4]]
+ ; CHECK-NEXT: [[V_ADD_CO_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_CO_U32_e32 [[COPY]], [[COPY]], implicit-def $vcc, implicit $exec
+ ; CHECK-NEXT: [[S_CSELECT_B32_:%[0-9]+]]:sgpr_32 = S_CSELECT_B32 [[COPY1]], [[COPY2]], implicit $scc
+ ; CHECK-NEXT: [[V_ADD_CO_U32_e32_1:%[0-9]+]]:vgpr_32 = V_ADD_CO_U32_e32 [[S_CSELECT_B32_]], [[V_ADD_CO_U32_e32_]], implicit-def $vcc, implicit $exec
+ ; CHECK-NEXT: $exec = EXIT_STRICT_WWM [[ENTER_STRICT_WWM1]]
+ ; CHECK-NEXT: early-clobber $vgpr0 = V_MOV_B32_e32 [[V_ADD_CO_U32_e32_1]], implicit $exec
+ ; CHECK-NEXT: $vgpr1 = COPY [[BUFFER_LOAD_DWORD_OFFEN]]
+ ; CHECK-NEXT: SI_RETURN_TO_EPILOG $vgpr0, $vgpr1
bb.0:
liveins: $sgpr0, $sgpr1, $sgpr2, $vgpr0
@@ -130,7 +161,6 @@ body: |
---
# V_SET_INACTIVE, when its second operand is undef, is replaced by a
# COPY by si-wqm. Ensure the instruction is removed.
-#CHECK-NOT: V_SET_INACTIVE
name: no_cfg
alignment: 1
exposesReturnsTwice: false
@@ -167,6 +197,28 @@ body: |
bb.0:
liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3
+ ; CHECK-LABEL: name: no_cfg
+ ; CHECK: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY1]], %subreg.sub2, [[COPY]], %subreg.sub3
+ ; CHECK-NEXT: dead [[COPY4:%[0-9]+]]:sgpr_128 = COPY [[REG_SEQUENCE]]
+ ; CHECK-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; CHECK-NEXT: [[BUFFER_LOAD_DWORDX2_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_OFFSET]].sub1
+ ; CHECK-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
+ ; CHECK-NEXT: dead [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
+ ; CHECK-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY6]], implicit $exec, implicit-def $scc
+ ; CHECK-NEXT: [[ENTER_STRICT_WWM:%[0-9]+]]:sreg_64 = ENTER_STRICT_WWM -1, implicit-def $exec, implicit-def $scc, implicit $exec
+ ; CHECK-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; CHECK-NEXT: [[V_MOV_B32_dpp:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY8]], [[COPY7]], 323, 12, 15, 0, implicit $exec
+ ; CHECK-NEXT: $exec = EXIT_STRICT_WWM [[ENTER_STRICT_WWM]]
+ ; CHECK-NEXT: early-clobber %15:vgpr_32 = V_MOV_B32_e32 [[V_MOV_B32_dpp]], implicit $exec
+ ; CHECK-NEXT: BUFFER_STORE_DWORD_OFFSET_exact %15, [[REG_SEQUENCE]], [[S_MOV_B32_]], 4, 0, 0, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
%3:sgpr_32 = COPY $sgpr3
%2:sgpr_32 = COPY $sgpr2
%1:sgpr_32 = COPY $sgpr1
@@ -189,18 +241,32 @@ body: |
---
# Ensure that strict_wwm is not put around an EXEC copy
-#CHECK-LABEL: name: copy_exec
-#CHECK: %7:sreg_64 = COPY $exec
-#CHECK-NEXT: %13:sreg_64 = ENTER_STRICT_WWM -1, implicit-def $exec, implicit-def $scc, implicit $exec
-#CHECK-NEXT: %8:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
-#CHECK-NEXT: $exec = EXIT_STRICT_WWM %13
-#CHECK-NEXT: %9:vgpr_32 = V_MBCNT_LO_U32_B32_e64 %7.sub0, 0, implicit $exec
name: copy_exec
tracksRegLiveness: true
body: |
bb.0:
liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3
+ ; CHECK-LABEL: name: copy_exec
+ ; CHECK: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY1]], %subreg.sub2, [[COPY]], %subreg.sub3
+ ; CHECK-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; CHECK-NEXT: dead [[BUFFER_LOAD_DWORDX2_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:sreg_64 = COPY $exec
+ ; CHECK-NEXT: [[ENTER_STRICT_WWM:%[0-9]+]]:sreg_64 = ENTER_STRICT_WWM -1, implicit-def $exec, implicit-def $scc, implicit $exec
+ ; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: $exec = EXIT_STRICT_WWM [[ENTER_STRICT_WWM]]
+ ; CHECK-NEXT: [[V_MBCNT_LO_U32_B32_e64_:%[0-9]+]]:vgpr_32 = V_MBCNT_LO_U32_B32_e64 [[COPY4]].sub0, 0, implicit $exec
+ ; CHECK-NEXT: [[V_MOV_B32_dpp:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_MBCNT_LO_U32_B32_e64_]], 312, 15, 15, 0, implicit $exec
+ ; CHECK-NEXT: dead [[V_READLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READLANE_B32 [[V_MOV_B32_dpp]], 63
+ ; CHECK-NEXT: early-clobber %12:vgpr_32 = V_MOV_B32_e32 [[V_MOV_B32_e32_]], implicit $exec
+ ; CHECK-NEXT: BUFFER_STORE_DWORD_OFFSET_exact %12, [[REG_SEQUENCE]], [[S_MOV_B32_]], 4, 0, 0, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
%3:sgpr_32 = COPY $sgpr3
%2:sgpr_32 = COPY $sgpr2
%1:sgpr_32 = COPY $sgpr1
@@ -224,20 +290,48 @@ body: |
---
# Check exit of WQM is still inserted correctly when SCC is live until block end.
# Critically, this tests that compilation does not fail.
-#CHECK-LABEL: name: scc_always_live
-#CHECK: %8:vreg_128 = IMAGE_SAMPLE_V4_V2 %7
-#CHECK-NEXT: S_CMP_EQ_U32 %2, 0, implicit-def $scc
-#CHECK-NEXT: undef %9.sub0:vreg_64 = nsz arcp nofpexcept V_ADD_F32_e64
-#CHECK-NEXT: %9.sub1:vreg_64 = nsz arcp nofpexcept V_MUL_F32_e32
-#CHECK-NEXT: %14:sreg_32_xm0 = COPY $scc
-#CHECK-NEXT: $exec = S_AND_B64 $exec, %13, implicit-def $scc
-#CHECK-NEXT: $scc = COPY %14
-#CHECK-NEXT: %10:vgpr_32 = nsz arcp nofpexcept V_ADD_F32_e64
-#CHECK-NEXT: %11:vreg_128 = IMAGE_SAMPLE_V4_V2
-#CHECK-NEXT: S_CBRANCH_SCC0 %bb.2
name: scc_always_live
tracksRegLiveness: true
body: |
+ ; CHECK-LABEL: name: scc_always_live
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; CHECK-NEXT: liveins: $sgpr1, $sgpr2, $vgpr1, $vgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $exec
+ ; CHECK-NEXT: $m0 = COPY $sgpr1
+ ; CHECK-NEXT: $exec = S_WQM_B64 $exec, implicit-def $scc
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_256 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[V_INTERP_P1_F32_:%[0-9]+]]:vgpr_32 = V_INTERP_P1_F32 [[COPY1]], 3, 2, implicit $mode, implicit $m0, implicit $exec
+ ; CHECK-NEXT: [[V_INTERP_P1_F32_1:%[0-9]+]]:vgpr_32 = V_INTERP_P1_F32 [[COPY2]], 3, 2, implicit $mode, implicit $m0, implicit $exec
+ ; CHECK-NEXT: undef [[COPY4:%[0-9]+]].sub0:vreg_64 = COPY [[V_INTERP_P1_F32_]]
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]].sub1:vreg_64 = COPY [[V_INTERP_P1_F32_1]]
+ ; CHECK-NEXT: [[IMAGE_SAMPLE_V4_V2_:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[COPY4]], [[DEF]], [[DEF1]], 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), align 4, addrspace 4)
+ ; CHECK-NEXT: S_CMP_EQ_U32 [[COPY3]], 0, implicit-def $scc
+ ; CHECK-NEXT: undef [[V_ADD_F32_e64_:%[0-9]+]].sub0:vreg_64 = nsz arcp nofpexcept V_ADD_F32_e64 0, [[IMAGE_SAMPLE_V4_V2_]].sub0, 0, [[V_INTERP_P1_F32_1]], 1, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: [[V_ADD_F32_e64_:%[0-9]+]].sub1:vreg_64 = nsz arcp nofpexcept V_MUL_F32_e32 [[V_INTERP_P1_F32_]], [[V_INTERP_P1_F32_1]], implicit $mode, implicit $exec
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:sreg_32_xm0 = COPY $scc
+ ; CHECK-NEXT: $exec = S_AND_B64 $exec, [[COPY]], implicit-def $scc
+ ; CHECK-NEXT: $scc = COPY [[COPY5]]
+ ; CHECK-NEXT: [[V_ADD_F32_e64_1:%[0-9]+]]:vgpr_32 = nsz arcp nofpexcept V_ADD_F32_e64 0, [[V_INTERP_P1_F32_]], 0, [[V_INTERP_P1_F32_1]], 1, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: [[IMAGE_SAMPLE_V4_V2_1:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[V_ADD_F32_e64_]], [[DEF]], [[DEF1]], 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), align 4, addrspace 4)
+ ; CHECK-NEXT: S_CBRANCH_SCC0 %bb.2, implicit $scc
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; CHECK-NEXT: BUFFER_STORE_DWORD_OFFSET_exact [[V_ADD_F32_e64_1]], [[DEF1]], [[S_MOV_B32_]], 4, 0, 0, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: $vgpr0 = COPY [[IMAGE_SAMPLE_V4_V2_]].sub0
+ ; CHECK-NEXT: $vgpr1 = COPY [[IMAGE_SAMPLE_V4_V2_]].sub1
+ ; CHECK-NEXT: $vgpr2 = COPY [[IMAGE_SAMPLE_V4_V2_1]].sub0
+ ; CHECK-NEXT: $vgpr3 = COPY [[IMAGE_SAMPLE_V4_V2_1]].sub1
+ ; CHECK-NEXT: SI_RETURN_TO_EPILOG $vgpr0, $vgpr1, $vgpr2, $vgpr3
bb.0:
liveins: $sgpr1, $sgpr2, $vgpr1, $vgpr2
@@ -281,18 +375,26 @@ body: |
---
# Check that unnecessary instructions do not get marked for WWM
#
-#CHECK-NOT: ENTER_STRICT_WWM
-#CHECK: BUFFER_LOAD_DWORDX2
-#CHECK: ENTER_STRICT_WWM
-#CHECK: V_SET_INACTIVE_B32
-#CHECK: V_SET_INACTIVE_B32
-#CHECK-NOT: ENTER_STRICT_WWM
-#CHECK: V_MAX
name: test_wwm_set_inactive_propagation
tracksRegLiveness: true
body: |
bb.0:
liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $vgpr0
+ ; CHECK-LABEL: name: test_wwm_set_inactive_propagation
+ ; CHECK: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $vgpr0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; CHECK-NEXT: [[BUFFER_LOAD_DWORDX2_OFFEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_OFFEN [[COPY1]], [[COPY]], 0, 0, 0, 0, implicit $exec
+ ; CHECK-NEXT: [[ENTER_STRICT_WWM:%[0-9]+]]:sreg_64_xexec = ENTER_STRICT_WWM -1, implicit-def $exec, implicit-def $scc, implicit $exec
+ ; CHECK-NEXT: dead [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
+ ; CHECK-NEXT: [[BUFFER_LOAD_DWORDX2_OFFEN:%[0-9]+]].sub0:vreg_64 = V_SET_INACTIVE_B32 0, [[BUFFER_LOAD_DWORDX2_OFFEN]].sub0, 0, 0, undef [[ENTER_STRICT_WWM]], implicit $exec, implicit-def $scc
+ ; CHECK-NEXT: [[BUFFER_LOAD_DWORDX2_OFFEN:%[0-9]+]].sub1:vreg_64 = V_SET_INACTIVE_B32 0, [[BUFFER_LOAD_DWORDX2_OFFEN]].sub1, 0, 0, undef [[ENTER_STRICT_WWM]], implicit $exec, implicit-def $scc
+ ; CHECK-NEXT: [[V_MAX_F64_e64_:%[0-9]+]]:vreg_64 = nnan nsz arcp contract reassoc nofpexcept V_MAX_F64_e64 0, [[BUFFER_LOAD_DWORDX2_OFFEN]], 0, [[BUFFER_LOAD_DWORDX2_OFFEN]], 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: $exec = EXIT_STRICT_WWM [[ENTER_STRICT_WWM]]
+ ; CHECK-NEXT: early-clobber $vgpr0 = V_MOV_B32_e32 [[V_MAX_F64_e64_]].sub0, implicit $exec
+ ; CHECK-NEXT: early-clobber $vgpr1 = V_MOV_B32_e32 [[V_MAX_F64_e64_]].sub1, implicit $exec
+ ; CHECK-NEXT: SI_RETURN_TO_EPILOG $vgpr0, $vgpr1
%0:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
%1:vgpr_32 = COPY $vgpr0
%2:vreg_64 = BUFFER_LOAD_DWORDX2_OFFEN %1:vgpr_32, %0:sgpr_128, 0, 0, 0, 0, implicit $exec
@@ -308,15 +410,46 @@ body: |
---
# Check that WQM marking occurs correctly through phi nodes in the live range graph.
# If not, then the initial V_MOV will not be in WQM.
-#
-#CHECK-LABEL: name: test_wqm_lr_phi
-#CHECK: COPY $exec
-#CHECK-NEXT: S_WQM
-#CHECK-NEXT: V_MOV_B32_e32 -10
-#CHECK-NEXT: V_MOV_B32_e32 0
name: test_wqm_lr_phi
tracksRegLiveness: true
body: |
+ ; CHECK-LABEL: name: test_wqm_lr_phi
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $exec
+ ; CHECK-NEXT: $exec = S_WQM_B64 $exec, implicit-def $scc
+ ; CHECK-NEXT: undef [[V_MOV_B32_e32_:%[0-9]+]].sub0:vreg_64 = V_MOV_B32_e32 -10, implicit $exec
+ ; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]].sub1:vreg_64 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: [[S_GETPC_B64_:%[0-9]+]]:sreg_64 = S_GETPC_B64
+ ; CHECK-NEXT: [[S_LOAD_DWORDX8_IMM:%[0-9]+]]:sgpr_256 = S_LOAD_DWORDX8_IMM [[S_GETPC_B64_]], 32, 0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.3(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $vcc = V_CMP_LT_U32_e64 4, 4, implicit $exec
+ ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.3, implicit $vcc
+ ; CHECK-NEXT: S_BRANCH %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: successors: %bb.3(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]].sub0:vreg_64 = V_ADD_U32_e32 1, [[V_MOV_B32_e32_]].sub1, implicit $exec
+ ; CHECK-NEXT: S_BRANCH %bb.3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3:
+ ; CHECK-NEXT: successors: %bb.4(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]].sub1:vreg_64 = V_ADD_U32_e32 1, [[V_MOV_B32_e32_]].sub1, implicit $exec
+ ; CHECK-NEXT: S_BRANCH %bb.4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4:
+ ; CHECK-NEXT: $exec = S_AND_B64 $exec, [[COPY]], implicit-def $scc
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[IMAGE_SAMPLE_V4_V2_:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX8_IMM]], [[DEF]], 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), addrspace 7)
+ ; CHECK-NEXT: $vgpr0 = COPY [[IMAGE_SAMPLE_V4_V2_]].sub0
+ ; CHECK-NEXT: $vgpr1 = COPY [[IMAGE_SAMPLE_V4_V2_]].sub1
+ ; CHECK-NEXT: SI_RETURN_TO_EPILOG $vgpr0, $vgpr1
bb.0:
undef %0.sub0:vreg_64 = V_MOV_B32_e32 -10, implicit $exec
%0.sub1:vreg_64 = V_MOV_B32_e32 0, implicit $exec
@@ -345,14 +478,20 @@ body: |
...
---
-#CHECK-LABEL: name: no_wqm_in_cs
-#CHECK-NOT: S_WQM
name: no_wqm_in_cs
tracksRegLiveness: true
body: |
bb.0:
liveins: $vgpr1, $vgpr2
+ ; CHECK-LABEL: name: no_wqm_in_cs
+ ; CHECK: liveins: $vgpr1, $vgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub0:vreg_64 = COPY $vgpr1
+ ; CHECK-NEXT: [[COPY:%[0-9]+]].sub1:vreg_64 = COPY $vgpr2
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_256 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF
+ ; CHECK-NEXT: dead [[IMAGE_SAMPLE_V4_V2_:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[COPY]], [[DEF]], [[DEF1]], 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), align 4, addrspace 4)
undef %0.sub0:vreg_64 = COPY $vgpr1
%0.sub1:vreg_64 = COPY $vgpr2
%100:sgpr_256 = IMPLICIT_DEF
@@ -362,14 +501,20 @@ body: |
...
---
-#CHECK-LABEL: name: no_wqm_in_es
-#CHECK-NOT: S_WQM
name: no_wqm_in_es
tracksRegLiveness: true
body: |
bb.0:
liveins: $vgpr1, $vgpr2
+ ; CHECK-LABEL: name: no_wqm_in_es
+ ; CHECK: liveins: $vgpr1, $vgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub0:vreg_64 = COPY $vgpr1
+ ; CHECK-NEXT: [[COPY:%[0-9]+]].sub1:vreg_64 = COPY $vgpr2
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_256 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF
+ ; CHECK-NEXT: dead [[IMAGE_SAMPLE_V4_V2_:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[COPY]], [[DEF]], [[DEF1]], 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), align 4, addrspace 4)
undef %0.sub0:vreg_64 = COPY $vgpr1
%0.sub1:vreg_64 = COPY $vgpr2
%100:sgpr_256 = IMPLICIT_DEF
@@ -379,14 +524,20 @@ body: |
...
---
-#CHECK-LABEL: name: no_wqm_in_gs
-#CHECK-NOT: S_WQM
name: no_wqm_in_gs
tracksRegLiveness: true
body: |
bb.0:
liveins: $vgpr1, $vgpr2
+ ; CHECK-LABEL: name: no_wqm_in_gs
+ ; CHECK: liveins: $vgpr1, $vgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub0:vreg_64 = COPY $vgpr1
+ ; CHECK-NEXT: [[COPY:%[0-9]+]].sub1:vreg_64 = COPY $vgpr2
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_256 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF
+ ; CHECK-NEXT: dead [[IMAGE_SAMPLE_V4_V2_:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[COPY]], [[DEF]], [[DEF1]], 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), align 4, addrspace 4)
undef %0.sub0:vreg_64 = COPY $vgpr1
%0.sub1:vreg_64 = COPY $vgpr2
%100:sgpr_256 = IMPLICIT_DEF
@@ -396,14 +547,20 @@ body: |
...
---
-#CHECK-LABEL: name: no_wqm_in_hs
-#CHECK-NOT: S_WQM
name: no_wqm_in_hs
tracksRegLiveness: true
body: |
bb.0:
liveins: $vgpr1, $vgpr2
+ ; CHECK-LABEL: name: no_wqm_in_hs
+ ; CHECK: liveins: $vgpr1, $vgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub0:vreg_64 = COPY $vgpr1
+ ; CHECK-NEXT: [[COPY:%[0-9]+]].sub1:vreg_64 = COPY $vgpr2
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_256 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF
+ ; CHECK-NEXT: dead [[IMAGE_SAMPLE_V4_V2_:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[COPY]], [[DEF]], [[DEF1]], 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), align 4, addrspace 4)
undef %0.sub0:vreg_64 = COPY $vgpr1
%0.sub1:vreg_64 = COPY $vgpr2
%100:sgpr_256 = IMPLICIT_DEF
@@ -413,14 +570,20 @@ body: |
...
---
-#CHECK-LABEL: name: no_wqm_in_ls
-#CHECK-NOT: S_WQM
name: no_wqm_in_ls
tracksRegLiveness: true
body: |
bb.0:
liveins: $vgpr1, $vgpr2
+ ; CHECK-LABEL: name: no_wqm_in_ls
+ ; CHECK: liveins: $vgpr1, $vgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub0:vreg_64 = COPY $vgpr1
+ ; CHECK-NEXT: [[COPY:%[0-9]+]].sub1:vreg_64 = COPY $vgpr2
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_256 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF
+ ; CHECK-NEXT: dead [[IMAGE_SAMPLE_V4_V2_:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[COPY]], [[DEF]], [[DEF1]], 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), align 4, addrspace 4)
undef %0.sub0:vreg_64 = COPY $vgpr1
%0.sub1:vreg_64 = COPY $vgpr2
%100:sgpr_256 = IMPLICIT_DEF
@@ -430,14 +593,20 @@ body: |
...
---
-#CHECK-LABEL: name: no_wqm_in_vs
-#CHECK-NOT: S_WQM
name: no_wqm_in_vs
tracksRegLiveness: true
body: |
bb.0:
liveins: $vgpr1, $vgpr2
+ ; CHECK-LABEL: name: no_wqm_in_vs
+ ; CHECK: liveins: $vgpr1, $vgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub0:vreg_64 = COPY $vgpr1
+ ; CHECK-NEXT: [[COPY:%[0-9]+]].sub1:vreg_64 = COPY $vgpr2
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_256 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF
+ ; CHECK-NEXT: dead [[IMAGE_SAMPLE_V4_V2_:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[COPY]], [[DEF]], [[DEF1]], 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), align 4, addrspace 4)
undef %0.sub0:vreg_64 = COPY $vgpr1
%0.sub1:vreg_64 = COPY $vgpr2
%100:sgpr_256 = IMPLICIT_DEF
diff --git a/llvm/test/CodeGen/ARM/scmp.ll b/llvm/test/CodeGen/ARM/scmp.ll
index 6e493c9..9189aee 100644
--- a/llvm/test/CodeGen/ARM/scmp.ll
+++ b/llvm/test/CodeGen/ARM/scmp.ll
@@ -4,12 +4,9 @@
define i8 @scmp_8_8(i8 signext %x, i8 signext %y) nounwind {
; CHECK-LABEL: scmp_8_8:
; CHECK: @ %bb.0:
-; CHECK-NEXT: cmp r0, r1
-; CHECK-NEXT: mov r0, #0
-; CHECK-NEXT: mov r2, #0
-; CHECK-NEXT: movwlt r0, #1
-; CHECK-NEXT: movwgt r2, #1
-; CHECK-NEXT: sub r0, r2, r0
+; CHECK-NEXT: subs r0, r0, r1
+; CHECK-NEXT: movwgt r0, #1
+; CHECK-NEXT: mvnlt r0, #0
; CHECK-NEXT: bx lr
%1 = call i8 @llvm.scmp(i8 %x, i8 %y)
ret i8 %1
@@ -18,12 +15,9 @@ define i8 @scmp_8_8(i8 signext %x, i8 signext %y) nounwind {
define i8 @scmp_8_16(i16 signext %x, i16 signext %y) nounwind {
; CHECK-LABEL: scmp_8_16:
; CHECK: @ %bb.0:
-; CHECK-NEXT: cmp r0, r1
-; CHECK-NEXT: mov r0, #0
-; CHECK-NEXT: mov r2, #0
-; CHECK-NEXT: movwlt r0, #1
-; CHECK-NEXT: movwgt r2, #1
-; CHECK-NEXT: sub r0, r2, r0
+; CHECK-NEXT: subs r0, r0, r1
+; CHECK-NEXT: movwgt r0, #1
+; CHECK-NEXT: mvnlt r0, #0
; CHECK-NEXT: bx lr
%1 = call i8 @llvm.scmp(i16 %x, i16 %y)
ret i8 %1
@@ -32,12 +26,9 @@ define i8 @scmp_8_16(i16 signext %x, i16 signext %y) nounwind {
define i8 @scmp_8_32(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: scmp_8_32:
; CHECK: @ %bb.0:
-; CHECK-NEXT: cmp r0, r1
-; CHECK-NEXT: mov r0, #0
-; CHECK-NEXT: mov r2, #0
-; CHECK-NEXT: movwlt r0, #1
-; CHECK-NEXT: movwgt r2, #1
-; CHECK-NEXT: sub r0, r2, r0
+; CHECK-NEXT: subs r0, r0, r1
+; CHECK-NEXT: movwgt r0, #1
+; CHECK-NEXT: mvnlt r0, #0
; CHECK-NEXT: bx lr
%1 = call i8 @llvm.scmp(i32 %x, i32 %y)
ret i8 %1
@@ -92,17 +83,26 @@ define i8 @scmp_8_128(i128 %x, i128 %y) nounwind {
define i32 @scmp_32_32(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: scmp_32_32:
; CHECK: @ %bb.0:
-; CHECK-NEXT: cmp r0, r1
-; CHECK-NEXT: mov r0, #0
-; CHECK-NEXT: mov r2, #0
-; CHECK-NEXT: movwlt r0, #1
-; CHECK-NEXT: movwgt r2, #1
-; CHECK-NEXT: sub r0, r2, r0
+; CHECK-NEXT: subs r0, r0, r1
+; CHECK-NEXT: movwgt r0, #1
+; CHECK-NEXT: mvnlt r0, #0
; CHECK-NEXT: bx lr
%1 = call i32 @llvm.scmp(i32 %x, i32 %y)
ret i32 %1
}
+define i32 @scmp_neg(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: scmp_neg:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: adds r0, r0, r1
+; CHECK-NEXT: movwgt r0, #1
+; CHECK-NEXT: mvnlt r0, #0
+; CHECK-NEXT: bx lr
+ %yy = sub nsw i32 0, %y
+ %1 = call i32 @llvm.scmp(i32 %x, i32 %yy)
+ ret i32 %1
+}
+
define i32 @scmp_32_64(i64 %x, i64 %y) nounwind {
; CHECK-LABEL: scmp_32_64:
; CHECK: @ %bb.0:
diff --git a/llvm/test/CodeGen/ARM/ucmp.ll b/llvm/test/CodeGen/ARM/ucmp.ll
index ad4af53..bb02014 100644
--- a/llvm/test/CodeGen/ARM/ucmp.ll
+++ b/llvm/test/CodeGen/ARM/ucmp.ll
@@ -4,12 +4,9 @@
define i8 @ucmp_8_8(i8 zeroext %x, i8 zeroext %y) nounwind {
; CHECK-LABEL: ucmp_8_8:
; CHECK: @ %bb.0:
-; CHECK-NEXT: cmp r0, r1
-; CHECK-NEXT: mov r0, #0
-; CHECK-NEXT: mov r2, #0
-; CHECK-NEXT: movwlo r0, #1
-; CHECK-NEXT: movwhi r2, #1
-; CHECK-NEXT: sub r0, r2, r0
+; CHECK-NEXT: subs r0, r0, r1
+; CHECK-NEXT: movwhi r0, #1
+; CHECK-NEXT: mvnlo r0, #0
; CHECK-NEXT: bx lr
%1 = call i8 @llvm.ucmp(i8 %x, i8 %y)
ret i8 %1
@@ -18,12 +15,9 @@ define i8 @ucmp_8_8(i8 zeroext %x, i8 zeroext %y) nounwind {
define i8 @ucmp_8_16(i16 zeroext %x, i16 zeroext %y) nounwind {
; CHECK-LABEL: ucmp_8_16:
; CHECK: @ %bb.0:
-; CHECK-NEXT: cmp r0, r1
-; CHECK-NEXT: mov r0, #0
-; CHECK-NEXT: mov r2, #0
-; CHECK-NEXT: movwlo r0, #1
-; CHECK-NEXT: movwhi r2, #1
-; CHECK-NEXT: sub r0, r2, r0
+; CHECK-NEXT: subs r0, r0, r1
+; CHECK-NEXT: movwhi r0, #1
+; CHECK-NEXT: mvnlo r0, #0
; CHECK-NEXT: bx lr
%1 = call i8 @llvm.ucmp(i16 %x, i16 %y)
ret i8 %1
@@ -32,12 +26,9 @@ define i8 @ucmp_8_16(i16 zeroext %x, i16 zeroext %y) nounwind {
define i8 @ucmp_8_32(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: ucmp_8_32:
; CHECK: @ %bb.0:
-; CHECK-NEXT: cmp r0, r1
-; CHECK-NEXT: mov r0, #0
-; CHECK-NEXT: mov r2, #0
-; CHECK-NEXT: movwlo r0, #1
-; CHECK-NEXT: movwhi r2, #1
-; CHECK-NEXT: sub r0, r2, r0
+; CHECK-NEXT: subs r0, r0, r1
+; CHECK-NEXT: movwhi r0, #1
+; CHECK-NEXT: mvnlo r0, #0
; CHECK-NEXT: bx lr
%1 = call i8 @llvm.ucmp(i32 %x, i32 %y)
ret i8 %1
@@ -92,12 +83,9 @@ define i8 @ucmp_8_128(i128 %x, i128 %y) nounwind {
define i32 @ucmp_32_32(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: ucmp_32_32:
; CHECK: @ %bb.0:
-; CHECK-NEXT: cmp r0, r1
-; CHECK-NEXT: mov r0, #0
-; CHECK-NEXT: mov r2, #0
-; CHECK-NEXT: movwlo r0, #1
-; CHECK-NEXT: movwhi r2, #1
-; CHECK-NEXT: sub r0, r2, r0
+; CHECK-NEXT: subs r0, r0, r1
+; CHECK-NEXT: movwhi r0, #1
+; CHECK-NEXT: mvnlo r0, #0
; CHECK-NEXT: bx lr
%1 = call i32 @llvm.ucmp(i32 %x, i32 %y)
ret i32 %1
diff --git a/llvm/test/CodeGen/AVR/bug-143247.ll b/llvm/test/CodeGen/AVR/bug-143247.ll
index 07c4c65..d449327 100644
--- a/llvm/test/CodeGen/AVR/bug-143247.ll
+++ b/llvm/test/CodeGen/AVR/bug-143247.ll
@@ -8,18 +8,18 @@ define void @complex_sbi() {
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: push r16
; CHECK-NEXT: push r17
-; CHECK-NEXT: ldi r24, 0
+; CHECK-NEXT: ldi r24, 1
; CHECK-NEXT: ldi r25, 0
; CHECK-NEXT: .LBB0_1: ; %while.cond
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: sbi 1, 7
-; CHECK-NEXT: adiw r24, 1
; CHECK-NEXT: movw r16, r24
; CHECK-NEXT: andi r24, 15
; CHECK-NEXT: andi r25, 0
; CHECK-NEXT: adiw r24, 1
; CHECK-NEXT: call nil
; CHECK-NEXT: movw r24, r16
+; CHECK-NEXT: adiw r24, 1
; CHECK-NEXT: rjmp .LBB0_1
entry:
br label %while.cond
diff --git a/llvm/test/CodeGen/AVR/cmp.ll b/llvm/test/CodeGen/AVR/cmp.ll
index efc9b8d..c932bda1 100644
--- a/llvm/test/CodeGen/AVR/cmp.ll
+++ b/llvm/test/CodeGen/AVR/cmp.ll
@@ -298,3 +298,18 @@ define i16 @cmp_i16_gt_1023(i16 %0) {
%3 = zext i1 %2 to i16
ret i16 %3
}
+
+define void @cmp_issue152097(i16 %a) addrspace(1) {
+; See: https://github.com/llvm/llvm-project/issues/152097
+; CHECK-LABEL: cmp_issue152097
+; CHECK: ldi r18, -1
+; CHECK-NEXT: cpi r24, -2
+; CHECK-NEXT: cpc r25, r18
+; CHECK-NEXT: ret
+ %cmp = icmp ugt i16 -2, %a
+ br i1 %cmp, label %if.then, label %if.else
+if.then:
+ ret void
+if.else:
+ ret void
+}
diff --git a/llvm/test/CodeGen/AVR/half.ll b/llvm/test/CodeGen/AVR/half.ll
new file mode 100644
index 0000000..c922293
--- /dev/null
+++ b/llvm/test/CodeGen/AVR/half.ll
@@ -0,0 +1,534 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc %s -o - -mtriple=avr | FileCheck %s
+
+; Tests for various operations on half precision float. Much of the test is
+; copied from test/CodeGen/X86/half.ll.
+
+define void @store(half %x, ptr %p) nounwind {
+; CHECK-LABEL: store:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: mov r30, r22
+; CHECK-NEXT: mov r31, r23
+; CHECK-NEXT: std Z+1, r25
+; CHECK-NEXT: st Z, r24
+; CHECK-NEXT: ret
+ store half %x, ptr %p
+ ret void
+}
+
+define half @return(ptr %p) nounwind {
+; CHECK-LABEL: return:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: mov r30, r24
+; CHECK-NEXT: mov r31, r25
+; CHECK-NEXT: ld r24, Z
+; CHECK-NEXT: ldd r25, Z+1
+; CHECK-NEXT: ret
+ %r = load half, ptr %p
+ ret half %r
+}
+
+define dso_local double @loadd(ptr nocapture readonly %a) local_unnamed_addr nounwind {
+; CHECK-LABEL: loadd:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: mov r30, r24
+; CHECK-NEXT: mov r31, r25
+; CHECK-NEXT: ldd r24, Z+2
+; CHECK-NEXT: ldd r25, Z+3
+; CHECK-NEXT: rcall __extendhfsf2
+; CHECK-NEXT: rcall __extendsfdf2
+; CHECK-NEXT: ret
+entry:
+ %arrayidx = getelementptr inbounds i16, ptr %a, i64 1
+ %0 = load i16, ptr %arrayidx, align 2
+ %1 = tail call double @llvm.convert.from.fp16.f64(i16 %0)
+ ret double %1
+}
+
+define dso_local float @loadf(ptr nocapture readonly %a) local_unnamed_addr nounwind {
+; CHECK-LABEL: loadf:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: mov r30, r24
+; CHECK-NEXT: mov r31, r25
+; CHECK-NEXT: ldd r24, Z+2
+; CHECK-NEXT: ldd r25, Z+3
+; CHECK-NEXT: rcall __extendhfsf2
+; CHECK-NEXT: ret
+entry:
+ %arrayidx = getelementptr inbounds i16, ptr %a, i64 1
+ %0 = load i16, ptr %arrayidx, align 2
+ %1 = tail call float @llvm.convert.from.fp16.f32(i16 %0)
+ ret float %1
+}
+
+define dso_local void @stored(ptr nocapture %a, double %b) local_unnamed_addr nounwind {
+; CHECK-LABEL: stored:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: push r16
+; CHECK-NEXT: push r17
+; CHECK-NEXT: mov r30, r22
+; CHECK-NEXT: mov r31, r23
+; CHECK-NEXT: mov r22, r20
+; CHECK-NEXT: mov r23, r21
+; CHECK-NEXT: mov r20, r18
+; CHECK-NEXT: mov r21, r19
+; CHECK-NEXT: mov r18, r16
+; CHECK-NEXT: mov r19, r17
+; CHECK-NEXT: mov r16, r24
+; CHECK-NEXT: mov r17, r25
+; CHECK-NEXT: mov r24, r30
+; CHECK-NEXT: mov r25, r31
+; CHECK-NEXT: rcall __truncdfhf2
+; CHECK-NEXT: mov r30, r16
+; CHECK-NEXT: mov r31, r17
+; CHECK-NEXT: std Z+1, r25
+; CHECK-NEXT: st Z, r24
+; CHECK-NEXT: pop r17
+; CHECK-NEXT: pop r16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call i16 @llvm.convert.to.fp16.f64(double %b)
+ store i16 %0, ptr %a, align 2
+ ret void
+}
+
+define dso_local void @storef(ptr nocapture %a, float %b) local_unnamed_addr nounwind {
+; CHECK-LABEL: storef:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: push r16
+; CHECK-NEXT: push r17
+; CHECK-NEXT: mov r18, r22
+; CHECK-NEXT: mov r19, r23
+; CHECK-NEXT: mov r16, r24
+; CHECK-NEXT: mov r17, r25
+; CHECK-NEXT: mov r22, r20
+; CHECK-NEXT: mov r23, r21
+; CHECK-NEXT: mov r24, r18
+; CHECK-NEXT: mov r25, r19
+; CHECK-NEXT: rcall __truncsfhf2
+; CHECK-NEXT: mov r30, r16
+; CHECK-NEXT: mov r31, r17
+; CHECK-NEXT: std Z+1, r25
+; CHECK-NEXT: st Z, r24
+; CHECK-NEXT: pop r17
+; CHECK-NEXT: pop r16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call i16 @llvm.convert.to.fp16.f32(float %b)
+ store i16 %0, ptr %a, align 2
+ ret void
+}
+
+define void @test_load_store(ptr %in, ptr %out) nounwind {
+; CHECK-LABEL: test_load_store:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: mov r30, r24
+; CHECK-NEXT: mov r31, r25
+; CHECK-NEXT: ld r24, Z
+; CHECK-NEXT: ldd r25, Z+1
+; CHECK-NEXT: mov r30, r22
+; CHECK-NEXT: mov r31, r23
+; CHECK-NEXT: std Z+1, r25
+; CHECK-NEXT: st Z, r24
+; CHECK-NEXT: ret
+ %val = load half, ptr %in
+ store half %val, ptr %out
+ ret void
+}
+
+define i16 @test_bitcast_from_half(ptr %addr) nounwind {
+; CHECK-LABEL: test_bitcast_from_half:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: mov r30, r24
+; CHECK-NEXT: mov r31, r25
+; CHECK-NEXT: ld r24, Z
+; CHECK-NEXT: ldd r25, Z+1
+; CHECK-NEXT: ret
+ %val = load half, ptr %addr
+ %val_int = bitcast half %val to i16
+ ret i16 %val_int
+}
+
+define void @test_bitcast_to_half(ptr %addr, i16 %in) nounwind {
+; CHECK-LABEL: test_bitcast_to_half:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: mov r30, r24
+; CHECK-NEXT: mov r31, r25
+; CHECK-NEXT: std Z+1, r23
+; CHECK-NEXT: st Z, r22
+; CHECK-NEXT: ret
+ %val_fp = bitcast i16 %in to half
+ store half %val_fp, ptr %addr
+ ret void
+}
+
+define half @from_bits(i16 %x) nounwind {
+; CHECK-LABEL: from_bits:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: ret
+ %res = bitcast i16 %x to half
+ ret half %res
+}
+
+define i16 @to_bits(half %x) nounwind {
+; CHECK-LABEL: to_bits:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: ret
+ %res = bitcast half %x to i16
+ ret i16 %res
+}
+
+define float @test_extend32(ptr %addr) nounwind {
+; CHECK-LABEL: test_extend32:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: mov r30, r24
+; CHECK-NEXT: mov r31, r25
+; CHECK-NEXT: ld r24, Z
+; CHECK-NEXT: ldd r25, Z+1
+; CHECK-NEXT: rcall __extendhfsf2
+; CHECK-NEXT: ret
+ %val16 = load half, ptr %addr
+ %val32 = fpext half %val16 to float
+ ret float %val32
+}
+
+define double @test_extend64(ptr %addr) nounwind {
+; CHECK-LABEL: test_extend64:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: mov r30, r24
+; CHECK-NEXT: mov r31, r25
+; CHECK-NEXT: ld r24, Z
+; CHECK-NEXT: ldd r25, Z+1
+; CHECK-NEXT: rcall __extendhfsf2
+; CHECK-NEXT: rcall __extendsfdf2
+; CHECK-NEXT: ret
+ %val16 = load half, ptr %addr
+ %val32 = fpext half %val16 to double
+ ret double %val32
+}
+
+define void @test_trunc32(float %in, ptr %addr) nounwind {
+; CHECK-LABEL: test_trunc32:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: push r16
+; CHECK-NEXT: push r17
+; CHECK-NEXT: mov r16, r20
+; CHECK-NEXT: mov r17, r21
+; CHECK-NEXT: rcall __truncsfhf2
+; CHECK-NEXT: mov r30, r16
+; CHECK-NEXT: mov r31, r17
+; CHECK-NEXT: std Z+1, r25
+; CHECK-NEXT: st Z, r24
+; CHECK-NEXT: pop r17
+; CHECK-NEXT: pop r16
+; CHECK-NEXT: ret
+ %val16 = fptrunc float %in to half
+ store half %val16, ptr %addr
+ ret void
+}
+
+define void @test_trunc64(double %in, ptr %addr) nounwind {
+; CHECK-LABEL: test_trunc64:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: rcall __truncdfhf2
+; CHECK-NEXT: mov r30, r16
+; CHECK-NEXT: mov r31, r17
+; CHECK-NEXT: std Z+1, r25
+; CHECK-NEXT: st Z, r24
+; CHECK-NEXT: ret
+ %val16 = fptrunc double %in to half
+ store half %val16, ptr %addr
+ ret void
+}
+
+define i64 @test_fptosi_i64(ptr %p) nounwind {
+; CHECK-LABEL: test_fptosi_i64:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: mov r30, r24
+; CHECK-NEXT: mov r31, r25
+; CHECK-NEXT: ld r24, Z
+; CHECK-NEXT: ldd r25, Z+1
+; CHECK-NEXT: rcall __extendhfsf2
+; CHECK-NEXT: rcall __fixsfdi
+; CHECK-NEXT: ret
+ %a = load half, ptr %p, align 2
+ %r = fptosi half %a to i64
+ ret i64 %r
+}
+
+define void @test_sitofp_i64(i64 %a, ptr %p) nounwind {
+; CHECK-LABEL: test_sitofp_i64:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: rcall __floatdisf
+; CHECK-NEXT: rcall __truncsfhf2
+; CHECK-NEXT: mov r30, r16
+; CHECK-NEXT: mov r31, r17
+; CHECK-NEXT: std Z+1, r25
+; CHECK-NEXT: st Z, r24
+; CHECK-NEXT: ret
+ %r = sitofp i64 %a to half
+ store half %r, ptr %p
+ ret void
+}
+
+define i64 @test_fptoui_i64(ptr %p) nounwind {
+; CHECK-LABEL: test_fptoui_i64:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: mov r30, r24
+; CHECK-NEXT: mov r31, r25
+; CHECK-NEXT: ld r24, Z
+; CHECK-NEXT: ldd r25, Z+1
+; CHECK-NEXT: rcall __extendhfsf2
+; CHECK-NEXT: rcall __fixunssfdi
+; CHECK-NEXT: ret
+ %a = load half, ptr %p, align 2
+ %r = fptoui half %a to i64
+ ret i64 %r
+}
+
+define void @test_uitofp_i64(i64 %a, ptr %p) nounwind {
+; CHECK-LABEL: test_uitofp_i64:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: rcall __floatundisf
+; CHECK-NEXT: rcall __truncsfhf2
+; CHECK-NEXT: mov r30, r16
+; CHECK-NEXT: mov r31, r17
+; CHECK-NEXT: std Z+1, r25
+; CHECK-NEXT: st Z, r24
+; CHECK-NEXT: ret
+ %r = uitofp i64 %a to half
+ store half %r, ptr %p
+ ret void
+}
+
+define <2 x float> @test_extend32_vec2(ptr %p) nounwind {
+; CHECK-LABEL: test_extend32_vec2:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: push r12
+; CHECK-NEXT: push r13
+; CHECK-NEXT: push r14
+; CHECK-NEXT: push r15
+; CHECK-NEXT: push r16
+; CHECK-NEXT: push r17
+; CHECK-NEXT: mov r30, r24
+; CHECK-NEXT: mov r31, r25
+; CHECK-NEXT: ld r24, Z
+; CHECK-NEXT: ldd r25, Z+1
+; CHECK-NEXT: mov r12, r30
+; CHECK-NEXT: mov r13, r31
+; CHECK-NEXT: rcall __extendhfsf2
+; CHECK-NEXT: mov r16, r22
+; CHECK-NEXT: mov r17, r23
+; CHECK-NEXT: mov r14, r24
+; CHECK-NEXT: mov r15, r25
+; CHECK-NEXT: mov r30, r12
+; CHECK-NEXT: mov r31, r13
+; CHECK-NEXT: ldd r24, Z+2
+; CHECK-NEXT: ldd r25, Z+3
+; CHECK-NEXT: rcall __extendhfsf2
+; CHECK-NEXT: mov r18, r16
+; CHECK-NEXT: mov r19, r17
+; CHECK-NEXT: mov r20, r14
+; CHECK-NEXT: mov r21, r15
+; CHECK-NEXT: pop r17
+; CHECK-NEXT: pop r16
+; CHECK-NEXT: pop r15
+; CHECK-NEXT: pop r14
+; CHECK-NEXT: pop r13
+; CHECK-NEXT: pop r12
+; CHECK-NEXT: ret
+ %a = load <2 x half>, ptr %p, align 8
+ %b = fpext <2 x half> %a to <2 x float>
+ ret <2 x float> %b
+}
+
+define <1 x double> @test_extend64_vec1(ptr %p) nounwind {
+; CHECK-LABEL: test_extend64_vec1:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: mov r30, r24
+; CHECK-NEXT: mov r31, r25
+; CHECK-NEXT: ld r24, Z
+; CHECK-NEXT: ldd r25, Z+1
+; CHECK-NEXT: rcall __extendhfsf2
+; CHECK-NEXT: rcall __extendsfdf2
+; CHECK-NEXT: ret
+ %a = load <1 x half>, ptr %p, align 8
+ %b = fpext <1 x half> %a to <1 x double>
+ ret <1 x double> %b
+}
+
+define void @test_trunc32_vec2(<2 x float> %a, ptr %p) nounwind {
+; CHECK-LABEL: test_trunc32_vec2:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: push r12
+; CHECK-NEXT: push r13
+; CHECK-NEXT: push r14
+; CHECK-NEXT: push r15
+; CHECK-NEXT: mov r14, r20
+; CHECK-NEXT: mov r15, r21
+; CHECK-NEXT: mov r12, r18
+; CHECK-NEXT: mov r13, r19
+; CHECK-NEXT: rcall __truncsfhf2
+; CHECK-NEXT: mov r30, r16
+; CHECK-NEXT: mov r31, r17
+; CHECK-NEXT: std Z+3, r25
+; CHECK-NEXT: std Z+2, r24
+; CHECK-NEXT: mov r22, r12
+; CHECK-NEXT: mov r23, r13
+; CHECK-NEXT: mov r24, r14
+; CHECK-NEXT: mov r25, r15
+; CHECK-NEXT: rcall __truncsfhf2
+; CHECK-NEXT: mov r30, r16
+; CHECK-NEXT: mov r31, r17
+; CHECK-NEXT: std Z+1, r25
+; CHECK-NEXT: st Z, r24
+; CHECK-NEXT: pop r15
+; CHECK-NEXT: pop r14
+; CHECK-NEXT: pop r13
+; CHECK-NEXT: pop r12
+; CHECK-NEXT: ret
+ %v = fptrunc <2 x float> %a to <2 x half>
+ store <2 x half> %v, ptr %p
+ ret void
+}
+
+define void @test_trunc64_vec1(<1 x double> %a, ptr %p) nounwind {
+; CHECK-LABEL: test_trunc64_vec1:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: rcall __truncdfhf2
+; CHECK-NEXT: mov r30, r16
+; CHECK-NEXT: mov r31, r17
+; CHECK-NEXT: std Z+1, r25
+; CHECK-NEXT: st Z, r24
+; CHECK-NEXT: ret
+ %v = fptrunc <1 x double> %a to <1 x half>
+ store <1 x half> %v, ptr %p
+ ret void
+}
+
+define float @test_sitofp_fadd_i32(i32 %a, ptr %b) nounwind {
+; CHECK-LABEL: test_sitofp_fadd_i32:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: push r12
+; CHECK-NEXT: push r13
+; CHECK-NEXT: push r14
+; CHECK-NEXT: push r15
+; CHECK-NEXT: push r16
+; CHECK-NEXT: push r17
+; CHECK-NEXT: mov r16, r20
+; CHECK-NEXT: mov r17, r21
+; CHECK-NEXT: rcall __floatsisf
+; CHECK-NEXT: rcall __truncsfhf2
+; CHECK-NEXT: mov r14, r24
+; CHECK-NEXT: mov r15, r25
+; CHECK-NEXT: mov r30, r16
+; CHECK-NEXT: mov r31, r17
+; CHECK-NEXT: ld r24, Z
+; CHECK-NEXT: ldd r25, Z+1
+; CHECK-NEXT: rcall __extendhfsf2
+; CHECK-NEXT: mov r16, r22
+; CHECK-NEXT: mov r17, r23
+; CHECK-NEXT: mov r12, r24
+; CHECK-NEXT: mov r13, r25
+; CHECK-NEXT: mov r24, r14
+; CHECK-NEXT: mov r25, r15
+; CHECK-NEXT: rcall __extendhfsf2
+; CHECK-NEXT: mov r18, r22
+; CHECK-NEXT: mov r19, r23
+; CHECK-NEXT: mov r20, r24
+; CHECK-NEXT: mov r21, r25
+; CHECK-NEXT: mov r22, r16
+; CHECK-NEXT: mov r23, r17
+; CHECK-NEXT: mov r24, r12
+; CHECK-NEXT: mov r25, r13
+; CHECK-NEXT: rcall __addsf3
+; CHECK-NEXT: rcall __truncsfhf2
+; CHECK-NEXT: rcall __extendhfsf2
+; CHECK-NEXT: pop r17
+; CHECK-NEXT: pop r16
+; CHECK-NEXT: pop r15
+; CHECK-NEXT: pop r14
+; CHECK-NEXT: pop r13
+; CHECK-NEXT: pop r12
+; CHECK-NEXT: ret
+ %tmp0 = load half, ptr %b
+ %tmp1 = sitofp i32 %a to half
+ %tmp2 = fadd half %tmp0, %tmp1
+ %tmp3 = fpext half %tmp2 to float
+ ret float %tmp3
+}
+
+define half @chained_fp_ops(half %x) {
+; CHECK-LABEL: chained_fp_ops:
+; CHECK: ; %bb.0: ; %start
+; CHECK-NEXT: rcall __extendhfsf2
+; CHECK-NEXT: mov r18, r22
+; CHECK-NEXT: mov r19, r23
+; CHECK-NEXT: mov r20, r24
+; CHECK-NEXT: mov r21, r25
+; CHECK-NEXT: rcall __addsf3
+; CHECK-NEXT: rcall __truncsfhf2
+; CHECK-NEXT: rcall __extendhfsf2
+; CHECK-NEXT: ldi r18, 0
+; CHECK-NEXT: ldi r19, 0
+; CHECK-NEXT: ldi r20, 0
+; CHECK-NEXT: ldi r21, 63
+; CHECK-NEXT: rcall __mulsf3
+; CHECK-NEXT: rcall __truncsfhf2
+; CHECK-NEXT: ret
+start:
+ %y = fmul half %x, 0xH4000
+ %z = fdiv half %y, 0xH4000
+ ret half %z
+}
+
+define half @test_select_cc(half) nounwind {
+; CHECK-LABEL: test_select_cc:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: push r16
+; CHECK-NEXT: push r17
+; CHECK-NEXT: rcall __extendhfsf2
+; CHECK-NEXT: ldi r16, 0
+; CHECK-NEXT: ldi r17, 0
+; CHECK-NEXT: mov r18, r16
+; CHECK-NEXT: mov r19, r17
+; CHECK-NEXT: mov r20, r16
+; CHECK-NEXT: mov r21, r17
+; CHECK-NEXT: rcall __nesf2
+; CHECK-NEXT: cpi r24, 0
+; CHECK-NEXT: breq .LBB25_2
+; CHECK-NEXT: ; %bb.1:
+; CHECK-NEXT: ldi r16, 0
+; CHECK-NEXT: ldi r17, 60
+; CHECK-NEXT: .LBB25_2:
+; CHECK-NEXT: mov r24, r16
+; CHECK-NEXT: mov r25, r17
+; CHECK-NEXT: pop r17
+; CHECK-NEXT: pop r16
+; CHECK-NEXT: ret
+ %2 = fcmp une half %0, 0xH0000
+ %3 = uitofp i1 %2 to half
+ ret half %3
+}
+
+define half @fabs(half %x) nounwind {
+; CHECK-LABEL: fabs:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: andi r25, 127
+; CHECK-NEXT: ret
+ %a = call half @llvm.fabs.f16(half %x)
+ ret half %a
+}
+
+define half @fcopysign(half %x, half %y) nounwind {
+; CHECK-LABEL: fcopysign:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: andi r22, 0
+; CHECK-NEXT: andi r23, 128
+; CHECK-NEXT: andi r25, 127
+; CHECK-NEXT: or r24, r22
+; CHECK-NEXT: or r25, r23
+; CHECK-NEXT: ret
+ %a = call half @llvm.copysign.f16(half %x, half %y)
+ ret half %a
+}
diff --git a/llvm/test/CodeGen/AVR/issue-151080.ll b/llvm/test/CodeGen/AVR/issue-151080.ll
new file mode 100644
index 0000000..9829224
--- /dev/null
+++ b/llvm/test/CodeGen/AVR/issue-151080.ll
@@ -0,0 +1,95 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -O=3 -mtriple=avr-none -mcpu=attiny85 -verify-machineinstrs | FileCheck %s
+
+declare dso_local void @foo(i16 noundef) addrspace(1)
+@ci = dso_local global [30 x i16] zeroinitializer, align 1
+define void @loopreduce() {
+; CHECK-LABEL: loopreduce:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: push r14
+; CHECK-NEXT: push r15
+; CHECK-NEXT: push r16
+; CHECK-NEXT: push r17
+; CHECK-NEXT: ldi r26, lo8(ci)
+; CHECK-NEXT: ldi r27, hi8(ci)
+; CHECK-NEXT: ldi r16, lo8(ci+60)
+; CHECK-NEXT: ldi r17, hi8(ci+60)
+; CHECK-NEXT: .LBB0_1: ; %for.body
+; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: ld r24, X+
+; CHECK-NEXT: ld r25, X+
+; CHECK-NEXT: movw r14, r26
+; CHECK-NEXT: rcall foo
+; CHECK-NEXT: movw r26, r14
+; CHECK-NEXT: cp r26, r16
+; CHECK-NEXT: cpc r27, r17
+; CHECK-NEXT: brne .LBB0_1
+; CHECK-NEXT: ; %bb.2: ; %for.cond.cleanup
+; CHECK-NEXT: pop r17
+; CHECK-NEXT: pop r16
+; CHECK-NEXT: pop r15
+; CHECK-NEXT: pop r14
+; CHECK-NEXT: ret
+entry:
+ br label %for.body
+for.body: ; preds = %entry, %for.body
+ %i.03 = phi i16 [ 0, %entry ], [ %inc, %for.body ]
+ %arrayidx = getelementptr inbounds nuw [30 x i16], ptr @ci, i16 0, i16 %i.03
+ %0 = load i16, ptr %arrayidx, align 1
+ tail call addrspace(1) void @foo(i16 noundef %0)
+ %inc = add nuw nsw i16 %i.03, 1
+ %exitcond.not = icmp eq i16 %inc, 30
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+; Exit blocks
+for.cond.cleanup: ; preds = %for.body
+ ret void
+}
+
+define void @indvar() {
+; CHECK-LABEL: indvar:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: push r12
+; CHECK-NEXT: push r13
+; CHECK-NEXT: push r14
+; CHECK-NEXT: push r15
+; CHECK-NEXT: push r17
+; CHECK-NEXT: ldi r24, 8
+; CHECK-NEXT: ldi r25, 0
+; CHECK-NEXT: movw r14, r24
+; CHECK-NEXT: ldi r24, 1
+; CHECK-NEXT: ldi r25, 0
+; CHECK-NEXT: movw r12, r24
+; CHECK-NEXT: ldi r17, 3
+; CHECK-NEXT: .LBB1_1: ; %for.body
+; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: movw r24, r12
+; CHECK-NEXT: rcall foo
+; CHECK-NEXT: movw r22, r14
+; CHECK-NEXT: movw r24, r22
+; CHECK-NEXT: rcall __mulhi3
+; CHECK-NEXT: movw r30, r14
+; CHECK-NEXT: adiw r30, 1
+; CHECK-NEXT: movw r14, r30
+; CHECK-NEXT: cpi r24, -24
+; CHECK-NEXT: cpc r25, r17
+; CHECK-NEXT: brlo .LBB1_1
+; CHECK-NEXT: ; %bb.2: ; %for.cond.cleanup
+; CHECK-NEXT: pop r17
+; CHECK-NEXT: pop r15
+; CHECK-NEXT: pop r14
+; CHECK-NEXT: pop r13
+; CHECK-NEXT: pop r12
+; CHECK-NEXT: ret
+entry:
+ br label %for.body
+for.body: ; preds = %entry, %for.body
+ %i.03 = phi i16 [ 7, %entry ], [ %inc, %for.body ]
+ tail call addrspace(1) void @foo(i16 noundef 1)
+ %inc = add nuw nsw i16 %i.03, 1
+ %mul = mul nuw nsw i16 %inc, %inc
+ %cmp = icmp samesign ult i16 %mul, 1000
+ br i1 %cmp, label %for.body, label %for.cond.cleanup
+for.cond.cleanup: ; preds = %for.body
+ ret void
+}
+
diff --git a/llvm/test/CodeGen/AVR/llvm.sincos.ll b/llvm/test/CodeGen/AVR/llvm.sincos.ll
index b70b8d3..ff01da9 100644
--- a/llvm/test/CodeGen/AVR/llvm.sincos.ll
+++ b/llvm/test/CodeGen/AVR/llvm.sincos.ll
@@ -11,8 +11,6 @@ define { half, half } @test_sincos_f16(half %a) #0 {
; CHECK-NEXT: push r15
; CHECK-NEXT: push r16
; CHECK-NEXT: push r17
-; CHECK-NEXT: mov r24, r22
-; CHECK-NEXT: mov r25, r23
; CHECK-NEXT: rcall __extendhfsf2
; CHECK-NEXT: mov r16, r22
; CHECK-NEXT: mov r17, r23
@@ -28,10 +26,8 @@ define { half, half } @test_sincos_f16(half %a) #0 {
; CHECK-NEXT: mov r25, r15
; CHECK-NEXT: rcall cos
; CHECK-NEXT: rcall __truncsfhf2
-; CHECK-NEXT: mov r22, r24
-; CHECK-NEXT: mov r23, r25
-; CHECK-NEXT: mov r18, r12
-; CHECK-NEXT: mov r19, r13
+; CHECK-NEXT: mov r22, r12
+; CHECK-NEXT: mov r23, r13
; CHECK-NEXT: pop r17
; CHECK-NEXT: pop r16
; CHECK-NEXT: pop r15
@@ -46,13 +42,9 @@ define { half, half } @test_sincos_f16(half %a) #0 {
define half @test_sincos_f16_only_use_sin(half %a) #0 {
; CHECK-LABEL: test_sincos_f16_only_use_sin:
; CHECK: ; %bb.0:
-; CHECK-NEXT: mov r24, r22
-; CHECK-NEXT: mov r25, r23
; CHECK-NEXT: rcall __extendhfsf2
; CHECK-NEXT: rcall sin
; CHECK-NEXT: rcall __truncsfhf2
-; CHECK-NEXT: mov r22, r24
-; CHECK-NEXT: mov r23, r25
; CHECK-NEXT: ret
%result = call { half, half } @llvm.sincos.f16(half %a)
%result.0 = extractvalue { half, half } %result, 0
@@ -62,13 +54,9 @@ define half @test_sincos_f16_only_use_sin(half %a) #0 {
define half @test_sincos_f16_only_use_cos(half %a) #0 {
; CHECK-LABEL: test_sincos_f16_only_use_cos:
; CHECK: ; %bb.0:
-; CHECK-NEXT: mov r24, r22
-; CHECK-NEXT: mov r25, r23
; CHECK-NEXT: rcall __extendhfsf2
; CHECK-NEXT: rcall cos
; CHECK-NEXT: rcall __truncsfhf2
-; CHECK-NEXT: mov r22, r24
-; CHECK-NEXT: mov r23, r25
; CHECK-NEXT: ret
%result = call { half, half } @llvm.sincos.f16(half %a)
%result.1 = extractvalue { half, half } %result, 1
@@ -90,48 +78,50 @@ define { <2 x half>, <2 x half> } @test_sincos_v2f16(<2 x half> %a) #0 {
; CHECK-NEXT: push r15
; CHECK-NEXT: push r16
; CHECK-NEXT: push r17
-; CHECK-NEXT: mov r10, r22
-; CHECK-NEXT: mov r11, r23
+; CHECK-NEXT: mov r16, r24
+; CHECK-NEXT: mov r17, r25
+; CHECK-NEXT: mov r24, r22
+; CHECK-NEXT: mov r25, r23
; CHECK-NEXT: rcall __extendhfsf2
-; CHECK-NEXT: mov r16, r22
-; CHECK-NEXT: mov r17, r23
-; CHECK-NEXT: mov r14, r24
-; CHECK-NEXT: mov r15, r25
-; CHECK-NEXT: rcall sin
-; CHECK-NEXT: rcall __truncsfhf2
+; CHECK-NEXT: mov r14, r22
+; CHECK-NEXT: mov r15, r23
; CHECK-NEXT: mov r12, r24
; CHECK-NEXT: mov r13, r25
-; CHECK-NEXT: mov r24, r10
-; CHECK-NEXT: mov r25, r11
+; CHECK-NEXT: rcall sin
+; CHECK-NEXT: rcall __truncsfhf2
+; CHECK-NEXT: mov r10, r24
+; CHECK-NEXT: mov r11, r25
+; CHECK-NEXT: mov r24, r16
+; CHECK-NEXT: mov r25, r17
; CHECK-NEXT: rcall __extendhfsf2
-; CHECK-NEXT: mov r10, r22
-; CHECK-NEXT: mov r11, r23
+; CHECK-NEXT: mov r16, r22
+; CHECK-NEXT: mov r17, r23
; CHECK-NEXT: mov r8, r24
; CHECK-NEXT: mov r9, r25
-; CHECK-NEXT: rcall cos
+; CHECK-NEXT: rcall sin
; CHECK-NEXT: rcall __truncsfhf2
; CHECK-NEXT: mov r6, r24
; CHECK-NEXT: mov r7, r25
-; CHECK-NEXT: mov r22, r10
-; CHECK-NEXT: mov r23, r11
-; CHECK-NEXT: mov r24, r8
-; CHECK-NEXT: mov r25, r9
-; CHECK-NEXT: rcall sin
+; CHECK-NEXT: mov r22, r14
+; CHECK-NEXT: mov r23, r15
+; CHECK-NEXT: mov r24, r12
+; CHECK-NEXT: mov r25, r13
+; CHECK-NEXT: rcall cos
; CHECK-NEXT: rcall __truncsfhf2
-; CHECK-NEXT: mov r10, r24
-; CHECK-NEXT: mov r11, r25
+; CHECK-NEXT: mov r14, r24
+; CHECK-NEXT: mov r15, r25
; CHECK-NEXT: mov r22, r16
; CHECK-NEXT: mov r23, r17
-; CHECK-NEXT: mov r24, r14
-; CHECK-NEXT: mov r25, r15
+; CHECK-NEXT: mov r24, r8
+; CHECK-NEXT: mov r25, r9
; CHECK-NEXT: rcall cos
; CHECK-NEXT: rcall __truncsfhf2
; CHECK-NEXT: mov r18, r10
; CHECK-NEXT: mov r19, r11
-; CHECK-NEXT: mov r20, r12
-; CHECK-NEXT: mov r21, r13
-; CHECK-NEXT: mov r22, r6
-; CHECK-NEXT: mov r23, r7
+; CHECK-NEXT: mov r20, r6
+; CHECK-NEXT: mov r21, r7
+; CHECK-NEXT: mov r22, r14
+; CHECK-NEXT: mov r23, r15
; CHECK-NEXT: pop r17
; CHECK-NEXT: pop r16
; CHECK-NEXT: pop r15
diff --git a/llvm/test/CodeGen/AVR/load.ll b/llvm/test/CodeGen/AVR/load.ll
index 5de6b48..6a1e067 100644
--- a/llvm/test/CodeGen/AVR/load.ll
+++ b/llvm/test/CodeGen/AVR/load.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mattr=avr6,sram < %s -mtriple=avr -verify-machineinstrs | FileCheck %s
+; RUN: llc -mattr=avr6,sram < %s -mtriple=avr-none -verify-machineinstrs | FileCheck %s
define i8 @load8(ptr %x) {
; CHECK-LABEL: load8:
@@ -98,9 +98,33 @@ while.end: ; preds = %while.body, %entry
ret i16 %r.0.lcssa
}
+define i16 @load16postincloopreduce(ptr %p, i16 %cnt) {
+; CHECK-LABEL: load16postincloopreduce:
+; CHECK: ld {{.*}}, {{[XYZ]}}+
+; CHECK: ld {{.*}}, {{[XYZ]}}+
+entry:
+ %cmp3 = icmp sgt i16 %cnt, 0
+ br i1 %cmp3, label %for.body, label %for.cond.cleanup
+for.cond.cleanup: ; preds = %for.body, %entry
+ %sum.0.lcssa = phi i16 [ 0, %entry ], [ %add, %for.body ]
+ ret i16 %sum.0.lcssa
+for.body: ; preds = %entry, %for.body
+ %i.06 = phi i16 [ %inc, %for.body ], [ 0, %entry ]
+ %sum.05 = phi i16 [ %add, %for.body ], [ 0, %entry ]
+ %p.addr.04 = phi ptr [ %incdec.ptr, %for.body ], [ %p, %entry ]
+ %incdec.ptr = getelementptr inbounds nuw i8, ptr %p.addr.04, i16 2
+ %0 = load i16, ptr %p.addr.04, align 1
+ %add = add nsw i16 %0, %sum.05
+ %inc = add nuw nsw i16 %i.06, 1
+ %exitcond.not = icmp eq i16 %inc, %cnt
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
define i8 @load8predec(ptr %x, i8 %y) {
; CHECK-LABEL: load8predec:
-; CHECK: ld {{.*}}, -{{[XYZ]}}
+; TODO: ld {{.*}}, -{{[XYZ]}}
+; CHECK: sbiw r26, 1
+; CHECK: ld {{.*}}, X
entry:
%tobool6 = icmp eq i8 %y, 0
br i1 %tobool6, label %while.end, label %while.body
@@ -121,8 +145,12 @@ while.end: ; preds = %while.body, %entry
define i16 @load16predec(ptr %x, i16 %y) {
; CHECK-LABEL: load16predec:
-; CHECK: ld {{.*}}, -{{[XYZ]}}
-; CHECK: ld {{.*}}, -{{[XYZ]}}
+; TODO: ld {{.*}}, -{{[XYZ]}}
+; TODO: ld {{.*}}, -{{[XYZ]}}
+; CHECK: sbiw r24, 2
+; CHECK: movw r30, r24
+; CHECK: ld {{.*}}, Z
+; CHECK: ldd {{.*}}, Z+1
entry:
%tobool2 = icmp eq i16 %y, 0
br i1 %tobool2, label %while.end, label %while.body
diff --git a/llvm/test/CodeGen/AVR/shift.ll b/llvm/test/CodeGen/AVR/shift.ll
index 9836f93..1bd9b45 100644
--- a/llvm/test/CodeGen/AVR/shift.ll
+++ b/llvm/test/CodeGen/AVR/shift.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=avr -mtriple=avr -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mtriple=avr-none -verify-machineinstrs | FileCheck %s
; Optimize for speed.
define i8 @shift_i8_i8_speed(i8 %a, i8 %b) {
diff --git a/llvm/test/CodeGen/AVR/store.ll b/llvm/test/CodeGen/AVR/store.ll
index aab0270..9c41645 100644
--- a/llvm/test/CodeGen/AVR/store.ll
+++ b/llvm/test/CodeGen/AVR/store.ll
@@ -94,7 +94,9 @@ while.end: ; preds = %while.body, %entry
define void @store8predec(ptr %x, i8 %y) {
; CHECK-LABEL: store8predec:
-; CHECK: st -{{[XYZ]}}, {{.*}}
+; TODO: st -{{[XYZ]}}, {{.*}}
+; CHECK: sbiw r26, 1
+; CHECK: st X, {{.*}}
entry:
%tobool3 = icmp eq i8 %y, 0
br i1 %tobool3, label %while.end, label %while.body
@@ -112,8 +114,12 @@ while.end: ; preds = %while.body, %entry
define void @store16predec(ptr %x, i16 %y) {
; CHECK-LABEL: store16predec:
-; CHECK: st -{{[XYZ]}}, {{.*}}
-; CHECK: st -{{[XYZ]}}, {{.*}}
+; TODO: st -{{[XYZ]}}, {{.*}}
+; TODO: st -{{[XYZ]}}, {{.*}}
+; CHECK: sbiw r24, 2
+; CHECK: movw r30, r24
+; CHECK: std Z+1, {{.*}}
+; CHECK: st Z, {{.*}}
entry:
%tobool3 = icmp eq i16 %y, 0
br i1 %tobool3, label %while.end, label %while.body
diff --git a/llvm/test/CodeGen/BPF/loop-exit-cond.ll b/llvm/test/CodeGen/BPF/loop-exit-cond.ll
index 69fe714..fa6a4a0 100644
--- a/llvm/test/CodeGen/BPF/loop-exit-cond.ll
+++ b/llvm/test/CodeGen/BPF/loop-exit-cond.ll
@@ -35,14 +35,14 @@ define dso_local i32 @test(i32 %len, ptr %data) #0 {
; CHECK-NEXT: br i1 [[OR_COND]], label [[FOR_BODY:%.*]], label [[IF_END:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_05:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 1, [[ENTRY:%.*]] ]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull [[D]]) #[[ATTR3:[0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[D]]) #[[ATTR3:[0-9]+]]
; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[DATA]], align 1, !tbaa [[TBAA3:![0-9]+]]
; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i8 [[TMP1]], 0
; CHECK-NEXT: [[NARROW:%.*]] = select i1 [[TOBOOL_NOT]], i8 48, i8 [[TMP1]]
; CHECK-NEXT: [[CONV2:%.*]] = sext i8 [[NARROW]] to i64
; CHECK-NEXT: store i64 [[CONV2]], ptr [[D]], align 8, !tbaa [[TBAA6:![0-9]+]]
; CHECK-NEXT: call void @foo(ptr nonnull @.str, i32 [[I_05]], ptr nonnull [[D]]) #[[ATTR3]]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull [[D]]) #[[ATTR3]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[D]]) #[[ATTR3]]
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_05]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[LEN]]
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[IF_END]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
@@ -61,7 +61,7 @@ entry:
br i1 %cmp, label %if.then, label %if.end
if.then: ; preds = %entry
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #3
+ call void @llvm.lifetime.start.p0(ptr %i) #3
store i32 1, ptr %i, align 4, !tbaa !3
br label %for.cond
@@ -73,11 +73,11 @@ for.cond: ; preds = %for.inc, %if.then
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #3
+ call void @llvm.lifetime.end.p0(ptr %i) #3
br label %for.end
for.body: ; preds = %for.cond
- call void @llvm.lifetime.start.p0(i64 8, ptr %d) #3
+ call void @llvm.lifetime.start.p0(ptr %d) #3
%3 = load ptr, ptr %data.addr, align 8, !tbaa !7
%4 = load i8, ptr %3, align 1, !tbaa !9
%conv = sext i8 %4 to i32
@@ -96,7 +96,7 @@ cond.end: ; preds = %cond.false, %cond.t
store i64 %conv2, ptr %d, align 8, !tbaa !10
%5 = load i32, ptr %i, align 4, !tbaa !3
call void @foo(ptr @.str, i32 %5, ptr %d)
- call void @llvm.lifetime.end.p0(i64 8, ptr %d) #3
+ call void @llvm.lifetime.end.p0(ptr %d) #3
br label %for.inc
for.inc: ; preds = %cond.end
@@ -113,12 +113,12 @@ if.end: ; preds = %for.end, %entry
}
; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare dso_local void @foo(ptr, i32, ptr) #2
; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
attributes #0 = { nounwind "frame-pointer"="all" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" }
attributes #1 = { argmemonly nofree nosync nounwind willreturn }
diff --git a/llvm/test/CodeGen/BPF/vla.ll b/llvm/test/CodeGen/BPF/vla.ll
index 9a22769..708b41e 100644
--- a/llvm/test/CodeGen/BPF/vla.ll
+++ b/llvm/test/CodeGen/BPF/vla.ll
@@ -33,17 +33,17 @@ define dso_local i32 @test1() {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[SAVED_STACK:%.*]] = alloca ptr, align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
; CHECK-NEXT: store i32 8, ptr [[A]], align 4
; CHECK-NEXT: [[VLA:%.*]] = alloca i8, i64 68, align 1
; CHECK-NEXT: call void @foo(ptr [[VLA]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
; CHECK-NEXT: ret i32 0
;
entry:
%a = alloca i32, align 4
%saved_stack = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
store i32 8, ptr %a, align 4
%0 = call ptr @llvm.stacksave()
store ptr %0, ptr %saved_stack, align 8
@@ -51,11 +51,11 @@ entry:
call void @foo(ptr %vla)
%1 = load ptr, ptr %saved_stack, align 8
call void @llvm.stackrestore(ptr %1)
- call void @llvm.lifetime.end.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
ret i32 0
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare ptr @llvm.stacksave()
@@ -63,7 +63,7 @@ declare dso_local void @foo(ptr)
declare void @llvm.stackrestore(ptr)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
define dso_local i32 @test2(i32 %b) {
; CHECK-LABEL: @test2(
@@ -73,7 +73,7 @@ define dso_local i32 @test2(i32 %b) {
; CHECK-NEXT: [[SAVED_STACK:%.*]] = alloca ptr, align 8
; CHECK-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
; CHECK-NEXT: store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
; CHECK-NEXT: store i32 8, ptr [[A]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 8, [[TMP1]]
@@ -81,7 +81,7 @@ define dso_local i32 @test2(i32 %b) {
; CHECK-NEXT: [[VLA:%.*]] = alloca i8, i64 [[TMP2]], align 1
; CHECK-NEXT: store i64 [[TMP2]], ptr [[__VLA_EXPR0]], align 8
; CHECK-NEXT: call void @foo(ptr [[VLA]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
; CHECK-NEXT: ret i32 0
;
entry:
@@ -90,7 +90,7 @@ entry:
%saved_stack = alloca ptr, align 8
%__vla_expr0 = alloca i64, align 8
store i32 %b, ptr %b.addr, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
store i32 8, ptr %a, align 4
%0 = load i32, ptr %b.addr, align 4
%add = add nsw i32 8, %0
@@ -102,6 +102,6 @@ entry:
call void @foo(ptr %vla)
%3 = load ptr, ptr %saved_stack, align 8
call void @llvm.stackrestore(ptr %3)
- call void @llvm.lifetime.end.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
ret i32 0
}
diff --git a/llvm/test/CodeGen/DirectX/Binding/binding-overlap-7.ll b/llvm/test/CodeGen/DirectX/Binding/binding-overlap-7.ll
new file mode 100644
index 0000000..25f81dd
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/Binding/binding-overlap-7.ll
@@ -0,0 +1,35 @@
+; Use llc for this test so that we don't abort after the first error.
+; RUN: not llc %s -o /dev/null 2>&1 | FileCheck %s
+
+; Check that there is no overlap with an unbounded array in a different space
+
+ ; Buffer<double> A[2] : register(t2, space4);
+ ; Buffer<double> B : register(t20, space5); // does not overlap
+ ; Buffer<double> C[] : register(t2, space4); // overlaps with A
+
+; CHECK: error: resource A at register 2 overlaps with resource C at register 2 in space 4
+; CHECK-NOT: error: resource C at register 2 overlaps with resource B at register 20 in space 5
+
+target triple = "dxil-pc-shadermodel6.3-library"
+
+@A.str = private unnamed_addr constant [2 x i8] c"A\00", align 1
+@B.str = private unnamed_addr constant [2 x i8] c"B\00", align 1
+@C.str = private unnamed_addr constant [2 x i8] c"C\00", align 1
+
+define void @test_not_overlapping_in_different_spaces() {
+entry:
+
+ ; Buffer<double> A[2] : register(t2, space4);
+ %h0 = call target("dx.TypedBuffer", double, 0, 0, 0)
+ @llvm.dx.resource.handlefrombinding(i32 4, i32 2, i32 2, i32 10, i1 false, ptr @A.str)
+
+ ; Buffer<double> B : register(t20, space5);
+ %h1 = call target("dx.TypedBuffer", i64, 0, 0, 0)
+ @llvm.dx.resource.handlefrombinding(i32 5, i32 20, i32 1, i32 0, i1 false, ptr @B.str)
+
+ ; Buffer<double> C[] : register(t2, space4);
+ %h2 = call target("dx.TypedBuffer", double, 0, 0, 0)
+ @llvm.dx.resource.handlefrombinding(i32 4, i32 2, i32 -1, i32 10, i1 false, ptr @C.str)
+
+ ret void
+}
diff --git a/llvm/test/CodeGen/DirectX/ShaderFlags/lifetimes-noint64op.ll b/llvm/test/CodeGen/DirectX/ShaderFlags/lifetimes-noint64op.ll
index 736c86e..5cf4fe8 100644
--- a/llvm/test/CodeGen/DirectX/ShaderFlags/lifetimes-noint64op.ll
+++ b/llvm/test/CodeGen/DirectX/ShaderFlags/lifetimes-noint64op.ll
@@ -15,16 +15,16 @@ target triple = "dxil-pc-shadermodel6.7-library"
define void @lifetimes() #0 {
%a = alloca [4 x i32], align 8
- call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %a)
- call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %a)
+ call void @llvm.lifetime.start.p0(ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
ret void
}
; Function Attrs: nounwind memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64, ptr) #1
+declare void @llvm.lifetime.start.p0(ptr) #1
; Function Attrs: nounwind memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64, ptr) #1
+declare void @llvm.lifetime.end.p0(ptr) #1
attributes #0 = { convergent norecurse nounwind "hlsl.export"}
attributes #1 = { nounwind memory(argmem: readwrite) }
diff --git a/llvm/test/CodeGen/DirectX/imad.ll b/llvm/test/CodeGen/DirectX/imad.ll
index 5d9463d..2e612f0 100644
--- a/llvm/test/CodeGen/DirectX/imad.ll
+++ b/llvm/test/CodeGen/DirectX/imad.ll
@@ -1,17 +1,13 @@
-; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+; RUN: opt -S -scalarizer -dxil-op-lower < %s | FileCheck %s
; Make sure dxil operation function calls for round are generated for float and half.
-; CHECK:call i16 @dx.op.tertiary.i16(i32 48, i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}}) #[[#ATTR:]]
-; CHECK:call i32 @dx.op.tertiary.i32(i32 48, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) #[[#ATTR]]
-; CHECK:call i64 @dx.op.tertiary.i64(i32 48, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}) #[[#ATTR]]
-
-; CHECK: attributes #[[#ATTR]] = {{{.*}} memory(none) {{.*}}}
target datalayout = "e-m:e-p:32:32-i1:32-i8:8-i16:16-i32:32-i64:64-f16:16-f32:32-f64:64-n8:16:32:64"
target triple = "dxil-pc-shadermodel6.7-library"
; Function Attrs: noinline nounwind optnone
define noundef i16 @imad_short(i16 noundef %p0, i16 noundef %p1, i16 noundef %p2) #0 {
entry:
+ ; CHECK: call i16 @dx.op.tertiary.i16(i32 48, i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}}) #[[#ATTR:]]
%p2.addr = alloca i16, align 2
%p1.addr = alloca i16, align 2
%p0.addr = alloca i16, align 2
@@ -31,6 +27,7 @@ declare i16 @llvm.dx.imad.i16(i16, i16, i16) #1
; Function Attrs: noinline nounwind optnone
define noundef i32 @imad_int(i32 noundef %p0, i32 noundef %p1, i32 noundef %p2) #0 {
entry:
+ ; CHECK: call i32 @dx.op.tertiary.i32(i32 48, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) #[[#ATTR]]
%p2.addr = alloca i32, align 4
%p1.addr = alloca i32, align 4
%p0.addr = alloca i32, align 4
@@ -50,6 +47,7 @@ declare i32 @llvm.dx.imad.i32(i32, i32, i32) #1
; Function Attrs: noinline nounwind optnone
define noundef i64 @imad_int64(i64 noundef %p0, i64 noundef %p1, i64 noundef %p2) #0 {
entry:
+ ; CHECK: call i64 @dx.op.tertiary.i64(i32 48, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}) #[[#ATTR]]
%p2.addr = alloca i64, align 8
%p1.addr = alloca i64, align 8
%p0.addr = alloca i64, align 8
@@ -65,3 +63,95 @@ entry:
; Function Attrs: nocallback nofree nosync nounwind willreturn
declare i64 @llvm.dx.imad.i64(i64, i64, i64) #1
+
+; Function Attrs: noinline nounwind optnone
+define noundef <4 x i16> @imad_int16_t4(<4 x i16> noundef %p0, <4 x i16> noundef %p1, <4 x i16> noundef %p2) #0 {
+entry:
+ ; CHECK: extractelement <4 x i16> %p0, i64 0
+ ; CHECK: extractelement <4 x i16> %p1, i64 0
+ ; CHECK: extractelement <4 x i16> %p2, i64 0
+ ; CHECK: call i16 @dx.op.tertiary.i16(i32 48, i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}}) #[[#ATTR]]
+ ; CHECK: extractelement <4 x i16> %p0, i64 1
+ ; CHECK: extractelement <4 x i16> %p1, i64 1
+ ; CHECK: extractelement <4 x i16> %p2, i64 1
+ ; CHECK: call i16 @dx.op.tertiary.i16(i32 48, i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}}) #[[#ATTR]]
+ ; CHECK: extractelement <4 x i16> %p0, i64 2
+ ; CHECK: extractelement <4 x i16> %p1, i64 2
+ ; CHECK: extractelement <4 x i16> %p2, i64 2
+ ; CHECK: call i16 @dx.op.tertiary.i16(i32 48, i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}}) #[[#ATTR]]
+ ; CHECK: extractelement <4 x i16> %p0, i64 3
+ ; CHECK: extractelement <4 x i16> %p1, i64 3
+ ; CHECK: extractelement <4 x i16> %p2, i64 3
+ ; CHECK: call i16 @dx.op.tertiary.i16(i32 48, i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}}) #[[#ATTR]]
+ ; CHECK: insertelement <4 x i16> poison, i16 %{{.*}}, i64 0
+ ; CHECK: insertelement <4 x i16> %{{.*}}, i16 %{{.*}}, i64 1
+ ; CHECK: insertelement <4 x i16> %{{.*}}, i16 %{{.*}}, i64 2
+ ; CHECK: insertelement <4 x i16> %{{.*}}, i16 %{{.*}}, i64 3
+ %dx.imad = call <4 x i16> @llvm.dx.imad.v4i16(<4 x i16> %p0, <4 x i16> %p1, <4 x i16> %p2)
+ ret <4 x i16> %dx.imad
+}
+
+; Function Attrs: nocallback nofree nosync nounwind willreturn
+declare <4 x i16> @llvm.dx.imad.v4i16(<4 x i16>, <4 x i16>, <4 x i16>) #1
+
+; Function Attrs: noinline nounwind optnone
+define noundef <4 x i32> @imad_int4(<4 x i32> noundef %p0, <4 x i32> noundef %p1, <4 x i32> noundef %p2) #0 {
+entry:
+ ; CHECK: extractelement <4 x i32> %p0, i64 0
+ ; CHECK: extractelement <4 x i32> %p1, i64 0
+ ; CHECK: extractelement <4 x i32> %p2, i64 0
+ ; CHECK: call i32 @dx.op.tertiary.i32(i32 48, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) #[[#ATTR]]
+ ; CHECK: extractelement <4 x i32> %p0, i64 1
+ ; CHECK: extractelement <4 x i32> %p1, i64 1
+ ; CHECK: extractelement <4 x i32> %p2, i64 1
+ ; CHECK: call i32 @dx.op.tertiary.i32(i32 48, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) #[[#ATTR]]
+ ; CHECK: extractelement <4 x i32> %p0, i64 2
+ ; CHECK: extractelement <4 x i32> %p1, i64 2
+ ; CHECK: extractelement <4 x i32> %p2, i64 2
+ ; CHECK: call i32 @dx.op.tertiary.i32(i32 48, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) #[[#ATTR]]
+ ; CHECK: extractelement <4 x i32> %p0, i64 3
+ ; CHECK: extractelement <4 x i32> %p1, i64 3
+ ; CHECK: extractelement <4 x i32> %p2, i64 3
+ ; CHECK: call i32 @dx.op.tertiary.i32(i32 48, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) #[[#ATTR]]
+ ; CHECK: insertelement <4 x i32> poison, i32 %{{.*}}, i64 0
+ ; CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i64 1
+ ; CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i64 2
+ ; CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i64 3
+ %dx.imad = call <4 x i32> @llvm.dx.imad.v4i32(<4 x i32> %p0, <4 x i32> %p1, <4 x i32> %p2)
+ ret <4 x i32> %dx.imad
+}
+
+; Function Attrs: nocallback nofree nosync nounwind willreturn
+declare <4 x i32> @llvm.dx.imad.v4i32(<4 x i32>, <4 x i32>, <4 x i32>) #1
+
+; Function Attrs: noinline nounwind optnone
+define noundef <4 x i64> @imad_int64_t4(<4 x i64> noundef %p0, <4 x i64> noundef %p1, <4 x i64> noundef %p2) #0 {
+entry:
+ ; CHECK: extractelement <4 x i64> %p0, i64 0
+ ; CHECK: extractelement <4 x i64> %p1, i64 0
+ ; CHECK: extractelement <4 x i64> %p2, i64 0
+ ; CHECK: call i64 @dx.op.tertiary.i64(i32 48, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}) #[[#ATTR]]
+ ; CHECK: extractelement <4 x i64> %p0, i64 1
+ ; CHECK: extractelement <4 x i64> %p1, i64 1
+ ; CHECK: extractelement <4 x i64> %p2, i64 1
+ ; CHECK: call i64 @dx.op.tertiary.i64(i32 48, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}) #[[#ATTR]]
+ ; CHECK: extractelement <4 x i64> %p0, i64 2
+ ; CHECK: extractelement <4 x i64> %p1, i64 2
+ ; CHECK: extractelement <4 x i64> %p2, i64 2
+ ; CHECK: call i64 @dx.op.tertiary.i64(i32 48, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}) #[[#ATTR]]
+ ; CHECK: extractelement <4 x i64> %p0, i64 3
+ ; CHECK: extractelement <4 x i64> %p1, i64 3
+ ; CHECK: extractelement <4 x i64> %p2, i64 3
+ ; CHECK: call i64 @dx.op.tertiary.i64(i32 48, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}) #[[#ATTR]]
+ ; CHECK: insertelement <4 x i64> poison, i64 %{{.*}}, i64 0
+ ; CHECK: insertelement <4 x i64> %{{.*}}, i64 %{{.*}}, i64 1
+ ; CHECK: insertelement <4 x i64> %{{.*}}, i64 %{{.*}}, i64 2
+ ; CHECK: insertelement <4 x i64> %{{.*}}, i64 %{{.*}}, i64 3
+ %dx.imad = call <4 x i64> @llvm.dx.imad.v4i64(<4 x i64> %p0, <4 x i64> %p1, <4 x i64> %p2)
+ ret <4 x i64> %dx.imad
+}
+
+; Function Attrs: nocallback nofree nosync nounwind willreturn
+declare <4 x i64> @llvm.dx.imad.v4i64(<4 x i64>, <4 x i64>, <4 x i64>) #1
+
+; CHECK: attributes #[[#ATTR]] = {{{.*}} memory(none) {{.*}}}
diff --git a/llvm/test/CodeGen/DirectX/legalize-lifetimes-valver-1.5.ll b/llvm/test/CodeGen/DirectX/legalize-lifetimes-valver-1.5.ll
index e485fa2..b1eea30 100644
--- a/llvm/test/CodeGen/DirectX/legalize-lifetimes-valver-1.5.ll
+++ b/llvm/test/CodeGen/DirectX/legalize-lifetimes-valver-1.5.ll
@@ -11,9 +11,9 @@
define void @test_legal_lifetime() {
%accum.i.flat = alloca [1 x i32], align 4
%gep = getelementptr i32, ptr %accum.i.flat, i32 0
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %accum.i.flat)
+ call void @llvm.lifetime.start.p0(ptr nonnull %accum.i.flat)
store i32 0, ptr %gep, align 4
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %accum.i.flat)
+ call void @llvm.lifetime.end.p0(ptr nonnull %accum.i.flat)
ret void
}
diff --git a/llvm/test/CodeGen/DirectX/legalize-lifetimes-valver-1.6.ll b/llvm/test/CodeGen/DirectX/legalize-lifetimes-valver-1.6.ll
index 77133eb..256fcc0 100644
--- a/llvm/test/CodeGen/DirectX/legalize-lifetimes-valver-1.6.ll
+++ b/llvm/test/CodeGen/DirectX/legalize-lifetimes-valver-1.6.ll
@@ -13,12 +13,12 @@
; CHECK-NEXT: [[ACCUM_I_FLAT:%.*]] = alloca [1 x i32], align 4
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[ACCUM_I_FLAT]], i32 0
; CHECK-SM63-NEXT: store [1 x i32] undef, ptr [[ACCUM_I_FLAT]], align 4
-; CHECK-SM66-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[ACCUM_I_FLAT]])
+; CHECK-SM66-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ACCUM_I_FLAT]])
; CHECK-EMBED-NOT: bitcast
; CHECK-EMBED-NOT: lifetime
; CHECK-NEXT: store i32 0, ptr [[GEP]], align 4
; CHECK-SM63-NEXT: store [1 x i32] undef, ptr [[ACCUM_I_FLAT]], align 4
-; CHECK-SM66-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[ACCUM_I_FLAT]])
+; CHECK-SM66-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ACCUM_I_FLAT]])
; CHECK-EMBED-NOT: bitcast
; CHECK-EMBED-NOT: lifetime
; CHECK-NEXT: ret void
@@ -26,9 +26,9 @@
define void @test_legal_lifetime() {
%accum.i.flat = alloca [1 x i32], align 4
%gep = getelementptr i32, ptr %accum.i.flat, i32 0
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %accum.i.flat)
+ call void @llvm.lifetime.start.p0(ptr nonnull %accum.i.flat)
store i32 0, ptr %gep, align 4
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %accum.i.flat)
+ call void @llvm.lifetime.end.p0(ptr nonnull %accum.i.flat)
ret void
}
diff --git a/llvm/test/CodeGen/DirectX/legalize-memset.ll b/llvm/test/CodeGen/DirectX/legalize-memset.ll
index a73e737..ad45ac6 100644
--- a/llvm/test/CodeGen/DirectX/legalize-memset.ll
+++ b/llvm/test/CodeGen/DirectX/legalize-memset.ll
@@ -5,18 +5,14 @@ define void @replace_float_memset_test() #0 {
; CHECK-LABEL: define void @replace_float_memset_test(
; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[ACCUM_I_FLAT:%.*]] = alloca [2 x float], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull [[ACCUM_I_FLAT]])
; CHECK-NEXT: [[GEP:%.*]] = getelementptr [2 x float], ptr [[ACCUM_I_FLAT]], i32 0, i32 0
; CHECK-NEXT: store float 0.000000e+00, ptr [[GEP]], align 4
; CHECK-NEXT: [[GEP1:%.*]] = getelementptr [2 x float], ptr [[ACCUM_I_FLAT]], i32 0, i32 1
; CHECK-NEXT: store float 0.000000e+00, ptr [[GEP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull [[ACCUM_I_FLAT]])
; CHECK-NEXT: ret void
;
%accum.i.flat = alloca [2 x float], align 4
- call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %accum.i.flat)
call void @llvm.memset.p0.i32(ptr nonnull align 4 dereferenceable(8) %accum.i.flat, i8 0, i32 8, i1 false)
- call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %accum.i.flat)
ret void
}
@@ -24,18 +20,14 @@ define void @replace_half_memset_test() #0 {
; CHECK-LABEL: define void @replace_half_memset_test(
; CHECK-SAME: ) #[[ATTR0]] {
; CHECK-NEXT: [[ACCUM_I_FLAT:%.*]] = alloca [2 x half], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[ACCUM_I_FLAT]])
; CHECK-NEXT: [[GEP:%.*]] = getelementptr [2 x half], ptr [[ACCUM_I_FLAT]], i32 0, i32 0
; CHECK-NEXT: store half 0xH0000, ptr [[GEP]], align 2
; CHECK-NEXT: [[GEP1:%.*]] = getelementptr [2 x half], ptr [[ACCUM_I_FLAT]], i32 0, i32 1
; CHECK-NEXT: store half 0xH0000, ptr [[GEP1]], align 2
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[ACCUM_I_FLAT]])
; CHECK-NEXT: ret void
;
%accum.i.flat = alloca [2 x half], align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %accum.i.flat)
call void @llvm.memset.p0.i32(ptr nonnull align 4 dereferenceable(8) %accum.i.flat, i8 0, i32 4, i1 false)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %accum.i.flat)
ret void
}
@@ -43,18 +35,14 @@ define void @replace_double_memset_test() #0 {
; CHECK-LABEL: define void @replace_double_memset_test(
; CHECK-SAME: ) #[[ATTR0]] {
; CHECK-NEXT: [[ACCUM_I_FLAT:%.*]] = alloca [2 x double], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[ACCUM_I_FLAT]])
; CHECK-NEXT: [[GEP:%.*]] = getelementptr [2 x double], ptr [[ACCUM_I_FLAT]], i32 0, i32 0
; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP]], align 8
; CHECK-NEXT: [[GEP1:%.*]] = getelementptr [2 x double], ptr [[ACCUM_I_FLAT]], i32 0, i32 1
; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP1]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[ACCUM_I_FLAT]])
; CHECK-NEXT: ret void
;
%accum.i.flat = alloca [2 x double], align 4
- call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %accum.i.flat)
call void @llvm.memset.p0.i32(ptr nonnull align 4 dereferenceable(8) %accum.i.flat, i8 0, i32 16, i1 false)
- call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %accum.i.flat)
ret void
}
@@ -62,18 +50,14 @@ define void @replace_int16_memset_test() #0 {
; CHECK-LABEL: define void @replace_int16_memset_test(
; CHECK-SAME: ) #[[ATTR0]] {
; CHECK-NEXT: [[CACHE_I:%.*]] = alloca [2 x i16], align 2
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[CACHE_I]])
; CHECK-NEXT: [[GEP:%.*]] = getelementptr [2 x i16], ptr [[CACHE_I]], i32 0, i32 0
; CHECK-NEXT: store i16 0, ptr [[GEP]], align 2
; CHECK-NEXT: [[GEP1:%.*]] = getelementptr [2 x i16], ptr [[CACHE_I]], i32 0, i32 1
; CHECK-NEXT: store i16 0, ptr [[GEP1]], align 2
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[CACHE_I]])
; CHECK-NEXT: ret void
;
%cache.i = alloca [2 x i16], align 2
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %cache.i)
call void @llvm.memset.p0.i32(ptr nonnull align 2 dereferenceable(4) %cache.i, i8 0, i32 4, i1 false)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %cache.i)
ret void
}
@@ -81,16 +65,12 @@ define void @replace_int_memset_test() #0 {
; CHECK-LABEL: define void @replace_int_memset_test(
; CHECK-SAME: ) #[[ATTR0]] {
; CHECK-NEXT: [[ACCUM_I_FLAT:%.*]] = alloca [1 x i32], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[ACCUM_I_FLAT]])
; CHECK-NEXT: [[GEP:%.*]] = getelementptr [1 x i32], ptr [[ACCUM_I_FLAT]], i32 0, i32 0
; CHECK-NEXT: store i32 0, ptr [[GEP]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[ACCUM_I_FLAT]])
; CHECK-NEXT: ret void
;
%accum.i.flat = alloca [1 x i32], align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %accum.i.flat)
call void @llvm.memset.p0.i32(ptr nonnull align 4 dereferenceable(8) %accum.i.flat, i8 0, i32 4, i1 false)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %accum.i.flat)
ret void
}
@@ -101,25 +81,19 @@ define void @replace_int_memset_to_var_test() #0 {
; CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
; CHECK-NEXT: store i32 1, ptr [[I]], align 4
; CHECK-NEXT: [[I8_LOAD:%.*]] = load i32, ptr [[I]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[ACCUM_I_FLAT]])
; CHECK-NEXT: [[GEP:%.*]] = getelementptr [1 x i32], ptr [[ACCUM_I_FLAT]], i32 0, i32 0
; CHECK-NEXT: store i32 [[I8_LOAD]], ptr [[GEP]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[ACCUM_I_FLAT]])
; CHECK-NEXT: ret void
;
%accum.i.flat = alloca [1 x i32], align 4
%i = alloca i8, align 4
store i8 1, ptr %i
%i8.load = load i8, ptr %i
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %accum.i.flat)
call void @llvm.memset.p0.i32(ptr nonnull align 4 dereferenceable(8) %accum.i.flat, i8 %i8.load, i32 4, i1 false)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %accum.i.flat)
ret void
}
attributes #0 = {"hlsl.export"}
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr captures(none))
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr captures(none))
declare void @llvm.memset.p0.i32(ptr writeonly captures(none), i8, i32, i1 immarg)
diff --git a/llvm/test/CodeGen/DirectX/umad.ll b/llvm/test/CodeGen/DirectX/umad.ll
index 104d238..76516a2 100644
--- a/llvm/test/CodeGen/DirectX/umad.ll
+++ b/llvm/test/CodeGen/DirectX/umad.ll
@@ -1,17 +1,13 @@
-; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+; RUN: opt -S -scalarizer -dxil-op-lower < %s | FileCheck %s
; Make sure dxil operation function calls for round are generated for float and half.
-; CHECK:call i16 @dx.op.tertiary.i16(i32 49, i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}}) #[[#ATTR:]]
-; CHECK:call i32 @dx.op.tertiary.i32(i32 49, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) #[[#ATTR]]
-; CHECK:call i64 @dx.op.tertiary.i64(i32 49, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}) #[[#ATTR]]
-
-; CHECK: attributes #[[#ATTR]] = {{{.*}} memory(none) {{.*}}}
target datalayout = "e-m:e-p:32:32-i1:32-i8:8-i16:16-i32:32-i64:64-f16:16-f32:32-f64:64-n8:16:32:64"
target triple = "dxil-pc-shadermodel6.7-library"
; Function Attrs: noinline nounwind optnone
define noundef i16 @umad_ushort(i16 noundef %p0, i16 noundef %p1, i16 noundef %p2) #0 {
entry:
+ ; CHECK: call i16 @dx.op.tertiary.i16(i32 49, i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}}) #[[#ATTR:]]
%p2.addr = alloca i16, align 2
%p1.addr = alloca i16, align 2
%p0.addr = alloca i16, align 2
@@ -31,6 +27,7 @@ declare i16 @llvm.dx.umad.i16(i16, i16, i16) #1
; Function Attrs: noinline nounwind optnone
define noundef i32 @umad_uint(i32 noundef %p0, i32 noundef %p1, i32 noundef %p2) #0 {
entry:
+ ; CHECK: call i32 @dx.op.tertiary.i32(i32 49, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) #[[#ATTR]]
%p2.addr = alloca i32, align 4
%p1.addr = alloca i32, align 4
%p0.addr = alloca i32, align 4
@@ -50,6 +47,7 @@ declare i32 @llvm.dx.umad.i32(i32, i32, i32) #1
; Function Attrs: noinline nounwind optnone
define noundef i64 @umad_uint64(i64 noundef %p0, i64 noundef %p1, i64 noundef %p2) #0 {
entry:
+ ; CHECK: call i64 @dx.op.tertiary.i64(i32 49, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}) #[[#ATTR]]
%p2.addr = alloca i64, align 8
%p1.addr = alloca i64, align 8
%p0.addr = alloca i64, align 8
@@ -65,3 +63,95 @@ entry:
; Function Attrs: nocallback nofree nosync nounwind willreturn
declare i64 @llvm.dx.umad.i64(i64, i64, i64) #1
+
+; Function Attrs: noinline nounwind optnone
+define noundef <4 x i16> @umad_uint16_t4(<4 x i16> noundef %p0, <4 x i16> noundef %p1, <4 x i16> noundef %p2) #0 {
+entry:
+ ; CHECK: extractelement <4 x i16> %p0, i64 0
+ ; CHECK: extractelement <4 x i16> %p1, i64 0
+ ; CHECK: extractelement <4 x i16> %p2, i64 0
+ ; CHECK: call i16 @dx.op.tertiary.i16(i32 49, i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}}) #[[#ATTR]]
+ ; CHECK: extractelement <4 x i16> %p0, i64 1
+ ; CHECK: extractelement <4 x i16> %p1, i64 1
+ ; CHECK: extractelement <4 x i16> %p2, i64 1
+ ; CHECK: call i16 @dx.op.tertiary.i16(i32 49, i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}}) #[[#ATTR]]
+ ; CHECK: extractelement <4 x i16> %p0, i64 2
+ ; CHECK: extractelement <4 x i16> %p1, i64 2
+ ; CHECK: extractelement <4 x i16> %p2, i64 2
+ ; CHECK: call i16 @dx.op.tertiary.i16(i32 49, i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}}) #[[#ATTR]]
+ ; CHECK: extractelement <4 x i16> %p0, i64 3
+ ; CHECK: extractelement <4 x i16> %p1, i64 3
+ ; CHECK: extractelement <4 x i16> %p2, i64 3
+ ; CHECK: call i16 @dx.op.tertiary.i16(i32 49, i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}}) #[[#ATTR]]
+ ; CHECK: insertelement <4 x i16> poison, i16 %{{.*}}, i64 0
+ ; CHECK: insertelement <4 x i16> %{{.*}}, i16 %{{.*}}, i64 1
+ ; CHECK: insertelement <4 x i16> %{{.*}}, i16 %{{.*}}, i64 2
+ ; CHECK: insertelement <4 x i16> %{{.*}}, i16 %{{.*}}, i64 3
+ %dx.umad = call <4 x i16> @llvm.dx.umad.v4i16(<4 x i16> %p0, <4 x i16> %p1, <4 x i16> %p2)
+ ret <4 x i16> %dx.umad
+}
+
+; Function Attrs: nocallback nofree nosync nounwind willreturn
+declare <4 x i16> @llvm.dx.umad.v4i16(<4 x i16>, <4 x i16>, <4 x i16>) #1
+
+; Function Attrs: noinline nounwind optnone
+define noundef <4 x i32> @umad_uint4(<4 x i32> noundef %p0, <4 x i32> noundef %p1, <4 x i32> noundef %p2) #0 {
+entry:
+ ; CHECK: extractelement <4 x i32> %p0, i64 0
+ ; CHECK: extractelement <4 x i32> %p1, i64 0
+ ; CHECK: extractelement <4 x i32> %p2, i64 0
+ ; CHECK: call i32 @dx.op.tertiary.i32(i32 49, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) #[[#ATTR]]
+ ; CHECK: extractelement <4 x i32> %p0, i64 1
+ ; CHECK: extractelement <4 x i32> %p1, i64 1
+ ; CHECK: extractelement <4 x i32> %p2, i64 1
+ ; CHECK: call i32 @dx.op.tertiary.i32(i32 49, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) #[[#ATTR]]
+ ; CHECK: extractelement <4 x i32> %p0, i64 2
+ ; CHECK: extractelement <4 x i32> %p1, i64 2
+ ; CHECK: extractelement <4 x i32> %p2, i64 2
+ ; CHECK: call i32 @dx.op.tertiary.i32(i32 49, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) #[[#ATTR]]
+ ; CHECK: extractelement <4 x i32> %p0, i64 3
+ ; CHECK: extractelement <4 x i32> %p1, i64 3
+ ; CHECK: extractelement <4 x i32> %p2, i64 3
+ ; CHECK: call i32 @dx.op.tertiary.i32(i32 49, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) #[[#ATTR]]
+ ; CHECK: insertelement <4 x i32> poison, i32 %{{.*}}, i64 0
+ ; CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i64 1
+ ; CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i64 2
+ ; CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i64 3
+ %dx.umad = call <4 x i32> @llvm.dx.umad.v4i32(<4 x i32> %p0, <4 x i32> %p1, <4 x i32> %p2)
+ ret <4 x i32> %dx.umad
+}
+
+; Function Attrs: nocallback nofree nosync nounwind willreturn
+declare <4 x i32> @llvm.dx.umad.v4i32(<4 x i32>, <4 x i32>, <4 x i32>) #1
+
+; Function Attrs: noinline nounwind optnone
+define noundef <4 x i64> @umad_uint64_t4(<4 x i64> noundef %p0, <4 x i64> noundef %p1, <4 x i64> noundef %p2) #0 {
+entry:
+ ; CHECK: extractelement <4 x i64> %p0, i64 0
+ ; CHECK: extractelement <4 x i64> %p1, i64 0
+ ; CHECK: extractelement <4 x i64> %p2, i64 0
+ ; CHECK: call i64 @dx.op.tertiary.i64(i32 49, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}) #[[#ATTR]]
+ ; CHECK: extractelement <4 x i64> %p0, i64 1
+ ; CHECK: extractelement <4 x i64> %p1, i64 1
+ ; CHECK: extractelement <4 x i64> %p2, i64 1
+ ; CHECK: call i64 @dx.op.tertiary.i64(i32 49, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}) #[[#ATTR]]
+ ; CHECK: extractelement <4 x i64> %p0, i64 2
+ ; CHECK: extractelement <4 x i64> %p1, i64 2
+ ; CHECK: extractelement <4 x i64> %p2, i64 2
+ ; CHECK: call i64 @dx.op.tertiary.i64(i32 49, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}) #[[#ATTR]]
+ ; CHECK: extractelement <4 x i64> %p0, i64 3
+ ; CHECK: extractelement <4 x i64> %p1, i64 3
+ ; CHECK: extractelement <4 x i64> %p2, i64 3
+ ; CHECK: call i64 @dx.op.tertiary.i64(i32 49, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}) #[[#ATTR]]
+ ; CHECK: insertelement <4 x i64> poison, i64 %{{.*}}, i64 0
+ ; CHECK: insertelement <4 x i64> %{{.*}}, i64 %{{.*}}, i64 1
+ ; CHECK: insertelement <4 x i64> %{{.*}}, i64 %{{.*}}, i64 2
+ ; CHECK: insertelement <4 x i64> %{{.*}}, i64 %{{.*}}, i64 3
+ %dx.umad = call <4 x i64> @llvm.dx.umad.v4i64(<4 x i64> %p0, <4 x i64> %p1, <4 x i64> %p2)
+ ret <4 x i64> %dx.umad
+}
+
+; Function Attrs: nocallback nofree nosync nounwind willreturn
+declare <4 x i64> @llvm.dx.umad.v4i64(<4 x i64>, <4 x i64>, <4 x i64>) #1
+
+; CHECK: attributes #[[#ATTR]] = {{{.*}} memory(none) {{.*}}}
diff --git a/llvm/test/CodeGen/Generic/half.ll b/llvm/test/CodeGen/Generic/half.ll
new file mode 100644
index 0000000..f4ea5b5
--- /dev/null
+++ b/llvm/test/CodeGen/Generic/half.ll
@@ -0,0 +1,87 @@
+; Simple cross-platform smoke checks for basic f16 operations.
+;
+; There shouldn't be any architectures that crash when trying to use `half`;
+; check that here. Additionally do a small handful of smoke tests that work
+; well cross-platform.
+
+; RUN: %if aarch64-registered-target %{ llc %s -o - -mtriple=aarch64-apple-darwin | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if aarch64-registered-target %{ llc %s -o - -mtriple=aarch64-pc-windows-msvc | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if aarch64-registered-target %{ llc %s -o - -mtriple=aarch64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %}
+; FIXME(#94434) unsupported on arm64ec
+; RUN: %if aarch64-registered-target %{ ! llc %s -o - -mtriple=arm64ec-pc-windows-msvc -filetype=null %}
+; RUN: %if amdgpu-registered-target %{ llc %s -o - -mtriple=amdgcn-amd-amdhsa | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if arc-registered-target %{ llc %s -o - -mtriple=arc-elf | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if arm-registered-target %{ llc %s -o - -mtriple=arm-unknown-linux-gnueabi | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if arm-registered-target %{ llc %s -o - -mtriple=thumbv7em-none-eabi | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if avr-registered-target %{ llc %s -o - -mtriple=avr-none | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if bpf-registered-target %{ llc %s -o - -mtriple=bpfel | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if csky-registered-target %{ llc %s -o - -mtriple=csky-unknown-linux-gnuabiv2 | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if csky-registered-target %{ llc %s -o - -mtriple=csky-unknown-linux-gnuabiv2 -mcpu=ck860fv -mattr=+hard-float | FileCheck %s --check-prefixes=ALL,BAD %}
+; RUN: %if directx-registered-target %{ llc %s -o - -mtriple=dxil-pc-shadermodel6.3-library | FileCheck %s --check-prefixes=NOCRASH %}
+; RUN: %if hexagon-registered-target %{ llc %s -o - -mtriple=hexagon-unknown-linux-musl | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if lanai-registered-target %{ llc %s -o - -mtriple=lanai-unknown-unknown | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if loongarch-registered-target %{ llc %s -o - -mtriple=loongarch32-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if loongarch-registered-target %{ llc %s -o - -mtriple=loongarch64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if loongarch-registered-target %{ llc %s -o - -mtriple=loongarch64-unknown-linux-gnu -mattr=+f | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if m68k-registered-target %{ llc %s -o - -mtriple=m68k-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if mips-registered-target %{ llc %s -o - -mtriple=mips-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if mips-registered-target %{ llc %s -o - -mtriple=mips64-unknown-linux-gnuabi64 | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if mips-registered-target %{ llc %s -o - -mtriple=mips64el-unknown-linux-gnuabi64 | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if mips-registered-target %{ llc %s -o - -mtriple=mipsel-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if msp430-registered-target %{ llc %s -o - -mtriple=msp430-none-elf | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if nvptx-registered-target %{ llc %s -o - -mtriple=nvptx64-nvidia-cuda | FileCheck %s --check-prefixes=NOCRASH %}
+; RUN: %if powerpc-registered-target %{ llc %s -o - -mtriple=powerpc-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,BAD %}
+; RUN: %if powerpc-registered-target %{ llc %s -o - -mtriple=powerpc64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,BAD %}
+; RUN: %if powerpc-registered-target %{ llc %s -o - -mtriple=powerpc64le-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,BAD %}
+; RUN: %if riscv-registered-target %{ llc %s -o - -mtriple=riscv32-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if riscv-registered-target %{ llc %s -o - -mtriple=riscv64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if sparc-registered-target %{ llc %s -o - -mtriple=sparc-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,BAD %}
+; RUN: %if sparc-registered-target %{ llc %s -o - -mtriple=sparc64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,BAD %}
+; RUN: %if spirv-registered-target %{ llc %s -o - -mtriple=spirv-unknown-unknown | FileCheck %s --check-prefixes=NOCRASH %}
+; RUN: %if systemz-registered-target %{ llc %s -o - -mtriple=s390x-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if ve-registered-target %{ llc %s -o - -mtriple=ve-unknown-unknown | FileCheck %s --check-prefixes=ALL,BAD %}
+; RUN: %if webassembly-registered-target %{ llc %s -o - -mtriple=wasm32-unknown-unknown | FileCheck %s --check-prefixes=ALL,BAD %}
+; RUN: %if x86-registered-target %{ llc %s -o - -mtriple=i686-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if x86-registered-target %{ llc %s -o - -mtriple=x86_64-pc-windows-msvc | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if x86-registered-target %{ llc %s -o - -mtriple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if xcore-registered-target %{ llc %s -o - -mtriple=xcore-unknown-unknown | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if xtensa-registered-target %{ llc %s -o - -mtriple=xtensa-none-elf | FileCheck %s --check-prefixes=ALL,CHECK %}
+
+; Codegen tests don't work the same for graphics targets. Add a dummy directive
+; for FileCheck; just make sure we don't crash.
+; NOCRASH: {{.*}}
+
+; All backends need to be able to bitcast without converting to another format,
+; so we assert against __extendhfsf2, __truncsfhf2, __gnu_{h2f,f2h}_ieee. This
+; doesn't catch issues on platforms with hardware f32<->f16, but those tend to
+; work better anyway.
+; Regression test for https://github.com/llvm/llvm-project/issues/97981.
+
+define half @from_bits(i16 %bits) nounwind {
+; ALL-LABEL: from_bits:
+; CHECK-NOT: __extend
+; CHECK-NOT: __trunc
+; CHECK-NOT: __gnu
+; BAD: __extendhfsf2
+ %f = bitcast i16 %bits to half
+ ret half %f
+}
+
+define i16 @to_bits(half %f) nounwind {
+; ALL-LABEL: to_bits:
+; CHECK-NOT: __extend
+; CHECK-NOT: __trunc
+; CHECK-NOT: __gnu
+; BAD: __truncsfhf2
+ %bits = bitcast half %f to i16
+ ret i16 %bits
+}
+
+; Some platforms have had problems freezing. Regression test for
+; https://github.com/llvm/llvm-project/issues/117337 and similar issues.
+
+define half @check_freeze(half %f) nounwind {
+; ALL-LABEL: check_freeze:
+ %t0 = freeze half %f
+ ret half %t0
+}
diff --git a/llvm/test/CodeGen/LoongArch/lasx/shuffle-as-permute-and-shuffle.ll b/llvm/test/CodeGen/LoongArch/lasx/shuffle-as-permute-and-shuffle.ll
index 0e172950..fed0858 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/shuffle-as-permute-and-shuffle.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/shuffle-as-permute-and-shuffle.ll
@@ -17,14 +17,25 @@ define <32 x i8> @shuffle_v32i8(<32 x i8> %a) {
ret <32 x i8> %shuffle
}
+define <32 x i8> @shuffle_v32i8_same_lane(<32 x i8> %a) {
+; CHECK-LABEL: shuffle_v32i8_same_lane:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI1_0)
+; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI1_0)
+; CHECK-NEXT: xvshuf.h $xr1, $xr0, $xr0
+; CHECK-NEXT: xvori.b $xr0, $xr1, 0
+; CHECK-NEXT: ret
+ %shuffle = shufflevector <32 x i8> %a, <32 x i8> poison, <32 x i32> <i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ ret <32 x i8> %shuffle
+}
define <16 x i16> @shuffle_v16i16(<16 x i16> %a) {
; CHECK-LABEL: shuffle_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI1_0)
-; CHECK-NEXT: xvld $xr2, $a0, %pc_lo12(.LCPI1_0)
-; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI1_1)
-; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI1_1)
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI2_0)
+; CHECK-NEXT: xvld $xr2, $a0, %pc_lo12(.LCPI2_0)
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI2_1)
+; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI2_1)
; CHECK-NEXT: xvpermi.d $xr3, $xr0, 78
; CHECK-NEXT: xvshuf.d $xr2, $xr0, $xr3
; CHECK-NEXT: xvshuf.w $xr1, $xr2, $xr0
@@ -34,13 +45,25 @@ define <16 x i16> @shuffle_v16i16(<16 x i16> %a) {
ret <16 x i16> %shuffle
}
+define <16 x i16> @shuffle_v16i16_same_lane(<16 x i16> %a) {
+; CHECK-LABEL: shuffle_v16i16_same_lane:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_0)
+; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI3_0)
+; CHECK-NEXT: xvshuf.h $xr1, $xr0, $xr0
+; CHECK-NEXT: xvori.b $xr0, $xr1, 0
+; CHECK-NEXT: ret
+ %shuffle = shufflevector <16 x i16> %a, <16 x i16> poison, <16 x i32> <i32 6, i32 7, i32 0, i32 5, i32 2, i32 3, i32 6, i32 5, i32 8, i32 9, i32 10, i32 13, i32 12, i32 15, i32 13, i32 15>
+ ret <16 x i16> %shuffle
+}
+
define <8 x i32> @shuffle_v8i32(<8 x i32> %a) {
; CHECK-LABEL: shuffle_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI2_0)
-; CHECK-NEXT: xvld $xr2, $a0, %pc_lo12(.LCPI2_0)
-; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI2_1)
-; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI2_1)
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI4_0)
+; CHECK-NEXT: xvld $xr2, $a0, %pc_lo12(.LCPI4_0)
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI4_1)
+; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI4_1)
; CHECK-NEXT: xvpermi.d $xr3, $xr0, 78
; CHECK-NEXT: xvshuf.d $xr2, $xr0, $xr3
; CHECK-NEXT: xvshuf.d $xr1, $xr2, $xr0
@@ -50,13 +73,25 @@ define <8 x i32> @shuffle_v8i32(<8 x i32> %a) {
ret <8 x i32> %shuffle
}
+define <8 x i32> @shuffle_v8i32_same_lane(<8 x i32> %a) {
+; CHECK-LABEL: shuffle_v8i32_same_lane:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_0)
+; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI5_0)
+; CHECK-NEXT: xvshuf.d $xr1, $xr0, $xr0
+; CHECK-NEXT: xvori.b $xr0, $xr1, 0
+; CHECK-NEXT: ret
+ %shuffle = shufflevector <8 x i32> %a, <8 x i32> poison, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i32> %shuffle
+}
+
define <4 x i64> @shuffle_v4i64(<4 x i64> %a) {
; CHECK-LABEL: shuffle_v4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_0)
-; CHECK-NEXT: xvld $xr2, $a0, %pc_lo12(.LCPI3_0)
-; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_1)
-; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI3_1)
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI6_0)
+; CHECK-NEXT: xvld $xr2, $a0, %pc_lo12(.LCPI6_0)
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI6_1)
+; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI6_1)
; CHECK-NEXT: xvpermi.d $xr3, $xr0, 78
; CHECK-NEXT: xvshuf.d $xr2, $xr0, $xr3
; CHECK-NEXT: xvshuf.d $xr1, $xr2, $xr0
@@ -66,13 +101,25 @@ define <4 x i64> @shuffle_v4i64(<4 x i64> %a) {
ret <4 x i64> %shuffle
}
+define <4 x i64> @shuffle_v4i64_same_lane(<4 x i64> %a) {
+; CHECK-LABEL: shuffle_v4i64_same_lane:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI7_0)
+; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI7_0)
+; CHECK-NEXT: xvshuf.d $xr1, $xr0, $xr0
+; CHECK-NEXT: xvori.b $xr0, $xr1, 0
+; CHECK-NEXT: ret
+ %shuffle = shufflevector <4 x i64> %a, <4 x i64> poison, <4 x i32> <i32 1, i32 0, i32 2, i32 3>
+ ret <4 x i64> %shuffle
+}
+
define <8 x float> @shuffle_v8f32(<8 x float> %a) {
; CHECK-LABEL: shuffle_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI4_0)
-; CHECK-NEXT: xvld $xr2, $a0, %pc_lo12(.LCPI4_0)
-; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI4_1)
-; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI4_1)
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI8_0)
+; CHECK-NEXT: xvld $xr2, $a0, %pc_lo12(.LCPI8_0)
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI8_1)
+; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI8_1)
; CHECK-NEXT: xvpermi.d $xr3, $xr0, 78
; CHECK-NEXT: xvshuf.d $xr2, $xr0, $xr3
; CHECK-NEXT: xvshuf.d $xr1, $xr2, $xr0
@@ -82,13 +129,26 @@ define <8 x float> @shuffle_v8f32(<8 x float> %a) {
ret <8 x float> %shuffle
}
+define <8 x float> @shuffle_v8f32_same_lane(<8 x float> %a) {
+; CHECK-LABEL: shuffle_v8f32_same_lane:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI9_0)
+; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI9_0)
+; CHECK-NEXT: xvpermi.d $xr0, $xr0, 68
+; CHECK-NEXT: xvshuf.w $xr1, $xr0, $xr0
+; CHECK-NEXT: xvori.b $xr0, $xr1, 0
+; CHECK-NEXT: ret
+ %shuffle = shufflevector <8 x float> %a, <8 x float> poison, <8 x i32> <i32 3, i32 2, i32 0, i32 2, i32 3, i32 1, i32 2, i32 3>
+ ret <8 x float> %shuffle
+}
+
define <4 x double> @shuffle_v4f64(<4 x double> %a) {
; CHECK-LABEL: shuffle_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_0)
-; CHECK-NEXT: xvld $xr2, $a0, %pc_lo12(.LCPI5_0)
-; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_1)
-; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI5_1)
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI10_0)
+; CHECK-NEXT: xvld $xr2, $a0, %pc_lo12(.LCPI10_0)
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI10_1)
+; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI10_1)
; CHECK-NEXT: xvpermi.d $xr3, $xr0, 78
; CHECK-NEXT: xvshuf.d $xr2, $xr0, $xr3
; CHECK-NEXT: xvshuf.d $xr1, $xr2, $xr0
@@ -97,3 +157,16 @@ define <4 x double> @shuffle_v4f64(<4 x double> %a) {
%shuffle = shufflevector <4 x double> %a, <4 x double> poison, <4 x i32> <i32 3, i32 1, i32 2, i32 0>
ret <4 x double> %shuffle
}
+
+define <4 x double> @shuffle_v4f64_same_lane(<4 x double> %a) {
+; CHECK-LABEL: shuffle_v4f64_same_lane:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI11_0)
+; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI11_0)
+; CHECK-NEXT: xvpermi.d $xr0, $xr0, 78
+; CHECK-NEXT: xvshuf.d $xr1, $xr0, $xr0
+; CHECK-NEXT: xvori.b $xr0, $xr1, 0
+; CHECK-NEXT: ret
+ %shuffle = shufflevector <4 x double> %a, <4 x double> poison, <4 x i32> <i32 3, i32 2, i32 0, i32 1>
+ ret <4 x double> %shuffle
+}
diff --git a/llvm/test/CodeGen/MIR/AMDGPU/long-branch-reg-all-sgpr-used.ll b/llvm/test/CodeGen/MIR/AMDGPU/long-branch-reg-all-sgpr-used.ll
index 278cf01..929db4c 100644
--- a/llvm/test/CodeGen/MIR/AMDGPU/long-branch-reg-all-sgpr-used.ll
+++ b/llvm/test/CodeGen/MIR/AMDGPU/long-branch-reg-all-sgpr-used.ll
@@ -17,6 +17,8 @@
; CHECK-NEXT: waveLimiter: false
; CHECK-NEXT: hasSpilledSGPRs: false
; CHECK-NEXT: hasSpilledVGPRs: false
+; CHECK-NEXT: numWaveDispatchSGPRs: 0
+; CHECK-NEXT: numWaveDispatchVGPRs: 0
; CHECK-NEXT: scratchRSrcReg: '$sgpr96_sgpr97_sgpr98_sgpr99'
; CHECK-NEXT: frameOffsetReg: '$fp_reg'
; CHECK-NEXT: stackPtrOffsetReg: '$sgpr32'
@@ -287,6 +289,8 @@
; CHECK-NEXT: waveLimiter: false
; CHECK-NEXT: hasSpilledSGPRs: false
; CHECK-NEXT: hasSpilledVGPRs: false
+; CHECK-NEXT: numWaveDispatchSGPRs: 0
+; CHECK-NEXT: numWaveDispatchVGPRs: 0
; CHECK-NEXT: scratchRSrcReg: '$sgpr96_sgpr97_sgpr98_sgpr99'
; CHECK-NEXT: frameOffsetReg: '$fp_reg'
; CHECK-NEXT: stackPtrOffsetReg: '$sgpr32'
diff --git a/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-after-pei.ll b/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-after-pei.ll
index 890ea44..f054bea 100644
--- a/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-after-pei.ll
+++ b/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-after-pei.ll
@@ -16,6 +16,8 @@
; AFTER-PEI-NEXT: waveLimiter: false
; AFTER-PEI-NEXT: hasSpilledSGPRs: true
; AFTER-PEI-NEXT: hasSpilledVGPRs: false
+; AFTER-PEI-NEXT: numWaveDispatchSGPRs: 0
+; AFTER-PEI-NEXT: numWaveDispatchVGPRs: 0
; AFTER-PEI-NEXT: scratchRSrcReg: '$sgpr68_sgpr69_sgpr70_sgpr71'
; AFTER-PEI-NEXT: frameOffsetReg: '$fp_reg'
; AFTER-PEI-NEXT: stackPtrOffsetReg: '$sgpr32'
diff --git a/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-long-branch-reg-debug.ll b/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-long-branch-reg-debug.ll
index f84ef8a..924216e 100644
--- a/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-long-branch-reg-debug.ll
+++ b/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-long-branch-reg-debug.ll
@@ -17,6 +17,8 @@
; CHECK-NEXT: waveLimiter: false
; CHECK-NEXT: hasSpilledSGPRs: false
; CHECK-NEXT: hasSpilledVGPRs: false
+; CHECK-NEXT: numWaveDispatchSGPRs: 0
+; CHECK-NEXT: numWaveDispatchVGPRs: 0
; CHECK-NEXT: scratchRSrcReg: '$sgpr96_sgpr97_sgpr98_sgpr99'
; CHECK-NEXT: frameOffsetReg: '$fp_reg'
; CHECK-NEXT: stackPtrOffsetReg: '$sgpr32'
diff --git a/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-long-branch-reg.ll b/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-long-branch-reg.ll
index cc834d0..39f1ddd 100644
--- a/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-long-branch-reg.ll
+++ b/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-long-branch-reg.ll
@@ -17,6 +17,8 @@
; CHECK-NEXT: waveLimiter: false
; CHECK-NEXT: hasSpilledSGPRs: false
; CHECK-NEXT: hasSpilledVGPRs: false
+; CHECK-NEXT: numWaveDispatchSGPRs: 0
+; CHECK-NEXT: numWaveDispatchVGPRs: 0
; CHECK-NEXT: scratchRSrcReg: '$sgpr96_sgpr97_sgpr98_sgpr99'
; CHECK-NEXT: frameOffsetReg: '$fp_reg'
; CHECK-NEXT: stackPtrOffsetReg: '$sgpr32'
diff --git a/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-no-ir.mir b/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-no-ir.mir
index 06c580e..0cb9bc0 100644
--- a/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-no-ir.mir
+++ b/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-no-ir.mir
@@ -17,6 +17,8 @@
# FULL-NEXT: waveLimiter: true
# FULL-NEXT: hasSpilledSGPRs: false
# FULL-NEXT: hasSpilledVGPRs: false
+# FULL-NEXT: numWaveDispatchSGPRs: 0
+# FULL-NEXT: numWaveDispatchVGPRs: 0
# FULL-NEXT: scratchRSrcReg: '$sgpr8_sgpr9_sgpr10_sgpr11'
# FULL-NEXT: frameOffsetReg: '$sgpr12'
# FULL-NEXT: stackPtrOffsetReg: '$sgpr13'
@@ -127,6 +129,8 @@ body: |
# FULL-NEXT: waveLimiter: false
# FULL-NEXT: hasSpilledSGPRs: false
# FULL-NEXT: hasSpilledVGPRs: false
+# FULL-NEXT: numWaveDispatchSGPRs: 0
+# FULL-NEXT: numWaveDispatchVGPRs: 0
# FULL-NEXT: scratchRSrcReg: '$private_rsrc_reg'
# FULL-NEXT: frameOffsetReg: '$fp_reg'
# FULL-NEXT: stackPtrOffsetReg: '$sp_reg'
@@ -206,6 +210,8 @@ body: |
# FULL-NEXT: waveLimiter: false
# FULL-NEXT: hasSpilledSGPRs: false
# FULL-NEXT: hasSpilledVGPRs: false
+# FULL-NEXT: numWaveDispatchSGPRs: 0
+# FULL-NEXT: numWaveDispatchVGPRs: 0
# FULL-NEXT: scratchRSrcReg: '$private_rsrc_reg'
# FULL-NEXT: frameOffsetReg: '$fp_reg'
# FULL-NEXT: stackPtrOffsetReg: '$sp_reg'
@@ -286,6 +292,8 @@ body: |
# FULL-NEXT: waveLimiter: false
# FULL-NEXT: hasSpilledSGPRs: false
# FULL-NEXT: hasSpilledVGPRs: false
+# FULL-NEXT: numWaveDispatchSGPRs: 0
+# FULL-NEXT: numWaveDispatchVGPRs: 0
# FULL-NEXT: scratchRSrcReg: '$private_rsrc_reg'
# FULL-NEXT: frameOffsetReg: '$fp_reg'
# FULL-NEXT: stackPtrOffsetReg: '$sp_reg'
diff --git a/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info.ll b/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info.ll
index 4271546..ab4383b 100644
--- a/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info.ll
+++ b/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info.ll
@@ -20,6 +20,8 @@
; CHECK-NEXT: waveLimiter: false
; CHECK-NEXT: hasSpilledSGPRs: false
; CHECK-NEXT: hasSpilledVGPRs: false
+; CHECK-NEXT: numWaveDispatchSGPRs: 0
+; CHECK-NEXT: numWaveDispatchVGPRs: 0
; CHECK-NEXT: scratchRSrcReg: '$sgpr96_sgpr97_sgpr98_sgpr99'
; CHECK-NEXT: frameOffsetReg: '$fp_reg'
; CHECK-NEXT: stackPtrOffsetReg: '$sgpr32'
@@ -80,6 +82,8 @@ define amdgpu_kernel void @kernel(i32 %arg0, i64 %arg1, <16 x i32> %arg2) {
; CHECK-NEXT: waveLimiter: false
; CHECK-NEXT: hasSpilledSGPRs: false
; CHECK-NEXT: hasSpilledVGPRs: false
+; CHECK-NEXT: numWaveDispatchSGPRs: 3
+; CHECK-NEXT: numWaveDispatchVGPRs: 1
; CHECK-NEXT: scratchRSrcReg: '$sgpr96_sgpr97_sgpr98_sgpr99'
; CHECK-NEXT: frameOffsetReg: '$fp_reg'
; CHECK-NEXT: stackPtrOffsetReg: '$sgpr32'
@@ -144,6 +148,8 @@ define amdgpu_ps void @gds_size_shader(i32 %arg0, i32 inreg %arg1) #5 {
; CHECK-NEXT: waveLimiter: false
; CHECK-NEXT: hasSpilledSGPRs: false
; CHECK-NEXT: hasSpilledVGPRs: false
+; CHECK-NEXT: numWaveDispatchSGPRs: 16
+; CHECK-NEXT: numWaveDispatchVGPRs: 0
; CHECK-NEXT: scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
; CHECK-NEXT: frameOffsetReg: '$sgpr33'
; CHECK-NEXT: stackPtrOffsetReg: '$sgpr32'
@@ -200,6 +206,8 @@ define void @function() {
; CHECK-NEXT: waveLimiter: false
; CHECK-NEXT: hasSpilledSGPRs: false
; CHECK-NEXT: hasSpilledVGPRs: false
+; CHECK-NEXT: numWaveDispatchSGPRs: 16
+; CHECK-NEXT: numWaveDispatchVGPRs: 0
; CHECK-NEXT: scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
; CHECK-NEXT: frameOffsetReg: '$sgpr33'
; CHECK-NEXT: stackPtrOffsetReg: '$sgpr32'
diff --git a/llvm/test/CodeGen/NVPTX/frameindex-lifetime.ll b/llvm/test/CodeGen/NVPTX/frameindex-lifetime.ll
index 4265553..9c564ff 100644
--- a/llvm/test/CodeGen/NVPTX/frameindex-lifetime.ll
+++ b/llvm/test/CodeGen/NVPTX/frameindex-lifetime.ll
@@ -44,8 +44,8 @@ declare void @bar(ptr)
define void @foo() {
%p = alloca i32
- call void @llvm.lifetime.start(i64 4, ptr %p)
+ call void @llvm.lifetime.start(ptr %p)
call void @bar(ptr %p)
- call void @llvm.lifetime.end(i64 4, ptr %p)
+ call void @llvm.lifetime.end(ptr %p)
ret void
}
diff --git a/llvm/test/CodeGen/NVPTX/prefetch-inferas-test.ll b/llvm/test/CodeGen/NVPTX/prefetch-inferas-test.ll
new file mode 100644
index 0000000..3efe9be
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/prefetch-inferas-test.ll
@@ -0,0 +1,80 @@
+; RUN: opt < %s -S -passes=infer-address-spaces | FileCheck %s --check-prefix=INFER
+; RUN: llc < %s -mtriple=nvptx64 -mcpu=sm_90 -mattr=+ptx80 | FileCheck %s --check-prefix=PTX
+; RUN: %if ptxas-12.3 %{ llc < %s -mtriple=nvptx64 -mcpu=sm_90 -mattr=+ptx80 | %ptxas-verify -arch=sm_90 %}
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64"
+target triple = "nvptx64-unknown-unknown"
+
+@constant_tensormap = addrspace(4) global [64 x i8] zeroinitializer, align 64
+
+; Inference from const address space
+define void @test_infer_const_from_cast() {
+; INFER-LABEL: @test_infer_const_from_cast
+; INFER: call void @llvm.nvvm.prefetch.tensormap.p4(ptr addrspace(4) @constant_tensormap)
+; BOTH: call void @llvm.nvvm.prefetch.tensormap.p4(ptr addrspace(4) @constant_tensormap)
+; PTX-LABEL: .visible .func test_infer_const_from_cast(
+; PTX: mov.b64 %rd{{[0-9]+}}, constant_tensormap;
+; PTX: cvta.const.u64 %rd{{[0-9]+}}, %rd{{[0-9]+}};
+; PTX: prefetch.tensormap [%rd{{[0-9]+}}];
+entry:
+ %casted = addrspacecast ptr addrspace(4) @constant_tensormap to ptr
+ call void @llvm.nvvm.prefetch.tensormap.p0(ptr %casted)
+ ret void
+}
+
+; Cast from Const space to Generic
+define void @test_const_to_generic_cast(ptr addrspace(4) %const_ptr) {
+; INFER-LABEL: @test_const_to_generic_cast
+; INFER: call void @llvm.nvvm.prefetch.tensormap.p4(ptr addrspace(4) %const_ptr)
+; PTX-LABEL: .visible .func test_const_to_generic_cast(
+; PTX: prefetch.const.tensormap [%rd{{[0-9]+}}];
+entry:
+ %cast = addrspacecast ptr addrspace(4) %const_ptr to ptr
+ call void @llvm.nvvm.prefetch.tensormap.p0(ptr %cast)
+ ret void
+}
+
+; No inference possible
+define void @test_no_inference_possible(ptr %generic_ptr) {
+; INFER-LABEL: @test_no_inference_possible
+; INFER: call void @llvm.nvvm.prefetch.tensormap.p0(ptr %generic_ptr)
+; PTX-LABEL: .visible .func test_no_inference_possible(
+; PTX: prefetch.tensormap [%rd{{[0-9]+}}];
+entry:
+ call void @llvm.nvvm.prefetch.tensormap.p0(ptr %generic_ptr)
+ ret void
+}
+
+; Cast from Parameter space to Generic
+define void @test_param_to_generic_cast(ptr addrspace(101) %param_ptr) {
+; INFER-LABEL: @test_param_to_generic_cast
+; INFER: call void @llvm.nvvm.prefetch.tensormap.p101(ptr addrspace(101) %param_ptr)
+; PTX-LABEL: .visible .func test_param_to_generic_cast(
+; PTX: prefetch.param.tensormap [%rd{{[0-9]+}}];
+entry:
+ %cast = addrspacecast ptr addrspace(101) %param_ptr to ptr
+ call void @llvm.nvvm.prefetch.tensormap.p0(ptr %cast)
+ ret void
+}
+
+; Multiple casts in sequence
+define void @test_infer_through_multiple_casts() {
+; INFER-LABEL: @test_infer_through_multiple_casts
+; INFER: call void @llvm.nvvm.prefetch.tensormap.p4(ptr addrspace(4) @constant_tensormap)
+; PTX-LABEL: .visible .func test_infer_through_multiple_casts(
+; PTX: mov.b64 %rd{{[0-9]+}}, constant_tensormap;
+; PTX: cvta.const.u64 %rd{{[0-9]+}}, %rd{{[0-9]+}};
+; PTX: prefetch.tensormap [%rd{{[0-9]+}}];
+entry:
+ %cast1 = addrspacecast ptr addrspace(4) @constant_tensormap to ptr
+ %cast2 = addrspacecast ptr %cast1 to ptr addrspace(4)
+ %cast3 = addrspacecast ptr addrspace(4) %cast2 to ptr
+ call void @llvm.nvvm.prefetch.tensormap(ptr %cast3)
+ ret void
+}
+
+declare void @llvm.nvvm.prefetch.tensormap.p0(ptr)
+declare void @llvm.nvvm.prefetch.tensormap.p4(ptr addrspace(4))
+declare void @llvm.nvvm.prefetch.tensormap.p101(ptr addrspace(101))
+
+
diff --git a/llvm/test/CodeGen/NVPTX/prefetch.ll b/llvm/test/CodeGen/NVPTX/prefetch.ll
index a64e4fe..862e26d 100644
--- a/llvm/test/CodeGen/NVPTX/prefetch.ll
+++ b/llvm/test/CodeGen/NVPTX/prefetch.ll
@@ -12,6 +12,10 @@ declare void @llvm.nvvm.prefetch.local.L2(ptr addrspace(5) %local_ptr)
declare void @llvm.nvvm.prefetch.L1(ptr %ptr)
declare void @llvm.nvvm.prefetch.L2(ptr %ptr)
+declare void @llvm.nvvm.prefetch.tensormap.p0(ptr %ptr)
+declare void @llvm.nvvm.prefetch.tensormap.p4(ptr addrspace(4) %const_ptr)
+declare void @llvm.nvvm.prefetch.tensormap.p101(ptr addrspace(101) %param_ptr)
+
declare void @llvm.nvvm.prefetch.global.L2.evict.normal(ptr addrspace(1) %global_ptr)
declare void @llvm.nvvm.prefetch.global.L2.evict.last(ptr addrspace(1) %global_ptr)
@@ -78,4 +82,43 @@ define void @prefetchu_l1(ptr %ptr) {
; CHECK-PTX64-NEXT: ret;
tail call void @llvm.nvvm.prefetchu.L1(ptr %ptr)
ret void
+}
+
+define void @prefetch_tensormap(ptr %ptr) {
+; CHECK-PTX64-LABEL: prefetch_tensormap(
+; CHECK-PTX64: {
+; CHECK-PTX64-NEXT: .reg .b64 %rd<2>;
+; CHECK-PTX64-EMPTY:
+; CHECK-PTX64-NEXT: // %bb.0:
+; CHECK-PTX64-NEXT: ld.param.b64 %rd1, [prefetch_tensormap_param_0];
+; CHECK-PTX64-NEXT: prefetch.tensormap [%rd1];
+; CHECK-PTX64-NEXT: ret;
+ tail call void @llvm.nvvm.prefetch.tensormap.p0(ptr %ptr)
+ ret void
+}
+
+define void @prefetch_const_tensormap(ptr addrspace(4) %const_ptr) {
+; CHECK-PTX64-LABEL: prefetch_const_tensormap(
+; CHECK-PTX64: {
+; CHECK-PTX64-NEXT: .reg .b64 %rd<2>;
+; CHECK-PTX64-EMPTY:
+; CHECK-PTX64-NEXT: // %bb.0:
+; CHECK-PTX64-NEXT: ld.param.b64 %rd1, [prefetch_const_tensormap_param_0];
+; CHECK-PTX64-NEXT: prefetch.const.tensormap [%rd1];
+; CHECK-PTX64-NEXT: ret;
+ tail call void @llvm.nvvm.prefetch.tensormap.p4(ptr addrspace(4) %const_ptr)
+ ret void
+}
+
+define void @prefetch_param_tensormap(ptr addrspace(101) %param_ptr) {
+; CHECK-PTX64-LABEL: prefetch_param_tensormap(
+; CHECK-PTX64: {
+; CHECK-PTX64-NEXT: .reg .b64 %rd<2>;
+; CHECK-PTX64-EMPTY:
+; CHECK-PTX64-NEXT: // %bb.0:
+; CHECK-PTX64-NEXT: ld.param.b64 %rd1, [prefetch_param_tensormap_param_0];
+; CHECK-PTX64-NEXT: prefetch.param.tensormap [%rd1];
+; CHECK-PTX64-NEXT: ret;
+ tail call void @llvm.nvvm.prefetch.tensormap.p101(ptr addrspace(101) %param_ptr)
+ ret void
}
\ No newline at end of file
diff --git a/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll b/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll
index 92cb51b..94c2637 100644
--- a/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll
+++ b/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll
@@ -2,19 +2,18 @@
; RUN: llc < %s -mcpu=sm_80 -mattr=+ptx70 -O0 \
; RUN: -disable-post-ra -verify-machineinstrs \
; RUN: | FileCheck -check-prefixes CHECK,CHECK-SM80 %s
-; RUN: %if ptxas-12.8 %{ llc < %s -mcpu=sm_80 -mattr=+ptx70 -O0 \
+; RUN: %if ptxas-12.9 %{ llc < %s -mcpu=sm_80 -mattr=+ptx70 -O0 \
; RUN: -disable-post-ra -verify-machineinstrs \
; RUN: | %ptxas-verify -arch=sm_80 %}
-; RUN: llc < %s -mcpu=sm_100 -mattr=+ptx87 -O0 \
+; RUN: llc < %s -mcpu=sm_100 -mattr=+ptx88 -O0 \
; RUN: -disable-post-ra -verify-machineinstrs \
; RUN: | FileCheck -check-prefixes CHECK,CHECK-SM100 %s
-; RUN: %if ptxas-12.8 %{ llc < %s -mcpu=sm_100 -mattr=+ptx87 -O0 \
+; RUN: %if ptxas-12.9 %{ llc < %s -mcpu=sm_100 -mattr=+ptx88 -O0 \
; RUN: -disable-post-ra -verify-machineinstrs \
; RUN: | %ptxas-verify -arch=sm_100 %}
target triple = "nvptx64-nvidia-cuda"
target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
-; Check straight line reduction.
define half @reduce_fadd_half(<8 x half> %in) {
; CHECK-LABEL: reduce_fadd_half(
; CHECK: {
@@ -43,45 +42,22 @@ define half @reduce_fadd_half(<8 x half> %in) {
}
define half @reduce_fadd_half_reassoc(<8 x half> %in) {
-; CHECK-SM80-LABEL: reduce_fadd_half_reassoc(
-; CHECK-SM80: {
-; CHECK-SM80-NEXT: .reg .b16 %rs<6>;
-; CHECK-SM80-NEXT: .reg .b32 %r<10>;
-; CHECK-SM80-EMPTY:
-; CHECK-SM80-NEXT: // %bb.0:
-; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fadd_half_reassoc_param_0];
-; CHECK-SM80-NEXT: add.rn.f16x2 %r5, %r2, %r4;
-; CHECK-SM80-NEXT: add.rn.f16x2 %r6, %r1, %r3;
-; CHECK-SM80-NEXT: add.rn.f16x2 %r7, %r6, %r5;
-; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs1}, %r7; }
-; CHECK-SM80-NEXT: // implicit-def: %rs2
-; CHECK-SM80-NEXT: mov.b32 %r8, {%rs1, %rs2};
-; CHECK-SM80-NEXT: add.rn.f16x2 %r9, %r7, %r8;
-; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {%rs3, tmp}, %r9; }
-; CHECK-SM80-NEXT: mov.b16 %rs4, 0x0000;
-; CHECK-SM80-NEXT: add.rn.f16 %rs5, %rs3, %rs4;
-; CHECK-SM80-NEXT: st.param.b16 [func_retval0], %rs5;
-; CHECK-SM80-NEXT: ret;
-;
-; CHECK-SM100-LABEL: reduce_fadd_half_reassoc(
-; CHECK-SM100: {
-; CHECK-SM100-NEXT: .reg .b16 %rs<6>;
-; CHECK-SM100-NEXT: .reg .b32 %r<10>;
-; CHECK-SM100-EMPTY:
-; CHECK-SM100-NEXT: // %bb.0:
-; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fadd_half_reassoc_param_0];
-; CHECK-SM100-NEXT: add.rn.f16x2 %r5, %r2, %r4;
-; CHECK-SM100-NEXT: add.rn.f16x2 %r6, %r1, %r3;
-; CHECK-SM100-NEXT: add.rn.f16x2 %r7, %r6, %r5;
-; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7;
-; CHECK-SM100-NEXT: // implicit-def: %rs2
-; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2};
-; CHECK-SM100-NEXT: add.rn.f16x2 %r9, %r7, %r8;
-; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9;
-; CHECK-SM100-NEXT: mov.b16 %rs4, 0x0000;
-; CHECK-SM100-NEXT: add.rn.f16 %rs5, %rs3, %rs4;
-; CHECK-SM100-NEXT: st.param.b16 [func_retval0], %rs5;
-; CHECK-SM100-NEXT: ret;
+; CHECK-LABEL: reduce_fadd_half_reassoc(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<6>;
+; CHECK-NEXT: .reg .b32 %r<8>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fadd_half_reassoc_param_0];
+; CHECK-NEXT: add.rn.f16x2 %r5, %r2, %r4;
+; CHECK-NEXT: add.rn.f16x2 %r6, %r1, %r3;
+; CHECK-NEXT: add.rn.f16x2 %r7, %r6, %r5;
+; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r7;
+; CHECK-NEXT: add.rn.f16 %rs3, %rs1, %rs2;
+; CHECK-NEXT: mov.b16 %rs4, 0x0000;
+; CHECK-NEXT: add.rn.f16 %rs5, %rs3, %rs4;
+; CHECK-NEXT: st.param.b16 [func_retval0], %rs5;
+; CHECK-NEXT: ret;
%res = call reassoc half @llvm.vector.reduce.fadd(half 0.0, <8 x half> %in)
ret half %res
}
@@ -109,7 +85,6 @@ define half @reduce_fadd_half_reassoc_nonpow2(<7 x half> %in) {
ret half %res
}
-; Check straight-line reduction.
define float @reduce_fadd_float(<8 x float> %in) {
; CHECK-LABEL: reduce_fadd_float(
; CHECK: {
@@ -148,15 +123,15 @@ define float @reduce_fadd_float_reassoc(<8 x float> %in) {
; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fadd_float_reassoc_param_0];
; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd4;
; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2;
-; CHECK-SM80-NEXT: add.rn.f32 %r5, %r3, %r1;
+; CHECK-SM80-NEXT: add.rn.f32 %r5, %r4, %r2;
; CHECK-SM80-NEXT: mov.b64 {%r6, %r7}, %rd3;
; CHECK-SM80-NEXT: mov.b64 {%r8, %r9}, %rd1;
-; CHECK-SM80-NEXT: add.rn.f32 %r10, %r8, %r6;
-; CHECK-SM80-NEXT: add.rn.f32 %r11, %r4, %r2;
-; CHECK-SM80-NEXT: add.rn.f32 %r12, %r9, %r7;
-; CHECK-SM80-NEXT: add.rn.f32 %r13, %r12, %r11;
-; CHECK-SM80-NEXT: add.rn.f32 %r14, %r10, %r5;
-; CHECK-SM80-NEXT: add.rn.f32 %r15, %r14, %r13;
+; CHECK-SM80-NEXT: add.rn.f32 %r10, %r9, %r7;
+; CHECK-SM80-NEXT: add.rn.f32 %r11, %r10, %r5;
+; CHECK-SM80-NEXT: add.rn.f32 %r12, %r3, %r1;
+; CHECK-SM80-NEXT: add.rn.f32 %r13, %r8, %r6;
+; CHECK-SM80-NEXT: add.rn.f32 %r14, %r13, %r12;
+; CHECK-SM80-NEXT: add.rn.f32 %r15, %r14, %r11;
; CHECK-SM80-NEXT: add.rn.f32 %r16, %r15, 0f00000000;
; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r16;
; CHECK-SM80-NEXT: ret;
@@ -164,7 +139,7 @@ define float @reduce_fadd_float_reassoc(<8 x float> %in) {
; CHECK-SM100-LABEL: reduce_fadd_float_reassoc(
; CHECK-SM100: {
; CHECK-SM100-NEXT: .reg .b32 %r<5>;
-; CHECK-SM100-NEXT: .reg .b64 %rd<10>;
+; CHECK-SM100-NEXT: .reg .b64 %rd<8>;
; CHECK-SM100-EMPTY:
; CHECK-SM100-NEXT: // %bb.0:
; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fadd_float_reassoc_param_0+16];
@@ -172,11 +147,8 @@ define float @reduce_fadd_float_reassoc(<8 x float> %in) {
; CHECK-SM100-NEXT: add.rn.f32x2 %rd5, %rd2, %rd4;
; CHECK-SM100-NEXT: add.rn.f32x2 %rd6, %rd1, %rd3;
; CHECK-SM100-NEXT: add.rn.f32x2 %rd7, %rd6, %rd5;
-; CHECK-SM100-NEXT: mov.b64 {_, %r1}, %rd7;
-; CHECK-SM100-NEXT: // implicit-def: %r2
-; CHECK-SM100-NEXT: mov.b64 %rd8, {%r1, %r2};
-; CHECK-SM100-NEXT: add.rn.f32x2 %rd9, %rd7, %rd8;
-; CHECK-SM100-NEXT: mov.b64 {%r3, _}, %rd9;
+; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd7;
+; CHECK-SM100-NEXT: add.rn.f32 %r3, %r1, %r2;
; CHECK-SM100-NEXT: add.rn.f32 %r4, %r3, 0f00000000;
; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r4;
; CHECK-SM100-NEXT: ret;
@@ -229,7 +201,6 @@ define float @reduce_fadd_float_reassoc_nonpow2(<7 x float> %in) {
ret float %res
}
-; Check straight line reduction.
define half @reduce_fmul_half(<8 x half> %in) {
; CHECK-LABEL: reduce_fmul_half(
; CHECK: {
@@ -256,41 +227,20 @@ define half @reduce_fmul_half(<8 x half> %in) {
}
define half @reduce_fmul_half_reassoc(<8 x half> %in) {
-; CHECK-SM80-LABEL: reduce_fmul_half_reassoc(
-; CHECK-SM80: {
-; CHECK-SM80-NEXT: .reg .b16 %rs<4>;
-; CHECK-SM80-NEXT: .reg .b32 %r<10>;
-; CHECK-SM80-EMPTY:
-; CHECK-SM80-NEXT: // %bb.0:
-; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmul_half_reassoc_param_0];
-; CHECK-SM80-NEXT: mul.rn.f16x2 %r5, %r2, %r4;
-; CHECK-SM80-NEXT: mul.rn.f16x2 %r6, %r1, %r3;
-; CHECK-SM80-NEXT: mul.rn.f16x2 %r7, %r6, %r5;
-; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs1}, %r7; }
-; CHECK-SM80-NEXT: // implicit-def: %rs2
-; CHECK-SM80-NEXT: mov.b32 %r8, {%rs1, %rs2};
-; CHECK-SM80-NEXT: mul.rn.f16x2 %r9, %r7, %r8;
-; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {%rs3, tmp}, %r9; }
-; CHECK-SM80-NEXT: st.param.b16 [func_retval0], %rs3;
-; CHECK-SM80-NEXT: ret;
-;
-; CHECK-SM100-LABEL: reduce_fmul_half_reassoc(
-; CHECK-SM100: {
-; CHECK-SM100-NEXT: .reg .b16 %rs<4>;
-; CHECK-SM100-NEXT: .reg .b32 %r<10>;
-; CHECK-SM100-EMPTY:
-; CHECK-SM100-NEXT: // %bb.0:
-; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmul_half_reassoc_param_0];
-; CHECK-SM100-NEXT: mul.rn.f16x2 %r5, %r2, %r4;
-; CHECK-SM100-NEXT: mul.rn.f16x2 %r6, %r1, %r3;
-; CHECK-SM100-NEXT: mul.rn.f16x2 %r7, %r6, %r5;
-; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7;
-; CHECK-SM100-NEXT: // implicit-def: %rs2
-; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2};
-; CHECK-SM100-NEXT: mul.rn.f16x2 %r9, %r7, %r8;
-; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9;
-; CHECK-SM100-NEXT: st.param.b16 [func_retval0], %rs3;
-; CHECK-SM100-NEXT: ret;
+; CHECK-LABEL: reduce_fmul_half_reassoc(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<4>;
+; CHECK-NEXT: .reg .b32 %r<8>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmul_half_reassoc_param_0];
+; CHECK-NEXT: mul.rn.f16x2 %r5, %r2, %r4;
+; CHECK-NEXT: mul.rn.f16x2 %r6, %r1, %r3;
+; CHECK-NEXT: mul.rn.f16x2 %r7, %r6, %r5;
+; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r7;
+; CHECK-NEXT: mul.rn.f16 %rs3, %rs1, %rs2;
+; CHECK-NEXT: st.param.b16 [func_retval0], %rs3;
+; CHECK-NEXT: ret;
%res = call reassoc half @llvm.vector.reduce.fmul(half 1.0, <8 x half> %in)
ret half %res
}
@@ -321,7 +271,6 @@ define half @reduce_fmul_half_reassoc_nonpow2(<7 x half> %in) {
ret half %res
}
-; Check straight-line reduction.
define float @reduce_fmul_float(<8 x float> %in) {
; CHECK-LABEL: reduce_fmul_float(
; CHECK: {
@@ -359,22 +308,22 @@ define float @reduce_fmul_float_reassoc(<8 x float> %in) {
; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmul_float_reassoc_param_0];
; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd4;
; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2;
-; CHECK-SM80-NEXT: mul.rn.f32 %r5, %r3, %r1;
+; CHECK-SM80-NEXT: mul.rn.f32 %r5, %r4, %r2;
; CHECK-SM80-NEXT: mov.b64 {%r6, %r7}, %rd3;
; CHECK-SM80-NEXT: mov.b64 {%r8, %r9}, %rd1;
-; CHECK-SM80-NEXT: mul.rn.f32 %r10, %r8, %r6;
-; CHECK-SM80-NEXT: mul.rn.f32 %r11, %r4, %r2;
-; CHECK-SM80-NEXT: mul.rn.f32 %r12, %r9, %r7;
-; CHECK-SM80-NEXT: mul.rn.f32 %r13, %r12, %r11;
-; CHECK-SM80-NEXT: mul.rn.f32 %r14, %r10, %r5;
-; CHECK-SM80-NEXT: mul.rn.f32 %r15, %r14, %r13;
+; CHECK-SM80-NEXT: mul.rn.f32 %r10, %r9, %r7;
+; CHECK-SM80-NEXT: mul.rn.f32 %r11, %r10, %r5;
+; CHECK-SM80-NEXT: mul.rn.f32 %r12, %r3, %r1;
+; CHECK-SM80-NEXT: mul.rn.f32 %r13, %r8, %r6;
+; CHECK-SM80-NEXT: mul.rn.f32 %r14, %r13, %r12;
+; CHECK-SM80-NEXT: mul.rn.f32 %r15, %r14, %r11;
; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15;
; CHECK-SM80-NEXT: ret;
;
; CHECK-SM100-LABEL: reduce_fmul_float_reassoc(
; CHECK-SM100: {
; CHECK-SM100-NEXT: .reg .b32 %r<4>;
-; CHECK-SM100-NEXT: .reg .b64 %rd<10>;
+; CHECK-SM100-NEXT: .reg .b64 %rd<8>;
; CHECK-SM100-EMPTY:
; CHECK-SM100-NEXT: // %bb.0:
; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmul_float_reassoc_param_0+16];
@@ -382,11 +331,8 @@ define float @reduce_fmul_float_reassoc(<8 x float> %in) {
; CHECK-SM100-NEXT: mul.rn.f32x2 %rd5, %rd2, %rd4;
; CHECK-SM100-NEXT: mul.rn.f32x2 %rd6, %rd1, %rd3;
; CHECK-SM100-NEXT: mul.rn.f32x2 %rd7, %rd6, %rd5;
-; CHECK-SM100-NEXT: mov.b64 {_, %r1}, %rd7;
-; CHECK-SM100-NEXT: // implicit-def: %r2
-; CHECK-SM100-NEXT: mov.b64 %rd8, {%r1, %r2};
-; CHECK-SM100-NEXT: mul.rn.f32x2 %rd9, %rd7, %rd8;
-; CHECK-SM100-NEXT: mov.b64 {%r3, _}, %rd9;
+; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd7;
+; CHECK-SM100-NEXT: mul.rn.f32 %r3, %r1, %r2;
; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r3;
; CHECK-SM100-NEXT: ret;
%res = call reassoc float @llvm.vector.reduce.fmul(float 1.0, <8 x float> %in)
@@ -436,7 +382,6 @@ define float @reduce_fmul_float_reassoc_nonpow2(<7 x float> %in) {
ret float %res
}
-; Check straight line reduction.
define half @reduce_fmax_half(<8 x half> %in) {
; CHECK-LABEL: reduce_fmax_half(
; CHECK: {
@@ -501,84 +446,256 @@ define half @reduce_fmax_half_reassoc_nonpow2(<7 x half> %in) {
ret half %res
}
-; Check straight-line reduction.
-define float @reduce_fmax_float(<8 x float> %in) {
-;
-; CHECK-LABEL: reduce_fmax_float(
+define half @reduce_fmax_half_nnan(<8 x half> %in) {
+; CHECK-LABEL: reduce_fmax_half_nnan(
; CHECK: {
-; CHECK-NEXT: .reg .b32 %r<16>;
-; CHECK-NEXT: .reg .b64 %rd<5>;
+; CHECK-NEXT: .reg .b16 %rs<4>;
+; CHECK-NEXT: .reg .b32 %r<8>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmax_float_param_0+16];
-; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_param_0];
-; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4;
-; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2;
-; CHECK-NEXT: max.f32 %r5, %r4, %r2;
-; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3;
-; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1;
-; CHECK-NEXT: max.f32 %r10, %r9, %r7;
-; CHECK-NEXT: max.f32 %r11, %r10, %r5;
-; CHECK-NEXT: max.f32 %r12, %r3, %r1;
-; CHECK-NEXT: max.f32 %r13, %r8, %r6;
-; CHECK-NEXT: max.f32 %r14, %r13, %r12;
-; CHECK-NEXT: max.f32 %r15, %r14, %r11;
-; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
+; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmax_half_nnan_param_0];
+; CHECK-NEXT: max.f16x2 %r5, %r2, %r4;
+; CHECK-NEXT: max.f16x2 %r6, %r1, %r3;
+; CHECK-NEXT: max.f16x2 %r7, %r6, %r5;
+; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r7;
+; CHECK-NEXT: max.f16 %rs3, %rs1, %rs2;
+; CHECK-NEXT: st.param.b16 [func_retval0], %rs3;
; CHECK-NEXT: ret;
- %res = call float @llvm.vector.reduce.fmax(<8 x float> %in)
- ret float %res
+ %res = call nnan half @llvm.vector.reduce.fmax(<8 x half> %in)
+ ret half %res
}
-define float @reduce_fmax_float_reassoc(<8 x float> %in) {
-;
-; CHECK-LABEL: reduce_fmax_float_reassoc(
+define half @reduce_fmax_half_nnan_nonpow2(<7 x half> %in) {
+; CHECK-LABEL: reduce_fmax_half_nnan_nonpow2(
; CHECK: {
-; CHECK-NEXT: .reg .b32 %r<16>;
-; CHECK-NEXT: .reg .b64 %rd<5>;
+; CHECK-NEXT: .reg .b16 %rs<12>;
+; CHECK-NEXT: .reg .b32 %r<8>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmax_float_reassoc_param_0+16];
-; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_reassoc_param_0];
-; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4;
-; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2;
-; CHECK-NEXT: max.f32 %r5, %r4, %r2;
-; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3;
-; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1;
-; CHECK-NEXT: max.f32 %r10, %r9, %r7;
-; CHECK-NEXT: max.f32 %r11, %r10, %r5;
-; CHECK-NEXT: max.f32 %r12, %r3, %r1;
-; CHECK-NEXT: max.f32 %r13, %r8, %r6;
-; CHECK-NEXT: max.f32 %r14, %r13, %r12;
-; CHECK-NEXT: max.f32 %r15, %r14, %r11;
-; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
+; CHECK-NEXT: ld.param.b32 %r1, [reduce_fmax_half_nnan_nonpow2_param_0+8];
+; CHECK-NEXT: mov.b32 {%rs5, %rs6}, %r1;
+; CHECK-NEXT: ld.param.v2.b32 {%r2, %r3}, [reduce_fmax_half_nnan_nonpow2_param_0];
+; CHECK-NEXT: mov.b32 {%rs3, %rs4}, %r3;
+; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r2;
+; CHECK-NEXT: ld.param.b16 %rs7, [reduce_fmax_half_nnan_nonpow2_param_0+12];
+; CHECK-NEXT: max.f16x2 %r4, %r2, %r1;
+; CHECK-NEXT: mov.b16 %rs8, 0xFC00;
+; CHECK-NEXT: mov.b32 %r5, {%rs7, %rs8};
+; CHECK-NEXT: max.f16x2 %r6, %r3, %r5;
+; CHECK-NEXT: max.f16x2 %r7, %r4, %r6;
+; CHECK-NEXT: mov.b32 {%rs9, %rs10}, %r7;
+; CHECK-NEXT: max.f16 %rs11, %rs9, %rs10;
+; CHECK-NEXT: st.param.b16 [func_retval0], %rs11;
; CHECK-NEXT: ret;
+ %res = call nnan half @llvm.vector.reduce.fmax(<7 x half> %in)
+ ret half %res
+}
+
+define float @reduce_fmax_float(<8 x float> %in) {
+; CHECK-SM80-LABEL: reduce_fmax_float(
+; CHECK-SM80: {
+; CHECK-SM80-NEXT: .reg .b32 %r<16>;
+; CHECK-SM80-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT: // %bb.0:
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_param_0];
+; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1;
+; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2;
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmax_float_param_0+16];
+; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3;
+; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4;
+; CHECK-SM80-NEXT: max.f32 %r9, %r7, %r8;
+; CHECK-SM80-NEXT: max.f32 %r10, %r5, %r6;
+; CHECK-SM80-NEXT: max.f32 %r11, %r10, %r9;
+; CHECK-SM80-NEXT: max.f32 %r12, %r3, %r4;
+; CHECK-SM80-NEXT: max.f32 %r13, %r1, %r2;
+; CHECK-SM80-NEXT: max.f32 %r14, %r13, %r12;
+; CHECK-SM80-NEXT: max.f32 %r15, %r14, %r11;
+; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15;
+; CHECK-SM80-NEXT: ret;
+;
+; CHECK-SM100-LABEL: reduce_fmax_float(
+; CHECK-SM100: {
+; CHECK-SM100-NEXT: .reg .b32 %r<13>;
+; CHECK-SM100-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT: // %bb.0:
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmax_float_param_0+16];
+; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4;
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_param_0];
+; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3;
+; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2;
+; CHECK-SM100-NEXT: max.f32 %r9, %r8, %r5, %r6;
+; CHECK-SM100-NEXT: max.f32 %r10, %r3, %r4, %r7;
+; CHECK-SM100-NEXT: max.f32 %r11, %r10, %r9, %r1;
+; CHECK-SM100-NEXT: max.f32 %r12, %r11, %r2;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12;
+; CHECK-SM100-NEXT: ret;
+ %res = call float @llvm.vector.reduce.fmax(<8 x float> %in)
+ ret float %res
+}
+
+define float @reduce_fmax_float_reassoc(<8 x float> %in) {
+; CHECK-SM80-LABEL: reduce_fmax_float_reassoc(
+; CHECK-SM80: {
+; CHECK-SM80-NEXT: .reg .b32 %r<16>;
+; CHECK-SM80-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT: // %bb.0:
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_reassoc_param_0];
+; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1;
+; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2;
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmax_float_reassoc_param_0+16];
+; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3;
+; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4;
+; CHECK-SM80-NEXT: max.f32 %r9, %r7, %r8;
+; CHECK-SM80-NEXT: max.f32 %r10, %r5, %r6;
+; CHECK-SM80-NEXT: max.f32 %r11, %r10, %r9;
+; CHECK-SM80-NEXT: max.f32 %r12, %r3, %r4;
+; CHECK-SM80-NEXT: max.f32 %r13, %r1, %r2;
+; CHECK-SM80-NEXT: max.f32 %r14, %r13, %r12;
+; CHECK-SM80-NEXT: max.f32 %r15, %r14, %r11;
+; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15;
+; CHECK-SM80-NEXT: ret;
+;
+; CHECK-SM100-LABEL: reduce_fmax_float_reassoc(
+; CHECK-SM100: {
+; CHECK-SM100-NEXT: .reg .b32 %r<13>;
+; CHECK-SM100-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT: // %bb.0:
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmax_float_reassoc_param_0+16];
+; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4;
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_reassoc_param_0];
+; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3;
+; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2;
+; CHECK-SM100-NEXT: max.f32 %r9, %r8, %r5, %r6;
+; CHECK-SM100-NEXT: max.f32 %r10, %r3, %r4, %r7;
+; CHECK-SM100-NEXT: max.f32 %r11, %r10, %r9, %r1;
+; CHECK-SM100-NEXT: max.f32 %r12, %r11, %r2;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12;
+; CHECK-SM100-NEXT: ret;
%res = call reassoc float @llvm.vector.reduce.fmax(<8 x float> %in)
ret float %res
}
define float @reduce_fmax_float_reassoc_nonpow2(<7 x float> %in) {
+; CHECK-SM80-LABEL: reduce_fmax_float_reassoc_nonpow2(
+; CHECK-SM80: {
+; CHECK-SM80-NEXT: .reg .b32 %r<14>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT: // %bb.0:
+; CHECK-SM80-NEXT: ld.param.b32 %r7, [reduce_fmax_float_reassoc_nonpow2_param_0+24];
+; CHECK-SM80-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmax_float_reassoc_nonpow2_param_0+16];
+; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmax_float_reassoc_nonpow2_param_0];
+; CHECK-SM80-NEXT: max.f32 %r8, %r5, %r6;
+; CHECK-SM80-NEXT: max.f32 %r9, %r8, %r7;
+; CHECK-SM80-NEXT: max.f32 %r10, %r3, %r4;
+; CHECK-SM80-NEXT: max.f32 %r11, %r1, %r2;
+; CHECK-SM80-NEXT: max.f32 %r12, %r11, %r10;
+; CHECK-SM80-NEXT: max.f32 %r13, %r12, %r9;
+; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r13;
+; CHECK-SM80-NEXT: ret;
;
-; CHECK-LABEL: reduce_fmax_float_reassoc_nonpow2(
-; CHECK: {
-; CHECK-NEXT: .reg .b32 %r<14>;
-; CHECK-EMPTY:
-; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.b32 %r7, [reduce_fmax_float_reassoc_nonpow2_param_0+24];
-; CHECK-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmax_float_reassoc_nonpow2_param_0+16];
-; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmax_float_reassoc_nonpow2_param_0];
-; CHECK-NEXT: max.f32 %r8, %r3, %r7;
-; CHECK-NEXT: max.f32 %r9, %r1, %r5;
-; CHECK-NEXT: max.f32 %r10, %r9, %r8;
-; CHECK-NEXT: max.f32 %r11, %r2, %r6;
-; CHECK-NEXT: max.f32 %r12, %r11, %r4;
-; CHECK-NEXT: max.f32 %r13, %r10, %r12;
-; CHECK-NEXT: st.param.b32 [func_retval0], %r13;
-; CHECK-NEXT: ret;
+; CHECK-SM100-LABEL: reduce_fmax_float_reassoc_nonpow2(
+; CHECK-SM100: {
+; CHECK-SM100-NEXT: .reg .b32 %r<11>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT: // %bb.0:
+; CHECK-SM100-NEXT: ld.param.b32 %r7, [reduce_fmax_float_reassoc_nonpow2_param_0+24];
+; CHECK-SM100-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmax_float_reassoc_nonpow2_param_0+16];
+; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmax_float_reassoc_nonpow2_param_0];
+; CHECK-SM100-NEXT: max.f32 %r8, %r4, %r5, %r6;
+; CHECK-SM100-NEXT: max.f32 %r9, %r1, %r2, %r3;
+; CHECK-SM100-NEXT: max.f32 %r10, %r9, %r8, %r7;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10;
+; CHECK-SM100-NEXT: ret;
%res = call reassoc float @llvm.vector.reduce.fmax(<7 x float> %in)
ret float %res
}
-; Check straight line reduction.
+define float @reduce_fmax_float_nnan(<8 x float> %in) {
+; CHECK-SM80-LABEL: reduce_fmax_float_nnan(
+; CHECK-SM80: {
+; CHECK-SM80-NEXT: .reg .b32 %r<16>;
+; CHECK-SM80-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT: // %bb.0:
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_nnan_param_0];
+; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1;
+; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2;
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmax_float_nnan_param_0+16];
+; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3;
+; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4;
+; CHECK-SM80-NEXT: max.f32 %r9, %r7, %r8;
+; CHECK-SM80-NEXT: max.f32 %r10, %r5, %r6;
+; CHECK-SM80-NEXT: max.f32 %r11, %r10, %r9;
+; CHECK-SM80-NEXT: max.f32 %r12, %r3, %r4;
+; CHECK-SM80-NEXT: max.f32 %r13, %r1, %r2;
+; CHECK-SM80-NEXT: max.f32 %r14, %r13, %r12;
+; CHECK-SM80-NEXT: max.f32 %r15, %r14, %r11;
+; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15;
+; CHECK-SM80-NEXT: ret;
+;
+; CHECK-SM100-LABEL: reduce_fmax_float_nnan(
+; CHECK-SM100: {
+; CHECK-SM100-NEXT: .reg .b32 %r<13>;
+; CHECK-SM100-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT: // %bb.0:
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmax_float_nnan_param_0+16];
+; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4;
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_nnan_param_0];
+; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3;
+; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2;
+; CHECK-SM100-NEXT: max.f32 %r9, %r8, %r5, %r6;
+; CHECK-SM100-NEXT: max.f32 %r10, %r3, %r4, %r7;
+; CHECK-SM100-NEXT: max.f32 %r11, %r10, %r9, %r1;
+; CHECK-SM100-NEXT: max.f32 %r12, %r11, %r2;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12;
+; CHECK-SM100-NEXT: ret;
+ %res = call nnan float @llvm.vector.reduce.fmax(<8 x float> %in)
+ ret float %res
+}
+
+define float @reduce_fmax_float_nnan_nonpow2(<7 x float> %in) {
+; CHECK-SM80-LABEL: reduce_fmax_float_nnan_nonpow2(
+; CHECK-SM80: {
+; CHECK-SM80-NEXT: .reg .b32 %r<14>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT: // %bb.0:
+; CHECK-SM80-NEXT: ld.param.b32 %r7, [reduce_fmax_float_nnan_nonpow2_param_0+24];
+; CHECK-SM80-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmax_float_nnan_nonpow2_param_0+16];
+; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmax_float_nnan_nonpow2_param_0];
+; CHECK-SM80-NEXT: max.f32 %r8, %r5, %r6;
+; CHECK-SM80-NEXT: max.f32 %r9, %r8, %r7;
+; CHECK-SM80-NEXT: max.f32 %r10, %r3, %r4;
+; CHECK-SM80-NEXT: max.f32 %r11, %r1, %r2;
+; CHECK-SM80-NEXT: max.f32 %r12, %r11, %r10;
+; CHECK-SM80-NEXT: max.f32 %r13, %r12, %r9;
+; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r13;
+; CHECK-SM80-NEXT: ret;
+;
+; CHECK-SM100-LABEL: reduce_fmax_float_nnan_nonpow2(
+; CHECK-SM100: {
+; CHECK-SM100-NEXT: .reg .b32 %r<11>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT: // %bb.0:
+; CHECK-SM100-NEXT: ld.param.b32 %r7, [reduce_fmax_float_nnan_nonpow2_param_0+24];
+; CHECK-SM100-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmax_float_nnan_nonpow2_param_0+16];
+; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmax_float_nnan_nonpow2_param_0];
+; CHECK-SM100-NEXT: max.f32 %r8, %r4, %r5, %r6;
+; CHECK-SM100-NEXT: max.f32 %r9, %r1, %r2, %r3;
+; CHECK-SM100-NEXT: max.f32 %r10, %r9, %r8, %r7;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10;
+; CHECK-SM100-NEXT: ret;
+ %res = call nnan float @llvm.vector.reduce.fmax(<7 x float> %in)
+ ret float %res
+}
+
define half @reduce_fmin_half(<8 x half> %in) {
; CHECK-LABEL: reduce_fmin_half(
; CHECK: {
@@ -643,84 +760,256 @@ define half @reduce_fmin_half_reassoc_nonpow2(<7 x half> %in) {
ret half %res
}
-; Check straight-line reduction.
-define float @reduce_fmin_float(<8 x float> %in) {
-;
-; CHECK-LABEL: reduce_fmin_float(
+define half @reduce_fmin_half_nnan(<8 x half> %in) {
+; CHECK-LABEL: reduce_fmin_half_nnan(
; CHECK: {
-; CHECK-NEXT: .reg .b32 %r<16>;
-; CHECK-NEXT: .reg .b64 %rd<5>;
+; CHECK-NEXT: .reg .b16 %rs<4>;
+; CHECK-NEXT: .reg .b32 %r<8>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmin_float_param_0+16];
-; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_param_0];
-; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4;
-; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2;
-; CHECK-NEXT: min.f32 %r5, %r4, %r2;
-; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3;
-; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1;
-; CHECK-NEXT: min.f32 %r10, %r9, %r7;
-; CHECK-NEXT: min.f32 %r11, %r10, %r5;
-; CHECK-NEXT: min.f32 %r12, %r3, %r1;
-; CHECK-NEXT: min.f32 %r13, %r8, %r6;
-; CHECK-NEXT: min.f32 %r14, %r13, %r12;
-; CHECK-NEXT: min.f32 %r15, %r14, %r11;
-; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
+; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmin_half_nnan_param_0];
+; CHECK-NEXT: min.f16x2 %r5, %r2, %r4;
+; CHECK-NEXT: min.f16x2 %r6, %r1, %r3;
+; CHECK-NEXT: min.f16x2 %r7, %r6, %r5;
+; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r7;
+; CHECK-NEXT: min.f16 %rs3, %rs1, %rs2;
+; CHECK-NEXT: st.param.b16 [func_retval0], %rs3;
; CHECK-NEXT: ret;
- %res = call float @llvm.vector.reduce.fmin(<8 x float> %in)
- ret float %res
+ %res = call nnan half @llvm.vector.reduce.fmin(<8 x half> %in)
+ ret half %res
}
-define float @reduce_fmin_float_reassoc(<8 x float> %in) {
-;
-; CHECK-LABEL: reduce_fmin_float_reassoc(
+define half @reduce_fmin_half_nnan_nonpow2(<7 x half> %in) {
+; CHECK-LABEL: reduce_fmin_half_nnan_nonpow2(
; CHECK: {
-; CHECK-NEXT: .reg .b32 %r<16>;
-; CHECK-NEXT: .reg .b64 %rd<5>;
+; CHECK-NEXT: .reg .b16 %rs<12>;
+; CHECK-NEXT: .reg .b32 %r<8>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmin_float_reassoc_param_0+16];
-; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_reassoc_param_0];
-; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4;
-; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2;
-; CHECK-NEXT: min.f32 %r5, %r4, %r2;
-; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3;
-; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1;
-; CHECK-NEXT: min.f32 %r10, %r9, %r7;
-; CHECK-NEXT: min.f32 %r11, %r10, %r5;
-; CHECK-NEXT: min.f32 %r12, %r3, %r1;
-; CHECK-NEXT: min.f32 %r13, %r8, %r6;
-; CHECK-NEXT: min.f32 %r14, %r13, %r12;
-; CHECK-NEXT: min.f32 %r15, %r14, %r11;
-; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
+; CHECK-NEXT: ld.param.b32 %r1, [reduce_fmin_half_nnan_nonpow2_param_0+8];
+; CHECK-NEXT: mov.b32 {%rs5, %rs6}, %r1;
+; CHECK-NEXT: ld.param.v2.b32 {%r2, %r3}, [reduce_fmin_half_nnan_nonpow2_param_0];
+; CHECK-NEXT: mov.b32 {%rs3, %rs4}, %r3;
+; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r2;
+; CHECK-NEXT: ld.param.b16 %rs7, [reduce_fmin_half_nnan_nonpow2_param_0+12];
+; CHECK-NEXT: min.f16x2 %r4, %r2, %r1;
+; CHECK-NEXT: mov.b16 %rs8, 0x7C00;
+; CHECK-NEXT: mov.b32 %r5, {%rs7, %rs8};
+; CHECK-NEXT: min.f16x2 %r6, %r3, %r5;
+; CHECK-NEXT: min.f16x2 %r7, %r4, %r6;
+; CHECK-NEXT: mov.b32 {%rs9, %rs10}, %r7;
+; CHECK-NEXT: min.f16 %rs11, %rs9, %rs10;
+; CHECK-NEXT: st.param.b16 [func_retval0], %rs11;
; CHECK-NEXT: ret;
+ %res = call nnan half @llvm.vector.reduce.fmin(<7 x half> %in)
+ ret half %res
+}
+
+define float @reduce_fmin_float(<8 x float> %in) {
+; CHECK-SM80-LABEL: reduce_fmin_float(
+; CHECK-SM80: {
+; CHECK-SM80-NEXT: .reg .b32 %r<16>;
+; CHECK-SM80-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT: // %bb.0:
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_param_0];
+; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1;
+; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2;
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmin_float_param_0+16];
+; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3;
+; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4;
+; CHECK-SM80-NEXT: min.f32 %r9, %r7, %r8;
+; CHECK-SM80-NEXT: min.f32 %r10, %r5, %r6;
+; CHECK-SM80-NEXT: min.f32 %r11, %r10, %r9;
+; CHECK-SM80-NEXT: min.f32 %r12, %r3, %r4;
+; CHECK-SM80-NEXT: min.f32 %r13, %r1, %r2;
+; CHECK-SM80-NEXT: min.f32 %r14, %r13, %r12;
+; CHECK-SM80-NEXT: min.f32 %r15, %r14, %r11;
+; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15;
+; CHECK-SM80-NEXT: ret;
+;
+; CHECK-SM100-LABEL: reduce_fmin_float(
+; CHECK-SM100: {
+; CHECK-SM100-NEXT: .reg .b32 %r<13>;
+; CHECK-SM100-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT: // %bb.0:
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmin_float_param_0+16];
+; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4;
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_param_0];
+; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3;
+; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2;
+; CHECK-SM100-NEXT: min.f32 %r9, %r8, %r5, %r6;
+; CHECK-SM100-NEXT: min.f32 %r10, %r3, %r4, %r7;
+; CHECK-SM100-NEXT: min.f32 %r11, %r10, %r9, %r1;
+; CHECK-SM100-NEXT: min.f32 %r12, %r11, %r2;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12;
+; CHECK-SM100-NEXT: ret;
+ %res = call float @llvm.vector.reduce.fmin(<8 x float> %in)
+ ret float %res
+}
+
+define float @reduce_fmin_float_reassoc(<8 x float> %in) {
+; CHECK-SM80-LABEL: reduce_fmin_float_reassoc(
+; CHECK-SM80: {
+; CHECK-SM80-NEXT: .reg .b32 %r<16>;
+; CHECK-SM80-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT: // %bb.0:
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_reassoc_param_0];
+; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1;
+; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2;
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmin_float_reassoc_param_0+16];
+; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3;
+; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4;
+; CHECK-SM80-NEXT: min.f32 %r9, %r7, %r8;
+; CHECK-SM80-NEXT: min.f32 %r10, %r5, %r6;
+; CHECK-SM80-NEXT: min.f32 %r11, %r10, %r9;
+; CHECK-SM80-NEXT: min.f32 %r12, %r3, %r4;
+; CHECK-SM80-NEXT: min.f32 %r13, %r1, %r2;
+; CHECK-SM80-NEXT: min.f32 %r14, %r13, %r12;
+; CHECK-SM80-NEXT: min.f32 %r15, %r14, %r11;
+; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15;
+; CHECK-SM80-NEXT: ret;
+;
+; CHECK-SM100-LABEL: reduce_fmin_float_reassoc(
+; CHECK-SM100: {
+; CHECK-SM100-NEXT: .reg .b32 %r<13>;
+; CHECK-SM100-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT: // %bb.0:
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmin_float_reassoc_param_0+16];
+; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4;
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_reassoc_param_0];
+; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3;
+; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2;
+; CHECK-SM100-NEXT: min.f32 %r9, %r8, %r5, %r6;
+; CHECK-SM100-NEXT: min.f32 %r10, %r3, %r4, %r7;
+; CHECK-SM100-NEXT: min.f32 %r11, %r10, %r9, %r1;
+; CHECK-SM100-NEXT: min.f32 %r12, %r11, %r2;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12;
+; CHECK-SM100-NEXT: ret;
%res = call reassoc float @llvm.vector.reduce.fmin(<8 x float> %in)
ret float %res
}
define float @reduce_fmin_float_reassoc_nonpow2(<7 x float> %in) {
+; CHECK-SM80-LABEL: reduce_fmin_float_reassoc_nonpow2(
+; CHECK-SM80: {
+; CHECK-SM80-NEXT: .reg .b32 %r<14>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT: // %bb.0:
+; CHECK-SM80-NEXT: ld.param.b32 %r7, [reduce_fmin_float_reassoc_nonpow2_param_0+24];
+; CHECK-SM80-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmin_float_reassoc_nonpow2_param_0+16];
+; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmin_float_reassoc_nonpow2_param_0];
+; CHECK-SM80-NEXT: min.f32 %r8, %r5, %r6;
+; CHECK-SM80-NEXT: min.f32 %r9, %r8, %r7;
+; CHECK-SM80-NEXT: min.f32 %r10, %r3, %r4;
+; CHECK-SM80-NEXT: min.f32 %r11, %r1, %r2;
+; CHECK-SM80-NEXT: min.f32 %r12, %r11, %r10;
+; CHECK-SM80-NEXT: min.f32 %r13, %r12, %r9;
+; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r13;
+; CHECK-SM80-NEXT: ret;
;
-; CHECK-LABEL: reduce_fmin_float_reassoc_nonpow2(
-; CHECK: {
-; CHECK-NEXT: .reg .b32 %r<14>;
-; CHECK-EMPTY:
-; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.b32 %r7, [reduce_fmin_float_reassoc_nonpow2_param_0+24];
-; CHECK-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmin_float_reassoc_nonpow2_param_0+16];
-; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmin_float_reassoc_nonpow2_param_0];
-; CHECK-NEXT: min.f32 %r8, %r3, %r7;
-; CHECK-NEXT: min.f32 %r9, %r1, %r5;
-; CHECK-NEXT: min.f32 %r10, %r9, %r8;
-; CHECK-NEXT: min.f32 %r11, %r2, %r6;
-; CHECK-NEXT: min.f32 %r12, %r11, %r4;
-; CHECK-NEXT: min.f32 %r13, %r10, %r12;
-; CHECK-NEXT: st.param.b32 [func_retval0], %r13;
-; CHECK-NEXT: ret;
+; CHECK-SM100-LABEL: reduce_fmin_float_reassoc_nonpow2(
+; CHECK-SM100: {
+; CHECK-SM100-NEXT: .reg .b32 %r<11>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT: // %bb.0:
+; CHECK-SM100-NEXT: ld.param.b32 %r7, [reduce_fmin_float_reassoc_nonpow2_param_0+24];
+; CHECK-SM100-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmin_float_reassoc_nonpow2_param_0+16];
+; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmin_float_reassoc_nonpow2_param_0];
+; CHECK-SM100-NEXT: min.f32 %r8, %r4, %r5, %r6;
+; CHECK-SM100-NEXT: min.f32 %r9, %r1, %r2, %r3;
+; CHECK-SM100-NEXT: min.f32 %r10, %r9, %r8, %r7;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10;
+; CHECK-SM100-NEXT: ret;
%res = call reassoc float @llvm.vector.reduce.fmin(<7 x float> %in)
ret float %res
}
-; Check straight-line reduction.
+define float @reduce_fmin_float_nnan(<8 x float> %in) {
+; CHECK-SM80-LABEL: reduce_fmin_float_nnan(
+; CHECK-SM80: {
+; CHECK-SM80-NEXT: .reg .b32 %r<16>;
+; CHECK-SM80-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT: // %bb.0:
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_nnan_param_0];
+; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1;
+; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2;
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmin_float_nnan_param_0+16];
+; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3;
+; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4;
+; CHECK-SM80-NEXT: min.f32 %r9, %r7, %r8;
+; CHECK-SM80-NEXT: min.f32 %r10, %r5, %r6;
+; CHECK-SM80-NEXT: min.f32 %r11, %r10, %r9;
+; CHECK-SM80-NEXT: min.f32 %r12, %r3, %r4;
+; CHECK-SM80-NEXT: min.f32 %r13, %r1, %r2;
+; CHECK-SM80-NEXT: min.f32 %r14, %r13, %r12;
+; CHECK-SM80-NEXT: min.f32 %r15, %r14, %r11;
+; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15;
+; CHECK-SM80-NEXT: ret;
+;
+; CHECK-SM100-LABEL: reduce_fmin_float_nnan(
+; CHECK-SM100: {
+; CHECK-SM100-NEXT: .reg .b32 %r<13>;
+; CHECK-SM100-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT: // %bb.0:
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmin_float_nnan_param_0+16];
+; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4;
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_nnan_param_0];
+; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3;
+; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2;
+; CHECK-SM100-NEXT: min.f32 %r9, %r8, %r5, %r6;
+; CHECK-SM100-NEXT: min.f32 %r10, %r3, %r4, %r7;
+; CHECK-SM100-NEXT: min.f32 %r11, %r10, %r9, %r1;
+; CHECK-SM100-NEXT: min.f32 %r12, %r11, %r2;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12;
+; CHECK-SM100-NEXT: ret;
+ %res = call nnan float @llvm.vector.reduce.fmin(<8 x float> %in)
+ ret float %res
+}
+
+define float @reduce_fmin_float_nnan_nonpow2(<7 x float> %in) {
+; CHECK-SM80-LABEL: reduce_fmin_float_nnan_nonpow2(
+; CHECK-SM80: {
+; CHECK-SM80-NEXT: .reg .b32 %r<14>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT: // %bb.0:
+; CHECK-SM80-NEXT: ld.param.b32 %r7, [reduce_fmin_float_nnan_nonpow2_param_0+24];
+; CHECK-SM80-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmin_float_nnan_nonpow2_param_0+16];
+; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmin_float_nnan_nonpow2_param_0];
+; CHECK-SM80-NEXT: min.f32 %r8, %r5, %r6;
+; CHECK-SM80-NEXT: min.f32 %r9, %r8, %r7;
+; CHECK-SM80-NEXT: min.f32 %r10, %r3, %r4;
+; CHECK-SM80-NEXT: min.f32 %r11, %r1, %r2;
+; CHECK-SM80-NEXT: min.f32 %r12, %r11, %r10;
+; CHECK-SM80-NEXT: min.f32 %r13, %r12, %r9;
+; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r13;
+; CHECK-SM80-NEXT: ret;
+;
+; CHECK-SM100-LABEL: reduce_fmin_float_nnan_nonpow2(
+; CHECK-SM100: {
+; CHECK-SM100-NEXT: .reg .b32 %r<11>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT: // %bb.0:
+; CHECK-SM100-NEXT: ld.param.b32 %r7, [reduce_fmin_float_nnan_nonpow2_param_0+24];
+; CHECK-SM100-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmin_float_nnan_nonpow2_param_0+16];
+; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmin_float_nnan_nonpow2_param_0];
+; CHECK-SM100-NEXT: min.f32 %r8, %r4, %r5, %r6;
+; CHECK-SM100-NEXT: min.f32 %r9, %r1, %r2, %r3;
+; CHECK-SM100-NEXT: min.f32 %r10, %r9, %r8, %r7;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10;
+; CHECK-SM100-NEXT: ret;
+ %res = call nnan float @llvm.vector.reduce.fmin(<7 x float> %in)
+ ret float %res
+}
+
define half @reduce_fmaximum_half(<8 x half> %in) {
; CHECK-LABEL: reduce_fmaximum_half(
; CHECK: {
@@ -785,84 +1074,131 @@ define half @reduce_fmaximum_half_reassoc_nonpow2(<7 x half> %in) {
ret half %res
}
-; Check straight-line reduction.
define float @reduce_fmaximum_float(<8 x float> %in) {
+; CHECK-SM80-LABEL: reduce_fmaximum_float(
+; CHECK-SM80: {
+; CHECK-SM80-NEXT: .reg .b32 %r<16>;
+; CHECK-SM80-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT: // %bb.0:
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmaximum_float_param_0];
+; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1;
+; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2;
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmaximum_float_param_0+16];
+; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3;
+; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4;
+; CHECK-SM80-NEXT: max.NaN.f32 %r9, %r7, %r8;
+; CHECK-SM80-NEXT: max.NaN.f32 %r10, %r5, %r6;
+; CHECK-SM80-NEXT: max.NaN.f32 %r11, %r10, %r9;
+; CHECK-SM80-NEXT: max.NaN.f32 %r12, %r3, %r4;
+; CHECK-SM80-NEXT: max.NaN.f32 %r13, %r1, %r2;
+; CHECK-SM80-NEXT: max.NaN.f32 %r14, %r13, %r12;
+; CHECK-SM80-NEXT: max.NaN.f32 %r15, %r14, %r11;
+; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15;
+; CHECK-SM80-NEXT: ret;
;
-; CHECK-LABEL: reduce_fmaximum_float(
-; CHECK: {
-; CHECK-NEXT: .reg .b32 %r<16>;
-; CHECK-NEXT: .reg .b64 %rd<5>;
-; CHECK-EMPTY:
-; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmaximum_float_param_0+16];
-; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmaximum_float_param_0];
-; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4;
-; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2;
-; CHECK-NEXT: max.NaN.f32 %r5, %r4, %r2;
-; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3;
-; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1;
-; CHECK-NEXT: max.NaN.f32 %r10, %r9, %r7;
-; CHECK-NEXT: max.NaN.f32 %r11, %r10, %r5;
-; CHECK-NEXT: max.NaN.f32 %r12, %r3, %r1;
-; CHECK-NEXT: max.NaN.f32 %r13, %r8, %r6;
-; CHECK-NEXT: max.NaN.f32 %r14, %r13, %r12;
-; CHECK-NEXT: max.NaN.f32 %r15, %r14, %r11;
-; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
-; CHECK-NEXT: ret;
+; CHECK-SM100-LABEL: reduce_fmaximum_float(
+; CHECK-SM100: {
+; CHECK-SM100-NEXT: .reg .b32 %r<13>;
+; CHECK-SM100-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT: // %bb.0:
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmaximum_float_param_0+16];
+; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4;
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmaximum_float_param_0];
+; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3;
+; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2;
+; CHECK-SM100-NEXT: max.NaN.f32 %r9, %r8, %r5, %r6;
+; CHECK-SM100-NEXT: max.NaN.f32 %r10, %r3, %r4, %r7;
+; CHECK-SM100-NEXT: max.NaN.f32 %r11, %r10, %r9, %r1;
+; CHECK-SM100-NEXT: max.NaN.f32 %r12, %r11, %r2;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12;
+; CHECK-SM100-NEXT: ret;
%res = call float @llvm.vector.reduce.fmaximum(<8 x float> %in)
ret float %res
}
define float @reduce_fmaximum_float_reassoc(<8 x float> %in) {
+; CHECK-SM80-LABEL: reduce_fmaximum_float_reassoc(
+; CHECK-SM80: {
+; CHECK-SM80-NEXT: .reg .b32 %r<16>;
+; CHECK-SM80-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT: // %bb.0:
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmaximum_float_reassoc_param_0];
+; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1;
+; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2;
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmaximum_float_reassoc_param_0+16];
+; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3;
+; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4;
+; CHECK-SM80-NEXT: max.NaN.f32 %r9, %r7, %r8;
+; CHECK-SM80-NEXT: max.NaN.f32 %r10, %r5, %r6;
+; CHECK-SM80-NEXT: max.NaN.f32 %r11, %r10, %r9;
+; CHECK-SM80-NEXT: max.NaN.f32 %r12, %r3, %r4;
+; CHECK-SM80-NEXT: max.NaN.f32 %r13, %r1, %r2;
+; CHECK-SM80-NEXT: max.NaN.f32 %r14, %r13, %r12;
+; CHECK-SM80-NEXT: max.NaN.f32 %r15, %r14, %r11;
+; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15;
+; CHECK-SM80-NEXT: ret;
;
-; CHECK-LABEL: reduce_fmaximum_float_reassoc(
-; CHECK: {
-; CHECK-NEXT: .reg .b32 %r<16>;
-; CHECK-NEXT: .reg .b64 %rd<5>;
-; CHECK-EMPTY:
-; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmaximum_float_reassoc_param_0+16];
-; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmaximum_float_reassoc_param_0];
-; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4;
-; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2;
-; CHECK-NEXT: max.NaN.f32 %r5, %r4, %r2;
-; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3;
-; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1;
-; CHECK-NEXT: max.NaN.f32 %r10, %r9, %r7;
-; CHECK-NEXT: max.NaN.f32 %r11, %r10, %r5;
-; CHECK-NEXT: max.NaN.f32 %r12, %r3, %r1;
-; CHECK-NEXT: max.NaN.f32 %r13, %r8, %r6;
-; CHECK-NEXT: max.NaN.f32 %r14, %r13, %r12;
-; CHECK-NEXT: max.NaN.f32 %r15, %r14, %r11;
-; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
-; CHECK-NEXT: ret;
+; CHECK-SM100-LABEL: reduce_fmaximum_float_reassoc(
+; CHECK-SM100: {
+; CHECK-SM100-NEXT: .reg .b32 %r<13>;
+; CHECK-SM100-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT: // %bb.0:
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmaximum_float_reassoc_param_0+16];
+; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4;
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmaximum_float_reassoc_param_0];
+; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3;
+; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2;
+; CHECK-SM100-NEXT: max.NaN.f32 %r9, %r8, %r5, %r6;
+; CHECK-SM100-NEXT: max.NaN.f32 %r10, %r3, %r4, %r7;
+; CHECK-SM100-NEXT: max.NaN.f32 %r11, %r10, %r9, %r1;
+; CHECK-SM100-NEXT: max.NaN.f32 %r12, %r11, %r2;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12;
+; CHECK-SM100-NEXT: ret;
%res = call reassoc float @llvm.vector.reduce.fmaximum(<8 x float> %in)
ret float %res
}
define float @reduce_fmaximum_float_reassoc_nonpow2(<7 x float> %in) {
+; CHECK-SM80-LABEL: reduce_fmaximum_float_reassoc_nonpow2(
+; CHECK-SM80: {
+; CHECK-SM80-NEXT: .reg .b32 %r<14>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT: // %bb.0:
+; CHECK-SM80-NEXT: ld.param.b32 %r7, [reduce_fmaximum_float_reassoc_nonpow2_param_0+24];
+; CHECK-SM80-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmaximum_float_reassoc_nonpow2_param_0+16];
+; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmaximum_float_reassoc_nonpow2_param_0];
+; CHECK-SM80-NEXT: max.NaN.f32 %r8, %r5, %r6;
+; CHECK-SM80-NEXT: max.NaN.f32 %r9, %r8, %r7;
+; CHECK-SM80-NEXT: max.NaN.f32 %r10, %r3, %r4;
+; CHECK-SM80-NEXT: max.NaN.f32 %r11, %r1, %r2;
+; CHECK-SM80-NEXT: max.NaN.f32 %r12, %r11, %r10;
+; CHECK-SM80-NEXT: max.NaN.f32 %r13, %r12, %r9;
+; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r13;
+; CHECK-SM80-NEXT: ret;
;
-; CHECK-LABEL: reduce_fmaximum_float_reassoc_nonpow2(
-; CHECK: {
-; CHECK-NEXT: .reg .b32 %r<14>;
-; CHECK-EMPTY:
-; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.b32 %r7, [reduce_fmaximum_float_reassoc_nonpow2_param_0+24];
-; CHECK-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmaximum_float_reassoc_nonpow2_param_0+16];
-; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmaximum_float_reassoc_nonpow2_param_0];
-; CHECK-NEXT: max.NaN.f32 %r8, %r3, %r7;
-; CHECK-NEXT: max.NaN.f32 %r9, %r1, %r5;
-; CHECK-NEXT: max.NaN.f32 %r10, %r9, %r8;
-; CHECK-NEXT: max.NaN.f32 %r11, %r2, %r6;
-; CHECK-NEXT: max.NaN.f32 %r12, %r11, %r4;
-; CHECK-NEXT: max.NaN.f32 %r13, %r10, %r12;
-; CHECK-NEXT: st.param.b32 [func_retval0], %r13;
-; CHECK-NEXT: ret;
+; CHECK-SM100-LABEL: reduce_fmaximum_float_reassoc_nonpow2(
+; CHECK-SM100: {
+; CHECK-SM100-NEXT: .reg .b32 %r<11>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT: // %bb.0:
+; CHECK-SM100-NEXT: ld.param.b32 %r7, [reduce_fmaximum_float_reassoc_nonpow2_param_0+24];
+; CHECK-SM100-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmaximum_float_reassoc_nonpow2_param_0+16];
+; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmaximum_float_reassoc_nonpow2_param_0];
+; CHECK-SM100-NEXT: max.NaN.f32 %r8, %r4, %r5, %r6;
+; CHECK-SM100-NEXT: max.NaN.f32 %r9, %r1, %r2, %r3;
+; CHECK-SM100-NEXT: max.NaN.f32 %r10, %r9, %r8, %r7;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10;
+; CHECK-SM100-NEXT: ret;
%res = call reassoc float @llvm.vector.reduce.fmaximum(<7 x float> %in)
ret float %res
}
-; Check straight-line reduction.
define half @reduce_fminimum_half(<8 x half> %in) {
; CHECK-LABEL: reduce_fminimum_half(
; CHECK: {
@@ -927,79 +1263,127 @@ define half @reduce_fminimum_half_reassoc_nonpow2(<7 x half> %in) {
ret half %res
}
-; Check straight-line reduction.
define float @reduce_fminimum_float(<8 x float> %in) {
+; CHECK-SM80-LABEL: reduce_fminimum_float(
+; CHECK-SM80: {
+; CHECK-SM80-NEXT: .reg .b32 %r<16>;
+; CHECK-SM80-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT: // %bb.0:
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fminimum_float_param_0];
+; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1;
+; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2;
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fminimum_float_param_0+16];
+; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3;
+; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4;
+; CHECK-SM80-NEXT: min.NaN.f32 %r9, %r7, %r8;
+; CHECK-SM80-NEXT: min.NaN.f32 %r10, %r5, %r6;
+; CHECK-SM80-NEXT: min.NaN.f32 %r11, %r10, %r9;
+; CHECK-SM80-NEXT: min.NaN.f32 %r12, %r3, %r4;
+; CHECK-SM80-NEXT: min.NaN.f32 %r13, %r1, %r2;
+; CHECK-SM80-NEXT: min.NaN.f32 %r14, %r13, %r12;
+; CHECK-SM80-NEXT: min.NaN.f32 %r15, %r14, %r11;
+; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15;
+; CHECK-SM80-NEXT: ret;
;
-; CHECK-LABEL: reduce_fminimum_float(
-; CHECK: {
-; CHECK-NEXT: .reg .b32 %r<16>;
-; CHECK-NEXT: .reg .b64 %rd<5>;
-; CHECK-EMPTY:
-; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fminimum_float_param_0+16];
-; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fminimum_float_param_0];
-; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4;
-; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2;
-; CHECK-NEXT: min.NaN.f32 %r5, %r4, %r2;
-; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3;
-; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1;
-; CHECK-NEXT: min.NaN.f32 %r10, %r9, %r7;
-; CHECK-NEXT: min.NaN.f32 %r11, %r10, %r5;
-; CHECK-NEXT: min.NaN.f32 %r12, %r3, %r1;
-; CHECK-NEXT: min.NaN.f32 %r13, %r8, %r6;
-; CHECK-NEXT: min.NaN.f32 %r14, %r13, %r12;
-; CHECK-NEXT: min.NaN.f32 %r15, %r14, %r11;
-; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
-; CHECK-NEXT: ret;
+; CHECK-SM100-LABEL: reduce_fminimum_float(
+; CHECK-SM100: {
+; CHECK-SM100-NEXT: .reg .b32 %r<13>;
+; CHECK-SM100-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT: // %bb.0:
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fminimum_float_param_0+16];
+; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4;
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fminimum_float_param_0];
+; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3;
+; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2;
+; CHECK-SM100-NEXT: min.NaN.f32 %r9, %r8, %r5, %r6;
+; CHECK-SM100-NEXT: min.NaN.f32 %r10, %r3, %r4, %r7;
+; CHECK-SM100-NEXT: min.NaN.f32 %r11, %r10, %r9, %r1;
+; CHECK-SM100-NEXT: min.NaN.f32 %r12, %r11, %r2;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12;
+; CHECK-SM100-NEXT: ret;
%res = call float @llvm.vector.reduce.fminimum(<8 x float> %in)
ret float %res
}
define float @reduce_fminimum_float_reassoc(<8 x float> %in) {
+; CHECK-SM80-LABEL: reduce_fminimum_float_reassoc(
+; CHECK-SM80: {
+; CHECK-SM80-NEXT: .reg .b32 %r<16>;
+; CHECK-SM80-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT: // %bb.0:
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fminimum_float_reassoc_param_0];
+; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1;
+; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2;
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fminimum_float_reassoc_param_0+16];
+; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3;
+; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4;
+; CHECK-SM80-NEXT: min.NaN.f32 %r9, %r7, %r8;
+; CHECK-SM80-NEXT: min.NaN.f32 %r10, %r5, %r6;
+; CHECK-SM80-NEXT: min.NaN.f32 %r11, %r10, %r9;
+; CHECK-SM80-NEXT: min.NaN.f32 %r12, %r3, %r4;
+; CHECK-SM80-NEXT: min.NaN.f32 %r13, %r1, %r2;
+; CHECK-SM80-NEXT: min.NaN.f32 %r14, %r13, %r12;
+; CHECK-SM80-NEXT: min.NaN.f32 %r15, %r14, %r11;
+; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15;
+; CHECK-SM80-NEXT: ret;
;
-; CHECK-LABEL: reduce_fminimum_float_reassoc(
-; CHECK: {
-; CHECK-NEXT: .reg .b32 %r<16>;
-; CHECK-NEXT: .reg .b64 %rd<5>;
-; CHECK-EMPTY:
-; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fminimum_float_reassoc_param_0+16];
-; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fminimum_float_reassoc_param_0];
-; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4;
-; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2;
-; CHECK-NEXT: min.NaN.f32 %r5, %r4, %r2;
-; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3;
-; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1;
-; CHECK-NEXT: min.NaN.f32 %r10, %r9, %r7;
-; CHECK-NEXT: min.NaN.f32 %r11, %r10, %r5;
-; CHECK-NEXT: min.NaN.f32 %r12, %r3, %r1;
-; CHECK-NEXT: min.NaN.f32 %r13, %r8, %r6;
-; CHECK-NEXT: min.NaN.f32 %r14, %r13, %r12;
-; CHECK-NEXT: min.NaN.f32 %r15, %r14, %r11;
-; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
-; CHECK-NEXT: ret;
+; CHECK-SM100-LABEL: reduce_fminimum_float_reassoc(
+; CHECK-SM100: {
+; CHECK-SM100-NEXT: .reg .b32 %r<13>;
+; CHECK-SM100-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT: // %bb.0:
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fminimum_float_reassoc_param_0+16];
+; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4;
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fminimum_float_reassoc_param_0];
+; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3;
+; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2;
+; CHECK-SM100-NEXT: min.NaN.f32 %r9, %r8, %r5, %r6;
+; CHECK-SM100-NEXT: min.NaN.f32 %r10, %r3, %r4, %r7;
+; CHECK-SM100-NEXT: min.NaN.f32 %r11, %r10, %r9, %r1;
+; CHECK-SM100-NEXT: min.NaN.f32 %r12, %r11, %r2;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12;
+; CHECK-SM100-NEXT: ret;
%res = call reassoc float @llvm.vector.reduce.fminimum(<8 x float> %in)
ret float %res
}
define float @reduce_fminimum_float_reassoc_nonpow2(<7 x float> %in) {
+; CHECK-SM80-LABEL: reduce_fminimum_float_reassoc_nonpow2(
+; CHECK-SM80: {
+; CHECK-SM80-NEXT: .reg .b32 %r<14>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT: // %bb.0:
+; CHECK-SM80-NEXT: ld.param.b32 %r7, [reduce_fminimum_float_reassoc_nonpow2_param_0+24];
+; CHECK-SM80-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fminimum_float_reassoc_nonpow2_param_0+16];
+; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fminimum_float_reassoc_nonpow2_param_0];
+; CHECK-SM80-NEXT: min.NaN.f32 %r8, %r5, %r6;
+; CHECK-SM80-NEXT: min.NaN.f32 %r9, %r8, %r7;
+; CHECK-SM80-NEXT: min.NaN.f32 %r10, %r3, %r4;
+; CHECK-SM80-NEXT: min.NaN.f32 %r11, %r1, %r2;
+; CHECK-SM80-NEXT: min.NaN.f32 %r12, %r11, %r10;
+; CHECK-SM80-NEXT: min.NaN.f32 %r13, %r12, %r9;
+; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r13;
+; CHECK-SM80-NEXT: ret;
;
-; CHECK-LABEL: reduce_fminimum_float_reassoc_nonpow2(
-; CHECK: {
-; CHECK-NEXT: .reg .b32 %r<14>;
-; CHECK-EMPTY:
-; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.b32 %r7, [reduce_fminimum_float_reassoc_nonpow2_param_0+24];
-; CHECK-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fminimum_float_reassoc_nonpow2_param_0+16];
-; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fminimum_float_reassoc_nonpow2_param_0];
-; CHECK-NEXT: min.NaN.f32 %r8, %r3, %r7;
-; CHECK-NEXT: min.NaN.f32 %r9, %r1, %r5;
-; CHECK-NEXT: min.NaN.f32 %r10, %r9, %r8;
-; CHECK-NEXT: min.NaN.f32 %r11, %r2, %r6;
-; CHECK-NEXT: min.NaN.f32 %r12, %r11, %r4;
-; CHECK-NEXT: min.NaN.f32 %r13, %r10, %r12;
-; CHECK-NEXT: st.param.b32 [func_retval0], %r13;
-; CHECK-NEXT: ret;
+; CHECK-SM100-LABEL: reduce_fminimum_float_reassoc_nonpow2(
+; CHECK-SM100: {
+; CHECK-SM100-NEXT: .reg .b32 %r<11>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT: // %bb.0:
+; CHECK-SM100-NEXT: ld.param.b32 %r7, [reduce_fminimum_float_reassoc_nonpow2_param_0+24];
+; CHECK-SM100-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fminimum_float_reassoc_nonpow2_param_0+16];
+; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fminimum_float_reassoc_nonpow2_param_0];
+; CHECK-SM100-NEXT: min.NaN.f32 %r8, %r4, %r5, %r6;
+; CHECK-SM100-NEXT: min.NaN.f32 %r9, %r1, %r2, %r3;
+; CHECK-SM100-NEXT: min.NaN.f32 %r10, %r9, %r8, %r7;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10;
+; CHECK-SM100-NEXT: ret;
%res = call reassoc float @llvm.vector.reduce.fminimum(<7 x float> %in)
ret float %res
}
@@ -1014,15 +1398,15 @@ define i16 @reduce_add_i16(<8 x i16> %in) {
; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_add_i16_param_0];
; CHECK-SM80-NEXT: mov.b32 {%rs1, %rs2}, %r4;
; CHECK-SM80-NEXT: mov.b32 {%rs3, %rs4}, %r2;
-; CHECK-SM80-NEXT: add.s16 %rs5, %rs3, %rs1;
+; CHECK-SM80-NEXT: add.s16 %rs5, %rs4, %rs2;
; CHECK-SM80-NEXT: mov.b32 {%rs6, %rs7}, %r3;
; CHECK-SM80-NEXT: mov.b32 {%rs8, %rs9}, %r1;
-; CHECK-SM80-NEXT: add.s16 %rs10, %rs8, %rs6;
-; CHECK-SM80-NEXT: add.s16 %rs11, %rs4, %rs2;
-; CHECK-SM80-NEXT: add.s16 %rs12, %rs9, %rs7;
-; CHECK-SM80-NEXT: add.s16 %rs13, %rs12, %rs11;
-; CHECK-SM80-NEXT: add.s16 %rs14, %rs10, %rs5;
-; CHECK-SM80-NEXT: add.s16 %rs15, %rs14, %rs13;
+; CHECK-SM80-NEXT: add.s16 %rs10, %rs9, %rs7;
+; CHECK-SM80-NEXT: add.s16 %rs11, %rs10, %rs5;
+; CHECK-SM80-NEXT: add.s16 %rs12, %rs3, %rs1;
+; CHECK-SM80-NEXT: add.s16 %rs13, %rs8, %rs6;
+; CHECK-SM80-NEXT: add.s16 %rs14, %rs13, %rs12;
+; CHECK-SM80-NEXT: add.s16 %rs15, %rs14, %rs11;
; CHECK-SM80-NEXT: cvt.u32.u16 %r5, %rs15;
; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r5;
; CHECK-SM80-NEXT: ret;
@@ -1030,20 +1414,17 @@ define i16 @reduce_add_i16(<8 x i16> %in) {
; CHECK-SM100-LABEL: reduce_add_i16(
; CHECK-SM100: {
; CHECK-SM100-NEXT: .reg .b16 %rs<4>;
-; CHECK-SM100-NEXT: .reg .b32 %r<11>;
+; CHECK-SM100-NEXT: .reg .b32 %r<9>;
; CHECK-SM100-EMPTY:
; CHECK-SM100-NEXT: // %bb.0:
; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_add_i16_param_0];
; CHECK-SM100-NEXT: add.s16x2 %r5, %r2, %r4;
; CHECK-SM100-NEXT: add.s16x2 %r6, %r1, %r3;
; CHECK-SM100-NEXT: add.s16x2 %r7, %r6, %r5;
-; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7;
-; CHECK-SM100-NEXT: // implicit-def: %rs2
-; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2};
-; CHECK-SM100-NEXT: add.s16x2 %r9, %r7, %r8;
-; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9;
-; CHECK-SM100-NEXT: cvt.u32.u16 %r10, %rs3;
-; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10;
+; CHECK-SM100-NEXT: mov.b32 {%rs1, %rs2}, %r7;
+; CHECK-SM100-NEXT: add.s16 %rs3, %rs1, %rs2;
+; CHECK-SM100-NEXT: cvt.u32.u16 %r8, %rs3;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r8;
; CHECK-SM100-NEXT: ret;
%res = call i16 @llvm.vector.reduce.add(<8 x i16> %in)
ret i16 %res
@@ -1103,13 +1484,13 @@ define i32 @reduce_add_i32(<8 x i32> %in) {
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_add_i32_param_0+16];
; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_add_i32_param_0];
-; CHECK-NEXT: add.s32 %r9, %r3, %r7;
-; CHECK-NEXT: add.s32 %r10, %r1, %r5;
-; CHECK-NEXT: add.s32 %r11, %r4, %r8;
-; CHECK-NEXT: add.s32 %r12, %r2, %r6;
-; CHECK-NEXT: add.s32 %r13, %r12, %r11;
-; CHECK-NEXT: add.s32 %r14, %r10, %r9;
-; CHECK-NEXT: add.s32 %r15, %r14, %r13;
+; CHECK-NEXT: add.s32 %r9, %r4, %r8;
+; CHECK-NEXT: add.s32 %r10, %r2, %r6;
+; CHECK-NEXT: add.s32 %r11, %r10, %r9;
+; CHECK-NEXT: add.s32 %r12, %r3, %r7;
+; CHECK-NEXT: add.s32 %r13, %r1, %r5;
+; CHECK-NEXT: add.s32 %r14, %r13, %r12;
+; CHECK-NEXT: add.s32 %r15, %r14, %r11;
; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
; CHECK-NEXT: ret;
%res = call i32 @llvm.vector.reduce.add(<8 x i32> %in)
@@ -1147,15 +1528,15 @@ define i16 @reduce_mul_i16(<8 x i16> %in) {
; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_mul_i16_param_0];
; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r4;
; CHECK-NEXT: mov.b32 {%rs3, %rs4}, %r2;
-; CHECK-NEXT: mul.lo.s16 %rs5, %rs3, %rs1;
+; CHECK-NEXT: mul.lo.s16 %rs5, %rs4, %rs2;
; CHECK-NEXT: mov.b32 {%rs6, %rs7}, %r3;
; CHECK-NEXT: mov.b32 {%rs8, %rs9}, %r1;
-; CHECK-NEXT: mul.lo.s16 %rs10, %rs8, %rs6;
-; CHECK-NEXT: mul.lo.s16 %rs11, %rs4, %rs2;
-; CHECK-NEXT: mul.lo.s16 %rs12, %rs9, %rs7;
-; CHECK-NEXT: mul.lo.s16 %rs13, %rs12, %rs11;
-; CHECK-NEXT: mul.lo.s16 %rs14, %rs10, %rs5;
-; CHECK-NEXT: mul.lo.s16 %rs15, %rs14, %rs13;
+; CHECK-NEXT: mul.lo.s16 %rs10, %rs9, %rs7;
+; CHECK-NEXT: mul.lo.s16 %rs11, %rs10, %rs5;
+; CHECK-NEXT: mul.lo.s16 %rs12, %rs3, %rs1;
+; CHECK-NEXT: mul.lo.s16 %rs13, %rs8, %rs6;
+; CHECK-NEXT: mul.lo.s16 %rs14, %rs13, %rs12;
+; CHECK-NEXT: mul.lo.s16 %rs15, %rs14, %rs11;
; CHECK-NEXT: cvt.u32.u16 %r5, %rs15;
; CHECK-NEXT: st.param.b32 [func_retval0], %r5;
; CHECK-NEXT: ret;
@@ -1194,13 +1575,13 @@ define i32 @reduce_mul_i32(<8 x i32> %in) {
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_mul_i32_param_0+16];
; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_mul_i32_param_0];
-; CHECK-NEXT: mul.lo.s32 %r9, %r3, %r7;
-; CHECK-NEXT: mul.lo.s32 %r10, %r1, %r5;
-; CHECK-NEXT: mul.lo.s32 %r11, %r4, %r8;
-; CHECK-NEXT: mul.lo.s32 %r12, %r2, %r6;
-; CHECK-NEXT: mul.lo.s32 %r13, %r12, %r11;
-; CHECK-NEXT: mul.lo.s32 %r14, %r10, %r9;
-; CHECK-NEXT: mul.lo.s32 %r15, %r14, %r13;
+; CHECK-NEXT: mul.lo.s32 %r9, %r4, %r8;
+; CHECK-NEXT: mul.lo.s32 %r10, %r2, %r6;
+; CHECK-NEXT: mul.lo.s32 %r11, %r10, %r9;
+; CHECK-NEXT: mul.lo.s32 %r12, %r3, %r7;
+; CHECK-NEXT: mul.lo.s32 %r13, %r1, %r5;
+; CHECK-NEXT: mul.lo.s32 %r14, %r13, %r12;
+; CHECK-NEXT: mul.lo.s32 %r15, %r14, %r11;
; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
; CHECK-NEXT: ret;
%res = call i32 @llvm.vector.reduce.mul(<8 x i32> %in)
@@ -1238,15 +1619,15 @@ define i16 @reduce_umax_i16(<8 x i16> %in) {
; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_umax_i16_param_0];
; CHECK-SM80-NEXT: mov.b32 {%rs1, %rs2}, %r4;
; CHECK-SM80-NEXT: mov.b32 {%rs3, %rs4}, %r2;
-; CHECK-SM80-NEXT: max.u16 %rs5, %rs3, %rs1;
+; CHECK-SM80-NEXT: max.u16 %rs5, %rs4, %rs2;
; CHECK-SM80-NEXT: mov.b32 {%rs6, %rs7}, %r3;
; CHECK-SM80-NEXT: mov.b32 {%rs8, %rs9}, %r1;
-; CHECK-SM80-NEXT: max.u16 %rs10, %rs8, %rs6;
-; CHECK-SM80-NEXT: max.u16 %rs11, %rs4, %rs2;
-; CHECK-SM80-NEXT: max.u16 %rs12, %rs9, %rs7;
-; CHECK-SM80-NEXT: max.u16 %rs13, %rs12, %rs11;
-; CHECK-SM80-NEXT: max.u16 %rs14, %rs10, %rs5;
-; CHECK-SM80-NEXT: max.u16 %rs15, %rs14, %rs13;
+; CHECK-SM80-NEXT: max.u16 %rs10, %rs9, %rs7;
+; CHECK-SM80-NEXT: max.u16 %rs11, %rs10, %rs5;
+; CHECK-SM80-NEXT: max.u16 %rs12, %rs3, %rs1;
+; CHECK-SM80-NEXT: max.u16 %rs13, %rs8, %rs6;
+; CHECK-SM80-NEXT: max.u16 %rs14, %rs13, %rs12;
+; CHECK-SM80-NEXT: max.u16 %rs15, %rs14, %rs11;
; CHECK-SM80-NEXT: cvt.u32.u16 %r5, %rs15;
; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r5;
; CHECK-SM80-NEXT: ret;
@@ -1254,20 +1635,17 @@ define i16 @reduce_umax_i16(<8 x i16> %in) {
; CHECK-SM100-LABEL: reduce_umax_i16(
; CHECK-SM100: {
; CHECK-SM100-NEXT: .reg .b16 %rs<4>;
-; CHECK-SM100-NEXT: .reg .b32 %r<11>;
+; CHECK-SM100-NEXT: .reg .b32 %r<9>;
; CHECK-SM100-EMPTY:
; CHECK-SM100-NEXT: // %bb.0:
; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_umax_i16_param_0];
; CHECK-SM100-NEXT: max.u16x2 %r5, %r2, %r4;
; CHECK-SM100-NEXT: max.u16x2 %r6, %r1, %r3;
; CHECK-SM100-NEXT: max.u16x2 %r7, %r6, %r5;
-; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7;
-; CHECK-SM100-NEXT: // implicit-def: %rs2
-; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2};
-; CHECK-SM100-NEXT: max.u16x2 %r9, %r7, %r8;
-; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9;
-; CHECK-SM100-NEXT: cvt.u32.u16 %r10, %rs3;
-; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10;
+; CHECK-SM100-NEXT: mov.b32 {%rs1, %rs2}, %r7;
+; CHECK-SM100-NEXT: max.u16 %rs3, %rs1, %rs2;
+; CHECK-SM100-NEXT: cvt.u32.u16 %r8, %rs3;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r8;
; CHECK-SM100-NEXT: ret;
%res = call i16 @llvm.vector.reduce.umax(<8 x i16> %in)
ret i16 %res
@@ -1327,13 +1705,13 @@ define i32 @reduce_umax_i32(<8 x i32> %in) {
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_umax_i32_param_0+16];
; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_umax_i32_param_0];
-; CHECK-NEXT: max.u32 %r9, %r3, %r7;
-; CHECK-NEXT: max.u32 %r10, %r1, %r5;
-; CHECK-NEXT: max.u32 %r11, %r4, %r8;
-; CHECK-NEXT: max.u32 %r12, %r2, %r6;
-; CHECK-NEXT: max.u32 %r13, %r12, %r11;
-; CHECK-NEXT: max.u32 %r14, %r10, %r9;
-; CHECK-NEXT: max.u32 %r15, %r14, %r13;
+; CHECK-NEXT: max.u32 %r9, %r4, %r8;
+; CHECK-NEXT: max.u32 %r10, %r2, %r6;
+; CHECK-NEXT: max.u32 %r11, %r10, %r9;
+; CHECK-NEXT: max.u32 %r12, %r3, %r7;
+; CHECK-NEXT: max.u32 %r13, %r1, %r5;
+; CHECK-NEXT: max.u32 %r14, %r13, %r12;
+; CHECK-NEXT: max.u32 %r15, %r14, %r11;
; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
; CHECK-NEXT: ret;
%res = call i32 @llvm.vector.reduce.umax(<8 x i32> %in)
@@ -1371,15 +1749,15 @@ define i16 @reduce_umin_i16(<8 x i16> %in) {
; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_umin_i16_param_0];
; CHECK-SM80-NEXT: mov.b32 {%rs1, %rs2}, %r4;
; CHECK-SM80-NEXT: mov.b32 {%rs3, %rs4}, %r2;
-; CHECK-SM80-NEXT: min.u16 %rs5, %rs3, %rs1;
+; CHECK-SM80-NEXT: min.u16 %rs5, %rs4, %rs2;
; CHECK-SM80-NEXT: mov.b32 {%rs6, %rs7}, %r3;
; CHECK-SM80-NEXT: mov.b32 {%rs8, %rs9}, %r1;
-; CHECK-SM80-NEXT: min.u16 %rs10, %rs8, %rs6;
-; CHECK-SM80-NEXT: min.u16 %rs11, %rs4, %rs2;
-; CHECK-SM80-NEXT: min.u16 %rs12, %rs9, %rs7;
-; CHECK-SM80-NEXT: min.u16 %rs13, %rs12, %rs11;
-; CHECK-SM80-NEXT: min.u16 %rs14, %rs10, %rs5;
-; CHECK-SM80-NEXT: min.u16 %rs15, %rs14, %rs13;
+; CHECK-SM80-NEXT: min.u16 %rs10, %rs9, %rs7;
+; CHECK-SM80-NEXT: min.u16 %rs11, %rs10, %rs5;
+; CHECK-SM80-NEXT: min.u16 %rs12, %rs3, %rs1;
+; CHECK-SM80-NEXT: min.u16 %rs13, %rs8, %rs6;
+; CHECK-SM80-NEXT: min.u16 %rs14, %rs13, %rs12;
+; CHECK-SM80-NEXT: min.u16 %rs15, %rs14, %rs11;
; CHECK-SM80-NEXT: cvt.u32.u16 %r5, %rs15;
; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r5;
; CHECK-SM80-NEXT: ret;
@@ -1387,20 +1765,17 @@ define i16 @reduce_umin_i16(<8 x i16> %in) {
; CHECK-SM100-LABEL: reduce_umin_i16(
; CHECK-SM100: {
; CHECK-SM100-NEXT: .reg .b16 %rs<4>;
-; CHECK-SM100-NEXT: .reg .b32 %r<11>;
+; CHECK-SM100-NEXT: .reg .b32 %r<9>;
; CHECK-SM100-EMPTY:
; CHECK-SM100-NEXT: // %bb.0:
; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_umin_i16_param_0];
; CHECK-SM100-NEXT: min.u16x2 %r5, %r2, %r4;
; CHECK-SM100-NEXT: min.u16x2 %r6, %r1, %r3;
; CHECK-SM100-NEXT: min.u16x2 %r7, %r6, %r5;
-; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7;
-; CHECK-SM100-NEXT: // implicit-def: %rs2
-; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2};
-; CHECK-SM100-NEXT: min.u16x2 %r9, %r7, %r8;
-; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9;
-; CHECK-SM100-NEXT: cvt.u32.u16 %r10, %rs3;
-; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10;
+; CHECK-SM100-NEXT: mov.b32 {%rs1, %rs2}, %r7;
+; CHECK-SM100-NEXT: min.u16 %rs3, %rs1, %rs2;
+; CHECK-SM100-NEXT: cvt.u32.u16 %r8, %rs3;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r8;
; CHECK-SM100-NEXT: ret;
%res = call i16 @llvm.vector.reduce.umin(<8 x i16> %in)
ret i16 %res
@@ -1460,13 +1835,13 @@ define i32 @reduce_umin_i32(<8 x i32> %in) {
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_umin_i32_param_0+16];
; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_umin_i32_param_0];
-; CHECK-NEXT: min.u32 %r9, %r3, %r7;
-; CHECK-NEXT: min.u32 %r10, %r1, %r5;
-; CHECK-NEXT: min.u32 %r11, %r4, %r8;
-; CHECK-NEXT: min.u32 %r12, %r2, %r6;
-; CHECK-NEXT: min.u32 %r13, %r12, %r11;
-; CHECK-NEXT: min.u32 %r14, %r10, %r9;
-; CHECK-NEXT: min.u32 %r15, %r14, %r13;
+; CHECK-NEXT: min.u32 %r9, %r4, %r8;
+; CHECK-NEXT: min.u32 %r10, %r2, %r6;
+; CHECK-NEXT: min.u32 %r11, %r10, %r9;
+; CHECK-NEXT: min.u32 %r12, %r3, %r7;
+; CHECK-NEXT: min.u32 %r13, %r1, %r5;
+; CHECK-NEXT: min.u32 %r14, %r13, %r12;
+; CHECK-NEXT: min.u32 %r15, %r14, %r11;
; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
; CHECK-NEXT: ret;
%res = call i32 @llvm.vector.reduce.umin(<8 x i32> %in)
@@ -1504,15 +1879,15 @@ define i16 @reduce_smax_i16(<8 x i16> %in) {
; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_smax_i16_param_0];
; CHECK-SM80-NEXT: mov.b32 {%rs1, %rs2}, %r4;
; CHECK-SM80-NEXT: mov.b32 {%rs3, %rs4}, %r2;
-; CHECK-SM80-NEXT: max.s16 %rs5, %rs3, %rs1;
+; CHECK-SM80-NEXT: max.s16 %rs5, %rs4, %rs2;
; CHECK-SM80-NEXT: mov.b32 {%rs6, %rs7}, %r3;
; CHECK-SM80-NEXT: mov.b32 {%rs8, %rs9}, %r1;
-; CHECK-SM80-NEXT: max.s16 %rs10, %rs8, %rs6;
-; CHECK-SM80-NEXT: max.s16 %rs11, %rs4, %rs2;
-; CHECK-SM80-NEXT: max.s16 %rs12, %rs9, %rs7;
-; CHECK-SM80-NEXT: max.s16 %rs13, %rs12, %rs11;
-; CHECK-SM80-NEXT: max.s16 %rs14, %rs10, %rs5;
-; CHECK-SM80-NEXT: max.s16 %rs15, %rs14, %rs13;
+; CHECK-SM80-NEXT: max.s16 %rs10, %rs9, %rs7;
+; CHECK-SM80-NEXT: max.s16 %rs11, %rs10, %rs5;
+; CHECK-SM80-NEXT: max.s16 %rs12, %rs3, %rs1;
+; CHECK-SM80-NEXT: max.s16 %rs13, %rs8, %rs6;
+; CHECK-SM80-NEXT: max.s16 %rs14, %rs13, %rs12;
+; CHECK-SM80-NEXT: max.s16 %rs15, %rs14, %rs11;
; CHECK-SM80-NEXT: cvt.u32.u16 %r5, %rs15;
; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r5;
; CHECK-SM80-NEXT: ret;
@@ -1520,20 +1895,17 @@ define i16 @reduce_smax_i16(<8 x i16> %in) {
; CHECK-SM100-LABEL: reduce_smax_i16(
; CHECK-SM100: {
; CHECK-SM100-NEXT: .reg .b16 %rs<4>;
-; CHECK-SM100-NEXT: .reg .b32 %r<11>;
+; CHECK-SM100-NEXT: .reg .b32 %r<9>;
; CHECK-SM100-EMPTY:
; CHECK-SM100-NEXT: // %bb.0:
; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_smax_i16_param_0];
; CHECK-SM100-NEXT: max.s16x2 %r5, %r2, %r4;
; CHECK-SM100-NEXT: max.s16x2 %r6, %r1, %r3;
; CHECK-SM100-NEXT: max.s16x2 %r7, %r6, %r5;
-; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7;
-; CHECK-SM100-NEXT: // implicit-def: %rs2
-; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2};
-; CHECK-SM100-NEXT: max.s16x2 %r9, %r7, %r8;
-; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9;
-; CHECK-SM100-NEXT: cvt.u32.u16 %r10, %rs3;
-; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10;
+; CHECK-SM100-NEXT: mov.b32 {%rs1, %rs2}, %r7;
+; CHECK-SM100-NEXT: max.s16 %rs3, %rs1, %rs2;
+; CHECK-SM100-NEXT: cvt.u32.u16 %r8, %rs3;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r8;
; CHECK-SM100-NEXT: ret;
%res = call i16 @llvm.vector.reduce.smax(<8 x i16> %in)
ret i16 %res
@@ -1593,13 +1965,13 @@ define i32 @reduce_smax_i32(<8 x i32> %in) {
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_smax_i32_param_0+16];
; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_smax_i32_param_0];
-; CHECK-NEXT: max.s32 %r9, %r3, %r7;
-; CHECK-NEXT: max.s32 %r10, %r1, %r5;
-; CHECK-NEXT: max.s32 %r11, %r4, %r8;
-; CHECK-NEXT: max.s32 %r12, %r2, %r6;
-; CHECK-NEXT: max.s32 %r13, %r12, %r11;
-; CHECK-NEXT: max.s32 %r14, %r10, %r9;
-; CHECK-NEXT: max.s32 %r15, %r14, %r13;
+; CHECK-NEXT: max.s32 %r9, %r4, %r8;
+; CHECK-NEXT: max.s32 %r10, %r2, %r6;
+; CHECK-NEXT: max.s32 %r11, %r10, %r9;
+; CHECK-NEXT: max.s32 %r12, %r3, %r7;
+; CHECK-NEXT: max.s32 %r13, %r1, %r5;
+; CHECK-NEXT: max.s32 %r14, %r13, %r12;
+; CHECK-NEXT: max.s32 %r15, %r14, %r11;
; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
; CHECK-NEXT: ret;
%res = call i32 @llvm.vector.reduce.smax(<8 x i32> %in)
@@ -1637,15 +2009,15 @@ define i16 @reduce_smin_i16(<8 x i16> %in) {
; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_smin_i16_param_0];
; CHECK-SM80-NEXT: mov.b32 {%rs1, %rs2}, %r4;
; CHECK-SM80-NEXT: mov.b32 {%rs3, %rs4}, %r2;
-; CHECK-SM80-NEXT: min.s16 %rs5, %rs3, %rs1;
+; CHECK-SM80-NEXT: min.s16 %rs5, %rs4, %rs2;
; CHECK-SM80-NEXT: mov.b32 {%rs6, %rs7}, %r3;
; CHECK-SM80-NEXT: mov.b32 {%rs8, %rs9}, %r1;
-; CHECK-SM80-NEXT: min.s16 %rs10, %rs8, %rs6;
-; CHECK-SM80-NEXT: min.s16 %rs11, %rs4, %rs2;
-; CHECK-SM80-NEXT: min.s16 %rs12, %rs9, %rs7;
-; CHECK-SM80-NEXT: min.s16 %rs13, %rs12, %rs11;
-; CHECK-SM80-NEXT: min.s16 %rs14, %rs10, %rs5;
-; CHECK-SM80-NEXT: min.s16 %rs15, %rs14, %rs13;
+; CHECK-SM80-NEXT: min.s16 %rs10, %rs9, %rs7;
+; CHECK-SM80-NEXT: min.s16 %rs11, %rs10, %rs5;
+; CHECK-SM80-NEXT: min.s16 %rs12, %rs3, %rs1;
+; CHECK-SM80-NEXT: min.s16 %rs13, %rs8, %rs6;
+; CHECK-SM80-NEXT: min.s16 %rs14, %rs13, %rs12;
+; CHECK-SM80-NEXT: min.s16 %rs15, %rs14, %rs11;
; CHECK-SM80-NEXT: cvt.u32.u16 %r5, %rs15;
; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r5;
; CHECK-SM80-NEXT: ret;
@@ -1653,20 +2025,17 @@ define i16 @reduce_smin_i16(<8 x i16> %in) {
; CHECK-SM100-LABEL: reduce_smin_i16(
; CHECK-SM100: {
; CHECK-SM100-NEXT: .reg .b16 %rs<4>;
-; CHECK-SM100-NEXT: .reg .b32 %r<11>;
+; CHECK-SM100-NEXT: .reg .b32 %r<9>;
; CHECK-SM100-EMPTY:
; CHECK-SM100-NEXT: // %bb.0:
; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_smin_i16_param_0];
; CHECK-SM100-NEXT: min.s16x2 %r5, %r2, %r4;
; CHECK-SM100-NEXT: min.s16x2 %r6, %r1, %r3;
; CHECK-SM100-NEXT: min.s16x2 %r7, %r6, %r5;
-; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7;
-; CHECK-SM100-NEXT: // implicit-def: %rs2
-; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2};
-; CHECK-SM100-NEXT: min.s16x2 %r9, %r7, %r8;
-; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9;
-; CHECK-SM100-NEXT: cvt.u32.u16 %r10, %rs3;
-; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10;
+; CHECK-SM100-NEXT: mov.b32 {%rs1, %rs2}, %r7;
+; CHECK-SM100-NEXT: min.s16 %rs3, %rs1, %rs2;
+; CHECK-SM100-NEXT: cvt.u32.u16 %r8, %rs3;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r8;
; CHECK-SM100-NEXT: ret;
%res = call i16 @llvm.vector.reduce.smin(<8 x i16> %in)
ret i16 %res
@@ -1726,13 +2095,13 @@ define i32 @reduce_smin_i32(<8 x i32> %in) {
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_smin_i32_param_0+16];
; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_smin_i32_param_0];
-; CHECK-NEXT: min.s32 %r9, %r3, %r7;
-; CHECK-NEXT: min.s32 %r10, %r1, %r5;
-; CHECK-NEXT: min.s32 %r11, %r4, %r8;
-; CHECK-NEXT: min.s32 %r12, %r2, %r6;
-; CHECK-NEXT: min.s32 %r13, %r12, %r11;
-; CHECK-NEXT: min.s32 %r14, %r10, %r9;
-; CHECK-NEXT: min.s32 %r15, %r14, %r13;
+; CHECK-NEXT: min.s32 %r9, %r4, %r8;
+; CHECK-NEXT: min.s32 %r10, %r2, %r6;
+; CHECK-NEXT: min.s32 %r11, %r10, %r9;
+; CHECK-NEXT: min.s32 %r12, %r3, %r7;
+; CHECK-NEXT: min.s32 %r13, %r1, %r5;
+; CHECK-NEXT: min.s32 %r14, %r13, %r12;
+; CHECK-NEXT: min.s32 %r15, %r14, %r11;
; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
; CHECK-NEXT: ret;
%res = call i32 @llvm.vector.reduce.smin(<8 x i32> %in)
@@ -1761,43 +2130,21 @@ define i32 @reduce_smin_i32_nonpow2(<7 x i32> %in) {
}
define i16 @reduce_and_i16(<8 x i16> %in) {
-; CHECK-SM80-LABEL: reduce_and_i16(
-; CHECK-SM80: {
-; CHECK-SM80-NEXT: .reg .b16 %rs<4>;
-; CHECK-SM80-NEXT: .reg .b32 %r<11>;
-; CHECK-SM80-EMPTY:
-; CHECK-SM80-NEXT: // %bb.0:
-; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_and_i16_param_0];
-; CHECK-SM80-NEXT: and.b32 %r5, %r2, %r4;
-; CHECK-SM80-NEXT: and.b32 %r6, %r1, %r3;
-; CHECK-SM80-NEXT: and.b32 %r7, %r6, %r5;
-; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs1}, %r7; }
-; CHECK-SM80-NEXT: // implicit-def: %rs2
-; CHECK-SM80-NEXT: mov.b32 %r8, {%rs1, %rs2};
-; CHECK-SM80-NEXT: and.b32 %r9, %r7, %r8;
-; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {%rs3, tmp}, %r9; }
-; CHECK-SM80-NEXT: cvt.u32.u16 %r10, %rs3;
-; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r10;
-; CHECK-SM80-NEXT: ret;
-;
-; CHECK-SM100-LABEL: reduce_and_i16(
-; CHECK-SM100: {
-; CHECK-SM100-NEXT: .reg .b16 %rs<4>;
-; CHECK-SM100-NEXT: .reg .b32 %r<11>;
-; CHECK-SM100-EMPTY:
-; CHECK-SM100-NEXT: // %bb.0:
-; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_and_i16_param_0];
-; CHECK-SM100-NEXT: and.b32 %r5, %r2, %r4;
-; CHECK-SM100-NEXT: and.b32 %r6, %r1, %r3;
-; CHECK-SM100-NEXT: and.b32 %r7, %r6, %r5;
-; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7;
-; CHECK-SM100-NEXT: // implicit-def: %rs2
-; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2};
-; CHECK-SM100-NEXT: and.b32 %r9, %r7, %r8;
-; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9;
-; CHECK-SM100-NEXT: cvt.u32.u16 %r10, %rs3;
-; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10;
-; CHECK-SM100-NEXT: ret;
+; CHECK-LABEL: reduce_and_i16(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<4>;
+; CHECK-NEXT: .reg .b32 %r<9>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_and_i16_param_0];
+; CHECK-NEXT: and.b32 %r5, %r2, %r4;
+; CHECK-NEXT: and.b32 %r6, %r1, %r3;
+; CHECK-NEXT: and.b32 %r7, %r6, %r5;
+; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r7;
+; CHECK-NEXT: and.b16 %rs3, %rs1, %rs2;
+; CHECK-NEXT: cvt.u32.u16 %r8, %rs3;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r8;
+; CHECK-NEXT: ret;
%res = call i16 @llvm.vector.reduce.and(<8 x i16> %in)
ret i16 %res
}
@@ -1837,13 +2184,13 @@ define i32 @reduce_and_i32(<8 x i32> %in) {
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_and_i32_param_0+16];
; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_and_i32_param_0];
-; CHECK-NEXT: and.b32 %r9, %r3, %r7;
-; CHECK-NEXT: and.b32 %r10, %r1, %r5;
-; CHECK-NEXT: and.b32 %r11, %r4, %r8;
-; CHECK-NEXT: and.b32 %r12, %r2, %r6;
-; CHECK-NEXT: and.b32 %r13, %r12, %r11;
-; CHECK-NEXT: and.b32 %r14, %r10, %r9;
-; CHECK-NEXT: and.b32 %r15, %r14, %r13;
+; CHECK-NEXT: and.b32 %r9, %r4, %r8;
+; CHECK-NEXT: and.b32 %r10, %r2, %r6;
+; CHECK-NEXT: and.b32 %r11, %r10, %r9;
+; CHECK-NEXT: and.b32 %r12, %r3, %r7;
+; CHECK-NEXT: and.b32 %r13, %r1, %r5;
+; CHECK-NEXT: and.b32 %r14, %r13, %r12;
+; CHECK-NEXT: and.b32 %r15, %r14, %r11;
; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
; CHECK-NEXT: ret;
%res = call i32 @llvm.vector.reduce.and(<8 x i32> %in)
@@ -1872,43 +2219,21 @@ define i32 @reduce_and_i32_nonpow2(<7 x i32> %in) {
}
define i16 @reduce_or_i16(<8 x i16> %in) {
-; CHECK-SM80-LABEL: reduce_or_i16(
-; CHECK-SM80: {
-; CHECK-SM80-NEXT: .reg .b16 %rs<4>;
-; CHECK-SM80-NEXT: .reg .b32 %r<11>;
-; CHECK-SM80-EMPTY:
-; CHECK-SM80-NEXT: // %bb.0:
-; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_or_i16_param_0];
-; CHECK-SM80-NEXT: or.b32 %r5, %r2, %r4;
-; CHECK-SM80-NEXT: or.b32 %r6, %r1, %r3;
-; CHECK-SM80-NEXT: or.b32 %r7, %r6, %r5;
-; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs1}, %r7; }
-; CHECK-SM80-NEXT: // implicit-def: %rs2
-; CHECK-SM80-NEXT: mov.b32 %r8, {%rs1, %rs2};
-; CHECK-SM80-NEXT: or.b32 %r9, %r7, %r8;
-; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {%rs3, tmp}, %r9; }
-; CHECK-SM80-NEXT: cvt.u32.u16 %r10, %rs3;
-; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r10;
-; CHECK-SM80-NEXT: ret;
-;
-; CHECK-SM100-LABEL: reduce_or_i16(
-; CHECK-SM100: {
-; CHECK-SM100-NEXT: .reg .b16 %rs<4>;
-; CHECK-SM100-NEXT: .reg .b32 %r<11>;
-; CHECK-SM100-EMPTY:
-; CHECK-SM100-NEXT: // %bb.0:
-; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_or_i16_param_0];
-; CHECK-SM100-NEXT: or.b32 %r5, %r2, %r4;
-; CHECK-SM100-NEXT: or.b32 %r6, %r1, %r3;
-; CHECK-SM100-NEXT: or.b32 %r7, %r6, %r5;
-; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7;
-; CHECK-SM100-NEXT: // implicit-def: %rs2
-; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2};
-; CHECK-SM100-NEXT: or.b32 %r9, %r7, %r8;
-; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9;
-; CHECK-SM100-NEXT: cvt.u32.u16 %r10, %rs3;
-; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10;
-; CHECK-SM100-NEXT: ret;
+; CHECK-LABEL: reduce_or_i16(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<4>;
+; CHECK-NEXT: .reg .b32 %r<9>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_or_i16_param_0];
+; CHECK-NEXT: or.b32 %r5, %r2, %r4;
+; CHECK-NEXT: or.b32 %r6, %r1, %r3;
+; CHECK-NEXT: or.b32 %r7, %r6, %r5;
+; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r7;
+; CHECK-NEXT: or.b16 %rs3, %rs1, %rs2;
+; CHECK-NEXT: cvt.u32.u16 %r8, %rs3;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r8;
+; CHECK-NEXT: ret;
%res = call i16 @llvm.vector.reduce.or(<8 x i16> %in)
ret i16 %res
}
@@ -1948,13 +2273,13 @@ define i32 @reduce_or_i32(<8 x i32> %in) {
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_or_i32_param_0+16];
; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_or_i32_param_0];
-; CHECK-NEXT: or.b32 %r9, %r3, %r7;
-; CHECK-NEXT: or.b32 %r10, %r1, %r5;
-; CHECK-NEXT: or.b32 %r11, %r4, %r8;
-; CHECK-NEXT: or.b32 %r12, %r2, %r6;
-; CHECK-NEXT: or.b32 %r13, %r12, %r11;
-; CHECK-NEXT: or.b32 %r14, %r10, %r9;
-; CHECK-NEXT: or.b32 %r15, %r14, %r13;
+; CHECK-NEXT: or.b32 %r9, %r4, %r8;
+; CHECK-NEXT: or.b32 %r10, %r2, %r6;
+; CHECK-NEXT: or.b32 %r11, %r10, %r9;
+; CHECK-NEXT: or.b32 %r12, %r3, %r7;
+; CHECK-NEXT: or.b32 %r13, %r1, %r5;
+; CHECK-NEXT: or.b32 %r14, %r13, %r12;
+; CHECK-NEXT: or.b32 %r15, %r14, %r11;
; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
; CHECK-NEXT: ret;
%res = call i32 @llvm.vector.reduce.or(<8 x i32> %in)
@@ -1983,43 +2308,21 @@ define i32 @reduce_or_i32_nonpow2(<7 x i32> %in) {
}
define i16 @reduce_xor_i16(<8 x i16> %in) {
-; CHECK-SM80-LABEL: reduce_xor_i16(
-; CHECK-SM80: {
-; CHECK-SM80-NEXT: .reg .b16 %rs<4>;
-; CHECK-SM80-NEXT: .reg .b32 %r<11>;
-; CHECK-SM80-EMPTY:
-; CHECK-SM80-NEXT: // %bb.0:
-; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_xor_i16_param_0];
-; CHECK-SM80-NEXT: xor.b32 %r5, %r2, %r4;
-; CHECK-SM80-NEXT: xor.b32 %r6, %r1, %r3;
-; CHECK-SM80-NEXT: xor.b32 %r7, %r6, %r5;
-; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs1}, %r7; }
-; CHECK-SM80-NEXT: // implicit-def: %rs2
-; CHECK-SM80-NEXT: mov.b32 %r8, {%rs1, %rs2};
-; CHECK-SM80-NEXT: xor.b32 %r9, %r7, %r8;
-; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {%rs3, tmp}, %r9; }
-; CHECK-SM80-NEXT: cvt.u32.u16 %r10, %rs3;
-; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r10;
-; CHECK-SM80-NEXT: ret;
-;
-; CHECK-SM100-LABEL: reduce_xor_i16(
-; CHECK-SM100: {
-; CHECK-SM100-NEXT: .reg .b16 %rs<4>;
-; CHECK-SM100-NEXT: .reg .b32 %r<11>;
-; CHECK-SM100-EMPTY:
-; CHECK-SM100-NEXT: // %bb.0:
-; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_xor_i16_param_0];
-; CHECK-SM100-NEXT: xor.b32 %r5, %r2, %r4;
-; CHECK-SM100-NEXT: xor.b32 %r6, %r1, %r3;
-; CHECK-SM100-NEXT: xor.b32 %r7, %r6, %r5;
-; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7;
-; CHECK-SM100-NEXT: // implicit-def: %rs2
-; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2};
-; CHECK-SM100-NEXT: xor.b32 %r9, %r7, %r8;
-; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9;
-; CHECK-SM100-NEXT: cvt.u32.u16 %r10, %rs3;
-; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10;
-; CHECK-SM100-NEXT: ret;
+; CHECK-LABEL: reduce_xor_i16(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<4>;
+; CHECK-NEXT: .reg .b32 %r<9>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_xor_i16_param_0];
+; CHECK-NEXT: xor.b32 %r5, %r2, %r4;
+; CHECK-NEXT: xor.b32 %r6, %r1, %r3;
+; CHECK-NEXT: xor.b32 %r7, %r6, %r5;
+; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r7;
+; CHECK-NEXT: xor.b16 %rs3, %rs1, %rs2;
+; CHECK-NEXT: cvt.u32.u16 %r8, %rs3;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r8;
+; CHECK-NEXT: ret;
%res = call i16 @llvm.vector.reduce.xor(<8 x i16> %in)
ret i16 %res
}
@@ -2059,13 +2362,13 @@ define i32 @reduce_xor_i32(<8 x i32> %in) {
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_xor_i32_param_0+16];
; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_xor_i32_param_0];
-; CHECK-NEXT: xor.b32 %r9, %r3, %r7;
-; CHECK-NEXT: xor.b32 %r10, %r1, %r5;
-; CHECK-NEXT: xor.b32 %r11, %r4, %r8;
-; CHECK-NEXT: xor.b32 %r12, %r2, %r6;
-; CHECK-NEXT: xor.b32 %r13, %r12, %r11;
-; CHECK-NEXT: xor.b32 %r14, %r10, %r9;
-; CHECK-NEXT: xor.b32 %r15, %r14, %r13;
+; CHECK-NEXT: xor.b32 %r9, %r4, %r8;
+; CHECK-NEXT: xor.b32 %r10, %r2, %r6;
+; CHECK-NEXT: xor.b32 %r11, %r10, %r9;
+; CHECK-NEXT: xor.b32 %r12, %r3, %r7;
+; CHECK-NEXT: xor.b32 %r13, %r1, %r5;
+; CHECK-NEXT: xor.b32 %r14, %r13, %r12;
+; CHECK-NEXT: xor.b32 %r15, %r14, %r11;
; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
; CHECK-NEXT: ret;
%res = call i32 @llvm.vector.reduce.xor(<8 x i32> %in)
diff --git a/llvm/test/CodeGen/NVPTX/variadics-lowering.ll b/llvm/test/CodeGen/NVPTX/variadics-lowering.ll
index 5502980..1d69f8d 100644
--- a/llvm/test/CodeGen/NVPTX/variadics-lowering.ll
+++ b/llvm/test/CodeGen/NVPTX/variadics-lowering.ll
@@ -119,7 +119,7 @@ define dso_local i32 @foo() {
; CHECK-NEXT: [[CONV:%.*]] = sext i8 1 to i32
; CHECK-NEXT: [[CONV1:%.*]] = sext i16 1 to i32
; CHECK-NEXT: [[CONV2:%.*]] = fpext float 1.000000e+00 to double
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 40, ptr [[VARARG_BUFFER]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[FOO_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; CHECK-NEXT: store i32 [[CONV]], ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[FOO_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1
@@ -133,7 +133,7 @@ define dso_local i32 @foo() {
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[FOO_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 6
; CHECK-NEXT: store double 1.000000e+00, ptr [[TMP5]], align 8
; CHECK-NEXT: [[CALL:%.*]] = call i32 @variadics1(i32 noundef 1, ptr [[VARARG_BUFFER]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 40, ptr [[VARARG_BUFFER]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; CHECK-NEXT: ret i32 [[CALL]]
;
entry:
@@ -208,7 +208,7 @@ define dso_local i32 @bar() {
; CHECK-NEXT: [[S1_SROA_2_0_COPYLOAD:%.*]] = load i8, ptr getelementptr inbounds (i8, ptr @__const.bar.s1, i64 4), align 4
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[S1_SROA_3]], ptr align 1 getelementptr inbounds (i8, ptr @__const.bar.s1, i64 5), i64 3, i1 false)
; CHECK-NEXT: [[S1_SROA_31_0_COPYLOAD:%.*]] = load i64, ptr getelementptr inbounds (i8, ptr @__const.bar.s1, i64 8), align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[VARARG_BUFFER]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[BAR_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; CHECK-NEXT: store i32 [[S1_SROA_0_0_COPYLOAD]], ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[BAR_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1
@@ -216,7 +216,7 @@ define dso_local i32 @bar() {
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[BAR_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 3
; CHECK-NEXT: store i64 [[S1_SROA_31_0_COPYLOAD]], ptr [[TMP2]], align 8
; CHECK-NEXT: [[CALL:%.*]] = call i32 @variadics2(i32 noundef 1, ptr [[VARARG_BUFFER]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[VARARG_BUFFER]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; CHECK-NEXT: ret i32 [[CALL]]
;
entry:
@@ -274,11 +274,11 @@ define dso_local i32 @baz() {
; CHECK-LABEL: define dso_local i32 @baz() {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[BAZ_VARARG:%.*]], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[VARARG_BUFFER]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[BAZ_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; CHECK-NEXT: store <4 x i32> splat (i32 1), ptr [[TMP0]], align 16
; CHECK-NEXT: [[CALL:%.*]] = call i32 @variadics3(i32 noundef 1, ptr [[VARARG_BUFFER]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[VARARG_BUFFER]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; CHECK-NEXT: ret i32 [[CALL]]
;
entry:
@@ -333,11 +333,11 @@ define dso_local void @qux() {
; CHECK-NEXT: [[S:%.*]] = alloca [[STRUCT_S2:%.*]], align 8
; CHECK-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[QUX_VARARG:%.*]], align 8
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[S]], ptr align 8 @__const.qux.s, i64 16, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[VARARG_BUFFER]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[QUX_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; CHECK-NEXT: store i64 1, ptr [[TMP0]], align 8
; CHECK-NEXT: [[CALL:%.*]] = call i32 @variadics4(ptr noundef byval([[STRUCT_S2]]) align 8 [[S]], ptr [[VARARG_BUFFER]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[VARARG_BUFFER]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; CHECK-NEXT: ret void
;
entry:
diff --git a/llvm/test/CodeGen/PowerPC/NoCRFieldRedefWhenSpillingCRBIT.mir b/llvm/test/CodeGen/PowerPC/NoCRFieldRedefWhenSpillingCRBIT.mir
index 41e2124..2796cdb 100644
--- a/llvm/test/CodeGen/PowerPC/NoCRFieldRedefWhenSpillingCRBIT.mir
+++ b/llvm/test/CodeGen/PowerPC/NoCRFieldRedefWhenSpillingCRBIT.mir
@@ -1,6 +1,12 @@
# RUN: llc -mcpu=pwr8 -mtriple=powerpc64le-unknown-linux-gnu -start-after \
# RUN: virtregrewriter -ppc-asm-full-reg-names -verify-machineinstrs %s \
# RUN: -o - | FileCheck %s
+# RUN: llc -mcpu=pwr9 -mtriple=powerpc64le-unknown-linux-gnu -start-after \
+# RUN: virtregrewriter -ppc-asm-full-reg-names -verify-machineinstrs %s \
+# RUN: -o - | FileCheck %s
+# RUN: llc -mcpu=pwr10 -mtriple=powerpc64le-unknown-linux-gnu -start-after \
+# RUN: virtregrewriter -ppc-asm-full-reg-names -verify-machineinstrs %s \
+# RUN: -o - | FileCheck %s
--- |
; ModuleID = 'a.ll'
@@ -30,7 +36,7 @@
; Function Attrs: nounwind
declare void @llvm.stackprotector(ptr, ptr) #1
- attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="ppc64le" "target-features"="+altivec,+bpermd,+crypto,+direct-move,+extdiv,+htm,+power8-vector,+vsx,-power9-vector" "unsafe-fp-math"="false" "use-soft-float"="false" }
+ attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind }
!llvm.ident = !{!0}
diff --git a/llvm/test/CodeGen/PowerPC/memintr32.ll b/llvm/test/CodeGen/PowerPC/memintr32.ll
index c07a5af..4f0a996 100644
--- a/llvm/test/CodeGen/PowerPC/memintr32.ll
+++ b/llvm/test/CodeGen/PowerPC/memintr32.ll
@@ -11,7 +11,7 @@ define i32 @memcmp_test(ptr nocapture noundef readonly %ptr1, ptr nocapture noun
; CHECK-AIX-32-P9-NEXT: mflr r0
; CHECK-AIX-32-P9-NEXT: stwu r1, -64(r1)
; CHECK-AIX-32-P9-NEXT: stw r0, 72(r1)
-; CHECK-AIX-32-P9-NEXT: bl .memcmp[PR]
+; CHECK-AIX-32-P9-NEXT: bl .___memcmp[PR]
; CHECK-AIX-32-P9-NEXT: nop
; CHECK-AIX-32-P9-NEXT: addi r1, r1, 64
; CHECK-AIX-32-P9-NEXT: lwz r0, 8(r1)
diff --git a/llvm/test/CodeGen/PowerPC/memintr64.ll b/llvm/test/CodeGen/PowerPC/memintr64.ll
index b3a6650..0b0e556 100644
--- a/llvm/test/CodeGen/PowerPC/memintr64.ll
+++ b/llvm/test/CodeGen/PowerPC/memintr64.ll
@@ -39,7 +39,7 @@ define noundef i32 @_Z11memcmp_testPKvS0_m(ptr noundef readonly captures(none) %
; CHECK-AIX-64-P9-NEXT: mflr r0
; CHECK-AIX-64-P9-NEXT: stdu r1, -112(r1)
; CHECK-AIX-64-P9-NEXT: std r0, 128(r1)
-; CHECK-AIX-64-P9-NEXT: bl .memcmp[PR]
+; CHECK-AIX-64-P9-NEXT: bl .___memcmp64[PR]
; CHECK-AIX-64-P9-NEXT: nop
; CHECK-AIX-64-P9-NEXT: addi r1, r1, 112
; CHECK-AIX-64-P9-NEXT: ld r0, 16(r1)
diff --git a/llvm/test/CodeGen/RISCV/half-convert.ll b/llvm/test/CodeGen/RISCV/half-convert.ll
index facb544..0c152e6 100644
--- a/llvm/test/CodeGen/RISCV/half-convert.ll
+++ b/llvm/test/CodeGen/RISCV/half-convert.ll
@@ -2262,12 +2262,12 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
; RV32IZHINX-NEXT: addi a2, a3, -1
; RV32IZHINX-NEXT: .LBB10_4: # %start
; RV32IZHINX-NEXT: feq.s a3, s0, s0
-; RV32IZHINX-NEXT: neg a4, a1
-; RV32IZHINX-NEXT: neg a1, s1
+; RV32IZHINX-NEXT: neg a4, s1
+; RV32IZHINX-NEXT: neg a5, a1
; RV32IZHINX-NEXT: neg a3, a3
-; RV32IZHINX-NEXT: and a0, a1, a0
+; RV32IZHINX-NEXT: and a0, a4, a0
; RV32IZHINX-NEXT: and a1, a3, a2
-; RV32IZHINX-NEXT: or a0, a4, a0
+; RV32IZHINX-NEXT: or a0, a5, a0
; RV32IZHINX-NEXT: and a0, a3, a0
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
@@ -2309,12 +2309,12 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
; RV32IZDINXZHINX-NEXT: addi a2, a3, -1
; RV32IZDINXZHINX-NEXT: .LBB10_4: # %start
; RV32IZDINXZHINX-NEXT: feq.s a3, s0, s0
-; RV32IZDINXZHINX-NEXT: neg a4, a1
-; RV32IZDINXZHINX-NEXT: neg a1, s1
+; RV32IZDINXZHINX-NEXT: neg a4, s1
+; RV32IZDINXZHINX-NEXT: neg a5, a1
; RV32IZDINXZHINX-NEXT: neg a3, a3
-; RV32IZDINXZHINX-NEXT: and a0, a1, a0
+; RV32IZDINXZHINX-NEXT: and a0, a4, a0
; RV32IZDINXZHINX-NEXT: and a1, a3, a2
-; RV32IZDINXZHINX-NEXT: or a0, a4, a0
+; RV32IZDINXZHINX-NEXT: or a0, a5, a0
; RV32IZDINXZHINX-NEXT: and a0, a3, a0
; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZDINXZHINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
@@ -2653,12 +2653,12 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
; CHECK32-IZHINXMIN-NEXT: addi a2, a3, -1
; CHECK32-IZHINXMIN-NEXT: .LBB10_4: # %start
; CHECK32-IZHINXMIN-NEXT: feq.s a3, s0, s0
-; CHECK32-IZHINXMIN-NEXT: neg a4, a1
-; CHECK32-IZHINXMIN-NEXT: neg a1, s1
+; CHECK32-IZHINXMIN-NEXT: neg a4, s1
+; CHECK32-IZHINXMIN-NEXT: neg a5, a1
; CHECK32-IZHINXMIN-NEXT: neg a3, a3
-; CHECK32-IZHINXMIN-NEXT: and a0, a1, a0
+; CHECK32-IZHINXMIN-NEXT: and a0, a4, a0
; CHECK32-IZHINXMIN-NEXT: and a1, a3, a2
-; CHECK32-IZHINXMIN-NEXT: or a0, a4, a0
+; CHECK32-IZHINXMIN-NEXT: or a0, a5, a0
; CHECK32-IZHINXMIN-NEXT: and a0, a3, a0
; CHECK32-IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK32-IZHINXMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
@@ -2701,12 +2701,12 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
; CHECK32-IZDINXZHINXMIN-NEXT: addi a2, a3, -1
; CHECK32-IZDINXZHINXMIN-NEXT: .LBB10_4: # %start
; CHECK32-IZDINXZHINXMIN-NEXT: feq.s a3, s0, s0
-; CHECK32-IZDINXZHINXMIN-NEXT: neg a4, a1
-; CHECK32-IZDINXZHINXMIN-NEXT: neg a1, s1
+; CHECK32-IZDINXZHINXMIN-NEXT: neg a4, s1
+; CHECK32-IZDINXZHINXMIN-NEXT: neg a5, a1
; CHECK32-IZDINXZHINXMIN-NEXT: neg a3, a3
-; CHECK32-IZDINXZHINXMIN-NEXT: and a0, a1, a0
+; CHECK32-IZDINXZHINXMIN-NEXT: and a0, a4, a0
; CHECK32-IZDINXZHINXMIN-NEXT: and a1, a3, a2
-; CHECK32-IZDINXZHINXMIN-NEXT: or a0, a4, a0
+; CHECK32-IZDINXZHINXMIN-NEXT: or a0, a5, a0
; CHECK32-IZDINXZHINXMIN-NEXT: and a0, a3, a0
; CHECK32-IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK32-IZDINXZHINXMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
@@ -2972,18 +2972,19 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT: fcvt.s.h a0, a0
-; RV32IZHINX-NEXT: lui a1, 391168
-; RV32IZHINX-NEXT: addi a1, a1, -1
-; RV32IZHINX-NEXT: fle.s a2, zero, a0
-; RV32IZHINX-NEXT: flt.s a1, a1, a0
-; RV32IZHINX-NEXT: neg s0, a1
-; RV32IZHINX-NEXT: neg s1, a2
+; RV32IZHINX-NEXT: fcvt.s.h s0, a0
+; RV32IZHINX-NEXT: fle.s a0, zero, s0
+; RV32IZHINX-NEXT: neg s1, a0
+; RV32IZHINX-NEXT: mv a0, s0
; RV32IZHINX-NEXT: call __fixunssfdi
; RV32IZHINX-NEXT: and a0, s1, a0
+; RV32IZHINX-NEXT: lui a2, 391168
; RV32IZHINX-NEXT: and a1, s1, a1
-; RV32IZHINX-NEXT: or a0, s0, a0
-; RV32IZHINX-NEXT: or a1, s0, a1
+; RV32IZHINX-NEXT: addi a2, a2, -1
+; RV32IZHINX-NEXT: flt.s a2, a2, s0
+; RV32IZHINX-NEXT: neg a2, a2
+; RV32IZHINX-NEXT: or a0, a2, a0
+; RV32IZHINX-NEXT: or a1, a2, a1
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -3005,18 +3006,19 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
; RV32IZDINXZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZDINXZHINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZDINXZHINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IZDINXZHINX-NEXT: fcvt.s.h a0, a0
-; RV32IZDINXZHINX-NEXT: lui a1, 391168
-; RV32IZDINXZHINX-NEXT: addi a1, a1, -1
-; RV32IZDINXZHINX-NEXT: fle.s a2, zero, a0
-; RV32IZDINXZHINX-NEXT: flt.s a1, a1, a0
-; RV32IZDINXZHINX-NEXT: neg s0, a1
-; RV32IZDINXZHINX-NEXT: neg s1, a2
+; RV32IZDINXZHINX-NEXT: fcvt.s.h s0, a0
+; RV32IZDINXZHINX-NEXT: fle.s a0, zero, s0
+; RV32IZDINXZHINX-NEXT: neg s1, a0
+; RV32IZDINXZHINX-NEXT: mv a0, s0
; RV32IZDINXZHINX-NEXT: call __fixunssfdi
; RV32IZDINXZHINX-NEXT: and a0, s1, a0
+; RV32IZDINXZHINX-NEXT: lui a2, 391168
; RV32IZDINXZHINX-NEXT: and a1, s1, a1
-; RV32IZDINXZHINX-NEXT: or a0, s0, a0
-; RV32IZDINXZHINX-NEXT: or a1, s0, a1
+; RV32IZDINXZHINX-NEXT: addi a2, a2, -1
+; RV32IZDINXZHINX-NEXT: flt.s a2, a2, s0
+; RV32IZDINXZHINX-NEXT: neg a2, a2
+; RV32IZDINXZHINX-NEXT: or a0, a2, a0
+; RV32IZDINXZHINX-NEXT: or a1, a2, a1
; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZDINXZHINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IZDINXZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -3217,18 +3219,19 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
; CHECK32-IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; CHECK32-IZHINXMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; CHECK32-IZHINXMIN-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; CHECK32-IZHINXMIN-NEXT: fcvt.s.h a0, a0
-; CHECK32-IZHINXMIN-NEXT: lui a1, 391168
-; CHECK32-IZHINXMIN-NEXT: addi a1, a1, -1
-; CHECK32-IZHINXMIN-NEXT: fle.s a2, zero, a0
-; CHECK32-IZHINXMIN-NEXT: flt.s a1, a1, a0
-; CHECK32-IZHINXMIN-NEXT: neg s0, a1
-; CHECK32-IZHINXMIN-NEXT: neg s1, a2
+; CHECK32-IZHINXMIN-NEXT: fcvt.s.h s0, a0
+; CHECK32-IZHINXMIN-NEXT: fle.s a0, zero, s0
+; CHECK32-IZHINXMIN-NEXT: neg s1, a0
+; CHECK32-IZHINXMIN-NEXT: mv a0, s0
; CHECK32-IZHINXMIN-NEXT: call __fixunssfdi
; CHECK32-IZHINXMIN-NEXT: and a0, s1, a0
+; CHECK32-IZHINXMIN-NEXT: lui a2, 391168
; CHECK32-IZHINXMIN-NEXT: and a1, s1, a1
-; CHECK32-IZHINXMIN-NEXT: or a0, s0, a0
-; CHECK32-IZHINXMIN-NEXT: or a1, s0, a1
+; CHECK32-IZHINXMIN-NEXT: addi a2, a2, -1
+; CHECK32-IZHINXMIN-NEXT: flt.s a2, a2, s0
+; CHECK32-IZHINXMIN-NEXT: neg a2, a2
+; CHECK32-IZHINXMIN-NEXT: or a0, a2, a0
+; CHECK32-IZHINXMIN-NEXT: or a1, a2, a1
; CHECK32-IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK32-IZHINXMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; CHECK32-IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -3251,18 +3254,19 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
; CHECK32-IZDINXZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; CHECK32-IZDINXZHINXMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; CHECK32-IZDINXZHINXMIN-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0
-; CHECK32-IZDINXZHINXMIN-NEXT: lui a1, 391168
-; CHECK32-IZDINXZHINXMIN-NEXT: addi a1, a1, -1
-; CHECK32-IZDINXZHINXMIN-NEXT: fle.s a2, zero, a0
-; CHECK32-IZDINXZHINXMIN-NEXT: flt.s a1, a1, a0
-; CHECK32-IZDINXZHINXMIN-NEXT: neg s0, a1
-; CHECK32-IZDINXZHINXMIN-NEXT: neg s1, a2
+; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.h s0, a0
+; CHECK32-IZDINXZHINXMIN-NEXT: fle.s a0, zero, s0
+; CHECK32-IZDINXZHINXMIN-NEXT: neg s1, a0
+; CHECK32-IZDINXZHINXMIN-NEXT: mv a0, s0
; CHECK32-IZDINXZHINXMIN-NEXT: call __fixunssfdi
; CHECK32-IZDINXZHINXMIN-NEXT: and a0, s1, a0
+; CHECK32-IZDINXZHINXMIN-NEXT: lui a2, 391168
; CHECK32-IZDINXZHINXMIN-NEXT: and a1, s1, a1
-; CHECK32-IZDINXZHINXMIN-NEXT: or a0, s0, a0
-; CHECK32-IZDINXZHINXMIN-NEXT: or a1, s0, a1
+; CHECK32-IZDINXZHINXMIN-NEXT: addi a2, a2, -1
+; CHECK32-IZDINXZHINXMIN-NEXT: flt.s a2, a2, s0
+; CHECK32-IZDINXZHINXMIN-NEXT: neg a2, a2
+; CHECK32-IZDINXZHINXMIN-NEXT: or a0, a2, a0
+; CHECK32-IZDINXZHINXMIN-NEXT: or a1, a2, a1
; CHECK32-IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK32-IZDINXZHINXMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; CHECK32-IZDINXZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rv64-half-convert.ll b/llvm/test/CodeGen/RISCV/rv64-half-convert.ll
index 57061e1..f89d1abf 100644
--- a/llvm/test/CodeGen/RISCV/rv64-half-convert.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-half-convert.ll
@@ -253,8 +253,8 @@ define i128 @fptosi_sat_f16_to_i128(half %a) nounwind {
; RV64IZHINX-NEXT: srli a1, a2, 1
; RV64IZHINX-NEXT: .LBB4_4:
; RV64IZHINX-NEXT: feq.s a2, s0, s0
-; RV64IZHINX-NEXT: neg a3, a3
; RV64IZHINX-NEXT: neg a4, s1
+; RV64IZHINX-NEXT: neg a3, a3
; RV64IZHINX-NEXT: neg a2, a2
; RV64IZHINX-NEXT: and a0, a4, a0
; RV64IZHINX-NEXT: and a1, a2, a1
@@ -334,18 +334,19 @@ define i128 @fptoui_sat_f16_to_i128(half %a) nounwind {
; RV64IZHINX-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
-; RV64IZHINX-NEXT: fcvt.s.h a0, a0
-; RV64IZHINX-NEXT: lui a1, 522240
-; RV64IZHINX-NEXT: addi a1, a1, -1
-; RV64IZHINX-NEXT: fle.s a2, zero, a0
-; RV64IZHINX-NEXT: flt.s a1, a1, a0
-; RV64IZHINX-NEXT: neg s0, a1
-; RV64IZHINX-NEXT: neg s1, a2
+; RV64IZHINX-NEXT: fcvt.s.h s0, a0
+; RV64IZHINX-NEXT: fle.s a0, zero, s0
+; RV64IZHINX-NEXT: neg s1, a0
+; RV64IZHINX-NEXT: mv a0, s0
; RV64IZHINX-NEXT: call __fixunssfti
; RV64IZHINX-NEXT: and a0, s1, a0
+; RV64IZHINX-NEXT: lui a2, 522240
; RV64IZHINX-NEXT: and a1, s1, a1
-; RV64IZHINX-NEXT: or a0, s0, a0
-; RV64IZHINX-NEXT: or a1, s0, a1
+; RV64IZHINX-NEXT: addi a2, a2, -1
+; RV64IZHINX-NEXT: flt.s a2, a2, s0
+; RV64IZHINX-NEXT: neg a2, a2
+; RV64IZHINX-NEXT: or a0, a2, a0
+; RV64IZHINX-NEXT: or a1, a2, a1
; RV64IZHINX-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64IZHINX-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64IZHINX-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rv64zbkb.ll b/llvm/test/CodeGen/RISCV/rv64zbkb.ll
index f2c41db..4537d18 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbkb.ll
@@ -495,6 +495,36 @@ define void @pack_lo_packh_hi_packh_3(i8 %0, i8 %1, i8 %2, i8 %3, ptr %p) nounwi
ret void
}
+define i32 @pack_lo_packh_hi_packh_4(i8 zeroext %0, i8 zeroext %1, i8 zeroext %2, i8 zeroext %3, ptr %p) nounwind {
+; RV64I-LABEL: pack_lo_packh_hi_packh_4:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a1, a1, 8
+; RV64I-NEXT: slli a2, a2, 16
+; RV64I-NEXT: slliw a3, a3, 24
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: or a2, a2, a3
+; RV64I-NEXT: or a0, a0, a2
+; RV64I-NEXT: ret
+;
+; RV64ZBKB-LABEL: pack_lo_packh_hi_packh_4:
+; RV64ZBKB: # %bb.0:
+; RV64ZBKB-NEXT: packh a0, a0, a1
+; RV64ZBKB-NEXT: packh a1, a3, a2
+; RV64ZBKB-NEXT: packw a0, a0, a1
+; RV64ZBKB-NEXT: ret
+ %a = zext i8 %0 to i32
+ %b = zext i8 %1 to i32
+ %c = zext i8 %2 to i32
+ %d = zext i8 %3 to i32
+ %e = shl i32 %b, 8
+ %f = shl i32 %c, 16
+ %g = shl i32 %d, 24
+ %h = or i32 %a, %e
+ %i = or i32 %h, %f
+ %j = or i32 %i, %g
+ ret i32 %j
+}
+
define void @pack_lo_zext_hi_packh(i16 zeroext %0, i8 zeroext %1, i8 zeroext %2, ptr %p) nounwind {
; RV64I-LABEL: pack_lo_zext_hi_packh:
; RV64I: # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ssegN-store.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ssegN-store.ll
new file mode 100644
index 0000000..abf2894
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ssegN-store.ll
@@ -0,0 +1,72 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+
+define void @store_factor2(<8 x i8> %v0, <8 x i8> %v1, ptr %ptr, i64 %stride) {
+; CHECK-LABEL: store_factor2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vssseg2e8.v v8, (a0), a1
+; CHECK-NEXT: ret
+ call void @llvm.riscv.sseg2.store.mask.v8i8.i64.i64(<8 x i8> %v0, <8 x i8> %v1, ptr %ptr, i64 %stride, <8 x i1> splat (i1 true), i64 8)
+ ret void
+}
+
+define void @store_factor3(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, ptr %ptr, i64 %stride) {
+; CHECK-LABEL: store_factor3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vssseg3e8.v v8, (a0), a1
+; CHECK-NEXT: ret
+ call void @llvm.riscv.sseg3.store.mask.v8i8.i64.i64(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, ptr %ptr, i64 %stride, <8 x i1> splat (i1 true), i64 8)
+ ret void
+}
+
+define void @store_factor4(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, ptr %ptr, i64 %stride) {
+; CHECK-LABEL: store_factor4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vssseg4e8.v v8, (a0), a1
+; CHECK-NEXT: ret
+ call void @llvm.riscv.sseg4.store.mask.v8i8.i64.i64(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, ptr %ptr, i64 %stride, <8 x i1> splat (i1 true), i64 8)
+ ret void
+}
+
+define void @store_factor5(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, ptr %ptr, i64 %stride) {
+; CHECK-LABEL: store_factor5:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vssseg5e8.v v8, (a0), a1
+; CHECK-NEXT: ret
+ call void @llvm.riscv.sseg5.store.mask.v8i8.i64.i64(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, ptr %ptr, i64 %stride, <8 x i1> splat (i1 true), i64 8)
+ ret void
+}
+
+define void @store_factor6(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, <8 x i8> %v5, ptr %ptr, i64 %stride) {
+; CHECK-LABEL: store_factor6:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vssseg6e8.v v8, (a0), a1
+; CHECK-NEXT: ret
+ call void @llvm.riscv.sseg6.store.mask.v8i8.i64.i64(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, <8 x i8> %v5, ptr %ptr, i64 %stride, <8 x i1> splat (i1 true), i64 8)
+ ret void
+}
+
+define void @store_factor7(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, <8 x i8> %v5, <8 x i8> %v6, ptr %ptr, i64 %stride) {
+; CHECK-LABEL: store_factor7:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vssseg7e8.v v8, (a0), a1
+; CHECK-NEXT: ret
+ call void @llvm.riscv.sseg7.store.mask.v8i8.i64.i64(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, <8 x i8> %v5, <8 x i8> %v6, ptr %ptr, i64 %stride, <8 x i1> splat (i1 true), i64 8)
+ ret void
+}
+
+define void @store_factor8(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, <8 x i8> %v5, <8 x i8> %v6, <8 x i8> %v7, ptr %ptr, i64 %stride) {
+; CHECK-LABEL: store_factor8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vssseg8e8.v v8, (a0), a1
+; CHECK-NEXT: ret
+ call void @llvm.riscv.sseg8.store.mask.v8i8.i64.i64(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, <8 x i8> %v5, <8 x i8> %v6, <8 x i8> %v7, ptr %ptr, i64 %stride, <8 x i1> splat (i1 true), i64 8)
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vxrm-insert-out-of-loop.ll b/llvm/test/CodeGen/RISCV/rvv/vxrm-insert-out-of-loop.ll
index 7990dfc..4c84304 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vxrm-insert-out-of-loop.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vxrm-insert-out-of-loop.ll
@@ -366,8 +366,8 @@ define void @test1(ptr nocapture noundef writeonly %dst, i32 noundef signext %i_
; RV64X60-NEXT: # => This Inner Loop Header: Depth=2
; RV64X60-NEXT: vl2r.v v8, (s2)
; RV64X60-NEXT: vl2r.v v10, (s3)
-; RV64X60-NEXT: sub s1, s1, t3
; RV64X60-NEXT: vaaddu.vv v8, v8, v10
+; RV64X60-NEXT: sub s1, s1, t3
; RV64X60-NEXT: vs2r.v v8, (s4)
; RV64X60-NEXT: add s4, s4, t3
; RV64X60-NEXT: add s3, s3, t3
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll
index 483d707..3d93eca 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll
@@ -17,11 +17,11 @@
; CL: OpFunction
; CL: %[[#FooVar:]] = OpVariable
; CL-NEXT: %[[#Casted1:]] = OpBitcast %[[#PtrChar]] %[[#FooVar]]
-; CL-NEXT: OpLifetimeStart %[[#Casted1]], 72
+; CL-NEXT: OpLifetimeStart %[[#Casted1]], 16
; CL-NEXT: OpBitcast
; CL-NEXT: OpInBoundsPtrAccessChain
; CL-NEXT: %[[#Casted2:]] = OpBitcast %[[#PtrChar]] %[[#FooVar]]
-; CL-NEXT: OpLifetimeStop %[[#Casted2]], 72
+; CL-NEXT: OpLifetimeStop %[[#Casted2]], 16
; VK: OpFunction
; VK: %[[#FooVar:]] = OpVariable
@@ -29,18 +29,20 @@
; VK-NEXT: OpReturn
define spir_func void @foo(ptr noundef byval(%tprange) align 8 %_arg_UserRange) {
%RoundedRangeKernel = alloca %tprange, align 8
- call void @llvm.lifetime.start.p0(i64 72, ptr nonnull %RoundedRangeKernel)
+ call void @llvm.lifetime.start.p0(ptr nonnull %RoundedRangeKernel)
%KernelFunc = getelementptr inbounds i8, ptr %RoundedRangeKernel, i64 8
- call void @llvm.lifetime.end.p0(i64 72, ptr nonnull %RoundedRangeKernel)
+ call void @llvm.lifetime.end.p0(ptr nonnull %RoundedRangeKernel)
ret void
}
; CL: OpFunction
; CL: %[[#BarVar:]] = OpVariable
-; CL-NEXT: OpLifetimeStart %[[#BarVar]], 0
+; CL-NEXT: %[[#Casted1:]] = OpBitcast %[[#PtrChar]] %[[#BarVar]]
+; CL-NEXT: OpLifetimeStart %[[#Casted1]], 16
; CL-NEXT: OpBitcast
; CL-NEXT: OpInBoundsPtrAccessChain
-; CL-NEXT: OpLifetimeStop %[[#BarVar]], 0
+; CL-NEXT: %[[#Casted2:]] = OpBitcast %[[#PtrChar]] %[[#BarVar]]
+; CL-NEXT: OpLifetimeStop %[[#Casted2]], 16
; VK: OpFunction
; VK: %[[#BarVar:]] = OpVariable
@@ -48,9 +50,9 @@ define spir_func void @foo(ptr noundef byval(%tprange) align 8 %_arg_UserRange)
; VK-NEXT: OpReturn
define spir_func void @bar(ptr noundef byval(%tprange) align 8 %_arg_UserRange) {
%RoundedRangeKernel = alloca %tprange, align 8
- call void @llvm.lifetime.start.p0(i64 -1, ptr nonnull %RoundedRangeKernel)
+ call void @llvm.lifetime.start.p0(ptr nonnull %RoundedRangeKernel)
%KernelFunc = getelementptr inbounds i8, ptr %RoundedRangeKernel, i64 8
- call void @llvm.lifetime.end.p0(i64 -1, ptr nonnull %RoundedRangeKernel)
+ call void @llvm.lifetime.end.p0(ptr nonnull %RoundedRangeKernel)
ret void
}
@@ -66,12 +68,12 @@ define spir_func void @bar(ptr noundef byval(%tprange) align 8 %_arg_UserRange)
; VK-NEXT: OpReturn
define spir_func void @test(ptr noundef align 8 %_arg) {
%var = alloca i8, align 8
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %var)
+ call void @llvm.lifetime.start.p0(ptr nonnull %var)
%KernelFunc = getelementptr inbounds i8, ptr %var, i64 1
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %var)
+ call void @llvm.lifetime.end.p0(ptr nonnull %var)
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
diff --git a/llvm/test/CodeGen/Thumb/scmp.ll b/llvm/test/CodeGen/Thumb/scmp.ll
index 661dbe9..c002449 100644
--- a/llvm/test/CodeGen/Thumb/scmp.ll
+++ b/llvm/test/CodeGen/Thumb/scmp.ll
@@ -1,151 +1,420 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=thumbv7-apple-darwin %s -o - | FileCheck %s
+; RUN: llc -mtriple=thumbv6m-eabi %s -o - | FileCheck %s -check-prefix=THUMB1
+; RUN: llc -mtriple=thumbv7-apple-darwin %s -o - | FileCheck %s -check-prefix=THUMB2
+; RUN: llc -mtriple thumbv8.1m.main-none-eabi -o - %s | FileCheck %s --check-prefix=V81M
define i8 @scmp_8_8(i8 signext %x, i8 signext %y) nounwind {
-; CHECK-LABEL: scmp_8_8:
-; CHECK: @ %bb.0:
-; CHECK-NEXT: cmp r0, r1
-; CHECK-NEXT: mov.w r0, #0
-; CHECK-NEXT: mov.w r2, #0
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r0, #1
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt r2, #1
-; CHECK-NEXT: subs r0, r2, r0
-; CHECK-NEXT: bx lr
+; THUMB1-LABEL: scmp_8_8:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: movs r2, #1
+; THUMB1-NEXT: movs r3, #0
+; THUMB1-NEXT: cmp r0, r1
+; THUMB1-NEXT: mov r0, r2
+; THUMB1-NEXT: bge .LBB0_3
+; THUMB1-NEXT: @ %bb.1:
+; THUMB1-NEXT: ble .LBB0_4
+; THUMB1-NEXT: .LBB0_2:
+; THUMB1-NEXT: subs r0, r2, r0
+; THUMB1-NEXT: bx lr
+; THUMB1-NEXT: .LBB0_3:
+; THUMB1-NEXT: mov r0, r3
+; THUMB1-NEXT: bgt .LBB0_2
+; THUMB1-NEXT: .LBB0_4:
+; THUMB1-NEXT: mov r2, r3
+; THUMB1-NEXT: subs r0, r2, r0
+; THUMB1-NEXT: bx lr
+;
+; THUMB2-LABEL: scmp_8_8:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: subs r0, r0, r1
+; THUMB2-NEXT: it gt
+; THUMB2-NEXT: movgt r0, #1
+; THUMB2-NEXT: it lt
+; THUMB2-NEXT: movlt.w r0, #-1
+; THUMB2-NEXT: bx lr
+;
+; V81M-LABEL: scmp_8_8:
+; V81M: @ %bb.0:
+; V81M-NEXT: cmp r0, r1
+; V81M-NEXT: cset r0, gt
+; V81M-NEXT: it lt
+; V81M-NEXT: movlt.w r0, #-1
+; V81M-NEXT: bx lr
%1 = call i8 @llvm.scmp(i8 %x, i8 %y)
ret i8 %1
}
define i8 @scmp_8_16(i16 signext %x, i16 signext %y) nounwind {
-; CHECK-LABEL: scmp_8_16:
-; CHECK: @ %bb.0:
-; CHECK-NEXT: cmp r0, r1
-; CHECK-NEXT: mov.w r0, #0
-; CHECK-NEXT: mov.w r2, #0
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r0, #1
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt r2, #1
-; CHECK-NEXT: subs r0, r2, r0
-; CHECK-NEXT: bx lr
+; THUMB1-LABEL: scmp_8_16:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: movs r2, #1
+; THUMB1-NEXT: movs r3, #0
+; THUMB1-NEXT: cmp r0, r1
+; THUMB1-NEXT: mov r0, r2
+; THUMB1-NEXT: bge .LBB1_3
+; THUMB1-NEXT: @ %bb.1:
+; THUMB1-NEXT: ble .LBB1_4
+; THUMB1-NEXT: .LBB1_2:
+; THUMB1-NEXT: subs r0, r2, r0
+; THUMB1-NEXT: bx lr
+; THUMB1-NEXT: .LBB1_3:
+; THUMB1-NEXT: mov r0, r3
+; THUMB1-NEXT: bgt .LBB1_2
+; THUMB1-NEXT: .LBB1_4:
+; THUMB1-NEXT: mov r2, r3
+; THUMB1-NEXT: subs r0, r2, r0
+; THUMB1-NEXT: bx lr
+;
+; THUMB2-LABEL: scmp_8_16:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: subs r0, r0, r1
+; THUMB2-NEXT: it gt
+; THUMB2-NEXT: movgt r0, #1
+; THUMB2-NEXT: it lt
+; THUMB2-NEXT: movlt.w r0, #-1
+; THUMB2-NEXT: bx lr
+;
+; V81M-LABEL: scmp_8_16:
+; V81M: @ %bb.0:
+; V81M-NEXT: cmp r0, r1
+; V81M-NEXT: cset r0, gt
+; V81M-NEXT: it lt
+; V81M-NEXT: movlt.w r0, #-1
+; V81M-NEXT: bx lr
%1 = call i8 @llvm.scmp(i16 %x, i16 %y)
ret i8 %1
}
define i8 @scmp_8_32(i32 %x, i32 %y) nounwind {
-; CHECK-LABEL: scmp_8_32:
-; CHECK: @ %bb.0:
-; CHECK-NEXT: cmp r0, r1
-; CHECK-NEXT: mov.w r0, #0
-; CHECK-NEXT: mov.w r2, #0
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r0, #1
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt r2, #1
-; CHECK-NEXT: subs r0, r2, r0
-; CHECK-NEXT: bx lr
+; THUMB1-LABEL: scmp_8_32:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: movs r2, #1
+; THUMB1-NEXT: movs r3, #0
+; THUMB1-NEXT: cmp r0, r1
+; THUMB1-NEXT: mov r0, r2
+; THUMB1-NEXT: bge .LBB2_3
+; THUMB1-NEXT: @ %bb.1:
+; THUMB1-NEXT: ble .LBB2_4
+; THUMB1-NEXT: .LBB2_2:
+; THUMB1-NEXT: subs r0, r2, r0
+; THUMB1-NEXT: bx lr
+; THUMB1-NEXT: .LBB2_3:
+; THUMB1-NEXT: mov r0, r3
+; THUMB1-NEXT: bgt .LBB2_2
+; THUMB1-NEXT: .LBB2_4:
+; THUMB1-NEXT: mov r2, r3
+; THUMB1-NEXT: subs r0, r2, r0
+; THUMB1-NEXT: bx lr
+;
+; THUMB2-LABEL: scmp_8_32:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: subs r0, r0, r1
+; THUMB2-NEXT: it gt
+; THUMB2-NEXT: movgt r0, #1
+; THUMB2-NEXT: it lt
+; THUMB2-NEXT: movlt.w r0, #-1
+; THUMB2-NEXT: bx lr
+;
+; V81M-LABEL: scmp_8_32:
+; V81M: @ %bb.0:
+; V81M-NEXT: cmp r0, r1
+; V81M-NEXT: cset r0, gt
+; V81M-NEXT: it lt
+; V81M-NEXT: movlt.w r0, #-1
+; V81M-NEXT: bx lr
%1 = call i8 @llvm.scmp(i32 %x, i32 %y)
ret i8 %1
}
define i8 @scmp_8_64(i64 %x, i64 %y) nounwind {
-; CHECK-LABEL: scmp_8_64:
-; CHECK: @ %bb.0:
-; CHECK-NEXT: subs.w r12, r0, r2
-; CHECK-NEXT: mov.w r9, #0
-; CHECK-NEXT: sbcs.w r12, r1, r3
-; CHECK-NEXT: mov.w r12, #0
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r12, #1
-; CHECK-NEXT: subs r0, r2, r0
-; CHECK-NEXT: sbcs.w r0, r3, r1
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r9, #1
-; CHECK-NEXT: sub.w r0, r9, r12
-; CHECK-NEXT: bx lr
+; THUMB1-LABEL: scmp_8_64:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: .save {r4, r5, r6, lr}
+; THUMB1-NEXT: push {r4, r5, r6, lr}
+; THUMB1-NEXT: movs r4, #1
+; THUMB1-NEXT: movs r5, #0
+; THUMB1-NEXT: subs r6, r0, r2
+; THUMB1-NEXT: mov r6, r1
+; THUMB1-NEXT: sbcs r6, r3
+; THUMB1-NEXT: mov r6, r4
+; THUMB1-NEXT: blt .LBB3_2
+; THUMB1-NEXT: @ %bb.1:
+; THUMB1-NEXT: mov r6, r5
+; THUMB1-NEXT: .LBB3_2:
+; THUMB1-NEXT: subs r0, r2, r0
+; THUMB1-NEXT: sbcs r3, r1
+; THUMB1-NEXT: blt .LBB3_4
+; THUMB1-NEXT: @ %bb.3:
+; THUMB1-NEXT: mov r4, r5
+; THUMB1-NEXT: .LBB3_4:
+; THUMB1-NEXT: subs r0, r4, r6
+; THUMB1-NEXT: pop {r4, r5, r6, pc}
+;
+; THUMB2-LABEL: scmp_8_64:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: subs.w r12, r0, r2
+; THUMB2-NEXT: mov.w r9, #0
+; THUMB2-NEXT: sbcs.w r12, r1, r3
+; THUMB2-NEXT: mov.w r12, #0
+; THUMB2-NEXT: it lt
+; THUMB2-NEXT: movlt.w r12, #1
+; THUMB2-NEXT: subs r0, r2, r0
+; THUMB2-NEXT: sbcs.w r0, r3, r1
+; THUMB2-NEXT: it lt
+; THUMB2-NEXT: movlt.w r9, #1
+; THUMB2-NEXT: sub.w r0, r9, r12
+; THUMB2-NEXT: bx lr
+;
+; V81M-LABEL: scmp_8_64:
+; V81M: @ %bb.0:
+; V81M-NEXT: subs.w r12, r0, r2
+; V81M-NEXT: sbcs.w r12, r1, r3
+; V81M-NEXT: cset r12, lt
+; V81M-NEXT: subs r0, r2, r0
+; V81M-NEXT: sbcs.w r0, r3, r1
+; V81M-NEXT: cset r0, lt
+; V81M-NEXT: sub.w r0, r0, r12
+; V81M-NEXT: bx lr
%1 = call i8 @llvm.scmp(i64 %x, i64 %y)
ret i8 %1
}
define i8 @scmp_8_128(i128 %x, i128 %y) nounwind {
-; CHECK-LABEL: scmp_8_128:
-; CHECK: @ %bb.0:
-; CHECK-NEXT: push {r4, r5, r6, lr}
-; CHECK-NEXT: add.w lr, sp, #16
-; CHECK-NEXT: ldr r4, [sp, #28]
-; CHECK-NEXT: movs r5, #0
-; CHECK-NEXT: ldm.w lr, {r9, r12, lr}
-; CHECK-NEXT: subs.w r6, r0, r9
-; CHECK-NEXT: sbcs.w r6, r1, r12
-; CHECK-NEXT: sbcs.w r6, r2, lr
-; CHECK-NEXT: sbcs.w r6, r3, r4
-; CHECK-NEXT: mov.w r6, #0
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r6, #1
-; CHECK-NEXT: subs.w r0, r9, r0
-; CHECK-NEXT: sbcs.w r0, r12, r1
-; CHECK-NEXT: sbcs.w r0, lr, r2
-; CHECK-NEXT: sbcs.w r0, r4, r3
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r5, #1
-; CHECK-NEXT: subs r0, r5, r6
-; CHECK-NEXT: pop {r4, r5, r6, pc}
+; THUMB1-LABEL: scmp_8_128:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: .save {r4, r5, r6, r7, lr}
+; THUMB1-NEXT: push {r4, r5, r6, r7, lr}
+; THUMB1-NEXT: .pad #20
+; THUMB1-NEXT: sub sp, #20
+; THUMB1-NEXT: str r3, [sp, #16] @ 4-byte Spill
+; THUMB1-NEXT: movs r3, #1
+; THUMB1-NEXT: str r3, [sp] @ 4-byte Spill
+; THUMB1-NEXT: movs r3, #0
+; THUMB1-NEXT: str r3, [sp, #12] @ 4-byte Spill
+; THUMB1-NEXT: ldr r6, [sp, #52]
+; THUMB1-NEXT: add r7, sp, #40
+; THUMB1-NEXT: ldm r7, {r3, r5, r7}
+; THUMB1-NEXT: subs r4, r0, r3
+; THUMB1-NEXT: str r1, [sp, #4] @ 4-byte Spill
+; THUMB1-NEXT: mov r4, r1
+; THUMB1-NEXT: ldr r1, [sp] @ 4-byte Reload
+; THUMB1-NEXT: sbcs r4, r5
+; THUMB1-NEXT: str r2, [sp, #8] @ 4-byte Spill
+; THUMB1-NEXT: mov r4, r2
+; THUMB1-NEXT: sbcs r4, r7
+; THUMB1-NEXT: ldr r4, [sp, #16] @ 4-byte Reload
+; THUMB1-NEXT: sbcs r4, r6
+; THUMB1-NEXT: mov r2, r1
+; THUMB1-NEXT: blt .LBB4_2
+; THUMB1-NEXT: @ %bb.1:
+; THUMB1-NEXT: ldr r2, [sp, #12] @ 4-byte Reload
+; THUMB1-NEXT: .LBB4_2:
+; THUMB1-NEXT: subs r0, r3, r0
+; THUMB1-NEXT: ldr r0, [sp, #4] @ 4-byte Reload
+; THUMB1-NEXT: sbcs r5, r0
+; THUMB1-NEXT: ldr r0, [sp, #8] @ 4-byte Reload
+; THUMB1-NEXT: sbcs r7, r0
+; THUMB1-NEXT: ldr r0, [sp, #16] @ 4-byte Reload
+; THUMB1-NEXT: sbcs r6, r0
+; THUMB1-NEXT: blt .LBB4_4
+; THUMB1-NEXT: @ %bb.3:
+; THUMB1-NEXT: ldr r1, [sp, #12] @ 4-byte Reload
+; THUMB1-NEXT: .LBB4_4:
+; THUMB1-NEXT: subs r0, r1, r2
+; THUMB1-NEXT: add sp, #20
+; THUMB1-NEXT: pop {r4, r5, r6, r7, pc}
+;
+; THUMB2-LABEL: scmp_8_128:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: push {r4, r5, r6, lr}
+; THUMB2-NEXT: add.w lr, sp, #16
+; THUMB2-NEXT: ldr r4, [sp, #28]
+; THUMB2-NEXT: movs r5, #0
+; THUMB2-NEXT: ldm.w lr, {r9, r12, lr}
+; THUMB2-NEXT: subs.w r6, r0, r9
+; THUMB2-NEXT: sbcs.w r6, r1, r12
+; THUMB2-NEXT: sbcs.w r6, r2, lr
+; THUMB2-NEXT: sbcs.w r6, r3, r4
+; THUMB2-NEXT: mov.w r6, #0
+; THUMB2-NEXT: it lt
+; THUMB2-NEXT: movlt r6, #1
+; THUMB2-NEXT: subs.w r0, r9, r0
+; THUMB2-NEXT: sbcs.w r0, r12, r1
+; THUMB2-NEXT: sbcs.w r0, lr, r2
+; THUMB2-NEXT: sbcs.w r0, r4, r3
+; THUMB2-NEXT: it lt
+; THUMB2-NEXT: movlt r5, #1
+; THUMB2-NEXT: subs r0, r5, r6
+; THUMB2-NEXT: pop {r4, r5, r6, pc}
+;
+; V81M-LABEL: scmp_8_128:
+; V81M: @ %bb.0:
+; V81M-NEXT: .save {r4, r5, r6, lr}
+; V81M-NEXT: push {r4, r5, r6, lr}
+; V81M-NEXT: ldrd r5, r4, [sp, #16]
+; V81M-NEXT: ldrd lr, r12, [sp, #24]
+; V81M-NEXT: subs r6, r0, r5
+; V81M-NEXT: sbcs.w r6, r1, r4
+; V81M-NEXT: sbcs.w r6, r2, lr
+; V81M-NEXT: sbcs.w r6, r3, r12
+; V81M-NEXT: cset r6, lt
+; V81M-NEXT: subs r0, r5, r0
+; V81M-NEXT: sbcs.w r0, r4, r1
+; V81M-NEXT: sbcs.w r0, lr, r2
+; V81M-NEXT: sbcs.w r0, r12, r3
+; V81M-NEXT: cset r0, lt
+; V81M-NEXT: subs r0, r0, r6
+; V81M-NEXT: pop {r4, r5, r6, pc}
%1 = call i8 @llvm.scmp(i128 %x, i128 %y)
ret i8 %1
}
define i32 @scmp_32_32(i32 %x, i32 %y) nounwind {
-; CHECK-LABEL: scmp_32_32:
-; CHECK: @ %bb.0:
-; CHECK-NEXT: cmp r0, r1
-; CHECK-NEXT: mov.w r0, #0
-; CHECK-NEXT: mov.w r2, #0
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r0, #1
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt r2, #1
-; CHECK-NEXT: subs r0, r2, r0
-; CHECK-NEXT: bx lr
+; THUMB1-LABEL: scmp_32_32:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: movs r2, #1
+; THUMB1-NEXT: movs r3, #0
+; THUMB1-NEXT: cmp r0, r1
+; THUMB1-NEXT: mov r0, r2
+; THUMB1-NEXT: bge .LBB5_3
+; THUMB1-NEXT: @ %bb.1:
+; THUMB1-NEXT: ble .LBB5_4
+; THUMB1-NEXT: .LBB5_2:
+; THUMB1-NEXT: subs r0, r2, r0
+; THUMB1-NEXT: bx lr
+; THUMB1-NEXT: .LBB5_3:
+; THUMB1-NEXT: mov r0, r3
+; THUMB1-NEXT: bgt .LBB5_2
+; THUMB1-NEXT: .LBB5_4:
+; THUMB1-NEXT: mov r2, r3
+; THUMB1-NEXT: subs r0, r2, r0
+; THUMB1-NEXT: bx lr
+;
+; THUMB2-LABEL: scmp_32_32:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: subs r0, r0, r1
+; THUMB2-NEXT: it gt
+; THUMB2-NEXT: movgt r0, #1
+; THUMB2-NEXT: it lt
+; THUMB2-NEXT: movlt.w r0, #-1
+; THUMB2-NEXT: bx lr
+;
+; V81M-LABEL: scmp_32_32:
+; V81M: @ %bb.0:
+; V81M-NEXT: cmp r0, r1
+; V81M-NEXT: cset r0, gt
+; V81M-NEXT: it lt
+; V81M-NEXT: movlt.w r0, #-1
+; V81M-NEXT: bx lr
%1 = call i32 @llvm.scmp(i32 %x, i32 %y)
ret i32 %1
}
define i32 @scmp_32_64(i64 %x, i64 %y) nounwind {
-; CHECK-LABEL: scmp_32_64:
-; CHECK: @ %bb.0:
-; CHECK-NEXT: subs.w r12, r0, r2
-; CHECK-NEXT: mov.w r9, #0
-; CHECK-NEXT: sbcs.w r12, r1, r3
-; CHECK-NEXT: mov.w r12, #0
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r12, #1
-; CHECK-NEXT: subs r0, r2, r0
-; CHECK-NEXT: sbcs.w r0, r3, r1
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r9, #1
-; CHECK-NEXT: sub.w r0, r9, r12
-; CHECK-NEXT: bx lr
+; THUMB1-LABEL: scmp_32_64:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: .save {r4, r5, r6, lr}
+; THUMB1-NEXT: push {r4, r5, r6, lr}
+; THUMB1-NEXT: movs r4, #1
+; THUMB1-NEXT: movs r5, #0
+; THUMB1-NEXT: subs r6, r0, r2
+; THUMB1-NEXT: mov r6, r1
+; THUMB1-NEXT: sbcs r6, r3
+; THUMB1-NEXT: mov r6, r4
+; THUMB1-NEXT: blt .LBB6_2
+; THUMB1-NEXT: @ %bb.1:
+; THUMB1-NEXT: mov r6, r5
+; THUMB1-NEXT: .LBB6_2:
+; THUMB1-NEXT: subs r0, r2, r0
+; THUMB1-NEXT: sbcs r3, r1
+; THUMB1-NEXT: blt .LBB6_4
+; THUMB1-NEXT: @ %bb.3:
+; THUMB1-NEXT: mov r4, r5
+; THUMB1-NEXT: .LBB6_4:
+; THUMB1-NEXT: subs r0, r4, r6
+; THUMB1-NEXT: pop {r4, r5, r6, pc}
+;
+; THUMB2-LABEL: scmp_32_64:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: subs.w r12, r0, r2
+; THUMB2-NEXT: mov.w r9, #0
+; THUMB2-NEXT: sbcs.w r12, r1, r3
+; THUMB2-NEXT: mov.w r12, #0
+; THUMB2-NEXT: it lt
+; THUMB2-NEXT: movlt.w r12, #1
+; THUMB2-NEXT: subs r0, r2, r0
+; THUMB2-NEXT: sbcs.w r0, r3, r1
+; THUMB2-NEXT: it lt
+; THUMB2-NEXT: movlt.w r9, #1
+; THUMB2-NEXT: sub.w r0, r9, r12
+; THUMB2-NEXT: bx lr
+;
+; V81M-LABEL: scmp_32_64:
+; V81M: @ %bb.0:
+; V81M-NEXT: subs.w r12, r0, r2
+; V81M-NEXT: sbcs.w r12, r1, r3
+; V81M-NEXT: cset r12, lt
+; V81M-NEXT: subs r0, r2, r0
+; V81M-NEXT: sbcs.w r0, r3, r1
+; V81M-NEXT: cset r0, lt
+; V81M-NEXT: sub.w r0, r0, r12
+; V81M-NEXT: bx lr
%1 = call i32 @llvm.scmp(i64 %x, i64 %y)
ret i32 %1
}
define i64 @scmp_64_64(i64 %x, i64 %y) nounwind {
-; CHECK-LABEL: scmp_64_64:
-; CHECK: @ %bb.0:
-; CHECK-NEXT: subs.w r12, r0, r2
-; CHECK-NEXT: mov.w r9, #0
-; CHECK-NEXT: sbcs.w r12, r1, r3
-; CHECK-NEXT: mov.w r12, #0
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r12, #1
-; CHECK-NEXT: subs r0, r2, r0
-; CHECK-NEXT: sbcs.w r0, r3, r1
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r9, #1
-; CHECK-NEXT: sub.w r0, r9, r12
-; CHECK-NEXT: asrs r1, r0, #31
-; CHECK-NEXT: bx lr
+; THUMB1-LABEL: scmp_64_64:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: .save {r4, r5, r6, lr}
+; THUMB1-NEXT: push {r4, r5, r6, lr}
+; THUMB1-NEXT: movs r4, #1
+; THUMB1-NEXT: movs r5, #0
+; THUMB1-NEXT: subs r6, r0, r2
+; THUMB1-NEXT: mov r6, r1
+; THUMB1-NEXT: sbcs r6, r3
+; THUMB1-NEXT: mov r6, r4
+; THUMB1-NEXT: blt .LBB7_2
+; THUMB1-NEXT: @ %bb.1:
+; THUMB1-NEXT: mov r6, r5
+; THUMB1-NEXT: .LBB7_2:
+; THUMB1-NEXT: subs r0, r2, r0
+; THUMB1-NEXT: sbcs r3, r1
+; THUMB1-NEXT: blt .LBB7_4
+; THUMB1-NEXT: @ %bb.3:
+; THUMB1-NEXT: mov r4, r5
+; THUMB1-NEXT: .LBB7_4:
+; THUMB1-NEXT: subs r0, r4, r6
+; THUMB1-NEXT: asrs r1, r0, #31
+; THUMB1-NEXT: pop {r4, r5, r6, pc}
+;
+; THUMB2-LABEL: scmp_64_64:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: subs.w r12, r0, r2
+; THUMB2-NEXT: mov.w r9, #0
+; THUMB2-NEXT: sbcs.w r12, r1, r3
+; THUMB2-NEXT: mov.w r12, #0
+; THUMB2-NEXT: it lt
+; THUMB2-NEXT: movlt.w r12, #1
+; THUMB2-NEXT: subs r0, r2, r0
+; THUMB2-NEXT: sbcs.w r0, r3, r1
+; THUMB2-NEXT: it lt
+; THUMB2-NEXT: movlt.w r9, #1
+; THUMB2-NEXT: sub.w r0, r9, r12
+; THUMB2-NEXT: asrs r1, r0, #31
+; THUMB2-NEXT: bx lr
+;
+; V81M-LABEL: scmp_64_64:
+; V81M: @ %bb.0:
+; V81M-NEXT: subs.w r12, r0, r2
+; V81M-NEXT: sbcs.w r12, r1, r3
+; V81M-NEXT: cset r12, lt
+; V81M-NEXT: subs r0, r2, r0
+; V81M-NEXT: sbcs.w r0, r3, r1
+; V81M-NEXT: cset r0, lt
+; V81M-NEXT: sub.w r0, r0, r12
+; V81M-NEXT: asrs r1, r0, #31
+; V81M-NEXT: bx lr
%1 = call i64 @llvm.scmp(i64 %x, i64 %y)
ret i64 %1
}
diff --git a/llvm/test/CodeGen/Thumb/ucmp.ll b/llvm/test/CodeGen/Thumb/ucmp.ll
index 7e6d0a3..5d0f57e 100644
--- a/llvm/test/CodeGen/Thumb/ucmp.ll
+++ b/llvm/test/CodeGen/Thumb/ucmp.ll
@@ -1,151 +1,376 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=thumbv7-apple-darwin %s -o - | FileCheck %s
+; RUN: llc -mtriple=thumbv6m-eabi %s -o - | FileCheck %s -check-prefix=THUMB1
+; RUN: llc -mtriple=thumbv7-apple-darwin %s -o - | FileCheck %s -check-prefix=THUMB2
+; RUN: llc -mtriple thumbv8.1m.main-none-eabi -o - %s | FileCheck %s --check-prefix=V81M
define i8 @ucmp_8_8(i8 zeroext %x, i8 zeroext %y) nounwind {
-; CHECK-LABEL: ucmp_8_8:
-; CHECK: @ %bb.0:
-; CHECK-NEXT: cmp r0, r1
-; CHECK-NEXT: mov.w r0, #0
-; CHECK-NEXT: mov.w r2, #0
-; CHECK-NEXT: it lo
-; CHECK-NEXT: movlo r0, #1
-; CHECK-NEXT: it hi
-; CHECK-NEXT: movhi r2, #1
-; CHECK-NEXT: subs r0, r2, r0
-; CHECK-NEXT: bx lr
+; THUMB1-LABEL: ucmp_8_8:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: subs r2, r0, r1
+; THUMB1-NEXT: sbcs r2, r2
+; THUMB1-NEXT: cmp r1, r0
+; THUMB1-NEXT: sbcs r1, r1
+; THUMB1-NEXT: subs r0, r2, r1
+; THUMB1-NEXT: bx lr
+;
+; THUMB2-LABEL: ucmp_8_8:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: subs r0, r0, r1
+; THUMB2-NEXT: it hi
+; THUMB2-NEXT: movhi r0, #1
+; THUMB2-NEXT: it lo
+; THUMB2-NEXT: movlo.w r0, #-1
+; THUMB2-NEXT: bx lr
+;
+; V81M-LABEL: ucmp_8_8:
+; V81M: @ %bb.0:
+; V81M-NEXT: cmp r0, r1
+; V81M-NEXT: cset r0, hi
+; V81M-NEXT: it lo
+; V81M-NEXT: movlo.w r0, #-1
+; V81M-NEXT: bx lr
%1 = call i8 @llvm.ucmp(i8 %x, i8 %y)
ret i8 %1
}
define i8 @ucmp_8_16(i16 zeroext %x, i16 zeroext %y) nounwind {
-; CHECK-LABEL: ucmp_8_16:
-; CHECK: @ %bb.0:
-; CHECK-NEXT: cmp r0, r1
-; CHECK-NEXT: mov.w r0, #0
-; CHECK-NEXT: mov.w r2, #0
-; CHECK-NEXT: it lo
-; CHECK-NEXT: movlo r0, #1
-; CHECK-NEXT: it hi
-; CHECK-NEXT: movhi r2, #1
-; CHECK-NEXT: subs r0, r2, r0
-; CHECK-NEXT: bx lr
+; THUMB1-LABEL: ucmp_8_16:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: subs r2, r0, r1
+; THUMB1-NEXT: sbcs r2, r2
+; THUMB1-NEXT: cmp r1, r0
+; THUMB1-NEXT: sbcs r1, r1
+; THUMB1-NEXT: subs r0, r2, r1
+; THUMB1-NEXT: bx lr
+;
+; THUMB2-LABEL: ucmp_8_16:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: subs r0, r0, r1
+; THUMB2-NEXT: it hi
+; THUMB2-NEXT: movhi r0, #1
+; THUMB2-NEXT: it lo
+; THUMB2-NEXT: movlo.w r0, #-1
+; THUMB2-NEXT: bx lr
+;
+; V81M-LABEL: ucmp_8_16:
+; V81M: @ %bb.0:
+; V81M-NEXT: cmp r0, r1
+; V81M-NEXT: cset r0, hi
+; V81M-NEXT: it lo
+; V81M-NEXT: movlo.w r0, #-1
+; V81M-NEXT: bx lr
%1 = call i8 @llvm.ucmp(i16 %x, i16 %y)
ret i8 %1
}
define i8 @ucmp_8_32(i32 %x, i32 %y) nounwind {
-; CHECK-LABEL: ucmp_8_32:
-; CHECK: @ %bb.0:
-; CHECK-NEXT: cmp r0, r1
-; CHECK-NEXT: mov.w r0, #0
-; CHECK-NEXT: mov.w r2, #0
-; CHECK-NEXT: it lo
-; CHECK-NEXT: movlo r0, #1
-; CHECK-NEXT: it hi
-; CHECK-NEXT: movhi r2, #1
-; CHECK-NEXT: subs r0, r2, r0
-; CHECK-NEXT: bx lr
+; THUMB1-LABEL: ucmp_8_32:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: subs r2, r0, r1
+; THUMB1-NEXT: sbcs r2, r2
+; THUMB1-NEXT: cmp r1, r0
+; THUMB1-NEXT: sbcs r1, r1
+; THUMB1-NEXT: subs r0, r2, r1
+; THUMB1-NEXT: bx lr
+;
+; THUMB2-LABEL: ucmp_8_32:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: subs r0, r0, r1
+; THUMB2-NEXT: it hi
+; THUMB2-NEXT: movhi r0, #1
+; THUMB2-NEXT: it lo
+; THUMB2-NEXT: movlo.w r0, #-1
+; THUMB2-NEXT: bx lr
+;
+; V81M-LABEL: ucmp_8_32:
+; V81M: @ %bb.0:
+; V81M-NEXT: cmp r0, r1
+; V81M-NEXT: cset r0, hi
+; V81M-NEXT: it lo
+; V81M-NEXT: movlo.w r0, #-1
+; V81M-NEXT: bx lr
%1 = call i8 @llvm.ucmp(i32 %x, i32 %y)
ret i8 %1
}
define i8 @ucmp_8_64(i64 %x, i64 %y) nounwind {
-; CHECK-LABEL: ucmp_8_64:
-; CHECK: @ %bb.0:
-; CHECK-NEXT: subs.w r12, r0, r2
-; CHECK-NEXT: mov.w r9, #0
-; CHECK-NEXT: sbcs.w r12, r1, r3
-; CHECK-NEXT: mov.w r12, #0
-; CHECK-NEXT: it lo
-; CHECK-NEXT: movlo.w r12, #1
-; CHECK-NEXT: subs r0, r2, r0
-; CHECK-NEXT: sbcs.w r0, r3, r1
-; CHECK-NEXT: it lo
-; CHECK-NEXT: movlo.w r9, #1
-; CHECK-NEXT: sub.w r0, r9, r12
-; CHECK-NEXT: bx lr
+; THUMB1-LABEL: ucmp_8_64:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: .save {r4, r5, r6, lr}
+; THUMB1-NEXT: push {r4, r5, r6, lr}
+; THUMB1-NEXT: movs r4, #1
+; THUMB1-NEXT: movs r5, #0
+; THUMB1-NEXT: subs r6, r0, r2
+; THUMB1-NEXT: mov r6, r1
+; THUMB1-NEXT: sbcs r6, r3
+; THUMB1-NEXT: mov r6, r4
+; THUMB1-NEXT: blo .LBB3_2
+; THUMB1-NEXT: @ %bb.1:
+; THUMB1-NEXT: mov r6, r5
+; THUMB1-NEXT: .LBB3_2:
+; THUMB1-NEXT: subs r0, r2, r0
+; THUMB1-NEXT: sbcs r3, r1
+; THUMB1-NEXT: blo .LBB3_4
+; THUMB1-NEXT: @ %bb.3:
+; THUMB1-NEXT: mov r4, r5
+; THUMB1-NEXT: .LBB3_4:
+; THUMB1-NEXT: subs r0, r4, r6
+; THUMB1-NEXT: pop {r4, r5, r6, pc}
+;
+; THUMB2-LABEL: ucmp_8_64:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: subs.w r12, r0, r2
+; THUMB2-NEXT: mov.w r9, #0
+; THUMB2-NEXT: sbcs.w r12, r1, r3
+; THUMB2-NEXT: mov.w r12, #0
+; THUMB2-NEXT: it lo
+; THUMB2-NEXT: movlo.w r12, #1
+; THUMB2-NEXT: subs r0, r2, r0
+; THUMB2-NEXT: sbcs.w r0, r3, r1
+; THUMB2-NEXT: it lo
+; THUMB2-NEXT: movlo.w r9, #1
+; THUMB2-NEXT: sub.w r0, r9, r12
+; THUMB2-NEXT: bx lr
+;
+; V81M-LABEL: ucmp_8_64:
+; V81M: @ %bb.0:
+; V81M-NEXT: subs.w r12, r0, r2
+; V81M-NEXT: sbcs.w r12, r1, r3
+; V81M-NEXT: cset r12, lo
+; V81M-NEXT: subs r0, r2, r0
+; V81M-NEXT: sbcs.w r0, r3, r1
+; V81M-NEXT: cset r0, lo
+; V81M-NEXT: sub.w r0, r0, r12
+; V81M-NEXT: bx lr
%1 = call i8 @llvm.ucmp(i64 %x, i64 %y)
ret i8 %1
}
define i8 @ucmp_8_128(i128 %x, i128 %y) nounwind {
-; CHECK-LABEL: ucmp_8_128:
-; CHECK: @ %bb.0:
-; CHECK-NEXT: push {r4, r5, r6, lr}
-; CHECK-NEXT: add.w lr, sp, #16
-; CHECK-NEXT: ldr r4, [sp, #28]
-; CHECK-NEXT: movs r5, #0
-; CHECK-NEXT: ldm.w lr, {r9, r12, lr}
-; CHECK-NEXT: subs.w r6, r0, r9
-; CHECK-NEXT: sbcs.w r6, r1, r12
-; CHECK-NEXT: sbcs.w r6, r2, lr
-; CHECK-NEXT: sbcs.w r6, r3, r4
-; CHECK-NEXT: mov.w r6, #0
-; CHECK-NEXT: it lo
-; CHECK-NEXT: movlo r6, #1
-; CHECK-NEXT: subs.w r0, r9, r0
-; CHECK-NEXT: sbcs.w r0, r12, r1
-; CHECK-NEXT: sbcs.w r0, lr, r2
-; CHECK-NEXT: sbcs.w r0, r4, r3
-; CHECK-NEXT: it lo
-; CHECK-NEXT: movlo r5, #1
-; CHECK-NEXT: subs r0, r5, r6
-; CHECK-NEXT: pop {r4, r5, r6, pc}
+; THUMB1-LABEL: ucmp_8_128:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: .save {r4, r5, r6, r7, lr}
+; THUMB1-NEXT: push {r4, r5, r6, r7, lr}
+; THUMB1-NEXT: .pad #20
+; THUMB1-NEXT: sub sp, #20
+; THUMB1-NEXT: str r3, [sp, #16] @ 4-byte Spill
+; THUMB1-NEXT: movs r3, #1
+; THUMB1-NEXT: str r3, [sp] @ 4-byte Spill
+; THUMB1-NEXT: movs r3, #0
+; THUMB1-NEXT: str r3, [sp, #12] @ 4-byte Spill
+; THUMB1-NEXT: ldr r6, [sp, #52]
+; THUMB1-NEXT: add r7, sp, #40
+; THUMB1-NEXT: ldm r7, {r3, r5, r7}
+; THUMB1-NEXT: subs r4, r0, r3
+; THUMB1-NEXT: str r1, [sp, #4] @ 4-byte Spill
+; THUMB1-NEXT: mov r4, r1
+; THUMB1-NEXT: ldr r1, [sp] @ 4-byte Reload
+; THUMB1-NEXT: sbcs r4, r5
+; THUMB1-NEXT: str r2, [sp, #8] @ 4-byte Spill
+; THUMB1-NEXT: mov r4, r2
+; THUMB1-NEXT: sbcs r4, r7
+; THUMB1-NEXT: ldr r4, [sp, #16] @ 4-byte Reload
+; THUMB1-NEXT: sbcs r4, r6
+; THUMB1-NEXT: mov r2, r1
+; THUMB1-NEXT: blo .LBB4_2
+; THUMB1-NEXT: @ %bb.1:
+; THUMB1-NEXT: ldr r2, [sp, #12] @ 4-byte Reload
+; THUMB1-NEXT: .LBB4_2:
+; THUMB1-NEXT: subs r0, r3, r0
+; THUMB1-NEXT: ldr r0, [sp, #4] @ 4-byte Reload
+; THUMB1-NEXT: sbcs r5, r0
+; THUMB1-NEXT: ldr r0, [sp, #8] @ 4-byte Reload
+; THUMB1-NEXT: sbcs r7, r0
+; THUMB1-NEXT: ldr r0, [sp, #16] @ 4-byte Reload
+; THUMB1-NEXT: sbcs r6, r0
+; THUMB1-NEXT: blo .LBB4_4
+; THUMB1-NEXT: @ %bb.3:
+; THUMB1-NEXT: ldr r1, [sp, #12] @ 4-byte Reload
+; THUMB1-NEXT: .LBB4_4:
+; THUMB1-NEXT: subs r0, r1, r2
+; THUMB1-NEXT: add sp, #20
+; THUMB1-NEXT: pop {r4, r5, r6, r7, pc}
+;
+; THUMB2-LABEL: ucmp_8_128:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: push {r4, r5, r6, lr}
+; THUMB2-NEXT: add.w lr, sp, #16
+; THUMB2-NEXT: ldr r4, [sp, #28]
+; THUMB2-NEXT: movs r5, #0
+; THUMB2-NEXT: ldm.w lr, {r9, r12, lr}
+; THUMB2-NEXT: subs.w r6, r0, r9
+; THUMB2-NEXT: sbcs.w r6, r1, r12
+; THUMB2-NEXT: sbcs.w r6, r2, lr
+; THUMB2-NEXT: sbcs.w r6, r3, r4
+; THUMB2-NEXT: mov.w r6, #0
+; THUMB2-NEXT: it lo
+; THUMB2-NEXT: movlo r6, #1
+; THUMB2-NEXT: subs.w r0, r9, r0
+; THUMB2-NEXT: sbcs.w r0, r12, r1
+; THUMB2-NEXT: sbcs.w r0, lr, r2
+; THUMB2-NEXT: sbcs.w r0, r4, r3
+; THUMB2-NEXT: it lo
+; THUMB2-NEXT: movlo r5, #1
+; THUMB2-NEXT: subs r0, r5, r6
+; THUMB2-NEXT: pop {r4, r5, r6, pc}
+;
+; V81M-LABEL: ucmp_8_128:
+; V81M: @ %bb.0:
+; V81M-NEXT: .save {r4, r5, r6, lr}
+; V81M-NEXT: push {r4, r5, r6, lr}
+; V81M-NEXT: ldrd r5, r4, [sp, #16]
+; V81M-NEXT: ldrd lr, r12, [sp, #24]
+; V81M-NEXT: subs r6, r0, r5
+; V81M-NEXT: sbcs.w r6, r1, r4
+; V81M-NEXT: sbcs.w r6, r2, lr
+; V81M-NEXT: sbcs.w r6, r3, r12
+; V81M-NEXT: cset r6, lo
+; V81M-NEXT: subs r0, r5, r0
+; V81M-NEXT: sbcs.w r0, r4, r1
+; V81M-NEXT: sbcs.w r0, lr, r2
+; V81M-NEXT: sbcs.w r0, r12, r3
+; V81M-NEXT: cset r0, lo
+; V81M-NEXT: subs r0, r0, r6
+; V81M-NEXT: pop {r4, r5, r6, pc}
%1 = call i8 @llvm.ucmp(i128 %x, i128 %y)
ret i8 %1
}
define i32 @ucmp_32_32(i32 %x, i32 %y) nounwind {
-; CHECK-LABEL: ucmp_32_32:
-; CHECK: @ %bb.0:
-; CHECK-NEXT: cmp r0, r1
-; CHECK-NEXT: mov.w r0, #0
-; CHECK-NEXT: mov.w r2, #0
-; CHECK-NEXT: it lo
-; CHECK-NEXT: movlo r0, #1
-; CHECK-NEXT: it hi
-; CHECK-NEXT: movhi r2, #1
-; CHECK-NEXT: subs r0, r2, r0
-; CHECK-NEXT: bx lr
+; THUMB1-LABEL: ucmp_32_32:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: subs r2, r0, r1
+; THUMB1-NEXT: sbcs r2, r2
+; THUMB1-NEXT: cmp r1, r0
+; THUMB1-NEXT: sbcs r1, r1
+; THUMB1-NEXT: subs r0, r2, r1
+; THUMB1-NEXT: bx lr
+;
+; THUMB2-LABEL: ucmp_32_32:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: subs r0, r0, r1
+; THUMB2-NEXT: it hi
+; THUMB2-NEXT: movhi r0, #1
+; THUMB2-NEXT: it lo
+; THUMB2-NEXT: movlo.w r0, #-1
+; THUMB2-NEXT: bx lr
+;
+; V81M-LABEL: ucmp_32_32:
+; V81M: @ %bb.0:
+; V81M-NEXT: cmp r0, r1
+; V81M-NEXT: cset r0, hi
+; V81M-NEXT: it lo
+; V81M-NEXT: movlo.w r0, #-1
+; V81M-NEXT: bx lr
%1 = call i32 @llvm.ucmp(i32 %x, i32 %y)
ret i32 %1
}
define i32 @ucmp_32_64(i64 %x, i64 %y) nounwind {
-; CHECK-LABEL: ucmp_32_64:
-; CHECK: @ %bb.0:
-; CHECK-NEXT: subs.w r12, r0, r2
-; CHECK-NEXT: mov.w r9, #0
-; CHECK-NEXT: sbcs.w r12, r1, r3
-; CHECK-NEXT: mov.w r12, #0
-; CHECK-NEXT: it lo
-; CHECK-NEXT: movlo.w r12, #1
-; CHECK-NEXT: subs r0, r2, r0
-; CHECK-NEXT: sbcs.w r0, r3, r1
-; CHECK-NEXT: it lo
-; CHECK-NEXT: movlo.w r9, #1
-; CHECK-NEXT: sub.w r0, r9, r12
-; CHECK-NEXT: bx lr
+; THUMB1-LABEL: ucmp_32_64:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: .save {r4, r5, r6, lr}
+; THUMB1-NEXT: push {r4, r5, r6, lr}
+; THUMB1-NEXT: movs r4, #1
+; THUMB1-NEXT: movs r5, #0
+; THUMB1-NEXT: subs r6, r0, r2
+; THUMB1-NEXT: mov r6, r1
+; THUMB1-NEXT: sbcs r6, r3
+; THUMB1-NEXT: mov r6, r4
+; THUMB1-NEXT: blo .LBB6_2
+; THUMB1-NEXT: @ %bb.1:
+; THUMB1-NEXT: mov r6, r5
+; THUMB1-NEXT: .LBB6_2:
+; THUMB1-NEXT: subs r0, r2, r0
+; THUMB1-NEXT: sbcs r3, r1
+; THUMB1-NEXT: blo .LBB6_4
+; THUMB1-NEXT: @ %bb.3:
+; THUMB1-NEXT: mov r4, r5
+; THUMB1-NEXT: .LBB6_4:
+; THUMB1-NEXT: subs r0, r4, r6
+; THUMB1-NEXT: pop {r4, r5, r6, pc}
+;
+; THUMB2-LABEL: ucmp_32_64:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: subs.w r12, r0, r2
+; THUMB2-NEXT: mov.w r9, #0
+; THUMB2-NEXT: sbcs.w r12, r1, r3
+; THUMB2-NEXT: mov.w r12, #0
+; THUMB2-NEXT: it lo
+; THUMB2-NEXT: movlo.w r12, #1
+; THUMB2-NEXT: subs r0, r2, r0
+; THUMB2-NEXT: sbcs.w r0, r3, r1
+; THUMB2-NEXT: it lo
+; THUMB2-NEXT: movlo.w r9, #1
+; THUMB2-NEXT: sub.w r0, r9, r12
+; THUMB2-NEXT: bx lr
+;
+; V81M-LABEL: ucmp_32_64:
+; V81M: @ %bb.0:
+; V81M-NEXT: subs.w r12, r0, r2
+; V81M-NEXT: sbcs.w r12, r1, r3
+; V81M-NEXT: cset r12, lo
+; V81M-NEXT: subs r0, r2, r0
+; V81M-NEXT: sbcs.w r0, r3, r1
+; V81M-NEXT: cset r0, lo
+; V81M-NEXT: sub.w r0, r0, r12
+; V81M-NEXT: bx lr
%1 = call i32 @llvm.ucmp(i64 %x, i64 %y)
ret i32 %1
}
define i64 @ucmp_64_64(i64 %x, i64 %y) nounwind {
-; CHECK-LABEL: ucmp_64_64:
-; CHECK: @ %bb.0:
-; CHECK-NEXT: subs.w r12, r0, r2
-; CHECK-NEXT: mov.w r9, #0
-; CHECK-NEXT: sbcs.w r12, r1, r3
-; CHECK-NEXT: mov.w r12, #0
-; CHECK-NEXT: it lo
-; CHECK-NEXT: movlo.w r12, #1
-; CHECK-NEXT: subs r0, r2, r0
-; CHECK-NEXT: sbcs.w r0, r3, r1
-; CHECK-NEXT: it lo
-; CHECK-NEXT: movlo.w r9, #1
-; CHECK-NEXT: sub.w r0, r9, r12
-; CHECK-NEXT: asrs r1, r0, #31
-; CHECK-NEXT: bx lr
+; THUMB1-LABEL: ucmp_64_64:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: .save {r4, r5, r6, lr}
+; THUMB1-NEXT: push {r4, r5, r6, lr}
+; THUMB1-NEXT: movs r4, #1
+; THUMB1-NEXT: movs r5, #0
+; THUMB1-NEXT: subs r6, r0, r2
+; THUMB1-NEXT: mov r6, r1
+; THUMB1-NEXT: sbcs r6, r3
+; THUMB1-NEXT: mov r6, r4
+; THUMB1-NEXT: blo .LBB7_2
+; THUMB1-NEXT: @ %bb.1:
+; THUMB1-NEXT: mov r6, r5
+; THUMB1-NEXT: .LBB7_2:
+; THUMB1-NEXT: subs r0, r2, r0
+; THUMB1-NEXT: sbcs r3, r1
+; THUMB1-NEXT: blo .LBB7_4
+; THUMB1-NEXT: @ %bb.3:
+; THUMB1-NEXT: mov r4, r5
+; THUMB1-NEXT: .LBB7_4:
+; THUMB1-NEXT: subs r0, r4, r6
+; THUMB1-NEXT: asrs r1, r0, #31
+; THUMB1-NEXT: pop {r4, r5, r6, pc}
+;
+; THUMB2-LABEL: ucmp_64_64:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: subs.w r12, r0, r2
+; THUMB2-NEXT: mov.w r9, #0
+; THUMB2-NEXT: sbcs.w r12, r1, r3
+; THUMB2-NEXT: mov.w r12, #0
+; THUMB2-NEXT: it lo
+; THUMB2-NEXT: movlo.w r12, #1
+; THUMB2-NEXT: subs r0, r2, r0
+; THUMB2-NEXT: sbcs.w r0, r3, r1
+; THUMB2-NEXT: it lo
+; THUMB2-NEXT: movlo.w r9, #1
+; THUMB2-NEXT: sub.w r0, r9, r12
+; THUMB2-NEXT: asrs r1, r0, #31
+; THUMB2-NEXT: bx lr
+;
+; V81M-LABEL: ucmp_64_64:
+; V81M: @ %bb.0:
+; V81M-NEXT: subs.w r12, r0, r2
+; V81M-NEXT: sbcs.w r12, r1, r3
+; V81M-NEXT: cset r12, lo
+; V81M-NEXT: subs r0, r2, r0
+; V81M-NEXT: sbcs.w r0, r3, r1
+; V81M-NEXT: cset r0, lo
+; V81M-NEXT: sub.w r0, r0, r12
+; V81M-NEXT: asrs r1, r0, #31
+; V81M-NEXT: bx lr
%1 = call i64 @llvm.ucmp(i64 %x, i64 %y)
ret i64 %1
}
diff --git a/llvm/test/CodeGen/WebAssembly/expand-variadic-call.ll b/llvm/test/CodeGen/WebAssembly/expand-variadic-call.ll
index a27650f..7a90d28 100644
--- a/llvm/test/CodeGen/WebAssembly/expand-variadic-call.ll
+++ b/llvm/test/CodeGen/WebAssembly/expand-variadic-call.ll
@@ -37,52 +37,52 @@ define hidden void @copy(ptr noundef %va) {
; CHECK-NEXT: %va.addr = alloca ptr, align 4
; CHECK-NEXT: %cp = alloca ptr, align 4
; CHECK-NEXT: store ptr %va, ptr %va.addr, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %cp)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull %cp)
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr %cp, ptr %va.addr, i32 4, i1 false)
; CHECK-NEXT: %0 = load ptr, ptr %cp, align 4
; CHECK-NEXT: call void @valist(ptr noundef %0)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %cp)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull %cp)
; CHECK-NEXT: ret void
;
entry:
%va.addr = alloca ptr, align 4
%cp = alloca ptr, align 4
store ptr %va, ptr %va.addr, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %cp)
+ call void @llvm.lifetime.start.p0(ptr nonnull %cp)
call void @llvm.va_copy.p0(ptr nonnull %cp, ptr nonnull %va.addr)
%0 = load ptr, ptr %cp, align 4
call void @valist(ptr noundef %0)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %cp)
+ call void @llvm.lifetime.end.p0(ptr nonnull %cp)
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @llvm.va_copy.p0(ptr, ptr)
declare void @valist(ptr noundef)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
define hidden void @start_once(...) {
; CHECK-LABEL: define {{[^@]+}}@start_once(ptr %varargs) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %s = alloca ptr, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %s)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull %s)
; CHECK-NEXT: store ptr %varargs, ptr %s, align 4
; CHECK-NEXT: %0 = load ptr, ptr %s, align 4
; CHECK-NEXT: call void @valist(ptr noundef %0)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %s)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull %s)
; CHECK-NEXT: ret void
;
entry:
%s = alloca ptr, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %s)
+ call void @llvm.lifetime.start.p0(ptr nonnull %s)
call void @llvm.va_start.p0(ptr nonnull %s)
%0 = load ptr, ptr %s, align 4
call void @valist(ptr noundef %0)
call void @llvm.va_end.p0(ptr %s)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %s)
+ call void @llvm.lifetime.end.p0(ptr nonnull %s)
ret void
}
@@ -95,23 +95,23 @@ define hidden void @start_twice(...) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %s0 = alloca ptr, align 4
; CHECK-NEXT: %s1 = alloca ptr, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %s0)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %s1)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull %s0)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull %s1)
; CHECK-NEXT: store ptr %varargs, ptr %s0, align 4
; CHECK-NEXT: %0 = load ptr, ptr %s0, align 4
; CHECK-NEXT: call void @valist(ptr noundef %0)
; CHECK-NEXT: store ptr %varargs, ptr %s1, align 4
; CHECK-NEXT: %1 = load ptr, ptr %s1, align 4
; CHECK-NEXT: call void @valist(ptr noundef %1)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %s1)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %s0)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull %s1)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull %s0)
; CHECK-NEXT: ret void
;
entry:
%s0 = alloca ptr, align 4
%s1 = alloca ptr, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %s0)
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %s1)
+ call void @llvm.lifetime.start.p0(ptr nonnull %s0)
+ call void @llvm.lifetime.start.p0(ptr nonnull %s1)
call void @llvm.va_start.p0(ptr nonnull %s0)
%0 = load ptr, ptr %s0, align 4
call void @valist(ptr noundef %0)
@@ -120,8 +120,8 @@ entry:
%1 = load ptr, ptr %s1, align 4
call void @valist(ptr noundef %1)
call void @llvm.va_end.p0(ptr %s1)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %s1)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %s0)
+ call void @llvm.lifetime.end.p0(ptr nonnull %s1)
+ call void @llvm.lifetime.end.p0(ptr nonnull %s0)
ret void
}
@@ -129,11 +129,11 @@ define hidden void @single_i32(i32 noundef %x) {
; CHECK-LABEL: define {{[^@]+}}@single_i32(i32 noundef %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %single_i32.vararg, align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_i32.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store i32 %x, ptr %0, align 4
; CHECK-NEXT: call void @vararg(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -147,11 +147,11 @@ define hidden void @single_double(double noundef %x) {
; CHECK-LABEL: define {{[^@]+}}@single_double(double noundef %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %single_double.vararg, align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_double.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store double %x, ptr %0, align 8
; CHECK-NEXT: call void @vararg(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -163,11 +163,11 @@ define hidden void @single_v4f32(<4 x float> noundef %x) {
; CHECK-LABEL: define {{[^@]+}}@single_v4f32(<4 x float> noundef %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %single_v4f32.vararg, align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_v4f32.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store <4 x float> %x, ptr %0, align 16
; CHECK-NEXT: call void @vararg(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -179,11 +179,11 @@ define hidden void @single_v8f32(<8 x float> noundef %x) {
; CHECK-LABEL: define {{[^@]+}}@single_v8f32(<8 x float> noundef %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %single_v8f32.vararg, align 32
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_v8f32.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store <8 x float> %x, ptr %0, align 32
; CHECK-NEXT: call void @vararg(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -195,11 +195,11 @@ define hidden void @single_v16f32(<16 x float> noundef %x) {
; CHECK-LABEL: define {{[^@]+}}@single_v16f32(<16 x float> noundef %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %single_v16f32.vararg, align 64
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 64, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_v16f32.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store <16 x float> %x, ptr %0, align 64
; CHECK-NEXT: call void @vararg(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 64, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -211,11 +211,11 @@ define hidden void @single_v32f32(<32 x float> noundef %x) {
; CHECK-LABEL: define {{[^@]+}}@single_v32f32(<32 x float> noundef %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %single_v32f32.vararg, align 128
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 128, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_v32f32.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store <32 x float> %x, ptr %0, align 128
; CHECK-NEXT: call void @vararg(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 128, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -227,13 +227,13 @@ define hidden void @i32_double(i32 noundef %x, double noundef %y) {
; CHECK-LABEL: define {{[^@]+}}@i32_double(i32 noundef %x, double noundef %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %i32_double.vararg, align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_double.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store i32 %x, ptr %0, align 4
; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_double.vararg, ptr %vararg_buffer, i32 0, i32 2
; CHECK-NEXT: store double %y, ptr %1, align 8
; CHECK-NEXT: call void @vararg(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -245,13 +245,13 @@ define hidden void @double_i32(double noundef %x, i32 noundef %y) {
; CHECK-LABEL: define {{[^@]+}}@double_i32(double noundef %x, i32 noundef %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %double_i32.vararg, align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %double_i32.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store double %x, ptr %0, align 8
; CHECK-NEXT: %1 = getelementptr inbounds nuw %double_i32.vararg, ptr %vararg_buffer, i32 0, i32 1
; CHECK-NEXT: store i32 %y, ptr %1, align 4
; CHECK-NEXT: call void @vararg(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -265,13 +265,13 @@ define hidden void @i32_libcS(i32 noundef %x, ptr noundef byval(%struct.libcS) a
; CHECK-NEXT: %IndirectAlloca = alloca %struct.libcS, align 8
; CHECK-NEXT: %vararg_buffer = alloca %i32_libcS.vararg, align 16
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr %IndirectAlloca, ptr %y, i64 24, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_libcS.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store i32 %x, ptr %0, align 4
; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_libcS.vararg, ptr %vararg_buffer, i32 0, i32 1
; CHECK-NEXT: store ptr %IndirectAlloca, ptr %1, align 4
; CHECK-NEXT: call void @vararg(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -285,13 +285,13 @@ define hidden void @libcS_i32(ptr noundef byval(%struct.libcS) align 8 %x, i32 n
; CHECK-NEXT: %IndirectAlloca = alloca %struct.libcS, align 8
; CHECK-NEXT: %vararg_buffer = alloca %libcS_i32.vararg, align 16
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr %IndirectAlloca, ptr %x, i64 24, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %libcS_i32.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store ptr %IndirectAlloca, ptr %0, align 4
; CHECK-NEXT: %1 = getelementptr inbounds nuw %libcS_i32.vararg, ptr %vararg_buffer, i32 0, i32 1
; CHECK-NEXT: store i32 %y, ptr %1, align 4
; CHECK-NEXT: call void @vararg(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -303,13 +303,13 @@ define hidden void @i32_v4f32(i32 noundef %x, <4 x float> noundef %y) {
; CHECK-LABEL: define {{[^@]+}}@i32_v4f32(i32 noundef %x, <4 x float> noundef %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %i32_v4f32.vararg, align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_v4f32.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store i32 %x, ptr %0, align 4
; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_v4f32.vararg, ptr %vararg_buffer, i32 0, i32 2
; CHECK-NEXT: store <4 x float> %y, ptr %1, align 16
; CHECK-NEXT: call void @vararg(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -321,13 +321,13 @@ define hidden void @v4f32_i32(<4 x float> noundef %x, i32 noundef %y) {
; CHECK-LABEL: define {{[^@]+}}@v4f32_i32(<4 x float> noundef %x, i32 noundef %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %v4f32_i32.vararg, align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 20, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %v4f32_i32.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store <4 x float> %x, ptr %0, align 16
; CHECK-NEXT: %1 = getelementptr inbounds nuw %v4f32_i32.vararg, ptr %vararg_buffer, i32 0, i32 1
; CHECK-NEXT: store i32 %y, ptr %1, align 4
; CHECK-NEXT: call void @vararg(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 20, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -339,13 +339,13 @@ define hidden void @i32_v8f32(i32 noundef %x, <8 x float> noundef %y) {
; CHECK-LABEL: define {{[^@]+}}@i32_v8f32(i32 noundef %x, <8 x float> noundef %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %i32_v8f32.vararg, align 32
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 64, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_v8f32.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store i32 %x, ptr %0, align 4
; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_v8f32.vararg, ptr %vararg_buffer, i32 0, i32 2
; CHECK-NEXT: store <8 x float> %y, ptr %1, align 32
; CHECK-NEXT: call void @vararg(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 64, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -357,13 +357,13 @@ define hidden void @v8f32_i32(<8 x float> noundef %x, i32 noundef %y) {
; CHECK-LABEL: define {{[^@]+}}@v8f32_i32(<8 x float> noundef %x, i32 noundef %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %v8f32_i32.vararg, align 32
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 36, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %v8f32_i32.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store <8 x float> %x, ptr %0, align 32
; CHECK-NEXT: %1 = getelementptr inbounds nuw %v8f32_i32.vararg, ptr %vararg_buffer, i32 0, i32 1
; CHECK-NEXT: store i32 %y, ptr %1, align 4
; CHECK-NEXT: call void @vararg(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 36, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -375,13 +375,13 @@ define hidden void @i32_v16f32(i32 noundef %x, <16 x float> noundef %y) {
; CHECK-LABEL: define {{[^@]+}}@i32_v16f32(i32 noundef %x, <16 x float> noundef %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %i32_v16f32.vararg, align 64
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 128, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_v16f32.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store i32 %x, ptr %0, align 4
; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_v16f32.vararg, ptr %vararg_buffer, i32 0, i32 2
; CHECK-NEXT: store <16 x float> %y, ptr %1, align 64
; CHECK-NEXT: call void @vararg(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 128, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -393,13 +393,13 @@ define hidden void @v16f32_i32(<16 x float> noundef %x, i32 noundef %y) {
; CHECK-LABEL: define {{[^@]+}}@v16f32_i32(<16 x float> noundef %x, i32 noundef %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %v16f32_i32.vararg, align 64
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 68, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %v16f32_i32.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store <16 x float> %x, ptr %0, align 64
; CHECK-NEXT: %1 = getelementptr inbounds nuw %v16f32_i32.vararg, ptr %vararg_buffer, i32 0, i32 1
; CHECK-NEXT: store i32 %y, ptr %1, align 4
; CHECK-NEXT: call void @vararg(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 68, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -411,13 +411,13 @@ define hidden void @i32_v32f32(i32 noundef %x, <32 x float> noundef %y) {
; CHECK-LABEL: define {{[^@]+}}@i32_v32f32(i32 noundef %x, <32 x float> noundef %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %i32_v32f32.vararg, align 128
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 256, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_v32f32.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store i32 %x, ptr %0, align 4
; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_v32f32.vararg, ptr %vararg_buffer, i32 0, i32 2
; CHECK-NEXT: store <32 x float> %y, ptr %1, align 128
; CHECK-NEXT: call void @vararg(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 256, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -429,13 +429,13 @@ define hidden void @v32f32_i32(<32 x float> noundef %x, i32 noundef %y) {
; CHECK-LABEL: define {{[^@]+}}@v32f32_i32(<32 x float> noundef %x, i32 noundef %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %v32f32_i32.vararg, align 128
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 132, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %v32f32_i32.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store <32 x float> %x, ptr %0, align 128
; CHECK-NEXT: %1 = getelementptr inbounds nuw %v32f32_i32.vararg, ptr %vararg_buffer, i32 0, i32 1
; CHECK-NEXT: store i32 %y, ptr %1, align 4
; CHECK-NEXT: call void @vararg(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 132, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -448,11 +448,11 @@ define hidden void @fptr_single_i32(i32 noundef %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %fptr_single_i32.vararg, align 16
; CHECK-NEXT: %0 = load volatile ptr, ptr @vararg_ptr, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %1 = getelementptr inbounds nuw %fptr_single_i32.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store i32 %x, ptr %1, align 4
; CHECK-NEXT: call void %0(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -468,11 +468,11 @@ define hidden void @fptr_libcS(ptr noundef byval(%struct.libcS) align 8 %x) {
; CHECK-NEXT: %vararg_buffer = alloca %fptr_libcS.vararg, align 16
; CHECK-NEXT: %0 = load volatile ptr, ptr @vararg_ptr, align 4
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr %IndirectAlloca, ptr %x, i64 24, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %1 = getelementptr inbounds nuw %fptr_libcS.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store ptr %IndirectAlloca, ptr %1, align 4
; CHECK-NEXT: call void %0(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
diff --git a/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-alloca.ll b/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-alloca.ll
index 0f968de..3264fe9 100644
--- a/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-alloca.ll
+++ b/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-alloca.ll
@@ -18,7 +18,7 @@ define void @test_static() {
; CHECK-NEXT: i32 1, label %[[ENTRY_SPLIT_SPLIT:.*]]
; CHECK-NEXT: ]
; CHECK: [[ENTRY_SPLIT]]:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
; CHECK-NEXT: call void @__wasm_setjmp(ptr @buf, i32 1, ptr [[FUNCTIONINVOCATIONID]])
; CHECK-NEXT: br label %[[ENTRY_SPLIT_SPLIT]]
; CHECK: [[ENTRY_SPLIT_SPLIT]]:
@@ -31,7 +31,7 @@ define void @test_static() {
; CHECK: [[_NOEXC:.*:]]
; CHECK-NEXT: ret void
; CHECK: [[ELSE]]:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]])
; CHECK-NEXT: ret void
; CHECK: [[CATCH_DISPATCH_LONGJMP]]:
; CHECK-NEXT: [[TMP0:%.*]] = catchswitch within none [label %catch.longjmp] unwind to caller
@@ -53,7 +53,7 @@ define void @test_static() {
;
entry:
%x = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
%call = call i32 @setjmp(ptr @buf) returns_twice
%cmp = icmp eq i32 %call, 0
br i1 %cmp, label %if, label %else
@@ -63,7 +63,7 @@ if:
ret void
else:
- call void @llvm.lifetime.end.p0(i64 4, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
ret void
}
@@ -114,7 +114,7 @@ define void @test_dynamic(i32 %size) {
;
entry:
%x = alloca i32, i32 %size, align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
%call = call i32 @setjmp(ptr @buf) returns_twice
%cmp = icmp eq i32 %call, 0
br i1 %cmp, label %if, label %else
@@ -124,6 +124,6 @@ if:
ret void
else:
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
ret void
}
diff --git a/llvm/test/CodeGen/WebAssembly/ref-test-func.ll b/llvm/test/CodeGen/WebAssembly/ref-test-func.ll
index ea2453f..4fda253 100644
--- a/llvm/test/CodeGen/WebAssembly/ref-test-func.ll
+++ b/llvm/test/CodeGen/WebAssembly/ref-test-func.ll
@@ -31,7 +31,7 @@ define void @test_fpsig_return_i32(ptr noundef %func) local_unnamed_addr #0 {
; CHECK-NEXT: call use
; CHECK-NEXT: # fallthrough-return
entry:
- %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, i32 0)
+ %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, i32 poison)
tail call void @use(i32 noundef %res) #3
ret void
}
@@ -48,7 +48,7 @@ define void @test_fpsig_return_i64(ptr noundef %func) local_unnamed_addr #0 {
; CHECK-NEXT: call use
; CHECK-NEXT: # fallthrough-return
entry:
- %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, i64 0)
+ %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, i64 poison)
tail call void @use(i32 noundef %res) #3
ret void
}
@@ -65,7 +65,7 @@ define void @test_fpsig_return_f32(ptr noundef %func) local_unnamed_addr #0 {
; CHECK-NEXT: call use
; CHECK-NEXT: # fallthrough-return
entry:
- %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, float 0.)
+ %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, float poison)
tail call void @use(i32 noundef %res) #3
ret void
}
@@ -82,7 +82,7 @@ define void @test_fpsig_return_f64(ptr noundef %func) local_unnamed_addr #0 {
; CHECK-NEXT: call use
; CHECK-NEXT: # fallthrough-return
entry:
- %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, double 0.)
+ %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, double poison)
tail call void @use(i32 noundef %res) #3
ret void
}
@@ -100,7 +100,7 @@ define void @test_fpsig_param_i32(ptr noundef %func) local_unnamed_addr #0 {
; CHECK-NEXT: call use
; CHECK-NEXT: # fallthrough-return
entry:
- %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, token poison, double 0.)
+ %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, token poison, double poison)
tail call void @use(i32 noundef %res) #3
ret void
}
@@ -118,7 +118,7 @@ define void @test_fpsig_multiple_params_and_returns(ptr noundef %func) local_unn
; CHECK-NEXT: call use
; CHECK-NEXT: # fallthrough-return
entry:
- %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, i32 0, i64 0, float 0., double 0., token poison, i64 0, float 0., i64 0)
+ %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, i32 poison, i64 poison, float poison, double poison, token poison, i64 poison, float poison, i64 poison)
tail call void @use(i32 noundef %res) #3
ret void
}
@@ -137,10 +137,26 @@ define void @test_fpsig_ptrs(ptr noundef %func) local_unnamed_addr #0 {
; CHECK-NEXT: call use
; CHECK-NEXT: # fallthrough-return
entry:
- %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, ptr null, token poison, ptr null, ptr null)
+ %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, ptr poison, token poison, ptr poison, ptr poison)
tail call void @use(i32 noundef %res) #3
ret void
}
+define void @test_reference_types(ptr noundef %func) local_unnamed_addr #0 {
+; CHECK-LABEL: test_reference_types:
+; CHK32: .functype test_reference_types (i32) -> ()
+; CHK64: .functype test_reference_types (i64) -> ()
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: local.get 0
+; CHK64-NEXT: i32.wrap_i64
+; CHECK-NEXT: table.get __indirect_function_table
+; CHECK-NEXT: ref.test (funcref, externref) -> (externref)
+; CHECK-NEXT: call use
+; CHECK-NEXT: # fallthrough-return
+entry:
+ %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, ptr addrspace(10) poison, token poison, ptr addrspace(20) poison, ptr addrspace(10) poison)
+ tail call void @use(i32 noundef %res) #3
+ ret void
+}
declare void @use(i32 noundef) local_unnamed_addr #1
diff --git a/llvm/test/CodeGen/WebAssembly/returned.ll b/llvm/test/CodeGen/WebAssembly/returned.ll
index aef75d8..bad9d60 100644
--- a/llvm/test/CodeGen/WebAssembly/returned.ll
+++ b/llvm/test/CodeGen/WebAssembly/returned.ll
@@ -99,8 +99,8 @@ define void @test() {
; CHECK-NEXT: return
entry:
%a = alloca i32
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
%ret = call ptr @returns_arg(ptr %a)
- call void @llvm.lifetime.end.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
ret void
}
diff --git a/llvm/test/CodeGen/X86/GlobalISel/ptrtoaddr.ll b/llvm/test/CodeGen/X86/GlobalISel/ptrtoaddr.ll
new file mode 100644
index 0000000..f65d99d
--- /dev/null
+++ b/llvm/test/CodeGen/X86/GlobalISel/ptrtoaddr.ll
@@ -0,0 +1,109 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=CHECK
+
+define i1 @ptrtoaddr_1(ptr %p) {
+; CHECK-LABEL: ptrtoaddr_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rdi, %rax
+; CHECK-NEXT: xorb $1, %al
+; CHECK-NEXT: # kill: def $al killed $al killed $rax
+; CHECK-NEXT: retq
+entry:
+ %addr = ptrtoaddr ptr %p to i64
+ %trunc = trunc i64 %addr to i1
+ %ret = xor i1 %trunc, 1
+ ret i1 %ret
+}
+
+define i8 @ptrtoaddr_8(ptr %p) {
+; CHECK-LABEL: ptrtoaddr_8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rdi, %rax
+; CHECK-NEXT: notb %al
+; CHECK-NEXT: # kill: def $al killed $al killed $rax
+; CHECK-NEXT: retq
+entry:
+ %addr = ptrtoaddr ptr %p to i64
+ %trunc = trunc i64 %addr to i8
+ %ret = xor i8 %trunc, -1
+ ret i8 %ret
+}
+
+define i16 @ptrtoaddr_16(ptr %p) {
+; CHECK-LABEL: ptrtoaddr_16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rdi, %rax
+; CHECK-NEXT: notw %ax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $rax
+; CHECK-NEXT: retq
+entry:
+ %addr = ptrtoaddr ptr %p to i64
+ %trunc = trunc i64 %addr to i16
+ %ret = xor i16 %trunc, -1
+ ret i16 %ret
+}
+
+define i32 @ptrtoaddr_32(ptr %p) {
+; CHECK-LABEL: ptrtoaddr_32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rdi, %rax
+; CHECK-NEXT: notl %eax
+; CHECK-NEXT: # kill: def $eax killed $eax killed $rax
+; CHECK-NEXT: retq
+entry:
+ %addr = ptrtoaddr ptr %p to i64
+ %trunc = trunc i64 %addr to i32
+ %ret = xor i32 %trunc, -1
+ ret i32 %ret
+}
+
+define i64 @ptrtoaddr_64(ptr %p) {
+; CHECK-LABEL: ptrtoaddr_64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rdi, %rax
+; CHECK-NEXT: notq %rax
+; CHECK-NEXT: retq
+entry:
+ %addr = ptrtoaddr ptr %p to i64
+ %ret = xor i64 %addr, -1
+ ret i64 %ret
+}
+
+define i128 @ptrtoaddr_128(ptr %p) {
+; CHECK-LABEL: ptrtoaddr_128:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rdi, %rax
+; CHECK-NEXT: xorl %edx, %edx
+; CHECK-NEXT: notq %rax
+; CHECK-NEXT: notq %rdx
+; CHECK-NEXT: retq
+entry:
+ %addr = ptrtoaddr ptr %p to i64
+ %ext = zext i64 %addr to i128
+ %ret = xor i128 %ext, -1
+ ret i128 %ret
+}
+
+; TODO: Vector version cannot be handled by GlobalIsel yet (same error as ptrtoint: https://github.com/llvm/llvm-project/issues/150875).
+; define <2 x i64> @ptrtoaddr_vec(<2 x ptr> %p) {
+; entry:
+; %addr = ptrtoaddr <2 x ptr> %p to <2 x i64>
+; %ret = xor <2 x i64> %addr, <i64 -1, i64 -1>
+; ret <2 x i64> %ret
+;}
+
+; UTC_ARGS: --disable
+
+@foo = global [16 x i8] zeroinitializer
+@addr = global i64 ptrtoaddr (ptr @foo to i64)
+; CHECK: addr:
+; CHECK-NEXT: .quad foo
+; CHECK-NEXT: .size addr, 8
+@addr_plus_one = global i64 ptrtoaddr (ptr getelementptr (i8, ptr @foo, i64 1) to i64)
+; CHECK: addr_plus_one:
+; CHECK-NEXT: .quad foo+1
+; CHECK-NEXT: .size addr_plus_one, 8
+@const_addr = global i64 ptrtoaddr (ptr getelementptr (i8, ptr null, i64 1) to i64)
+; CHECK: const_addr:
+; CHECK-NEXT: .quad 0+1
+; CHECK-NEXT: .size const_addr, 8
diff --git a/llvm/test/CodeGen/X86/pr140491-sincos-lifetimes.ll b/llvm/test/CodeGen/X86/pr140491-sincos-lifetimes.ll
index 2ca99bd..58dfd63 100644
--- a/llvm/test/CodeGen/X86/pr140491-sincos-lifetimes.ll
+++ b/llvm/test/CodeGen/X86/pr140491-sincos-lifetimes.ll
@@ -51,20 +51,20 @@ entry:
%sincos = tail call { float, float } @llvm.sincos.f32(float %in)
%sin = extractvalue { float, float } %sincos, 0
%cos = extractvalue { float, float } %sincos, 1
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %computed)
+ call void @llvm.lifetime.start.p0(ptr nonnull %computed)
store float %cos, ptr %computed, align 4
call void @use_ptr(ptr nonnull %computed)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %computed)
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %computed1)
+ call void @llvm.lifetime.end.p0(ptr nonnull %computed)
+ call void @llvm.lifetime.start.p0(ptr nonnull %computed1)
%fneg_sin = fneg float %sin
store float %fneg_sin, ptr %computed1, align 4
call void @use_ptr(ptr nonnull %computed1)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %computed1)
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %computed3)
+ call void @llvm.lifetime.end.p0(ptr nonnull %computed1)
+ call void @llvm.lifetime.start.p0(ptr nonnull %computed3)
%fneg_cos = fneg float %cos
store float %fneg_cos, ptr %computed3, align 4
call void @use_ptr(ptr nonnull %computed3)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %computed3)
+ call void @llvm.lifetime.end.p0(ptr nonnull %computed3)
ret i32 0
}
diff --git a/llvm/test/CodeGen/X86/pr152630.ll b/llvm/test/CodeGen/X86/pr152630.ll
new file mode 100644
index 0000000..8fa9883
--- /dev/null
+++ b/llvm/test/CodeGen/X86/pr152630.ll
@@ -0,0 +1,34 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s
+
+define i32 @pr152630(i1 %cond) nounwind {
+; CHECK-LABEL: pr152630:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: andl $1, %edi
+; CHECK-NEXT: decl %edi
+; CHECK-NEXT: cmpl $-1, %edi
+; CHECK-NEXT: je .LBB0_2
+; CHECK-NEXT: # %bb.1: # %entry
+; CHECK-NEXT: movzbl %dil, %eax
+; CHECK-NEXT: testl %eax, %eax
+; CHECK-NEXT: jne .LBB0_3
+; CHECK-NEXT: .LBB0_2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: retq
+; CHECK-NEXT: .LBB0_3: # %if.else
+; CHECK-NEXT: movl $1, %eax
+; CHECK-NEXT: retq
+entry:
+ %sel = select i1 %cond, i32 0, i32 -1
+ %conv = trunc nsw i32 %sel to i8
+ switch i8 %conv, label %if.else [
+ i8 -1, label %if.then
+ i8 0, label %if.then
+ ]
+
+if.then:
+ ret i32 0
+
+if.else:
+ ret i32 1
+}
diff --git a/llvm/test/CodeGen/X86/ptrtoaddr.ll b/llvm/test/CodeGen/X86/ptrtoaddr.ll
new file mode 100644
index 0000000..24bf9db
--- /dev/null
+++ b/llvm/test/CodeGen/X86/ptrtoaddr.ll
@@ -0,0 +1,113 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-linux-gnu < %s -o - | FileCheck %s --check-prefix=CHECK
+
+define i1 @ptrtoaddr_1(ptr %p) {
+; CHECK-LABEL: ptrtoaddr_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rdi, %rax
+; CHECK-NEXT: xorb $1, %al
+; CHECK-NEXT: # kill: def $al killed $al killed $rax
+; CHECK-NEXT: retq
+entry:
+ %addr = ptrtoaddr ptr %p to i64
+ %trunc = trunc i64 %addr to i1
+ %ret = xor i1 %trunc, 1
+ ret i1 %ret
+}
+
+define i8 @ptrtoaddr_8(ptr %p) {
+; CHECK-LABEL: ptrtoaddr_8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rdi, %rax
+; CHECK-NEXT: notb %al
+; CHECK-NEXT: # kill: def $al killed $al killed $rax
+; CHECK-NEXT: retq
+entry:
+ %addr = ptrtoaddr ptr %p to i64
+ %trunc = trunc i64 %addr to i8
+ %ret = xor i8 %trunc, -1
+ ret i8 %ret
+}
+
+define i16 @ptrtoaddr_16(ptr %p) {
+; CHECK-LABEL: ptrtoaddr_16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rdi, %rax
+; CHECK-NEXT: notl %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $rax
+; CHECK-NEXT: retq
+entry:
+ %addr = ptrtoaddr ptr %p to i64
+ %trunc = trunc i64 %addr to i16
+ %ret = xor i16 %trunc, -1
+ ret i16 %ret
+}
+
+define i32 @ptrtoaddr_32(ptr %p) {
+; CHECK-LABEL: ptrtoaddr_32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rdi, %rax
+; CHECK-NEXT: notl %eax
+; CHECK-NEXT: # kill: def $eax killed $eax killed $rax
+; CHECK-NEXT: retq
+entry:
+ %addr = ptrtoaddr ptr %p to i64
+ %trunc = trunc i64 %addr to i32
+ %ret = xor i32 %trunc, -1
+ ret i32 %ret
+}
+
+define i64 @ptrtoaddr_64(ptr %p) {
+; CHECK-LABEL: ptrtoaddr_64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rdi, %rax
+; CHECK-NEXT: notq %rax
+; CHECK-NEXT: retq
+entry:
+ %addr = ptrtoaddr ptr %p to i64
+ %ret = xor i64 %addr, -1
+ ret i64 %ret
+}
+
+define i128 @ptrtoaddr_128(ptr %p) {
+; CHECK-LABEL: ptrtoaddr_128:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rdi, %rax
+; CHECK-NEXT: notq %rax
+; CHECK-NEXT: movq $-1, %rdx
+; CHECK-NEXT: retq
+entry:
+ %addr = ptrtoaddr ptr %p to i64
+ %ext = zext i64 %addr to i128
+ %ret = xor i128 %ext, -1
+ ret i128 %ret
+}
+
+
+define <2 x i64> @ptrtoaddr_vec(<2 x ptr> %p) {
+; CHECK-LABEL: ptrtoaddr_vec:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: pcmpeqd %xmm1, %xmm1
+; CHECK-NEXT: pxor %xmm1, %xmm0
+; CHECK-NEXT: retq
+entry:
+ %addr = ptrtoaddr <2 x ptr> %p to <2 x i64>
+ %ret = xor <2 x i64> %addr, <i64 -1, i64 -1>
+ ret <2 x i64> %ret
+}
+
+; UTC_ARGS: --disable
+
+@foo = global [16 x i8] zeroinitializer
+@addr = global i64 ptrtoaddr (ptr @foo to i64)
+; CHECK: addr:
+; CHECK-NEXT: .quad foo
+; CHECK-NEXT: .size addr, 8
+@addr_plus_one = global i64 ptrtoaddr (ptr getelementptr (i8, ptr @foo, i64 1) to i64)
+; CHECK: addr_plus_one:
+; CHECK-NEXT: .quad foo+1
+; CHECK-NEXT: .size addr_plus_one, 8
+@const_addr = global i64 ptrtoaddr (ptr getelementptr (i8, ptr null, i64 1) to i64)
+; CHECK: const_addr:
+; CHECK-NEXT: .quad 0+1
+; CHECK-NEXT: .size const_addr, 8
diff --git a/llvm/test/CodeGen/X86/select-optimize.ll b/llvm/test/CodeGen/X86/select-optimize.ll
index c7cf9cb..6cb49f2 100644
--- a/llvm/test/CodeGen/X86/select-optimize.ll
+++ b/llvm/test/CodeGen/X86/select-optimize.ll
@@ -233,7 +233,7 @@ define i32 @expensive_val_operand5(i32 %b, i32 %y, i1 %cmp) {
; CHECK-LABEL: @expensive_val_operand5(
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[A]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr nonnull [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[A]])
; CHECK-NEXT: [[CMP_FROZEN:%.*]] = freeze i1 [[CMP:%.*]]
; CHECK-NEXT: br i1 [[CMP_FROZEN]], label [[SELECT_TRUE_SINK:%.*]], label [[SELECT_END:%.*]], !prof [[PROF18]]
; CHECK: select.true.sink:
@@ -245,7 +245,7 @@ define i32 @expensive_val_operand5(i32 %b, i32 %y, i1 %cmp) {
;
%a = alloca i32
%load = load i32, ptr %a, align 8
- call void @llvm.lifetime.end.p0(i64 2, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
%x = add i32 %load, %b
%sel = select i1 %cmp, i32 %x, i32 %y, !prof !17
ret i32 %sel
@@ -520,7 +520,7 @@ for.body: ; preds = %for.body.preheader,
declare void @llvm.dbg.value(metadata, metadata, metadata)
; Function Attrs: argmemonly mustprogress nocallback nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @free(ptr nocapture)
diff --git a/llvm/test/DebugInfo/KeyInstructions/debugify.ll b/llvm/test/DebugInfo/KeyInstructions/debugify.ll
index 551ae27..d3be513 100644
--- a/llvm/test/DebugInfo/KeyInstructions/debugify.ll
+++ b/llvm/test/DebugInfo/KeyInstructions/debugify.ll
@@ -1,10 +1,7 @@
; RUN: opt -passes=debugify --debugify-atoms -S -o - < %s \
; RUN: | FileCheck %s
-;; Mirrors llvm/test/DebugInfo/debugify.ll. Split out here because the
-;; test is only supported if LLVM_EXPERIMENTAL_KEY_INSTRUCTIONS is enabled
-;; (which is a condition for running this test directory). Once the conditional
-;; compilation of the feature is removed this can be merged into the original.
+;; Mirrors llvm/test/DebugInfo/debugify.ll
; CHECK-LABEL: define void @foo
define void @foo() {
diff --git a/llvm/test/DebugInfo/KeyInstructions/lit.local.cfg b/llvm/test/DebugInfo/KeyInstructions/lit.local.cfg
deleted file mode 100644
index 482bd5c..0000000
--- a/llvm/test/DebugInfo/KeyInstructions/lit.local.cfg
+++ /dev/null
@@ -1,2 +0,0 @@
-if not config.has_key_instructions:
- config.unsupported = True
diff --git a/llvm/test/Instrumentation/AddressSanitizer/lifetime-uar-uas.ll b/llvm/test/Instrumentation/AddressSanitizer/lifetime-uar-uas.ll
index 3685d8d..aad8940 100644
--- a/llvm/test/Instrumentation/AddressSanitizer/lifetime-uar-uas.ll
+++ b/llvm/test/Instrumentation/AddressSanitizer/lifetime-uar-uas.ll
@@ -8,9 +8,6 @@
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
-
define i32 @basic_test(i64 %i) sanitize_address {
; CHECK-LABEL: define i32 @basic_test(
@@ -22,7 +19,7 @@ entry:
; CHECK-UAS: store i64 -868082052615769615, ptr %{{[0-9]+}}
; CHECK-UAS-SS-NOT: store i64
- call void @llvm.lifetime.start.p0(i64 2, ptr %c)
+ call void @llvm.lifetime.start.p0(ptr %c)
; Memory is unpoisoned at llvm.lifetime.start: 01
; CHECK-UAS: store i8 2, ptr %{{[0-9]+}}
@@ -30,7 +27,7 @@ entry:
store volatile i32 0, ptr %retval
store volatile i8 0, ptr %ci, align 1
- call void @llvm.lifetime.end.p0(i64 2, ptr %c)
+ call void @llvm.lifetime.end.p0(ptr %c)
; Memory is poisoned at llvm.lifetime.end: F8
; CHECK-UAS: store i8 -8, ptr %{{[0-9]+}}
; CHECK-UAS-SS-NOT: store i8 -8,
diff --git a/llvm/test/Instrumentation/AddressSanitizer/lifetime.ll b/llvm/test/Instrumentation/AddressSanitizer/lifetime.ll
index 9594370..d1e0180 100644
--- a/llvm/test/Instrumentation/AddressSanitizer/lifetime.ll
+++ b/llvm/test/Instrumentation/AddressSanitizer/lifetime.ll
@@ -9,73 +9,10 @@ target triple = "x86_64-unknown-linux-gnu"
declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
-define void @lifetime_no_size(i64 %i) sanitize_address {
-; CHECK-LABEL: define void @lifetime_no_size(
-; CHECK-SAME: i64 [[I:%.*]]) #[[ATTR1:[0-9]+]] {
-; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[MYALLOCA:%.*]] = alloca i8, i64 64, align 32
-; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[MYALLOCA]] to i64
-; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[TMP0]], 32
-; CHECK-NEXT: [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP0]] to ptr
-; CHECK-NEXT: store i64 1102416563, ptr [[TMP3]], align 8
-; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[TMP0]], 8
-; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-; CHECK-NEXT: store i64 ptrtoint (ptr @___asan_gen_stack to i64), ptr [[TMP5]], align 8
-; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP0]], 16
-; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
-; CHECK-NEXT: store i64 ptrtoint (ptr @lifetime_no_size to i64), ptr [[TMP7]], align 8
-; CHECK-NEXT: [[TMP8:%.*]] = lshr i64 [[TMP0]], 3
-; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[TMP8]], 2147450880
-; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[TMP9]], 0
-; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
-; CHECK-NEXT: store i64 -868083117767659023, ptr [[TMP11]], align 1
-; CHECK-NEXT: [[AI:%.*]] = getelementptr inbounds [2 x i32], ptr [[TMP2]], i64 0, i64 [[I]]
-; CHECK-NEXT: [[TMP12:%.*]] = ptrtoint ptr [[AI]] to i64
-; CHECK-NEXT: [[TMP13:%.*]] = lshr i64 [[TMP12]], 3
-; CHECK-NEXT: [[TMP14:%.*]] = add i64 [[TMP13]], 2147450880
-; CHECK-NEXT: [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
-; CHECK-NEXT: [[TMP16:%.*]] = load i8, ptr [[TMP15]], align 1
-; CHECK-NEXT: [[TMP17:%.*]] = icmp ne i8 [[TMP16]], 0
-; CHECK-NEXT: br i1 [[TMP17]], label %[[BB18:.*]], label %[[BB23:.*]], !prof [[PROF1:![0-9]+]]
-; CHECK: [[BB18]]:
-; CHECK-NEXT: [[TMP19:%.*]] = and i64 [[TMP12]], 7
-; CHECK-NEXT: [[TMP20:%.*]] = trunc i64 [[TMP19]] to i8
-; CHECK-NEXT: [[TMP21:%.*]] = icmp sge i8 [[TMP20]], [[TMP16]]
-; CHECK-NEXT: br i1 [[TMP21]], label %[[BB22:.*]], label %[[BB23]]
-; CHECK: [[BB22]]:
-; CHECK-NEXT: call void @__asan_report_store1(i64 [[TMP12]]) #[[ATTR4:[0-9]+]]
-; CHECK-NEXT: unreachable
-; CHECK: [[BB23]]:
-; CHECK-NEXT: store volatile i8 0, ptr [[AI]], align 4
-; CHECK-NEXT: store i64 1172321806, ptr [[TMP3]], align 8
-; CHECK-NEXT: [[TMP24:%.*]] = add i64 [[TMP9]], 0
-; CHECK-NEXT: [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr
-; CHECK-NEXT: store i64 0, ptr [[TMP25]], align 1
-; CHECK-NEXT: ret void
-;
-entry:
- %a = alloca [2 x i32], align 4
-
- ; Poison memory in prologue: 0xf3f3f300f1f1f1f1
-
- call void @llvm.lifetime.start.p0(i64 -1, ptr %a)
- ; Check that lifetime with no size are ignored.
-
- %ai = getelementptr inbounds [2 x i32], ptr %a, i64 0, i64 %i
- store volatile i8 0, ptr %ai, align 4
-
- call void @llvm.lifetime.end.p0(i64 -1, ptr %a)
- ; Check that lifetime with no size are ignored.
-
- ; Unpoison stack frame on exit.
- ret void
-}
-
; Generic case of lifetime analysis.
define void @lifetime() sanitize_address {
; CHECK-DEFAULT-LABEL: define void @lifetime(
-; CHECK-DEFAULT-SAME: ) #[[ATTR1]] {
+; CHECK-DEFAULT-SAME: ) #[[ATTR0:[0-9]+]] {
; CHECK-DEFAULT-NEXT: [[TMP1:%.*]] = alloca i64, align 32
; CHECK-DEFAULT-NEXT: store i64 0, ptr [[TMP1]], align 8
; CHECK-DEFAULT-NEXT: [[MYALLOCA:%.*]] = alloca i8, i64 64, align 32
@@ -86,7 +23,7 @@ define void @lifetime() sanitize_address {
; CHECK-DEFAULT-NEXT: store i64 1102416563, ptr [[TMP5]], align 8
; CHECK-DEFAULT-NEXT: [[TMP6:%.*]] = add i64 [[TMP2]], 8
; CHECK-DEFAULT-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
-; CHECK-DEFAULT-NEXT: store i64 ptrtoint (ptr @___asan_gen_stack.1 to i64), ptr [[TMP7]], align 8
+; CHECK-DEFAULT-NEXT: store i64 ptrtoint (ptr @___asan_gen_stack to i64), ptr [[TMP7]], align 8
; CHECK-DEFAULT-NEXT: [[TMP8:%.*]] = add i64 [[TMP2]], 16
; CHECK-DEFAULT-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-DEFAULT-NEXT: store i64 ptrtoint (ptr @lifetime to i64), ptr [[TMP9]], align 8
@@ -104,14 +41,14 @@ define void @lifetime() sanitize_address {
; CHECK-DEFAULT-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
; CHECK-DEFAULT-NEXT: [[TMP20:%.*]] = load i8, ptr [[TMP19]], align 1
; CHECK-DEFAULT-NEXT: [[TMP21:%.*]] = icmp ne i8 [[TMP20]], 0
-; CHECK-DEFAULT-NEXT: br i1 [[TMP21]], label %[[BB22:.*]], label %[[BB27:.*]], !prof [[PROF1]]
+; CHECK-DEFAULT-NEXT: br i1 [[TMP21]], label %[[BB22:.*]], label %[[BB27:.*]], !prof [[PROF1:![0-9]+]]
; CHECK-DEFAULT: [[BB22]]:
; CHECK-DEFAULT-NEXT: [[TMP23:%.*]] = and i64 [[TMP16]], 7
; CHECK-DEFAULT-NEXT: [[TMP24:%.*]] = trunc i64 [[TMP23]] to i8
; CHECK-DEFAULT-NEXT: [[TMP25:%.*]] = icmp sge i8 [[TMP24]], [[TMP20]]
; CHECK-DEFAULT-NEXT: br i1 [[TMP25]], label %[[BB26:.*]], label %[[BB27]]
; CHECK-DEFAULT: [[BB26]]:
-; CHECK-DEFAULT-NEXT: call void @__asan_report_store1(i64 [[TMP16]]) #[[ATTR4]]
+; CHECK-DEFAULT-NEXT: call void @__asan_report_store1(i64 [[TMP16]]) #[[ATTR4:[0-9]+]]
; CHECK-DEFAULT-NEXT: unreachable
; CHECK-DEFAULT: [[BB27]]:
; CHECK-DEFAULT-NEXT: store volatile i8 0, ptr [[TMP4]], align 1
@@ -182,7 +119,7 @@ define void @lifetime() sanitize_address {
; CHECK-DEFAULT-NEXT: ret void
;
; CHECK-NO-DYNAMIC-LABEL: define void @lifetime(
-; CHECK-NO-DYNAMIC-SAME: ) #[[ATTR1]] {
+; CHECK-NO-DYNAMIC-SAME: ) #[[ATTR0:[0-9]+]] {
; CHECK-NO-DYNAMIC-NEXT: [[MYALLOCA:%.*]] = alloca i8, i64 64, align 32
; CHECK-NO-DYNAMIC-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[MYALLOCA]] to i64
; CHECK-NO-DYNAMIC-NEXT: [[TMP2:%.*]] = add i64 [[TMP1]], 32
@@ -191,7 +128,7 @@ define void @lifetime() sanitize_address {
; CHECK-NO-DYNAMIC-NEXT: store i64 1102416563, ptr [[TMP4]], align 8
; CHECK-NO-DYNAMIC-NEXT: [[TMP5:%.*]] = add i64 [[TMP1]], 8
; CHECK-NO-DYNAMIC-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
-; CHECK-NO-DYNAMIC-NEXT: store i64 ptrtoint (ptr @___asan_gen_stack.1 to i64), ptr [[TMP6]], align 8
+; CHECK-NO-DYNAMIC-NEXT: store i64 ptrtoint (ptr @___asan_gen_stack to i64), ptr [[TMP6]], align 8
; CHECK-NO-DYNAMIC-NEXT: [[TMP7:%.*]] = add i64 [[TMP1]], 16
; CHECK-NO-DYNAMIC-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
; CHECK-NO-DYNAMIC-NEXT: store i64 ptrtoint (ptr @lifetime to i64), ptr [[TMP8]], align 8
@@ -209,14 +146,14 @@ define void @lifetime() sanitize_address {
; CHECK-NO-DYNAMIC-NEXT: [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
; CHECK-NO-DYNAMIC-NEXT: [[TMP19:%.*]] = load i8, ptr [[TMP18]], align 1
; CHECK-NO-DYNAMIC-NEXT: [[TMP20:%.*]] = icmp ne i8 [[TMP19]], 0
-; CHECK-NO-DYNAMIC-NEXT: br i1 [[TMP20]], label %[[BB21:.*]], label %[[BB26:.*]], !prof [[PROF1]]
+; CHECK-NO-DYNAMIC-NEXT: br i1 [[TMP20]], label %[[BB21:.*]], label %[[BB26:.*]], !prof [[PROF1:![0-9]+]]
; CHECK-NO-DYNAMIC: [[BB21]]:
; CHECK-NO-DYNAMIC-NEXT: [[TMP22:%.*]] = and i64 [[TMP15]], 7
; CHECK-NO-DYNAMIC-NEXT: [[TMP23:%.*]] = trunc i64 [[TMP22]] to i8
; CHECK-NO-DYNAMIC-NEXT: [[TMP24:%.*]] = icmp sge i8 [[TMP23]], [[TMP19]]
; CHECK-NO-DYNAMIC-NEXT: br i1 [[TMP24]], label %[[BB25:.*]], label %[[BB26]]
; CHECK-NO-DYNAMIC: [[BB25]]:
-; CHECK-NO-DYNAMIC-NEXT: call void @__asan_report_store1(i64 [[TMP15]]) #[[ATTR4]]
+; CHECK-NO-DYNAMIC-NEXT: call void @__asan_report_store1(i64 [[TMP15]]) #[[ATTR4:[0-9]+]]
; CHECK-NO-DYNAMIC-NEXT: unreachable
; CHECK-NO-DYNAMIC: [[BB26]]:
; CHECK-NO-DYNAMIC-NEXT: store volatile i8 0, ptr [[TMP3]], align 1
@@ -227,7 +164,7 @@ define void @lifetime() sanitize_address {
; CHECK-NO-DYNAMIC-NEXT: [[TMP30:%.*]] = inttoptr i64 [[TMP29]] to ptr
; CHECK-NO-DYNAMIC-NEXT: store i8 -8, ptr [[TMP30]], align 1
; CHECK-NO-DYNAMIC-NEXT: [[ARR:%.*]] = alloca [10 x i32], align 16
-; CHECK-NO-DYNAMIC-NEXT: call void @llvm.lifetime.start.p0(i64 40, ptr [[ARR]])
+; CHECK-NO-DYNAMIC-NEXT: call void @llvm.lifetime.start.p0(ptr [[ARR]])
; CHECK-NO-DYNAMIC-NEXT: [[TMP31:%.*]] = ptrtoint ptr [[ARR]] to i64
; CHECK-NO-DYNAMIC-NEXT: [[TMP32:%.*]] = lshr i64 [[TMP31]], 3
; CHECK-NO-DYNAMIC-NEXT: [[TMP33:%.*]] = add i64 [[TMP32]], 2147450880
@@ -245,7 +182,7 @@ define void @lifetime() sanitize_address {
; CHECK-NO-DYNAMIC-NEXT: unreachable
; CHECK-NO-DYNAMIC: [[BB42]]:
; CHECK-NO-DYNAMIC-NEXT: store volatile i8 0, ptr [[ARR]], align 1
-; CHECK-NO-DYNAMIC-NEXT: call void @llvm.lifetime.end.p0(i64 40, ptr [[ARR]])
+; CHECK-NO-DYNAMIC-NEXT: call void @llvm.lifetime.end.p0(ptr [[ARR]])
; CHECK-NO-DYNAMIC-NEXT: [[TMP43:%.*]] = add i64 [[TMP10]], 4
; CHECK-NO-DYNAMIC-NEXT: [[TMP44:%.*]] = inttoptr i64 [[TMP43]] to ptr
; CHECK-NO-DYNAMIC-NEXT: store i8 4, ptr [[TMP44]], align 1
@@ -318,8 +255,8 @@ define void @zero_sized(i64 %a) #0 {
; CHECK-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
; CHECK-NEXT: [[B:%.*]] = alloca [0 x i8], align 1
; CHECK-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 0, ptr [[B]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 0, ptr [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B]])
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Instrumentation/AddressSanitizer/remove-memory-effects.ll b/llvm/test/Instrumentation/AddressSanitizer/remove-memory-effects.ll
index 481e780..07b28f4 100644
--- a/llvm/test/Instrumentation/AddressSanitizer/remove-memory-effects.ll
+++ b/llvm/test/Instrumentation/AddressSanitizer/remove-memory-effects.ll
@@ -10,9 +10,9 @@ declare void @foo(ptr writeonly) memory(argmem: write)
define void @bar() sanitize_address {
entry:
%x = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
call void @foo(ptr %x)
- call void @llvm.lifetime.end.p0(i64 4, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
ret void
}
diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/RISCV/exception-lifetime.ll b/llvm/test/Instrumentation/HWAddressSanitizer/RISCV/exception-lifetime.ll
index ac5d8b8..37b280c 100644
--- a/llvm/test/Instrumentation/HWAddressSanitizer/RISCV/exception-lifetime.ll
+++ b/llvm/test/Instrumentation/HWAddressSanitizer/RISCV/exception-lifetime.ll
@@ -10,8 +10,8 @@ target triple = "riscv64-unknown-linux"
declare void @mayFail(ptr %x) sanitize_hwaddress
declare void @onExcept(ptr %x) sanitize_hwaddress
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind
+declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind
declare i32 @__gxx_personality_v0(...)
define void @test() sanitize_hwaddress personality ptr @__gxx_personality_v0 {
@@ -46,7 +46,7 @@ define void @test() sanitize_hwaddress personality ptr @__gxx_personality_v0 {
; CHECK-NEXT: [[X_HWASAN:%.*]] = inttoptr i64 [[TMP19]] to ptr
; CHECK-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8
; CHECK-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
; CHECK-NEXT: [[TMP20:%.*]] = trunc i64 [[TMP15]] to i8
; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[X]] to i64
; CHECK-NEXT: [[TMP22:%.*]] = and i64 [[TMP21]], 72057594037927935
@@ -65,7 +65,7 @@ define void @test() sanitize_hwaddress personality ptr @__gxx_personality_v0 {
; CHECK-NEXT: [[TMP30:%.*]] = lshr i64 [[TMP29]], 4
; CHECK-NEXT: [[TMP31:%.*]] = getelementptr i8, ptr [[TMP14]], i64 [[TMP30]]
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP31]], i8 [[TMP27]], i64 1, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]])
; CHECK-NEXT: ret void
; CHECK: lpad:
; CHECK-NEXT: [[TMP32:%.*]] = landingpad { ptr, i32 }
@@ -81,7 +81,7 @@ define void @test() sanitize_hwaddress personality ptr @__gxx_personality_v0 {
; CHECK-NEXT: [[TMP38:%.*]] = lshr i64 [[TMP37]], 4
; CHECK-NEXT: [[TMP39:%.*]] = getelementptr i8, ptr [[TMP14]], i64 [[TMP38]]
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP39]], i8 [[TMP35]], i64 1, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]])
; CHECK-NEXT: br label [[EH_RESUME:%.*]]
; CHECK: eh.resume:
; CHECK-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8
@@ -94,12 +94,12 @@ entry:
%x = alloca i32, align 8
%exn.slot = alloca ptr, align 8
%ehselector.slot = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
invoke void @mayFail(ptr %x) to label %invoke.cont unwind label %lpad
invoke.cont: ; preds = %entry
- call void @llvm.lifetime.end.p0(i64 4, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
ret void
lpad: ; preds = %entry
@@ -111,7 +111,7 @@ lpad: ; preds = %entry
%2 = extractvalue { ptr, i32 } %0, 1
store i32 %2, ptr %ehselector.slot, align 4
call void @onExcept(ptr %x) #18
- call void @llvm.lifetime.end.p0(i64 4, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
br label %eh.resume
eh.resume: ; preds = %lpad
diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/RISCV/use-after-scope-setjmp.ll b/llvm/test/Instrumentation/HWAddressSanitizer/RISCV/use-after-scope-setjmp.ll
index db78c1f..d2949bf 100644
--- a/llvm/test/Instrumentation/HWAddressSanitizer/RISCV/use-after-scope-setjmp.ll
+++ b/llvm/test/Instrumentation/HWAddressSanitizer/RISCV/use-after-scope-setjmp.ll
@@ -78,13 +78,13 @@ sw.bb1: ; preds = %entry
br label %return
while.body: ; preds = %entry
- call void @llvm.lifetime.start.p0(i64 4096, ptr nonnull %buf) #10
+ call void @llvm.lifetime.start.p0(ptr nonnull %buf) #10
store ptr %buf, ptr @stackbuf, align 8
; may_jump may call longjmp, going back to the switch (and then the return),
; bypassing the lifetime.end. This is why we need to untag on the return,
; rather than the lifetime.end.
call void @may_jump()
- call void @llvm.lifetime.end.p0(i64 4096, ptr nonnull %buf) #10
+ call void @llvm.lifetime.end.p0(ptr nonnull %buf) #10
br label %return
return: ; preds = %entry, %while.body, %sw.bb1
@@ -94,5 +94,5 @@ return: ; preds = %entry, %while.body,
declare i32 @setjmp(ptr noundef) returns_twice
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/X86/alloca.ll b/llvm/test/Instrumentation/HWAddressSanitizer/X86/alloca.ll
index 292a565..ef86e63 100644
--- a/llvm/test/Instrumentation/HWAddressSanitizer/X86/alloca.ll
+++ b/llvm/test/Instrumentation/HWAddressSanitizer/X86/alloca.ll
@@ -109,7 +109,7 @@ define i32 @test_simple(ptr %a) sanitize_hwaddress {
; CHECK-NEXT: [[TMP8:%.*]] = shl i64 [[TMP5]], 57
; CHECK-NEXT: [[TMP9:%.*]] = or i64 [[TMP7]], [[TMP8]]
; CHECK-NEXT: [[BUF_SROA_0_HWASAN:%.*]] = inttoptr i64 [[TMP9]] to ptr
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[BUF_SROA_0]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[BUF_SROA_0]])
; CHECK-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP5]] to i8
; CHECK-NEXT: call void @__hwasan_tag_memory(ptr [[BUF_SROA_0]], i8 [[TMP10]], i64 16)
; CHECK-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[BUF_SROA_0_HWASAN]] to i64
@@ -117,7 +117,7 @@ define i32 @test_simple(ptr %a) sanitize_hwaddress {
; CHECK-NEXT: store volatile i8 0, ptr [[BUF_SROA_0_HWASAN]], align 4
; CHECK-NEXT: [[TMP12:%.*]] = trunc i64 [[HWASAN_UAR_TAG]] to i8
; CHECK-NEXT: call void @__hwasan_tag_memory(ptr [[BUF_SROA_0]], i8 [[TMP12]], i64 16)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[BUF_SROA_0]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[BUF_SROA_0]])
; CHECK-NEXT: ret i32 0
;
; INLINE-LABEL: define i32 @test_simple
@@ -150,7 +150,7 @@ define i32 @test_simple(ptr %a) sanitize_hwaddress {
; INLINE-NEXT: [[TMP19:%.*]] = shl i64 [[TMP16]], 57
; INLINE-NEXT: [[TMP20:%.*]] = or i64 [[TMP18]], [[TMP19]]
; INLINE-NEXT: [[BUF_SROA_0_HWASAN:%.*]] = inttoptr i64 [[TMP20]] to ptr
-; INLINE-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[BUF_SROA_0]])
+; INLINE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[BUF_SROA_0]])
; INLINE-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP16]] to i8
; INLINE-NEXT: [[TMP22:%.*]] = ptrtoint ptr [[BUF_SROA_0]] to i64
; INLINE-NEXT: [[TMP23:%.*]] = and i64 [[TMP22]], -9079256848778919937
@@ -197,19 +197,19 @@ define i32 @test_simple(ptr %a) sanitize_hwaddress {
; INLINE-NEXT: [[TMP54:%.*]] = lshr i64 [[TMP53]], 4
; INLINE-NEXT: [[TMP55:%.*]] = getelementptr i8, ptr [[TMP14]], i64 [[TMP54]]
; INLINE-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP55]], i8 [[TMP51]], i64 1, i1 false)
-; INLINE-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[BUF_SROA_0]])
+; INLINE-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[BUF_SROA_0]])
; INLINE-NEXT: ret i32 0
;
entry:
%buf.sroa.0 = alloca i8, align 4
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.start.p0(ptr nonnull %buf.sroa.0)
store volatile i8 0, ptr %buf.sroa.0, align 4
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.end.p0(ptr nonnull %buf.sroa.0)
ret i32 0
}
; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/exception-lifetime.ll b/llvm/test/Instrumentation/HWAddressSanitizer/exception-lifetime.ll
index 3e13eb4..f2ba94c 100644
--- a/llvm/test/Instrumentation/HWAddressSanitizer/exception-lifetime.ll
+++ b/llvm/test/Instrumentation/HWAddressSanitizer/exception-lifetime.ll
@@ -10,8 +10,8 @@ target triple = "aarch64--linux-android"
declare void @mayFail(ptr %x) sanitize_hwaddress
declare void @onExcept(ptr %x) sanitize_hwaddress
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind
+declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind
declare i32 @__gxx_personality_v0(...)
define void @test() sanitize_hwaddress personality ptr @__gxx_personality_v0 {
@@ -48,7 +48,7 @@ define void @test() sanitize_hwaddress personality ptr @__gxx_personality_v0 {
; CHECK-NEXT: [[X_HWASAN:%.*]] = inttoptr i64 [[TMP21]] to ptr
; CHECK-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8
; CHECK-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
; CHECK-NEXT: [[TMP22:%.*]] = trunc i64 [[TMP17]] to i8
; CHECK-NEXT: [[TMP23:%.*]] = ptrtoint ptr [[X]] to i64
; CHECK-NEXT: [[TMP24:%.*]] = and i64 [[TMP23]], 72057594037927935
@@ -64,7 +64,7 @@ define void @test() sanitize_hwaddress personality ptr @__gxx_personality_v0 {
; CHECK-NEXT: [[TMP30:%.*]] = lshr i64 [[TMP29]], 4
; CHECK-NEXT: [[TMP31:%.*]] = getelementptr i8, ptr [[TMP16]], i64 [[TMP30]]
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP31]], i8 [[TMP27]], i64 1, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]])
; CHECK-NEXT: ret void
; CHECK: lpad:
; CHECK-NEXT: [[TMP32:%.*]] = landingpad { ptr, i32 }
@@ -82,7 +82,7 @@ define void @test() sanitize_hwaddress personality ptr @__gxx_personality_v0 {
; CHECK-NEXT: [[TMP39:%.*]] = lshr i64 [[TMP38]], 4
; CHECK-NEXT: [[TMP40:%.*]] = getelementptr i8, ptr [[TMP16]], i64 [[TMP39]]
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP40]], i8 [[TMP36]], i64 1, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]])
; CHECK-NEXT: br label [[EH_RESUME:%.*]]
; CHECK: eh.resume:
; CHECK-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8
@@ -95,12 +95,12 @@ entry:
%x = alloca i32, align 8
%exn.slot = alloca ptr, align 8
%ehselector.slot = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
invoke void @mayFail(ptr %x) to label %invoke.cont unwind label %lpad
invoke.cont: ; preds = %entry
- call void @llvm.lifetime.end.p0(i64 4, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
ret void
lpad: ; preds = %entry
@@ -112,7 +112,7 @@ lpad: ; preds = %entry
%2 = extractvalue { ptr, i32 } %0, 1
store i32 %2, ptr %ehselector.slot, align 4
call void @onExcept(ptr %x) #18
- call void @llvm.lifetime.end.p0(i64 4, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
br label %eh.resume
eh.resume: ; preds = %lpad
diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/stack-coloring.ll b/llvm/test/Instrumentation/HWAddressSanitizer/stack-coloring.ll
index ae6fe57..a40d964 100644
--- a/llvm/test/Instrumentation/HWAddressSanitizer/stack-coloring.ll
+++ b/llvm/test/Instrumentation/HWAddressSanitizer/stack-coloring.ll
@@ -16,22 +16,22 @@ define i32 @myCall_w2(i32 %in) sanitize_hwaddress {
entry:
%a = alloca [17 x ptr], align 8
%a2 = alloca [16 x ptr], align 8
- call void @llvm.lifetime.start.p0(i64 136, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
%t1 = call i32 @foo(i32 %in, ptr %a)
%t2 = call i32 @foo(i32 %in, ptr %a)
- call void @llvm.lifetime.end.p0(i64 136, ptr %a)
- call void @llvm.lifetime.start.p0(i64 128, ptr %a2)
+ call void @llvm.lifetime.end.p0(ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a2)
%t3 = call i32 @foo(i32 %in, ptr %a2)
%t4 = call i32 @foo(i32 %in, ptr %a2)
- call void @llvm.lifetime.end.p0(i64 128, ptr %a2)
+ call void @llvm.lifetime.end.p0(ptr %a2)
%t5 = add i32 %t1, %t2
%t6 = add i32 %t3, %t4
%t7 = add i32 %t5, %t6
ret i32 %t7
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind
declare i32 @foo(i32, ptr)
diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/stack-safety-analysis.ll b/llvm/test/Instrumentation/HWAddressSanitizer/stack-safety-analysis.ll
index 60af551..a76566b 100644
--- a/llvm/test/Instrumentation/HWAddressSanitizer/stack-safety-analysis.ll
+++ b/llvm/test/Instrumentation/HWAddressSanitizer/stack-safety-analysis.ll
@@ -24,9 +24,9 @@ entry:
; SAFETY-REMARKS: --- !Passed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: safeAlloca{{[[:space:]]}}Function: test_simple
; SAFETY-REMARKS: --- !Passed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: ignoreAccess{{[[:space:]]}}Function: test_simple
%buf.sroa.0 = alloca i8, align 4
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.start.p0(ptr nonnull %buf.sroa.0)
store volatile i8 0, ptr %buf.sroa.0, align 4, !tbaa !8
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.end.p0(ptr nonnull %buf.sroa.0)
ret i32 0
}
@@ -43,9 +43,9 @@ entry:
; SAFETY-REMARKS: --- !Passed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: safeAlloca{{[[:space:]]}}Function: test_cmpxchg
; SAFETY-REMARKS: --- !Passed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: ignoreAccess{{[[:space:]]}}Function: test_cmpxchg
%buf.sroa.0 = alloca i8, align 4
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.start.p0(ptr nonnull %buf.sroa.0)
%0 = cmpxchg ptr %buf.sroa.0, i8 1, i8 2 monotonic monotonic, align 4
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.end.p0(ptr nonnull %buf.sroa.0)
ret i32 0
}
@@ -62,9 +62,9 @@ entry:
; SAFETY-REMARKS: --- !Passed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: safeAlloca{{[[:space:]]}}Function: test_atomicrwm
; SAFETY-REMARKS: --- !Passed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: ignoreAccess{{[[:space:]]}}Function: test_atomicrwm
%buf.sroa.0 = alloca i8, align 4
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.start.p0(ptr nonnull %buf.sroa.0)
%0 = atomicrmw add ptr %buf.sroa.0, i8 1 monotonic, align 4
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.end.p0(ptr nonnull %buf.sroa.0)
ret i32 0
}
@@ -82,9 +82,9 @@ entry:
; SAFETY-REMARKS: --- !Passed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: ignoreAccess{{[[:space:]]}}Function: test_use
%buf.sroa.0 = alloca i8, align 4
call void @use(ptr nonnull %buf.sroa.0)
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.start.p0(ptr nonnull %buf.sroa.0)
store volatile i8 0, ptr %buf.sroa.0, align 4, !tbaa !8
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.end.p0(ptr nonnull %buf.sroa.0)
ret i32 0
}
@@ -101,9 +101,9 @@ entry:
; SAFETY-REMARKS: --- !Passed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: safeAlloca{{[[:space:]]}}Function: test_in_range
; SAFETY-REMARKS: --- !Passed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: ignoreAccess{{[[:space:]]}}Function: test_in_range
%buf.sroa.0 = alloca [10 x i8], align 4
- call void @llvm.lifetime.start.p0(i64 10, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.start.p0(ptr nonnull %buf.sroa.0)
store volatile i8 0, ptr %buf.sroa.0, align 4, !tbaa !8
- call void @llvm.lifetime.end.p0(i64 10, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.end.p0(ptr nonnull %buf.sroa.0)
ret i32 0
}
@@ -121,9 +121,9 @@ entry:
; SAFETY-REMARKS: --- !Passed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: ignoreAccess{{[[:space:]]}}Function: test_in_range2
%buf.sroa.0 = alloca [10 x i8], align 4
%ptr = getelementptr [10 x i8], ptr %buf.sroa.0, i32 0, i32 9
- call void @llvm.lifetime.start.p0(i64 10, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.start.p0(ptr nonnull %buf.sroa.0)
store volatile i8 0, ptr %ptr, align 4, !tbaa !8
- call void @llvm.lifetime.end.p0(i64 10, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.end.p0(ptr nonnull %buf.sroa.0)
ret i32 0
}
@@ -194,9 +194,9 @@ entry:
; SAFETY-REMARKS: --- !Missed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: ignoreAccess{{[[:space:]]}}Function: test_out_of_range
%buf.sroa.0 = alloca [10 x i8], align 4
%ptr = getelementptr [10 x i8], ptr %buf.sroa.0, i32 0, i32 10
- call void @llvm.lifetime.start.p0(i64 10, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.start.p0(ptr nonnull %buf.sroa.0)
store volatile i8 0, ptr %ptr, align 4, !tbaa !8
- call void @llvm.lifetime.end.p0(i64 10, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.end.p0(ptr nonnull %buf.sroa.0)
ret i32 0
}
@@ -213,9 +213,9 @@ entry:
; SAFETY-REMARKS: --- !Missed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: ignoreAccess{{[[:space:]]}}Function: test_out_of_range2
%buf.sroa.0 = alloca [10 x i8], align 4
%ptr = getelementptr [10 x i8], ptr %buf.sroa.0, i32 0, i32 10
- call void @llvm.lifetime.start.p0(i64 10, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.start.p0(ptr nonnull %buf.sroa.0)
%0 = cmpxchg ptr %ptr, i8 1, i8 2 monotonic monotonic, align 4
- call void @llvm.lifetime.end.p0(i64 10, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.end.p0(ptr nonnull %buf.sroa.0)
ret i32 0
}
@@ -268,11 +268,11 @@ entry:
%ptr = getelementptr [10 x i8], ptr %buf.sroa.0, i32 0, i32 9
%buf.sroa.1 = alloca [10 x i8], align 4
%ptr1 = getelementptr [10 x i8], ptr %buf.sroa.0, i32 0, i32 9
- call void @llvm.lifetime.start.p0(i64 10, ptr nonnull %buf.sroa.0)
- call void @llvm.lifetime.end.p0(i64 10, ptr nonnull %buf.sroa.0)
- call void @llvm.lifetime.start.p0(i64 10, ptr nonnull %buf.sroa.1)
+ call void @llvm.lifetime.start.p0(ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.end.p0(ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.start.p0(ptr nonnull %buf.sroa.1)
call void @llvm.memmove.p0.p0.i32(ptr %ptr, ptr %ptr1, i32 1, i1 true)
- call void @llvm.lifetime.end.p0(i64 10, ptr nonnull %buf.sroa.1)
+ call void @llvm.lifetime.end.p0(ptr nonnull %buf.sroa.1)
ret i32 0
}
@@ -289,31 +289,9 @@ entry:
; SAFETY-REMARKS: --- !Missed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: ignoreAccess{{[[:space:]]}}Function: test_out_of_range6
%buf.sroa.0 = alloca [10 x i8], align 4
%ptr = getelementptr [10 x i8], ptr %buf.sroa.0, i32 0, i32 10
- call void @llvm.lifetime.start.p0(i64 10, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.start.p0(ptr nonnull %buf.sroa.0)
%0 = atomicrmw add ptr %ptr, i32 1 monotonic, align 4
- call void @llvm.lifetime.end.p0(i64 10, ptr nonnull %buf.sroa.0)
- ret i32 0
-}
-
-; Check an alloca with potentially out of range GEP to ensure it gets a tag and
-; check.
-define i32 @test_potentially_out_of_range(ptr %a) sanitize_hwaddress {
-entry:
- ; CHECK-LABEL: @test_potentially_out_of_range
- ; NOSAFETY: call {{.*}}__hwasan_generate_tag
- ; NOSAFETY: call {{.*}}__hwasan_store
- ; SAFETY: call {{.*}}__hwasan_generate_tag
- ; SAFETY: call {{.*}}__hwasan_store
- ; NOSTACK-NOT: call {{.*}}__hwasan_generate_tag
- ; NOSTACK-NOT: call {{.*}}__hwasan_store
- ; SAFETY-REMARKS: --- !Missed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: safeAlloca{{[[:space:]]}}Function: test_potentially_out_of_range
- ; SAFETY-REMARKS: --- !Missed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: ignoreAccess{{[[:space:]]}}Function: test_potentially_out_of_range
- %buf.sroa.0 = alloca [10 x i8], align 4
- %off = call i32 @getoffset()
- %ptr = getelementptr [10 x i8], ptr %buf.sroa.0, i32 0, i32 %off
- call void @llvm.lifetime.start.p0(i64 10, ptr nonnull %ptr)
- store volatile i8 0, ptr %ptr, align 4, !tbaa !8
- call void @llvm.lifetime.end.p0(i64 10, ptr nonnull %ptr)
+ call void @llvm.lifetime.end.p0(ptr nonnull %buf.sroa.0)
ret i32 0
}
@@ -333,26 +311,6 @@ entry:
call void @llvm.memmove.p0.p0.i32(ptr %ptr, ptr %a, i32 1, i1 true)
ret i32 0
}
-; Check an alloca with potentially out of range GEP to ensure it gets a tag and
-; check.
-define i32 @test_unclear(ptr %a) sanitize_hwaddress {
-entry:
- ; CHECK-LABEL: @test_unclear
- ; NOSAFETY: call {{.*}}__hwasan_generate_tag
- ; NOSAFETY: call {{.*}}__hwasan_store
- ; SAFETY: call {{.*}}__hwasan_generate_tag
- ; SAFETY: call {{.*}}__hwasan_store
- ; NOSTACK-NOT: call {{.*}}__hwasan_generate_tag
- ; NOSTACK: call {{.*}}__hwasan_store
- ; SAFETY-REMARKS: --- !Missed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: safeAlloca{{[[:space:]]}}Function: test_unclear
- ; SAFETY-REMARKS: --- !Missed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: ignoreAccess{{[[:space:]]}}Function: test_unclear
- %buf.sroa.0 = alloca i8, align 4
- %ptr = call ptr @getptr(ptr %buf.sroa.0)
- call void @llvm.lifetime.start.p0(i64 10, ptr nonnull %ptr)
- store volatile i8 0, ptr %ptr, align 4, !tbaa !8
- call void @llvm.lifetime.end.p0(i64 10, ptr nonnull %ptr)
- ret i32 0
-}
define i32 @test_select(ptr %a) sanitize_hwaddress {
entry:
@@ -367,11 +325,11 @@ entry:
; SAFETY-REMARKS: --- !Missed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: ignoreAccess{{[[:space:]]}}Function: test_select
%x = call ptr @getptr(ptr %a)
%buf.sroa.0 = alloca i8, align 4
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.start.p0(ptr nonnull %buf.sroa.0)
%c = call i1 @cond()
%ptr = select i1 %c, ptr %x, ptr %buf.sroa.0
store volatile i8 0, ptr %ptr, align 4, !tbaa !8
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.end.p0(ptr nonnull %buf.sroa.0)
ret i32 0
}
@@ -388,10 +346,10 @@ entry:
; SAFETY-REMARKS: --- !Missed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: safeAlloca{{[[:space:]]}}Function: test_retptr
; SAFETY-REMARKS: --- !Passed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: ignoreAccess{{[[:space:]]}}Function: test_retptr
%buf.sroa.0 = alloca i8, align 4
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.start.p0(ptr nonnull %buf.sroa.0)
%ptr = call ptr @retptr(ptr %buf.sroa.0)
store volatile i8 0, ptr %ptr, align 4, !tbaa !8
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.end.p0(ptr nonnull %buf.sroa.0)
ret i32 0
}
@@ -408,17 +366,17 @@ entry:
; SAFETY-REMARKS: --- !Passed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: safeAlloca{{[[:space:]]}}Function: test_lifetime_poison
; SAFETY-REMARKS: --- !Passed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: ignoreAccess{{[[:space:]]}}Function: test_lifetime_poison
%buf.sroa.0 = alloca i8, align 4
- call void @llvm.lifetime.start.p0(i64 1, ptr poison)
+ call void @llvm.lifetime.start.p0(ptr poison)
store volatile i8 0, ptr %buf.sroa.0, align 4, !tbaa !8
- call void @llvm.lifetime.end.p0(i64 1, ptr poison)
+ call void @llvm.lifetime.end.p0(ptr poison)
ret i32 0
}
; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @llvm.memset.p0.i32(ptr, i8, i32, i1)
declare void @llvm.memcpy.p0.p0.i32(ptr, ptr, i32, i1)
diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope-setjmp.ll b/llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope-setjmp.ll
index 57d37ca..af6411a 100644
--- a/llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope-setjmp.ll
+++ b/llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope-setjmp.ll
@@ -79,13 +79,13 @@ sw.bb1: ; preds = %entry
br label %return
while.body: ; preds = %entry
- call void @llvm.lifetime.start.p0(i64 4096, ptr nonnull %buf) #10
+ call void @llvm.lifetime.start.p0(ptr nonnull %buf) #10
store ptr %buf, ptr @stackbuf, align 8
; may_jump may call longjmp, going back to the switch (and then the return),
; bypassing the lifetime.end. This is why we need to untag on the return,
; rather than the lifetime.end.
call void @may_jump()
- call void @llvm.lifetime.end.p0(i64 4096, ptr nonnull %buf) #10
+ call void @llvm.lifetime.end.p0(ptr nonnull %buf) #10
br label %return
return: ; preds = %entry, %while.body, %sw.bb1
@@ -95,5 +95,5 @@ return: ; preds = %entry, %while.body,
declare i32 @setjmp(ptr noundef) returns_twice
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope.ll b/llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope.ll
index e30b518..cfded02 100644
--- a/llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope.ll
+++ b/llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope.ll
@@ -26,13 +26,13 @@ define dso_local i32 @standard_lifetime() local_unnamed_addr sanitize_hwaddress
; X86-SCOPE-NEXT: [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP10]] to ptr
; X86-SCOPE-NEXT: br label [[TMP11:%.*]]
; X86-SCOPE: 11:
-; X86-SCOPE-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[TMP4]])
+; X86-SCOPE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP4]])
; X86-SCOPE-NEXT: [[TMP12:%.*]] = trunc i64 [[TMP6]] to i8
; X86-SCOPE-NEXT: call void @__hwasan_tag_memory(ptr [[TMP4]], i8 [[TMP12]], i64 16)
; X86-SCOPE-NEXT: [[TMP13:%.*]] = tail call i1 (...) @cond()
; X86-SCOPE-NEXT: [[TMP14:%.*]] = trunc i64 [[HWASAN_UAR_TAG]] to i8
; X86-SCOPE-NEXT: call void @__hwasan_tag_memory(ptr [[TMP4]], i8 [[TMP14]], i64 16)
-; X86-SCOPE-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[TMP4]])
+; X86-SCOPE-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP4]])
; X86-SCOPE-NEXT: br i1 [[TMP13]], label [[TMP15:%.*]], label [[TMP11]]
; X86-SCOPE: 15:
; X86-SCOPE-NEXT: call void @use(ptr nonnull [[ALLOCA_0_HWASAN]])
@@ -96,7 +96,7 @@ define dso_local i32 @standard_lifetime() local_unnamed_addr sanitize_hwaddress
; AARCH64-SCOPE-NEXT: [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP24]] to ptr
; AARCH64-SCOPE-NEXT: br label [[TMP25:%.*]]
; AARCH64-SCOPE: 25:
-; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[TMP18]])
+; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP18]])
; AARCH64-SCOPE-NEXT: [[TMP26:%.*]] = trunc i64 [[TMP20]] to i8
; AARCH64-SCOPE-NEXT: [[TMP27:%.*]] = ptrtoint ptr [[TMP18]] to i64
; AARCH64-SCOPE-NEXT: [[TMP28:%.*]] = and i64 [[TMP27]], 72057594037927935
@@ -110,7 +110,7 @@ define dso_local i32 @standard_lifetime() local_unnamed_addr sanitize_hwaddress
; AARCH64-SCOPE-NEXT: [[TMP35:%.*]] = lshr i64 [[TMP34]], 4
; AARCH64-SCOPE-NEXT: [[TMP36:%.*]] = getelementptr i8, ptr [[TMP17]], i64 [[TMP35]]
; AARCH64-SCOPE-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP36]], i8 [[TMP32]], i64 1, i1 false)
-; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[TMP18]])
+; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP18]])
; AARCH64-SCOPE-NEXT: br i1 [[TMP31]], label [[TMP37:%.*]], label [[TMP25]]
; AARCH64-SCOPE: 37:
; AARCH64-SCOPE-NEXT: call void @use(ptr nonnull [[ALLOCA_0_HWASAN]])
@@ -198,7 +198,7 @@ define dso_local i32 @standard_lifetime() local_unnamed_addr sanitize_hwaddress
; AARCH64-SHORT-SCOPE-NEXT: [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP24]] to ptr
; AARCH64-SHORT-SCOPE-NEXT: br label [[TMP25:%.*]]
; AARCH64-SHORT-SCOPE: 25:
-; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[TMP18]])
+; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP18]])
; AARCH64-SHORT-SCOPE-NEXT: [[TMP26:%.*]] = trunc i64 [[TMP20]] to i8
; AARCH64-SHORT-SCOPE-NEXT: [[TMP27:%.*]] = ptrtoint ptr [[TMP18]] to i64
; AARCH64-SHORT-SCOPE-NEXT: [[TMP28:%.*]] = and i64 [[TMP27]], 72057594037927935
@@ -215,7 +215,7 @@ define dso_local i32 @standard_lifetime() local_unnamed_addr sanitize_hwaddress
; AARCH64-SHORT-SCOPE-NEXT: [[TMP37:%.*]] = lshr i64 [[TMP36]], 4
; AARCH64-SHORT-SCOPE-NEXT: [[TMP38:%.*]] = getelementptr i8, ptr [[TMP17]], i64 [[TMP37]]
; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP38]], i8 [[TMP34]], i64 1, i1 false)
-; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[TMP18]])
+; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP18]])
; AARCH64-SHORT-SCOPE-NEXT: br i1 [[TMP33]], label [[TMP39:%.*]], label [[TMP25]]
; AARCH64-SHORT-SCOPE: 39:
; AARCH64-SHORT-SCOPE-NEXT: call void @use(ptr nonnull [[ALLOCA_0_HWASAN]])
@@ -279,10 +279,10 @@ define dso_local i32 @standard_lifetime() local_unnamed_addr sanitize_hwaddress
2: ; preds = %2, %0
; We should tag the memory after the br (in the loop).
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %1)
+ call void @llvm.lifetime.start.p0(ptr nonnull %1)
%3 = tail call i1 (...) @cond() #2
; We should tag the memory before the next br (before the jump back).
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %1)
+ call void @llvm.lifetime.end.p0(ptr nonnull %1)
br i1 %3, label %4, label %2
4: ; preds = %2
@@ -307,13 +307,13 @@ define dso_local i32 @standard_lifetime_optnone() local_unnamed_addr optnone noi
; X86-SCOPE-NEXT: [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP10]] to ptr
; X86-SCOPE-NEXT: br label [[TMP11:%.*]]
; X86-SCOPE: 11:
-; X86-SCOPE-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[TMP4]])
+; X86-SCOPE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP4]])
; X86-SCOPE-NEXT: [[TMP12:%.*]] = trunc i64 [[TMP6]] to i8
; X86-SCOPE-NEXT: call void @__hwasan_tag_memory(ptr [[TMP4]], i8 [[TMP12]], i64 16)
; X86-SCOPE-NEXT: [[TMP13:%.*]] = tail call i1 (...) @cond()
; X86-SCOPE-NEXT: [[TMP14:%.*]] = trunc i64 [[HWASAN_UAR_TAG]] to i8
; X86-SCOPE-NEXT: call void @__hwasan_tag_memory(ptr [[TMP4]], i8 [[TMP14]], i64 16)
-; X86-SCOPE-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[TMP4]])
+; X86-SCOPE-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP4]])
; X86-SCOPE-NEXT: br i1 [[TMP13]], label [[TMP15:%.*]], label [[TMP11]]
; X86-SCOPE: 15:
; X86-SCOPE-NEXT: call void @use(ptr nonnull [[ALLOCA_0_HWASAN]])
@@ -377,7 +377,7 @@ define dso_local i32 @standard_lifetime_optnone() local_unnamed_addr optnone noi
; AARCH64-SCOPE-NEXT: [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP24]] to ptr
; AARCH64-SCOPE-NEXT: br label [[TMP25:%.*]]
; AARCH64-SCOPE: 25:
-; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[TMP18]])
+; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP18]])
; AARCH64-SCOPE-NEXT: [[TMP26:%.*]] = trunc i64 [[TMP20]] to i8
; AARCH64-SCOPE-NEXT: [[TMP27:%.*]] = ptrtoint ptr [[TMP18]] to i64
; AARCH64-SCOPE-NEXT: [[TMP28:%.*]] = and i64 [[TMP27]], 72057594037927935
@@ -391,7 +391,7 @@ define dso_local i32 @standard_lifetime_optnone() local_unnamed_addr optnone noi
; AARCH64-SCOPE-NEXT: [[TMP35:%.*]] = lshr i64 [[TMP34]], 4
; AARCH64-SCOPE-NEXT: [[TMP36:%.*]] = getelementptr i8, ptr [[TMP17]], i64 [[TMP35]]
; AARCH64-SCOPE-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP36]], i8 [[TMP32]], i64 1, i1 false)
-; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[TMP18]])
+; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP18]])
; AARCH64-SCOPE-NEXT: br i1 [[TMP31]], label [[TMP37:%.*]], label [[TMP25]]
; AARCH64-SCOPE: 37:
; AARCH64-SCOPE-NEXT: call void @use(ptr nonnull [[ALLOCA_0_HWASAN]])
@@ -479,7 +479,7 @@ define dso_local i32 @standard_lifetime_optnone() local_unnamed_addr optnone noi
; AARCH64-SHORT-SCOPE-NEXT: [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP24]] to ptr
; AARCH64-SHORT-SCOPE-NEXT: br label [[TMP25:%.*]]
; AARCH64-SHORT-SCOPE: 25:
-; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[TMP18]])
+; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP18]])
; AARCH64-SHORT-SCOPE-NEXT: [[TMP26:%.*]] = trunc i64 [[TMP20]] to i8
; AARCH64-SHORT-SCOPE-NEXT: [[TMP27:%.*]] = ptrtoint ptr [[TMP18]] to i64
; AARCH64-SHORT-SCOPE-NEXT: [[TMP28:%.*]] = and i64 [[TMP27]], 72057594037927935
@@ -496,7 +496,7 @@ define dso_local i32 @standard_lifetime_optnone() local_unnamed_addr optnone noi
; AARCH64-SHORT-SCOPE-NEXT: [[TMP37:%.*]] = lshr i64 [[TMP36]], 4
; AARCH64-SHORT-SCOPE-NEXT: [[TMP38:%.*]] = getelementptr i8, ptr [[TMP17]], i64 [[TMP37]]
; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP38]], i8 [[TMP34]], i64 1, i1 false)
-; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[TMP18]])
+; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP18]])
; AARCH64-SHORT-SCOPE-NEXT: br i1 [[TMP33]], label [[TMP39:%.*]], label [[TMP25]]
; AARCH64-SHORT-SCOPE: 39:
; AARCH64-SHORT-SCOPE-NEXT: call void @use(ptr nonnull [[ALLOCA_0_HWASAN]])
@@ -560,10 +560,10 @@ define dso_local i32 @standard_lifetime_optnone() local_unnamed_addr optnone noi
2: ; preds = %2, %0
; We should tag the memory after the br (in the loop).
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %1)
+ call void @llvm.lifetime.start.p0(ptr nonnull %1)
%3 = tail call i1 (...) @cond() #2
; We should tag the memory before the next br (before the jump back).
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %1)
+ call void @llvm.lifetime.end.p0(ptr nonnull %1)
br i1 %3, label %4, label %2
4: ; preds = %2
@@ -809,12 +809,12 @@ define dso_local i32 @multiple_lifetimes() local_unnamed_addr sanitize_hwaddress
%1 = alloca i8, align 1
; We erase lifetime markers if we insert instrumentation outside of the
; lifetime.
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %1)
+ call void @llvm.lifetime.start.p0(ptr nonnull %1)
call void @use(ptr nonnull %1) #2
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %1)
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %1)
+ call void @llvm.lifetime.end.p0(ptr nonnull %1)
+ call void @llvm.lifetime.start.p0(ptr nonnull %1)
call void @use(ptr nonnull %1) #2
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %1)
+ call void @llvm.lifetime.end.p0(ptr nonnull %1)
ret i32 0
}
@@ -833,7 +833,7 @@ define dso_local i32 @unreachable_exit() local_unnamed_addr sanitize_hwaddress {
; X86-SCOPE-NEXT: [[TMP9:%.*]] = shl i64 [[TMP6]], 57
; X86-SCOPE-NEXT: [[TMP10:%.*]] = or i64 [[TMP8]], [[TMP9]]
; X86-SCOPE-NEXT: [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP10]] to ptr
-; X86-SCOPE-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[TMP4]])
+; X86-SCOPE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP4]])
; X86-SCOPE-NEXT: [[TMP11:%.*]] = trunc i64 [[TMP6]] to i8
; X86-SCOPE-NEXT: call void @__hwasan_tag_memory(ptr [[TMP4]], i8 [[TMP11]], i64 16)
; X86-SCOPE-NEXT: [[TMP12:%.*]] = tail call i1 (...) @cond()
@@ -906,7 +906,7 @@ define dso_local i32 @unreachable_exit() local_unnamed_addr sanitize_hwaddress {
; AARCH64-SCOPE-NEXT: [[TMP23:%.*]] = shl i64 [[TMP20]], 56
; AARCH64-SCOPE-NEXT: [[TMP24:%.*]] = or i64 [[TMP22]], [[TMP23]]
; AARCH64-SCOPE-NEXT: [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP24]] to ptr
-; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[TMP18]])
+; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP18]])
; AARCH64-SCOPE-NEXT: [[TMP25:%.*]] = trunc i64 [[TMP20]] to i8
; AARCH64-SCOPE-NEXT: [[TMP26:%.*]] = ptrtoint ptr [[TMP18]] to i64
; AARCH64-SCOPE-NEXT: [[TMP27:%.*]] = and i64 [[TMP26]], 72057594037927935
@@ -1019,7 +1019,7 @@ define dso_local i32 @unreachable_exit() local_unnamed_addr sanitize_hwaddress {
; AARCH64-SHORT-SCOPE-NEXT: [[TMP23:%.*]] = shl i64 [[TMP20]], 56
; AARCH64-SHORT-SCOPE-NEXT: [[TMP24:%.*]] = or i64 [[TMP22]], [[TMP23]]
; AARCH64-SHORT-SCOPE-NEXT: [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP24]] to ptr
-; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[TMP18]])
+; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP18]])
; AARCH64-SHORT-SCOPE-NEXT: [[TMP25:%.*]] = trunc i64 [[TMP20]] to i8
; AARCH64-SHORT-SCOPE-NEXT: [[TMP26:%.*]] = ptrtoint ptr [[TMP18]] to i64
; AARCH64-SHORT-SCOPE-NEXT: [[TMP27:%.*]] = and i64 [[TMP26]], 72057594037927935
@@ -1109,13 +1109,13 @@ define dso_local i32 @unreachable_exit() local_unnamed_addr sanitize_hwaddress {
; AARCH64-SHORT-NOSCOPE-NEXT: ret i32 0
;
%1 = alloca i8, align 1
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %1)
+ call void @llvm.lifetime.start.p0(ptr nonnull %1)
%2 = tail call i1 (...) @cond() #2
br i1 %2, label %3, label %4
3:
call void @use(ptr nonnull %1) #2
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %1)
+ call void @llvm.lifetime.end.p0(ptr nonnull %1)
ret i32 0
4:
@@ -1137,7 +1137,7 @@ define dso_local i32 @diamond_lifetime() local_unnamed_addr sanitize_hwaddress {
; X86-SCOPE-NEXT: [[TMP9:%.*]] = shl i64 [[TMP6]], 57
; X86-SCOPE-NEXT: [[TMP10:%.*]] = or i64 [[TMP8]], [[TMP9]]
; X86-SCOPE-NEXT: [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP10]] to ptr
-; X86-SCOPE-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[TMP4]])
+; X86-SCOPE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP4]])
; X86-SCOPE-NEXT: [[TMP11:%.*]] = trunc i64 [[TMP6]] to i8
; X86-SCOPE-NEXT: call void @__hwasan_tag_memory(ptr [[TMP4]], i8 [[TMP11]], i64 16)
; X86-SCOPE-NEXT: [[TMP12:%.*]] = tail call i1 (...) @cond()
@@ -1146,12 +1146,12 @@ define dso_local i32 @diamond_lifetime() local_unnamed_addr sanitize_hwaddress {
; X86-SCOPE-NEXT: call void @use(ptr nonnull [[ALLOCA_0_HWASAN]])
; X86-SCOPE-NEXT: [[TMP14:%.*]] = trunc i64 [[HWASAN_UAR_TAG]] to i8
; X86-SCOPE-NEXT: call void @__hwasan_tag_memory(ptr [[TMP4]], i8 [[TMP14]], i64 16)
-; X86-SCOPE-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[TMP4]])
+; X86-SCOPE-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP4]])
; X86-SCOPE-NEXT: br label [[TMP17:%.*]]
; X86-SCOPE: 15:
; X86-SCOPE-NEXT: [[TMP16:%.*]] = trunc i64 [[HWASAN_UAR_TAG]] to i8
; X86-SCOPE-NEXT: call void @__hwasan_tag_memory(ptr [[TMP4]], i8 [[TMP16]], i64 16)
-; X86-SCOPE-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[TMP4]])
+; X86-SCOPE-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP4]])
; X86-SCOPE-NEXT: br label [[TMP17]]
; X86-SCOPE: 17:
; X86-SCOPE-NEXT: ret i32 0
@@ -1214,7 +1214,7 @@ define dso_local i32 @diamond_lifetime() local_unnamed_addr sanitize_hwaddress {
; AARCH64-SCOPE-NEXT: [[TMP23:%.*]] = shl i64 [[TMP20]], 56
; AARCH64-SCOPE-NEXT: [[TMP24:%.*]] = or i64 [[TMP22]], [[TMP23]]
; AARCH64-SCOPE-NEXT: [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP24]] to ptr
-; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[TMP18]])
+; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP18]])
; AARCH64-SCOPE-NEXT: [[TMP25:%.*]] = trunc i64 [[TMP20]] to i8
; AARCH64-SCOPE-NEXT: [[TMP26:%.*]] = ptrtoint ptr [[TMP18]] to i64
; AARCH64-SCOPE-NEXT: [[TMP27:%.*]] = and i64 [[TMP26]], 72057594037927935
@@ -1231,7 +1231,7 @@ define dso_local i32 @diamond_lifetime() local_unnamed_addr sanitize_hwaddress {
; AARCH64-SCOPE-NEXT: [[TMP35:%.*]] = lshr i64 [[TMP34]], 4
; AARCH64-SCOPE-NEXT: [[TMP36:%.*]] = getelementptr i8, ptr [[TMP17]], i64 [[TMP35]]
; AARCH64-SCOPE-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP36]], i8 [[TMP32]], i64 1, i1 false)
-; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[TMP18]])
+; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP18]])
; AARCH64-SCOPE-NEXT: br label [[TMP43:%.*]]
; AARCH64-SCOPE: 37:
; AARCH64-SCOPE-NEXT: [[TMP38:%.*]] = trunc i64 [[HWASAN_UAR_TAG]] to i8
@@ -1240,7 +1240,7 @@ define dso_local i32 @diamond_lifetime() local_unnamed_addr sanitize_hwaddress {
; AARCH64-SCOPE-NEXT: [[TMP41:%.*]] = lshr i64 [[TMP40]], 4
; AARCH64-SCOPE-NEXT: [[TMP42:%.*]] = getelementptr i8, ptr [[TMP17]], i64 [[TMP41]]
; AARCH64-SCOPE-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP42]], i8 [[TMP38]], i64 1, i1 false)
-; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[TMP18]])
+; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP18]])
; AARCH64-SCOPE-NEXT: br label [[TMP43]]
; AARCH64-SCOPE: 43:
; AARCH64-SCOPE-NEXT: ret i32 0
@@ -1327,7 +1327,7 @@ define dso_local i32 @diamond_lifetime() local_unnamed_addr sanitize_hwaddress {
; AARCH64-SHORT-SCOPE-NEXT: [[TMP23:%.*]] = shl i64 [[TMP20]], 56
; AARCH64-SHORT-SCOPE-NEXT: [[TMP24:%.*]] = or i64 [[TMP22]], [[TMP23]]
; AARCH64-SHORT-SCOPE-NEXT: [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP24]] to ptr
-; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[TMP18]])
+; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP18]])
; AARCH64-SHORT-SCOPE-NEXT: [[TMP25:%.*]] = trunc i64 [[TMP20]] to i8
; AARCH64-SHORT-SCOPE-NEXT: [[TMP26:%.*]] = ptrtoint ptr [[TMP18]] to i64
; AARCH64-SHORT-SCOPE-NEXT: [[TMP27:%.*]] = and i64 [[TMP26]], 72057594037927935
@@ -1347,7 +1347,7 @@ define dso_local i32 @diamond_lifetime() local_unnamed_addr sanitize_hwaddress {
; AARCH64-SHORT-SCOPE-NEXT: [[TMP37:%.*]] = lshr i64 [[TMP36]], 4
; AARCH64-SHORT-SCOPE-NEXT: [[TMP38:%.*]] = getelementptr i8, ptr [[TMP17]], i64 [[TMP37]]
; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP38]], i8 [[TMP34]], i64 1, i1 false)
-; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[TMP18]])
+; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP18]])
; AARCH64-SHORT-SCOPE-NEXT: br label [[TMP45:%.*]]
; AARCH64-SHORT-SCOPE: 39:
; AARCH64-SHORT-SCOPE-NEXT: [[TMP40:%.*]] = trunc i64 [[HWASAN_UAR_TAG]] to i8
@@ -1356,7 +1356,7 @@ define dso_local i32 @diamond_lifetime() local_unnamed_addr sanitize_hwaddress {
; AARCH64-SHORT-SCOPE-NEXT: [[TMP43:%.*]] = lshr i64 [[TMP42]], 4
; AARCH64-SHORT-SCOPE-NEXT: [[TMP44:%.*]] = getelementptr i8, ptr [[TMP17]], i64 [[TMP43]]
; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP44]], i8 [[TMP40]], i64 1, i1 false)
-; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[TMP18]])
+; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP18]])
; AARCH64-SHORT-SCOPE-NEXT: br label [[TMP45]]
; AARCH64-SHORT-SCOPE: 45:
; AARCH64-SHORT-SCOPE-NEXT: ret i32 0
@@ -1417,17 +1417,17 @@ define dso_local i32 @diamond_lifetime() local_unnamed_addr sanitize_hwaddress {
; AARCH64-SHORT-NOSCOPE-NEXT: ret i32 0
;
%1 = alloca i8, align 1
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %1)
+ call void @llvm.lifetime.start.p0(ptr nonnull %1)
%2 = tail call i1 (...) @cond() #2
br i1 %2, label %3, label %4
3:
call void @use(ptr nonnull %1) #2
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %1)
+ call void @llvm.lifetime.end.p0(ptr nonnull %1)
br label %5
4:
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %1)
+ call void @llvm.lifetime.end.p0(ptr nonnull %1)
br label %5
5:
@@ -1439,7 +1439,7 @@ declare dso_local i1 @cond(...) local_unnamed_addr
declare dso_local void @use(ptr) local_unnamed_addr
; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg-kmsan.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg-kmsan.ll
index 2189424..b64dfbf 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg-kmsan.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg-kmsan.ll
@@ -7,10 +7,10 @@ target triple = "aarch64-unknown-linux-gnu"
define i32 @foo(i32 %guard, ...) {
%vl = alloca %struct.__va_list, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
call void @llvm.va_end(ptr %vl)
- call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.end.p0(ptr %vl)
ret i32 0
}
@@ -45,7 +45,7 @@ define i32 @foo(i32 %guard, ...) {
; CHECK: [[STACK:%.*]] = getelementptr inbounds i8, ptr {{%.*}}, i32 192
; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 16 {{%.*}}, ptr align 16 [[STACK]], i64 {{%.*}}, i1 false)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg.ll
index 0bd0968..f3cceb7c 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg.ll
@@ -7,10 +7,10 @@ target triple = "aarch64-unknown-linux-gnu"
define i32 @foo(i32 %guard, ...) {
%vl = alloca %struct.__va_list, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
call void @llvm.va_end(ptr %vl)
- call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.end.p0(ptr %vl)
ret i32 0
}
@@ -45,10 +45,10 @@ define i32 @foo(i32 %guard, ...) {
; CHECK: [[STACK:%.*]] = getelementptr inbounds i8, ptr {{%.*}}, i32 192
; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 16 {{%.*}}, ptr align 16 [[STACK]], i64 {{%.*}}, i1 false)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
define i32 @bar() {
%1 = call i32 (i32, ...) @foo(i32 0, i32 1, i32 2, double 3.000000e+00,
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg_shadow.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg_shadow.ll
index 9133b32..06a34ac 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg_shadow.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg_shadow.ll
@@ -749,7 +749,7 @@ define linkonce_odr dso_local void @_Z5test2IcEvT_iz(i8 noundef %t, i32 noundef
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -809,26 +809,26 @@ define linkonce_odr dso_local void @_Z5test2IcEvT_iz(i8 noundef %t, i32 noundef
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca %"struct.std::__va_list", align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #5
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #5
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #3
+declare void @llvm.lifetime.start.p0(ptr nocapture) #3
declare void @llvm.va_start(ptr) #4
declare void @llvm.va_end(ptr) #4
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #3
+declare void @llvm.lifetime.end.p0(ptr nocapture) #3
define linkonce_odr dso_local void @_Z5test2IiEvT_iz(i32 noundef %t, i32 noundef %n, ...) sanitize_memory {
; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2IiEvT_iz(
@@ -842,7 +842,7 @@ define linkonce_odr dso_local void @_Z5test2IiEvT_iz(i32 noundef %t, i32 noundef
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -902,16 +902,16 @@ define linkonce_odr dso_local void @_Z5test2IiEvT_iz(i32 noundef %t, i32 noundef
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca %"struct.std::__va_list", align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #5
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #5
ret void
}
@@ -927,7 +927,7 @@ define linkonce_odr dso_local void @_Z5test2IfEvT_iz(float noundef %t, i32 nound
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -987,16 +987,16 @@ define linkonce_odr dso_local void @_Z5test2IfEvT_iz(float noundef %t, i32 nound
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca %"struct.std::__va_list", align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #5
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #5
ret void
}
@@ -1012,7 +1012,7 @@ define linkonce_odr dso_local void @_Z5test2IdEvT_iz(double noundef %t, i32 noun
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -1072,16 +1072,16 @@ define linkonce_odr dso_local void @_Z5test2IdEvT_iz(double noundef %t, i32 noun
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca %"struct.std::__va_list", align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #5
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #5
ret void
}
@@ -1097,7 +1097,7 @@ define linkonce_odr dso_local void @_Z5test2IeEvT_iz(fp128 noundef %t, i32 nound
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -1157,16 +1157,16 @@ define linkonce_odr dso_local void @_Z5test2IeEvT_iz(fp128 noundef %t, i32 nound
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca %"struct.std::__va_list", align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #5
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #5
ret void
}
@@ -1182,7 +1182,7 @@ define linkonce_odr dso_local void @_Z5test2I6IntIntEvT_iz(i64 %t.coerce, i32 no
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -1242,16 +1242,16 @@ define linkonce_odr dso_local void @_Z5test2I6IntIntEvT_iz(i64 %t.coerce, i32 no
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca %"struct.std::__va_list", align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #5
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #5
ret void
}
@@ -1267,7 +1267,7 @@ define linkonce_odr dso_local void @_Z5test2I10Int64Int64EvT_iz([2 x i64] %t.coe
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -1327,16 +1327,16 @@ define linkonce_odr dso_local void @_Z5test2I10Int64Int64EvT_iz([2 x i64] %t.coe
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca %"struct.std::__va_list", align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #5
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #5
ret void
}
@@ -1352,7 +1352,7 @@ define linkonce_odr dso_local void @_Z5test2I12DoubleDoubleEvT_iz([2 x double] a
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -1412,16 +1412,16 @@ define linkonce_odr dso_local void @_Z5test2I12DoubleDoubleEvT_iz([2 x double] a
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca %"struct.std::__va_list", align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #5
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #5
ret void
}
@@ -1437,7 +1437,7 @@ define linkonce_odr dso_local void @_Z5test2I7Double4EvT_iz([4 x double] alignst
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -1497,16 +1497,16 @@ define linkonce_odr dso_local void @_Z5test2I7Double4EvT_iz([4 x double] alignst
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca %"struct.std::__va_list", align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #5
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #5
ret void
}
@@ -1522,7 +1522,7 @@ define linkonce_odr dso_local void @_Z5test2I11DoubleFloatEvT_iz([2 x i64] %t.co
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -1582,16 +1582,16 @@ define linkonce_odr dso_local void @_Z5test2I11DoubleFloatEvT_iz([2 x i64] %t.co
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca %"struct.std::__va_list", align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #5
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #5
ret void
}
@@ -1607,7 +1607,7 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble2EvT_iz([2 x fp128] ali
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -1667,16 +1667,16 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble2EvT_iz([2 x fp128] ali
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca %"struct.std::__va_list", align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #5
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #5
ret void
}
@@ -1692,7 +1692,7 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble4EvT_iz([4 x fp128] ali
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -1752,16 +1752,16 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble4EvT_iz([4 x fp128] ali
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca %"struct.std::__va_list", align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #5
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #5
ret void
}
diff --git a/llvm/test/Instrumentation/MemorySanitizer/ARM32/vararg-arm32.ll b/llvm/test/Instrumentation/MemorySanitizer/ARM32/vararg-arm32.ll
index 52f4901..e05018c 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/ARM32/vararg-arm32.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/ARM32/vararg-arm32.ll
@@ -18,7 +18,7 @@ define i32 @foo(i32 %guard, ...) {
; CHECK-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], -2147483649
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP7]], i8 0, i64 8, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[VL]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VL]])
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[VL]] to i64
; CHECK-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], -2147483649
; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
@@ -32,15 +32,15 @@ define i32 @foo(i32 %guard, ...) {
; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP16]], ptr align 8 [[TMP3]], i64 [[TMP2]], i1 false)
; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VL]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[VL]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VL]])
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret i32 0
;
%vl = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
call void @llvm.va_end(ptr %vl)
- call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.end.p0(ptr %vl)
ret i32 0
}
@@ -49,10 +49,10 @@ define i32 @foo(i32 %guard, ...) {
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
define i32 @bar() {
; CHECK-LABEL: define i32 @bar() {
diff --git a/llvm/test/Instrumentation/MemorySanitizer/LoongArch/vararg-loongarch64.ll b/llvm/test/Instrumentation/MemorySanitizer/LoongArch/vararg-loongarch64.ll
index 23df3fc..e6d3a4b 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/LoongArch/vararg-loongarch64.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/LoongArch/vararg-loongarch64.ll
@@ -4,10 +4,10 @@ target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128"
target triple = "loongarch64-unknown-linux-gnu"
;; First, check allocation of the save area.
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
define i32 @foo(i32 %guard, ...) {
; CHECK-LABEL: @foo
; CHECK: [[TMP1:%.*]] = load {{.*}} @__msan_va_arg_overflow_size_tls
@@ -17,10 +17,10 @@ define i32 @foo(i32 %guard, ...) {
; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP3]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP4]], i1 false)
;
%vl = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
call void @llvm.va_end(ptr %vl)
- call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.end.p0(ptr %vl)
ret i32 0
}
diff --git a/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64.ll b/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64.ll
index 64a76c5..69a74a3 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64.ll
@@ -5,10 +5,10 @@ target triple = "mips64--linux"
define i32 @foo(i32 %guard, ...) {
%vl = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
call void @llvm.va_end(ptr %vl)
- call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.end.p0(ptr %vl)
ret i32 0
}
@@ -23,10 +23,10 @@ define i32 @foo(i32 %guard, ...) {
; CHECK: [[D:%.*]] = call i64 @llvm.umin.i64(i64 [[A]], i64 800)
; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[C]], ptr align 8 @__msan_va_arg_tls, i64 [[D]], i1 false)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
define i32 @bar() {
%1 = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
diff --git a/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64el.ll b/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64el.ll
index 9f3127e..b19da8e 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64el.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64el.ll
@@ -5,10 +5,10 @@ target triple = "mips64el--linux"
define i32 @foo(i32 %guard, ...) {
%vl = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
call void @llvm.va_end(ptr %vl)
- call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.end.p0(ptr %vl)
ret i32 0
}
@@ -23,10 +23,10 @@ define i32 @foo(i32 %guard, ...) {
; CHECK: [[D:%.*]] = call i64 @llvm.umin.i64(i64 [[A]], i64 800)
; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[C]], ptr align 8 @__msan_va_arg_tls, i64 [[D]], i1 false)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
define i32 @bar() {
%1 = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
diff --git a/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mips.ll b/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mips.ll
index 05a88f0..4d47b02 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mips.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mips.ll
@@ -18,7 +18,7 @@ define i32 @foo(i32 %guard, ...) {
; CHECK-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], -2147483649
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP7]], i8 0, i64 8, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[VL]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VL]])
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[VL]] to i64
; CHECK-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], -2147483649
; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
@@ -32,15 +32,15 @@ define i32 @foo(i32 %guard, ...) {
; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP16]], ptr align 8 [[TMP3]], i64 [[TMP2]], i1 false)
; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VL]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[VL]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VL]])
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret i32 0
;
%vl = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
call void @llvm.va_end(ptr %vl)
- call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.end.p0(ptr %vl)
ret i32 0
}
@@ -49,10 +49,10 @@ define i32 @foo(i32 %guard, ...) {
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
define i32 @bar() {
; CHECK-LABEL: define i32 @bar() {
diff --git a/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mipsel.ll b/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mipsel.ll
index 971b25f..98294e7 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mipsel.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mipsel.ll
@@ -18,7 +18,7 @@ define i32 @foo(i32 %guard, ...) {
; CHECK-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], -2147483649
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP7]], i8 0, i64 8, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[VL]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VL]])
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[VL]] to i64
; CHECK-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], -2147483649
; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
@@ -32,15 +32,15 @@ define i32 @foo(i32 %guard, ...) {
; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP16]], ptr align 8 [[TMP3]], i64 [[TMP2]], i1 false)
; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VL]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[VL]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VL]])
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret i32 0
;
%vl = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
call void @llvm.va_end(ptr %vl)
- call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.end.p0(ptr %vl)
ret i32 0
}
@@ -49,10 +49,10 @@ define i32 @foo(i32 %guard, ...) {
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
define i32 @bar() {
; CHECK-LABEL: define i32 @bar() {
diff --git a/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64.ll b/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64.ll
index 45e8b2d..9351067 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64.ll
@@ -5,10 +5,10 @@ target triple = "powerpc64--linux"
define i32 @foo(i32 %guard, ...) {
%vl = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
call void @llvm.va_end(ptr %vl)
- call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.end.p0(ptr %vl)
ret i32 0
}
@@ -23,10 +23,10 @@ define i32 @foo(i32 %guard, ...) {
; CHECK: [[D:%.*]] = call i64 @llvm.umin.i64(i64 [[A]], i64 800)
; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[C]], ptr align 8 @__msan_va_arg_tls, i64 [[D]], i1 false)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
define i32 @bar() {
%1 = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
diff --git a/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64le.ll b/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64le.ll
index d6b956c..4151f3b 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64le.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64le.ll
@@ -5,10 +5,10 @@ target triple = "powerpc64le--linux"
define i32 @foo(i32 %guard, ...) {
%vl = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
call void @llvm.va_end(ptr %vl)
- call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.end.p0(ptr %vl)
ret i32 0
}
@@ -23,10 +23,10 @@ define i32 @foo(i32 %guard, ...) {
; CHECK: [[D:%.*]] = call i64 @llvm.umin.i64(i64 [[A]], i64 800)
; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[C]], ptr align 8 @__msan_va_arg_tls, i64 [[D]], i1 false)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
define i32 @bar() {
%1 = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
diff --git a/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppc.ll b/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppc.ll
index 246db9d..29d1fbd 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppc.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppc.ll
@@ -18,7 +18,7 @@ define i32 @foo(i32 %guard, ...) {
; CHECK-NEXT: [[TMP5:%.*]] = and i32 [[TMP4]], 2147483647
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i32 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i32(ptr align 8 [[TMP6]], i8 0, i32 4, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[VL]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VL]])
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[VL]] to i32
; CHECK-NEXT: [[TMP8:%.*]] = and i32 [[TMP7]], 2147483647
; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i32 [[TMP8]] to ptr
@@ -50,15 +50,15 @@ define i32 @foo(i32 %guard, ...) {
; CHECK-NEXT: [[TMP31:%.*]] = inttoptr i32 [[TMP30]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[TMP17]], ptr align 4 [[TMP31]], i32 [[TMP21]], i1 false)
; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VL]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[VL]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VL]])
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret i32 0
;
%vl = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
call void @llvm.va_end(ptr %vl)
- call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.end.p0(ptr %vl)
ret i32 0
}
@@ -67,10 +67,10 @@ define i32 @foo(i32 %guard, ...) {
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
define i32 @bar() {
; CHECK-LABEL: define i32 @bar() {
diff --git a/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppcle.ll b/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppcle.ll
index 4a7b7b2..a4d2e16 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppcle.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppcle.ll
@@ -18,7 +18,7 @@ define i32 @foo(i32 %guard, ...) {
; CHECK-NEXT: [[TMP5:%.*]] = and i32 [[TMP4]], 2147483647
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i32 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i32(ptr align 8 [[TMP6]], i8 0, i32 4, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[VL]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VL]])
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[VL]] to i32
; CHECK-NEXT: [[TMP8:%.*]] = and i32 [[TMP7]], 2147483647
; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i32 [[TMP8]] to ptr
@@ -50,15 +50,15 @@ define i32 @foo(i32 %guard, ...) {
; CHECK-NEXT: [[TMP31:%.*]] = inttoptr i32 [[TMP30]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[TMP17]], ptr align 4 [[TMP31]], i32 [[TMP21]], i1 false)
; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VL]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[VL]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VL]])
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret i32 0
;
%vl = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
call void @llvm.va_end(ptr %vl)
- call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.end.p0(ptr %vl)
ret i32 0
}
@@ -67,10 +67,10 @@ define i32 @foo(i32 %guard, ...) {
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
define i32 @bar() {
; CHECK-LABEL: define i32 @bar() {
diff --git a/llvm/test/Instrumentation/MemorySanitizer/RISCV32/vararg-riscv32.ll b/llvm/test/Instrumentation/MemorySanitizer/RISCV32/vararg-riscv32.ll
index 50e7be1..0c6e75c 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/RISCV32/vararg-riscv32.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/RISCV32/vararg-riscv32.ll
@@ -18,7 +18,7 @@ define i32 @foo(i32 %guard, ...) {
; CHECK-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], -2147483649
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP7]], i8 0, i64 8, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[VL]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VL]])
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[VL]] to i64
; CHECK-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], -2147483649
; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
@@ -32,15 +32,15 @@ define i32 @foo(i32 %guard, ...) {
; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP16]], ptr align 8 [[TMP3]], i64 [[TMP2]], i1 false)
; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VL]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[VL]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VL]])
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret i32 0
;
%vl = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
call void @llvm.va_end(ptr %vl)
- call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.end.p0(ptr %vl)
ret i32 0
}
@@ -49,10 +49,10 @@ define i32 @foo(i32 %guard, ...) {
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
define i32 @bar() {
; CHECK-LABEL: define i32 @bar() {
diff --git a/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg-kernel.ll b/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg-kernel.ll
index e0b5907..c340d15 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg-kernel.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg-kernel.ll
@@ -4,17 +4,17 @@ target datalayout = "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-a:8:16-n32:64"
target triple = "s390x-unknown-linux-gnu"
%struct.__va_list = type { i64, i64, ptr, ptr }
-declare void @llvm.lifetime.start.p0(i64, ptr)
+declare void @llvm.lifetime.start.p0(ptr)
declare void @llvm.va_start(ptr)
declare void @llvm.va_end(ptr)
-declare void @llvm.lifetime.end.p0(i64, ptr)
+declare void @llvm.lifetime.end.p0(ptr)
define i64 @foo(i64 %guard, ...) #1 {
%vl = alloca %struct.__va_list
- call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
call void @llvm.va_end(ptr %vl)
- call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.end.p0(ptr %vl)
ret i64 0
}
diff --git a/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg.ll b/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg.ll
index 009aef9..91b21ea 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg.ll
@@ -7,10 +7,10 @@ target triple = "s390x-unknown-linux-gnu"
define i64 @foo(i64 %guard, ...) {
%vl = alloca %struct.__va_list, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
call void @llvm.va_end(ptr %vl)
- call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.end.p0(ptr %vl)
ret i64 0
}
@@ -28,10 +28,10 @@ define i64 @foo(i64 %guard, ...) {
; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 {{%.*}}, ptr align 8 {{%.*}}, i64 160, i1 false)
; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 {{%.*}}, ptr align 8 {{%.*}}, i64 [[A]], i1 false)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
declare i32 @random_i32()
declare i64 @random_i64()
diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_call.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_call.ll
index 7a3f0dd..b61cb6a 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_call.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_call.ll
@@ -29,7 +29,7 @@ entry:
define dso_local i32 @sum(i32 %n, ...) local_unnamed_addr #0 {
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #2
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #2
call void @llvm.va_start(ptr nonnull %args)
%cmp9 = icmp sgt i32 %n, 0
br i1 %cmp9, label %for.body.lr.ph, label %for.end
@@ -85,13 +85,13 @@ vaarg.end: ; preds = %vaarg.in_mem, %vaar
for.end: ; preds = %vaarg.end, %entry
%sum.0.lcssa = phi i32 [ 0, %entry ], [ %add, %vaarg.end ]
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #2
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #2
ret i32 %sum.0.lcssa
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
; Function Attrs: nounwind
declare void @llvm.va_start(ptr) #2
@@ -100,7 +100,7 @@ declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
declare dso_local i80 @sum_i80(i32, ...) local_unnamed_addr
diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_shadow.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_shadow.ll
index 2051015..4bc14da 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_shadow.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_shadow.ll
@@ -551,7 +551,7 @@ define linkonce_odr dso_local void @_Z5test2IcEvT_iz(i8 noundef signext %t, i32
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -581,26 +581,26 @@ define linkonce_odr dso_local void @_Z5test2IcEvT_iz(i8 noundef signext %t, i32
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
declare void @llvm.va_start(ptr) #5
declare void @llvm.va_end(ptr) #5
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
define linkonce_odr dso_local void @_Z5test2IiEvT_iz(i32 noundef %t, i32 noundef %n, ...) sanitize_memory {
; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2IiEvT_iz(
@@ -614,7 +614,7 @@ define linkonce_odr dso_local void @_Z5test2IiEvT_iz(i32 noundef %t, i32 noundef
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -644,16 +644,16 @@ define linkonce_odr dso_local void @_Z5test2IiEvT_iz(i32 noundef %t, i32 noundef
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -669,7 +669,7 @@ define linkonce_odr dso_local void @_Z5test2IfEvT_iz(float noundef %t, i32 nound
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -699,16 +699,16 @@ define linkonce_odr dso_local void @_Z5test2IfEvT_iz(float noundef %t, i32 nound
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -724,7 +724,7 @@ define linkonce_odr dso_local void @_Z5test2IdEvT_iz(double noundef %t, i32 noun
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -754,16 +754,16 @@ define linkonce_odr dso_local void @_Z5test2IdEvT_iz(double noundef %t, i32 noun
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -779,7 +779,7 @@ define linkonce_odr dso_local void @_Z5test2IeEvT_iz(x86_fp80 noundef %t, i32 no
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -809,16 +809,16 @@ define linkonce_odr dso_local void @_Z5test2IeEvT_iz(x86_fp80 noundef %t, i32 no
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -834,7 +834,7 @@ define linkonce_odr dso_local void @_Z5test2I6IntIntEvT_iz(i64 %t.coerce, i32 no
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -864,16 +864,16 @@ define linkonce_odr dso_local void @_Z5test2I6IntIntEvT_iz(i64 %t.coerce, i32 no
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -889,7 +889,7 @@ define linkonce_odr dso_local void @_Z5test2I10Int64Int64EvT_iz(i64 %t.coerce0,
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -919,16 +919,16 @@ define linkonce_odr dso_local void @_Z5test2I10Int64Int64EvT_iz(i64 %t.coerce0,
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -944,7 +944,7 @@ define linkonce_odr dso_local void @_Z5test2I12DoubleDoubleEvT_iz(double %t.coer
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -974,16 +974,16 @@ define linkonce_odr dso_local void @_Z5test2I12DoubleDoubleEvT_iz(double %t.coer
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -999,7 +999,7 @@ define linkonce_odr dso_local void @_Z5test2I7Double4EvT_iz(ptr noundef byval(%s
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -1029,16 +1029,16 @@ define linkonce_odr dso_local void @_Z5test2I7Double4EvT_iz(ptr noundef byval(%s
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -1054,7 +1054,7 @@ define linkonce_odr dso_local void @_Z5test2I11DoubleFloatEvT_iz(double %t.coerc
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -1084,16 +1084,16 @@ define linkonce_odr dso_local void @_Z5test2I11DoubleFloatEvT_iz(double %t.coerc
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -1109,7 +1109,7 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble2EvT_iz(ptr noundef byv
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -1139,16 +1139,16 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble2EvT_iz(ptr noundef byv
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -1164,7 +1164,7 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble4EvT_iz(ptr noundef byv
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -1194,16 +1194,16 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble4EvT_iz(ptr noundef byv
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
diff --git a/llvm/test/Instrumentation/MemorySanitizer/alloca.ll b/llvm/test/Instrumentation/MemorySanitizer/alloca.ll
index 40ade5f..c05702b 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/alloca.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/alloca.ll
@@ -125,12 +125,12 @@ entry:
br label %another_bb
another_bb:
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %x)
+ call void @llvm.lifetime.start.p0(ptr nonnull %x)
store i32 7, ptr %x
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %x)
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %x)
+ call void @llvm.lifetime.end.p0(ptr nonnull %x)
+ call void @llvm.lifetime.start.p0(ptr nonnull %x)
store i32 8, ptr %x
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %x)
+ call void @llvm.lifetime.end.p0(ptr nonnull %x)
ret void
}
@@ -158,8 +158,10 @@ another_bb:
define void @lifetime_start_var(i64 %cnt) sanitize_memory {
entry:
%x = alloca i32, i64 %cnt, align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr nonnull %x)
- call void @llvm.lifetime.end.p0(i64 -1, ptr nonnull %x)
+ call void @llvm.lifetime.start.p0(ptr nonnull %x)
+ call void @llvm.lifetime.end.p0(ptr nonnull %x)
+ call void @llvm.lifetime.start.p0(ptr nonnull %x)
+ call void @llvm.lifetime.end.p0(ptr nonnull %x)
ret void
}
@@ -176,5 +178,5 @@ entry:
; CHECK: call void @llvm.lifetime.end
; CHECK: ret void
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
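
The lifetime-intrinsic edits in these sanitizer tests are all the same mechanical update: llvm.lifetime.start/end no longer take the leading i64 size operand, so every call site and declaration drops it. A minimal before/after sketch, reusing the 24-byte va_list alloca from the tests above (illustrative only, not part of the patch):

  %args = alloca [1 x %struct.__va_list_tag], align 16
  ; old form: the covered size was passed explicitly
  call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args)
  ; new form: the marker now applies to the whole underlying alloca
  call void @llvm.lifetime.start.p0(ptr nonnull %args)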
diff --git a/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_call.ll b/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_call.ll
index b27ef5d..2745939 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_call.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_call.ll
@@ -93,7 +93,7 @@ define dso_local i32 @sum(i32 %n, ...) local_unnamed_addr #0 {
; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[TMP6]], i8 0, i64 24, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP8:%.*]] = and i64 [[TMP7]], -2147483649
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
@@ -165,7 +165,7 @@ define dso_local i32 @sum(i32 %n, ...) local_unnamed_addr #0 {
; CHECK: [[FOR_END]]:
; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[ADD]], %[[VAARG_END]] ]
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret i32 [[SUM_0_LCSSA]]
;
@@ -186,7 +186,7 @@ define dso_local i32 @sum(i32 %n, ...) local_unnamed_addr #0 {
; ORIGIN-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], -4
; ORIGIN-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
; ORIGIN-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[TMP7]], i8 0, i64 24, i1 false)
-; ORIGIN-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; ORIGIN-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; ORIGIN-NEXT: [[TMP23:%.*]] = ptrtoint ptr [[ARGS]] to i64
; ORIGIN-NEXT: [[TMP11:%.*]] = and i64 [[TMP23]], -2147483649
; ORIGIN-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -266,7 +266,7 @@ define dso_local i32 @sum(i32 %n, ...) local_unnamed_addr #0 {
; ORIGIN: [[FOR_END]]:
; ORIGIN-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[ADD]], %[[VAARG_END]] ]
; ORIGIN-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; ORIGIN-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; ORIGIN-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; ORIGIN-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; ORIGIN-NEXT: store i32 0, ptr @__msan_retval_origin_tls, align 4
; ORIGIN-NEXT: ret i32 [[SUM_0_LCSSA]]
@@ -288,7 +288,7 @@ define dso_local i32 @sum(i32 %n, ...) local_unnamed_addr #0 {
; ORIGIN2-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], -4
; ORIGIN2-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
; ORIGIN2-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[TMP7]], i8 0, i64 24, i1 false)
-; ORIGIN2-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; ORIGIN2-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; ORIGIN2-NEXT: [[TMP23:%.*]] = ptrtoint ptr [[ARGS]] to i64
; ORIGIN2-NEXT: [[TMP11:%.*]] = and i64 [[TMP23]], -2147483649
; ORIGIN2-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -368,14 +368,14 @@ define dso_local i32 @sum(i32 %n, ...) local_unnamed_addr #0 {
; ORIGIN2: [[FOR_END]]:
; ORIGIN2-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[ADD]], %[[VAARG_END]] ]
; ORIGIN2-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; ORIGIN2-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; ORIGIN2-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; ORIGIN2-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; ORIGIN2-NEXT: store i32 0, ptr @__msan_retval_origin_tls, align 4
; ORIGIN2-NEXT: ret i32 [[SUM_0_LCSSA]]
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #2
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #2
call void @llvm.va_start(ptr nonnull %args)
%cmp9 = icmp sgt i32 %n, 0
br i1 %cmp9, label %for.body.lr.ph, label %for.end
@@ -419,13 +419,13 @@ vaarg.end: ; preds = %vaarg.in_mem, %vaar
for.end: ; preds = %vaarg.end, %entry
%sum.0.lcssa = phi i32 [ 0, %entry ], [ %add, %vaarg.end ]
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #2
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #2
ret i32 %sum.0.lcssa
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
; Function Attrs: nounwind
declare void @llvm.va_start(ptr) #2
@@ -434,7 +434,7 @@ declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
declare dso_local i80 @sum_i80(i32, ...) local_unnamed_addr
diff --git a/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_shadow.ll b/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_shadow.ll
index aedefca..74a6276 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_shadow.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_shadow.ll
@@ -562,7 +562,7 @@ define linkonce_odr dso_local void @_Z5test2IcEvT_iz(i8 noundef signext %t, i32
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -582,26 +582,26 @@ define linkonce_odr dso_local void @_Z5test2IcEvT_iz(i8 noundef signext %t, i32
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
declare void @llvm.va_start(ptr) #5
declare void @llvm.va_end(ptr) #5
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
define linkonce_odr dso_local void @_Z5test2IiEvT_iz(i32 noundef %t, i32 noundef %n, ...) sanitize_memory {
; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2IiEvT_iz(
@@ -614,7 +614,7 @@ define linkonce_odr dso_local void @_Z5test2IiEvT_iz(i32 noundef %t, i32 noundef
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -634,16 +634,16 @@ define linkonce_odr dso_local void @_Z5test2IiEvT_iz(i32 noundef %t, i32 noundef
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -658,7 +658,7 @@ define linkonce_odr dso_local void @_Z5test2IfEvT_iz(float noundef %t, i32 nound
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -678,16 +678,16 @@ define linkonce_odr dso_local void @_Z5test2IfEvT_iz(float noundef %t, i32 nound
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -702,7 +702,7 @@ define linkonce_odr dso_local void @_Z5test2IdEvT_iz(double noundef %t, i32 noun
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -722,16 +722,16 @@ define linkonce_odr dso_local void @_Z5test2IdEvT_iz(double noundef %t, i32 noun
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -746,7 +746,7 @@ define linkonce_odr dso_local void @_Z5test2IeEvT_iz(x86_fp80 noundef %t, i32 no
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -766,16 +766,16 @@ define linkonce_odr dso_local void @_Z5test2IeEvT_iz(x86_fp80 noundef %t, i32 no
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -790,7 +790,7 @@ define linkonce_odr dso_local void @_Z5test2I6IntIntEvT_iz(i64 %t.coerce, i32 no
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -810,16 +810,16 @@ define linkonce_odr dso_local void @_Z5test2I6IntIntEvT_iz(i64 %t.coerce, i32 no
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -834,7 +834,7 @@ define linkonce_odr dso_local void @_Z5test2I10Int64Int64EvT_iz(i64 %t.coerce0,
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -854,16 +854,16 @@ define linkonce_odr dso_local void @_Z5test2I10Int64Int64EvT_iz(i64 %t.coerce0,
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -878,7 +878,7 @@ define linkonce_odr dso_local void @_Z5test2I12DoubleDoubleEvT_iz(double %t.coer
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -898,16 +898,16 @@ define linkonce_odr dso_local void @_Z5test2I12DoubleDoubleEvT_iz(double %t.coer
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -922,7 +922,7 @@ define linkonce_odr dso_local void @_Z5test2I7Double4EvT_iz(ptr noundef byval(%s
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -942,16 +942,16 @@ define linkonce_odr dso_local void @_Z5test2I7Double4EvT_iz(ptr noundef byval(%s
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -966,7 +966,7 @@ define linkonce_odr dso_local void @_Z5test2I11DoubleFloatEvT_iz(double %t.coerc
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -986,16 +986,16 @@ define linkonce_odr dso_local void @_Z5test2I11DoubleFloatEvT_iz(double %t.coerc
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -1010,7 +1010,7 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble2EvT_iz(ptr noundef byv
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -1030,16 +1030,16 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble2EvT_iz(ptr noundef byv
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -1054,7 +1054,7 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble4EvT_iz(ptr noundef byv
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -1074,16 +1074,16 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble4EvT_iz(ptr noundef byv
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
diff --git a/llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll b/llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll
index f07f3ad..04fdd23 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll
@@ -664,8 +664,8 @@ entry:
declare i32 @NoSanitizeMemoryUndefHelper(i32 %x)
-declare void @llvm.lifetime.start.p0(i64 immarg %0, ptr nocapture %1)
-declare void @llvm.lifetime.end.p0(i64 immarg %0, ptr nocapture %1)
+declare void @llvm.lifetime.start.p0(ptr nocapture %1)
+declare void @llvm.lifetime.end.p0(ptr nocapture %1)
declare void @foo8(ptr nocapture)
@@ -674,7 +674,7 @@ define void @msan() sanitize_memory {
; CHECK-NEXT: entry:
; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]]
; CHECK-NEXT: [[TEXT:%.*]] = alloca i8, align 1, !dbg [[DBG1]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TEXT]]), !dbg [[DBG7]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TEXT]]), !dbg [[DBG7]]
; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[TEXT]] to i64, !dbg [[DBG7]]
; CHECK-NEXT: [[TMP1:%.*]] = xor i64 [[TMP0]], 87960930222080, !dbg [[DBG7]]
; CHECK-NEXT: [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr, !dbg [[DBG7]]
@@ -685,13 +685,13 @@ define void @msan() sanitize_memory {
; CHECK-NEXT: call void @__msan_set_alloca_origin_with_descr(ptr [[TEXT]], i64 1, ptr @[[GLOB6:[0-9]+]], ptr @[[GLOB7:[0-9]+]]), !dbg [[DBG7]]
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8, !dbg [[DBG8]]
; CHECK-NEXT: call void @foo8(ptr [[TEXT]]), !dbg [[DBG8]]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TEXT]]), !dbg
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TEXT]]), !dbg
; CHECK-NEXT: ret void, !dbg
;
entry:
%text = alloca i8, align 1, !dbg !10
- call void @llvm.lifetime.start.p0(i64 1, ptr %text), !dbg !11
+ call void @llvm.lifetime.start.p0(ptr %text), !dbg !11
call void @foo8(ptr %text), !dbg !12
- call void @llvm.lifetime.end.p0(i64 1, ptr %text), !dbg !13
+ call void @llvm.lifetime.end.p0(ptr %text), !dbg !13
ret void, !dbg !14
}
diff --git a/llvm/test/Instrumentation/MemorySanitizer/msan_llvm_launder_invariant.ll b/llvm/test/Instrumentation/MemorySanitizer/msan_llvm_launder_invariant.ll
index 2cc8fd6..5779367 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/msan_llvm_launder_invariant.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/msan_llvm_launder_invariant.ll
@@ -12,14 +12,14 @@ target triple = "x86_64-unknown-linux-gnu"
define dso_local ptr @_Z1fv() local_unnamed_addr #0 {
entry:
%p = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %p)
+ call void @llvm.lifetime.start.p0(ptr nonnull %p)
%0 = load i8, ptr @flag, align 1
%tobool = icmp ne i8 %0, 0
%call = call zeroext i1 @_Z2f1PPvb(ptr nonnull %p, i1 zeroext %tobool)
%1 = load ptr, ptr %p, align 8
%2 = call ptr @llvm.launder.invariant.group.p0(ptr %1)
%retval.0 = select i1 %call, ptr %2, ptr null
- call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %p)
+ call void @llvm.lifetime.end.p0(ptr nonnull %p)
ret ptr %retval.0
}
@@ -29,8 +29,8 @@ declare dso_local zeroext i1 @_Z2f1PPvb(ptr, i1 zeroext) local_unnamed_addr
declare ptr @llvm.launder.invariant.group.p0(ptr)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
attributes #0 = { sanitize_memory uwtable }
diff --git a/llvm/test/Instrumentation/TypeSanitizer/alloca.ll b/llvm/test/Instrumentation/TypeSanitizer/alloca.ll
index c53b006..deddecf 100644
--- a/llvm/test/Instrumentation/TypeSanitizer/alloca.ll
+++ b/llvm/test/Instrumentation/TypeSanitizer/alloca.ll
@@ -48,7 +48,7 @@ define void @alloca_lifetime_test(i1 %c) sanitize_type {
; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[TMP7]], [[SHADOW_BASE]]
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 80, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 10, ptr [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
; CHECK-NEXT: call void @alloca_test_use(ptr [[X]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[X]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = and i64 [[TMP10]], [[APP_MEM_MASK]]
@@ -56,7 +56,7 @@ define void @alloca_lifetime_test(i1 %c) sanitize_type {
; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[TMP12]], [[SHADOW_BASE]]
; CHECK-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP14]], i8 0, i64 80, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 10, ptr [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]])
; CHECK-NEXT: br i1 [[C:%.*]], label [[LOOP]], label [[EXIT:%.*]]
; CHECK: exit:
; CHECK-NEXT: ret void
@@ -66,9 +66,62 @@ entry:
br label %loop
loop:
- call void @llvm.lifetime.start.p0(i64 10, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
call void @alloca_test_use(ptr %x)
- call void @llvm.lifetime.end.p0(i64 10, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
+ br i1 %c, label %loop, label %exit
+
+exit:
+ ret void
+}
+
+define void @dynamic_alloca_lifetime_test(i1 %c, i64 %n) sanitize_type {
+; CHECK-LABEL: @dynamic_alloca_lifetime_test(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[APP_MEM_MASK:%.*]] = load i64, ptr @__tysan_app_memory_mask, align 8
+; CHECK-NEXT: [[SHADOW_BASE:%.*]] = load i64, ptr @__tysan_shadow_memory_address, align 8
+; CHECK-NEXT: [[X:%.*]] = alloca i32, i64 [[N:%.*]], align 1
+; CHECK-NEXT: [[TMP0:%.*]] = mul i64 [[N]], 4
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[X]] to i64
+; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], [[APP_MEM_MASK]]
+; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 3
+; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[TMP3]], [[SHADOW_BASE]]
+; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+; CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[TMP0]], 3
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 [[TMP6]], i1 false)
+; CHECK-NEXT: br label [[LOOP:%.*]]
+; CHECK: loop:
+; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[N]], 4
+; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[X]] to i64
+; CHECK-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], [[APP_MEM_MASK]]
+; CHECK-NEXT: [[TMP10:%.*]] = shl i64 [[TMP9]], 3
+; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], [[SHADOW_BASE]]
+; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT: [[TMP13:%.*]] = shl i64 [[TMP7]], 3
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP12]], i8 0, i64 [[TMP13]], i1 false)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
+; CHECK-NEXT: call void @alloca_test_use(ptr [[X]])
+; CHECK-NEXT: [[TMP14:%.*]] = mul i64 [[N]], 4
+; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[X]] to i64
+; CHECK-NEXT: [[TMP16:%.*]] = and i64 [[TMP15]], [[APP_MEM_MASK]]
+; CHECK-NEXT: [[TMP17:%.*]] = shl i64 [[TMP16]], 3
+; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP17]], [[SHADOW_BASE]]
+; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
+; CHECK-NEXT: [[TMP20:%.*]] = shl i64 [[TMP14]], 3
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP19]], i8 0, i64 [[TMP20]], i1 false)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]])
+; CHECK-NEXT: br i1 [[C:%.*]], label [[LOOP]], label [[EXIT:%.*]]
+; CHECK: exit:
+; CHECK-NEXT: ret void
+;
+entry:
+ %x = alloca i32, i64 %n, align 1
+ br label %loop
+
+loop:
+ call void @llvm.lifetime.start.p0(ptr %x)
+ call void @alloca_test_use(ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
br i1 %c, label %loop, label %exit
exit:
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3p_err.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3p_err.s
new file mode 100644
index 0000000..1ea64de
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3p_err.s
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_mc_test_checks.py UTC_ARGS: --version 5
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1250 -show-encoding %s 2>&1 | FileCheck --check-prefix=GFX12-ERR --implicit-check-not=error: --strict-whitespace %s
+
+v_pk_fma_f32 v[8:9], s[0:1], v[0:1], v[4:5]
+// GFX12-ERR: :[[@LINE-1]]:1: error: invalid op_sel operand
+
+v_pk_fma_f32 v[8:9], v[0:1], s[0:1], v[4:5]
+// GFX12-ERR: :[[@LINE-1]]:1: error: invalid op_sel operand
+
+v_pk_fma_f32 v[8:9], v[0:1], v[4:5], s[0:1]
+// GFX12-ERR: :[[@LINE-1]]:1: error: invalid op_sel operand
+
+v_pk_fma_f32 v[8:9], s[0:1], v[0:1], v[4:5] op_sel:[1,0,0] op_sel_hi:[0,0,0]
+// GFX12-ERR: :[[@LINE-1]]:45: error: invalid op_sel operand
+
+v_pk_fma_f32 v[8:9], s[0:1], v[0:1], v[4:5] op_sel:[1,0,0] op_sel_hi:[1,0,0]
+// GFX12-ERR: :[[@LINE-1]]:45: error: invalid op_sel operand
+
+v_pk_fma_f32 v[8:9], v[0:1], s[0:1], v[4:5] op_sel:[0,1,0] op_sel_hi:[0,0,0]
+// GFX12-ERR: :[[@LINE-1]]:45: error: invalid op_sel operand
+
+v_pk_fma_f32 v[8:9], v[0:1], v[4:5], s[0:1] op_sel:[0,0,1] op_sel_hi:[0,0,0]
+// GFX12-ERR: :[[@LINE-1]]:45: error: invalid op_sel operand
+
+v_pk_mul_f32 v[8:9], s[0:1], v[0:1]
+// GFX12-ERR: :[[@LINE-1]]:1: error: invalid op_sel operand
+
+v_pk_mul_f32 v[8:9], v[0:1], s[0:1]
+// GFX12-ERR: :[[@LINE-1]]:1: error: invalid op_sel operand
+
+v_pk_mul_f32 v[8:9], s[0:1], v[0:1] op_sel:[1,0] op_sel_hi:[0,0]
+// GFX12-ERR: :[[@LINE-1]]:37: error: invalid op_sel operand
+
+v_pk_mul_f32 v[8:9], v[0:1], s[0:1] op_sel:[0,1] op_sel_hi:[0,0]
+// GFX12-ERR: :[[@LINE-1]]:37: error: invalid op_sel operand
+
+v_pk_mul_f32 v[8:9], v[0:1], s[0:1] op_sel:[0,1] op_sel_hi:[0,1]
+// GFX12-ERR: :[[@LINE-1]]:37: error: invalid op_sel operand
+
+v_pk_add_f32 v[8:9], s[0:1], v[0:1]
+// GFX12-ERR: :[[@LINE-1]]:1: error: invalid op_sel operand
+
+v_pk_add_f32 v[8:9], v[0:1], s[0:1]
+// GFX12-ERR: :[[@LINE-1]]:1: error: invalid op_sel operand
+
+v_pk_add_f32 v[8:9], s[0:1], v[0:1] op_sel:[1,0] op_sel_hi:[0,0]
+// GFX12-ERR: :[[@LINE-1]]:37: error: invalid op_sel operand
+
+v_pk_add_f32 v[8:9], v[0:1], s[0:1] op_sel:[0,1] op_sel_hi:[0,0]
+// GFX12-ERR: :[[@LINE-1]]:37: error: invalid op_sel operand
+
+v_pk_add_f32 v[8:9], v[0:1], s[0:1] op_sel:[0,1] op_sel_hi:[0,1]
+// GFX12-ERR: :[[@LINE-1]]:37: error: invalid op_sel operand
+
+v_pk_fma_f32 v[8:9], exec, v[0:1], v[4:5]
+// GFX12-ERR: :[[@LINE-1]]:1: error: invalid op_sel operand
+
+v_pk_fma_f32 v[8:9], v[0:1], exec, v[4:5]
+// GFX12-ERR: :[[@LINE-1]]:1: error: invalid op_sel operand
+
+v_pk_fma_f32 v[8:9], v[0:1], v[4:5], exec
+// GFX12-ERR: :[[@LINE-1]]:1: error: invalid op_sel operand
+
+v_pk_mul_f32 v[8:9], exec, v[0:1]
+// GFX12-ERR: :[[@LINE-1]]:1: error: invalid op_sel operand
+
+v_pk_mul_f32 v[8:9], v[0:1], exec
+// GFX12-ERR: :[[@LINE-1]]:1: error: invalid op_sel operand
+
+v_pk_add_f32 v[8:9], exec, v[0:1]
+// GFX12-ERR: :[[@LINE-1]]:1: error: invalid op_sel operand
+
+v_pk_add_f32 v[8:9], v[0:1], exec
+// GFX12-ERR: :[[@LINE-1]]:1: error: invalid op_sel operand
diff --git a/llvm/test/MC/Disassembler/RISCV/riscv-mapping-symbols.s b/llvm/test/MC/Disassembler/RISCV/riscv-mapping-symbols.s
new file mode 100644
index 0000000..ff15008
--- /dev/null
+++ b/llvm/test/MC/Disassembler/RISCV/riscv-mapping-symbols.s
@@ -0,0 +1,20 @@
+# RUN: llvm-mc --triple=riscv32-unknown-none-elf %s -filetype=obj -o - \
+# RUN: | llvm-objdump -dr - \
+# RUN: | FileCheck %s
+# RUN: llvm-mc --triple=riscv64-unknown-none-elf %s -filetype=obj -o - \
+# RUN: | llvm-objdump -dr - \
+# RUN: | FileCheck %s
+
+
+ # CHECK: 00000013 nop
+ nop
+
+ # CHECK-NEXT: 55 55 55 55 .word 0x55555555
+ .word 0x55555555
+
+ # CHECK-NEXT: 00 00 00 00 .word 0x00000000
+ # CHECK-NEXT: R_RISCV_32 foo
+ .word foo
+
+ # CHECK-NEXT: 00000013 nop
+ nop
diff --git a/llvm/test/MC/ELF/many-instructions.s b/llvm/test/MC/ELF/many-instructions.s
index 843d35f..7c13c0d 100644
--- a/llvm/test/MC/ELF/many-instructions.s
+++ b/llvm/test/MC/ELF/many-instructions.s
@@ -1,4 +1,5 @@
-# REQUIRES: asserts
+## Checks the size of an internal MC structure that is different on 32-bit.
+# REQUIRES: asserts, llvm-64-bits
# RUN: llvm-mc -filetype=obj -triple=x86_64 %s -o /dev/null -debug-only=mc-dump 2>&1 | grep -E -o '[0-9]+ Data Size:[0-9]+' | FileCheck %s
## Test that encodeInstruction may cause a new fragment to be created.
diff --git a/llvm/test/MC/RISCV/Relocations/align-after-relax.s b/llvm/test/MC/RISCV/Relocations/align-after-relax.s
new file mode 100644
index 0000000..95bef51
--- /dev/null
+++ b/llvm/test/MC/RISCV/Relocations/align-after-relax.s
@@ -0,0 +1,50 @@
+# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+c,+relax %s --defsym LATE=1 -o %t1
+# RUN: llvm-objdump -dr --no-show-raw-insn -M no-aliases %t1 | FileCheck %s
+
+# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+c,+relax %s -o %t0
+# RUN: llvm-objdump -dr --no-show-raw-insn -M no-aliases %t0 | FileCheck %s --check-prefix=CHECK0
+
+# CHECK: 4: 00 00 01 00 .word 0x00010000
+# CHECK-EMPTY:
+# CHECK: 8: 78 56 34 12 .word 0x12345678
+# CHECK-NEXT: c: 00 00 00 00 .word 0x00000000
+# CHECK: 10: auipc ra, 0x0
+# CHECK-NEXT: R_RISCV_CALL_PLT foo
+# CHECK-NEXT: R_RISCV_RELAX *ABS*
+# CHECK: 18: c.nop
+# CHECK-NEXT: R_RISCV_ALIGN *ABS*+0x6
+
+## Alignment directives in a lower-numbered subsection may be conservatively treated as linker-relaxable.
+# CHECK0: 4: 00 00 01 00 .word 0x00010000
+# CHECK0-NEXT: 000000006: R_RISCV_ALIGN *ABS*+0x6
+# CHECK0-NEXT: 8: 13 00 00 00 .word 0x00000013
+# CHECK0: 14: auipc ra, 0x0
+# CHECK0: 1c: c.nop
+# CHECK0-NEXT: R_RISCV_ALIGN *ABS*+0x6
+
+.text 2
+.option push
+.option norelax
+## R_RISCV_ALIGN is required even if norelax, because it is after a linker-relaxable instruction.
+.balign 8
+l2:
+ .word 0x12345678
+.option pop
+
+.text 1
+ .org .+1
+ .org .+3
+.ifdef LATE
+ .org .+0
+.endif
+ call foo
+
+.text 0
+_start:
+ .space 6
+.option push
+.option norelax
+.balign 8
+l0:
+ .word 0x12345678
+.option pop
diff --git a/llvm/test/MC/RISCV/Relocations/align-norvc.s b/llvm/test/MC/RISCV/Relocations/align-norvc.s
new file mode 100644
index 0000000..c3fe71e
--- /dev/null
+++ b/llvm/test/MC/RISCV/Relocations/align-norvc.s
@@ -0,0 +1,23 @@
+## To ensure ALIGN relocations in norvc code can adapt to shrinking of preceding rvc code,
+## we generate $alignment-2 bytes of NOPs regardless of rvc.
+# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+relax %s -o %t
+# RUN: llvm-objdump -dr -M no-aliases %t | FileCheck %s
+
+# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+relax -riscv-align-rvc=0 %s -o %t0
+# RUN: llvm-objdump -dr -M no-aliases %t0 | FileCheck %s --check-prefix=CHECK0
+
+# CHECK: 00000000: R_RISCV_RELAX *ABS*
+# CHECK-NEXT: 4: 0001 <unknown>
+# CHECK-NEXT: 00000004: R_RISCV_ALIGN *ABS*+0x6
+# CHECK-NEXT: 6: 00000013 addi zero, zero, 0x0
+# CHECK-NEXT: a: 00000537 lui a0, 0x0
+
+# CHECK0: 00000000: R_RISCV_RELAX *ABS*
+# CHECK0-NEXT: 4: 00000013 addi zero, zero, 0x0
+# CHECK0-NEXT: 00000004: R_RISCV_ALIGN *ABS*+0x4
+# CHECK0-NEXT: 8: 00000537 lui a0, 0x0
+
+ lui a0, %hi(foo)
+ .option norvc
+.balign 8
+ lui a0, %hi(foo)
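
The R_RISCV_ALIGN addend updates in the tests that follow fall out of the padding rule stated at the top of align-norvc.s above: with relaxation enabled, an alignment directive now reserves alignment - 2 bytes of NOPs, because preceding compressed instructions may shrink by 2 bytes after linker relaxation. A quick sketch of the arithmetic behind the changed addends (not an exhaustive rule):

  .p2align 3  ->  8-byte alignment,  8 - 2 =  6 bytes of NOPs  (addend 0x4 becomes 0x6)
  .align 4    -> 16-byte alignment, 16 - 2 = 14 bytes of NOPs  (addend 0xC becomes 0xE)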
diff --git a/llvm/test/MC/RISCV/Relocations/mc-dump.s b/llvm/test/MC/RISCV/Relocations/mc-dump.s
index ddc0c7d..99d34b5 100644
--- a/llvm/test/MC/RISCV/Relocations/mc-dump.s
+++ b/llvm/test/MC/RISCV/Relocations/mc-dump.s
@@ -9,12 +9,12 @@
# CHECK-NEXT:0 Data LinkerRelaxable Size:8 [97,00,00,00,e7,80,00,00]
# CHECK-NEXT: Fixup @0 Value:specifier(19,ext) Kind:4023
# CHECK-NEXT: Symbol @0 $x
-# CHECK-NEXT:8 Align LinkerRelaxable Size:0+4 []
+# CHECK-NEXT:8 Align LinkerRelaxable Size:0+6 []
# CHECK-NEXT: Align:8 Fill:0 FillLen:1 MaxBytesToEmit:8 Nops
-# CHECK-NEXT: Fixup @0 Value:4 Kind:[[#]]
-# CHECK-NEXT:12 Align LinkerRelaxable Size:4+4 [13,05,30,00]
+# CHECK-NEXT: Fixup @0 Value:6 Kind:[[#]]
+# CHECK-NEXT:14 Align LinkerRelaxable Size:4+6 [13,05,30,00]
# CHECK-NEXT: Align:8 Fill:0 FillLen:1 MaxBytesToEmit:8 Nops
-# CHECK-NEXT: Fixup @4 Value:4 Kind:[[#]]
+# CHECK-NEXT: Fixup @4 Value:6 Kind:[[#]]
# CHECK-NEXT:]
call ext
diff --git a/llvm/test/MC/RISCV/align-option-relax.s b/llvm/test/MC/RISCV/align-option-relax.s
index 890e1e7..60cd55f 100644
--- a/llvm/test/MC/RISCV/align-option-relax.s
+++ b/llvm/test/MC/RISCV/align-option-relax.s
@@ -1,8 +1,21 @@
# RUN: llvm-mc -filetype=obj -triple riscv32 -mattr=-relax < %s \
# RUN: | llvm-readobj -r - | FileCheck %s
-# Check that .option relax overrides -mno-relax and enables R_RISCV_ALIGN
-# relocations.
-# CHECK: R_RISCV_ALIGN
- .option relax
- .align 4
+## .option relax overrides -mno-relax and enables R_RISCV_ALIGN/R_RISCV_RELAX relocations.
+# CHECK: .rela.text
+# CHECK: R_RISCV_CALL_PLT
+# CHECK-NEXT: R_RISCV_RELAX
+# CHECK-NEXT: R_RISCV_ALIGN
+.option relax
+call foo
+.align 4
+
+## Alignments before the first linker-relaxable instruction do not need relocations.
+# CHECK-NOT: .rela.text1
+.section .text1,"ax"
+.align 4
+nop
+
+# CHECK: .rela.text2
+.section .text2,"ax"
+call foo
diff --git a/llvm/test/MC/RISCV/align.s b/llvm/test/MC/RISCV/align.s
index da3b1aa..4d4d998 100644
--- a/llvm/test/MC/RISCV/align.s
+++ b/llvm/test/MC/RISCV/align.s
@@ -46,20 +46,21 @@
# type for .align N directive when linker relaxation enabled.
# Linker could satisfy alignment by removing NOPs after linker relaxation.
-# The first R_RISCV_ALIGN come from
-# MCELFStreamer::InitSections() emitCodeAlignment(getTextSectionAligntment()).
-# C-OR-ZCA-EXT-RELAX-RELOC: R_RISCV_ALIGN - 0x2
-# C-OR-ZCA-EXT-RELAX-INST: c.nop
test:
+## Start with a linker-relaxable instruction so that the following alignment can be relaxable.
+ call foo
+# NORELAX-RELOC: R_RISCV_CALL_PLT
+# C-OR-ZCA-EXT-NORELAX-RELOC: R_RISCV_CALL_PLT
+
.p2align 2
# If the +c extension is enabled, the text section will be 2-byte aligned, so
# one c.nop instruction is sufficient.
-# C-OR-ZCA-EXT-RELAX-RELOC-NOT: R_RISCV_ALIGN - 0x2
-# C-OR-ZCA-EXT-RELAX-INST-NOT: c.nop
+# C-OR-ZCA-EXT-RELAX-RELOC: R_RISCV_ALIGN - 0x2
+# C-OR-ZCA-EXT-RELAX-INST: c.nop
bne zero, a0, .LBB0_2
mv a0, zero
.p2align 3
-# RELAX-RELOC: R_RISCV_ALIGN - 0x4
+# RELAX-RELOC: R_RISCV_ALIGN - 0x6
# RELAX-INST: addi zero, zero, 0
# C-OR-ZCA-EXT-RELAX-RELOC: R_RISCV_ALIGN - 0x6
# C-OR-ZCA-EXT-RELAX-INST: c.nop
@@ -68,7 +69,7 @@ test:
add a0, a0, a1
.align 4
.LBB0_2:
-# RELAX-RELOC: R_RISCV_ALIGN - 0xC
+# RELAX-RELOC: R_RISCV_ALIGN - 0xE
# RELAX-INST: addi zero, zero, 0
# RELAX-INST: addi zero, zero, 0
# RELAX-INST: addi zero, zero, 0
@@ -84,7 +85,7 @@ test:
.p2align 3
.constant_pool:
.long 3126770193
-# RELAX-RELOC: R_RISCV_ALIGN - 0x4
+# RELAX-RELOC: R_RISCV_ALIGN - 0x6
# RELAX-INST: addi zero, zero, 0
# NORELAX-INST: addi zero, zero, 0
# C-OR-ZCA-EXT-RELAX-RELOC: R_RISCV_ALIGN - 0x6
@@ -136,16 +137,8 @@ data2:
add a0, a0, a1
## Branches crossing the linker-relaxable R_RISCV_ALIGN need relocations.
-# RELAX-RELOC: .rela.text3 {
-# RELAX-RELOC-NEXT: 0x4 R_RISCV_BRANCH .Ltmp[[#]] 0x0
-# RELAX-RELOC-NEXT: 0x8 R_RISCV_ALIGN - 0x4
-# RELAX-RELOC-NEXT: 0xC R_RISCV_BRANCH .Ltmp[[#]] 0x0
-# RELAX-RELOC-NEXT: }
-# C-OR-ZCA-EXT-RELAX-RELOC: .rela.text3 {
-# C-OR-ZCA-EXT-RELAX-RELOC-NEXT: 0x4 R_RISCV_BRANCH .Ltmp[[#]] 0x0
-# C-OR-ZCA-EXT-RELAX-RELOC-NEXT: 0x8 R_RISCV_ALIGN - 0x4
-# C-OR-ZCA-EXT-RELAX-RELOC-NEXT: 0xC R_RISCV_BRANCH .Ltmp[[#]] 0x0
-# C-OR-ZCA-EXT-RELAX-RELOC-NEXT: }
+# RELAX-RELOC-NOT: .rela.text3 {
+# C-OR-ZCA-EXT-RELAX-RELOC-NOT: .rela.text3 {
.section .text3, "ax"
bnez t1, 1f
bnez t2, 2f
@@ -157,14 +150,15 @@ data2:
## .text3 with a call at the start
# NORELAX-RELOC: .rela.text3a
-# C-OR-ZCA-EXT-NORELAX-RELOC: .rela.text3a
# RELAX-RELOC: .rela.text3a {
# RELAX-RELOC-NEXT: 0x0 R_RISCV_CALL_PLT foo 0x0
# RELAX-RELOC-NEXT: 0x0 R_RISCV_RELAX - 0x0
# RELAX-RELOC-NEXT: 0xC R_RISCV_BRANCH .Ltmp[[#]] 0x0
-# RELAX-RELOC-NEXT: 0x10 R_RISCV_ALIGN - 0x4
-# RELAX-RELOC-NEXT: 0x14 R_RISCV_BRANCH .Ltmp[[#]] 0x0
+# RELAX-RELOC-NEXT: 0x10 R_RISCV_ALIGN - 0x6
+# RELAX-RELOC-NEXT: 0x16 R_RISCV_BRANCH .Ltmp[[#]] 0x0
# RELAX-RELOC-NEXT: }
+# C-OR-ZCA-EXT-NORELAX-RELOC: .rela.text3a
+# C-OR-ZCA-EXT-RELAX-RELOC: .rela.text3a
.section .text3a, "ax"
call foo
bnez t1, 1f
@@ -177,11 +171,8 @@ bnez t1, 2b
## .text3 with a call at the end
# RELAX-RELOC: .rela.text3b {
-# RELAX-RELOC-NEXT: 0x4 R_RISCV_BRANCH .Ltmp[[#]] 0x0
-# RELAX-RELOC-NEXT: 0x8 R_RISCV_ALIGN - 0x4
-# RELAX-RELOC-NEXT: 0xC R_RISCV_BRANCH .Ltmp[[#]] 0x0
-# RELAX-RELOC-NEXT: 0x14 R_RISCV_CALL_PLT foo 0x0
-# RELAX-RELOC-NEXT: 0x14 R_RISCV_RELAX - 0x0
+# RELAX-RELOC-NEXT: 0x10 R_RISCV_CALL_PLT foo 0x0
+# RELAX-RELOC-NEXT: 0x10 R_RISCV_RELAX - 0x0
# RELAX-RELOC-NEXT: }
.section .text3b, "ax"
bnez t1, 1f
diff --git a/llvm/test/MC/RISCV/cfi-advance.s b/llvm/test/MC/RISCV/cfi-advance.s
index f3f8530..7643e01 100644
--- a/llvm/test/MC/RISCV/cfi-advance.s
+++ b/llvm/test/MC/RISCV/cfi-advance.s
@@ -7,7 +7,7 @@
# NORELAX: Relocation section '.rela.text1' at offset {{.*}} contains 1 entries:
# NORELAX-NEXT: Offset Info Type Sym. Value Symbol's Name + Addend
-# NORELAX-NEXT: 00000000 00000313 R_RISCV_CALL_PLT 00000004 .L0 + 0
+# NORELAX-NEXT: 00000000 00000313 R_RISCV_CALL_PLT 00000008 .L0 + 0
# NORELAX-EMPTY:
# RELAX: Relocation section '.rela.text1' at offset {{.*}} contains 2 entries:
# RELAX: R_RISCV_CALL_PLT
@@ -16,23 +16,25 @@
# NORELAX-NEXT: Relocation section '.rela.eh_frame' at offset {{.*}} contains 1 entries:
# NORELAX: Offset Info Type Sym. Value Symbol's Name + Addend
# NORELAX-NEXT: 0000001c 00000139 R_RISCV_32_PCREL 00000000 .L0 + 0
-# RELAX-NEXT: Relocation section '.rela.eh_frame' at offset {{.*}} contains 5 entries:
+# RELAX-NEXT: Relocation section '.rela.eh_frame' at offset {{.*}} contains 7 entries:
# RELAX: Offset Info Type Sym. Value Symbol's Name + Addend
# RELAX-NEXT: 0000001c 00000139 R_RISCV_32_PCREL 00000000 .L0 + 0
-# RELAX-NEXT: 00000020 00000c23 R_RISCV_ADD32 0001017a .L0 + 0
+# RELAX-NEXT: 00000020 00000d23 R_RISCV_ADD32 0001017c .L0 + 0
# RELAX-NEXT: 00000020 00000127 R_RISCV_SUB32 00000000 .L0 + 0
-# RELAX-NEXT: 00000035 00000b35 R_RISCV_SET6 00010176 .L0 + 0
-# RELAX-NEXT: 00000035 00000934 R_RISCV_SUB6 0001016e .L0 + 0
+# RELAX-NEXT: 00000026 00000536 R_RISCV_SET8 00000068 .L0 + 0
+# RELAX-NEXT: 00000026 00000125 R_RISCV_SUB8 00000000 .L0 + 0
+# RELAX-NEXT: 00000035 00000c35 R_RISCV_SET6 00010178 .L0 + 0
+# RELAX-NEXT: 00000035 00000a34 R_RISCV_SUB6 0001016e .L0 + 0
# CHECK-EMPTY:
-# NORELAX: Symbol table '.symtab' contains 13 entries:
-# RELAX: Symbol table '.symtab' contains 16 entries:
+# NORELAX: Symbol table '.symtab' contains 14 entries:
+# RELAX: Symbol table '.symtab' contains 18 entries:
# RELAX-NEXT: Num: Value Size Type Bind Vis Ndx Name
# RELAX-NEXT: 0: 00000000 0 NOTYPE LOCAL DEFAULT UND
# RELAX-NEXT: 1: 00000000 0 NOTYPE LOCAL DEFAULT 2 .L0 {{$}}
-# RELAX: 3: 00000004 0 NOTYPE LOCAL DEFAULT 2 .L0{{$}}
-# RELAX: 9: 0001016e 0 NOTYPE LOCAL DEFAULT 2 .L0 {{$}}
-# RELAX: 11: 00010176 0 NOTYPE LOCAL DEFAULT 2 .L0 {{$}}
-# RELAX: 12: 0001017a 0 NOTYPE LOCAL DEFAULT 2 .L0 {{$}}
+# RELAX: 3: 00000008 0 NOTYPE LOCAL DEFAULT 2 .L0{{$}}
+# RELAX: 10: 0001016e 0 NOTYPE LOCAL DEFAULT 2 .L0 {{$}}
+# RELAX: 12: 00010178 0 NOTYPE LOCAL DEFAULT 2 .L0 {{$}}
+# RELAX: 13: 0001017c 0 NOTYPE LOCAL DEFAULT 2 .L0 {{$}}
# CHECK-DWARFDUMP: DW_CFA_advance_loc1: 104
# CHECK-DWARFDUMP-NEXT: DW_CFA_def_cfa_offset: +8
@@ -48,11 +50,11 @@
.type test,@function
test:
.cfi_startproc
- nop
+ call foo
## This looks similar to fake label names ".L0 ". Even if this is ".L0 ",
## the assembler will not conflate it with fake labels.
.L0:
- .zero 100, 0x90
+ .zero 96, 0x90
.cfi_def_cfa_offset 8
nop
.zero 255, 0x90
diff --git a/llvm/test/MC/RISCV/large-instructions.s b/llvm/test/MC/RISCV/large-instructions.s
deleted file mode 100644
index b50dbde..0000000
--- a/llvm/test/MC/RISCV/large-instructions.s
+++ /dev/null
@@ -1,29 +0,0 @@
-# RUN: llvm-mc -filetype=obj -triple riscv32 < %s \
-# RUN: | llvm-objdump -d - | FileCheck %s
-
-# CHECK: 011f 4523 8967 <unknown>
-.byte 0x1f, 0x01, 0x23, 0x45, 0x67, 0x89
-
-# CHECK: 4523013f cdab8967 <unknown>
-.byte 0x3f, 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd
-
-# CHECK: 007f 4523 8967 cdab feef <unknown>
-.byte 0x7f, 0x00, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe
-
-# CHECK: 4523107f cdab8967 badcfeef <unknown>
-.byte 0x7f, 0x10, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba
-
-# CHECK: 207f 4523 8967 cdab feef badc 7698 <unknown>
-.byte 0x7f, 0x20, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76
-
-# CHECK: 4523307f cdab8967 badcfeef 32547698 <unknown>
-.byte 0x7f, 0x30, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32
-
-# CHECK: 407f 4523 8967 cdab feef badc 7698 3254 1210 <unknown>
-.byte 0x7f, 0x40, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10, 0x12
-
-# CHECK: 4523507f cdab8967 badcfeef 32547698 56341210 <unknown>
-.byte 0x7f, 0x50, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10, 0x12, 0x34, 0x56
-
-# CHECK: 607f 4523 8967 cdab feef badc 7698 3254 1210 5634 9a78 <unknown>
-.byte 0x7f, 0x60, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10, 0x12, 0x34, 0x56, 0x78, 0x9a
diff --git a/llvm/test/MC/RISCV/large-instructions.test b/llvm/test/MC/RISCV/large-instructions.test
new file mode 100644
index 0000000..b8396a9
--- /dev/null
+++ b/llvm/test/MC/RISCV/large-instructions.test
@@ -0,0 +1,60 @@
+# RUN: yaml2obj %s -o %t
+# RUN: llvm-objdump -d %t | FileCheck %s
+
+## This CHECKs objdump's handling of wide instruction encodings, and how it
+## groups the instruction bytes when disassembling.
+##
+## This is written in YAML because using `.byte` emits the wrong mapping
+## symbols.
+
+--- !ELF
+FileHeader:
+ Class: ELFCLASS32
+ Data: ELFDATA2LSB
+ Type: ET_REL
+ Machine: EM_RISCV
+ SectionHeaderStringTable: .strtab
+Sections:
+ - Name: .text
+ Type: SHT_PROGBITS
+ Flags: [ SHF_ALLOC, SHF_EXECINSTR ]
+ AddressAlign: 0x1
+ ContentArray: [
+ # CHECK: 011f 4523 8967 <unknown>
+ 0x1f, 0x01, 0x23, 0x45, 0x67, 0x89,
+
+ # CHECK: 4523013f cdab8967 <unknown>
+ 0x3f, 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd,
+
+ # CHECK: 007f 4523 8967 cdab feef <unknown>
+ 0x7f, 0x00, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe,
+
+ # CHECK: 4523107f cdab8967 badcfeef <unknown>
+ 0x7f, 0x10, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba,
+
+ # CHECK: 207f 4523 8967 cdab feef badc 7698 <unknown>
+ 0x7f, 0x20, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76,
+
+ # CHECK: 4523307f cdab8967 badcfeef 32547698 <unknown>
+ 0x7f, 0x30, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32,
+
+ # CHECK: 407f 4523 8967 cdab feef badc 7698 3254 1210 <unknown>
+ 0x7f, 0x40, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10, 0x12,
+
+ # CHECK: 4523507f cdab8967 badcfeef 32547698 56341210 <unknown>
+ 0x7f, 0x50, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10, 0x12, 0x34, 0x56,
+
+ # CHECK: 607f 4523 8967 cdab feef badc 7698 3254 1210 5634 9a78 <unknown>
+ 0x7f, 0x60, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10, 0x12, 0x34, 0x56, 0x78, 0x9a,
+ ]
+
+ - Type: SectionHeaderTable
+ Sections:
+ - Name: .strtab
+ - Name: .symtab
+ - Name: .text
+Symbols:
+ - Name: "$x"
+ Section: .text
+ Value: 0x0
+...
diff --git a/llvm/test/MC/RISCV/nop-slide.s b/llvm/test/MC/RISCV/nop-slide.s
index 4dc888b..9e7401d 100644
--- a/llvm/test/MC/RISCV/nop-slide.s
+++ b/llvm/test/MC/RISCV/nop-slide.s
@@ -1,5 +1,5 @@
-# RUN: llvm-mc -triple riscv64 -mattr +c,-relax -filetype obj -o - %s | llvm-objdump -d - | FileCheck %s -check-prefix CHECK-RVC-NORELAX
-# RUN: llvm-mc -triple riscv64 -mattr +c,+relax -filetype obj -o - %s | llvm-objdump -d - | FileCheck %s -check-prefix CHECK-RVC-RELAX
+# RUN: llvm-mc -triple riscv64 -mattr +c,-relax -filetype obj -o - %s | llvm-objdump -d - | FileCheck %s
+# RUN: llvm-mc -triple riscv64 -mattr +c,+relax -filetype obj -o - %s | llvm-objdump -d - | FileCheck %s
# RUN: llvm-mc -triple riscv64 -mattr -c,-relax -filetype obj -o - %s | llvm-objdump -d - | FileCheck %s
# RUN: llvm-mc -triple riscv64 -mattr -c,+relax -filetype obj -o - %s | llvm-objdump -d - | FileCheck %s
@@ -9,19 +9,6 @@
.balign 4
auipc a0, 0
-# CHECK-RVC-NORELAX: 0000000000000000 <.text>:
-# CHECK-RVC-NORELAX-NEXT: 0: 0000 unimp
-# CHECK-RVC-NORELAX-NEXT: 2: 0001 nop
-# CHECK-RVC-NORELAX-NEXT: 4: 00000517 auipc a0, 0x0
-
-# CHECK-RVC-RELAX: 0000000000000000 <.text>:
-# CHECK-RVC-RELAX-NEXT: 0: 0001 nop
-# CHECK-RVC-RELAX-NEXT: 2: 0100 addi s0, sp, 0x80
-# CHECK-RVC-RELAX-NEXT: 4: 1700 addi s0, sp, 0x3a0
-# CHECK-RVC-RELAX-NEXT: 6: 0005 c.nop 0x1
-# CHECK-RVC-RELAX-NEXT: 8: 00 <unknown>
-
# CHECK: 0000000000000000 <.text>:
-# CHECK-NEXT: 0: 0000 <unknown>
-# CHECK-NEXT: 2: 0000 <unknown>
+# CHECK-NEXT: 0: 00 00 01 00 .word 0x00010000
# CHECK-NEXT: 4: 00000517 auipc a0, 0x0
diff --git a/llvm/test/MC/RISCV/rvv/vsetvl-invalid.s b/llvm/test/MC/RISCV/rvv/vsetvl-invalid.s
index b45f3f2..d97b538 100644
--- a/llvm/test/MC/RISCV/rvv/vsetvl-invalid.s
+++ b/llvm/test/MC/RISCV/rvv/vsetvl-invalid.s
@@ -4,37 +4,37 @@
# RUN: | llvm-objdump -d --mattr=+v - | FileCheck %s
# CHECK: vsetvli a1, a0, e64, m1, tu, mu
-.word 0x018575d7
+.insn 4, 0x018575d7
# CHECK: vsetvli a1, a0, 0x1c
-.word 0x01c575d7
+.insn 4, 0x01c575d7
# CHECK: vsetvli a1, a0, 0x24
-.word 0x024575d7
+.insn 4, 0x024575d7
# CHECK: vsetvli a1, a0, 0x29
-.word 0x029575d7
+.insn 4, 0x029575d7
# CHECK: vsetvli a1, a0, 0x110
-.word 0x110575d7
+.insn 4, 0x110575d7
# CHECK: vsetvli a1, a0, e64, mf8, tu, mu
-.word 0x01d575d7
+.insn 4, 0x01d575d7
# CHECK: vsetivli a1, 0x10, e8, m4, tu, mu
-.word 0xc02875d7
+.insn 4, 0xc02875d7
# CHECK: vsetivli a1, 0x10, 0xc
-.word 0xc0c875d7
+.insn 4, 0xc0c875d7
# CHECK: vsetivli a1, 0x10, 0x14
-.word 0xc14875d7
+.insn 4, 0xc14875d7
# CHECK: vsetivli a1, 0x10, 0x38
-.word 0xc38875d7
+.insn 4, 0xc38875d7
# CHECK: vsetivli a1, 0x10, 0x103
-.word 0xd03875d7
+.insn 4, 0xd03875d7
# CHECK: vsetivli a1, 0x10, e8, mf4, tu, mu
-.word 0xc06875d7
+.insn 4, 0xc06875d7
diff --git a/llvm/test/TableGen/intrinsic-attrs.td b/llvm/test/TableGen/intrinsic-attrs.td
index 92a90dc..bcded0cd2 100644
--- a/llvm/test/TableGen/intrinsic-attrs.td
+++ b/llvm/test/TableGen/intrinsic-attrs.td
@@ -27,14 +27,16 @@ def int_deref_ptr_ret : Intrinsic<[llvm_ptr_ty], [], [Dereferenceable<RetIndex,
// CHECK: static constexpr uint16_t IntrinsicsToAttributesMap[] = {
// CHECK: 0 << 8 | 0, // llvm.deref.ptr.ret
// CHECK: 1 << 8 | 1, // llvm.random.gen
+// CHECK: }; // IntrinsicsToAttributesMap
+
+// CHECK: static constexpr ArgNoAttrIDPair ArgAttrIdTable[] = {
+// CHECK-NEXT: {0, 0},
+// CHECK: }; // ArgAttrIdTable
+
+// CHECK: static constexpr ArgAttributesInfo ArgAttributesInfoTable[] = {
+// CHECK-NEXT: {0, 1},
+// CHECK-NEXT: {0, 0},
+// CHECK-NEXT: }; // ArgAttributesInfoTable
// CHECK: getAttributes(LLVMContext &C, ID id,
// CHECK-NEXT: FunctionType *FT) {
-// CHECK: case 1:
-// CHECK-NEXT: HasFnAttr = true;
-// CHECK-NEXT: break;
-// CHECK-NEXT: case 0:
-// CHECK-NEXT: AS[0] = {0, getIntrinsicArgAttributeSet(C, 0, FT->getContainedType(0))};
-// CHECK-NEXT: HasFnAttr = true;
-// CHECK-NEXT: NumAttrs = 1
-// CHECK-NEXT: break;
diff --git a/llvm/test/Transforms/AddDiscriminators/call.ll b/llvm/test/Transforms/AddDiscriminators/call.ll
index d093c65..93d3aa4 100644
--- a/llvm/test/Transforms/AddDiscriminators/call.ll
+++ b/llvm/test/Transforms/AddDiscriminators/call.ll
@@ -12,8 +12,8 @@ define void @_Z3foov() #0 !dbg !4 {
call void @_Z3barv(), !dbg !10
; CHECK: call void @_Z3barv(), !dbg ![[CALL0:[0-9]+]]
%a = alloca [100 x i8], align 16
- call void @llvm.lifetime.start.p0(i64 100, ptr %a), !dbg !11
- call void @llvm.lifetime.end.p0(i64 100, ptr %a), !dbg !11
+ call void @llvm.lifetime.start.p0(ptr %a), !dbg !11
+ call void @llvm.lifetime.end.p0(ptr %a), !dbg !11
call void @_Z3barv(), !dbg !11
; CHECK: call void @_Z3barv(), !dbg ![[CALL1:[0-9]+]]
call void @_Z3barv(), !dbg !12
@@ -22,8 +22,8 @@ define void @_Z3foov() #0 !dbg !4 {
}
declare void @_Z3barv() #1
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind argmemonly
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind argmemonly
+declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind argmemonly
+declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind argmemonly
attributes #0 = { uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/llvm/test/Transforms/AtomicExpand/PowerPC/cmpxchg.ll b/llvm/test/Transforms/AtomicExpand/PowerPC/cmpxchg.ll
index cc51a00d..9bf8a51 100644
--- a/llvm/test/Transforms/AtomicExpand/PowerPC/cmpxchg.ll
+++ b/llvm/test/Transforms/AtomicExpand/PowerPC/cmpxchg.ll
@@ -31,11 +31,11 @@ define i1 @test_cmpxchg_seq_cst(ptr %addr, i128 %desire, i128 %new) {
; PWR7-LABEL: @test_cmpxchg_seq_cst(
; PWR7-NEXT: entry:
; PWR7-NEXT: [[TMP0:%.*]] = alloca i128, align 16
-; PWR7-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[TMP0]])
+; PWR7-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP0]])
; PWR7-NEXT: store i128 [[DESIRE:%.*]], ptr [[TMP0]], align 16
; PWR7-NEXT: [[TMP1:%.*]] = call zeroext i1 @__atomic_compare_exchange_16(ptr [[ADDR:%.*]], ptr [[TMP0]], i128 [[NEW:%.*]], i32 5, i32 5)
; PWR7-NEXT: [[TMP2:%.*]] = load i128, ptr [[TMP0]], align 16
-; PWR7-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[TMP0]])
+; PWR7-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP0]])
; PWR7-NEXT: [[TMP3:%.*]] = insertvalue { i128, i1 } poison, i128 [[TMP2]], 0
; PWR7-NEXT: [[TMP4:%.*]] = insertvalue { i128, i1 } [[TMP3]], i1 [[TMP1]], 1
; PWR7-NEXT: [[SUCC:%.*]] = extractvalue { i128, i1 } [[TMP4]], 1
diff --git a/llvm/test/Transforms/AtomicExpand/RISCV/atomicrmw-fp.ll b/llvm/test/Transforms/AtomicExpand/RISCV/atomicrmw-fp.ll
index 2cbb179..60fb248 100644
--- a/llvm/test/Transforms/AtomicExpand/RISCV/atomicrmw-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/RISCV/atomicrmw-fp.ll
@@ -9,12 +9,12 @@ define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
; CHECK: atomicrmw.start:
; CHECK-NEXT: [[LOADED:%.*]] = phi float [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
; CHECK-NEXT: [[NEW:%.*]] = fadd float [[LOADED]], [[VALUE:%.*]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store float [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = bitcast float [[NEW]] to i32
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[PTR]], ptr [[TMP1]], i32 [[TMP3]], i32 5, i32 5)
; CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { float, i1 } poison, float [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { float, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { float, i1 } [[TMP7]], 1
@@ -35,12 +35,12 @@ define float @test_atomicrmw_fsub_f32(ptr %ptr, float %value) {
; CHECK: atomicrmw.start:
; CHECK-NEXT: [[LOADED:%.*]] = phi float [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
; CHECK-NEXT: [[NEW:%.*]] = fsub float [[LOADED]], [[VALUE:%.*]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store float [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = bitcast float [[NEW]] to i32
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[PTR]], ptr [[TMP1]], i32 [[TMP3]], i32 5, i32 5)
; CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { float, i1 } poison, float [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { float, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { float, i1 } [[TMP7]], 1
diff --git a/llvm/test/Transforms/AtomicExpand/SPARC/libcalls.ll b/llvm/test/Transforms/AtomicExpand/SPARC/libcalls.ll
index 682c1e6..1d6a32c 100644
--- a/llvm/test/Transforms/AtomicExpand/SPARC/libcalls.ll
+++ b/llvm/test/Transforms/AtomicExpand/SPARC/libcalls.ll
@@ -38,11 +38,11 @@ define i16 @test_exchange_i16(ptr %arg, i16 %val) {
; CHECK-LABEL: @test_cmpxchg_i16(
; CHECK: %1 = alloca i16, align 2
-; CHECK: call void @llvm.lifetime.start.p0(i64 2, ptr %1)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %1)
; CHECK: store i16 %old, ptr %1, align 2
; CHECK: %2 = call zeroext i1 @__atomic_compare_exchange_2(ptr %arg, ptr %1, i16 %new, i32 5, i32 0)
; CHECK: %3 = load i16, ptr %1, align 2
-; CHECK: call void @llvm.lifetime.end.p0(i64 2, ptr %1)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %1)
; CHECK: %4 = insertvalue { i16, i1 } poison, i16 %3, 0
; CHECK: %5 = insertvalue { i16, i1 } %4, i1 %2, 1
; CHECK: %ret = extractvalue { i16, i1 } %5, 0
@@ -68,10 +68,10 @@ define i16 @test_add_i16(ptr %arg, i16 %val) {
; CHECK-LABEL: @test_load_i128(
; CHECK: %1 = alloca i128, align 8
-; CHECK: call void @llvm.lifetime.start.p0(i64 16, ptr %1)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %1)
; CHECK: call void @__atomic_load(i32 16, ptr %arg, ptr %1, i32 5)
; CHECK: %2 = load i128, ptr %1, align 8
-; CHECK: call void @llvm.lifetime.end.p0(i64 16, ptr %1)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %1)
; CHECK: ret i128 %2
define i128 @test_load_i128(ptr %arg) {
%ret = load atomic i128, ptr %arg seq_cst, align 16
@@ -80,10 +80,10 @@ define i128 @test_load_i128(ptr %arg) {
; CHECK-LABEL: @test_store_i128(
; CHECK: %1 = alloca i128, align 8
-; CHECK: call void @llvm.lifetime.start.p0(i64 16, ptr %1)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %1)
; CHECK: store i128 %val, ptr %1, align 8
; CHECK: call void @__atomic_store(i32 16, ptr %arg, ptr %1, i32 5)
-; CHECK: call void @llvm.lifetime.end.p0(i64 16, ptr %1)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %1)
; CHECK: ret void
define void @test_store_i128(ptr %arg, i128 %val) {
store atomic i128 %val, ptr %arg seq_cst, align 16
@@ -92,14 +92,14 @@ define void @test_store_i128(ptr %arg, i128 %val) {
; CHECK-LABEL: @test_exchange_i128(
; CHECK: %1 = alloca i128, align 8
-; CHECK: call void @llvm.lifetime.start.p0(i64 16, ptr %1)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %1)
; CHECK: store i128 %val, ptr %1, align 8
; CHECK: %2 = alloca i128, align 8
-; CHECK: call void @llvm.lifetime.start.p0(i64 16, ptr %2)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %2)
; CHECK: call void @__atomic_exchange(i32 16, ptr %arg, ptr %1, ptr %2, i32 5)
-; CHECK: call void @llvm.lifetime.end.p0(i64 16, ptr %1)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %1)
; CHECK: %3 = load i128, ptr %2, align 8
-; CHECK: call void @llvm.lifetime.end.p0(i64 16, ptr %2)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %2)
; CHECK: ret i128 %3
define i128 @test_exchange_i128(ptr %arg, i128 %val) {
%ret = atomicrmw xchg ptr %arg, i128 %val seq_cst
@@ -108,15 +108,15 @@ define i128 @test_exchange_i128(ptr %arg, i128 %val) {
; CHECK-LABEL: @test_cmpxchg_i128(
; CHECK: %1 = alloca i128, align 8
-; CHECK: call void @llvm.lifetime.start.p0(i64 16, ptr %1)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %1)
; CHECK: store i128 %old, ptr %1, align 8
; CHECK: %2 = alloca i128, align 8
-; CHECK: call void @llvm.lifetime.start.p0(i64 16, ptr %2)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %2)
; CHECK: store i128 %new, ptr %2, align 8
; CHECK: %3 = call zeroext i1 @__atomic_compare_exchange(i32 16, ptr %arg, ptr %1, ptr %2, i32 5, i32 0)
-; CHECK: call void @llvm.lifetime.end.p0(i64 16, ptr %2)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %2)
; CHECK: %4 = load i128, ptr %1, align 8
-; CHECK: call void @llvm.lifetime.end.p0(i64 16, ptr %1)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %1)
; CHECK: %5 = insertvalue { i128, i1 } poison, i128 %4, 0
; CHECK: %6 = insertvalue { i128, i1 } %5, i1 %3, 1
; CHECK: %ret = extractvalue { i128, i1 } %6, 0
@@ -139,14 +139,14 @@ define i128 @test_cmpxchg_i128(ptr %arg, i128 %old, i128 %new) {
; CHECK:atomicrmw.start:
; CHECK: %loaded = phi i128 [ %3, %0 ], [ %newloaded, %atomicrmw.start ]
; CHECK: %new = add i128 %loaded, %val
-; CHECK: call void @llvm.lifetime.start.p0(i64 16, ptr %1)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %1)
; CHECK: store i128 %loaded, ptr %1, align 8
-; CHECK: call void @llvm.lifetime.start.p0(i64 16, ptr %2)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %2)
; CHECK: store i128 %new, ptr %2, align 8
; CHECK: %4 = call zeroext i1 @__atomic_compare_exchange(i32 16, ptr %arg, ptr %1, ptr %2, i32 5, i32 5)
-; CHECK: call void @llvm.lifetime.end.p0(i64 16, ptr %2)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %2)
; CHECK: %5 = load i128, ptr %1, align 8
-; CHECK: call void @llvm.lifetime.end.p0(i64 16, ptr %1)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %1)
; CHECK: %6 = insertvalue { i128, i1 } poison, i128 %5, 0
; CHECK: %7 = insertvalue { i128, i1 } %6, i1 %4, 1
; CHECK: %success = extractvalue { i128, i1 } %7, 1
@@ -181,12 +181,12 @@ define void @test_store_double(ptr %arg, double %val) {
; CHECK-LABEL: @test_cmpxchg_ptr(
; CHECK: %1 = alloca ptr, align 4
-; CHECK: call void @llvm.lifetime.start.p0(i64 4, ptr %1)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %1)
; CHECK: store ptr %old, ptr %1, align 4
; CHECK: %2 = ptrtoint ptr %new to i32
; CHECK: %3 = call zeroext i1 @__atomic_compare_exchange_4(ptr %arg, ptr %1, i32 %2, i32 5, i32 2)
; CHECK: %4 = load ptr, ptr %1, align 4
-; CHECK: call void @llvm.lifetime.end.p0(i64 4, ptr %1)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %1)
; CHECK: %5 = insertvalue { ptr, i1 } poison, ptr %4, 0
; CHECK: %6 = insertvalue { ptr, i1 } %5, i1 %3, 1
; CHECK: %ret = extractvalue { ptr, i1 } %6, 0
@@ -202,10 +202,10 @@ define ptr @test_cmpxchg_ptr(ptr %arg, ptr %old, ptr %new) {
; CHECK-LABEL: @test_store_fp128
; CHECK: %1 = alloca fp128, align 8
-; CHECK: call void @llvm.lifetime.start.p0(i64 16, ptr %1)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %1)
; CHECK: store fp128 %val, ptr %1, align 8
; CHECK: call void @__atomic_store(i32 16, ptr %arg, ptr %1, i32 5)
-; CHECK: call void @llvm.lifetime.end.p0(i64 16, ptr %1)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %1)
; CHECK: ret void
define void @test_store_fp128(ptr %arg, fp128 %val) {
store atomic fp128 %val, ptr %arg seq_cst, align 16
diff --git a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-libcall.ll b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-libcall.ll
index 20a9e9f..fda296b 100644
--- a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-libcall.ll
+++ b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-libcall.ll
@@ -5,10 +5,10 @@
define i256 @atomic_load256_libcall(ptr %ptr) nounwind {
; CHECK-LABEL: @atomic_load256_libcall(
; CHECK-NEXT: [[TMP1:%.*]] = alloca i256, align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: call void @__atomic_load(i32 32, ptr [[PTR:%.*]], ptr [[TMP1]], i32 0)
; CHECK-NEXT: [[TMP2:%.*]] = load i256, ptr [[TMP1]], align 16
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: ret i256 [[TMP2]]
;
%result = load atomic i256, ptr %ptr unordered, align 16
@@ -19,10 +19,10 @@ define i256 @atomic_load256_libcall_as1(ptr addrspace(1) %ptr) nounwind {
; CHECK-LABEL: @atomic_load256_libcall_as1(
; CHECK-NEXT: [[TMP1:%.*]] = addrspacecast ptr addrspace(1) [[PTR:%.*]] to ptr
; CHECK-NEXT: [[TMP2:%.*]] = alloca i256, align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[TMP2]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP2]])
; CHECK-NEXT: call void @__atomic_load(i32 32, ptr [[TMP1]], ptr [[TMP2]], i32 0)
; CHECK-NEXT: [[TMP3:%.*]] = load i256, ptr [[TMP2]], align 16
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[TMP2]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP2]])
; CHECK-NEXT: ret i256 [[TMP3]]
;
%result = load atomic i256, ptr addrspace(1) %ptr unordered, align 16
diff --git a/llvm/test/Transforms/AtomicExpand/Xtensa/atomicrmw-expand.ll b/llvm/test/Transforms/AtomicExpand/Xtensa/atomicrmw-expand.ll
index 59916cc..647f187 100644
--- a/llvm/test/Transforms/AtomicExpand/Xtensa/atomicrmw-expand.ll
+++ b/llvm/test/Transforms/AtomicExpand/Xtensa/atomicrmw-expand.ll
@@ -361,11 +361,11 @@ define i8 @atomicrmw_max_i8_monotonic(ptr %a, i8 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i8 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 0, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
@@ -388,11 +388,11 @@ define i8 @atomicrmw_max_i8_acquire(ptr %a, i8 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i8 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 2, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
@@ -415,11 +415,11 @@ define i8 @atomicrmw_max_i8_release(ptr %a, i8 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i8 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 3, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
@@ -442,11 +442,11 @@ define i8 @atomicrmw_max_i8_acq_rel(ptr %a, i8 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i8 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 4, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
@@ -469,11 +469,11 @@ define i8 @atomicrmw_max_i8_seq_cst(ptr %a, i8 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i8 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 5, i32 5)
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
@@ -496,11 +496,11 @@ define i8 @atomicrmw_min_i8_monotonic(ptr %a, i8 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i8 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 0, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
@@ -523,11 +523,11 @@ define i8 @atomicrmw_min_i8_acquire(ptr %a, i8 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i8 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 2, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
@@ -550,11 +550,11 @@ define i8 @atomicrmw_min_i8_release(ptr %a, i8 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i8 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 3, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
@@ -577,11 +577,11 @@ define i8 @atomicrmw_min_i8_acq_rel(ptr %a, i8 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i8 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 4, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
@@ -604,11 +604,11 @@ define i8 @atomicrmw_min_i8_seq_cst(ptr %a, i8 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i8 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 5, i32 5)
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
@@ -631,11 +631,11 @@ define i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i8 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 0, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
@@ -658,11 +658,11 @@ define i8 @atomicrmw_umax_i8_acquire(ptr %a, i8 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i8 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 2, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
@@ -685,11 +685,11 @@ define i8 @atomicrmw_umax_i8_release(ptr %a, i8 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i8 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 3, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
@@ -712,11 +712,11 @@ define i8 @atomicrmw_umax_i8_acq_rel(ptr %a, i8 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i8 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 4, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
@@ -739,11 +739,11 @@ define i8 @atomicrmw_umax_i8_seq_cst(ptr %a, i8 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i8 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 5, i32 5)
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
@@ -766,11 +766,11 @@ define i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i8 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 0, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
@@ -793,11 +793,11 @@ define i8 @atomicrmw_umin_i8_acquire(ptr %a, i8 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i8 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 2, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
@@ -820,11 +820,11 @@ define i8 @atomicrmw_umin_i8_release(ptr %a, i8 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i8 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 3, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
@@ -847,11 +847,11 @@ define i8 @atomicrmw_umin_i8_acq_rel(ptr %a, i8 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i8 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 4, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
@@ -874,11 +874,11 @@ define i8 @atomicrmw_umin_i8_seq_cst(ptr %a, i8 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i8 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 5, i32 5)
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
@@ -1251,11 +1251,11 @@ define i16 @atomicrmw_max_i16_monotonic(ptr %a, i16 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i16 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 0, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
@@ -1278,11 +1278,11 @@ define i16 @atomicrmw_max_i16_acquire(ptr %a, i16 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i16 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 2, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
@@ -1305,11 +1305,11 @@ define i16 @atomicrmw_max_i16_release(ptr %a, i16 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i16 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 3, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
@@ -1332,11 +1332,11 @@ define i16 @atomicrmw_max_i16_acq_rel(ptr %a, i16 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i16 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 4, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
@@ -1359,11 +1359,11 @@ define i16 @atomicrmw_max_i16_seq_cst(ptr %a, i16 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i16 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 5, i32 5)
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
@@ -1386,11 +1386,11 @@ define i16 @atomicrmw_min_i16_monotonic(ptr %a, i16 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i16 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 0, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
@@ -1413,11 +1413,11 @@ define i16 @atomicrmw_min_i16_acquire(ptr %a, i16 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i16 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 2, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
@@ -1440,11 +1440,11 @@ define i16 @atomicrmw_min_i16_release(ptr %a, i16 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i16 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 3, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
@@ -1467,11 +1467,11 @@ define i16 @atomicrmw_min_i16_acq_rel(ptr %a, i16 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i16 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 4, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
@@ -1494,11 +1494,11 @@ define i16 @atomicrmw_min_i16_seq_cst(ptr %a, i16 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i16 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 5, i32 5)
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
@@ -1521,11 +1521,11 @@ define i16 @atomicrmw_umax_i16_monotonic(ptr %a, i16 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i16 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 0, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
@@ -1548,11 +1548,11 @@ define i16 @atomicrmw_umax_i16_acquire(ptr %a, i16 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i16 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 2, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
@@ -1575,11 +1575,11 @@ define i16 @atomicrmw_umax_i16_release(ptr %a, i16 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i16 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 3, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
@@ -1602,11 +1602,11 @@ define i16 @atomicrmw_umax_i16_acq_rel(ptr %a, i16 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i16 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 4, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
@@ -1629,11 +1629,11 @@ define i16 @atomicrmw_umax_i16_seq_cst(ptr %a, i16 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i16 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 5, i32 5)
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
@@ -1656,11 +1656,11 @@ define i16 @atomicrmw_umin_i16_monotonic(ptr %a, i16 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i16 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 0, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
@@ -1683,11 +1683,11 @@ define i16 @atomicrmw_umin_i16_acquire(ptr %a, i16 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i16 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 2, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
@@ -1710,11 +1710,11 @@ define i16 @atomicrmw_umin_i16_release(ptr %a, i16 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i16 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 3, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
@@ -1737,11 +1737,11 @@ define i16 @atomicrmw_umin_i16_acq_rel(ptr %a, i16 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i16 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 4, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
@@ -1764,11 +1764,11 @@ define i16 @atomicrmw_umin_i16_seq_cst(ptr %a, i16 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i16 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 5, i32 5)
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
@@ -2112,11 +2112,11 @@ define i32 @atomicrmw_max_i32_monotonic(ptr %a, i32 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i32 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 0, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
@@ -2139,11 +2139,11 @@ define i32 @atomicrmw_max_i32_acquire(ptr %a, i32 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i32 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 2, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
@@ -2166,11 +2166,11 @@ define i32 @atomicrmw_max_i32_release(ptr %a, i32 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i32 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 3, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
@@ -2193,11 +2193,11 @@ define i32 @atomicrmw_max_i32_acq_rel(ptr %a, i32 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i32 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 4, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
@@ -2220,11 +2220,11 @@ define i32 @atomicrmw_max_i32_seq_cst(ptr %a, i32 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i32 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 5, i32 5)
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
@@ -2247,11 +2247,11 @@ define i32 @atomicrmw_min_i32_monotonic(ptr %a, i32 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i32 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 0, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
@@ -2274,11 +2274,11 @@ define i32 @atomicrmw_min_i32_acquire(ptr %a, i32 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i32 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 2, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
@@ -2301,11 +2301,11 @@ define i32 @atomicrmw_min_i32_release(ptr %a, i32 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i32 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 3, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
@@ -2328,11 +2328,11 @@ define i32 @atomicrmw_min_i32_acq_rel(ptr %a, i32 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i32 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 4, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
@@ -2355,11 +2355,11 @@ define i32 @atomicrmw_min_i32_seq_cst(ptr %a, i32 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i32 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 5, i32 5)
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
@@ -2382,11 +2382,11 @@ define i32 @atomicrmw_umax_i32_monotonic(ptr %a, i32 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i32 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 0, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
@@ -2409,11 +2409,11 @@ define i32 @atomicrmw_umax_i32_acquire(ptr %a, i32 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i32 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 2, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
@@ -2436,11 +2436,11 @@ define i32 @atomicrmw_umax_i32_release(ptr %a, i32 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i32 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 3, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
@@ -2463,11 +2463,11 @@ define i32 @atomicrmw_umax_i32_acq_rel(ptr %a, i32 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i32 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 4, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
@@ -2490,11 +2490,11 @@ define i32 @atomicrmw_umax_i32_seq_cst(ptr %a, i32 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i32 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 5, i32 5)
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
@@ -2517,11 +2517,11 @@ define i32 @atomicrmw_umin_i32_monotonic(ptr %a, i32 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i32 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 0, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
@@ -2544,11 +2544,11 @@ define i32 @atomicrmw_umin_i32_acquire(ptr %a, i32 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i32 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 2, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
@@ -2571,11 +2571,11 @@ define i32 @atomicrmw_umin_i32_release(ptr %a, i32 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i32 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 3, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
@@ -2598,11 +2598,11 @@ define i32 @atomicrmw_umin_i32_acq_rel(ptr %a, i32 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i32 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 4, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
@@ -2625,11 +2625,11 @@ define i32 @atomicrmw_umin_i32_seq_cst(ptr %a, i32 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i32 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 5, i32 5)
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
diff --git a/llvm/test/Transforms/Attributor/heap_to_stack.ll b/llvm/test/Transforms/Attributor/heap_to_stack.ll
index 6719290..d54f713 100644
--- a/llvm/test/Transforms/Attributor/heap_to_stack.ll
+++ b/llvm/test/Transforms/Attributor/heap_to_stack.ll
@@ -27,7 +27,7 @@ declare i32 @no_return_call() noreturn
declare void @free(ptr nocapture) allockind("free")
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind
;.
; CHECK: @G = internal global ptr undef, align 4
diff --git a/llvm/test/Transforms/Attributor/heap_to_stack_gpu.ll b/llvm/test/Transforms/Attributor/heap_to_stack_gpu.ll
index 0be9434..9a6e0680 100644
--- a/llvm/test/Transforms/Attributor/heap_to_stack_gpu.ll
+++ b/llvm/test/Transforms/Attributor/heap_to_stack_gpu.ll
@@ -32,7 +32,7 @@ declare i32 @no_return_call() noreturn
declare void @free(ptr nocapture)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind
;.
; CHECK: @G = internal global ptr undef, align 4
diff --git a/llvm/test/Transforms/Attributor/liveness.ll b/llvm/test/Transforms/Attributor/liveness.ll
index 874eff6..c112d99 100644
--- a/llvm/test/Transforms/Attributor/liveness.ll
+++ b/llvm/test/Transforms/Attributor/liveness.ll
@@ -2589,7 +2589,7 @@ define void @bad_gep() {
; TUNIT-NEXT: entry:
; TUNIT-NEXT: [[N1:%.*]] = alloca i8, i32 0, align 1
; TUNIT-NEXT: [[M2:%.*]] = alloca i8, i32 0, align 1
-; TUNIT-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 1, ptr noalias nofree noundef nonnull captures(none) dereferenceable(1) [[N1]]) #[[ATTR18:[0-9]+]]
+; TUNIT-NEXT: call void @llvm.lifetime.start.p0(ptr noalias nofree noundef nonnull captures(none) dereferenceable(1) [[N1]]) #[[ATTR18:[0-9]+]]
; TUNIT-NEXT: br label [[EXIT:%.*]]
; TUNIT: while.body:
; TUNIT-NEXT: unreachable
@@ -2598,7 +2598,7 @@ define void @bad_gep() {
; TUNIT: if.end:
; TUNIT-NEXT: unreachable
; TUNIT: exit:
-; TUNIT-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 1, ptr noalias nofree noundef nonnull captures(none) dereferenceable(1) [[N1]]) #[[ATTR18]]
+; TUNIT-NEXT: call void @llvm.lifetime.end.p0(ptr noalias nofree noundef nonnull captures(none) dereferenceable(1) [[N1]]) #[[ATTR18]]
; TUNIT-NEXT: ret void
;
; CGSCC: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(none)
@@ -2607,7 +2607,7 @@ define void @bad_gep() {
; CGSCC-NEXT: entry:
; CGSCC-NEXT: [[N1:%.*]] = alloca i8, i32 0, align 1
; CGSCC-NEXT: [[M2:%.*]] = alloca i8, i32 0, align 1
-; CGSCC-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 1, ptr noalias nofree noundef nonnull captures(none) dereferenceable(1) [[N1]]) #[[ATTR21:[0-9]+]]
+; CGSCC-NEXT: call void @llvm.lifetime.start.p0(ptr noalias nofree noundef nonnull captures(none) dereferenceable(1) [[N1]]) #[[ATTR21:[0-9]+]]
; CGSCC-NEXT: br label [[EXIT:%.*]]
; CGSCC: while.body:
; CGSCC-NEXT: unreachable
@@ -2616,13 +2616,13 @@ define void @bad_gep() {
; CGSCC: if.end:
; CGSCC-NEXT: unreachable
; CGSCC: exit:
-; CGSCC-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 1, ptr noalias nofree noundef nonnull captures(none) dereferenceable(1) [[N1]]) #[[ATTR21]]
+; CGSCC-NEXT: call void @llvm.lifetime.end.p0(ptr noalias nofree noundef nonnull captures(none) dereferenceable(1) [[N1]]) #[[ATTR21]]
; CGSCC-NEXT: ret void
;
entry:
%n = alloca i8
%m = alloca i8
- call void @llvm.lifetime.start.p0(i64 1, ptr %n)
+ call void @llvm.lifetime.start.p0(ptr %n)
br label %exit
while.body:
@@ -2640,7 +2640,7 @@ if.end:
br i1 %cmp, label %exit, label %while.body
exit:
- call void @llvm.lifetime.end.p0(i64 1, ptr %n)
+ call void @llvm.lifetime.end.p0(ptr %n)
ret void
}
@@ -2679,8 +2679,8 @@ b2:
declare i1 @bad_gep_helper1(ptr, ptr, ptr)
declare void @bad_gep_helper2(i8)
-declare void @llvm.lifetime.start.p0(i64 %0, ptr %1)
-declare void @llvm.lifetime.end.p0(i64 %0, ptr %1)
+declare void @llvm.lifetime.start.p0(ptr %1)
+declare void @llvm.lifetime.end.p0(ptr %1)
;.
; TUNIT: attributes #[[ATTR0]] = { nofree noreturn nosync nounwind }
; TUNIT: attributes #[[ATTR1:[0-9]+]] = { memory(none) }
diff --git a/llvm/test/Transforms/Attributor/noalias.ll b/llvm/test/Transforms/Attributor/noalias.ll
index 46d9f77..b7c295a 100644
--- a/llvm/test/Transforms/Attributor/noalias.ll
+++ b/llvm/test/Transforms/Attributor/noalias.ll
@@ -577,31 +577,31 @@ define internal fastcc double @strtox(ptr %s, ptr %p, i32 %prec) unnamed_addr {
; TUNIT-SAME: (ptr [[S:%.*]]) unnamed_addr {
; TUNIT-NEXT: entry:
; TUNIT-NEXT: [[F:%.*]] = alloca [[STRUCT__IO_FILE:%.*]], align 8
-; TUNIT-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 144, ptr nofree noundef nonnull align 8 captures(none) dereferenceable(240) [[F]]) #[[ATTR13:[0-9]+]]
+; TUNIT-NEXT: call void @llvm.lifetime.start.p0(ptr nofree noundef nonnull align 8 captures(none) dereferenceable(240) [[F]]) #[[ATTR13:[0-9]+]]
; TUNIT-NEXT: [[CALL:%.*]] = call i32 @sh_fromstring(ptr noundef nonnull align 8 dereferenceable(240) [[F]], ptr [[S]])
; TUNIT-NEXT: call void @__shlim(ptr noundef nonnull align 8 dereferenceable(240) [[F]], i64 noundef 0)
; TUNIT-NEXT: [[CALL1:%.*]] = call double @__floatscan(ptr noundef nonnull align 8 dereferenceable(240) [[F]], i32 noundef 1, i32 noundef 1)
-; TUNIT-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 144, ptr nofree noundef nonnull align 8 captures(none) dereferenceable(240) [[F]])
+; TUNIT-NEXT: call void @llvm.lifetime.end.p0(ptr nofree noundef nonnull align 8 captures(none) dereferenceable(240) [[F]])
; TUNIT-NEXT: ret double [[CALL1]]
;
; CGSCC-LABEL: define {{[^@]+}}@strtox
; CGSCC-SAME: (ptr [[S:%.*]]) unnamed_addr {
; CGSCC-NEXT: entry:
; CGSCC-NEXT: [[F:%.*]] = alloca [[STRUCT__IO_FILE:%.*]], align 8
-; CGSCC-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 144, ptr nofree noundef nonnull align 8 captures(none) dereferenceable(240) [[F]]) #[[ATTR14:[0-9]+]]
+; CGSCC-NEXT: call void @llvm.lifetime.start.p0(ptr nofree noundef nonnull align 8 captures(none) dereferenceable(240) [[F]]) #[[ATTR14:[0-9]+]]
; CGSCC-NEXT: [[CALL:%.*]] = call i32 @sh_fromstring(ptr noundef nonnull align 8 dereferenceable(240) [[F]], ptr [[S]])
; CGSCC-NEXT: call void @__shlim(ptr noundef nonnull align 8 dereferenceable(240) [[F]], i64 noundef 0)
; CGSCC-NEXT: [[CALL1:%.*]] = call double @__floatscan(ptr noundef nonnull align 8 dereferenceable(240) [[F]], i32 noundef 1, i32 noundef 1)
-; CGSCC-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 144, ptr nofree noundef nonnull align 8 captures(none) dereferenceable(240) [[F]])
+; CGSCC-NEXT: call void @llvm.lifetime.end.p0(ptr nofree noundef nonnull align 8 captures(none) dereferenceable(240) [[F]])
; CGSCC-NEXT: ret double [[CALL1]]
;
entry:
%f = alloca %struct._IO_FILE, align 8
- call void @llvm.lifetime.start.p0(i64 144, ptr nonnull %f)
+ call void @llvm.lifetime.start.p0(ptr nonnull %f)
%call = call i32 @sh_fromstring(ptr nonnull %f, ptr %s)
call void @__shlim(ptr nonnull %f, i64 0)
%call1 = call double @__floatscan(ptr nonnull %f, i32 %prec, i32 1)
- call void @llvm.lifetime.end.p0(i64 144, ptr nonnull %f)
+ call void @llvm.lifetime.end.p0(ptr nonnull %f)
ret double %call1
}
@@ -620,7 +620,7 @@ entry:
}
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
; Function Attrs: optsize
declare dso_local i32 @sh_fromstring(...) local_unnamed_addr
@@ -632,7 +632,7 @@ declare dso_local void @__shlim(ptr, i64) local_unnamed_addr
declare dso_local double @__floatscan(ptr, i32, i32) local_unnamed_addr
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
; Test 15
; propagate noalias to some callsite arguments that there is no possibly reachable capture before it
diff --git a/llvm/test/Transforms/Attributor/openmp_parallel.ll b/llvm/test/Transforms/Attributor/openmp_parallel.ll
index d7b194d..54da16c 100644
--- a/llvm/test/Transforms/Attributor/openmp_parallel.ll
+++ b/llvm/test/Transforms/Attributor/openmp_parallel.ll
@@ -68,13 +68,13 @@ define internal void @.omp_outlined.(ptr noalias nocapture readonly %.global_tid
; TUNIT-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
; TUNIT-NEXT: br label [[OMP_PRECOND_THEN:%.*]]
; TUNIT: omp.precond.then:
-; TUNIT-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_LB]]) #[[ATTR3:[0-9]+]]
+; TUNIT-NEXT: call void @llvm.lifetime.start.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_LB]]) #[[ATTR3:[0-9]+]]
; TUNIT-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
-; TUNIT-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_UB]]) #[[ATTR3]]
+; TUNIT-NEXT: call void @llvm.lifetime.start.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_UB]]) #[[ATTR3]]
; TUNIT-NEXT: store i32 197, ptr [[DOTOMP_UB]], align 4
-; TUNIT-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_STRIDE]]) #[[ATTR3]]
+; TUNIT-NEXT: call void @llvm.lifetime.start.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_STRIDE]]) #[[ATTR3]]
; TUNIT-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
-; TUNIT-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_IS_LAST]]) #[[ATTR3]]
+; TUNIT-NEXT: call void @llvm.lifetime.start.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_IS_LAST]]) #[[ATTR3]]
; TUNIT-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
; TUNIT-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4
; TUNIT-NEXT: call void @__kmpc_for_static_init_4(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB1]], i32 [[TMP0]], i32 noundef 34, ptr noundef nonnull align 4 dereferenceable(4) [[DOTOMP_IS_LAST]], ptr noundef nonnull align 4 dereferenceable(4) [[DOTOMP_LB]], ptr noundef nonnull align 4 dereferenceable(4) [[DOTOMP_UB]], ptr noundef nonnull align 4 dereferenceable(4) [[DOTOMP_STRIDE]], i32 noundef 1, i32 noundef 1)
@@ -103,10 +103,10 @@ define internal void @.omp_outlined.(ptr noalias nocapture readonly %.global_tid
; TUNIT-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY]], label [[OMP_LOOP_EXIT]]
; TUNIT: omp.loop.exit:
; TUNIT-NEXT: call void @__kmpc_for_static_fini(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB1]], i32 [[TMP0]])
-; TUNIT-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_IS_LAST]])
-; TUNIT-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_STRIDE]])
-; TUNIT-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_UB]])
-; TUNIT-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_LB]])
+; TUNIT-NEXT: call void @llvm.lifetime.end.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_IS_LAST]])
+; TUNIT-NEXT: call void @llvm.lifetime.end.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_STRIDE]])
+; TUNIT-NEXT: call void @llvm.lifetime.end.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_UB]])
+; TUNIT-NEXT: call void @llvm.lifetime.end.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_LB]])
; TUNIT-NEXT: br label [[OMP_PRECOND_END:%.*]]
; TUNIT: omp.precond.end:
; TUNIT-NEXT: ret void
@@ -124,13 +124,13 @@ define internal void @.omp_outlined.(ptr noalias nocapture readonly %.global_tid
; CGSCC-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP0]], 1
; CGSCC-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
; CGSCC: omp.precond.then:
-; CGSCC-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_LB]]) #[[ATTR3:[0-9]+]]
+; CGSCC-NEXT: call void @llvm.lifetime.start.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_LB]]) #[[ATTR3:[0-9]+]]
; CGSCC-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
-; CGSCC-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_UB]]) #[[ATTR3]]
+; CGSCC-NEXT: call void @llvm.lifetime.start.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_UB]]) #[[ATTR3]]
; CGSCC-NEXT: store i32 [[SUB2]], ptr [[DOTOMP_UB]], align 4
-; CGSCC-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_STRIDE]]) #[[ATTR3]]
+; CGSCC-NEXT: call void @llvm.lifetime.start.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_STRIDE]]) #[[ATTR3]]
; CGSCC-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
-; CGSCC-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_IS_LAST]]) #[[ATTR3]]
+; CGSCC-NEXT: call void @llvm.lifetime.start.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_IS_LAST]]) #[[ATTR3]]
; CGSCC-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
; CGSCC-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4
; CGSCC-NEXT: call void @__kmpc_for_static_init_4(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB1]], i32 [[TMP1]], i32 noundef 34, ptr noundef nonnull align 4 dereferenceable(4) [[DOTOMP_IS_LAST]], ptr noundef nonnull align 4 dereferenceable(4) [[DOTOMP_LB]], ptr noundef nonnull align 4 dereferenceable(4) [[DOTOMP_UB]], ptr noundef nonnull align 4 dereferenceable(4) [[DOTOMP_STRIDE]], i32 noundef 1, i32 noundef 1)
@@ -159,10 +159,10 @@ define internal void @.omp_outlined.(ptr noalias nocapture readonly %.global_tid
; CGSCC-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY]], label [[OMP_LOOP_EXIT]]
; CGSCC: omp.loop.exit:
; CGSCC-NEXT: call void @__kmpc_for_static_fini(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB1]], i32 [[TMP1]])
-; CGSCC-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_IS_LAST]])
-; CGSCC-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_STRIDE]])
-; CGSCC-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_UB]])
-; CGSCC-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_LB]])
+; CGSCC-NEXT: call void @llvm.lifetime.end.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_IS_LAST]])
+; CGSCC-NEXT: call void @llvm.lifetime.end.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_STRIDE]])
+; CGSCC-NEXT: call void @llvm.lifetime.end.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_UB]])
+; CGSCC-NEXT: call void @llvm.lifetime.end.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_LB]])
; CGSCC-NEXT: br label [[OMP_PRECOND_END]]
; CGSCC: omp.precond.end:
; CGSCC-NEXT: ret void
@@ -178,13 +178,13 @@ entry:
br i1 %cmp, label %omp.precond.then, label %omp.precond.end
omp.precond.then: ; preds = %entry
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %.omp.lb) #3
+ call void @llvm.lifetime.start.p0(ptr nonnull %.omp.lb) #3
store i32 0, ptr %.omp.lb, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %.omp.ub) #3
+ call void @llvm.lifetime.start.p0(ptr nonnull %.omp.ub) #3
store i32 %sub2, ptr %.omp.ub, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %.omp.stride) #3
+ call void @llvm.lifetime.start.p0(ptr nonnull %.omp.stride) #3
store i32 1, ptr %.omp.stride, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %.omp.is_last) #3
+ call void @llvm.lifetime.start.p0(ptr nonnull %.omp.is_last) #3
store i32 0, ptr %.omp.is_last, align 4
%1 = load i32, ptr %.global_tid., align 4
call void @__kmpc_for_static_init_4(ptr nonnull @1, i32 %1, i32 34, ptr nonnull %.omp.is_last, ptr nonnull %.omp.lb, ptr nonnull %.omp.ub, ptr nonnull %.omp.stride, i32 1, i32 1) #3
@@ -216,10 +216,10 @@ omp.inner.for.body: ; preds = %omp.inner.for.body,
omp.loop.exit: ; preds = %omp.inner.for.body, %omp.precond.then
call void @__kmpc_for_static_fini(ptr nonnull @1, i32 %1)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %.omp.is_last) #3
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %.omp.stride) #3
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %.omp.ub) #3
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %.omp.lb) #3
+ call void @llvm.lifetime.end.p0(ptr nonnull %.omp.is_last) #3
+ call void @llvm.lifetime.end.p0(ptr nonnull %.omp.stride) #3
+ call void @llvm.lifetime.end.p0(ptr nonnull %.omp.ub) #3
+ call void @llvm.lifetime.end.p0(ptr nonnull %.omp.lb) #3
br label %omp.precond.end
omp.precond.end: ; preds = %omp.loop.exit, %entry
@@ -227,10 +227,10 @@ omp.precond.end: ; preds = %omp.loop.exit, %ent
}
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.start.p0(ptr nocapture) #2
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.end.p0(ptr nocapture) #2
declare dso_local void @__kmpc_for_static_init_4(ptr, i32, i32, ptr, ptr, ptr, ptr, i32, i32) local_unnamed_addr
diff --git a/llvm/test/Transforms/Attributor/reduced/register_benchmark_test.ll b/llvm/test/Transforms/Attributor/reduced/register_benchmark_test.ll
index 472ed30..eb7d78f 100644
--- a/llvm/test/Transforms/Attributor/reduced/register_benchmark_test.ll
+++ b/llvm/test/Transforms/Attributor/reduced/register_benchmark_test.ll
@@ -201,7 +201,7 @@ declare dso_local void @_ZNSt6vectorIN12_GLOBAL__N_18TestCaseESaIS1_EED2Ev(ptr)
declare dso_local void @_Z11BM_functionRN9benchmark5StateE(ptr dereferenceable(144)) #0
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #5
+declare void @llvm.lifetime.start.p0(ptr nocapture) #5
; Function Attrs: alwaysinline uwtable
declare dso_local { i64, ptr } @_ZN9benchmark5State5beginEv(ptr) #6 align 2
@@ -216,7 +216,7 @@ declare dso_local zeroext i1 @_ZNK9benchmark5State13StateIteratorneERKS1_(ptr, p
declare dso_local void @_ZNK9benchmark5State13StateIteratordeEv(ptr) #7 align 2
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #5
+declare void @llvm.lifetime.end.p0(ptr nocapture) #5
; Function Attrs: alwaysinline nounwind uwtable
declare dso_local dereferenceable(16) ptr @_ZN9benchmark5State13StateIteratorppEv(ptr) #7 align 2
diff --git a/llvm/test/Transforms/Attributor/value-simplify-pointer-info.ll b/llvm/test/Transforms/Attributor/value-simplify-pointer-info.ll
index fa942c9..82bed0f 100644
--- a/llvm/test/Transforms/Attributor/value-simplify-pointer-info.ll
+++ b/llvm/test/Transforms/Attributor/value-simplify-pointer-info.ll
@@ -116,7 +116,7 @@ define void @local_alloca_simplifiable_1(ptr noalias sret(%struct.S) align 4 %ag
; TUNIT-SAME: (ptr noalias nofree writeonly sret([[STRUCT_S:%.*]]) align 4 captures(none) dereferenceable_or_null(24) [[AGG_RESULT:%.*]]) #[[ATTR1:[0-9]+]] {
; TUNIT-NEXT: entry:
; TUNIT-NEXT: [[S:%.*]] = alloca [[STRUCT_S]], align 4
-; TUNIT-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 24, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(24) [[S]]) #[[ATTR17:[0-9]+]]
+; TUNIT-NEXT: call void @llvm.lifetime.start.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(24) [[S]]) #[[ATTR17:[0-9]+]]
; TUNIT-NEXT: [[F1:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[S]], i64 0, i32 3
; TUNIT-NEXT: [[F2:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[S]], i64 0, i32 4
; TUNIT-NEXT: [[F3:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[S]], i64 0, i32 5
@@ -136,7 +136,7 @@ define void @local_alloca_simplifiable_1(ptr noalias sret(%struct.S) align 4 %ag
; TUNIT-NEXT: store i32 4, ptr [[I212]], align 4, !tbaa [[TBAA13:![0-9]+]]
; TUNIT-NEXT: [[I316:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[AGG_RESULT]], i64 0, i32 2
; TUNIT-NEXT: store i32 4, ptr [[I316]], align 4, !tbaa [[TBAA14:![0-9]+]]
-; TUNIT-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 24, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(24) [[S]]) #[[ATTR17]]
+; TUNIT-NEXT: call void @llvm.lifetime.end.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(24) [[S]]) #[[ATTR17]]
; TUNIT-NEXT: ret void
;
; CGSCC: Function Attrs: mustprogress nofree nosync nounwind willreturn memory(argmem: readwrite)
@@ -144,7 +144,7 @@ define void @local_alloca_simplifiable_1(ptr noalias sret(%struct.S) align 4 %ag
; CGSCC-SAME: (ptr noalias nofree noundef nonnull writeonly sret([[STRUCT_S:%.*]]) align 4 captures(none) dereferenceable(24) [[AGG_RESULT:%.*]]) #[[ATTR1:[0-9]+]] {
; CGSCC-NEXT: entry:
; CGSCC-NEXT: [[S:%.*]] = alloca [[STRUCT_S]], align 4
-; CGSCC-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 24, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(24) [[S]]) #[[ATTR20:[0-9]+]]
+; CGSCC-NEXT: call void @llvm.lifetime.start.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(24) [[S]]) #[[ATTR20:[0-9]+]]
; CGSCC-NEXT: [[F1:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[S]], i64 0, i32 3
; CGSCC-NEXT: store float 0x3FF19999A0000000, ptr [[F1]], align 4, !tbaa [[TBAA7:![0-9]+]]
; CGSCC-NEXT: [[F2:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[S]], i64 0, i32 4
@@ -185,12 +185,12 @@ define void @local_alloca_simplifiable_1(ptr noalias sret(%struct.S) align 4 %ag
; CGSCC-NEXT: [[ADD15:%.*]] = add nsw i32 [[I10]], [[I11]]
; CGSCC-NEXT: [[I316:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[AGG_RESULT]], i64 0, i32 2
; CGSCC-NEXT: store i32 [[ADD15]], ptr [[I316]], align 4, !tbaa [[TBAA14]]
-; CGSCC-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 24, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(24) [[S]]) #[[ATTR20]]
+; CGSCC-NEXT: call void @llvm.lifetime.end.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(24) [[S]]) #[[ATTR20]]
; CGSCC-NEXT: ret void
;
entry:
%s = alloca %struct.S, align 4
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %s)
+ call void @llvm.lifetime.start.p0(ptr nonnull %s)
%f1 = getelementptr inbounds %struct.S, ptr %s, i64 0, i32 3
store float 0x3FF19999A0000000, ptr %f1, align 4, !tbaa !7
%f2 = getelementptr inbounds %struct.S, ptr %s, i64 0, i32 4
@@ -231,13 +231,13 @@ entry:
%add15 = add nsw i32 %i10, %i11
%i316 = getelementptr inbounds %struct.S, ptr %agg.result, i64 0, i32 2
store i32 %add15, ptr %i316, align 4, !tbaa !14
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %s)
+ call void @llvm.lifetime.end.p0(ptr nonnull %s)
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
; void local_alloca_simplifiable_2(void) {
; char Bytes[1024];
@@ -260,7 +260,7 @@ define void @local_alloca_simplifiable_2() {
; TUNIT-SAME: () #[[ATTR3:[0-9]+]] {
; TUNIT-NEXT: entry:
; TUNIT-NEXT: [[BYTES:%.*]] = alloca [1024 x i8], align 16
-; TUNIT-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 1024, ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(1024) [[BYTES]]) #[[ATTR17]]
+; TUNIT-NEXT: call void @llvm.lifetime.start.p0(ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(1024) [[BYTES]]) #[[ATTR17]]
; TUNIT-NEXT: br label [[FOR_COND:%.*]]
; TUNIT: for.cond:
; TUNIT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ], [ 0, [[ENTRY:%.*]] ]
@@ -326,7 +326,7 @@ define void @local_alloca_simplifiable_2() {
; TUNIT-NEXT: [[INDVARS_IV_NEXT13]] = add nuw nsw i64 [[INDVARS_IV12]], 1
; TUNIT-NEXT: br label [[FOR_COND28]], !llvm.loop [[LOOP20:![0-9]+]]
; TUNIT: for.end38:
-; TUNIT-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 1024, ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(1024) [[BYTES]]) #[[ATTR17]]
+; TUNIT-NEXT: call void @llvm.lifetime.end.p0(ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(1024) [[BYTES]]) #[[ATTR17]]
; TUNIT-NEXT: ret void
;
; CGSCC: Function Attrs: mustprogress nofree nosync nounwind willreturn
@@ -334,7 +334,7 @@ define void @local_alloca_simplifiable_2() {
; CGSCC-SAME: () #[[ATTR3:[0-9]+]] {
; CGSCC-NEXT: entry:
; CGSCC-NEXT: [[BYTES:%.*]] = alloca [1024 x i8], align 16
-; CGSCC-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 1024, ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(1024) [[BYTES]]) #[[ATTR20]]
+; CGSCC-NEXT: call void @llvm.lifetime.start.p0(ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(1024) [[BYTES]]) #[[ATTR20]]
; CGSCC-NEXT: br label [[FOR_COND:%.*]]
; CGSCC: for.cond:
; CGSCC-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ], [ 0, [[ENTRY:%.*]] ]
@@ -406,12 +406,12 @@ define void @local_alloca_simplifiable_2() {
; CGSCC-NEXT: [[INDVARS_IV_NEXT13]] = add nuw nsw i64 [[INDVARS_IV12]], 1
; CGSCC-NEXT: br label [[FOR_COND28]], !llvm.loop [[LOOP23:![0-9]+]]
; CGSCC: for.end38:
-; CGSCC-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 1024, ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(1024) [[BYTES]]) #[[ATTR20]]
+; CGSCC-NEXT: call void @llvm.lifetime.end.p0(ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(1024) [[BYTES]]) #[[ATTR20]]
; CGSCC-NEXT: ret void
;
entry:
%Bytes = alloca [1024 x i8], align 16
- call void @llvm.lifetime.start.p0(i64 1024, ptr nonnull %Bytes)
+ call void @llvm.lifetime.start.p0(ptr nonnull %Bytes)
br label %for.cond
for.cond: ; preds = %for.inc, %entry
@@ -503,7 +503,7 @@ for.inc36: ; preds = %for.body31
br label %for.cond28, !llvm.loop !23
for.end38: ; preds = %for.cond.cleanup30
- call void @llvm.lifetime.end.p0(i64 1024, ptr nonnull %Bytes)
+ call void @llvm.lifetime.end.p0(ptr nonnull %Bytes)
ret void
}
@@ -558,7 +558,7 @@ define i32 @multi_obj_simplifiable_1(i32 %cnd) {
; TUNIT-SAME: (i32 [[CND:%.*]]) #[[ATTR3]] {
; TUNIT-NEXT: entry:
; TUNIT-NEXT: [[L:%.*]] = alloca i32, align 4
-; TUNIT-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR17]]
+; TUNIT-NEXT: call void @llvm.lifetime.start.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR17]]
; TUNIT-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[CND]], 0
; TUNIT-NEXT: br i1 [[TOBOOL_NOT]], label [[COND_FALSE:%.*]], label [[COND_TRUE:%.*]]
; TUNIT: cond.true:
@@ -566,7 +566,7 @@ define i32 @multi_obj_simplifiable_1(i32 %cnd) {
; TUNIT: cond.false:
; TUNIT-NEXT: br label [[COND_END]]
; TUNIT: cond.end:
-; TUNIT-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR17]]
+; TUNIT-NEXT: call void @llvm.lifetime.end.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR17]]
; TUNIT-NEXT: ret i32 5
;
; CGSCC: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn
@@ -574,7 +574,7 @@ define i32 @multi_obj_simplifiable_1(i32 %cnd) {
; CGSCC-SAME: (i32 [[CND:%.*]]) #[[ATTR5:[0-9]+]] {
; CGSCC-NEXT: entry:
; CGSCC-NEXT: [[L:%.*]] = alloca i32, align 4
-; CGSCC-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR20]]
+; CGSCC-NEXT: call void @llvm.lifetime.start.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR20]]
; CGSCC-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[CND]], 0
; CGSCC-NEXT: br i1 [[TOBOOL_NOT]], label [[COND_FALSE:%.*]], label [[COND_TRUE:%.*]]
; CGSCC: cond.true:
@@ -582,12 +582,12 @@ define i32 @multi_obj_simplifiable_1(i32 %cnd) {
; CGSCC: cond.false:
; CGSCC-NEXT: br label [[COND_END]]
; CGSCC: cond.end:
-; CGSCC-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR20]]
+; CGSCC-NEXT: call void @llvm.lifetime.end.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR20]]
; CGSCC-NEXT: ret i32 5
;
entry:
%L = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %L)
+ call void @llvm.lifetime.start.p0(ptr nonnull %L)
store i32 5, ptr @GI1, align 4, !tbaa !3
store i32 5, ptr %L, align 4, !tbaa !3
%tobool.not = icmp eq i32 %cnd, 0
@@ -602,7 +602,7 @@ cond.false: ; preds = %entry
cond.end: ; preds = %cond.false, %cond.true
%cond = phi ptr [ @GI1, %cond.true ], [ %L, %cond.false ]
%i1 = load i32, ptr %cond, align 4, !tbaa !3
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %L)
+ call void @llvm.lifetime.end.p0(ptr nonnull %L)
ret i32 %i1
}
@@ -620,7 +620,7 @@ define i32 @multi_obj_simplifiable_2(i32 %cnd) {
; TUNIT-SAME: (i32 [[CND:%.*]]) #[[ATTR3]] {
; TUNIT-NEXT: entry:
; TUNIT-NEXT: [[L:%.*]] = alloca i32, align 4
-; TUNIT-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR17]]
+; TUNIT-NEXT: call void @llvm.lifetime.start.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR17]]
; TUNIT-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[CND]], 0
; TUNIT-NEXT: br i1 [[TOBOOL_NOT]], label [[COND_FALSE:%.*]], label [[COND_TRUE:%.*]]
; TUNIT: cond.true:
@@ -628,7 +628,7 @@ define i32 @multi_obj_simplifiable_2(i32 %cnd) {
; TUNIT: cond.false:
; TUNIT-NEXT: br label [[COND_END]]
; TUNIT: cond.end:
-; TUNIT-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR17]]
+; TUNIT-NEXT: call void @llvm.lifetime.end.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR17]]
; TUNIT-NEXT: ret i32 5
;
; CGSCC: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn
@@ -636,7 +636,7 @@ define i32 @multi_obj_simplifiable_2(i32 %cnd) {
; CGSCC-SAME: (i32 [[CND:%.*]]) #[[ATTR5]] {
; CGSCC-NEXT: entry:
; CGSCC-NEXT: [[L:%.*]] = alloca i32, align 4
-; CGSCC-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR20]]
+; CGSCC-NEXT: call void @llvm.lifetime.start.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR20]]
; CGSCC-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[CND]], 0
; CGSCC-NEXT: br i1 [[TOBOOL_NOT]], label [[COND_FALSE:%.*]], label [[COND_TRUE:%.*]]
; CGSCC: cond.true:
@@ -644,12 +644,12 @@ define i32 @multi_obj_simplifiable_2(i32 %cnd) {
; CGSCC: cond.false:
; CGSCC-NEXT: br label [[COND_END]]
; CGSCC: cond.end:
-; CGSCC-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR20]]
+; CGSCC-NEXT: call void @llvm.lifetime.end.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR20]]
; CGSCC-NEXT: ret i32 5
;
entry:
%L = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %L)
+ call void @llvm.lifetime.start.p0(ptr nonnull %L)
%tobool.not = icmp eq i32 %cnd, 0
br i1 %tobool.not, label %cond.false, label %cond.true
@@ -663,7 +663,7 @@ cond.end: ; preds = %cond.false, %cond.t
%cond = phi ptr [ @GI2, %cond.true ], [ %L, %cond.false ]
store i32 5, ptr %cond, align 4, !tbaa !3
%l = load i32, ptr %cond, align 4, !tbaa !3
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %L)
+ call void @llvm.lifetime.end.p0(ptr nonnull %L)
ret i32 %l
}
@@ -1528,8 +1528,8 @@ define i32 @local_alloca_not_simplifiable_1() {
; TUNIT-NEXT: entry:
; TUNIT-NEXT: [[X:%.*]] = alloca i32, align 4
; TUNIT-NEXT: [[Y:%.*]] = alloca i32, align 4
-; TUNIT-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[X]]) #[[ATTR17]]
-; TUNIT-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[Y]]) #[[ATTR17]]
+; TUNIT-NEXT: call void @llvm.lifetime.start.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[X]]) #[[ATTR17]]
+; TUNIT-NEXT: call void @llvm.lifetime.start.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[Y]]) #[[ATTR17]]
; TUNIT-NEXT: store i32 1, ptr [[Y]], align 4, !tbaa [[TBAA3]]
; TUNIT-NEXT: store i32 1, ptr [[X]], align 4, !tbaa [[TBAA3]]
; TUNIT-NEXT: call void @escape(ptr noundef nonnull align 4 dereferenceable(4) [[X]])
@@ -1540,16 +1540,16 @@ define i32 @local_alloca_not_simplifiable_1() {
; TUNIT-NEXT: [[I4:%.*]] = load i32, ptr [[Y]], align 4, !tbaa [[TBAA3]]
; TUNIT-NEXT: [[ADD:%.*]] = add nsw i32 [[I3]], [[I4]]
; TUNIT-NEXT: [[ADD1:%.*]] = add nsw i32 [[ADD]], [[COND]]
-; TUNIT-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[Y]])
-; TUNIT-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[X]])
+; TUNIT-NEXT: call void @llvm.lifetime.end.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[Y]])
+; TUNIT-NEXT: call void @llvm.lifetime.end.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[X]])
; TUNIT-NEXT: ret i32 [[ADD1]]
;
; CGSCC-LABEL: define {{[^@]+}}@local_alloca_not_simplifiable_1() {
; CGSCC-NEXT: entry:
; CGSCC-NEXT: [[X:%.*]] = alloca i32, align 4
; CGSCC-NEXT: [[Y:%.*]] = alloca i32, align 4
-; CGSCC-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[X]]) #[[ATTR20]]
-; CGSCC-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[Y]]) #[[ATTR20]]
+; CGSCC-NEXT: call void @llvm.lifetime.start.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[X]]) #[[ATTR20]]
+; CGSCC-NEXT: call void @llvm.lifetime.start.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[Y]]) #[[ATTR20]]
; CGSCC-NEXT: store i32 1, ptr [[Y]], align 4, !tbaa [[TBAA3]]
; CGSCC-NEXT: store i32 1, ptr [[X]], align 4, !tbaa [[TBAA3]]
; CGSCC-NEXT: call void @escape(ptr noundef nonnull align 4 dereferenceable(4) [[X]])
@@ -1560,15 +1560,15 @@ define i32 @local_alloca_not_simplifiable_1() {
; CGSCC-NEXT: [[I4:%.*]] = load i32, ptr [[Y]], align 4, !tbaa [[TBAA3]]
; CGSCC-NEXT: [[ADD:%.*]] = add nsw i32 [[I3]], [[I4]]
; CGSCC-NEXT: [[ADD1:%.*]] = add nsw i32 [[ADD]], [[COND]]
-; CGSCC-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[Y]])
-; CGSCC-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[X]])
+; CGSCC-NEXT: call void @llvm.lifetime.end.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[Y]])
+; CGSCC-NEXT: call void @llvm.lifetime.end.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[X]])
; CGSCC-NEXT: ret i32 [[ADD1]]
;
entry:
%X = alloca i32, align 4
%Y = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %X)
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %Y)
+ call void @llvm.lifetime.start.p0(ptr nonnull %X)
+ call void @llvm.lifetime.start.p0(ptr nonnull %Y)
store i32 1, ptr %Y, align 4, !tbaa !3
store i32 1, ptr %X, align 4, !tbaa !3
call void @escape(ptr nonnull %X)
@@ -1579,8 +1579,8 @@ entry:
%i4 = load i32, ptr %Y, align 4, !tbaa !3
%add = add nsw i32 %i3, %i4
%add1 = add nsw i32 %add, %cond
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %Y)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %X)
+ call void @llvm.lifetime.end.p0(ptr nonnull %Y)
+ call void @llvm.lifetime.end.p0(ptr nonnull %X)
ret i32 %add1
}
@@ -2755,7 +2755,7 @@ define hidden void @no_propagation_of_unknown_index_access(ptr %in, ptr %out, i3
; TUNIT-SAME: (ptr nofree readonly captures(none) [[IN:%.*]], ptr nofree writeonly captures(none) [[OUT:%.*]], i32 [[IDX:%.*]]) #[[ATTR1]] {
; TUNIT-NEXT: entry:
; TUNIT-NEXT: [[BUF:%.*]] = alloca [128 x i32], align 16
-; TUNIT-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 512, ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(512) [[BUF]]) #[[ATTR17]]
+; TUNIT-NEXT: call void @llvm.lifetime.start.p0(ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(512) [[BUF]]) #[[ATTR17]]
; TUNIT-NEXT: br label [[FOR_COND:%.*]]
; TUNIT: for.cond:
; TUNIT-NEXT: [[I_0:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY:%.*]] ]
@@ -2776,7 +2776,7 @@ define hidden void @no_propagation_of_unknown_index_access(ptr %in, ptr %out, i3
; TUNIT-NEXT: [[CMP5:%.*]] = icmp slt i32 [[I3_0]], 128
; TUNIT-NEXT: br i1 [[CMP5]], label [[FOR_BODY7]], label [[FOR_COND_CLEANUP6:%.*]]
; TUNIT: for.cond.cleanup6:
-; TUNIT-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 512, ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(512) [[BUF]]) #[[ATTR17]]
+; TUNIT-NEXT: call void @llvm.lifetime.end.p0(ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(512) [[BUF]]) #[[ATTR17]]
; TUNIT-NEXT: ret void
; TUNIT: for.body7:
; TUNIT-NEXT: [[IDXPROM8:%.*]] = sext i32 [[I3_0]] to i64
@@ -2797,7 +2797,7 @@ define hidden void @no_propagation_of_unknown_index_access(ptr %in, ptr %out, i3
; CGSCC-SAME: (ptr nofree readonly captures(none) [[IN:%.*]], ptr nofree writeonly captures(none) [[OUT:%.*]], i32 [[IDX:%.*]]) #[[ATTR13:[0-9]+]] {
; CGSCC-NEXT: entry:
; CGSCC-NEXT: [[BUF:%.*]] = alloca [128 x i32], align 16
-; CGSCC-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 512, ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(512) [[BUF]]) #[[ATTR20]]
+; CGSCC-NEXT: call void @llvm.lifetime.start.p0(ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(512) [[BUF]]) #[[ATTR20]]
; CGSCC-NEXT: br label [[FOR_COND:%.*]]
; CGSCC: for.cond:
; CGSCC-NEXT: [[I_0:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY:%.*]] ]
@@ -2818,7 +2818,7 @@ define hidden void @no_propagation_of_unknown_index_access(ptr %in, ptr %out, i3
; CGSCC-NEXT: [[CMP5:%.*]] = icmp slt i32 [[I3_0]], 128
; CGSCC-NEXT: br i1 [[CMP5]], label [[FOR_BODY7]], label [[FOR_COND_CLEANUP6:%.*]]
; CGSCC: for.cond.cleanup6:
-; CGSCC-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 512, ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(512) [[BUF]]) #[[ATTR20]]
+; CGSCC-NEXT: call void @llvm.lifetime.end.p0(ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(512) [[BUF]]) #[[ATTR20]]
; CGSCC-NEXT: ret void
; CGSCC: for.body7:
; CGSCC-NEXT: [[IDXPROM8:%.*]] = sext i32 [[I3_0]] to i64
@@ -2836,7 +2836,7 @@ define hidden void @no_propagation_of_unknown_index_access(ptr %in, ptr %out, i3
;
entry:
%buf = alloca [128 x i32], align 16
- call void @llvm.lifetime.start.p0(i64 512, ptr %buf) #2
+ call void @llvm.lifetime.start.p0(ptr %buf) #2
br label %for.cond
for.cond: ; preds = %for.body, %entry
@@ -2862,7 +2862,7 @@ for.cond4: ; preds = %for.body7, %for.con
br i1 %cmp5, label %for.body7, label %for.cond.cleanup6
for.cond.cleanup6: ; preds = %for.cond4
- call void @llvm.lifetime.end.p0(i64 512, ptr %buf) #2
+ call void @llvm.lifetime.end.p0(ptr %buf) #2
ret void
for.body7: ; preds = %for.cond4
diff --git a/llvm/test/Transforms/CodeExtractor/LoopExtractor_alloca.ll b/llvm/test/Transforms/CodeExtractor/LoopExtractor_alloca.ll
index b932a7d..09abf1f 100644
--- a/llvm/test/Transforms/CodeExtractor/LoopExtractor_alloca.ll
+++ b/llvm/test/Transforms/CodeExtractor/LoopExtractor_alloca.ll
@@ -30,16 +30,16 @@ entry:
br label %loop1
loop1:
- call void @llvm.lifetime.start.p0(i64 4, ptr %v1)
+ call void @llvm.lifetime.start.p0(ptr %v1)
%r1 = call i32 @foo(ptr %v1)
- call void @llvm.lifetime.end.p0(i64 4, ptr %v1)
+ call void @llvm.lifetime.end.p0(ptr %v1)
%cmp1 = icmp ne i32 %r1, 0
br i1 %cmp1, label %loop1, label %loop2
loop2:
- call void @llvm.lifetime.start.p0(i64 4, ptr %v2)
+ call void @llvm.lifetime.start.p0(ptr %v2)
%r2 = call i32 @foo(ptr %v2)
- call void @llvm.lifetime.end.p0(i64 4, ptr %v2)
+ call void @llvm.lifetime.end.p0(ptr %v2)
%cmp2 = icmp ne i32 %r2, 0
br i1 %cmp2, label %loop2, label %exit
@@ -49,6 +49,6 @@ exit:
declare i32 @foo(ptr)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg)
diff --git a/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca.ll b/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca.ll
index 9cdc37a..a24bb74 100644
--- a/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca.ll
+++ b/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca.ll
@@ -20,11 +20,11 @@ bb:
br i1 %tmp4, label %bb6, label %bb5
bb5: ; preds = %bb
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %tmp) #2
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp) #2
store i32 %tmp3, ptr %tmp, align 4, !tbaa !2
store i32 %tmp3, ptr @g, align 4, !tbaa !2
call void @bar(ptr nonnull %tmp) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %tmp) #2
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp) #2
br label %bb6
bb6: ; preds = %bb5, %bb
@@ -32,14 +32,14 @@ bb6: ; preds = %bb5, %bb
ret i32 %tmp7
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @bar(ptr) local_unnamed_addr #2
declare void @bar2(ptr, ptr) local_unnamed_addr #1
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
; Function Attrs: nounwind uwtable
define i32 @caller(i32 %arg) local_unnamed_addr #0 {
diff --git a/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca2.ll b/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca2.ll
index f4a37e7..22c0baf 100644
--- a/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca2.ll
+++ b/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca2.ll
@@ -18,10 +18,10 @@ bb:
br i1 %tmp4, label %bb6, label %bb5
bb5: ; preds = %bb
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %tmp) #2
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp) #2
store i32 %tmp3, ptr @g, align 4, !tbaa !2
call void @bar(ptr nonnull %tmp) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %tmp) #2
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp) #2
br label %bb6
bb6: ; preds = %bb5, %bb
@@ -30,14 +30,14 @@ bb6: ; preds = %bb5, %bb
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @bar(ptr) local_unnamed_addr #2
declare void @bar2(ptr, ptr) local_unnamed_addr #1
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
; Function Attrs: nounwind uwtable
define i32 @caller(i32 %arg) local_unnamed_addr #0 {
diff --git a/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca4.ll b/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca4.ll
index bd51910..8b9c5dd 100644
--- a/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca4.ll
+++ b/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca4.ll
@@ -7,7 +7,7 @@
@g = external local_unnamed_addr global i32, align 4
; CHECK-LABEL: define{{.*}}@caller(
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %tmp.i)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %tmp.i)
; CHECK-NEXT: call void @callee_unknown_use1.{{.*}}(ptr %tmp.i
define i32 @callee_unknown_use1(i32 %arg) local_unnamed_addr #0 {
@@ -21,11 +21,11 @@ bb:
br i1 %tmp4, label %bb6, label %bb5
bb5: ; preds = %bb
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %tmp) #2
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp) #2
store i32 %tmp3, ptr @g, align 4, !tbaa !2
%tmp11 = bitcast ptr %tmp to ptr
call void @bar(ptr nonnull %tmp11) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %tmp) #2
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp) #2
br label %bb6
bb6: ; preds = %bb5, %bb
@@ -36,14 +36,14 @@ bb6: ; preds = %bb5, %bb
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @bar(ptr) local_unnamed_addr #2
declare void @bar2(ptr, ptr) local_unnamed_addr #1
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
; Function Attrs: nounwind uwtable
define i32 @caller(i32 %arg) local_unnamed_addr #0 {
diff --git a/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca5.ll b/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca5.ll
index 54782c5..10be1c8 100644
--- a/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca5.ll
+++ b/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca5.ll
@@ -18,11 +18,11 @@ bb:
br i1 %tmp4, label %bb6, label %bb5
bb5: ; preds = %bb
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %tmp) #2
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp) #2
store i32 %tmp3, ptr %tmp, align 4, !tbaa !2
store i32 %tmp3, ptr @g, align 4, !tbaa !2
call void @bar(ptr nonnull %tmp) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %tmp) #2
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp) #2
br label %bb6
bb6: ; preds = %bb5, %bb
@@ -32,14 +32,14 @@ bb6: ; preds = %bb5, %bb
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @bar(ptr) local_unnamed_addr #2
declare void @bar2(ptr, ptr) local_unnamed_addr #1
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
; Function Attrs: nounwind uwtable
define i32 @caller(i32 %arg) local_unnamed_addr #0 {
diff --git a/llvm/test/Transforms/CodeExtractor/PartialInlineInvokeProducesOutVal.ll b/llvm/test/Transforms/CodeExtractor/PartialInlineInvokeProducesOutVal.ll
index bdf9e23..5e0ce20 100644
--- a/llvm/test/Transforms/CodeExtractor/PartialInlineInvokeProducesOutVal.ll
+++ b/llvm/test/Transforms/CodeExtractor/PartialInlineInvokeProducesOutVal.ll
@@ -26,10 +26,10 @@ bb5: ; preds = %bb4, %bb1, %bb
; CHECK-LABEL: bb:
; CHECK-NEXT: [[CALL26LOC:%.*]] = alloca ptr
; CHECK-LABEL: codeRepl.i:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[CALL26LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[CALL26LOC]])
; CHECK-NEXT: call void @bar.1.bb1(ptr [[CALL26LOC]])
; CHECK-NEXT: %call26.reload.i = load ptr, ptr [[CALL26LOC]]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[CALL26LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[CALL26LOC]])
define ptr @dummy_caller(i32 %arg) {
bb:
%tmp = tail call ptr @bar(i32 %arg)
diff --git a/llvm/test/Transforms/CodeExtractor/live_shrink.ll b/llvm/test/Transforms/CodeExtractor/live_shrink.ll
index f5debc503..43cc248 100644
--- a/llvm/test/Transforms/CodeExtractor/live_shrink.ll
+++ b/llvm/test/Transforms/CodeExtractor/live_shrink.ll
@@ -8,7 +8,7 @@
define void @_Z3foov() local_unnamed_addr {
bb:
%tmp = alloca %class.A, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %tmp)
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp)
%tmp2 = load i32, ptr @cond, align 4, !tbaa !2
%tmp3 = icmp eq i32 %tmp2, 0
br i1 %tmp3, label %bb4, label %bb5
@@ -18,17 +18,17 @@ bb4: ; preds = %bb
br label %bb5
bb5: ; preds = %bb4, %bb
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %tmp)
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp)
ret void
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @_ZN1A7memfuncEv(ptr) local_unnamed_addr
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
; Function Attrs: uwtable
define void @_Z3goov() local_unnamed_addr {
@@ -49,8 +49,8 @@ bb:
; CHECK-LABEL: define internal void @_Z3foov.1.
; CHECK: newFuncRoot:
; CHECK-NEXT: %tmp = alloca %class.A
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %tmp)
-; CHECK: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %tmp)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull %tmp)
+; CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %tmp)
; CHECK-NEXT: br label %bb5.exitStub
diff --git a/llvm/test/Transforms/CodeExtractor/live_shrink_gep.ll b/llvm/test/Transforms/CodeExtractor/live_shrink_gep.ll
index e9d5fb6..ef815ad 100644
--- a/llvm/test/Transforms/CodeExtractor/live_shrink_gep.ll
+++ b/llvm/test/Transforms/CodeExtractor/live_shrink_gep.ll
@@ -9,7 +9,7 @@
define void @_Z3foov() local_unnamed_addr {
bb:
%tmp = alloca %class.A, align 1
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %tmp)
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp)
%tmp2 = load i32, ptr @cond, align 4, !tbaa !2
%tmp3 = icmp eq i32 %tmp2, 0
br i1 %tmp3, label %bb4, label %bb5
@@ -19,17 +19,17 @@ bb4: ; preds = %bb
br label %bb5
bb5: ; preds = %bb4, %bb
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %tmp)
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp)
ret void
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @_ZN1A7memfuncEv(ptr) local_unnamed_addr
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
; Function Attrs: uwtable
define void @_Z3goov() local_unnamed_addr {
diff --git a/llvm/test/Transforms/CodeExtractor/live_shrink_hoist.ll b/llvm/test/Transforms/CodeExtractor/live_shrink_hoist.ll
index 6f63bca..7074854 100644
--- a/llvm/test/Transforms/CodeExtractor/live_shrink_hoist.ll
+++ b/llvm/test/Transforms/CodeExtractor/live_shrink_hoist.ll
@@ -9,7 +9,7 @@
define void @_Z3foov() local_unnamed_addr {
bb:
%tmp = alloca %class.A, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %tmp)
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp)
%tmp2 = load i32, ptr @cond, align 4, !tbaa !2
%tmp3 = icmp eq i32 %tmp2, 0
br i1 %tmp3, label %bb4, label %bb9
@@ -29,17 +29,17 @@ bb8: ; preds = %bb4
br label %bb9
bb9: ; preds = %bb8, %bb4, %bb
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %tmp)
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp)
ret void
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @_ZN1A7memfuncEv(ptr) local_unnamed_addr
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
; Function Attrs: uwtable
define void @_Z3goov() local_unnamed_addr {
@@ -50,7 +50,7 @@ bb:
; CHECK-LABEL: define internal void @_Z3foov.1.
; CHECK: bb9:
-; CHECK: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %tmp)
+; CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %tmp)
; CHECK: br label %.exitStub
diff --git a/llvm/test/Transforms/CodeExtractor/live_shrink_multiple.ll b/llvm/test/Transforms/CodeExtractor/live_shrink_multiple.ll
index 2512ac9..1d0af23 100644
--- a/llvm/test/Transforms/CodeExtractor/live_shrink_multiple.ll
+++ b/llvm/test/Transforms/CodeExtractor/live_shrink_multiple.ll
@@ -8,8 +8,8 @@ define void @_Z3foov() local_unnamed_addr {
bb:
%tmp = alloca %class.A, align 4
%tmp1 = alloca %class.A, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %tmp)
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %tmp1)
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp)
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp1)
%tmp4 = load i32, ptr @cond, align 4, !tbaa !2
%tmp5 = icmp eq i32 %tmp4, 0
br i1 %tmp5, label %bb6, label %bb7
@@ -19,18 +19,18 @@ bb6: ; preds = %bb
br label %bb7
bb7: ; preds = %bb6, %bb
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %tmp1)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %tmp)
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp1)
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp)
ret void
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @_ZN1A7memfuncEv(ptr) local_unnamed_addr
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
; Function Attrs: uwtable
define void @_Z3goov() local_unnamed_addr {
diff --git a/llvm/test/Transforms/CodeExtractor/live_shrink_unsafe.ll b/llvm/test/Transforms/CodeExtractor/live_shrink_unsafe.ll
index 7942418..c5bd626 100644
--- a/llvm/test/Transforms/CodeExtractor/live_shrink_unsafe.ll
+++ b/llvm/test/Transforms/CodeExtractor/live_shrink_unsafe.ll
@@ -14,8 +14,8 @@ define void @_Z3foo_unknown_mem_accessv() local_unnamed_addr {
bb:
%tmp = alloca %class.A, align 4
%tmp1 = alloca %class.A, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %tmp)
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %tmp1)
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp)
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp1)
%tmp4 = load ptr, ptr @condptr, align 8, !tbaa !2
%tmp5 = load i32, ptr %tmp4, align 4, !tbaa !6
%tmp6 = icmp eq i32 %tmp5, 0
@@ -26,20 +26,20 @@ bb7: ; preds = %bb
br label %bb8
bb8: ; preds = %bb7, %bb
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %tmp1)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %tmp)
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp1)
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp)
ret void
}
declare void @_Z3barv() local_unnamed_addr
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @_ZN1A7memfuncEv(ptr) local_unnamed_addr
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
define void @_Z3foo_unknown_calli(i32 %arg) local_unnamed_addr {
bb:
%tmp = alloca %class.A, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %tmp)
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp)
tail call void @_Z3barv()
%tmp2 = icmp eq i32 %arg, 0
br i1 %tmp2, label %bb3, label %bb4
@@ -49,7 +49,7 @@ bb3: ; preds = %bb
br label %bb4
bb4: ; preds = %bb3, %bb
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %tmp)
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp)
ret void
}
diff --git a/llvm/test/Transforms/CodeGenPrepare/ARM/tailcall-dup.ll b/llvm/test/Transforms/CodeGenPrepare/ARM/tailcall-dup.ll
index 3f113e6..d2b79ab 100644
--- a/llvm/test/Transforms/CodeGenPrepare/ARM/tailcall-dup.ll
+++ b/llvm/test/Transforms/CodeGenPrepare/ARM/tailcall-dup.ll
@@ -4,8 +4,8 @@ target triple = "armv8m.main-none-eabi"
declare ptr @f0()
declare ptr @f1()
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind
+declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind
define ptr @tail_dup() {
; CHECK-LABEL: tail_dup
@@ -15,7 +15,7 @@ define ptr @tail_dup() {
; CHECK-NEXT: ret ptr
bb0:
%a = alloca i32
- call void @llvm.lifetime.start.p0(i64 -1, ptr %a) nounwind
+ call void @llvm.lifetime.start.p0(ptr %a) nounwind
%tmp0 = tail call ptr @f0()
br label %return
bb1:
@@ -23,7 +23,7 @@ bb1:
br label %return
return:
%retval = phi ptr [ %tmp0, %bb0 ], [ %tmp1, %bb1 ]
- call void @llvm.lifetime.end.p0(i64 -1, ptr %a) nounwind
+ call void @llvm.lifetime.end.p0(ptr %a) nounwind
ret ptr %retval
}
diff --git a/llvm/test/Transforms/CodeGenPrepare/X86/tailcall-assume-xbb.ll b/llvm/test/Transforms/CodeGenPrepare/X86/tailcall-assume-xbb.ll
index dd47d5e..f72756d 100644
--- a/llvm/test/Transforms/CodeGenPrepare/X86/tailcall-assume-xbb.ll
+++ b/llvm/test/Transforms/CodeGenPrepare/X86/tailcall-assume-xbb.ll
@@ -14,7 +14,7 @@
define ptr @foo(i64 %size, i64 %v1, i64 %v2) {
entry:
%a = alloca i8
- call void @llvm.lifetime.start.p0(i64 -1, ptr %a) nounwind
+ call void @llvm.lifetime.start.p0(ptr %a) nounwind
%cmp1 = icmp ult i64 %size, 1025
br i1 %cmp1, label %if.end, label %case1
@@ -42,12 +42,12 @@ exit1:
exit2:
%retval2 = phi ptr [ %ret1, %case1 ], [ %retval1, %exit1 ]
- call void @llvm.lifetime.end.p0(i64 -1, ptr %a) nounwind
+ call void @llvm.lifetime.end.p0(ptr %a) nounwind
ret ptr %retval2
}
declare void @llvm.assume(i1)
declare ptr @qux()
declare ptr @bar()
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind
+declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind
diff --git a/llvm/test/Transforms/ConstantHoisting/AArch64/const-hoist-intrinsics.ll b/llvm/test/Transforms/ConstantHoisting/AArch64/const-hoist-intrinsics.ll
index 6bf268b..6997c9f 100644
--- a/llvm/test/Transforms/ConstantHoisting/AArch64/const-hoist-intrinsics.ll
+++ b/llvm/test/Transforms/ConstantHoisting/AArch64/const-hoist-intrinsics.ll
@@ -65,25 +65,25 @@ define void @test_free_intrinsics(i64 %x) {
; CHECK-LABEL: @test_free_intrinsics(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 100000000032, ptr [[PTR]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 100000000064, ptr [[PTR]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 100000000128, ptr [[PTR]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PTR]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PTR]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PTR]])
; CHECK-NEXT: [[I:%.*]] = call ptr @llvm.invariant.start.p0(i64 100000000256, ptr [[PTR]])
; CHECK-NEXT: call void @llvm.invariant.end.p0(ptr [[I]], i64 100000000256, ptr [[PTR]])
; CHECK-NEXT: ret void
;
entry:
%ptr = alloca i8
- call void @llvm.lifetime.start.p0(i64 100000000032, ptr %ptr)
- call void @llvm.lifetime.start.p0(i64 100000000064, ptr %ptr)
- call void @llvm.lifetime.end.p0(i64 100000000128, ptr %ptr)
+ call void @llvm.lifetime.start.p0(ptr %ptr)
+ call void @llvm.lifetime.start.p0(ptr %ptr)
+ call void @llvm.lifetime.end.p0(ptr %ptr)
%i = call ptr @llvm.invariant.start.p0(i64 100000000256, ptr %ptr)
call void @llvm.invariant.end.p0(ptr %i, i64 100000000256, ptr %ptr)
ret void
}
-declare void @llvm.lifetime.start.p0(i64, ptr)
-declare void @llvm.lifetime.end.p0(i64, ptr)
+declare void @llvm.lifetime.start.p0(ptr)
+declare void @llvm.lifetime.end.p0(ptr)
declare ptr @llvm.invariant.start.p0(i64, ptr nocapture)
declare void @llvm.invariant.end.p0(ptr, i64, ptr nocapture)
diff --git a/llvm/test/Transforms/Coroutines/coro-alloca-06.ll b/llvm/test/Transforms/Coroutines/coro-alloca-06.ll
index 89149ce..bf75196 100644
--- a/llvm/test/Transforms/Coroutines/coro-alloca-06.ll
+++ b/llvm/test/Transforms/Coroutines/coro-alloca-06.ll
@@ -17,11 +17,11 @@ entry:
tricky:
%2 = call ptr @await_suspend()
store ptr %2, ptr %0, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr %1)
+ call void @llvm.lifetime.start.p0(ptr %1)
store ptr %0, ptr %1, align 8
%3 = load ptr, ptr %1, align 8
%4 = load ptr, ptr %3, align 8
- call void @llvm.lifetime.end.p0(i64 8, ptr %1)
+ call void @llvm.lifetime.end.p0(ptr %1)
br label %finish
finish:
@@ -49,9 +49,9 @@ suspend:
; CHECK: [[TMP2:%.*]] = call ptr @await_suspend()
; CHECK-NEXT: store ptr [[TMP2]], ptr [[TMP0]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store ptr [[TMP0]], ptr [[TMP1]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
;
declare ptr @llvm.coro.free(token, ptr)
@@ -65,8 +65,8 @@ declare i1 @llvm.coro.alloc(token)
declare ptr @llvm.coro.begin(token, ptr)
declare i1 @llvm.coro.end(ptr, i1, token)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare ptr @await_suspend()
declare void @print(ptr nocapture)
diff --git a/llvm/test/Transforms/Coroutines/coro-alloca-07.ll b/llvm/test/Transforms/Coroutines/coro-alloca-07.ll
index 3b0acdd..8bfb8cf 100644
--- a/llvm/test/Transforms/Coroutines/coro-alloca-07.ll
+++ b/llvm/test/Transforms/Coroutines/coro-alloca-07.ll
@@ -13,11 +13,11 @@ entry:
br i1 %n, label %flag_true, label %flag_false
flag_true:
- call void @llvm.lifetime.start.p0(i64 8, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
br label %merge
flag_false:
- call void @llvm.lifetime.start.p0(i64 8, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
br label %merge
merge:
@@ -51,7 +51,7 @@ declare i1 @llvm.coro.alloc(token)
declare ptr @llvm.coro.begin(token, ptr)
declare i1 @llvm.coro.end(ptr, i1, token)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @print(ptr)
declare noalias ptr @malloc(i32)
diff --git a/llvm/test/Transforms/Coroutines/coro-alloca-08.ll b/llvm/test/Transforms/Coroutines/coro-alloca-08.ll
index 5a14a0e..80be62a 100644
--- a/llvm/test/Transforms/Coroutines/coro-alloca-08.ll
+++ b/llvm/test/Transforms/Coroutines/coro-alloca-08.ll
@@ -18,9 +18,9 @@ entry:
%alloc = call ptr @malloc(i64 16) #3
%vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
- call void @llvm.lifetime.start.p0(i64 100, ptr %testval)
+ call void @llvm.lifetime.start.p0(ptr %testval)
call void @consume.i8.array(ptr %testval)
- call void @llvm.lifetime.end.p0(i64 100, ptr %testval)
+ call void @llvm.lifetime.end.p0(ptr %testval)
%save = call token @llvm.coro.save(ptr null)
%suspend = call i8 @llvm.coro.suspend(token %save, i1 false)
@@ -53,9 +53,9 @@ entry:
await.ready:
%StrayCoroSave = call token @llvm.coro.save(ptr null)
- call void @llvm.lifetime.start.p0(i64 100, ptr %testval)
+ call void @llvm.lifetime.start.p0(ptr %testval)
call void @consume.i8.array(ptr %testval)
- call void @llvm.lifetime.end.p0(i64 100, ptr %testval)
+ call void @llvm.lifetime.end.p0(ptr %testval)
br label %exit
exit:
@@ -76,5 +76,5 @@ declare ptr @llvm.coro.frame() #5
declare i8 @llvm.coro.suspend(token, i1) #3
declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2
declare i1 @llvm.coro.end(ptr, i1, token) #3
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
diff --git a/llvm/test/Transforms/Coroutines/coro-alloca-09.ll b/llvm/test/Transforms/Coroutines/coro-alloca-09.ll
deleted file mode 100644
index 5c60c5b..0000000
--- a/llvm/test/Transforms/Coroutines/coro-alloca-09.ll
+++ /dev/null
@@ -1,57 +0,0 @@
-; RUN: opt < %s -passes='cgscc(coro-split),simplifycfg,early-cse' -S | FileCheck %s
-
-%"struct.std::coroutine_handle" = type { ptr }
-%"struct.std::coroutine_handle.0" = type { %"struct.std::coroutine_handle" }
-%"struct.lean_future<int>::Awaiter" = type { i32, %"struct.std::coroutine_handle.0" }
-
-declare ptr @malloc(i64)
-
-%i8.array = type { [100 x i8] }
-declare void @consume.i8(ptr)
-
-; The testval lives across suspend point so that it should be put on the frame.
-; However, part of testval has lifetime marker which indicates the part
-; wouldn't live across suspend point.
-; This test whether or not %testval would be put on the frame by ignoring the
-; partial lifetime markers.
-define void @foo(ptr %to_store) presplitcoroutine {
-entry:
- %testval = alloca %i8.array
- %subrange = getelementptr inbounds %i8.array, ptr %testval, i64 0, i32 0, i64 50
- %id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
- %alloc = call ptr @malloc(i64 16) #3
- %vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
-
- call void @llvm.lifetime.start.p0(i64 50, ptr %subrange)
- call void @consume.i8(ptr %subrange)
- call void @llvm.lifetime.end.p0(i64 50, ptr %subrange)
- store ptr %testval, ptr %to_store
-
- %save = call token @llvm.coro.save(ptr null)
- %suspend = call i8 @llvm.coro.suspend(token %save, i1 false)
- switch i8 %suspend, label %exit [
- i8 0, label %await.ready
- i8 1, label %exit
- ]
-await.ready:
- %StrayCoroSave = call token @llvm.coro.save(ptr null)
- br label %exit
-exit:
- call i1 @llvm.coro.end(ptr null, i1 false, token none)
- ret void
-}
-
-; Verify that for both foo and bar, testval isn't put on the frame.
-; CHECK: %foo.Frame = type { ptr, ptr, %i8.array, i1 }
-
-declare token @llvm.coro.id(i32, ptr readnone, ptr nocapture readonly, ptr)
-declare i1 @llvm.coro.alloc(token) #3
-declare i64 @llvm.coro.size.i64() #5
-declare ptr @llvm.coro.begin(token, ptr writeonly) #3
-declare token @llvm.coro.save(ptr) #3
-declare ptr @llvm.coro.frame() #5
-declare i8 @llvm.coro.suspend(token, i1) #3
-declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2
-declare i1 @llvm.coro.end(ptr, i1, token) #3
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4
diff --git a/llvm/test/Transforms/Coroutines/coro-alloca-loop-carried-address.ll b/llvm/test/Transforms/Coroutines/coro-alloca-loop-carried-address.ll
index f828b22..8b8dbac 100644
--- a/llvm/test/Transforms/Coroutines/coro-alloca-loop-carried-address.ll
+++ b/llvm/test/Transforms/Coroutines/coro-alloca-loop-carried-address.ll
@@ -50,7 +50,7 @@ entry:
br label %loop
loop:
- call void @llvm.lifetime.start(i64 8, ptr %stackvar0)
+ call void @llvm.lifetime.start(ptr %stackvar0)
store i64 1234, ptr %stackvar0
@@ -58,7 +58,7 @@ loop:
; %stackvar1 and rely on it staying the same across suspension.
call void @bar()
- call void @llvm.lifetime.end(i64 8, ptr %stackvar0)
+ call void @llvm.lifetime.end(ptr %stackvar0)
%save = call token @llvm.coro.save(ptr null)
%suspend = call i8 @llvm.coro.suspend(token %save, i1 false)
@@ -81,5 +81,5 @@ declare ptr @llvm.coro.begin(token, ptr writeonly)
declare token @llvm.coro.save(ptr)
declare i8 @llvm.coro.suspend(token, i1)
declare i1 @llvm.coro.end(ptr, i1, token)
-declare void @llvm.lifetime.start(i64, ptr nocapture)
-declare void @llvm.lifetime.end(i64, ptr nocapture)
+declare void @llvm.lifetime.start(ptr nocapture)
+declare void @llvm.lifetime.end(ptr nocapture)
diff --git a/llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-infinite-loop-bug.ll b/llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-infinite-loop-bug.ll
index 07b3bd8..d662638 100644
--- a/llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-infinite-loop-bug.ll
+++ b/llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-infinite-loop-bug.ll
@@ -49,7 +49,7 @@ entry:
%id = call token @llvm.coro.id.async(i32 128, i32 16, i32 0,
ptr @my_async_function_fp)
%hdl = call ptr @llvm.coro.begin(token %id, ptr null)
- call void @llvm.lifetime.start.p0(i64 4, ptr %escaped_addr)
+ call void @llvm.lifetime.start.p0(ptr %escaped_addr)
call void @escape(ptr %escaped_addr)
br label %callblock
@@ -80,6 +80,6 @@ declare void @llvm.coro.async.context.dealloc(ptr)
declare swiftcc void @asyncSuspend(ptr)
declare ptr @llvm.coro.async.resume()
declare void @llvm.coro.async.size.replace(ptr, ptr)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #0
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #0
+declare void @llvm.lifetime.start.p0(ptr nocapture) #0
+declare void @llvm.lifetime.end.p0(ptr nocapture) #0
attributes #0 = { argmemonly nofree nosync nounwind willreturn }
diff --git a/llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-start-bug.ll b/llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-start-bug.ll
index 4010159..49c4207 100644
--- a/llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-start-bug.ll
+++ b/llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-start-bug.ll
@@ -43,7 +43,7 @@ entry:
%id = call token @llvm.coro.id.async(i32 128, i32 16, i32 0,
ptr @my_async_function_fp)
%hdl = call ptr @llvm.coro.begin(token %id, ptr null)
- call void @llvm.lifetime.start.p0(i64 4, ptr %escaped_addr)
+ call void @llvm.lifetime.start.p0(ptr %escaped_addr)
br label %callblock
@@ -81,7 +81,7 @@ loop:
br label %callblock
loop_exit:
- call void @llvm.lifetime.end.p0(i64 4, ptr %escaped_addr)
+ call void @llvm.lifetime.end.p0(ptr %escaped_addr)
call i1 (ptr, i1, ...) @llvm.coro.end.async(ptr %hdl, i1 false)
unreachable
}
@@ -104,6 +104,6 @@ declare void @llvm.coro.async.context.dealloc(ptr)
declare swiftcc void @asyncSuspend(ptr)
declare ptr @llvm.coro.async.resume()
declare void @llvm.coro.async.size.replace(ptr, ptr)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #0
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #0
+declare void @llvm.lifetime.start.p0(ptr nocapture) #0
+declare void @llvm.lifetime.end.p0(ptr nocapture) #0
attributes #0 = { argmemonly nofree nosync nounwind willreturn }
diff --git a/llvm/test/Transforms/Coroutines/coro-byval-param.ll b/llvm/test/Transforms/Coroutines/coro-byval-param.ll
index 4705918..38ab5ac 100644
--- a/llvm/test/Transforms/Coroutines/coro-byval-param.ll
+++ b/llvm/test/Transforms/Coroutines/coro-byval-param.ll
@@ -19,7 +19,7 @@ coro.alloc: ; preds = %entry
coro.init: ; preds = %coro.alloc, %entry
%3 = phi ptr [ null, %entry ], [ %call, %coro.alloc ]
%4 = call ptr @llvm.coro.begin(token %0, ptr %3) #10
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %__promise) #2
+ call void @llvm.lifetime.start.p0(ptr nonnull %__promise) #2
%call2 = call ptr @_ZN4task12promise_type17get_return_objectEv(ptr nonnull dereferenceable(1) %__promise)
call void @initial_suspend(ptr nonnull dereferenceable(1) %__promise)
%5 = call token @llvm.coro.save(ptr null)
@@ -31,9 +31,9 @@ coro.init: ; preds = %coro.alloc, %entry
]
init.ready: ; preds = %coro.init
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %a2) #2
+ call void @llvm.lifetime.start.p0(ptr nonnull %a2) #2
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %a2, ptr align 8 %a1, i64 24, i1 false)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %a2) #2
+ call void @llvm.lifetime.end.p0(ptr nonnull %a2) #2
call void @_ZN4task12promise_type13final_suspendEv(ptr nonnull dereferenceable(1) %__promise) #2
%7 = call token @llvm.coro.save(ptr null)
call fastcc void @_ZNSt12experimental13coroutines_v116coroutine_handleIN4task12promise_typeEE12from_addressEPv(ptr %4) #2
@@ -42,7 +42,7 @@ init.ready: ; preds = %coro.init
br i1 %switch, label %cleanup33, label %coro.ret
cleanup33: ; preds = %init.ready, %coro.init
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %__promise) #2
+ call void @llvm.lifetime.end.p0(ptr nonnull %__promise) #2
%9 = call ptr @llvm.coro.free(token %0, ptr %4)
%.not = icmp eq ptr %9, null
br i1 %.not, label %coro.ret, label %coro.free
@@ -75,7 +75,7 @@ declare i64 @llvm.coro.size.i64() #4
declare ptr @llvm.coro.begin(token, ptr writeonly) #2
; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #5
+declare void @llvm.lifetime.start.p0(ptr nocapture) #5
; Function Attrs: argmemonly nofree nounwind willreturn
declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg) #6
@@ -93,7 +93,7 @@ declare token @llvm.coro.save(ptr) #2
declare hidden fastcc void @_ZNSt12experimental13coroutines_v116coroutine_handleIN4task12promise_typeEE12from_addressEPv(ptr) unnamed_addr #7 align 2
; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #5
+declare void @llvm.lifetime.end.p0(ptr nocapture) #5
; Function Attrs: nounwind
declare i8 @llvm.coro.suspend(token, i1) #2
diff --git a/llvm/test/Transforms/Coroutines/coro-elide-musttail.ll b/llvm/test/Transforms/Coroutines/coro-elide-musttail.ll
index 6c6e5a6..d369a21 100644
--- a/llvm/test/Transforms/Coroutines/coro-elide-musttail.ll
+++ b/llvm/test/Transforms/Coroutines/coro-elide-musttail.ll
@@ -48,7 +48,7 @@ entry:
}
; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #0
+declare void @llvm.lifetime.start.p0(ptr nocapture) #0
; Function Attrs: argmemonly nounwind readonly
declare token @llvm.coro.id(i32, ptr readnone, ptr nocapture readonly, ptr) #1
diff --git a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-00.ll b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-00.ll
index c9700c8..bf08d6f 100644
--- a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-00.ll
+++ b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-00.ll
@@ -16,33 +16,33 @@ entry:
br i1 %cond, label %then, label %else
then:
- call void @llvm.lifetime.start.p0(i64 500, ptr nonnull %data)
+ call void @llvm.lifetime.start.p0(ptr nonnull %data)
call void @consume(ptr %data)
%suspend.value = call i8 @llvm.coro.suspend(token none, i1 false)
switch i8 %suspend.value, label %coro.ret [i8 0, label %resume
i8 1, label %cleanup1]
resume:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %data)
+ call void @llvm.lifetime.end.p0(ptr nonnull %data)
br label %cleanup1
cleanup1:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %data)
+ call void @llvm.lifetime.end.p0(ptr nonnull %data)
br label %cleanup
else:
- call void @llvm.lifetime.start.p0(i64 500, ptr nonnull %data2)
+ call void @llvm.lifetime.start.p0(ptr nonnull %data2)
call void @consume(ptr %data2)
%suspend.value2 = call i8 @llvm.coro.suspend(token none, i1 false)
switch i8 %suspend.value2, label %coro.ret [i8 0, label %resume2
i8 1, label %cleanup2]
resume2:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %data2)
+ call void @llvm.lifetime.end.p0(ptr nonnull %data2)
br label %cleanup2
cleanup2:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %data2)
+ call void @llvm.lifetime.end.p0(ptr nonnull %data2)
br label %cleanup
cleanup:
@@ -72,5 +72,5 @@ declare noalias ptr @malloc(i32)
declare double @print(double)
declare void @free(ptr)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
diff --git a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-01.ll b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-01.ll
index 584caa35..78c6f0c 100644
--- a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-01.ll
+++ b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-01.ll
@@ -17,10 +17,10 @@ entry:
br label %init.ready
init.ready:
%1 = call noalias nonnull ptr @llvm.coro.begin(token %0, ptr null)
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %__promise)
+ call void @llvm.lifetime.start.p0(ptr nonnull %__promise)
br i1 %cond, label %if.then, label %if.else
if.then:
- call void @llvm.lifetime.start.p0(i64 500, ptr nonnull %a)
+ call void @llvm.lifetime.start.p0(ptr nonnull %a)
call void @consume(ptr nonnull %a)
%save = call token @llvm.coro.save(ptr null)
%suspend = call i8 @llvm.coro.suspend(token %save, i1 false)
@@ -29,10 +29,10 @@ if.then:
i8 1, label %cleanup1
]
await.ready:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
br label %cleanup1
if.else:
- call void @llvm.lifetime.start.p0(i64 500, ptr nonnull %b)
+ call void @llvm.lifetime.start.p0(ptr nonnull %b)
call void @consume(ptr nonnull %b)
%save2 = call token @llvm.coro.save(ptr null)
%suspend2 = call i8 @llvm.coro.suspend(token %save2, i1 false)
@@ -41,13 +41,13 @@ if.else:
i8 1, label %cleanup2
]
await2.ready:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %b)
+ call void @llvm.lifetime.end.p0(ptr nonnull %b)
br label %cleanup2
cleanup1:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
br label %cleanup
cleanup2:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %b)
+ call void @llvm.lifetime.end.p0(ptr nonnull %b)
br label %cleanup
cleanup:
call ptr @llvm.coro.free(token %0, ptr %1)
@@ -69,5 +69,5 @@ declare ptr @llvm.coro.frame() #5
declare i8 @llvm.coro.suspend(token, i1) #3
declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2
declare i1 @llvm.coro.end(ptr, i1, token) #3
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
diff --git a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-02.ll b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-02.ll
index f916ebb..8265731 100644
--- a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-02.ll
+++ b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-02.ll
@@ -19,10 +19,10 @@ entry:
br label %init.ready
init.ready:
%1 = call noalias nonnull ptr @llvm.coro.begin(token %0, ptr null)
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %__promise)
+ call void @llvm.lifetime.start.p0(ptr nonnull %__promise)
br i1 %cond, label %if.then, label %if.else
if.then:
- call void @llvm.lifetime.start.p0(i64 500, ptr nonnull %a)
+ call void @llvm.lifetime.start.p0(ptr nonnull %a)
call void @consume(ptr nonnull %a)
%save = call token @llvm.coro.save(ptr null)
%suspend = call i8 @llvm.coro.suspend(token %save, i1 false)
@@ -31,10 +31,10 @@ if.then:
i8 1, label %cleanup1
]
await.ready:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
br label %cleanup1
if.else:
- call void @llvm.lifetime.start.p0(i64 300, ptr nonnull %b)
+ call void @llvm.lifetime.start.p0(ptr nonnull %b)
call void @consume.2(ptr nonnull %b)
%save2 = call token @llvm.coro.save(ptr null)
%suspend2 = call i8 @llvm.coro.suspend(token %save2, i1 false)
@@ -43,13 +43,13 @@ if.else:
i8 1, label %cleanup2
]
await2.ready:
- call void @llvm.lifetime.end.p0(i64 300, ptr nonnull %b)
+ call void @llvm.lifetime.end.p0(ptr nonnull %b)
br label %cleanup2
cleanup1:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
br label %cleanup
cleanup2:
- call void @llvm.lifetime.end.p0(i64 300, ptr nonnull %b)
+ call void @llvm.lifetime.end.p0(ptr nonnull %b)
br label %cleanup
cleanup:
call ptr @llvm.coro.free(token %0, ptr %1)
@@ -70,5 +70,5 @@ declare ptr @llvm.coro.frame() #5
declare i8 @llvm.coro.suspend(token, i1) #3
declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2
declare i1 @llvm.coro.end(ptr, i1, token) #3
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
diff --git a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-03.ll b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-03.ll
index 525df87..66d4137 100644
--- a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-03.ll
+++ b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-03.ll
@@ -16,33 +16,33 @@ entry:
br i1 %cond, label %then, label %else
then:
- call void @llvm.lifetime.start.p0(i64 500, ptr nonnull %data)
+ call void @llvm.lifetime.start.p0(ptr nonnull %data)
call void @consume(ptr %data)
%suspend.value = call i8 @llvm.coro.suspend(token none, i1 false)
switch i8 %suspend.value, label %coro.ret [i8 0, label %resume
i8 1, label %cleanup1]
resume:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %data)
+ call void @llvm.lifetime.end.p0(ptr nonnull %data)
br label %cleanup1
cleanup1:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %data)
+ call void @llvm.lifetime.end.p0(ptr nonnull %data)
br label %cleanup
else:
- call void @llvm.lifetime.start.p0(i64 500, ptr nonnull %data2)
+ call void @llvm.lifetime.start.p0(ptr nonnull %data2)
call void @consume(ptr %data2)
%suspend.value2 = call i8 @llvm.coro.suspend(token none, i1 false)
switch i8 %suspend.value2, label %coro.ret [i8 0, label %resume2
i8 1, label %cleanup2]
resume2:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %data2)
+ call void @llvm.lifetime.end.p0(ptr nonnull %data2)
br label %cleanup2
cleanup2:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %data2)
+ call void @llvm.lifetime.end.p0(ptr nonnull %data2)
br label %cleanup
cleanup:
@@ -72,5 +72,5 @@ declare noalias ptr @malloc(i32)
declare double @print(double)
declare void @free(ptr)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
diff --git a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-04.ll b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-04.ll
index 27e0c47..6ff31e5 100644
--- a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-04.ll
+++ b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-04.ll
@@ -19,10 +19,10 @@ entry:
br label %init.ready
init.ready:
%1 = call noalias nonnull ptr @llvm.coro.begin(token %0, ptr null)
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %__promise)
+ call void @llvm.lifetime.start.p0(ptr nonnull %__promise)
br i1 %cond, label %if.then, label %if.else
if.then:
- call void @llvm.lifetime.start.p0(i64 500, ptr nonnull %a)
+ call void @llvm.lifetime.start.p0(ptr nonnull %a)
call void @consume(ptr nonnull %a)
%save = call token @llvm.coro.save(ptr null)
%suspend = call i8 @llvm.coro.suspend(token %save, i1 false)
@@ -31,10 +31,10 @@ if.then:
i8 1, label %cleanup1
]
await.ready:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
br label %cleanup1
if.else:
- call void @llvm.lifetime.start.p0(i64 300, ptr nonnull %b)
+ call void @llvm.lifetime.start.p0(ptr nonnull %b)
call void @consume.2(ptr nonnull %b)
%save2 = call token @llvm.coro.save(ptr null)
%suspend2 = call i8 @llvm.coro.suspend(token %save2, i1 false)
@@ -43,13 +43,13 @@ if.else:
i8 1, label %cleanup2
]
await2.ready:
- call void @llvm.lifetime.end.p0(i64 300, ptr nonnull %b)
+ call void @llvm.lifetime.end.p0(ptr nonnull %b)
br label %cleanup2
cleanup1:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
br label %cleanup
cleanup2:
- call void @llvm.lifetime.end.p0(i64 300, ptr nonnull %b)
+ call void @llvm.lifetime.end.p0(ptr nonnull %b)
br label %cleanup
cleanup:
call ptr @llvm.coro.free(token %0, ptr %1)
@@ -70,5 +70,5 @@ declare ptr @llvm.coro.frame() #5
declare i8 @llvm.coro.suspend(token, i1) #3
declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2
declare i1 @llvm.coro.end(ptr, i1, token) #3
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
diff --git a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-05.ll b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-05.ll
index 6d93eea..c3da8e8 100644
--- a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-05.ll
+++ b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-05.ll
@@ -19,10 +19,10 @@ entry:
br label %init.ready
init.ready:
%1 = call noalias nonnull ptr @llvm.coro.begin(token %0, ptr null)
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %__promise)
+ call void @llvm.lifetime.start.p0(ptr nonnull %__promise)
br i1 %cond, label %if.then, label %if.else
if.then:
- call void @llvm.lifetime.start.p0(i64 500, ptr nonnull %a)
+ call void @llvm.lifetime.start.p0(ptr nonnull %a)
call void @consume(ptr nonnull %a)
%save = call token @llvm.coro.save(ptr null)
%suspend = call i8 @llvm.coro.suspend(token %save, i1 false)
@@ -31,10 +31,10 @@ if.then:
i8 1, label %cleanup1
]
await.ready:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
br label %cleanup1
if.else:
- call void @llvm.lifetime.start.p0(i64 400, ptr nonnull %b)
+ call void @llvm.lifetime.start.p0(ptr nonnull %b)
call void @consume.2(ptr nonnull %b)
%save2 = call token @llvm.coro.save(ptr null)
%suspend2 = call i8 @llvm.coro.suspend(token %save2, i1 false)
@@ -43,13 +43,13 @@ if.else:
i8 1, label %cleanup2
]
await2.ready:
- call void @llvm.lifetime.end.p0(i64 400, ptr nonnull %b)
+ call void @llvm.lifetime.end.p0(ptr nonnull %b)
br label %cleanup2
cleanup1:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
br label %cleanup
cleanup2:
- call void @llvm.lifetime.end.p0(i64 400, ptr nonnull %b)
+ call void @llvm.lifetime.end.p0(ptr nonnull %b)
br label %cleanup
cleanup:
call ptr @llvm.coro.free(token %0, ptr %1)
@@ -70,5 +70,5 @@ declare ptr @llvm.coro.frame() #5
declare i8 @llvm.coro.suspend(token, i1) #3
declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2
declare i1 @llvm.coro.end(ptr, i1, token) #3
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
diff --git a/llvm/test/Transforms/Coroutines/coro-lifetime-end.ll b/llvm/test/Transforms/Coroutines/coro-lifetime-end.ll
index 8d0e772..df2ed7e 100644
--- a/llvm/test/Transforms/Coroutines/coro-lifetime-end.ll
+++ b/llvm/test/Transforms/Coroutines/coro-lifetime-end.ll
@@ -31,7 +31,7 @@ entry:
%alloc = call ptr @malloc(i64 16) #3
%vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
- call void @llvm.lifetime.start.p0(i64 100, ptr %testval)
+ call void @llvm.lifetime.start.p0(ptr %testval)
call void @consume.i8.array(ptr %testval)
%save = call token @llvm.coro.save(ptr null)
@@ -68,7 +68,7 @@ entry:
%alloc = call ptr @malloc(i64 16) #3
%vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
- call void @llvm.lifetime.start.p0(i64 100, ptr %testval)
+ call void @llvm.lifetime.start.p0(ptr %testval)
call void @consume.i8.array(ptr %testval)
%save = call token @llvm.coro.save(ptr null)
@@ -81,7 +81,7 @@ await.ready:
br label %exit
exit:
call i1 @llvm.coro.end(ptr null, i1 false, token none)
- call void @llvm.lifetime.end.p0(i64 100, ptr %testval)
+ call void @llvm.lifetime.end.p0(ptr %testval)
ret void
}
@@ -107,7 +107,7 @@ entry:
%alloc = call ptr @malloc(i64 16) #3
%vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
- call void @llvm.lifetime.start.p0(i64 100, ptr %testval)
+ call void @llvm.lifetime.start.p0(ptr %testval)
call void @consume.i8.array(ptr %testval)
%0 = load i8, ptr @testbool, align 1
@@ -115,7 +115,7 @@ entry:
br i1 %tobool, label %if.then, label %if.end
if.then:
- call void @llvm.lifetime.end.p0(i64 100, ptr %testval)
+ call void @llvm.lifetime.end.p0(ptr %testval)
br label %if.end
if.end:
@@ -138,5 +138,5 @@ declare ptr @llvm.coro.begin(token, ptr writeonly) #3
declare ptr @llvm.coro.frame() #5
declare i8 @llvm.coro.suspend(token, i1) #3
declare i1 @llvm.coro.end(ptr, i1, token) #3
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
diff --git a/llvm/test/Transforms/Coroutines/coro-only-destroy-when-complete.ll b/llvm/test/Transforms/Coroutines/coro-only-destroy-when-complete.ll
index 3f0899a..c3d0fb1 100644
--- a/llvm/test/Transforms/Coroutines/coro-only-destroy-when-complete.ll
+++ b/llvm/test/Transforms/Coroutines/coro-only-destroy-when-complete.ll
@@ -19,7 +19,7 @@ coro.alloc: ; preds = %entry
init.suspend: ; preds = %entry, %coro.alloc
%3 = phi ptr [ null, %entry ], [ %call, %coro.alloc ]
%4 = call ptr @llvm.coro.begin(token %0, ptr %3) #12
- call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %__promise) #3
+ call void @llvm.lifetime.start.p0(ptr nonnull %__promise) #3
store ptr null, ptr %__promise, align 8
%5 = call token @llvm.coro.save(ptr null)
%6 = call i8 @llvm.coro.suspend(token %5, i1 false)
@@ -80,7 +80,7 @@ cleanup3:
br label %cleanup62
cleanup62: ; preds = %await2.suspend, %await.suspend, %init.suspend, %final.suspend
- call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %__promise) #3
+ call void @llvm.lifetime.end.p0(ptr nonnull %__promise) #3
%18 = call ptr @llvm.coro.free(token %0, ptr %4)
%.not = icmp eq ptr %18, null
br i1 %.not, label %coro.ret, label %coro.free
@@ -99,9 +99,9 @@ declare i1 @llvm.coro.alloc(token) #3
declare dso_local noundef nonnull ptr @_Znwm(i64 noundef) local_unnamed_addr #4
declare i64 @llvm.coro.size.i64() #5
declare ptr @llvm.coro.begin(token, ptr writeonly) #3
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #6
+declare void @llvm.lifetime.start.p0(ptr nocapture) #6
declare token @llvm.coro.save(ptr) #7
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #6
+declare void @llvm.lifetime.end.p0(ptr nocapture) #6
declare i8 @llvm.coro.suspend(token, i1) #3
declare ptr @_Z5Innerv() local_unnamed_addr
declare dso_local void @_ZdlPv(ptr noundef) local_unnamed_addr #8
diff --git a/llvm/test/Transforms/Coroutines/coro-retcon-unreachable.ll b/llvm/test/Transforms/Coroutines/coro-retcon-unreachable.ll
index 8ed0384..31839aa 100644
--- a/llvm/test/Transforms/Coroutines/coro-retcon-unreachable.ll
+++ b/llvm/test/Transforms/Coroutines/coro-retcon-unreachable.ll
@@ -37,9 +37,9 @@ declare token @llvm.coro.id.retcon.once(i32, i32, ptr, ptr, ptr, ptr) #5
declare ptr @llvm.coro.begin(token, ptr writeonly) #5
declare token @llvm.coro.alloca.alloc.i64(i64, i32) #5
declare ptr @llvm.coro.alloca.get(token) #5
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #6
+declare void @llvm.lifetime.start.p0(ptr nocapture) #6
declare i1 @llvm.coro.suspend.retcon.i1(...) #5
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #6
+declare void @llvm.lifetime.end.p0(ptr nocapture) #6
declare void @llvm.coro.alloca.free(token) #5
declare i1 @llvm.coro.end(ptr, i1, token) #5
diff --git a/llvm/test/Transforms/Coroutines/coro-split-02.ll b/llvm/test/Transforms/Coroutines/coro-split-02.ll
index 31e8e81..c487ab1 100644
--- a/llvm/test/Transforms/Coroutines/coro-split-02.ll
+++ b/llvm/test/Transforms/Coroutines/coro-split-02.ll
@@ -27,10 +27,10 @@ entry:
await.ready:
%StrayCoroSave = call token @llvm.coro.save(ptr null)
%val = load i32, ptr %ref.tmp7
- call void @llvm.lifetime.start.p0(i64 4, ptr %testval)
+ call void @llvm.lifetime.start.p0(ptr %testval)
%test = load i32, ptr %testval
call void @print(i32 %test)
- call void @llvm.lifetime.end.p0(i64 4, ptr %testval)
+ call void @llvm.lifetime.end.p0(ptr %testval)
call void @print(i32 %val)
br label %exit
exit:
@@ -42,10 +42,10 @@ exit:
; CHECK: %testval = alloca i32
; CHECK-NOT: call token @llvm.coro.save(ptr null)
; CHECK: %val = load i32, ptr %ref.tmp7
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr %testval)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %testval)
; CHECK-NEXT: %test = load i32, ptr %testval
; CHECK-NEXT: call void @print(i32 %test)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr %testval)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %testval)
; CHECK-NEXT: call void @print(i32 %val)
; CHECK-NEXT: ret void
@@ -61,5 +61,5 @@ declare i8 @llvm.coro.suspend(token, i1) #3
declare void @"\01??3@YAXPEAX@Z"(ptr) local_unnamed_addr #10
declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2
declare i1 @llvm.coro.end(ptr, i1, token) #3
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
diff --git a/llvm/test/Transforms/Coroutines/coro-split-dbg.ll b/llvm/test/Transforms/Coroutines/coro-split-dbg.ll
index 184d4a5..9a9e3c3 100644
--- a/llvm/test/Transforms/Coroutines/coro-split-dbg.ll
+++ b/llvm/test/Transforms/Coroutines/coro-split-dbg.ll
@@ -43,7 +43,7 @@ coro_Suspend: ; preds = %for.cond, %if.then,
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
; Function Attrs: argmemonly nounwind readonly
declare token @llvm.coro.id(i32, ptr readnone, ptr nocapture readonly, ptr) #5
@@ -54,7 +54,7 @@ declare i64 @llvm.coro.size.i64() #1
declare ptr @llvm.coro.begin(token, ptr writeonly) #7
declare token @llvm.coro.save(ptr) #7
declare i8 @llvm.coro.suspend(token, i1) #7
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
declare ptr @llvm.coro.free(token, ptr nocapture readonly) #5
declare void @free(ptr nocapture) local_unnamed_addr #6
declare i1 @llvm.coro.end(ptr, i1, token) #7
diff --git a/llvm/test/Transforms/Coroutines/coro-split-musttail-chain-pgo-counter-promo.ll b/llvm/test/Transforms/Coroutines/coro-split-musttail-chain-pgo-counter-promo.ll
index e2ed205..e661932 100644
--- a/llvm/test/Transforms/Coroutines/coro-split-musttail-chain-pgo-counter-promo.ll
+++ b/llvm/test/Transforms/Coroutines/coro-split-musttail-chain-pgo-counter-promo.ll
@@ -33,8 +33,8 @@ declare ptr @llvm.coro.subfn.addr(ptr nocapture readonly, i8)
declare i8 @llvm.coro.suspend(token, i1)
declare void @llvm.instrprof.increment(ptr, i64, i32, i32)
declare void @llvm.instrprof.value.profile(ptr, i64, i64, i32, i32)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
; Function Attrs: noinline nounwind presplitcoroutine uwtable
define ptr @f(i32 %0) presplitcoroutine align 32 {
@@ -56,11 +56,11 @@ define ptr @f(i32 %0) presplitcoroutine align 32 {
12: ; preds = %8, %1
%13 = phi ptr [ null, %1 ], [ %11, %8 ]
%14 = call ptr @llvm.coro.begin(token %6, ptr %13) #28
- call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %3) #9
+ call void @llvm.lifetime.start.p0(ptr nonnull %3) #9
store ptr null, ptr %3, align 16
%15 = getelementptr inbounds {ptr, i64}, ptr %3, i64 0, i32 1
store i64 0, ptr %15, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %4) #9
+ call void @llvm.lifetime.start.p0(ptr nonnull %4) #9
store ptr %3, ptr %4, align 8
%16 = call token @llvm.coro.save(ptr null)
call void @await_suspend(ptr noundef nonnull align 1 dereferenceable(1) %4, ptr %14) #9
@@ -71,7 +71,7 @@ define ptr @f(i32 %0) presplitcoroutine align 32 {
]
18: ; preds = %12
- call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %4) #9
+ call void @llvm.lifetime.end.p0(ptr nonnull %4) #9
%19 = icmp slt i32 0, %0
br i1 %19, label %20, label %36
@@ -79,12 +79,12 @@ define ptr @f(i32 %0) presplitcoroutine align 32 {
br label %22
21: ; preds = %12
- call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %4) #9
+ call void @llvm.lifetime.end.p0(ptr nonnull %4) #9
br label %54
22: ; preds = %20, %31
%23 = phi i32 [ 0, %20 ], [ %32, %31 ]
- call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %5) #9
+ call void @llvm.lifetime.start.p0(ptr nonnull %5) #9
%24 = call ptr @other_coro()
store ptr %3, ptr %5, align 8
%25 = getelementptr inbounds { ptr, ptr }, ptr %5, i64 0, i32 1
@@ -98,13 +98,13 @@ define ptr @f(i32 %0) presplitcoroutine align 32 {
]
31: ; preds = %22
- call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %5) #9
+ call void @llvm.lifetime.end.p0(ptr nonnull %5) #9
%32 = add nuw nsw i32 %23, 1
%33 = icmp slt i32 %32, %0
br i1 %33, label %22, label %35, !llvm.loop !0
34: ; preds = %22
- call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %5) #9
+ call void @llvm.lifetime.end.p0(ptr nonnull %5) #9
br label %54
35: ; preds = %31
@@ -142,11 +142,11 @@ define ptr @f(i32 %0) presplitcoroutine align 32 {
br label %54
53: ; preds = %47
- call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %2) #9
+ call void @llvm.lifetime.start.p0(ptr nonnull %2) #9
unreachable
54: ; preds = %52, %34, %21
- call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %3) #9
+ call void @llvm.lifetime.end.p0(ptr nonnull %3) #9
%55 = call ptr @llvm.coro.free(token %6, ptr %14)
%56 = icmp eq ptr %55, null
br i1 %56, label %61, label %57
diff --git a/llvm/test/Transforms/Coroutines/coro-split-musttail5.ll b/llvm/test/Transforms/Coroutines/coro-split-musttail5.ll
index 7c1a13f..b256175 100644
--- a/llvm/test/Transforms/Coroutines/coro-split-musttail5.ll
+++ b/llvm/test/Transforms/Coroutines/coro-split-musttail5.ll
@@ -9,7 +9,7 @@ entry:
%id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
%alloc = call ptr @malloc(i64 16) #3
%alloc.var = alloca i8
- call void @llvm.lifetime.start.p0(i64 1, ptr %alloc.var)
+ call void @llvm.lifetime.start.p0(ptr %alloc.var)
%vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
%save = call token @llvm.coro.save(ptr null)
@@ -29,7 +29,7 @@ await.suspend:
]
await.ready:
call void @consume(ptr %alloc.var)
- call void @llvm.lifetime.end.p0(i64 1, ptr %alloc.var)
+ call void @llvm.lifetime.end.p0(ptr %alloc.var)
br label %exit
exit:
call i1 @llvm.coro.end(ptr null, i1 false, token none)
@@ -53,8 +53,8 @@ declare i1 @llvm.coro.end(ptr, i1, token) #2
declare ptr @llvm.coro.subfn.addr(ptr nocapture readonly, i8) #1
declare ptr @malloc(i64)
declare void @consume(ptr)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare ptr @await_suspend_function(ptr %awaiter, ptr %hdl)
attributes #0 = { presplitcoroutine }
diff --git a/llvm/test/Transforms/Coroutines/coro-split-musttail6.ll b/llvm/test/Transforms/Coroutines/coro-split-musttail6.ll
index e05169a..99174ff 100644
--- a/llvm/test/Transforms/Coroutines/coro-split-musttail6.ll
+++ b/llvm/test/Transforms/Coroutines/coro-split-musttail6.ll
@@ -13,7 +13,7 @@ entry:
%id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
%alloc = call ptr @malloc(i64 16) #3
%alloc.var = alloca i64
- call void @llvm.lifetime.start.p0(i64 1, ptr %alloc.var)
+ call void @llvm.lifetime.start.p0(ptr %alloc.var)
%vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
%save = call token @llvm.coro.save(ptr null)
@@ -33,7 +33,7 @@ await.suspend:
]
await.ready:
call void @consume(ptr %alloc.var)
- call void @llvm.lifetime.end.p0(i64 1, ptr %alloc.var)
+ call void @llvm.lifetime.end.p0(ptr %alloc.var)
br label %exit
exit:
call i1 @llvm.coro.end(ptr null, i1 false, token none)
@@ -51,7 +51,7 @@ entry:
%id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
%alloc = call ptr @malloc(i64 16) #3
%alloc.var = alloca i64
- call void @llvm.lifetime.start.p0(i64 1, ptr %alloc.var)
+ call void @llvm.lifetime.start.p0(ptr %alloc.var)
%vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
%save = call token @llvm.coro.save(ptr null)
@@ -71,7 +71,7 @@ await.suspend:
]
await.ready:
call void @consume(ptr %alloc.var)
- call void @llvm.lifetime.end.p0(i64 1, ptr %alloc.var)
+ call void @llvm.lifetime.end.p0(ptr %alloc.var)
br label %exit
cleanup:
@@ -106,8 +106,8 @@ declare ptr @llvm.coro.subfn.addr(ptr nocapture readonly, i8) #1
declare ptr @malloc(i64)
declare void @delete(ptr nonnull) #2
declare void @consume(ptr)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare ptr @await_suspend_function(ptr %awaiter, ptr %hdl)
attributes #0 = { presplitcoroutine }
diff --git a/llvm/test/Transforms/Coroutines/coro-split-musttail7.ll b/llvm/test/Transforms/Coroutines/coro-split-musttail7.ll
index 8ceb0dd..91f8543 100644
--- a/llvm/test/Transforms/Coroutines/coro-split-musttail7.ll
+++ b/llvm/test/Transforms/Coroutines/coro-split-musttail7.ll
@@ -11,7 +11,7 @@ entry:
%id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
%alloc = call ptr @malloc(i64 16) #3
%alloc.var = alloca i64
- call void @llvm.lifetime.start.p0(i64 1, ptr %alloc.var)
+ call void @llvm.lifetime.start.p0(ptr %alloc.var)
%vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
%save = call token @llvm.coro.save(ptr null)
@@ -36,7 +36,7 @@ await.suspend:
]
await.ready:
call void @consume(ptr %alloc.var)
- call void @llvm.lifetime.end.p0(i64 1, ptr %alloc.var)
+ call void @llvm.lifetime.end.p0(ptr %alloc.var)
br label %exit
exit:
%result = phi i64 [0, %entry], [0, %entry], [%foo, %await.suspend], [%foo, %await.suspend], [%foo, %await.ready]
@@ -57,7 +57,7 @@ entry:
%id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
%alloc = call ptr @malloc(i64 16) #3
%alloc.var = alloca i64
- call void @llvm.lifetime.start.p0(i64 1, ptr %alloc.var)
+ call void @llvm.lifetime.start.p0(ptr %alloc.var)
%vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
%save = call token @llvm.coro.save(ptr null)
@@ -77,7 +77,7 @@ await.suspend:
]
await.ready:
call void @consume(ptr %alloc.var)
- call void @llvm.lifetime.end.p0(i64 1, ptr %alloc.var)
+ call void @llvm.lifetime.end.p0(ptr %alloc.var)
br label %exit
cleanup:
@@ -114,8 +114,8 @@ declare ptr @llvm.coro.subfn.addr(ptr nocapture readonly, i8) #1
declare ptr @malloc(i64)
declare void @delete(ptr nonnull) #2
declare void @consume(ptr)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare ptr @await_suspend_function(ptr %awaiter, ptr %hdl)
attributes #0 = { presplitcoroutine }
diff --git a/llvm/test/Transforms/Coroutines/coro-split-no-lifetime.ll b/llvm/test/Transforms/Coroutines/coro-split-no-lifetime.ll
index 157807d..12d6564 100644
--- a/llvm/test/Transforms/Coroutines/coro-split-no-lifetime.ll
+++ b/llvm/test/Transforms/Coroutines/coro-split-no-lifetime.ll
@@ -12,11 +12,11 @@ entry:
br i1 %n, label %flag_true, label %flag_false
flag_true:
- call void @llvm.lifetime.start.p0(i64 8, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
br label %merge
flag_false:
- call void @llvm.lifetime.start.p0(i64 8, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
br label %merge
merge:
@@ -27,8 +27,8 @@ merge:
i8 1, label %cleanup]
resume:
call void @print(ptr %phi)
- call void @llvm.lifetime.end.p0(i64 8, ptr %x)
- call void @llvm.lifetime.end.p0(i64 8, ptr %y)
+ call void @llvm.lifetime.end.p0(ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %y)
br label %cleanup
cleanup:
@@ -54,8 +54,8 @@ declare i1 @llvm.coro.alloc(token)
declare ptr @llvm.coro.begin(token, ptr)
declare i1 @llvm.coro.end(ptr, i1, token)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @print(ptr)
declare noalias ptr @malloc(i32)
diff --git a/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-01.ll b/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-01.ll
index 1d0cf94..a5a2bcf 100644
--- a/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-01.ll
+++ b/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-01.ll
@@ -17,7 +17,7 @@ entry:
%ref.tmp7 = alloca %"struct.lean_future<int>::Awaiter", align 8
%testval = alloca i32
; lifetime of %testval starts here, but not used until await.ready.
- call void @llvm.lifetime.start.p0(i64 4, ptr %testval)
+ call void @llvm.lifetime.start.p0(ptr %testval)
%id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
%alloc = call ptr @malloc(i64 16) #3
%vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
@@ -33,7 +33,7 @@ await.ready:
%val = load i32, ptr %ref.tmp7
%test = load i32, ptr %testval
call void @print(i32 %test)
- call void @llvm.lifetime.end.p0(i64 4, ptr %testval)
+ call void @llvm.lifetime.end.p0(ptr %testval)
call void @print(i32 %val)
br label %exit
exit:
@@ -43,11 +43,11 @@ exit:
; CHECK-LABEL: @a.resume(
; CHECK: %testval = alloca i32, align 4
-; CHECK: call void @llvm.lifetime.start.p0(i64 4, ptr %testval)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %testval)
; CHECK-NEXT: %val = load i32, ptr %ref.tmp7
; CHECK-NEXT: %test = load i32, ptr %testval
; CHECK-NEXT: call void @print(i32 %test)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr %testval)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %testval)
; CHECK-NEXT: call void @print(i32 %val)
; CHECK-NEXT: ret void
@@ -56,7 +56,7 @@ entry:
%ref.tmp7 = alloca %"struct.lean_future<int>::Awaiter", align 8
%testval = alloca i32
; lifetime of %testval starts here, but not used until await.ready.
- call void @llvm.lifetime.start.p0(i64 4, ptr %testval)
+ call void @llvm.lifetime.start.p0(ptr %testval)
%id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
%alloc = call ptr @malloc(i64 16) #3
%vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
@@ -72,7 +72,7 @@ await.ready:
%val = load i32, ptr %ref.tmp7
%test = load i32, ptr %testval
call void @print(i32 %test)
- call void @llvm.lifetime.end.p0(i64 4, ptr %testval)
+ call void @llvm.lifetime.end.p0(ptr %testval)
call void @print(i32 %val)
br label %exit
exit:
@@ -92,5 +92,5 @@ declare i8 @llvm.coro.suspend(token, i1) #3
declare void @"\01??3@YAXPEAX@Z"(ptr) local_unnamed_addr #10
declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2
declare i1 @llvm.coro.end(ptr, i1, token) #3
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
diff --git a/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-02.ll b/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-02.ll
index 38a2a33..abc91c3 100644
--- a/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-02.ll
+++ b/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-02.ll
@@ -15,7 +15,7 @@ entry:
%ref.tmp7 = alloca %"struct.lean_future<int>::Awaiter", align 8
%testval = alloca i32
; lifetime of %testval starts here, but not used until await.ready.
- call void @llvm.lifetime.start.p0(i64 4, ptr %testval)
+ call void @llvm.lifetime.start.p0(ptr %testval)
%id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
%alloc = call ptr @malloc(i64 16) #3
%vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
@@ -44,7 +44,7 @@ await.ready:
after.await:
%test1 = load i32, ptr %testval
call void @print(i32 %test1)
- call void @llvm.lifetime.end.p0(i64 4, ptr %testval)
+ call void @llvm.lifetime.end.p0(ptr %testval)
br label %exit
exit:
@@ -54,7 +54,7 @@ exit:
; CHECK-LABEL: @a.resume(
; CHECK: %[[VAL:testval.+]] = getelementptr inbounds %a.Frame
-; CHECK-NOT: call void @llvm.lifetime.start.p0(i64 4, ptr %{{.*}})
+; CHECK-NOT: call void @llvm.lifetime.start.p0(ptr %{{.*}})
; CHECK: %test = load i32, ptr %[[VAL]]
declare token @llvm.coro.id(i32, ptr readnone, ptr nocapture readonly, ptr)
@@ -69,5 +69,5 @@ declare i8 @llvm.coro.suspend(token, i1) #3
declare void @"\01??3@YAXPEAX@Z"(ptr) local_unnamed_addr #10
declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2
declare i1 @llvm.coro.end(ptr, i1, token) #3
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
diff --git a/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-03.ll b/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-03.ll
index de377a6..efd1adf 100644
--- a/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-03.ll
+++ b/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-03.ll
@@ -17,7 +17,7 @@ entry:
%ref.tmp7 = alloca %"struct.lean_future<int>::Awaiter", align 8
%testval = alloca %i8.array
; lifetime of %testval starts here, but not used until await.ready.
- call void @llvm.lifetime.start.p0(i64 100, ptr %testval)
+ call void @llvm.lifetime.start.p0(ptr %testval)
%id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
%alloc = call ptr @malloc(i64 16) #3
%vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
@@ -32,7 +32,7 @@ await.ready:
%StrayCoroSave = call token @llvm.coro.save(ptr null)
%val = load i32, ptr %ref.tmp7
call void @consume.i8.array(ptr %testval)
- call void @llvm.lifetime.end.p0(i64 100, ptr %testval)
+ call void @llvm.lifetime.end.p0(ptr %testval)
call void @print(i32 %val)
br label %exit
exit:
@@ -41,10 +41,10 @@ exit:
}
; CHECK-LABEL: @a.gep.resume(
; CHECK: %testval = alloca %i8.array
-; CHECK: call void @llvm.lifetime.start.p0(i64 100, ptr %testval)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %testval)
; CHECK-NEXT: %val = load i32, ptr %ref.tmp7
; CHECK-NEXT: call void @consume.i8.array(ptr %testval)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 100, ptr %testval)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %testval)
; CHECK-NEXT: call void @print(i32 %val)
; CHECK-NEXT: ret void
@@ -60,5 +60,5 @@ declare i8 @llvm.coro.suspend(token, i1) #3
declare void @"\01??3@YAXPEAX@Z"(ptr) local_unnamed_addr #10
declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2
declare i1 @llvm.coro.end(ptr, i1, token) #3
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
diff --git a/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-04.ll b/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-04.ll
index 8210455..af5aa8a 100644
--- a/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-04.ll
+++ b/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-04.ll
@@ -15,7 +15,7 @@ entry:
%ref.tmp7 = alloca %"struct.lean_future<int>::Awaiter", align 8
%testval = alloca i8
; lifetime of %testval starts here, but not used until await.ready.
- call void @llvm.lifetime.start.p0(i64 1, ptr %testval)
+ call void @llvm.lifetime.start.p0(ptr %testval)
%id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
%alloc = call ptr @malloc(i64 16) #3
%vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
@@ -31,7 +31,7 @@ await.ready:
%val = load i32, ptr %ref.tmp7
%test = load i8, ptr %testval
call void @consume.i8(i8 %test)
- call void @llvm.lifetime.end.p0(i64 1, ptr %testval)
+ call void @llvm.lifetime.end.p0(ptr %testval)
call void @print(i32 %val)
br label %exit
exit:
@@ -41,11 +41,11 @@ exit:
; CHECK-LABEL: @a.resume(
; CHECK: %testval = alloca i8, align 1
-; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %testval)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %testval)
; CHECK-NEXT: %val = load i32, ptr %ref.tmp7
; CHECK-NEXT: %test = load i8, ptr %testval
; CHECK-NEXT: call void @consume.i8(i8 %test)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr %testval)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %testval)
; CHECK-NEXT: call void @print(i32 %val)
; CHECK-NEXT: ret void
@@ -62,5 +62,5 @@ declare i8 @llvm.coro.suspend(token, i1) #3
declare void @"\01??3@YAXPEAX@Z"(ptr) local_unnamed_addr #10
declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2
declare i1 @llvm.coro.end(ptr, i1, token) #3
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
diff --git a/llvm/test/Transforms/Coroutines/coro-transform-must-elide.ll b/llvm/test/Transforms/Coroutines/coro-transform-must-elide.ll
index d2c4f57..4eec7ed 100644
--- a/llvm/test/Transforms/Coroutines/coro-transform-must-elide.ll
+++ b/llvm/test/Transforms/Coroutines/coro-transform-must-elide.ll
@@ -61,11 +61,11 @@ entry:
ret ptr %task
; CHECK: %[[TASK:.+]] = alloca %struct.Task, align 8
; CHECK-NEXT: %[[FRAME:.+]] = alloca [32 x i8], align 8
- ; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr %[[TASK]])
+ ; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %[[TASK]])
; CHECK-NEXT: %[[ID:.+]] = call token @llvm.coro.id(i32 0, ptr null, ptr @callee, ptr @callee.resumers)
; CHECK-NEXT: %[[HDL:.+]] = call ptr @llvm.coro.begin(token %[[ID]], ptr null)
; CHECK-NEXT: store ptr %[[HDL]], ptr %[[TASK]], align 8
- ; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr %[[TASK]])
+ ; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %[[TASK]])
; CHECK-NEXT: ret ptr %[[TASK]]
}
diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/alloca.ll b/llvm/test/Transforms/CorrelatedValuePropagation/alloca.ll
index 9e47bd2..390e96e 100644
--- a/llvm/test/Transforms/CorrelatedValuePropagation/alloca.ll
+++ b/llvm/test/Transforms/CorrelatedValuePropagation/alloca.ll
@@ -12,14 +12,14 @@ target triple = "x86_64-unknown-linux-gnu"
@.str = private unnamed_addr constant [8 x i8] c"a = %l\0A\00", align 1
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @hoo(ptr)
declare i32 @printf(ptr nocapture readonly, ...)
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
define void @goo(i32 %N, ptr %b) {
entry:
@@ -32,12 +32,12 @@ for.cond: ; preds = %for.body, %entry
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- call void @llvm.lifetime.start.p0(i64 8, ptr %a.i)
+ call void @llvm.lifetime.start.p0(ptr %a.i)
call void @hoo(ptr %a.i)
call void @hoo(ptr %b)
%tmp1 = load volatile i64, ptr %a.i, align 8
%call.i = call i32 (ptr, ...) @printf(ptr @.str, i64 %tmp1)
- call void @llvm.lifetime.end.p0(i64 8, ptr %a.i)
+ call void @llvm.lifetime.end.p0(ptr %a.i)
%inc = add nsw i32 %i.0, 1
br label %for.cond
diff --git a/llvm/test/Transforms/DCE/basic.ll b/llvm/test/Transforms/DCE/basic.ll
index 1a3b12e..28772f0 100644
--- a/llvm/test/Transforms/DCE/basic.ll
+++ b/llvm/test/Transforms/DCE/basic.ll
@@ -10,8 +10,8 @@ define void @test() {
ret void
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind
+declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind
; CHECK-LABEL: @test_lifetime_alloca
define i32 @test_lifetime_alloca() {
@@ -21,8 +21,8 @@ define i32 @test_lifetime_alloca() {
; CHECK-NOT: llvm.lifetime.start
; CHECK-NOT: llvm.lifetime.end
%i = alloca i8, align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %i)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %i)
+ call void @llvm.lifetime.start.p0(ptr %i)
+ call void @llvm.lifetime.end.p0(ptr %i)
ret i32 0
}
diff --git a/llvm/test/Transforms/DeadStoreElimination/batchaa-caching-new-pointers.ll b/llvm/test/Transforms/DeadStoreElimination/batchaa-caching-new-pointers.ll
index ee9bd69..4ec69bc 100644
--- a/llvm/test/Transforms/DeadStoreElimination/batchaa-caching-new-pointers.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/batchaa-caching-new-pointers.ll
@@ -12,17 +12,17 @@ define ptr @foo(ptr noundef %ptr) {
; CHECK-LABEL: define ptr @foo(
; CHECK-SAME: ptr noundef [[PTR:%.*]]) {
; CHECK-NEXT: [[STRUCT_ALLOCA:%.*]] = alloca [[STRUCT_TYPE:%.*]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 56, ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6:[0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6:[0-9]+]]
; CHECK-NEXT: [[STRUCT_BYTE_8:%.*]] = getelementptr inbounds i8, ptr [[STRUCT_ALLOCA]], i64 8
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[STRUCT_BYTE_8]], i64 4
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr noundef nonnull align 4 [[TMP1]], i8 42, i64 4, i1 false)
; CHECK-NEXT: store i32 43, ptr [[STRUCT_BYTE_8]], align 4
; CHECK-NEXT: [[RET:%.*]] = load ptr, ptr [[STRUCT_BYTE_8]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 56, ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6]]
; CHECK-NEXT: ret ptr [[RET]]
;
%struct.alloca = alloca %struct.type, align 8
- call void @llvm.lifetime.start.p0(i64 56, ptr nonnull %struct.alloca) nounwind
+ call void @llvm.lifetime.start.p0(ptr nonnull %struct.alloca) nounwind
%struct.byte.8 = getelementptr inbounds i8, ptr %struct.alloca, i64 8
; Set %struct.alloca[8, 16) to 42.
call void @llvm.memset.p0.i64(ptr noundef nonnull align 4 %struct.byte.8, i8 42, i64 8, i1 false)
@@ -33,7 +33,7 @@ define ptr @foo(ptr noundef %ptr) {
store i32 44, ptr %struct.byte.4, align 4
; Return %struct.alloca[8, 16).
%ret = load ptr, ptr %struct.byte.8
- call void @llvm.lifetime.end.p0(i64 56, ptr nonnull %struct.alloca) nounwind
+ call void @llvm.lifetime.end.p0(ptr nonnull %struct.alloca) nounwind
ret ptr %ret
}
@@ -44,7 +44,7 @@ define ptr @foo(ptr noundef %ptr) {
define ptr @foo_with_removable_malloc() {
; CHECK-LABEL: define ptr @foo_with_removable_malloc() {
; CHECK-NEXT: [[STRUCT_ALLOCA:%.*]] = alloca [[STRUCT_TYPE:%.*]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 56, ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6]]
; CHECK-NEXT: [[STRUCT_BYTE_4:%.*]] = getelementptr inbounds i8, ptr [[STRUCT_ALLOCA]], i64 4
; CHECK-NEXT: [[STRUCT_BYTE_8:%.*]] = getelementptr inbounds i8, ptr [[STRUCT_ALLOCA]], i64 8
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[STRUCT_BYTE_8]], i64 4
@@ -53,11 +53,11 @@ define ptr @foo_with_removable_malloc() {
; CHECK-NEXT: [[RET:%.*]] = load ptr, ptr [[STRUCT_BYTE_8]], align 8
; CHECK-NEXT: call void @readnone(ptr [[STRUCT_BYTE_4]])
; CHECK-NEXT: call void @readnone(ptr [[STRUCT_BYTE_8]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 56, ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6]]
; CHECK-NEXT: ret ptr [[RET]]
;
%struct.alloca = alloca %struct.type, align 8
- call void @llvm.lifetime.start.p0(i64 56, ptr nonnull %struct.alloca) nounwind
+ call void @llvm.lifetime.start.p0(ptr nonnull %struct.alloca) nounwind
%struct.byte.4 = getelementptr inbounds i8, ptr %struct.alloca, i64 4
%struct.byte.8 = getelementptr inbounds i8, ptr %struct.alloca, i64 8
@@ -79,7 +79,7 @@ define ptr @foo_with_removable_malloc() {
%ret = load ptr, ptr %struct.byte.8
call void @readnone(ptr %struct.byte.4);
call void @readnone(ptr %struct.byte.8);
- call void @llvm.lifetime.end.p0(i64 56, ptr nonnull %struct.alloca) nounwind
+ call void @llvm.lifetime.end.p0(ptr nonnull %struct.alloca) nounwind
ret ptr %ret
}
@@ -87,7 +87,7 @@ define ptr @foo_with_removable_malloc_free() {
; CHECK-LABEL: define ptr @foo_with_removable_malloc_free() {
; CHECK-NEXT: [[STRUCT_ALLOCA:%.*]] = alloca [[STRUCT_TYPE:%.*]], align 8
; CHECK-NEXT: [[M1:%.*]] = tail call ptr @malloc(i64 4)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 56, ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6]]
; CHECK-NEXT: [[STRUCT_BYTE_4:%.*]] = getelementptr inbounds i8, ptr [[STRUCT_ALLOCA]], i64 4
; CHECK-NEXT: [[STRUCT_BYTE_8:%.*]] = getelementptr inbounds i8, ptr [[STRUCT_ALLOCA]], i64 8
; CHECK-NEXT: [[M2:%.*]] = tail call ptr @malloc(i64 4)
@@ -99,12 +99,12 @@ define ptr @foo_with_removable_malloc_free() {
; CHECK-NEXT: [[RET:%.*]] = load ptr, ptr [[STRUCT_BYTE_8]], align 8
; CHECK-NEXT: call void @readnone(ptr [[STRUCT_BYTE_4]])
; CHECK-NEXT: call void @readnone(ptr [[STRUCT_BYTE_8]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 56, ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6]]
; CHECK-NEXT: ret ptr [[RET]]
;
%struct.alloca = alloca %struct.type, align 8
%m1 = tail call ptr @malloc(i64 4)
- call void @llvm.lifetime.start.p0(i64 56, ptr nonnull %struct.alloca) nounwind
+ call void @llvm.lifetime.start.p0(ptr nonnull %struct.alloca) nounwind
%struct.byte.4 = getelementptr inbounds i8, ptr %struct.alloca, i64 4
%struct.byte.8 = getelementptr inbounds i8, ptr %struct.alloca, i64 8
@@ -126,14 +126,14 @@ define ptr @foo_with_removable_malloc_free() {
%ret = load ptr, ptr %struct.byte.8
call void @readnone(ptr %struct.byte.4);
call void @readnone(ptr %struct.byte.8);
- call void @llvm.lifetime.end.p0(i64 56, ptr nonnull %struct.alloca) nounwind
+ call void @llvm.lifetime.end.p0(ptr nonnull %struct.alloca) nounwind
ret ptr %ret
}
define ptr @foo_with_malloc_to_calloc() {
; CHECK-LABEL: define ptr @foo_with_malloc_to_calloc() {
; CHECK-NEXT: [[STRUCT_ALLOCA:%.*]] = alloca [[STRUCT_TYPE:%.*]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 56, ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6]]
; CHECK-NEXT: [[STRUCT_BYTE_8:%.*]] = getelementptr inbounds i8, ptr [[STRUCT_ALLOCA]], i64 8
; CHECK-NEXT: [[STRUCT_BYTE_4:%.*]] = getelementptr inbounds i8, ptr [[STRUCT_ALLOCA]], i64 4
; CHECK-NEXT: [[CALLOC1:%.*]] = call ptr @calloc(i64 1, i64 4)
@@ -144,13 +144,13 @@ define ptr @foo_with_malloc_to_calloc() {
; CHECK-NEXT: [[RET:%.*]] = load ptr, ptr [[STRUCT_BYTE_8]], align 8
; CHECK-NEXT: call void @readnone(ptr [[STRUCT_BYTE_4]])
; CHECK-NEXT: call void @readnone(ptr [[STRUCT_BYTE_8]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 56, ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6]]
; CHECK-NEXT: call void @use(ptr [[CALLOC1]])
; CHECK-NEXT: call void @use(ptr [[CALLOC]])
; CHECK-NEXT: ret ptr [[RET]]
;
%struct.alloca = alloca %struct.type, align 8
- call void @llvm.lifetime.start.p0(i64 56, ptr nonnull %struct.alloca) nounwind
+ call void @llvm.lifetime.start.p0(ptr nonnull %struct.alloca) nounwind
%struct.byte.8 = getelementptr inbounds i8, ptr %struct.alloca, i64 8
%struct.byte.4 = getelementptr inbounds i8, ptr %struct.alloca, i64 4
@@ -172,15 +172,15 @@ define ptr @foo_with_malloc_to_calloc() {
%ret = load ptr, ptr %struct.byte.8
call void @readnone(ptr %struct.byte.4);
call void @readnone(ptr %struct.byte.8);
- call void @llvm.lifetime.end.p0(i64 56, ptr nonnull %struct.alloca) nounwind
+ call void @llvm.lifetime.end.p0(ptr nonnull %struct.alloca) nounwind
call void @use(ptr %m1)
call void @use(ptr %m2)
ret ptr %ret
}
declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare noalias ptr @malloc(i64) willreturn allockind("alloc,uninitialized") "alloc-family"="malloc"
declare void @readnone(ptr) readnone nounwind
diff --git a/llvm/test/Transforms/DeadStoreElimination/captures-before-load.ll b/llvm/test/Transforms/DeadStoreElimination/captures-before-load.ll
index 7d827fa..56c84c7 100644
--- a/llvm/test/Transforms/DeadStoreElimination/captures-before-load.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/captures-before-load.ll
@@ -865,7 +865,7 @@ exit:
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #0
+declare void @llvm.lifetime.start.p0(ptr nocapture) #0
declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) #1
declare void @use.i64(i64)
@@ -883,7 +883,7 @@ define i64 @test_a_not_captured_at_all(ptr %ptr, ptr %ptr.2, i1 %c) {
; CHECK-NEXT: call void @use.i64(i64 [[LV_2]])
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
; CHECK-NEXT: call void @clobber()
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr [[A]], i8 0, i64 8, i1 false)
; CHECK-NEXT: [[L:%.*]] = load i64, ptr [[A]], align 4
@@ -902,7 +902,7 @@ then:
br label %exit
exit:
- call void @llvm.lifetime.start.p0(i64 8, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
store i64 99, ptr %a
call void @clobber()
call void @llvm.memset.p0.i64(ptr %a, i8 0, i64 8, i1 false)
@@ -1112,7 +1112,7 @@ else:
declare void @capture_and_clobber_multiple(ptr, ptr)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
define i64 @earliest_escape_ptrtoint(ptr %p.1) {
; CHECK-LABEL: @earliest_escape_ptrtoint(
@@ -1122,7 +1122,7 @@ define i64 @earliest_escape_ptrtoint(ptr %p.1) {
; CHECK-NEXT: [[LV_1:%.*]] = load ptr, ptr [[P_1:%.*]], align 8
; CHECK-NEXT: [[LV_2:%.*]] = load i64, ptr [[LV_1]], align 4
; CHECK-NEXT: store ptr [[A_1]], ptr [[P_1]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[A_2]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_2]])
; CHECK-NEXT: ret i64 [[LV_2]]
;
entry:
@@ -1134,7 +1134,7 @@ entry:
store ptr %a.1, ptr %p.1, align 8
%int = ptrtoint ptr %a.2 to i64
store i64 %int , ptr %a.2, align 8
- call void @llvm.lifetime.end.p0(i64 8, ptr %a.2)
+ call void @llvm.lifetime.end.p0(ptr %a.2)
ret i64 %lv.2
}
diff --git a/llvm/test/Transforms/DeadStoreElimination/dominate.ll b/llvm/test/Transforms/DeadStoreElimination/dominate.ll
index 262d16e..7e3ddb3 100644
--- a/llvm/test/Transforms/DeadStoreElimination/dominate.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/dominate.ll
@@ -8,12 +8,12 @@ bb1:
br label %bb3
bb2:
- call void @llvm.lifetime.end.p0(i64 -1, ptr %memtmp3.i)
+ call void @llvm.lifetime.end.p0(ptr %memtmp3.i)
br label %bb3
bb3:
call void @bar()
- call void @llvm.lifetime.end.p0(i64 -1, ptr %memtmp3.i)
+ call void @llvm.lifetime.end.p0(ptr %memtmp3.i)
br label %bb4
bb4:
@@ -21,4 +21,4 @@ bb4:
}
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind
diff --git a/llvm/test/Transforms/DeadStoreElimination/libcalls.ll b/llvm/test/Transforms/DeadStoreElimination/libcalls.ll
index 27ad639..8225e14 100644
--- a/llvm/test/Transforms/DeadStoreElimination/libcalls.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/libcalls.ll
@@ -56,14 +56,14 @@ define void @test3(ptr %src) {
define void @test_strcat_with_lifetime(ptr %src) {
; CHECK-LABEL: @test_strcat_with_lifetime(
; CHECK-NEXT: [[B:%.*]] = alloca [16 x i8], align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[B]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[B]])
; CHECK-NEXT: ret void
;
%B = alloca [16 x i8]
- call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %B)
+ call void @llvm.lifetime.start.p0(ptr nonnull %B)
%call = call ptr @strcat(ptr %B, ptr %src)
- call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %B)
+ call void @llvm.lifetime.end.p0(ptr nonnull %B)
ret void
}
@@ -344,61 +344,61 @@ entry:
define void @dse_strcpy(ptr nocapture readonly %src) {
; CHECK-LABEL: @dse_strcpy(
; CHECK-NEXT: [[A:%.*]] = alloca [256 x i8], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 256, ptr nonnull [[A]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 256, ptr nonnull [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[A]])
; CHECK-NEXT: ret void
;
%a = alloca [256 x i8], align 16
- call void @llvm.lifetime.start.p0(i64 256, ptr nonnull %a)
+ call void @llvm.lifetime.start.p0(ptr nonnull %a)
call ptr @strcpy(ptr nonnull %a, ptr nonnull dereferenceable(1) %src)
- call void @llvm.lifetime.end.p0(i64 256, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
ret void
}
define void @dse_strncpy(ptr nocapture readonly %src) {
; CHECK-LABEL: @dse_strncpy(
; CHECK-NEXT: [[A:%.*]] = alloca [256 x i8], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 256, ptr nonnull [[A]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 256, ptr nonnull [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[A]])
; CHECK-NEXT: ret void
;
%a = alloca [256 x i8], align 16
- call void @llvm.lifetime.start.p0(i64 256, ptr nonnull %a)
+ call void @llvm.lifetime.start.p0(ptr nonnull %a)
call ptr @strncpy(ptr nonnull %a, ptr nonnull dereferenceable(1) %src, i64 6)
- call void @llvm.lifetime.end.p0(i64 256, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
ret void
}
define void @dse_strcat(ptr nocapture readonly %src) {
; CHECK-LABEL: @dse_strcat(
; CHECK-NEXT: [[A:%.*]] = alloca [256 x i8], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 256, ptr nonnull [[A]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 256, ptr nonnull [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[A]])
; CHECK-NEXT: ret void
;
%a = alloca [256 x i8], align 16
- call void @llvm.lifetime.start.p0(i64 256, ptr nonnull %a)
+ call void @llvm.lifetime.start.p0(ptr nonnull %a)
call ptr @strcat(ptr nonnull %a, ptr nonnull dereferenceable(1) %src)
- call void @llvm.lifetime.end.p0(i64 256, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
ret void
}
define void @dse_strncat(ptr nocapture readonly %src) {
; CHECK-LABEL: @dse_strncat(
; CHECK-NEXT: [[A:%.*]] = alloca [256 x i8], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 256, ptr nonnull [[A]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 256, ptr nonnull [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[A]])
; CHECK-NEXT: ret void
;
%a = alloca [256 x i8], align 16
- call void @llvm.lifetime.start.p0(i64 256, ptr nonnull %a)
+ call void @llvm.lifetime.start.p0(ptr nonnull %a)
call ptr @strncat(ptr nonnull %a, ptr nonnull dereferenceable(1) %src, i64 6)
- call void @llvm.lifetime.end.p0(i64 256, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind
diff --git a/llvm/test/Transforms/DeadStoreElimination/lifetime.ll b/llvm/test/Transforms/DeadStoreElimination/lifetime.ll
index f2a372ea..3d74c84 100644
--- a/llvm/test/Transforms/DeadStoreElimination/lifetime.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/lifetime.ll
@@ -3,20 +3,20 @@
target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind
+declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind
declare void @llvm.memset.p0.i8(ptr nocapture, i8, i8, i1) nounwind
define void @test1() {
; CHECK-LABEL: @test1(
; CHECK-NEXT: [[A:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
; CHECK-NEXT: ret void
;
%A = alloca i8
store i8 0, ptr %A ;; Written to by memset
- call void @llvm.lifetime.end.p0(i64 1, ptr %A)
+ call void @llvm.lifetime.end.p0(ptr %A)
call void @llvm.memset.p0.i8(ptr %A, i8 0, i8 -1, i1 false)
@@ -26,14 +26,14 @@ define void @test1() {
define void @test2(ptr %P) {
; CHECK-LABEL: @test2(
; CHECK-NEXT: [[Q:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[Q]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[Q]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[Q]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[Q]])
; CHECK-NEXT: ret void
;
%Q = alloca i32
- call void @llvm.lifetime.start.p0(i64 4, ptr %Q)
+ call void @llvm.lifetime.start.p0(ptr %Q)
store i32 0, ptr %Q ;; This store is dead.
- call void @llvm.lifetime.end.p0(i64 4, ptr %Q)
+ call void @llvm.lifetime.end.p0(ptr %Q)
ret void
}
diff --git a/llvm/test/Transforms/DeadStoreElimination/memcpy-lifetimes.ll b/llvm/test/Transforms/DeadStoreElimination/memcpy-lifetimes.ll
index 7dd8e41..264e816 100644
--- a/llvm/test/Transforms/DeadStoreElimination/memcpy-lifetimes.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/memcpy-lifetimes.ll
@@ -15,7 +15,7 @@ define ptr @alloc_tree() {
; CHECK-LABEL: @alloc_tree(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[FVAL:%.*]] = alloca [4 x ptr], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[FVAL]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[FVAL]])
; CHECK-NEXT: [[CALL:%.*]] = tail call dereferenceable_or_null(192) ptr @malloc(i64 192)
; CHECK-NEXT: [[CALL3:%.*]] = tail call ptr @alloc(ptr [[CALL]])
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x ptr], ptr [[FVAL]], i64 0, i64 3
@@ -29,12 +29,12 @@ define ptr @alloc_tree() {
; CHECK-NEXT: [[CALL3_3:%.*]] = tail call ptr @alloc(ptr [[CALL]])
; CHECK-NEXT: store ptr [[CALL3_3]], ptr [[FVAL]], align 16
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(32) [[CALL]], ptr nonnull align 16 dereferenceable(32) [[FVAL]], i64 32, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[FVAL]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[FVAL]])
; CHECK-NEXT: ret ptr [[CALL]]
;
entry:
%fval = alloca [4 x ptr], align 16
- call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %fval) #7
+ call void @llvm.lifetime.start.p0(ptr nonnull %fval) #7
%call = tail call dereferenceable_or_null(192) ptr @malloc(i64 192) #8
%call3 = tail call ptr @alloc(ptr %call)
%arrayidx = getelementptr inbounds [4 x ptr], ptr %fval, i64 0, i64 3
@@ -48,11 +48,11 @@ entry:
%call3.3 = tail call ptr @alloc(ptr %call)
store ptr %call3.3, ptr %fval, align 16
call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(32) %call, ptr nonnull align 16 dereferenceable(32) %fval, i64 32, i1 false)
- call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %fval) #7
+ call void @llvm.lifetime.end.p0(ptr nonnull %fval) #7
ret ptr %call
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare noalias ptr @malloc(i64)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg)
diff --git a/llvm/test/Transforms/DeadStoreElimination/multiblock-loop-carried-dependence.ll b/llvm/test/Transforms/DeadStoreElimination/multiblock-loop-carried-dependence.ll
index f3f5cb1..112e9f4 100644
--- a/llvm/test/Transforms/DeadStoreElimination/multiblock-loop-carried-dependence.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/multiblock-loop-carried-dependence.ll
@@ -103,7 +103,7 @@ define void @test.2() {
; CHECK-NEXT: [[C_2:%.*]] = icmp slt i64 [[IV_2_NEXT]], 100
; CHECK-NEXT: br i1 [[C_2]], label [[LOOP_2]], label [[EXIT:%.*]]
; CHECK: exit:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 400, ptr nonnull [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[A]])
; CHECK-NEXT: ret void
;
entry:
@@ -136,11 +136,11 @@ loop.2:
br i1 %c.2, label %loop.2, label %exit
exit:
- call void @llvm.lifetime.end.p0(i64 400, ptr nonnull %A) #5
+ call void @llvm.lifetime.end.p0(ptr nonnull %A) #5
ret void
}
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
; Make sure `store i32 10, ptr %ptr.2` in %cond.store is not removed. The
; stored value may be read by `%use = load i32, ptr %ptr.1` in a future
@@ -171,7 +171,7 @@ define void@test.3() {
; CHECK-NEXT: [[DEPTH_1_BE]] = phi i32 [ [[SUB]], [[COND_READ]] ], [ [[INC]], [[COND_STORE]] ]
; CHECK-NEXT: br label [[LOOP_HEADER]]
; CHECK: cleanup:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 48, ptr nonnull [[NODESTACK]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[NODESTACK]])
; CHECK-NEXT: ret void
;
entry:
@@ -203,7 +203,7 @@ loop.latch:
br label %loop.header
cleanup: ; preds = %while.body, %while.end, %entry
- call void @llvm.lifetime.end.p0(i64 48, ptr nonnull %nodeStack) #3
+ call void @llvm.lifetime.end.p0(ptr nonnull %nodeStack) #3
ret void
}
diff --git a/llvm/test/Transforms/DeadStoreElimination/multiblock-malloc-free.ll b/llvm/test/Transforms/DeadStoreElimination/multiblock-malloc-free.ll
index d32d562..8ecc7939 100644
--- a/llvm/test/Transforms/DeadStoreElimination/multiblock-malloc-free.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/multiblock-malloc-free.ll
@@ -4,8 +4,8 @@
target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
declare void @unknown_func()
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind
+declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind
declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i32, i1) nounwind
diff --git a/llvm/test/Transforms/DeadStoreElimination/nounwind-invoke.ll b/llvm/test/Transforms/DeadStoreElimination/nounwind-invoke.ll
index 3712bec..7293228 100644
--- a/llvm/test/Transforms/DeadStoreElimination/nounwind-invoke.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/nounwind-invoke.ll
@@ -9,7 +9,7 @@ define void @test_nounwind_invoke() personality ptr @__gxx_personality_v0 {
; CHECK-NEXT: invoke void @foo(ptr [[TMP]])
; CHECK-NEXT: to label [[BB1:%.*]] unwind label [[BB2:%.*]]
; CHECK: bb1:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP]])
; CHECK-NEXT: ret void
; CHECK: bb2:
; CHECK-NEXT: [[ABCTMP1:%.*]] = landingpad { ptr, i32 }
@@ -26,7 +26,7 @@ bb:
to label %bb1 unwind label %bb2
bb1: ; preds = %bb
- call void @llvm.lifetime.end.p0(i64 4, ptr %tmp)
+ call void @llvm.lifetime.end.p0(ptr %tmp)
ret void
bb2: ; preds = %bb
@@ -36,7 +36,7 @@ bb2: ; preds = %bb
}
; Function Attrs: argmemonly nocallback nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #0
+declare void @llvm.lifetime.end.p0(ptr nocapture) #0
; Function Attrs: argmemonly nounwind willreturn
declare void @foo(ptr) #1
declare i32 @__gxx_personality_v0(...)
diff --git a/llvm/test/Transforms/DeadStoreElimination/simple.ll b/llvm/test/Transforms/DeadStoreElimination/simple.ll
index 6c04e15..9d28395 100644
--- a/llvm/test/Transforms/DeadStoreElimination/simple.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/simple.ll
@@ -697,26 +697,26 @@ define void @test39_atomic(ptr %P, ptr %Q, ptr %R) {
declare void @llvm.memmove.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1)
declare void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i32)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind
+declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind
define void @test40(ptr noalias %Pp, ptr noalias %Q) {
; CHECK-LABEL: @test40(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[A]])
; CHECK-NEXT: [[PC:%.*]] = load ptr, ptr [[PP:%.*]], align 8
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 4 [[A]], ptr align 4 [[Q:%.*]], i64 4, i1 false)
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[PC]], ptr nonnull align 4 [[A]], i64 4, i1 true)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[A]])
; CHECK-NEXT: ret void
;
entry:
%A = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %A)
+ call void @llvm.lifetime.start.p0(ptr nonnull %A)
%Pc = load ptr, ptr %Pp, align 8
call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 4 %A, ptr align 4 %Q, i64 4, i1 false)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %Pc, ptr nonnull align 4 %A, i64 4, i1 true)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %A)
+ call void @llvm.lifetime.end.p0(ptr nonnull %A)
ret void
}
diff --git a/llvm/test/Transforms/DeadStoreElimination/trivial-dse-calls.ll b/llvm/test/Transforms/DeadStoreElimination/trivial-dse-calls.ll
index df2feb0..0970ed3 100644
--- a/llvm/test/Transforms/DeadStoreElimination/trivial-dse-calls.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/trivial-dse-calls.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=dse -S < %s | FileCheck %s
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @unknown()
declare void @f(ptr)
@@ -23,14 +23,14 @@ define void @test_dead() {
define void @test_lifetime() {
; CHECK-LABEL: @test_lifetime(
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
; CHECK-NEXT: ret void
;
%a = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
call void @f(ptr writeonly nocapture %a) argmemonly nounwind willreturn
- call void @llvm.lifetime.end.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
ret void
}
@@ -39,18 +39,18 @@ define void @test_lifetime() {
define void @test_lifetime2() {
; CHECK-LABEL: @test_lifetime2(
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
; CHECK-NEXT: call void @unknown()
; CHECK-NEXT: call void @unknown()
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
; CHECK-NEXT: ret void
;
%a = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
call void @unknown()
call void @f(ptr writeonly nocapture %a) argmemonly nounwind willreturn
call void @unknown()
- call void @llvm.lifetime.end.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
ret void
}
diff --git a/llvm/test/Transforms/EarlyCSE/memoryssa.ll b/llvm/test/Transforms/EarlyCSE/memoryssa.ll
index ba4cce4..f7f7ba3 100644
--- a/llvm/test/Transforms/EarlyCSE/memoryssa.ll
+++ b/llvm/test/Transforms/EarlyCSE/memoryssa.ll
@@ -146,12 +146,12 @@ define void @test_writeback_lifetimes() {
; CHECK-NOMEMSSA-LABEL: @test_writeback_lifetimes(
; CHECK-NOMEMSSA-NEXT: entry:
; CHECK-NOMEMSSA-NEXT: [[P:%.*]] = alloca i64, align 8
-; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[P]])
+; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.start.p0(ptr [[P]])
; CHECK-NOMEMSSA-NEXT: [[Q:%.*]] = getelementptr i32, ptr [[P]], i64 1
; CHECK-NOMEMSSA-NEXT: [[PV:%.*]] = load i32, ptr [[P]], align 4
; CHECK-NOMEMSSA-NEXT: [[QV:%.*]] = load i32, ptr [[Q]], align 4
-; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[P]])
-; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[P]])
+; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.end.p0(ptr [[P]])
+; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.start.p0(ptr [[P]])
; CHECK-NOMEMSSA-NEXT: store i32 [[PV]], ptr [[P]], align 4
; CHECK-NOMEMSSA-NEXT: store i32 [[QV]], ptr [[Q]], align 4
; CHECK-NOMEMSSA-NEXT: ret void
@@ -159,24 +159,24 @@ define void @test_writeback_lifetimes() {
; CHECK-LABEL: @test_writeback_lifetimes(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[P:%.*]] = alloca i64, align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[P]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[P]])
; CHECK-NEXT: [[Q:%.*]] = getelementptr i32, ptr [[P]], i64 1
; CHECK-NEXT: [[PV:%.*]] = load i32, ptr [[P]], align 4
; CHECK-NEXT: [[QV:%.*]] = load i32, ptr [[Q]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[P]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[P]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[P]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[P]])
; CHECK-NEXT: store i32 [[PV]], ptr [[P]], align 4
; CHECK-NEXT: store i32 [[QV]], ptr [[Q]], align 4
; CHECK-NEXT: ret void
;
entry:
%p = alloca i64
- call void @llvm.lifetime.start.p0(i64 8, ptr %p)
+ call void @llvm.lifetime.start.p0(ptr %p)
%q = getelementptr i32, ptr %p, i64 1
%pv = load i32, ptr %p
%qv = load i32, ptr %q
- call void @llvm.lifetime.end.p0(i64 8, ptr %p)
- call void @llvm.lifetime.start.p0(i64 8, ptr %p)
+ call void @llvm.lifetime.end.p0(ptr %p)
+ call void @llvm.lifetime.start.p0(ptr %p)
store i32 %pv, ptr %p
store i32 %qv, ptr %q
ret void
@@ -188,11 +188,11 @@ define void @test_writeback_lifetimes_multi_arg(ptr %q) {
; CHECK-NOMEMSSA-LABEL: @test_writeback_lifetimes_multi_arg(
; CHECK-NOMEMSSA-NEXT: entry:
; CHECK-NOMEMSSA-NEXT: [[P:%.*]] = alloca i64, align 8
-; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[P]])
+; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.start.p0(ptr [[P]])
; CHECK-NOMEMSSA-NEXT: [[PV:%.*]] = load i32, ptr [[P]], align 4
; CHECK-NOMEMSSA-NEXT: [[QV:%.*]] = load i32, ptr [[Q:%.*]], align 4
-; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[P]])
-; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[P]])
+; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.end.p0(ptr [[P]])
+; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.start.p0(ptr [[P]])
; CHECK-NOMEMSSA-NEXT: store i32 [[PV]], ptr [[P]], align 4
; CHECK-NOMEMSSA-NEXT: store i32 [[QV]], ptr [[Q]], align 4
; CHECK-NOMEMSSA-NEXT: ret void
@@ -200,25 +200,25 @@ define void @test_writeback_lifetimes_multi_arg(ptr %q) {
; CHECK-LABEL: @test_writeback_lifetimes_multi_arg(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[P:%.*]] = alloca i64, align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[P]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[P]])
; CHECK-NEXT: [[PV:%.*]] = load i32, ptr [[P]], align 4
; CHECK-NEXT: [[QV:%.*]] = load i32, ptr [[Q:%.*]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[P]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[P]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[P]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[P]])
; CHECK-NEXT: store i32 [[PV]], ptr [[P]], align 4
; CHECK-NEXT: ret void
;
entry:
%p = alloca i64
- call void @llvm.lifetime.start.p0(i64 8, ptr %p)
+ call void @llvm.lifetime.start.p0(ptr %p)
%pv = load i32, ptr %p
%qv = load i32, ptr %q
- call void @llvm.lifetime.end.p0(i64 8, ptr %p)
- call void @llvm.lifetime.start.p0(i64 8, ptr %p)
+ call void @llvm.lifetime.end.p0(ptr %p)
+ call void @llvm.lifetime.start.p0(ptr %p)
store i32 %pv, ptr %p
store i32 %qv, ptr %q
ret void
}
-declare void @llvm.lifetime.end.p0(i64, ptr)
-declare void @llvm.lifetime.start.p0(i64, ptr)
+declare void @llvm.lifetime.end.p0(ptr)
+declare void @llvm.lifetime.start.p0(ptr)
diff --git a/llvm/test/Transforms/ExpandVariadics/expand-va-intrinsic-split-linkage.ll b/llvm/test/Transforms/ExpandVariadics/expand-va-intrinsic-split-linkage.ll
index f7e21cd..736b072 100644
--- a/llvm/test/Transforms/ExpandVariadics/expand-va-intrinsic-split-linkage.ll
+++ b/llvm/test/Transforms/ExpandVariadics/expand-va-intrinsic-split-linkage.ll
@@ -25,11 +25,11 @@ define void @defn_simple(...) {
; OPT-LABEL: define {{[^@]+}}@defn_simple(...) {
; OPT-NEXT: entry:
; OPT-NEXT: %va_start = alloca ptr, align 4
-; OPT-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr %va_start)
+; OPT-NEXT: call void @llvm.lifetime.start.p0(ptr %va_start)
; OPT-NEXT: call void @llvm.va_start.p0(ptr %va_start)
; OPT-NEXT: %0 = load ptr, ptr %va_start, align 4
; OPT-NEXT: call void @defn_simple.valist(ptr %0)
-; OPT-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr %va_start)
+; OPT-NEXT: call void @llvm.lifetime.end.p0(ptr %va_start)
; OPT-NEXT: ret void
;
; ABI-LABEL: define {{[^@]+}}@defn_simple(ptr %varargs) {
@@ -50,11 +50,11 @@ define private void @defn_private_simple(...) {
; OPT-LABEL: define {{[^@]+}}@defn_private_simple(...) {
; OPT-NEXT: entry:
; OPT-NEXT: %va_start = alloca ptr, align 4
-; OPT-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr %va_start)
+; OPT-NEXT: call void @llvm.lifetime.start.p0(ptr %va_start)
; OPT-NEXT: call void @llvm.va_start.p0(ptr %va_start)
; OPT-NEXT: %0 = load ptr, ptr %va_start, align 4
; OPT-NEXT: call void @defn_private_simple.valist(ptr %0)
-; OPT-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr %va_start)
+; OPT-NEXT: call void @llvm.lifetime.end.p0(ptr %va_start)
; OPT-NEXT: ret void
;
; ABI-LABEL: define {{[^@]+}}@defn_private_simple(ptr %varargs) {
@@ -75,11 +75,11 @@ define internal void @defn_internal_simple(...) {
; OPT-LABEL: define {{[^@]+}}@defn_internal_simple(...) {
; OPT-NEXT: entry:
; OPT-NEXT: %va_start = alloca ptr, align 4
-; OPT-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr %va_start)
+; OPT-NEXT: call void @llvm.lifetime.start.p0(ptr %va_start)
; OPT-NEXT: call void @llvm.va_start.p0(ptr %va_start)
; OPT-NEXT: %0 = load ptr, ptr %va_start, align 4
; OPT-NEXT: call void @defn_internal_simple.valist(ptr %0)
-; OPT-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr %va_start)
+; OPT-NEXT: call void @llvm.lifetime.end.p0(ptr %va_start)
; OPT-NEXT: ret void
;
; ABI-LABEL: define {{[^@]+}}@defn_internal_simple(ptr %varargs) {
@@ -211,11 +211,11 @@ define external void @defn_external_simple(...) {
; OPT-LABEL: define {{[^@]+}}@defn_external_simple(...) {
; OPT-NEXT: entry:
; OPT-NEXT: %va_start = alloca ptr, align 4
-; OPT-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr %va_start)
+; OPT-NEXT: call void @llvm.lifetime.start.p0(ptr %va_start)
; OPT-NEXT: call void @llvm.va_start.p0(ptr %va_start)
; OPT-NEXT: %0 = load ptr, ptr %va_start, align 4
; OPT-NEXT: call void @defn_external_simple.valist(ptr %0)
-; OPT-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr %va_start)
+; OPT-NEXT: call void @llvm.lifetime.end.p0(ptr %va_start)
; OPT-NEXT: ret void
;
; ABI-LABEL: define {{[^@]+}}@defn_external_simple(ptr %varargs) {
diff --git a/llvm/test/Transforms/ExpandVariadics/expand-va-intrinsic-split-simple.ll b/llvm/test/Transforms/ExpandVariadics/expand-va-intrinsic-split-simple.ll
index 96cc826..e21b72d 100644
--- a/llvm/test/Transforms/ExpandVariadics/expand-va-intrinsic-split-simple.ll
+++ b/llvm/test/Transforms/ExpandVariadics/expand-va-intrinsic-split-simple.ll
@@ -10,11 +10,11 @@ define i32 @variadic_int_double_get_firstz(...) {
; OPT-LABEL: define {{[^@]+}}@variadic_int_double_get_firstz(...) {
; OPT-NEXT: entry:
; OPT-NEXT: %va_start = alloca ptr, align 4
-; OPT-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr %va_start)
+; OPT-NEXT: call void @llvm.lifetime.start.p0(ptr %va_start)
; OPT-NEXT: call void @llvm.va_start.p0(ptr %va_start)
; OPT-NEXT: %0 = load ptr, ptr %va_start, align 4
; OPT-NEXT: %1 = call i32 @variadic_int_double_get_firstz.valist(ptr %0)
-; OPT-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr %va_start)
+; OPT-NEXT: call void @llvm.lifetime.end.p0(ptr %va_start)
; OPT-NEXT: ret i32 %1
;
; ABI-LABEL: define {{[^@]+}}@variadic_int_double_get_firstz(ptr %varargs) {
@@ -61,11 +61,11 @@ define double @variadic_int_double_get_secondz(...) {
; OPT-LABEL: define {{[^@]+}}@variadic_int_double_get_secondz(...) {
; OPT-NEXT: entry:
; OPT-NEXT: %va_start = alloca ptr, align 4
-; OPT-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr %va_start)
+; OPT-NEXT: call void @llvm.lifetime.start.p0(ptr %va_start)
; OPT-NEXT: call void @llvm.va_start.p0(ptr %va_start)
; OPT-NEXT: %0 = load ptr, ptr %va_start, align 4
; OPT-NEXT: %1 = call double @variadic_int_double_get_secondz.valist(ptr %0)
-; OPT-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr %va_start)
+; OPT-NEXT: call void @llvm.lifetime.end.p0(ptr %va_start)
; OPT-NEXT: ret double %1
;
; ABI-LABEL: define {{[^@]+}}@variadic_int_double_get_secondz(ptr %varargs) {
@@ -115,13 +115,13 @@ entry:
; CHECK-LABEL: @variadic_can_get_firstIidEEbT_T0_(i32 %x, double %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %variadic_can_get_firstIidEEbT_T0_.vararg, align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds %variadic_can_get_firstIidEEbT_T0_.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store i32 %x, ptr %0, align 4
; CHECK-NEXT: %1 = getelementptr inbounds %variadic_can_get_firstIidEEbT_T0_.vararg, ptr %vararg_buffer, i32 0, i32 1
; CHECK-NEXT: store double %y, ptr %1, align 4
; CHECK-NEXT: %call = call i32 @variadic_int_double_get_firstz.valist(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: %cmp.i = icmp eq i32 %call, %x
; CHECK-NEXT: ret i1 %cmp.i
; CHECK-NEXT: }
@@ -130,26 +130,26 @@ define zeroext i1 @variadic_can_get_firstIidEEbT_T0_(i32 %x, double %y) {
; OPT-LABEL: define {{[^@]+}}@variadic_can_get_firstIidEEbT_T0_(i32 %x, double %y) {
; OPT-NEXT: entry:
; OPT-NEXT: %vararg_buffer = alloca %variadic_can_get_firstIidEEbT_T0_.vararg, align 16
-; OPT-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr %vararg_buffer)
+; OPT-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; OPT-NEXT: %0 = getelementptr inbounds nuw %variadic_can_get_firstIidEEbT_T0_.vararg, ptr %vararg_buffer, i32 0, i32 0
; OPT-NEXT: store i32 %x, ptr %0, align 4
; OPT-NEXT: %1 = getelementptr inbounds nuw %variadic_can_get_firstIidEEbT_T0_.vararg, ptr %vararg_buffer, i32 0, i32 2
; OPT-NEXT: store double %y, ptr %1, align 8
; OPT-NEXT: %call = call i32 @variadic_int_double_get_firstz.valist(ptr %vararg_buffer)
-; OPT-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr %vararg_buffer)
+; OPT-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; OPT-NEXT: %cmp.i = icmp eq i32 %call, %x
; OPT-NEXT: ret i1 %cmp.i
;
; ABI-LABEL: define {{[^@]+}}@variadic_can_get_firstIidEEbT_T0_(i32 %x, double %y) {
; ABI-NEXT: entry:
; ABI-NEXT: %vararg_buffer = alloca %variadic_can_get_firstIidEEbT_T0_.vararg, align 16
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr %vararg_buffer)
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; ABI-NEXT: %0 = getelementptr inbounds nuw %variadic_can_get_firstIidEEbT_T0_.vararg, ptr %vararg_buffer, i32 0, i32 0
; ABI-NEXT: store i32 %x, ptr %0, align 4
; ABI-NEXT: %1 = getelementptr inbounds nuw %variadic_can_get_firstIidEEbT_T0_.vararg, ptr %vararg_buffer, i32 0, i32 2
; ABI-NEXT: store double %y, ptr %1, align 8
; ABI-NEXT: %call = call i32 @variadic_int_double_get_firstz(ptr %vararg_buffer)
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr %vararg_buffer)
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; ABI-NEXT: %cmp.i = icmp eq i32 %call, %x
; ABI-NEXT: ret i1 %cmp.i
;
@@ -162,13 +162,13 @@ entry:
; CHECK-LABEL: @variadic_can_get_secondIidEEbT_T0_(i32 %x, double %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %variadic_can_get_secondIidEEbT_T0_.vararg, align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds %variadic_can_get_secondIidEEbT_T0_.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store i32 %x, ptr %0, align 4
; CHECK-NEXT: %1 = getelementptr inbounds %variadic_can_get_secondIidEEbT_T0_.vararg, ptr %vararg_buffer, i32 0, i32 1
; CHECK-NEXT: store double %y, ptr %1, align 4
; CHECK-NEXT: %call = call double @variadic_int_double_get_secondz.valist(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: %cmp.i = fcmp oeq double %call, %y
; CHECK-NEXT: ret i1 %cmp.i
; CHECK-NEXT: }
@@ -177,26 +177,26 @@ define zeroext i1 @variadic_can_get_secondIidEEbT_T0_(i32 %x, double %y) {
; OPT-LABEL: define {{[^@]+}}@variadic_can_get_secondIidEEbT_T0_(i32 %x, double %y) {
; OPT-NEXT: entry:
; OPT-NEXT: %vararg_buffer = alloca %variadic_can_get_secondIidEEbT_T0_.vararg, align 16
-; OPT-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr %vararg_buffer)
+; OPT-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; OPT-NEXT: %0 = getelementptr inbounds nuw %variadic_can_get_secondIidEEbT_T0_.vararg, ptr %vararg_buffer, i32 0, i32 0
; OPT-NEXT: store i32 %x, ptr %0, align 4
; OPT-NEXT: %1 = getelementptr inbounds nuw %variadic_can_get_secondIidEEbT_T0_.vararg, ptr %vararg_buffer, i32 0, i32 2
; OPT-NEXT: store double %y, ptr %1, align 8
; OPT-NEXT: %call = call double @variadic_int_double_get_secondz.valist(ptr %vararg_buffer)
-; OPT-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr %vararg_buffer)
+; OPT-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; OPT-NEXT: %cmp.i = fcmp oeq double %call, %y
; OPT-NEXT: ret i1 %cmp.i
;
; ABI-LABEL: define {{[^@]+}}@variadic_can_get_secondIidEEbT_T0_(i32 %x, double %y) {
; ABI-NEXT: entry:
; ABI-NEXT: %vararg_buffer = alloca %variadic_can_get_secondIidEEbT_T0_.vararg, align 16
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr %vararg_buffer)
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; ABI-NEXT: %0 = getelementptr inbounds nuw %variadic_can_get_secondIidEEbT_T0_.vararg, ptr %vararg_buffer, i32 0, i32 0
; ABI-NEXT: store i32 %x, ptr %0, align 4
; ABI-NEXT: %1 = getelementptr inbounds nuw %variadic_can_get_secondIidEEbT_T0_.vararg, ptr %vararg_buffer, i32 0, i32 2
; ABI-NEXT: store double %y, ptr %1, align 8
; ABI-NEXT: %call = call double @variadic_int_double_get_secondz(ptr %vararg_buffer)
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr %vararg_buffer)
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; ABI-NEXT: %cmp.i = fcmp oeq double %call, %y
; ABI-NEXT: ret i1 %cmp.i
;
diff --git a/llvm/test/Transforms/ExpandVariadics/indirect-calls.ll b/llvm/test/Transforms/ExpandVariadics/indirect-calls.ll
index b661f7f..0f178c7 100644
--- a/llvm/test/Transforms/ExpandVariadics/indirect-calls.ll
+++ b/llvm/test/Transforms/ExpandVariadics/indirect-calls.ll
@@ -19,11 +19,11 @@ define hidden void @fptr_single_i32(i32 noundef %x) {
; ABI-NEXT: entry:
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[FPTR_SINGLE_I32_VARARG:%.*]], align 16
; ABI-NEXT: [[TMP0:%.*]] = load volatile ptr, ptr @vararg_ptr, align 4
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[FPTR_SINGLE_I32_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store i32 [[X:%.*]], ptr [[TMP1]], align 4
; ABI-NEXT: call void [[TMP0]](ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
@@ -45,11 +45,11 @@ define hidden void @fptr_libcS(ptr noundef byval(%struct.libcS) align 8 %x) {
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[FPTR_LIBCS_VARARG:%.*]], align 16
; ABI-NEXT: [[TMP0:%.*]] = load volatile ptr, ptr @vararg_ptr, align 4
; ABI-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[INDIRECTALLOCA]], ptr [[X:%.*]], i64 24, i1 false)
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[FPTR_LIBCS_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store ptr [[INDIRECTALLOCA]], ptr [[TMP1]], align 4
; ABI-NEXT: call void [[TMP0]](ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
diff --git a/llvm/test/Transforms/ExpandVariadics/intrinsics.ll b/llvm/test/Transforms/ExpandVariadics/intrinsics.ll
index 1782c92..52ce80e 100644
--- a/llvm/test/Transforms/ExpandVariadics/intrinsics.ll
+++ b/llvm/test/Transforms/ExpandVariadics/intrinsics.ll
@@ -3,13 +3,13 @@
; RUN: opt -mtriple=wasm32-unknown-unknown -S --passes=expand-variadics --expand-variadics-override=lowering < %s | FileCheck %s -check-prefixes=CHECK,ABI
; REQUIRES: webassembly-registered-target
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @llvm.va_copy.p0(ptr, ptr)
declare void @valist(ptr noundef)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @llvm.va_start.p0(ptr)
@@ -20,31 +20,31 @@ define void @start_once(...) {
; OPT-LABEL: @start_once(
; OPT-NEXT: entry:
; OPT-NEXT: [[VA_START:%.*]] = alloca ptr, align 4
-; OPT-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[VA_START]])
+; OPT-NEXT: call void @llvm.lifetime.start.p0(ptr [[VA_START]])
; OPT-NEXT: call void @llvm.va_start.p0(ptr [[VA_START]])
; OPT-NEXT: [[TMP0:%.*]] = load ptr, ptr [[VA_START]], align 4
; OPT-NEXT: call void @start_once.valist(ptr [[TMP0]])
-; OPT-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[VA_START]])
+; OPT-NEXT: call void @llvm.lifetime.end.p0(ptr [[VA_START]])
; OPT-NEXT: ret void
;
; ABI-LABEL: @start_once(
; ABI-NEXT: entry:
; ABI-NEXT: [[S:%.*]] = alloca ptr, align 4
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[S]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[S]])
; ABI-NEXT: store ptr [[VARARGS:%.*]], ptr [[S]], align 4
; ABI-NEXT: [[TMP0:%.*]] = load ptr, ptr [[S]], align 4
; ABI-NEXT: call void @valist(ptr noundef [[TMP0]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[S]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[S]])
; ABI-NEXT: ret void
;
entry:
%s = alloca ptr, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %s)
+ call void @llvm.lifetime.start.p0(ptr nonnull %s)
call void @llvm.va_start.p0(ptr nonnull %s)
%0 = load ptr, ptr %s, align 4
call void @valist(ptr noundef %0)
call void @llvm.va_end.p0(ptr %s)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %s)
+ call void @llvm.lifetime.end.p0(ptr nonnull %s)
ret void
}
@@ -53,34 +53,34 @@ define void @start_twice(...) {
; OPT-LABEL: @start_twice(
; OPT-NEXT: entry:
; OPT-NEXT: [[VA_START:%.*]] = alloca ptr, align 4
-; OPT-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[VA_START]])
+; OPT-NEXT: call void @llvm.lifetime.start.p0(ptr [[VA_START]])
; OPT-NEXT: call void @llvm.va_start.p0(ptr [[VA_START]])
; OPT-NEXT: [[TMP0:%.*]] = load ptr, ptr [[VA_START]], align 4
; OPT-NEXT: call void @start_twice.valist(ptr [[TMP0]])
-; OPT-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[VA_START]])
+; OPT-NEXT: call void @llvm.lifetime.end.p0(ptr [[VA_START]])
; OPT-NEXT: ret void
;
; ABI-LABEL: @start_twice(
; ABI-NEXT: entry:
; ABI-NEXT: [[S0:%.*]] = alloca ptr, align 4
; ABI-NEXT: [[S1:%.*]] = alloca ptr, align 4
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[S0]])
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[S1]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[S0]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[S1]])
; ABI-NEXT: store ptr [[VARARGS:%.*]], ptr [[S0]], align 4
; ABI-NEXT: [[TMP0:%.*]] = load ptr, ptr [[S0]], align 4
; ABI-NEXT: call void @valist(ptr noundef [[TMP0]])
; ABI-NEXT: store ptr [[VARARGS]], ptr [[S1]], align 4
; ABI-NEXT: [[TMP1:%.*]] = load ptr, ptr [[S1]], align 4
; ABI-NEXT: call void @valist(ptr noundef [[TMP1]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[S1]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[S0]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[S1]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[S0]])
; ABI-NEXT: ret void
;
entry:
%s0 = alloca ptr, align 4
%s1 = alloca ptr, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %s0)
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %s1)
+ call void @llvm.lifetime.start.p0(ptr nonnull %s0)
+ call void @llvm.lifetime.start.p0(ptr nonnull %s1)
call void @llvm.va_start.p0(ptr nonnull %s0)
%0 = load ptr, ptr %s0, align 4
call void @valist(ptr noundef %0)
@@ -89,8 +89,8 @@ entry:
%1 = load ptr, ptr %s1, align 4
call void @valist(ptr noundef %1)
call void @llvm.va_end.p0(ptr %s1)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %s1)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %s0)
+ call void @llvm.lifetime.end.p0(ptr nonnull %s1)
+ call void @llvm.lifetime.end.p0(ptr nonnull %s0)
ret void
}
@@ -100,21 +100,21 @@ define void @copy(ptr noundef %va) {
; CHECK-NEXT: [[VA_ADDR:%.*]] = alloca ptr, align 4
; CHECK-NEXT: [[CP:%.*]] = alloca ptr, align 4
; CHECK-NEXT: store ptr [[VA:%.*]], ptr [[VA_ADDR]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[CP]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[CP]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr [[CP]], ptr [[VA_ADDR]], i32 4, i1 false)
; CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[CP]], align 4
; CHECK-NEXT: call void @valist(ptr noundef [[TMP0]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[CP]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[CP]])
; CHECK-NEXT: ret void
;
entry:
%va.addr = alloca ptr, align 4
%cp = alloca ptr, align 4
store ptr %va, ptr %va.addr, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %cp)
+ call void @llvm.lifetime.start.p0(ptr nonnull %cp)
call void @llvm.va_copy.p0(ptr nonnull %cp, ptr nonnull %va.addr)
%0 = load ptr, ptr %cp, align 4
call void @valist(ptr noundef %0)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %cp)
+ call void @llvm.lifetime.end.p0(ptr nonnull %cp)
ret void
}
diff --git a/llvm/test/Transforms/ExpandVariadics/pass-byval-byref.ll b/llvm/test/Transforms/ExpandVariadics/pass-byval-byref.ll
index a9f27f7..83b33b93 100644
--- a/llvm/test/Transforms/ExpandVariadics/pass-byval-byref.ll
+++ b/llvm/test/Transforms/ExpandVariadics/pass-byval-byref.ll
@@ -16,11 +16,11 @@ define void @pass_byval(ptr byval(i32) %b) {
; ABI-LABEL: @pass_byval(
; ABI-NEXT: entry:
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_BYVAL_VARARG:%.*]], align 16
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_BYVAL_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[TMP0]], ptr [[B:%.*]], i64 4, i1 false)
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
@@ -41,13 +41,13 @@ define void @i32_libcS_byval(i32 %x, ptr noundef byval(%struct.libcS) align 8 %y
; ABI-NEXT: [[INDIRECTALLOCA:%.*]] = alloca [[STRUCT_LIBCS:%.*]], align 8
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[I32_LIBCS_BYVAL_VARARG:%.*]], align 16
; ABI-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[INDIRECTALLOCA]], ptr [[Y:%.*]], i64 24, i1 false)
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[I32_LIBCS_BYVAL_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store i32 [[X:%.*]], ptr [[TMP0]], align 4
; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[I32_LIBCS_BYVAL_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1
; ABI-NEXT: store ptr [[INDIRECTALLOCA]], ptr [[TMP1]], align 4
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
@@ -66,13 +66,13 @@ define void @libcS_i32_byval(ptr byval(%struct.libcS) align 8 %x, i32 %y) {
; ABI-NEXT: [[INDIRECTALLOCA:%.*]] = alloca [[STRUCT_LIBCS:%.*]], align 8
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[LIBCS_I32_BYVAL_VARARG:%.*]], align 16
; ABI-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[INDIRECTALLOCA]], ptr [[X:%.*]], i64 24, i1 false)
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[LIBCS_I32_BYVAL_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store ptr [[INDIRECTALLOCA]], ptr [[TMP0]], align 4
; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[LIBCS_I32_BYVAL_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1
; ABI-NEXT: store i32 [[Y:%.*]], ptr [[TMP1]], align 4
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
@@ -90,11 +90,11 @@ define void @pass_byref(ptr byref(i32) %b) {
; ABI-LABEL: @pass_byref(
; ABI-NEXT: entry:
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_BYREF_VARARG:%.*]], align 16
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_BYREF_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store ptr [[B:%.*]], ptr [[TMP0]], align 4
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
@@ -113,13 +113,13 @@ define void @i32_libcS_byref(i32 %x, ptr noundef byref(%struct.libcS) align 8 %y
; ABI-NEXT: [[INDIRECTALLOCA:%.*]] = alloca [[STRUCT_LIBCS:%.*]], align 8
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[I32_LIBCS_BYREF_VARARG:%.*]], align 16
; ABI-NEXT: store ptr [[Y:%.*]], ptr [[INDIRECTALLOCA]], align 4
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[I32_LIBCS_BYREF_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store i32 [[X:%.*]], ptr [[TMP0]], align 4
; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[I32_LIBCS_BYREF_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1
; ABI-NEXT: store ptr [[INDIRECTALLOCA]], ptr [[TMP1]], align 4
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
@@ -138,13 +138,13 @@ define void @libcS_i32_byref(ptr byref(%struct.libcS) align 8 %x, i32 %y) {
; ABI-NEXT: [[INDIRECTALLOCA:%.*]] = alloca [[STRUCT_LIBCS:%.*]], align 8
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[LIBCS_I32_BYREF_VARARG:%.*]], align 16
; ABI-NEXT: store ptr [[X:%.*]], ptr [[INDIRECTALLOCA]], align 4
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[LIBCS_I32_BYREF_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store ptr [[INDIRECTALLOCA]], ptr [[TMP0]], align 4
; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[LIBCS_I32_BYREF_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1
; ABI-NEXT: store i32 [[Y:%.*]], ptr [[TMP1]], align 4
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
diff --git a/llvm/test/Transforms/ExpandVariadics/pass-indirect.ll b/llvm/test/Transforms/ExpandVariadics/pass-indirect.ll
index 67cb269..46e1904 100644
--- a/llvm/test/Transforms/ExpandVariadics/pass-indirect.ll
+++ b/llvm/test/Transforms/ExpandVariadics/pass-indirect.ll
@@ -19,13 +19,13 @@ define void @i32_libcS(i32 %x, %struct.libcS %y) {
; ABI-NEXT: [[INDIRECTALLOCA:%.*]] = alloca [[STRUCT_LIBCS:%.*]], align 8
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[I32_LIBCS_VARARG:%.*]], align 16
; ABI-NEXT: store [[STRUCT_LIBCS]] [[Y:%.*]], ptr [[INDIRECTALLOCA]], align 8
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[I32_LIBCS_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store i32 [[X:%.*]], ptr [[TMP0]], align 4
; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[I32_LIBCS_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1
; ABI-NEXT: store ptr [[INDIRECTALLOCA]], ptr [[TMP1]], align 4
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
@@ -44,13 +44,13 @@ define void @libcS_i32(%struct.libcS %x, i32 %y) {
; ABI-NEXT: [[INDIRECTALLOCA:%.*]] = alloca [[STRUCT_LIBCS:%.*]], align 8
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[LIBCS_I32_VARARG:%.*]], align 16
; ABI-NEXT: store [[STRUCT_LIBCS]] [[X:%.*]], ptr [[INDIRECTALLOCA]], align 8
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[LIBCS_I32_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store ptr [[INDIRECTALLOCA]], ptr [[TMP0]], align 4
; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[LIBCS_I32_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1
; ABI-NEXT: store i32 [[Y:%.*]], ptr [[TMP1]], align 4
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
diff --git a/llvm/test/Transforms/ExpandVariadics/pass-integers.ll b/llvm/test/Transforms/ExpandVariadics/pass-integers.ll
index 7a0c004..cf52724 100644
--- a/llvm/test/Transforms/ExpandVariadics/pass-integers.ll
+++ b/llvm/test/Transforms/ExpandVariadics/pass-integers.ll
@@ -17,9 +17,9 @@ define void @pass_nothing() {
; ABI-LABEL: @pass_nothing(
; ABI-NEXT: entry:
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_NOTHING_VARARG:%.*]], align 16
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
@@ -36,11 +36,11 @@ define void @pass_s1(i8 %x) {
; ABI-LABEL: @pass_s1(
; ABI-NEXT: entry:
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_S1_VARARG:%.*]], align 16
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_S1_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store i8 [[X:%.*]], ptr [[TMP0]], align 1
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
@@ -57,11 +57,11 @@ define void @pass_s2(i16 %x) {
; ABI-LABEL: @pass_s2(
; ABI-NEXT: entry:
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_S2_VARARG:%.*]], align 16
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_S2_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store i16 [[X:%.*]], ptr [[TMP0]], align 2
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
@@ -78,11 +78,11 @@ define void @pass_s3(i32 %x) {
; ABI-LABEL: @pass_s3(
; ABI-NEXT: entry:
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_S3_VARARG:%.*]], align 16
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_S3_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store i32 [[X:%.*]], ptr [[TMP0]], align 4
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
@@ -99,11 +99,11 @@ define void @pass_s4(i64 %x) {
; ABI-LABEL: @pass_s4(
; ABI-NEXT: entry:
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_S4_VARARG:%.*]], align 16
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_S4_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store i64 [[X:%.*]], ptr [[TMP0]], align 8
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
@@ -120,11 +120,11 @@ define void @pass_s5(<4 x i32> %x) {
; ABI-LABEL: @pass_s5(
; ABI-NEXT: entry:
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_S5_VARARG:%.*]], align 16
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_S5_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store <4 x i32> [[X:%.*]], ptr [[TMP0]], align 16
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
@@ -141,13 +141,13 @@ define void @pass_int_s1(i32 %i, i8 %x) {
; ABI-LABEL: @pass_int_s1(
; ABI-NEXT: entry:
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_INT_S1_VARARG:%.*]], align 16
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 5, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_INT_S1_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store i32 [[I:%.*]], ptr [[TMP0]], align 4
; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[PASS_INT_S1_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1
; ABI-NEXT: store i8 [[X:%.*]], ptr [[TMP1]], align 1
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 5, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
@@ -164,13 +164,13 @@ define void @pass_int_s2(i32 %i, i16 %x) {
; ABI-LABEL: @pass_int_s2(
; ABI-NEXT: entry:
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_INT_S2_VARARG:%.*]], align 16
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 6, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_INT_S2_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store i32 [[I:%.*]], ptr [[TMP0]], align 4
; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[PASS_INT_S2_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1
; ABI-NEXT: store i16 [[X:%.*]], ptr [[TMP1]], align 2
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 6, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
@@ -187,13 +187,13 @@ define void @pass_int_s3(i32 %i, i32 %x) {
; ABI-LABEL: @pass_int_s3(
; ABI-NEXT: entry:
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_INT_S3_VARARG:%.*]], align 16
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_INT_S3_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store i32 [[I:%.*]], ptr [[TMP0]], align 4
; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[PASS_INT_S3_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1
; ABI-NEXT: store i32 [[X:%.*]], ptr [[TMP1]], align 4
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
@@ -210,13 +210,13 @@ define void @pass_int_s4(i32 %i, i64 %x) {
; ABI-LABEL: @pass_int_s4(
; ABI-NEXT: entry:
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_INT_S4_VARARG:%.*]], align 16
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_INT_S4_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store i32 [[I:%.*]], ptr [[TMP0]], align 4
; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[PASS_INT_S4_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 2
; ABI-NEXT: store i64 [[X:%.*]], ptr [[TMP1]], align 8
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
@@ -233,13 +233,13 @@ define void @pass_int_s5(i32 %i, <4 x i32> %x) {
; ABI-LABEL: @pass_int_s5(
; ABI-NEXT: entry:
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_INT_S5_VARARG:%.*]], align 16
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_INT_S5_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store i32 [[I:%.*]], ptr [[TMP0]], align 4
; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[PASS_INT_S5_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 2
; ABI-NEXT: store <4 x i32> [[X:%.*]], ptr [[TMP1]], align 16
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
@@ -256,7 +256,7 @@ define void @pass_asc(i8 %x1, i16 %x2, i32 %x3, i64 %x4, <4 x i32> %x5) {
; ABI-LABEL: @pass_asc(
; ABI-NEXT: entry:
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_ASC_VARARG:%.*]], align 16
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 48, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_ASC_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store i8 [[X1:%.*]], ptr [[TMP0]], align 1
; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[PASS_ASC_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 2
@@ -268,7 +268,7 @@ define void @pass_asc(i8 %x1, i16 %x2, i32 %x3, i64 %x4, <4 x i32> %x5) {
; ABI-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[PASS_ASC_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 8
; ABI-NEXT: store <4 x i32> [[X5:%.*]], ptr [[TMP4]], align 16
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 48, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
@@ -285,7 +285,7 @@ define void @pass_dsc(<4 x i32> %x0, i64 %x1, i32 %x2, i16 %x3, i8 %x4) {
; ABI-LABEL: @pass_dsc(
; ABI-NEXT: entry:
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_DSC_VARARG:%.*]], align 16
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 33, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_DSC_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store <4 x i32> [[X0:%.*]], ptr [[TMP0]], align 16
; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[PASS_DSC_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1
@@ -297,7 +297,7 @@ define void @pass_dsc(<4 x i32> %x0, i64 %x1, i32 %x2, i16 %x3, i8 %x4) {
; ABI-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[PASS_DSC_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 5
; ABI-NEXT: store i8 [[X4:%.*]], ptr [[TMP4]], align 1
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 33, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
@@ -316,7 +316,7 @@ define void @pass_multiple(i32 %i, i8 %x1, i16 %x2, i32 %x3, i64 %x4, <4 x i32>
; ABI-NEXT: entry:
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_MULTIPLE_VARARG:%.*]], align 16
; ABI-NEXT: [[VARARG_BUFFER1:%.*]] = alloca [[PASS_MULTIPLE_VARARG_0:%.*]], align 16
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_MULTIPLE_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store i32 [[I:%.*]], ptr [[TMP0]], align 4
; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[PASS_MULTIPLE_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1
@@ -324,8 +324,8 @@ define void @pass_multiple(i32 %i, i8 %x1, i16 %x2, i32 %x3, i64 %x4, <4 x i32>
; ABI-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[PASS_MULTIPLE_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 3
; ABI-NEXT: store i64 [[X4:%.*]], ptr [[TMP2]], align 8
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[VARARG_BUFFER1]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER1]])
; ABI-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[PASS_MULTIPLE_VARARG_0]], ptr [[VARARG_BUFFER1]], i32 0, i32 0
; ABI-NEXT: store i32 [[I]], ptr [[TMP3]], align 4
; ABI-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[PASS_MULTIPLE_VARARG_0]], ptr [[VARARG_BUFFER1]], i32 0, i32 1
@@ -335,7 +335,7 @@ define void @pass_multiple(i32 %i, i8 %x1, i16 %x2, i32 %x3, i64 %x4, <4 x i32>
; ABI-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[PASS_MULTIPLE_VARARG_0]], ptr [[VARARG_BUFFER1]], i32 0, i32 5
; ABI-NEXT: store <4 x i32> [[X5:%.*]], ptr [[TMP6]], align 16
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER1]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[VARARG_BUFFER1]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER1]])
; ABI-NEXT: ret void
;
entry:
diff --git a/llvm/test/Transforms/FunctionAttrs/nocapture.ll b/llvm/test/Transforms/FunctionAttrs/nocapture.ll
index 9d6acc4..26b5dc2 100644
--- a/llvm/test/Transforms/FunctionAttrs/nocapture.ll
+++ b/llvm/test/Transforms/FunctionAttrs/nocapture.ll
@@ -1082,6 +1082,65 @@ define i64 @captures_not_ret_only(ptr %p) {
ret i64 %int
}
+@gi = global i64 0
+
+;; Unlike ptrtoint, ptrtoaddr only captures the address.
+define i64 @captures_ptrtoaddr_stored(ptr %p) {
+; FNATTRS: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(write, argmem: none, inaccessiblemem: none)
+; FNATTRS-LABEL: define noundef i64 @captures_ptrtoaddr_stored
+; FNATTRS-SAME: (ptr captures(address) [[P:%.*]]) #[[ATTR1]] {
+; FNATTRS-NEXT: [[INT:%.*]] = ptrtoaddr ptr [[P]] to i64
+; FNATTRS-NEXT: store i64 [[INT]], ptr @gi, align 8
+; FNATTRS-NEXT: ret i64 0
+;
+; ATTRIBUTOR: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(write)
+; ATTRIBUTOR-LABEL: define i64 @captures_ptrtoaddr_stored
+; ATTRIBUTOR-SAME: (ptr nofree writeonly [[P:%.*]]) #[[ATTR1]] {
+; ATTRIBUTOR-NEXT: [[INT:%.*]] = ptrtoaddr ptr [[P]] to i64
+; ATTRIBUTOR-NEXT: store i64 [[INT]], ptr @gi, align 8
+; ATTRIBUTOR-NEXT: ret i64 0
+;
+ %int = ptrtoaddr ptr %p to i64
+ store i64 %int, ptr @gi, align 8
+ ret i64 0
+}
+
+;; Note: ptrtoaddr is a location-independent capture, so we don't get captures(ret: address) here.
+define i64 @captures_ptrtoaddr_ret(ptr %p) {
+; FNATTRS: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(none)
+; FNATTRS-LABEL: define i64 @captures_ptrtoaddr_ret
+; FNATTRS-SAME: (ptr captures(address) [[P:%.*]]) #[[ATTR0]] {
+; FNATTRS-NEXT: [[INT:%.*]] = ptrtoaddr ptr [[P]] to i64
+; FNATTRS-NEXT: ret i64 [[INT]]
+;
+; ATTRIBUTOR: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(none)
+; ATTRIBUTOR-LABEL: define i64 @captures_ptrtoaddr_ret
+; ATTRIBUTOR-SAME: (ptr nofree readnone [[P:%.*]]) #[[ATTR0]] {
+; ATTRIBUTOR-NEXT: [[INT:%.*]] = ptrtoaddr ptr [[P]] to i64
+; ATTRIBUTOR-NEXT: ret i64 [[INT]]
+;
+ %int = ptrtoaddr ptr %p to i64
+ ret i64 %int
+}
+
+;; Note: ptrtoaddr is a location-independent capture, so we don't get captures(none) here.
+define i64 @captures_ptrtoaddr_ignored(ptr %p) {
+; FNATTRS: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(none)
+; FNATTRS-LABEL: define noundef i64 @captures_ptrtoaddr_ignored
+; FNATTRS-SAME: (ptr captures(address) [[P:%.*]]) #[[ATTR0]] {
+; FNATTRS-NEXT: [[INT:%.*]] = ptrtoaddr ptr [[P]] to i64
+; FNATTRS-NEXT: ret i64 0
+;
+; ATTRIBUTOR: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(none)
+; ATTRIBUTOR-LABEL: define i64 @captures_ptrtoaddr_ignored
+; ATTRIBUTOR-SAME: (ptr nofree readnone [[P:%.*]]) #[[ATTR0]] {
+; ATTRIBUTOR-NEXT: [[INT:%.*]] = ptrtoaddr ptr [[P]] to i64
+; ATTRIBUTOR-NEXT: ret i64 0
+;
+ %int = ptrtoaddr ptr %p to i64
+ ret i64 0
+}
+
define void @captures_read_provenance(ptr %p) {
; FNATTRS-LABEL: define void @captures_read_provenance
; FNATTRS-SAME: (ptr captures(address, read_provenance) [[P:%.*]]) {
diff --git a/llvm/test/Transforms/GVN/PRE/phi-translate-2.ll b/llvm/test/Transforms/GVN/PRE/phi-translate-2.ll
index a38d3e5..1e789b0 100644
--- a/llvm/test/Transforms/GVN/PRE/phi-translate-2.ll
+++ b/llvm/test/Transforms/GVN/PRE/phi-translate-2.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -passes=gvn -S | FileCheck %s
+; RUN: opt < %s -passes=gvn -S | FileCheck %s --check-prefixes=CHECK,MDEP
+; RUN: opt < %s -passes='gvn<memoryssa>' -S | FileCheck %s --check-prefixes=CHECK,MSSA
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
@a = common global [100 x i64] zeroinitializer, align 16
@@ -50,32 +51,56 @@ if.end: ; preds = %if.then, %entry
}
define void @test2(i64 %i) {
-; CHECK-LABEL: @test2(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [100 x i64], ptr @a, i64 0, i64 [[I:%.*]]
-; CHECK-NEXT: [[T0:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [100 x i64], ptr @b, i64 0, i64 [[I]]
-; CHECK-NEXT: [[T1:%.*]] = load i64, ptr [[ARRAYIDX1]], align 8
-; CHECK-NEXT: [[MUL:%.*]] = mul nsw i64 [[T1]], [[T0]]
-; CHECK-NEXT: store i64 [[MUL]], ptr @g1, align 8
-; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i64 [[MUL]], 3
-; CHECK-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
-; CHECK: if.then:
-; CHECK-NEXT: [[CALL:%.*]] = tail call i64 (...) @goo()
-; CHECK-NEXT: store i64 [[CALL]], ptr @g2, align 8
-; CHECK-NEXT: [[T2_PRE:%.*]] = load i64, ptr getelementptr inbounds nuw (i8, ptr @a, i64 24), align 8
-; CHECK-NEXT: [[T3_PRE:%.*]] = load i64, ptr getelementptr inbounds nuw (i8, ptr @b, i64 24), align 8
-; CHECK-NEXT: [[DOTPRE:%.*]] = mul nsw i64 [[T3_PRE]], [[T2_PRE]]
-; CHECK-NEXT: br label [[IF_END]]
-; CHECK: if.end:
-; CHECK-NEXT: [[MUL5_PRE_PHI:%.*]] = phi i64 [ [[DOTPRE]], [[IF_THEN]] ], [ [[MUL]], [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[T3:%.*]] = phi i64 [ [[T3_PRE]], [[IF_THEN]] ], [ [[T1]], [[ENTRY]] ]
-; CHECK-NEXT: [[T2:%.*]] = phi i64 [ [[T2_PRE]], [[IF_THEN]] ], [ [[T0]], [[ENTRY]] ]
-; CHECK-NEXT: [[I_ADDR_0:%.*]] = phi i64 [ 3, [[IF_THEN]] ], [ [[I]], [[ENTRY]] ]
-; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [100 x i64], ptr @a, i64 0, i64 [[I_ADDR_0]]
-; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [100 x i64], ptr @b, i64 0, i64 [[I_ADDR_0]]
-; CHECK-NEXT: store i64 [[MUL5_PRE_PHI]], ptr @g3, align 8
-; CHECK-NEXT: ret void
+; MDEP-LABEL: @test2(
+; MDEP-NEXT: entry:
+; MDEP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [100 x i64], ptr @a, i64 0, i64 [[I:%.*]]
+; MDEP-NEXT: [[T0:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
+; MDEP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [100 x i64], ptr @b, i64 0, i64 [[I]]
+; MDEP-NEXT: [[T1:%.*]] = load i64, ptr [[ARRAYIDX1]], align 8
+; MDEP-NEXT: [[MUL:%.*]] = mul nsw i64 [[T1]], [[T0]]
+; MDEP-NEXT: store i64 [[MUL]], ptr @g1, align 8
+; MDEP-NEXT: [[CMP:%.*]] = icmp sgt i64 [[MUL]], 3
+; MDEP-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
+; MDEP: if.then:
+; MDEP-NEXT: [[CALL:%.*]] = tail call i64 (...) @goo()
+; MDEP-NEXT: store i64 [[CALL]], ptr @g2, align 8
+; MDEP-NEXT: [[T2_PRE:%.*]] = load i64, ptr getelementptr inbounds nuw (i8, ptr @a, i64 24), align 8
+; MDEP-NEXT: [[T3_PRE:%.*]] = load i64, ptr getelementptr inbounds nuw (i8, ptr @b, i64 24), align 8
+; MDEP-NEXT: [[DOTPRE:%.*]] = mul nsw i64 [[T3_PRE]], [[T2_PRE]]
+; MDEP-NEXT: br label [[IF_END]]
+; MDEP: if.end:
+; MDEP-NEXT: [[MUL5_PRE_PHI:%.*]] = phi i64 [ [[DOTPRE]], [[IF_THEN]] ], [ [[MUL]], [[ENTRY:%.*]] ]
+; MDEP-NEXT: [[T3:%.*]] = phi i64 [ [[T3_PRE]], [[IF_THEN]] ], [ [[T1]], [[ENTRY]] ]
+; MDEP-NEXT: [[T2:%.*]] = phi i64 [ [[T2_PRE]], [[IF_THEN]] ], [ [[T0]], [[ENTRY]] ]
+; MDEP-NEXT: [[I_ADDR_0:%.*]] = phi i64 [ 3, [[IF_THEN]] ], [ [[I]], [[ENTRY]] ]
+; MDEP-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [100 x i64], ptr @a, i64 0, i64 [[I_ADDR_0]]
+; MDEP-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [100 x i64], ptr @b, i64 0, i64 [[I_ADDR_0]]
+; MDEP-NEXT: store i64 [[MUL5_PRE_PHI]], ptr @g3, align 8
+; MDEP-NEXT: ret void
+;
+; MSSA-LABEL: @test2(
+; MSSA-NEXT: entry:
+; MSSA-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [100 x i64], ptr @a, i64 0, i64 [[I:%.*]]
+; MSSA-NEXT: [[T0:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
+; MSSA-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [100 x i64], ptr @b, i64 0, i64 [[I]]
+; MSSA-NEXT: [[T1:%.*]] = load i64, ptr [[ARRAYIDX1]], align 8
+; MSSA-NEXT: [[MUL:%.*]] = mul nsw i64 [[T1]], [[T0]]
+; MSSA-NEXT: store i64 [[MUL]], ptr @g1, align 8
+; MSSA-NEXT: [[CMP:%.*]] = icmp sgt i64 [[MUL]], 3
+; MSSA-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
+; MSSA: if.then:
+; MSSA-NEXT: [[CALL:%.*]] = tail call i64 (...) @goo()
+; MSSA-NEXT: store i64 [[CALL]], ptr @g2, align 8
+; MSSA-NEXT: br label [[IF_END]]
+; MSSA: if.end:
+; MSSA-NEXT: [[I_ADDR_0:%.*]] = phi i64 [ 3, [[IF_THEN]] ], [ [[I]], [[ENTRY:%.*]] ]
+; MSSA-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [100 x i64], ptr @a, i64 0, i64 [[I_ADDR_0]]
+; MSSA-NEXT: [[T2:%.*]] = load i64, ptr [[ARRAYIDX3]], align 8
+; MSSA-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [100 x i64], ptr @b, i64 0, i64 [[I_ADDR_0]]
+; MSSA-NEXT: [[T3:%.*]] = load i64, ptr [[ARRAYIDX4]], align 8
+; MSSA-NEXT: [[MUL5:%.*]] = mul nsw i64 [[T3]], [[T2]]
+; MSSA-NEXT: store i64 [[MUL5]], ptr @g3, align 8
+; MSSA-NEXT: ret void
;
entry:
%arrayidx = getelementptr inbounds [100 x i64], ptr @a, i64 0, i64 %i
@@ -252,29 +277,50 @@ if.end3: ; preds = %if.then2, %if.else,
; available in if.then. Check that we correctly phi-translate to the phi that
; the load has been replaced with.
define void @test6(ptr %ptr, i1 %arg) {
-; CHECK-LABEL: @test6(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[ARRAYIDX1_PHI_TRANS_INSERT:%.*]] = getelementptr inbounds i32, ptr [[PTR:%.*]], i64 1
-; CHECK-NEXT: [[DOTPRE:%.*]] = load i32, ptr [[ARRAYIDX1_PHI_TRANS_INSERT]], align 4
-; CHECK-NEXT: br label [[WHILE:%.*]]
-; CHECK: while:
-; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ [[DOTPRE]], [[ENTRY:%.*]] ], [ [[TMP2:%.*]], [[IF_END:%.*]] ]
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ 1, [[ENTRY]] ], [ [[I_NEXT:%.*]], [[IF_END]] ]
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[I]]
-; CHECK-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[I_NEXT]]
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
-; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP0]], [[TMP1]]
-; CHECK-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END]]
-; CHECK: if.then:
-; CHECK-NEXT: store i32 [[TMP1]], ptr [[ARRAYIDX1]], align 4
-; CHECK-NEXT: store i32 [[TMP0]], ptr [[ARRAYIDX2]], align 4
-; CHECK-NEXT: br label [[IF_END]]
-; CHECK: if.end:
-; CHECK-NEXT: [[TMP2]] = phi i32 [ [[TMP0]], [[IF_THEN]] ], [ [[TMP1]], [[WHILE]] ]
-; CHECK-NEXT: br i1 [[ARG:%.*]], label [[WHILE_END:%.*]], label [[WHILE]]
-; CHECK: while.end:
-; CHECK-NEXT: ret void
+; MDEP-LABEL: @test6(
+; MDEP-NEXT: entry:
+; MDEP-NEXT: [[ARRAYIDX1_PHI_TRANS_INSERT:%.*]] = getelementptr inbounds i32, ptr [[PTR:%.*]], i64 1
+; MDEP-NEXT: [[DOTPRE:%.*]] = load i32, ptr [[ARRAYIDX1_PHI_TRANS_INSERT]], align 4
+; MDEP-NEXT: br label [[WHILE:%.*]]
+; MDEP: while:
+; MDEP-NEXT: [[TMP0:%.*]] = phi i32 [ [[DOTPRE]], [[ENTRY:%.*]] ], [ [[TMP2:%.*]], [[IF_END:%.*]] ]
+; MDEP-NEXT: [[I:%.*]] = phi i64 [ 1, [[ENTRY]] ], [ [[I_NEXT:%.*]], [[IF_END]] ]
+; MDEP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[I]]
+; MDEP-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
+; MDEP-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[I_NEXT]]
+; MDEP-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
+; MDEP-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP0]], [[TMP1]]
+; MDEP-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END]]
+; MDEP: if.then:
+; MDEP-NEXT: store i32 [[TMP1]], ptr [[ARRAYIDX1]], align 4
+; MDEP-NEXT: store i32 [[TMP0]], ptr [[ARRAYIDX2]], align 4
+; MDEP-NEXT: br label [[IF_END]]
+; MDEP: if.end:
+; MDEP-NEXT: [[TMP2]] = phi i32 [ [[TMP0]], [[IF_THEN]] ], [ [[TMP1]], [[WHILE]] ]
+; MDEP-NEXT: br i1 [[ARG:%.*]], label [[WHILE_END:%.*]], label [[WHILE]]
+; MDEP: while.end:
+; MDEP-NEXT: ret void
+;
+; MSSA-LABEL: @test6(
+; MSSA-NEXT: entry:
+; MSSA-NEXT: br label [[WHILE:%.*]]
+; MSSA: while:
+; MSSA-NEXT: [[I:%.*]] = phi i64 [ 1, [[ENTRY:%.*]] ], [ [[I_NEXT:%.*]], [[IF_END:%.*]] ]
+; MSSA-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[PTR:%.*]], i64 [[I]]
+; MSSA-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4
+; MSSA-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
+; MSSA-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[I_NEXT]]
+; MSSA-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
+; MSSA-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP0]], [[TMP1]]
+; MSSA-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END]]
+; MSSA: if.then:
+; MSSA-NEXT: store i32 [[TMP1]], ptr [[ARRAYIDX1]], align 4
+; MSSA-NEXT: store i32 [[TMP0]], ptr [[ARRAYIDX2]], align 4
+; MSSA-NEXT: br label [[IF_END]]
+; MSSA: if.end:
+; MSSA-NEXT: br i1 [[ARG:%.*]], label [[WHILE_END:%.*]], label [[WHILE]]
+; MSSA: while.end:
+; MSSA-NEXT: ret void
;
entry:
br label %while
@@ -304,24 +350,40 @@ while.end:
; Load from arrayidx2 is partially redundant, check that address translation can
; fold sext + trunc across phi node together.
define i32 @test7(ptr noalias %ptr1, ptr noalias %ptr2, i32 %i, i1 %cond) {
-; CHECK-LABEL: @test7(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: br i1 [[COND:%.*]], label [[IF_THEN:%.*]], label [[ENTRY_IF_END_CRIT_EDGE:%.*]]
-; CHECK: entry.if.end_crit_edge:
-; CHECK-NEXT: [[RES_PRE:%.*]] = load i32, ptr [[PTR1:%.*]], align 4
-; CHECK-NEXT: br label [[IF_END:%.*]]
-; CHECK: if.then:
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[PTR1]], i32 [[I:%.*]]
-; CHECK-NEXT: [[TMP:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: store i32 [[TMP]], ptr [[PTR2:%.*]], align 4
-; CHECK-NEXT: [[IDX_EXT:%.*]] = sext i32 [[I]] to i64
-; CHECK-NEXT: br label [[IF_END]]
-; CHECK: if.end:
-; CHECK-NEXT: [[RES:%.*]] = phi i32 [ [[RES_PRE]], [[ENTRY_IF_END_CRIT_EDGE]] ], [ [[TMP]], [[IF_THEN]] ]
-; CHECK-NEXT: [[IDX:%.*]] = phi i64 [ 0, [[ENTRY_IF_END_CRIT_EDGE]] ], [ [[IDX_EXT]], [[IF_THEN]] ]
-; CHECK-NEXT: [[IDX_TRUNC:%.*]] = trunc i64 [[IDX]] to i32
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[PTR1]], i32 [[IDX_TRUNC]]
-; CHECK-NEXT: ret i32 [[RES]]
+; MDEP-LABEL: @test7(
+; MDEP-NEXT: entry:
+; MDEP-NEXT: br i1 [[COND:%.*]], label [[IF_THEN:%.*]], label [[ENTRY_IF_END_CRIT_EDGE:%.*]]
+; MDEP: entry.if.end_crit_edge:
+; MDEP-NEXT: [[RES_PRE:%.*]] = load i32, ptr [[PTR1:%.*]], align 4
+; MDEP-NEXT: br label [[IF_END:%.*]]
+; MDEP: if.then:
+; MDEP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[PTR1]], i32 [[I:%.*]]
+; MDEP-NEXT: [[TMP:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; MDEP-NEXT: store i32 [[TMP]], ptr [[PTR2:%.*]], align 4
+; MDEP-NEXT: [[IDX_EXT:%.*]] = sext i32 [[I]] to i64
+; MDEP-NEXT: br label [[IF_END]]
+; MDEP: if.end:
+; MDEP-NEXT: [[RES:%.*]] = phi i32 [ [[RES_PRE]], [[ENTRY_IF_END_CRIT_EDGE]] ], [ [[TMP]], [[IF_THEN]] ]
+; MDEP-NEXT: [[IDX:%.*]] = phi i64 [ 0, [[ENTRY_IF_END_CRIT_EDGE]] ], [ [[IDX_EXT]], [[IF_THEN]] ]
+; MDEP-NEXT: [[IDX_TRUNC:%.*]] = trunc i64 [[IDX]] to i32
+; MDEP-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[PTR1]], i32 [[IDX_TRUNC]]
+; MDEP-NEXT: ret i32 [[RES]]
+;
+; MSSA-LABEL: @test7(
+; MSSA-NEXT: entry:
+; MSSA-NEXT: br i1 [[COND:%.*]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
+; MSSA: if.then:
+; MSSA-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[PTR1:%.*]], i32 [[I:%.*]]
+; MSSA-NEXT: [[TMP:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; MSSA-NEXT: store i32 [[TMP]], ptr [[PTR2:%.*]], align 4
+; MSSA-NEXT: [[IDX_EXT:%.*]] = sext i32 [[I]] to i64
+; MSSA-NEXT: br label [[IF_END]]
+; MSSA: if.end:
+; MSSA-NEXT: [[IDX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IDX_EXT]], [[IF_THEN]] ]
+; MSSA-NEXT: [[IDX_TRUNC:%.*]] = trunc i64 [[IDX]] to i32
+; MSSA-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[PTR1]], i32 [[IDX_TRUNC]]
+; MSSA-NEXT: [[RES:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
+; MSSA-NEXT: ret i32 [[RES]]
;
entry:
br i1 %cond, label %if.then, label %if.end
diff --git a/llvm/test/Transforms/GVN/PRE/phi-translate-add.ll b/llvm/test/Transforms/GVN/PRE/phi-translate-add.ll
index ea43307..cb05a8e 100644
--- a/llvm/test/Transforms/GVN/PRE/phi-translate-add.ll
+++ b/llvm/test/Transforms/GVN/PRE/phi-translate-add.ll
@@ -1,21 +1,35 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -passes=gvn -gvn-add-phi-translation=true -S < %s | FileCheck %s --check-prefix=ADD-TRANS-ON
-; RUN: opt -passes=gvn -gvn-add-phi-translation=false -S < %s | FileCheck %s --check-prefix=ADD-TRANS-OFF
+; RUN: opt -passes=gvn -gvn-add-phi-translation=true -S < %s | FileCheck %s --check-prefix=ADD-TRANS-ON --check-prefixes=CHECK,PT-ON-MDEP
+; RUN: opt -passes='gvn<memoryssa>' -gvn-add-phi-translation=true -S < %s | FileCheck %s --check-prefix=ADD-TRANS-ON --check-prefixes=CHECK,PT-ON-MSSA
+; RUN: opt -passes=gvn -gvn-add-phi-translation=false -S < %s | FileCheck %s --check-prefix=ADD-TRANS-OFF --check-prefixes=CHECK,PT-OFF-MDEP
+; RUN: opt -passes='gvn<memoryssa>' -gvn-add-phi-translation=false -S < %s | FileCheck %s --check-prefix=ADD-TRANS-OFF --check-prefixes=CHECK,PT-OFF-MSSA
; Test that phi translation is able to hoist a load whose address
; depends on an add also being hoisted.
define double @phi_translation_hoists_add(ptr %a, i64 %idx) {
-; ADD-TRANS-ON-LABEL: @phi_translation_hoists_add(
-; ADD-TRANS-ON-NEXT: entry:
-; ADD-TRANS-ON-NEXT: [[ADD_PHI_TRANS_INSERT:%.*]] = add nuw nsw i64 [[IDX:%.*]], 1
-; ADD-TRANS-ON-NEXT: [[GEP_PHI_TRANS_INSERT:%.*]] = getelementptr inbounds double, ptr [[A:%.*]], i64 [[ADD_PHI_TRANS_INSERT]]
-; ADD-TRANS-ON-NEXT: [[LOAD_PRE:%.*]] = load double, ptr [[GEP_PHI_TRANS_INSERT]], align 8
-; ADD-TRANS-ON-NEXT: br label [[FOR_BODY:%.*]]
-; ADD-TRANS-ON: for.body:
-; ADD-TRANS-ON-NEXT: [[CMP:%.*]] = fcmp ole double [[LOAD_PRE]], 1.000000e+00
-; ADD-TRANS-ON-NEXT: br i1 [[CMP]], label [[EXIT:%.*]], label [[FOR_BODY]]
-; ADD-TRANS-ON: exit:
-; ADD-TRANS-ON-NEXT: ret double [[LOAD_PRE]]
+; PT-ON-MDEP-LABEL: @phi_translation_hoists_add(
+; PT-ON-MDEP-NEXT: entry:
+; PT-ON-MDEP-NEXT: [[ADD_PHI_TRANS_INSERT:%.*]] = add nuw nsw i64 [[IDX:%.*]], 1
+; PT-ON-MDEP-NEXT: [[GEP_PHI_TRANS_INSERT:%.*]] = getelementptr inbounds double, ptr [[A:%.*]], i64 [[ADD_PHI_TRANS_INSERT]]
+; PT-ON-MDEP-NEXT: [[LOAD_PRE:%.*]] = load double, ptr [[GEP_PHI_TRANS_INSERT]], align 8
+; PT-ON-MDEP-NEXT: br label [[FOR_BODY:%.*]]
+; PT-ON-MDEP: for.body:
+; PT-ON-MDEP-NEXT: [[CMP:%.*]] = fcmp ole double [[LOAD_PRE]], 1.000000e+00
+; PT-ON-MDEP-NEXT: br i1 [[CMP]], label [[EXIT:%.*]], label [[FOR_BODY]]
+; PT-ON-MDEP: exit:
+; PT-ON-MDEP-NEXT: ret double [[LOAD_PRE]]
+;
+; PT-ON-MSSA-LABEL: @phi_translation_hoists_add(
+; PT-ON-MSSA-NEXT: entry:
+; PT-ON-MSSA-NEXT: br label [[FOR_BODY:%.*]]
+; PT-ON-MSSA: for.body:
+; PT-ON-MSSA-NEXT: [[ADD:%.*]] = add nuw nsw i64 [[IDX:%.*]], 1
+; PT-ON-MSSA-NEXT: [[GEP:%.*]] = getelementptr inbounds double, ptr [[A:%.*]], i64 [[ADD]]
+; PT-ON-MSSA-NEXT: [[LOAD:%.*]] = load double, ptr [[GEP]], align 8
+; PT-ON-MSSA-NEXT: [[CMP:%.*]] = fcmp ole double [[LOAD]], 1.000000e+00
+; PT-ON-MSSA-NEXT: br i1 [[CMP]], label [[EXIT:%.*]], label [[FOR_BODY]]
+; PT-ON-MSSA: exit:
+; PT-ON-MSSA-NEXT: ret double [[LOAD]]
;
; ADD-TRANS-OFF-LABEL: @phi_translation_hoists_add(
; ADD-TRANS-OFF-NEXT: entry:
@@ -42,3 +56,8 @@ for.body: ; preds = %for.body, %entry
exit:
ret double %load
}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; ADD-TRANS-ON: {{.*}}
+; CHECK: {{.*}}
+; PT-OFF-MDEP: {{.*}}
+; PT-OFF-MSSA: {{.*}}
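The check lines above show only the transformed output. Reconstructed from the PT-ON-MSSA checks (which leave the body untouched), the input pattern being tested looks roughly like this: the address of the hoistable load depends on an add that must be hoisted with it.

  define double @phi_translation_hoists_add(ptr %a, i64 %idx) {
  entry:
    br label %for.body
  for.body:
    ; The load's address depends on %add, so PRE can hoist the load only
    ; if phi translation also hoists the add and the GEP.
    %add = add nuw nsw i64 %idx, 1
    %gep = getelementptr inbounds double, ptr %a, i64 %add
    %load = load double, ptr %gep, align 8
    %cmp = fcmp ole double %load, 1.000000e+00
    br i1 %cmp, label %exit, label %for.body
  exit:
    ret double %load
  }

With -passes=gvn (PT-ON-MDEP) the add, GEP, and load all end up in the entry block; the -passes='gvn<memoryssa>' run (PT-ON-MSSA) currently leaves them in the loop.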
diff --git a/llvm/test/Transforms/GVN/PRE/phi-translate.ll b/llvm/test/Transforms/GVN/PRE/phi-translate.ll
index 713f012..084c449 100644
--- a/llvm/test/Transforms/GVN/PRE/phi-translate.ll
+++ b/llvm/test/Transforms/GVN/PRE/phi-translate.ll
@@ -1,23 +1,53 @@
-; RUN: opt -passes=gvn -S < %s | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -passes=gvn -S < %s | FileCheck %s --check-prefixes=CHECK,MDEP
+; RUN: opt -passes='gvn<memoryssa>' -S < %s | FileCheck %s --check-prefixes=CHECK,MSSA
target datalayout = "e-p:64:64:64"
-; CHECK-LABEL: @foo(
-; CHECK: entry.end_crit_edge:
-; CHECK: %[[INDEX:[a-z0-9.]+]] = sext i32 %x to i64{{$}}
-; CHECK: %[[ADDRESS:[a-z0-9.]+]] = getelementptr [100 x i32], ptr @G, i64 0, i64 %[[INDEX]]{{$}}
-; CHECK: %n.pre = load i32, ptr %[[ADDRESS]], align 4, !dbg [[N_LOC:![0-9]+]]
-; CHECK: br label %end
-; CHECK: then:
-; CHECK: store i32 %z
-; CHECK: end:
-; CHECK: %n = phi i32 [ %n.pre, %entry.end_crit_edge ], [ %z, %then ], !dbg [[N_LOC]]
-; CHECK: ret i32 %n
-; CHECK: [[N_LOC]] = !DILocation(line: 47, column: 1, scope: !{{.*}})
@G = external global [100 x i32]
define i32 @foo(i32 %x, i32 %z) !dbg !6 {
+; MDEP-LABEL: define i32 @foo(
+; MDEP-SAME: i32 [[X:%.*]], i32 [[Z:%.*]]) !dbg [[DBG5:![0-9]+]] {
+; MDEP-NEXT: [[ENTRY:.*:]]
+; MDEP-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[Z]], 0, !dbg [[DBG8:![0-9]+]]
+; MDEP-NEXT: br i1 [[TOBOOL]], label %[[ENTRY_END_CRIT_EDGE:.*]], label %[[THEN:.*]], !dbg [[DBG8]]
+; MDEP: [[ENTRY_END_CRIT_EDGE]]:
+; MDEP-NEXT: [[J_PHI_TRANS_INSERT:%.*]] = sext i32 [[X]] to i64
+; MDEP-NEXT: [[Q_PHI_TRANS_INSERT:%.*]] = getelementptr [100 x i32], ptr @G, i64 0, i64 [[J_PHI_TRANS_INSERT]]
+; MDEP-NEXT: [[N_PRE:%.*]] = load i32, ptr [[Q_PHI_TRANS_INSERT]], align 4, !dbg [[DBG9:![0-9]+]]
+; MDEP-NEXT: br label %[[END:.*]], !dbg [[DBG8]]
+; MDEP: [[THEN]]:
+; MDEP-NEXT: [[I:%.*]] = sext i32 [[X]] to i64, !dbg [[DBG10:![0-9]+]]
+; MDEP-NEXT: [[P:%.*]] = getelementptr [100 x i32], ptr @G, i64 0, i64 [[I]], !dbg [[DBG10]]
+; MDEP-NEXT: store i32 [[Z]], ptr [[P]], align 4, !dbg [[DBG10]]
+; MDEP-NEXT: br label %[[END]], !dbg [[DBG10]]
+; MDEP: [[END]]:
+; MDEP-NEXT: [[J_PRE_PHI:%.*]] = phi i64 [ [[J_PHI_TRANS_INSERT]], %[[ENTRY_END_CRIT_EDGE]] ], [ [[I]], %[[THEN]] ], !dbg [[DBG11:![0-9]+]]
+; MDEP-NEXT: [[N:%.*]] = phi i32 [ [[N_PRE]], %[[ENTRY_END_CRIT_EDGE]] ], [ [[Z]], %[[THEN]] ], !dbg [[DBG9]]
+; MDEP-NEXT: [[Q:%.*]] = getelementptr [100 x i32], ptr @G, i64 0, i64 [[J_PRE_PHI]], !dbg [[DBG12:![0-9]+]]
+; MDEP-NEXT: ret i32 [[N]], !dbg [[DBG9]]
+;
+; MSSA-LABEL: define i32 @foo(
+; MSSA-SAME: i32 [[X:%.*]], i32 [[Z:%.*]]) !dbg [[DBG5:![0-9]+]] {
+; MSSA-NEXT: [[ENTRY:.*:]]
+; MSSA-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[Z]], 0, !dbg [[DBG8:![0-9]+]]
+; MSSA-NEXT: br i1 [[TOBOOL]], label %[[ENTRY_END_CRIT_EDGE:.*]], label %[[THEN:.*]], !dbg [[DBG8]]
+; MSSA: [[ENTRY_END_CRIT_EDGE]]:
+; MSSA-NEXT: [[DOTPRE:%.*]] = sext i32 [[X]] to i64, !dbg [[DBG9:![0-9]+]]
+; MSSA-NEXT: br label %[[END:.*]], !dbg [[DBG8]]
+; MSSA: [[THEN]]:
+; MSSA-NEXT: [[I:%.*]] = sext i32 [[X]] to i64, !dbg [[DBG10:![0-9]+]]
+; MSSA-NEXT: [[P:%.*]] = getelementptr [100 x i32], ptr @G, i64 0, i64 [[I]], !dbg [[DBG10]]
+; MSSA-NEXT: store i32 [[Z]], ptr [[P]], align 4, !dbg [[DBG10]]
+; MSSA-NEXT: br label %[[END]], !dbg [[DBG10]]
+; MSSA: [[END]]:
+; MSSA-NEXT: [[J_PRE_PHI:%.*]] = phi i64 [ [[DOTPRE]], %[[ENTRY_END_CRIT_EDGE]] ], [ [[I]], %[[THEN]] ], !dbg [[DBG9]]
+; MSSA-NEXT: [[Q:%.*]] = getelementptr [100 x i32], ptr @G, i64 0, i64 [[J_PRE_PHI]], !dbg [[DBG11:![0-9]+]]
+; MSSA-NEXT: [[N:%.*]] = load i32, ptr [[Q]], align 4, !dbg [[DBG12:![0-9]+]]
+; MSSA-NEXT: ret i32 [[N]], !dbg [[DBG12]]
+;
entry:
%tobool = icmp eq i32 %z, 0, !dbg !7
br i1 %tobool, label %end, label %then, !dbg !7
@@ -51,6 +81,31 @@ end:
!10 = !DILocation(line: 46, column: 1, scope: !6)
!11 = !DILocation(line: 47, column: 1, scope: !6)
!12 = distinct !DICompileUnit(language: DW_LANG_C99, producer: "clang",
- file: !5,
- isOptimized: true, flags: "-O2",
- splitDebugFilename: "abc.debug", emissionKind: 2)
+ file: !5,
+ isOptimized: true, flags: "-O2",
+ splitDebugFilename: "abc.debug", emissionKind: 2)
+;.
+; MDEP: [[META3:![0-9]+]] = distinct !DICompileUnit(language: DW_LANG_C99, file: [[META4:![0-9]+]], producer: "clang", isOptimized: true, flags: "-O2", runtimeVersion: 0, splitDebugFilename: "abc.debug", emissionKind: LineTablesOnly)
+; MDEP: [[META4]] = !DIFile(filename: "{{.*}}a.cc", directory: {{.*}})
+; MDEP: [[DBG5]] = distinct !DISubprogram(name: "foo", scope: [[META4]], file: [[META4]], line: 42, type: [[META6:![0-9]+]], scopeLine: 43, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition, unit: [[META3]], retainedNodes: [[META7:![0-9]+]])
+; MDEP: [[META6]] = !DISubroutineType(types: [[META7]])
+; MDEP: [[META7]] = !{}
+; MDEP: [[DBG8]] = !DILocation(line: 43, column: 1, scope: [[DBG5]])
+; MDEP: [[DBG9]] = !DILocation(line: 47, column: 1, scope: [[DBG5]])
+; MDEP: [[DBG10]] = !DILocation(line: 44, column: 1, scope: [[DBG5]])
+; MDEP: [[DBG11]] = !DILocation(line: 45, column: 1, scope: [[DBG5]])
+; MDEP: [[DBG12]] = !DILocation(line: 46, column: 1, scope: [[DBG5]])
+;.
+; MSSA: [[META3:![0-9]+]] = distinct !DICompileUnit(language: DW_LANG_C99, file: [[META4:![0-9]+]], producer: "clang", isOptimized: true, flags: "-O2", runtimeVersion: 0, splitDebugFilename: "abc.debug", emissionKind: LineTablesOnly)
+; MSSA: [[META4]] = !DIFile(filename: "{{.*}}a.cc", directory: {{.*}})
+; MSSA: [[DBG5]] = distinct !DISubprogram(name: "foo", scope: [[META4]], file: [[META4]], line: 42, type: [[META6:![0-9]+]], scopeLine: 43, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition, unit: [[META3]], retainedNodes: [[META7:![0-9]+]])
+; MSSA: [[META6]] = !DISubroutineType(types: [[META7]])
+; MSSA: [[META7]] = !{}
+; MSSA: [[DBG8]] = !DILocation(line: 43, column: 1, scope: [[DBG5]])
+; MSSA: [[DBG9]] = !DILocation(line: 45, column: 1, scope: [[DBG5]])
+; MSSA: [[DBG10]] = !DILocation(line: 44, column: 1, scope: [[DBG5]])
+; MSSA: [[DBG11]] = !DILocation(line: 46, column: 1, scope: [[DBG5]])
+; MSSA: [[DBG12]] = !DILocation(line: 47, column: 1, scope: [[DBG5]])
+;.
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
diff --git a/llvm/test/Transforms/GVN/PRE/pre-aliasning-path.ll b/llvm/test/Transforms/GVN/PRE/pre-aliasning-path.ll
index 9ca3e1b..60611a0 100644
--- a/llvm/test/Transforms/GVN/PRE/pre-aliasning-path.ll
+++ b/llvm/test/Transforms/GVN/PRE/pre-aliasning-path.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -enable-load-pre -enable-pre -passes=gvn -S < %s | FileCheck %s
+; RUN: opt -enable-load-pre -enable-pre -passes=gvn -S < %s | FileCheck %s --check-prefixes=CHECK,MDEP
+; RUN: opt -enable-load-pre -enable-pre -passes='gvn<memoryssa>' -S < %s | FileCheck %s --check-prefixes=CHECK,MSSA
declare void @side_effect_0() nofree
@@ -102,25 +103,45 @@ exit:
}
define i32 @test_03(ptr %p) {
-; CHECK-LABEL: @test_03(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[X_PRE:%.*]] = load i32, ptr [[P:%.*]], align 4
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[BACKEDGE:%.*]] ]
-; CHECK-NEXT: [[COND:%.*]] = icmp ult i32 [[X_PRE]], 100
-; CHECK-NEXT: br i1 [[COND]], label [[HOT_PATH:%.*]], label [[COLD_PATH:%.*]]
-; CHECK: hot_path:
-; CHECK-NEXT: br label [[BACKEDGE]]
-; CHECK: cold_path:
-; CHECK-NEXT: call void @no_side_effect()
-; CHECK-NEXT: br label [[BACKEDGE]]
-; CHECK: backedge:
-; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], [[X_PRE]]
-; CHECK-NEXT: [[LOOP_COND:%.*]] = icmp ult i32 [[IV_NEXT]], 1000
-; CHECK-NEXT: br i1 [[LOOP_COND]], label [[LOOP]], label [[EXIT:%.*]]
-; CHECK: exit:
-; CHECK-NEXT: ret i32 [[X_PRE]]
+; MDEP-LABEL: @test_03(
+; MDEP-NEXT: entry:
+; MDEP-NEXT: [[X_PRE:%.*]] = load i32, ptr [[P:%.*]], align 4
+; MDEP-NEXT: br label [[LOOP:%.*]]
+; MDEP: loop:
+; MDEP-NEXT: [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[BACKEDGE:%.*]] ]
+; MDEP-NEXT: [[COND:%.*]] = icmp ult i32 [[X_PRE]], 100
+; MDEP-NEXT: br i1 [[COND]], label [[HOT_PATH:%.*]], label [[COLD_PATH:%.*]]
+; MDEP: hot_path:
+; MDEP-NEXT: br label [[BACKEDGE]]
+; MDEP: cold_path:
+; MDEP-NEXT: call void @no_side_effect()
+; MDEP-NEXT: br label [[BACKEDGE]]
+; MDEP: backedge:
+; MDEP-NEXT: [[IV_NEXT]] = add i32 [[IV]], [[X_PRE]]
+; MDEP-NEXT: [[LOOP_COND:%.*]] = icmp ult i32 [[IV_NEXT]], 1000
+; MDEP-NEXT: br i1 [[LOOP_COND]], label [[LOOP]], label [[EXIT:%.*]]
+; MDEP: exit:
+; MDEP-NEXT: ret i32 [[X_PRE]]
+;
+; MSSA-LABEL: @test_03(
+; MSSA-NEXT: entry:
+; MSSA-NEXT: br label [[LOOP:%.*]]
+; MSSA: loop:
+; MSSA-NEXT: [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[BACKEDGE:%.*]] ]
+; MSSA-NEXT: [[X:%.*]] = load i32, ptr [[P:%.*]], align 4
+; MSSA-NEXT: [[COND:%.*]] = icmp ult i32 [[X]], 100
+; MSSA-NEXT: br i1 [[COND]], label [[HOT_PATH:%.*]], label [[COLD_PATH:%.*]]
+; MSSA: hot_path:
+; MSSA-NEXT: br label [[BACKEDGE]]
+; MSSA: cold_path:
+; MSSA-NEXT: call void @no_side_effect()
+; MSSA-NEXT: br label [[BACKEDGE]]
+; MSSA: backedge:
+; MSSA-NEXT: [[IV_NEXT]] = add i32 [[IV]], [[X]]
+; MSSA-NEXT: [[LOOP_COND:%.*]] = icmp ult i32 [[IV_NEXT]], 1000
+; MSSA-NEXT: br i1 [[LOOP_COND]], label [[LOOP]], label [[EXIT:%.*]]
+; MSSA: exit:
+; MSSA-NEXT: ret i32 [[X]]
;
entry:
br label %loop
diff --git a/llvm/test/Transforms/GVN/PRE/pre-basic-add.ll b/llvm/test/Transforms/GVN/PRE/pre-basic-add.ll
index f099ddc..9bf6496 100644
--- a/llvm/test/Transforms/GVN/PRE/pre-basic-add.ll
+++ b/llvm/test/Transforms/GVN/PRE/pre-basic-add.ll
@@ -1,33 +1,53 @@
-; RUN: opt < %s -passes=gvn -enable-pre -S | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt < %s -passes=gvn -enable-pre -S | FileCheck %s --check-prefixes=CHECK,MDEP
+; RUN: opt < %s -passes='gvn<memoryssa>' -enable-pre -S | FileCheck %s --check-prefixes=CHECK,MSSA
; RUN: opt < %s -passes="gvn<pre>" -enable-pre=false -S | FileCheck %s
@H = common global i32 0 ; <ptr> [#uses=2]
@G = common global i32 0 ; <ptr> [#uses=1]
define i32 @test() nounwind {
+; CHECK-LABEL: define i32 @test(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @H, align 4
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 (...) @foo() #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
+; CHECK-NEXT: br i1 [[TMP2]], label %[[BB:.*]], label %[[ENTRY_BB1_CRIT_EDGE:.*]]
+; CHECK: [[ENTRY_BB1_CRIT_EDGE]]:
+; CHECK-NEXT: [[DOTPRE:%.*]] = add i32 [[TMP0]], 42
+; CHECK-NEXT: br label %[[BB1:.*]]
+; CHECK: [[BB]]:
+; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[TMP0]], 42
+; CHECK-NEXT: store i32 [[TMP3]], ptr @G, align 4
+; CHECK-NEXT: br label %[[BB1]]
+; CHECK: [[BB1]]:
+; CHECK-NEXT: [[DOTPRE_PHI:%.*]] = phi i32 [ [[DOTPRE]], %[[ENTRY_BB1_CRIT_EDGE]] ], [ [[TMP3]], %[[BB]] ]
+; CHECK-NEXT: store i32 [[DOTPRE_PHI]], ptr @H, align 4
+; CHECK-NEXT: ret i32 0
+;
entry:
- %0 = load i32, ptr @H, align 4 ; <i32> [#uses=2]
- %1 = call i32 (...) @foo() nounwind ; <i32> [#uses=1]
- %2 = icmp ne i32 %1, 0 ; <i1> [#uses=1]
- br i1 %2, label %bb, label %bb1
+ %0 = load i32, ptr @H, align 4 ; <i32> [#uses=2]
+ %1 = call i32 (...) @foo() nounwind ; <i32> [#uses=1]
+ %2 = icmp ne i32 %1, 0 ; <i1> [#uses=1]
+ br i1 %2, label %bb, label %bb1
bb: ; preds = %entry
- %3 = add i32 %0, 42 ; <i32> [#uses=1]
-; CHECK: %.pre = add i32 %0, 42
- store i32 %3, ptr @G, align 4
- br label %bb1
+ %3 = add i32 %0, 42 ; <i32> [#uses=1]
+ store i32 %3, ptr @G, align 4
+ br label %bb1
bb1: ; preds = %bb, %entry
- %4 = add i32 %0, 42 ; <i32> [#uses=1]
- store i32 %4, ptr @H, align 4
- br label %return
+ %4 = add i32 %0, 42 ; <i32> [#uses=1]
+ store i32 %4, ptr @H, align 4
+ br label %return
-; CHECK: %.pre-phi = phi i32 [ %.pre, %entry.bb1_crit_edge ], [ %3, %bb ]
-; CHECK-NEXT: store i32 %.pre-phi, ptr @H, align 4
-; CHECK-NEXT: ret i32 0
return: ; preds = %bb1
- ret i32 0
+ ret i32 0
}
declare i32 @foo(...)
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; MDEP: {{.*}}
+; MSSA: {{.*}}
diff --git a/llvm/test/Transforms/GVN/PRE/pre-jt-add.ll b/llvm/test/Transforms/GVN/PRE/pre-jt-add.ll
index 95f8f3f..f62d06d 100644
--- a/llvm/test/Transforms/GVN/PRE/pre-jt-add.ll
+++ b/llvm/test/Transforms/GVN/PRE/pre-jt-add.ll
@@ -1,16 +1,33 @@
-; RUN: opt < %s -passes=gvn,jump-threading -enable-pre -S | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt < %s -passes=gvn,jump-threading -enable-pre -S | FileCheck %s --check-prefixes=CHECK,MDEP
+; RUN: opt < %s -passes='gvn<memoryssa>',jump-threading -enable-pre -S | FileCheck %s --check-prefixes=CHECK,MSSA
@H = common global i32 0
@G = common global i32 0
define i32 @test(i1 %cond, i32 %v) nounwind {
-; CHECK-LABEL: @test
+; CHECK-LABEL: define i32 @test(
+; CHECK-SAME: i1 [[COND:%.*]], i32 [[V:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: br i1 [[COND]], label %[[BB:.*]], label %[[MERGE:.*]]
+; CHECK: [[BB]]:
+; CHECK-NEXT: store i32 -1, ptr @G, align 4
+; CHECK-NEXT: br label %[[MERGE]]
+; CHECK: [[MERGE]]:
+; CHECK-NEXT: [[ADD_2:%.*]] = add i32 [[V]], -1
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[ADD_2]], 0
+; CHECK-NEXT: br i1 [[CMP]], label %[[ACTION:.*]], label %[[RETURN:.*]]
+; CHECK: [[ACTION]]:
+; CHECK-NEXT: store i32 [[ADD_2]], ptr @H, align 4
+; CHECK-NEXT: br label %[[RETURN]]
+; CHECK: [[RETURN]]:
+; CHECK-NEXT: [[P:%.*]] = phi i32 [ 0, %[[MERGE]] ], [ 1, %[[ACTION]] ]
+; CHECK-NEXT: ret i32 [[P]]
+;
entry:
br i1 %cond, label %bb, label %bb1
bb:
-; CHECK: store
-; CHECK-NOT: br label %return
%add.1 = add nuw nsw i32 %v, -1
store i32 %add.1, ptr @G, align 4
br label %merge
@@ -24,8 +41,6 @@ merge:
br i1 %cmp, label %action, label %return
action:
-; CHECK: store
-; CHECK-NEXT: br label %return
store i32 %add.2, ptr @H, align 4
br label %return
@@ -34,3 +49,6 @@ return:
ret i32 %p
}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; MDEP: {{.*}}
+; MSSA: {{.*}}
diff --git a/llvm/test/Transforms/GVN/PRE/pre-load-dbg.ll b/llvm/test/Transforms/GVN/PRE/pre-load-dbg.ll
index 8c020fd..f961f23 100644
--- a/llvm/test/Transforms/GVN/PRE/pre-load-dbg.ll
+++ b/llvm/test/Transforms/GVN/PRE/pre-load-dbg.ll
@@ -1,4 +1,6 @@
-; RUN: opt < %s -passes=gvn -gvn-max-num-insns=22 -S | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt < %s -passes=gvn -gvn-max-num-insns=22 -S | FileCheck %s --check-prefixes=CHECK,MDEP
+; RUN: opt < %s -passes='gvn<memoryssa>' -gvn-max-num-insns=22 -S | FileCheck %s --check-prefixes=CHECK,MSSA
; Debug information should not impact gvn. The following two functions have the same
; code except debug information. They should generate the same optimized

@@ -11,13 +13,80 @@
@h = global %struct.a zeroinitializer, align 1
define void @withdbg() {
-; CHECK-LABEL: @withdbg
-; CHECK: [[PRE_PRE1:%.*]] = load i16, ptr @f, align 1
-; CHECK-NEXT: [[PRE_PRE2:%.*]] = load ptr, ptr @m, align 1
-; CHECK-NEXT: br i1 true, label %[[BLOCK1:.*]], label %[[BLOCK2:.*]]
-; CHECK: [[BLOCK1]]:
-; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[PRE_PRE1]] to i32
-; CHECK-NEXT: store i32 [[CONV]], ptr [[PRE_PRE2]], align 1
+; MDEP-LABEL: define void @withdbg() {
+; MDEP-NEXT: [[ENTRY:.*:]]
+; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_I:%.*]] = alloca i16, align 1
+; MDEP-NEXT: [[TMP11_PRE:%.*]] = load i16, ptr @f, align 1
+; MDEP-NEXT: [[TMP12_PRE:%.*]] = load ptr, ptr @m, align 1
+; MDEP-NEXT: br i1 true, label %[[LOR_END:.*]], label %[[LOR_RHS:.*]]
+; MDEP: [[LOR_RHS]]:
+; MDEP-NEXT: #dbg_declare(ptr undef, [[META4:![0-9]+]], !DIExpression(), [[META14:![0-9]+]])
+; MDEP-NEXT: #dbg_declare(ptr undef, [[META10:![0-9]+]], !DIExpression(), [[META14]])
+; MDEP-NEXT: #dbg_declare(ptr undef, [[META11:![0-9]+]], !DIExpression(), [[META14]])
+; MDEP-NEXT: #dbg_declare(ptr undef, [[META12:![0-9]+]], !DIExpression(), [[META14]])
+; MDEP-NEXT: #dbg_declare(ptr undef, [[META13:![0-9]+]], !DIExpression(), [[META14]])
+; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_I:%.*]] = load volatile i16, ptr @h, align 1
+; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_1_I:%.*]] = load volatile i16, ptr @h, align 1
+; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_1_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_2_I:%.*]] = load volatile i16, ptr @h, align 1
+; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_2_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_3_I:%.*]] = load volatile i16, ptr @h, align 1
+; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_3_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_4_I:%.*]] = load volatile i16, ptr @h, align 1
+; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_4_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_5_I:%.*]] = load volatile i16, ptr @h, align 1
+; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_5_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_6_I:%.*]] = load volatile i16, ptr @h, align 1
+; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_6_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_7_I:%.*]] = load volatile i16, ptr @h, align 1
+; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_7_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_8_I:%.*]] = load volatile i16, ptr @h, align 1
+; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_8_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MDEP-NEXT: br label %[[LOR_END]]
+; MDEP: [[LOR_END]]:
+; MDEP-NEXT: [[CONV_I_I6:%.*]] = sext i16 [[TMP11_PRE]] to i32
+; MDEP-NEXT: store i32 [[CONV_I_I6]], ptr [[TMP12_PRE]], align 1
+; MDEP-NEXT: ret void
+;
+; MSSA-LABEL: define void @withdbg() {
+; MSSA-NEXT: [[ENTRY:.*:]]
+; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_I:%.*]] = alloca i16, align 1
+; MSSA-NEXT: br i1 true, label %[[LOR_END:.*]], label %[[LOR_RHS:.*]]
+; MSSA: [[LOR_RHS]]:
+; MSSA-NEXT: #dbg_declare(ptr undef, [[META4:![0-9]+]], !DIExpression(), [[META14:![0-9]+]])
+; MSSA-NEXT: #dbg_declare(ptr undef, [[META10:![0-9]+]], !DIExpression(), [[META14]])
+; MSSA-NEXT: #dbg_declare(ptr undef, [[META11:![0-9]+]], !DIExpression(), [[META14]])
+; MSSA-NEXT: #dbg_declare(ptr undef, [[META12:![0-9]+]], !DIExpression(), [[META14]])
+; MSSA-NEXT: #dbg_declare(ptr undef, [[META13:![0-9]+]], !DIExpression(), [[META14]])
+; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_I:%.*]] = load volatile i16, ptr @h, align 1
+; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_1_I:%.*]] = load volatile i16, ptr @h, align 1
+; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_1_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_2_I:%.*]] = load volatile i16, ptr @h, align 1
+; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_2_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_3_I:%.*]] = load volatile i16, ptr @h, align 1
+; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_3_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_4_I:%.*]] = load volatile i16, ptr @h, align 1
+; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_4_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_5_I:%.*]] = load volatile i16, ptr @h, align 1
+; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_5_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_6_I:%.*]] = load volatile i16, ptr @h, align 1
+; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_6_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_7_I:%.*]] = load volatile i16, ptr @h, align 1
+; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_7_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_8_I:%.*]] = load volatile i16, ptr @h, align 1
+; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_8_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MSSA-NEXT: [[FVALUE:%.*]] = load i16, ptr @f, align 1
+; MSSA-NEXT: [[MVALUE:%.*]] = load ptr, ptr @m, align 1
+; MSSA-NEXT: br label %[[LOR_END]]
+; MSSA: [[LOR_END]]:
+; MSSA-NEXT: [[TMP11:%.*]] = load i16, ptr @f, align 1
+; MSSA-NEXT: [[CONV_I_I6:%.*]] = sext i16 [[TMP11]] to i32
+; MSSA-NEXT: [[TMP12:%.*]] = load ptr, ptr @m, align 1
+; MSSA-NEXT: store i32 [[CONV_I_I6]], ptr [[TMP12]], align 1
+; MSSA-NEXT: ret void
+;
entry:
%agg.tmp.ensured.sroa.0.i = alloca i16, align 1
@@ -61,13 +130,70 @@ lor.end: ; preds = %lor.rhs, %entry
}
define void @lessdbg() {
-; CHECK-LABEL: @lessdbg
-; CHECK: [[PRE_PRE1:%.*]] = load i16, ptr @f, align 1
-; CHECK-NEXT: [[PRE_PRE2:%.*]] = load ptr, ptr @m, align 1
-; CHECK-NEXT: br i1 true, label %[[BLOCK1:.*]], label %[[BLOCK2:.*]]
-; CHECK: [[BLOCK1]]:
-; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[PRE_PRE1]] to i32
-; CHECK-NEXT: store i32 [[CONV]], ptr [[PRE_PRE2]], align 1
+; MDEP-LABEL: define void @lessdbg() {
+; MDEP-NEXT: [[ENTRY:.*:]]
+; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_I:%.*]] = alloca i16, align 1
+; MDEP-NEXT: [[TMP11_PRE:%.*]] = load i16, ptr @f, align 1
+; MDEP-NEXT: [[TMP12_PRE:%.*]] = load ptr, ptr @m, align 1
+; MDEP-NEXT: br i1 true, label %[[LOR_END:.*]], label %[[LOR_RHS:.*]]
+; MDEP: [[LOR_RHS]]:
+; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_I:%.*]] = load volatile i16, ptr @h, align 1
+; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_1_I:%.*]] = load volatile i16, ptr @h, align 1
+; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_1_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_2_I:%.*]] = load volatile i16, ptr @h, align 1
+; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_2_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_3_I:%.*]] = load volatile i16, ptr @h, align 1
+; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_3_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_4_I:%.*]] = load volatile i16, ptr @h, align 1
+; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_4_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_5_I:%.*]] = load volatile i16, ptr @h, align 1
+; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_5_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_6_I:%.*]] = load volatile i16, ptr @h, align 1
+; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_6_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_7_I:%.*]] = load volatile i16, ptr @h, align 1
+; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_7_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_8_I:%.*]] = load volatile i16, ptr @h, align 1
+; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_8_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MDEP-NEXT: br label %[[LOR_END]]
+; MDEP: [[LOR_END]]:
+; MDEP-NEXT: [[CONV_I_I6:%.*]] = sext i16 [[TMP11_PRE]] to i32
+; MDEP-NEXT: store i32 [[CONV_I_I6]], ptr [[TMP12_PRE]], align 1
+; MDEP-NEXT: ret void
+;
+; MSSA-LABEL: define void @lessdbg() {
+; MSSA-NEXT: [[ENTRY:.*:]]
+; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_I:%.*]] = alloca i16, align 1
+; MSSA-NEXT: br i1 true, label %[[LOR_END:.*]], label %[[LOR_RHS:.*]]
+; MSSA: [[LOR_RHS]]:
+; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_I:%.*]] = load volatile i16, ptr @h, align 1
+; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_1_I:%.*]] = load volatile i16, ptr @h, align 1
+; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_1_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_2_I:%.*]] = load volatile i16, ptr @h, align 1
+; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_2_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_3_I:%.*]] = load volatile i16, ptr @h, align 1
+; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_3_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_4_I:%.*]] = load volatile i16, ptr @h, align 1
+; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_4_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_5_I:%.*]] = load volatile i16, ptr @h, align 1
+; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_5_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_6_I:%.*]] = load volatile i16, ptr @h, align 1
+; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_6_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_7_I:%.*]] = load volatile i16, ptr @h, align 1
+; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_7_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_8_I:%.*]] = load volatile i16, ptr @h, align 1
+; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_8_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MSSA-NEXT: [[FVALUE:%.*]] = load i16, ptr @f, align 1
+; MSSA-NEXT: [[MVALUE:%.*]] = load ptr, ptr @m, align 1
+; MSSA-NEXT: br label %[[LOR_END]]
+; MSSA: [[LOR_END]]:
+; MSSA-NEXT: [[TMP11:%.*]] = load i16, ptr @f, align 1
+; MSSA-NEXT: [[CONV_I_I6:%.*]] = sext i16 [[TMP11]] to i32
+; MSSA-NEXT: [[TMP12:%.*]] = load ptr, ptr @m, align 1
+; MSSA-NEXT: store i32 [[CONV_I_I6]], ptr [[TMP12]], align 1
+; MSSA-NEXT: ret void
+;
entry:
%agg.tmp.ensured.sroa.0.i = alloca i16, align 1
@@ -126,3 +252,34 @@ declare void @llvm.dbg.declare(metadata, metadata, metadata)
!48 = !DILocalVariable(name: "v", scope: !41, file: !1, line: 15, type: !5)
!49 = !DILocalVariable(name: "d", scope: !41, file: !1, line: 15, type: !5)
!50 = !DILocalVariable(name: "u", scope: !41, file: !1, line: 16, type: !5)
+;.
+; MDEP: [[META0:![0-9]+]] = distinct !DICompileUnit(language: DW_LANG_C11, file: [[META1:![0-9]+]], producer: "{{.*}}clang version {{.*}}", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, splitDebugInlining: false, nameTableKind: None)
+; MDEP: [[META1]] = !DIFile(filename: "{{.*}}bbi-78272.c", directory: {{.*}})
+; MDEP: [[META4]] = !DILocalVariable(name: "t", scope: [[META5:![0-9]+]], file: [[META1]], line: 15, type: [[META8:![0-9]+]])
+; MDEP: [[META5]] = distinct !DISubprogram(name: "x", scope: [[META1]], file: [[META1]], line: 14, type: [[META6:![0-9]+]], scopeLine: 14, flags: DIFlagAllCallsDescribed, spFlags: DISPFlagLocalToUnit | DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META9:![0-9]+]])
+; MDEP: [[META6]] = !DISubroutineType(types: [[META7:![0-9]+]])
+; MDEP: [[META7]] = !{[[META8]]}
+; MDEP: [[META8]] = !DIBasicType(name: "int", size: 16, encoding: DW_ATE_signed)
+; MDEP: [[META9]] = !{[[META4]], [[META10]], [[META11]], [[META12]], [[META13]]}
+; MDEP: [[META10]] = !DILocalVariable(name: "c", scope: [[META5]], file: [[META1]], line: 15, type: [[META8]])
+; MDEP: [[META11]] = !DILocalVariable(name: "v", scope: [[META5]], file: [[META1]], line: 15, type: [[META8]])
+; MDEP: [[META12]] = !DILocalVariable(name: "d", scope: [[META5]], file: [[META1]], line: 15, type: [[META8]])
+; MDEP: [[META13]] = !DILocalVariable(name: "u", scope: [[META5]], file: [[META1]], line: 16, type: [[META8]])
+; MDEP: [[META14]] = !DILocation(line: 15, column: 7, scope: [[META5]])
+;.
+; MSSA: [[META0:![0-9]+]] = distinct !DICompileUnit(language: DW_LANG_C11, file: [[META1:![0-9]+]], producer: "{{.*}}clang version {{.*}}", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, splitDebugInlining: false, nameTableKind: None)
+; MSSA: [[META1]] = !DIFile(filename: "{{.*}}bbi-78272.c", directory: {{.*}})
+; MSSA: [[META4]] = !DILocalVariable(name: "t", scope: [[META5:![0-9]+]], file: [[META1]], line: 15, type: [[META8:![0-9]+]])
+; MSSA: [[META5]] = distinct !DISubprogram(name: "x", scope: [[META1]], file: [[META1]], line: 14, type: [[META6:![0-9]+]], scopeLine: 14, flags: DIFlagAllCallsDescribed, spFlags: DISPFlagLocalToUnit | DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META9:![0-9]+]])
+; MSSA: [[META6]] = !DISubroutineType(types: [[META7:![0-9]+]])
+; MSSA: [[META7]] = !{[[META8]]}
+; MSSA: [[META8]] = !DIBasicType(name: "int", size: 16, encoding: DW_ATE_signed)
+; MSSA: [[META9]] = !{[[META4]], [[META10]], [[META11]], [[META12]], [[META13]]}
+; MSSA: [[META10]] = !DILocalVariable(name: "c", scope: [[META5]], file: [[META1]], line: 15, type: [[META8]])
+; MSSA: [[META11]] = !DILocalVariable(name: "v", scope: [[META5]], file: [[META1]], line: 15, type: [[META8]])
+; MSSA: [[META12]] = !DILocalVariable(name: "d", scope: [[META5]], file: [[META1]], line: 15, type: [[META8]])
+; MSSA: [[META13]] = !DILocalVariable(name: "u", scope: [[META5]], file: [[META1]], line: 16, type: [[META8]])
+; MSSA: [[META14]] = !DILocation(line: 15, column: 7, scope: [[META5]])
+;.
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
diff --git a/llvm/test/Transforms/GVN/PRE/pre-load-guards.ll b/llvm/test/Transforms/GVN/PRE/pre-load-guards.ll
index 1ca907d..ca1852f 100644
--- a/llvm/test/Transforms/GVN/PRE/pre-load-guards.ll
+++ b/llvm/test/Transforms/GVN/PRE/pre-load-guards.ll
@@ -1,4 +1,6 @@
-; RUN: opt < %s -passes=gvn -enable-load-pre -S | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt < %s -passes=gvn -enable-load-pre -S | FileCheck %s --check-prefixes=CHECK,MDEP
+; RUN: opt < %s -passes='gvn<memoryssa>' -enable-load-pre -S | FileCheck %s --check-prefixes=CHECK,MSSA
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
declare void @llvm.experimental.guard(i1, ...)
@@ -8,20 +10,33 @@ declare void @llvm.experimental.guard(i1, ...)
; the element in this case and deoptimize otherwise. If we hoist the load to a
; place above the guard, it may lead to an out-of-bounds array access.
define i32 @test_motivation(ptr %p, ptr %q, i1 %C, i32 %index, i32 %len) {
-; CHECK-LABEL: @test_motivation(
+; CHECK-LABEL: define i32 @test_motivation(
+; CHECK-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], i1 [[C:%.*]], i32 [[INDEX:%.*]], i32 [[LEN:%.*]]) {
+; CHECK-NEXT: [[BLOCK1:.*:]]
+; CHECK-NEXT: [[EL1:%.*]] = getelementptr inbounds i32, ptr [[Q]], i32 [[INDEX]]
+; CHECK-NEXT: [[EL2:%.*]] = getelementptr inbounds i32, ptr [[P]], i32 [[INDEX]]
+; CHECK-NEXT: br i1 [[C]], label %[[BLOCK2:.*]], label %[[BLOCK3:.*]]
+; CHECK: [[BLOCK2]]:
+; CHECK-NEXT: br label %[[BLOCK4:.*]]
+; CHECK: [[BLOCK3]]:
+; CHECK-NEXT: store i32 0, ptr [[EL1]], align 4
+; CHECK-NEXT: br label %[[BLOCK4]]
+; CHECK: [[BLOCK4]]:
+; CHECK-NEXT: [[P2:%.*]] = phi ptr [ [[EL2]], %[[BLOCK3]] ], [ [[EL1]], %[[BLOCK2]] ]
+; CHECK-NEXT: [[COND1:%.*]] = icmp sge i32 [[INDEX]], 0
+; CHECK-NEXT: [[COND2:%.*]] = icmp slt i32 [[INDEX]], [[LEN]]
+; CHECK-NEXT: [[IN_BOUNDS:%.*]] = and i1 [[COND1]], [[COND2]]
+; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[IN_BOUNDS]]) [ "deopt"() ]
+; CHECK-NEXT: [[PRE:%.*]] = load i32, ptr [[P2]], align 4
+; CHECK-NEXT: ret i32 [[PRE]]
+;
block1:
%el1 = getelementptr inbounds i32, ptr %q, i32 %index
%el2 = getelementptr inbounds i32, ptr %p, i32 %index
- br i1 %C, label %block2, label %block3
+ br i1 %C, label %block2, label %block3
block2:
-; CHECK: block2:
-; CHECK-NEXT: br
-; CHECK-NOT: load
-; CHECK-NOT: sge
-; CHECK-NOT: slt
-; CHECK-NOT: and
br label %block4
block3:
@@ -30,13 +45,6 @@ block3:
block4:
-; CHECK: block4:
-; CHECK: %cond1 = icmp sge i32 %index, 0
-; CHECK-NEXT: %cond2 = icmp slt i32 %index, %len
-; CHECK-NEXT: %in.bounds = and i1 %cond1, %cond2
-; CHECK: call void (i1, ...) @llvm.experimental.guard(i1 %in.bounds)
-; CHECK-NEXT: %PRE = load i32, ptr %P2
-; CHECK: ret i32 %PRE
%P2 = phi ptr [%el2, %block3], [%el1, %block2]
%cond1 = icmp sge i32 %index, 0
@@ -49,17 +57,28 @@ block4:
; Guard in load's block that is above the load should prohibit the PRE.
define i32 @test_guard_01(ptr %p, ptr %q, i1 %C, i1 %G) {
-; CHECK-LABEL: @test_guard_01(
+; CHECK-LABEL: define i32 @test_guard_01(
+; CHECK-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], i1 [[C:%.*]], i1 [[G:%.*]]) {
+; CHECK-NEXT: [[BLOCK1:.*:]]
+; CHECK-NEXT: br i1 [[C]], label %[[BLOCK2:.*]], label %[[BLOCK3:.*]]
+; CHECK: [[BLOCK2]]:
+; CHECK-NEXT: br label %[[BLOCK4:.*]]
+; CHECK: [[BLOCK3]]:
+; CHECK-NEXT: store i32 0, ptr [[P]], align 4
+; CHECK-NEXT: br label %[[BLOCK4]]
+; CHECK: [[BLOCK4]]:
+; CHECK-NEXT: [[P2:%.*]] = phi ptr [ [[P]], %[[BLOCK3]] ], [ [[Q]], %[[BLOCK2]] ]
+; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[G]]) [ "deopt"() ]
+; CHECK-NEXT: [[PRE:%.*]] = load i32, ptr [[P2]], align 4
+; CHECK-NEXT: ret i32 [[PRE]]
+;
block1:
- br i1 %C, label %block2, label %block3
+ br i1 %C, label %block2, label %block3
block2:
-; CHECK: block2:
-; CHECK-NEXT: br
-; CHECK-NOT: load
- br label %block4
+ br label %block4
block3:
store i32 0, ptr %p
@@ -67,10 +86,6 @@ block3:
block4:
-; CHECK: block4:
-; CHECK: call void (i1, ...) @llvm.experimental.guard(i1 %G)
-; CHECK-NEXT: load
-; CHECK: ret i32
%P2 = phi ptr [%p, %block3], [%q, %block2]
call void (i1, ...) @llvm.experimental.guard(i1 %G) [ "deopt"() ]
@@ -80,16 +95,44 @@ block4:
; Guard in load's block that is below the load should not prohibit the PRE.
define i32 @test_guard_02(ptr %p, ptr %q, i1 %C, i1 %G) {
-; CHECK-LABEL: @test_guard_02(
+; MDEP-LABEL: define i32 @test_guard_02(
+; MDEP-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], i1 [[C:%.*]], i1 [[G:%.*]]) {
+; MDEP-NEXT: [[BLOCK1:.*:]]
+; MDEP-NEXT: br i1 [[C]], label %[[BLOCK2:.*]], label %[[BLOCK3:.*]]
+; MDEP: [[BLOCK2]]:
+; MDEP-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[Q]], align 4
+; MDEP-NEXT: br label %[[BLOCK4:.*]]
+; MDEP: [[BLOCK3]]:
+; MDEP-NEXT: store i32 0, ptr [[P]], align 4
+; MDEP-NEXT: br label %[[BLOCK4]]
+; MDEP: [[BLOCK4]]:
+; MDEP-NEXT: [[PRE:%.*]] = phi i32 [ 0, %[[BLOCK3]] ], [ [[PRE_PRE]], %[[BLOCK2]] ]
+; MDEP-NEXT: [[P2:%.*]] = phi ptr [ [[P]], %[[BLOCK3]] ], [ [[Q]], %[[BLOCK2]] ]
+; MDEP-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[G]]) [ "deopt"() ]
+; MDEP-NEXT: ret i32 [[PRE]]
+;
+; MSSA-LABEL: define i32 @test_guard_02(
+; MSSA-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], i1 [[C:%.*]], i1 [[G:%.*]]) {
+; MSSA-NEXT: [[BLOCK1:.*:]]
+; MSSA-NEXT: br i1 [[C]], label %[[BLOCK2:.*]], label %[[BLOCK3:.*]]
+; MSSA: [[BLOCK2]]:
+; MSSA-NEXT: br label %[[BLOCK4:.*]]
+; MSSA: [[BLOCK3]]:
+; MSSA-NEXT: store i32 0, ptr [[P]], align 4
+; MSSA-NEXT: br label %[[BLOCK4]]
+; MSSA: [[BLOCK4]]:
+; MSSA-NEXT: [[P2:%.*]] = phi ptr [ [[P]], %[[BLOCK3]] ], [ [[Q]], %[[BLOCK2]] ]
+; MSSA-NEXT: [[PRE:%.*]] = load i32, ptr [[P2]], align 4
+; MSSA-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[G]]) [ "deopt"() ]
+; MSSA-NEXT: ret i32 [[PRE]]
+;
block1:
- br i1 %C, label %block2, label %block3
+ br i1 %C, label %block2, label %block3
block2:
-; CHECK: block2:
-; CHECK-NEXT: load i32, ptr %q
- br label %block4
+ br label %block4
block3:
store i32 0, ptr %p
@@ -97,12 +140,6 @@ block3:
block4:
-; CHECK: block4:
-; CHECK-NEXT: phi i32 [
-; CHECK-NEXT: phi ptr [
-; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 %G)
-; CHECK-NOT: load
-; CHECK: ret i32
%P2 = phi ptr [%p, %block3], [%q, %block2]
%PRE = load i32, ptr %P2
@@ -112,17 +149,28 @@ block4:
; Guard above the load's block should prevent PRE from hoisting through it.
define i32 @test_guard_03(ptr %p, ptr %q, i1 %C, i1 %G) {
-; CHECK-LABEL: @test_guard_03(
+; CHECK-LABEL: define i32 @test_guard_03(
+; CHECK-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], i1 [[C:%.*]], i1 [[G:%.*]]) {
+; CHECK-NEXT: [[BLOCK1:.*:]]
+; CHECK-NEXT: br i1 [[C]], label %[[BLOCK2:.*]], label %[[BLOCK3:.*]]
+; CHECK: [[BLOCK2]]:
+; CHECK-NEXT: br label %[[BLOCK4:.*]]
+; CHECK: [[BLOCK3]]:
+; CHECK-NEXT: store i32 0, ptr [[P]], align 4
+; CHECK-NEXT: br label %[[BLOCK4]]
+; CHECK: [[BLOCK4]]:
+; CHECK-NEXT: [[P2:%.*]] = phi ptr [ [[P]], %[[BLOCK3]] ], [ [[Q]], %[[BLOCK2]] ]
+; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[G]]) [ "deopt"() ]
+; CHECK-NEXT: [[PRE:%.*]] = load i32, ptr [[P2]], align 4
+; CHECK-NEXT: ret i32 [[PRE]]
+;
block1:
- br i1 %C, label %block2, label %block3
+ br i1 %C, label %block2, label %block3
block2:
-; CHECK: block2:
-; CHECK-NEXT: br
-; CHECK-NOT: load
- br label %block4
+ br label %block4
block3:
store i32 0, ptr %p
@@ -130,11 +178,6 @@ block3:
block4:
-; CHECK: block4:
-; CHECK-NEXT: phi ptr
-; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 %G)
-; CHECK-NEXT: load
-; CHECK-NEXT: ret i32
%P2 = phi ptr [%p, %block3], [%q, %block2]
call void (i1, ...) @llvm.experimental.guard(i1 %G) [ "deopt"() ]
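All of the guard tests above hinge on the same rule: @llvm.experimental.guard either establishes its condition or deoptimizes, so PRE may not move a load above a guard that protects it. The unsafe ordering that test_motivation rules out would look roughly like the following hypothetical block (not something GVN emits):

  block4:
    %P2 = phi ptr [ %el2, %block3 ], [ %el1, %block2 ]
    ; Loading before the bounds guard is wrong: if %index is outside
    ; [0, %len) the original code would have deoptimized instead of loading.
    %PRE = load i32, ptr %P2, align 4
    %cond1 = icmp sge i32 %index, 0
    %cond2 = icmp slt i32 %index, %len
    %in.bounds = and i1 %cond1, %cond2
    call void (i1, ...) @llvm.experimental.guard(i1 %in.bounds) [ "deopt"() ]
    ret i32 %PRE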
diff --git a/llvm/test/Transforms/GVN/PRE/pre-load-implicit-cf-updates.ll b/llvm/test/Transforms/GVN/PRE/pre-load-implicit-cf-updates.ll
index 0585781..17fbc0e 100644
--- a/llvm/test/Transforms/GVN/PRE/pre-load-implicit-cf-updates.ll
+++ b/llvm/test/Transforms/GVN/PRE/pre-load-implicit-cf-updates.ll
@@ -1,4 +1,6 @@
-; RUN: opt -S -passes=gvn -enable-load-pre < %s | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S -passes=gvn -enable-load-pre < %s | FileCheck %s --check-prefixes=CHECK,MDEP
+; RUN: opt -S -passes='gvn<memoryssa>' -enable-load-pre < %s | FileCheck %s --check-prefixes=CHECK,MSSA
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
@@ -9,18 +11,28 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
declare i32 @foo(i32 %arg) #0
define hidden void @test_01(i32 %x, i32 %y) {
-
; c2 only throws if c1 throws, so it can be safely removed and then PRE can
; hoist the load out of the loop.
-
-; CHECK-LABEL: @test_01
-; CHECK: entry:
-; CHECK-NEXT: %c1 = call i32 @foo(i32 %x)
-; CHECK-NEXT: %val.pre = load i32, ptr null, align 8
-; CHECK-NEXT: br label %loop
-; CHECK: loop:
-; CHECK-NEXT: %c3 = call i32 @foo(i32 %val.pre)
-; CHECK-NEXT: br label %loop
+; MDEP-LABEL: define hidden void @test_01(
+; MDEP-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; MDEP-NEXT: [[ENTRY:.*:]]
+; MDEP-NEXT: [[C1:%.*]] = call i32 @foo(i32 [[X]])
+; MDEP-NEXT: [[VAL_PRE:%.*]] = load i32, ptr null, align 8
+; MDEP-NEXT: br label %[[LOOP:.*]]
+; MDEP: [[LOOP]]:
+; MDEP-NEXT: [[C3:%.*]] = call i32 @foo(i32 [[VAL_PRE]])
+; MDEP-NEXT: br label %[[LOOP]]
+;
+; MSSA-LABEL: define hidden void @test_01(
+; MSSA-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; MSSA-NEXT: [[ENTRY:.*:]]
+; MSSA-NEXT: [[C1:%.*]] = call i32 @foo(i32 [[X]])
+; MSSA-NEXT: br label %[[LOOP:.*]]
+; MSSA: [[LOOP]]:
+; MSSA-NEXT: [[VAL:%.*]] = load i32, ptr null, align 8
+; MSSA-NEXT: [[C3:%.*]] = call i32 @foo(i32 [[VAL]])
+; MSSA-NEXT: br label %[[LOOP]]
+;
entry:
%c1 = call i32 @foo(i32 %x)
@@ -34,18 +46,18 @@ loop:
}
define hidden void @test_02(i32 %x, i32 %y) {
-
; PRE is not allowed because c2 may throw.
-
-; CHECK-LABEL: @test_02
-; CHECK: entry:
-; CHECK-NEXT: %c1 = call i32 @foo(i32 %x)
-; CHECK-NEXT: br label %loop
-; CHECK: loop:
-; CHECK-NEXT: %c2 = call i32 @foo(i32 %y)
-; CHECK-NEXT: %val = load i32, ptr null, align 8
-; CHECK-NEXT: %c3 = call i32 @foo(i32 %val)
-; CHECK-NEXT: br label %loop
+; CHECK-LABEL: define hidden void @test_02(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[C1:%.*]] = call i32 @foo(i32 [[X]])
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[C2:%.*]] = call i32 @foo(i32 [[Y]])
+; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr null, align 8
+; CHECK-NEXT: [[C3:%.*]] = call i32 @foo(i32 [[VAL]])
+; CHECK-NEXT: br label %[[LOOP]]
+;
entry:
%c1 = call i32 @foo(i32 %x)
@@ -59,19 +71,31 @@ loop:
}
define hidden void @test_03(i32 %x, i32 %y) {
-
; PRE of load is allowed because c2 only throws if c1 throws. c3 should
; not be eliminated. c4 is eliminated because it only throws if c3 throws.
-
-; CHECK-LABEL: @test_03
-; CHECK: entry:
-; CHECK-NEXT: %c1 = call i32 @foo(i32 %x)
-; CHECK-NEXT: %val.pre = load i32, ptr null, align 8
-; CHECK-NEXT: br label %loop
-; CHECK: loop:
-; CHECK-NEXT: %c3 = call i32 @foo(i32 %y)
-; CHECK-NEXT: %c5 = call i32 @foo(i32 %val.pre)
-; CHECK-NEXT: br label %loop
+; MDEP-LABEL: define hidden void @test_03(
+; MDEP-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; MDEP-NEXT: [[ENTRY:.*:]]
+; MDEP-NEXT: [[C1:%.*]] = call i32 @foo(i32 [[X]])
+; MDEP-NEXT: [[VAL_PRE:%.*]] = load i32, ptr null, align 8
+; MDEP-NEXT: br label %[[LOOP:.*]]
+; MDEP: [[LOOP]]:
+; MDEP-NEXT: [[C3:%.*]] = call i32 @foo(i32 [[Y]])
+; MDEP-NEXT: [[C5:%.*]] = call i32 @foo(i32 [[VAL_PRE]])
+; MDEP-NEXT: br label %[[LOOP]]
+;
+; MSSA-LABEL: define hidden void @test_03(
+; MSSA-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; MSSA-NEXT: [[ENTRY:.*:]]
+; MSSA-NEXT: [[C1:%.*]] = call i32 @foo(i32 [[X]])
+; MSSA-NEXT: br label %[[LOOP:.*]]
+; MSSA: [[LOOP]]:
+; MSSA-NEXT: [[VAL:%.*]] = load i32, ptr null, align 8
+; MSSA-NEXT: [[C3:%.*]] = call i32 @foo(i32 [[Y]])
+; MSSA-NEXT: [[VAL2:%.*]] = load i32, ptr null, align 8
+; MSSA-NEXT: [[C5:%.*]] = call i32 @foo(i32 [[VAL]])
+; MSSA-NEXT: br label %[[LOOP]]
+;
entry:
%c1 = call i32 @foo(i32 %x)
@@ -88,18 +112,18 @@ loop:
}
define hidden void @test_04(i32 %x, i32 %y) {
-
; PRE is not allowed even after we remove c2 because now c3 prevents it.
-
-; CHECK-LABEL: @test_04
-; CHECK: entry:
-; CHECK-NEXT: %c1 = call i32 @foo(i32 %x)
-; CHECK-NEXT: br label %loop
-; CHECK: loop:
-; CHECK-NEXT: %c3 = call i32 @foo(i32 %y)
-; CHECK-NEXT: %val = load i32, ptr null, align 8
-; CHECK-NEXT: %c5 = call i32 @foo(i32 %val)
-; CHECK-NEXT: br label %loop
+; CHECK-LABEL: define hidden void @test_04(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[C1:%.*]] = call i32 @foo(i32 [[X]])
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[C3:%.*]] = call i32 @foo(i32 [[Y]])
+; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr null, align 8
+; CHECK-NEXT: [[C5:%.*]] = call i32 @foo(i32 [[VAL]])
+; CHECK-NEXT: br label %[[LOOP]]
+;
entry:
%c1 = call i32 @foo(i32 %x)
diff --git a/llvm/test/Transforms/GVN/PRE/pre-load.ll b/llvm/test/Transforms/GVN/PRE/pre-load.ll
index bbd20bc..5a07f9f 100644
--- a/llvm/test/Transforms/GVN/PRE/pre-load.ll
+++ b/llvm/test/Transforms/GVN/PRE/pre-load.ll
@@ -1,21 +1,34 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -passes=gvn -enable-load-pre -S | FileCheck %s
+; RUN: opt < %s -passes=gvn -enable-load-pre -S | FileCheck %s --check-prefixes=CHECK,MDEP
+; RUN: opt < %s -passes='gvn<memoryssa>' -enable-load-pre -S | FileCheck %s --check-prefixes=CHECK,MSSA
; RUN: opt < %s -aa-pipeline=basic-aa -passes="gvn<load-pre>" -enable-load-pre=false -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
define i32 @test1(ptr %p, i1 %C) {
-; CHECK-LABEL: @test1(
-; CHECK-NEXT: block1:
-; CHECK-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
-; CHECK: block2:
-; CHECK-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[P:%.*]], align 4
-; CHECK-NEXT: br label [[BLOCK4:%.*]]
-; CHECK: block3:
-; CHECK-NEXT: store i32 0, ptr [[P]], align 4
-; CHECK-NEXT: br label [[BLOCK4]]
-; CHECK: block4:
-; CHECK-NEXT: [[PRE:%.*]] = phi i32 [ 0, [[BLOCK3]] ], [ [[PRE_PRE]], [[BLOCK2]] ]
-; CHECK-NEXT: ret i32 [[PRE]]
+; MDEP-LABEL: @test1(
+; MDEP-NEXT: block1:
+; MDEP-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
+; MDEP: block2:
+; MDEP-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[P:%.*]], align 4
+; MDEP-NEXT: br label [[BLOCK4:%.*]]
+; MDEP: block3:
+; MDEP-NEXT: store i32 0, ptr [[P]], align 4
+; MDEP-NEXT: br label [[BLOCK4]]
+; MDEP: block4:
+; MDEP-NEXT: [[PRE:%.*]] = phi i32 [ 0, [[BLOCK3]] ], [ [[PRE_PRE]], [[BLOCK2]] ]
+; MDEP-NEXT: ret i32 [[PRE]]
+;
+; MSSA-LABEL: @test1(
+; MSSA-NEXT: block1:
+; MSSA-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
+; MSSA: block2:
+; MSSA-NEXT: br label [[BLOCK4:%.*]]
+; MSSA: block3:
+; MSSA-NEXT: store i32 0, ptr [[P:%.*]], align 4
+; MSSA-NEXT: br label [[BLOCK4]]
+; MSSA: block4:
+; MSSA-NEXT: [[PRE:%.*]] = load i32, ptr [[P]], align 4
+; MSSA-NEXT: ret i32 [[PRE]]
;
block1:
br i1 %C, label %block2, label %block3
@@ -34,19 +47,32 @@ block4:
; This is a simple phi translation case.
define i32 @test2(ptr %p, ptr %q, i1 %C) {
-; CHECK-LABEL: @test2(
-; CHECK-NEXT: block1:
-; CHECK-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
-; CHECK: block2:
-; CHECK-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[Q:%.*]], align 4
-; CHECK-NEXT: br label [[BLOCK4:%.*]]
-; CHECK: block3:
-; CHECK-NEXT: store i32 0, ptr [[P:%.*]], align 4
-; CHECK-NEXT: br label [[BLOCK4]]
-; CHECK: block4:
-; CHECK-NEXT: [[PRE:%.*]] = phi i32 [ 0, [[BLOCK3]] ], [ [[PRE_PRE]], [[BLOCK2]] ]
-; CHECK-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q]], [[BLOCK2]] ]
-; CHECK-NEXT: ret i32 [[PRE]]
+; MDEP-LABEL: @test2(
+; MDEP-NEXT: block1:
+; MDEP-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
+; MDEP: block2:
+; MDEP-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[Q:%.*]], align 4
+; MDEP-NEXT: br label [[BLOCK4:%.*]]
+; MDEP: block3:
+; MDEP-NEXT: store i32 0, ptr [[P:%.*]], align 4
+; MDEP-NEXT: br label [[BLOCK4]]
+; MDEP: block4:
+; MDEP-NEXT: [[PRE:%.*]] = phi i32 [ 0, [[BLOCK3]] ], [ [[PRE_PRE]], [[BLOCK2]] ]
+; MDEP-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q]], [[BLOCK2]] ]
+; MDEP-NEXT: ret i32 [[PRE]]
+;
+; MSSA-LABEL: @test2(
+; MSSA-NEXT: block1:
+; MSSA-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
+; MSSA: block2:
+; MSSA-NEXT: br label [[BLOCK4:%.*]]
+; MSSA: block3:
+; MSSA-NEXT: store i32 0, ptr [[P:%.*]], align 4
+; MSSA-NEXT: br label [[BLOCK4]]
+; MSSA: block4:
+; MSSA-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q:%.*]], [[BLOCK2]] ]
+; MSSA-NEXT: [[PRE:%.*]] = load i32, ptr [[P2]], align 4
+; MSSA-NEXT: ret i32 [[PRE]]
;
block1:
br i1 %C, label %block2, label %block3
@@ -66,23 +92,40 @@ block4:
; This is a PRE case that requires phi translation through a GEP.
define i32 @test3(ptr %p, ptr %q, ptr %Hack, i1 %C) {
-; CHECK-LABEL: @test3(
-; CHECK-NEXT: block1:
-; CHECK-NEXT: [[B:%.*]] = getelementptr i32, ptr [[Q:%.*]], i32 1
-; CHECK-NEXT: store ptr [[B]], ptr [[HACK:%.*]], align 8
-; CHECK-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
-; CHECK: block2:
-; CHECK-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[B]], align 4
-; CHECK-NEXT: br label [[BLOCK4:%.*]]
-; CHECK: block3:
-; CHECK-NEXT: [[A:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 1
-; CHECK-NEXT: store i32 0, ptr [[A]], align 4
-; CHECK-NEXT: br label [[BLOCK4]]
-; CHECK: block4:
-; CHECK-NEXT: [[PRE:%.*]] = phi i32 [ 0, [[BLOCK3]] ], [ [[PRE_PRE]], [[BLOCK2]] ]
-; CHECK-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q]], [[BLOCK2]] ]
-; CHECK-NEXT: [[P3:%.*]] = getelementptr i32, ptr [[P2]], i32 1
-; CHECK-NEXT: ret i32 [[PRE]]
+; MDEP-LABEL: @test3(
+; MDEP-NEXT: block1:
+; MDEP-NEXT: [[B:%.*]] = getelementptr i32, ptr [[Q:%.*]], i32 1
+; MDEP-NEXT: store ptr [[B]], ptr [[HACK:%.*]], align 8
+; MDEP-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
+; MDEP: block2:
+; MDEP-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[B]], align 4
+; MDEP-NEXT: br label [[BLOCK4:%.*]]
+; MDEP: block3:
+; MDEP-NEXT: [[A:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 1
+; MDEP-NEXT: store i32 0, ptr [[A]], align 4
+; MDEP-NEXT: br label [[BLOCK4]]
+; MDEP: block4:
+; MDEP-NEXT: [[PRE:%.*]] = phi i32 [ 0, [[BLOCK3]] ], [ [[PRE_PRE]], [[BLOCK2]] ]
+; MDEP-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q]], [[BLOCK2]] ]
+; MDEP-NEXT: [[P3:%.*]] = getelementptr i32, ptr [[P2]], i32 1
+; MDEP-NEXT: ret i32 [[PRE]]
+;
+; MSSA-LABEL: @test3(
+; MSSA-NEXT: block1:
+; MSSA-NEXT: [[B:%.*]] = getelementptr i32, ptr [[Q:%.*]], i32 1
+; MSSA-NEXT: store ptr [[B]], ptr [[HACK:%.*]], align 8
+; MSSA-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
+; MSSA: block2:
+; MSSA-NEXT: br label [[BLOCK4:%.*]]
+; MSSA: block3:
+; MSSA-NEXT: [[A:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 1
+; MSSA-NEXT: store i32 0, ptr [[A]], align 4
+; MSSA-NEXT: br label [[BLOCK4]]
+; MSSA: block4:
+; MSSA-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q]], [[BLOCK2]] ]
+; MSSA-NEXT: [[P3:%.*]] = getelementptr i32, ptr [[P2]], i32 1
+; MSSA-NEXT: [[PRE:%.*]] = load i32, ptr [[P3]], align 4
+; MSSA-NEXT: ret i32 [[PRE]]
;
block1:
%B = getelementptr i32, ptr %q, i32 1
@@ -107,24 +150,41 @@ block4:
;; Here the loaded address is available, but the computation is in 'block3'
;; which does not dominate 'block2'.
define i32 @test4(ptr %p, ptr %q, ptr %Hack, i1 %C) {
-; CHECK-LABEL: @test4(
-; CHECK-NEXT: block1:
-; CHECK-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
-; CHECK: block2:
-; CHECK-NEXT: [[P3_PHI_TRANS_INSERT:%.*]] = getelementptr i32, ptr [[Q:%.*]], i32 1
-; CHECK-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[P3_PHI_TRANS_INSERT]], align 4
-; CHECK-NEXT: br label [[BLOCK4:%.*]]
-; CHECK: block3:
-; CHECK-NEXT: [[B:%.*]] = getelementptr i32, ptr [[Q]], i32 1
-; CHECK-NEXT: store ptr [[B]], ptr [[HACK:%.*]], align 8
-; CHECK-NEXT: [[A:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 1
-; CHECK-NEXT: store i32 0, ptr [[A]], align 4
-; CHECK-NEXT: br label [[BLOCK4]]
-; CHECK: block4:
-; CHECK-NEXT: [[PRE:%.*]] = phi i32 [ 0, [[BLOCK3]] ], [ [[PRE_PRE]], [[BLOCK2]] ]
-; CHECK-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q]], [[BLOCK2]] ]
-; CHECK-NEXT: [[P3:%.*]] = getelementptr i32, ptr [[P2]], i32 1
-; CHECK-NEXT: ret i32 [[PRE]]
+; MDEP-LABEL: @test4(
+; MDEP-NEXT: block1:
+; MDEP-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
+; MDEP: block2:
+; MDEP-NEXT: [[P3_PHI_TRANS_INSERT:%.*]] = getelementptr i32, ptr [[Q:%.*]], i32 1
+; MDEP-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[P3_PHI_TRANS_INSERT]], align 4
+; MDEP-NEXT: br label [[BLOCK4:%.*]]
+; MDEP: block3:
+; MDEP-NEXT: [[B:%.*]] = getelementptr i32, ptr [[Q]], i32 1
+; MDEP-NEXT: store ptr [[B]], ptr [[HACK:%.*]], align 8
+; MDEP-NEXT: [[A:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 1
+; MDEP-NEXT: store i32 0, ptr [[A]], align 4
+; MDEP-NEXT: br label [[BLOCK4]]
+; MDEP: block4:
+; MDEP-NEXT: [[PRE:%.*]] = phi i32 [ 0, [[BLOCK3]] ], [ [[PRE_PRE]], [[BLOCK2]] ]
+; MDEP-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q]], [[BLOCK2]] ]
+; MDEP-NEXT: [[P3:%.*]] = getelementptr i32, ptr [[P2]], i32 1
+; MDEP-NEXT: ret i32 [[PRE]]
+;
+; MSSA-LABEL: @test4(
+; MSSA-NEXT: block1:
+; MSSA-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
+; MSSA: block2:
+; MSSA-NEXT: br label [[BLOCK4:%.*]]
+; MSSA: block3:
+; MSSA-NEXT: [[B:%.*]] = getelementptr i32, ptr [[Q:%.*]], i32 1
+; MSSA-NEXT: store ptr [[B]], ptr [[HACK:%.*]], align 8
+; MSSA-NEXT: [[A:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 1
+; MSSA-NEXT: store i32 0, ptr [[A]], align 4
+; MSSA-NEXT: br label [[BLOCK4]]
+; MSSA: block4:
+; MSSA-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q]], [[BLOCK2]] ]
+; MSSA-NEXT: [[P3:%.*]] = getelementptr i32, ptr [[P2]], i32 1
+; MSSA-NEXT: [[PRE:%.*]] = load i32, ptr [[P3]], align 4
+; MSSA-NEXT: ret i32 [[PRE]]
;
block1:
br i1 %C, label %block2, label %block3
@@ -149,24 +209,41 @@ block4:
; Same as test4, with a nuw flag on the GEP.
define i32 @test4_nuw(ptr %p, ptr %q, ptr %Hack, i1 %C) {
-; CHECK-LABEL: @test4_nuw(
-; CHECK-NEXT: block1:
-; CHECK-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
-; CHECK: block2:
-; CHECK-NEXT: [[P3_PHI_TRANS_INSERT:%.*]] = getelementptr nuw i32, ptr [[Q:%.*]], i32 1
-; CHECK-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[P3_PHI_TRANS_INSERT]], align 4
-; CHECK-NEXT: br label [[BLOCK4:%.*]]
-; CHECK: block3:
-; CHECK-NEXT: [[B:%.*]] = getelementptr i32, ptr [[Q]], i32 1
-; CHECK-NEXT: store ptr [[B]], ptr [[HACK:%.*]], align 8
-; CHECK-NEXT: [[A:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 1
-; CHECK-NEXT: store i32 0, ptr [[A]], align 4
-; CHECK-NEXT: br label [[BLOCK4]]
-; CHECK: block4:
-; CHECK-NEXT: [[PRE:%.*]] = phi i32 [ 0, [[BLOCK3]] ], [ [[PRE_PRE]], [[BLOCK2]] ]
-; CHECK-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q]], [[BLOCK2]] ]
-; CHECK-NEXT: [[P3:%.*]] = getelementptr nuw i32, ptr [[P2]], i32 1
-; CHECK-NEXT: ret i32 [[PRE]]
+; MDEP-LABEL: @test4_nuw(
+; MDEP-NEXT: block1:
+; MDEP-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
+; MDEP: block2:
+; MDEP-NEXT: [[P3_PHI_TRANS_INSERT:%.*]] = getelementptr nuw i32, ptr [[Q:%.*]], i32 1
+; MDEP-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[P3_PHI_TRANS_INSERT]], align 4
+; MDEP-NEXT: br label [[BLOCK4:%.*]]
+; MDEP: block3:
+; MDEP-NEXT: [[B:%.*]] = getelementptr i32, ptr [[Q]], i32 1
+; MDEP-NEXT: store ptr [[B]], ptr [[HACK:%.*]], align 8
+; MDEP-NEXT: [[A:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 1
+; MDEP-NEXT: store i32 0, ptr [[A]], align 4
+; MDEP-NEXT: br label [[BLOCK4]]
+; MDEP: block4:
+; MDEP-NEXT: [[PRE:%.*]] = phi i32 [ 0, [[BLOCK3]] ], [ [[PRE_PRE]], [[BLOCK2]] ]
+; MDEP-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q]], [[BLOCK2]] ]
+; MDEP-NEXT: [[P3:%.*]] = getelementptr nuw i32, ptr [[P2]], i32 1
+; MDEP-NEXT: ret i32 [[PRE]]
+;
+; MSSA-LABEL: @test4_nuw(
+; MSSA-NEXT: block1:
+; MSSA-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
+; MSSA: block2:
+; MSSA-NEXT: br label [[BLOCK4:%.*]]
+; MSSA: block3:
+; MSSA-NEXT: [[B:%.*]] = getelementptr i32, ptr [[Q:%.*]], i32 1
+; MSSA-NEXT: store ptr [[B]], ptr [[HACK:%.*]], align 8
+; MSSA-NEXT: [[A:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 1
+; MSSA-NEXT: store i32 0, ptr [[A]], align 4
+; MSSA-NEXT: br label [[BLOCK4]]
+; MSSA: block4:
+; MSSA-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q]], [[BLOCK2]] ]
+; MSSA-NEXT: [[P3:%.*]] = getelementptr nuw i32, ptr [[P2]], i32 1
+; MSSA-NEXT: [[PRE:%.*]] = load i32, ptr [[P3]], align 4
+; MSSA-NEXT: ret i32 [[PRE]]
;
block1:
br i1 %C, label %block2, label %block3
@@ -196,28 +273,50 @@ block4:
;}
define void @test5(i32 %N, ptr nocapture %G) nounwind ssp {
-; CHECK-LABEL: @test5(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[N:%.*]], -1
-; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[TMP0]], 0
-; CHECK-NEXT: br i1 [[TMP1]], label [[BB_NPH:%.*]], label [[RETURN:%.*]]
-; CHECK: bb.nph:
-; CHECK-NEXT: [[TMP:%.*]] = zext i32 [[TMP0]] to i64
-; CHECK-NEXT: [[DOTPRE:%.*]] = load double, ptr [[G:%.*]], align 8
-; CHECK-NEXT: br label [[BB:%.*]]
-; CHECK: bb:
-; CHECK-NEXT: [[TMP2:%.*]] = phi double [ [[DOTPRE]], [[BB_NPH]] ], [ [[TMP3:%.*]], [[BB]] ]
-; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP6:%.*]], [[BB]] ]
-; CHECK-NEXT: [[TMP6]] = add i64 [[INDVAR]], 1
-; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP6]]
-; CHECK-NEXT: [[SCEVGEP7:%.*]] = getelementptr double, ptr [[G]], i64 [[INDVAR]]
-; CHECK-NEXT: [[TMP3]] = load double, ptr [[SCEVGEP]], align 8
-; CHECK-NEXT: [[TMP4:%.*]] = fadd double [[TMP2]], [[TMP3]]
-; CHECK-NEXT: store double [[TMP4]], ptr [[SCEVGEP7]], align 8
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP6]], [[TMP]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]]
-; CHECK: return:
-; CHECK-NEXT: ret void
+; MDEP-LABEL: @test5(
+; MDEP-NEXT: entry:
+; MDEP-NEXT: [[TMP0:%.*]] = add i32 [[N:%.*]], -1
+; MDEP-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[TMP0]], 0
+; MDEP-NEXT: br i1 [[TMP1]], label [[BB_NPH:%.*]], label [[RETURN:%.*]]
+; MDEP: bb.nph:
+; MDEP-NEXT: [[TMP:%.*]] = zext i32 [[TMP0]] to i64
+; MDEP-NEXT: [[DOTPRE:%.*]] = load double, ptr [[G:%.*]], align 8
+; MDEP-NEXT: br label [[BB:%.*]]
+; MDEP: bb:
+; MDEP-NEXT: [[TMP2:%.*]] = phi double [ [[DOTPRE]], [[BB_NPH]] ], [ [[TMP3:%.*]], [[BB]] ]
+; MDEP-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP6:%.*]], [[BB]] ]
+; MDEP-NEXT: [[TMP6]] = add i64 [[INDVAR]], 1
+; MDEP-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP6]]
+; MDEP-NEXT: [[SCEVGEP7:%.*]] = getelementptr double, ptr [[G]], i64 [[INDVAR]]
+; MDEP-NEXT: [[TMP3]] = load double, ptr [[SCEVGEP]], align 8
+; MDEP-NEXT: [[TMP4:%.*]] = fadd double [[TMP2]], [[TMP3]]
+; MDEP-NEXT: store double [[TMP4]], ptr [[SCEVGEP7]], align 8
+; MDEP-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP6]], [[TMP]]
+; MDEP-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]]
+; MDEP: return:
+; MDEP-NEXT: ret void
+;
+; MSSA-LABEL: @test5(
+; MSSA-NEXT: entry:
+; MSSA-NEXT: [[TMP0:%.*]] = add i32 [[N:%.*]], -1
+; MSSA-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[TMP0]], 0
+; MSSA-NEXT: br i1 [[TMP1]], label [[BB_NPH:%.*]], label [[RETURN:%.*]]
+; MSSA: bb.nph:
+; MSSA-NEXT: [[TMP:%.*]] = zext i32 [[TMP0]] to i64
+; MSSA-NEXT: br label [[BB:%.*]]
+; MSSA: bb:
+; MSSA-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP6:%.*]], [[BB]] ]
+; MSSA-NEXT: [[TMP6]] = add i64 [[INDVAR]], 1
+; MSSA-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G:%.*]], i64 [[TMP6]]
+; MSSA-NEXT: [[SCEVGEP7:%.*]] = getelementptr double, ptr [[G]], i64 [[INDVAR]]
+; MSSA-NEXT: [[TMP2:%.*]] = load double, ptr [[SCEVGEP7]], align 8
+; MSSA-NEXT: [[TMP3:%.*]] = load double, ptr [[SCEVGEP]], align 8
+; MSSA-NEXT: [[TMP4:%.*]] = fadd double [[TMP2]], [[TMP3]]
+; MSSA-NEXT: store double [[TMP4]], ptr [[SCEVGEP7]], align 8
+; MSSA-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP6]], [[TMP]]
+; MSSA-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]]
+; MSSA: return:
+; MSSA-NEXT: ret void
;
entry:
%0 = add i32 %N, -1
@@ -254,28 +353,50 @@ return:
;}
define void @test6(i32 %N, ptr nocapture %G) nounwind ssp {
-; CHECK-LABEL: @test6(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[N:%.*]], -1
-; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[TMP0]], 0
-; CHECK-NEXT: br i1 [[TMP1]], label [[BB_NPH:%.*]], label [[RETURN:%.*]]
-; CHECK: bb.nph:
-; CHECK-NEXT: [[TMP:%.*]] = zext i32 [[TMP0]] to i64
-; CHECK-NEXT: [[DOTPRE:%.*]] = load double, ptr [[G:%.*]], align 8
-; CHECK-NEXT: br label [[BB:%.*]]
-; CHECK: bb:
-; CHECK-NEXT: [[TMP2:%.*]] = phi double [ [[DOTPRE]], [[BB_NPH]] ], [ [[TMP4:%.*]], [[BB]] ]
-; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP6:%.*]], [[BB]] ]
-; CHECK-NEXT: [[TMP6]] = add i64 [[INDVAR]], 1
-; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP6]]
-; CHECK-NEXT: [[SCEVGEP7:%.*]] = getelementptr double, ptr [[G]], i64 [[INDVAR]]
-; CHECK-NEXT: [[TMP3:%.*]] = load double, ptr [[SCEVGEP]], align 8
-; CHECK-NEXT: [[TMP4]] = fadd double [[TMP2]], [[TMP3]]
-; CHECK-NEXT: store double [[TMP4]], ptr [[SCEVGEP]], align 8
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP6]], [[TMP]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]]
-; CHECK: return:
-; CHECK-NEXT: ret void
+; MDEP-LABEL: @test6(
+; MDEP-NEXT: entry:
+; MDEP-NEXT: [[TMP0:%.*]] = add i32 [[N:%.*]], -1
+; MDEP-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[TMP0]], 0
+; MDEP-NEXT: br i1 [[TMP1]], label [[BB_NPH:%.*]], label [[RETURN:%.*]]
+; MDEP: bb.nph:
+; MDEP-NEXT: [[TMP:%.*]] = zext i32 [[TMP0]] to i64
+; MDEP-NEXT: [[DOTPRE:%.*]] = load double, ptr [[G:%.*]], align 8
+; MDEP-NEXT: br label [[BB:%.*]]
+; MDEP: bb:
+; MDEP-NEXT: [[TMP2:%.*]] = phi double [ [[DOTPRE]], [[BB_NPH]] ], [ [[TMP4:%.*]], [[BB]] ]
+; MDEP-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP6:%.*]], [[BB]] ]
+; MDEP-NEXT: [[TMP6]] = add i64 [[INDVAR]], 1
+; MDEP-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP6]]
+; MDEP-NEXT: [[SCEVGEP7:%.*]] = getelementptr double, ptr [[G]], i64 [[INDVAR]]
+; MDEP-NEXT: [[TMP3:%.*]] = load double, ptr [[SCEVGEP]], align 8
+; MDEP-NEXT: [[TMP4]] = fadd double [[TMP2]], [[TMP3]]
+; MDEP-NEXT: store double [[TMP4]], ptr [[SCEVGEP]], align 8
+; MDEP-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP6]], [[TMP]]
+; MDEP-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]]
+; MDEP: return:
+; MDEP-NEXT: ret void
+;
+; MSSA-LABEL: @test6(
+; MSSA-NEXT: entry:
+; MSSA-NEXT: [[TMP0:%.*]] = add i32 [[N:%.*]], -1
+; MSSA-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[TMP0]], 0
+; MSSA-NEXT: br i1 [[TMP1]], label [[BB_NPH:%.*]], label [[RETURN:%.*]]
+; MSSA: bb.nph:
+; MSSA-NEXT: [[TMP:%.*]] = zext i32 [[TMP0]] to i64
+; MSSA-NEXT: br label [[BB:%.*]]
+; MSSA: bb:
+; MSSA-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP6:%.*]], [[BB]] ]
+; MSSA-NEXT: [[TMP6]] = add i64 [[INDVAR]], 1
+; MSSA-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G:%.*]], i64 [[TMP6]]
+; MSSA-NEXT: [[SCEVGEP7:%.*]] = getelementptr double, ptr [[G]], i64 [[INDVAR]]
+; MSSA-NEXT: [[TMP2:%.*]] = load double, ptr [[SCEVGEP7]], align 8
+; MSSA-NEXT: [[TMP3:%.*]] = load double, ptr [[SCEVGEP]], align 8
+; MSSA-NEXT: [[TMP4:%.*]] = fadd double [[TMP2]], [[TMP3]]
+; MSSA-NEXT: store double [[TMP4]], ptr [[SCEVGEP]], align 8
+; MSSA-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP6]], [[TMP]]
+; MSSA-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]]
+; MSSA: return:
+; MSSA-NEXT: ret void
;
entry:
%0 = add i32 %N, -1
@@ -314,31 +435,57 @@ return:
; This requires phi translation of the adds.
define void @test7(i32 %N, ptr nocapture %G) nounwind ssp {
-; CHECK-LABEL: @test7(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds double, ptr [[G:%.*]], i64 1
-; CHECK-NEXT: store double 1.000000e+00, ptr [[TMP0]], align 8
-; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[N:%.*]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt i32 [[TMP1]], 1
-; CHECK-NEXT: br i1 [[TMP2]], label [[BB_NPH:%.*]], label [[RETURN:%.*]]
-; CHECK: bb.nph:
-; CHECK-NEXT: [[TMP:%.*]] = sext i32 [[TMP1]] to i64
-; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[TMP]], -1
-; CHECK-NEXT: br label [[BB:%.*]]
-; CHECK: bb:
-; CHECK-NEXT: [[TMP3:%.*]] = phi double [ 1.000000e+00, [[BB_NPH]] ], [ [[TMP5:%.*]], [[BB]] ]
-; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP9:%.*]], [[BB]] ]
-; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[INDVAR]], 2
-; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP8]]
-; CHECK-NEXT: [[TMP9]] = add i64 [[INDVAR]], 1
-; CHECK-NEXT: [[SCEVGEP10:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP9]]
-; CHECK-NEXT: [[TMP4:%.*]] = load double, ptr [[SCEVGEP]], align 8
-; CHECK-NEXT: [[TMP5]] = fadd double [[TMP3]], [[TMP4]]
-; CHECK-NEXT: store double [[TMP5]], ptr [[SCEVGEP]], align 8
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP9]], [[TMP7]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]]
-; CHECK: return:
-; CHECK-NEXT: ret void
+; MDEP-LABEL: @test7(
+; MDEP-NEXT: entry:
+; MDEP-NEXT: [[TMP0:%.*]] = getelementptr inbounds double, ptr [[G:%.*]], i64 1
+; MDEP-NEXT: store double 1.000000e+00, ptr [[TMP0]], align 8
+; MDEP-NEXT: [[TMP1:%.*]] = add i32 [[N:%.*]], -1
+; MDEP-NEXT: [[TMP2:%.*]] = icmp sgt i32 [[TMP1]], 1
+; MDEP-NEXT: br i1 [[TMP2]], label [[BB_NPH:%.*]], label [[RETURN:%.*]]
+; MDEP: bb.nph:
+; MDEP-NEXT: [[TMP:%.*]] = sext i32 [[TMP1]] to i64
+; MDEP-NEXT: [[TMP7:%.*]] = add i64 [[TMP]], -1
+; MDEP-NEXT: br label [[BB:%.*]]
+; MDEP: bb:
+; MDEP-NEXT: [[TMP3:%.*]] = phi double [ 1.000000e+00, [[BB_NPH]] ], [ [[TMP5:%.*]], [[BB]] ]
+; MDEP-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP9:%.*]], [[BB]] ]
+; MDEP-NEXT: [[TMP8:%.*]] = add i64 [[INDVAR]], 2
+; MDEP-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP8]]
+; MDEP-NEXT: [[TMP9]] = add i64 [[INDVAR]], 1
+; MDEP-NEXT: [[SCEVGEP10:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP9]]
+; MDEP-NEXT: [[TMP4:%.*]] = load double, ptr [[SCEVGEP]], align 8
+; MDEP-NEXT: [[TMP5]] = fadd double [[TMP3]], [[TMP4]]
+; MDEP-NEXT: store double [[TMP5]], ptr [[SCEVGEP]], align 8
+; MDEP-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP9]], [[TMP7]]
+; MDEP-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]]
+; MDEP: return:
+; MDEP-NEXT: ret void
+;
+; MSSA-LABEL: @test7(
+; MSSA-NEXT: entry:
+; MSSA-NEXT: [[TMP0:%.*]] = getelementptr inbounds double, ptr [[G:%.*]], i64 1
+; MSSA-NEXT: store double 1.000000e+00, ptr [[TMP0]], align 8
+; MSSA-NEXT: [[TMP1:%.*]] = add i32 [[N:%.*]], -1
+; MSSA-NEXT: [[TMP2:%.*]] = icmp sgt i32 [[TMP1]], 1
+; MSSA-NEXT: br i1 [[TMP2]], label [[BB_NPH:%.*]], label [[RETURN:%.*]]
+; MSSA: bb.nph:
+; MSSA-NEXT: [[TMP:%.*]] = sext i32 [[TMP1]] to i64
+; MSSA-NEXT: [[TMP7:%.*]] = add i64 [[TMP]], -1
+; MSSA-NEXT: br label [[BB:%.*]]
+; MSSA: bb:
+; MSSA-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP9:%.*]], [[BB]] ]
+; MSSA-NEXT: [[TMP8:%.*]] = add i64 [[INDVAR]], 2
+; MSSA-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP8]]
+; MSSA-NEXT: [[TMP9]] = add i64 [[INDVAR]], 1
+; MSSA-NEXT: [[SCEVGEP10:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP9]]
+; MSSA-NEXT: [[TMP3:%.*]] = load double, ptr [[SCEVGEP10]], align 8
+; MSSA-NEXT: [[TMP4:%.*]] = load double, ptr [[SCEVGEP]], align 8
+; MSSA-NEXT: [[TMP5:%.*]] = fadd double [[TMP3]], [[TMP4]]
+; MSSA-NEXT: store double [[TMP5]], ptr [[SCEVGEP]], align 8
+; MSSA-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP9]], [[TMP7]]
+; MSSA-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]]
+; MSSA: return:
+; MSSA-NEXT: ret void
;
entry:
%0 = getelementptr inbounds double, ptr %G, i64 1
@@ -374,22 +521,37 @@ return:
;; Here the loaded address isn't available in 'block2' at all, requiring a new
;; GEP to be inserted into it.
define i32 @test8(ptr %p, ptr %q, ptr %Hack, i1 %C) {
-; CHECK-LABEL: @test8(
-; CHECK-NEXT: block1:
-; CHECK-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
-; CHECK: block2:
-; CHECK-NEXT: [[P3_PHI_TRANS_INSERT:%.*]] = getelementptr i32, ptr [[Q:%.*]], i32 1
-; CHECK-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[P3_PHI_TRANS_INSERT]], align 4
-; CHECK-NEXT: br label [[BLOCK4:%.*]]
-; CHECK: block3:
-; CHECK-NEXT: [[A:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 1
-; CHECK-NEXT: store i32 0, ptr [[A]], align 4
-; CHECK-NEXT: br label [[BLOCK4]]
-; CHECK: block4:
-; CHECK-NEXT: [[PRE:%.*]] = phi i32 [ 0, [[BLOCK3]] ], [ [[PRE_PRE]], [[BLOCK2]] ]
-; CHECK-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q]], [[BLOCK2]] ]
-; CHECK-NEXT: [[P3:%.*]] = getelementptr i32, ptr [[P2]], i32 1
-; CHECK-NEXT: ret i32 [[PRE]]
+; MDEP-LABEL: @test8(
+; MDEP-NEXT: block1:
+; MDEP-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
+; MDEP: block2:
+; MDEP-NEXT: [[P3_PHI_TRANS_INSERT:%.*]] = getelementptr i32, ptr [[Q:%.*]], i32 1
+; MDEP-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[P3_PHI_TRANS_INSERT]], align 4
+; MDEP-NEXT: br label [[BLOCK4:%.*]]
+; MDEP: block3:
+; MDEP-NEXT: [[A:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 1
+; MDEP-NEXT: store i32 0, ptr [[A]], align 4
+; MDEP-NEXT: br label [[BLOCK4]]
+; MDEP: block4:
+; MDEP-NEXT: [[PRE:%.*]] = phi i32 [ 0, [[BLOCK3]] ], [ [[PRE_PRE]], [[BLOCK2]] ]
+; MDEP-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q]], [[BLOCK2]] ]
+; MDEP-NEXT: [[P3:%.*]] = getelementptr i32, ptr [[P2]], i32 1
+; MDEP-NEXT: ret i32 [[PRE]]
+;
+; MSSA-LABEL: @test8(
+; MSSA-NEXT: block1:
+; MSSA-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
+; MSSA: block2:
+; MSSA-NEXT: br label [[BLOCK4:%.*]]
+; MSSA: block3:
+; MSSA-NEXT: [[A:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 1
+; MSSA-NEXT: store i32 0, ptr [[A]], align 4
+; MSSA-NEXT: br label [[BLOCK4]]
+; MSSA: block4:
+; MSSA-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q:%.*]], [[BLOCK2]] ]
+; MSSA-NEXT: [[P3:%.*]] = getelementptr i32, ptr [[P2]], i32 1
+; MSSA-NEXT: [[PRE:%.*]] = load i32, ptr [[P3]], align 4
+; MSSA-NEXT: ret i32 [[PRE]]
;
block1:
br i1 %C, label %block2, label %block3
@@ -417,31 +579,55 @@ block4:
; This requires phi translation of the adds.
define void @test9(i32 %N, ptr nocapture %G) nounwind ssp {
-; CHECK-LABEL: @test9(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[N:%.*]], -1
-; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[TMP0]], 1
-; CHECK-NEXT: br i1 [[TMP1]], label [[BB_NPH:%.*]], label [[RETURN:%.*]]
-; CHECK: bb.nph:
-; CHECK-NEXT: [[TMP:%.*]] = sext i32 [[TMP0]] to i64
-; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[TMP]], -1
-; CHECK-NEXT: [[SCEVGEP10_PHI_TRANS_INSERT:%.*]] = getelementptr double, ptr [[G:%.*]], i64 1
-; CHECK-NEXT: [[DOTPRE:%.*]] = load double, ptr [[SCEVGEP10_PHI_TRANS_INSERT]], align 8
-; CHECK-NEXT: br label [[BB:%.*]]
-; CHECK: bb:
-; CHECK-NEXT: [[TMP2:%.*]] = phi double [ [[DOTPRE]], [[BB_NPH]] ], [ [[TMP4:%.*]], [[BB]] ]
-; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP9:%.*]], [[BB]] ]
-; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[INDVAR]], 2
-; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP8]]
-; CHECK-NEXT: [[TMP9]] = add i64 [[INDVAR]], 1
-; CHECK-NEXT: [[SCEVGEP10:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP9]]
-; CHECK-NEXT: [[TMP3:%.*]] = load double, ptr [[SCEVGEP]], align 8
-; CHECK-NEXT: [[TMP4]] = fadd double [[TMP2]], [[TMP3]]
-; CHECK-NEXT: store double [[TMP4]], ptr [[SCEVGEP]], align 8
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP9]], [[TMP7]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]]
-; CHECK: return:
-; CHECK-NEXT: ret void
+; MDEP-LABEL: @test9(
+; MDEP-NEXT: entry:
+; MDEP-NEXT: [[TMP0:%.*]] = add i32 [[N:%.*]], -1
+; MDEP-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[TMP0]], 1
+; MDEP-NEXT: br i1 [[TMP1]], label [[BB_NPH:%.*]], label [[RETURN:%.*]]
+; MDEP: bb.nph:
+; MDEP-NEXT: [[TMP:%.*]] = sext i32 [[TMP0]] to i64
+; MDEP-NEXT: [[TMP7:%.*]] = add i64 [[TMP]], -1
+; MDEP-NEXT: [[SCEVGEP10_PHI_TRANS_INSERT:%.*]] = getelementptr double, ptr [[G:%.*]], i64 1
+; MDEP-NEXT: [[DOTPRE:%.*]] = load double, ptr [[SCEVGEP10_PHI_TRANS_INSERT]], align 8
+; MDEP-NEXT: br label [[BB:%.*]]
+; MDEP: bb:
+; MDEP-NEXT: [[TMP2:%.*]] = phi double [ [[DOTPRE]], [[BB_NPH]] ], [ [[TMP4:%.*]], [[BB]] ]
+; MDEP-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP9:%.*]], [[BB]] ]
+; MDEP-NEXT: [[TMP8:%.*]] = add i64 [[INDVAR]], 2
+; MDEP-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP8]]
+; MDEP-NEXT: [[TMP9]] = add i64 [[INDVAR]], 1
+; MDEP-NEXT: [[SCEVGEP10:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP9]]
+; MDEP-NEXT: [[TMP3:%.*]] = load double, ptr [[SCEVGEP]], align 8
+; MDEP-NEXT: [[TMP4]] = fadd double [[TMP2]], [[TMP3]]
+; MDEP-NEXT: store double [[TMP4]], ptr [[SCEVGEP]], align 8
+; MDEP-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP9]], [[TMP7]]
+; MDEP-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]]
+; MDEP: return:
+; MDEP-NEXT: ret void
+;
+; MSSA-LABEL: @test9(
+; MSSA-NEXT: entry:
+; MSSA-NEXT: [[TMP0:%.*]] = add i32 [[N:%.*]], -1
+; MSSA-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[TMP0]], 1
+; MSSA-NEXT: br i1 [[TMP1]], label [[BB_NPH:%.*]], label [[RETURN:%.*]]
+; MSSA: bb.nph:
+; MSSA-NEXT: [[TMP:%.*]] = sext i32 [[TMP0]] to i64
+; MSSA-NEXT: [[TMP7:%.*]] = add i64 [[TMP]], -1
+; MSSA-NEXT: br label [[BB:%.*]]
+; MSSA: bb:
+; MSSA-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP9:%.*]], [[BB]] ]
+; MSSA-NEXT: [[TMP8:%.*]] = add i64 [[INDVAR]], 2
+; MSSA-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G:%.*]], i64 [[TMP8]]
+; MSSA-NEXT: [[TMP9]] = add i64 [[INDVAR]], 1
+; MSSA-NEXT: [[SCEVGEP10:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP9]]
+; MSSA-NEXT: [[TMP2:%.*]] = load double, ptr [[SCEVGEP10]], align 8
+; MSSA-NEXT: [[TMP3:%.*]] = load double, ptr [[SCEVGEP]], align 8
+; MSSA-NEXT: [[TMP4:%.*]] = fadd double [[TMP2]], [[TMP3]]
+; MSSA-NEXT: store double [[TMP4]], ptr [[SCEVGEP]], align 8
+; MSSA-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP9]], [[TMP7]]
+; MSSA-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]]
+; MSSA: return:
+; MSSA-NEXT: ret void
;
entry:
add i32 0, 0
@@ -482,35 +668,62 @@ return:
; PR5501
define void @test10(i32 %N, ptr nocapture %G) nounwind ssp {
-; CHECK-LABEL: @test10(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[N:%.*]], -1
-; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[TMP0]], 1
-; CHECK-NEXT: br i1 [[TMP1]], label [[BB_NPH:%.*]], label [[RETURN:%.*]]
-; CHECK: bb.nph:
-; CHECK-NEXT: [[TMP:%.*]] = sext i32 [[TMP0]] to i64
-; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[TMP]], -1
-; CHECK-NEXT: [[SCEVGEP12_PHI_TRANS_INSERT:%.*]] = getelementptr double, ptr [[G:%.*]], i64 1
-; CHECK-NEXT: [[DOTPRE:%.*]] = load double, ptr [[SCEVGEP12_PHI_TRANS_INSERT]], align 8
-; CHECK-NEXT: [[DOTPRE1:%.*]] = load double, ptr [[G]], align 8
-; CHECK-NEXT: br label [[BB:%.*]]
-; CHECK: bb:
-; CHECK-NEXT: [[TMP2:%.*]] = phi double [ [[DOTPRE1]], [[BB_NPH]] ], [ [[TMP6:%.*]], [[BB]] ]
-; CHECK-NEXT: [[TMP3:%.*]] = phi double [ [[DOTPRE]], [[BB_NPH]] ], [ [[TMP4:%.*]], [[BB]] ]
-; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP11:%.*]], [[BB]] ]
-; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G]], i64 [[INDVAR]]
-; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDVAR]], 2
-; CHECK-NEXT: [[SCEVGEP10:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP9]]
-; CHECK-NEXT: [[TMP11]] = add i64 [[INDVAR]], 1
-; CHECK-NEXT: [[SCEVGEP12:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP11]]
-; CHECK-NEXT: [[TMP4]] = load double, ptr [[SCEVGEP10]], align 8
-; CHECK-NEXT: [[TMP5:%.*]] = fadd double [[TMP3]], [[TMP4]]
-; CHECK-NEXT: [[TMP6]] = fadd double [[TMP5]], [[TMP2]]
-; CHECK-NEXT: store double [[TMP6]], ptr [[SCEVGEP12]], align 8
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP11]], [[TMP8]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]]
-; CHECK: return:
-; CHECK-NEXT: ret void
+; MDEP-LABEL: @test10(
+; MDEP-NEXT: entry:
+; MDEP-NEXT: [[TMP0:%.*]] = add i32 [[N:%.*]], -1
+; MDEP-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[TMP0]], 1
+; MDEP-NEXT: br i1 [[TMP1]], label [[BB_NPH:%.*]], label [[RETURN:%.*]]
+; MDEP: bb.nph:
+; MDEP-NEXT: [[TMP:%.*]] = sext i32 [[TMP0]] to i64
+; MDEP-NEXT: [[TMP8:%.*]] = add i64 [[TMP]], -1
+; MDEP-NEXT: [[SCEVGEP12_PHI_TRANS_INSERT:%.*]] = getelementptr double, ptr [[G:%.*]], i64 1
+; MDEP-NEXT: [[DOTPRE:%.*]] = load double, ptr [[SCEVGEP12_PHI_TRANS_INSERT]], align 8
+; MDEP-NEXT: [[DOTPRE1:%.*]] = load double, ptr [[G]], align 8
+; MDEP-NEXT: br label [[BB:%.*]]
+; MDEP: bb:
+; MDEP-NEXT: [[TMP2:%.*]] = phi double [ [[DOTPRE1]], [[BB_NPH]] ], [ [[TMP6:%.*]], [[BB]] ]
+; MDEP-NEXT: [[TMP3:%.*]] = phi double [ [[DOTPRE]], [[BB_NPH]] ], [ [[TMP4:%.*]], [[BB]] ]
+; MDEP-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP11:%.*]], [[BB]] ]
+; MDEP-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G]], i64 [[INDVAR]]
+; MDEP-NEXT: [[TMP9:%.*]] = add i64 [[INDVAR]], 2
+; MDEP-NEXT: [[SCEVGEP10:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP9]]
+; MDEP-NEXT: [[TMP11]] = add i64 [[INDVAR]], 1
+; MDEP-NEXT: [[SCEVGEP12:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP11]]
+; MDEP-NEXT: [[TMP4]] = load double, ptr [[SCEVGEP10]], align 8
+; MDEP-NEXT: [[TMP5:%.*]] = fadd double [[TMP3]], [[TMP4]]
+; MDEP-NEXT: [[TMP6]] = fadd double [[TMP5]], [[TMP2]]
+; MDEP-NEXT: store double [[TMP6]], ptr [[SCEVGEP12]], align 8
+; MDEP-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP11]], [[TMP8]]
+; MDEP-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]]
+; MDEP: return:
+; MDEP-NEXT: ret void
+;
+; MSSA-LABEL: @test10(
+; MSSA-NEXT: entry:
+; MSSA-NEXT: [[TMP0:%.*]] = add i32 [[N:%.*]], -1
+; MSSA-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[TMP0]], 1
+; MSSA-NEXT: br i1 [[TMP1]], label [[BB_NPH:%.*]], label [[RETURN:%.*]]
+; MSSA: bb.nph:
+; MSSA-NEXT: [[TMP:%.*]] = sext i32 [[TMP0]] to i64
+; MSSA-NEXT: [[TMP8:%.*]] = add i64 [[TMP]], -1
+; MSSA-NEXT: br label [[BB:%.*]]
+; MSSA: bb:
+; MSSA-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP11:%.*]], [[BB]] ]
+; MSSA-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G:%.*]], i64 [[INDVAR]]
+; MSSA-NEXT: [[TMP9:%.*]] = add i64 [[INDVAR]], 2
+; MSSA-NEXT: [[SCEVGEP10:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP9]]
+; MSSA-NEXT: [[TMP11]] = add i64 [[INDVAR]], 1
+; MSSA-NEXT: [[SCEVGEP12:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP11]]
+; MSSA-NEXT: [[TMP2:%.*]] = load double, ptr [[SCEVGEP12]], align 8
+; MSSA-NEXT: [[TMP3:%.*]] = load double, ptr [[SCEVGEP10]], align 8
+; MSSA-NEXT: [[TMP4:%.*]] = fadd double [[TMP2]], [[TMP3]]
+; MSSA-NEXT: [[TMP5:%.*]] = load double, ptr [[SCEVGEP]], align 8
+; MSSA-NEXT: [[TMP6:%.*]] = fadd double [[TMP4]], [[TMP5]]
+; MSSA-NEXT: store double [[TMP6]], ptr [[SCEVGEP12]], align 8
+; MSSA-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP11]], [[TMP8]]
+; MSSA-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]]
+; MSSA: return:
+; MSSA-NEXT: ret void
;
entry:
%0 = add i32 %N, -1
@@ -547,24 +760,40 @@ return:
; Test critical edge splitting.
define i32 @test11(ptr %p, i1 %C, i32 %N) {
-; CHECK-LABEL: @test11(
-; CHECK-NEXT: block1:
-; CHECK-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
-; CHECK: block2:
-; CHECK-NEXT: [[COND:%.*]] = icmp sgt i32 [[N:%.*]], 1
-; CHECK-NEXT: br i1 [[COND]], label [[BLOCK2_BLOCK4_CRIT_EDGE:%.*]], label [[BLOCK5:%.*]]
-; CHECK: block2.block4_crit_edge:
-; CHECK-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[P:%.*]], align 4
-; CHECK-NEXT: br label [[BLOCK4:%.*]]
-; CHECK: block3:
-; CHECK-NEXT: store i32 0, ptr [[P]], align 4
-; CHECK-NEXT: br label [[BLOCK4]]
-; CHECK: block4:
-; CHECK-NEXT: [[PRE:%.*]] = phi i32 [ [[PRE_PRE]], [[BLOCK2_BLOCK4_CRIT_EDGE]] ], [ 0, [[BLOCK3]] ]
-; CHECK-NEXT: br label [[BLOCK5]]
-; CHECK: block5:
-; CHECK-NEXT: [[RET:%.*]] = phi i32 [ 0, [[BLOCK2]] ], [ [[PRE]], [[BLOCK4]] ]
-; CHECK-NEXT: ret i32 [[RET]]
+; MDEP-LABEL: @test11(
+; MDEP-NEXT: block1:
+; MDEP-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
+; MDEP: block2:
+; MDEP-NEXT: [[COND:%.*]] = icmp sgt i32 [[N:%.*]], 1
+; MDEP-NEXT: br i1 [[COND]], label [[BLOCK2_BLOCK4_CRIT_EDGE:%.*]], label [[BLOCK5:%.*]]
+; MDEP: block2.block4_crit_edge:
+; MDEP-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[P:%.*]], align 4
+; MDEP-NEXT: br label [[BLOCK4:%.*]]
+; MDEP: block3:
+; MDEP-NEXT: store i32 0, ptr [[P]], align 4
+; MDEP-NEXT: br label [[BLOCK4]]
+; MDEP: block4:
+; MDEP-NEXT: [[PRE:%.*]] = phi i32 [ [[PRE_PRE]], [[BLOCK2_BLOCK4_CRIT_EDGE]] ], [ 0, [[BLOCK3]] ]
+; MDEP-NEXT: br label [[BLOCK5]]
+; MDEP: block5:
+; MDEP-NEXT: [[RET:%.*]] = phi i32 [ 0, [[BLOCK2]] ], [ [[PRE]], [[BLOCK4]] ]
+; MDEP-NEXT: ret i32 [[RET]]
+;
+; MSSA-LABEL: @test11(
+; MSSA-NEXT: block1:
+; MSSA-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
+; MSSA: block2:
+; MSSA-NEXT: [[COND:%.*]] = icmp sgt i32 [[N:%.*]], 1
+; MSSA-NEXT: br i1 [[COND]], label [[BLOCK4:%.*]], label [[BLOCK5:%.*]]
+; MSSA: block3:
+; MSSA-NEXT: store i32 0, ptr [[P:%.*]], align 4
+; MSSA-NEXT: br label [[BLOCK4]]
+; MSSA: block4:
+; MSSA-NEXT: [[PRE:%.*]] = load i32, ptr [[P]], align 4
+; MSSA-NEXT: br label [[BLOCK5]]
+; MSSA: block5:
+; MSSA-NEXT: [[RET:%.*]] = phi i32 [ 0, [[BLOCK2]] ], [ [[PRE]], [[BLOCK4]] ]
+; MSSA-NEXT: ret i32 [[RET]]
;
block1:
br i1 %C, label %block2, label %block3
@@ -726,17 +955,30 @@ follow_2:
; Since it is OK to speculate, PRE is allowed.
define i32 @test15(ptr noalias nocapture readonly dereferenceable(8) align 4 %x, ptr noalias nocapture %r, i32 %a) nofree nosync {
-; CHECK-LABEL: @test15(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[A:%.*]], 0
-; CHECK-NEXT: [[VV_PRE:%.*]] = load i32, ptr [[X:%.*]], align 4
-; CHECK-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
-; CHECK: if.then:
-; CHECK-NEXT: store i32 [[VV_PRE]], ptr [[R:%.*]], align 4
-; CHECK-NEXT: br label [[IF_END]]
-; CHECK: if.end:
-; CHECK-NEXT: call void @f()
-; CHECK-NEXT: ret i32 [[VV_PRE]]
+; MDEP-LABEL: @test15(
+; MDEP-NEXT: entry:
+; MDEP-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[A:%.*]], 0
+; MDEP-NEXT: [[VV_PRE:%.*]] = load i32, ptr [[X:%.*]], align 4
+; MDEP-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
+; MDEP: if.then:
+; MDEP-NEXT: store i32 [[VV_PRE]], ptr [[R:%.*]], align 4
+; MDEP-NEXT: br label [[IF_END]]
+; MDEP: if.end:
+; MDEP-NEXT: call void @f()
+; MDEP-NEXT: ret i32 [[VV_PRE]]
+;
+; MSSA-LABEL: @test15(
+; MSSA-NEXT: entry:
+; MSSA-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[A:%.*]], 0
+; MSSA-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
+; MSSA: if.then:
+; MSSA-NEXT: [[UU:%.*]] = load i32, ptr [[X:%.*]], align 4
+; MSSA-NEXT: store i32 [[UU]], ptr [[R:%.*]], align 4
+; MSSA-NEXT: br label [[IF_END]]
+; MSSA: if.end:
+; MSSA-NEXT: call void @f()
+; MSSA-NEXT: [[VV:%.*]] = load i32, ptr [[X]], align 4
+; MSSA-NEXT: ret i32 [[VV]]
;
entry:
@@ -763,17 +1005,30 @@ if.end:
; Since it is OK to speculate, PRE is allowed.
define i32 @test16(ptr noalias nocapture readonly dereferenceable(8) align 4 %x, ptr noalias nocapture %r, i32 %a) nofree nosync {
-; CHECK-LABEL: @test16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[A:%.*]], 0
-; CHECK-NEXT: [[VV_PRE:%.*]] = load i32, ptr [[X:%.*]], align 4
-; CHECK-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
-; CHECK: if.then:
-; CHECK-NEXT: store i32 [[VV_PRE]], ptr [[R:%.*]], align 4
-; CHECK-NEXT: br label [[IF_END]]
-; CHECK: if.end:
-; CHECK-NEXT: call void @f()
-; CHECK-NEXT: ret i32 [[VV_PRE]]
+; MDEP-LABEL: @test16(
+; MDEP-NEXT: entry:
+; MDEP-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[A:%.*]], 0
+; MDEP-NEXT: [[VV_PRE:%.*]] = load i32, ptr [[X:%.*]], align 4
+; MDEP-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
+; MDEP: if.then:
+; MDEP-NEXT: store i32 [[VV_PRE]], ptr [[R:%.*]], align 4
+; MDEP-NEXT: br label [[IF_END]]
+; MDEP: if.end:
+; MDEP-NEXT: call void @f()
+; MDEP-NEXT: ret i32 [[VV_PRE]]
+;
+; MSSA-LABEL: @test16(
+; MSSA-NEXT: entry:
+; MSSA-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[A:%.*]], 0
+; MSSA-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
+; MSSA: if.then:
+; MSSA-NEXT: [[UU:%.*]] = load i32, ptr [[X:%.*]], align 4
+; MSSA-NEXT: store i32 [[UU]], ptr [[R:%.*]], align 4
+; MSSA-NEXT: br label [[IF_END]]
+; MSSA: if.end:
+; MSSA-NEXT: call void @f()
+; MSSA-NEXT: [[VV:%.*]] = load i32, ptr [[X]], align 4
+; MSSA-NEXT: ret i32 [[VV]]
;
entry:
@@ -808,36 +1063,67 @@ declare i1 @bar()
; We can move all loads into predecessors.
define void @test17(ptr %p1, ptr %p2, ptr %p3, ptr %p4)
-; CHECK-LABEL: @test17(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[V1:%.*]] = load i64, ptr [[P1:%.*]], align 8
-; CHECK-NEXT: [[COND1:%.*]] = icmp sgt i64 [[V1]], 200
-; CHECK-NEXT: br i1 [[COND1]], label [[BB200:%.*]], label [[BB1:%.*]]
-; CHECK: bb1:
-; CHECK-NEXT: [[COND2:%.*]] = icmp sgt i64 [[V1]], 100
-; CHECK-NEXT: br i1 [[COND2]], label [[BB100:%.*]], label [[BB2:%.*]]
-; CHECK: bb2:
-; CHECK-NEXT: [[V2:%.*]] = add nsw i64 [[V1]], 1
-; CHECK-NEXT: store i64 [[V2]], ptr [[P1]], align 8
-; CHECK-NEXT: br label [[BB3:%.*]]
-; CHECK: bb3:
-; CHECK-NEXT: [[V3:%.*]] = phi i64 [ [[V3_PRE:%.*]], [[BB200]] ], [ [[V3_PRE1:%.*]], [[BB100]] ], [ [[V2]], [[BB2]] ]
-; CHECK-NEXT: store i64 [[V3]], ptr [[P2:%.*]], align 8
-; CHECK-NEXT: ret void
-; CHECK: bb100:
-; CHECK-NEXT: [[COND3:%.*]] = call i1 @foo()
-; CHECK-NEXT: [[V3_PRE1]] = load i64, ptr [[P1]], align 8
-; CHECK-NEXT: br i1 [[COND3]], label [[BB3]], label [[BB101:%.*]]
-; CHECK: bb101:
-; CHECK-NEXT: store i64 [[V3_PRE1]], ptr [[P3:%.*]], align 8
-; CHECK-NEXT: ret void
-; CHECK: bb200:
-; CHECK-NEXT: [[COND4:%.*]] = call i1 @bar()
-; CHECK-NEXT: [[V3_PRE]] = load i64, ptr [[P1]], align 8
-; CHECK-NEXT: br i1 [[COND4]], label [[BB3]], label [[BB201:%.*]]
-; CHECK: bb201:
-; CHECK-NEXT: store i64 [[V3_PRE]], ptr [[P4:%.*]], align 8
-; CHECK-NEXT: ret void
+; MDEP-LABEL: @test17(
+; MDEP-NEXT: entry:
+; MDEP-NEXT: [[V1:%.*]] = load i64, ptr [[P1:%.*]], align 8
+; MDEP-NEXT: [[COND1:%.*]] = icmp sgt i64 [[V1]], 200
+; MDEP-NEXT: br i1 [[COND1]], label [[BB200:%.*]], label [[BB1:%.*]]
+; MDEP: bb1:
+; MDEP-NEXT: [[COND2:%.*]] = icmp sgt i64 [[V1]], 100
+; MDEP-NEXT: br i1 [[COND2]], label [[BB100:%.*]], label [[BB2:%.*]]
+; MDEP: bb2:
+; MDEP-NEXT: [[V2:%.*]] = add nsw i64 [[V1]], 1
+; MDEP-NEXT: store i64 [[V2]], ptr [[P1]], align 8
+; MDEP-NEXT: br label [[BB3:%.*]]
+; MDEP: bb3:
+; MDEP-NEXT: [[V3:%.*]] = phi i64 [ [[V3_PRE:%.*]], [[BB200]] ], [ [[V3_PRE1:%.*]], [[BB100]] ], [ [[V2]], [[BB2]] ]
+; MDEP-NEXT: store i64 [[V3]], ptr [[P2:%.*]], align 8
+; MDEP-NEXT: ret void
+; MDEP: bb100:
+; MDEP-NEXT: [[COND3:%.*]] = call i1 @foo()
+; MDEP-NEXT: [[V3_PRE1]] = load i64, ptr [[P1]], align 8
+; MDEP-NEXT: br i1 [[COND3]], label [[BB3]], label [[BB101:%.*]]
+; MDEP: bb101:
+; MDEP-NEXT: store i64 [[V3_PRE1]], ptr [[P3:%.*]], align 8
+; MDEP-NEXT: ret void
+; MDEP: bb200:
+; MDEP-NEXT: [[COND4:%.*]] = call i1 @bar()
+; MDEP-NEXT: [[V3_PRE]] = load i64, ptr [[P1]], align 8
+; MDEP-NEXT: br i1 [[COND4]], label [[BB3]], label [[BB201:%.*]]
+; MDEP: bb201:
+; MDEP-NEXT: store i64 [[V3_PRE]], ptr [[P4:%.*]], align 8
+; MDEP-NEXT: ret void
+;
+; MSSA-LABEL: @test17(
+; MSSA-NEXT: entry:
+; MSSA-NEXT: [[V1:%.*]] = load i64, ptr [[P1:%.*]], align 8
+; MSSA-NEXT: [[COND1:%.*]] = icmp sgt i64 [[V1]], 200
+; MSSA-NEXT: br i1 [[COND1]], label [[BB200:%.*]], label [[BB1:%.*]]
+; MSSA: bb1:
+; MSSA-NEXT: [[COND2:%.*]] = icmp sgt i64 [[V1]], 100
+; MSSA-NEXT: br i1 [[COND2]], label [[BB100:%.*]], label [[BB2:%.*]]
+; MSSA: bb2:
+; MSSA-NEXT: [[V2:%.*]] = add nsw i64 [[V1]], 1
+; MSSA-NEXT: store i64 [[V2]], ptr [[P1]], align 8
+; MSSA-NEXT: br label [[BB3:%.*]]
+; MSSA: bb3:
+; MSSA-NEXT: [[V3:%.*]] = load i64, ptr [[P1]], align 8
+; MSSA-NEXT: store i64 [[V3]], ptr [[P2:%.*]], align 8
+; MSSA-NEXT: ret void
+; MSSA: bb100:
+; MSSA-NEXT: [[COND3:%.*]] = call i1 @foo()
+; MSSA-NEXT: br i1 [[COND3]], label [[BB3]], label [[BB101:%.*]]
+; MSSA: bb101:
+; MSSA-NEXT: [[V4:%.*]] = load i64, ptr [[P1]], align 8
+; MSSA-NEXT: store i64 [[V4]], ptr [[P3:%.*]], align 8
+; MSSA-NEXT: ret void
+; MSSA: bb200:
+; MSSA-NEXT: [[COND4:%.*]] = call i1 @bar()
+; MSSA-NEXT: br i1 [[COND4]], label [[BB3]], label [[BB201:%.*]]
+; MSSA: bb201:
+; MSSA-NEXT: [[V5:%.*]] = load i64, ptr [[P1]], align 8
+; MSSA-NEXT: store i64 [[V5]], ptr [[P4:%.*]], align 8
+; MSSA-NEXT: ret void
;
{
entry:
@@ -882,18 +1168,31 @@ bb201:
; So ValuesPerBlock[%if.then] should not be replaced when the load instruction
; is moved to %entry.
define void @test18(i1 %cond, ptr %p1, ptr %p2) {
-; CHECK-LABEL: @test18(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[V2_PRE:%.*]] = load i16, ptr [[P1:%.*]], align 2
-; CHECK-NEXT: br i1 [[COND:%.*]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
-; CHECK: if.then:
-; CHECK-NEXT: [[DEC:%.*]] = add i16 [[V2_PRE]], -1
-; CHECK-NEXT: store i16 [[DEC]], ptr [[P1]], align 2
-; CHECK-NEXT: br label [[IF_END]]
-; CHECK: if.end:
-; CHECK-NEXT: [[V2:%.*]] = phi i16 [ [[DEC]], [[IF_THEN]] ], [ [[V2_PRE]], [[ENTRY:%.*]] ]
-; CHECK-NEXT: store i16 [[V2]], ptr [[P2:%.*]], align 2
-; CHECK-NEXT: ret void
+; MDEP-LABEL: @test18(
+; MDEP-NEXT: entry:
+; MDEP-NEXT: [[V2_PRE:%.*]] = load i16, ptr [[P1:%.*]], align 2
+; MDEP-NEXT: br i1 [[COND:%.*]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
+; MDEP: if.then:
+; MDEP-NEXT: [[DEC:%.*]] = add i16 [[V2_PRE]], -1
+; MDEP-NEXT: store i16 [[DEC]], ptr [[P1]], align 2
+; MDEP-NEXT: br label [[IF_END]]
+; MDEP: if.end:
+; MDEP-NEXT: [[V2:%.*]] = phi i16 [ [[DEC]], [[IF_THEN]] ], [ [[V2_PRE]], [[ENTRY:%.*]] ]
+; MDEP-NEXT: store i16 [[V2]], ptr [[P2:%.*]], align 2
+; MDEP-NEXT: ret void
+;
+; MSSA-LABEL: @test18(
+; MSSA-NEXT: entry:
+; MSSA-NEXT: br i1 [[COND:%.*]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
+; MSSA: if.then:
+; MSSA-NEXT: [[V1:%.*]] = load i16, ptr [[P1:%.*]], align 2
+; MSSA-NEXT: [[DEC:%.*]] = add i16 [[V1]], -1
+; MSSA-NEXT: store i16 [[DEC]], ptr [[P1]], align 2
+; MSSA-NEXT: br label [[IF_END]]
+; MSSA: if.end:
+; MSSA-NEXT: [[V2:%.*]] = load i16, ptr [[P1]], align 2
+; MSSA-NEXT: store i16 [[V2]], ptr [[P2:%.*]], align 2
+; MSSA-NEXT: ret void
;
entry:
br i1 %cond, label %if.end, label %if.then
@@ -912,32 +1211,56 @@ if.end:
; PRE of load instructions should not cross exception handling instructions.
define void @test19(i1 %cond, ptr %p1, ptr %p2)
-; CHECK-LABEL: @test19(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: br i1 [[COND:%.*]], label [[THEN:%.*]], label [[ELSE:%.*]]
-; CHECK: then:
-; CHECK-NEXT: [[V2:%.*]] = load i64, ptr [[P2:%.*]], align 8
-; CHECK-NEXT: [[ADD:%.*]] = add i64 [[V2]], 1
-; CHECK-NEXT: store i64 [[ADD]], ptr [[P1:%.*]], align 8
-; CHECK-NEXT: br label [[END:%.*]]
-; CHECK: else:
-; CHECK-NEXT: invoke void @f()
-; CHECK-NEXT: to label [[ELSE_END_CRIT_EDGE:%.*]] unwind label [[LPAD:%.*]]
-; CHECK: else.end_crit_edge:
-; CHECK-NEXT: [[V1_PRE:%.*]] = load i64, ptr [[P1]], align 8
-; CHECK-NEXT: br label [[END]]
-; CHECK: end:
-; CHECK-NEXT: [[V1:%.*]] = phi i64 [ [[V1_PRE]], [[ELSE_END_CRIT_EDGE]] ], [ [[ADD]], [[THEN]] ]
-; CHECK-NEXT: [[AND:%.*]] = and i64 [[V1]], 100
-; CHECK-NEXT: store i64 [[AND]], ptr [[P2]], align 8
-; CHECK-NEXT: ret void
-; CHECK: lpad:
-; CHECK-NEXT: [[LP:%.*]] = landingpad { ptr, i32 }
-; CHECK-NEXT: cleanup
-; CHECK-NEXT: [[V3:%.*]] = load i64, ptr [[P1]], align 8
-; CHECK-NEXT: [[OR:%.*]] = or i64 [[V3]], 200
-; CHECK-NEXT: store i64 [[OR]], ptr [[P1]], align 8
-; CHECK-NEXT: resume { ptr, i32 } [[LP]]
+; MDEP-LABEL: @test19(
+; MDEP-NEXT: entry:
+; MDEP-NEXT: br i1 [[COND:%.*]], label [[THEN:%.*]], label [[ELSE:%.*]]
+; MDEP: then:
+; MDEP-NEXT: [[V2:%.*]] = load i64, ptr [[P2:%.*]], align 8
+; MDEP-NEXT: [[ADD:%.*]] = add i64 [[V2]], 1
+; MDEP-NEXT: store i64 [[ADD]], ptr [[P1:%.*]], align 8
+; MDEP-NEXT: br label [[END:%.*]]
+; MDEP: else:
+; MDEP-NEXT: invoke void @f()
+; MDEP-NEXT: to label [[ELSE_END_CRIT_EDGE:%.*]] unwind label [[LPAD:%.*]]
+; MDEP: else.end_crit_edge:
+; MDEP-NEXT: [[V1_PRE:%.*]] = load i64, ptr [[P1]], align 8
+; MDEP-NEXT: br label [[END]]
+; MDEP: end:
+; MDEP-NEXT: [[V1:%.*]] = phi i64 [ [[V1_PRE]], [[ELSE_END_CRIT_EDGE]] ], [ [[ADD]], [[THEN]] ]
+; MDEP-NEXT: [[AND:%.*]] = and i64 [[V1]], 100
+; MDEP-NEXT: store i64 [[AND]], ptr [[P2]], align 8
+; MDEP-NEXT: ret void
+; MDEP: lpad:
+; MDEP-NEXT: [[LP:%.*]] = landingpad { ptr, i32 }
+; MDEP-NEXT: cleanup
+; MDEP-NEXT: [[V3:%.*]] = load i64, ptr [[P1]], align 8
+; MDEP-NEXT: [[OR:%.*]] = or i64 [[V3]], 200
+; MDEP-NEXT: store i64 [[OR]], ptr [[P1]], align 8
+; MDEP-NEXT: resume { ptr, i32 } [[LP]]
+;
+; MSSA-LABEL: @test19(
+; MSSA-NEXT: entry:
+; MSSA-NEXT: br i1 [[COND:%.*]], label [[THEN:%.*]], label [[ELSE:%.*]]
+; MSSA: then:
+; MSSA-NEXT: [[V2:%.*]] = load i64, ptr [[P2:%.*]], align 8
+; MSSA-NEXT: [[ADD:%.*]] = add i64 [[V2]], 1
+; MSSA-NEXT: store i64 [[ADD]], ptr [[P1:%.*]], align 8
+; MSSA-NEXT: br label [[END:%.*]]
+; MSSA: else:
+; MSSA-NEXT: invoke void @f()
+; MSSA-NEXT: to label [[END]] unwind label [[LPAD:%.*]]
+; MSSA: end:
+; MSSA-NEXT: [[V1:%.*]] = load i64, ptr [[P1]], align 8
+; MSSA-NEXT: [[AND:%.*]] = and i64 [[V1]], 100
+; MSSA-NEXT: store i64 [[AND]], ptr [[P2]], align 8
+; MSSA-NEXT: ret void
+; MSSA: lpad:
+; MSSA-NEXT: [[LP:%.*]] = landingpad { ptr, i32 }
+; MSSA-NEXT: cleanup
+; MSSA-NEXT: [[V3:%.*]] = load i64, ptr [[P1]], align 8
+; MSSA-NEXT: [[OR:%.*]] = or i64 [[V3]], 200
+; MSSA-NEXT: store i64 [[OR]], ptr [[P1]], align 8
+; MSSA-NEXT: resume { ptr, i32 } [[LP]]
;
personality ptr @__CxxFrameHandler3 {
entry:
@@ -1050,29 +1373,50 @@ if.end:
; A call to function @maybethrow may cause an exception, so the load of %v3 can't
; be hoisted to block %if.else.
define void @test22(i1 %cond, ptr %p1, ptr %p2) {
-; CHECK-LABEL: @test22(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: br i1 [[COND:%.*]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
-; CHECK: if.then:
-; CHECK-NEXT: [[V1:%.*]] = load i64, ptr [[P1:%.*]], align 8
-; CHECK-NEXT: [[DEC:%.*]] = add i64 [[V1]], -1
-; CHECK-NEXT: store i64 [[DEC]], ptr [[P1]], align 8
-; CHECK-NEXT: br label [[IF_END:%.*]]
-; CHECK: if.end:
-; CHECK-NEXT: [[V2:%.*]] = phi i64 [ [[V2_PRE:%.*]], [[IF_ELSE_IF_END_CRIT_EDGE:%.*]] ], [ [[DEC]], [[IF_THEN]] ]
-; CHECK-NEXT: store i64 [[V2]], ptr [[P2:%.*]], align 8
-; CHECK-NEXT: ret void
-; CHECK: if.else:
-; CHECK-NEXT: [[COND2:%.*]] = call i1 @foo()
-; CHECK-NEXT: br i1 [[COND2]], label [[IF_ELSE_IF_END_CRIT_EDGE]], label [[EXIT:%.*]]
-; CHECK: if.else.if.end_crit_edge:
-; CHECK-NEXT: [[V2_PRE]] = load i64, ptr [[P1]], align 8
-; CHECK-NEXT: br label [[IF_END]]
-; CHECK: exit:
-; CHECK-NEXT: [[_:%.*]] = call i1 @maybethrow()
-; CHECK-NEXT: [[V3:%.*]] = load i64, ptr [[P1]], align 8
-; CHECK-NEXT: store i64 [[V3]], ptr [[P2]], align 8
-; CHECK-NEXT: ret void
+; MDEP-LABEL: @test22(
+; MDEP-NEXT: entry:
+; MDEP-NEXT: br i1 [[COND:%.*]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+; MDEP: if.then:
+; MDEP-NEXT: [[V1:%.*]] = load i64, ptr [[P1:%.*]], align 8
+; MDEP-NEXT: [[DEC:%.*]] = add i64 [[V1]], -1
+; MDEP-NEXT: store i64 [[DEC]], ptr [[P1]], align 8
+; MDEP-NEXT: br label [[IF_END:%.*]]
+; MDEP: if.end:
+; MDEP-NEXT: [[V2:%.*]] = phi i64 [ [[V2_PRE:%.*]], [[IF_ELSE_IF_END_CRIT_EDGE:%.*]] ], [ [[DEC]], [[IF_THEN]] ]
+; MDEP-NEXT: store i64 [[V2]], ptr [[P2:%.*]], align 8
+; MDEP-NEXT: ret void
+; MDEP: if.else:
+; MDEP-NEXT: [[COND2:%.*]] = call i1 @foo()
+; MDEP-NEXT: br i1 [[COND2]], label [[IF_ELSE_IF_END_CRIT_EDGE]], label [[EXIT:%.*]]
+; MDEP: if.else.if.end_crit_edge:
+; MDEP-NEXT: [[V2_PRE]] = load i64, ptr [[P1]], align 8
+; MDEP-NEXT: br label [[IF_END]]
+; MDEP: exit:
+; MDEP-NEXT: [[_:%.*]] = call i1 @maybethrow()
+; MDEP-NEXT: [[V3:%.*]] = load i64, ptr [[P1]], align 8
+; MDEP-NEXT: store i64 [[V3]], ptr [[P2]], align 8
+; MDEP-NEXT: ret void
+;
+; MSSA-LABEL: @test22(
+; MSSA-NEXT: entry:
+; MSSA-NEXT: br i1 [[COND:%.*]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+; MSSA: if.then:
+; MSSA-NEXT: [[V1:%.*]] = load i64, ptr [[P1:%.*]], align 8
+; MSSA-NEXT: [[DEC:%.*]] = add i64 [[V1]], -1
+; MSSA-NEXT: store i64 [[DEC]], ptr [[P1]], align 8
+; MSSA-NEXT: br label [[IF_END:%.*]]
+; MSSA: if.end:
+; MSSA-NEXT: [[V2:%.*]] = load i64, ptr [[P1]], align 8
+; MSSA-NEXT: store i64 [[V2]], ptr [[P2:%.*]], align 8
+; MSSA-NEXT: ret void
+; MSSA: if.else:
+; MSSA-NEXT: [[COND2:%.*]] = call i1 @foo()
+; MSSA-NEXT: br i1 [[COND2]], label [[IF_END]], label [[EXIT:%.*]]
+; MSSA: exit:
+; MSSA-NEXT: [[_:%.*]] = call i1 @maybethrow()
+; MSSA-NEXT: [[V3:%.*]] = load i64, ptr [[P1]], align 8
+; MSSA-NEXT: store i64 [[V3]], ptr [[P2]], align 8
+; MSSA-NEXT: ret void
;
entry:
br i1 %cond, label %if.then, label %if.else
@@ -1106,21 +1450,38 @@ declare void @maybethrow() readnone
; also be replaced by ValuesPerBlock(BB, NewLoad). So we won't use the deleted
; OldLoad in the later PHI instruction.
define void @test23(i1 %cond1, i1 %cond2) {
-; CHECK-LABEL: @test23(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[G:%.*]] = alloca i64, align 8
-; CHECK-NEXT: [[VAL1_PRE:%.*]] = load i64, ptr @B, align 8
-; CHECK-NEXT: br i1 [[COND2:%.*]], label [[THEN:%.*]], label [[WRONG:%.*]]
-; CHECK: then:
-; CHECK-NEXT: br i1 [[COND1:%.*]], label [[STORE:%.*]], label [[EXIT:%.*]]
-; CHECK: store:
-; CHECK-NEXT: store i64 [[VAL1_PRE]], ptr @B, align 8
-; CHECK-NEXT: br label [[WRONG]]
-; CHECK: wrong:
-; CHECK-NEXT: store i64 [[VAL1_PRE]], ptr [[G]], align 8
-; CHECK-NEXT: ret void
-; CHECK: exit:
-; CHECK-NEXT: ret void
+; MDEP-LABEL: @test23(
+; MDEP-NEXT: entry:
+; MDEP-NEXT: [[G:%.*]] = alloca i64, align 8
+; MDEP-NEXT: [[VAL1_PRE:%.*]] = load i64, ptr @B, align 8
+; MDEP-NEXT: br i1 [[COND2:%.*]], label [[THEN:%.*]], label [[WRONG:%.*]]
+; MDEP: then:
+; MDEP-NEXT: br i1 [[COND1:%.*]], label [[STORE:%.*]], label [[EXIT:%.*]]
+; MDEP: store:
+; MDEP-NEXT: store i64 [[VAL1_PRE]], ptr @B, align 8
+; MDEP-NEXT: br label [[WRONG]]
+; MDEP: wrong:
+; MDEP-NEXT: store i64 [[VAL1_PRE]], ptr [[G]], align 8
+; MDEP-NEXT: ret void
+; MDEP: exit:
+; MDEP-NEXT: ret void
+;
+; MSSA-LABEL: @test23(
+; MSSA-NEXT: entry:
+; MSSA-NEXT: [[G:%.*]] = alloca i64, align 8
+; MSSA-NEXT: br i1 [[COND2:%.*]], label [[THEN:%.*]], label [[WRONG:%.*]]
+; MSSA: then:
+; MSSA-NEXT: [[VAL2:%.*]] = load i64, ptr @B, align 8
+; MSSA-NEXT: br i1 [[COND1:%.*]], label [[STORE:%.*]], label [[EXIT:%.*]]
+; MSSA: store:
+; MSSA-NEXT: store i64 [[VAL2]], ptr @B, align 8
+; MSSA-NEXT: br label [[WRONG]]
+; MSSA: wrong:
+; MSSA-NEXT: [[VAL1:%.*]] = load i64, ptr @B, align 8
+; MSSA-NEXT: store i64 [[VAL1]], ptr [[G]], align 8
+; MSSA-NEXT: ret void
+; MSSA: exit:
+; MSSA-NEXT: ret void
;
entry:
%G = alloca i64, align 8
diff --git a/llvm/test/Transforms/GVN/PRE/pre-loop-load-new-pm.ll b/llvm/test/Transforms/GVN/PRE/pre-loop-load-new-pm.ll
index e16c21e..4cd2e47 100644
--- a/llvm/test/Transforms/GVN/PRE/pre-loop-load-new-pm.ll
+++ b/llvm/test/Transforms/GVN/PRE/pre-loop-load-new-pm.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -aa-pipeline=basic-aa -enable-load-pre -enable-pre -passes=gvn -S < %s | FileCheck %s
+; RUN: opt -aa-pipeline=basic-aa -enable-load-pre -enable-pre -passes=gvn -S < %s | FileCheck %s --check-prefixes=CHECK,MDEP
+; RUN: opt -aa-pipeline=basic-aa -enable-load-pre -enable-pre -passes='gvn<memoryssa>' -S < %s | FileCheck %s --check-prefixes=CHECK,MSSA
declare void @side_effect()
declare i1 @side_effect_cond()
@@ -216,7 +217,7 @@ define i32 @test_load_on_exiting_cold_path_02(ptr %p) gc "statepoint-example" pe
; CHECK-NEXT: br label [[BACKEDGE]]
; CHECK: cold_path:
; CHECK-NEXT: invoke void @side_effect()
-; CHECK-NEXT: to label [[BACKEDGE]] unwind label [[COLD_EXIT:%.*]]
+; CHECK-NEXT: to label [[BACKEDGE]] unwind label [[COLD_EXIT:%.*]]
; CHECK: backedge:
; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], [[X]]
; CHECK-NEXT: [[LOOP_COND:%.*]] = icmp ult i32 [[IV_NEXT]], 1000
@@ -225,7 +226,7 @@ define i32 @test_load_on_exiting_cold_path_02(ptr %p) gc "statepoint-example" pe
; CHECK-NEXT: ret i32 [[X]]
; CHECK: cold_exit:
; CHECK-NEXT: [[LANDING_PAD:%.*]] = landingpad token
-; CHECK-NEXT: cleanup
+; CHECK-NEXT: cleanup
; CHECK-NEXT: ret i32 -1
;
entry:
@@ -447,7 +448,7 @@ define i32 @test_inner_loop(ptr %p, i1 %arg) {
; CHECK-NEXT: br label [[INNER_LOOP:%.*]]
; CHECK: inner_loop:
; CHECK-NEXT: call void @side_effect()
-; CHECK-NEXT: br i1 %arg, label [[INNER_LOOP]], label [[BACKEDGE]]
+; CHECK-NEXT: br i1 [[ARG:%.*]], label [[INNER_LOOP]], label [[BACKEDGE]]
; CHECK: backedge:
; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], [[X]]
; CHECK-NEXT: [[LOOP_COND:%.*]] = icmp ult i32 [[IV_NEXT]], 1000
@@ -633,3 +634,6 @@ exit:
cold_exit:
ret i32 -1
}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; MDEP: {{.*}}
+; MSSA: {{.*}}
diff --git a/llvm/test/Transforms/GVN/PRE/pre-no-cost-phi.ll b/llvm/test/Transforms/GVN/PRE/pre-no-cost-phi.ll
index 2009c29..22c628b 100644
--- a/llvm/test/Transforms/GVN/PRE/pre-no-cost-phi.ll
+++ b/llvm/test/Transforms/GVN/PRE/pre-no-cost-phi.ll
@@ -1,4 +1,6 @@
-; RUN: opt < %s -passes=gvn -S | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt < %s -passes=gvn -S | FileCheck %s --check-prefixes=CHECK,MDEP
+; RUN: opt < %s -passes='gvn<memoryssa>' -S | FileCheck %s --check-prefixes=CHECK,MSSA
; This testcase tests insertion of no-cost phis. That is,
; when the value is already available in every predecessor,
; and we just need to insert a phi node to merge the available values.
@@ -8,6 +10,22 @@
define i32 @mai(i32 %foo, i32 %a, i32 %b) {
+; CHECK-LABEL: define i32 @mai(
+; CHECK-SAME: i32 [[FOO:%.*]], i32 [[A:%.*]], i32 [[B:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i32 [[FOO]], 0
+; CHECK-NEXT: br i1 [[TMP1]], label %[[BB1:.*]], label %[[BB2:.*]]
+; CHECK: [[BB1]]:
+; CHECK-NEXT: [[TMP2:%.*]] = add nsw i32 [[A]], [[B]]
+; CHECK-NEXT: store i32 [[TMP2]], ptr @c, align 4
+; CHECK-NEXT: br label %[[MERGEBLOCK:.*]]
+; CHECK: [[BB2]]:
+; CHECK-NEXT: [[TMP3:%.*]] = add nsw i32 [[A]], [[B]]
+; CHECK-NEXT: store i32 [[TMP3]], ptr @d, align 4
+; CHECK-NEXT: br label %[[MERGEBLOCK]]
+; CHECK: [[MERGEBLOCK]]:
+; CHECK-NEXT: [[DOTPRE_PHI:%.*]] = phi i32 [ [[TMP3]], %[[BB2]] ], [ [[TMP2]], %[[BB1]] ]
+; CHECK-NEXT: ret i32 [[DOTPRE_PHI]]
+;
%1 = icmp ne i32 %foo, 0
br i1 %1, label %bb1, label %bb2
@@ -22,10 +40,11 @@ bb2:
br label %mergeblock
mergeblock:
-; CHECK: pre-phi = phi i32 [ %3, %bb2 ], [ %2, %bb1 ]
-; CHECK-NEXT: ret i32 %.pre-phi
%4 = add nsw i32 %a, %b
ret i32 %4
}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; MDEP: {{.*}}
+; MSSA: {{.*}}
diff --git a/llvm/test/Transforms/GVN/PRE/pre-poison-add.ll b/llvm/test/Transforms/GVN/PRE/pre-poison-add.ll
index d17c459..32f149b 100644
--- a/llvm/test/Transforms/GVN/PRE/pre-poison-add.ll
+++ b/llvm/test/Transforms/GVN/PRE/pre-poison-add.ll
@@ -1,52 +1,77 @@
-; RUN: opt < %s -passes=gvn -enable-pre -S | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt < %s -passes=gvn -enable-pre -S | FileCheck %s --check-prefixes=CHECK,MDEP
+; RUN: opt < %s -passes='gvn<memoryssa>' -enable-pre -S | FileCheck %s --check-prefixes=CHECK,MSSA
@H = common global i32 0
@G = common global i32 0
define i32 @test1(i1 %cond, i32 %v) nounwind {
-; CHECK-LABEL: @test1
+; CHECK-LABEL: define i32 @test1(
+; CHECK-SAME: i1 [[COND:%.*]], i32 [[V:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: br i1 [[COND]], label %[[BB:.*]], label %[[BB1:.*]]
+; CHECK: [[BB]]:
+; CHECK-NEXT: [[ADD_1:%.*]] = add i32 [[V]], 42
+; CHECK-NEXT: store i32 [[ADD_1]], ptr @G, align 4
+; CHECK-NEXT: br label %[[RETURN:.*]]
+; CHECK: [[BB1]]:
+; CHECK-NEXT: [[DOTPRE:%.*]] = add i32 [[V]], 42
+; CHECK-NEXT: br label %[[RETURN]]
+; CHECK: [[RETURN]]:
+; CHECK-NEXT: [[ADD_2_PRE_PHI:%.*]] = phi i32 [ [[DOTPRE]], %[[BB1]] ], [ [[ADD_1]], %[[BB]] ]
+; CHECK-NEXT: store i32 [[ADD_2_PRE_PHI]], ptr @H, align 4
+; CHECK-NEXT: ret i32 0
+;
entry:
- br i1 %cond, label %bb, label %bb1
+ br i1 %cond, label %bb, label %bb1
bb:
- %add.1 = add nuw nsw i32 %v, 42
-; CHECK: %add.1 = add i32 %v, 42
- store i32 %add.1, ptr @G, align 4
- br label %return
+ %add.1 = add nuw nsw i32 %v, 42
+ store i32 %add.1, ptr @G, align 4
+ br label %return
bb1:
-; CHECK: %.pre = add i32 %v, 42
- br label %return
+ br label %return
return:
-; CHECK: %add.2.pre-phi = phi i32 [ %.pre, %bb1 ], [ %add.1, %bb ]
-; CHECK-NEXT: store i32 %add.2.pre-phi, ptr @H, align 4
-; CHECK-NEXT: ret i32 0
- %add.2 = add i32 %v, 42
- store i32 %add.2, ptr @H, align 4
- ret i32 0
+ %add.2 = add i32 %v, 42
+ store i32 %add.2, ptr @H, align 4
+ ret i32 0
}
define i32 @test2(i1 %cond, i32 %v) nounwind {
-; CHECK-LABEL: @test2
+; CHECK-LABEL: define i32 @test2(
+; CHECK-SAME: i1 [[COND:%.*]], i32 [[V:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: br i1 [[COND]], label %[[BB:.*]], label %[[BB1:.*]]
+; CHECK: [[BB]]:
+; CHECK-NEXT: [[ADD_1:%.*]] = add i32 [[V]], 42
+; CHECK-NEXT: store i32 [[ADD_1]], ptr @G, align 4
+; CHECK-NEXT: br label %[[RETURN:.*]]
+; CHECK: [[BB1]]:
+; CHECK-NEXT: [[DOTPRE:%.*]] = add nuw nsw i32 [[V]], 42
+; CHECK-NEXT: br label %[[RETURN]]
+; CHECK: [[RETURN]]:
+; CHECK-NEXT: [[ADD_2_PRE_PHI:%.*]] = phi i32 [ [[DOTPRE]], %[[BB1]] ], [ [[ADD_1]], %[[BB]] ]
+; CHECK-NEXT: store i32 [[ADD_2_PRE_PHI]], ptr @H, align 4
+; CHECK-NEXT: ret i32 0
+;
entry:
- br i1 %cond, label %bb, label %bb1
+ br i1 %cond, label %bb, label %bb1
bb:
- %add.1 = add i32 %v, 42
-; CHECK: %add.1 = add i32 %v, 42
- store i32 %add.1, ptr @G, align 4
- br label %return
+ %add.1 = add i32 %v, 42
+ store i32 %add.1, ptr @G, align 4
+ br label %return
bb1:
-; CHECK: %.pre = add nuw nsw i32 %v, 42
- br label %return
+ br label %return
return:
-; CHECK: %add.2.pre-phi = phi i32 [ %.pre, %bb1 ], [ %add.1, %bb ]
-; CHECK-NEXT: store i32 %add.2.pre-phi, ptr @H, align 4
-; CHECK-NEXT: ret i32 0
- %add.2 = add nuw nsw i32 %v, 42
- store i32 %add.2, ptr @H, align 4
- ret i32 0
+ %add.2 = add nuw nsw i32 %v, 42
+ store i32 %add.2, ptr @H, align 4
+ ret i32 0
}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; MDEP: {{.*}}
+; MSSA: {{.*}}
diff --git a/llvm/test/Transforms/GVN/PRE/pre-single-pred.ll b/llvm/test/Transforms/GVN/PRE/pre-single-pred.ll
index 7342925..74bc6bc 100644
--- a/llvm/test/Transforms/GVN/PRE/pre-single-pred.ll
+++ b/llvm/test/Transforms/GVN/PRE/pre-single-pred.ll
@@ -1,4 +1,6 @@
-; RUN: opt < %s -passes=gvn -enable-load-pre -S | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt < %s -passes=gvn -enable-load-pre -S | FileCheck %s --check-prefixes=CHECK,MDEP
+; RUN: opt < %s -passes='gvn<memoryssa>' -enable-load-pre -S | FileCheck %s --check-prefixes=CHECK,MSSA
; RUN: opt < %s -passes="gvn<load-pre>" -enable-load-pre=false -S | FileCheck %s
; This testcase assumed we'll PRE the load into %for.cond, but we don't actually
; verify that doing so is safe. If there didn't _happen_ to be a load in
@@ -12,35 +14,85 @@
@p = external global i32
define i32 @f(i32 %n) nounwind {
+; MDEP-LABEL: define i32 @f(
+; MDEP-SAME: i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
+; MDEP-NEXT: [[ENTRY:.*]]:
+; MDEP-NEXT: br label %[[FOR_COND:.*]]
+; MDEP: [[FOR_COND]]:
+; MDEP-NEXT: [[I_0:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[INDVAR_NEXT:%.*]], %[[FOR_INC:.*]] ]
+; MDEP-NEXT: [[CMP:%.*]] = icmp slt i32 [[I_0]], [[N]]
+; MDEP-NEXT: br i1 [[CMP]], label %[[FOR_BODY:.*]], label %[[FOR_COND_FOR_END_CRIT_EDGE:.*]]
+; MDEP: [[FOR_COND_FOR_END_CRIT_EDGE]]:
+; MDEP-NEXT: [[TMP9_PRE:%.*]] = load i32, ptr @p, align 4
+; MDEP-NEXT: br label %[[FOR_END:.*]]
+; MDEP: [[FOR_BODY]]:
+; MDEP-NEXT: [[TMP3:%.*]] = load i32, ptr @p, align 4
+; MDEP-NEXT: [[DEC:%.*]] = add i32 [[TMP3]], -1
+; MDEP-NEXT: store i32 [[DEC]], ptr @p, align 4
+; MDEP-NEXT: [[CMP6:%.*]] = icmp slt i32 [[DEC]], 0
+; MDEP-NEXT: br i1 [[CMP6]], label %[[FOR_BODY_FOR_END_CRIT_EDGE:.*]], label %[[FOR_INC]]
+; MDEP: [[FOR_BODY_FOR_END_CRIT_EDGE]]:
+; MDEP-NEXT: br label %[[FOR_END]]
+; MDEP: [[FOR_INC]]:
+; MDEP-NEXT: [[INDVAR_NEXT]] = add i32 [[I_0]], 1
+; MDEP-NEXT: br label %[[FOR_COND]]
+; MDEP: [[FOR_END]]:
+; MDEP-NEXT: [[TMP9:%.*]] = phi i32 [ [[DEC]], %[[FOR_BODY_FOR_END_CRIT_EDGE]] ], [ [[TMP9_PRE]], %[[FOR_COND_FOR_END_CRIT_EDGE]] ]
+; MDEP-NEXT: ret i32 [[TMP9]]
+;
+; MSSA-LABEL: define i32 @f(
+; MSSA-SAME: i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
+; MSSA-NEXT: [[ENTRY:.*]]:
+; MSSA-NEXT: br label %[[FOR_COND:.*]]
+; MSSA: [[FOR_COND]]:
+; MSSA-NEXT: [[I_0:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[INDVAR_NEXT:%.*]], %[[FOR_INC:.*]] ]
+; MSSA-NEXT: [[CMP:%.*]] = icmp slt i32 [[I_0]], [[N]]
+; MSSA-NEXT: br i1 [[CMP]], label %[[FOR_BODY:.*]], label %[[FOR_COND_FOR_END_CRIT_EDGE:.*]]
+; MSSA: [[FOR_COND_FOR_END_CRIT_EDGE]]:
+; MSSA-NEXT: br label %[[FOR_END:.*]]
+; MSSA: [[FOR_BODY]]:
+; MSSA-NEXT: [[TMP3:%.*]] = load i32, ptr @p, align 4
+; MSSA-NEXT: [[DEC:%.*]] = add i32 [[TMP3]], -1
+; MSSA-NEXT: store i32 [[DEC]], ptr @p, align 4
+; MSSA-NEXT: [[CMP6:%.*]] = icmp slt i32 [[DEC]], 0
+; MSSA-NEXT: br i1 [[CMP6]], label %[[FOR_BODY_FOR_END_CRIT_EDGE:.*]], label %[[FOR_INC]]
+; MSSA: [[FOR_BODY_FOR_END_CRIT_EDGE]]:
+; MSSA-NEXT: br label %[[FOR_END]]
+; MSSA: [[FOR_INC]]:
+; MSSA-NEXT: [[INDVAR_NEXT]] = add i32 [[I_0]], 1
+; MSSA-NEXT: br label %[[FOR_COND]]
+; MSSA: [[FOR_END]]:
+; MSSA-NEXT: [[TMP9:%.*]] = load i32, ptr @p, align 4
+; MSSA-NEXT: ret i32 [[TMP9]]
+;
entry:
- br label %for.cond
+ br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %i.0 = phi i32 [ 0, %entry ], [ %indvar.next, %for.inc ] ; <i32> [#uses=2]
- %cmp = icmp slt i32 %i.0, %n ; <i1> [#uses=1]
- br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge
+ %i.0 = phi i32 [ 0, %entry ], [ %indvar.next, %for.inc ] ; <i32> [#uses=2]
+ %cmp = icmp slt i32 %i.0, %n ; <i1> [#uses=1]
+ br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge
for.cond.for.end_crit_edge: ; preds = %for.cond
- br label %for.end
+ br label %for.end
-; CHECK: for.body:
-; CHECK-NEXT: %tmp3 = load i32, ptr @p
for.body: ; preds = %for.cond
- %tmp3 = load i32, ptr @p ; <i32> [#uses=1]
- %dec = add i32 %tmp3, -1 ; <i32> [#uses=2]
- store i32 %dec, ptr @p
- %cmp6 = icmp slt i32 %dec, 0 ; <i1> [#uses=1]
- br i1 %cmp6, label %for.body.for.end_crit_edge, label %for.inc
+ %tmp3 = load i32, ptr @p ; <i32> [#uses=1]
+ %dec = add i32 %tmp3, -1 ; <i32> [#uses=2]
+ store i32 %dec, ptr @p
+ %cmp6 = icmp slt i32 %dec, 0 ; <i1> [#uses=1]
+ br i1 %cmp6, label %for.body.for.end_crit_edge, label %for.inc
-; CHECK: for.body.for.end_crit_edge:
for.body.for.end_crit_edge: ; preds = %for.body
- br label %for.end
+ br label %for.end
for.inc: ; preds = %for.body
- %indvar.next = add i32 %i.0, 1 ; <i32> [#uses=1]
- br label %for.cond
+ %indvar.next = add i32 %i.0, 1 ; <i32> [#uses=1]
+ br label %for.cond
for.end: ; preds = %for.body.for.end_crit_edge, %for.cond.for.end_crit_edge
- %tmp9 = load i32, ptr @p ; <i32> [#uses=1]
- ret i32 %tmp9
+ %tmp9 = load i32, ptr @p ; <i32> [#uses=1]
+ ret i32 %tmp9
}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
diff --git a/llvm/test/Transforms/GVN/PRE/preserve-tbaa.ll b/llvm/test/Transforms/GVN/PRE/preserve-tbaa.ll
index 3df63be..abbb17f 100644
--- a/llvm/test/Transforms/GVN/PRE/preserve-tbaa.ll
+++ b/llvm/test/Transforms/GVN/PRE/preserve-tbaa.ll
@@ -1,13 +1,45 @@
-; RUN: opt -passes=gvn -S < %s | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -passes=gvn -S < %s | FileCheck %s --check-prefixes=CHECK,MDEP
+; RUN: opt -passes='gvn<memoryssa>' -S < %s | FileCheck %s --check-prefixes=CHECK,MSSA
target datalayout = "e-p:64:64:64"
; GVN should preserve the TBAA tag on loads when doing PRE.
-; CHECK-LABEL: @test(
-; CHECK: %tmp33.pre = load i16, ptr %P, align 2, !tbaa !0
-; CHECK: br label %for.body
define void @test(ptr %P, ptr %Q, i1 %arg) nounwind {
+; MDEP-LABEL: define void @test(
+; MDEP-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], i1 [[ARG:%.*]]) #[[ATTR0:[0-9]+]] {
+; MDEP-NEXT: [[ENTRY:.*:]]
+; MDEP-NEXT: br i1 [[ARG]], label %[[BB_NPH:.*]], label %[[FOR_END:.*]]
+; MDEP: [[BB_NPH]]:
+; MDEP-NEXT: [[TMP33_PRE:%.*]] = load i16, ptr [[P]], align 2, !tbaa [[TBAA0:![0-9]+]]
+; MDEP-NEXT: br label %[[FOR_BODY:.*]]
+; MDEP: [[FOR_BODY]]:
+; MDEP-NEXT: [[TMP33:%.*]] = phi i16 [ 0, %[[FOR_BODY]] ], [ [[TMP33_PRE]], %[[BB_NPH]] ]
+; MDEP-NEXT: store i16 [[TMP33]], ptr [[Q]], align 2
+; MDEP-NEXT: store i16 0, ptr [[P]], align 2, !tbaa [[TBAA0]]
+; MDEP-NEXT: br i1 false, label %[[FOR_BODY_FOR_END_CRIT_EDGE:.*]], label %[[FOR_BODY]]
+; MDEP: [[FOR_BODY_FOR_END_CRIT_EDGE]]:
+; MDEP-NEXT: br label %[[FOR_END]]
+; MDEP: [[FOR_END]]:
+; MDEP-NEXT: ret void
+;
+; MSSA-LABEL: define void @test(
+; MSSA-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], i1 [[ARG:%.*]]) #[[ATTR0:[0-9]+]] {
+; MSSA-NEXT: [[ENTRY:.*:]]
+; MSSA-NEXT: br i1 [[ARG]], label %[[BB_NPH:.*]], label %[[FOR_END:.*]]
+; MSSA: [[BB_NPH]]:
+; MSSA-NEXT: br label %[[FOR_BODY:.*]]
+; MSSA: [[FOR_BODY]]:
+; MSSA-NEXT: [[TMP33:%.*]] = load i16, ptr [[P]], align 2, !tbaa [[TBAA0:![0-9]+]]
+; MSSA-NEXT: store i16 [[TMP33]], ptr [[Q]], align 2
+; MSSA-NEXT: store i16 0, ptr [[P]], align 2, !tbaa [[TBAA0]]
+; MSSA-NEXT: br i1 false, label %[[FOR_BODY_FOR_END_CRIT_EDGE:.*]], label %[[FOR_BODY]]
+; MSSA: [[FOR_BODY_FOR_END_CRIT_EDGE]]:
+; MSSA-NEXT: br label %[[FOR_END]]
+; MSSA: [[FOR_END]]:
+; MSSA-NEXT: ret void
+;
entry:
br i1 %arg, label %bb.nph, label %for.end
@@ -29,3 +61,16 @@ for.end: ; preds = %for.body, %entry
!1 = !{!"omnipotent char", !2}
!2 = !{!"Simple C/C++ TBAA"}
!3 = !{!"short", !1}
+;.
+; MDEP: [[TBAA0]] = !{[[META1:![0-9]+]], [[META1]], i64 0}
+; MDEP: [[META1]] = !{!"short", [[META2:![0-9]+]]}
+; MDEP: [[META2]] = !{!"omnipotent char", [[META3:![0-9]+]]}
+; MDEP: [[META3]] = !{!"Simple C/C++ TBAA"}
+;.
+; MSSA: [[TBAA0]] = !{[[META1:![0-9]+]], [[META1]], i64 0}
+; MSSA: [[META1]] = !{!"short", [[META2:![0-9]+]]}
+; MSSA: [[META2]] = !{!"omnipotent char", [[META3:![0-9]+]]}
+; MSSA: [[META3]] = !{!"Simple C/C++ TBAA"}
+;.
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
diff --git a/llvm/test/Transforms/GVN/cond_br2.ll b/llvm/test/Transforms/GVN/cond_br2.ll
index 4202467..ff80328 100644
--- a/llvm/test/Transforms/GVN/cond_br2.ll
+++ b/llvm/test/Transforms/GVN/cond_br2.ll
@@ -17,7 +17,7 @@ define void @_Z4testv() #0 personality ptr @__gxx_personality_v0 {
entry:
%sv = alloca %"class.llvm::SmallVector", align 16
- call void @llvm.lifetime.start.p0(i64 64, ptr %sv) #1
+ call void @llvm.lifetime.start.p0(ptr %sv) #1
%FirstEl.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SmallVector", ptr %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 3
store ptr %FirstEl.i.i.i.i.i.i, ptr %sv, align 16, !tbaa !4
%EndX.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SmallVector", ptr %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 1
@@ -86,7 +86,7 @@ if.then.i.i.i20: ; preds = %invoke.cont3
br label %_ZN4llvm11SmallVectorIiLj8EED1Ev.exit21
_ZN4llvm11SmallVectorIiLj8EED1Ev.exit21: ; preds = %invoke.cont3, %if.then.i.i.i20
- call void @llvm.lifetime.end.p0(i64 64, ptr %sv) #1
+ call void @llvm.lifetime.end.p0(ptr %sv) #1
ret void
lpad: ; preds = %if.end.i14, %if.end.i, %invoke.cont2
@@ -105,14 +105,14 @@ eh.resume: ; preds = %if.then.i.i.i, %lpa
}
; Function Attrs: nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare i32 @__gxx_personality_v0(...)
declare void @_Z1gRN4llvm11SmallVectorIiLj8EEE(ptr) #2
; Function Attrs: nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
declare void @_ZN4llvm15SmallVectorBase8grow_podEmm(ptr, i64, i64) #2
diff --git a/llvm/test/Transforms/GVN/lifetime-simple.ll b/llvm/test/Transforms/GVN/lifetime-simple.ll
index 89ca127..bd35052 100644
--- a/llvm/test/Transforms/GVN/lifetime-simple.ll
+++ b/llvm/test/Transforms/GVN/lifetime-simple.ll
@@ -6,18 +6,18 @@ define i8 @test() nounwind {
; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[P:%.*]] = alloca [32 x i8], align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[P]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[P]])
; CHECK-NEXT: store i8 1, ptr [[P]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[P]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[P]])
; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[P]], align 1
; CHECK-NEXT: ret i8 [[TMP0]]
;
entry:
%P = alloca [32 x i8]
- call void @llvm.lifetime.start.p0(i64 32, ptr %P)
+ call void @llvm.lifetime.start.p0(ptr %P)
%0 = load i8, ptr %P
store i8 1, ptr %P
- call void @llvm.lifetime.end.p0(i64 32, ptr %P)
+ call void @llvm.lifetime.end.p0(ptr %P)
%1 = load i8, ptr %P
ret i8 %1
}
@@ -28,17 +28,17 @@ define void @assume_eq_arg(ptr %arg) {
; CHECK-NEXT: [[ALLOCA:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[ALLOCA]], [[ARG]]
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[ALLOCA]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ALLOCA]])
; CHECK-NEXT: store volatile i32 0, ptr [[ALLOCA]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[ALLOCA]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ALLOCA]])
; CHECK-NEXT: ret void
;
%alloca = alloca i32
%cmp = icmp eq ptr %alloca, %arg
call void @llvm.assume(i1 %cmp)
- call void @llvm.lifetime.start.p0(i64 4, ptr %alloca)
+ call void @llvm.lifetime.start.p0(ptr %alloca)
store volatile i32 0, ptr %alloca
- call void @llvm.lifetime.end.p0(i64 4, ptr %alloca)
+ call void @llvm.lifetime.end.p0(ptr %alloca)
ret void
}
@@ -47,17 +47,17 @@ define void @assume_eq_null() {
; CHECK-NEXT: [[ALLOCA:%.*]] = alloca i32, align 4, addrspace(1)
; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr addrspace(1) [[ALLOCA]], null
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p1(i64 4, ptr addrspace(1) [[ALLOCA]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p1(ptr addrspace(1) [[ALLOCA]])
; CHECK-NEXT: store volatile i32 0, ptr addrspace(1) null, align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p1(i64 4, ptr addrspace(1) [[ALLOCA]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p1(ptr addrspace(1) [[ALLOCA]])
; CHECK-NEXT: ret void
;
%alloca = alloca i32, addrspace(1)
%cmp = icmp eq ptr addrspace(1) %alloca, null
call void @llvm.assume(i1 %cmp)
- call void @llvm.lifetime.start.p1(i64 4, ptr addrspace(1) %alloca)
+ call void @llvm.lifetime.start.p1(ptr addrspace(1) %alloca)
store volatile i32 0, ptr addrspace(1) %alloca
- call void @llvm.lifetime.end.p1(i64 4, ptr addrspace(1) %alloca)
+ call void @llvm.lifetime.end.p1(ptr addrspace(1) %alloca)
ret void
}
@@ -67,9 +67,9 @@ define void @dom_eq_null() {
; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr addrspace(1) [[ALLOCA]], null
; CHECK-NEXT: br i1 [[CMP]], label %[[IF:.*]], label %[[ELSE:.*]]
; CHECK: [[IF]]:
-; CHECK-NEXT: call void @llvm.lifetime.start.p1(i64 4, ptr addrspace(1) [[ALLOCA]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p1(ptr addrspace(1) [[ALLOCA]])
; CHECK-NEXT: store volatile i32 0, ptr addrspace(1) null, align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p1(i64 4, ptr addrspace(1) [[ALLOCA]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p1(ptr addrspace(1) [[ALLOCA]])
; CHECK-NEXT: ret void
; CHECK: [[ELSE]]:
; CHECK-NEXT: ret void
@@ -79,14 +79,14 @@ define void @dom_eq_null() {
br i1 %cmp, label %if, label %else
if:
- call void @llvm.lifetime.start.p1(i64 4, ptr addrspace(1) %alloca)
+ call void @llvm.lifetime.start.p1(ptr addrspace(1) %alloca)
store volatile i32 0, ptr addrspace(1) %alloca
- call void @llvm.lifetime.end.p1(i64 4, ptr addrspace(1) %alloca)
+ call void @llvm.lifetime.end.p1(ptr addrspace(1) %alloca)
ret void
else:
ret void
}
-declare void @llvm.lifetime.start.p0(i64 %S, ptr nocapture %P) readonly
-declare void @llvm.lifetime.end.p0(i64 %S, ptr nocapture %P)
+declare void @llvm.lifetime.start.p0(ptr nocapture %P) readonly
+declare void @llvm.lifetime.end.p0(ptr nocapture %P)
diff --git a/llvm/test/Transforms/GVN/opt-remarks.ll b/llvm/test/Transforms/GVN/opt-remarks.ll
index 87cd54d..a5c3cb5c 100644
--- a/llvm/test/Transforms/GVN/opt-remarks.ll
+++ b/llvm/test/Transforms/GVN/opt-remarks.ll
@@ -109,9 +109,9 @@ entry:
define i8 @lifetime_end(i8 %val) {
%p = alloca [32 x i8]
- call void @llvm.lifetime.start.p0(i64 32, ptr %p)
+ call void @llvm.lifetime.start.p0(ptr %p)
store i8 %val, ptr %p
- call void @llvm.lifetime.end.p0(i64 32, ptr %p)
+ call void @llvm.lifetime.end.p0(ptr %p)
%1 = load i8, ptr %p
ret i8 %1
}
diff --git a/llvm/test/Transforms/GVN/vscale.ll b/llvm/test/Transforms/GVN/vscale.ll
index 5d6c559..b358df5 100644
--- a/llvm/test/Transforms/GVN/vscale.ll
+++ b/llvm/test/Transforms/GVN/vscale.ll
@@ -696,7 +696,7 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 1
; MDEP-LABEL: @bigexample(
; MDEP-NEXT: entry:
; MDEP-NEXT: [[REF_TMP:%.*]] = alloca { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> }, align 16
-; MDEP-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr nonnull [[REF_TMP]])
+; MDEP-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[REF_TMP]])
; MDEP-NEXT: [[A_ELT:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[A:%.*]], 0
; MDEP-NEXT: store <vscale x 4 x i32> [[A_ELT]], ptr [[REF_TMP]], align 16
; MDEP-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
@@ -720,13 +720,13 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 1
; MDEP-NEXT: [[TMP9:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP7]], <vscale x 16 x i8> [[TMP8]], 2
; MDEP-NEXT: [[TMP10:%.*]] = bitcast <vscale x 4 x i32> [[A_ELT6]] to <vscale x 16 x i8>
; MDEP-NEXT: [[TMP11:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP9]], <vscale x 16 x i8> [[TMP10]], 3
-; MDEP-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr nonnull [[REF_TMP]])
+; MDEP-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[REF_TMP]])
; MDEP-NEXT: ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP11]]
;
; MSSA-LABEL: @bigexample(
; MSSA-NEXT: entry:
; MSSA-NEXT: [[REF_TMP:%.*]] = alloca { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> }, align 16
-; MSSA-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr nonnull [[REF_TMP]])
+; MSSA-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[REF_TMP]])
; MSSA-NEXT: [[A_ELT:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[A:%.*]], 0
; MSSA-NEXT: store <vscale x 4 x i32> [[A_ELT]], ptr [[REF_TMP]], align 16
; MSSA-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
@@ -750,12 +750,12 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 1
; MSSA-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP5]], <vscale x 16 x i8> [[DOTUNPACK10]], 2
; MSSA-NEXT: [[DOTUNPACK12:%.*]] = load <vscale x 16 x i8>, ptr [[REF_TMP_REPACK5]], align 16
; MSSA-NEXT: [[TMP7:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP6]], <vscale x 16 x i8> [[DOTUNPACK12]], 3
-; MSSA-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr nonnull [[REF_TMP]])
+; MSSA-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[REF_TMP]])
; MSSA-NEXT: ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP7]]
;
entry:
%ref.tmp = alloca { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> }, align 16
- call void @llvm.lifetime.start.p0(i64 -1, ptr nonnull %ref.tmp)
+ call void @llvm.lifetime.start.p0(ptr nonnull %ref.tmp)
%a.elt = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %a, 0
store <vscale x 4 x i32> %a.elt, ptr %ref.tmp, align 16
%0 = call i64 @llvm.vscale.i64()
@@ -790,7 +790,7 @@ entry:
%.elt11 = getelementptr inbounds i8, ptr %ref.tmp, i64 %14
%.unpack12 = load <vscale x 16 x i8>, ptr %.elt11, align 16
%15 = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %12, <vscale x 16 x i8> %.unpack12, 3
- call void @llvm.lifetime.end.p0(i64 -1, ptr nonnull %ref.tmp)
+ call void @llvm.lifetime.end.p0(ptr nonnull %ref.tmp)
ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %15
}
diff --git a/llvm/test/Transforms/GVNHoist/pr29034.ll b/llvm/test/Transforms/GVNHoist/pr29034.ll
index f5378ea..a5294c5 100644
--- a/llvm/test/Transforms/GVNHoist/pr29034.ll
+++ b/llvm/test/Transforms/GVNHoist/pr29034.ll
@@ -37,7 +37,7 @@
define void @music_task(ptr nocapture readnone %p) local_unnamed_addr {
entry:
%mapi = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr %mapi)
+ call void @llvm.lifetime.start.p0(ptr %mapi)
store ptr null, ptr %mapi, align 8, !tbaa !1
%call = call i32 @music_decoder_init(ptr nonnull %mapi)
br label %while.cond
@@ -99,7 +99,7 @@ while.cond2.backedge: ; preds = %sw.default, %sw.bb1
br label %while.cond2
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare i32 @music_decoder_init(ptr)
declare i32 @music_play_api(ptr, i32, i32, i32, ptr)
declare i32 @printf(ptr nocapture readonly, ...)
diff --git a/llvm/test/Transforms/GVNSink/lifetime.ll b/llvm/test/Transforms/GVNSink/lifetime.ll
index 1a8a69b..f8731e5 100644
--- a/llvm/test/Transforms/GVNSink/lifetime.ll
+++ b/llvm/test/Transforms/GVNSink/lifetime.ll
@@ -9,34 +9,34 @@ define void @test_cant_sink(i1 %c) {
; CHECK-SAME: i1 [[C:%.*]]) {
; CHECK-NEXT: [[A:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[B:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[A]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B]])
; CHECK-NEXT: br i1 [[C]], label %[[IF:.*]], label %[[ELSE:.*]]
; CHECK: [[IF]]:
; CHECK-NEXT: store i64 1, ptr [[A]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
; CHECK-NEXT: br label %[[JOIN:.*]]
; CHECK: [[ELSE]]:
; CHECK-NEXT: store i64 1, ptr [[B]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B]])
; CHECK-NEXT: br label %[[JOIN]]
; CHECK: [[JOIN]]:
; CHECK-NEXT: ret void
;
%a = alloca i8
%b = alloca i8
- call void @llvm.lifetime.start(i64 1, ptr %a)
- call void @llvm.lifetime.start(i64 1, ptr %b)
+ call void @llvm.lifetime.start(ptr %a)
+ call void @llvm.lifetime.start(ptr %b)
br i1 %c, label %if, label %else
if:
store i64 1, ptr %a
- call void @llvm.lifetime.end(i64 1, ptr %a)
+ call void @llvm.lifetime.end(ptr %a)
br label %join
else:
store i64 1, ptr %b
- call void @llvm.lifetime.end(i64 1, ptr %b)
+ call void @llvm.lifetime.end(ptr %b)
br label %join
join:
@@ -47,7 +47,7 @@ define void @test_can_sink(i1 %c) {
; CHECK-LABEL: define void @test_can_sink(
; CHECK-SAME: i1 [[C:%.*]]) {
; CHECK-NEXT: [[A:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
; CHECK-NEXT: br i1 [[C]], label %[[IF:.*]], label %[[ELSE:.*]]
; CHECK: [[IF]]:
; CHECK-NEXT: br label %[[JOIN:.*]]
@@ -55,21 +55,21 @@ define void @test_can_sink(i1 %c) {
; CHECK-NEXT: br label %[[JOIN]]
; CHECK: [[JOIN]]:
; CHECK-NEXT: store i64 1, ptr [[A]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
; CHECK-NEXT: ret void
;
%a = alloca i8
- call void @llvm.lifetime.start(i64 1, ptr %a)
+ call void @llvm.lifetime.start(ptr %a)
br i1 %c, label %if, label %else
if:
store i64 1, ptr %a
- call void @llvm.lifetime.end(i64 1, ptr %a)
+ call void @llvm.lifetime.end(ptr %a)
br label %join
else:
store i64 1, ptr %a
- call void @llvm.lifetime.end(i64 1, ptr %a)
+ call void @llvm.lifetime.end(ptr %a)
br label %join
join:
diff --git a/llvm/test/Transforms/GlobalOpt/dead-store-status.ll b/llvm/test/Transforms/GlobalOpt/dead-store-status.ll
index 9a8fbb8..7cb3a96 100644
--- a/llvm/test/Transforms/GlobalOpt/dead-store-status.ll
+++ b/llvm/test/Transforms/GlobalOpt/dead-store-status.ll
@@ -24,17 +24,17 @@ entry:
define i16 @bar() local_unnamed_addr #1 {
entry:
%local2 = alloca [1 x i16], align 1
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %local2)
+ call void @llvm.lifetime.start.p0(ptr nonnull %local2)
store ptr %local2, ptr @global, align 1
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %local2)
+ call void @llvm.lifetime.end.p0(ptr nonnull %local2)
ret i16 undef
}
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.start.p0(ptr nocapture) #2
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.end.p0(ptr nocapture) #2
attributes #0 = { nofree noinline norecurse nounwind writeonly }
attributes #1 = { noinline nounwind writeonly }
diff --git a/llvm/test/Transforms/HotColdSplit/lifetime-markers-on-inputs-1.ll b/llvm/test/Transforms/HotColdSplit/lifetime-markers-on-inputs-1.ll
index e5bab0c..28782d5 100644
--- a/llvm/test/Transforms/HotColdSplit/lifetime-markers-on-inputs-1.ll
+++ b/llvm/test/Transforms/HotColdSplit/lifetime-markers-on-inputs-1.ll
@@ -1,8 +1,8 @@
; RUN: opt -S -passes=hotcoldsplit -hotcoldsplit-threshold=0 < %s 2>&1 | FileCheck %s
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @use(ptr)
@@ -18,17 +18,17 @@ entry:
normalPath:
; These two uses of stack slots are non-overlapping. Based on this alone,
; the stack slots could be merged.
- call void @llvm.lifetime.start.p0(i64 1, ptr %local1)
+ call void @llvm.lifetime.start.p0(ptr %local1)
call void @use(ptr %local1)
- call void @llvm.lifetime.end.p0(i64 1, ptr %local1)
- call void @llvm.lifetime.start.p0(i64 1, ptr %local2)
+ call void @llvm.lifetime.end.p0(ptr %local1)
+ call void @llvm.lifetime.start.p0(ptr %local2)
call void @use(ptr %local2)
- call void @llvm.lifetime.end.p0(i64 1, ptr %local2)
+ call void @llvm.lifetime.end.p0(ptr %local2)
ret void
; CHECK-LABEL: codeRepl:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr %local1)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr %local2)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %local1)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %local2)
; CHECK-NEXT: call i1 @foo.cold.1(ptr %local1, ptr %local2)
; CHECK-NEXT: br i1
@@ -36,19 +36,19 @@ outlinedPath:
; These two uses of stack slots are overlapping. This should prevent
; merging of stack slots. CodeExtractor must replicate the effects of
; these markers in the caller to inhibit stack coloring.
- call void @llvm.lifetime.start.p0(i64 1, ptr %local1)
- call void @llvm.lifetime.start.p0(i64 1, ptr %local2)
+ call void @llvm.lifetime.start.p0(ptr %local1)
+ call void @llvm.lifetime.start.p0(ptr %local2)
call void @cold_use2(ptr %local1, ptr %local2)
- call void @llvm.lifetime.end.p0(i64 1, ptr %local1)
- call void @llvm.lifetime.end.p0(i64 1, ptr %local2)
+ call void @llvm.lifetime.end.p0(ptr %local1)
+ call void @llvm.lifetime.end.p0(ptr %local2)
br i1 undef, label %outlinedPath2, label %outlinedPathExit
outlinedPath2:
; These extra lifetime markers are used to test that we emit only one
; pair of guard markers in the caller per memory object.
- call void @llvm.lifetime.start.p0(i64 1, ptr %local2)
+ call void @llvm.lifetime.start.p0(ptr %local2)
call void @use(ptr %local2)
- call void @llvm.lifetime.end.p0(i64 1, ptr %local2)
+ call void @llvm.lifetime.end.p0(ptr %local2)
ret void
outlinedPathExit:
diff --git a/llvm/test/Transforms/HotColdSplit/lifetime-markers-on-inputs-2.ll b/llvm/test/Transforms/HotColdSplit/lifetime-markers-on-inputs-2.ll
index e42db78..da7a9b8 100644
--- a/llvm/test/Transforms/HotColdSplit/lifetime-markers-on-inputs-2.ll
+++ b/llvm/test/Transforms/HotColdSplit/lifetime-markers-on-inputs-2.ll
@@ -1,9 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -passes=hotcoldsplit -hotcoldsplit-threshold=0 < %s 2>&1 | FileCheck %s
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @cold_use(ptr) cold
@@ -40,13 +40,13 @@ define void @only_lifetime_start_is_cold(i1 %arg) {
; CHECK-NEXT: [[LOCAL1:%.*]] = alloca i256, align 8
; CHECK-NEXT: br i1 [[ARG:%.*]], label [[CODEREPL:%.*]], label [[NO_EXTRACT1:%.*]]
; CHECK: codeRepl:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[LOCAL1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[LOCAL1]])
; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @only_lifetime_start_is_cold.cold.1(ptr [[LOCAL1]], i1 [[ARG]]) #[[ATTR3:[0-9]+]]
; CHECK-NEXT: br i1 [[TARGETBLOCK]], label [[NO_EXTRACT1]], label [[EXIT:%.*]]
; CHECK: no-extract1:
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[LOCAL1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[LOCAL1]])
; CHECK-NEXT: ret void
;
entry:
@@ -55,7 +55,7 @@ entry:
extract1:
; lt.start
- call void @llvm.lifetime.start.p0(i64 1, ptr %local1)
+ call void @llvm.lifetime.start.p0(ptr %local1)
call void @cold_use(ptr %local1)
br i1 %arg, label %extract2, label %no-extract1
@@ -67,7 +67,7 @@ no-extract1:
exit:
; lt.end
- call void @llvm.lifetime.end.p0(i64 1, ptr %local1)
+ call void @llvm.lifetime.end.p0(ptr %local1)
ret void
}
@@ -96,10 +96,10 @@ define void @only_lifetime_end_is_cold(i1 %arg) {
; CHECK-LABEL: @only_lifetime_end_is_cold(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[LOCAL1:%.*]] = alloca i256, align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[LOCAL1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[LOCAL1]])
; CHECK-NEXT: br i1 [[ARG:%.*]], label [[NO_EXTRACT1:%.*]], label [[CODEREPL:%.*]]
; CHECK: no-extract1:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[LOCAL1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[LOCAL1]])
; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: codeRepl:
; CHECK-NEXT: call void @only_lifetime_end_is_cold.cold.1(ptr [[LOCAL1]]) #[[ATTR3]]
@@ -110,18 +110,18 @@ define void @only_lifetime_end_is_cold(i1 %arg) {
entry:
; lt.start
%local1 = alloca i256
- call void @llvm.lifetime.start.p0(i64 1, ptr %local1)
+ call void @llvm.lifetime.start.p0(ptr %local1)
br i1 %arg, label %no-extract1, label %extract1
no-extract1:
; lt.end
- call void @llvm.lifetime.end.p0(i64 1, ptr %local1)
+ call void @llvm.lifetime.end.p0(ptr %local1)
br label %exit
extract1:
; lt.end
call void @cold_use(ptr %local1)
- call void @llvm.lifetime.end.p0(i64 1, ptr %local1)
+ call void @llvm.lifetime.end.p0(ptr %local1)
br label %exit
exit:
@@ -134,7 +134,7 @@ define void @do_not_lift_lifetime_end(i1 %arg) {
; CHECK-LABEL: @do_not_lift_lifetime_end(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[LOCAL1:%.*]] = alloca i256, align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[LOCAL1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[LOCAL1]])
; CHECK-NEXT: br label [[HEADER:%.*]]
; CHECK: header:
; CHECK-NEXT: call void @use(ptr [[LOCAL1]])
@@ -148,7 +148,7 @@ define void @do_not_lift_lifetime_end(i1 %arg) {
entry:
; lt.start
%local1 = alloca i256
- call void @llvm.lifetime.start.p0(i64 1, ptr %local1)
+ call void @llvm.lifetime.start.p0(ptr %local1)
br label %header
header:
@@ -167,7 +167,7 @@ extract2:
extract3:
; lt.end
- call void @llvm.lifetime.end.p0(i64 1, ptr %local1)
+ call void @llvm.lifetime.end.p0(ptr %local1)
br label %exit
exit:
diff --git a/llvm/test/Transforms/HotColdSplit/lifetime-markers-on-inputs-3.ll b/llvm/test/Transforms/HotColdSplit/lifetime-markers-on-inputs-3.ll
index 26faaa3..b453c61 100644
--- a/llvm/test/Transforms/HotColdSplit/lifetime-markers-on-inputs-3.ll
+++ b/llvm/test/Transforms/HotColdSplit/lifetime-markers-on-inputs-3.ll
@@ -3,9 +3,9 @@
%type1 = type opaque
%type2 = type opaque
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @use(ptr, ptr)
@@ -23,16 +23,16 @@ normalPath:
ret void
; CHECK-LABEL: codeRepl:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr %local1)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr %local2)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %local1)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %local2)
; CHECK-NEXT: call void @foo.cold.1(ptr %local1, ptr %local2
outlinedPath:
- call void @llvm.lifetime.start.p0(i64 1, ptr %local1)
- call void @llvm.lifetime.start.p0(i64 1, ptr %local2)
+ call void @llvm.lifetime.start.p0(ptr %local1)
+ call void @llvm.lifetime.start.p0(ptr %local2)
call void @use2(ptr %local1, ptr %local2)
- call void @llvm.lifetime.end.p0(i64 1, ptr %local1)
- call void @llvm.lifetime.end.p0(i64 1, ptr %local2)
+ call void @llvm.lifetime.end.p0(ptr %local1)
+ call void @llvm.lifetime.end.p0(ptr %local2)
br label %outlinedPathExit
outlinedPathExit:
diff --git a/llvm/test/Transforms/HotColdSplit/sink-multiple-bitcasts-of-allocas-pr42451.ll b/llvm/test/Transforms/HotColdSplit/sink-multiple-bitcasts-of-allocas-pr42451.ll
index df7cb3c..80249fc 100644
--- a/llvm/test/Transforms/HotColdSplit/sink-multiple-bitcasts-of-allocas-pr42451.ll
+++ b/llvm/test/Transforms/HotColdSplit/sink-multiple-bitcasts-of-allocas-pr42451.ll
@@ -6,8 +6,8 @@ target triple = "x86_64-apple-macosx10.14.0"
@c = common global i32 0, align 4
@h = common global i32 0, align 4
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #0
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #0
+declare void @llvm.lifetime.start.p0(ptr nocapture) #0
+declare void @llvm.lifetime.end.p0(ptr nocapture) #0
declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) #1
declare ptr @m()
@@ -27,15 +27,15 @@ bb:
bb3: ; preds = %bb
%i4 = call ptr @m()
- call void @llvm.lifetime.start.p0(i64 20, ptr %.sroa.4.i)
- call void @llvm.lifetime.start.p0(i64 6, ptr %.sroa.5.i)
+ call void @llvm.lifetime.start.p0(ptr %.sroa.4.i)
+ call void @llvm.lifetime.start.p0(ptr %.sroa.5.i)
call void @llvm.memset.p0.i64(ptr align 2 %.sroa.4.i, i8 0, i64 20, i1 false)
call void @llvm.memset.p0.i64(ptr align 8 %.sroa.5.i, i8 0, i64 6, i1 false)
%i5 = load i32, ptr @c, align 4, !tbaa !4
%i6 = trunc i32 %i5 to i16
- call void @llvm.lifetime.end.p0(i64 20, ptr %.sroa.4.i)
- call void @llvm.lifetime.end.p0(i64 6, ptr %.sroa.5.i)
- call void @llvm.lifetime.start.p0(i64 6, ptr %.sroa.5.i)
+ call void @llvm.lifetime.end.p0(ptr %.sroa.4.i)
+ call void @llvm.lifetime.end.p0(ptr %.sroa.5.i)
+ call void @llvm.lifetime.start.p0(ptr %.sroa.5.i)
call void @llvm.memset.p0.i64(ptr align 1 %.sroa.5.i, i8 3, i64 6, i1 false)
br label %bb7
@@ -47,7 +47,7 @@ bb7: ; preds = %bb7, %bb3
br i1 %i10, label %bb7, label %l.exit
l.exit: ; preds = %bb7
- call void @llvm.lifetime.end.p0(i64 6, ptr %.sroa.5.i)
+ call void @llvm.lifetime.end.p0(ptr %.sroa.5.i)
br label %bb11
bb11: ; preds = %l.exit, %bb
diff --git a/llvm/test/Transforms/IRNormalizer/regression-convergence-tokens.ll b/llvm/test/Transforms/IRNormalizer/regression-convergence-tokens.ll
index 88eff97..0c2db4a 100644
--- a/llvm/test/Transforms/IRNormalizer/regression-convergence-tokens.ll
+++ b/llvm/test/Transforms/IRNormalizer/regression-convergence-tokens.ll
@@ -7,9 +7,9 @@ define i32 @nested(i32 %src) #0 {
; CHECK-SAME: i32 [[A0:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[BB15160:.*:]]
; CHECK-NEXT: [[T1:%.*]] = call token @llvm.experimental.convergence.entry()
-; CHECK-NEXT: %"vl77672llvm.experimental.convergence.anchor()" = call token @llvm.experimental.convergence.anchor()
-; CHECK-NEXT: %"op68297(vl77672)" = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[A0]]) [ "convergencectrl"(token %"vl77672llvm.experimental.convergence.anchor()") ]
-; CHECK-NEXT: ret i32 %"op68297(vl77672)"
+; CHECK-NEXT: %"vl14659llvm.experimental.convergence.anchor()" = call token @llvm.experimental.convergence.anchor()
+; CHECK-NEXT: %"op15516(vl14659)" = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[A0]]) [ "convergencectrl"(token %"vl14659llvm.experimental.convergence.anchor()") ]
+; CHECK-NEXT: ret i32 %"op15516(vl14659)"
;
%t1 = call token @llvm.experimental.convergence.entry()
%t2 = call token @llvm.experimental.convergence.anchor()
diff --git a/llvm/test/Transforms/IRNormalizer/regression-infinite-loop.ll b/llvm/test/Transforms/IRNormalizer/regression-infinite-loop.ll
index 35ac0fd..b9be105 100644
--- a/llvm/test/Transforms/IRNormalizer/regression-infinite-loop.ll
+++ b/llvm/test/Transforms/IRNormalizer/regression-infinite-loop.ll
@@ -8,18 +8,18 @@ define void @test(ptr, i32) {
; CHECK-NEXT: %"vl72693([[A1]], 1)" = add i32 [[A1]], 1
; CHECK-NEXT: br label %[[BB16110:.*]]
; CHECK: [[BB16110]]:
-; CHECK-NEXT: %"op10912(op18080, vl72693)" = phi i32 [ %"op18080(op10412, op17645)", %[[BB16110]] ], [ %"vl72693([[A1]], 1)", %[[BB76951]] ]
-; CHECK-NEXT: %"op10912(op17645, vl72693)" = phi i32 [ %"op17645(op10912)70", %[[BB16110]] ], [ %"vl72693([[A1]], 1)", %[[BB76951]] ]
-; CHECK-NEXT: %"op15084(op10912)" = mul i32 %"op10912(op18080, vl72693)", undef
-; CHECK-NEXT: %"op16562(op15084)" = xor i32 -1, %"op15084(op10912)"
-; CHECK-NEXT: %"op44627(op10912, op16562)" = add i32 %"op10912(op18080, vl72693)", %"op16562(op15084)"
-; CHECK-NEXT: %"op17645(op10912)" = add i32 -1, %"op10912(op17645, vl72693)"
-; CHECK-NEXT: %"op18080(op17645, op44627)" = add i32 %"op17645(op10912)", %"op44627(op10912, op16562)"
-; CHECK-NEXT: %"op17720(op15084, op18080)" = mul i32 %"op15084(op10912)", %"op18080(op17645, op44627)"
-; CHECK-NEXT: %"op16562(op17720)" = xor i32 -1, %"op17720(op15084, op18080)"
-; CHECK-NEXT: %"op17430(op16562, op18080)" = add i32 %"op16562(op17720)", %"op18080(op17645, op44627)"
+; CHECK-NEXT: %"op81283(op18080, vl72693)" = phi i32 [ %"op18080(op10412, op18131)", %[[BB16110]] ], [ %"vl72693([[A1]], 1)", %[[BB76951]] ]
+; CHECK-NEXT: %"op81283(op18131, vl72693)" = phi i32 [ %"op18131(op81283)70", %[[BB16110]] ], [ %"vl72693([[A1]], 1)", %[[BB76951]] ]
+; CHECK-NEXT: %"op13219(op81283)" = mul i32 %"op81283(op18080, vl72693)", undef
+; CHECK-NEXT: %"op16562(op13219)" = xor i32 -1, %"op13219(op81283)"
+; CHECK-NEXT: %"op12556(op16562, op81283)" = add i32 %"op16562(op13219)", %"op81283(op18080, vl72693)"
+; CHECK-NEXT: %"op18131(op81283)" = add i32 -1, %"op81283(op18131, vl72693)"
+; CHECK-NEXT: %"op18080(op12556, op18131)" = add i32 %"op12556(op16562, op81283)", %"op18131(op81283)"
+; CHECK-NEXT: %"op17720(op13219, op18080)" = mul i32 %"op13219(op81283)", %"op18080(op12556, op18131)"
+; CHECK-NEXT: %"op16562(op17720)" = xor i32 -1, %"op17720(op13219, op18080)"
+; CHECK-NEXT: %"op17430(op16562, op18080)" = add i32 %"op16562(op17720)", %"op18080(op12556, op18131)"
; CHECK-NEXT: %"op10412(op17430)" = add i32 %"op17430(op16562, op18080)", undef
-; CHECK-NEXT: %"op17720(op10412, op17720)" = mul i32 %"op10412(op17430)", %"op17720(op15084, op18080)"
+; CHECK-NEXT: %"op17720(op10412, op17720)" = mul i32 %"op10412(op17430)", %"op17720(op13219, op18080)"
; CHECK-NEXT: %"op16562(op17720)1" = xor i32 -1, %"op17720(op10412, op17720)"
; CHECK-NEXT: %"op17430(op10412, op16562)" = add i32 %"op10412(op17430)", %"op16562(op17720)1"
; CHECK-NEXT: %"op10412(op17430)2" = add i32 %"op17430(op10412, op16562)", undef
@@ -45,11 +45,11 @@ define void @test(ptr, i32) {
; CHECK-NEXT: %"op17720(op10412, op17720)21" = mul i32 %"op10412(op17430)20", %"op17720(op10412, op17720)17"
; CHECK-NEXT: %"op16562(op17720)22" = xor i32 -1, %"op17720(op10412, op17720)21"
; CHECK-NEXT: %"op17430(op10412, op16562)23" = add i32 %"op10412(op17430)20", %"op16562(op17720)22"
-; CHECK-NEXT: %"op17645(op10912)24" = add i32 -9, %"op10912(op17645, vl72693)"
-; CHECK-NEXT: %"op18080(op17430, op17645)" = add i32 %"op17430(op10412, op16562)23", %"op17645(op10912)24"
-; CHECK-NEXT: %"op17720(op17720, op18080)" = mul i32 %"op17720(op10412, op17720)21", %"op18080(op17430, op17645)"
+; CHECK-NEXT: %"op18131(op81283)24" = add i32 -9, %"op81283(op18131, vl72693)"
+; CHECK-NEXT: %"op18080(op17430, op18131)" = add i32 %"op17430(op10412, op16562)23", %"op18131(op81283)24"
+; CHECK-NEXT: %"op17720(op17720, op18080)" = mul i32 %"op17720(op10412, op17720)21", %"op18080(op17430, op18131)"
; CHECK-NEXT: %"op16562(op17720)25" = xor i32 -1, %"op17720(op17720, op18080)"
-; CHECK-NEXT: %"op17430(op16562, op18080)26" = add i32 %"op16562(op17720)25", %"op18080(op17430, op17645)"
+; CHECK-NEXT: %"op17430(op16562, op18080)26" = add i32 %"op16562(op17720)25", %"op18080(op17430, op18131)"
; CHECK-NEXT: %"op10412(op17430)27" = add i32 %"op17430(op16562, op18080)26", undef
; CHECK-NEXT: %"op17720(op10412, op17720)28" = mul i32 %"op10412(op17430)27", %"op17720(op17720, op18080)"
; CHECK-NEXT: %"op16562(op17720)29" = xor i32 -1, %"op17720(op10412, op17720)28"
@@ -66,11 +66,11 @@ define void @test(ptr, i32) {
; CHECK-NEXT: %"op17720(op10412, op17720)40" = mul i32 %"op10412(op17430)39", %"op17720(op10412, op17720)36"
; CHECK-NEXT: %"op16562(op17720)41" = xor i32 -1, %"op17720(op10412, op17720)40"
; CHECK-NEXT: %"op17430(op10412, op16562)42" = add i32 %"op10412(op17430)39", %"op16562(op17720)41"
-; CHECK-NEXT: %"op17645(op10912)43" = add i32 -14, %"op10912(op17645, vl72693)"
-; CHECK-NEXT: %"op18080(op17430, op17645)44" = add i32 %"op17430(op10412, op16562)42", %"op17645(op10912)43"
-; CHECK-NEXT: %"op17720(op17720, op18080)45" = mul i32 %"op17720(op10412, op17720)40", %"op18080(op17430, op17645)44"
+; CHECK-NEXT: %"op18131(op81283)43" = add i32 -14, %"op81283(op18131, vl72693)"
+; CHECK-NEXT: %"op18080(op17430, op18131)44" = add i32 %"op17430(op10412, op16562)42", %"op18131(op81283)43"
+; CHECK-NEXT: %"op17720(op17720, op18080)45" = mul i32 %"op17720(op10412, op17720)40", %"op18080(op17430, op18131)44"
; CHECK-NEXT: %"op16562(op17720)46" = xor i32 -1, %"op17720(op17720, op18080)45"
-; CHECK-NEXT: %"op17430(op16562, op18080)47" = add i32 %"op16562(op17720)46", %"op18080(op17430, op17645)44"
+; CHECK-NEXT: %"op17430(op16562, op18080)47" = add i32 %"op16562(op17720)46", %"op18080(op17430, op18131)44"
; CHECK-NEXT: %"op10412(op17430)48" = add i32 %"op17430(op16562, op18080)47", undef
; CHECK-NEXT: %"op17720(op10412, op17720)49" = mul i32 %"op10412(op17430)48", %"op17720(op17720, op18080)45"
; CHECK-NEXT: %"op16562(op17720)50" = xor i32 -1, %"op17720(op10412, op17720)49"
@@ -93,9 +93,9 @@ define void @test(ptr, i32) {
; CHECK-NEXT: %"op17430(op10412, op16562)67" = add i32 %"op10412(op17430)64", %"op16562(op17720)66"
; CHECK-NEXT: %"op10412(op17430)68" = add i32 %"op17430(op10412, op16562)67", undef
; CHECK-NEXT: %"op10412(op10412)69" = add i32 %"op10412(op17430)68", undef
-; CHECK-NEXT: %"op17645(op10912)70" = add i32 -21, %"op10912(op17645, vl72693)"
-; CHECK-NEXT: %"op18080(op10412, op17645)" = add i32 %"op10412(op10412)69", %"op17645(op10912)70"
-; CHECK-NEXT: store i32 %"op18080(op10412, op17645)", ptr [[A0]], align 4
+; CHECK-NEXT: %"op18131(op81283)70" = add i32 -21, %"op81283(op18131, vl72693)"
+; CHECK-NEXT: %"op18080(op10412, op18131)" = add i32 %"op10412(op10412)69", %"op18131(op81283)70"
+; CHECK-NEXT: store i32 %"op18080(op10412, op18131)", ptr [[A0]], align 4
; CHECK-NEXT: br label %[[BB16110]]
;
bb:
diff --git a/llvm/test/Transforms/IRNormalizer/reordering-basic.ll b/llvm/test/Transforms/IRNormalizer/reordering-basic.ll
index fd09ce0..06e67e0 100644
--- a/llvm/test/Transforms/IRNormalizer/reordering-basic.ll
+++ b/llvm/test/Transforms/IRNormalizer/reordering-basic.ll
@@ -28,16 +28,16 @@ define double @baz(double %x) {
; CHECK-SAME: double [[A0:%.*]]) {
; CHECK-NEXT: [[BB76951:.*:]]
; CHECK-NEXT: [[IFCOND:%.*]] = fcmp one double [[A0]], 0.000000e+00
-; CHECK-NEXT: br i1 [[IFCOND]], label %[[BB91455:.*]], label %[[BB914551:.*]]
-; CHECK: [[BB91455]]:
-; CHECK-NEXT: %"vl15001bir()" = call double @bir()
+; CHECK-NEXT: br i1 [[IFCOND]], label %[[BB47054:.*]], label %[[BB470541:.*]]
+; CHECK: [[BB47054]]:
+; CHECK-NEXT: %"vl16994bir()" = call double @bir()
; CHECK-NEXT: br label %[[BB17254:.*]]
-; CHECK: [[BB914551]]:
-; CHECK-NEXT: %"vl69719bar()" = call double @bar()
+; CHECK: [[BB470541]]:
+; CHECK-NEXT: %"vl88592bar()" = call double @bar()
; CHECK-NEXT: br label %[[BB17254]]
; CHECK: [[BB17254]]:
-; CHECK-NEXT: %"op19734(vl15001, vl69719)" = phi double [ %"vl15001bir()", %[[BB91455]] ], [ %"vl69719bar()", %[[BB914551]] ]
-; CHECK-NEXT: ret double %"op19734(vl15001, vl69719)"
+; CHECK-NEXT: %"op16411(vl16994, vl88592)" = phi double [ %"vl16994bir()", %[[BB47054]] ], [ %"vl88592bar()", %[[BB470541]] ]
+; CHECK-NEXT: ret double %"op16411(vl16994, vl88592)"
;
entry:
%ifcond = fcmp one double %x, 0.000000e+00
diff --git a/llvm/test/Transforms/IRNormalizer/reordering.ll b/llvm/test/Transforms/IRNormalizer/reordering.ll
index 64abe8e..a3dbcb5 100644
--- a/llvm/test/Transforms/IRNormalizer/reordering.ll
+++ b/llvm/test/Transforms/IRNormalizer/reordering.ll
@@ -23,7 +23,7 @@ declare void @effecting()
; Place dead instruction(s) before the terminator
define void @call_effecting() {
; CHECK-LABEL: define void @call_effecting() {
-; CHECK-NEXT: bb15160:
+; CHECK-NEXT: bb14885:
; CHECK-NEXT: call void @effecting()
; CHECK-NEXT: [[TMP0:%.*]] = add i32 0, 1
; CHECK-NEXT: ret void
@@ -51,7 +51,7 @@ exit:
define void @dont_move_above_alloca() {
; CHECK-LABEL: define void @dont_move_above_alloca() {
-; CHECK-NEXT: bb15160:
+; CHECK-NEXT: bb14885:
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
; CHECK-NEXT: call void @effecting()
; CHECK-NEXT: ret void
@@ -65,7 +65,7 @@ declare void @effecting1()
define void @dont_reorder_effecting() {
; CHECK-LABEL: define void @dont_reorder_effecting() {
-; CHECK-NEXT: bb10075:
+; CHECK-NEXT: bb45003:
; CHECK-NEXT: call void @effecting()
; CHECK-NEXT: call void @effecting1()
; CHECK-NEXT: ret void
@@ -79,7 +79,7 @@ declare void @effecting2(i32)
define void @dont_reorder_effecting1() {
; CHECK-LABEL: define void @dont_reorder_effecting1() {
-; CHECK-NEXT: bb10075:
+; CHECK-NEXT: bb45003:
; CHECK-NEXT: [[ONE:%.*]] = add i32 1, 1
; CHECK-NEXT: call void @effecting2(i32 [[ONE]])
; CHECK-NEXT: [[TWO:%.*]] = add i32 2, 2
diff --git a/llvm/test/Transforms/IROutliner/alloca-addrspace-1.ll b/llvm/test/Transforms/IROutliner/alloca-addrspace-1.ll
index a096e6d..73db71b 100644
--- a/llvm/test/Transforms/IROutliner/alloca-addrspace-1.ll
+++ b/llvm/test/Transforms/IROutliner/alloca-addrspace-1.ll
@@ -20,14 +20,14 @@ declare i32 @llvm.foo(i32, i32)
; CHECK-NEXT: bb:
; CHECK-NEXT: [[I3_LOC:%.*]] = alloca i32, align 4, addrspace(5)
; CHECK-NEXT: [[I1_LOC:%.*]] = alloca i32, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 -1, ptr addrspace(5) [[I1_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[I1_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i32 0, i32 1, ptr addrspace(5) [[I1_LOC]])
; CHECK-NEXT: [[I1_RELOAD:%.*]] = load i32, ptr addrspace(5) [[I1_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 -1, ptr addrspace(5) [[I1_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 -1, ptr addrspace(5) [[I3_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[I1_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[I3_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i32 [[I1_RELOAD]], i32 0, ptr addrspace(5) [[I3_LOC]])
; CHECK-NEXT: [[I3_RELOAD:%.*]] = load i32, ptr addrspace(5) [[I3_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 -1, ptr addrspace(5) [[I3_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[I3_LOC]])
; CHECK-NEXT: [[I4:%.*]] = tail call i32 @llvm.foo(i32 [[I3_RELOAD]], i32 0)
; CHECK-NEXT: ret i32 0
;
diff --git a/llvm/test/Transforms/IROutliner/alloca-addrspace.ll b/llvm/test/Transforms/IROutliner/alloca-addrspace.ll
index e870150..ed76444 100644
--- a/llvm/test/Transforms/IROutliner/alloca-addrspace.ll
+++ b/llvm/test/Transforms/IROutliner/alloca-addrspace.ll
@@ -18,10 +18,10 @@ declare i32 @func(i32, i32)
; CHECK-LABEL: define {{[^@]+}}@outlineable() {
; CHECK-NEXT: bb:
; CHECK-NEXT: [[I1_LOC:%.*]] = alloca i32, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 -1, ptr addrspace(5) [[I1_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[I1_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i32 0, i32 1, ptr addrspace(5) [[I1_LOC]], i32 0)
; CHECK-NEXT: [[I1_RELOAD:%.*]] = load i32, ptr addrspace(5) [[I1_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 -1, ptr addrspace(5) [[I1_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[I1_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i32 [[I1_RELOAD]], i32 0, ptr addrspace(5) null, i32 -1)
; CHECK-NEXT: ret i32 0
;
diff --git a/llvm/test/Transforms/IROutliner/different-intrinsics.ll b/llvm/test/Transforms/IROutliner/different-intrinsics.ll
index 5fb22c3..f0e43bb 100644
--- a/llvm/test/Transforms/IROutliner/different-intrinsics.ll
+++ b/llvm/test/Transforms/IROutliner/different-intrinsics.ll
@@ -31,18 +31,18 @@ entry:
; CHECK-NEXT: [[B_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[A_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[RET_LOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[S:%.*]], ptr [[D:%.*]], ptr [[A_LOC]], ptr [[B_LOC]])
; CHECK-NEXT: [[A_RELOAD:%.*]] = load i8, ptr [[A_LOC]], align 1
; CHECK-NEXT: [[B_RELOAD:%.*]] = load i8, ptr [[B_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B_LOC]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[D]], ptr [[S]], i64 [[LEN:%.*]], i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[RET_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i8 [[A_RELOAD]], i8 [[B_RELOAD]], ptr [[S]], ptr [[RET_LOC]])
; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, ptr [[RET_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[RET_LOC]])
; CHECK-NEXT: ret i8 [[RET_RELOAD]]
;
;
@@ -51,18 +51,18 @@ entry:
; CHECK-NEXT: [[B_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[A_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[RET_LOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[S:%.*]], ptr [[D:%.*]], ptr [[A_LOC]], ptr [[B_LOC]])
; CHECK-NEXT: [[A_RELOAD:%.*]] = load i8, ptr [[A_LOC]], align 1
; CHECK-NEXT: [[B_RELOAD:%.*]] = load i8, ptr [[B_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B_LOC]])
; CHECK-NEXT: call void @llvm.memmove.p0.p0.i64(ptr [[D]], ptr [[S]], i64 [[LEN:%.*]], i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[RET_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i8 [[A_RELOAD]], i8 [[B_RELOAD]], ptr [[S]], ptr [[RET_LOC]])
; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, ptr [[RET_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[RET_LOC]])
; CHECK-NEXT: ret i8 [[RET_RELOAD]]
;
;
diff --git a/llvm/test/Transforms/IROutliner/different-order-phi-merges.ll b/llvm/test/Transforms/IROutliner/different-order-phi-merges.ll
index baf27ed..6730d1b 100644
--- a/llvm/test/Transforms/IROutliner/different-order-phi-merges.ll
+++ b/llvm/test/Transforms/IROutliner/different-order-phi-merges.ll
@@ -46,10 +46,10 @@ bb5:
; CHECK-LABEL: @f1(
; CHECK-NEXT: bb1:
; CHECK-NEXT: [[PHINODE_CE_LOC:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PHINODE_CE_LOC]])
; CHECK-NEXT: [[TMP0:%.*]] = call i1 @outlined_ir_func_0(ptr [[PHINODE_CE_LOC]], i32 0)
; CHECK-NEXT: [[PHINODE_CE_RELOAD:%.*]] = load i32, ptr [[PHINODE_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PHINODE_CE_LOC]])
; CHECK-NEXT: br i1 [[TMP0]], label [[BB5:%.*]], label [[BB1_AFTER_OUTLINE:%.*]]
; CHECK: bb1_after_outline:
; CHECK-NEXT: ret void
@@ -61,10 +61,10 @@ bb5:
; CHECK-LABEL: @f2(
; CHECK-NEXT: bb1:
; CHECK-NEXT: [[PHINODE_CE_LOC:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PHINODE_CE_LOC]])
; CHECK-NEXT: [[TMP0:%.*]] = call i1 @outlined_ir_func_0(ptr [[PHINODE_CE_LOC]], i32 1)
; CHECK-NEXT: [[PHINODE_CE_RELOAD:%.*]] = load i32, ptr [[PHINODE_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PHINODE_CE_LOC]])
; CHECK-NEXT: br i1 [[TMP0]], label [[BB5:%.*]], label [[BB1_AFTER_OUTLINE:%.*]]
; CHECK: bb1_after_outline:
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/IROutliner/duplicate-merging-phis.ll b/llvm/test/Transforms/IROutliner/duplicate-merging-phis.ll
index 534efc3..53d52f5 100644
--- a/llvm/test/Transforms/IROutliner/duplicate-merging-phis.ll
+++ b/llvm/test/Transforms/IROutliner/duplicate-merging-phis.ll
@@ -49,13 +49,13 @@ bb5:
; CHECK-NEXT: bb1:
; CHECK-NEXT: [[PHINODE1_CE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[PHINODE_CE_LOC:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE1_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PHINODE1_CE_LOC]])
; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(ptr [[PHINODE_CE_LOC]], ptr [[PHINODE1_CE_LOC]])
; CHECK-NEXT: [[PHINODE_CE_RELOAD:%.*]] = load i32, ptr [[PHINODE_CE_LOC]], align 4
; CHECK-NEXT: [[PHINODE1_CE_RELOAD:%.*]] = load i32, ptr [[PHINODE1_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE1_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PHINODE1_CE_LOC]])
; CHECK-NEXT: br i1 [[TARGETBLOCK]], label [[BB5:%.*]], label [[BB1_AFTER_OUTLINE:%.*]]
; CHECK: bb1_after_outline:
; CHECK-NEXT: ret void
@@ -69,13 +69,13 @@ bb5:
; CHECK-NEXT: bb1:
; CHECK-NEXT: [[PHINODE1_CE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[PHINODE_CE_LOC:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE1_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PHINODE1_CE_LOC]])
; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(ptr [[PHINODE_CE_LOC]], ptr [[PHINODE1_CE_LOC]])
; CHECK-NEXT: [[PHINODE_CE_RELOAD:%.*]] = load i32, ptr [[PHINODE_CE_LOC]], align 4
; CHECK-NEXT: [[PHINODE1_CE_RELOAD:%.*]] = load i32, ptr [[PHINODE1_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE1_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PHINODE1_CE_LOC]])
; CHECK-NEXT: br i1 [[TARGETBLOCK]], label [[BB5:%.*]], label [[BB1_AFTER_OUTLINE:%.*]]
; CHECK: bb1_after_outline:
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/IROutliner/exit-block-phi-node-value-attribution.ll b/llvm/test/Transforms/IROutliner/exit-block-phi-node-value-attribution.ll
index 3d3dbff..04ec9284f 100644
--- a/llvm/test/Transforms/IROutliner/exit-block-phi-node-value-attribution.ll
+++ b/llvm/test/Transforms/IROutliner/exit-block-phi-node-value-attribution.ll
@@ -42,19 +42,19 @@ bb5:
; CHECK-NEXT: bb1:
; CHECK-NEXT: [[PHINODE_CE_LOC1:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[PHINODE_CE_LOC:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PHINODE_CE_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[PHINODE_CE_LOC]], i32 0)
; CHECK-NEXT: [[PHINODE_CE_RELOAD:%.*]] = load i32, ptr [[PHINODE_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PHINODE_CE_LOC]])
; CHECK-NEXT: br label [[BB5:%.*]]
; CHECK: placeholder:
; CHECK-NEXT: [[A:%.*]] = sub i32 5, 4
; CHECK-NEXT: br label [[BB5]]
; CHECK: bb3:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE_CE_LOC1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PHINODE_CE_LOC1]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[PHINODE_CE_LOC1]], i32 1)
; CHECK-NEXT: [[PHINODE_CE_RELOAD2:%.*]] = load i32, ptr [[PHINODE_CE_LOC1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE_CE_LOC1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PHINODE_CE_LOC1]])
; CHECK-NEXT: br label [[BB5]]
; CHECK: placeholder1:
; CHECK-NEXT: [[B:%.*]] = add i32 5, 4
diff --git a/llvm/test/Transforms/IROutliner/exit-phi-nodes-incoming-value-constant-argument.ll b/llvm/test/Transforms/IROutliner/exit-phi-nodes-incoming-value-constant-argument.ll
index cd60f93..0e82217 100644
--- a/llvm/test/Transforms/IROutliner/exit-phi-nodes-incoming-value-constant-argument.ll
+++ b/llvm/test/Transforms/IROutliner/exit-phi-nodes-incoming-value-constant-argument.ll
@@ -72,10 +72,10 @@ bb5:
; CHECK-NEXT: br label [[BB5:%.*]]
; CHECK: bb2:
; CHECK-NEXT: [[A:%.*]] = sub i32 [[TMP0:%.*]], [[TMP1:%.*]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[F_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[F_CE_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i32 [[TMP0]], i32 [[TMP1]], i32 1, ptr [[F_CE_LOC]], i32 0)
; CHECK-NEXT: [[F_CE_RELOAD:%.*]] = load i32, ptr [[F_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[F_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[F_CE_LOC]])
; CHECK-NEXT: br label [[BB5]]
; CHECK: bb4:
; CHECK-NEXT: [[E:%.*]] = add i32 [[TMP0]], [[TMP1]]
diff --git a/llvm/test/Transforms/IROutliner/extraction.ll b/llvm/test/Transforms/IROutliner/extraction.ll
index 1eca4ea..77f904d 100644
--- a/llvm/test/Transforms/IROutliner/extraction.ll
+++ b/llvm/test/Transforms/IROutliner/extraction.ll
@@ -59,13 +59,13 @@ define void @extract_outs1() #0 {
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[OUTPUT:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[RESULT:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTLOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[ADD_LOC]], ptr [[DOTLOC]])
; CHECK-NEXT: [[ADD_RELOAD:%.*]] = load i32, ptr [[ADD_LOC]], align 4
; CHECK-NEXT: [[DOTRELOAD:%.*]] = load i32, ptr [[DOTLOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTLOC]])
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[OUTPUT]], align 4
; CHECK-NEXT: call void @outlined_ir_func_2(i32 [[DOTRELOAD]], i32 [[ADD_RELOAD]], ptr [[RESULT]])
; CHECK-NEXT: ret void
@@ -99,13 +99,13 @@ define void @extract_outs2() #0 {
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[OUTPUT:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[RESULT:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTLOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[ADD_LOC]], ptr [[DOTLOC]])
; CHECK-NEXT: [[ADD_RELOAD:%.*]] = load i32, ptr [[ADD_LOC]], align 4
; CHECK-NEXT: [[DOTRELOAD:%.*]] = load i32, ptr [[DOTLOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTLOC]])
; CHECK-NEXT: call void @outlined_ir_func_2(i32 [[DOTRELOAD]], i32 [[ADD_RELOAD]], ptr [[RESULT]])
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/IROutliner/gvn-output-set-overload.ll b/llvm/test/Transforms/IROutliner/gvn-output-set-overload.ll
index 1184b4a..54f013c 100644
--- a/llvm/test/Transforms/IROutliner/gvn-output-set-overload.ll
+++ b/llvm/test/Transforms/IROutliner/gvn-output-set-overload.ll
@@ -44,10 +44,10 @@ next:
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DOTCE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: [[TMP1:%.*]] = call i1 @outlined_ir_func_0(ptr [[TMP0]], ptr [[DOTCE_LOC]], ptr null, i32 0)
; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, ptr [[DOTCE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: br i1 [[TMP1]], label [[FIRST:%.*]], label [[NEXT:%.*]]
; CHECK: first:
; CHECK-NEXT: [[TMP2:%.*]] = phi i32 [ [[DOTCE_RELOAD]], [[ENTRY:%.*]] ]
@@ -61,13 +61,13 @@ next:
; CHECK-NEXT: [[E_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[C_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[C_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[E_LOC]])
; CHECK-NEXT: [[TMP1:%.*]] = call i1 @outlined_ir_func_0(ptr [[TMP0]], ptr [[C_LOC]], ptr [[E_LOC]], i32 1)
; CHECK-NEXT: [[C_RELOAD:%.*]] = load i32, ptr [[C_LOC]], align 4
; CHECK-NEXT: [[E_RELOAD:%.*]] = load i32, ptr [[E_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[C_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[C_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[E_LOC]])
; CHECK-NEXT: br i1 [[TMP1]], label [[FIRST:%.*]], label [[NEXT:%.*]]
; CHECK: first:
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/IROutliner/gvn-phi-debug.ll b/llvm/test/Transforms/IROutliner/gvn-phi-debug.ll
index 951466c..0c899dc 100644
--- a/llvm/test/Transforms/IROutliner/gvn-phi-debug.ll
+++ b/llvm/test/Transforms/IROutliner/gvn-phi-debug.ll
@@ -8,10 +8,10 @@ define i32 @r() {
; CHECK-LABEL: define i32 @r() {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DOTLOC:%.*]] = alloca ptr, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTLOC]])
; CHECK-NEXT: [[TMP0:%.*]] = call i1 @outlined_ir_func_0(ptr [[DOTLOC]], i32 0)
; CHECK-NEXT: [[DOTRELOAD:%.*]] = load ptr, ptr [[DOTLOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTLOC]])
; CHECK-NEXT: br i1 [[TMP0]], label [[IF_END8:%.*]], label [[ENTRY_AFTER_OUTLINE:%.*]]
; CHECK: entry_after_outline:
; CHECK-NEXT: [[CALL7:%.*]] = call i32 [[DOTRELOAD]]()
@@ -91,10 +91,10 @@ define i32 @w() !dbg !8 {
; CHECK-SAME: ) !dbg [[DBG8:![0-9]+]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RETVAL_1_CE_LOC:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[RETVAL_1_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[RETVAL_1_CE_LOC]])
; CHECK-NEXT: [[TMP0:%.*]] = call i1 @outlined_ir_func_0(ptr [[RETVAL_1_CE_LOC]], i32 1), !dbg [[DBG11:![0-9]+]]
; CHECK-NEXT: [[RETVAL_1_CE_RELOAD:%.*]] = load i32, ptr [[RETVAL_1_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[RETVAL_1_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[RETVAL_1_CE_LOC]])
; CHECK-NEXT: br i1 [[TMP0]], label [[CLEANUP10:%.*]], label [[ENTRY_AFTER_OUTLINE:%.*]]
; CHECK: entry_after_outline:
; CHECK-NEXT: [[CALL8:%.*]] = call i32 @llvm.bswap.i32(i32 0)
diff --git a/llvm/test/Transforms/IROutliner/illegal-assumes.ll b/llvm/test/Transforms/IROutliner/illegal-assumes.ll
index d863fe7..c0c4e1a 100644
--- a/llvm/test/Transforms/IROutliner/illegal-assumes.ll
+++ b/llvm/test/Transforms/IROutliner/illegal-assumes.ll
@@ -12,10 +12,10 @@ define void @outline_assumes() {
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[D:%.*]] = alloca i1, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DL_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_4(i1 true, ptr [[D]], ptr [[DL_LOC]])
; CHECK-NEXT: [[DL_RELOAD:%.*]] = load i1, ptr [[DL_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DL_LOC]])
; CHECK-NEXT: [[SPLIT_INST:%.*]] = sub i1 [[DL_RELOAD]], [[DL_RELOAD]]
; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: call void @llvm.assume(i1 [[DL_RELOAD]])
@@ -48,10 +48,10 @@ define void @outline_assumes2() {
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[D:%.*]] = alloca i1, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DL_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_4(i1 false, ptr [[D]], ptr [[DL_LOC]])
; CHECK-NEXT: [[DL_RELOAD:%.*]] = load i1, ptr [[DL_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DL_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: call void @llvm.assume(i1 [[DL_RELOAD]])
; CHECK-NEXT: call void @outlined_ir_func_2(ptr [[A]], ptr [[B]], ptr [[C]])
@@ -82,10 +82,10 @@ define void @outline_assumes3() {
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[D:%.*]] = alloca i1, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DL_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i1 true, ptr [[D]], ptr [[A]], ptr [[B]], ptr [[C]], ptr [[DL_LOC]])
; CHECK-NEXT: [[DL_RELOAD:%.*]] = load i1, ptr [[DL_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DL_LOC]])
; CHECK-NEXT: call void @llvm.assume(i1 [[DL_RELOAD]])
; CHECK-NEXT: call void @outlined_ir_func_3(ptr [[A]])
; CHECK-NEXT: ret void
@@ -115,10 +115,10 @@ define void @outline_assumes4() {
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[D:%.*]] = alloca i1, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DL_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i1 false, ptr [[D]], ptr [[A]], ptr [[B]], ptr [[C]], ptr [[DL_LOC]])
; CHECK-NEXT: [[DL_RELOAD:%.*]] = load i1, ptr [[DL_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DL_LOC]])
; CHECK-NEXT: call void @llvm.assume(i1 [[DL_RELOAD]])
; CHECK-NEXT: call void @outlined_ir_func_3(ptr [[A]])
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/IROutliner/illegal-memcpy.ll b/llvm/test/Transforms/IROutliner/illegal-memcpy.ll
index 20e009a..523fd23 100644
--- a/llvm/test/Transforms/IROutliner/illegal-memcpy.ll
+++ b/llvm/test/Transforms/IROutliner/illegal-memcpy.ll
@@ -12,18 +12,18 @@ define i8 @function1(ptr noalias %s, ptr noalias %d, i64 %len) {
; CHECK-NEXT: [[B_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[A_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[RET_LOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[S:%.*]], ptr [[D:%.*]], ptr [[A_LOC]], ptr [[B_LOC]])
; CHECK-NEXT: [[A_RELOAD:%.*]] = load i8, ptr [[A_LOC]], align 1
; CHECK-NEXT: [[B_RELOAD:%.*]] = load i8, ptr [[B_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B_LOC]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[D]], ptr [[S]], i64 [[LEN:%.*]], i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[RET_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i8 [[A_RELOAD]], i8 [[B_RELOAD]], ptr [[S]], ptr [[RET_LOC]])
; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, ptr [[RET_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[RET_LOC]])
; CHECK-NEXT: ret i8 [[RET_RELOAD]]
;
entry:
@@ -41,18 +41,18 @@ define i8 @function2(ptr noalias %s, ptr noalias %d, i64 %len) {
; CHECK-NEXT: [[B_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[A_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[RET_LOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[S:%.*]], ptr [[D:%.*]], ptr [[A_LOC]], ptr [[B_LOC]])
; CHECK-NEXT: [[A_RELOAD:%.*]] = load i8, ptr [[A_LOC]], align 1
; CHECK-NEXT: [[B_RELOAD:%.*]] = load i8, ptr [[B_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B_LOC]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[D]], ptr [[S]], i64 [[LEN:%.*]], i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[RET_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i8 [[A_RELOAD]], i8 [[B_RELOAD]], ptr [[S]], ptr [[RET_LOC]])
; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, ptr [[RET_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[RET_LOC]])
; CHECK-NEXT: ret i8 [[RET_RELOAD]]
;
entry:
diff --git a/llvm/test/Transforms/IROutliner/illegal-memmove.ll b/llvm/test/Transforms/IROutliner/illegal-memmove.ll
index 06480c8..7482405 100644
--- a/llvm/test/Transforms/IROutliner/illegal-memmove.ll
+++ b/llvm/test/Transforms/IROutliner/illegal-memmove.ll
@@ -12,18 +12,18 @@ define i8 @function1(ptr noalias %s, ptr noalias %d, i64 %len) {
; CHECK-NEXT: [[B_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[A_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[RET_LOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[S:%.*]], ptr [[D:%.*]], ptr [[A_LOC]], ptr [[B_LOC]])
; CHECK-NEXT: [[A_RELOAD:%.*]] = load i8, ptr [[A_LOC]], align 1
; CHECK-NEXT: [[B_RELOAD:%.*]] = load i8, ptr [[B_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B_LOC]])
; CHECK-NEXT: call void @llvm.memmove.p0.p0.i64(ptr [[D]], ptr [[S]], i64 [[LEN:%.*]], i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[RET_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i8 [[A_RELOAD]], i8 [[B_RELOAD]], ptr [[S]], ptr [[RET_LOC]])
; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, ptr [[RET_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[RET_LOC]])
; CHECK-NEXT: ret i8 [[RET_RELOAD]]
;
entry:
@@ -41,18 +41,18 @@ define i8 @function2(ptr noalias %s, ptr noalias %d, i64 %len) {
; CHECK-NEXT: [[B_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[A_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[RET_LOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[S:%.*]], ptr [[D:%.*]], ptr [[A_LOC]], ptr [[B_LOC]])
; CHECK-NEXT: [[A_RELOAD:%.*]] = load i8, ptr [[A_LOC]], align 1
; CHECK-NEXT: [[B_RELOAD:%.*]] = load i8, ptr [[B_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B_LOC]])
; CHECK-NEXT: call void @llvm.memmove.p0.p0.i64(ptr [[D]], ptr [[S]], i64 [[LEN:%.*]], i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[RET_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i8 [[A_RELOAD]], i8 [[B_RELOAD]], ptr [[S]], ptr [[RET_LOC]])
; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, ptr [[RET_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[RET_LOC]])
; CHECK-NEXT: ret i8 [[RET_RELOAD]]
;
entry:
diff --git a/llvm/test/Transforms/IROutliner/illegal-vaarg.ll b/llvm/test/Transforms/IROutliner/illegal-vaarg.ll
index 38dfd25..15f9aa2 100644
--- a/llvm/test/Transforms/IROutliner/illegal-vaarg.ll
+++ b/llvm/test/Transforms/IROutliner/illegal-vaarg.ll
@@ -21,10 +21,10 @@ define i32 @func1(i32 %a, double %b, ptr %v, ...) nounwind {
; CHECK-NEXT: [[TMP0:%.*]] = va_arg ptr [[AP]], i32
; CHECK-NEXT: call void @llvm.va_copy.p0(ptr [[V:%.*]], ptr [[AP]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[AP]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[TMP_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[TMP0]], ptr [[C]], ptr [[TMP_LOC]])
; CHECK-NEXT: [[TMP_RELOAD:%.*]] = load i32, ptr [[TMP_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[TMP_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP_LOC]])
; CHECK-NEXT: ret i32 [[TMP_RELOAD]]
;
entry:
@@ -56,10 +56,10 @@ define i32 @func2(i32 %a, double %b, ptr %v, ...) nounwind {
; CHECK-NEXT: [[TMP0:%.*]] = va_arg ptr [[AP]], i32
; CHECK-NEXT: call void @llvm.va_copy.p0(ptr [[V:%.*]], ptr [[AP]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[AP]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[TMP_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[TMP0]], ptr [[C]], ptr [[TMP_LOC]])
; CHECK-NEXT: [[TMP_RELOAD:%.*]] = load i32, ptr [[TMP_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[TMP_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP_LOC]])
; CHECK-NEXT: ret i32 [[TMP_RELOAD]]
;
entry:
diff --git a/llvm/test/Transforms/IROutliner/mismatched-phi-exits-not-in-first-outlined.ll b/llvm/test/Transforms/IROutliner/mismatched-phi-exits-not-in-first-outlined.ll
index 24ad86f..f9d4999 100644
--- a/llvm/test/Transforms/IROutliner/mismatched-phi-exits-not-in-first-outlined.ll
+++ b/llvm/test/Transforms/IROutliner/mismatched-phi-exits-not-in-first-outlined.ll
@@ -47,10 +47,10 @@ first:
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DOTCE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[TMP0]], ptr [[DOTCE_LOC]], i32 0)
; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, ptr [[DOTCE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: br label [[FIRST:%.*]]
; CHECK: first:
; CHECK-NEXT: [[TMP1:%.*]] = phi i32 [ [[DOTCE_RELOAD]], [[ENTRY:%.*]] ]
diff --git a/llvm/test/Transforms/IROutliner/mismatched-phi-exits.ll b/llvm/test/Transforms/IROutliner/mismatched-phi-exits.ll
index 6b50e99..7191c80 100644
--- a/llvm/test/Transforms/IROutliner/mismatched-phi-exits.ll
+++ b/llvm/test/Transforms/IROutliner/mismatched-phi-exits.ll
@@ -38,10 +38,10 @@ first:
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DOTCE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[TMP0]], ptr [[DOTCE_LOC]], i32 0)
; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, ptr [[DOTCE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: br label [[FIRST:%.*]]
; CHECK: first:
; CHECK-NEXT: [[TMP1:%.*]] = phi i32 [ [[DOTCE_RELOAD]], [[ENTRY:%.*]] ]
diff --git a/llvm/test/Transforms/IROutliner/mismatched-phi-outputs-ordering.ll b/llvm/test/Transforms/IROutliner/mismatched-phi-outputs-ordering.ll
index ab1836f..9085e7e 100644
--- a/llvm/test/Transforms/IROutliner/mismatched-phi-outputs-ordering.ll
+++ b/llvm/test/Transforms/IROutliner/mismatched-phi-outputs-ordering.ll
@@ -48,16 +48,16 @@ next:
; CHECK-NEXT: [[D_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[E_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[E_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[D_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[D_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: [[TMP1:%.*]] = call i1 @outlined_ir_func_0(ptr [[TMP0]], ptr [[E_LOC]], ptr [[D_LOC]], ptr [[DOTCE_LOC]], i32 0)
; CHECK-NEXT: [[E_RELOAD:%.*]] = load i32, ptr [[E_LOC]], align 4
; CHECK-NEXT: [[D_RELOAD:%.*]] = load i32, ptr [[D_LOC]], align 4
; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, ptr [[DOTCE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[E_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[D_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[D_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: br i1 [[TMP1]], label [[FIRST:%.*]], label [[NEXT:%.*]]
; CHECK: first:
; CHECK-NEXT: [[TMP2:%.*]] = phi i32 [ [[DOTCE_RELOAD]], [[ENTRY:%.*]] ]
@@ -72,13 +72,13 @@ next:
; CHECK-NEXT: [[D_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[E_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[E_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[D_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[D_LOC]])
; CHECK-NEXT: [[TMP1:%.*]] = call i1 @outlined_ir_func_0(ptr [[TMP0]], ptr [[E_LOC]], ptr [[D_LOC]], ptr null, i32 1)
; CHECK-NEXT: [[E_RELOAD:%.*]] = load i32, ptr [[E_LOC]], align 4
; CHECK-NEXT: [[D_RELOAD:%.*]] = load i32, ptr [[D_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[E_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[D_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[D_LOC]])
; CHECK-NEXT: br i1 [[TMP1]], label [[FIRST:%.*]], label [[NEXT:%.*]]
; CHECK: first:
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/IROutliner/must-capture-all-phi-nodes-begin.ll b/llvm/test/Transforms/IROutliner/must-capture-all-phi-nodes-begin.ll
index 32973ea..3229f42 100644
--- a/llvm/test/Transforms/IROutliner/must-capture-all-phi-nodes-begin.ll
+++ b/llvm/test/Transforms/IROutliner/must-capture-all-phi-nodes-begin.ll
@@ -57,10 +57,10 @@ first:
; CHECK: test1:
; CHECK-NEXT: [[TMP1:%.*]] = phi i32 [ [[E_RELOAD:%.*]], [[TEST1]] ], [ [[Y]], [[ENTRY:%.*]] ]
; CHECK-NEXT: [[TMP2:%.*]] = phi i32 [ [[E_RELOAD]], [[TEST1]] ], [ [[Y]], [[ENTRY]] ]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[E_LOC]])
; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(ptr [[TMP0]], i32 [[C]], ptr [[E_LOC]])
; CHECK-NEXT: [[E_RELOAD]] = load i32, ptr [[E_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[E_LOC]])
; CHECK-NEXT: br i1 [[TARGETBLOCK]], label [[TEST1]], label [[FIRST:%.*]]
; CHECK: first:
; CHECK-NEXT: ret void
@@ -78,10 +78,10 @@ first:
; CHECK: test1:
; CHECK-NEXT: [[TMP1:%.*]] = phi i32 [ [[E_RELOAD:%.*]], [[TEST1]] ], [ [[Y]], [[ENTRY:%.*]] ]
; CHECK-NEXT: [[TMP2:%.*]] = phi i32 [ [[Y]], [[ENTRY]] ], [ [[E_RELOAD]], [[TEST1]] ]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[E_LOC]])
; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(ptr [[TMP0]], i32 [[C]], ptr [[E_LOC]])
; CHECK-NEXT: [[E_RELOAD]] = load i32, ptr [[E_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[E_LOC]])
; CHECK-NEXT: br i1 [[TARGETBLOCK]], label [[TEST1]], label [[FIRST:%.*]]
; CHECK: first:
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/IROutliner/no-external-block-entries.ll b/llvm/test/Transforms/IROutliner/no-external-block-entries.ll
index 4426009..fb2c5e9 100644
--- a/llvm/test/Transforms/IROutliner/no-external-block-entries.ll
+++ b/llvm/test/Transforms/IROutliner/no-external-block-entries.ll
@@ -35,10 +35,10 @@ block_6:
; CHECK-LABEL: @fn1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[B_CE_LOC:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B_CE_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[B_CE_LOC]], i32 0)
; CHECK-NEXT: [[B_CE_RELOAD:%.*]] = load i32, ptr [[B_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B_CE_LOC]])
; CHECK-NEXT: br label [[BLOCK_3:%.*]]
; CHECK: block_3:
; CHECK-NEXT: [[B:%.*]] = phi i32 [ [[B_CE_RELOAD]], [[ENTRY:%.*]] ]
diff --git a/llvm/test/Transforms/IROutliner/one-external-incoming-block-phi-node.ll b/llvm/test/Transforms/IROutliner/one-external-incoming-block-phi-node.ll
index 77e3a82..9627274 100644
--- a/llvm/test/Transforms/IROutliner/one-external-incoming-block-phi-node.ll
+++ b/llvm/test/Transforms/IROutliner/one-external-incoming-block-phi-node.ll
@@ -33,10 +33,10 @@ block_6:
; CHECK-LABEL: @fn1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[B_CE_LOC:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B_CE_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[B_CE_LOC]], i32 0)
; CHECK-NEXT: [[B_CE_RELOAD:%.*]] = load i32, ptr [[B_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B_CE_LOC]])
; CHECK-NEXT: br label [[BLOCK_3:%.*]]
; CHECK: block_3:
; CHECK-NEXT: [[B:%.*]] = phi i32 [ [[B_CE_RELOAD]], [[ENTRY:%.*]] ]
diff --git a/llvm/test/Transforms/IROutliner/outline-memcpy.ll b/llvm/test/Transforms/IROutliner/outline-memcpy.ll
index 0cf4f34..83fd5f6 100644
--- a/llvm/test/Transforms/IROutliner/outline-memcpy.ll
+++ b/llvm/test/Transforms/IROutliner/outline-memcpy.ll
@@ -27,20 +27,20 @@ entry:
; CHECK-LABEL: @function1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RET_LOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[RET_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[S:%.*]], ptr [[D:%.*]], i64 [[LEN:%.*]], ptr [[RET_LOC]])
; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, ptr [[RET_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[RET_LOC]])
; CHECK-NEXT: ret i8 [[RET_RELOAD]]
;
;
; CHECK-LABEL: @function2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RET_LOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[RET_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[S:%.*]], ptr [[D:%.*]], i64 [[LEN:%.*]], ptr [[RET_LOC]])
; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, ptr [[RET_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[RET_LOC]])
; CHECK-NEXT: ret i8 [[RET_RELOAD]]
;
;
diff --git a/llvm/test/Transforms/IROutliner/outline-memmove.ll b/llvm/test/Transforms/IROutliner/outline-memmove.ll
index cf79244..c512cd4 100644
--- a/llvm/test/Transforms/IROutliner/outline-memmove.ll
+++ b/llvm/test/Transforms/IROutliner/outline-memmove.ll
@@ -27,20 +27,20 @@ entry:
; CHECK-LABEL: @function1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RET_LOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[RET_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[S:%.*]], ptr [[D:%.*]], i64 [[LEN:%.*]], ptr [[RET_LOC]])
; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, ptr [[RET_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[RET_LOC]])
; CHECK-NEXT: ret i8 [[RET_RELOAD]]
;
;
; CHECK-LABEL: @function2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RET_LOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[RET_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[S:%.*]], ptr [[D:%.*]], i64 [[LEN:%.*]], ptr [[RET_LOC]])
; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, ptr [[RET_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[RET_LOC]])
; CHECK-NEXT: ret i8 [[RET_RELOAD]]
;
;
diff --git a/llvm/test/Transforms/IROutliner/outline-vaarg-intrinsic.ll b/llvm/test/Transforms/IROutliner/outline-vaarg-intrinsic.ll
index 2d52608..6a9cbca 100644
--- a/llvm/test/Transforms/IROutliner/outline-vaarg-intrinsic.ll
+++ b/llvm/test/Transforms/IROutliner/outline-vaarg-intrinsic.ll
@@ -53,10 +53,10 @@ entry:
; CHECK-NEXT: store double [[B]], ptr [[B_ADDR]], align 8
; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[AP]])
; CHECK-NEXT: [[TMP0:%.*]] = va_arg ptr [[AP]], i32
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[TMP_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[V]], ptr [[AP]], i32 [[TMP0]], ptr [[C]], ptr [[TMP_LOC]])
; CHECK-NEXT: [[TMP_RELOAD:%.*]] = load i32, ptr [[TMP_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[TMP_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP_LOC]])
; CHECK-NEXT: ret i32 [[TMP_RELOAD]]
;
;
@@ -72,10 +72,10 @@ entry:
; CHECK-NEXT: store double [[B]], ptr [[B_ADDR]], align 8
; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[AP]])
; CHECK-NEXT: [[TMP0:%.*]] = va_arg ptr [[AP]], i32
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[TMP_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[V]], ptr [[AP]], i32 [[TMP0]], ptr [[C]], ptr [[TMP_LOC]])
; CHECK-NEXT: [[TMP_RELOAD:%.*]] = load i32, ptr [[TMP_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[TMP_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP_LOC]])
; CHECK-NEXT: ret i32 [[TMP_RELOAD]]
;
;
diff --git a/llvm/test/Transforms/IROutliner/outlining-bitcasts.ll b/llvm/test/Transforms/IROutliner/outlining-bitcasts.ll
index 31f1d12..a8153a4 100644
--- a/llvm/test/Transforms/IROutliner/outlining-bitcasts.ll
+++ b/llvm/test/Transforms/IROutliner/outlining-bitcasts.ll
@@ -8,8 +8,8 @@
; Additionally, we check that the newly added bitcast instruction is excluded in
; further extractions.
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
define void @outline_bitcast_base() {
entry:
@@ -38,11 +38,11 @@ entry:
%al = load i32, ptr %a
%bl = load i32, ptr %b
%cl = load i32, ptr %c
- call void @llvm.lifetime.start.p0(i64 -1, ptr %d)
+ call void @llvm.lifetime.start.p0(ptr %d)
%am = load i32, ptr %b
%bm = load i32, ptr %a
%cm = load i32, ptr %c
- call void @llvm.lifetime.end.p0(i64 -1, ptr %d)
+ call void @llvm.lifetime.end.p0(ptr %d)
ret void
}
@@ -61,8 +61,8 @@ entry:
%am = add i32 %a, %b
%bm = add i32 %b, %a
%cm = add i32 %b, %c
- call void @llvm.lifetime.start.p0(i64 -1, ptr %d)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %d)
+ call void @llvm.lifetime.start.p0(ptr %d)
+ call void @llvm.lifetime.end.p0(ptr %d)
ret void
}
@@ -114,13 +114,13 @@ entry:
; CHECK-LABEL: @outlined_ir_func_1(
; CHECK-NEXT: newFuncRoot:
; CHECK-NEXT: [[D:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[D]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[D]])
; CHECK-NEXT: br label [[ENTRY_TO_OUTLINE:%.*]]
; CHECK: entry_to_outline:
; CHECK-NEXT: [[AL:%.*]] = add i32 [[TMP0:%.*]], [[TMP1:%.*]]
; CHECK-NEXT: [[BL:%.*]] = add i32 [[TMP1]], [[TMP0]]
; CHECK-NEXT: [[CL:%.*]] = add i32 [[TMP1]], [[TMP2:%.*]]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[D]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[D]])
; CHECK-NEXT: br label [[ENTRY_AFTER_OUTLINE_EXITSTUB:%.*]]
; CHECK: entry_after_outline.exitStub:
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/IROutliner/outlining-branches-phi-nodes.ll b/llvm/test/Transforms/IROutliner/outlining-branches-phi-nodes.ll
index 28c23e3..bb6bf8f 100644
--- a/llvm/test/Transforms/IROutliner/outlining-branches-phi-nodes.ll
+++ b/llvm/test/Transforms/IROutliner/outlining-branches-phi-nodes.ll
@@ -100,10 +100,10 @@ block_6:
; CHECK-NEXT: [[B2:%.*]] = alloca i32, align 4
; CHECK-NEXT: br label [[BLOCK_2]]
; CHECK: block_2:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DIFF_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DIFF_CE_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[RESULT]], ptr [[DIFF_CE_LOC]])
; CHECK-NEXT: [[DIFF_CE_RELOAD:%.*]] = load i32, ptr [[DIFF_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DIFF_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DIFF_CE_LOC]])
; CHECK-NEXT: br label [[BLOCK_6:%.*]]
; CHECK: dummy:
; CHECK-NEXT: ret void
@@ -127,10 +127,10 @@ block_6:
; CHECK-NEXT: [[B2:%.*]] = alloca i32, align 4
; CHECK-NEXT: br label [[BLOCK_2]]
; CHECK: block_2:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DIFF_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DIFF_CE_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[RESULT]], ptr [[DIFF_CE_LOC]])
; CHECK-NEXT: [[DIFF_CE_RELOAD:%.*]] = load i32, ptr [[DIFF_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DIFF_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DIFF_CE_LOC]])
; CHECK-NEXT: br label [[BLOCK_6:%.*]]
; CHECK: dummy:
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/IROutliner/outlining-cost-model.ll b/llvm/test/Transforms/IROutliner/outlining-cost-model.ll
index 81bf4f0..bb3163a 100644
--- a/llvm/test/Transforms/IROutliner/outlining-cost-model.ll
+++ b/llvm/test/Transforms/IROutliner/outlining-cost-model.ll
@@ -104,13 +104,13 @@ define void @function3() #0 {
; NOCOST-NEXT: [[B:%.*]] = alloca i32, align 4
; NOCOST-NEXT: [[OUTPUT:%.*]] = alloca i32, align 4
; NOCOST-NEXT: [[RESULT:%.*]] = alloca i32, align 4
-; NOCOST-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD_LOC]])
-; NOCOST-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTLOC]])
+; NOCOST-NEXT: call void @llvm.lifetime.start.p0(ptr [[ADD_LOC]])
+; NOCOST-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTLOC]])
; NOCOST-NEXT: call void @outlined_ir_func_1(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[ADD_LOC]], ptr [[DOTLOC]])
; NOCOST-NEXT: [[ADD_RELOAD:%.*]] = load i32, ptr [[ADD_LOC]], align 4
; NOCOST-NEXT: [[DOTRELOAD:%.*]] = load i32, ptr [[DOTLOC]], align 4
-; NOCOST-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD_LOC]])
-; NOCOST-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTLOC]])
+; NOCOST-NEXT: call void @llvm.lifetime.end.p0(ptr [[ADD_LOC]])
+; NOCOST-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTLOC]])
; NOCOST-NEXT: [[TMP0:%.*]] = load i32, ptr [[OUTPUT]], align 4
; NOCOST-NEXT: call void @outlined_ir_func_2(i32 [[DOTRELOAD]], i32 [[ADD_RELOAD]], ptr [[RESULT]])
; NOCOST-NEXT: ret void
@@ -159,13 +159,13 @@ define void @function4() #0 {
; NOCOST-NEXT: [[B:%.*]] = alloca i32, align 4
; NOCOST-NEXT: [[OUTPUT:%.*]] = alloca i32, align 4
; NOCOST-NEXT: [[RESULT:%.*]] = alloca i32, align 4
-; NOCOST-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD_LOC]])
-; NOCOST-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTLOC]])
+; NOCOST-NEXT: call void @llvm.lifetime.start.p0(ptr [[ADD_LOC]])
+; NOCOST-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTLOC]])
; NOCOST-NEXT: call void @outlined_ir_func_1(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[ADD_LOC]], ptr [[DOTLOC]])
; NOCOST-NEXT: [[ADD_RELOAD:%.*]] = load i32, ptr [[ADD_LOC]], align 4
; NOCOST-NEXT: [[DOTRELOAD:%.*]] = load i32, ptr [[DOTLOC]], align 4
-; NOCOST-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD_LOC]])
-; NOCOST-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTLOC]])
+; NOCOST-NEXT: call void @llvm.lifetime.end.p0(ptr [[ADD_LOC]])
+; NOCOST-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTLOC]])
; NOCOST-NEXT: call void @outlined_ir_func_2(i32 [[DOTRELOAD]], i32 [[ADD_RELOAD]], ptr [[RESULT]])
; NOCOST-NEXT: ret void
;
diff --git a/llvm/test/Transforms/IROutliner/outlining-different-output-blocks.ll b/llvm/test/Transforms/IROutliner/outlining-different-output-blocks.ll
index 2e1fae3..64e87fc 100644
--- a/llvm/test/Transforms/IROutliner/outlining-different-output-blocks.ll
+++ b/llvm/test/Transforms/IROutliner/outlining-different-output-blocks.ll
@@ -14,13 +14,13 @@ define void @outline_outputs1() #0 {
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[OUTPUT:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[RESULT:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTLOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[ADD_LOC]], ptr [[DOTLOC]], i32 0)
; CHECK-NEXT: [[ADD_RELOAD:%.*]] = load i32, ptr [[ADD_LOC]], align 4
; CHECK-NEXT: [[DOTRELOAD:%.*]] = load i32, ptr [[DOTLOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTLOC]])
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[OUTPUT]], align 4
; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[DOTRELOAD]], i32 [[ADD_RELOAD]], ptr [[RESULT]])
; CHECK-NEXT: ret void
@@ -53,13 +53,13 @@ define void @outline_outputs2() #0 {
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[OUTPUT:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[RESULT:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[SUB_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[SUB_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTLOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[SUB_LOC]], ptr [[DOTLOC]], i32 1)
; CHECK-NEXT: [[SUB_RELOAD:%.*]] = load i32, ptr [[SUB_LOC]], align 4
; CHECK-NEXT: [[DOTRELOAD:%.*]] = load i32, ptr [[DOTLOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[SUB_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[SUB_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTLOC]])
; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[DOTRELOAD]], i32 [[SUB_RELOAD]], ptr [[RESULT]])
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/IROutliner/outlining-exits-to-phi-node.ll b/llvm/test/Transforms/IROutliner/outlining-exits-to-phi-node.ll
index cb5d505..d901955 100644
--- a/llvm/test/Transforms/IROutliner/outlining-exits-to-phi-node.ll
+++ b/llvm/test/Transforms/IROutliner/outlining-exits-to-phi-node.ll
@@ -43,10 +43,10 @@ first:
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DOTCE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[TMP0]], ptr [[DOTCE_LOC]])
; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, ptr [[DOTCE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: br label [[FIRST:%.*]]
; CHECK: dummy:
; CHECK-NEXT: ret void
@@ -59,10 +59,10 @@ first:
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DOTCE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[TMP0]], ptr [[DOTCE_LOC]])
; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, ptr [[DOTCE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: br label [[FIRST:%.*]]
; CHECK: dummy:
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/IROutliner/outlining-multiple-exits-diff-outputs.ll b/llvm/test/Transforms/IROutliner/outlining-multiple-exits-diff-outputs.ll
index 463e097..9dbfa9e 100644
--- a/llvm/test/Transforms/IROutliner/outlining-multiple-exits-diff-outputs.ll
+++ b/llvm/test/Transforms/IROutliner/outlining-multiple-exits-diff-outputs.ll
@@ -103,19 +103,19 @@ block_7:
; CHECK-NEXT: [[B2:%.*]] = alloca i32, align 4
; CHECK-NEXT: br label [[BLOCK_2]]
; CHECK: block_2:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[AVAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[BVAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[AVAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[BVAL_LOC]])
; CHECK-NEXT: [[TMP0:%.*]] = call i1 @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[RESULT]], ptr [[A2VAL_LOC]], ptr [[B2VAL_LOC]], ptr [[AVAL_LOC]], ptr [[BVAL_LOC]], i32 0)
; CHECK-NEXT: [[A2VAL_RELOAD:%.*]] = load i32, ptr [[A2VAL_LOC]], align 4
; CHECK-NEXT: [[B2VAL_RELOAD:%.*]] = load i32, ptr [[B2VAL_LOC]], align 4
; CHECK-NEXT: [[AVAL_RELOAD:%.*]] = load i32, ptr [[AVAL_LOC]], align 4
; CHECK-NEXT: [[BVAL_RELOAD:%.*]] = load i32, ptr [[BVAL_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[AVAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[BVAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[AVAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[BVAL_LOC]])
; CHECK-NEXT: br i1 [[TMP0]], label [[BLOCK_6:%.*]], label [[BLOCK_7:%.*]]
; CHECK: block_6:
; CHECK-NEXT: [[DIV:%.*]] = udiv i32 [[AVAL_RELOAD]], [[BVAL_RELOAD]]
@@ -143,19 +143,19 @@ block_7:
; CHECK-NEXT: [[B2:%.*]] = alloca i32, align 4
; CHECK-NEXT: br label [[BLOCK_2]]
; CHECK: block_2:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[MUL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[MUL_LOC]])
; CHECK-NEXT: [[TMP0:%.*]] = call i1 @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[RESULT]], ptr [[A2VAL_LOC]], ptr [[B2VAL_LOC]], ptr [[ADD_LOC]], ptr [[MUL_LOC]], i32 1)
; CHECK-NEXT: [[A2VAL_RELOAD:%.*]] = load i32, ptr [[A2VAL_LOC]], align 4
; CHECK-NEXT: [[B2VAL_RELOAD:%.*]] = load i32, ptr [[B2VAL_LOC]], align 4
; CHECK-NEXT: [[ADD_RELOAD:%.*]] = load i32, ptr [[ADD_LOC]], align 4
; CHECK-NEXT: [[MUL_RELOAD:%.*]] = load i32, ptr [[MUL_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[MUL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[MUL_LOC]])
; CHECK-NEXT: br i1 [[TMP0]], label [[BLOCK_7:%.*]], label [[BLOCK_6:%.*]]
; CHECK: block_6:
; CHECK-NEXT: [[DIFF:%.*]] = sub i32 [[A2VAL_RELOAD]], [[B2VAL_RELOAD]]
diff --git a/llvm/test/Transforms/IROutliner/outlining-multiple-exits-one-output-set.ll b/llvm/test/Transforms/IROutliner/outlining-multiple-exits-one-output-set.ll
index 5293647..f789735 100644
--- a/llvm/test/Transforms/IROutliner/outlining-multiple-exits-one-output-set.ll
+++ b/llvm/test/Transforms/IROutliner/outlining-multiple-exits-one-output-set.ll
@@ -124,19 +124,19 @@ block_7:
; CHECK-NEXT: [[B2:%.*]] = alloca i32, align 4
; CHECK-NEXT: br label [[BLOCK_2]]
; CHECK: block_2:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[MUL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[MUL_LOC]])
; CHECK-NEXT: [[TMP0:%.*]] = call i1 @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[RESULT]], ptr [[A2VAL_LOC]], ptr [[B2VAL_LOC]], ptr [[ADD_LOC]], ptr [[MUL_LOC]], i32 0)
; CHECK-NEXT: [[A2VAL_RELOAD:%.*]] = load i32, ptr [[A2VAL_LOC]], align 4
; CHECK-NEXT: [[B2VAL_RELOAD:%.*]] = load i32, ptr [[B2VAL_LOC]], align 4
; CHECK-NEXT: [[ADD_RELOAD:%.*]] = load i32, ptr [[ADD_LOC]], align 4
; CHECK-NEXT: [[MUL_RELOAD:%.*]] = load i32, ptr [[MUL_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[MUL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[MUL_LOC]])
; CHECK-NEXT: br i1 [[TMP0]], label [[BLOCK_7:%.*]], label [[BLOCK_6:%.*]]
; CHECK: block_6:
; CHECK-NEXT: [[DIFF:%.*]] = sub i32 [[A2VAL_RELOAD]], [[B2VAL_RELOAD]]
diff --git a/llvm/test/Transforms/IROutliner/outlining-multiple-exits.ll b/llvm/test/Transforms/IROutliner/outlining-multiple-exits.ll
index 663e6d8..1de13eb 100644
--- a/llvm/test/Transforms/IROutliner/outlining-multiple-exits.ll
+++ b/llvm/test/Transforms/IROutliner/outlining-multiple-exits.ll
@@ -104,19 +104,19 @@ block_7:
; CHECK-NEXT: [[B2:%.*]] = alloca i32, align 4
; CHECK-NEXT: br label [[BLOCK_2]]
; CHECK: block_2:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[AVAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[BVAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[AVAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[BVAL_LOC]])
; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[RESULT]], ptr [[A2VAL_LOC]], ptr [[B2VAL_LOC]], ptr [[AVAL_LOC]], ptr [[BVAL_LOC]])
; CHECK-NEXT: [[A2VAL_RELOAD:%.*]] = load i32, ptr [[A2VAL_LOC]], align 4
; CHECK-NEXT: [[B2VAL_RELOAD:%.*]] = load i32, ptr [[B2VAL_LOC]], align 4
; CHECK-NEXT: [[AVAL_RELOAD:%.*]] = load i32, ptr [[AVAL_LOC]], align 4
; CHECK-NEXT: [[BVAL_RELOAD:%.*]] = load i32, ptr [[BVAL_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[AVAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[BVAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[AVAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[BVAL_LOC]])
; CHECK-NEXT: br i1 [[TARGETBLOCK]], label [[BLOCK_6:%.*]], label [[BLOCK_7:%.*]]
; CHECK: block_6:
; CHECK-NEXT: [[DIV:%.*]] = udiv i32 [[AVAL_RELOAD]], [[BVAL_RELOAD]]
@@ -144,19 +144,19 @@ block_7:
; CHECK-NEXT: [[B2:%.*]] = alloca i32, align 4
; CHECK-NEXT: br label [[BLOCK_2]]
; CHECK: block_2:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[AVAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[BVAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[AVAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[BVAL_LOC]])
; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[RESULT]], ptr [[A2VAL_LOC]], ptr [[B2VAL_LOC]], ptr [[AVAL_LOC]], ptr [[BVAL_LOC]])
; CHECK-NEXT: [[A2VAL_RELOAD:%.*]] = load i32, ptr [[A2VAL_LOC]], align 4
; CHECK-NEXT: [[B2VAL_RELOAD:%.*]] = load i32, ptr [[B2VAL_LOC]], align 4
; CHECK-NEXT: [[AVAL_RELOAD:%.*]] = load i32, ptr [[AVAL_LOC]], align 4
; CHECK-NEXT: [[BVAL_RELOAD:%.*]] = load i32, ptr [[BVAL_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[AVAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[BVAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[AVAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[BVAL_LOC]])
; CHECK-NEXT: br i1 [[TARGETBLOCK]], label [[BLOCK_7:%.*]], label [[BLOCK_6:%.*]]
; CHECK: block_6:
; CHECK-NEXT: [[DIFF:%.*]] = sub i32 [[A2VAL_RELOAD]], [[B2VAL_RELOAD]]
diff --git a/llvm/test/Transforms/IROutliner/outlining-remapped-outputs.ll b/llvm/test/Transforms/IROutliner/outlining-remapped-outputs.ll
index 6d0b153..77f17c3 100644
--- a/llvm/test/Transforms/IROutliner/outlining-remapped-outputs.ll
+++ b/llvm/test/Transforms/IROutliner/outlining-remapped-outputs.ll
@@ -17,24 +17,24 @@ define void @outline_outputs1() #0 {
; CHECK-NEXT: [[RESULT:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[OUTPUT2:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[RESULT2:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTLOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i32 2, ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[ADD_LOC]], ptr [[DOTLOC]])
; CHECK-NEXT: [[ADD_RELOAD:%.*]] = load i32, ptr [[ADD_LOC]], align 4
; CHECK-NEXT: [[DOTRELOAD:%.*]] = load i32, ptr [[DOTLOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTLOC]])
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[OUTPUT]], align 4
; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[DOTRELOAD]], i32 [[ADD_RELOAD]], ptr [[RESULT]])
; CHECK-NEXT: br label [[NEXT:%.*]]
; CHECK: next:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD2_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTLOC2]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ADD2_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTLOC2]])
; CHECK-NEXT: call void @outlined_ir_func_0(i32 [[ADD_RELOAD]], ptr [[OUTPUT]], ptr [[RESULT]], ptr [[OUTPUT2]], ptr [[ADD2_LOC]], ptr [[DOTLOC2]])
; CHECK-NEXT: [[ADD2_RELOAD:%.*]] = load i32, ptr [[ADD2_LOC]], align 4
; CHECK-NEXT: [[DOTRELOAD3:%.*]] = load i32, ptr [[DOTLOC2]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD2_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTLOC2]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ADD2_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTLOC2]])
; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[DOTRELOAD3]], i32 [[ADD2_RELOAD]], ptr [[RESULT2]])
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/IROutliner/outlining-same-output-blocks.ll b/llvm/test/Transforms/IROutliner/outlining-same-output-blocks.ll
index 380c53d..cc4f6ef 100644
--- a/llvm/test/Transforms/IROutliner/outlining-same-output-blocks.ll
+++ b/llvm/test/Transforms/IROutliner/outlining-same-output-blocks.ll
@@ -14,13 +14,13 @@ define void @outline_outputs1() #0 {
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[OUTPUT:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[RESULT:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTLOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[ADD_LOC]], ptr [[DOTLOC]])
; CHECK-NEXT: [[ADD_RELOAD:%.*]] = load i32, ptr [[ADD_LOC]], align 4
; CHECK-NEXT: [[DOTRELOAD:%.*]] = load i32, ptr [[DOTLOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTLOC]])
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[OUTPUT]], align 4
; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[DOTRELOAD]], i32 [[ADD_RELOAD]], ptr [[RESULT]])
; CHECK-NEXT: ret void
@@ -52,13 +52,13 @@ define void @outline_outputs2() #0 {
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[OUTPUT:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[RESULT:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTLOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[ADD_LOC]], ptr [[DOTLOC]])
; CHECK-NEXT: [[ADD_RELOAD:%.*]] = load i32, ptr [[ADD_LOC]], align 4
; CHECK-NEXT: [[DOTRELOAD:%.*]] = load i32, ptr [[DOTLOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTLOC]])
; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[DOTRELOAD]], i32 [[ADD_RELOAD]], ptr [[RESULT]])
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/IROutliner/phi-node-exit-path-order.ll b/llvm/test/Transforms/IROutliner/phi-node-exit-path-order.ll
index 4bbe6e7..15d313a 100644
--- a/llvm/test/Transforms/IROutliner/phi-node-exit-path-order.ll
+++ b/llvm/test/Transforms/IROutliner/phi-node-exit-path-order.ll
@@ -58,10 +58,10 @@ bb5:
; CHECK-NEXT: br label [[BB5:%.*]]
; CHECK: bb2:
; CHECK-NEXT: [[A:%.*]] = add i32 [[TMP0:%.*]], [[TMP1:%.*]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[F_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[F_CE_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i32 [[TMP0]], i32 [[TMP1]], ptr [[F_CE_LOC]], i32 0)
; CHECK-NEXT: [[F_CE_RELOAD:%.*]] = load i32, ptr [[F_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[F_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[F_CE_LOC]])
; CHECK-NEXT: br label [[BB5]]
; CHECK: bb4:
; CHECK-NEXT: [[E:%.*]] = sub i32 [[TMP0]], [[TMP1]]
@@ -77,10 +77,10 @@ bb5:
; CHECK-NEXT: br label [[BB5:%.*]]
; CHECK: bb2:
; CHECK-NEXT: [[A:%.*]] = sub i32 [[TMP0:%.*]], [[TMP1:%.*]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[F_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[F_CE_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i32 [[TMP0]], i32 [[TMP1]], ptr [[F_CE_LOC]], i32 1)
; CHECK-NEXT: [[F_CE_RELOAD:%.*]] = load i32, ptr [[F_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[F_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[F_CE_LOC]])
; CHECK-NEXT: br label [[BB5]]
; CHECK: bb4:
; CHECK-NEXT: [[E:%.*]] = add i32 [[TMP0]], [[TMP1]]
diff --git a/llvm/test/Transforms/IROutliner/phi-nodes-output-overload.ll b/llvm/test/Transforms/IROutliner/phi-nodes-output-overload.ll
index 9631bfa..9e443ab 100644
--- a/llvm/test/Transforms/IROutliner/phi-nodes-output-overload.ll
+++ b/llvm/test/Transforms/IROutliner/phi-nodes-output-overload.ll
@@ -43,10 +43,10 @@ next:
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DOTCE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: [[TMP1:%.*]] = call i1 @outlined_ir_func_0(ptr [[TMP0]], ptr [[DOTCE_LOC]], i32 0)
; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, ptr [[DOTCE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: br i1 [[TMP1]], label [[FIRST:%.*]], label [[NEXT:%.*]]
; CHECK: first:
; CHECK-NEXT: [[TMP2:%.*]] = phi i32 [ [[DOTCE_RELOAD]], [[ENTRY:%.*]] ]
@@ -59,10 +59,10 @@ next:
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DOTCE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: [[TMP1:%.*]] = call i1 @outlined_ir_func_0(ptr [[TMP0]], ptr [[DOTCE_LOC]], i32 1)
; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, ptr [[DOTCE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: br i1 [[TMP1]], label [[FIRST:%.*]], label [[NEXT:%.*]]
; CHECK: first:
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/IROutliner/phi-nodes-parent-block-referential.ll b/llvm/test/Transforms/IROutliner/phi-nodes-parent-block-referential.ll
index 608abfa..02930d7 100644
--- a/llvm/test/Transforms/IROutliner/phi-nodes-parent-block-referential.ll
+++ b/llvm/test/Transforms/IROutliner/phi-nodes-parent-block-referential.ll
@@ -53,10 +53,10 @@ first:
; CHECK: test1:
; CHECK-NEXT: [[TMP1:%.*]] = phi i32 [ [[E_RELOAD:%.*]], [[TEST1]] ], [ [[Y]], [[ENTRY:%.*]] ]
; CHECK-NEXT: [[TMP2:%.*]] = phi i32 [ [[E_RELOAD]], [[TEST1]] ], [ [[Y]], [[ENTRY]] ]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[E_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[TMP0]], i32 [[C]], ptr [[E_LOC]])
; CHECK-NEXT: [[E_RELOAD]] = load i32, ptr [[E_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[E_LOC]])
; CHECK-NEXT: [[TMP3:%.*]] = sub i32 [[C]], [[C]]
; CHECK-NEXT: br i1 true, label [[FIRST:%.*]], label [[TEST1]]
; CHECK: first:
@@ -75,10 +75,10 @@ first:
; CHECK: test1:
; CHECK-NEXT: [[TMP1:%.*]] = phi i32 [ [[E_RELOAD:%.*]], [[TEST1]] ], [ [[Y]], [[ENTRY:%.*]] ]
; CHECK-NEXT: [[TMP2:%.*]] = phi i32 [ [[Y]], [[ENTRY]] ], [ [[E_RELOAD]], [[TEST1]] ]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[E_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[TMP0]], i32 [[C]], ptr [[E_LOC]])
; CHECK-NEXT: [[E_RELOAD]] = load i32, ptr [[E_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[E_LOC]])
; CHECK-NEXT: [[TMP3:%.*]] = mul i32 [[C]], [[C]]
; CHECK-NEXT: br i1 true, label [[FIRST:%.*]], label [[TEST1]]
; CHECK: first:
diff --git a/llvm/test/Transforms/IROutliner/region-inputs-in-phi-nodes.ll b/llvm/test/Transforms/IROutliner/region-inputs-in-phi-nodes.ll
index f46035a..25b1e8e 100644
--- a/llvm/test/Transforms/IROutliner/region-inputs-in-phi-nodes.ll
+++ b/llvm/test/Transforms/IROutliner/region-inputs-in-phi-nodes.ll
@@ -53,10 +53,10 @@ next:
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = load i32, ptr [[TMP0]], align 4
; CHECK-NEXT: [[Z:%.*]] = add i32 [[C]], [[C]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(ptr [[TMP0]], i32 [[C]], ptr [[DOTCE_LOC]])
; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, ptr [[DOTCE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: br i1 [[TARGETBLOCK]], label [[FIRST:%.*]], label [[NEXT:%.*]]
; CHECK: dummy:
; CHECK-NEXT: ret void
@@ -73,10 +73,10 @@ next:
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = load i32, ptr [[TMP0]], align 4
; CHECK-NEXT: [[Z:%.*]] = mul i32 [[C]], [[C]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(ptr [[TMP0]], i32 [[C]], ptr [[DOTCE_LOC]])
; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, ptr [[DOTCE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: br i1 [[TARGETBLOCK]], label [[FIRST:%.*]], label [[NEXT:%.*]]
; CHECK: dummy:
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/IndVarSimplify/exit_value_test2.ll b/llvm/test/Transforms/IndVarSimplify/exit_value_test2.ll
index 66c7222..697c816 100644
--- a/llvm/test/Transforms/IndVarSimplify/exit_value_test2.ll
+++ b/llvm/test/Transforms/IndVarSimplify/exit_value_test2.ll
@@ -6,14 +6,14 @@
; udiv will be introduced by expand and the cost will be high.
declare void @_Z3mixRjj(ptr dereferenceable(4), i32)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
define i32 @_Z3fooPKcjj(ptr nocapture readonly %s, i32 %len, i32 %c) {
; CHECK-LABEL: @_Z3fooPKcjj(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
; CHECK-NEXT: store i32 -1640531527, ptr [[A]], align 4
; CHECK-NEXT: [[CMP8:%.*]] = icmp ugt i32 [[LEN:%.*]], 11
; CHECK-NEXT: br i1 [[CMP8]], label [[WHILE_BODY_LR_PH:%.*]], label [[WHILE_END:%.*]]
@@ -40,12 +40,12 @@ define i32 @_Z3fooPKcjj(ptr nocapture readonly %s, i32 %len, i32 %c) {
; CHECK-NEXT: [[KEYLEN_0_LCSSA:%.*]] = phi i32 [ [[SUB_LCSSA]], [[WHILE_COND_WHILE_END_CRIT_EDGE]] ], [ [[LEN]], [[ENTRY:%.*]] ]
; CHECK-NEXT: call void @_Z3mixRjj(ptr dereferenceable(4) [[A]], i32 [[KEYLEN_0_LCSSA]])
; CHECK-NEXT: [[T4:%.*]] = load i32, ptr [[A]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
; CHECK-NEXT: ret i32 [[T4]]
;
entry:
%a = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
store i32 -1640531527, ptr %a, align 4
%cmp8 = icmp ugt i32 %len, 11
br i1 %cmp8, label %while.body.lr.ph, label %while.end
@@ -76,7 +76,7 @@ while.end: ; preds = %while.cond.while.en
%keylen.0.lcssa = phi i32 [ %sub.lcssa, %while.cond.while.end_crit_edge ], [ %len, %entry ]
call void @_Z3mixRjj(ptr dereferenceable(4) %a, i32 %keylen.0.lcssa)
%t4 = load i32, ptr %a, align 4
- call void @llvm.lifetime.end.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
ret i32 %t4
}
diff --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/lifetime.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/lifetime.ll
index 053d073..4c04e6d 100644
--- a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/lifetime.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/lifetime.ll
@@ -4,17 +4,17 @@
define i32 @lifetime_flat_pointer() {
; CHECK-LABEL: define i32 @lifetime_flat_pointer() {
; CHECK-NEXT: [[ALLOCA:%.*]] = alloca i32, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[ALLOCA]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[ALLOCA]])
; CHECK-NEXT: store i32 1, ptr addrspace(5) [[ALLOCA]], align 4
; CHECK-NEXT: [[RET:%.*]] = load i32, ptr addrspace(5) [[ALLOCA]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[ALLOCA]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[ALLOCA]])
; CHECK-NEXT: ret i32 [[RET]]
;
%alloca = alloca i32, align 4, addrspace(5)
%flat = addrspacecast ptr addrspace(5) %alloca to ptr
- call void @llvm.lifetime.start(i64 4, ptr addrspace(5) %alloca)
+ call void @llvm.lifetime.start(ptr addrspace(5) %alloca)
store i32 1, ptr %flat, align 4
%ret = load i32, ptr %flat, align 4
- call void @llvm.lifetime.end(i64 4, ptr addrspace(5) %alloca)
+ call void @llvm.lifetime.end(ptr addrspace(5) %alloca)
ret i32 %ret
}
diff --git a/llvm/test/Transforms/InferAddressSpaces/NVPTX/lifetime.ll b/llvm/test/Transforms/InferAddressSpaces/NVPTX/lifetime.ll
index 31e914a..1a21416 100644
--- a/llvm/test/Transforms/InferAddressSpaces/NVPTX/lifetime.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/NVPTX/lifetime.ll
@@ -7,20 +7,20 @@ define i32 @lifetime_flat_pointer() {
; CHECK-LABEL: define i32 @lifetime_flat_pointer() {
; CHECK-NEXT: [[ALLOCA:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP1:%.*]] = addrspacecast ptr [[ALLOCA]] to ptr addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[ALLOCA]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ALLOCA]])
; CHECK-NEXT: store i32 1, ptr addrspace(5) [[TMP1]], align 4
; CHECK-NEXT: [[RET:%.*]] = load i32, ptr addrspace(5) [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[ALLOCA]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ALLOCA]])
; CHECK-NEXT: ret i32 [[RET]]
;
%alloca = alloca i32, align 4
%1 = addrspacecast ptr %alloca to ptr addrspace(5)
- call void @llvm.lifetime.start.p0(i64 4, ptr %alloca)
+ call void @llvm.lifetime.start.p0(ptr %alloca)
store i32 1, ptr addrspace(5) %1, align 4
%ret = load i32, ptr addrspace(5) %1, align 4
- call void @llvm.lifetime.end.p0(i64 4, ptr %alloca)
+ call void @llvm.lifetime.end.p0(ptr %alloca)
ret i32 %ret
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
diff --git a/llvm/test/Transforms/InferAlignment/propagate-from-other-load-stores.ll b/llvm/test/Transforms/InferAlignment/propagate-from-other-load-stores.ll
new file mode 100644
index 0000000..3fc7c59
--- /dev/null
+++ b/llvm/test/Transforms/InferAlignment/propagate-from-other-load-stores.ll
@@ -0,0 +1,194 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt < %s -passes=infer-alignment -S | FileCheck %s
+%struct.S1 = type { %struct.float3, %struct.float3, i32, i32 }
+%struct.float3 = type { float, float, float }
+
+
+; ------------------------------------------------------------------------------
+; Test that we can propagate the align 16 to the load and store that are set to align 4
+; ------------------------------------------------------------------------------
+
+define void @prop_align(ptr %v, ptr %vout) {
+; CHECK-LABEL: define void @prop_align(
+; CHECK-SAME: ptr [[V:%.*]], ptr [[VOUT:%.*]]) {
+; CHECK-NEXT: [[DOTUNPACK_UNPACK:%.*]] = load float, ptr [[V]], align 16
+; CHECK-NEXT: [[DOTUNPACK_ELT7:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 4
+; CHECK-NEXT: [[DOTUNPACK_UNPACK8:%.*]] = load float, ptr [[DOTUNPACK_ELT7]], align 4
+; CHECK-NEXT: [[DOTUNPACK_ELT9:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 8
+; CHECK-NEXT: [[DOTUNPACK_UNPACK10:%.*]] = load float, ptr [[DOTUNPACK_ELT9]], align 8
+; CHECK-NEXT: [[DOTELT1:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 12
+; CHECK-NEXT: [[DOTUNPACK2_UNPACK:%.*]] = load float, ptr [[DOTELT1]], align 4
+; CHECK-NEXT: [[DOTUNPACK2_ELT12:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 16
+; CHECK-NEXT: [[DOTUNPACK2_UNPACK13:%.*]] = load float, ptr [[DOTUNPACK2_ELT12]], align 16
+; CHECK-NEXT: [[DOTUNPACK2_ELT14:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 20
+; CHECK-NEXT: [[DOTUNPACK2_UNPACK15:%.*]] = load float, ptr [[DOTUNPACK2_ELT14]], align 4
+; CHECK-NEXT: [[DOTELT3:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 24
+; CHECK-NEXT: [[DOTUNPACK4:%.*]] = load i32, ptr [[DOTELT3]], align 8
+; CHECK-NEXT: [[DOTELT5:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 28
+; CHECK-NEXT: [[DOTUNPACK6:%.*]] = load i32, ptr [[DOTELT5]], align 4
+; CHECK-NEXT: store float [[DOTUNPACK_UNPACK]], ptr [[VOUT]], align 16
+; CHECK-NEXT: [[VOUT_REPACK23:%.*]] = getelementptr inbounds nuw i8, ptr [[VOUT]], i64 4
+; CHECK-NEXT: store float [[DOTUNPACK_UNPACK8]], ptr [[VOUT_REPACK23]], align 4
+; CHECK-NEXT: [[VOUT_REPACK25:%.*]] = getelementptr inbounds nuw i8, ptr [[VOUT]], i64 8
+; CHECK-NEXT: store float [[DOTUNPACK_UNPACK10]], ptr [[VOUT_REPACK25]], align 8
+; CHECK-NEXT: [[VOUT_REPACK17:%.*]] = getelementptr inbounds nuw i8, ptr [[VOUT]], i64 12
+; CHECK-NEXT: store float [[DOTUNPACK2_UNPACK]], ptr [[VOUT_REPACK17]], align 4
+; CHECK-NEXT: [[VOUT_REPACK17_REPACK27:%.*]] = getelementptr inbounds nuw i8, ptr [[VOUT]], i64 16
+; CHECK-NEXT: store float [[DOTUNPACK2_UNPACK13]], ptr [[VOUT_REPACK17_REPACK27]], align 16
+; CHECK-NEXT: [[VOUT_REPACK17_REPACK29:%.*]] = getelementptr inbounds nuw i8, ptr [[VOUT]], i64 20
+; CHECK-NEXT: store float [[DOTUNPACK2_UNPACK15]], ptr [[VOUT_REPACK17_REPACK29]], align 4
+; CHECK-NEXT: [[VOUT_REPACK19:%.*]] = getelementptr inbounds nuw i8, ptr [[VOUT]], i64 24
+; CHECK-NEXT: store i32 [[DOTUNPACK4]], ptr [[VOUT_REPACK19]], align 8
+; CHECK-NEXT: [[VOUT_REPACK21:%.*]] = getelementptr inbounds nuw i8, ptr [[VOUT]], i64 28
+; CHECK-NEXT: store i32 [[DOTUNPACK6]], ptr [[VOUT_REPACK21]], align 4
+; CHECK-NEXT: ret void
+;
+ %.unpack.unpack = load float, ptr %v, align 16
+ %.unpack.elt7 = getelementptr inbounds nuw i8, ptr %v, i64 4
+ %.unpack.unpack8 = load float, ptr %.unpack.elt7, align 4
+ %.unpack.elt9 = getelementptr inbounds nuw i8, ptr %v, i64 8
+ %.unpack.unpack10 = load float, ptr %.unpack.elt9, align 8
+ %.elt1 = getelementptr inbounds nuw i8, ptr %v, i64 12
+ %.unpack2.unpack = load float, ptr %.elt1, align 4
+ %.unpack2.elt12 = getelementptr inbounds nuw i8, ptr %v, i64 16
+ %.unpack2.unpack13 = load float, ptr %.unpack2.elt12, align 4
+ %.unpack2.elt14 = getelementptr inbounds nuw i8, ptr %v, i64 20
+ %.unpack2.unpack15 = load float, ptr %.unpack2.elt14, align 4
+ %.elt3 = getelementptr inbounds nuw i8, ptr %v, i64 24
+ %.unpack4 = load i32, ptr %.elt3, align 8
+ %.elt5 = getelementptr inbounds nuw i8, ptr %v, i64 28
+ %.unpack6 = load i32, ptr %.elt5, align 4
+ store float %.unpack.unpack, ptr %vout, align 16
+ %vout.repack23 = getelementptr inbounds nuw i8, ptr %vout, i64 4
+ store float %.unpack.unpack8, ptr %vout.repack23, align 4
+ %vout.repack25 = getelementptr inbounds nuw i8, ptr %vout, i64 8
+ store float %.unpack.unpack10, ptr %vout.repack25, align 8
+ %vout.repack17 = getelementptr inbounds nuw i8, ptr %vout, i64 12
+ store float %.unpack2.unpack, ptr %vout.repack17, align 4
+ %vout.repack17.repack27 = getelementptr inbounds nuw i8, ptr %vout, i64 16
+ store float %.unpack2.unpack13, ptr %vout.repack17.repack27, align 4
+ %vout.repack17.repack29 = getelementptr inbounds nuw i8, ptr %vout, i64 20
+ store float %.unpack2.unpack15, ptr %vout.repack17.repack29, align 4
+ %vout.repack19 = getelementptr inbounds nuw i8, ptr %vout, i64 24
+ store i32 %.unpack4, ptr %vout.repack19, align 8
+ %vout.repack21 = getelementptr inbounds nuw i8, ptr %vout, i64 28
+ store i32 %.unpack6, ptr %vout.repack21, align 4
+ ret void
+}
+
+; ------------------------------------------------------------------------------
+; Test that alignment is not propagated from a source that does not dominate the destination
+; ------------------------------------------------------------------------------
+
+define void @no_prop_align(ptr %v, ptr %vout, i1 %cond) {
+; CHECK-LABEL: define void @no_prop_align(
+; CHECK-SAME: ptr [[V:%.*]], ptr [[VOUT:%.*]], i1 [[COND:%.*]]) {
+; CHECK-NEXT: br i1 [[COND]], label %[[BRANCH1:.*]], label %[[BRANCH2:.*]]
+; CHECK: [[BRANCH1]]:
+; CHECK-NEXT: [[DOTUNPACK_UNPACK:%.*]] = load float, ptr [[V]], align 16
+; CHECK-NEXT: [[DOTUNPACK_ELT7:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 4
+; CHECK-NEXT: [[DOTUNPACK_UNPACK8:%.*]] = load float, ptr [[DOTUNPACK_ELT7]], align 4
+; CHECK-NEXT: [[DOTUNPACK_ELT9:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 8
+; CHECK-NEXT: [[DOTUNPACK_UNPACK10:%.*]] = load float, ptr [[DOTUNPACK_ELT9]], align 8
+; CHECK-NEXT: [[DOTELT1:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 12
+; CHECK-NEXT: [[DOTUNPACK2_UNPACK:%.*]] = load float, ptr [[DOTELT1]], align 4
+; CHECK-NEXT: br label %[[END:.*]]
+; CHECK: [[BRANCH2]]:
+; CHECK-NEXT: [[DOTUNPACK2_ELT12:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 16
+; CHECK-NEXT: [[DOTUNPACK2_UNPACK13:%.*]] = load float, ptr [[DOTUNPACK2_ELT12]], align 4
+; CHECK-NEXT: [[DOTUNPACK2_ELT14:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 20
+; CHECK-NEXT: [[DOTUNPACK2_UNPACK15:%.*]] = load float, ptr [[DOTUNPACK2_ELT14]], align 4
+; CHECK-NEXT: [[DOTELT3:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 24
+; CHECK-NEXT: [[DOTUNPACK4:%.*]] = load i32, ptr [[DOTELT3]], align 8
+; CHECK-NEXT: [[DOTELT5:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 28
+; CHECK-NEXT: [[DOTUNPACK6:%.*]] = load i32, ptr [[DOTELT5]], align 4
+; CHECK-NEXT: br label %[[END]]
+; CHECK: [[END]]:
+; CHECK-NEXT: ret void
+;
+ br i1 %cond, label %branch1, label %branch2
+
+branch1:
+ %.unpack.unpack = load float, ptr %v, align 16
+ %.unpack.elt7 = getelementptr inbounds nuw i8, ptr %v, i64 4
+ %.unpack.unpack8 = load float, ptr %.unpack.elt7, align 4
+ %.unpack.elt9 = getelementptr inbounds nuw i8, ptr %v, i64 8
+ %.unpack.unpack10 = load float, ptr %.unpack.elt9, align 8
+ %.elt1 = getelementptr inbounds nuw i8, ptr %v, i64 12
+ %.unpack2.unpack = load float, ptr %.elt1, align 4
+ br label %end
+
+branch2:
+ %.unpack2.elt12 = getelementptr inbounds nuw i8, ptr %v, i64 16
+ %.unpack2.unpack13 = load float, ptr %.unpack2.elt12, align 4
+ %.unpack2.elt14 = getelementptr inbounds nuw i8, ptr %v, i64 20
+ %.unpack2.unpack15 = load float, ptr %.unpack2.elt14, align 4
+ %.elt3 = getelementptr inbounds nuw i8, ptr %v, i64 24
+ %.unpack4 = load i32, ptr %.elt3, align 8
+ %.elt5 = getelementptr inbounds nuw i8, ptr %v, i64 28
+ %.unpack6 = load i32, ptr %.elt5, align 4
+ br label %end
+
+end:
+ ret void
+}
+
+; ------------------------------------------------------------------------------
+; Test that we can propagate to/from negative offset GEPs
+; ------------------------------------------------------------------------------
+
+define void @prop_align_negative_offset(ptr %v) {
+; CHECK-LABEL: define void @prop_align_negative_offset(
+; CHECK-SAME: ptr [[V:%.*]]) {
+; CHECK-NEXT: [[LOADALIGNED:%.*]] = load float, ptr [[V]], align 16
+; CHECK-NEXT: [[GEPNEGATIVE:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 -16
+; CHECK-NEXT: [[LOADUNALIGNED:%.*]] = load float, ptr [[GEPNEGATIVE]], align 16
+; CHECK-NEXT: ret void
+;
+ %loadAligned= load float, ptr %v, align 16
+ %gepNegative = getelementptr inbounds nuw i8, ptr %v, i64 -16
+ %loadUnaligned = load float, ptr %gepNegative, align 4
+ ret void
+}
+
+define void @prop_align_negative_offset_2(ptr %v) {
+; CHECK-LABEL: define void @prop_align_negative_offset_2(
+; CHECK-SAME: ptr [[V:%.*]]) {
+; CHECK-NEXT: [[GEPNEGATIVE:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 -16
+; CHECK-NEXT: [[LOADALIGNED:%.*]] = load float, ptr [[GEPNEGATIVE]], align 16
+; CHECK-NEXT: [[LOADUNALIGNED:%.*]] = load float, ptr [[V]], align 16
+; CHECK-NEXT: ret void
+;
+ %gepNegative = getelementptr inbounds nuw i8, ptr %v, i64 -16
+ %loadAligned = load float, ptr %gepNegative, align 16
+ %loadUnaligned= load float, ptr %v, align 4
+ ret void
+}
+
+define void @prop_align_negative_offset_3(ptr %v) {
+; CHECK-LABEL: define void @prop_align_negative_offset_3(
+; CHECK-SAME: ptr [[V:%.*]]) {
+; CHECK-NEXT: [[LOADALIGNED:%.*]] = load float, ptr [[V]], align 16
+; CHECK-NEXT: [[GEPNEGATIVE:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 -8
+; CHECK-NEXT: [[LOADUNALIGNED:%.*]] = load float, ptr [[GEPNEGATIVE]], align 8
+; CHECK-NEXT: ret void
+;
+ %loadAligned= load float, ptr %v, align 16
+ %gepNegative = getelementptr inbounds nuw i8, ptr %v, i64 -8
+ %loadUnaligned = load float, ptr %gepNegative, align 4
+ ret void
+}
+
+define void @prop_align_negative_offset_4(ptr %v) {
+; CHECK-LABEL: define void @prop_align_negative_offset_4(
+; CHECK-SAME: ptr [[V:%.*]]) {
+; CHECK-NEXT: [[LOADALIGNED:%.*]] = load float, ptr [[V]], align 16
+; CHECK-NEXT: [[GEPNEGATIVE:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 -20
+; CHECK-NEXT: [[LOADUNALIGNED:%.*]] = load float, ptr [[GEPNEGATIVE]], align 4
+; CHECK-NEXT: ret void
+;
+ %loadAligned= load float, ptr %v, align 16
+ %gepNegative = getelementptr inbounds nuw i8, ptr %v, i64 -20
+ %loadUnaligned = load float, ptr %gepNegative, align 4
+ ret void
+}
diff --git a/llvm/test/Transforms/Inline/AArch64/sve-alloca-merge.ll b/llvm/test/Transforms/Inline/AArch64/sve-alloca-merge.ll
index c1375cb..54daf7c 100644
--- a/llvm/test/Transforms/Inline/AArch64/sve-alloca-merge.ll
+++ b/llvm/test/Transforms/Inline/AArch64/sve-alloca-merge.ll
@@ -14,8 +14,8 @@ entry:
define i64 @foo() {
; CHECK-LABEL: @foo(
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %{{.*}})
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %{{.*}})
+; CHECK: call void @llvm.lifetime.start.p0(ptr %{{.*}})
+; CHECK: call void @llvm.lifetime.end.p0(ptr %{{.*}})
entry:
%a = alloca <vscale x 2 x i64>, align 16
store <vscale x 2 x i64> zeroinitializer, ptr %a, align 16
diff --git a/llvm/test/Transforms/Inline/ML/bounds-checks-rewards.ll b/llvm/test/Transforms/Inline/ML/bounds-checks-rewards.ll
index 0cd61c8..c5842a1 100644
--- a/llvm/test/Transforms/Inline/ML/bounds-checks-rewards.ll
+++ b/llvm/test/Transforms/Inline/ML/bounds-checks-rewards.ll
@@ -13,20 +13,20 @@
; RUN: %python %S/../../../../lib/Analysis/models/saved-model-to-tflite.py %t_savedmodel %t
;
; When the bounds are very wide ("no bounds"), all inlinings happen.
-; RUN: opt -passes=scc-oz-module-inliner -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -ml-inliner-model-under-training=%t -training-log=%t1 -enable-ml-inliner=development -ml-advisor-size-increase-threshold=10.0 -S < %s | FileCheck %s --check-prefixes=NOBOUNDS-OUT,CHECK
+; RUN: opt -passes=scc-oz-module-inliner -ml-inliner-model-under-training=%t -training-log=%t1 -enable-ml-inliner=development -ml-advisor-size-increase-threshold=10.0 -S < %s | FileCheck %s --check-prefixes=NOBOUNDS-OUT,CHECK
; RUN: %python %S/../../../../lib/Analysis/models/log_reader.py %t1 | FileCheck %s --check-prefix=NOBOUNDS
;
; When the bounds are very restrictive, the first inlining happens but it's
; considered as "bad" (since it trips over the bounds) and its reward is a
; penalty. However, the mandatory inlining, which is considered next, happens.
; No other inlinings happened.
-; RUN: opt -passes=scc-oz-module-inliner -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -ml-inliner-model-under-training=%t -training-log=%t2 -enable-ml-inliner=development -ml-advisor-size-increase-threshold=1.0 -S < %s | FileCheck %s --check-prefixes=BOUNDS-OUT,CHECK
+; RUN: opt -passes=scc-oz-module-inliner -ml-inliner-model-under-training=%t -training-log=%t2 -enable-ml-inliner=development -ml-advisor-size-increase-threshold=1.0 -S < %s | FileCheck %s --check-prefixes=BOUNDS-OUT,CHECK
; RUN: %python %S/../../../../lib/Analysis/models/log_reader.py %t2 | FileCheck %s --check-prefix=BOUNDS
;
; With more restrictive bounds, the first inlining happens and is OK. The
; mandatory inlining happens next, and it trips over the bounds, which then
; forces no further inlinings.
-; RUN: opt -passes=scc-oz-module-inliner -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -ml-inliner-model-under-training=%t -training-log=%t3 -enable-ml-inliner=development -ml-advisor-size-increase-threshold=1.1 -S < %s | FileCheck %s --check-prefixes=RELAXED-BOUNDS-OUT,CHECK
+; RUN: opt -passes=scc-oz-module-inliner -ml-inliner-model-under-training=%t -training-log=%t3 -enable-ml-inliner=development -ml-advisor-size-increase-threshold=1.2 -S < %s | FileCheck %s --check-prefixes=RELAXED-BOUNDS-OUT,CHECK
; RUN: %python %S/../../../../lib/Analysis/models/log_reader.py %t3 | FileCheck %s --check-prefix=RELAXED-BOUNDS
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
@@ -52,15 +52,16 @@ define i64 @top() {
}
attributes #0 = { alwaysinline }
; NOBOUNDS: observation: 0
-; NOBOUNDS: delta_size: 6
-; RELAXED-BOUNDS: delta_size: 6
-; BOUNDS: delta_size: 2147483647
+; NOBOUNDS: inlining_decision: 1
+; RELAXED-BOUNDS: inlining_decision: 1
+; BOUNDS: inlining_decision: 1
; NOBOUNDS: observation: 1
-; BOUNDS-NOT: observation
-; RELAXED-BOUNDS-NOT: observation
-; NOBOUNDS: delta_size: -11
+; BOUNDS-NOT: observation: 1
+; RELAXED-BOUNDS: observation: 1
+; NOBOUNDS: inlining_decision: 1
; NOBOUNDS: observation: 2
-; NOBOUNDS: delta_size: 4
+; NOBOUNDS: inlining_decision
+; RELAXED-BOUNDS-NOT: observation: 2
; CHECK-LABEL: @top
; must_be_inlined must always be inlined, so we won't find a call to it in @top()
diff --git a/llvm/test/Transforms/Inline/ML/development-training-log.ll b/llvm/test/Transforms/Inline/ML/development-training-log.ll
index b53b6bc..c8b5198 100644
--- a/llvm/test/Transforms/Inline/ML/development-training-log.ll
+++ b/llvm/test/Transforms/Inline/ML/development-training-log.ll
@@ -5,12 +5,6 @@
; RUN: %python %S/../../../../lib/Analysis/models/gen-inline-oz-test-model.py %t_savedmodel
; RUN: %python %S/../../../../lib/Analysis/models/saved-model-to-tflite.py %t_savedmodel %t
;
-; RUN: opt -enable-ml-inliner=development -passes=scc-oz-module-inliner -training-log=%t1 -ml-inliner-model-under-training=%t -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -S < %s
-; RUN: %python %S/../../../../lib/Analysis/models/log_reader.py %t1 | FileCheck %s
-; RUN: opt -enable-ml-inliner=development -passes=scc-oz-module-inliner -training-log=%t2 -ml-inliner-model-under-training=%t -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -ml-inliner-output-spec-override=%S/Inputs/test_output_spec.json -S < %s
-; RUN: %python %S/../../../../lib/Analysis/models/log_reader.py %t2 | FileCheck %s --check-prefixes=EXTRA-OUTPUTS,CHECK
-; RUN: opt -enable-ml-inliner=development -passes=scc-oz-module-inliner -training-log=%t3 -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -S < %s
-; RUN: %python %S/../../../../lib/Analysis/models/log_reader.py %t3 | FileCheck %s
; RUN: opt -enable-ml-inliner=development -passes=scc-oz-module-inliner -training-log=%t4 -ml-inliner-model-under-training=%t -S < %s
; RUN: %python %S/../../../../lib/Analysis/models/log_reader.py %t4 | FileCheck %s --check-prefix=NOREWARD
; RUN: opt -enable-ml-inliner=development -passes=scc-oz-module-inliner -training-log=%t5 -S < %s
diff --git a/llvm/test/Transforms/Inline/ML/size-estimator-default.ll b/llvm/test/Transforms/Inline/ML/size-estimator-default.ll
deleted file mode 100644
index c9e5978..0000000
--- a/llvm/test/Transforms/Inline/ML/size-estimator-default.ll
+++ /dev/null
@@ -1,4 +0,0 @@
-; REQUIRES: !have_tflite
-; RUN: opt -passes='print<inliner-size-estimator>' -S < %S/Inputs/size-estimator.ll 2>&1 | FileCheck %s
-
-; CHECK: [InlineSizeEstimatorAnalysis] size estimate for branches: None
\ No newline at end of file
diff --git a/llvm/test/Transforms/Inline/ML/size-estimator-training.ll b/llvm/test/Transforms/Inline/ML/size-estimator-training.ll
deleted file mode 100644
index 462cc64..0000000
--- a/llvm/test/Transforms/Inline/ML/size-estimator-training.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; REQUIRES: have_tflite
-; RUN: opt -passes='print<inliner-size-estimator>' -S < %S/Inputs/size-estimator.ll 2>&1 | FileCheck %s --check-prefix=DEFAULT
-; RUN: opt -passes='print<inliner-size-estimator>' -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -S < %S/Inputs/size-estimator.ll 2>&1 | FileCheck %s
-
-; DEFAULT: [InlineSizeEstimatorAnalysis] size estimate for branches: None
-; CHECK: [InlineSizeEstimatorAnalysis] size estimate for branches: 28
\ No newline at end of file
diff --git a/llvm/test/Transforms/Inline/ML/state-tracking-coro.ll b/llvm/test/Transforms/Inline/ML/state-tracking-coro.ll
index bbce9e4..c3f6dd7 100644
--- a/llvm/test/Transforms/Inline/ML/state-tracking-coro.ll
+++ b/llvm/test/Transforms/Inline/ML/state-tracking-coro.ll
@@ -31,10 +31,10 @@ entry:
await.ready:
%StrayCoroSave = call token @llvm.coro.save(ptr null)
%val = load i32, ptr %ref.tmp7
- call void @llvm.lifetime.start.p0(i64 4, ptr %testval)
+ call void @llvm.lifetime.start.p0(ptr %testval)
%test = load i32, ptr %testval
call void @print(i32 %test)
- call void @llvm.lifetime.end.p0(i64 4, ptr %testval)
+ call void @llvm.lifetime.end.p0(ptr %testval)
call void @print(i32 %val)
br label %exit
exit:
@@ -54,5 +54,5 @@ declare i8 @llvm.coro.suspend(token, i1) #3
declare void @"\01??3@YAXPEAX@Z"(ptr) local_unnamed_addr #10
declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2
declare i1 @llvm.coro.end(ptr, i1) #3
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
diff --git a/llvm/test/Transforms/Inline/SystemZ/inline-target-attr.ll b/llvm/test/Transforms/Inline/SystemZ/inline-target-attr.ll
index b5c4f42..71b463b 100644
--- a/llvm/test/Transforms/Inline/SystemZ/inline-target-attr.ll
+++ b/llvm/test/Transforms/Inline/SystemZ/inline-target-attr.ll
@@ -12,28 +12,28 @@ entry:
declare i32 @baz(...) #0
-define i32 @bar() #1 {
+define i32 @features_subset() #1 {
entry:
%call = call i32 @foo()
ret i32 %call
-; CHECK-LABEL: bar
-; CHECK: call i32 @foo()
+; CHECK-LABEL: features_subset
+; CHECK: call i32 (...) @baz()
}
-define i32 @qux() #0 {
+define i32 @features_equal() #0 {
entry:
%call = call i32 @foo()
ret i32 %call
-; CHECK-LABEL: qux
+; CHECK-LABEL: features_equal
; CHECK: call i32 (...) @baz()
}
-define i32 @quux() #2 {
+define i32 @features_different() #2 {
entry:
- %call = call i32 @bar()
+ %call = call i32 @foo()
ret i32 %call
-; CHECK-LABEL: quux
-; CHECK: call i32 @bar()
+; CHECK-LABEL: features_different
+; CHECK: call i32 @foo()
}
diff --git a/llvm/test/Transforms/Inline/access-attributes-prop.ll b/llvm/test/Transforms/Inline/access-attributes-prop.ll
index 28fa44e..fed2590 100644
--- a/llvm/test/Transforms/Inline/access-attributes-prop.ll
+++ b/llvm/test/Transforms/Inline/access-attributes-prop.ll
@@ -410,9 +410,9 @@ define void @prop_fn_decl_fail_alloca(ptr %p) {
; CHECK-LABEL: define {{[^@]+}}@prop_fn_decl_fail_alloca
; CHECK-SAME: (ptr [[P:%.*]]) {
; CHECK-NEXT: [[A_I:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_I]])
; CHECK-NEXT: call void @bar2(ptr [[P]], ptr [[A_I]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_I]])
; CHECK-NEXT: call void @bar1(ptr [[P]])
; CHECK-NEXT: ret void
;
@@ -425,9 +425,9 @@ define void @prop_cb_def_wr_fail_alloca(ptr %p) {
; CHECK-LABEL: define {{[^@]+}}@prop_cb_def_wr_fail_alloca
; CHECK-SAME: (ptr [[P:%.*]]) {
; CHECK-NEXT: [[A_I:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_I]])
; CHECK-NEXT: call void @bar2(ptr [[P]], ptr [[A_I]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_I]])
; CHECK-NEXT: call void @bar1(ptr [[P]])
; CHECK-NEXT: ret void
;
@@ -440,10 +440,10 @@ define void @prop_fn_decl_partially_okay_alloca(ptr %p) {
; CHECK-LABEL: define {{[^@]+}}@prop_fn_decl_partially_okay_alloca
; CHECK-SAME: (ptr [[P:%.*]]) {
; CHECK-NEXT: [[A_I:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_I]])
; CHECK-NEXT: call void @bar1(ptr [[P]])
; CHECK-NEXT: call void @bar2(ptr [[P]], ptr [[A_I]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_I]])
; CHECK-NEXT: call void @bar1(ptr [[P]])
; CHECK-NEXT: ret void
;
@@ -456,10 +456,10 @@ define void @prop_cb_def_wr_partially_okay_alloca(ptr %p) {
; CHECK-LABEL: define {{[^@]+}}@prop_cb_def_wr_partially_okay_alloca
; CHECK-SAME: (ptr [[P:%.*]]) {
; CHECK-NEXT: [[A_I:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_I]])
; CHECK-NEXT: call void @bar1(ptr [[P]])
; CHECK-NEXT: call void @bar2(ptr [[P]], ptr [[A_I]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_I]])
; CHECK-NEXT: call void @bar1(ptr [[P]])
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/Inline/byval-align.ll b/llvm/test/Transforms/Inline/byval-align.ll
index 0b135aa..a23f364 100644
--- a/llvm/test/Transforms/Inline/byval-align.ll
+++ b/llvm/test/Transforms/Inline/byval-align.ll
@@ -28,13 +28,13 @@ define void @byval_caller(ptr nocapture align 64 %a, ptr %b) #0 {
; CHECK-SAME: (ptr align 64 captures(none) [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A1:%.*]] = alloca float, align 128
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A1]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 128 [[A1]], ptr align 128 [[A]], i64 4, i1 false)
; CHECK-NEXT: [[LOAD_I:%.*]] = load float, ptr [[A1]], align 4
; CHECK-NEXT: [[B_IDX_I:%.*]] = getelementptr inbounds float, ptr [[B]], i64 8
; CHECK-NEXT: [[ADD_I:%.*]] = fadd float [[LOAD_I]], 2.000000e+00
; CHECK-NEXT: store float [[ADD_I]], ptr [[B_IDX_I]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A1]])
; CHECK-NEXT: [[CALLER_LOAD:%.*]] = load float, ptr [[B]], align 4
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 7
; CHECK-NEXT: store float [[CALLER_LOAD]], ptr [[ARRAYIDX]], align 4
diff --git a/llvm/test/Transforms/Inline/byval-tail-call.ll b/llvm/test/Transforms/Inline/byval-tail-call.ll
index 808104c..f8fd4a6 100644
--- a/llvm/test/Transforms/Inline/byval-tail-call.ll
+++ b/llvm/test/Transforms/Inline/byval-tail-call.ll
@@ -22,11 +22,11 @@ define void @bar(ptr byval(i32) %x) {
define void @foo(ptr %x) {
; CHECK-LABEL: @foo(
; CHECK-NEXT: [[X1:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[X1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X1]])
; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[X:%.*]], align 1
; CHECK-NEXT: store i32 [[TMP2]], ptr [[X1]], align 4
; CHECK-NEXT: call void @ext(ptr nonnull [[X1]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[X1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X1]])
; CHECK-NEXT: ret void
;
call void @bar(ptr byval(i32) %x)
@@ -42,12 +42,12 @@ define internal void @qux(ptr byval(i32) %x) {
define void @frob(ptr %x) {
; CHECK-LABEL: @frob(
; CHECK-NEXT: [[X1:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[X1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X1]])
; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[X:%.*]], align 1
; CHECK-NEXT: store i32 [[TMP2]], ptr [[X1]], align 4
; CHECK-NEXT: call void @ext(ptr nonnull [[X1]])
; CHECK-NEXT: tail call void @ext(ptr null)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[X1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X1]])
; CHECK-NEXT: ret void
;
tail call void @qux(ptr byval(i32) %x)
@@ -71,11 +71,11 @@ define void @bar2(ptr byval(i32) %x) {
define void @foobar(ptr %x) {
; CHECK-LABEL: @foobar(
; CHECK-NEXT: [[X1:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[X1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X1]])
; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[X:%.*]], align 1
; CHECK-NEXT: store i32 [[TMP2]], ptr [[X1]], align 4
; CHECK-NEXT: tail call void @ext2(ptr nonnull byval(i32) [[X1]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[X1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X1]])
; CHECK-NEXT: ret void
;
tail call void @bar2(ptr byval(i32) %x)
@@ -85,9 +85,9 @@ define void @foobar(ptr %x) {
define void @barfoo() {
; CHECK-LABEL: @barfoo(
; CHECK-NEXT: [[X1:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[X1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X1]])
; CHECK-NEXT: tail call void @ext2(ptr nonnull byval(i32) [[X1]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[X1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X1]])
; CHECK-NEXT: ret void
;
%x = alloca i32
diff --git a/llvm/test/Transforms/Inline/byval-with-non-alloca-addrspace.ll b/llvm/test/Transforms/Inline/byval-with-non-alloca-addrspace.ll
index 1d1cb45..e79ac66 100644
--- a/llvm/test/Transforms/Inline/byval-with-non-alloca-addrspace.ll
+++ b/llvm/test/Transforms/Inline/byval-with-non-alloca-addrspace.ll
@@ -26,11 +26,11 @@ define i64 @foo(ptr %arg) {
; CHECK-LABEL: define i64 @foo(
; CHECK-SAME: ptr [[ARG:%.*]]) {
; CHECK-NEXT: [[ARG1:%.*]] = alloca [[STRUCT:%.*]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[ARG1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ARG1]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[ARG1]], ptr align 8 [[ARG]], i64 16, i1 false)
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr [[STRUCT]], ptr [[ARG1]], i64 0, i32 1
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[ARG1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ARG1]])
; CHECK-NEXT: ret i64 0
;
%1 = call i64 @bar(ptr byval(%struct) align 8 %arg)
diff --git a/llvm/test/Transforms/Inline/byval.ll b/llvm/test/Transforms/Inline/byval.ll
index b4a19c5..c945d7f 100644
--- a/llvm/test/Transforms/Inline/byval.ll
+++ b/llvm/test/Transforms/Inline/byval.ll
@@ -35,12 +35,12 @@ define i32 @test1() nounwind {
; CHECK-NEXT: store i32 1, ptr [[TMP1]], align 8
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr [[STRUCT_SS]], ptr [[S]], i32 0, i32 1
; CHECK-NEXT: store i64 2, ptr [[TMP4]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr [[S1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[S1]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[S1]], ptr [[S]], i64 12, i1 false)
; CHECK-NEXT: [[TMP1_I:%.*]] = load i32, ptr [[S1]], align 4
; CHECK-NEXT: [[TMP2_I:%.*]] = add i32 [[TMP1_I]], 1
; CHECK-NEXT: store i32 [[TMP2_I]], ptr [[S1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr [[S1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[S1]])
; CHECK-NEXT: ret i32 0
;
entry:
@@ -104,10 +104,10 @@ define void @test3() nounwind {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[S1:%.*]] = alloca [[STRUCT_SS:%.*]], align 64
; CHECK-NEXT: [[S:%.*]] = alloca [[STRUCT_SS]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr [[S1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[S1]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 64 [[S1]], ptr align 64 [[S]], i64 12, i1 false)
; CHECK-NEXT: call void @g3(ptr align 64 [[S1]]) #[[ATTR0]]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr [[S1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[S1]])
; CHECK-NEXT: ret void
;
entry:
@@ -157,12 +157,12 @@ define i32 @test5() {
; CHECK-LABEL: define i32 @test5() {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[B:%.*]] = alloca [[STRUCT_S0:%.*]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[B]], ptr align 4 @b, i64 4, i1 false)
; CHECK-NEXT: store i32 0, ptr @b, align 4
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[B]], align 4
; CHECK-NEXT: store i32 [[TMP0]], ptr @a, align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B]])
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @a, align 4
; CHECK-NEXT: ret i32 [[TMP1]]
;
diff --git a/llvm/test/Transforms/Inline/callbr.ll b/llvm/test/Transforms/Inline/callbr.ll
index 1607700..57e92bb 100644
--- a/llvm/test/Transforms/Inline/callbr.ll
+++ b/llvm/test/Transforms/Inline/callbr.ll
@@ -10,8 +10,8 @@ define dso_local i32 @main() {
; CHECK-NEXT: [[I1_I:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
; CHECK-NEXT: store i32 0, ptr [[I]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I_I]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I1_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I1_I]])
; CHECK-NEXT: store i32 0, ptr [[I1_I]], align 4
; CHECK-NEXT: [[I2_I:%.*]] = load i32, ptr [[I1_I]], align 4
; CHECK-NEXT: callbr void asm sideeffect "", "r,!i,!i,~{dirflag},~{fpsr},~{flags}"(i32 [[I2_I]])
@@ -27,8 +27,8 @@ define dso_local i32 @main() {
; CHECK-NEXT: br label [[T32_EXIT]]
; CHECK: t32.exit:
; CHECK-NEXT: [[I7_I:%.*]] = load i32, ptr [[I_I]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I_I]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I1_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I1_I]])
; CHECK-NEXT: ret i32 [[I7_I]]
;
bb:
diff --git a/llvm/test/Transforms/Inline/devirtualize-4.ll b/llvm/test/Transforms/Inline/devirtualize-4.ll
index d29360f..f96b5a9 100644
--- a/llvm/test/Transforms/Inline/devirtualize-4.ll
+++ b/llvm/test/Transforms/Inline/devirtualize-4.ll
@@ -48,14 +48,14 @@
define dso_local void @_Z4Testv() local_unnamed_addr {
entry:
%o = alloca %class.Impl, align 8
- call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %o)
+ call void @llvm.lifetime.start.p0(ptr nonnull %o)
call void @_ZN4ImplC2Ev(ptr nonnull %o)
call fastcc void @_ZL11IndirectRunR9Interface(ptr nonnull dereferenceable(8) %o)
- call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %o)
+ call void @llvm.lifetime.end.p0(ptr nonnull %o)
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
define linkonce_odr dso_local void @_ZN4ImplC2Ev(ptr %this) unnamed_addr align 2 {
entry:
@@ -74,7 +74,7 @@ entry:
ret void
}
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
define linkonce_odr dso_local void @_ZN9InterfaceC2Ev(ptr %this) unnamed_addr align 2 {
entry:
@@ -85,10 +85,10 @@ entry:
define linkonce_odr dso_local void @_ZN4Impl3RunEv(ptr %this) unnamed_addr align 2 {
entry:
%ref.tmp = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %ref.tmp)
+ call void @llvm.lifetime.start.p0(ptr nonnull %ref.tmp)
store ptr %this, ptr %ref.tmp, align 8
call void @_Z13DoNotOptimizeIP4ImplEvRKT_(ptr nonnull dereferenceable(8) %ref.tmp)
- call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %ref.tmp)
+ call void @llvm.lifetime.end.p0(ptr nonnull %ref.tmp)
ret void
}
@@ -160,10 +160,10 @@ memptr.end: ; preds = %memptr.nonvirtual,
define i32 @_Z2g1v() {
entry:
%a = alloca %struct.A, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %a)
+ call void @llvm.lifetime.start.p0(ptr nonnull %a)
call void @_ZN1AC1Ev(ptr nonnull %a)
%call = call i32 @_Z1fP1AMS_FivE(ptr nonnull %a, i64 1, i64 0)
- call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
ret i32 %call
}
@@ -176,10 +176,10 @@ entry:
define i32 @_Z2g2v() {
entry:
%a = alloca %struct.A, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %a)
+ call void @llvm.lifetime.start.p0(ptr nonnull %a)
call void @_ZN1AC1Ev(ptr nonnull %a)
%call = call i32 @_Z1fP1AMS_FivE(ptr nonnull %a, i64 9, i64 0)
- call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
ret i32 %call
}
diff --git a/llvm/test/Transforms/Inline/dynamic-alloca-simplified-large.ll b/llvm/test/Transforms/Inline/dynamic-alloca-simplified-large.ll
index 9b293d39..aad192a 100644
--- a/llvm/test/Transforms/Inline/dynamic-alloca-simplified-large.ll
+++ b/llvm/test/Transforms/Inline/dynamic-alloca-simplified-large.ll
@@ -54,9 +54,9 @@ define void @caller2_below_threshold(ptr %p1, i1 %b) {
; CHECK-NEXT: br i1 [[COND]], label [[EXIT:%.*]], label [[SPLIT:%.*]]
; CHECK: split:
; CHECK-NEXT: [[SAVEDSTACK:%.*]] = call ptr @llvm.stacksave.p0()
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 60000, ptr [[VLA_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VLA_I]])
; CHECK-NEXT: call void @extern_call(ptr nonnull [[VLA_I]]) #[[ATTR3]]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 60000, ptr [[VLA_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VLA_I]])
; CHECK-NEXT: call void @llvm.stackrestore.p0(ptr [[SAVEDSTACK]])
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
diff --git a/llvm/test/Transforms/Inline/inline-deferred-instsimplify.ll b/llvm/test/Transforms/Inline/inline-deferred-instsimplify.ll
index c74351b..02f5774 100644
--- a/llvm/test/Transforms/Inline/inline-deferred-instsimplify.ll
+++ b/llvm/test/Transforms/Inline/inline-deferred-instsimplify.ll
@@ -52,7 +52,7 @@ return: ; preds = %check_pointers_are_
define i32 @main() {
; CHECK-LABEL: define i32 @main() {
; CHECK-NEXT: [[G_VAR:%.*]] = alloca [[STRUCT_A:%.*]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 20, ptr [[G_VAR]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[G_VAR]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[G_VAR]], ptr align 8 @g_var, i64 20, i1 false)
; CHECK-NEXT: [[VAL_I:%.*]] = load i32, ptr [[G_VAR]], align 8
; CHECK-NEXT: [[DOTNOT_I:%.*]] = icmp eq i32 [[VAL_I]], 0
@@ -68,7 +68,7 @@ define i32 @main() {
; CHECK-NEXT: call void @abort()
; CHECK-NEXT: unreachable
; CHECK: callee.exit:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 20, ptr [[G_VAR]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[G_VAR]])
; CHECK-NEXT: ret i32 0
;
call void @callee(ptr noundef byval(%struct.a) align 8 @g_var)
diff --git a/llvm/test/Transforms/Inline/inline-tail.ll b/llvm/test/Transforms/Inline/inline-tail.ll
index 0bfd056..b2bf3bb 100644
--- a/llvm/test/Transforms/Inline/inline-tail.ll
+++ b/llvm/test/Transforms/Inline/inline-tail.ll
@@ -64,7 +64,7 @@ define void @test_byval_a(ptr byval(i32) %p) {
; CHECK-LABEL: define void @test_byval_a
; CHECK-SAME: (ptr byval(i32) [[P:%.*]]) {
; CHECK-NEXT: [[P1:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[P1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[P1]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[P1]], ptr [[P]], i64 4, i1 false)
; CHECK-NEXT: musttail call void @test_byval_c(ptr byval(i32) [[P1]])
; CHECK-NEXT: ret void
@@ -87,7 +87,7 @@ define void @test_dynalloca_a(ptr byval(i32) %p, i32 %n) {
; CHECK-SAME: (ptr byval(i32) [[P:%.*]], i32 [[N:%.*]]) {
; CHECK-NEXT: [[P1:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[SAVEDSTACK:%.*]] = call ptr @llvm.stacksave.p0()
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[P1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[P1]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[P1]], ptr [[P]], i64 4, i1 false)
; CHECK-NEXT: [[BUF_I:%.*]] = alloca i8, i32 [[N]], align 1
; CHECK-NEXT: call void @escape(ptr [[BUF_I]])
diff --git a/llvm/test/Transforms/Inline/inlined-mustprogress-loop-metadata.ll b/llvm/test/Transforms/Inline/inlined-mustprogress-loop-metadata.ll
index 4e13ff4..4ac4675 100644
--- a/llvm/test/Transforms/Inline/inlined-mustprogress-loop-metadata.ll
+++ b/llvm/test/Transforms/Inline/inlined-mustprogress-loop-metadata.ll
@@ -312,9 +312,9 @@ define void @caller_multiple(i32 %a, i32 %b) #1 {
; CHECK-NEXT: store i32 [[INC]], ptr [[I]], align 4
; CHECK-NEXT: br label %[[FOR_COND1]]
; CHECK: [[FOR_END4]]:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A_ADDR_I]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[B_ADDR_I]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_ADDR_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B_ADDR_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I_I]])
; CHECK-NEXT: store i32 0, ptr [[A_ADDR_I]], align 4
; CHECK-NEXT: store i32 5, ptr [[B_ADDR_I]], align 4
; CHECK-NEXT: br label %[[FOR_COND_I:.*]]
@@ -526,9 +526,9 @@ define void @caller_nested(i32 %a, i32 %b) #1 {
; CHECK-NEXT: store i32 [[INC14]], ptr [[I9]], align 4
; CHECK-NEXT: br label %[[FOR_COND10]]
; CHECK: [[FOR_END15]]:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A_ADDR_I]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[B_ADDR_I]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_ADDR_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B_ADDR_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I_I]])
; CHECK-NEXT: store i32 0, ptr [[A_ADDR_I]], align 4
; CHECK-NEXT: store i32 5, ptr [[B_ADDR_I]], align 4
; CHECK-NEXT: br label %[[FOR_COND_I:.*]]
diff --git a/llvm/test/Transforms/Inline/lifetime-no-datalayout.ll b/llvm/test/Transforms/Inline/lifetime-no-datalayout.ll
index 7438ef3..074550b 100644
--- a/llvm/test/Transforms/Inline/lifetime-no-datalayout.ll
+++ b/llvm/test/Transforms/Inline/lifetime-no-datalayout.ll
@@ -18,9 +18,9 @@ define void @helper() {
define void @test() {
; CHECK-LABEL: define void @test() {
; CHECK-NEXT: [[A_I:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[A_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_I]])
; CHECK-NEXT: call void @use(ptr [[A_I]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[A_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_I]])
; CHECK-NEXT: ret void
;
call void @helper()
diff --git a/llvm/test/Transforms/Inline/lifetime.ll b/llvm/test/Transforms/Inline/lifetime.ll
index 3ef5019..06b911d 100644
--- a/llvm/test/Transforms/Inline/lifetime.ll
+++ b/llvm/test/Transforms/Inline/lifetime.ll
@@ -2,21 +2,21 @@
; RUN: opt -passes=inline -S < %s | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
-declare void @llvm.lifetime.start.p0(i64, ptr)
-declare void @llvm.lifetime.end.p0(i64, ptr)
+declare void @llvm.lifetime.start.p0(ptr)
+declare void @llvm.lifetime.end.p0(ptr)
define void @helper_both_markers() {
; CHECK-LABEL: define void @helper_both_markers() {
; CHECK-NEXT: [[A:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[A]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
; CHECK-NEXT: ret void
;
%a = alloca i8
; Size in llvm.lifetime.start / llvm.lifetime.end differs from
; allocation size. We should use the former.
- call void @llvm.lifetime.start.p0(i64 2, ptr %a)
- call void @llvm.lifetime.end.p0(i64 2, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
ret void
}
@@ -24,10 +24,10 @@ define void @test_both_markers() {
; CHECK-LABEL: define void @test_both_markers() {
; CHECK-NEXT: [[A_I1:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[A_I:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[A_I]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[A_I]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[A_I1]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[A_I1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_I1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_I1]])
; CHECK-NEXT: ret void
;
call void @helper_both_markers()
@@ -54,12 +54,12 @@ define void @test_no_marker() {
; CHECK-LABEL: define void @test_no_marker() {
; CHECK-NEXT: [[A_I1:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[A_I:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[A_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_I]])
; CHECK-NEXT: call void @use(ptr [[A_I]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[A_I]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[A_I1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_I1]])
; CHECK-NEXT: call void @use(ptr [[A_I1]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[A_I1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_I1]])
; CHECK-NEXT: ret void
;
call void @helper_no_markers()
@@ -70,13 +70,13 @@ define void @test_no_marker() {
define void @helper_two_casts() {
; CHECK-LABEL: define void @helper_two_casts() {
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
; CHECK-NEXT: ret void
;
%a = alloca i32
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
- call void @llvm.lifetime.end.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
ret void
}
@@ -84,10 +84,10 @@ define void @test_two_casts() {
; CHECK-LABEL: define void @test_two_casts() {
; CHECK-NEXT: [[A_I1:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[A_I:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A_I]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A_I]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A_I1]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A_I1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_I1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_I1]])
; CHECK-NEXT: ret void
;
call void @helper_two_casts()
@@ -109,9 +109,9 @@ define void @helper_arrays_alloca() {
define void @test_arrays_alloca() {
; CHECK-LABEL: define void @test_arrays_alloca() {
; CHECK-NEXT: [[A_I:%.*]] = alloca [10 x i32], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 40, ptr [[A_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_I]])
; CHECK-NEXT: call void @use(ptr [[A_I]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 40, ptr [[A_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_I]])
; CHECK-NEXT: ret void
;
call void @helper_arrays_alloca()
diff --git a/llvm/test/Transforms/Inline/no-inline-incompatible-gc.ll b/llvm/test/Transforms/Inline/no-inline-incompatible-gc.ll
index 531801d..2bded9c 100644
--- a/llvm/test/Transforms/Inline/no-inline-incompatible-gc.ll
+++ b/llvm/test/Transforms/Inline/no-inline-incompatible-gc.ll
@@ -16,12 +16,12 @@
define i32 @caller_no_gc() {
; CHECK-LABEL: define i32 @caller_no_gc() gc "example" {
; CHECK-NEXT: [[ROOT_I:%.*]] = alloca ptr, align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[ROOT_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ROOT_I]])
; CHECK-NEXT: call void @llvm.gcroot(ptr [[ROOT_I]], ptr null)
; CHECK-NEXT: [[OBJ_I:%.*]] = call ptr @h()
; CHECK-NEXT: store ptr [[OBJ_I]], ptr [[ROOT_I]], align 8
; CHECK-NEXT: [[LENGTH_I:%.*]] = load i32, ptr [[OBJ_I]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[ROOT_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ROOT_I]])
; CHECK-NEXT: ret i32 [[LENGTH_I]]
;
%x = call i32 @callee_with_gc()
@@ -32,12 +32,12 @@ define i32 @caller_no_gc() {
define i32 @caller_same_gc() gc "example" {
; CHECK-LABEL: define i32 @caller_same_gc() gc "example" {
; CHECK-NEXT: [[ROOT_I:%.*]] = alloca ptr, align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[ROOT_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ROOT_I]])
; CHECK-NEXT: call void @llvm.gcroot(ptr [[ROOT_I]], ptr null)
; CHECK-NEXT: [[OBJ_I:%.*]] = call ptr @h()
; CHECK-NEXT: store ptr [[OBJ_I]], ptr [[ROOT_I]], align 8
; CHECK-NEXT: [[LENGTH_I:%.*]] = load i32, ptr [[OBJ_I]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[ROOT_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ROOT_I]])
; CHECK-NEXT: ret i32 [[LENGTH_I]]
;
%x = call i32 @callee_with_gc()
@@ -97,12 +97,12 @@ define i32 @callee_with_other_gc() gc "other-example" {
define i32 @caller_inline_first_caller() {
; CHECK-LABEL: define i32 @caller_inline_first_caller() gc "example" {
; CHECK-NEXT: [[ROOT_I:%.*]] = alloca ptr, align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[ROOT_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ROOT_I]])
; CHECK-NEXT: call void @llvm.gcroot(ptr [[ROOT_I]], ptr null)
; CHECK-NEXT: [[OBJ_I:%.*]] = call ptr @h()
; CHECK-NEXT: store ptr [[OBJ_I]], ptr [[ROOT_I]], align 8
; CHECK-NEXT: [[LENGTH_I:%.*]] = load i32, ptr [[OBJ_I]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[ROOT_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ROOT_I]])
; CHECK-NEXT: [[Y:%.*]] = call i32 @callee_with_other_gc()
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[LENGTH_I]], [[Y]]
; CHECK-NEXT: ret i32 [[ADD]]
@@ -118,12 +118,12 @@ define i32 @caller_inline_first_caller() {
define i32 @caller_inline_second_caller() gc "example" {
; CHECK-LABEL: define i32 @caller_inline_second_caller() gc "example" {
; CHECK-NEXT: [[ROOT_I:%.*]] = alloca ptr, align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[ROOT_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ROOT_I]])
; CHECK-NEXT: call void @llvm.gcroot(ptr [[ROOT_I]], ptr null)
; CHECK-NEXT: [[OBJ_I:%.*]] = call ptr @h()
; CHECK-NEXT: store ptr [[OBJ_I]], ptr [[ROOT_I]], align 8
; CHECK-NEXT: [[LENGTH_I:%.*]] = load i32, ptr [[OBJ_I]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[ROOT_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ROOT_I]])
; CHECK-NEXT: [[Y:%.*]] = call i32 @callee_with_other_gc()
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[LENGTH_I]], [[Y]]
; CHECK-NEXT: ret i32 [[ADD]]
diff --git a/llvm/test/Transforms/Inline/noalias-calls-always.ll b/llvm/test/Transforms/Inline/noalias-calls-always.ll
index a80cd12..18a65b9 100644
--- a/llvm/test/Transforms/Inline/noalias-calls-always.ll
+++ b/llvm/test/Transforms/Inline/noalias-calls-always.ll
@@ -33,13 +33,13 @@ define void @foo(ptr nocapture %a, ptr nocapture readonly %c, ptr nocapture %b)
; CHECK-NEXT: [[L_I:%.*]] = alloca i8, i32 512, align 1
; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META0:![0-9]+]])
; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 512, ptr [[L_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[L_I]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[A:%.*]], ptr align 16 [[B:%.*]], i64 16, i1 false), !noalias [[META3]]
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[B]], ptr readonly align 16 [[C:%.*]], i64 16, i1 false), !noalias [[META0]]
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[A]], ptr readonly align 16 [[C]], i64 16, i1 false), !alias.scope [[META5:![0-9]+]]
; CHECK-NEXT: call void @hey(), !noalias [[META5]]
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[L_I]], ptr readonly align 16 [[C]], i64 16, i1 false), !noalias [[META0]]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 512, ptr [[L_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[L_I]])
; CHECK-NEXT: ret void
;
entry:
@@ -74,13 +74,13 @@ define void @foo_cs(ptr nocapture %a, ptr nocapture readonly %c, ptr nocapture %
; CHECK-NEXT: [[L_I:%.*]] = alloca i8, i32 512, align 1
; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]])
; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 512, ptr [[L_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[L_I]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[A:%.*]], ptr align 16 [[B:%.*]], i64 16, i1 false), !noalias [[META9]]
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[B]], ptr readonly align 16 [[C:%.*]], i64 16, i1 false), !noalias [[META6]]
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[A]], ptr readonly align 16 [[C]], i64 16, i1 false), !alias.scope [[META11:![0-9]+]]
; CHECK-NEXT: call void @hey(), !noalias [[META11]]
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[L_I]], ptr readonly align 16 [[C]], i64 16, i1 false), !noalias [[META6]]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 512, ptr [[L_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[L_I]])
; CHECK-NEXT: ret void
;
entry:
diff --git a/llvm/test/Transforms/Inline/noalias-calls.ll b/llvm/test/Transforms/Inline/noalias-calls.ll
index fdbad60..4673dae 100644
--- a/llvm/test/Transforms/Inline/noalias-calls.ll
+++ b/llvm/test/Transforms/Inline/noalias-calls.ll
@@ -36,13 +36,13 @@ define void @foo(ptr nocapture %a, ptr nocapture readonly %c, ptr nocapture %b)
; CHECK-NEXT: [[L_I:%.*]] = alloca i8, i32 512, align 1
; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META0:![0-9]+]])
; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 512, ptr [[L_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[L_I]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[A]], ptr align 16 [[B]], i64 16, i1 false), !noalias [[META3]]
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[B]], ptr readonly align 16 [[C]], i64 16, i1 false), !noalias [[META0]]
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[A]], ptr readonly align 16 [[C]], i64 16, i1 false), !alias.scope [[META5:![0-9]+]]
; CHECK-NEXT: call void @hey(), !noalias [[META5]]
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[L_I]], ptr readonly align 16 [[C]], i64 16, i1 false), !noalias [[META0]]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 512, ptr [[L_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[L_I]])
; CHECK-NEXT: ret void
;
entry:
@@ -79,13 +79,13 @@ define void @foo_cs(ptr nocapture %a, ptr nocapture readonly %c, ptr nocapture %
; CHECK-NEXT: [[L_I:%.*]] = alloca i8, i32 512, align 1
; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]])
; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 512, ptr [[L_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[L_I]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[A]], ptr align 16 [[B]], i64 16, i1 false), !noalias [[META9]]
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[B]], ptr readonly align 16 [[C]], i64 16, i1 false), !noalias [[META6]]
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[A]], ptr readonly align 16 [[C]], i64 16, i1 false), !alias.scope [[META11:![0-9]+]]
; CHECK-NEXT: call void @hey(), !noalias [[META11]]
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[L_I]], ptr readonly align 16 [[C]], i64 16, i1 false), !noalias [[META6]]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 512, ptr [[L_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[L_I]])
; CHECK-NEXT: ret void
;
entry:
diff --git a/llvm/test/Transforms/InstCombine/2025-08-06-shufflevector-bitcast-vector-of-pointers.ll b/llvm/test/Transforms/InstCombine/2025-08-06-shufflevector-bitcast-vector-of-pointers.ll
new file mode 100644
index 0000000..e778d92
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/2025-08-06-shufflevector-bitcast-vector-of-pointers.ll
@@ -0,0 +1,15 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt < %s -passes=instcombine -S | FileCheck %s
+
+; Make sure that we don't crash when optimizing shufflevector of <N x ptr> with <1 x i32> mask followed by bitcast of <1 x ptr> to ptr
+
+define ptr @test(<3 x ptr> %vptr) {
+; CHECK-LABEL: define ptr @test(
+; CHECK-SAME: <3 x ptr> [[VPTR:%.*]]) {
+; CHECK-NEXT: [[SV_EXTRACT:%.*]] = extractelement <3 x ptr> [[VPTR]], i64 0
+; CHECK-NEXT: ret ptr [[SV_EXTRACT]]
+;
+ %SV = shufflevector <3 x ptr> %vptr, <3 x ptr> zeroinitializer, <1 x i32> zeroinitializer
+ %BC = bitcast <1 x ptr> %SV to ptr
+ ret ptr %BC
+}
diff --git a/llvm/test/Transforms/InstCombine/assume_inevitable.ll b/llvm/test/Transforms/InstCombine/assume_inevitable.ll
index 2643c9b..5f27ff1 100644
--- a/llvm/test/Transforms/InstCombine/assume_inevitable.ll
+++ b/llvm/test/Transforms/InstCombine/assume_inevitable.ll
@@ -35,10 +35,10 @@ entry:
%dummy_eq = icmp ugt i32 %loadres, 42
tail call void @llvm.assume(i1 %dummy_eq)
- call void @llvm.lifetime.start.p0(i64 1, ptr %dummy)
+ call void @llvm.lifetime.start.p0(ptr %dummy)
%i = call ptr @llvm.invariant.start.p0(i64 1, ptr %dummy)
call void @llvm.invariant.end.p0(ptr %i, i64 1, ptr %dummy)
- call void @llvm.lifetime.end.p0(i64 1, ptr %dummy)
+ call void @llvm.lifetime.end.p0(ptr %dummy)
%m_a = call ptr @llvm.ptr.annotation.p0(ptr %m, ptr @.str, ptr @.str1, i32 2, ptr null)
%objsz = call i64 @llvm.objectsize.i64.p0(ptr %c, i1 false)
@@ -61,8 +61,8 @@ declare i64 @llvm.objectsize.i64.p0(ptr, i1)
declare i32 @llvm.annotation.i32(i32, ptr, ptr, i32)
declare ptr @llvm.ptr.annotation.p0(ptr, ptr, ptr, i32, ptr)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare ptr @llvm.invariant.start.p0(i64, ptr nocapture)
declare void @llvm.invariant.end.p0(ptr, i64, ptr nocapture)
diff --git a/llvm/test/Transforms/InstCombine/builtin-object-size-custom-dl.ll b/llvm/test/Transforms/InstCombine/builtin-object-size-custom-dl.ll
index fe8b321..93c4ae6 100644
--- a/llvm/test/Transforms/InstCombine/builtin-object-size-custom-dl.ll
+++ b/llvm/test/Transforms/InstCombine/builtin-object-size-custom-dl.ll
@@ -15,16 +15,16 @@ entry:
define i32 @objsize2_custom_idx() #0 {
entry:
%var = alloca %struct.V, align 4
- call void @llvm.lifetime.start.p0(i64 28, ptr %var) #3
+ call void @llvm.lifetime.start.p0(ptr %var) #3
%arrayidx = getelementptr inbounds [10 x i8], ptr %var, i64 0, i64 1
%0 = call i64 @llvm.objectsize.i64.p0(ptr %arrayidx, i1 false, i1 false, i1 false)
%conv = trunc i64 %0 to i32
- call void @llvm.lifetime.end.p0(i64 28, ptr %var) #3
+ call void @llvm.lifetime.end.p0(ptr %var) #3
ret i32 %conv
; CHECK: ret i32 27
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
declare ptr @malloc(i64)
declare i64 @llvm.objectsize.i64.p0(ptr, i1, i1, i1)
diff --git a/llvm/test/Transforms/InstCombine/builtin-object-size-offset.ll b/llvm/test/Transforms/InstCombine/builtin-object-size-offset.ll
index b8919a7..051466f 100644
--- a/llvm/test/Transforms/InstCombine/builtin-object-size-offset.ll
+++ b/llvm/test/Transforms/InstCombine/builtin-object-size-offset.ll
@@ -25,24 +25,24 @@ define i32 @foo1(i32 %N) {
entry:
%Big = alloca [20 x i8], align 16
%Small = alloca [10 x i8], align 1
- call void @llvm.lifetime.start.p0(i64 20, ptr %Big)
- call void @llvm.lifetime.start.p0(i64 10, ptr %Small)
+ call void @llvm.lifetime.start.p0(ptr %Big)
+ call void @llvm.lifetime.start.p0(ptr %Small)
%tobool = icmp ne i32 %N, 0
%add.ptr = getelementptr inbounds [20 x i8], ptr %Big, i64 0, i64 10
%cond = select i1 %tobool, ptr %add.ptr, ptr %Small
%0 = call i64 @llvm.objectsize.i64.p0(ptr %cond, i1 false)
%conv = trunc i64 %0 to i32
- call void @llvm.lifetime.end.p0(i64 10, ptr %Small)
- call void @llvm.lifetime.end.p0(i64 20, ptr %Big)
+ call void @llvm.lifetime.end.p0(ptr %Small)
+ call void @llvm.lifetime.end.p0(ptr %Big)
ret i32 %conv
; CHECK: ret i32 10
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare i64 @llvm.objectsize.i64.p0(ptr, i1)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
define void @foo() {
entry:
diff --git a/llvm/test/Transforms/InstCombine/builtin-object-size-ptr.ll b/llvm/test/Transforms/InstCombine/builtin-object-size-ptr.ll
index 533451f..3a7b760 100644
--- a/llvm/test/Transforms/InstCombine/builtin-object-size-ptr.ll
+++ b/llvm/test/Transforms/InstCombine/builtin-object-size-ptr.ll
@@ -18,11 +18,11 @@ define i32 @foo() #0 {
; CHECK-NEXT: ret i32 27
;
%var = alloca %struct.V, align 4
- call void @llvm.lifetime.start.p0(i64 28, ptr %var) #3
+ call void @llvm.lifetime.start.p0(ptr %var) #3
%arrayidx = getelementptr inbounds [10 x i8], ptr %var, i64 0, i64 1
%t1 = call i64 @llvm.objectsize.i64.p0(ptr %arrayidx, i1 false)
%conv = trunc i64 %t1 to i32
- call void @llvm.lifetime.end.p0(i64 28, ptr %var) #3
+ call void @llvm.lifetime.end.p0(ptr %var) #3
ret i32 %conv
}
@@ -63,9 +63,9 @@ define ptr @minimal_invariant_start_use(i8 %x) {
ret ptr %i
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare i64 @llvm.objectsize.i64.p0(ptr, i1) #2
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) #0
declare ptr @llvm.invariant.start.p0(i64 immarg, ptr nocapture) #0
declare void @llvm.invariant.end.p0(ptr, i64 immarg, ptr nocapture) #0
diff --git a/llvm/test/Transforms/InstCombine/compare-alloca.ll b/llvm/test/Transforms/InstCombine/compare-alloca.ll
index a27cd70..55d92b7 100644
--- a/llvm/test/Transforms/InstCombine/compare-alloca.ll
+++ b/llvm/test/Transforms/InstCombine/compare-alloca.ll
@@ -86,18 +86,18 @@ define i1 @alloca_argument_compare_escaped_through_store(ptr %arg, ptr %ptr) {
ret i1 %cmp
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
define i1 @alloca_argument_compare_benign_instrs(ptr %arg) {
; CHECK-LABEL: @alloca_argument_compare_benign_instrs(
; CHECK-NEXT: ret i1 false
;
%alloc = alloca i8
- call void @llvm.lifetime.start.p0(i64 1, ptr %alloc)
+ call void @llvm.lifetime.start.p0(ptr %alloc)
%cmp = icmp eq ptr %arg, %alloc
%x = load i8, ptr %arg
store i8 %x, ptr %alloc
- call void @llvm.lifetime.end.p0(i64 1, ptr %alloc)
+ call void @llvm.lifetime.end.p0(ptr %alloc)
ret i1 %cmp
}
diff --git a/llvm/test/Transforms/InstCombine/deadcode.ll b/llvm/test/Transforms/InstCombine/deadcode.ll
index f3e1ba6..4dcdbb9 100644
--- a/llvm/test/Transforms/InstCombine/deadcode.ll
+++ b/llvm/test/Transforms/InstCombine/deadcode.ll
@@ -22,13 +22,13 @@ define ptr @test2(i32 %width) {
declare ptr @llvm.stacksave()
-declare void @llvm.lifetime.start.p0(i64, ptr)
-declare void @llvm.lifetime.end.p0(i64, ptr)
+declare void @llvm.lifetime.start.p0(ptr)
+declare void @llvm.lifetime.end.p0(ptr)
define void @test3() {
%a = alloca i32
- call void @llvm.lifetime.start.p0(i64 -1, ptr %a)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
ret void
}
diff --git a/llvm/test/Transforms/InstCombine/lifetime-no-null-opt.ll b/llvm/test/Transforms/InstCombine/lifetime-no-null-opt.ll
index 422e179..597bd2c 100644
--- a/llvm/test/Transforms/InstCombine/lifetime-no-null-opt.ll
+++ b/llvm/test/Transforms/InstCombine/lifetime-no-null-opt.ll
@@ -2,8 +2,8 @@
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
declare void @llvm.dbg.declare(metadata, metadata, metadata)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @foo(ptr nocapture, ptr nocapture)
define void @bar(i1 %flag) #0 !dbg !4 {
@@ -20,11 +20,11 @@ define void @bar(i1 %flag) #0 !dbg !4 {
; CHECK-NEXT: #dbg_declare(ptr [[TEXT]], [[META16:![0-9]+]], !DIExpression(), [[META24:![0-9]+]])
; CHECK-NEXT: br label [[FIN:%.*]]
; CHECK: else:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr nonnull [[TEXT]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr nonnull [[BUFF]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TEXT]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[BUFF]])
; CHECK-NEXT: call void @foo(ptr nonnull [[BUFF]], ptr nonnull [[TEXT]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr nonnull [[BUFF]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr nonnull [[TEXT]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[BUFF]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TEXT]])
; CHECK-NEXT: br label [[FIN]]
; CHECK: fin:
; CHECK-NEXT: ret void
@@ -35,31 +35,31 @@ entry:
br i1 %flag, label %if, label %else
if:
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
- call void @llvm.lifetime.start.p0(i64 1, ptr %buff)
- call void @llvm.lifetime.end.p0(i64 1, ptr %buff)
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %buff)
+ call void @llvm.lifetime.end.p0(ptr %buff)
+ call void @llvm.lifetime.end.p0(ptr %text)
br label %bb2
bb2:
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
- call void @llvm.lifetime.start.p0(i64 1, ptr %buff)
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
- call void @llvm.lifetime.end.p0(i64 1, ptr %buff)
+ call void @llvm.lifetime.start.p0(ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %buff)
+ call void @llvm.lifetime.end.p0(ptr %text)
+ call void @llvm.lifetime.end.p0(ptr %buff)
br label %bb3
bb3:
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %text)
call void @llvm.dbg.declare(metadata ptr %text, metadata !14, metadata !25), !dbg !26
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.end.p0(ptr %text)
br label %fin
else:
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
- call void @llvm.lifetime.start.p0(i64 1, ptr %buff)
+ call void @llvm.lifetime.start.p0(ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %buff)
call void @foo(ptr %buff, ptr %text)
- call void @llvm.lifetime.end.p0(i64 1, ptr %buff)
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.end.p0(ptr %buff)
+ call void @llvm.lifetime.end.p0(ptr %text)
br label %fin
fin:
diff --git a/llvm/test/Transforms/InstCombine/lifetime-sanitizer.ll b/llvm/test/Transforms/InstCombine/lifetime-sanitizer.ll
index e379b32..fd45fe2 100644
--- a/llvm/test/Transforms/InstCombine/lifetime-sanitizer.ll
+++ b/llvm/test/Transforms/InstCombine/lifetime-sanitizer.ll
@@ -1,7 +1,7 @@
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @foo(ptr nocapture)
define void @asan() sanitize_address {
@@ -9,8 +9,8 @@ entry:
; CHECK-LABEL: @asan(
%text = alloca i8, align 1
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %text)
+ call void @llvm.lifetime.end.p0(ptr %text)
; CHECK: call void @llvm.lifetime.start
; CHECK-NEXT: call void @llvm.lifetime.end
@@ -24,8 +24,8 @@ entry:
; CHECK-LABEL: @hwasan(
%text = alloca i8, align 1
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %text)
+ call void @llvm.lifetime.end.p0(ptr %text)
; CHECK: call void @llvm.lifetime.start
; CHECK-NEXT: call void @llvm.lifetime.end
@@ -39,8 +39,8 @@ entry:
; CHECK-LABEL: @msan(
%text = alloca i8, align 1
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %text)
+ call void @llvm.lifetime.end.p0(ptr %text)
; CHECK: call void @llvm.lifetime.start
; CHECK-NEXT: call void @llvm.lifetime.end
@@ -54,8 +54,8 @@ entry:
; CHECK-LABEL: @no_asan(
%text = alloca i8, align 1
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %text)
+ call void @llvm.lifetime.end.p0(ptr %text)
; CHECK-NO: call void @llvm.lifetime
call void @foo(ptr %text) ; Keep alloca alive
diff --git a/llvm/test/Transforms/InstCombine/lifetime.ll b/llvm/test/Transforms/InstCombine/lifetime.ll
index b94c9694..6313dba 100644
--- a/llvm/test/Transforms/InstCombine/lifetime.ll
+++ b/llvm/test/Transforms/InstCombine/lifetime.ll
@@ -2,8 +2,8 @@
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
declare void @llvm.dbg.declare(metadata, metadata, metadata)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @foo(ptr nocapture, ptr nocapture)
define void @bar(i1 %flag) !dbg !4 {
@@ -20,11 +20,11 @@ define void @bar(i1 %flag) !dbg !4 {
; CHECK-NEXT: #dbg_declare(ptr [[TEXT]], [[META16:![0-9]+]], !DIExpression(), [[META24:![0-9]+]])
; CHECK-NEXT: br label [[FIN:%.*]]
; CHECK: else:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr nonnull [[TEXT]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr nonnull [[BUFF]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TEXT]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[BUFF]])
; CHECK-NEXT: call void @foo(ptr nonnull [[BUFF]], ptr nonnull [[TEXT]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr nonnull [[BUFF]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr nonnull [[TEXT]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[BUFF]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TEXT]])
; CHECK-NEXT: br label [[FIN]]
; CHECK: fin:
; CHECK-NEXT: ret void
@@ -35,31 +35,31 @@ entry:
br i1 %flag, label %if, label %else
if:
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
- call void @llvm.lifetime.start.p0(i64 1, ptr %buff)
- call void @llvm.lifetime.end.p0(i64 1, ptr %buff)
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %buff)
+ call void @llvm.lifetime.end.p0(ptr %buff)
+ call void @llvm.lifetime.end.p0(ptr %text)
br label %bb2
bb2:
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
- call void @llvm.lifetime.start.p0(i64 1, ptr %buff)
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
- call void @llvm.lifetime.end.p0(i64 1, ptr %buff)
+ call void @llvm.lifetime.start.p0(ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %buff)
+ call void @llvm.lifetime.end.p0(ptr %text)
+ call void @llvm.lifetime.end.p0(ptr %buff)
br label %bb3
bb3:
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %text)
call void @llvm.dbg.declare(metadata ptr %text, metadata !14, metadata !25), !dbg !26
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.end.p0(ptr %text)
br label %fin
else:
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
- call void @llvm.lifetime.start.p0(i64 1, ptr %buff)
+ call void @llvm.lifetime.start.p0(ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %buff)
call void @foo(ptr %buff, ptr %text)
- call void @llvm.lifetime.end.p0(i64 1, ptr %buff)
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.end.p0(ptr %buff)
+ call void @llvm.lifetime.end.p0(ptr %text)
br label %fin
fin:
diff --git a/llvm/test/Transforms/InstCombine/lower-dbg-declare.ll b/llvm/test/Transforms/InstCombine/lower-dbg-declare.ll
index 0072153..7aa5eed 100644
--- a/llvm/test/Transforms/InstCombine/lower-dbg-declare.ll
+++ b/llvm/test/Transforms/InstCombine/lower-dbg-declare.ll
@@ -26,7 +26,7 @@ entry:
%retval = alloca i32, align 4
%d1 = alloca i32, align 4
store i32 0, ptr %retval, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %d1) #4, !dbg !17
+ call void @llvm.lifetime.start.p0(ptr %d1) #4, !dbg !17
; CHECK: #dbg_value(i32 42, [[METADATA_IDX1:![0-9]+]], !DIExpression(),
; CHECK-NEXT: store
call void @llvm.dbg.declare(metadata ptr %d1, metadata !16, metadata !DIExpression()), !dbg !17
@@ -48,11 +48,11 @@ while.body: ; preds = %while.cond
br label %while.cond, !dbg !22, !llvm.loop !24
while.end: ; preds = %while.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %d1) #4, !dbg !25
+ call void @llvm.lifetime.end.p0(ptr %d1) #4, !dbg !25
ret i32 0, !dbg !26
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @llvm.dbg.declare(metadata, metadata, metadata)
@@ -64,7 +64,7 @@ define internal void @_ZL6escapeRi(ptr dereferenceable(4) %c) #3 !dbg !34 {
ret void
}
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
!llvm.dbg.cu = !{!2}
!llvm.module.flags = !{!8, !9, !10}
diff --git a/llvm/test/Transforms/InstCombine/malloc-free.ll b/llvm/test/Transforms/InstCombine/malloc-free.ll
index d8a1c07..5cff5d6 100644
--- a/llvm/test/Transforms/InstCombine/malloc-free.ll
+++ b/llvm/test/Transforms/InstCombine/malloc-free.ll
@@ -97,8 +97,8 @@ define i1 @foo() {
ret i1 %z
}
-declare void @llvm.lifetime.start.p0(i64, ptr)
-declare void @llvm.lifetime.end.p0(i64, ptr)
+declare void @llvm.lifetime.start.p0(ptr)
+declare void @llvm.lifetime.end.p0(ptr)
declare i64 @llvm.objectsize.i64(ptr, i1)
declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind
declare void @llvm.memmove.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind
diff --git a/llvm/test/Transforms/InstCombine/memcpy-from-global.ll b/llvm/test/Transforms/InstCombine/memcpy-from-global.ll
index 9c9ba83..64091a9 100644
--- a/llvm/test/Transforms/InstCombine/memcpy-from-global.ll
+++ b/llvm/test/Transforms/InstCombine/memcpy-from-global.ll
@@ -178,14 +178,14 @@ define void @test4() {
ret void
}
-declare void @llvm.lifetime.start.p0(i64, ptr)
+declare void @llvm.lifetime.start.p0(ptr)
define void @test5() {
; CHECK-LABEL: @test5(
; CHECK-NEXT: call void @baz(ptr nonnull byval(i8) @G)
; CHECK-NEXT: ret void
;
%A = alloca %T
- call void @llvm.lifetime.start.p0(i64 -1, ptr %A)
+ call void @llvm.lifetime.start.p0(ptr %A)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %A, ptr align 4 @G, i64 124, i1 false)
call void @baz(ptr byval(i8) %A)
ret void
@@ -308,7 +308,7 @@ define float @test11(i64 %i) {
entry:
%a = alloca [4 x float], align 4
- call void @llvm.lifetime.start.p0(i64 16, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
call void @llvm.memcpy.p0.p1.i64(ptr align 4 %a, ptr addrspace(1) align 4 @I, i64 16, i1 false)
%g = getelementptr inbounds [4 x float], ptr %a, i64 0, i64 %i
%r = load float, ptr %g, align 4
@@ -320,7 +320,7 @@ define float @test11_volatile(i64 %i) {
; CHECK-LABEL: @test11_volatile(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A:%.*]] = alloca [4 x float], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[A]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p1.i64(ptr align 4 [[A]], ptr addrspace(1) align 4 @I, i64 16, i1 true)
; CHECK-NEXT: [[G:%.*]] = getelementptr inbounds [4 x float], ptr [[A]], i64 0, i64 [[I:%.*]]
; CHECK-NEXT: [[R:%.*]] = load float, ptr [[G]], align 4
@@ -329,7 +329,7 @@ define float @test11_volatile(i64 %i) {
entry:
%a = alloca [4 x float], align 4
- call void @llvm.lifetime.start.p0(i64 16, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
call void @llvm.memcpy.p0.p1.i64(ptr align 4 %a, ptr addrspace(1) align 4 @I, i64 16, i1 true)
%g = getelementptr inbounds [4 x float], ptr %a, i64 0, i64 %i
%r = load float, ptr %g, align 4
diff --git a/llvm/test/Transforms/InstCombine/recurrence-binary-intrinsic.ll b/llvm/test/Transforms/InstCombine/recurrence-binary-intrinsic.ll
index 86e586e..a4e247e 100644
--- a/llvm/test/Transforms/InstCombine/recurrence-binary-intrinsic.ll
+++ b/llvm/test/Transforms/InstCombine/recurrence-binary-intrinsic.ll
@@ -236,12 +236,11 @@ define float @simple_recurrence_intrinsic_maximumnum(i32 %n, float %a, float %b)
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[FMAX_ACC:%.*]] = phi float [ [[FMAX:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ]
-; CHECK-NEXT: [[FMAX]] = call nnan float @llvm.maximumnum.f32(float [[FMAX_ACC]], float [[B]])
; CHECK-NEXT: [[IV_NEXT]] = add nuw i32 [[IV]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[IV_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]]
; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[FMAX:%.*]] = call nnan float @llvm.maximumnum.f32(float [[A]], float [[B]])
; CHECK-NEXT: ret float [[FMAX]]
;
entry:
@@ -265,12 +264,11 @@ define float @simple_recurrence_intrinsic_minimumnum(i32 %n, float %a, float %b)
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[FMIN_ACC:%.*]] = phi float [ [[FMIN:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ]
-; CHECK-NEXT: [[FMIN]] = call nnan float @llvm.minimumnum.f32(float [[FMIN_ACC]], float [[B]])
; CHECK-NEXT: [[IV_NEXT]] = add nuw i32 [[IV]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[IV_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]]
; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[FMIN:%.*]] = call nnan float @llvm.minimumnum.f32(float [[A]], float [[B]])
; CHECK-NEXT: ret float [[FMIN]]
;
entry:
@@ -296,7 +294,7 @@ define i8 @simple_recurrence_intrinsic_multiuse_phi(i8 %n, i8 %a, i8 %b) {
; CHECK-NEXT: [[IV:%.*]] = phi i8 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: [[UMAX_ACC:%.*]] = phi i8 [ [[UMAX:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ]
; CHECK-NEXT: call void @use(i8 [[UMAX_ACC]])
-; CHECK-NEXT: [[UMAX]] = call i8 @llvm.umax.i8(i8 [[UMAX_ACC]], i8 [[B]])
+; CHECK-NEXT: [[UMAX]] = call i8 @llvm.umax.i8(i8 [[A]], i8 [[B]])
; CHECK-NEXT: [[IV_NEXT]] = add nuw i8 [[IV]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8 [[IV_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]]
diff --git a/llvm/test/Transforms/InstCombine/sink_sideeffecting_instruction.ll b/llvm/test/Transforms/InstCombine/sink_sideeffecting_instruction.ll
index ccb9601..bd43daa 100644
--- a/llvm/test/Transforms/InstCombine/sink_sideeffecting_instruction.ll
+++ b/llvm/test/Transforms/InstCombine/sink_sideeffecting_instruction.ll
@@ -49,12 +49,12 @@ define i32 @test() {
; CHECK-NEXT: bb:
; CHECK-NEXT: [[VAR:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[VAR1:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[VAR]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[VAR]])
; CHECK-NEXT: [[VAR3:%.*]] = call i32 @foo(ptr nonnull writeonly [[VAR]])
; CHECK-NEXT: [[VAR4:%.*]] = icmp eq i32 [[VAR3]], 0
; CHECK-NEXT: br i1 [[VAR4]], label [[BB5:%.*]], label [[BB14:%.*]]
; CHECK: bb5:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[VAR1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[VAR1]])
; CHECK-NEXT: [[VAR8:%.*]] = load i32, ptr [[VAR]], align 4
; CHECK-NEXT: [[VAR9:%.*]] = icmp eq i32 [[VAR8]], 0
; CHECK-NEXT: [[VAR7:%.*]] = call i32 @foo(ptr nonnull writeonly [[VAR1]])
@@ -66,23 +66,23 @@ define i32 @test() {
; CHECK-NEXT: br label [[BB12]]
; CHECK: bb12:
; CHECK-NEXT: [[VAR13:%.*]] = phi i32 [ [[VAR11]], [[BB10]] ], [ [[VAR7]], [[BB_CRIT_EDGE]] ]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[VAR1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[VAR1]])
; CHECK-NEXT: br label [[BB14]]
; CHECK: bb14:
; CHECK-NEXT: [[VAR15:%.*]] = phi i32 [ [[VAR13]], [[BB12]] ], [ 0, [[BB:%.*]] ]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[VAR]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[VAR]])
; CHECK-NEXT: ret i32 [[VAR15]]
;
bb:
%var = alloca i32, align 4
%var1 = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %var) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %var) #4
%var3 = call i32 @foo(ptr nonnull writeonly %var)
%var4 = icmp eq i32 %var3, 0
br i1 %var4, label %bb5, label %bb14
bb5: ; preds = %bb
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %var1) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %var1) #4
%var8 = load i32, ptr %var, align 4
%var9 = icmp eq i32 %var8, 0
%var7 = call i32 @foo(ptr nonnull writeonly %var1)
@@ -97,12 +97,12 @@ bb_crit_edge:
bb12: ; preds = %bb10, %bb5
%var13 = phi i32 [ %var11, %bb10 ], [ %var7, %bb_crit_edge ]
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %var1) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %var1) #4
br label %bb14
bb14: ; preds = %bb12, %bb
%var15 = phi i32 [ %var13, %bb12 ], [ 0, %bb ]
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %var)
+ call void @llvm.lifetime.end.p0(ptr nonnull %var)
ret i32 %var15
}
@@ -325,18 +325,18 @@ define i32 @sink_lifetime1(i1 %c) {
; CHECK-LABEL: @sink_lifetime1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[VAR:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[VAR]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[VAR]])
; CHECK-NEXT: [[VAR3:%.*]] = call i32 @unknown(ptr nonnull [[VAR]]) #[[ATTR1]]
; CHECK-NEXT: br i1 [[C:%.*]], label [[EARLY_RETURN:%.*]], label [[USE_BLOCK:%.*]]
; CHECK: early_return:
; CHECK-NEXT: ret i32 0
; CHECK: use_block:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[VAR]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[VAR]])
; CHECK-NEXT: ret i32 [[VAR3]]
;
entry:
%var = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %var)
+ call void @llvm.lifetime.start.p0(ptr %var)
%var3 = call i32 @unknown(ptr %var) argmemonly nounwind willreturn
br i1 %c, label %early_return, label %use_block
@@ -344,7 +344,7 @@ early_return:
ret i32 0
use_block:
- call void @llvm.lifetime.end.p0(i64 4, ptr %var)
+ call void @llvm.lifetime.end.p0(ptr %var)
ret i32 %var3
}
@@ -352,25 +352,25 @@ define i32 @sink_lifetime2(i1 %c) {
; CHECK-LABEL: @sink_lifetime2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[VAR:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[VAR]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[VAR]])
; CHECK-NEXT: [[VAR3:%.*]] = call i32 @unknown(ptr nonnull [[VAR]]) #[[ATTR1]]
; CHECK-NEXT: br i1 [[C:%.*]], label [[MERGE:%.*]], label [[USE_BLOCK:%.*]]
; CHECK: merge:
; CHECK-NEXT: [[RET:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[VAR3]], [[USE_BLOCK]] ]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[VAR]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[VAR]])
; CHECK-NEXT: ret i32 [[RET]]
; CHECK: use_block:
; CHECK-NEXT: br label [[MERGE]]
;
entry:
%var = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %var)
+ call void @llvm.lifetime.start.p0(ptr %var)
%var3 = call i32 @unknown(ptr %var) argmemonly nounwind willreturn
br i1 %c, label %merge, label %use_block
merge:
%ret = phi i32 [0, %entry], [%var3, %use_block]
- call void @llvm.lifetime.end.p0(i64 4, ptr %var)
+ call void @llvm.lifetime.end.p0(ptr %var)
ret i32 %ret
use_block:
@@ -390,8 +390,8 @@ define i32 @sink_lifetime3(i1 %c) {
;
entry:
%var = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %var)
- call void @llvm.lifetime.end.p0(i64 4, ptr %var)
+ call void @llvm.lifetime.start.p0(ptr %var)
+ call void @llvm.lifetime.end.p0(ptr %var)
; If unknown accesses %var, that's UB
%var3 = call i32 @unknown(ptr %var) argmemonly nounwind willreturn
br i1 %c, label %early_return, label %use_block
@@ -407,9 +407,9 @@ define i32 @sink_lifetime4a(i1 %c) {
; CHECK-LABEL: @sink_lifetime4a(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[VAR:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[VAR]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[VAR]])
; CHECK-NEXT: [[VAR3:%.*]] = call i32 @unknown(ptr nonnull [[VAR]]) #[[ATTR1]]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[VAR]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[VAR]])
; CHECK-NEXT: br i1 [[C:%.*]], label [[EARLY_RETURN:%.*]], label [[USE_BLOCK:%.*]]
; CHECK: early_return:
; CHECK-NEXT: ret i32 0
@@ -418,9 +418,9 @@ define i32 @sink_lifetime4a(i1 %c) {
;
entry:
%var = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %var)
+ call void @llvm.lifetime.start.p0(ptr %var)
%var3 = call i32 @unknown(ptr %var) argmemonly nounwind willreturn
- call void @llvm.lifetime.end.p0(i64 4, ptr %var)
+ call void @llvm.lifetime.end.p0(ptr %var)
br i1 %c, label %early_return, label %use_block
early_return:
@@ -436,9 +436,9 @@ define i32 @sink_lifetime4b(i1 %c) {
; CHECK-LABEL: @sink_lifetime4b(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[VAR:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[VAR]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[VAR]])
; CHECK-NEXT: [[VAR3:%.*]] = call i32 @unknown(ptr nonnull writeonly [[VAR]]) #[[ATTR1]]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[VAR]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[VAR]])
; CHECK-NEXT: br i1 [[C:%.*]], label [[EARLY_RETURN:%.*]], label [[USE_BLOCK:%.*]]
; CHECK: early_return:
; CHECK-NEXT: ret i32 0
@@ -447,9 +447,9 @@ define i32 @sink_lifetime4b(i1 %c) {
;
entry:
%var = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %var)
+ call void @llvm.lifetime.start.p0(ptr %var)
%var3 = call i32 @unknown(ptr writeonly %var) argmemonly nounwind willreturn
- call void @llvm.lifetime.end.p0(i64 4, ptr %var)
+ call void @llvm.lifetime.end.p0(ptr %var)
br i1 %c, label %early_return, label %use_block
early_return:
@@ -486,6 +486,6 @@ use_block:
declare i32 @bar()
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
diff --git a/llvm/test/Transforms/InstCombine/trivial-dse-calls.ll b/llvm/test/Transforms/InstCombine/trivial-dse-calls.ll
index 128edff..758071a 100644
--- a/llvm/test/Transforms/InstCombine/trivial-dse-calls.ll
+++ b/llvm/test/Transforms/InstCombine/trivial-dse-calls.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=instcombine -S < %s | FileCheck %s
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @unknown()
declare void @f(ptr)
@@ -25,9 +25,9 @@ define void @test_lifetime() {
; CHECK-NEXT: ret void
;
%a = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
call void @f(ptr writeonly nocapture %a) argmemonly nounwind willreturn
- call void @llvm.lifetime.end.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
ret void
}
@@ -40,11 +40,11 @@ define void @test_lifetime2() {
; CHECK-NEXT: ret void
;
%a = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
call void @unknown()
call void @f(ptr writeonly nocapture %a) argmemonly nounwind willreturn
call void @unknown()
- call void @llvm.lifetime.end.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
ret void
}
diff --git a/llvm/test/Transforms/InstCombine/unreachable-alloca-lifetime-markers.ll b/llvm/test/Transforms/InstCombine/unreachable-alloca-lifetime-markers.ll
index ab744c62..9c64bfb 100644
--- a/llvm/test/Transforms/InstCombine/unreachable-alloca-lifetime-markers.ll
+++ b/llvm/test/Transforms/InstCombine/unreachable-alloca-lifetime-markers.ll
@@ -12,7 +12,7 @@ define void @pr150338(ptr %arg) {
%a = alloca i32
store ptr %a, ptr %arg
store i1 true, ptr poison
- call void @llvm.lifetime.end.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
ret void
}
@@ -33,7 +33,7 @@ entry:
bb1:
%phi1 = phi ptr [ null, %entry ], [ %phi2, %bb2 ]
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
br label %bb2
bb2:
@@ -45,7 +45,7 @@ define void @lifetime_poison() {
; CHECK-LABEL: define void @lifetime_poison() {
; CHECK-NEXT: ret void
;
- call void @llvm.lifetime.start.p0(i64 4, ptr poison)
- call void @llvm.lifetime.end.p0(i64 4, ptr poison)
+ call void @llvm.lifetime.start.p0(ptr poison)
+ call void @llvm.lifetime.end.p0(ptr poison)
ret void
}
diff --git a/llvm/test/Transforms/InstCombine/vararg.ll b/llvm/test/Transforms/InstCombine/vararg.ll
index eb24256..93d230d 100644
--- a/llvm/test/Transforms/InstCombine/vararg.ll
+++ b/llvm/test/Transforms/InstCombine/vararg.ll
@@ -12,14 +12,14 @@ define void @func(ptr nocapture readnone %fmt, ...) {
entry:
%va0 = alloca %struct.__va_list, align 8
%va1 = alloca %struct.__va_list, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %va0)
+ call void @llvm.lifetime.start.p0(ptr %va0)
call void @llvm.va_start(ptr %va0)
- call void @llvm.lifetime.start.p0(i64 32, ptr %va1)
+ call void @llvm.lifetime.start.p0(ptr %va1)
call void @llvm.va_copy(ptr %va1, ptr %va0)
call void @llvm.va_end(ptr %va1)
- call void @llvm.lifetime.end.p0(i64 32, ptr %va1)
+ call void @llvm.lifetime.end.p0(ptr %va1)
call void @llvm.va_end(ptr %va0)
- call void @llvm.lifetime.end.p0(i64 32, ptr %va0)
+ call void @llvm.lifetime.end.p0(ptr %va0)
ret void
}
@@ -31,28 +31,28 @@ define void @func_destroy_copy_src(ptr nocapture readnone %fmt, ...) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[VA0:%.*]] = alloca [[STRUCT___VA_LIST:%.*]], align 8
; CHECK-NEXT: [[VA1:%.*]] = alloca [[STRUCT___VA_LIST]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[VA0]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[VA1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[VA0]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[VA1]])
; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[VA0]])
; CHECK-NEXT: call void @llvm.va_copy.p0(ptr nonnull [[VA1]], ptr nonnull [[VA0]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VA0]])
; CHECK-NEXT: call void @callee(ptr nonnull [[VA1]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VA1]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[VA1]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[VA0]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[VA1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[VA0]])
; CHECK-NEXT: ret void
;
entry:
%va0 = alloca %struct.__va_list, align 8
%va1 = alloca %struct.__va_list, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %va0)
- call void @llvm.lifetime.start.p0(i64 32, ptr %va1)
+ call void @llvm.lifetime.start.p0(ptr %va0)
+ call void @llvm.lifetime.start.p0(ptr %va1)
call void @llvm.va_start(ptr %va0)
call void @llvm.va_copy(ptr %va1, ptr %va0)
call void @llvm.va_end(ptr %va0)
call void @callee(ptr %va1)
call void @llvm.va_end(ptr %va1)
- call void @llvm.lifetime.end.p0(i64 32, ptr %va1)
- call void @llvm.lifetime.end.p0(i64 32, ptr %va0)
+ call void @llvm.lifetime.end.p0(ptr %va1)
+ call void @llvm.lifetime.end.p0(ptr %va0)
ret void
}
diff --git a/llvm/test/Transforms/InstSimplify/floating-point-arithmetic.ll b/llvm/test/Transforms/InstSimplify/floating-point-arithmetic.ll
index ab4448b4..820fff4 100644
--- a/llvm/test/Transforms/InstSimplify/floating-point-arithmetic.ll
+++ b/llvm/test/Transforms/InstSimplify/floating-point-arithmetic.ll
@@ -213,7 +213,7 @@ define double @fmul_nnan_ninf_nneg_n0.0_commute(i127 %x) {
define float @fmul_ninf_nnan_mul_zero_nsz(float nofpclass(inf nan) %f) {
; CHECK-LABEL: @fmul_ninf_nnan_mul_zero_nsz(
-; CHECK-NEXT: ret float 0.000000e+00
+; CHECK-NEXT: ret float 0.000000e+00
;
%r = fmul nsz float %f, 0.0
ret float %r
@@ -221,7 +221,7 @@ define float @fmul_ninf_nnan_mul_zero_nsz(float nofpclass(inf nan) %f) {
define float @fmul_ninf_nnan_mul_nzero_nsz(float nofpclass(inf nan) %f) {
; CHECK-LABEL: @fmul_ninf_nnan_mul_nzero_nsz(
-; CHECK-NEXT: ret float 0.000000e+00
+; CHECK-NEXT: ret float 0.000000e+00
;
%r = fmul nsz float %f, -0.0
ret float %r
@@ -1255,3 +1255,20 @@ define i1 @fptrunc_round_unknown_positive(double %unknown) {
%cmp = fcmp nnan oge float %op, 0.0
ret i1 %cmp
}
+
+define half @fabs_select_fabs(half noundef %x) {
+; CHECK-LABEL: @fabs_select_fabs(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[ABS1:%.*]] = call half @llvm.fabs.f16(half [[X:%.*]])
+; CHECK-NEXT: [[CMP:%.*]] = fcmp ogt half [[ABS1]], 0xH0000
+; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], half [[X]], half 0xH0000
+; CHECK-NEXT: [[ABS2:%.*]] = call half @llvm.fabs.f16(half [[SEL]])
+; CHECK-NEXT: ret half [[ABS2]]
+;
+entry:
+ %abs1 = call half @llvm.fabs.f16(half %x)
+ %cmp = fcmp ogt half %abs1, 0xH0000
+ %sel = select i1 %cmp, half %x, half 0xH0000
+ %abs2 = call half @llvm.fabs.f16(half %sel)
+ ret half %abs2
+}
diff --git a/llvm/test/Transforms/LICM/dropped-tbaa.ll b/llvm/test/Transforms/LICM/dropped-tbaa.ll
index 11083b4..92839f1 100644
--- a/llvm/test/Transforms/LICM/dropped-tbaa.ll
+++ b/llvm/test/Transforms/LICM/dropped-tbaa.ll
@@ -24,7 +24,7 @@ define void @foo(ptr %data, ptr %addend) #0 {
; CHECK-NEXT: [[CONV_I:%.*]] = sitofp i32 [[TMP2]] to double
entry:
%i = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2
+ call void @llvm.lifetime.start.p0(ptr %i) #2
store i32 0, ptr %i, align 4, !tbaa !1
br i1 true, label %for.body.lr.ph, label %for.cond.cleanup
@@ -35,7 +35,7 @@ for.cond.for.cond.cleanup_crit_edge: ; preds = %for.inc
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %for.cond.for.cond.cleanup_crit_edge, %entry
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2
+ call void @llvm.lifetime.end.p0(ptr %i) #2
br label %for.end
for.body: ; preds = %for.body.lr.ph, %for.inc
@@ -67,8 +67,8 @@ for.end: ; preds = %for.cond.cleanup
ret void
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #0
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #0
+declare void @llvm.lifetime.start.p0(ptr nocapture) #0
+declare void @llvm.lifetime.end.p0(ptr nocapture) #0
attributes #0 = { argmemonly nounwind }
diff --git a/llvm/test/Transforms/LICM/hoisting-preheader-debugloc.ll b/llvm/test/Transforms/LICM/hoisting-preheader-debugloc.ll
index 61f0eb19..0aa56d2 100644
--- a/llvm/test/Transforms/LICM/hoisting-preheader-debugloc.ll
+++ b/llvm/test/Transforms/LICM/hoisting-preheader-debugloc.ll
@@ -17,7 +17,7 @@ declare i16 @e(i32)
define i16 @g() !dbg !13 {
entry:
%l_284 = alloca [2 x [3 x [6 x i32]]], align 16
- call void @llvm.lifetime.start.p0(i64 144, ptr nonnull %l_284), !dbg !24
+ call void @llvm.lifetime.start.p0(ptr nonnull %l_284), !dbg !24
call void @llvm.dbg.declare(metadata ptr %l_284, metadata !17, metadata !DIExpression()), !dbg !25
%0 = load i16, ptr @a, align 2, !dbg !26, !tbaa !29
%cmp11 = icmp sgt i16 %0, -1, !dbg !33
@@ -51,15 +51,15 @@ for.body.cleanup_crit_edge: ; preds = %for.body
br label %cleanup, !dbg !38
cleanup: ; preds = %for.body.cleanup_crit_edge, %for.cond.cleanup_crit_edge, %entry
- call void @llvm.lifetime.end.p0(i64 144, ptr nonnull %l_284), !dbg !51
+ call void @llvm.lifetime.end.p0(ptr nonnull %l_284), !dbg !51
ret i16 1, !dbg !51
}
; Function Attrs: argmemonly nocallback nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
; Function Attrs: argmemonly nocallback nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
; Function Attrs: nocallback nofree nosync nounwind readnone speculatable willreturn
declare void @llvm.dbg.value(metadata, metadata, metadata) #0
diff --git a/llvm/test/Transforms/LICM/licm-coroutine.ll b/llvm/test/Transforms/LICM/licm-coroutine.ll
new file mode 100644
index 0000000..a4765ac
--- /dev/null
+++ b/llvm/test/Transforms/LICM/licm-coroutine.ll
@@ -0,0 +1,78 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt < %s -passes=licm -S | FileCheck %s
+
+; %fca.0 and %fca.1 should not be hoisted out of the loop because the ramp
+; function and resume function have different stack frames, so %pointer1 and
+; %pointer2 have different values before and after @llvm.coro.suspend.
+
+define ptr @f(i32 %n) presplitcoroutine {
+; CHECK-LABEL: define ptr @f(
+; CHECK-SAME: i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[POINTER1:%.*]] = alloca ptr, align 8
+; CHECK-NEXT: [[POINTER2:%.*]] = alloca ptr, align 8
+; CHECK-NEXT: [[ID:%.*]] = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
+; CHECK-NEXT: [[SIZE:%.*]] = call i32 @llvm.coro.size.i32()
+; CHECK-NEXT: [[ALLOC:%.*]] = call ptr @malloc(i32 [[SIZE]])
+; CHECK-NEXT: [[HDL:%.*]] = call noalias ptr @llvm.coro.begin(token [[ID]], ptr [[ALLOC]])
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[N_VAL:%.*]] = phi i32 [ [[N]], %[[ENTRY]] ], [ [[INC:%.*]], %[[RESUME:.*]] ]
+; CHECK-NEXT: [[INC]] = add nsw i32 [[N_VAL]], 1
+; CHECK-NEXT: call void @print(i32 [[N_VAL]])
+; CHECK-NEXT: [[TMP0:%.*]] = call i8 @llvm.coro.suspend(token none, i1 false)
+; CHECK-NEXT: switch i8 [[TMP0]], label %[[SUSPEND_LOOPEXIT:.*]] [
+; CHECK-NEXT: i8 0, label %[[RESUME]]
+; CHECK-NEXT: i8 1, label %[[CLEANUP:.*]]
+; CHECK-NEXT: ]
+; CHECK: [[RESUME]]:
+; CHECK-NEXT: [[FCA_0:%.*]] = insertvalue [2 x ptr] poison, ptr [[POINTER1]], 0
+; CHECK-NEXT: [[FCA_1:%.*]] = insertvalue [2 x ptr] [[FCA_0]], ptr [[POINTER2]], 1
+; CHECK-NEXT: call void @foo([2 x ptr] [[FCA_1]])
+; CHECK-NEXT: br label %[[LOOP]]
+; CHECK: [[CLEANUP]]:
+; CHECK-NEXT: [[MEM:%.*]] = call ptr @llvm.coro.free(token [[ID]], ptr [[HDL]])
+; CHECK-NEXT: call void @free(ptr [[MEM]])
+; CHECK-NEXT: br label %[[SUSPEND:.*]]
+; CHECK: [[SUSPEND_LOOPEXIT]]:
+; CHECK-NEXT: br label %[[SUSPEND]]
+; CHECK: [[SUSPEND]]:
+; CHECK-NEXT: [[UNUSED:%.*]] = call i1 @llvm.coro.end(ptr [[HDL]], i1 false, token none)
+; CHECK-NEXT: ret ptr [[HDL]]
+;
+entry:
+ %pointer1 = alloca ptr
+ %pointer2 = alloca ptr
+ %id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
+ %size = call i32 @llvm.coro.size.i32()
+ %alloc = call ptr @malloc(i32 %size)
+ %hdl = call noalias ptr @llvm.coro.begin(token %id, ptr %alloc)
+ br label %loop
+
+loop:
+ %n.val = phi i32 [ %n, %entry ], [ %inc, %resume ]
+ %inc = add nsw i32 %n.val, 1
+ call void @print(i32 %n.val)
+ %0 = call i8 @llvm.coro.suspend(token none, i1 false)
+ switch i8 %0, label %suspend [i8 0, label %resume
+ i8 1, label %cleanup]
+
+resume:
+ %fca.0 = insertvalue [2 x ptr] poison, ptr %pointer1, 0
+ %fca.1 = insertvalue [2 x ptr] %fca.0, ptr %pointer2, 1
+ call void @foo([2 x ptr] %fca.1)
+ br label %loop
+
+cleanup:
+ %mem = call ptr @llvm.coro.free(token %id, ptr %hdl)
+ call void @free(ptr %mem)
+ br label %suspend
+suspend:
+ %unused = call i1 @llvm.coro.end(ptr %hdl, i1 false, token none)
+ ret ptr %hdl
+}
+
+declare void @free(ptr)
+declare ptr @malloc(i32)
+declare void @print(i32)
+declare void @foo([2 x ptr])
diff --git a/llvm/test/Transforms/LICM/loopsink-pr38462.ll b/llvm/test/Transforms/LICM/loopsink-pr38462.ll
index 51eee1f..8b2ff10 100644
--- a/llvm/test/Transforms/LICM/loopsink-pr38462.ll
+++ b/llvm/test/Transforms/LICM/loopsink-pr38462.ll
@@ -37,7 +37,7 @@ __except:
catchret from %1 to label %__except3
__except3:
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %s)
+ call void @llvm.lifetime.start.p0(ptr nonnull %s)
%call.i = call zeroext i1 @g(ptr nonnull %s)
br i1 %call.i, label %if.then.i, label %exit
@@ -46,7 +46,7 @@ if.then.i:
br label %exit
exit:
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %s)
+ call void @llvm.lifetime.end.p0(ptr nonnull %s)
br label %__try.cont
__try.cont:
@@ -58,8 +58,8 @@ __try.cont:
declare i32 @__C_specific_handler(...)
declare i32 @f()
declare zeroext i1 @g(ptr)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
!1 = !{!"function_entry_count", i64 1}
diff --git a/llvm/test/Transforms/LoopStrengthReduce/X86/lifetime-use.ll b/llvm/test/Transforms/LoopStrengthReduce/X86/lifetime-use.ll
index c7a0de22..970643a 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/X86/lifetime-use.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/X86/lifetime-use.ll
@@ -6,7 +6,7 @@ define void @test(ptr %p, i64 %idx) {
; CHECK-SAME: ptr [[P:%.*]], i64 [[IDX:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[ALLOCA:%.*]] = alloca [4 x [4 x i32]], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 64, ptr [[ALLOCA]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ALLOCA]])
; CHECK-NEXT: [[TMP0:%.*]] = shl i64 [[IDX]], 6
; CHECK-NEXT: [[TMP1:%.*]] = add nuw nsw i64 [[TMP0]], 48
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP1]]
@@ -31,12 +31,12 @@ define void @test(ptr %p, i64 %idx) {
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[LSR_IV_NEXT]], 0
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]]
; CHECK: [[EXIT]]:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 64, ptr [[ALLOCA]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ALLOCA]])
; CHECK-NEXT: ret void
;
entry:
%alloca = alloca [4 x [4 x i32]], align 16
- call void @llvm.lifetime.start.p0(i64 64, ptr %alloca)
+ call void @llvm.lifetime.start.p0(ptr %alloca)
br label %loop
loop:
@@ -54,6 +54,6 @@ loop:
br i1 %exitcond.not, label %exit, label %loop
exit:
- call void @llvm.lifetime.end.p0(i64 64, ptr %alloca)
+ call void @llvm.lifetime.end.p0(ptr %alloca)
ret void
}
diff --git a/llvm/test/Transforms/LoopStrengthReduce/lsr-comp-time.ll b/llvm/test/Transforms/LoopStrengthReduce/lsr-comp-time.ll
index 3ec4fea..f8c5e82 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/lsr-comp-time.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/lsr-comp-time.ll
@@ -5,10 +5,10 @@
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
; Function Attrs: nounwind readnone uwtable
define dso_local i32 @foo(i32 %arg, i32 %arg1, i32 %arg2, i32 %arg3, i32 %arg4, i32 %arg5, i32 %arg6) local_unnamed_addr #3 {
@@ -83,31 +83,31 @@ bb:
%tmp16 = alloca [100 x [100 x i32]], align 16
%tmp17 = alloca [100 x [100 x i32]], align 16
%tmp18 = alloca [100 x [100 x i32]], align 16
- call void @llvm.lifetime.start.p0(i64 400, ptr nonnull %tmp) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp) #4
call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp, i8 0, i64 400, i1 false)
- call void @llvm.lifetime.start.p0(i64 400, ptr nonnull %tmp7) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp7) #4
call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp7, i8 0, i64 400, i1 false)
- call void @llvm.lifetime.start.p0(i64 400, ptr nonnull %tmp8) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp8) #4
call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp8, i8 0, i64 400, i1 false)
- call void @llvm.lifetime.start.p0(i64 40000, ptr nonnull %tmp9) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp9) #4
call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp9, i8 0, i64 40000, i1 false)
- call void @llvm.lifetime.start.p0(i64 400, ptr nonnull %tmp10) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp10) #4
call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp10, i8 0, i64 400, i1 false)
- call void @llvm.lifetime.start.p0(i64 40000, ptr nonnull %tmp11) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp11) #4
call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp11, i8 0, i64 40000, i1 false)
- call void @llvm.lifetime.start.p0(i64 400, ptr nonnull %tmp12) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp12) #4
call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp12, i8 0, i64 400, i1 false)
- call void @llvm.lifetime.start.p0(i64 400, ptr nonnull %tmp13) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp13) #4
call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp13, i8 0, i64 400, i1 false)
- call void @llvm.lifetime.start.p0(i64 40000, ptr nonnull %tmp14) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp14) #4
call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp14, i8 0, i64 40000, i1 false)
- call void @llvm.lifetime.start.p0(i64 400, ptr nonnull %tmp15) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp15) #4
call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp15, i8 0, i64 400, i1 false)
- call void @llvm.lifetime.start.p0(i64 40000, ptr nonnull %tmp16) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp16) #4
call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp16, i8 0, i64 40000, i1 false)
- call void @llvm.lifetime.start.p0(i64 40000, ptr nonnull %tmp17) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp17) #4
call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp17, i8 0, i64 40000, i1 false)
- call void @llvm.lifetime.start.p0(i64 40000, ptr nonnull %tmp18) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp18) #4
call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp18, i8 0, i64 40000, i1 false)
%tmp32 = getelementptr inbounds [100 x i32], ptr %tmp8, i64 0, i64 3
br label %bb33
@@ -1300,19 +1300,19 @@ bb1051: ; preds = %bb1007
%tmp1063 = sub i32 %tmp1062, %tmp960
%tmp1064 = add i32 %tmp1063, %tmp1004
%tmp1065 = sub i32 %tmp1064, %tmp1048
- call void @llvm.lifetime.end.p0(i64 40000, ptr nonnull %tmp18) #4
- call void @llvm.lifetime.end.p0(i64 40000, ptr nonnull %tmp17) #4
- call void @llvm.lifetime.end.p0(i64 40000, ptr nonnull %tmp16) #4
- call void @llvm.lifetime.end.p0(i64 400, ptr nonnull %tmp15) #4
- call void @llvm.lifetime.end.p0(i64 40000, ptr nonnull %tmp14) #4
- call void @llvm.lifetime.end.p0(i64 400, ptr nonnull %tmp13) #4
- call void @llvm.lifetime.end.p0(i64 400, ptr nonnull %tmp12) #4
- call void @llvm.lifetime.end.p0(i64 40000, ptr nonnull %tmp11) #4
- call void @llvm.lifetime.end.p0(i64 400, ptr nonnull %tmp10) #4
- call void @llvm.lifetime.end.p0(i64 40000, ptr nonnull %tmp9) #4
- call void @llvm.lifetime.end.p0(i64 400, ptr nonnull %tmp8) #4
- call void @llvm.lifetime.end.p0(i64 400, ptr nonnull %tmp7) #4
- call void @llvm.lifetime.end.p0(i64 400, ptr nonnull %tmp) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp18) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp17) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp16) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp15) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp14) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp13) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp12) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp11) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp10) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp9) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp8) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp7) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp) #4
ret i32 %tmp1065
}
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll b/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll
index a8d9a0c..aa22252b 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll
@@ -9,19 +9,13 @@ define void @clamped_tc_8(ptr nocapture %dst, i32 %n, i64 %val) vscale_range(1,1
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
-; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 8, [[TMP4]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8
; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 0, i64 8)
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[VAL]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
; CHECK-NEXT: [[TMP7:%.*]] = mul <vscale x 8 x i64> [[TMP8]], splat (i64 1)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 8 x i64> zeroinitializer, [[TMP7]]
-; CHECK-NEXT: [[TMP12:%.*]] = mul i64 1, [[TMP6]]
+; CHECK-NEXT: [[TMP12:%.*]] = mul i64 1, [[TMP1]]
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP12]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -34,7 +28,7 @@ define void @clamped_tc_8(ptr nocapture %dst, i32 %n, i64 %val) vscale_range(1,1
; CHECK-NEXT: [[TMP11:%.*]] = lshr <vscale x 8 x i64> [[BROADCAST_SPLAT]], [[TMP10]]
; CHECK-NEXT: [[TMP14:%.*]] = trunc <vscale x 8 x i64> [[TMP11]] to <vscale x 8 x i8>
; CHECK-NEXT: call void @llvm.masked.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP14]], ptr [[NEXT_GEP]], i32 1, <vscale x 8 x i1> [[ACTIVE_LANE_MASK]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP6]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]]
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX_NEXT]], i64 8)
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -92,19 +86,13 @@ define void @clamped_tc_max_8(ptr nocapture %dst, i32 %n, i64 %val) vscale_range
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
-; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[WIDE_TRIP_COUNT]], [[TMP4]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8
; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 0, i64 [[WIDE_TRIP_COUNT]])
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[VAL]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
; CHECK-NEXT: [[TMP7:%.*]] = mul <vscale x 8 x i64> [[TMP8]], splat (i64 1)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 8 x i64> zeroinitializer, [[TMP7]]
-; CHECK-NEXT: [[TMP12:%.*]] = mul i64 1, [[TMP6]]
+; CHECK-NEXT: [[TMP12:%.*]] = mul i64 1, [[TMP1]]
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP12]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -117,7 +105,7 @@ define void @clamped_tc_max_8(ptr nocapture %dst, i32 %n, i64 %val) vscale_range
; CHECK-NEXT: [[TMP11:%.*]] = lshr <vscale x 8 x i64> [[BROADCAST_SPLAT]], [[TMP10]]
; CHECK-NEXT: [[TMP14:%.*]] = trunc <vscale x 8 x i64> [[TMP11]] to <vscale x 8 x i8>
; CHECK-NEXT: call void @llvm.masked.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP14]], ptr [[NEXT_GEP]], i32 1, <vscale x 8 x i1> [[ACTIVE_LANE_MASK]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP6]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]]
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX_NEXT]], i64 [[WIDE_TRIP_COUNT]])
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll
index 4b895ae..6d16339 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll
@@ -702,12 +702,6 @@ define void @multiple_exit_conditions(ptr %src, ptr noalias %dst) #1 {
; PRED: [[VECTOR_PH]]:
; PRED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; PRED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; PRED-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; PRED-NEXT: [[N_RND_UP:%.*]] = add i64 257, [[TMP2]]
-; PRED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; PRED-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; PRED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; PRED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; PRED-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
; PRED-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 2
; PRED-NEXT: [[TMP8:%.*]] = sub i64 257, [[TMP7]]
@@ -726,7 +720,7 @@ define void @multiple_exit_conditions(ptr %src, ptr noalias %dst) #1 {
; PRED-NEXT: [[TMP13:%.*]] = or <vscale x 2 x i16> [[BROADCAST_SPLAT]], splat (i16 1)
; PRED-NEXT: [[TMP14:%.*]] = uitofp <vscale x 2 x i16> [[TMP13]] to <vscale x 2 x double>
; PRED-NEXT: call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> [[TMP14]], ptr [[NEXT_GEP]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
-; PRED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP5]]
+; PRED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]]
; PRED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX]], i64 [[TMP10]])
; PRED-NEXT: [[TMP16:%.*]] = xor <vscale x 2 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
; PRED-NEXT: [[TMP17:%.*]] = extractelement <vscale x 2 x i1> [[TMP16]], i32 0
@@ -1242,9 +1236,6 @@ define void @test_conditional_interleave_group (ptr noalias %src.1, ptr noalias
; PRED-NEXT: [[TMP14:%.*]] = or i1 [[TMP13]], [[TMP12]]
; PRED-NEXT: br i1 [[TMP14]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; PRED: [[VECTOR_PH]]:
-; PRED-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP0]], 7
-; PRED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 8
-; PRED-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; PRED-NEXT: [[TMP15:%.*]] = sub i64 [[TMP0]], 8
; PRED-NEXT: [[TMP16:%.*]] = icmp ugt i64 [[TMP0]], 8
; PRED-NEXT: [[TMP17:%.*]] = select i1 [[TMP16]], i64 [[TMP15]], i64 0
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/divs-with-scalable-vfs.ll b/llvm/test/Transforms/LoopVectorize/AArch64/divs-with-scalable-vfs.ll
index 1ad1e426..6ff6bb4 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/divs-with-scalable-vfs.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/divs-with-scalable-vfs.ll
@@ -24,8 +24,6 @@ define void @sdiv_feeding_gep(ptr %dst, i32 %x, i64 %M, i64 %conv6, i64 %N) {
; CHECK-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP9]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 4
; CHECK-NEXT: [[TMP18:%.*]] = sdiv i64 [[M]], [[CONV6]]
; CHECK-NEXT: [[TMP20:%.*]] = trunc i64 [[TMP18]] to i32
; CHECK-NEXT: [[TMP22:%.*]] = mul i64 [[TMP18]], [[CONV61]]
@@ -43,7 +41,7 @@ define void @sdiv_feeding_gep(ptr %dst, i32 %x, i64 %M, i64 %conv6, i64 %N) {
; CHECK-NEXT: [[TMP39:%.*]] = getelementptr double, ptr [[TMP34]], i64 [[TMP38]]
; CHECK-NEXT: store <vscale x 2 x double> zeroinitializer, ptr [[TMP34]], align 8
; CHECK-NEXT: store <vscale x 2 x double> zeroinitializer, ptr [[TMP39]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP11]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]]
; CHECK-NEXT: [[TMP40:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP40]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
@@ -110,12 +108,6 @@ define void @sdiv_feeding_gep_predicated(ptr %dst, i32 %x, i64 %M, i64 %conv6, i
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
-; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP7]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP6]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 2
; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 2
; CHECK-NEXT: [[TMP12:%.*]] = sub i64 [[N]], [[TMP11]]
@@ -127,7 +119,7 @@ define void @sdiv_feeding_gep_predicated(ptr %dst, i32 %x, i64 %M, i64 %conv6, i
; CHECK-NEXT: [[TMP15:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
; CHECK-NEXT: [[TMP17:%.*]] = mul <vscale x 2 x i64> [[TMP15]], splat (i64 1)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP17]]
-; CHECK-NEXT: [[TMP20:%.*]] = mul i64 1, [[TMP9]]
+; CHECK-NEXT: [[TMP20:%.*]] = mul i64 1, [[TMP6]]
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP20]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -149,7 +141,7 @@ define void @sdiv_feeding_gep_predicated(ptr %dst, i32 %x, i64 %M, i64 %conv6, i
; CHECK-NEXT: [[TMP33:%.*]] = sext i32 [[TMP32]] to i64
; CHECK-NEXT: [[TMP34:%.*]] = getelementptr double, ptr [[DST]], i64 [[TMP33]]
; CHECK-NEXT: call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> zeroinitializer, ptr [[TMP34]], i32 8, <vscale x 2 x i1> [[TMP23]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP9]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP6]]
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX]], i64 [[TMP14]])
; CHECK-NEXT: [[TMP36:%.*]] = xor <vscale x 2 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[DOTSPLAT]]
@@ -232,12 +224,6 @@ define void @udiv_urem_feeding_gep(i64 %x, ptr %dst, i64 %N) {
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
-; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP0]], [[TMP7]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP6]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 2
; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 2
; CHECK-NEXT: [[TMP12:%.*]] = sub i64 [[TMP0]], [[TMP11]]
@@ -249,7 +235,7 @@ define void @udiv_urem_feeding_gep(i64 %x, ptr %dst, i64 %N) {
; CHECK-NEXT: [[TMP15:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
; CHECK-NEXT: [[TMP17:%.*]] = mul <vscale x 2 x i64> [[TMP15]], splat (i64 1)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP17]]
-; CHECK-NEXT: [[TMP20:%.*]] = mul i64 1, [[TMP9]]
+; CHECK-NEXT: [[TMP20:%.*]] = mul i64 1, [[TMP6]]
; CHECK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP20]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT3]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -274,7 +260,7 @@ define void @udiv_urem_feeding_gep(i64 %x, ptr %dst, i64 %N) {
; CHECK-NEXT: [[TMP37:%.*]] = ashr i64 [[TMP36]], 32
; CHECK-NEXT: [[TMP38:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP37]]
; CHECK-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP23]], ptr [[TMP38]], i32 4, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP9]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP6]]
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX]], i64 [[TMP14]])
; CHECK-NEXT: [[TMP47:%.*]] = xor <vscale x 2 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT4]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/eliminate-tail-predication.ll b/llvm/test/Transforms/LoopVectorize/AArch64/eliminate-tail-predication.ll
index ab008e7..6a592ed 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/eliminate-tail-predication.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/eliminate-tail-predication.ll
@@ -17,16 +17,14 @@ define void @f1(ptr %A) #0 {
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: store <vscale x 4 x i32> splat (i32 1), ptr [[TMP6]], align 4
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: store <vscale x 4 x i32> splat (i32 1), ptr [[TMP4]], align 4
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/gather-do-not-vectorize-addressing.ll b/llvm/test/Transforms/LoopVectorize/AArch64/gather-do-not-vectorize-addressing.ll
index 596a2ed..a8d0b37 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/gather-do-not-vectorize-addressing.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/gather-do-not-vectorize-addressing.ll
@@ -80,8 +80,6 @@ define dso_local double @test(ptr nocapture noundef readonly %data, ptr nocaptur
; SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
; SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
; SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
-; SVE-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; SVE-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 2
; SVE-NEXT: br label [[VECTOR_BODY:%.*]]
; SVE: vector.body:
; SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -92,7 +90,7 @@ define dso_local double @test(ptr nocapture noundef readonly %data, ptr nocaptur
; SVE-NEXT: [[TMP8:%.*]] = getelementptr inbounds double, ptr [[DATA:%.*]], <vscale x 2 x i64> [[TMP7]]
; SVE-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64.nxv2p0(<vscale x 2 x ptr> [[TMP8]], i32 8, <vscale x 2 x i1> splat (i1 true), <vscale x 2 x double> poison)
; SVE-NEXT: [[TMP9]] = fadd <vscale x 2 x double> [[VEC_PHI]], [[WIDE_MASKED_GATHER]]
-; SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP11]]
+; SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; SVE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; SVE-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; SVE: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs-sve.ll b/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs-sve.ll
index 8b354d9..3e417a0 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs-sve.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs-sve.ll
@@ -28,8 +28,6 @@ define void @iv_casts(ptr %dst, ptr %src, i32 %x, i64 %N) #0 {
; DEFAULT-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 16
; DEFAULT-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP10]]
; DEFAULT-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
-; DEFAULT-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
-; DEFAULT-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 16
; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i32> poison, i32 [[X]], i64 0
; DEFAULT-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 8 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
; DEFAULT-NEXT: [[TMP13:%.*]] = trunc <vscale x 8 x i32> [[BROADCAST_SPLAT]] to <vscale x 8 x i16>
@@ -60,7 +58,7 @@ define void @iv_casts(ptr %dst, ptr %src, i32 %x, i64 %N) #0 {
; DEFAULT-NEXT: [[TMP43:%.*]] = getelementptr i8, ptr [[TMP38]], i64 [[TMP42]]
; DEFAULT-NEXT: store <vscale x 8 x i8> [[TMP36]], ptr [[TMP38]], align 1
; DEFAULT-NEXT: store <vscale x 8 x i8> [[TMP37]], ptr [[TMP43]], align 1
-; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]]
+; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
; DEFAULT-NEXT: [[TMP44:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; DEFAULT-NEXT: br i1 [[TMP44]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; DEFAULT: [[MIDDLE_BLOCK]]:
@@ -103,12 +101,6 @@ define void @iv_casts(ptr %dst, ptr %src, i32 %x, i64 %N) #0 {
; PRED: [[VECTOR_PH]]:
; PRED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; PRED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 16
-; PRED-NEXT: [[TMP8:%.*]] = sub i64 [[TMP5]], 1
-; PRED-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP0]], [[TMP8]]
-; PRED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; PRED-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; PRED-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; PRED-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 16
; PRED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[X]], i64 0
; PRED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
; PRED-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
@@ -132,7 +124,7 @@ define void @iv_casts(ptr %dst, ptr %src, i32 %x, i64 %N) #0 {
; PRED-NEXT: [[TMP23:%.*]] = trunc <vscale x 16 x i16> [[TMP21]] to <vscale x 16 x i8>
; PRED-NEXT: [[TMP26:%.*]] = getelementptr i8, ptr [[DST]], i64 [[INDEX]]
; PRED-NEXT: call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[TMP23]], ptr [[TMP26]], i32 1, <vscale x 16 x i1> [[ACTIVE_LANE_MASK]])
-; PRED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP10]]
+; PRED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP5]]
; PRED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 [[INDEX]], i64 [[TMP15]])
; PRED-NEXT: [[TMP28:%.*]] = xor <vscale x 16 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
; PRED-NEXT: [[TMP29:%.*]] = extractelement <vscale x 16 x i1> [[TMP28]], i32 0
@@ -270,9 +262,6 @@ define void @iv_trunc(i32 %x, ptr %dst, i64 %N) #0 {
; PRED-NEXT: [[TMP12:%.*]] = or i1 [[TMP8]], [[TMP11]]
; PRED-NEXT: br i1 [[TMP12]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; PRED: [[VECTOR_PH]]:
-; PRED-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP0]], 1
-; PRED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 2
-; PRED-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; PRED-NEXT: [[TMP13:%.*]] = sub i64 [[TMP0]], 2
; PRED-NEXT: [[TMP14:%.*]] = icmp ugt i64 [[TMP0]], 2
; PRED-NEXT: [[TMP15:%.*]] = select i1 [[TMP14]], i64 [[TMP13]], i64 0
@@ -441,9 +430,6 @@ define void @trunc_ivs_and_store(i32 %x, ptr %dst, i64 %N) #0 {
; PRED-NEXT: [[TMP13:%.*]] = or i1 [[TMP9]], [[TMP12]]
; PRED-NEXT: br i1 [[TMP13]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; PRED: [[VECTOR_PH]]:
-; PRED-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP0]], 3
-; PRED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 4
-; PRED-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; PRED-NEXT: [[TMP14:%.*]] = sub i64 [[TMP0]], 4
; PRED-NEXT: [[TMP15:%.*]] = icmp ugt i64 [[TMP0]], 4
; PRED-NEXT: [[TMP16:%.*]] = select i1 [[TMP15]], i64 [[TMP14]], i64 0
@@ -635,9 +621,6 @@ define void @ivs_trunc_and_ext(i32 %x, ptr %dst, i64 %N) #0 {
; PRED-NEXT: [[TMP12:%.*]] = or i1 [[TMP8]], [[TMP11]]
; PRED-NEXT: br i1 [[TMP12]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; PRED: [[VECTOR_PH]]:
-; PRED-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP0]], 3
-; PRED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 4
-; PRED-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; PRED-NEXT: [[TMP13:%.*]] = sub i64 [[TMP0]], 4
; PRED-NEXT: [[TMP14:%.*]] = icmp ugt i64 [[TMP0]], 4
; PRED-NEXT: [[TMP15:%.*]] = select i1 [[TMP14]], i64 [[TMP13]], i64 0
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/interleave-with-gaps.ll b/llvm/test/Transforms/LoopVectorize/AArch64/interleave-with-gaps.ll
index 649be65..1c4b621 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/interleave-with-gaps.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/interleave-with-gaps.ll
@@ -55,8 +55,6 @@ define i64 @vector_loop_with_remaining_iterations(ptr %src, ptr noalias %dst, i3
; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; CHECK-NEXT: [[TMP19:%.*]] = select i1 [[TMP18]], i64 [[TMP17]], i64 [[N_MOD_VF]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 17, [[TMP19]]
-; CHECK-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP21:%.*]] = mul nuw i64 [[TMP20]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[X]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP22:%.*]] = insertelement <vscale x 2 x i64> zeroinitializer, i64 [[BC_MERGE_RDX]], i32 0
@@ -67,7 +65,7 @@ define i64 @vector_loop_with_remaining_iterations(ptr %src, ptr noalias %dst, i3
; CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT3]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP26:%.*]] = mul <vscale x 2 x i64> [[TMP25]], splat (i64 1)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> [[BROADCAST_SPLAT4]], [[TMP26]]
-; CHECK-NEXT: [[TMP27:%.*]] = mul i64 1, [[TMP21]]
+; CHECK-NEXT: [[TMP27:%.*]] = mul i64 1, [[TMP17]]
; CHECK-NEXT: [[BROADCAST_SPLATINSERT5:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP27]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT6:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT5]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]]
@@ -84,7 +82,7 @@ define i64 @vector_loop_with_remaining_iterations(ptr %src, ptr noalias %dst, i3
; CHECK-NEXT: store <vscale x 2 x i8> zeroinitializer, ptr [[TMP32]], align 1
; CHECK-NEXT: [[TMP34:%.*]] = zext <vscale x 2 x i32> [[TMP31]] to <vscale x 2 x i64>
; CHECK-NEXT: [[TMP35]] = or <vscale x 2 x i64> [[VEC_PHI8]], [[TMP34]]
-; CHECK-NEXT: [[INDEX_NEXT9]] = add nuw i64 [[INDEX7]], [[TMP21]]
+; CHECK-NEXT: [[INDEX_NEXT9]] = add nuw i64 [[INDEX7]], [[TMP17]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT6]]
; CHECK-NEXT: [[TMP36:%.*]] = icmp eq i64 [[INDEX_NEXT9]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP36]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
@@ -193,8 +191,6 @@ define i64 @main_vector_loop_fixed_with_no_remaining_iterations(ptr %src, ptr no
; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; CHECK-NEXT: [[TMP19:%.*]] = select i1 [[TMP18]], i64 [[TMP17]], i64 [[N_MOD_VF]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 17, [[TMP19]]
-; CHECK-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP21:%.*]] = mul nuw i64 [[TMP20]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[X]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP22:%.*]] = insertelement <vscale x 2 x i64> zeroinitializer, i64 [[BC_MERGE_RDX]], i32 0
@@ -205,7 +201,7 @@ define i64 @main_vector_loop_fixed_with_no_remaining_iterations(ptr %src, ptr no
; CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT3]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP38:%.*]] = mul <vscale x 2 x i64> [[TMP25]], splat (i64 1)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> [[BROADCAST_SPLAT4]], [[TMP38]]
-; CHECK-NEXT: [[TMP39:%.*]] = mul i64 1, [[TMP21]]
+; CHECK-NEXT: [[TMP39:%.*]] = mul i64 1, [[TMP17]]
; CHECK-NEXT: [[BROADCAST_SPLATINSERT5:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP39]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT6:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT5]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]]
@@ -222,7 +218,7 @@ define i64 @main_vector_loop_fixed_with_no_remaining_iterations(ptr %src, ptr no
; CHECK-NEXT: store <vscale x 2 x i8> zeroinitializer, ptr [[TMP32]], align 1
; CHECK-NEXT: [[TMP34:%.*]] = zext <vscale x 2 x i32> [[TMP31]] to <vscale x 2 x i64>
; CHECK-NEXT: [[TMP35]] = or <vscale x 2 x i64> [[VEC_PHI8]], [[TMP34]]
-; CHECK-NEXT: [[INDEX_NEXT9]] = add nuw i64 [[INDEX7]], [[TMP21]]
+; CHECK-NEXT: [[INDEX_NEXT9]] = add nuw i64 [[INDEX7]], [[TMP17]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT6]]
; CHECK-NEXT: [[TMP36:%.*]] = icmp eq i64 [[INDEX_NEXT9]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP36]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/interleaving-reduction.ll b/llvm/test/Transforms/LoopVectorize/AArch64/interleaving-reduction.ll
index f069347..fa8d17c 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/interleaving-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/interleaving-reduction.ll
@@ -145,8 +145,6 @@ define i32 @interleave_integer_reduction(ptr %src, i64 %N) {
; INTERLEAVE-4-VLA-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
; INTERLEAVE-4-VLA-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; INTERLEAVE-4-VLA-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; INTERLEAVE-4-VLA-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; INTERLEAVE-4-VLA-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 16
; INTERLEAVE-4-VLA-NEXT: br label [[VECTOR_BODY:%.*]]
; INTERLEAVE-4-VLA: vector.body:
; INTERLEAVE-4-VLA-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -172,7 +170,7 @@ define i32 @interleave_integer_reduction(ptr %src, i64 %N) {
; INTERLEAVE-4-VLA-NEXT: [[TMP17]] = add <vscale x 4 x i32> [[VEC_PHI1]], [[WIDE_LOAD4]]
; INTERLEAVE-4-VLA-NEXT: [[TMP18]] = add <vscale x 4 x i32> [[VEC_PHI2]], [[WIDE_LOAD5]]
; INTERLEAVE-4-VLA-NEXT: [[TMP19]] = add <vscale x 4 x i32> [[VEC_PHI3]], [[WIDE_LOAD6]]
-; INTERLEAVE-4-VLA-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; INTERLEAVE-4-VLA-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; INTERLEAVE-4-VLA-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; INTERLEAVE-4-VLA-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; INTERLEAVE-4-VLA: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/low_trip_count_predicates.ll b/llvm/test/Transforms/LoopVectorize/AArch64/low_trip_count_predicates.ll
index 5f5d326..a54a404 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/low_trip_count_predicates.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/low_trip_count_predicates.ll
@@ -72,8 +72,6 @@ define void @low_vf_ic_is_better(ptr nocapture noundef %p, i32 %tc, i16 noundef
; CHECK-VS1-NEXT: [[TMP17:%.*]] = mul nuw i64 [[TMP16]], 16
; CHECK-VS1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP3]], [[TMP17]]
; CHECK-VS1-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF]]
-; CHECK-VS1-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-VS1-NEXT: [[TMP19:%.*]] = mul nuw i64 [[TMP18]], 16
; CHECK-VS1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[CONV]], i64 0
; CHECK-VS1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
; CHECK-VS1-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -84,7 +82,7 @@ define void @low_vf_ic_is_better(ptr nocapture noundef %p, i32 %tc, i16 noundef
; CHECK-VS1-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP22]], align 1
; CHECK-VS1-NEXT: [[TMP24:%.*]] = add <vscale x 16 x i8> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
; CHECK-VS1-NEXT: store <vscale x 16 x i8> [[TMP24]], ptr [[TMP22]], align 1
-; CHECK-VS1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP19]]
+; CHECK-VS1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP17]]
; CHECK-VS1-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-VS1-NEXT: br i1 [[TMP25]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-VS1: [[MIDDLE_BLOCK]]:
@@ -103,8 +101,6 @@ define void @low_vf_ic_is_better(ptr nocapture noundef %p, i32 %tc, i16 noundef
; CHECK-VS1-NEXT: [[TMP29:%.*]] = mul nuw i64 [[TMP28]], 8
; CHECK-VS1-NEXT: [[N_MOD_VF2:%.*]] = urem i64 [[TMP3]], [[TMP29]]
; CHECK-VS1-NEXT: [[N_VEC3:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF2]]
-; CHECK-VS1-NEXT: [[TMP30:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-VS1-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP30]], 8
; CHECK-VS1-NEXT: [[TMP39:%.*]] = add i64 [[TMP0]], [[N_VEC3]]
; CHECK-VS1-NEXT: [[BROADCAST_SPLATINSERT7:%.*]] = insertelement <vscale x 8 x i8> poison, i8 [[CONV]], i64 0
; CHECK-VS1-NEXT: [[BROADCAST_SPLAT8:%.*]] = shufflevector <vscale x 8 x i8> [[BROADCAST_SPLATINSERT7]], <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
@@ -116,7 +112,7 @@ define void @low_vf_ic_is_better(ptr nocapture noundef %p, i32 %tc, i16 noundef
; CHECK-VS1-NEXT: [[WIDE_LOAD6:%.*]] = load <vscale x 8 x i8>, ptr [[TMP33]], align 1
; CHECK-VS1-NEXT: [[TMP35:%.*]] = add <vscale x 8 x i8> [[WIDE_LOAD6]], [[BROADCAST_SPLAT8]]
; CHECK-VS1-NEXT: store <vscale x 8 x i8> [[TMP35]], ptr [[TMP33]], align 1
-; CHECK-VS1-NEXT: [[INDEX_NEXT9]] = add nuw i64 [[INDEX5]], [[TMP31]]
+; CHECK-VS1-NEXT: [[INDEX_NEXT9]] = add nuw i64 [[INDEX5]], [[TMP29]]
; CHECK-VS1-NEXT: [[TMP36:%.*]] = icmp eq i64 [[INDEX_NEXT9]], [[N_VEC3]]
; CHECK-VS1-NEXT: br i1 [[TMP36]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK-VS1: [[VEC_EPILOG_MIDDLE_BLOCK]]:
@@ -176,8 +172,6 @@ define void @low_vf_ic_is_better(ptr nocapture noundef %p, i32 %tc, i16 noundef
; CHECK-VS2-NEXT: [[TMP17:%.*]] = mul nuw i64 [[TMP16]], 8
; CHECK-VS2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP3]], [[TMP17]]
; CHECK-VS2-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF]]
-; CHECK-VS2-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-VS2-NEXT: [[TMP19:%.*]] = mul nuw i64 [[TMP18]], 8
; CHECK-VS2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i8> poison, i8 [[CONV]], i64 0
; CHECK-VS2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 8 x i8> [[BROADCAST_SPLATINSERT]], <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-VS2-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -188,7 +182,7 @@ define void @low_vf_ic_is_better(ptr nocapture noundef %p, i32 %tc, i16 noundef
; CHECK-VS2-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i8>, ptr [[TMP22]], align 1
; CHECK-VS2-NEXT: [[TMP24:%.*]] = add <vscale x 8 x i8> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
; CHECK-VS2-NEXT: store <vscale x 8 x i8> [[TMP24]], ptr [[TMP22]], align 1
-; CHECK-VS2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP19]]
+; CHECK-VS2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP17]]
; CHECK-VS2-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-VS2-NEXT: br i1 [[TMP25]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-VS2: [[MIDDLE_BLOCK]]:
@@ -207,8 +201,6 @@ define void @low_vf_ic_is_better(ptr nocapture noundef %p, i32 %tc, i16 noundef
; CHECK-VS2-NEXT: [[TMP29:%.*]] = mul nuw i64 [[TMP28]], 4
; CHECK-VS2-NEXT: [[N_MOD_VF2:%.*]] = urem i64 [[TMP3]], [[TMP29]]
; CHECK-VS2-NEXT: [[N_VEC3:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF2]]
-; CHECK-VS2-NEXT: [[TMP30:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-VS2-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP30]], 4
; CHECK-VS2-NEXT: [[TMP39:%.*]] = add i64 [[TMP0]], [[N_VEC3]]
; CHECK-VS2-NEXT: [[BROADCAST_SPLATINSERT7:%.*]] = insertelement <vscale x 4 x i8> poison, i8 [[CONV]], i64 0
; CHECK-VS2-NEXT: [[BROADCAST_SPLAT8:%.*]] = shufflevector <vscale x 4 x i8> [[BROADCAST_SPLATINSERT7]], <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
@@ -220,7 +212,7 @@ define void @low_vf_ic_is_better(ptr nocapture noundef %p, i32 %tc, i16 noundef
; CHECK-VS2-NEXT: [[WIDE_LOAD6:%.*]] = load <vscale x 4 x i8>, ptr [[TMP33]], align 1
; CHECK-VS2-NEXT: [[TMP35:%.*]] = add <vscale x 4 x i8> [[WIDE_LOAD6]], [[BROADCAST_SPLAT8]]
; CHECK-VS2-NEXT: store <vscale x 4 x i8> [[TMP35]], ptr [[TMP33]], align 1
-; CHECK-VS2-NEXT: [[INDEX_NEXT9]] = add nuw i64 [[INDEX5]], [[TMP31]]
+; CHECK-VS2-NEXT: [[INDEX_NEXT9]] = add nuw i64 [[INDEX5]], [[TMP29]]
; CHECK-VS2-NEXT: [[TMP36:%.*]] = icmp eq i64 [[INDEX_NEXT9]], [[N_VEC3]]
; CHECK-VS2-NEXT: br i1 [[TMP36]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK-VS2: [[VEC_EPILOG_MIDDLE_BLOCK]]:
@@ -416,12 +408,6 @@ define void @overflow_indvar_known_false(ptr nocapture noundef %p, i32 noundef %
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
-; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP3]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP1]], [[TMP4]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16
; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 0, i64 [[TMP1]])
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[CONV]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
@@ -434,7 +420,7 @@ define void @overflow_indvar_known_false(ptr nocapture noundef %p, i32 noundef %
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[TMP13]], i32 1, <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i8> poison)
; CHECK-NEXT: [[TMP15:%.*]] = add <vscale x 16 x i8> [[WIDE_MASKED_LOAD]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[TMP15]], ptr [[TMP13]], i32 1, <vscale x 16 x i1> [[ACTIVE_LANE_MASK]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 [[INDEX_NEXT]], i64 [[TMP1]])
; CHECK-NEXT: [[TMP16:%.*]] = xor <vscale x 16 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
; CHECK-NEXT: [[TMP17:%.*]] = extractelement <vscale x 16 x i1> [[TMP16]], i32 0
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll b/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll
index 6029095..32a69f1 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll
@@ -18,8 +18,6 @@ define void @test_widen(ptr noalias %a, ptr readnone %b) #4 {
; TFNONE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
; TFNONE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]]
; TFNONE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]]
-; TFNONE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; TFNONE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; TFNONE-NEXT: br label %[[VECTOR_BODY:.*]]
; TFNONE: [[VECTOR_BODY]]:
; TFNONE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -28,7 +26,7 @@ define void @test_widen(ptr noalias %a, ptr readnone %b) #4 {
; TFNONE-NEXT: [[TMP7:%.*]] = call <vscale x 2 x i64> @foo_vector(<vscale x 2 x i64> [[WIDE_LOAD]], <vscale x 2 x i1> splat (i1 true))
; TFNONE-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]]
; TFNONE-NEXT: store <vscale x 2 x i64> [[TMP7]], ptr [[TMP8]], align 8
-; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; TFNONE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; TFNONE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; TFNONE: [[MIDDLE_BLOCK]]:
@@ -141,8 +139,6 @@ define void @test_if_then(ptr noalias %a, ptr readnone %b) #4 {
; TFNONE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
; TFNONE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]]
; TFNONE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]]
-; TFNONE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; TFNONE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; TFNONE-NEXT: br label %[[VECTOR_BODY:.*]]
; TFNONE: [[VECTOR_BODY]]:
; TFNONE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -153,7 +149,7 @@ define void @test_if_then(ptr noalias %a, ptr readnone %b) #4 {
; TFNONE-NEXT: [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP7]], <vscale x 2 x i64> [[TMP8]], <vscale x 2 x i64> zeroinitializer
; TFNONE-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[INDEX]]
; TFNONE-NEXT: store <vscale x 2 x i64> [[PREDPHI]], ptr [[TMP10]], align 8
-; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; TFNONE-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; TFNONE-NEXT: br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; TFNONE: [[MIDDLE_BLOCK]]:
@@ -168,10 +164,10 @@ define void @test_if_then(ptr noalias %a, ptr readnone %b) #4 {
; TFNONE-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP12]], 50
; TFNONE-NEXT: br i1 [[CMP]], label %[[IF_THEN:.*]], label %[[IF_END]]
; TFNONE: [[IF_THEN]]:
-; TFNONE-NEXT: [[TMP13:%.*]] = call i64 @foo(i64 [[TMP12]]) #[[ATTR4]]
+; TFNONE-NEXT: [[TMP9:%.*]] = call i64 @foo(i64 [[TMP12]]) #[[ATTR4]]
; TFNONE-NEXT: br label %[[IF_END]]
; TFNONE: [[IF_END]]:
-; TFNONE-NEXT: [[TMP14:%.*]] = phi i64 [ [[TMP13]], %[[IF_THEN]] ], [ 0, %[[FOR_BODY]] ]
+; TFNONE-NEXT: [[TMP14:%.*]] = phi i64 [ [[TMP9]], %[[IF_THEN]] ], [ 0, %[[FOR_BODY]] ]
; TFNONE-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[INDVARS_IV]]
; TFNONE-NEXT: store i64 [[TMP14]], ptr [[ARRAYIDX1]], align 8
; TFNONE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
@@ -292,8 +288,6 @@ define void @test_widen_if_then_else(ptr noalias %a, ptr readnone %b) #4 {
; TFNONE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
; TFNONE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]]
; TFNONE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]]
-; TFNONE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; TFNONE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; TFNONE-NEXT: br label %[[VECTOR_BODY:.*]]
; TFNONE: [[VECTOR_BODY]]:
; TFNONE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -306,7 +300,7 @@ define void @test_widen_if_then_else(ptr noalias %a, ptr readnone %b) #4 {
; TFNONE-NEXT: [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP7]], <vscale x 2 x i64> [[TMP10]], <vscale x 2 x i64> [[TMP9]]
; TFNONE-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[INDEX]]
; TFNONE-NEXT: store <vscale x 2 x i64> [[PREDPHI]], ptr [[TMP11]], align 8
-; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; TFNONE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; TFNONE-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; TFNONE: [[MIDDLE_BLOCK]]:
@@ -460,8 +454,6 @@ define void @test_widen_nomask(ptr noalias %a, ptr readnone %b) #4 {
; TFNONE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
; TFNONE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]]
; TFNONE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]]
-; TFNONE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; TFNONE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; TFNONE-NEXT: br label %[[VECTOR_BODY:.*]]
; TFNONE: [[VECTOR_BODY]]:
; TFNONE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -470,7 +462,7 @@ define void @test_widen_nomask(ptr noalias %a, ptr readnone %b) #4 {
; TFNONE-NEXT: [[TMP7:%.*]] = call <vscale x 2 x i64> @foo_vector_nomask(<vscale x 2 x i64> [[WIDE_LOAD]])
; TFNONE-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]]
; TFNONE-NEXT: store <vscale x 2 x i64> [[TMP7]], ptr [[TMP8]], align 8
-; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; TFNONE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; TFNONE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; TFNONE: [[MIDDLE_BLOCK]]:
@@ -515,8 +507,6 @@ define void @test_widen_nomask(ptr noalias %a, ptr readnone %b) #4 {
; TFFALLBACK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
; TFFALLBACK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]]
; TFFALLBACK-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]]
-; TFFALLBACK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; TFFALLBACK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; TFFALLBACK-NEXT: br label %[[VECTOR_BODY:.*]]
; TFFALLBACK: [[VECTOR_BODY]]:
; TFFALLBACK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -525,7 +515,7 @@ define void @test_widen_nomask(ptr noalias %a, ptr readnone %b) #4 {
; TFFALLBACK-NEXT: [[TMP7:%.*]] = call <vscale x 2 x i64> @foo_vector_nomask(<vscale x 2 x i64> [[WIDE_LOAD]])
; TFFALLBACK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]]
; TFFALLBACK-NEXT: store <vscale x 2 x i64> [[TMP7]], ptr [[TMP8]], align 8
-; TFFALLBACK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; TFFALLBACK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; TFFALLBACK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; TFFALLBACK-NEXT: br i1 [[TMP9]], label %[[FOR_BODY:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; TFFALLBACK: [[FOR_BODY]]:
@@ -589,8 +579,6 @@ define void @test_widen_optmask(ptr noalias %a, ptr readnone %b) #4 {
; TFNONE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
; TFNONE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]]
; TFNONE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]]
-; TFNONE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; TFNONE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; TFNONE-NEXT: br label %[[VECTOR_BODY:.*]]
; TFNONE: [[VECTOR_BODY]]:
; TFNONE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -599,7 +587,7 @@ define void @test_widen_optmask(ptr noalias %a, ptr readnone %b) #4 {
; TFNONE-NEXT: [[TMP7:%.*]] = call <vscale x 2 x i64> @foo_vector_nomask(<vscale x 2 x i64> [[WIDE_LOAD]])
; TFNONE-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]]
; TFNONE-NEXT: store <vscale x 2 x i64> [[TMP7]], ptr [[TMP8]], align 8
-; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; TFNONE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; TFNONE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; TFNONE: [[MIDDLE_BLOCK]]:
@@ -737,8 +725,6 @@ define double @test_widen_fmuladd_and_call(ptr noalias %a, ptr readnone %b, doub
; TFNONE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
; TFNONE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]]
; TFNONE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]]
-; TFNONE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; TFNONE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; TFNONE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x double> poison, double [[M]], i64 0
; TFNONE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x double> [[BROADCAST_SPLATINSERT]], <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
; TFNONE-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -753,7 +739,7 @@ define double @test_widen_fmuladd_and_call(ptr noalias %a, ptr readnone %b, doub
; TFNONE-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]]
; TFNONE-NEXT: store <vscale x 2 x i64> [[TMP9]], ptr [[TMP10]], align 8
; TFNONE-NEXT: [[TMP11]] = call double @llvm.vector.reduce.fadd.nxv2f64(double [[VEC_PHI]], <vscale x 2 x double> [[TMP7]])
-; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; TFNONE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; TFNONE-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; TFNONE: [[MIDDLE_BLOCK]]:
@@ -926,8 +912,6 @@ define void @test_widen_exp_v2(ptr noalias %p2, ptr noalias %p, i64 %n) #5 {
; TFNONE-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
; TFNONE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]]
; TFNONE-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
-; TFNONE-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; TFNONE-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
; TFNONE-NEXT: br label %[[VECTOR_BODY:.*]]
; TFNONE: [[VECTOR_BODY]]:
; TFNONE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -942,7 +926,7 @@ define void @test_widen_exp_v2(ptr noalias %p2, ptr noalias %p, i64 %n) #5 {
; TFNONE-NEXT: [[TMP13:%.*]] = sub i32 [[TMP12]], 1
; TFNONE-NEXT: [[TMP14:%.*]] = extractelement <vscale x 2 x double> [[PREDPHI]], i32 [[TMP13]]
; TFNONE-NEXT: store double [[TMP14]], ptr [[P]], align 8
-; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
+; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
; TFNONE-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; TFNONE-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; TFNONE: [[MIDDLE_BLOCK]]:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll b/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll
index cc36cdb..3ab7171 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll
@@ -495,12 +495,6 @@ define void @sve_tail_predicate_without_minsize(ptr %p, i8 %a, i8 %b, i8 %c, i32
; DEFAULT: [[VECTOR_PH]]:
; DEFAULT-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; DEFAULT-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 16
-; DEFAULT-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; DEFAULT-NEXT: [[N_RND_UP:%.*]] = add i64 15, [[TMP2]]
-; DEFAULT-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; DEFAULT-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; DEFAULT-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; DEFAULT-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 16
; DEFAULT-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; DEFAULT-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
; DEFAULT-NEXT: [[TMP7:%.*]] = sub i64 15, [[TMP6]]
@@ -516,7 +510,7 @@ define void @sve_tail_predicate_without_minsize(ptr %p, i8 %a, i8 %b, i8 %c, i32
; DEFAULT-NEXT: [[TMP10:%.*]] = call <vscale x 16 x i8> @llvm.stepvector.nxv16i8()
; DEFAULT-NEXT: [[TMP11:%.*]] = mul <vscale x 16 x i8> [[TMP10]], splat (i8 1)
; DEFAULT-NEXT: [[INDUCTION:%.*]] = add <vscale x 16 x i8> zeroinitializer, [[TMP11]]
-; DEFAULT-NEXT: [[TMP12:%.*]] = trunc i64 [[TMP4]] to i8
+; DEFAULT-NEXT: [[TMP12:%.*]] = trunc i64 [[TMP1]] to i8
; DEFAULT-NEXT: [[TMP13:%.*]] = mul i8 1, [[TMP12]]
; DEFAULT-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[TMP13]], i64 0
; DEFAULT-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
@@ -534,7 +528,7 @@ define void @sve_tail_predicate_without_minsize(ptr %p, i8 %a, i8 %b, i8 %c, i32
; DEFAULT-NEXT: [[TMP21:%.*]] = add <vscale x 16 x i8> [[TMP18]], [[TMP20]]
; DEFAULT-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[INDEX]]
; DEFAULT-NEXT: call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[TMP21]], ptr [[TMP22]], i32 1, <vscale x 16 x i1> [[ACTIVE_LANE_MASK]])
-; DEFAULT-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]]
+; DEFAULT-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]]
; DEFAULT-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 [[INDEX]], i64 [[TMP9]])
; DEFAULT-NEXT: [[TMP24:%.*]] = xor <vscale x 16 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
; DEFAULT-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i8> [[VEC_IND]], [[DOTSPLAT]]
@@ -570,12 +564,6 @@ define void @sve_tail_predicate_without_minsize(ptr %p, i8 %a, i8 %b, i8 %c, i32
; OPTSIZE: [[VECTOR_PH]]:
; OPTSIZE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; OPTSIZE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 16
-; OPTSIZE-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; OPTSIZE-NEXT: [[N_RND_UP:%.*]] = add i64 15, [[TMP2]]
-; OPTSIZE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; OPTSIZE-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; OPTSIZE-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; OPTSIZE-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 16
; OPTSIZE-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; OPTSIZE-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
; OPTSIZE-NEXT: [[TMP7:%.*]] = sub i64 15, [[TMP6]]
@@ -591,7 +579,7 @@ define void @sve_tail_predicate_without_minsize(ptr %p, i8 %a, i8 %b, i8 %c, i32
; OPTSIZE-NEXT: [[TMP10:%.*]] = call <vscale x 16 x i8> @llvm.stepvector.nxv16i8()
; OPTSIZE-NEXT: [[TMP11:%.*]] = mul <vscale x 16 x i8> [[TMP10]], splat (i8 1)
; OPTSIZE-NEXT: [[INDUCTION:%.*]] = add <vscale x 16 x i8> zeroinitializer, [[TMP11]]
-; OPTSIZE-NEXT: [[TMP12:%.*]] = trunc i64 [[TMP4]] to i8
+; OPTSIZE-NEXT: [[TMP12:%.*]] = trunc i64 [[TMP1]] to i8
; OPTSIZE-NEXT: [[TMP13:%.*]] = mul i8 1, [[TMP12]]
; OPTSIZE-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[TMP13]], i64 0
; OPTSIZE-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
@@ -609,7 +597,7 @@ define void @sve_tail_predicate_without_minsize(ptr %p, i8 %a, i8 %b, i8 %c, i32
; OPTSIZE-NEXT: [[TMP21:%.*]] = add <vscale x 16 x i8> [[TMP18]], [[TMP20]]
; OPTSIZE-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[INDEX]]
; OPTSIZE-NEXT: call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[TMP21]], ptr [[TMP22]], i32 1, <vscale x 16 x i1> [[ACTIVE_LANE_MASK]])
-; OPTSIZE-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]]
+; OPTSIZE-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]]
; OPTSIZE-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 [[INDEX]], i64 [[TMP9]])
; OPTSIZE-NEXT: [[TMP24:%.*]] = xor <vscale x 16 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
; OPTSIZE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i8> [[VEC_IND]], [[DOTSPLAT]]
@@ -645,12 +633,6 @@ define void @sve_tail_predicate_without_minsize(ptr %p, i8 %a, i8 %b, i8 %c, i32
; MINSIZE: [[VECTOR_PH]]:
; MINSIZE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; MINSIZE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 16
-; MINSIZE-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; MINSIZE-NEXT: [[N_RND_UP:%.*]] = add i64 15, [[TMP2]]
-; MINSIZE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; MINSIZE-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; MINSIZE-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; MINSIZE-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 16
; MINSIZE-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; MINSIZE-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
; MINSIZE-NEXT: [[TMP7:%.*]] = sub i64 15, [[TMP6]]
@@ -666,7 +648,7 @@ define void @sve_tail_predicate_without_minsize(ptr %p, i8 %a, i8 %b, i8 %c, i32
; MINSIZE-NEXT: [[TMP10:%.*]] = call <vscale x 16 x i8> @llvm.stepvector.nxv16i8()
; MINSIZE-NEXT: [[TMP11:%.*]] = mul <vscale x 16 x i8> [[TMP10]], splat (i8 1)
; MINSIZE-NEXT: [[INDUCTION:%.*]] = add <vscale x 16 x i8> zeroinitializer, [[TMP11]]
-; MINSIZE-NEXT: [[TMP12:%.*]] = trunc i64 [[TMP4]] to i8
+; MINSIZE-NEXT: [[TMP12:%.*]] = trunc i64 [[TMP1]] to i8
; MINSIZE-NEXT: [[TMP13:%.*]] = mul i8 1, [[TMP12]]
; MINSIZE-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[TMP13]], i64 0
; MINSIZE-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
@@ -684,7 +666,7 @@ define void @sve_tail_predicate_without_minsize(ptr %p, i8 %a, i8 %b, i8 %c, i32
; MINSIZE-NEXT: [[TMP21:%.*]] = add <vscale x 16 x i8> [[TMP18]], [[TMP20]]
; MINSIZE-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[INDEX]]
; MINSIZE-NEXT: call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[TMP21]], ptr [[TMP22]], i32 1, <vscale x 16 x i1> [[ACTIVE_LANE_MASK]])
-; MINSIZE-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]]
+; MINSIZE-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]]
; MINSIZE-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 [[INDEX]], i64 [[TMP9]])
; MINSIZE-NEXT: [[TMP24:%.*]] = xor <vscale x 16 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
; MINSIZE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i8> [[VEC_IND]], [[DOTSPLAT]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/outer_loop_prefer_scalable.ll b/llvm/test/Transforms/LoopVectorize/AArch64/outer_loop_prefer_scalable.ll
index 2cec6ca..f284afc 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/outer_loop_prefer_scalable.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/outer_loop_prefer_scalable.ll
@@ -18,38 +18,36 @@ define void @foo() {
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
-; CHECK-NEXT: [[TMP7:%.*]] = mul <vscale x 4 x i64> [[TMP6]], splat (i64 1)
-; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP7]]
-; CHECK-NEXT: [[TMP8:%.*]] = mul i64 1, [[TMP5]]
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP8]], i64 0
-; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul <vscale x 4 x i64> [[TMP4]], splat (i64 1)
+; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP5]]
+; CHECK-NEXT: [[TMP6:%.*]] = mul i64 1, [[TMP3]]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP6]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_LATCH:%.*]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_LATCH]] ]
-; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1024 x float], ptr @A, i64 0, <vscale x 4 x i64> [[VEC_IND]]
-; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32.nxv4p0(<vscale x 4 x ptr> [[TMP9]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x float> poison)
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1024 x float], ptr @A, i64 0, <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32.nxv4p0(<vscale x 4 x ptr> [[TMP7]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x float> poison)
; CHECK-NEXT: br label [[INNER_LOOP1:%.*]]
; CHECK: inner_loop1:
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i64> [ zeroinitializer, [[VECTOR_BODY]] ], [ [[TMP12:%.*]], [[INNER_LOOP1]] ]
-; CHECK-NEXT: [[VEC_PHI2:%.*]] = phi <vscale x 4 x float> [ [[WIDE_MASKED_GATHER]], [[VECTOR_BODY]] ], [ [[TMP11:%.*]], [[INNER_LOOP1]] ]
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [512 x float], ptr @B, i64 0, <vscale x 4 x i64> [[VEC_PHI]]
-; CHECK-NEXT: [[WIDE_MASKED_GATHER3:%.*]] = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32.nxv4p0(<vscale x 4 x ptr> [[TMP10]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x float> poison)
-; CHECK-NEXT: [[TMP11]] = fmul <vscale x 4 x float> [[VEC_PHI2]], [[WIDE_MASKED_GATHER3]]
-; CHECK-NEXT: [[TMP12]] = add nuw nsw <vscale x 4 x i64> [[VEC_PHI]], splat (i64 1)
-; CHECK-NEXT: [[TMP13:%.*]] = icmp eq <vscale x 4 x i64> [[TMP12]], splat (i64 512)
-; CHECK-NEXT: [[TMP14:%.*]] = extractelement <vscale x 4 x i1> [[TMP13]], i32 0
-; CHECK-NEXT: br i1 [[TMP14]], label [[VECTOR_LATCH]], label [[INNER_LOOP1]]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i64> [ zeroinitializer, [[VECTOR_BODY]] ], [ [[TMP10:%.*]], [[INNER_LOOP1]] ]
+; CHECK-NEXT: [[VEC_PHI2:%.*]] = phi <vscale x 4 x float> [ [[WIDE_MASKED_GATHER]], [[VECTOR_BODY]] ], [ [[TMP9:%.*]], [[INNER_LOOP1]] ]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [512 x float], ptr @B, i64 0, <vscale x 4 x i64> [[VEC_PHI]]
+; CHECK-NEXT: [[WIDE_MASKED_GATHER3:%.*]] = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32.nxv4p0(<vscale x 4 x ptr> [[TMP8]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x float> poison)
+; CHECK-NEXT: [[TMP9]] = fmul <vscale x 4 x float> [[VEC_PHI2]], [[WIDE_MASKED_GATHER3]]
+; CHECK-NEXT: [[TMP10]] = add nuw nsw <vscale x 4 x i64> [[VEC_PHI]], splat (i64 1)
+; CHECK-NEXT: [[TMP11:%.*]] = icmp eq <vscale x 4 x i64> [[TMP10]], splat (i64 512)
+; CHECK-NEXT: [[TMP12:%.*]] = extractelement <vscale x 4 x i1> [[TMP11]], i32 0
+; CHECK-NEXT: br i1 [[TMP12]], label [[VECTOR_LATCH]], label [[INNER_LOOP1]]
; CHECK: vector.latch:
-; CHECK-NEXT: [[VEC_PHI4:%.*]] = phi <vscale x 4 x float> [ [[TMP11]], [[INNER_LOOP1]] ]
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv4f32.nxv4p0(<vscale x 4 x float> [[VEC_PHI4]], <vscale x 4 x ptr> [[TMP9]], i32 4, <vscale x 4 x i1> splat (i1 true))
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: [[VEC_PHI4:%.*]] = phi <vscale x 4 x float> [ [[TMP9]], [[INNER_LOOP1]] ]
+; CHECK-NEXT: call void @llvm.masked.scatter.nxv4f32.nxv4p0(<vscale x 4 x float> [[VEC_PHI4]], <vscale x 4 x ptr> [[TMP7]], i32 4, <vscale x 4 x i1> splat (i1 true))
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-chained.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-chained.ll
index 787d63c..7232fe5 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-chained.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-chained.ll
@@ -61,8 +61,6 @@ define i32 @chained_partial_reduce_add_sub(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
-; CHECK-SVE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-SVE: vector.body:
; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -80,7 +78,7 @@ define i32 @chained_partial_reduce_add_sub(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-NEXT: [[TMP17:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[TMP16]]
; CHECK-SVE-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 4 x i32> [[TMP13]], [[TMP15]]
; CHECK-SVE-NEXT: [[TMP19]] = sub <vscale x 4 x i32> [[TMP17]], [[TMP18]]
-; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-SVE-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-SVE: middle.block:
@@ -104,8 +102,6 @@ define i32 @chained_partial_reduce_add_sub(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-SVE-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
; CHECK-SVE-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
-; CHECK-SVE-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-SVE-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-SVE-MAXBW: vector.body:
; CHECK-SVE-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -124,7 +120,7 @@ define i32 @chained_partial_reduce_add_sub(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-MAXBW-NEXT: [[TMP17:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP15]]
; CHECK-SVE-MAXBW-NEXT: [[TMP18:%.*]] = sub <vscale x 8 x i32> zeroinitializer, [[TMP17]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE3]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[PARTIAL_REDUCE]], <vscale x 8 x i32> [[TMP18]])
-; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-SVE-MAXBW-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-SVE-MAXBW-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-SVE-MAXBW: middle.block:
@@ -217,8 +213,6 @@ define i32 @chained_partial_reduce_add_add(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
-; CHECK-SVE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-SVE: vector.body:
; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -236,7 +230,7 @@ define i32 @chained_partial_reduce_add_add(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-NEXT: [[TMP17:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[TMP16]]
; CHECK-SVE-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 4 x i32> [[TMP13]], [[TMP15]]
; CHECK-SVE-NEXT: [[TMP19]] = add <vscale x 4 x i32> [[TMP17]], [[TMP18]]
-; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-SVE-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK-SVE: middle.block:
@@ -260,8 +254,6 @@ define i32 @chained_partial_reduce_add_add(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-SVE-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
; CHECK-SVE-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
-; CHECK-SVE-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-SVE-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-SVE-MAXBW: vector.body:
; CHECK-SVE-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -279,7 +271,7 @@ define i32 @chained_partial_reduce_add_add(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP16]])
; CHECK-SVE-MAXBW-NEXT: [[TMP17:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP15]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE3]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[PARTIAL_REDUCE]], <vscale x 8 x i32> [[TMP17]])
-; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-SVE-MAXBW-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-SVE-MAXBW-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK-SVE-MAXBW: middle.block:
@@ -373,8 +365,6 @@ define i32 @chained_partial_reduce_sub_add(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
-; CHECK-SVE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-SVE: vector.body:
; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -392,7 +382,7 @@ define i32 @chained_partial_reduce_sub_add(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-NEXT: [[TMP17:%.*]] = sub <vscale x 4 x i32> [[VEC_PHI]], [[TMP16]]
; CHECK-SVE-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 4 x i32> [[TMP13]], [[TMP15]]
; CHECK-SVE-NEXT: [[TMP19]] = add <vscale x 4 x i32> [[TMP17]], [[TMP18]]
-; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-SVE-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK-SVE: middle.block:
@@ -416,8 +406,6 @@ define i32 @chained_partial_reduce_sub_add(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-SVE-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
; CHECK-SVE-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
-; CHECK-SVE-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-SVE-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-SVE-MAXBW: vector.body:
; CHECK-SVE-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -436,7 +424,7 @@ define i32 @chained_partial_reduce_sub_add(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP17]])
; CHECK-SVE-MAXBW-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP15]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE3]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[PARTIAL_REDUCE]], <vscale x 8 x i32> [[TMP18]])
-; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-SVE-MAXBW-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-SVE-MAXBW-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK-SVE-MAXBW: middle.block:
@@ -533,8 +521,6 @@ define i32 @chained_partial_reduce_sub_sub(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
-; CHECK-SVE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-SVE: vector.body:
; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -552,7 +538,7 @@ define i32 @chained_partial_reduce_sub_sub(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-NEXT: [[TMP17:%.*]] = sub <vscale x 4 x i32> [[VEC_PHI]], [[TMP16]]
; CHECK-SVE-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 4 x i32> [[TMP13]], [[TMP15]]
; CHECK-SVE-NEXT: [[TMP19]] = sub <vscale x 4 x i32> [[TMP17]], [[TMP18]]
-; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-SVE-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK-SVE: middle.block:
@@ -576,8 +562,6 @@ define i32 @chained_partial_reduce_sub_sub(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-SVE-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
; CHECK-SVE-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
-; CHECK-SVE-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-SVE-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-SVE-MAXBW: vector.body:
; CHECK-SVE-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -597,7 +581,7 @@ define i32 @chained_partial_reduce_sub_sub(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-MAXBW-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP15]]
; CHECK-SVE-MAXBW-NEXT: [[TMP19:%.*]] = sub <vscale x 8 x i32> zeroinitializer, [[TMP18]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE3]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[PARTIAL_REDUCE]], <vscale x 8 x i32> [[TMP19]])
-; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-SVE-MAXBW-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-SVE-MAXBW-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK-SVE-MAXBW: middle.block:
@@ -695,8 +679,6 @@ define i32 @chained_partial_reduce_add_add_add(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
-; CHECK-SVE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-SVE: vector.body:
; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -716,7 +698,7 @@ define i32 @chained_partial_reduce_add_add_add(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-NEXT: [[TMP19:%.*]] = add <vscale x 4 x i32> [[TMP17]], [[TMP18]]
; CHECK-SVE-NEXT: [[TMP20:%.*]] = mul nsw <vscale x 4 x i32> [[TMP14]], [[TMP15]]
; CHECK-SVE-NEXT: [[TMP21]] = add <vscale x 4 x i32> [[TMP19]], [[TMP20]]
-; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-SVE-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK-SVE: middle.block:
@@ -740,8 +722,6 @@ define i32 @chained_partial_reduce_add_add_add(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-SVE-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
; CHECK-SVE-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
-; CHECK-SVE-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-SVE-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-SVE-MAXBW: vector.body:
; CHECK-SVE-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -761,7 +741,7 @@ define i32 @chained_partial_reduce_add_add_add(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE3:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[PARTIAL_REDUCE]], <vscale x 8 x i32> [[TMP17]])
; CHECK-SVE-MAXBW-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 8 x i32> [[TMP14]], [[TMP15]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE4]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[PARTIAL_REDUCE3]], <vscale x 8 x i32> [[TMP18]])
-; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-SVE-MAXBW-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-SVE-MAXBW-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK-SVE-MAXBW: middle.block:
@@ -863,8 +843,6 @@ define i32 @chained_partial_reduce_sub_add_sub(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
-; CHECK-SVE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-SVE: vector.body:
; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -884,7 +862,7 @@ define i32 @chained_partial_reduce_sub_add_sub(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-NEXT: [[TMP19:%.*]] = add <vscale x 4 x i32> [[TMP17]], [[TMP18]]
; CHECK-SVE-NEXT: [[TMP20:%.*]] = mul nsw <vscale x 4 x i32> [[TMP14]], [[TMP15]]
; CHECK-SVE-NEXT: [[TMP21]] = sub <vscale x 4 x i32> [[TMP19]], [[TMP20]]
-; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-SVE-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK-SVE: middle.block:
@@ -908,8 +886,6 @@ define i32 @chained_partial_reduce_sub_add_sub(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-SVE-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
; CHECK-SVE-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
-; CHECK-SVE-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-SVE-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-SVE-MAXBW: vector.body:
; CHECK-SVE-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -931,7 +907,7 @@ define i32 @chained_partial_reduce_sub_add_sub(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-MAXBW-NEXT: [[TMP19:%.*]] = mul nsw <vscale x 8 x i32> [[TMP14]], [[TMP15]]
; CHECK-SVE-MAXBW-NEXT: [[TMP20:%.*]] = sub <vscale x 8 x i32> zeroinitializer, [[TMP19]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE4]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[PARTIAL_REDUCE3]], <vscale x 8 x i32> [[TMP20]])
-; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-SVE-MAXBW-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-SVE-MAXBW-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK-SVE-MAXBW: middle.block:
@@ -1029,8 +1005,6 @@ define i32 @chained_partial_reduce_madd_extadd(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
-; CHECK-SVE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-SVE: vector.body:
; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1047,7 +1021,7 @@ define i32 @chained_partial_reduce_madd_extadd(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-NEXT: [[TMP16:%.*]] = mul nsw <vscale x 4 x i32> [[TMP13]], [[TMP14]]
; CHECK-SVE-NEXT: [[TMP17:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[TMP16]]
; CHECK-SVE-NEXT: [[TMP18]] = add <vscale x 4 x i32> [[TMP17]], [[TMP15]]
-; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-SVE-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK-SVE: middle.block:
@@ -1071,8 +1045,6 @@ define i32 @chained_partial_reduce_madd_extadd(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-SVE-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
; CHECK-SVE-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
-; CHECK-SVE-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-SVE-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-SVE-MAXBW: vector.body:
; CHECK-SVE-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1089,7 +1061,7 @@ define i32 @chained_partial_reduce_madd_extadd(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-MAXBW-NEXT: [[TMP16:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP14]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP16]])
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE3]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[PARTIAL_REDUCE]], <vscale x 8 x i32> [[TMP15]])
-; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-SVE-MAXBW-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-SVE-MAXBW-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK-SVE-MAXBW: middle.block:
@@ -1177,8 +1149,6 @@ define i32 @chained_partial_reduce_extadd_extadd(ptr %a, ptr %b, i32 %N) #0 {
; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
-; CHECK-SVE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-SVE: vector.body:
; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1191,7 +1161,7 @@ define i32 @chained_partial_reduce_extadd_extadd(ptr %a, ptr %b, i32 %N) #0 {
; CHECK-SVE-NEXT: [[TMP12:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32>
; CHECK-SVE-NEXT: [[TMP13:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[TMP11]]
; CHECK-SVE-NEXT: [[TMP14]] = add <vscale x 4 x i32> [[TMP13]], [[TMP12]]
-; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-SVE-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; CHECK-SVE: middle.block:
@@ -1215,8 +1185,6 @@ define i32 @chained_partial_reduce_extadd_extadd(ptr %a, ptr %b, i32 %N) #0 {
; CHECK-SVE-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-SVE-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
; CHECK-SVE-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
-; CHECK-SVE-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-SVE-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-SVE-MAXBW: vector.body:
; CHECK-SVE-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1229,7 +1197,7 @@ define i32 @chained_partial_reduce_extadd_extadd(ptr %a, ptr %b, i32 %N) #0 {
; CHECK-SVE-MAXBW-NEXT: [[TMP12:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD1]] to <vscale x 8 x i32>
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP11]])
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE2]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[PARTIAL_REDUCE]], <vscale x 8 x i32> [[TMP12]])
-; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-SVE-MAXBW-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-SVE-MAXBW-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; CHECK-SVE-MAXBW: middle.block:
@@ -1318,8 +1286,6 @@ define i32 @chained_partial_reduce_extadd_madd(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
-; CHECK-SVE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-SVE: vector.body:
; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1336,7 +1302,7 @@ define i32 @chained_partial_reduce_extadd_madd(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-NEXT: [[TMP16:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[TMP15]]
; CHECK-SVE-NEXT: [[TMP17:%.*]] = mul nsw <vscale x 4 x i32> [[TMP13]], [[TMP14]]
; CHECK-SVE-NEXT: [[TMP18]] = add <vscale x 4 x i32> [[TMP16]], [[TMP17]]
-; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-SVE-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; CHECK-SVE: middle.block:
@@ -1360,8 +1326,6 @@ define i32 @chained_partial_reduce_extadd_madd(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-SVE-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
; CHECK-SVE-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
-; CHECK-SVE-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-SVE-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-SVE-MAXBW: vector.body:
; CHECK-SVE-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1378,7 +1342,7 @@ define i32 @chained_partial_reduce_extadd_madd(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP15]])
; CHECK-SVE-MAXBW-NEXT: [[TMP16:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP14]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE3]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[PARTIAL_REDUCE]], <vscale x 8 x i32> [[TMP16]])
-; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-SVE-MAXBW-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-SVE-MAXBW-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; CHECK-SVE-MAXBW: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll
index be8cfa2..9d4a969 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll
@@ -27,6 +27,8 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]])
+; CHECK-NEXT: br label [[FOR_EXIT:%.*]]
+; CHECK: scalar.ph:
;
entry:
br label %for.body
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll
index d01effd..368cb18e 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll
@@ -17,8 +17,6 @@ define i32 @sudot(ptr %a, ptr %b) #0 {
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 16
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -44,7 +42,7 @@ define i32 @sudot(ptr %a, ptr %b) #0 {
; CHECK-NEXT: [[TMP21:%.*]] = mul <vscale x 8 x i32> [[TMP19]], [[TMP12]]
; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP20]])
; CHECK-NEXT: [[PARTIAL_REDUCE5]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI1]], <vscale x 8 x i32> [[TMP21]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
@@ -65,8 +63,6 @@ define i32 @sudot(ptr %a, ptr %b) #0 {
; CHECK-NOI8MM-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
; CHECK-NOI8MM-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-NOI8MM-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-NOI8MM-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NOI8MM-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 16
; CHECK-NOI8MM-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-NOI8MM: vector.body:
; CHECK-NOI8MM-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -92,7 +88,7 @@ define i32 @sudot(ptr %a, ptr %b) #0 {
; CHECK-NOI8MM-NEXT: [[TMP21:%.*]] = mul <vscale x 8 x i32> [[TMP19]], [[TMP12]]
; CHECK-NOI8MM-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP20]])
; CHECK-NOI8MM-NEXT: [[PARTIAL_REDUCE5]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI1]], <vscale x 8 x i32> [[TMP21]])
-; CHECK-NOI8MM-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NOI8MM-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NOI8MM-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NOI8MM-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-NOI8MM: middle.block:
@@ -136,8 +132,6 @@ define i32 @usdot(ptr %a, ptr %b) #0 {
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 16
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -163,7 +157,7 @@ define i32 @usdot(ptr %a, ptr %b) #0 {
; CHECK-NEXT: [[TMP21:%.*]] = mul <vscale x 8 x i32> [[TMP19]], [[TMP12]]
; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP20]])
; CHECK-NEXT: [[PARTIAL_REDUCE5]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI1]], <vscale x 8 x i32> [[TMP21]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
@@ -184,8 +178,6 @@ define i32 @usdot(ptr %a, ptr %b) #0 {
; CHECK-NOI8MM-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
; CHECK-NOI8MM-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-NOI8MM-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-NOI8MM-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NOI8MM-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 16
; CHECK-NOI8MM-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-NOI8MM: vector.body:
; CHECK-NOI8MM-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -211,7 +203,7 @@ define i32 @usdot(ptr %a, ptr %b) #0 {
; CHECK-NOI8MM-NEXT: [[TMP21:%.*]] = mul <vscale x 8 x i32> [[TMP19]], [[TMP12]]
; CHECK-NOI8MM-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP20]])
; CHECK-NOI8MM-NEXT: [[PARTIAL_REDUCE5]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI1]], <vscale x 8 x i32> [[TMP21]])
-; CHECK-NOI8MM-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NOI8MM-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NOI8MM-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NOI8MM-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK-NOI8MM: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-neon.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-neon.ll
index 25a4ab8..cc3203c 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-neon.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-neon.ll
@@ -493,6 +493,8 @@ define i32 @not_dotp_not_loop_carried(ptr %a, ptr %b) {
; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
+; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = extractelement <16 x i32> [[TMP9]], i32 15
+; CHECK-INTERLEAVE1-NEXT: br label [[FOR_EXIT:%.*]]
; CHECK-INTERLEAVE1: scalar.ph:
;
; CHECK-INTERLEAVED-LABEL: define i32 @not_dotp_not_loop_carried(
@@ -517,6 +519,8 @@ define i32 @not_dotp_not_loop_carried(ptr %a, ptr %b) {
; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
+; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = extractelement <16 x i32> [[TMP9]], i32 15
+; CHECK-INTERLEAVED-NEXT: br label [[FOR_EXIT:%.*]]
; CHECK-INTERLEAVED: scalar.ph:
;
; CHECK-MAXBW-LABEL: define i32 @not_dotp_not_loop_carried(
@@ -541,6 +545,8 @@ define i32 @not_dotp_not_loop_carried(ptr %a, ptr %b) {
; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; CHECK-MAXBW-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK-MAXBW: middle.block:
+; CHECK-MAXBW-NEXT: [[TMP11:%.*]] = extractelement <16 x i32> [[TMP9]], i32 15
+; CHECK-MAXBW-NEXT: br label [[FOR_EXIT:%.*]]
; CHECK-MAXBW: scalar.ph:
;
entry:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll
index 195101f..0f0713d 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll
@@ -18,8 +18,6 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVE1: vector.body:
; CHECK-INTERLEAVE1-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT1:%.*]], [[VECTOR_BODY]] ]
@@ -32,7 +30,7 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = mul <vscale x 4 x i32> [[TMP18]], [[TMP9]]
; CHECK-INTERLEAVE1-NEXT: [[TMP14]] = add <vscale x 4 x i32> [[TMP13]], [[VEC_PHI]]
-; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT1]] = add nuw i64 [[INDEX1]], [[TMP5]]
+; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT1]] = add nuw i64 [[INDEX1]], [[TMP3]]
; CHECK-INTERLEAVE1-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT1]], [[N_VEC]]
; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
@@ -52,8 +50,6 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVED: vector.body:
; CHECK-INTERLEAVED-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT1:%.*]], [[VECTOR_BODY]] ]
@@ -79,7 +75,7 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = mul <vscale x 4 x i32> [[TMP29]], [[TMP13]]
; CHECK-INTERLEAVED-NEXT: [[TMP23]] = add <vscale x 4 x i32> [[TMP30]], [[VEC_PHI]]
; CHECK-INTERLEAVED-NEXT: [[TMP24]] = add <vscale x 4 x i32> [[TMP22]], [[VEC_PHI1]]
-; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT1]] = add nuw i64 [[INDEX1]], [[TMP5]]
+; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT1]] = add nuw i64 [[INDEX1]], [[TMP3]]
; CHECK-INTERLEAVED-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT1]], [[N_VEC]]
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
@@ -100,8 +96,6 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-MAXBW: vector.body:
; CHECK-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -114,7 +108,7 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[TMP20:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD4]] to <vscale x 8 x i32>
; CHECK-MAXBW-NEXT: [[TMP22:%.*]] = mul <vscale x 8 x i32> [[TMP20]], [[TMP13]]
; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE5]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI1]], <vscale x 8 x i32> [[TMP22]])
-; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-MAXBW: middle.block:
@@ -157,8 +151,6 @@ define i64 @not_dotp_i8_to_i64_has_neon_dotprod(ptr readonly %a, ptr readonly %b
; CHECK-INTERLEAVE1-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP9]], 2
; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP12]]
; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP18]], 2
; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP10]]
; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP10]]
; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -173,7 +165,7 @@ define i64 @not_dotp_i8_to_i64_has_neon_dotprod(ptr readonly %a, ptr readonly %b
; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = zext <vscale x 2 x i8> [[WIDE_LOAD2]] to <vscale x 2 x i64>
; CHECK-INTERLEAVE1-NEXT: [[TMP14:%.*]] = mul nuw nsw <vscale x 2 x i64> [[TMP13]], [[TMP11]]
; CHECK-INTERLEAVE1-NEXT: [[TMP15]] = add <vscale x 2 x i64> [[TMP14]], [[VEC_PHI]]
-; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]]
; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[TMP10]]
; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
@@ -193,8 +185,6 @@ define i64 @not_dotp_i8_to_i64_has_neon_dotprod(ptr readonly %a, ptr readonly %b
; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP9]], 4
; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP16]]
; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP28]], 4
; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP10]]
; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP10]]
; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -222,7 +212,7 @@ define i64 @not_dotp_i8_to_i64_has_neon_dotprod(ptr readonly %a, ptr readonly %b
; CHECK-INTERLEAVED-NEXT: [[TMP23:%.*]] = mul nuw nsw <vscale x 2 x i64> [[TMP21]], [[TMP15]]
; CHECK-INTERLEAVED-NEXT: [[TMP24]] = add <vscale x 2 x i64> [[TMP22]], [[VEC_PHI]]
; CHECK-INTERLEAVED-NEXT: [[TMP25]] = add <vscale x 2 x i64> [[TMP23]], [[VEC_PHI1]]
-; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP16]]
; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[TMP10]]
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
@@ -243,8 +233,6 @@ define i64 @not_dotp_i8_to_i64_has_neon_dotprod(ptr readonly %a, ptr readonly %b
; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-MAXBW-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[A]], i64 [[N_VEC]]
; CHECK-MAXBW-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[B]], i64 [[N_VEC]]
; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -259,7 +247,7 @@ define i64 @not_dotp_i8_to_i64_has_neon_dotprod(ptr readonly %a, ptr readonly %b
; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i64>
; CHECK-MAXBW-NEXT: [[TMP14:%.*]] = mul nuw nsw <vscale x 8 x i64> [[TMP13]], [[TMP11]]
; CHECK-MAXBW-NEXT: [[TMP15]] = add <vscale x 8 x i64> [[TMP14]], [[VEC_PHI]]
-; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK-MAXBW: middle.block:
@@ -304,8 +292,6 @@ define i64 @not_dotp_i16_to_i64_has_neon_dotprod(ptr readonly %a, ptr readonly %
; CHECK-INTERLEAVE1-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 2
; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP12]]
; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP14]], 2
; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = mul i64 [[TMP10]], 2
; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP20]]
; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = mul i64 [[TMP10]], 2
@@ -324,7 +310,7 @@ define i64 @not_dotp_i16_to_i64_has_neon_dotprod(ptr readonly %a, ptr readonly %
; CHECK-INTERLEAVE1-NEXT: [[TMP15:%.*]] = zext <vscale x 2 x i16> [[WIDE_LOAD3]] to <vscale x 2 x i64>
; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = mul nuw nsw <vscale x 2 x i64> [[TMP15]], [[TMP13]]
; CHECK-INTERLEAVE1-NEXT: [[TMP17]] = add <vscale x 2 x i64> [[TMP16]], [[VEC_PHI]]
-; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]]
; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[TMP10]]
; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
@@ -344,8 +330,6 @@ define i64 @not_dotp_i16_to_i64_has_neon_dotprod(ptr readonly %a, ptr readonly %
; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 4
; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP11]]
; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP12]], 4
; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = mul i64 [[TMP15]], 2
; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP6]]
; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = mul i64 [[TMP15]], 2
@@ -377,7 +361,7 @@ define i64 @not_dotp_i16_to_i64_has_neon_dotprod(ptr readonly %a, ptr readonly %
; CHECK-INTERLEAVED-NEXT: [[TMP25:%.*]] = mul nuw nsw <vscale x 2 x i64> [[TMP23]], [[TMP17]]
; CHECK-INTERLEAVED-NEXT: [[TMP26]] = add <vscale x 2 x i64> [[TMP24]], [[VEC_PHI]]
; CHECK-INTERLEAVED-NEXT: [[TMP27]] = add <vscale x 2 x i64> [[TMP25]], [[VEC_PHI1]]
-; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP11]]
; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[TMP15]]
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
@@ -398,8 +382,6 @@ define i64 @not_dotp_i16_to_i64_has_neon_dotprod(ptr readonly %a, ptr readonly %
; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-MAXBW-NEXT: [[TMP6:%.*]] = mul i64 [[N_VEC]], 2
; CHECK-MAXBW-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP6]]
; CHECK-MAXBW-NEXT: [[TMP8:%.*]] = mul i64 [[N_VEC]], 2
@@ -418,7 +400,7 @@ define i64 @not_dotp_i16_to_i64_has_neon_dotprod(ptr readonly %a, ptr readonly %
; CHECK-MAXBW-NEXT: [[TMP15:%.*]] = zext <vscale x 4 x i16> [[WIDE_LOAD3]] to <vscale x 4 x i64>
; CHECK-MAXBW-NEXT: [[TMP16:%.*]] = mul nuw nsw <vscale x 4 x i64> [[TMP15]], [[TMP13]]
; CHECK-MAXBW-NEXT: [[TMP17]] = add <vscale x 4 x i64> [[TMP16]], [[VEC_PHI]]
-; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK-MAXBW: middle.block:
@@ -819,8 +801,6 @@ define i32 @not_dotp_not_loop_carried(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = mul nuw i32 [[TMP6]], 8
; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = sub i32 [[TMP7]], 1
@@ -838,7 +818,7 @@ define i32 @not_dotp_not_loop_carried(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP16]] = mul <vscale x 8 x i32> [[TMP15]], [[TMP12]]
; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = call <vscale x 8 x i32> @llvm.vector.splice.nxv8i32(<vscale x 8 x i32> [[VECTOR_RECUR]], <vscale x 8 x i32> [[TMP16]], i32 -1)
; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = add <vscale x 8 x i32> [[TMP16]], [[TMP17]]
-; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-INTERLEAVE1-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
@@ -865,8 +845,6 @@ define i32 @not_dotp_not_loop_carried(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 16
; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVED: vector.body:
; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -890,7 +868,7 @@ define i32 @not_dotp_not_loop_carried(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP25:%.*]] = mul <vscale x 8 x i32> [[TMP23]], [[TMP16]]
; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = call <vscale x 8 x i32> @llvm.vector.splice.nxv8i32(<vscale x 8 x i32> [[TMP24]], <vscale x 8 x i32> [[TMP25]], i32 -1)
; CHECK-INTERLEAVED-NEXT: [[TMP27:%.*]] = add <vscale x 8 x i32> [[TMP25]], [[TMP26]]
-; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
@@ -917,8 +895,6 @@ define i32 @not_dotp_not_loop_carried(ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-MAXBW-NEXT: [[TMP6:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-MAXBW-NEXT: [[TMP7:%.*]] = mul nuw i32 [[TMP6]], 8
; CHECK-MAXBW-NEXT: [[TMP8:%.*]] = sub i32 [[TMP7]], 1
@@ -936,7 +912,7 @@ define i32 @not_dotp_not_loop_carried(ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[TMP25]] = mul <vscale x 8 x i32> [[TMP23]], [[TMP16]]
; CHECK-MAXBW-NEXT: [[TMP26:%.*]] = call <vscale x 8 x i32> @llvm.vector.splice.nxv8i32(<vscale x 8 x i32> [[VECTOR_RECUR]], <vscale x 8 x i32> [[TMP25]], i32 -1)
; CHECK-MAXBW-NEXT: [[TMP27:%.*]] = add <vscale x 8 x i32> [[TMP25]], [[TMP26]]
-; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK-MAXBW: middle.block:
@@ -986,8 +962,6 @@ define i32 @not_dotp_not_phi(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVE1: vector.body:
; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -999,7 +973,7 @@ define i32 @not_dotp_not_phi(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP15:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD1]] to <vscale x 8 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = mul <vscale x 8 x i32> [[TMP15]], [[TMP12]]
; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = add <vscale x 8 x i32> [[TMP16]], [[TMP15]]
-; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
@@ -1026,8 +1000,6 @@ define i32 @not_dotp_not_phi(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 16
; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVED: vector.body:
; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1045,7 +1017,7 @@ define i32 @not_dotp_not_phi(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD1]] to <vscale x 8 x i32>
; CHECK-INTERLEAVED-NEXT: [[TMP30:%.*]] = mul <vscale x 8 x i32> [[TMP22]], [[TMP15]]
; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = add <vscale x 8 x i32> [[TMP30]], [[TMP22]]
-; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
@@ -1072,8 +1044,6 @@ define i32 @not_dotp_not_phi(ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-MAXBW: vector.body:
; CHECK-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1085,7 +1055,7 @@ define i32 @not_dotp_not_phi(ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[TMP19:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD1]] to <vscale x 8 x i32>
; CHECK-MAXBW-NEXT: [[TMP20:%.*]] = mul <vscale x 8 x i32> [[TMP19]], [[TMP14]]
; CHECK-MAXBW-NEXT: [[TMP21:%.*]] = add <vscale x 8 x i32> [[TMP20]], [[TMP19]]
-; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK-MAXBW: middle.block:
@@ -1136,8 +1106,6 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP16]], 4
; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[NUM_IN]], [[TMP18]]
; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 [[NUM_IN]], [[N_MOD_VF]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVE1-NEXT: [[TMP26:%.*]] = mul nuw i64 [[TMP20]], 4
; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVE1: vector.body:
; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1180,7 +1148,7 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP39:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD10]] to <vscale x 4 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP40:%.*]] = mul nsw <vscale x 4 x i32> [[TMP37]], [[TMP39]]
; CHECK-INTERLEAVE1-NEXT: [[TMP41]] = add <vscale x 4 x i32> [[TMP40]], [[VEC_PHI]]
-; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP26]]
+; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP18]]
; CHECK-INTERLEAVE1-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
@@ -1204,8 +1172,6 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP16]], 8
; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[NUM_IN]], [[TMP18]]
; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 [[NUM_IN]], [[N_MOD_VF]]
-; CHECK-INTERLEAVED-NEXT: [[TMP34:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVED-NEXT: [[TMP40:%.*]] = mul nuw i64 [[TMP34]], 8
; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVED: vector.body:
; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1300,7 +1266,7 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP79:%.*]] = mul nsw <vscale x 4 x i32> [[TMP71]], [[TMP77]]
; CHECK-INTERLEAVED-NEXT: [[TMP80]] = add <vscale x 4 x i32> [[TMP78]], [[VEC_PHI]]
; CHECK-INTERLEAVED-NEXT: [[TMP81]] = add <vscale x 4 x i32> [[TMP79]], [[VEC_PHI1]]
-; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP40]]
+; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP18]]
; CHECK-INTERLEAVED-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
@@ -1328,8 +1294,6 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[NUM_IN]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 [[NUM_IN]], [[N_MOD_VF]]
-; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-MAXBW: vector.body:
; CHECK-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1372,7 +1336,7 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[TMP71:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD26]] to <vscale x 8 x i32>
; CHECK-MAXBW-NEXT: [[TMP73:%.*]] = mul nsw <vscale x 8 x i32> [[TMP65]], [[TMP71]]
; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE16]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI4]], <vscale x 8 x i32> [[TMP73]])
-; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP74:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[TMP74]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK-MAXBW: middle.block:
@@ -1445,12 +1409,6 @@ define i32 @dotp_predicated(i64 %N, ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: entry:
; CHECK-INTERLEAVE1-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK-INTERLEAVE1: vector.ph:
-; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-INTERLEAVE1-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP2]]
-; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
@@ -1488,12 +1446,6 @@ define i32 @dotp_predicated(i64 %N, ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: entry:
; CHECK-INTERLEAVED-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK-INTERLEAVED: vector.ph:
-; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-INTERLEAVED-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP2]]
-; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
@@ -1531,12 +1483,6 @@ define i32 @dotp_predicated(i64 %N, ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: entry:
; CHECK-MAXBW-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK-MAXBW: vector.ph:
-; CHECK-MAXBW-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 16
-; CHECK-MAXBW-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-MAXBW-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP2]]
-; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 16
; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
@@ -1603,8 +1549,6 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP8]], 4
; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP10]]
; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP11]], 4
; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVE1: vector.body:
; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1617,7 +1561,7 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP12:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = mul <vscale x 4 x i32> [[TMP12]], [[TMP9]]
; CHECK-INTERLEAVE1-NEXT: [[TMP14]] = add <vscale x 4 x i32> [[TMP13]], [[VEC_PHI]]
-; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
; CHECK-INTERLEAVE1-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
@@ -1641,8 +1585,6 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 8
; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP7]]
; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP14]], 8
; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVED: vector.body:
; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1668,7 +1610,7 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = mul <vscale x 4 x i32> [[TMP20]], [[TMP13]]
; CHECK-INTERLEAVED-NEXT: [[TMP23]] = add <vscale x 4 x i32> [[TMP21]], [[VEC_PHI]]
; CHECK-INTERLEAVED-NEXT: [[TMP24]] = add <vscale x 4 x i32> [[TMP22]], [[VEC_PHI1]]
-; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
; CHECK-INTERLEAVED-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
@@ -1693,8 +1635,6 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-MAXBW: vector.body:
; CHECK-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1707,7 +1647,7 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[TMP20:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD4]] to <vscale x 8 x i32>
; CHECK-MAXBW-NEXT: [[TMP22:%.*]] = mul <vscale x 8 x i32> [[TMP20]], [[TMP13]]
; CHECK-MAXBW-NEXT: [[TMP24]] = add <vscale x 8 x i32> [[TMP22]], [[VEC_PHI1]]
-; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; CHECK-MAXBW: middle.block:
@@ -1755,8 +1695,6 @@ define i64 @dotp_cost_disagreement(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 41, [[TMP3]]
; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 41, [[N_MOD_VF]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVE1: vector.body:
; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1770,7 +1708,7 @@ define i64 @dotp_cost_disagreement(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = zext <vscale x 2 x i8> [[WIDE_LOAD1]] to <vscale x 2 x i64>
; CHECK-INTERLEAVE1-NEXT: [[TMP14:%.*]] = mul nuw nsw <vscale x 2 x i64> [[TMP13]], [[TMP9]]
; CHECK-INTERLEAVE1-NEXT: [[TMP15]] = add <vscale x 2 x i64> [[VEC_PHI]], [[TMP14]]
-; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
@@ -1791,8 +1729,6 @@ define i64 @dotp_cost_disagreement(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 41, [[TMP3]]
; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 41, [[N_MOD_VF]]
-; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVED: vector.body:
; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1819,7 +1755,7 @@ define i64 @dotp_cost_disagreement(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP23:%.*]] = mul nuw nsw <vscale x 2 x i64> [[TMP21]], [[TMP13]]
; CHECK-INTERLEAVED-NEXT: [[TMP24]] = add <vscale x 2 x i64> [[VEC_PHI]], [[TMP22]]
; CHECK-INTERLEAVED-NEXT: [[TMP25]] = add <vscale x 2 x i64> [[VEC_PHI1]], [[TMP23]]
-; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
@@ -1841,8 +1777,6 @@ define i64 @dotp_cost_disagreement(ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 41, [[TMP3]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 41, [[N_MOD_VF]]
-; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-MAXBW: vector.body:
; CHECK-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1853,14 +1787,14 @@ define i64 @dotp_cost_disagreement(ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = add nuw nsw i64 [[INDEX]], 1
; CHECK-MAXBW-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[TMP10]]
; CHECK-MAXBW-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x i8>, ptr [[TMP11]], align 1
-; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD1]] to <vscale x 8 x i64>
-; CHECK-MAXBW-NEXT: [[TMP15:%.*]] = mul nuw nsw <vscale x 8 x i64> [[TMP12]], [[TMP9]]
-; CHECK-MAXBW-NEXT: [[TMP14]] = add <vscale x 8 x i64> [[VEC_PHI]], [[TMP15]]
-; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-MAXBW-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
+; CHECK-MAXBW-NEXT: [[TMP8:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD1]] to <vscale x 8 x i64>
+; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = mul nuw nsw <vscale x 8 x i64> [[TMP8]], [[TMP9]]
+; CHECK-MAXBW-NEXT: [[TMP14]] = add <vscale x 8 x i64> [[VEC_PHI]], [[TMP13]]
+; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; CHECK-MAXBW-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
; CHECK-MAXBW: middle.block:
-; CHECK-MAXBW-NEXT: [[TMP16:%.*]] = call i64 @llvm.vector.reduce.add.nxv8i64(<vscale x 8 x i64> [[TMP14]])
+; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = call i64 @llvm.vector.reduce.add.nxv8i64(<vscale x 8 x i64> [[TMP14]])
; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 41, [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK-MAXBW: scalar.ph:
@@ -2150,8 +2084,6 @@ define i64 @not_dotp_ext_outside_plan(ptr %a, i16 %b, i64 %n) #0 {
; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-MAXBW-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[EXT_B]], i64 0
; CHECK-MAXBW-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -2163,7 +2095,7 @@ define i64 @not_dotp_ext_outside_plan(ptr %a, i16 %b, i64 %n) #0 {
; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = zext <vscale x 4 x i16> [[WIDE_LOAD]] to <vscale x 4 x i64>
; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = mul nuw nsw <vscale x 4 x i64> [[TMP9]], [[BROADCAST_SPLAT]]
; CHECK-MAXBW-NEXT: [[TMP11]] = add <vscale x 4 x i64> [[TMP10]], [[VEC_PHI]]
-; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
; CHECK-MAXBW: middle.block:
@@ -2285,8 +2217,6 @@ define i64 @not_dotp_ext_outside_plan2(ptr %a, i16 %b, i64 %n) #0 {
; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-MAXBW-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[EXT_B]], i64 0
; CHECK-MAXBW-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -2298,7 +2228,7 @@ define i64 @not_dotp_ext_outside_plan2(ptr %a, i16 %b, i64 %n) #0 {
; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = zext <vscale x 4 x i16> [[WIDE_LOAD]] to <vscale x 4 x i64>
; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = mul nuw nsw <vscale x 4 x i64> [[BROADCAST_SPLAT]], [[TMP9]]
; CHECK-MAXBW-NEXT: [[TMP11]] = add <vscale x 4 x i64> [[TMP10]], [[VEC_PHI]]
-; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
; CHECK-MAXBW: middle.block:
@@ -2349,8 +2279,6 @@ define dso_local i32 @not_dotp_vscale1(ptr %a, ptr %b, i32 %n, i64 %cost) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]]
; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = trunc i64 [[N_VEC]] to i32
; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[A]], i64 [[N_VEC]]
; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[B]], i64 [[N_VEC]]
@@ -2367,7 +2295,7 @@ define dso_local i32 @not_dotp_vscale1(ptr %a, ptr %b, i32 %n, i64 %cost) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = zext <vscale x 2 x i8> [[WIDE_LOAD2]] to <vscale x 2 x i64>
; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = mul nuw nsw <vscale x 2 x i64> [[TMP16]], [[TMP14]]
; CHECK-INTERLEAVE1-NEXT: [[TMP18]] = add <vscale x 2 x i64> [[TMP17]], [[VEC_PHI]]
-; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
+; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
; CHECK-INTERLEAVE1-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
@@ -2392,8 +2320,6 @@ define dso_local i32 @not_dotp_vscale1(ptr %a, ptr %b, i32 %n, i64 %cost) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]]
; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
-; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4
; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = trunc i64 [[N_VEC]] to i32
; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[A]], i64 [[N_VEC]]
; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[B]], i64 [[N_VEC]]
@@ -2423,7 +2349,7 @@ define dso_local i32 @not_dotp_vscale1(ptr %a, ptr %b, i32 %n, i64 %cost) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = mul nuw nsw <vscale x 2 x i64> [[TMP24]], [[TMP18]]
; CHECK-INTERLEAVED-NEXT: [[TMP27]] = add <vscale x 2 x i64> [[TMP25]], [[VEC_PHI]]
; CHECK-INTERLEAVED-NEXT: [[TMP28]] = add <vscale x 2 x i64> [[TMP26]], [[VEC_PHI1]]
-; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
+; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
; CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
@@ -2449,8 +2375,6 @@ define dso_local i32 @not_dotp_vscale1(ptr %a, ptr %b, i32 %n, i64 %cost) #0 {
; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
-; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8
; CHECK-MAXBW-NEXT: [[TMP7:%.*]] = trunc i64 [[N_VEC]] to i32
; CHECK-MAXBW-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[A]], i64 [[N_VEC]]
; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[B]], i64 [[N_VEC]]
@@ -2467,7 +2391,7 @@ define dso_local i32 @not_dotp_vscale1(ptr %a, ptr %b, i32 %n, i64 %cost) #0 {
; CHECK-MAXBW-NEXT: [[TMP16:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i64>
; CHECK-MAXBW-NEXT: [[TMP17:%.*]] = mul nuw nsw <vscale x 8 x i64> [[TMP16]], [[TMP14]]
; CHECK-MAXBW-NEXT: [[TMP20]] = add <vscale x 8 x i64> [[TMP17]], [[VEC_PHI]]
-; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
+; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
; CHECK-MAXBW-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
; CHECK-MAXBW: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-sub.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-sub.ll
index a46340c..17da2af 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-sub.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-sub.ll
@@ -18,8 +18,6 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVE1: vector.body:
; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -33,7 +31,7 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = mul <vscale x 4 x i32> [[TMP12]], [[TMP9]]
; CHECK-INTERLEAVE1-NEXT: [[TMP14:%.*]] = sub <vscale x 4 x i32> zeroinitializer, [[TMP13]]
; CHECK-INTERLEAVE1-NEXT: [[TMP15]] = add <vscale x 4 x i32> [[VEC_PHI]], [[TMP14]]
-; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
@@ -53,8 +51,6 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVED: vector.body:
; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -82,7 +78,7 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = sub <vscale x 4 x i32> zeroinitializer, [[TMP22]]
; CHECK-INTERLEAVED-NEXT: [[TMP25]] = add <vscale x 4 x i32> [[VEC_PHI]], [[TMP23]]
; CHECK-INTERLEAVED-NEXT: [[TMP26]] = add <vscale x 4 x i32> [[VEC_PHI1]], [[TMP24]]
-; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-INTERLEAVED-NEXT: [[TMP27:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP27]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
@@ -103,8 +99,6 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-MAXBW-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-MAXBW: vector.body:
; CHECK-MAXBW-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
@@ -118,7 +112,7 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = mul <vscale x 8 x i32> [[TMP12]], [[TMP9]]
; CHECK-MAXBW-NEXT: [[TMP14:%.*]] = sub <vscale x 8 x i32> zeroinitializer, [[TMP13]]
; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 2 x i32> @llvm.experimental.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP14]])
-; CHECK-MAXBW-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP5]]
+; CHECK-MAXBW-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP16:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-MAXBW: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll
index d2c03d1..025a826 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll
@@ -68,8 +68,6 @@ define i32 @zext_add_reduc_i8_i32_sve(ptr %a) #0 {
; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]]
-; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 16
; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-MAXBW: vector.body:
; CHECK-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -78,7 +76,7 @@ define i32 @zext_add_reduc_i8_i32_sve(ptr %a) #0 {
; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP6]], align 1
; CHECK-MAXBW-NEXT: [[TMP8:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD]] to <vscale x 16 x i32>
; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 4 x i32> @llvm.experimental.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI]], <vscale x 16 x i32> [[TMP8]])
-; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-MAXBW: middle.block:
@@ -254,8 +252,6 @@ define i64 @zext_add_reduc_i8_i64(ptr %a) #0 {
; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]]
-; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 16
; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-MAXBW: vector.body:
; CHECK-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -264,7 +260,7 @@ define i64 @zext_add_reduc_i8_i64(ptr %a) #0 {
; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP7]], align 1
; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD]] to <vscale x 16 x i64>
; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 2 x i64> @llvm.experimental.vector.partial.reduce.add.nxv2i64.nxv16i64(<vscale x 2 x i64> [[VEC_PHI]], <vscale x 16 x i64> [[TMP9]])
-; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK-MAXBW: middle.block:
@@ -352,8 +348,6 @@ define i64 @zext_add_reduc_i16_i64(ptr %a) #0 {
; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]]
-; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-MAXBW: vector.body:
; CHECK-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -362,7 +356,7 @@ define i64 @zext_add_reduc_i16_i64(ptr %a) #0 {
; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i16>, ptr [[TMP7]], align 2
; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = zext <vscale x 8 x i16> [[WIDE_LOAD]] to <vscale x 8 x i64>
; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 2 x i64> @llvm.experimental.vector.partial.reduce.add.nxv2i64.nxv8i64(<vscale x 2 x i64> [[VEC_PHI]], <vscale x 8 x i64> [[TMP9]])
-; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK-MAXBW: middle.block:
@@ -450,8 +444,6 @@ define i32 @zext_add_reduc_i8_i32_has_neon_dotprod(ptr %a) #1 {
; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]]
-; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 16
; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-MAXBW: vector.body:
; CHECK-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -460,7 +452,7 @@ define i32 @zext_add_reduc_i8_i32_has_neon_dotprod(ptr %a) #1 {
; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP7]], align 1
; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD]] to <vscale x 16 x i32>
; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 4 x i32> @llvm.experimental.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI]], <vscale x 16 x i32> [[TMP9]])
-; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK-MAXBW: middle.block:
@@ -496,12 +488,6 @@ define i32 @zext_add_reduc_i8_i32_predicated(ptr %a) #0 {
; CHECK-INTERLEAVE1: vector.ph:
; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-INTERLEAVE1-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP2]]
-; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; CHECK-INTERLEAVE1-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 1025)
; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVE1: vector.body:
@@ -513,7 +499,7 @@ define i32 @zext_add_reduc_i8_i32_predicated(ptr %a) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = zext <vscale x 4 x i8> [[WIDE_MASKED_LOAD]] to <vscale x 4 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = add <vscale x 4 x i32> [[TMP8]], [[VEC_PHI]]
; CHECK-INTERLEAVE1-NEXT: [[TMP10]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i32> [[TMP9]], <vscale x 4 x i32> [[VEC_PHI]]
-; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]]
+; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]]
; CHECK-INTERLEAVE1-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX_NEXT]], i64 1025)
; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
; CHECK-INTERLEAVE1-NEXT: [[TMP12:%.*]] = extractelement <vscale x 4 x i1> [[TMP11]], i32 0
@@ -530,12 +516,6 @@ define i32 @zext_add_reduc_i8_i32_predicated(ptr %a) #0 {
; CHECK-INTERLEAVED: vector.ph:
; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-INTERLEAVED-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP2]]
-; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; CHECK-INTERLEAVED-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 1025)
; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVED: vector.body:
@@ -547,7 +527,7 @@ define i32 @zext_add_reduc_i8_i32_predicated(ptr %a) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = zext <vscale x 4 x i8> [[WIDE_MASKED_LOAD]] to <vscale x 4 x i32>
; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = add <vscale x 4 x i32> [[TMP8]], [[VEC_PHI]]
; CHECK-INTERLEAVED-NEXT: [[TMP10]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i32> [[TMP9]], <vscale x 4 x i32> [[VEC_PHI]]
-; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]]
+; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]]
; CHECK-INTERLEAVED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX_NEXT]], i64 1025)
; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = extractelement <vscale x 4 x i1> [[TMP11]], i32 0
@@ -564,12 +544,6 @@ define i32 @zext_add_reduc_i8_i32_predicated(ptr %a) #0 {
; CHECK-MAXBW: vector.ph:
; CHECK-MAXBW-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-MAXBW-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 16
-; CHECK-MAXBW-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-MAXBW-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP2]]
-; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 16
; CHECK-MAXBW-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 0, i64 1025)
; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-MAXBW: vector.body:
@@ -581,7 +555,7 @@ define i32 @zext_add_reduc_i8_i32_predicated(ptr %a) #0 {
; CHECK-MAXBW-NEXT: [[TMP8:%.*]] = zext <vscale x 16 x i8> [[WIDE_MASKED_LOAD]] to <vscale x 16 x i32>
; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = select <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i32> [[TMP8]], <vscale x 16 x i32> zeroinitializer
; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 4 x i32> @llvm.experimental.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI]], <vscale x 16 x i32> [[TMP9]])
-; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]]
+; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]]
; CHECK-MAXBW-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 [[INDEX_NEXT]], i64 1025)
; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = xor <vscale x 16 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
; CHECK-MAXBW-NEXT: [[TMP11:%.*]] = extractelement <vscale x 16 x i1> [[TMP10]], i32 0
@@ -752,8 +726,6 @@ define i32 @zext_sub_reduc_i8_i32_has_neon_dotprod(ptr %a) #1 {
; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]]
-; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-MAXBW: vector.body:
; CHECK-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -762,7 +734,7 @@ define i32 @zext_sub_reduc_i8_i32_has_neon_dotprod(ptr %a) #1 {
; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i8>, ptr [[TMP7]], align 1
; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i32>
; CHECK-MAXBW-NEXT: [[TMP10]] = sub <vscale x 8 x i32> [[VEC_PHI]], [[TMP9]]
-; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK-MAXBW: middle.block:
@@ -850,8 +822,6 @@ define i32 @sext_add_reduc_i8_i32(ptr %a) #0 {
; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]]
-; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 16
; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-MAXBW: vector.body:
; CHECK-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -860,7 +830,7 @@ define i32 @sext_add_reduc_i8_i32(ptr %a) #0 {
; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP7]], align 1
; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = sext <vscale x 16 x i8> [[WIDE_LOAD]] to <vscale x 16 x i32>
; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 4 x i32> @llvm.experimental.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI]], <vscale x 16 x i32> [[TMP9]])
-; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK-MAXBW: middle.block:
@@ -970,8 +940,6 @@ define i32 @add_of_zext_outside_loop(i32 %a, ptr noalias %b, i8 %c, i32 %d) #0 {
; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP3]], 16
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP0]], [[TMP4]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP0]], [[N_MOD_VF]]
-; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = call i32 @llvm.vscale.i32()
-; CHECK-MAXBW-NEXT: [[TMP6:%.*]] = mul nuw i32 [[TMP5]], 16
; CHECK-MAXBW-NEXT: [[TMP7:%.*]] = add i32 [[D]], [[N_VEC]]
; CHECK-MAXBW-NEXT: [[TMP8:%.*]] = insertelement <vscale x 16 x i32> zeroinitializer, i32 [[A]], i32 0
; CHECK-MAXBW-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[CONV1]], i64 0
@@ -984,7 +952,7 @@ define i32 @add_of_zext_outside_loop(i32 %a, ptr noalias %b, i8 %c, i32 %d) #0 {
; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 [[OFFSET_IDX]]
; CHECK-MAXBW-NEXT: store <vscale x 16 x i8> zeroinitializer, ptr [[TMP9]], align 1
; CHECK-MAXBW-NEXT: [[TMP11]] = add <vscale x 16 x i32> [[VEC_PHI]], [[BROADCAST_SPLAT]]
-; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP6]]
+; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP4]]
; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; CHECK-MAXBW: middle.block:
@@ -1093,8 +1061,6 @@ define i32 @add_of_loop_invariant_zext(i32 %a, ptr %b, i8 %c, i32 %d) #0 {
; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP3]], 16
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP0]], [[TMP4]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP0]], [[N_MOD_VF]]
-; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = call i32 @llvm.vscale.i32()
-; CHECK-MAXBW-NEXT: [[TMP6:%.*]] = mul nuw i32 [[TMP5]], 16
; CHECK-MAXBW-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[C]], i64 0
; CHECK-MAXBW-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
; CHECK-MAXBW-NEXT: [[TMP7:%.*]] = add i32 [[D]], [[N_VEC]]
@@ -1108,7 +1074,7 @@ define i32 @add_of_loop_invariant_zext(i32 %a, ptr %b, i8 %c, i32 %d) #0 {
; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 [[OFFSET_IDX]]
; CHECK-MAXBW-NEXT: store <vscale x 16 x i8> zeroinitializer, ptr [[TMP10]], align 1
; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 4 x i32> @llvm.experimental.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI]], <vscale x 16 x i32> [[TMP9]])
-; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP6]]
+; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP4]]
; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; CHECK-MAXBW: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/pr151664-cost-hoisted-vector-scalable.ll b/llvm/test/Transforms/LoopVectorize/AArch64/pr151664-cost-hoisted-vector-scalable.ll
index 8495dee..5b0696e 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/pr151664-cost-hoisted-vector-scalable.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/pr151664-cost-hoisted-vector-scalable.ll
@@ -1,47 +1,28 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --filter-out-after "^scalar.ph" --version 5
-; REQUIRES: asserts
-; RUN: opt -passes=loop-vectorize -mtriple=aarch64 -mattr=+sve -S \
-; RUN: -debug-only=loop-vectorize %s 2>&1 | FileCheck %s
+; RUN: opt -passes=loop-vectorize -mtriple=aarch64 -mattr=+sve -S %s | FileCheck %s
-; FIXME: Hoisted vector code should be costed with scalable cost.
-; In this example, `<vscale x 4 x float> @llvm.minimumnum` has an invalid cost,
-; and hence should not be produced by LoopVectorize.
-
-; CHECK: LV: Found an estimated cost of Invalid for VF vscale x 4 For instruction: %res = tail call float @llvm.minimumnum.f32(float %arg, float 0.000000e+00)
define void @cost_hoisted_vector_code(ptr %p, float %arg) {
; CHECK-LABEL: define void @cost_hoisted_vector_code(
; CHECK-SAME: ptr [[P:%.*]], float [[ARG:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 -1, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 -1, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[ARG]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x float> [[BROADCAST_SPLATINSERT]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP6:%.*]] = add i64 1, [[N_VEC]]
-; CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 4 x float> @llvm.minimumnum.nxv4f32(<vscale x 4 x float> [[BROADCAST_SPLAT]], <vscale x 4 x float> zeroinitializer)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[ARG]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x float> [[BROADCAST_SPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.minimumnum.v4f32(<4 x float> [[BROADCAST_SPLAT]], <4 x float> zeroinitializer)
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[INDEX:%.*]] = add i64 1, [[INDEX1]]
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr float, ptr [[P]], i64 [[INDEX]]
-; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr float, ptr [[TMP8]], i64 [[TMP10]]
-; CHECK-NEXT: store <vscale x 4 x float> [[TMP7]], ptr [[TMP8]], align 4
-; CHECK-NEXT: store <vscale x 4 x float> [[TMP7]], ptr [[TMP11]], align 4
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX1]], [[TMP5]]
-; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 1, [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr float, ptr [[P]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr float, ptr [[TMP1]], i32 4
+; CHECK-NEXT: store <4 x float> [[TMP0]], ptr [[TMP1]], align 4
+; CHECK-NEXT: store <4 x float> [[TMP0]], ptr [[TMP2]], align 4
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], -8
+; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 -1, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
;
entry:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/pr60831-sve-inv-store-crash.ll b/llvm/test/Transforms/LoopVectorize/AArch64/pr60831-sve-inv-store-crash.ll
index 6d5bbde..3b43d52 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/pr60831-sve-inv-store-crash.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/pr60831-sve-inv-store-crash.ll
@@ -16,8 +16,6 @@ define void @test_invar_gep(ptr %dst) #0 {
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -36,7 +34,7 @@ define void @test_invar_gep(ptr %dst) #0 {
; CHECK-NEXT: [[TMP17:%.*]] = sub i32 [[TMP16]], 1
; CHECK-NEXT: [[TMP18:%.*]] = extractelement <vscale x 4 x i64> [[TMP9]], i32 [[TMP17]]
; CHECK-NEXT: store i64 [[TMP18]], ptr [[TMP14:%.*]], align 1
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll b/llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll
index 830e7da..b732f88 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll
@@ -12,9 +12,6 @@ define i32 @pr70988(ptr %src, i32 %n) {
; CHECK-NEXT: [[UMAX:%.*]] = zext i32 [[TMP2]] to i64
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[UMAX]], 1
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 2
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = icmp ult i64 0, [[UMAX]]
; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY1:%.*]] = icmp ult i64 1, [[UMAX]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll b/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll
index 381d2e1..dcaaa89 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll
@@ -59,8 +59,6 @@ define i32 @chained_recurrences(i32 %x, i64 %y, ptr %src.1, i32 %z, ptr %src.2)
; VSCALEFORTUNING2-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8
; VSCALEFORTUNING2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]]
; VSCALEFORTUNING2-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
-; VSCALEFORTUNING2-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; VSCALEFORTUNING2-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8
; VSCALEFORTUNING2-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[Z]], i64 0
; VSCALEFORTUNING2-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; VSCALEFORTUNING2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[X]], i64 0
@@ -121,7 +119,7 @@ define i32 @chained_recurrences(i32 %x, i64 %y, ptr %src.1, i32 %z, ptr %src.2)
; VSCALEFORTUNING2-NEXT: [[TMP46:%.*]] = or <vscale x 4 x i32> [[WIDE_MASKED_GATHER8]], [[VEC_PHI5]]
; VSCALEFORTUNING2-NEXT: [[TMP47]] = or <vscale x 4 x i32> [[TMP45]], [[WIDE_MASKED_GATHER9]]
; VSCALEFORTUNING2-NEXT: [[TMP48]] = or <vscale x 4 x i32> [[TMP46]], [[WIDE_MASKED_GATHER10]]
-; VSCALEFORTUNING2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
+; VSCALEFORTUNING2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
; VSCALEFORTUNING2-NEXT: [[TMP49:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; VSCALEFORTUNING2-NEXT: br i1 [[TMP49]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; VSCALEFORTUNING2: [[MIDDLE_BLOCK]]:
@@ -182,12 +180,6 @@ define i32 @chained_recurrences(i32 %x, i64 %y, ptr %src.1, i32 %z, ptr %src.2)
; PRED: [[VECTOR_PH]]:
; PRED-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; PRED-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP1]], 4
-; PRED-NEXT: [[TMP3:%.*]] = sub i64 [[TMP2]], 1
-; PRED-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP0]], [[TMP3]]
-; PRED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP2]]
-; PRED-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; PRED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; PRED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; PRED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[Z]], i64 0
; PRED-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; PRED-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[X]], i64 0
@@ -218,7 +210,7 @@ define i32 @chained_recurrences(i32 %x, i64 %y, ptr %src.1, i32 %z, ptr %src.2)
; PRED-NEXT: [[VECTOR_RECUR_INIT3:%.*]] = insertelement <vscale x 4 x i32> poison, i32 0, i32 [[TMP27]]
; PRED-NEXT: br label %[[VECTOR_BODY:.*]]
; PRED: [[VECTOR_BODY]]:
-; PRED-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[IV_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; PRED-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; PRED-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], %[[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ]
; PRED-NEXT: [[VECTOR_RECUR:%.*]] = phi <vscale x 4 x i32> [ [[VECTOR_RECUR_INIT]], %[[VECTOR_PH]] ], [ [[BROADCAST_SPLAT6:%.*]], %[[VECTOR_BODY]] ]
; PRED-NEXT: [[VECTOR_RECUR4:%.*]] = phi <vscale x 4 x i32> [ [[VECTOR_RECUR_INIT3]], %[[VECTOR_PH]] ], [ [[TMP29:%.*]], %[[VECTOR_BODY]] ]
@@ -241,7 +233,7 @@ define i32 @chained_recurrences(i32 %x, i64 %y, ptr %src.1, i32 %z, ptr %src.2)
; PRED-NEXT: [[TMP39:%.*]] = or <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], [[VEC_PHI]]
; PRED-NEXT: [[TMP40:%.*]] = or <vscale x 4 x i32> [[TMP39]], [[WIDE_MASKED_GATHER7]]
; PRED-NEXT: [[TMP41]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i32> [[TMP40]], <vscale x 4 x i32> [[VEC_PHI]]
-; PRED-NEXT: [[IV_NEXT]] = add i64 [[IV]], [[TMP5]]
+; PRED-NEXT: [[INDEX_NEXT]] = add i64 [[IV]], [[TMP2]]
; PRED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[IV]], i64 [[TMP10]])
; PRED-NEXT: [[TMP42:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
; PRED-NEXT: [[TMP43:%.*]] = extractelement <vscale x 4 x i1> [[TMP42]], i32 0
@@ -343,8 +335,6 @@ define i16 @reduce_udiv(ptr %src, i16 %x, i64 %N) #0 {
; DEFAULT-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8
; DEFAULT-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]]
; DEFAULT-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
-; DEFAULT-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; DEFAULT-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8
; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i16> poison, i16 [[X]], i64 0
; DEFAULT-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i16> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
; DEFAULT-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -362,7 +352,7 @@ define i16 @reduce_udiv(ptr %src, i16 %x, i64 %N) #0 {
; DEFAULT-NEXT: [[TMP20:%.*]] = udiv <vscale x 4 x i16> [[WIDE_LOAD2]], [[BROADCAST_SPLAT]]
; DEFAULT-NEXT: [[TMP21]] = or <vscale x 4 x i16> [[TMP19]], [[VEC_PHI]]
; DEFAULT-NEXT: [[TMP22]] = or <vscale x 4 x i16> [[TMP20]], [[VEC_PHI1]]
-; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
+; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
; DEFAULT-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; DEFAULT-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; DEFAULT: [[MIDDLE_BLOCK]]:
@@ -401,8 +391,6 @@ define i16 @reduce_udiv(ptr %src, i16 %x, i64 %N) #0 {
; VSCALEFORTUNING2-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8
; VSCALEFORTUNING2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]]
; VSCALEFORTUNING2-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
-; VSCALEFORTUNING2-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; VSCALEFORTUNING2-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8
; VSCALEFORTUNING2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i16> poison, i16 [[X]], i64 0
; VSCALEFORTUNING2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i16> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
; VSCALEFORTUNING2-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -420,7 +408,7 @@ define i16 @reduce_udiv(ptr %src, i16 %x, i64 %N) #0 {
; VSCALEFORTUNING2-NEXT: [[TMP14:%.*]] = udiv <vscale x 4 x i16> [[WIDE_LOAD2]], [[BROADCAST_SPLAT]]
; VSCALEFORTUNING2-NEXT: [[TMP15]] = or <vscale x 4 x i16> [[TMP13]], [[VEC_PHI]]
; VSCALEFORTUNING2-NEXT: [[TMP16]] = or <vscale x 4 x i16> [[TMP14]], [[VEC_PHI1]]
-; VSCALEFORTUNING2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
+; VSCALEFORTUNING2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
; VSCALEFORTUNING2-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; VSCALEFORTUNING2-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; VSCALEFORTUNING2: [[MIDDLE_BLOCK]]:
@@ -454,12 +442,6 @@ define i16 @reduce_udiv(ptr %src, i16 %x, i64 %N) #0 {
; PRED: [[VECTOR_PH]]:
; PRED-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; PRED-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP1]], 8
-; PRED-NEXT: [[TMP5:%.*]] = sub i64 [[TMP2]], 1
-; PRED-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP0]], [[TMP5]]
-; PRED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP2]]
-; PRED-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; PRED-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; PRED-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 8
; PRED-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; PRED-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 8
; PRED-NEXT: [[TMP10:%.*]] = sub i64 [[TMP0]], [[TMP9]]
@@ -478,7 +460,7 @@ define i16 @reduce_udiv(ptr %src, i16 %x, i64 %N) #0 {
; PRED-NEXT: [[TMP20:%.*]] = udiv <vscale x 8 x i16> [[WIDE_MASKED_LOAD]], [[BROADCAST_SPLAT]]
; PRED-NEXT: [[TMP21:%.*]] = or <vscale x 8 x i16> [[TMP20]], [[VEC_PHI]]
; PRED-NEXT: [[TMP16]] = select <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x i16> [[TMP21]], <vscale x 8 x i16> [[VEC_PHI]]
-; PRED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP7]]
+; PRED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP2]]
; PRED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX]], i64 [[TMP12]])
; PRED-NEXT: [[TMP17:%.*]] = xor <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
; PRED-NEXT: [[TMP18:%.*]] = extractelement <vscale x 8 x i1> [[TMP17]], i32 0
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-avoid-scalarization.ll b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-avoid-scalarization.ll
index eb3d724..8d8d427 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-avoid-scalarization.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-avoid-scalarization.ll
@@ -23,15 +23,13 @@ define void @test_no_scalarization(ptr %a, ptr noalias %b, i32 %idx, i32 %n) #0
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i32 [[TMP4]], 2
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP1]], [[TMP5]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP1]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vscale.i32()
-; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i32 [[TMP6]], 2
; CHECK-NEXT: [[IND_END:%.*]] = add i32 [[IDX]], [[N_VEC]]
; CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[IDX]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[DOTSPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP10:%.*]] = mul <vscale x 2 x i32> [[TMP8]], splat (i32 1)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i32> [[DOTSPLAT]], [[TMP10]]
-; CHECK-NEXT: [[TMP13:%.*]] = mul i32 1, [[TMP7]]
+; CHECK-NEXT: [[TMP13:%.*]] = mul i32 1, [[TMP5]]
; CHECK-NEXT: [[DOTSPLATINSERT1:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP13]], i64 0
; CHECK-NEXT: [[DOTSPLAT2:%.*]] = shufflevector <vscale x 2 x i32> [[DOTSPLATINSERT1]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -44,7 +42,7 @@ define void @test_no_scalarization(ptr %a, ptr noalias %b, i32 %idx, i32 %n) #0
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x double>, ptr [[TMP16]], align 8
; CHECK-NEXT: [[TMP18:%.*]] = getelementptr i64, ptr [[B:%.*]], i32 [[OFFSET_IDX]]
; CHECK-NEXT: store <vscale x 2 x double> [[WIDE_LOAD]], ptr [[TMP18]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP7]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP5]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i32> [[VEC_IND]], [[DOTSPLAT2]]
; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-reduction-inloop-cond.ll b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-reduction-inloop-cond.ll
index a7ec749..c78f68f 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-reduction-inloop-cond.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-reduction-inloop-cond.ll
@@ -14,8 +14,6 @@ define float @cond_fadd(ptr noalias nocapture readonly %a, ptr noalias nocapture
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -28,7 +26,7 @@ define float @cond_fadd(ptr noalias nocapture readonly %a, ptr noalias nocapture
; CHECK-NEXT: [[TMP12:%.*]] = select fast <vscale x 4 x i1> [[TMP9]], <vscale x 4 x float> [[WIDE_MASKED_LOAD]], <vscale x 4 x float> zeroinitializer
; CHECK-NEXT: [[TMP13:%.*]] = call fast float @llvm.vector.reduce.fadd.nxv4f32(float 0.000000e+00, <vscale x 4 x float> [[TMP12]])
; CHECK-NEXT: [[TMP14]] = fadd fast float [[VEC_PHI]], [[TMP13]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
@@ -98,8 +96,6 @@ define float @cond_cmp_sel(ptr noalias %a, ptr noalias %cond, i64 %N) {
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -113,7 +109,7 @@ define float @cond_cmp_sel(ptr noalias %a, ptr noalias %cond, i64 %N) {
; CHECK-NEXT: [[TMP13:%.*]] = call fast float @llvm.vector.reduce.fmin.nxv4f32(<vscale x 4 x float> [[TMP12]])
; CHECK-NEXT: [[RDX_MINMAX_CMP:%.*]] = fcmp fast olt float [[TMP13]], [[VEC_PHI]]
; CHECK-NEXT: [[RDX_MINMAX_SELECT]] = select fast i1 [[RDX_MINMAX_CMP]], float [[TMP13]], float [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll
index 0cad053..b0ee9fc 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll
@@ -42,37 +42,35 @@ define float @fadd_strict(ptr noalias nocapture readonly %a, i64 %n) #0 {
; CHECK-UNORDERED-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-UNORDERED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-UNORDERED-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-UNORDERED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-UNORDERED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-UNORDERED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-UNORDERED: vector.body:
; CHECK-UNORDERED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-UNORDERED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x float> [ insertelement (<vscale x 8 x float> splat (float -0.000000e+00), float 0.000000e+00, i32 0), [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ]
-; CHECK-UNORDERED-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x float>, ptr [[TMP6]], align 4
-; CHECK-UNORDERED-NEXT: [[TMP7]] = fadd <vscale x 8 x float> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-UNORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-UNORDERED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-UNORDERED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-UNORDERED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x float> [ insertelement (<vscale x 8 x float> splat (float -0.000000e+00), float 0.000000e+00, i32 0), [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
+; CHECK-UNORDERED-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x float>, ptr [[TMP4]], align 4
+; CHECK-UNORDERED-NEXT: [[TMP5]] = fadd <vscale x 8 x float> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-UNORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; CHECK-UNORDERED-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-UNORDERED-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-UNORDERED: middle.block:
-; CHECK-UNORDERED-NEXT: [[TMP9:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float -0.000000e+00, <vscale x 8 x float> [[TMP7]])
+; CHECK-UNORDERED-NEXT: [[TMP7:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float -0.000000e+00, <vscale x 8 x float> [[TMP5]])
; CHECK-UNORDERED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-UNORDERED-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK-UNORDERED: scalar.ph:
; CHECK-UNORDERED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-UNORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP9]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[ENTRY]] ]
+; CHECK-UNORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP7]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[ENTRY]] ]
; CHECK-UNORDERED-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-UNORDERED: for.body:
; CHECK-UNORDERED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-UNORDERED-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; CHECK-UNORDERED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-UNORDERED-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; CHECK-UNORDERED-NEXT: [[ADD]] = fadd float [[TMP10]], [[SUM_07]]
+; CHECK-UNORDERED-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-UNORDERED-NEXT: [[ADD]] = fadd float [[TMP8]], [[SUM_07]]
; CHECK-UNORDERED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-UNORDERED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-UNORDERED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK-UNORDERED: for.end:
-; CHECK-UNORDERED-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP9]], [[MIDDLE_BLOCK]] ]
+; CHECK-UNORDERED-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ]
; CHECK-UNORDERED-NEXT: ret float [[ADD_LCSSA]]
;
; CHECK-ORDERED-LABEL: define float @fadd_strict
@@ -87,36 +85,34 @@ define float @fadd_strict(ptr noalias nocapture readonly %a, i64 %n) #0 {
; CHECK-ORDERED-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-ORDERED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-ORDERED-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-ORDERED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-ORDERED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-ORDERED: vector.body:
; CHECK-ORDERED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-ORDERED-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ]
-; CHECK-ORDERED-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
-; CHECK-ORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x float>, ptr [[TMP6]], align 4
-; CHECK-ORDERED-NEXT: [[TMP7]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[VEC_PHI]], <vscale x 8 x float> [[WIDE_LOAD]])
-; CHECK-ORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-ORDERED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-ORDERED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-ORDERED-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
+; CHECK-ORDERED-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-ORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x float>, ptr [[TMP4]], align 4
+; CHECK-ORDERED-NEXT: [[TMP5]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[VEC_PHI]], <vscale x 8 x float> [[WIDE_LOAD]])
+; CHECK-ORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; CHECK-ORDERED-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-ORDERED-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-ORDERED: middle.block:
; CHECK-ORDERED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-ORDERED-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK-ORDERED: scalar.ph:
; CHECK-ORDERED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-ORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP7]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[ENTRY]] ]
+; CHECK-ORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP5]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[ENTRY]] ]
; CHECK-ORDERED-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-ORDERED: for.body:
; CHECK-ORDERED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-ORDERED-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; CHECK-ORDERED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-ORDERED-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; CHECK-ORDERED-NEXT: [[ADD]] = fadd float [[TMP9]], [[SUM_07]]
+; CHECK-ORDERED-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-ORDERED-NEXT: [[ADD]] = fadd float [[TMP7]], [[SUM_07]]
; CHECK-ORDERED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-ORDERED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-ORDERED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK-ORDERED: for.end:
-; CHECK-ORDERED-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ]
+; CHECK-ORDERED-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ]
; CHECK-ORDERED-NEXT: ret float [[ADD_LCSSA]]
;
; CHECK-ORDERED-TF-LABEL: define float @fadd_strict
@@ -126,32 +122,26 @@ define float @fadd_strict(ptr noalias nocapture readonly %a, i64 %n) #0 {
; CHECK-ORDERED-TF: vector.ph:
; CHECK-ORDERED-TF-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-ORDERED-TF-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
-; CHECK-ORDERED-TF-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-ORDERED-TF-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP2]]
-; CHECK-ORDERED-TF-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-ORDERED-TF-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; CHECK-ORDERED-TF-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8
-; CHECK-ORDERED-TF-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8
-; CHECK-ORDERED-TF-NEXT: [[TMP7:%.*]] = sub i64 [[N]], [[TMP6]]
-; CHECK-ORDERED-TF-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[N]], [[TMP6]]
-; CHECK-ORDERED-TF-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 [[TMP7]], i64 0
+; CHECK-ORDERED-TF-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-TF-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
+; CHECK-ORDERED-TF-NEXT: [[TMP4:%.*]] = sub i64 [[N]], [[TMP3]]
+; CHECK-ORDERED-TF-NEXT: [[TMP5:%.*]] = icmp ugt i64 [[N]], [[TMP3]]
+; CHECK-ORDERED-TF-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i64 [[TMP4]], i64 0
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 0, i64 [[N]])
; CHECK-ORDERED-TF-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-ORDERED-TF: vector.body:
; CHECK-ORDERED-TF-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 8 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP12:%.*]], [[VECTOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP10]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[TMP11:%.*]] = select <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x float> [[WIDE_MASKED_LOAD]], <vscale x 8 x float> splat (float -0.000000e+00)
-; CHECK-ORDERED-TF-NEXT: [[TMP12]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[VEC_PHI]], <vscale x 8 x float> [[TMP11]])
-; CHECK-ORDERED-TF-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]]
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX]], i64 [[TMP9]])
-; CHECK-ORDERED-TF-NEXT: [[TMP13:%.*]] = xor <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
-; CHECK-ORDERED-TF-NEXT: [[TMP14:%.*]] = extractelement <vscale x 8 x i1> [[TMP13]], i32 0
-; CHECK-ORDERED-TF-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-ORDERED-TF-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ]
+; CHECK-ORDERED-TF-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP7]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[TMP8:%.*]] = select <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x float> [[WIDE_MASKED_LOAD]], <vscale x 8 x float> splat (float -0.000000e+00)
+; CHECK-ORDERED-TF-NEXT: [[TMP9]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[VEC_PHI]], <vscale x 8 x float> [[TMP8]])
+; CHECK-ORDERED-TF-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]]
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX]], i64 [[TMP6]])
+; CHECK-ORDERED-TF-NEXT: [[TMP10:%.*]] = xor <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
+; CHECK-ORDERED-TF-NEXT: [[TMP11:%.*]] = extractelement <vscale x 8 x i1> [[TMP10]], i32 0
+; CHECK-ORDERED-TF-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-ORDERED-TF: middle.block:
; CHECK-ORDERED-TF-NEXT: br label [[FOR_END:%.*]]
; CHECK-ORDERED-TF: scalar.ph:
@@ -162,13 +152,13 @@ define float @fadd_strict(ptr noalias nocapture readonly %a, i64 %n) #0 {
; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-ORDERED-TF-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; CHECK-ORDERED-TF-NEXT: [[ADD]] = fadd float [[TMP15]], [[SUM_07]]
+; CHECK-ORDERED-TF-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-ORDERED-TF-NEXT: [[ADD]] = fadd float [[TMP12]], [[SUM_07]]
; CHECK-ORDERED-TF-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-ORDERED-TF-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-ORDERED-TF-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK-ORDERED-TF: for.end:
-; CHECK-ORDERED-TF-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP12]], [[MIDDLE_BLOCK]] ]
+; CHECK-ORDERED-TF-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP9]], [[MIDDLE_BLOCK]] ]
; CHECK-ORDERED-TF-NEXT: ret float [[ADD_LCSSA]]
;
@@ -222,58 +212,56 @@ define float @fadd_strict_unroll(ptr noalias nocapture readonly %a, i64 %n) #0 {
; CHECK-UNORDERED-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 32
; CHECK-UNORDERED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-UNORDERED-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-UNORDERED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-UNORDERED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 32
; CHECK-UNORDERED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-UNORDERED: vector.body:
; CHECK-UNORDERED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-UNORDERED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x float> [ insertelement (<vscale x 8 x float> splat (float -0.000000e+00), float 0.000000e+00, i32 0), [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ]
-; CHECK-UNORDERED-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 8 x float> [ splat (float -0.000000e+00), [[VECTOR_PH]] ], [ [[TMP17:%.*]], [[VECTOR_BODY]] ]
-; CHECK-UNORDERED-NEXT: [[VEC_PHI2:%.*]] = phi <vscale x 8 x float> [ splat (float -0.000000e+00), [[VECTOR_PH]] ], [ [[TMP18:%.*]], [[VECTOR_BODY]] ]
-; CHECK-UNORDERED-NEXT: [[VEC_PHI3:%.*]] = phi <vscale x 8 x float> [ splat (float -0.000000e+00), [[VECTOR_PH]] ], [ [[TMP19:%.*]], [[VECTOR_BODY]] ]
-; CHECK-UNORDERED-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
-; CHECK-UNORDERED-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-UNORDERED-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 8
-; CHECK-UNORDERED-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[TMP8]]
-; CHECK-UNORDERED-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-UNORDERED-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 16
-; CHECK-UNORDERED-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[TMP11]]
-; CHECK-UNORDERED-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-UNORDERED-NEXT: [[TMP14:%.*]] = mul nuw i64 [[TMP13]], 24
-; CHECK-UNORDERED-NEXT: [[TMP15:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[TMP14]]
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x float>, ptr [[TMP6]], align 4
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 8 x float>, ptr [[TMP9]], align 4
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 8 x float>, ptr [[TMP12]], align 4
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD6:%.*]] = load <vscale x 8 x float>, ptr [[TMP15]], align 4
-; CHECK-UNORDERED-NEXT: [[TMP16]] = fadd <vscale x 8 x float> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-UNORDERED-NEXT: [[TMP17]] = fadd <vscale x 8 x float> [[WIDE_LOAD4]], [[VEC_PHI1]]
-; CHECK-UNORDERED-NEXT: [[TMP18]] = fadd <vscale x 8 x float> [[WIDE_LOAD5]], [[VEC_PHI2]]
-; CHECK-UNORDERED-NEXT: [[TMP19]] = fadd <vscale x 8 x float> [[WIDE_LOAD6]], [[VEC_PHI3]]
-; CHECK-UNORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-UNORDERED-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-UNORDERED-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-UNORDERED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x float> [ insertelement (<vscale x 8 x float> splat (float -0.000000e+00), float 0.000000e+00, i32 0), [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ]
+; CHECK-UNORDERED-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 8 x float> [ splat (float -0.000000e+00), [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
+; CHECK-UNORDERED-NEXT: [[VEC_PHI2:%.*]] = phi <vscale x 8 x float> [ splat (float -0.000000e+00), [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ]
+; CHECK-UNORDERED-NEXT: [[VEC_PHI3:%.*]] = phi <vscale x 8 x float> [ splat (float -0.000000e+00), [[VECTOR_PH]] ], [ [[TMP17:%.*]], [[VECTOR_BODY]] ]
+; CHECK-UNORDERED-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-UNORDERED-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-UNORDERED-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8
+; CHECK-UNORDERED-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[TMP6]]
+; CHECK-UNORDERED-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-UNORDERED-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
+; CHECK-UNORDERED-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[TMP9]]
+; CHECK-UNORDERED-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-UNORDERED-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 24
+; CHECK-UNORDERED-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[TMP12]]
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x float>, ptr [[TMP4]], align 4
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 8 x float>, ptr [[TMP7]], align 4
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 8 x float>, ptr [[TMP10]], align 4
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD6:%.*]] = load <vscale x 8 x float>, ptr [[TMP13]], align 4
+; CHECK-UNORDERED-NEXT: [[TMP14]] = fadd <vscale x 8 x float> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-UNORDERED-NEXT: [[TMP15]] = fadd <vscale x 8 x float> [[WIDE_LOAD4]], [[VEC_PHI1]]
+; CHECK-UNORDERED-NEXT: [[TMP16]] = fadd <vscale x 8 x float> [[WIDE_LOAD5]], [[VEC_PHI2]]
+; CHECK-UNORDERED-NEXT: [[TMP17]] = fadd <vscale x 8 x float> [[WIDE_LOAD6]], [[VEC_PHI3]]
+; CHECK-UNORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; CHECK-UNORDERED-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-UNORDERED-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK-UNORDERED: middle.block:
-; CHECK-UNORDERED-NEXT: [[BIN_RDX:%.*]] = fadd <vscale x 8 x float> [[TMP17]], [[TMP16]]
-; CHECK-UNORDERED-NEXT: [[BIN_RDX7:%.*]] = fadd <vscale x 8 x float> [[TMP18]], [[BIN_RDX]]
-; CHECK-UNORDERED-NEXT: [[BIN_RDX8:%.*]] = fadd <vscale x 8 x float> [[TMP19]], [[BIN_RDX7]]
-; CHECK-UNORDERED-NEXT: [[TMP21:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float -0.000000e+00, <vscale x 8 x float> [[BIN_RDX8]])
+; CHECK-UNORDERED-NEXT: [[BIN_RDX:%.*]] = fadd <vscale x 8 x float> [[TMP15]], [[TMP14]]
+; CHECK-UNORDERED-NEXT: [[BIN_RDX7:%.*]] = fadd <vscale x 8 x float> [[TMP16]], [[BIN_RDX]]
+; CHECK-UNORDERED-NEXT: [[BIN_RDX8:%.*]] = fadd <vscale x 8 x float> [[TMP17]], [[BIN_RDX7]]
+; CHECK-UNORDERED-NEXT: [[TMP19:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float -0.000000e+00, <vscale x 8 x float> [[BIN_RDX8]])
; CHECK-UNORDERED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-UNORDERED-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK-UNORDERED: scalar.ph:
; CHECK-UNORDERED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-UNORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP21]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[ENTRY]] ]
+; CHECK-UNORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP19]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[ENTRY]] ]
; CHECK-UNORDERED-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-UNORDERED: for.body:
; CHECK-UNORDERED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-UNORDERED-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; CHECK-UNORDERED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-UNORDERED-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; CHECK-UNORDERED-NEXT: [[ADD]] = fadd float [[TMP22]], [[SUM_07]]
+; CHECK-UNORDERED-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-UNORDERED-NEXT: [[ADD]] = fadd float [[TMP20]], [[SUM_07]]
; CHECK-UNORDERED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-UNORDERED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-UNORDERED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK-UNORDERED: for.end:
-; CHECK-UNORDERED-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP21]], [[MIDDLE_BLOCK]] ]
+; CHECK-UNORDERED-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP19]], [[MIDDLE_BLOCK]] ]
; CHECK-UNORDERED-NEXT: ret float [[ADD_LCSSA]]
;
; CHECK-ORDERED-LABEL: define float @fadd_strict_unroll
@@ -288,51 +276,49 @@ define float @fadd_strict_unroll(ptr noalias nocapture readonly %a, i64 %n) #0 {
; CHECK-ORDERED-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 32
; CHECK-ORDERED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-ORDERED-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-ORDERED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 32
; CHECK-ORDERED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-ORDERED: vector.body:
; CHECK-ORDERED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-ORDERED-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP19:%.*]], [[VECTOR_BODY]] ]
-; CHECK-ORDERED-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
-; CHECK-ORDERED-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 8
-; CHECK-ORDERED-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[TMP8]]
-; CHECK-ORDERED-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 16
-; CHECK-ORDERED-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[TMP11]]
-; CHECK-ORDERED-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-NEXT: [[TMP14:%.*]] = mul nuw i64 [[TMP13]], 24
-; CHECK-ORDERED-NEXT: [[TMP15:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[TMP14]]
-; CHECK-ORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x float>, ptr [[TMP6]], align 4
-; CHECK-ORDERED-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x float>, ptr [[TMP9]], align 4
-; CHECK-ORDERED-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x float>, ptr [[TMP12]], align 4
-; CHECK-ORDERED-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 8 x float>, ptr [[TMP15]], align 4
-; CHECK-ORDERED-NEXT: [[TMP16:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[VEC_PHI]], <vscale x 8 x float> [[WIDE_LOAD]])
-; CHECK-ORDERED-NEXT: [[TMP17:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP16]], <vscale x 8 x float> [[WIDE_LOAD1]])
-; CHECK-ORDERED-NEXT: [[TMP18:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP17]], <vscale x 8 x float> [[WIDE_LOAD2]])
-; CHECK-ORDERED-NEXT: [[TMP19]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP18]], <vscale x 8 x float> [[WIDE_LOAD3]])
-; CHECK-ORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-ORDERED-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-ORDERED-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-ORDERED-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP17:%.*]], [[VECTOR_BODY]] ]
+; CHECK-ORDERED-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-ORDERED-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8
+; CHECK-ORDERED-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[TMP6]]
+; CHECK-ORDERED-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
+; CHECK-ORDERED-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[TMP9]]
+; CHECK-ORDERED-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 24
+; CHECK-ORDERED-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[TMP12]]
+; CHECK-ORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x float>, ptr [[TMP4]], align 4
+; CHECK-ORDERED-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x float>, ptr [[TMP7]], align 4
+; CHECK-ORDERED-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x float>, ptr [[TMP10]], align 4
+; CHECK-ORDERED-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 8 x float>, ptr [[TMP13]], align 4
+; CHECK-ORDERED-NEXT: [[TMP14:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[VEC_PHI]], <vscale x 8 x float> [[WIDE_LOAD]])
+; CHECK-ORDERED-NEXT: [[TMP15:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP14]], <vscale x 8 x float> [[WIDE_LOAD1]])
+; CHECK-ORDERED-NEXT: [[TMP16:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP15]], <vscale x 8 x float> [[WIDE_LOAD2]])
+; CHECK-ORDERED-NEXT: [[TMP17]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP16]], <vscale x 8 x float> [[WIDE_LOAD3]])
+; CHECK-ORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; CHECK-ORDERED-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-ORDERED-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK-ORDERED: middle.block:
; CHECK-ORDERED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-ORDERED-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK-ORDERED: scalar.ph:
; CHECK-ORDERED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-ORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP19]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[ENTRY]] ]
+; CHECK-ORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP17]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[ENTRY]] ]
; CHECK-ORDERED-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-ORDERED: for.body:
; CHECK-ORDERED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-ORDERED-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; CHECK-ORDERED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-ORDERED-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; CHECK-ORDERED-NEXT: [[ADD]] = fadd float [[TMP21]], [[SUM_07]]
+; CHECK-ORDERED-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-ORDERED-NEXT: [[ADD]] = fadd float [[TMP19]], [[SUM_07]]
; CHECK-ORDERED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-ORDERED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-ORDERED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK-ORDERED: for.end:
-; CHECK-ORDERED-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP19]], [[MIDDLE_BLOCK]] ]
+; CHECK-ORDERED-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP17]], [[MIDDLE_BLOCK]] ]
; CHECK-ORDERED-NEXT: ret float [[ADD_LCSSA]]
;
; CHECK-ORDERED-TF-LABEL: define float @fadd_strict_unroll
@@ -342,26 +328,20 @@ define float @fadd_strict_unroll(ptr noalias nocapture readonly %a, i64 %n) #0 {
; CHECK-ORDERED-TF: vector.ph:
; CHECK-ORDERED-TF-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-ORDERED-TF-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 32
-; CHECK-ORDERED-TF-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-ORDERED-TF-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP2]]
-; CHECK-ORDERED-TF-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-ORDERED-TF-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; CHECK-ORDERED-TF-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 32
-; CHECK-ORDERED-TF-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 32
-; CHECK-ORDERED-TF-NEXT: [[TMP7:%.*]] = sub i64 [[N]], [[TMP6]]
-; CHECK-ORDERED-TF-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[N]], [[TMP6]]
-; CHECK-ORDERED-TF-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 [[TMP7]], i64 0
-; CHECK-ORDERED-TF-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 8
-; CHECK-ORDERED-TF-NEXT: [[INDEX_PART_NEXT:%.*]] = add i64 0, [[TMP11]]
-; CHECK-ORDERED-TF-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP12]], 16
-; CHECK-ORDERED-TF-NEXT: [[INDEX_PART_NEXT1:%.*]] = add i64 0, [[TMP13]]
-; CHECK-ORDERED-TF-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP15:%.*]] = mul nuw i64 [[TMP14]], 24
-; CHECK-ORDERED-TF-NEXT: [[INDEX_PART_NEXT2:%.*]] = add i64 0, [[TMP15]]
+; CHECK-ORDERED-TF-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-TF-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 32
+; CHECK-ORDERED-TF-NEXT: [[TMP4:%.*]] = sub i64 [[N]], [[TMP3]]
+; CHECK-ORDERED-TF-NEXT: [[TMP5:%.*]] = icmp ugt i64 [[N]], [[TMP3]]
+; CHECK-ORDERED-TF-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i64 [[TMP4]], i64 0
+; CHECK-ORDERED-TF-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-TF-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 8
+; CHECK-ORDERED-TF-NEXT: [[INDEX_PART_NEXT:%.*]] = add i64 0, [[TMP8]]
+; CHECK-ORDERED-TF-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-TF-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 16
+; CHECK-ORDERED-TF-NEXT: [[INDEX_PART_NEXT1:%.*]] = add i64 0, [[TMP10]]
+; CHECK-ORDERED-TF-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-TF-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 24
+; CHECK-ORDERED-TF-NEXT: [[INDEX_PART_NEXT2:%.*]] = add i64 0, [[TMP12]]
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 0, i64 [[N]])
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_ENTRY3:%.*]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX_PART_NEXT]], i64 [[N]])
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_ENTRY4:%.*]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX_PART_NEXT1]], i64 [[N]])
@@ -373,46 +353,46 @@ define float @fadd_strict_unroll(ptr noalias nocapture readonly %a, i64 %n) #0 {
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK6:%.*]] = phi <vscale x 8 x i1> [ [[ACTIVE_LANE_MASK_ENTRY3]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT12:%.*]], [[VECTOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK7:%.*]] = phi <vscale x 8 x i1> [ [[ACTIVE_LANE_MASK_ENTRY4]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT13:%.*]], [[VECTOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK8:%.*]] = phi <vscale x 8 x i1> [ [[ACTIVE_LANE_MASK_ENTRY5]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT14:%.*]], [[VECTOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP33:%.*]], [[VECTOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-ORDERED-TF-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP30:%.*]], [[VECTOR_BODY]] ]
+; CHECK-ORDERED-TF-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-ORDERED-TF-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-TF-NEXT: [[TMP15:%.*]] = mul nuw i64 [[TMP14]], 8
+; CHECK-ORDERED-TF-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 [[TMP15]]
; CHECK-ORDERED-TF-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 8
-; CHECK-ORDERED-TF-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP18]]
+; CHECK-ORDERED-TF-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 16
+; CHECK-ORDERED-TF-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 [[TMP18]]
; CHECK-ORDERED-TF-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP21:%.*]] = mul nuw i64 [[TMP20]], 16
-; CHECK-ORDERED-TF-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP21]]
-; CHECK-ORDERED-TF-NEXT: [[TMP23:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP24:%.*]] = mul nuw i64 [[TMP23]], 24
-; CHECK-ORDERED-TF-NEXT: [[TMP25:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP24]]
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP16]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD9:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP19]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK6]], <vscale x 8 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD10:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP22]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK7]], <vscale x 8 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD11:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP25]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK8]], <vscale x 8 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[TMP26:%.*]] = select <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x float> [[WIDE_MASKED_LOAD]], <vscale x 8 x float> splat (float -0.000000e+00)
-; CHECK-ORDERED-TF-NEXT: [[TMP27:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[VEC_PHI]], <vscale x 8 x float> [[TMP26]])
-; CHECK-ORDERED-TF-NEXT: [[TMP28:%.*]] = select <vscale x 8 x i1> [[ACTIVE_LANE_MASK6]], <vscale x 8 x float> [[WIDE_MASKED_LOAD9]], <vscale x 8 x float> splat (float -0.000000e+00)
-; CHECK-ORDERED-TF-NEXT: [[TMP29:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP27]], <vscale x 8 x float> [[TMP28]])
-; CHECK-ORDERED-TF-NEXT: [[TMP30:%.*]] = select <vscale x 8 x i1> [[ACTIVE_LANE_MASK7]], <vscale x 8 x float> [[WIDE_MASKED_LOAD10]], <vscale x 8 x float> splat (float -0.000000e+00)
-; CHECK-ORDERED-TF-NEXT: [[TMP31:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP29]], <vscale x 8 x float> [[TMP30]])
-; CHECK-ORDERED-TF-NEXT: [[TMP32:%.*]] = select <vscale x 8 x i1> [[ACTIVE_LANE_MASK8]], <vscale x 8 x float> [[WIDE_MASKED_LOAD11]], <vscale x 8 x float> splat (float -0.000000e+00)
-; CHECK-ORDERED-TF-NEXT: [[TMP33]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP31]], <vscale x 8 x float> [[TMP32]])
-; CHECK-ORDERED-TF-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]]
+; CHECK-ORDERED-TF-NEXT: [[TMP21:%.*]] = mul nuw i64 [[TMP20]], 24
+; CHECK-ORDERED-TF-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 [[TMP21]]
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP13]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD9:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP16]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK6]], <vscale x 8 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD10:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP19]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK7]], <vscale x 8 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD11:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP22]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK8]], <vscale x 8 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[TMP23:%.*]] = select <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x float> [[WIDE_MASKED_LOAD]], <vscale x 8 x float> splat (float -0.000000e+00)
+; CHECK-ORDERED-TF-NEXT: [[TMP24:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[VEC_PHI]], <vscale x 8 x float> [[TMP23]])
+; CHECK-ORDERED-TF-NEXT: [[TMP25:%.*]] = select <vscale x 8 x i1> [[ACTIVE_LANE_MASK6]], <vscale x 8 x float> [[WIDE_MASKED_LOAD9]], <vscale x 8 x float> splat (float -0.000000e+00)
+; CHECK-ORDERED-TF-NEXT: [[TMP26:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP24]], <vscale x 8 x float> [[TMP25]])
+; CHECK-ORDERED-TF-NEXT: [[TMP27:%.*]] = select <vscale x 8 x i1> [[ACTIVE_LANE_MASK7]], <vscale x 8 x float> [[WIDE_MASKED_LOAD10]], <vscale x 8 x float> splat (float -0.000000e+00)
+; CHECK-ORDERED-TF-NEXT: [[TMP28:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP26]], <vscale x 8 x float> [[TMP27]])
+; CHECK-ORDERED-TF-NEXT: [[TMP29:%.*]] = select <vscale x 8 x i1> [[ACTIVE_LANE_MASK8]], <vscale x 8 x float> [[WIDE_MASKED_LOAD11]], <vscale x 8 x float> splat (float -0.000000e+00)
+; CHECK-ORDERED-TF-NEXT: [[TMP30]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP28]], <vscale x 8 x float> [[TMP29]])
+; CHECK-ORDERED-TF-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]]
+; CHECK-ORDERED-TF-NEXT: [[TMP31:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-TF-NEXT: [[TMP32:%.*]] = mul nuw i64 [[TMP31]], 8
+; CHECK-ORDERED-TF-NEXT: [[TMP33:%.*]] = add i64 [[INDEX]], [[TMP32]]
; CHECK-ORDERED-TF-NEXT: [[TMP34:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP35:%.*]] = mul nuw i64 [[TMP34]], 8
+; CHECK-ORDERED-TF-NEXT: [[TMP35:%.*]] = mul nuw i64 [[TMP34]], 16
; CHECK-ORDERED-TF-NEXT: [[TMP36:%.*]] = add i64 [[INDEX]], [[TMP35]]
; CHECK-ORDERED-TF-NEXT: [[TMP37:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP38:%.*]] = mul nuw i64 [[TMP37]], 16
+; CHECK-ORDERED-TF-NEXT: [[TMP38:%.*]] = mul nuw i64 [[TMP37]], 24
; CHECK-ORDERED-TF-NEXT: [[TMP39:%.*]] = add i64 [[INDEX]], [[TMP38]]
-; CHECK-ORDERED-TF-NEXT: [[TMP40:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP41:%.*]] = mul nuw i64 [[TMP40]], 24
-; CHECK-ORDERED-TF-NEXT: [[TMP42:%.*]] = add i64 [[INDEX]], [[TMP41]]
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX]], i64 [[TMP9]])
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT12]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP36]], i64 [[TMP9]])
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT13]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP39]], i64 [[TMP9]])
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT14]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP42]], i64 [[TMP9]])
-; CHECK-ORDERED-TF-NEXT: [[TMP43:%.*]] = xor <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
-; CHECK-ORDERED-TF-NEXT: [[TMP44:%.*]] = extractelement <vscale x 8 x i1> [[TMP43]], i32 0
-; CHECK-ORDERED-TF-NEXT: br i1 [[TMP44]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX]], i64 [[TMP6]])
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT12]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP33]], i64 [[TMP6]])
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT13]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP36]], i64 [[TMP6]])
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT14]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP39]], i64 [[TMP6]])
+; CHECK-ORDERED-TF-NEXT: [[TMP40:%.*]] = xor <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
+; CHECK-ORDERED-TF-NEXT: [[TMP41:%.*]] = extractelement <vscale x 8 x i1> [[TMP40]], i32 0
+; CHECK-ORDERED-TF-NEXT: br i1 [[TMP41]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK-ORDERED-TF: middle.block:
; CHECK-ORDERED-TF-NEXT: br label [[FOR_END:%.*]]
; CHECK-ORDERED-TF: scalar.ph:
@@ -423,13 +403,13 @@ define float @fadd_strict_unroll(ptr noalias nocapture readonly %a, i64 %n) #0 {
; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-ORDERED-TF-NEXT: [[TMP45:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; CHECK-ORDERED-TF-NEXT: [[ADD]] = fadd float [[TMP45]], [[SUM_07]]
+; CHECK-ORDERED-TF-NEXT: [[TMP42:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-ORDERED-TF-NEXT: [[ADD]] = fadd float [[TMP42]], [[SUM_07]]
; CHECK-ORDERED-TF-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-ORDERED-TF-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-ORDERED-TF-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK-ORDERED-TF: for.end:
-; CHECK-ORDERED-TF-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP33]], [[MIDDLE_BLOCK]] ]
+; CHECK-ORDERED-TF-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP30]], [[MIDDLE_BLOCK]] ]
; CHECK-ORDERED-TF-NEXT: ret float [[ADD_LCSSA]]
;
@@ -500,54 +480,52 @@ define void @fadd_strict_interleave(ptr noalias nocapture readonly %a, ptr noali
; CHECK-UNORDERED-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4
; CHECK-UNORDERED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], [[TMP6]]
; CHECK-UNORDERED-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF]]
-; CHECK-UNORDERED-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-UNORDERED-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
-; CHECK-UNORDERED-NEXT: [[TMP9:%.*]] = mul i64 [[N_VEC]], 2
-; CHECK-UNORDERED-NEXT: [[TMP10:%.*]] = insertelement <vscale x 4 x float> splat (float -0.000000e+00), float [[A2]], i32 0
-; CHECK-UNORDERED-NEXT: [[TMP11:%.*]] = insertelement <vscale x 4 x float> splat (float -0.000000e+00), float [[A1]], i32 0
+; CHECK-UNORDERED-NEXT: [[TMP7:%.*]] = mul i64 [[N_VEC]], 2
+; CHECK-UNORDERED-NEXT: [[TMP8:%.*]] = insertelement <vscale x 4 x float> splat (float -0.000000e+00), float [[A2]], i32 0
+; CHECK-UNORDERED-NEXT: [[TMP9:%.*]] = insertelement <vscale x 4 x float> splat (float -0.000000e+00), float [[A1]], i32 0
; CHECK-UNORDERED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-UNORDERED: vector.body:
; CHECK-UNORDERED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-UNORDERED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ [[TMP10]], [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ]
-; CHECK-UNORDERED-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 4 x float> [ [[TMP11]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
+; CHECK-UNORDERED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ [[TMP8]], [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ]
+; CHECK-UNORDERED-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 4 x float> [ [[TMP9]], [[VECTOR_PH]] ], [ [[TMP13:%.*]], [[VECTOR_BODY]] ]
; CHECK-UNORDERED-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 2
-; CHECK-UNORDERED-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[OFFSET_IDX]]
-; CHECK-UNORDERED-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x float>, ptr [[TMP12]], align 4
+; CHECK-UNORDERED-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[OFFSET_IDX]]
+; CHECK-UNORDERED-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x float>, ptr [[TMP10]], align 4
; CHECK-UNORDERED-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.vector.deinterleave2.nxv8f32(<vscale x 8 x float> [[WIDE_VEC]])
-; CHECK-UNORDERED-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[STRIDED_VEC]], 0
-; CHECK-UNORDERED-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[STRIDED_VEC]], 1
-; CHECK-UNORDERED-NEXT: [[TMP15]] = fadd <vscale x 4 x float> [[TMP13]], [[VEC_PHI1]]
-; CHECK-UNORDERED-NEXT: [[TMP16]] = fadd <vscale x 4 x float> [[TMP14]], [[VEC_PHI]]
-; CHECK-UNORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
-; CHECK-UNORDERED-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-UNORDERED-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-UNORDERED-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[STRIDED_VEC]], 0
+; CHECK-UNORDERED-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[STRIDED_VEC]], 1
+; CHECK-UNORDERED-NEXT: [[TMP13]] = fadd <vscale x 4 x float> [[TMP11]], [[VEC_PHI1]]
+; CHECK-UNORDERED-NEXT: [[TMP14]] = fadd <vscale x 4 x float> [[TMP12]], [[VEC_PHI]]
+; CHECK-UNORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
+; CHECK-UNORDERED-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-UNORDERED-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK-UNORDERED: middle.block:
-; CHECK-UNORDERED-NEXT: [[TMP18:%.*]] = call float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[TMP16]])
-; CHECK-UNORDERED-NEXT: [[TMP19:%.*]] = call float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[TMP15]])
+; CHECK-UNORDERED-NEXT: [[TMP16:%.*]] = call float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[TMP14]])
+; CHECK-UNORDERED-NEXT: [[TMP17:%.*]] = call float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[TMP13]])
; CHECK-UNORDERED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP2]], [[N_VEC]]
; CHECK-UNORDERED-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK-UNORDERED: scalar.ph:
-; CHECK-UNORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP18]], [[MIDDLE_BLOCK]] ], [ [[A2]], [[ENTRY:%.*]] ]
-; CHECK-UNORDERED-NEXT: [[BC_MERGE_RDX2:%.*]] = phi float [ [[TMP19]], [[MIDDLE_BLOCK]] ], [ [[A1]], [[ENTRY]] ]
-; CHECK-UNORDERED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP9]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
+; CHECK-UNORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP16]], [[MIDDLE_BLOCK]] ], [ [[A2]], [[ENTRY:%.*]] ]
+; CHECK-UNORDERED-NEXT: [[BC_MERGE_RDX2:%.*]] = phi float [ [[TMP17]], [[MIDDLE_BLOCK]] ], [ [[A1]], [[ENTRY]] ]
+; CHECK-UNORDERED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP7]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
; CHECK-UNORDERED-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-UNORDERED: for.body:
; CHECK-UNORDERED-NEXT: [[ADD_PHI1:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD2:%.*]], [[FOR_BODY]] ]
; CHECK-UNORDERED-NEXT: [[ADD_PHI2:%.*]] = phi float [ [[BC_MERGE_RDX2]], [[SCALAR_PH]] ], [ [[ADD1:%.*]], [[FOR_BODY]] ]
; CHECK-UNORDERED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-UNORDERED-NEXT: [[ARRAYIDXB1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
-; CHECK-UNORDERED-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDXB1]], align 4
-; CHECK-UNORDERED-NEXT: [[ADD1]] = fadd float [[TMP20]], [[ADD_PHI2]]
+; CHECK-UNORDERED-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDXB1]], align 4
+; CHECK-UNORDERED-NEXT: [[ADD1]] = fadd float [[TMP18]], [[ADD_PHI2]]
; CHECK-UNORDERED-NEXT: [[OR:%.*]] = or disjoint i64 [[IV]], 1
; CHECK-UNORDERED-NEXT: [[ARRAYIDXB2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[OR]]
-; CHECK-UNORDERED-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDXB2]], align 4
-; CHECK-UNORDERED-NEXT: [[ADD2]] = fadd float [[TMP21]], [[ADD_PHI1]]
+; CHECK-UNORDERED-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDXB2]], align 4
+; CHECK-UNORDERED-NEXT: [[ADD2]] = fadd float [[TMP19]], [[ADD_PHI1]]
; CHECK-UNORDERED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 2
; CHECK-UNORDERED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-UNORDERED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK-UNORDERED: for.end:
-; CHECK-UNORDERED-NEXT: [[ADD1_LCSSA:%.*]] = phi float [ [[ADD1]], [[FOR_BODY]] ], [ [[TMP19]], [[MIDDLE_BLOCK]] ]
-; CHECK-UNORDERED-NEXT: [[ADD2_LCSSA:%.*]] = phi float [ [[ADD2]], [[FOR_BODY]] ], [ [[TMP18]], [[MIDDLE_BLOCK]] ]
+; CHECK-UNORDERED-NEXT: [[ADD1_LCSSA:%.*]] = phi float [ [[ADD1]], [[FOR_BODY]] ], [ [[TMP17]], [[MIDDLE_BLOCK]] ]
+; CHECK-UNORDERED-NEXT: [[ADD2_LCSSA:%.*]] = phi float [ [[ADD2]], [[FOR_BODY]] ], [ [[TMP16]], [[MIDDLE_BLOCK]] ]
; CHECK-UNORDERED-NEXT: store float [[ADD1_LCSSA]], ptr [[A]], align 4
; CHECK-UNORDERED-NEXT: store float [[ADD2_LCSSA]], ptr [[ARRAYIDXA]], align 4
; CHECK-UNORDERED-NEXT: ret void
@@ -570,50 +548,48 @@ define void @fadd_strict_interleave(ptr noalias nocapture readonly %a, ptr noali
; CHECK-ORDERED-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4
; CHECK-ORDERED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], [[TMP6]]
; CHECK-ORDERED-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF]]
-; CHECK-ORDERED-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
-; CHECK-ORDERED-NEXT: [[TMP9:%.*]] = mul i64 [[N_VEC]], 2
+; CHECK-ORDERED-NEXT: [[TMP7:%.*]] = mul i64 [[N_VEC]], 2
; CHECK-ORDERED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-ORDERED: vector.body:
; CHECK-ORDERED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-ORDERED-NEXT: [[VEC_PHI:%.*]] = phi float [ [[A2]], [[VECTOR_PH]] ], [ [[TMP13:%.*]], [[VECTOR_BODY]] ]
-; CHECK-ORDERED-NEXT: [[VEC_PHI1:%.*]] = phi float [ [[A1]], [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ]
+; CHECK-ORDERED-NEXT: [[VEC_PHI:%.*]] = phi float [ [[A2]], [[VECTOR_PH]] ], [ [[TMP11:%.*]], [[VECTOR_BODY]] ]
+; CHECK-ORDERED-NEXT: [[VEC_PHI1:%.*]] = phi float [ [[A1]], [[VECTOR_PH]] ], [ [[TMP12:%.*]], [[VECTOR_BODY]] ]
; CHECK-ORDERED-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 2
-; CHECK-ORDERED-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[OFFSET_IDX]]
-; CHECK-ORDERED-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x float>, ptr [[TMP10]], align 4
+; CHECK-ORDERED-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[OFFSET_IDX]]
+; CHECK-ORDERED-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x float>, ptr [[TMP8]], align 4
; CHECK-ORDERED-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.vector.deinterleave2.nxv8f32(<vscale x 8 x float> [[WIDE_VEC]])
-; CHECK-ORDERED-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[STRIDED_VEC]], 0
-; CHECK-ORDERED-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[STRIDED_VEC]], 1
-; CHECK-ORDERED-NEXT: [[TMP13]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[VEC_PHI]], <vscale x 4 x float> [[TMP12]])
-; CHECK-ORDERED-NEXT: [[TMP14]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[VEC_PHI1]], <vscale x 4 x float> [[TMP11]])
-; CHECK-ORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
-; CHECK-ORDERED-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-ORDERED-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-ORDERED-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[STRIDED_VEC]], 0
+; CHECK-ORDERED-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[STRIDED_VEC]], 1
+; CHECK-ORDERED-NEXT: [[TMP11]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[VEC_PHI]], <vscale x 4 x float> [[TMP10]])
+; CHECK-ORDERED-NEXT: [[TMP12]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[VEC_PHI1]], <vscale x 4 x float> [[TMP9]])
+; CHECK-ORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
+; CHECK-ORDERED-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-ORDERED-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK-ORDERED: middle.block:
; CHECK-ORDERED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP2]], [[N_VEC]]
; CHECK-ORDERED-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK-ORDERED: scalar.ph:
-; CHECK-ORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP13]], [[MIDDLE_BLOCK]] ], [ [[A2]], [[ENTRY:%.*]] ]
-; CHECK-ORDERED-NEXT: [[BC_MERGE_RDX2:%.*]] = phi float [ [[TMP14]], [[MIDDLE_BLOCK]] ], [ [[A1]], [[ENTRY]] ]
-; CHECK-ORDERED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP9]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
+; CHECK-ORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP11]], [[MIDDLE_BLOCK]] ], [ [[A2]], [[ENTRY:%.*]] ]
+; CHECK-ORDERED-NEXT: [[BC_MERGE_RDX2:%.*]] = phi float [ [[TMP12]], [[MIDDLE_BLOCK]] ], [ [[A1]], [[ENTRY]] ]
+; CHECK-ORDERED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP7]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
; CHECK-ORDERED-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-ORDERED: for.body:
; CHECK-ORDERED-NEXT: [[ADD_PHI1:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD2:%.*]], [[FOR_BODY]] ]
; CHECK-ORDERED-NEXT: [[ADD_PHI2:%.*]] = phi float [ [[BC_MERGE_RDX2]], [[SCALAR_PH]] ], [ [[ADD1:%.*]], [[FOR_BODY]] ]
; CHECK-ORDERED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-ORDERED-NEXT: [[ARRAYIDXB1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
-; CHECK-ORDERED-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDXB1]], align 4
-; CHECK-ORDERED-NEXT: [[ADD1]] = fadd float [[TMP16]], [[ADD_PHI2]]
+; CHECK-ORDERED-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDXB1]], align 4
+; CHECK-ORDERED-NEXT: [[ADD1]] = fadd float [[TMP14]], [[ADD_PHI2]]
; CHECK-ORDERED-NEXT: [[OR:%.*]] = or disjoint i64 [[IV]], 1
; CHECK-ORDERED-NEXT: [[ARRAYIDXB2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[OR]]
-; CHECK-ORDERED-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDXB2]], align 4
-; CHECK-ORDERED-NEXT: [[ADD2]] = fadd float [[TMP17]], [[ADD_PHI1]]
+; CHECK-ORDERED-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDXB2]], align 4
+; CHECK-ORDERED-NEXT: [[ADD2]] = fadd float [[TMP15]], [[ADD_PHI1]]
; CHECK-ORDERED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 2
; CHECK-ORDERED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-ORDERED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK-ORDERED: for.end:
-; CHECK-ORDERED-NEXT: [[ADD1_LCSSA:%.*]] = phi float [ [[ADD1]], [[FOR_BODY]] ], [ [[TMP14]], [[MIDDLE_BLOCK]] ]
-; CHECK-ORDERED-NEXT: [[ADD2_LCSSA:%.*]] = phi float [ [[ADD2]], [[FOR_BODY]] ], [ [[TMP13]], [[MIDDLE_BLOCK]] ]
+; CHECK-ORDERED-NEXT: [[ADD1_LCSSA:%.*]] = phi float [ [[ADD1]], [[FOR_BODY]] ], [ [[TMP12]], [[MIDDLE_BLOCK]] ]
+; CHECK-ORDERED-NEXT: [[ADD2_LCSSA:%.*]] = phi float [ [[ADD2]], [[FOR_BODY]] ], [ [[TMP11]], [[MIDDLE_BLOCK]] ]
; CHECK-ORDERED-NEXT: store float [[ADD1_LCSSA]], ptr [[A]], align 4
; CHECK-ORDERED-NEXT: store float [[ADD2_LCSSA]], ptr [[ARRAYIDXA]], align 4
; CHECK-ORDERED-NEXT: ret void
@@ -631,40 +607,34 @@ define void @fadd_strict_interleave(ptr noalias nocapture readonly %a, ptr noali
; CHECK-ORDERED-TF: vector.ph:
; CHECK-ORDERED-TF-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-ORDERED-TF-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
-; CHECK-ORDERED-TF-NEXT: [[TMP5:%.*]] = sub i64 [[TMP4]], 1
-; CHECK-ORDERED-TF-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP2]], [[TMP5]]
-; CHECK-ORDERED-TF-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP4]]
-; CHECK-ORDERED-TF-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; CHECK-ORDERED-TF-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
-; CHECK-ORDERED-TF-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 4
-; CHECK-ORDERED-TF-NEXT: [[TMP10:%.*]] = sub i64 [[TMP2]], [[TMP9]]
-; CHECK-ORDERED-TF-NEXT: [[TMP11:%.*]] = icmp ugt i64 [[TMP2]], [[TMP9]]
-; CHECK-ORDERED-TF-NEXT: [[TMP12:%.*]] = select i1 [[TMP11]], i64 [[TMP10]], i64 0
+; CHECK-ORDERED-TF-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-TF-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4
+; CHECK-ORDERED-TF-NEXT: [[TMP7:%.*]] = sub i64 [[TMP2]], [[TMP6]]
+; CHECK-ORDERED-TF-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[TMP2]], [[TMP6]]
+; CHECK-ORDERED-TF-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 [[TMP7]], i64 0
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 [[TMP2]])
; CHECK-ORDERED-TF-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-ORDERED-TF: vector.body:
; CHECK-ORDERED-TF-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[VEC_PHI:%.*]] = phi float [ [[A2]], [[VECTOR_PH]] ], [ [[TMP17:%.*]], [[VECTOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[VEC_PHI1:%.*]] = phi float [ [[A1]], [[VECTOR_PH]] ], [ [[TMP19:%.*]], [[VECTOR_BODY]] ]
+; CHECK-ORDERED-TF-NEXT: [[VEC_PHI:%.*]] = phi float [ [[A2]], [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ]
+; CHECK-ORDERED-TF-NEXT: [[VEC_PHI1:%.*]] = phi float [ [[A1]], [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 2
-; CHECK-ORDERED-TF-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[OFFSET_IDX]]
+; CHECK-ORDERED-TF-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[OFFSET_IDX]]
; CHECK-ORDERED-TF-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave2.nxv8i1(<vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK]])
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP13]], i32 4, <vscale x 8 x i1> [[INTERLEAVED_MASK]], <vscale x 8 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP10]], i32 4, <vscale x 8 x i1> [[INTERLEAVED_MASK]], <vscale x 8 x float> poison)
; CHECK-ORDERED-TF-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.vector.deinterleave2.nxv8f32(<vscale x 8 x float> [[WIDE_MASKED_VEC]])
-; CHECK-ORDERED-TF-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[STRIDED_VEC]], 0
-; CHECK-ORDERED-TF-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[STRIDED_VEC]], 1
-; CHECK-ORDERED-TF-NEXT: [[TMP16:%.*]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x float> [[TMP15]], <vscale x 4 x float> splat (float -0.000000e+00)
-; CHECK-ORDERED-TF-NEXT: [[TMP17]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[VEC_PHI]], <vscale x 4 x float> [[TMP16]])
-; CHECK-ORDERED-TF-NEXT: [[TMP18:%.*]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x float> [[TMP14]], <vscale x 4 x float> splat (float -0.000000e+00)
-; CHECK-ORDERED-TF-NEXT: [[TMP19]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[VEC_PHI1]], <vscale x 4 x float> [[TMP18]])
-; CHECK-ORDERED-TF-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP7]]
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP12]])
-; CHECK-ORDERED-TF-NEXT: [[TMP20:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
-; CHECK-ORDERED-TF-NEXT: [[TMP21:%.*]] = extractelement <vscale x 4 x i1> [[TMP20]], i32 0
-; CHECK-ORDERED-TF-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-ORDERED-TF-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[STRIDED_VEC]], 0
+; CHECK-ORDERED-TF-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[STRIDED_VEC]], 1
+; CHECK-ORDERED-TF-NEXT: [[TMP13:%.*]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x float> [[TMP12]], <vscale x 4 x float> splat (float -0.000000e+00)
+; CHECK-ORDERED-TF-NEXT: [[TMP14]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[VEC_PHI]], <vscale x 4 x float> [[TMP13]])
+; CHECK-ORDERED-TF-NEXT: [[TMP15:%.*]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x float> [[TMP11]], <vscale x 4 x float> splat (float -0.000000e+00)
+; CHECK-ORDERED-TF-NEXT: [[TMP16]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[VEC_PHI1]], <vscale x 4 x float> [[TMP15]])
+; CHECK-ORDERED-TF-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]]
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]])
+; CHECK-ORDERED-TF-NEXT: [[TMP17:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
+; CHECK-ORDERED-TF-NEXT: [[TMP18:%.*]] = extractelement <vscale x 4 x i1> [[TMP17]], i32 0
+; CHECK-ORDERED-TF-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK-ORDERED-TF: middle.block:
; CHECK-ORDERED-TF-NEXT: br label [[FOR_END:%.*]]
; CHECK-ORDERED-TF: scalar.ph:
@@ -677,18 +647,18 @@ define void @fadd_strict_interleave(ptr noalias nocapture readonly %a, ptr noali
; CHECK-ORDERED-TF-NEXT: [[ADD_PHI2:%.*]] = phi float [ [[A1]], [[SCALAR_PH]] ], [ [[ADD1:%.*]], [[FOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: [[ARRAYIDXB1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
-; CHECK-ORDERED-TF-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDXB1]], align 4
-; CHECK-ORDERED-TF-NEXT: [[ADD1]] = fadd float [[TMP22]], [[ADD_PHI2]]
+; CHECK-ORDERED-TF-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDXB1]], align 4
+; CHECK-ORDERED-TF-NEXT: [[ADD1]] = fadd float [[TMP19]], [[ADD_PHI2]]
; CHECK-ORDERED-TF-NEXT: [[OR:%.*]] = or disjoint i64 [[IV]], 1
; CHECK-ORDERED-TF-NEXT: [[ARRAYIDXB2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[OR]]
-; CHECK-ORDERED-TF-NEXT: [[TMP23:%.*]] = load float, ptr [[ARRAYIDXB2]], align 4
-; CHECK-ORDERED-TF-NEXT: [[ADD2]] = fadd float [[TMP23]], [[ADD_PHI1]]
+; CHECK-ORDERED-TF-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDXB2]], align 4
+; CHECK-ORDERED-TF-NEXT: [[ADD2]] = fadd float [[TMP20]], [[ADD_PHI1]]
; CHECK-ORDERED-TF-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 2
; CHECK-ORDERED-TF-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-ORDERED-TF-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK-ORDERED-TF: for.end:
-; CHECK-ORDERED-TF-NEXT: [[ADD1_LCSSA:%.*]] = phi float [ [[ADD1]], [[FOR_BODY]] ], [ [[TMP19]], [[MIDDLE_BLOCK]] ]
-; CHECK-ORDERED-TF-NEXT: [[ADD2_LCSSA:%.*]] = phi float [ [[ADD2]], [[FOR_BODY]] ], [ [[TMP17]], [[MIDDLE_BLOCK]] ]
+; CHECK-ORDERED-TF-NEXT: [[ADD1_LCSSA:%.*]] = phi float [ [[ADD1]], [[FOR_BODY]] ], [ [[TMP16]], [[MIDDLE_BLOCK]] ]
+; CHECK-ORDERED-TF-NEXT: [[ADD2_LCSSA:%.*]] = phi float [ [[ADD2]], [[FOR_BODY]] ], [ [[TMP14]], [[MIDDLE_BLOCK]] ]
; CHECK-ORDERED-TF-NEXT: store float [[ADD1_LCSSA]], ptr [[A]], align 4
; CHECK-ORDERED-TF-NEXT: store float [[ADD2_LCSSA]], ptr [[ARRAYIDXA]], align 4
; CHECK-ORDERED-TF-NEXT: ret void
@@ -770,43 +740,41 @@ define float @fadd_of_sum(ptr noalias nocapture readonly %a, ptr noalias nocaptu
; CHECK-UNORDERED-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; CHECK-UNORDERED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP4]]
; CHECK-UNORDERED-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-UNORDERED-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-UNORDERED-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4
; CHECK-UNORDERED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-UNORDERED: vector.body:
; CHECK-UNORDERED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-UNORDERED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ insertelement (<vscale x 4 x float> splat (float -0.000000e+00), float 0.000000e+00, i32 0), [[VECTOR_PH]] ], [ [[TMP10:%.*]], [[VECTOR_BODY]] ]
-; CHECK-UNORDERED-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP7]], align 4
-; CHECK-UNORDERED-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x float>, ptr [[TMP8]], align 4
-; CHECK-UNORDERED-NEXT: [[TMP9:%.*]] = fadd <vscale x 4 x float> [[WIDE_LOAD]], [[WIDE_LOAD1]]
-; CHECK-UNORDERED-NEXT: [[TMP10]] = fadd <vscale x 4 x float> [[VEC_PHI]], [[TMP9]]
-; CHECK-UNORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
-; CHECK-UNORDERED-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-UNORDERED-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-UNORDERED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ insertelement (<vscale x 4 x float> splat (float -0.000000e+00), float 0.000000e+00, i32 0), [[VECTOR_PH]] ], [ [[TMP8:%.*]], [[VECTOR_BODY]] ]
+; CHECK-UNORDERED-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP5]], align 4
+; CHECK-UNORDERED-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x float>, ptr [[TMP6]], align 4
+; CHECK-UNORDERED-NEXT: [[TMP7:%.*]] = fadd <vscale x 4 x float> [[WIDE_LOAD]], [[WIDE_LOAD1]]
+; CHECK-UNORDERED-NEXT: [[TMP8]] = fadd <vscale x 4 x float> [[VEC_PHI]], [[TMP7]]
+; CHECK-UNORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
+; CHECK-UNORDERED-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-UNORDERED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK-UNORDERED: middle.block:
-; CHECK-UNORDERED-NEXT: [[TMP12:%.*]] = call float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[TMP10]])
+; CHECK-UNORDERED-NEXT: [[TMP10:%.*]] = call float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[TMP8]])
; CHECK-UNORDERED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-UNORDERED-NEXT: br i1 [[CMP_N]], label [[FOR_END_LOOPEXIT:%.*]], label [[SCALAR_PH]]
; CHECK-UNORDERED: scalar.ph:
; CHECK-UNORDERED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
-; CHECK-UNORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP12]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[FOR_BODY_PREHEADER]] ]
+; CHECK-UNORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP10]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[FOR_BODY_PREHEADER]] ]
; CHECK-UNORDERED-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-UNORDERED: for.body:
; CHECK-UNORDERED-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-UNORDERED-NEXT: [[RES_014:%.*]] = phi float [ [[RDX:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
; CHECK-UNORDERED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-UNORDERED-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
+; CHECK-UNORDERED-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
; CHECK-UNORDERED-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
-; CHECK-UNORDERED-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX4]], align 4
-; CHECK-UNORDERED-NEXT: [[ADD:%.*]] = fadd float [[TMP13]], [[TMP14]]
+; CHECK-UNORDERED-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX4]], align 4
+; CHECK-UNORDERED-NEXT: [[ADD:%.*]] = fadd float [[TMP11]], [[TMP12]]
; CHECK-UNORDERED-NEXT: [[RDX]] = fadd float [[RES_014]], [[ADD]]
; CHECK-UNORDERED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-UNORDERED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-UNORDERED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK-UNORDERED: for.end.loopexit:
-; CHECK-UNORDERED-NEXT: [[RDX_LCSSA:%.*]] = phi float [ [[RDX]], [[FOR_BODY]] ], [ [[TMP12]], [[MIDDLE_BLOCK]] ]
+; CHECK-UNORDERED-NEXT: [[RDX_LCSSA:%.*]] = phi float [ [[RDX]], [[FOR_BODY]] ], [ [[TMP10]], [[MIDDLE_BLOCK]] ]
; CHECK-UNORDERED-NEXT: br label [[FOR_END]]
; CHECK-UNORDERED: for.end:
; CHECK-UNORDERED-NEXT: [[RES:%.*]] = phi float [ 0.000000e+00, [[ENTRY:%.*]] ], [ [[RDX_LCSSA]], [[FOR_END_LOOPEXIT]] ]
@@ -829,42 +797,40 @@ define float @fadd_of_sum(ptr noalias nocapture readonly %a, ptr noalias nocaptu
; CHECK-ORDERED-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; CHECK-ORDERED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP4]]
; CHECK-ORDERED-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-ORDERED-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4
; CHECK-ORDERED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-ORDERED: vector.body:
; CHECK-ORDERED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-ORDERED-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP10:%.*]], [[VECTOR_BODY]] ]
-; CHECK-ORDERED-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
-; CHECK-ORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP7]], align 4
-; CHECK-ORDERED-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
-; CHECK-ORDERED-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x float>, ptr [[TMP8]], align 4
-; CHECK-ORDERED-NEXT: [[TMP9:%.*]] = fadd <vscale x 4 x float> [[WIDE_LOAD]], [[WIDE_LOAD1]]
-; CHECK-ORDERED-NEXT: [[TMP10]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[VEC_PHI]], <vscale x 4 x float> [[TMP9]])
-; CHECK-ORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
-; CHECK-ORDERED-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-ORDERED-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-ORDERED-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP8:%.*]], [[VECTOR_BODY]] ]
+; CHECK-ORDERED-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-ORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP5]], align 4
+; CHECK-ORDERED-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
+; CHECK-ORDERED-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x float>, ptr [[TMP6]], align 4
+; CHECK-ORDERED-NEXT: [[TMP7:%.*]] = fadd <vscale x 4 x float> [[WIDE_LOAD]], [[WIDE_LOAD1]]
+; CHECK-ORDERED-NEXT: [[TMP8]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[VEC_PHI]], <vscale x 4 x float> [[TMP7]])
+; CHECK-ORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
+; CHECK-ORDERED-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-ORDERED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK-ORDERED: middle.block:
; CHECK-ORDERED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-ORDERED-NEXT: br i1 [[CMP_N]], label [[FOR_END_LOOPEXIT:%.*]], label [[SCALAR_PH]]
; CHECK-ORDERED: scalar.ph:
; CHECK-ORDERED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
-; CHECK-ORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP10]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[FOR_BODY_PREHEADER]] ]
+; CHECK-ORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP8]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[FOR_BODY_PREHEADER]] ]
; CHECK-ORDERED-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-ORDERED: for.body:
; CHECK-ORDERED-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-ORDERED-NEXT: [[RES_014:%.*]] = phi float [ [[RDX:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
; CHECK-ORDERED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-ORDERED-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
+; CHECK-ORDERED-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
; CHECK-ORDERED-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
-; CHECK-ORDERED-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX4]], align 4
-; CHECK-ORDERED-NEXT: [[ADD:%.*]] = fadd float [[TMP12]], [[TMP13]]
+; CHECK-ORDERED-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX4]], align 4
+; CHECK-ORDERED-NEXT: [[ADD:%.*]] = fadd float [[TMP10]], [[TMP11]]
; CHECK-ORDERED-NEXT: [[RDX]] = fadd float [[RES_014]], [[ADD]]
; CHECK-ORDERED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-ORDERED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-ORDERED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK-ORDERED: for.end.loopexit:
-; CHECK-ORDERED-NEXT: [[RDX_LCSSA:%.*]] = phi float [ [[RDX]], [[FOR_BODY]] ], [ [[TMP10]], [[MIDDLE_BLOCK]] ]
+; CHECK-ORDERED-NEXT: [[RDX_LCSSA:%.*]] = phi float [ [[RDX]], [[FOR_BODY]] ], [ [[TMP8]], [[MIDDLE_BLOCK]] ]
; CHECK-ORDERED-NEXT: br label [[FOR_END]]
; CHECK-ORDERED: for.end:
; CHECK-ORDERED-NEXT: [[RES:%.*]] = phi float [ 0.000000e+00, [[ENTRY:%.*]] ], [ [[RDX_LCSSA]], [[FOR_END_LOOPEXIT]] ]
@@ -882,35 +848,29 @@ define float @fadd_of_sum(ptr noalias nocapture readonly %a, ptr noalias nocaptu
; CHECK-ORDERED-TF: vector.ph:
; CHECK-ORDERED-TF-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-ORDERED-TF-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP1]], 4
-; CHECK-ORDERED-TF-NEXT: [[TMP3:%.*]] = sub i64 [[TMP2]], 1
-; CHECK-ORDERED-TF-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP3]]
-; CHECK-ORDERED-TF-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP2]]
-; CHECK-ORDERED-TF-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; CHECK-ORDERED-TF-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; CHECK-ORDERED-TF-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
-; CHECK-ORDERED-TF-NEXT: [[TMP8:%.*]] = sub i64 [[N]], [[TMP7]]
-; CHECK-ORDERED-TF-NEXT: [[TMP9:%.*]] = icmp ugt i64 [[N]], [[TMP7]]
-; CHECK-ORDERED-TF-NEXT: [[TMP10:%.*]] = select i1 [[TMP9]], i64 [[TMP8]], i64 0
+; CHECK-ORDERED-TF-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-TF-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
+; CHECK-ORDERED-TF-NEXT: [[TMP5:%.*]] = sub i64 [[N]], [[TMP4]]
+; CHECK-ORDERED-TF-NEXT: [[TMP6:%.*]] = icmp ugt i64 [[N]], [[TMP4]]
+; CHECK-ORDERED-TF-NEXT: [[TMP7:%.*]] = select i1 [[TMP6]], i64 [[TMP5]], i64 0
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 [[N]])
; CHECK-ORDERED-TF-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-ORDERED-TF: vector.body:
; CHECK-ORDERED-TF-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP11]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP12]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[TMP13:%.*]] = fadd <vscale x 4 x float> [[WIDE_MASKED_LOAD]], [[WIDE_MASKED_LOAD1]]
-; CHECK-ORDERED-TF-NEXT: [[TMP14:%.*]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x float> [[TMP13]], <vscale x 4 x float> splat (float -0.000000e+00)
-; CHECK-ORDERED-TF-NEXT: [[TMP15]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[VEC_PHI]], <vscale x 4 x float> [[TMP14]])
-; CHECK-ORDERED-TF-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP5]]
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP10]])
-; CHECK-ORDERED-TF-NEXT: [[TMP16:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
-; CHECK-ORDERED-TF-NEXT: [[TMP17:%.*]] = extractelement <vscale x 4 x i1> [[TMP16]], i32 0
-; CHECK-ORDERED-TF-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-ORDERED-TF-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP12:%.*]], [[VECTOR_BODY]] ]
+; CHECK-ORDERED-TF-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP8]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP9]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[TMP10:%.*]] = fadd <vscale x 4 x float> [[WIDE_MASKED_LOAD]], [[WIDE_MASKED_LOAD1]]
+; CHECK-ORDERED-TF-NEXT: [[TMP11:%.*]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x float> [[TMP10]], <vscale x 4 x float> splat (float -0.000000e+00)
+; CHECK-ORDERED-TF-NEXT: [[TMP12]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[VEC_PHI]], <vscale x 4 x float> [[TMP11]])
+; CHECK-ORDERED-TF-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP2]]
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP7]])
+; CHECK-ORDERED-TF-NEXT: [[TMP13:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
+; CHECK-ORDERED-TF-NEXT: [[TMP14:%.*]] = extractelement <vscale x 4 x i1> [[TMP13]], i32 0
+; CHECK-ORDERED-TF-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK-ORDERED-TF: middle.block:
; CHECK-ORDERED-TF-NEXT: br label [[FOR_END_LOOPEXIT:%.*]]
; CHECK-ORDERED-TF: scalar.ph:
@@ -921,16 +881,16 @@ define float @fadd_of_sum(ptr noalias nocapture readonly %a, ptr noalias nocaptu
; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
; CHECK-ORDERED-TF-NEXT: [[RES_014:%.*]] = phi float [ [[RDX:%.*]], [[FOR_BODY]] ], [ 0.000000e+00, [[SCALAR_PH]] ]
; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-ORDERED-TF-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
+; CHECK-ORDERED-TF-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
-; CHECK-ORDERED-TF-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX4]], align 4
-; CHECK-ORDERED-TF-NEXT: [[ADD:%.*]] = fadd float [[TMP18]], [[TMP19]]
+; CHECK-ORDERED-TF-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX4]], align 4
+; CHECK-ORDERED-TF-NEXT: [[ADD:%.*]] = fadd float [[TMP15]], [[TMP16]]
; CHECK-ORDERED-TF-NEXT: [[RDX]] = fadd float [[RES_014]], [[ADD]]
; CHECK-ORDERED-TF-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-ORDERED-TF-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-ORDERED-TF-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK-ORDERED-TF: for.end.loopexit:
-; CHECK-ORDERED-TF-NEXT: [[RDX_LCSSA:%.*]] = phi float [ [[RDX]], [[FOR_BODY]] ], [ [[TMP15]], [[MIDDLE_BLOCK]] ]
+; CHECK-ORDERED-TF-NEXT: [[RDX_LCSSA:%.*]] = phi float [ [[RDX]], [[FOR_BODY]] ], [ [[TMP12]], [[MIDDLE_BLOCK]] ]
; CHECK-ORDERED-TF-NEXT: br label [[FOR_END]]
; CHECK-ORDERED-TF: for.end:
; CHECK-ORDERED-TF-NEXT: [[RES:%.*]] = phi float [ 0.000000e+00, [[ENTRY:%.*]] ], [ [[RDX_LCSSA]], [[FOR_END_LOOPEXIT]] ]
@@ -1002,49 +962,47 @@ define float @fadd_conditional(ptr noalias nocapture readonly %a, ptr noalias no
; CHECK-UNORDERED-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-UNORDERED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-UNORDERED-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-UNORDERED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-UNORDERED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-UNORDERED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-UNORDERED: vector.body:
; CHECK-UNORDERED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-UNORDERED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ insertelement (<vscale x 4 x float> splat (float -0.000000e+00), float 1.000000e+00, i32 0), [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ]
-; CHECK-UNORDERED-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP6]], align 4
-; CHECK-UNORDERED-NEXT: [[TMP7:%.*]] = fcmp une <vscale x 4 x float> [[WIDE_LOAD]], zeroinitializer
-; CHECK-UNORDERED-NEXT: [[TMP8:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
-; CHECK-UNORDERED-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP8]], i32 4, <vscale x 4 x i1> [[TMP7]], <vscale x 4 x float> poison)
-; CHECK-UNORDERED-NEXT: [[PREDPHI:%.*]] = select <vscale x 4 x i1> [[TMP7]], <vscale x 4 x float> [[WIDE_MASKED_LOAD]], <vscale x 4 x float> splat (float 3.000000e+00)
-; CHECK-UNORDERED-NEXT: [[TMP9]] = fadd <vscale x 4 x float> [[VEC_PHI]], [[PREDPHI]]
-; CHECK-UNORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-UNORDERED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-UNORDERED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-UNORDERED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ insertelement (<vscale x 4 x float> splat (float -0.000000e+00), float 1.000000e+00, i32 0), [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ]
+; CHECK-UNORDERED-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP4]], align 4
+; CHECK-UNORDERED-NEXT: [[TMP5:%.*]] = fcmp une <vscale x 4 x float> [[WIDE_LOAD]], zeroinitializer
+; CHECK-UNORDERED-NEXT: [[TMP6:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
+; CHECK-UNORDERED-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP6]], i32 4, <vscale x 4 x i1> [[TMP5]], <vscale x 4 x float> poison)
+; CHECK-UNORDERED-NEXT: [[PREDPHI:%.*]] = select <vscale x 4 x i1> [[TMP5]], <vscale x 4 x float> [[WIDE_MASKED_LOAD]], <vscale x 4 x float> splat (float 3.000000e+00)
+; CHECK-UNORDERED-NEXT: [[TMP7]] = fadd <vscale x 4 x float> [[VEC_PHI]], [[PREDPHI]]
+; CHECK-UNORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; CHECK-UNORDERED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-UNORDERED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK-UNORDERED: middle.block:
-; CHECK-UNORDERED-NEXT: [[TMP11:%.*]] = call float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[TMP9]])
+; CHECK-UNORDERED-NEXT: [[TMP9:%.*]] = call float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[TMP7]])
; CHECK-UNORDERED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-UNORDERED-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK-UNORDERED: scalar.ph:
; CHECK-UNORDERED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-UNORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP11]], [[MIDDLE_BLOCK]] ], [ 1.000000e+00, [[ENTRY]] ]
+; CHECK-UNORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP9]], [[MIDDLE_BLOCK]] ], [ 1.000000e+00, [[ENTRY]] ]
; CHECK-UNORDERED-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-UNORDERED: for.body:
; CHECK-UNORDERED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
; CHECK-UNORDERED-NEXT: [[RES:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[FADD:%.*]], [[FOR_INC]] ]
; CHECK-UNORDERED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
-; CHECK-UNORDERED-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; CHECK-UNORDERED-NEXT: [[TOBOOL:%.*]] = fcmp une float [[TMP12]], 0.000000e+00
+; CHECK-UNORDERED-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-UNORDERED-NEXT: [[TOBOOL:%.*]] = fcmp une float [[TMP10]], 0.000000e+00
; CHECK-UNORDERED-NEXT: br i1 [[TOBOOL]], label [[IF_THEN:%.*]], label [[FOR_INC]]
; CHECK-UNORDERED: if.then:
; CHECK-UNORDERED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-UNORDERED-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
+; CHECK-UNORDERED-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
; CHECK-UNORDERED-NEXT: br label [[FOR_INC]]
; CHECK-UNORDERED: for.inc:
-; CHECK-UNORDERED-NEXT: [[PHI:%.*]] = phi float [ [[TMP13]], [[IF_THEN]] ], [ 3.000000e+00, [[FOR_BODY]] ]
+; CHECK-UNORDERED-NEXT: [[PHI:%.*]] = phi float [ [[TMP11]], [[IF_THEN]] ], [ 3.000000e+00, [[FOR_BODY]] ]
; CHECK-UNORDERED-NEXT: [[FADD]] = fadd float [[RES]], [[PHI]]
; CHECK-UNORDERED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-UNORDERED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-UNORDERED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK-UNORDERED: for.end:
-; CHECK-UNORDERED-NEXT: [[RDX:%.*]] = phi float [ [[FADD]], [[FOR_INC]] ], [ [[TMP11]], [[MIDDLE_BLOCK]] ]
+; CHECK-UNORDERED-NEXT: [[RDX:%.*]] = phi float [ [[FADD]], [[FOR_INC]] ], [ [[TMP9]], [[MIDDLE_BLOCK]] ]
; CHECK-UNORDERED-NEXT: ret float [[RDX]]
;
; CHECK-ORDERED-LABEL: define float @fadd_conditional
@@ -1059,48 +1017,46 @@ define float @fadd_conditional(ptr noalias nocapture readonly %a, ptr noalias no
; CHECK-ORDERED-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-ORDERED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-ORDERED-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-ORDERED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-ORDERED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-ORDERED: vector.body:
; CHECK-ORDERED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-ORDERED-NEXT: [[VEC_PHI:%.*]] = phi float [ 1.000000e+00, [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ]
-; CHECK-ORDERED-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
-; CHECK-ORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP6]], align 4
-; CHECK-ORDERED-NEXT: [[TMP7:%.*]] = fcmp une <vscale x 4 x float> [[WIDE_LOAD]], zeroinitializer
-; CHECK-ORDERED-NEXT: [[TMP8:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
-; CHECK-ORDERED-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP8]], i32 4, <vscale x 4 x i1> [[TMP7]], <vscale x 4 x float> poison)
-; CHECK-ORDERED-NEXT: [[PREDPHI:%.*]] = select <vscale x 4 x i1> [[TMP7]], <vscale x 4 x float> [[WIDE_MASKED_LOAD]], <vscale x 4 x float> splat (float 3.000000e+00)
-; CHECK-ORDERED-NEXT: [[TMP9]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[VEC_PHI]], <vscale x 4 x float> [[PREDPHI]])
-; CHECK-ORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-ORDERED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-ORDERED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-ORDERED-NEXT: [[VEC_PHI:%.*]] = phi float [ 1.000000e+00, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ]
+; CHECK-ORDERED-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
+; CHECK-ORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP4]], align 4
+; CHECK-ORDERED-NEXT: [[TMP5:%.*]] = fcmp une <vscale x 4 x float> [[WIDE_LOAD]], zeroinitializer
+; CHECK-ORDERED-NEXT: [[TMP6:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
+; CHECK-ORDERED-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP6]], i32 4, <vscale x 4 x i1> [[TMP5]], <vscale x 4 x float> poison)
+; CHECK-ORDERED-NEXT: [[PREDPHI:%.*]] = select <vscale x 4 x i1> [[TMP5]], <vscale x 4 x float> [[WIDE_MASKED_LOAD]], <vscale x 4 x float> splat (float 3.000000e+00)
+; CHECK-ORDERED-NEXT: [[TMP7]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[VEC_PHI]], <vscale x 4 x float> [[PREDPHI]])
+; CHECK-ORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; CHECK-ORDERED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-ORDERED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK-ORDERED: middle.block:
; CHECK-ORDERED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-ORDERED-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK-ORDERED: scalar.ph:
; CHECK-ORDERED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-ORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP9]], [[MIDDLE_BLOCK]] ], [ 1.000000e+00, [[ENTRY]] ]
+; CHECK-ORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP7]], [[MIDDLE_BLOCK]] ], [ 1.000000e+00, [[ENTRY]] ]
; CHECK-ORDERED-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-ORDERED: for.body:
; CHECK-ORDERED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
; CHECK-ORDERED-NEXT: [[RES:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[FADD:%.*]], [[FOR_INC]] ]
; CHECK-ORDERED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
-; CHECK-ORDERED-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; CHECK-ORDERED-NEXT: [[TOBOOL:%.*]] = fcmp une float [[TMP11]], 0.000000e+00
+; CHECK-ORDERED-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-ORDERED-NEXT: [[TOBOOL:%.*]] = fcmp une float [[TMP9]], 0.000000e+00
; CHECK-ORDERED-NEXT: br i1 [[TOBOOL]], label [[IF_THEN:%.*]], label [[FOR_INC]]
; CHECK-ORDERED: if.then:
; CHECK-ORDERED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-ORDERED-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
+; CHECK-ORDERED-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
; CHECK-ORDERED-NEXT: br label [[FOR_INC]]
; CHECK-ORDERED: for.inc:
-; CHECK-ORDERED-NEXT: [[PHI:%.*]] = phi float [ [[TMP12]], [[IF_THEN]] ], [ 3.000000e+00, [[FOR_BODY]] ]
+; CHECK-ORDERED-NEXT: [[PHI:%.*]] = phi float [ [[TMP10]], [[IF_THEN]] ], [ 3.000000e+00, [[FOR_BODY]] ]
; CHECK-ORDERED-NEXT: [[FADD]] = fadd float [[RES]], [[PHI]]
; CHECK-ORDERED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-ORDERED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-ORDERED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK-ORDERED: for.end:
-; CHECK-ORDERED-NEXT: [[RDX:%.*]] = phi float [ [[FADD]], [[FOR_INC]] ], [ [[TMP9]], [[MIDDLE_BLOCK]] ]
+; CHECK-ORDERED-NEXT: [[RDX:%.*]] = phi float [ [[FADD]], [[FOR_INC]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ]
; CHECK-ORDERED-NEXT: ret float [[RDX]]
;
; CHECK-ORDERED-TF-LABEL: define float @fadd_conditional
@@ -1110,37 +1066,31 @@ define float @fadd_conditional(ptr noalias nocapture readonly %a, ptr noalias no
; CHECK-ORDERED-TF: vector.ph:
; CHECK-ORDERED-TF-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-ORDERED-TF-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-ORDERED-TF-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-ORDERED-TF-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP2]]
-; CHECK-ORDERED-TF-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-ORDERED-TF-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; CHECK-ORDERED-TF-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
-; CHECK-ORDERED-TF-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4
-; CHECK-ORDERED-TF-NEXT: [[TMP7:%.*]] = sub i64 [[N]], [[TMP6]]
-; CHECK-ORDERED-TF-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[N]], [[TMP6]]
-; CHECK-ORDERED-TF-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 [[TMP7]], i64 0
+; CHECK-ORDERED-TF-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-TF-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; CHECK-ORDERED-TF-NEXT: [[TMP4:%.*]] = sub i64 [[N]], [[TMP3]]
+; CHECK-ORDERED-TF-NEXT: [[TMP5:%.*]] = icmp ugt i64 [[N]], [[TMP3]]
+; CHECK-ORDERED-TF-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i64 [[TMP4]], i64 0
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 [[N]])
; CHECK-ORDERED-TF-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-ORDERED-TF: vector.body:
; CHECK-ORDERED-TF-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[VEC_PHI:%.*]] = phi float [ 1.000000e+00, [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP10]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[TMP11:%.*]] = fcmp une <vscale x 4 x float> [[WIDE_MASKED_LOAD]], zeroinitializer
-; CHECK-ORDERED-TF-NEXT: [[TMP12:%.*]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i1> [[TMP11]], <vscale x 4 x i1> zeroinitializer
-; CHECK-ORDERED-TF-NEXT: [[TMP13:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP13]], i32 4, <vscale x 4 x i1> [[TMP12]], <vscale x 4 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[PREDPHI:%.*]] = select <vscale x 4 x i1> [[TMP12]], <vscale x 4 x float> [[WIDE_MASKED_LOAD1]], <vscale x 4 x float> splat (float 3.000000e+00)
-; CHECK-ORDERED-TF-NEXT: [[TMP14:%.*]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x float> [[PREDPHI]], <vscale x 4 x float> splat (float -0.000000e+00)
-; CHECK-ORDERED-TF-NEXT: [[TMP15]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[VEC_PHI]], <vscale x 4 x float> [[TMP14]])
-; CHECK-ORDERED-TF-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]]
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]])
-; CHECK-ORDERED-TF-NEXT: [[TMP16:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
-; CHECK-ORDERED-TF-NEXT: [[TMP17:%.*]] = extractelement <vscale x 4 x i1> [[TMP16]], i32 0
-; CHECK-ORDERED-TF-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-ORDERED-TF-NEXT: [[VEC_PHI:%.*]] = phi float [ 1.000000e+00, [[VECTOR_PH]] ], [ [[TMP12:%.*]], [[VECTOR_BODY]] ]
+; CHECK-ORDERED-TF-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP7]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[TMP8:%.*]] = fcmp une <vscale x 4 x float> [[WIDE_MASKED_LOAD]], zeroinitializer
+; CHECK-ORDERED-TF-NEXT: [[TMP9:%.*]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i1> [[TMP8]], <vscale x 4 x i1> zeroinitializer
+; CHECK-ORDERED-TF-NEXT: [[TMP10:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP10]], i32 4, <vscale x 4 x i1> [[TMP9]], <vscale x 4 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[PREDPHI:%.*]] = select <vscale x 4 x i1> [[TMP9]], <vscale x 4 x float> [[WIDE_MASKED_LOAD1]], <vscale x 4 x float> splat (float 3.000000e+00)
+; CHECK-ORDERED-TF-NEXT: [[TMP11:%.*]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x float> [[PREDPHI]], <vscale x 4 x float> splat (float -0.000000e+00)
+; CHECK-ORDERED-TF-NEXT: [[TMP12]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[VEC_PHI]], <vscale x 4 x float> [[TMP11]])
+; CHECK-ORDERED-TF-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]]
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP6]])
+; CHECK-ORDERED-TF-NEXT: [[TMP13:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
+; CHECK-ORDERED-TF-NEXT: [[TMP14:%.*]] = extractelement <vscale x 4 x i1> [[TMP13]], i32 0
+; CHECK-ORDERED-TF-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK-ORDERED-TF: middle.block:
; CHECK-ORDERED-TF-NEXT: br label [[FOR_END:%.*]]
; CHECK-ORDERED-TF: scalar.ph:
@@ -1151,21 +1101,21 @@ define float @fadd_conditional(ptr noalias nocapture readonly %a, ptr noalias no
; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
; CHECK-ORDERED-TF-NEXT: [[RES:%.*]] = phi float [ 1.000000e+00, [[SCALAR_PH]] ], [ [[FADD:%.*]], [[FOR_INC]] ]
; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
-; CHECK-ORDERED-TF-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; CHECK-ORDERED-TF-NEXT: [[TOBOOL:%.*]] = fcmp une float [[TMP18]], 0.000000e+00
+; CHECK-ORDERED-TF-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-ORDERED-TF-NEXT: [[TOBOOL:%.*]] = fcmp une float [[TMP15]], 0.000000e+00
; CHECK-ORDERED-TF-NEXT: br i1 [[TOBOOL]], label [[IF_THEN:%.*]], label [[FOR_INC]]
; CHECK-ORDERED-TF: if.then:
; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-ORDERED-TF-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
+; CHECK-ORDERED-TF-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
; CHECK-ORDERED-TF-NEXT: br label [[FOR_INC]]
; CHECK-ORDERED-TF: for.inc:
-; CHECK-ORDERED-TF-NEXT: [[PHI:%.*]] = phi float [ [[TMP19]], [[IF_THEN]] ], [ 3.000000e+00, [[FOR_BODY]] ]
+; CHECK-ORDERED-TF-NEXT: [[PHI:%.*]] = phi float [ [[TMP16]], [[IF_THEN]] ], [ 3.000000e+00, [[FOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: [[FADD]] = fadd float [[RES]], [[PHI]]
; CHECK-ORDERED-TF-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-ORDERED-TF-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-ORDERED-TF-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK-ORDERED-TF: for.end:
-; CHECK-ORDERED-TF-NEXT: [[RDX:%.*]] = phi float [ [[FADD]], [[FOR_INC]] ], [ [[TMP15]], [[MIDDLE_BLOCK]] ]
+; CHECK-ORDERED-TF-NEXT: [[RDX:%.*]] = phi float [ [[FADD]], [[FOR_INC]] ], [ [[TMP12]], [[MIDDLE_BLOCK]] ]
; CHECK-ORDERED-TF-NEXT: ret float [[RDX]]
;
@@ -1234,43 +1184,41 @@ define float @fadd_multiple(ptr noalias nocapture %a, ptr noalias nocapture %b,
; CHECK-UNORDERED-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-UNORDERED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-UNORDERED-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-UNORDERED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-UNORDERED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-UNORDERED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-UNORDERED: vector.body:
; CHECK-UNORDERED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-UNORDERED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x float> [ insertelement (<vscale x 8 x float> splat (float -0.000000e+00), float -0.000000e+00, i32 0), [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ]
-; CHECK-UNORDERED-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x float>, ptr [[TMP6]], align 4
-; CHECK-UNORDERED-NEXT: [[TMP7:%.*]] = fadd <vscale x 8 x float> [[VEC_PHI]], [[WIDE_LOAD]]
-; CHECK-UNORDERED-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x float>, ptr [[TMP8]], align 4
-; CHECK-UNORDERED-NEXT: [[TMP9]] = fadd <vscale x 8 x float> [[TMP7]], [[WIDE_LOAD1]]
-; CHECK-UNORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-UNORDERED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-UNORDERED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-UNORDERED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x float> [ insertelement (<vscale x 8 x float> splat (float -0.000000e+00), float -0.000000e+00, i32 0), [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ]
+; CHECK-UNORDERED-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x float>, ptr [[TMP4]], align 4
+; CHECK-UNORDERED-NEXT: [[TMP5:%.*]] = fadd <vscale x 8 x float> [[VEC_PHI]], [[WIDE_LOAD]]
+; CHECK-UNORDERED-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x float>, ptr [[TMP6]], align 4
+; CHECK-UNORDERED-NEXT: [[TMP7]] = fadd <vscale x 8 x float> [[TMP5]], [[WIDE_LOAD1]]
+; CHECK-UNORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; CHECK-UNORDERED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-UNORDERED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK-UNORDERED: middle.block:
-; CHECK-UNORDERED-NEXT: [[TMP11:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float -0.000000e+00, <vscale x 8 x float> [[TMP9]])
+; CHECK-UNORDERED-NEXT: [[TMP9:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float -0.000000e+00, <vscale x 8 x float> [[TMP7]])
; CHECK-UNORDERED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-UNORDERED-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK-UNORDERED: scalar.ph:
; CHECK-UNORDERED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-UNORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP11]], [[MIDDLE_BLOCK]] ], [ -0.000000e+00, [[ENTRY]] ]
+; CHECK-UNORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP9]], [[MIDDLE_BLOCK]] ], [ -0.000000e+00, [[ENTRY]] ]
; CHECK-UNORDERED-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-UNORDERED: for.body:
; CHECK-UNORDERED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-UNORDERED-NEXT: [[SUM:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD3:%.*]], [[FOR_BODY]] ]
; CHECK-UNORDERED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-UNORDERED-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; CHECK-UNORDERED-NEXT: [[ADD:%.*]] = fadd float [[SUM]], [[TMP12]]
+; CHECK-UNORDERED-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-UNORDERED-NEXT: [[ADD:%.*]] = fadd float [[SUM]], [[TMP10]]
; CHECK-UNORDERED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
-; CHECK-UNORDERED-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
-; CHECK-UNORDERED-NEXT: [[ADD3]] = fadd float [[ADD]], [[TMP13]]
+; CHECK-UNORDERED-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
+; CHECK-UNORDERED-NEXT: [[ADD3]] = fadd float [[ADD]], [[TMP11]]
; CHECK-UNORDERED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-UNORDERED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-UNORDERED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK-UNORDERED: for.end:
-; CHECK-UNORDERED-NEXT: [[RDX:%.*]] = phi float [ [[ADD3]], [[FOR_BODY]] ], [ [[TMP11]], [[MIDDLE_BLOCK]] ]
+; CHECK-UNORDERED-NEXT: [[RDX:%.*]] = phi float [ [[ADD3]], [[FOR_BODY]] ], [ [[TMP9]], [[MIDDLE_BLOCK]] ]
; CHECK-UNORDERED-NEXT: ret float [[RDX]]
;
; CHECK-ORDERED-LABEL: define float @fadd_multiple
@@ -1371,74 +1319,72 @@ define float @fmuladd_strict(ptr %a, ptr %b, i64 %n) #0 {
; CHECK-UNORDERED-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 32
; CHECK-UNORDERED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-UNORDERED-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-UNORDERED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-UNORDERED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 32
; CHECK-UNORDERED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-UNORDERED: vector.body:
; CHECK-UNORDERED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-UNORDERED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x float> [ insertelement (<vscale x 8 x float> splat (float -0.000000e+00), float 0.000000e+00, i32 0), [[VECTOR_PH]] ], [ [[TMP26:%.*]], [[VECTOR_BODY]] ]
-; CHECK-UNORDERED-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 8 x float> [ splat (float -0.000000e+00), [[VECTOR_PH]] ], [ [[TMP27:%.*]], [[VECTOR_BODY]] ]
-; CHECK-UNORDERED-NEXT: [[VEC_PHI2:%.*]] = phi <vscale x 8 x float> [ splat (float -0.000000e+00), [[VECTOR_PH]] ], [ [[TMP28:%.*]], [[VECTOR_BODY]] ]
-; CHECK-UNORDERED-NEXT: [[VEC_PHI3:%.*]] = phi <vscale x 8 x float> [ splat (float -0.000000e+00), [[VECTOR_PH]] ], [ [[TMP29:%.*]], [[VECTOR_BODY]] ]
-; CHECK-UNORDERED-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
-; CHECK-UNORDERED-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-UNORDERED-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 8
-; CHECK-UNORDERED-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[TMP8]]
-; CHECK-UNORDERED-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-UNORDERED-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 16
-; CHECK-UNORDERED-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[TMP11]]
-; CHECK-UNORDERED-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-UNORDERED-NEXT: [[TMP14:%.*]] = mul nuw i64 [[TMP13]], 24
-; CHECK-UNORDERED-NEXT: [[TMP15:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[TMP14]]
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x float>, ptr [[TMP6]], align 4
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 8 x float>, ptr [[TMP9]], align 4
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 8 x float>, ptr [[TMP12]], align 4
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD6:%.*]] = load <vscale x 8 x float>, ptr [[TMP15]], align 4
-; CHECK-UNORDERED-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
-; CHECK-UNORDERED-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-UNORDERED-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 8
-; CHECK-UNORDERED-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP18]]
-; CHECK-UNORDERED-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-UNORDERED-NEXT: [[TMP21:%.*]] = mul nuw i64 [[TMP20]], 16
-; CHECK-UNORDERED-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP21]]
-; CHECK-UNORDERED-NEXT: [[TMP23:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-UNORDERED-NEXT: [[TMP24:%.*]] = mul nuw i64 [[TMP23]], 24
-; CHECK-UNORDERED-NEXT: [[TMP25:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP24]]
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD7:%.*]] = load <vscale x 8 x float>, ptr [[TMP16]], align 4
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD8:%.*]] = load <vscale x 8 x float>, ptr [[TMP19]], align 4
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD9:%.*]] = load <vscale x 8 x float>, ptr [[TMP22]], align 4
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD10:%.*]] = load <vscale x 8 x float>, ptr [[TMP25]], align 4
-; CHECK-UNORDERED-NEXT: [[TMP26]] = call <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> [[WIDE_LOAD]], <vscale x 8 x float> [[WIDE_LOAD7]], <vscale x 8 x float> [[VEC_PHI]])
-; CHECK-UNORDERED-NEXT: [[TMP27]] = call <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> [[WIDE_LOAD4]], <vscale x 8 x float> [[WIDE_LOAD8]], <vscale x 8 x float> [[VEC_PHI1]])
-; CHECK-UNORDERED-NEXT: [[TMP28]] = call <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> [[WIDE_LOAD5]], <vscale x 8 x float> [[WIDE_LOAD9]], <vscale x 8 x float> [[VEC_PHI2]])
-; CHECK-UNORDERED-NEXT: [[TMP29]] = call <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> [[WIDE_LOAD6]], <vscale x 8 x float> [[WIDE_LOAD10]], <vscale x 8 x float> [[VEC_PHI3]])
-; CHECK-UNORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-UNORDERED-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-UNORDERED-NEXT: br i1 [[TMP30]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; CHECK-UNORDERED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x float> [ insertelement (<vscale x 8 x float> splat (float -0.000000e+00), float 0.000000e+00, i32 0), [[VECTOR_PH]] ], [ [[TMP24:%.*]], [[VECTOR_BODY]] ]
+; CHECK-UNORDERED-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 8 x float> [ splat (float -0.000000e+00), [[VECTOR_PH]] ], [ [[TMP25:%.*]], [[VECTOR_BODY]] ]
+; CHECK-UNORDERED-NEXT: [[VEC_PHI2:%.*]] = phi <vscale x 8 x float> [ splat (float -0.000000e+00), [[VECTOR_PH]] ], [ [[TMP26:%.*]], [[VECTOR_BODY]] ]
+; CHECK-UNORDERED-NEXT: [[VEC_PHI3:%.*]] = phi <vscale x 8 x float> [ splat (float -0.000000e+00), [[VECTOR_PH]] ], [ [[TMP27:%.*]], [[VECTOR_BODY]] ]
+; CHECK-UNORDERED-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-UNORDERED-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-UNORDERED-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8
+; CHECK-UNORDERED-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[TMP6]]
+; CHECK-UNORDERED-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-UNORDERED-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
+; CHECK-UNORDERED-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[TMP9]]
+; CHECK-UNORDERED-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-UNORDERED-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 24
+; CHECK-UNORDERED-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[TMP12]]
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x float>, ptr [[TMP4]], align 4
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 8 x float>, ptr [[TMP7]], align 4
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 8 x float>, ptr [[TMP10]], align 4
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD6:%.*]] = load <vscale x 8 x float>, ptr [[TMP13]], align 4
+; CHECK-UNORDERED-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
+; CHECK-UNORDERED-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-UNORDERED-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 8
+; CHECK-UNORDERED-NEXT: [[TMP17:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[TMP16]]
+; CHECK-UNORDERED-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-UNORDERED-NEXT: [[TMP19:%.*]] = mul nuw i64 [[TMP18]], 16
+; CHECK-UNORDERED-NEXT: [[TMP20:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[TMP19]]
+; CHECK-UNORDERED-NEXT: [[TMP21:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-UNORDERED-NEXT: [[TMP22:%.*]] = mul nuw i64 [[TMP21]], 24
+; CHECK-UNORDERED-NEXT: [[TMP23:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[TMP22]]
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD7:%.*]] = load <vscale x 8 x float>, ptr [[TMP14]], align 4
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD8:%.*]] = load <vscale x 8 x float>, ptr [[TMP17]], align 4
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD9:%.*]] = load <vscale x 8 x float>, ptr [[TMP20]], align 4
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD10:%.*]] = load <vscale x 8 x float>, ptr [[TMP23]], align 4
+; CHECK-UNORDERED-NEXT: [[TMP24]] = call <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> [[WIDE_LOAD]], <vscale x 8 x float> [[WIDE_LOAD7]], <vscale x 8 x float> [[VEC_PHI]])
+; CHECK-UNORDERED-NEXT: [[TMP25]] = call <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> [[WIDE_LOAD4]], <vscale x 8 x float> [[WIDE_LOAD8]], <vscale x 8 x float> [[VEC_PHI1]])
+; CHECK-UNORDERED-NEXT: [[TMP26]] = call <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> [[WIDE_LOAD5]], <vscale x 8 x float> [[WIDE_LOAD9]], <vscale x 8 x float> [[VEC_PHI2]])
+; CHECK-UNORDERED-NEXT: [[TMP27]] = call <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> [[WIDE_LOAD6]], <vscale x 8 x float> [[WIDE_LOAD10]], <vscale x 8 x float> [[VEC_PHI3]])
+; CHECK-UNORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; CHECK-UNORDERED-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-UNORDERED-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK-UNORDERED: middle.block:
-; CHECK-UNORDERED-NEXT: [[BIN_RDX:%.*]] = fadd <vscale x 8 x float> [[TMP27]], [[TMP26]]
-; CHECK-UNORDERED-NEXT: [[BIN_RDX11:%.*]] = fadd <vscale x 8 x float> [[TMP28]], [[BIN_RDX]]
-; CHECK-UNORDERED-NEXT: [[BIN_RDX12:%.*]] = fadd <vscale x 8 x float> [[TMP29]], [[BIN_RDX11]]
-; CHECK-UNORDERED-NEXT: [[TMP31:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float -0.000000e+00, <vscale x 8 x float> [[BIN_RDX12]])
+; CHECK-UNORDERED-NEXT: [[BIN_RDX:%.*]] = fadd <vscale x 8 x float> [[TMP25]], [[TMP24]]
+; CHECK-UNORDERED-NEXT: [[BIN_RDX11:%.*]] = fadd <vscale x 8 x float> [[TMP26]], [[BIN_RDX]]
+; CHECK-UNORDERED-NEXT: [[BIN_RDX12:%.*]] = fadd <vscale x 8 x float> [[TMP27]], [[BIN_RDX11]]
+; CHECK-UNORDERED-NEXT: [[TMP29:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float -0.000000e+00, <vscale x 8 x float> [[BIN_RDX12]])
; CHECK-UNORDERED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-UNORDERED-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK-UNORDERED: scalar.ph:
; CHECK-UNORDERED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-UNORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP31]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[ENTRY]] ]
+; CHECK-UNORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP29]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[ENTRY]] ]
; CHECK-UNORDERED-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-UNORDERED: for.body:
; CHECK-UNORDERED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-UNORDERED-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ]
; CHECK-UNORDERED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-UNORDERED-NEXT: [[TMP32:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-UNORDERED-NEXT: [[TMP30:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-UNORDERED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
-; CHECK-UNORDERED-NEXT: [[TMP33:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
-; CHECK-UNORDERED-NEXT: [[MULADD]] = tail call float @llvm.fmuladd.f32(float [[TMP32]], float [[TMP33]], float [[SUM_07]])
+; CHECK-UNORDERED-NEXT: [[TMP31:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
+; CHECK-UNORDERED-NEXT: [[MULADD]] = tail call float @llvm.fmuladd.f32(float [[TMP30]], float [[TMP31]], float [[SUM_07]])
; CHECK-UNORDERED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-UNORDERED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-UNORDERED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK-UNORDERED: for.end:
-; CHECK-UNORDERED-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], [[FOR_BODY]] ], [ [[TMP31]], [[MIDDLE_BLOCK]] ]
+; CHECK-UNORDERED-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], [[FOR_BODY]] ], [ [[TMP29]], [[MIDDLE_BLOCK]] ]
; CHECK-UNORDERED-NEXT: ret float [[MULADD_LCSSA]]
;
; CHECK-ORDERED-LABEL: define float @fmuladd_strict
@@ -1453,71 +1399,69 @@ define float @fmuladd_strict(ptr %a, ptr %b, i64 %n) #0 {
; CHECK-ORDERED-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 32
; CHECK-ORDERED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-ORDERED-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-ORDERED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 32
; CHECK-ORDERED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-ORDERED: vector.body:
; CHECK-ORDERED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-ORDERED-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP33:%.*]], [[VECTOR_BODY]] ]
-; CHECK-ORDERED-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
-; CHECK-ORDERED-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 8
-; CHECK-ORDERED-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[TMP8]]
-; CHECK-ORDERED-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 16
-; CHECK-ORDERED-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[TMP11]]
-; CHECK-ORDERED-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-NEXT: [[TMP14:%.*]] = mul nuw i64 [[TMP13]], 24
-; CHECK-ORDERED-NEXT: [[TMP15:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[TMP14]]
-; CHECK-ORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x float>, ptr [[TMP6]], align 4
-; CHECK-ORDERED-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x float>, ptr [[TMP9]], align 4
-; CHECK-ORDERED-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x float>, ptr [[TMP12]], align 4
-; CHECK-ORDERED-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 8 x float>, ptr [[TMP15]], align 4
-; CHECK-ORDERED-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
-; CHECK-ORDERED-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 8
-; CHECK-ORDERED-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP18]]
-; CHECK-ORDERED-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-NEXT: [[TMP21:%.*]] = mul nuw i64 [[TMP20]], 16
-; CHECK-ORDERED-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP21]]
-; CHECK-ORDERED-NEXT: [[TMP23:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-NEXT: [[TMP24:%.*]] = mul nuw i64 [[TMP23]], 24
-; CHECK-ORDERED-NEXT: [[TMP25:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP24]]
-; CHECK-ORDERED-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 8 x float>, ptr [[TMP16]], align 4
-; CHECK-ORDERED-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 8 x float>, ptr [[TMP19]], align 4
-; CHECK-ORDERED-NEXT: [[WIDE_LOAD6:%.*]] = load <vscale x 8 x float>, ptr [[TMP22]], align 4
-; CHECK-ORDERED-NEXT: [[WIDE_LOAD7:%.*]] = load <vscale x 8 x float>, ptr [[TMP25]], align 4
-; CHECK-ORDERED-NEXT: [[TMP26:%.*]] = fmul <vscale x 8 x float> [[WIDE_LOAD]], [[WIDE_LOAD4]]
-; CHECK-ORDERED-NEXT: [[TMP27:%.*]] = fmul <vscale x 8 x float> [[WIDE_LOAD1]], [[WIDE_LOAD5]]
-; CHECK-ORDERED-NEXT: [[TMP28:%.*]] = fmul <vscale x 8 x float> [[WIDE_LOAD2]], [[WIDE_LOAD6]]
-; CHECK-ORDERED-NEXT: [[TMP29:%.*]] = fmul <vscale x 8 x float> [[WIDE_LOAD3]], [[WIDE_LOAD7]]
-; CHECK-ORDERED-NEXT: [[TMP30:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[VEC_PHI]], <vscale x 8 x float> [[TMP26]])
-; CHECK-ORDERED-NEXT: [[TMP31:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP30]], <vscale x 8 x float> [[TMP27]])
-; CHECK-ORDERED-NEXT: [[TMP32:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP31]], <vscale x 8 x float> [[TMP28]])
-; CHECK-ORDERED-NEXT: [[TMP33]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP32]], <vscale x 8 x float> [[TMP29]])
-; CHECK-ORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-ORDERED-NEXT: [[TMP34:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-ORDERED-NEXT: br i1 [[TMP34]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK-ORDERED-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP31:%.*]], [[VECTOR_BODY]] ]
+; CHECK-ORDERED-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-ORDERED-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8
+; CHECK-ORDERED-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[TMP6]]
+; CHECK-ORDERED-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
+; CHECK-ORDERED-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[TMP9]]
+; CHECK-ORDERED-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 24
+; CHECK-ORDERED-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[TMP12]]
+; CHECK-ORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x float>, ptr [[TMP4]], align 4
+; CHECK-ORDERED-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x float>, ptr [[TMP7]], align 4
+; CHECK-ORDERED-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x float>, ptr [[TMP10]], align 4
+; CHECK-ORDERED-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 8 x float>, ptr [[TMP13]], align 4
+; CHECK-ORDERED-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
+; CHECK-ORDERED-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 8
+; CHECK-ORDERED-NEXT: [[TMP17:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[TMP16]]
+; CHECK-ORDERED-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-NEXT: [[TMP19:%.*]] = mul nuw i64 [[TMP18]], 16
+; CHECK-ORDERED-NEXT: [[TMP20:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[TMP19]]
+; CHECK-ORDERED-NEXT: [[TMP21:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-NEXT: [[TMP22:%.*]] = mul nuw i64 [[TMP21]], 24
+; CHECK-ORDERED-NEXT: [[TMP23:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[TMP22]]
+; CHECK-ORDERED-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 8 x float>, ptr [[TMP14]], align 4
+; CHECK-ORDERED-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 8 x float>, ptr [[TMP17]], align 4
+; CHECK-ORDERED-NEXT: [[WIDE_LOAD6:%.*]] = load <vscale x 8 x float>, ptr [[TMP20]], align 4
+; CHECK-ORDERED-NEXT: [[WIDE_LOAD7:%.*]] = load <vscale x 8 x float>, ptr [[TMP23]], align 4
+; CHECK-ORDERED-NEXT: [[TMP24:%.*]] = fmul <vscale x 8 x float> [[WIDE_LOAD]], [[WIDE_LOAD4]]
+; CHECK-ORDERED-NEXT: [[TMP25:%.*]] = fmul <vscale x 8 x float> [[WIDE_LOAD1]], [[WIDE_LOAD5]]
+; CHECK-ORDERED-NEXT: [[TMP26:%.*]] = fmul <vscale x 8 x float> [[WIDE_LOAD2]], [[WIDE_LOAD6]]
+; CHECK-ORDERED-NEXT: [[TMP27:%.*]] = fmul <vscale x 8 x float> [[WIDE_LOAD3]], [[WIDE_LOAD7]]
+; CHECK-ORDERED-NEXT: [[TMP28:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[VEC_PHI]], <vscale x 8 x float> [[TMP24]])
+; CHECK-ORDERED-NEXT: [[TMP29:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP28]], <vscale x 8 x float> [[TMP25]])
+; CHECK-ORDERED-NEXT: [[TMP30:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP29]], <vscale x 8 x float> [[TMP26]])
+; CHECK-ORDERED-NEXT: [[TMP31]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP30]], <vscale x 8 x float> [[TMP27]])
+; CHECK-ORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; CHECK-ORDERED-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-ORDERED-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK-ORDERED: middle.block:
; CHECK-ORDERED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-ORDERED-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK-ORDERED: scalar.ph:
; CHECK-ORDERED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-ORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP33]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[ENTRY]] ]
+; CHECK-ORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP31]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[ENTRY]] ]
; CHECK-ORDERED-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-ORDERED: for.body:
; CHECK-ORDERED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-ORDERED-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ]
; CHECK-ORDERED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-ORDERED-NEXT: [[TMP35:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-ORDERED-NEXT: [[TMP33:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-ORDERED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
-; CHECK-ORDERED-NEXT: [[TMP36:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
-; CHECK-ORDERED-NEXT: [[MULADD]] = tail call float @llvm.fmuladd.f32(float [[TMP35]], float [[TMP36]], float [[SUM_07]])
+; CHECK-ORDERED-NEXT: [[TMP34:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
+; CHECK-ORDERED-NEXT: [[MULADD]] = tail call float @llvm.fmuladd.f32(float [[TMP33]], float [[TMP34]], float [[SUM_07]])
; CHECK-ORDERED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-ORDERED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-ORDERED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; CHECK-ORDERED: for.end:
-; CHECK-ORDERED-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], [[FOR_BODY]] ], [ [[TMP33]], [[MIDDLE_BLOCK]] ]
+; CHECK-ORDERED-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], [[FOR_BODY]] ], [ [[TMP31]], [[MIDDLE_BLOCK]] ]
; CHECK-ORDERED-NEXT: ret float [[MULADD_LCSSA]]
;
; CHECK-ORDERED-TF-LABEL: define float @fmuladd_strict
@@ -1527,26 +1471,20 @@ define float @fmuladd_strict(ptr %a, ptr %b, i64 %n) #0 {
; CHECK-ORDERED-TF: vector.ph:
; CHECK-ORDERED-TF-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-ORDERED-TF-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 32
-; CHECK-ORDERED-TF-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-ORDERED-TF-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP2]]
-; CHECK-ORDERED-TF-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-ORDERED-TF-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; CHECK-ORDERED-TF-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 32
-; CHECK-ORDERED-TF-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 32
-; CHECK-ORDERED-TF-NEXT: [[TMP7:%.*]] = sub i64 [[N]], [[TMP6]]
-; CHECK-ORDERED-TF-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[N]], [[TMP6]]
-; CHECK-ORDERED-TF-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 [[TMP7]], i64 0
-; CHECK-ORDERED-TF-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 8
-; CHECK-ORDERED-TF-NEXT: [[INDEX_PART_NEXT:%.*]] = add i64 0, [[TMP11]]
-; CHECK-ORDERED-TF-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP12]], 16
-; CHECK-ORDERED-TF-NEXT: [[INDEX_PART_NEXT1:%.*]] = add i64 0, [[TMP13]]
-; CHECK-ORDERED-TF-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP15:%.*]] = mul nuw i64 [[TMP14]], 24
-; CHECK-ORDERED-TF-NEXT: [[INDEX_PART_NEXT2:%.*]] = add i64 0, [[TMP15]]
+; CHECK-ORDERED-TF-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-TF-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 32
+; CHECK-ORDERED-TF-NEXT: [[TMP4:%.*]] = sub i64 [[N]], [[TMP3]]
+; CHECK-ORDERED-TF-NEXT: [[TMP5:%.*]] = icmp ugt i64 [[N]], [[TMP3]]
+; CHECK-ORDERED-TF-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i64 [[TMP4]], i64 0
+; CHECK-ORDERED-TF-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-TF-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 8
+; CHECK-ORDERED-TF-NEXT: [[INDEX_PART_NEXT:%.*]] = add i64 0, [[TMP8]]
+; CHECK-ORDERED-TF-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-TF-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 16
+; CHECK-ORDERED-TF-NEXT: [[INDEX_PART_NEXT1:%.*]] = add i64 0, [[TMP10]]
+; CHECK-ORDERED-TF-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-TF-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 24
+; CHECK-ORDERED-TF-NEXT: [[INDEX_PART_NEXT2:%.*]] = add i64 0, [[TMP12]]
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 0, i64 [[N]])
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_ENTRY3:%.*]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX_PART_NEXT]], i64 [[N]])
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_ENTRY4:%.*]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX_PART_NEXT1]], i64 [[N]])
@@ -1558,64 +1496,64 @@ define float @fmuladd_strict(ptr %a, ptr %b, i64 %n) #0 {
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK6:%.*]] = phi <vscale x 8 x i1> [ [[ACTIVE_LANE_MASK_ENTRY3]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT16:%.*]], [[VECTOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK7:%.*]] = phi <vscale x 8 x i1> [ [[ACTIVE_LANE_MASK_ENTRY4]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT17:%.*]], [[VECTOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK8:%.*]] = phi <vscale x 8 x i1> [ [[ACTIVE_LANE_MASK_ENTRY5]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT18:%.*]], [[VECTOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP47:%.*]], [[VECTOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-ORDERED-TF-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP44:%.*]], [[VECTOR_BODY]] ]
+; CHECK-ORDERED-TF-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-ORDERED-TF-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-TF-NEXT: [[TMP15:%.*]] = mul nuw i64 [[TMP14]], 8
+; CHECK-ORDERED-TF-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 [[TMP15]]
; CHECK-ORDERED-TF-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 8
-; CHECK-ORDERED-TF-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP18]]
+; CHECK-ORDERED-TF-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 16
+; CHECK-ORDERED-TF-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 [[TMP18]]
; CHECK-ORDERED-TF-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP21:%.*]] = mul nuw i64 [[TMP20]], 16
-; CHECK-ORDERED-TF-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP21]]
-; CHECK-ORDERED-TF-NEXT: [[TMP23:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP24:%.*]] = mul nuw i64 [[TMP23]], 24
-; CHECK-ORDERED-TF-NEXT: [[TMP25:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP24]]
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP16]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD9:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP19]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK6]], <vscale x 8 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD10:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP22]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK7]], <vscale x 8 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD11:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP25]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK8]], <vscale x 8 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[TMP26:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
+; CHECK-ORDERED-TF-NEXT: [[TMP21:%.*]] = mul nuw i64 [[TMP20]], 24
+; CHECK-ORDERED-TF-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 [[TMP21]]
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP13]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD9:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP16]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK6]], <vscale x 8 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD10:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP19]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK7]], <vscale x 8 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD11:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP22]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK8]], <vscale x 8 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[TMP23:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
+; CHECK-ORDERED-TF-NEXT: [[TMP24:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-TF-NEXT: [[TMP25:%.*]] = mul nuw i64 [[TMP24]], 8
+; CHECK-ORDERED-TF-NEXT: [[TMP26:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i64 [[TMP25]]
; CHECK-ORDERED-TF-NEXT: [[TMP27:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP28:%.*]] = mul nuw i64 [[TMP27]], 8
-; CHECK-ORDERED-TF-NEXT: [[TMP29:%.*]] = getelementptr inbounds float, ptr [[TMP26]], i64 [[TMP28]]
+; CHECK-ORDERED-TF-NEXT: [[TMP28:%.*]] = mul nuw i64 [[TMP27]], 16
+; CHECK-ORDERED-TF-NEXT: [[TMP29:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i64 [[TMP28]]
; CHECK-ORDERED-TF-NEXT: [[TMP30:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP30]], 16
-; CHECK-ORDERED-TF-NEXT: [[TMP32:%.*]] = getelementptr inbounds float, ptr [[TMP26]], i64 [[TMP31]]
-; CHECK-ORDERED-TF-NEXT: [[TMP33:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP34:%.*]] = mul nuw i64 [[TMP33]], 24
-; CHECK-ORDERED-TF-NEXT: [[TMP35:%.*]] = getelementptr inbounds float, ptr [[TMP26]], i64 [[TMP34]]
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD12:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP26]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD13:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP29]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK6]], <vscale x 8 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD14:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP32]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK7]], <vscale x 8 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD15:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP35]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK8]], <vscale x 8 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[TMP36:%.*]] = fmul <vscale x 8 x float> [[WIDE_MASKED_LOAD]], [[WIDE_MASKED_LOAD12]]
-; CHECK-ORDERED-TF-NEXT: [[TMP37:%.*]] = fmul <vscale x 8 x float> [[WIDE_MASKED_LOAD9]], [[WIDE_MASKED_LOAD13]]
-; CHECK-ORDERED-TF-NEXT: [[TMP38:%.*]] = fmul <vscale x 8 x float> [[WIDE_MASKED_LOAD10]], [[WIDE_MASKED_LOAD14]]
-; CHECK-ORDERED-TF-NEXT: [[TMP39:%.*]] = fmul <vscale x 8 x float> [[WIDE_MASKED_LOAD11]], [[WIDE_MASKED_LOAD15]]
-; CHECK-ORDERED-TF-NEXT: [[TMP40:%.*]] = select <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x float> [[TMP36]], <vscale x 8 x float> splat (float -0.000000e+00)
-; CHECK-ORDERED-TF-NEXT: [[TMP41:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[VEC_PHI]], <vscale x 8 x float> [[TMP40]])
-; CHECK-ORDERED-TF-NEXT: [[TMP42:%.*]] = select <vscale x 8 x i1> [[ACTIVE_LANE_MASK6]], <vscale x 8 x float> [[TMP37]], <vscale x 8 x float> splat (float -0.000000e+00)
-; CHECK-ORDERED-TF-NEXT: [[TMP43:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP41]], <vscale x 8 x float> [[TMP42]])
-; CHECK-ORDERED-TF-NEXT: [[TMP44:%.*]] = select <vscale x 8 x i1> [[ACTIVE_LANE_MASK7]], <vscale x 8 x float> [[TMP38]], <vscale x 8 x float> splat (float -0.000000e+00)
-; CHECK-ORDERED-TF-NEXT: [[TMP45:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP43]], <vscale x 8 x float> [[TMP44]])
-; CHECK-ORDERED-TF-NEXT: [[TMP46:%.*]] = select <vscale x 8 x i1> [[ACTIVE_LANE_MASK8]], <vscale x 8 x float> [[TMP39]], <vscale x 8 x float> splat (float -0.000000e+00)
-; CHECK-ORDERED-TF-NEXT: [[TMP47]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP45]], <vscale x 8 x float> [[TMP46]])
-; CHECK-ORDERED-TF-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]]
+; CHECK-ORDERED-TF-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP30]], 24
+; CHECK-ORDERED-TF-NEXT: [[TMP32:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i64 [[TMP31]]
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD12:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP23]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD13:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP26]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK6]], <vscale x 8 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD14:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP29]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK7]], <vscale x 8 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD15:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP32]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK8]], <vscale x 8 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[TMP33:%.*]] = fmul <vscale x 8 x float> [[WIDE_MASKED_LOAD]], [[WIDE_MASKED_LOAD12]]
+; CHECK-ORDERED-TF-NEXT: [[TMP34:%.*]] = fmul <vscale x 8 x float> [[WIDE_MASKED_LOAD9]], [[WIDE_MASKED_LOAD13]]
+; CHECK-ORDERED-TF-NEXT: [[TMP35:%.*]] = fmul <vscale x 8 x float> [[WIDE_MASKED_LOAD10]], [[WIDE_MASKED_LOAD14]]
+; CHECK-ORDERED-TF-NEXT: [[TMP36:%.*]] = fmul <vscale x 8 x float> [[WIDE_MASKED_LOAD11]], [[WIDE_MASKED_LOAD15]]
+; CHECK-ORDERED-TF-NEXT: [[TMP37:%.*]] = select <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x float> [[TMP33]], <vscale x 8 x float> splat (float -0.000000e+00)
+; CHECK-ORDERED-TF-NEXT: [[TMP38:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[VEC_PHI]], <vscale x 8 x float> [[TMP37]])
+; CHECK-ORDERED-TF-NEXT: [[TMP39:%.*]] = select <vscale x 8 x i1> [[ACTIVE_LANE_MASK6]], <vscale x 8 x float> [[TMP34]], <vscale x 8 x float> splat (float -0.000000e+00)
+; CHECK-ORDERED-TF-NEXT: [[TMP40:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP38]], <vscale x 8 x float> [[TMP39]])
+; CHECK-ORDERED-TF-NEXT: [[TMP41:%.*]] = select <vscale x 8 x i1> [[ACTIVE_LANE_MASK7]], <vscale x 8 x float> [[TMP35]], <vscale x 8 x float> splat (float -0.000000e+00)
+; CHECK-ORDERED-TF-NEXT: [[TMP42:%.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP40]], <vscale x 8 x float> [[TMP41]])
+; CHECK-ORDERED-TF-NEXT: [[TMP43:%.*]] = select <vscale x 8 x i1> [[ACTIVE_LANE_MASK8]], <vscale x 8 x float> [[TMP36]], <vscale x 8 x float> splat (float -0.000000e+00)
+; CHECK-ORDERED-TF-NEXT: [[TMP44]] = call float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP42]], <vscale x 8 x float> [[TMP43]])
+; CHECK-ORDERED-TF-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]]
+; CHECK-ORDERED-TF-NEXT: [[TMP45:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-TF-NEXT: [[TMP46:%.*]] = mul nuw i64 [[TMP45]], 8
+; CHECK-ORDERED-TF-NEXT: [[TMP47:%.*]] = add i64 [[INDEX]], [[TMP46]]
; CHECK-ORDERED-TF-NEXT: [[TMP48:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP49:%.*]] = mul nuw i64 [[TMP48]], 8
+; CHECK-ORDERED-TF-NEXT: [[TMP49:%.*]] = mul nuw i64 [[TMP48]], 16
; CHECK-ORDERED-TF-NEXT: [[TMP50:%.*]] = add i64 [[INDEX]], [[TMP49]]
; CHECK-ORDERED-TF-NEXT: [[TMP51:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP52:%.*]] = mul nuw i64 [[TMP51]], 16
+; CHECK-ORDERED-TF-NEXT: [[TMP52:%.*]] = mul nuw i64 [[TMP51]], 24
; CHECK-ORDERED-TF-NEXT: [[TMP53:%.*]] = add i64 [[INDEX]], [[TMP52]]
-; CHECK-ORDERED-TF-NEXT: [[TMP54:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP55:%.*]] = mul nuw i64 [[TMP54]], 24
-; CHECK-ORDERED-TF-NEXT: [[TMP56:%.*]] = add i64 [[INDEX]], [[TMP55]]
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX]], i64 [[TMP9]])
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT16]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP50]], i64 [[TMP9]])
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT17]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP53]], i64 [[TMP9]])
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT18]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP56]], i64 [[TMP9]])
-; CHECK-ORDERED-TF-NEXT: [[TMP57:%.*]] = xor <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
-; CHECK-ORDERED-TF-NEXT: [[TMP58:%.*]] = extractelement <vscale x 8 x i1> [[TMP57]], i32 0
-; CHECK-ORDERED-TF-NEXT: br i1 [[TMP58]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX]], i64 [[TMP6]])
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT16]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP47]], i64 [[TMP6]])
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT17]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP50]], i64 [[TMP6]])
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT18]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP53]], i64 [[TMP6]])
+; CHECK-ORDERED-TF-NEXT: [[TMP54:%.*]] = xor <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
+; CHECK-ORDERED-TF-NEXT: [[TMP55:%.*]] = extractelement <vscale x 8 x i1> [[TMP54]], i32 0
+; CHECK-ORDERED-TF-NEXT: br i1 [[TMP55]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK-ORDERED-TF: middle.block:
; CHECK-ORDERED-TF-NEXT: br label [[FOR_END:%.*]]
; CHECK-ORDERED-TF: scalar.ph:
@@ -1626,15 +1564,15 @@ define float @fmuladd_strict(ptr %a, ptr %b, i64 %n) #0 {
; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-ORDERED-TF-NEXT: [[TMP59:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-ORDERED-TF-NEXT: [[TMP56:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
-; CHECK-ORDERED-TF-NEXT: [[TMP60:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
-; CHECK-ORDERED-TF-NEXT: [[MULADD]] = tail call float @llvm.fmuladd.f32(float [[TMP59]], float [[TMP60]], float [[SUM_07]])
+; CHECK-ORDERED-TF-NEXT: [[TMP57:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
+; CHECK-ORDERED-TF-NEXT: [[MULADD]] = tail call float @llvm.fmuladd.f32(float [[TMP56]], float [[TMP57]], float [[SUM_07]])
; CHECK-ORDERED-TF-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-ORDERED-TF-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-ORDERED-TF-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; CHECK-ORDERED-TF: for.end:
-; CHECK-ORDERED-TF-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], [[FOR_BODY]] ], [ [[TMP47]], [[MIDDLE_BLOCK]] ]
+; CHECK-ORDERED-TF-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], [[FOR_BODY]] ], [ [[TMP44]], [[MIDDLE_BLOCK]] ]
; CHECK-ORDERED-TF-NEXT: ret float [[MULADD_LCSSA]]
;
@@ -1693,74 +1631,72 @@ define float @fmuladd_strict_fmf(ptr %a, ptr %b, i64 %n) #0 {
; CHECK-UNORDERED-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 32
; CHECK-UNORDERED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-UNORDERED-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-UNORDERED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-UNORDERED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 32
; CHECK-UNORDERED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-UNORDERED: vector.body:
; CHECK-UNORDERED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-UNORDERED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x float> [ insertelement (<vscale x 8 x float> splat (float -0.000000e+00), float 0.000000e+00, i32 0), [[VECTOR_PH]] ], [ [[TMP26:%.*]], [[VECTOR_BODY]] ]
-; CHECK-UNORDERED-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 8 x float> [ splat (float -0.000000e+00), [[VECTOR_PH]] ], [ [[TMP27:%.*]], [[VECTOR_BODY]] ]
-; CHECK-UNORDERED-NEXT: [[VEC_PHI2:%.*]] = phi <vscale x 8 x float> [ splat (float -0.000000e+00), [[VECTOR_PH]] ], [ [[TMP28:%.*]], [[VECTOR_BODY]] ]
-; CHECK-UNORDERED-NEXT: [[VEC_PHI3:%.*]] = phi <vscale x 8 x float> [ splat (float -0.000000e+00), [[VECTOR_PH]] ], [ [[TMP29:%.*]], [[VECTOR_BODY]] ]
-; CHECK-UNORDERED-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
-; CHECK-UNORDERED-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-UNORDERED-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 8
-; CHECK-UNORDERED-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[TMP8]]
-; CHECK-UNORDERED-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-UNORDERED-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 16
-; CHECK-UNORDERED-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[TMP11]]
-; CHECK-UNORDERED-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-UNORDERED-NEXT: [[TMP14:%.*]] = mul nuw i64 [[TMP13]], 24
-; CHECK-UNORDERED-NEXT: [[TMP15:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[TMP14]]
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x float>, ptr [[TMP6]], align 4
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 8 x float>, ptr [[TMP9]], align 4
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 8 x float>, ptr [[TMP12]], align 4
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD6:%.*]] = load <vscale x 8 x float>, ptr [[TMP15]], align 4
-; CHECK-UNORDERED-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
-; CHECK-UNORDERED-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-UNORDERED-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 8
-; CHECK-UNORDERED-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP18]]
-; CHECK-UNORDERED-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-UNORDERED-NEXT: [[TMP21:%.*]] = mul nuw i64 [[TMP20]], 16
-; CHECK-UNORDERED-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP21]]
-; CHECK-UNORDERED-NEXT: [[TMP23:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-UNORDERED-NEXT: [[TMP24:%.*]] = mul nuw i64 [[TMP23]], 24
-; CHECK-UNORDERED-NEXT: [[TMP25:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP24]]
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD7:%.*]] = load <vscale x 8 x float>, ptr [[TMP16]], align 4
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD8:%.*]] = load <vscale x 8 x float>, ptr [[TMP19]], align 4
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD9:%.*]] = load <vscale x 8 x float>, ptr [[TMP22]], align 4
-; CHECK-UNORDERED-NEXT: [[WIDE_LOAD10:%.*]] = load <vscale x 8 x float>, ptr [[TMP25]], align 4
-; CHECK-UNORDERED-NEXT: [[TMP26]] = call nnan <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> [[WIDE_LOAD]], <vscale x 8 x float> [[WIDE_LOAD7]], <vscale x 8 x float> [[VEC_PHI]])
-; CHECK-UNORDERED-NEXT: [[TMP27]] = call nnan <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> [[WIDE_LOAD4]], <vscale x 8 x float> [[WIDE_LOAD8]], <vscale x 8 x float> [[VEC_PHI1]])
-; CHECK-UNORDERED-NEXT: [[TMP28]] = call nnan <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> [[WIDE_LOAD5]], <vscale x 8 x float> [[WIDE_LOAD9]], <vscale x 8 x float> [[VEC_PHI2]])
-; CHECK-UNORDERED-NEXT: [[TMP29]] = call nnan <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> [[WIDE_LOAD6]], <vscale x 8 x float> [[WIDE_LOAD10]], <vscale x 8 x float> [[VEC_PHI3]])
-; CHECK-UNORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-UNORDERED-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-UNORDERED-NEXT: br i1 [[TMP30]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK-UNORDERED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x float> [ insertelement (<vscale x 8 x float> splat (float -0.000000e+00), float 0.000000e+00, i32 0), [[VECTOR_PH]] ], [ [[TMP24:%.*]], [[VECTOR_BODY]] ]
+; CHECK-UNORDERED-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 8 x float> [ splat (float -0.000000e+00), [[VECTOR_PH]] ], [ [[TMP25:%.*]], [[VECTOR_BODY]] ]
+; CHECK-UNORDERED-NEXT: [[VEC_PHI2:%.*]] = phi <vscale x 8 x float> [ splat (float -0.000000e+00), [[VECTOR_PH]] ], [ [[TMP26:%.*]], [[VECTOR_BODY]] ]
+; CHECK-UNORDERED-NEXT: [[VEC_PHI3:%.*]] = phi <vscale x 8 x float> [ splat (float -0.000000e+00), [[VECTOR_PH]] ], [ [[TMP27:%.*]], [[VECTOR_BODY]] ]
+; CHECK-UNORDERED-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-UNORDERED-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-UNORDERED-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8
+; CHECK-UNORDERED-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[TMP6]]
+; CHECK-UNORDERED-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-UNORDERED-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
+; CHECK-UNORDERED-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[TMP9]]
+; CHECK-UNORDERED-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-UNORDERED-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 24
+; CHECK-UNORDERED-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[TMP12]]
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x float>, ptr [[TMP4]], align 4
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 8 x float>, ptr [[TMP7]], align 4
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 8 x float>, ptr [[TMP10]], align 4
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD6:%.*]] = load <vscale x 8 x float>, ptr [[TMP13]], align 4
+; CHECK-UNORDERED-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
+; CHECK-UNORDERED-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-UNORDERED-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 8
+; CHECK-UNORDERED-NEXT: [[TMP17:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[TMP16]]
+; CHECK-UNORDERED-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-UNORDERED-NEXT: [[TMP19:%.*]] = mul nuw i64 [[TMP18]], 16
+; CHECK-UNORDERED-NEXT: [[TMP20:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[TMP19]]
+; CHECK-UNORDERED-NEXT: [[TMP21:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-UNORDERED-NEXT: [[TMP22:%.*]] = mul nuw i64 [[TMP21]], 24
+; CHECK-UNORDERED-NEXT: [[TMP23:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[TMP22]]
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD7:%.*]] = load <vscale x 8 x float>, ptr [[TMP14]], align 4
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD8:%.*]] = load <vscale x 8 x float>, ptr [[TMP17]], align 4
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD9:%.*]] = load <vscale x 8 x float>, ptr [[TMP20]], align 4
+; CHECK-UNORDERED-NEXT: [[WIDE_LOAD10:%.*]] = load <vscale x 8 x float>, ptr [[TMP23]], align 4
+; CHECK-UNORDERED-NEXT: [[TMP24]] = call nnan <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> [[WIDE_LOAD]], <vscale x 8 x float> [[WIDE_LOAD7]], <vscale x 8 x float> [[VEC_PHI]])
+; CHECK-UNORDERED-NEXT: [[TMP25]] = call nnan <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> [[WIDE_LOAD4]], <vscale x 8 x float> [[WIDE_LOAD8]], <vscale x 8 x float> [[VEC_PHI1]])
+; CHECK-UNORDERED-NEXT: [[TMP26]] = call nnan <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> [[WIDE_LOAD5]], <vscale x 8 x float> [[WIDE_LOAD9]], <vscale x 8 x float> [[VEC_PHI2]])
+; CHECK-UNORDERED-NEXT: [[TMP27]] = call nnan <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> [[WIDE_LOAD6]], <vscale x 8 x float> [[WIDE_LOAD10]], <vscale x 8 x float> [[VEC_PHI3]])
+; CHECK-UNORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; CHECK-UNORDERED-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-UNORDERED-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK-UNORDERED: middle.block:
-; CHECK-UNORDERED-NEXT: [[BIN_RDX:%.*]] = fadd nnan <vscale x 8 x float> [[TMP27]], [[TMP26]]
-; CHECK-UNORDERED-NEXT: [[BIN_RDX11:%.*]] = fadd nnan <vscale x 8 x float> [[TMP28]], [[BIN_RDX]]
-; CHECK-UNORDERED-NEXT: [[BIN_RDX12:%.*]] = fadd nnan <vscale x 8 x float> [[TMP29]], [[BIN_RDX11]]
-; CHECK-UNORDERED-NEXT: [[TMP31:%.*]] = call nnan float @llvm.vector.reduce.fadd.nxv8f32(float -0.000000e+00, <vscale x 8 x float> [[BIN_RDX12]])
+; CHECK-UNORDERED-NEXT: [[BIN_RDX:%.*]] = fadd nnan <vscale x 8 x float> [[TMP25]], [[TMP24]]
+; CHECK-UNORDERED-NEXT: [[BIN_RDX11:%.*]] = fadd nnan <vscale x 8 x float> [[TMP26]], [[BIN_RDX]]
+; CHECK-UNORDERED-NEXT: [[BIN_RDX12:%.*]] = fadd nnan <vscale x 8 x float> [[TMP27]], [[BIN_RDX11]]
+; CHECK-UNORDERED-NEXT: [[TMP29:%.*]] = call nnan float @llvm.vector.reduce.fadd.nxv8f32(float -0.000000e+00, <vscale x 8 x float> [[BIN_RDX12]])
; CHECK-UNORDERED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-UNORDERED-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK-UNORDERED: scalar.ph:
; CHECK-UNORDERED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-UNORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP31]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[ENTRY]] ]
+; CHECK-UNORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP29]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[ENTRY]] ]
; CHECK-UNORDERED-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-UNORDERED: for.body:
; CHECK-UNORDERED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-UNORDERED-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ]
; CHECK-UNORDERED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-UNORDERED-NEXT: [[TMP32:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-UNORDERED-NEXT: [[TMP30:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-UNORDERED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
-; CHECK-UNORDERED-NEXT: [[TMP33:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
-; CHECK-UNORDERED-NEXT: [[MULADD]] = tail call nnan float @llvm.fmuladd.f32(float [[TMP32]], float [[TMP33]], float [[SUM_07]])
+; CHECK-UNORDERED-NEXT: [[TMP31:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
+; CHECK-UNORDERED-NEXT: [[MULADD]] = tail call nnan float @llvm.fmuladd.f32(float [[TMP30]], float [[TMP31]], float [[SUM_07]])
; CHECK-UNORDERED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-UNORDERED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-UNORDERED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; CHECK-UNORDERED: for.end:
-; CHECK-UNORDERED-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], [[FOR_BODY]] ], [ [[TMP31]], [[MIDDLE_BLOCK]] ]
+; CHECK-UNORDERED-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], [[FOR_BODY]] ], [ [[TMP29]], [[MIDDLE_BLOCK]] ]
; CHECK-UNORDERED-NEXT: ret float [[MULADD_LCSSA]]
;
; CHECK-ORDERED-LABEL: define float @fmuladd_strict_fmf
@@ -1775,71 +1711,69 @@ define float @fmuladd_strict_fmf(ptr %a, ptr %b, i64 %n) #0 {
; CHECK-ORDERED-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 32
; CHECK-ORDERED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-ORDERED-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-ORDERED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 32
; CHECK-ORDERED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-ORDERED: vector.body:
; CHECK-ORDERED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-ORDERED-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP33:%.*]], [[VECTOR_BODY]] ]
-; CHECK-ORDERED-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
-; CHECK-ORDERED-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 8
-; CHECK-ORDERED-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[TMP8]]
-; CHECK-ORDERED-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 16
-; CHECK-ORDERED-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[TMP11]]
-; CHECK-ORDERED-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-NEXT: [[TMP14:%.*]] = mul nuw i64 [[TMP13]], 24
-; CHECK-ORDERED-NEXT: [[TMP15:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[TMP14]]
-; CHECK-ORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x float>, ptr [[TMP6]], align 4
-; CHECK-ORDERED-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x float>, ptr [[TMP9]], align 4
-; CHECK-ORDERED-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x float>, ptr [[TMP12]], align 4
-; CHECK-ORDERED-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 8 x float>, ptr [[TMP15]], align 4
-; CHECK-ORDERED-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
-; CHECK-ORDERED-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 8
-; CHECK-ORDERED-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP18]]
-; CHECK-ORDERED-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-NEXT: [[TMP21:%.*]] = mul nuw i64 [[TMP20]], 16
-; CHECK-ORDERED-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP21]]
-; CHECK-ORDERED-NEXT: [[TMP23:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-NEXT: [[TMP24:%.*]] = mul nuw i64 [[TMP23]], 24
-; CHECK-ORDERED-NEXT: [[TMP25:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP24]]
-; CHECK-ORDERED-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 8 x float>, ptr [[TMP16]], align 4
-; CHECK-ORDERED-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 8 x float>, ptr [[TMP19]], align 4
-; CHECK-ORDERED-NEXT: [[WIDE_LOAD6:%.*]] = load <vscale x 8 x float>, ptr [[TMP22]], align 4
-; CHECK-ORDERED-NEXT: [[WIDE_LOAD7:%.*]] = load <vscale x 8 x float>, ptr [[TMP25]], align 4
-; CHECK-ORDERED-NEXT: [[TMP26:%.*]] = fmul nnan <vscale x 8 x float> [[WIDE_LOAD]], [[WIDE_LOAD4]]
-; CHECK-ORDERED-NEXT: [[TMP27:%.*]] = fmul nnan <vscale x 8 x float> [[WIDE_LOAD1]], [[WIDE_LOAD5]]
-; CHECK-ORDERED-NEXT: [[TMP28:%.*]] = fmul nnan <vscale x 8 x float> [[WIDE_LOAD2]], [[WIDE_LOAD6]]
-; CHECK-ORDERED-NEXT: [[TMP29:%.*]] = fmul nnan <vscale x 8 x float> [[WIDE_LOAD3]], [[WIDE_LOAD7]]
-; CHECK-ORDERED-NEXT: [[TMP30:%.*]] = call nnan float @llvm.vector.reduce.fadd.nxv8f32(float [[VEC_PHI]], <vscale x 8 x float> [[TMP26]])
-; CHECK-ORDERED-NEXT: [[TMP31:%.*]] = call nnan float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP30]], <vscale x 8 x float> [[TMP27]])
-; CHECK-ORDERED-NEXT: [[TMP32:%.*]] = call nnan float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP31]], <vscale x 8 x float> [[TMP28]])
-; CHECK-ORDERED-NEXT: [[TMP33]] = call nnan float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP32]], <vscale x 8 x float> [[TMP29]])
-; CHECK-ORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-ORDERED-NEXT: [[TMP34:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-ORDERED-NEXT: br i1 [[TMP34]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
+; CHECK-ORDERED-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP31:%.*]], [[VECTOR_BODY]] ]
+; CHECK-ORDERED-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-ORDERED-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8
+; CHECK-ORDERED-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[TMP6]]
+; CHECK-ORDERED-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
+; CHECK-ORDERED-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[TMP9]]
+; CHECK-ORDERED-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 24
+; CHECK-ORDERED-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[TMP12]]
+; CHECK-ORDERED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x float>, ptr [[TMP4]], align 4
+; CHECK-ORDERED-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x float>, ptr [[TMP7]], align 4
+; CHECK-ORDERED-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x float>, ptr [[TMP10]], align 4
+; CHECK-ORDERED-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 8 x float>, ptr [[TMP13]], align 4
+; CHECK-ORDERED-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
+; CHECK-ORDERED-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 8
+; CHECK-ORDERED-NEXT: [[TMP17:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[TMP16]]
+; CHECK-ORDERED-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-NEXT: [[TMP19:%.*]] = mul nuw i64 [[TMP18]], 16
+; CHECK-ORDERED-NEXT: [[TMP20:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[TMP19]]
+; CHECK-ORDERED-NEXT: [[TMP21:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-NEXT: [[TMP22:%.*]] = mul nuw i64 [[TMP21]], 24
+; CHECK-ORDERED-NEXT: [[TMP23:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[TMP22]]
+; CHECK-ORDERED-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 8 x float>, ptr [[TMP14]], align 4
+; CHECK-ORDERED-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 8 x float>, ptr [[TMP17]], align 4
+; CHECK-ORDERED-NEXT: [[WIDE_LOAD6:%.*]] = load <vscale x 8 x float>, ptr [[TMP20]], align 4
+; CHECK-ORDERED-NEXT: [[WIDE_LOAD7:%.*]] = load <vscale x 8 x float>, ptr [[TMP23]], align 4
+; CHECK-ORDERED-NEXT: [[TMP24:%.*]] = fmul nnan <vscale x 8 x float> [[WIDE_LOAD]], [[WIDE_LOAD4]]
+; CHECK-ORDERED-NEXT: [[TMP25:%.*]] = fmul nnan <vscale x 8 x float> [[WIDE_LOAD1]], [[WIDE_LOAD5]]
+; CHECK-ORDERED-NEXT: [[TMP26:%.*]] = fmul nnan <vscale x 8 x float> [[WIDE_LOAD2]], [[WIDE_LOAD6]]
+; CHECK-ORDERED-NEXT: [[TMP27:%.*]] = fmul nnan <vscale x 8 x float> [[WIDE_LOAD3]], [[WIDE_LOAD7]]
+; CHECK-ORDERED-NEXT: [[TMP28:%.*]] = call nnan float @llvm.vector.reduce.fadd.nxv8f32(float [[VEC_PHI]], <vscale x 8 x float> [[TMP24]])
+; CHECK-ORDERED-NEXT: [[TMP29:%.*]] = call nnan float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP28]], <vscale x 8 x float> [[TMP25]])
+; CHECK-ORDERED-NEXT: [[TMP30:%.*]] = call nnan float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP29]], <vscale x 8 x float> [[TMP26]])
+; CHECK-ORDERED-NEXT: [[TMP31]] = call nnan float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP30]], <vscale x 8 x float> [[TMP27]])
+; CHECK-ORDERED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; CHECK-ORDERED-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-ORDERED-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; CHECK-ORDERED: middle.block:
; CHECK-ORDERED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-ORDERED-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK-ORDERED: scalar.ph:
; CHECK-ORDERED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-ORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP33]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[ENTRY]] ]
+; CHECK-ORDERED-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP31]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[ENTRY]] ]
; CHECK-ORDERED-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-ORDERED: for.body:
; CHECK-ORDERED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-ORDERED-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ]
; CHECK-ORDERED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-ORDERED-NEXT: [[TMP35:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-ORDERED-NEXT: [[TMP33:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-ORDERED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
-; CHECK-ORDERED-NEXT: [[TMP36:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
-; CHECK-ORDERED-NEXT: [[MULADD]] = tail call nnan float @llvm.fmuladd.f32(float [[TMP35]], float [[TMP36]], float [[SUM_07]])
+; CHECK-ORDERED-NEXT: [[TMP34:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
+; CHECK-ORDERED-NEXT: [[MULADD]] = tail call nnan float @llvm.fmuladd.f32(float [[TMP33]], float [[TMP34]], float [[SUM_07]])
; CHECK-ORDERED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-ORDERED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-ORDERED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; CHECK-ORDERED: for.end:
-; CHECK-ORDERED-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], [[FOR_BODY]] ], [ [[TMP33]], [[MIDDLE_BLOCK]] ]
+; CHECK-ORDERED-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], [[FOR_BODY]] ], [ [[TMP31]], [[MIDDLE_BLOCK]] ]
; CHECK-ORDERED-NEXT: ret float [[MULADD_LCSSA]]
;
; CHECK-ORDERED-TF-LABEL: define float @fmuladd_strict_fmf
@@ -1849,26 +1783,20 @@ define float @fmuladd_strict_fmf(ptr %a, ptr %b, i64 %n) #0 {
; CHECK-ORDERED-TF: vector.ph:
; CHECK-ORDERED-TF-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-ORDERED-TF-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 32
-; CHECK-ORDERED-TF-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-ORDERED-TF-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP2]]
-; CHECK-ORDERED-TF-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-ORDERED-TF-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; CHECK-ORDERED-TF-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 32
-; CHECK-ORDERED-TF-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 32
-; CHECK-ORDERED-TF-NEXT: [[TMP7:%.*]] = sub i64 [[N]], [[TMP6]]
-; CHECK-ORDERED-TF-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[N]], [[TMP6]]
-; CHECK-ORDERED-TF-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 [[TMP7]], i64 0
-; CHECK-ORDERED-TF-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 8
-; CHECK-ORDERED-TF-NEXT: [[INDEX_PART_NEXT:%.*]] = add i64 0, [[TMP11]]
-; CHECK-ORDERED-TF-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP12]], 16
-; CHECK-ORDERED-TF-NEXT: [[INDEX_PART_NEXT1:%.*]] = add i64 0, [[TMP13]]
-; CHECK-ORDERED-TF-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP15:%.*]] = mul nuw i64 [[TMP14]], 24
-; CHECK-ORDERED-TF-NEXT: [[INDEX_PART_NEXT2:%.*]] = add i64 0, [[TMP15]]
+; CHECK-ORDERED-TF-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-TF-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 32
+; CHECK-ORDERED-TF-NEXT: [[TMP4:%.*]] = sub i64 [[N]], [[TMP3]]
+; CHECK-ORDERED-TF-NEXT: [[TMP5:%.*]] = icmp ugt i64 [[N]], [[TMP3]]
+; CHECK-ORDERED-TF-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i64 [[TMP4]], i64 0
+; CHECK-ORDERED-TF-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-TF-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 8
+; CHECK-ORDERED-TF-NEXT: [[INDEX_PART_NEXT:%.*]] = add i64 0, [[TMP8]]
+; CHECK-ORDERED-TF-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-TF-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 16
+; CHECK-ORDERED-TF-NEXT: [[INDEX_PART_NEXT1:%.*]] = add i64 0, [[TMP10]]
+; CHECK-ORDERED-TF-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-TF-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 24
+; CHECK-ORDERED-TF-NEXT: [[INDEX_PART_NEXT2:%.*]] = add i64 0, [[TMP12]]
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 0, i64 [[N]])
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_ENTRY3:%.*]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX_PART_NEXT]], i64 [[N]])
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_ENTRY4:%.*]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX_PART_NEXT1]], i64 [[N]])
@@ -1880,64 +1808,64 @@ define float @fmuladd_strict_fmf(ptr %a, ptr %b, i64 %n) #0 {
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK6:%.*]] = phi <vscale x 8 x i1> [ [[ACTIVE_LANE_MASK_ENTRY3]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT16:%.*]], [[VECTOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK7:%.*]] = phi <vscale x 8 x i1> [ [[ACTIVE_LANE_MASK_ENTRY4]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT17:%.*]], [[VECTOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK8:%.*]] = phi <vscale x 8 x i1> [ [[ACTIVE_LANE_MASK_ENTRY5]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT18:%.*]], [[VECTOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP47:%.*]], [[VECTOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-ORDERED-TF-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP44:%.*]], [[VECTOR_BODY]] ]
+; CHECK-ORDERED-TF-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-ORDERED-TF-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-TF-NEXT: [[TMP15:%.*]] = mul nuw i64 [[TMP14]], 8
+; CHECK-ORDERED-TF-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 [[TMP15]]
; CHECK-ORDERED-TF-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 8
-; CHECK-ORDERED-TF-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP18]]
+; CHECK-ORDERED-TF-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 16
+; CHECK-ORDERED-TF-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 [[TMP18]]
; CHECK-ORDERED-TF-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP21:%.*]] = mul nuw i64 [[TMP20]], 16
-; CHECK-ORDERED-TF-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP21]]
-; CHECK-ORDERED-TF-NEXT: [[TMP23:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP24:%.*]] = mul nuw i64 [[TMP23]], 24
-; CHECK-ORDERED-TF-NEXT: [[TMP25:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP24]]
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP16]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD9:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP19]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK6]], <vscale x 8 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD10:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP22]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK7]], <vscale x 8 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD11:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP25]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK8]], <vscale x 8 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[TMP26:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
+; CHECK-ORDERED-TF-NEXT: [[TMP21:%.*]] = mul nuw i64 [[TMP20]], 24
+; CHECK-ORDERED-TF-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 [[TMP21]]
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP13]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD9:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP16]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK6]], <vscale x 8 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD10:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP19]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK7]], <vscale x 8 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD11:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP22]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK8]], <vscale x 8 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[TMP23:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
+; CHECK-ORDERED-TF-NEXT: [[TMP24:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-TF-NEXT: [[TMP25:%.*]] = mul nuw i64 [[TMP24]], 8
+; CHECK-ORDERED-TF-NEXT: [[TMP26:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i64 [[TMP25]]
; CHECK-ORDERED-TF-NEXT: [[TMP27:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP28:%.*]] = mul nuw i64 [[TMP27]], 8
-; CHECK-ORDERED-TF-NEXT: [[TMP29:%.*]] = getelementptr inbounds float, ptr [[TMP26]], i64 [[TMP28]]
+; CHECK-ORDERED-TF-NEXT: [[TMP28:%.*]] = mul nuw i64 [[TMP27]], 16
+; CHECK-ORDERED-TF-NEXT: [[TMP29:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i64 [[TMP28]]
; CHECK-ORDERED-TF-NEXT: [[TMP30:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP30]], 16
-; CHECK-ORDERED-TF-NEXT: [[TMP32:%.*]] = getelementptr inbounds float, ptr [[TMP26]], i64 [[TMP31]]
-; CHECK-ORDERED-TF-NEXT: [[TMP33:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP34:%.*]] = mul nuw i64 [[TMP33]], 24
-; CHECK-ORDERED-TF-NEXT: [[TMP35:%.*]] = getelementptr inbounds float, ptr [[TMP26]], i64 [[TMP34]]
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD12:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP26]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD13:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP29]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK6]], <vscale x 8 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD14:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP32]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK7]], <vscale x 8 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD15:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP35]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK8]], <vscale x 8 x float> poison)
-; CHECK-ORDERED-TF-NEXT: [[TMP36:%.*]] = fmul nnan <vscale x 8 x float> [[WIDE_MASKED_LOAD]], [[WIDE_MASKED_LOAD12]]
-; CHECK-ORDERED-TF-NEXT: [[TMP37:%.*]] = fmul nnan <vscale x 8 x float> [[WIDE_MASKED_LOAD9]], [[WIDE_MASKED_LOAD13]]
-; CHECK-ORDERED-TF-NEXT: [[TMP38:%.*]] = fmul nnan <vscale x 8 x float> [[WIDE_MASKED_LOAD10]], [[WIDE_MASKED_LOAD14]]
-; CHECK-ORDERED-TF-NEXT: [[TMP39:%.*]] = fmul nnan <vscale x 8 x float> [[WIDE_MASKED_LOAD11]], [[WIDE_MASKED_LOAD15]]
-; CHECK-ORDERED-TF-NEXT: [[TMP40:%.*]] = select nnan <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x float> [[TMP36]], <vscale x 8 x float> splat (float -0.000000e+00)
-; CHECK-ORDERED-TF-NEXT: [[TMP41:%.*]] = call nnan float @llvm.vector.reduce.fadd.nxv8f32(float [[VEC_PHI]], <vscale x 8 x float> [[TMP40]])
-; CHECK-ORDERED-TF-NEXT: [[TMP42:%.*]] = select nnan <vscale x 8 x i1> [[ACTIVE_LANE_MASK6]], <vscale x 8 x float> [[TMP37]], <vscale x 8 x float> splat (float -0.000000e+00)
-; CHECK-ORDERED-TF-NEXT: [[TMP43:%.*]] = call nnan float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP41]], <vscale x 8 x float> [[TMP42]])
-; CHECK-ORDERED-TF-NEXT: [[TMP44:%.*]] = select nnan <vscale x 8 x i1> [[ACTIVE_LANE_MASK7]], <vscale x 8 x float> [[TMP38]], <vscale x 8 x float> splat (float -0.000000e+00)
-; CHECK-ORDERED-TF-NEXT: [[TMP45:%.*]] = call nnan float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP43]], <vscale x 8 x float> [[TMP44]])
-; CHECK-ORDERED-TF-NEXT: [[TMP46:%.*]] = select nnan <vscale x 8 x i1> [[ACTIVE_LANE_MASK8]], <vscale x 8 x float> [[TMP39]], <vscale x 8 x float> splat (float -0.000000e+00)
-; CHECK-ORDERED-TF-NEXT: [[TMP47]] = call nnan float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP45]], <vscale x 8 x float> [[TMP46]])
-; CHECK-ORDERED-TF-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]]
+; CHECK-ORDERED-TF-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP30]], 24
+; CHECK-ORDERED-TF-NEXT: [[TMP32:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i64 [[TMP31]]
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD12:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP23]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD13:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP26]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK6]], <vscale x 8 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD14:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP29]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK7]], <vscale x 8 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD15:%.*]] = call <vscale x 8 x float> @llvm.masked.load.nxv8f32.p0(ptr [[TMP32]], i32 4, <vscale x 8 x i1> [[ACTIVE_LANE_MASK8]], <vscale x 8 x float> poison)
+; CHECK-ORDERED-TF-NEXT: [[TMP33:%.*]] = fmul nnan <vscale x 8 x float> [[WIDE_MASKED_LOAD]], [[WIDE_MASKED_LOAD12]]
+; CHECK-ORDERED-TF-NEXT: [[TMP34:%.*]] = fmul nnan <vscale x 8 x float> [[WIDE_MASKED_LOAD9]], [[WIDE_MASKED_LOAD13]]
+; CHECK-ORDERED-TF-NEXT: [[TMP35:%.*]] = fmul nnan <vscale x 8 x float> [[WIDE_MASKED_LOAD10]], [[WIDE_MASKED_LOAD14]]
+; CHECK-ORDERED-TF-NEXT: [[TMP36:%.*]] = fmul nnan <vscale x 8 x float> [[WIDE_MASKED_LOAD11]], [[WIDE_MASKED_LOAD15]]
+; CHECK-ORDERED-TF-NEXT: [[TMP37:%.*]] = select nnan <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x float> [[TMP33]], <vscale x 8 x float> splat (float -0.000000e+00)
+; CHECK-ORDERED-TF-NEXT: [[TMP38:%.*]] = call nnan float @llvm.vector.reduce.fadd.nxv8f32(float [[VEC_PHI]], <vscale x 8 x float> [[TMP37]])
+; CHECK-ORDERED-TF-NEXT: [[TMP39:%.*]] = select nnan <vscale x 8 x i1> [[ACTIVE_LANE_MASK6]], <vscale x 8 x float> [[TMP34]], <vscale x 8 x float> splat (float -0.000000e+00)
+; CHECK-ORDERED-TF-NEXT: [[TMP40:%.*]] = call nnan float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP38]], <vscale x 8 x float> [[TMP39]])
+; CHECK-ORDERED-TF-NEXT: [[TMP41:%.*]] = select nnan <vscale x 8 x i1> [[ACTIVE_LANE_MASK7]], <vscale x 8 x float> [[TMP35]], <vscale x 8 x float> splat (float -0.000000e+00)
+; CHECK-ORDERED-TF-NEXT: [[TMP42:%.*]] = call nnan float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP40]], <vscale x 8 x float> [[TMP41]])
+; CHECK-ORDERED-TF-NEXT: [[TMP43:%.*]] = select nnan <vscale x 8 x i1> [[ACTIVE_LANE_MASK8]], <vscale x 8 x float> [[TMP36]], <vscale x 8 x float> splat (float -0.000000e+00)
+; CHECK-ORDERED-TF-NEXT: [[TMP44]] = call nnan float @llvm.vector.reduce.fadd.nxv8f32(float [[TMP42]], <vscale x 8 x float> [[TMP43]])
+; CHECK-ORDERED-TF-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]]
+; CHECK-ORDERED-TF-NEXT: [[TMP45:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-ORDERED-TF-NEXT: [[TMP46:%.*]] = mul nuw i64 [[TMP45]], 8
+; CHECK-ORDERED-TF-NEXT: [[TMP47:%.*]] = add i64 [[INDEX]], [[TMP46]]
; CHECK-ORDERED-TF-NEXT: [[TMP48:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP49:%.*]] = mul nuw i64 [[TMP48]], 8
+; CHECK-ORDERED-TF-NEXT: [[TMP49:%.*]] = mul nuw i64 [[TMP48]], 16
; CHECK-ORDERED-TF-NEXT: [[TMP50:%.*]] = add i64 [[INDEX]], [[TMP49]]
; CHECK-ORDERED-TF-NEXT: [[TMP51:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP52:%.*]] = mul nuw i64 [[TMP51]], 16
+; CHECK-ORDERED-TF-NEXT: [[TMP52:%.*]] = mul nuw i64 [[TMP51]], 24
; CHECK-ORDERED-TF-NEXT: [[TMP53:%.*]] = add i64 [[INDEX]], [[TMP52]]
-; CHECK-ORDERED-TF-NEXT: [[TMP54:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP55:%.*]] = mul nuw i64 [[TMP54]], 24
-; CHECK-ORDERED-TF-NEXT: [[TMP56:%.*]] = add i64 [[INDEX]], [[TMP55]]
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX]], i64 [[TMP9]])
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT16]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP50]], i64 [[TMP9]])
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT17]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP53]], i64 [[TMP9]])
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT18]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP56]], i64 [[TMP9]])
-; CHECK-ORDERED-TF-NEXT: [[TMP57:%.*]] = xor <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
-; CHECK-ORDERED-TF-NEXT: [[TMP58:%.*]] = extractelement <vscale x 8 x i1> [[TMP57]], i32 0
-; CHECK-ORDERED-TF-NEXT: br i1 [[TMP58]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX]], i64 [[TMP6]])
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT16]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP47]], i64 [[TMP6]])
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT17]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP50]], i64 [[TMP6]])
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT18]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP53]], i64 [[TMP6]])
+; CHECK-ORDERED-TF-NEXT: [[TMP54:%.*]] = xor <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
+; CHECK-ORDERED-TF-NEXT: [[TMP55:%.*]] = extractelement <vscale x 8 x i1> [[TMP54]], i32 0
+; CHECK-ORDERED-TF-NEXT: br i1 [[TMP55]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; CHECK-ORDERED-TF: middle.block:
; CHECK-ORDERED-TF-NEXT: br label [[FOR_END:%.*]]
; CHECK-ORDERED-TF: scalar.ph:
@@ -1948,15 +1876,15 @@ define float @fmuladd_strict_fmf(ptr %a, ptr %b, i64 %n) #0 {
; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-ORDERED-TF-NEXT: [[TMP59:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-ORDERED-TF-NEXT: [[TMP56:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
-; CHECK-ORDERED-TF-NEXT: [[TMP60:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
-; CHECK-ORDERED-TF-NEXT: [[MULADD]] = tail call nnan float @llvm.fmuladd.f32(float [[TMP59]], float [[TMP60]], float [[SUM_07]])
+; CHECK-ORDERED-TF-NEXT: [[TMP57:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
+; CHECK-ORDERED-TF-NEXT: [[MULADD]] = tail call nnan float @llvm.fmuladd.f32(float [[TMP56]], float [[TMP57]], float [[SUM_07]])
; CHECK-ORDERED-TF-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-ORDERED-TF-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-ORDERED-TF-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; CHECK-ORDERED-TF: for.end:
-; CHECK-ORDERED-TF-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], [[FOR_BODY]] ], [ [[TMP47]], [[MIDDLE_BLOCK]] ]
+; CHECK-ORDERED-TF-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], [[FOR_BODY]] ], [ [[TMP44]], [[MIDDLE_BLOCK]] ]
; CHECK-ORDERED-TF-NEXT: ret float [[MULADD_LCSSA]]
;
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/simple_early_exit.ll b/llvm/test/Transforms/LoopVectorize/AArch64/simple_early_exit.ll
index eaf85694..4a12aef 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/simple_early_exit.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/simple_early_exit.ll
@@ -22,8 +22,6 @@ define i64 @same_exit_block_pre_inc_use1() #1 {
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 64, [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 64, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 16
; CHECK-NEXT: [[TMP6:%.*]] = add i64 3, [[N_VEC]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
@@ -34,7 +32,7 @@ define i64 @same_exit_block_pre_inc_use1() #1 {
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 16 x i8>, ptr [[TMP13]], align 1
; CHECK-NEXT: [[TMP16:%.*]] = icmp ne <vscale x 16 x i8> [[WIDE_LOAD]], [[WIDE_LOAD2]]
-; CHECK-NEXT: [[INDEX_NEXT3]] = add nuw i64 [[INDEX1]], [[TMP5]]
+; CHECK-NEXT: [[INDEX_NEXT3]] = add nuw i64 [[INDEX1]], [[TMP3]]
; CHECK-NEXT: [[TMP17:%.*]] = call i1 @llvm.vector.reduce.or.nxv16i1(<vscale x 16 x i1> [[TMP16]])
; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT3]], [[N_VEC]]
; CHECK-NEXT: [[TMP19:%.*]] = or i1 [[TMP17]], [[TMP18]]
@@ -264,8 +262,6 @@ define i64 @loop_contains_safe_div() #1 {
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP10]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 64, [[TMP3]]
; CHECK-NEXT: [[INDEX1:%.*]] = sub i64 64, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 3, [[INDEX1]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
@@ -275,7 +271,7 @@ define i64 @loop_contains_safe_div() #1 {
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP1]], align 1
; CHECK-NEXT: [[TMP13:%.*]] = udiv <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 20000)
; CHECK-NEXT: [[TMP15:%.*]] = icmp ne <vscale x 4 x i32> [[TMP13]], splat (i32 1)
-; CHECK-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX2]], [[TMP5]]
+; CHECK-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX2]], [[TMP3]]
; CHECK-NEXT: [[TMP6:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP15]])
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT2]], [[INDEX1]]
; CHECK-NEXT: [[TMP8:%.*]] = or i1 [[TMP6]], [[TMP7]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/single-early-exit-interleave.ll b/llvm/test/Transforms/LoopVectorize/AArch64/single-early-exit-interleave.ll
index 070f658..cc83819 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/single-early-exit-interleave.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/single-early-exit-interleave.ll
@@ -22,8 +22,6 @@ define i64 @same_exit_block_pre_inc_use1() #0 {
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 64
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 510, [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 510, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 64
; CHECK-NEXT: [[INDEX_NEXT:%.*]] = add i64 3, [[N_VEC]]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: vector.body:
@@ -61,7 +59,7 @@ define i64 @same_exit_block_pre_inc_use1() #0 {
; CHECK-NEXT: [[TMP30:%.*]] = icmp ne <vscale x 16 x i8> [[WIDE_LOAD2]], [[WIDE_LOAD6]]
; CHECK-NEXT: [[TMP31:%.*]] = icmp ne <vscale x 16 x i8> [[WIDE_LOAD3]], [[WIDE_LOAD7]]
; CHECK-NEXT: [[TMP59:%.*]] = icmp ne <vscale x 16 x i8> [[WIDE_LOAD4]], [[WIDE_LOAD8]]
-; CHECK-NEXT: [[INDEX_NEXT3]] = add nuw i64 [[INDEX1]], [[TMP5]]
+; CHECK-NEXT: [[INDEX_NEXT3]] = add nuw i64 [[INDEX1]], [[TMP3]]
; CHECK-NEXT: [[TMP34:%.*]] = or <vscale x 16 x i1> [[TMP32]], [[TMP30]]
; CHECK-NEXT: [[TMP37:%.*]] = or <vscale x 16 x i1> [[TMP34]], [[TMP31]]
; CHECK-NEXT: [[TMP33:%.*]] = or <vscale x 16 x i1> [[TMP37]], [[TMP59]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll b/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll
index d32b898..cc88946 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll
@@ -23,8 +23,6 @@ define void @cost_store_i8(ptr %dst) #0 {
; DEFAULT-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 32
; DEFAULT-NEXT: [[N_MOD_VF:%.*]] = urem i64 101, [[TMP5]]
; DEFAULT-NEXT: [[N_VEC:%.*]] = sub i64 101, [[N_MOD_VF]]
-; DEFAULT-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; DEFAULT-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 32
; DEFAULT-NEXT: br label [[VECTOR_BODY:%.*]]
; DEFAULT: vector.body:
; DEFAULT-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -34,7 +32,7 @@ define void @cost_store_i8(ptr %dst) #0 {
; DEFAULT-NEXT: [[TMP24:%.*]] = getelementptr i8, ptr [[TMP9]], i64 [[TMP23]]
; DEFAULT-NEXT: store <vscale x 16 x i8> zeroinitializer, ptr [[TMP9]], align 1
; DEFAULT-NEXT: store <vscale x 16 x i8> zeroinitializer, ptr [[TMP24]], align 1
-; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
+; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; DEFAULT-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; DEFAULT-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; DEFAULT: middle.block:
@@ -52,14 +50,12 @@ define void @cost_store_i8(ptr %dst) #0 {
; DEFAULT-NEXT: [[TMP15:%.*]] = mul nuw i64 [[TMP14]], 8
; DEFAULT-NEXT: [[N_MOD_VF2:%.*]] = urem i64 101, [[TMP15]]
; DEFAULT-NEXT: [[N_VEC3:%.*]] = sub i64 101, [[N_MOD_VF2]]
-; DEFAULT-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64()
-; DEFAULT-NEXT: [[TMP17:%.*]] = mul nuw i64 [[TMP16]], 8
; DEFAULT-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]]
; DEFAULT: vec.epilog.vector.body:
; DEFAULT-NEXT: [[INDEX5:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT6:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
; DEFAULT-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[DST]], i64 [[INDEX5]]
; DEFAULT-NEXT: store <vscale x 8 x i8> zeroinitializer, ptr [[TMP19]], align 1
-; DEFAULT-NEXT: [[INDEX_NEXT6]] = add nuw i64 [[INDEX5]], [[TMP17]]
+; DEFAULT-NEXT: [[INDEX_NEXT6]] = add nuw i64 [[INDEX5]], [[TMP15]]
; DEFAULT-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT6]], [[N_VEC3]]
; DEFAULT-NEXT: br i1 [[TMP21]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; DEFAULT: vec.epilog.middle.block:
@@ -85,12 +81,6 @@ define void @cost_store_i8(ptr %dst) #0 {
; PRED: vector.ph:
; PRED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; PRED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 16
-; PRED-NEXT: [[TMP4:%.*]] = sub i64 [[TMP1]], 1
-; PRED-NEXT: [[N_RND_UP:%.*]] = add i64 101, [[TMP4]]
-; PRED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; PRED-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; PRED-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; PRED-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
; PRED-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; PRED-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16
; PRED-NEXT: [[TMP9:%.*]] = sub i64 101, [[TMP8]]
@@ -103,7 +93,7 @@ define void @cost_store_i8(ptr %dst) #0 {
; PRED-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 16 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
; PRED-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[DST]], i64 [[INDEX]]
; PRED-NEXT: call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> zeroinitializer, ptr [[TMP13]], i32 1, <vscale x 16 x i1> [[ACTIVE_LANE_MASK]])
-; PRED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP6]]
+; PRED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]]
; PRED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 [[INDEX]], i64 [[TMP11]])
; PRED-NEXT: [[TMP15:%.*]] = xor <vscale x 16 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
; PRED-NEXT: [[TMP16:%.*]] = extractelement <vscale x 16 x i1> [[TMP15]], i32 0
@@ -227,12 +217,6 @@ define void @trunc_store(ptr %dst, ptr %src, i16 %x) #1 {
; PRED: vector.ph:
; PRED-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
; PRED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP10]], 2
-; PRED-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; PRED-NEXT: [[N_RND_UP:%.*]] = add i64 1000, [[TMP2]]
-; PRED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; PRED-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; PRED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; PRED-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
; PRED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i16> poison, i16 [[X]], i64 0
; PRED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i16> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
; PRED-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1000)
@@ -248,7 +232,7 @@ define void @trunc_store(ptr %dst, ptr %src, i16 %x) #1 {
; PRED-NEXT: [[TMP9:%.*]] = and <vscale x 2 x i8> [[TMP8]], [[TMP11]]
; PRED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[DST]], i64 [[INDEX]]
; PRED-NEXT: call void @llvm.masked.store.nxv2i8.p0(<vscale x 2 x i8> [[TMP9]], ptr [[TMP5]], i32 1, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]]), !alias.scope [[META7:![0-9]+]], !noalias [[META4]]
-; PRED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]]
+; PRED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]]
; PRED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 1000)
; PRED-NEXT: [[TMP12:%.*]] = xor <vscale x 2 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
; PRED-NEXT: [[TMP13:%.*]] = extractelement <vscale x 2 x i1> [[TMP12]], i32 0
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/struct-return-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/struct-return-cost.ll
index c721493..bdbbfdf 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/struct-return-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/struct-return-cost.ll
@@ -82,8 +82,8 @@ define void @struct_return_replicate(ptr noalias %in, ptr noalias writeonly %out
; CHECK: [[ENTRY:.*:]]
; CHECK: [[VECTOR_PH:.*:]]
; CHECK: [[VECTOR_BODY:.*:]]
-; CHECK: [[TMP4:%.*]] = tail call { half, half } @foo(half [[TMP3:%.*]]) #[[ATTR3:[0-9]+]]
-; CHECK: [[TMP6:%.*]] = tail call { half, half } @foo(half [[TMP5:%.*]]) #[[ATTR3]]
+; CHECK: [[TMP2:%.*]] = tail call { half, half } @foo(half [[TMP1:%.*]]) #[[ATTR3:[0-9]+]]
+; CHECK: [[TMP4:%.*]] = tail call { half, half } @foo(half [[TMP3:%.*]]) #[[ATTR3]]
; CHECK: [[MIDDLE_BLOCK:.*:]]
; CHECK: [[SCALAR_PH:.*:]]
; CHECK: [[FOR_BODY:.*:]]
@@ -149,10 +149,9 @@ exit:
define void @struct_return_scalable(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) #2 {
; CHECK-LABEL: define void @struct_return_scalable(
; CHECK-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) #[[ATTR0:[0-9]+]] {
-; CHECK: [[ENTRY:.*:]]
-; CHECK: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK: [[VECTOR_PH:.*:]]
; CHECK: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK: [[VECTOR_PH1:.*:]]
; CHECK: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK: [[VECTOR_BODY:.*:]]
; CHECK: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-cond-inv-loads.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-cond-inv-loads.ll
index 67f5083..495f9c0 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-cond-inv-loads.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-cond-inv-loads.ll
@@ -10,11 +10,10 @@ define void @cond_inv_load_i32i32i16(ptr noalias nocapture %a, ptr noalias nocap
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4
-; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]]
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2
+; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP4]]
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNOT]]
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x ptr> poison, ptr [[INV:%.*]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x ptr> [[BROADCAST_SPLATINSERT]], <vscale x 4 x ptr> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -69,11 +68,10 @@ define void @cond_inv_load_f64f64f64(ptr noalias nocapture %a, ptr noalias nocap
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4
-; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]]
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2
+; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP4]]
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNOT]]
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x ptr> poison, ptr [[INV:%.*]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x ptr> [[BROADCAST_SPLATINSERT]], <vscale x 4 x ptr> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -126,11 +124,10 @@ define void @invariant_load_cond(ptr noalias nocapture %a, ptr nocapture readonl
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4
-; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]]
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2
+; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP4]]
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNOT]]
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw i8, ptr [[B:%.*]], i64 168
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x ptr> poison, ptr [[TMP5]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x ptr> [[BROADCAST_SPLATINSERT]], <vscale x 4 x ptr> poison, <vscale x 4 x i32> zeroinitializer
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-inloop-reductions.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-inloop-reductions.ll
index e555785..d89c525 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-inloop-reductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-inloop-reductions.ll
@@ -19,8 +19,6 @@ define i64 @int_reduction_and(ptr noalias nocapture %a, i64 %N) {
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP21:%.*]] = mul nuw i64 [[TMP20]], 4
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -36,7 +34,7 @@ define i64 @int_reduction_and(ptr noalias nocapture %a, i64 %N) {
; CHECK-NEXT: [[TMP17]] = and i64 [[VEC_PHI]], [[TMP16]]
; CHECK-NEXT: [[TMP18:%.*]] = call i64 @llvm.vector.reduce.and.nxv2i64(<vscale x 2 x i64> [[WIDE_LOAD3]])
; CHECK-NEXT: [[TMP19]] = and i64 [[VEC_PHI2]], [[TMP18]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP21]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-reductions.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-reductions.ll
index af9c39e..5e22532 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-reductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-reductions.ll
@@ -19,8 +19,6 @@ define i64 @int_reduction_add(ptr %a, i64 %N) {
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP19:%.*]] = mul nuw i64 [[TMP18]], 4
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -34,7 +32,7 @@ define i64 @int_reduction_add(ptr %a, i64 %N) {
; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 2 x i64>, ptr [[TMP15]], align 8
; CHECK-NEXT: [[TMP16]] = add <vscale x 2 x i64> [[WIDE_LOAD]], [[VEC_PHI]]
; CHECK-NEXT: [[TMP17]] = add <vscale x 2 x i64> [[WIDE_LOAD3]], [[VEC_PHI2]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP19]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-strict-reductions.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-strict-reductions.ll
index fca29cd..52117e3f 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-strict-reductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-strict-reductions.ll
@@ -19,8 +19,6 @@ define float @fadd_strict(ptr noalias nocapture readonly %a, i64 %n) {
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -33,7 +31,7 @@ define float @fadd_strict(ptr noalias nocapture readonly %a, i64 %n) {
; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 4 x float>, ptr [[TMP17]], align 4
; CHECK-NEXT: [[TMP18:%.*]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[VEC_PHI]], <vscale x 4 x float> [[WIDE_LOAD]])
; CHECK-NEXT: [[TMP19]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[TMP18]], <vscale x 4 x float> [[WIDE_LOAD2]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect.ll
index 0f407cd..a521bfa 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect.ll
@@ -35,8 +35,6 @@ define void @main_vf_vscale_x_16(ptr %A) #0 {
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 32
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP5]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 32
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -46,7 +44,7 @@ define void @main_vf_vscale_x_16(ptr %A) #0 {
; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i8, ptr [[TMP14]], i64 [[TMP18]]
; CHECK-NEXT: store <vscale x 16 x i8> splat (i8 1), ptr [[TMP14]], align 1
; CHECK-NEXT: store <vscale x 16 x i8> splat (i8 1), ptr [[TMP19]], align 1
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
@@ -64,14 +62,12 @@ define void @main_vf_vscale_x_16(ptr %A) #0 {
; CHECK-NEXT: [[TMP24:%.*]] = mul nuw i64 [[TMP23]], 8
; CHECK-NEXT: [[N_MOD_VF2:%.*]] = urem i64 1024, [[TMP24]]
; CHECK-NEXT: [[N_VEC3:%.*]] = sub i64 1024, [[N_MOD_VF2]]
-; CHECK-NEXT: [[TMP25:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP26:%.*]] = mul nuw i64 [[TMP25]], 8
; CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]]
; CHECK: vec.epilog.vector.body:
; CHECK-NEXT: [[INDEX5:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT6:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX5]]
; CHECK-NEXT: store <vscale x 8 x i8> splat (i8 1), ptr [[TMP28]], align 1
-; CHECK-NEXT: [[INDEX_NEXT6]] = add nuw i64 [[INDEX5]], [[TMP26]]
+; CHECK-NEXT: [[INDEX_NEXT6]] = add nuw i64 [[INDEX5]], [[TMP24]]
; CHECK-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT6]], [[N_VEC3]]
; CHECK-NEXT: br i1 [[TMP30]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: vec.epilog.middle.block:
@@ -95,8 +91,6 @@ define void @main_vf_vscale_x_16(ptr %A) #0 {
; CHECK-VF8-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 32
; CHECK-VF8-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-VF8-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-VF8-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-VF8-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 32
; CHECK-VF8-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-VF8: vector.body:
; CHECK-VF8-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -106,7 +100,7 @@ define void @main_vf_vscale_x_16(ptr %A) #0 {
; CHECK-VF8-NEXT: [[TMP17:%.*]] = getelementptr inbounds i8, ptr [[TMP12]], i64 [[TMP16]]
; CHECK-VF8-NEXT: store <vscale x 16 x i8> splat (i8 1), ptr [[TMP12]], align 1
; CHECK-VF8-NEXT: store <vscale x 16 x i8> splat (i8 1), ptr [[TMP17]], align 1
-; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-VF8-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-VF8-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-VF8: middle.block:
@@ -159,8 +153,6 @@ define void @main_vf_vscale_x_2_no_epi_iteration(ptr %A) #0 vscale_range(8, 8) {
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -170,7 +162,7 @@ define void @main_vf_vscale_x_2_no_epi_iteration(ptr %A) #0 vscale_range(8, 8) {
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[TMP6]], i64 [[TMP8]]
; CHECK-NEXT: store <vscale x 2 x i64> splat (i64 1), ptr [[TMP6]], align 1
; CHECK-NEXT: store <vscale x 2 x i64> splat (i64 1), ptr [[TMP9]], align 1
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
@@ -194,8 +186,6 @@ define void @main_vf_vscale_x_2_no_epi_iteration(ptr %A) #0 vscale_range(8, 8) {
; CHECK-VF8-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-VF8-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-VF8-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-VF8-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-VF8-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-VF8-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-VF8: vector.body:
; CHECK-VF8-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -205,7 +195,7 @@ define void @main_vf_vscale_x_2_no_epi_iteration(ptr %A) #0 vscale_range(8, 8) {
; CHECK-VF8-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[TMP6]], i64 [[TMP8]]
; CHECK-VF8-NEXT: store <vscale x 2 x i64> splat (i64 1), ptr [[TMP6]], align 1
; CHECK-VF8-NEXT: store <vscale x 2 x i64> splat (i64 1), ptr [[TMP9]], align 1
-; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-VF8-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-VF8-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK-VF8: middle.block:
@@ -276,8 +266,6 @@ define void @main_vf_vscale_x_2(ptr %A, i64 %n) #0 vscale_range(8, 8) {
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -287,7 +275,7 @@ define void @main_vf_vscale_x_2(ptr %A, i64 %n) #0 vscale_range(8, 8) {
; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i64, ptr [[TMP12]], i64 [[TMP16]]
; CHECK-NEXT: store <vscale x 2 x i64> splat (i64 1), ptr [[TMP12]], align 1
; CHECK-NEXT: store <vscale x 2 x i64> splat (i64 1), ptr [[TMP17]], align 1
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: middle.block:
@@ -331,8 +319,6 @@ define void @main_vf_vscale_x_2(ptr %A, i64 %n) #0 vscale_range(8, 8) {
; CHECK-VF8-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-VF8-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-VF8-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-VF8-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-VF8-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-VF8-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-VF8: vector.body:
; CHECK-VF8-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -342,7 +328,7 @@ define void @main_vf_vscale_x_2(ptr %A, i64 %n) #0 vscale_range(8, 8) {
; CHECK-VF8-NEXT: [[TMP17:%.*]] = getelementptr inbounds i64, ptr [[TMP12]], i64 [[TMP16]]
; CHECK-VF8-NEXT: store <vscale x 2 x i64> splat (i64 1), ptr [[TMP12]], align 1
; CHECK-VF8-NEXT: store <vscale x 2 x i64> splat (i64 1), ptr [[TMP17]], align 1
-; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-VF8-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-VF8-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK-VF8: middle.block:
@@ -406,8 +392,6 @@ define void @test_pr57912_pointer_induction(ptr %start) #0 {
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 32
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 10000, [[TMP5]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 10000, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 32
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -417,7 +401,7 @@ define void @test_pr57912_pointer_induction(ptr %start) #0 {
; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[TMP14]], i64 [[TMP18]]
; CHECK-NEXT: store <vscale x 16 x i8> zeroinitializer, ptr [[TMP14]], align 1
; CHECK-NEXT: store <vscale x 16 x i8> zeroinitializer, ptr [[TMP19]], align 1
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: middle.block:
@@ -436,15 +420,13 @@ define void @test_pr57912_pointer_induction(ptr %start) #0 {
; CHECK-NEXT: [[TMP24:%.*]] = mul nuw i64 [[TMP23]], 8
; CHECK-NEXT: [[N_MOD_VF2:%.*]] = urem i64 10000, [[TMP24]]
; CHECK-NEXT: [[N_VEC3:%.*]] = sub i64 10000, [[N_MOD_VF2]]
-; CHECK-NEXT: [[TMP25:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP26:%.*]] = mul nuw i64 [[TMP25]], 8
; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[START]], i64 [[N_VEC3]]
; CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]]
; CHECK: vec.epilog.vector.body:
; CHECK-NEXT: [[INDEX7:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT8:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[START]], i64 [[INDEX7]]
; CHECK-NEXT: store <vscale x 8 x i8> zeroinitializer, ptr [[TMP28]], align 1
-; CHECK-NEXT: [[INDEX_NEXT8]] = add nuw i64 [[INDEX7]], [[TMP26]]
+; CHECK-NEXT: [[INDEX_NEXT8]] = add nuw i64 [[INDEX7]], [[TMP24]]
; CHECK-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT8]], [[N_VEC3]]
; CHECK-NEXT: br i1 [[TMP30]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: vec.epilog.middle.block:
@@ -469,8 +451,6 @@ define void @test_pr57912_pointer_induction(ptr %start) #0 {
; CHECK-VF8-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 32
; CHECK-VF8-NEXT: [[N_MOD_VF:%.*]] = urem i64 10000, [[TMP3]]
; CHECK-VF8-NEXT: [[N_VEC:%.*]] = sub i64 10000, [[N_MOD_VF]]
-; CHECK-VF8-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-VF8-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 32
; CHECK-VF8-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-VF8: vector.body:
; CHECK-VF8-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -480,7 +460,7 @@ define void @test_pr57912_pointer_induction(ptr %start) #0 {
; CHECK-VF8-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[TMP12]], i64 [[TMP16]]
; CHECK-VF8-NEXT: store <vscale x 16 x i8> zeroinitializer, ptr [[TMP12]], align 1
; CHECK-VF8-NEXT: store <vscale x 16 x i8> zeroinitializer, ptr [[TMP17]], align 1
-; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-VF8-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-VF8-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK-VF8: middle.block:
@@ -545,8 +525,6 @@ define void @trip_count_vscale(ptr noalias %a, ptr noalias %b) vscale_range(1, 1
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP5]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 8
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -569,7 +547,7 @@ define void @trip_count_vscale(ptr noalias %a, ptr noalias %b) vscale_range(1, 1
; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw float, ptr [[TMP13]], i64 [[TMP21]]
; CHECK-NEXT: store <vscale x 4 x float> [[TMP18]], ptr [[TMP13]], align 4
; CHECK-NEXT: store <vscale x 4 x float> [[TMP19]], ptr [[TMP22]], align 4
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; CHECK-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK: middle.block:
@@ -587,8 +565,6 @@ define void @trip_count_vscale(ptr noalias %a, ptr noalias %b) vscale_range(1, 1
; CHECK-NEXT: [[TMP27:%.*]] = mul nuw i64 [[TMP26]], 2
; CHECK-NEXT: [[N_MOD_VF5:%.*]] = urem i64 [[N]], [[TMP27]]
; CHECK-NEXT: [[N_VEC6:%.*]] = sub i64 [[N]], [[N_MOD_VF5]]
-; CHECK-NEXT: [[TMP28:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP29:%.*]] = mul nuw i64 [[TMP28]], 2
; CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]]
; CHECK: vec.epilog.vector.body:
; CHECK-NEXT: [[INDEX7:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT10:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
@@ -598,7 +574,7 @@ define void @trip_count_vscale(ptr noalias %a, ptr noalias %b) vscale_range(1, 1
; CHECK-NEXT: [[WIDE_LOAD9:%.*]] = load <vscale x 2 x float>, ptr [[TMP32]], align 4
; CHECK-NEXT: [[TMP34:%.*]] = fmul <vscale x 2 x float> [[WIDE_LOAD8]], [[WIDE_LOAD9]]
; CHECK-NEXT: store <vscale x 2 x float> [[TMP34]], ptr [[TMP32]], align 4
-; CHECK-NEXT: [[INDEX_NEXT10]] = add nuw i64 [[INDEX7]], [[TMP29]]
+; CHECK-NEXT: [[INDEX_NEXT10]] = add nuw i64 [[INDEX7]], [[TMP27]]
; CHECK-NEXT: [[TMP35:%.*]] = icmp eq i64 [[INDEX_NEXT10]], [[N_VEC6]]
; CHECK-NEXT: br i1 [[TMP35]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK: vec.epilog.middle.block:
@@ -621,8 +597,6 @@ define void @trip_count_vscale(ptr noalias %a, ptr noalias %b) vscale_range(1, 1
; CHECK-VF8-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-VF8-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-VF8-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-VF8-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-VF8-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-VF8-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-VF8: vector.body:
; CHECK-VF8-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -645,7 +619,7 @@ define void @trip_count_vscale(ptr noalias %a, ptr noalias %b) vscale_range(1, 1
; CHECK-VF8-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw float, ptr [[TMP11]], i64 [[TMP19]]
; CHECK-VF8-NEXT: store <vscale x 4 x float> [[TMP16]], ptr [[TMP11]], align 4
; CHECK-VF8-NEXT: store <vscale x 4 x float> [[TMP17]], ptr [[TMP20]], align 4
-; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-VF8-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-VF8-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK-VF8: middle.block:
@@ -697,8 +671,6 @@ define void @trip_count_vscale_no_epilogue_iterations(ptr noalias %a, ptr noalia
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP5]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 8
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -721,7 +693,7 @@ define void @trip_count_vscale_no_epilogue_iterations(ptr noalias %a, ptr noalia
; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw float, ptr [[TMP13]], i64 [[TMP21]]
; CHECK-NEXT: store <vscale x 4 x float> [[TMP18]], ptr [[TMP13]], align 4
; CHECK-NEXT: store <vscale x 4 x float> [[TMP19]], ptr [[TMP22]], align 4
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; CHECK-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK: middle.block:
@@ -739,8 +711,6 @@ define void @trip_count_vscale_no_epilogue_iterations(ptr noalias %a, ptr noalia
; CHECK-NEXT: [[TMP27:%.*]] = mul nuw i64 [[TMP26]], 2
; CHECK-NEXT: [[N_MOD_VF5:%.*]] = urem i64 [[N]], [[TMP27]]
; CHECK-NEXT: [[N_VEC6:%.*]] = sub i64 [[N]], [[N_MOD_VF5]]
-; CHECK-NEXT: [[TMP28:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP29:%.*]] = mul nuw i64 [[TMP28]], 2
; CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]]
; CHECK: vec.epilog.vector.body:
; CHECK-NEXT: [[INDEX7:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT10:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
@@ -750,7 +720,7 @@ define void @trip_count_vscale_no_epilogue_iterations(ptr noalias %a, ptr noalia
; CHECK-NEXT: [[WIDE_LOAD9:%.*]] = load <vscale x 2 x float>, ptr [[TMP32]], align 4
; CHECK-NEXT: [[TMP34:%.*]] = fmul <vscale x 2 x float> [[WIDE_LOAD8]], [[WIDE_LOAD9]]
; CHECK-NEXT: store <vscale x 2 x float> [[TMP34]], ptr [[TMP32]], align 4
-; CHECK-NEXT: [[INDEX_NEXT10]] = add nuw i64 [[INDEX7]], [[TMP29]]
+; CHECK-NEXT: [[INDEX_NEXT10]] = add nuw i64 [[INDEX7]], [[TMP27]]
; CHECK-NEXT: [[TMP35:%.*]] = icmp eq i64 [[INDEX_NEXT10]], [[N_VEC6]]
; CHECK-NEXT: br i1 [[TMP35]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; CHECK: vec.epilog.middle.block:
@@ -773,8 +743,6 @@ define void @trip_count_vscale_no_epilogue_iterations(ptr noalias %a, ptr noalia
; CHECK-VF8-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-VF8-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-VF8-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-VF8-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-VF8-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-VF8-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-VF8: vector.body:
; CHECK-VF8-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -797,7 +765,7 @@ define void @trip_count_vscale_no_epilogue_iterations(ptr noalias %a, ptr noalia
; CHECK-VF8-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw float, ptr [[TMP11]], i64 [[TMP19]]
; CHECK-VF8-NEXT: store <vscale x 4 x float> [[TMP16]], ptr [[TMP11]], align 4
; CHECK-VF8-NEXT: store <vscale x 4 x float> [[TMP17]], ptr [[TMP20]], align 4
-; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-VF8-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-VF8-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK-VF8: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-fixed-width-inorder-core.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-fixed-width-inorder-core.ll
index 20bc0af..76a7536 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-fixed-width-inorder-core.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-fixed-width-inorder-core.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt < %s -mtriple=aarch64-none-elf -mcpu=cortex-a510 -mattr=+sve -passes=loop-vectorize -S | FileCheck %s --check-prefix=CHECK-CA510
; RUN: opt < %s -mtriple=aarch64-none-elf -mcpu=cortex-a520 -mattr=+sve -passes=loop-vectorize -S | FileCheck %s --check-prefix=CHECK-CA520
+; RUN: opt < %s -mtriple=aarch64-none-elf -mcpu=cortex-a320 -mattr=+sve -passes=loop-vectorize -S | FileCheck %s --check-prefix=CHECK-CA320
define void @sve_add(ptr %dst, ptr %a, ptr %b, i64 %n) {
; CHECK-CA510-LABEL: define void @sve_add(
@@ -131,6 +132,70 @@ define void @sve_add(ptr %dst, ptr %a, ptr %b, i64 %n) {
; CHECK-CA520: [[FOR_COND_CLEANUP]]:
; CHECK-CA520-NEXT: ret void
;
+; CHECK-CA320-LABEL: define void @sve_add(
+; CHECK-CA320-SAME: ptr [[DST:%.*]], ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-CA320-NEXT: [[ENTRY:.*:]]
+; CHECK-CA320-NEXT: [[B3:%.*]] = ptrtoint ptr [[B]] to i64
+; CHECK-CA320-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
+; CHECK-CA320-NEXT: [[DST1:%.*]] = ptrtoint ptr [[DST]] to i64
+; CHECK-CA320-NEXT: [[CMP9_NOT:%.*]] = icmp eq i64 [[N]], 0
+; CHECK-CA320-NEXT: br i1 [[CMP9_NOT]], label %[[FOR_COND_CLEANUP:.*]], label %[[FOR_BODY_PREHEADER:.*]]
+; CHECK-CA320: [[FOR_BODY_PREHEADER]]:
+; CHECK-CA320-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 8
+; CHECK-CA320-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; CHECK-CA320: [[VECTOR_MEMCHECK]]:
+; CHECK-CA320-NEXT: [[TMP0:%.*]] = sub i64 [[DST1]], [[A2]]
+; CHECK-CA320-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP0]], 32
+; CHECK-CA320-NEXT: [[TMP1:%.*]] = sub i64 [[DST1]], [[B3]]
+; CHECK-CA320-NEXT: [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP1]], 32
+; CHECK-CA320-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]]
+; CHECK-CA320-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; CHECK-CA320: [[VECTOR_PH]]:
+; CHECK-CA320-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 8
+; CHECK-CA320-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-CA320-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK-CA320: [[VECTOR_BODY]]:
+; CHECK-CA320-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-CA320-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDEX]]
+; CHECK-CA320-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw float, ptr [[TMP2]], i32 4
+; CHECK-CA320-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP2]], align 4
+; CHECK-CA320-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x float>, ptr [[TMP3]], align 4
+; CHECK-CA320-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDEX]]
+; CHECK-CA320-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw float, ptr [[TMP4]], i32 4
+; CHECK-CA320-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x float>, ptr [[TMP4]], align 4
+; CHECK-CA320-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x float>, ptr [[TMP5]], align 4
+; CHECK-CA320-NEXT: [[TMP6:%.*]] = fadd fast <4 x float> [[WIDE_LOAD6]], [[WIDE_LOAD]]
+; CHECK-CA320-NEXT: [[TMP7:%.*]] = fadd fast <4 x float> [[WIDE_LOAD7]], [[WIDE_LOAD5]]
+; CHECK-CA320-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw float, ptr [[DST]], i64 [[INDEX]]
+; CHECK-CA320-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw float, ptr [[TMP8]], i32 4
+; CHECK-CA320-NEXT: store <4 x float> [[TMP6]], ptr [[TMP8]], align 4
+; CHECK-CA320-NEXT: store <4 x float> [[TMP7]], ptr [[TMP9]], align 4
+; CHECK-CA320-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; CHECK-CA320-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-CA320-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-CA320: [[MIDDLE_BLOCK]]:
+; CHECK-CA320-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-CA320-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-CA320: [[SCALAR_PH]]:
+; CHECK-CA320-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[FOR_BODY_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
+; CHECK-CA320-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK-CA320: [[FOR_BODY]]:
+; CHECK-CA320-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; CHECK-CA320-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-CA320-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-CA320-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-CA320-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
+; CHECK-CA320-NEXT: [[ADD:%.*]] = fadd fast float [[TMP12]], [[TMP11]]
+; CHECK-CA320-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw float, ptr [[DST]], i64 [[INDVARS_IV]]
+; CHECK-CA320-NEXT: store float [[ADD]], ptr [[ARRAYIDX4]], align 4
+; CHECK-CA320-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-CA320-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
+; CHECK-CA320-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP_LOOPEXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-CA320: [[FOR_COND_CLEANUP_LOOPEXIT]]:
+; CHECK-CA320-NEXT: br label %[[FOR_COND_CLEANUP]]
+; CHECK-CA320: [[FOR_COND_CLEANUP]]:
+; CHECK-CA320-NEXT: ret void
+;
entry:
%cmp9.not = icmp eq i64 %n, 0
br i1 %cmp9.not, label %for.cond.cleanup, label %for.body
@@ -160,3 +225,8 @@ for.cond.cleanup: ; preds = %for.cond.cleanup.lo
; CHECK-CA520: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
; CHECK-CA520: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]}
;.
+; CHECK-CA320: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+; CHECK-CA320: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
+; CHECK-CA320: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK-CA320: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]}
+;.
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-fneg.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-fneg.ll
index 24f93f0..863dae7 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-fneg.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-fneg.ll
@@ -31,8 +31,6 @@ define void @fneg(ptr nocapture noundef writeonly %d, ptr nocapture noundef read
; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 16
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP7]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -50,7 +48,7 @@ define void @fneg(ptr nocapture noundef writeonly %d, ptr nocapture noundef read
; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds half, ptr [[TMP18]], i64 [[TMP21]]
; CHECK-NEXT: store <vscale x 8 x half> [[TMP16]], ptr [[TMP18]], align 2
; CHECK-NEXT: store <vscale x 8 x half> [[TMP17]], ptr [[TMP22]], align 2
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
; CHECK-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-gather-scatter.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-gather-scatter.ll
index 663cf41..d336f5f 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-gather-scatter.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-gather-scatter.ll
@@ -10,11 +10,10 @@ define void @gather_nxv4i32_ind64(ptr noalias nocapture readonly %a, ptr noalias
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4
-; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]]
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2
+; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP4]]
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNOT]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -62,11 +61,10 @@ define void @scatter_nxv4i32_ind32(ptr noalias nocapture %a, ptr noalias nocaptu
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4
-; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]]
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2
+; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP4]]
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNOT]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -113,11 +111,10 @@ define void @scatter_inv_nxv4i32(ptr noalias nocapture %inv, ptr noalias nocaptu
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4
-; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]]
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2
+; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP4]]
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNOT]]
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x ptr> poison, ptr [[INV:%.*]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x ptr> [[BROADCAST_SPLATINSERT]], <vscale x 4 x ptr> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -166,11 +163,10 @@ define void @gather_inv_nxv4i32(ptr noalias nocapture %a, ptr noalias nocapture
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4
-; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]]
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2
+; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP4]]
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNOT]]
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x ptr> poison, ptr [[INV:%.*]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x ptr> [[BROADCAST_SPLATINSERT]], <vscale x 4 x ptr> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -228,14 +224,12 @@ define void @gather_nxv4i32_ind64_stride2(ptr noalias nocapture %a, ptr noalias
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[N_VEC]], 0
; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i64 [[TMP7]], i64 [[N_VEC]]
; CHECK-NEXT: [[N_VEC1:%.*]] = sub i64 [[N]], [[TMP6]]
-; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 3
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[DOTIDX1:%.*]] = shl i64 [[INDEX]], 3
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[B:%.*]], i64 [[DOTIDX1]]
-; CHECK-NEXT: [[DOTIDX3:%.*]] = shl nuw nsw i64 [[TMP3]], 5
+; CHECK-NEXT: [[DOTIDX3:%.*]] = shl nuw nsw i64 [[TMP2]], 5
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[B]], i64 [[DOTIDX3]]
; CHECK-NEXT: [[DOTIDX4:%.*]] = shl i64 [[INDEX]], 3
; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i8, ptr [[TMP11]], i64 [[DOTIDX4]]
@@ -251,7 +245,7 @@ define void @gather_nxv4i32_ind64_stride2(ptr noalias nocapture %a, ptr noalias
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP12]], i64 [[DOTIDX]]
; CHECK-NEXT: store <vscale x 4 x float> [[WIDE_MASKED_GATHER]], ptr [[TMP12]], align 4
; CHECK-NEXT: store <vscale x 4 x float> [[WIDE_MASKED_GATHER2]], ptr [[TMP14]], align 4
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC1]]
; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions-unusual-types.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions-unusual-types.ll
index fefb5af..351da8b 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions-unusual-types.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions-unusual-types.ll
@@ -12,13 +12,11 @@ define void @induction_i7(ptr %dst) #0 {
; CHECK-LABEL: define void @induction_i7(
; CHECK-SAME: ptr [[DST:%.*]])
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 64, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 64, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP40:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP40]], 2
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 64, [[TMP5]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 64, [[N_MOD_VF]]
; CHECK-NEXT: [[IND_END:%.*]] = trunc i64 [[N_VEC]] to i7
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP40]], i64 0
; CHECK-NEXT: [[DOTSPLAT_:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
@@ -76,13 +74,11 @@ define void @induction_i3_zext(ptr %dst) #0 {
; CHECK-LABEL: define void @induction_i3_zext(
; CHECK-SAME: ptr [[DST:%.*]])
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 64, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 64, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP40:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP40]], 2
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 64, [[TMP5]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 64, [[N_MOD_VF]]
; CHECK-NEXT: [[IND_END:%.*]] = trunc i64 [[N_VEC]] to i3
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP40]], i64 0
; CHECK-NEXT: [[DOTSPLAT_:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions.ll
index 4f0637f..95836f8 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions.ll
@@ -23,10 +23,8 @@ define void @cond_ind64(ptr noalias nocapture %a, ptr noalias nocapture readonly
; CHECK-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 2
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = shl nuw i64 [[TMP4]], 2
; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP5]], i64 0
+; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP3]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
@@ -37,7 +35,7 @@ define void @cond_ind64(ptr noalias nocapture %a, ptr noalias nocapture readonly
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP10]], i32 4, <vscale x 4 x i1> [[TMP9]], <vscale x 4 x i32> poison)
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[WIDE_MASKED_LOAD]], ptr [[TMP11]], i32 4, <vscale x 4 x i1> [[TMP9]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll
index 47ce05d..fd0bc0b 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll
@@ -437,8 +437,6 @@ define void @even_load_static_tc(ptr noalias nocapture readonly %A, ptr noalias
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
; CHECK-NEXT: [[N_VEC:%.*]] = sub nuw nsw i64 512, [[TMP1]]
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = shl nuw nsw i64 [[TMP2]], 2
; CHECK-NEXT: [[IND_END:%.*]] = shl nuw nsw i64 [[N_VEC]], 1
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
@@ -452,7 +450,7 @@ define void @even_load_static_tc(ptr noalias nocapture readonly %A, ptr noalias
; CHECK-NEXT: [[TMP7:%.*]] = and i64 [[INDEX]], 9223372036854775804
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i32, ptr [[B:%.*]], i64 [[TMP7]]
; CHECK-NEXT: store <vscale x 4 x i32> [[TMP6]], ptr [[TMP8]], align 4
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]]
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: middle.block:
@@ -508,8 +506,6 @@ define void @even_load_dynamic_tc(ptr noalias nocapture readonly %A, ptr noalias
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 [[TMP6]], i64 [[N_MOD_VF]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP4]], [[TMP9]]
-; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP11:%.*]] = shl nuw nsw i64 [[TMP10]], 2
; CHECK-NEXT: [[IND_END:%.*]] = shl i64 [[N_VEC]], 1
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
@@ -523,7 +519,7 @@ define void @even_load_dynamic_tc(ptr noalias nocapture readonly %A, ptr noalias
; CHECK-NEXT: [[TMP15:%.*]] = and i64 [[INDEX]], 9223372036854775804
; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw i32, ptr [[B:%.*]], i64 [[TMP15]]
; CHECK-NEXT: store <vscale x 4 x i32> [[TMP14]], ptr [[TMP16]], align 4
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP11]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK: middle.block:
@@ -803,12 +799,10 @@ define void @PR27626_0(ptr %p, i32 %z, i64 %n) #1 {
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i64 [[TMP3]], i64 [[N_MOD_VF]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub nsw i64 [[SMAX]], [[TMP6]]
-; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP8:%.*]] = shl nuw nsw i64 [[TMP7]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[Z:%.*]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP8]], i64 0
+; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP3]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
@@ -822,7 +816,7 @@ define void @PR27626_0(ptr %p, i32 %z, i64 %n) #1 {
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]])
; CHECK-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP15]], <vscale x 4 x ptr> [[TMP13]], i32 4, <vscale x 4 x i1> splat (i1 true))
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
@@ -875,10 +869,8 @@ define i32 @PR27626_1(ptr %p, i64 %n) #1 {
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i64 [[TMP3]], i64 [[N_MOD_VF]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub nsw i64 [[SMAX]], [[TMP6]]
-; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP8:%.*]] = shl nuw nsw i64 [[TMP7]], 2
; CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP8]], i64 0
+; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP3]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
@@ -896,7 +888,7 @@ define i32 @PR27626_1(ptr %p, i64 %n) #1 {
; CHECK-NEXT: [[STRIDED_VEC2:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC1]])
; CHECK-NEXT: [[TMP16:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC2]], 0
; CHECK-NEXT: [[TMP17]] = add <vscale x 4 x i32> [[TMP16]], [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
@@ -952,12 +944,10 @@ define void @PR27626_2(ptr %p, i64 %n, i32 %z) #1 {
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i64 [[TMP3]], i64 [[N_MOD_VF]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub nsw i64 [[SMAX]], [[TMP6]]
-; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP8:%.*]] = shl nuw nsw i64 [[TMP7]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[Z:%.*]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP8]], i64 0
+; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP3]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
@@ -971,7 +961,7 @@ define void @PR27626_2(ptr %p, i64 %n, i32 %z) #1 {
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]])
; CHECK-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP15]], <vscale x 4 x ptr> [[TMP14]], i32 4, <vscale x 4 x i1> splat (i1 true))
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
@@ -1025,10 +1015,8 @@ define i32 @PR27626_3(ptr %p, i64 %n, i32 %z) #1 {
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i64 [[TMP3]], i64 [[N_MOD_VF]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub nsw i64 [[SMAX]], [[TMP6]]
-; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP8:%.*]] = shl nuw nsw i64 [[TMP7]], 2
; CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP8]], i64 0
+; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP3]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
@@ -1047,7 +1035,7 @@ define i32 @PR27626_3(ptr %p, i64 %n, i32 %z) #1 {
; CHECK-NEXT: [[STRIDED_VEC2:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC1]])
; CHECK-NEXT: [[TMP17:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC2]], 0
; CHECK-NEXT: [[TMP18]] = add <vscale x 4 x i32> [[TMP17]], [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
@@ -1103,11 +1091,10 @@ define void @PR27626_4(ptr %a, i32 %x, i32 %y, i32 %z, i64 %n) #1 {
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp samesign ult i64 [[TMP2]], [[TMP4]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP5]], -4
-; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[TMP2]], [[DOTNEG]]
; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP7:%.*]] = shl nuw nsw i64 [[TMP6]], 2
+; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP7]]
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[TMP2]], [[DOTNOT]]
; CHECK-NEXT: [[IND_END:%.*]] = shl nuw i64 [[N_VEC]], 1
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[X:%.*]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
@@ -1180,11 +1167,10 @@ define void @PR27626_5(ptr %a, i32 %x, i32 %y, i32 %z, i64 %n) #1 {
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp samesign ult i64 [[TMP2]], [[TMP4]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP5]], -4
-; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[TMP2]], [[DOTNEG]]
; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP8:%.*]] = shl nuw nsw i64 [[TMP7]], 2
+; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP8]]
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[TMP2]], [[DOTNOT]]
; CHECK-NEXT: [[TMP11:%.*]] = shl nuw i64 [[N_VEC]], 1
; CHECK-NEXT: [[IND_END:%.*]] = or disjoint i64 [[TMP11]], 3
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[X:%.*]], i64 0
@@ -1273,11 +1259,10 @@ define void @PR34743(ptr %a, ptr %b, i64 %n) #1 {
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP8]], -4
-; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[TMP1]], [[DOTNEG]]
; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP10:%.*]] = shl nuw nsw i64 [[TMP9]], 2
+; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP10]]
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[TMP1]], [[DOTNOT]]
; CHECK-NEXT: [[IND_END:%.*]] = shl i64 [[N_VEC]], 1
; CHECK-NEXT: [[TMP14:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; CHECK-NEXT: [[TMP15:%.*]] = shl <vscale x 4 x i64> [[TMP14]], splat (i64 1)
@@ -1372,10 +1357,8 @@ define void @interleave_deinterleave_factor3(ptr writeonly noalias %dst, ptr rea
; CHECK-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 2
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub nuw nsw i64 1024, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = shl nuw i64 [[TMP4]], 2
; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP5]], i64 0
+; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP3]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
@@ -1402,7 +1385,7 @@ define void @interleave_deinterleave_factor3(ptr writeonly noalias %dst, ptr rea
; CHECK-NEXT: [[TMP17:%.*]] = shl <vscale x 4 x i32> [[TMP9]], [[TMP13]]
; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw i8, <vscale x 4 x ptr> [[TMP10]], i64 8
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP17]], <vscale x 4 x ptr> [[TMP25]], i32 4, <vscale x 4 x i1> splat (i1 true))
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP41:![0-9]+]]
@@ -1466,8 +1449,6 @@ define void @interleave_deinterleave(ptr writeonly noalias %dst, ptr readonly %a
; CHECK-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 2
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub nuw nsw i64 1024, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = shl nuw i64 [[TMP4]], 2
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1492,7 +1473,7 @@ define void @interleave_deinterleave(ptr writeonly noalias %dst, ptr readonly %a
; CHECK-NEXT: [[TMP24:%.*]] = ashr <vscale x 4 x i32> [[TMP12]], [[TMP19]]
; CHECK-NEXT: [[INTERLEAVED_VEC13:%.*]] = call <vscale x 16 x i32> @llvm.vector.interleave4.nxv16i32(<vscale x 4 x i32> [[TMP20]], <vscale x 4 x i32> [[TMP22]], <vscale x 4 x i32> [[TMP23]], <vscale x 4 x i32> [[TMP24]])
; CHECK-NEXT: store <vscale x 16 x i32> [[INTERLEAVED_VEC13]], ptr [[TMP21]], align 4
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP43:![0-9]+]]
; CHECK: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-masked-accesses.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-masked-accesses.ll
index f152dd3..40ad5bb 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-masked-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-masked-accesses.ll
@@ -30,41 +30,39 @@ define dso_local void @masked_strided1(ptr noalias nocapture readonly %p, ptr no
; SCALAR_TAIL_FOLDING-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i32 [[TMP0]], 64
; SCALAR_TAIL_FOLDING-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; SCALAR_TAIL_FOLDING: vector.ph:
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32()
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP3:%.*]] = shl nuw i32 [[TMP2]], 4
-; SCALAR_TAIL_FOLDING-NEXT: [[N_MOD_VF:%.*]] = urem i32 1024, [[TMP3]]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP1:%.*]] = call i32 @llvm.vscale.i32()
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP2:%.*]] = shl nuw i32 [[TMP1]], 4
+; SCALAR_TAIL_FOLDING-NEXT: [[N_MOD_VF:%.*]] = urem i32 1024, [[TMP2]]
; SCALAR_TAIL_FOLDING-NEXT: [[N_VEC:%.*]] = sub nuw nsw i32 1024, [[N_MOD_VF]]
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP4:%.*]] = call i32 @llvm.vscale.i32()
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP5:%.*]] = shl nuw i32 [[TMP4]], 4
; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[CONV]], i64 0
; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP6:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
-; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP5]], i64 0
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP3:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
+; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP2]], i64 0
; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
; SCALAR_TAIL_FOLDING-NEXT: br label [[VECTOR_BODY:%.*]]
; SCALAR_TAIL_FOLDING: vector.body:
; SCALAR_TAIL_FOLDING-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALAR_TAIL_FOLDING-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP6]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP7:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP8:%.*]] = shl i32 [[INDEX]], 1
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP9:%.*]] = sext i32 [[TMP8]] to i64
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP9]]
-; SCALAR_TAIL_FOLDING-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1> [[TMP7]], <vscale x 16 x i1> [[TMP7]])
-; SCALAR_TAIL_FOLDING-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 32 x i8> @llvm.masked.load.nxv32i8.p0(ptr [[TMP10]], i32 1, <vscale x 32 x i1> [[INTERLEAVED_MASK]], <vscale x 32 x i8> poison)
+; SCALAR_TAIL_FOLDING-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP3]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP4:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP5:%.*]] = shl i32 [[INDEX]], 1
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP6:%.*]] = sext i32 [[TMP5]] to i64
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP6]]
+; SCALAR_TAIL_FOLDING-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]])
+; SCALAR_TAIL_FOLDING-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 32 x i8> @llvm.masked.load.nxv32i8.p0(ptr [[TMP7]], i32 1, <vscale x 32 x i1> [[INTERLEAVED_MASK]], <vscale x 32 x i8> poison)
; SCALAR_TAIL_FOLDING-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.vector.deinterleave2.nxv32i8(<vscale x 32 x i8> [[WIDE_MASKED_VEC]])
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 0
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 1
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP13:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[TMP11]], <vscale x 16 x i8> [[TMP12]])
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP14:%.*]] = sext i32 [[TMP8]] to i64
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP15:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP14]]
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP16:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP13]]
-; SCALAR_TAIL_FOLDING-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 32 x i8> @llvm.vector.interleave2.nxv32i8(<vscale x 16 x i8> [[TMP13]], <vscale x 16 x i8> [[TMP16]])
-; SCALAR_TAIL_FOLDING-NEXT: [[INTERLEAVED_MASK3:%.*]] = call <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1> [[TMP7]], <vscale x 16 x i1> [[TMP7]])
-; SCALAR_TAIL_FOLDING-NEXT: call void @llvm.masked.store.nxv32i8.p0(<vscale x 32 x i8> [[INTERLEAVED_VEC]], ptr [[TMP15]], i32 1, <vscale x 32 x i1> [[INTERLEAVED_MASK3]])
-; SCALAR_TAIL_FOLDING-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP5]]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 0
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 1
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP10:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[TMP8]], <vscale x 16 x i8> [[TMP9]])
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP11:%.*]] = sext i32 [[TMP5]] to i64
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP11]]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP13:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP10]]
+; SCALAR_TAIL_FOLDING-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 32 x i8> @llvm.vector.interleave2.nxv32i8(<vscale x 16 x i8> [[TMP10]], <vscale x 16 x i8> [[TMP13]])
+; SCALAR_TAIL_FOLDING-NEXT: [[INTERLEAVED_MASK3:%.*]] = call <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]])
+; SCALAR_TAIL_FOLDING-NEXT: call void @llvm.masked.store.nxv32i8.p0(<vscale x 32 x i8> [[INTERLEAVED_VEC]], ptr [[TMP12]], i32 1, <vscale x 32 x i1> [[INTERLEAVED_MASK3]])
+; SCALAR_TAIL_FOLDING-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP2]]
; SCALAR_TAIL_FOLDING-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]]
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP17:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; SCALAR_TAIL_FOLDING-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP14:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; SCALAR_TAIL_FOLDING-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; SCALAR_TAIL_FOLDING: middle.block:
; SCALAR_TAIL_FOLDING-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_MOD_VF]], 0
; SCALAR_TAIL_FOLDING-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
@@ -80,42 +78,42 @@ define dso_local void @masked_strided1(ptr noalias nocapture readonly %p, ptr no
; PREDICATED_TAIL_FOLDING-NEXT: [[TMP1:%.*]] = shl nuw i32 [[TMP0]], 4
; PREDICATED_TAIL_FOLDING-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32()
; PREDICATED_TAIL_FOLDING-NEXT: [[TMP3:%.*]] = shl nuw i32 [[TMP2]], 4
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMPA:%.*]] = sub i32 1024, [[TMP3]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMPB:%.*]] = icmp ult i32 [[TMP2]], 64
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP4:%.*]] = select i1 [[TMPB]], i32 [[TMPA]], i32 0
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP4:%.*]] = sub i32 1024, [[TMP3]]
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP5:%.*]] = icmp ult i32 [[TMP2]], 64
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i32 [[TMP4]], i32 0
; PREDICATED_TAIL_FOLDING-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 0, i32 1024)
; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[CONV]], i64 0
; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP5:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP7:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP1]], i64 0
; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
; PREDICATED_TAIL_FOLDING-NEXT: br label [[VECTOR_BODY:%.*]]
; PREDICATED_TAIL_FOLDING: vector.body:
; PREDICATED_TAIL_FOLDING-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; PREDICATED_TAIL_FOLDING-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 16 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
-; PREDICATED_TAIL_FOLDING-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP5]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP6:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP7:%.*]] = select <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1> zeroinitializer
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP8:%.*]] = shl i32 [[INDEX]], 1
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP9:%.*]] = sext i32 [[TMP8]] to i64
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP9]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1> [[TMP7]], <vscale x 16 x i1> [[TMP7]])
-; PREDICATED_TAIL_FOLDING-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 32 x i8> @llvm.masked.load.nxv32i8.p0(ptr [[TMP10]], i32 1, <vscale x 32 x i1> [[INTERLEAVED_MASK]], <vscale x 32 x i8> poison)
+; PREDICATED_TAIL_FOLDING-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP7]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP8:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP9:%.*]] = select <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i1> [[TMP8]], <vscale x 16 x i1> zeroinitializer
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP10:%.*]] = shl i32 [[INDEX]], 1
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP11:%.*]] = sext i32 [[TMP10]] to i64
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP11]]
+; PREDICATED_TAIL_FOLDING-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1> [[TMP9]], <vscale x 16 x i1> [[TMP9]])
+; PREDICATED_TAIL_FOLDING-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 32 x i8> @llvm.masked.load.nxv32i8.p0(ptr [[TMP12]], i32 1, <vscale x 32 x i1> [[INTERLEAVED_MASK]], <vscale x 32 x i8> poison)
; PREDICATED_TAIL_FOLDING-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.vector.deinterleave2.nxv32i8(<vscale x 32 x i8> [[WIDE_MASKED_VEC]])
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 0
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 1
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP13:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[TMP11]], <vscale x 16 x i8> [[TMP12]])
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP14:%.*]] = sext i32 [[TMP8]] to i64
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP15:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP14]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP16:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP13]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 32 x i8> @llvm.vector.interleave2.nxv32i8(<vscale x 16 x i8> [[TMP13]], <vscale x 16 x i8> [[TMP16]])
-; PREDICATED_TAIL_FOLDING-NEXT: [[INTERLEAVED_MASK3:%.*]] = call <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1> [[TMP7]], <vscale x 16 x i1> [[TMP7]])
-; PREDICATED_TAIL_FOLDING-NEXT: call void @llvm.masked.store.nxv32i8.p0(<vscale x 32 x i8> [[INTERLEAVED_VEC]], ptr [[TMP15]], i32 1, <vscale x 32 x i1> [[INTERLEAVED_MASK3]])
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 0
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 1
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP15:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[TMP13]], <vscale x 16 x i8> [[TMP14]])
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP16:%.*]] = sext i32 [[TMP10]] to i64
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP16]]
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP18:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP15]]
+; PREDICATED_TAIL_FOLDING-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 32 x i8> @llvm.vector.interleave2.nxv32i8(<vscale x 16 x i8> [[TMP15]], <vscale x 16 x i8> [[TMP18]])
+; PREDICATED_TAIL_FOLDING-NEXT: [[INTERLEAVED_MASK3:%.*]] = call <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1> [[TMP9]], <vscale x 16 x i1> [[TMP9]])
+; PREDICATED_TAIL_FOLDING-NEXT: call void @llvm.masked.store.nxv32i8.p0(<vscale x 32 x i8> [[INTERLEAVED_VEC]], ptr [[TMP17]], i32 1, <vscale x 32 x i1> [[INTERLEAVED_MASK3]])
; PREDICATED_TAIL_FOLDING-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], [[TMP1]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 [[INDEX]], i32 [[TMP4]])
+; PREDICATED_TAIL_FOLDING-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 [[INDEX]], i32 [[TMP6]])
; PREDICATED_TAIL_FOLDING-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP17:%.*]] = extractelement <vscale x 16 x i1> [[ACTIVE_LANE_MASK_NEXT]], i64 0
-; PREDICATED_TAIL_FOLDING-NEXT: br i1 [[TMP17]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP0:![0-9]+]]
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP19:%.*]] = extractelement <vscale x 16 x i1> [[ACTIVE_LANE_MASK_NEXT]], i64 0
+; PREDICATED_TAIL_FOLDING-NEXT: br i1 [[TMP19]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP0:![0-9]+]]
; PREDICATED_TAIL_FOLDING: middle.block:
; PREDICATED_TAIL_FOLDING-NEXT: br label [[FOR_END:%.*]]
; PREDICATED_TAIL_FOLDING: scalar.ph:
@@ -176,34 +174,32 @@ define dso_local void @masked_strided2(ptr noalias nocapture readnone %p, ptr no
; SCALAR_TAIL_FOLDING-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i32 [[TMP0]], 64
; SCALAR_TAIL_FOLDING-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; SCALAR_TAIL_FOLDING: vector.ph:
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32()
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP3:%.*]] = shl nuw i32 [[TMP2]], 4
-; SCALAR_TAIL_FOLDING-NEXT: [[N_MOD_VF:%.*]] = urem i32 1024, [[TMP3]]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP1:%.*]] = call i32 @llvm.vscale.i32()
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP2:%.*]] = shl nuw i32 [[TMP1]], 4
+; SCALAR_TAIL_FOLDING-NEXT: [[N_MOD_VF:%.*]] = urem i32 1024, [[TMP2]]
; SCALAR_TAIL_FOLDING-NEXT: [[N_VEC:%.*]] = sub nuw nsw i32 1024, [[N_MOD_VF]]
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP4:%.*]] = call i32 @llvm.vscale.i32()
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP5:%.*]] = shl nuw i32 [[TMP4]], 4
; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[CONV]], i64 0
; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP6:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
-; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP5]], i64 0
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP3:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
+; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP2]], i64 0
; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
; SCALAR_TAIL_FOLDING-NEXT: br label [[VECTOR_BODY:%.*]]
; SCALAR_TAIL_FOLDING: vector.body:
; SCALAR_TAIL_FOLDING-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALAR_TAIL_FOLDING-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP6]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP7:%.*]] = shl nuw nsw <vscale x 16 x i32> [[VEC_IND]], splat (i32 1)
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP8:%.*]] = zext nneg <vscale x 16 x i32> [[TMP7]] to <vscale x 16 x i64>
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP8]]
-; SCALAR_TAIL_FOLDING-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> splat (i8 1), <vscale x 16 x ptr> [[TMP9]], i32 1, <vscale x 16 x i1> splat (i1 true))
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP10:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP11:%.*]] = or disjoint <vscale x 16 x i32> [[TMP7]], splat (i32 1)
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP12:%.*]] = zext nneg <vscale x 16 x i32> [[TMP11]] to <vscale x 16 x i64>
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP12]]
-; SCALAR_TAIL_FOLDING-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> splat (i8 2), <vscale x 16 x ptr> [[TMP13]], i32 1, <vscale x 16 x i1> [[TMP10]])
-; SCALAR_TAIL_FOLDING-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP5]]
+; SCALAR_TAIL_FOLDING-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP3]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP4:%.*]] = shl nuw nsw <vscale x 16 x i32> [[VEC_IND]], splat (i32 1)
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP5:%.*]] = zext nneg <vscale x 16 x i32> [[TMP4]] to <vscale x 16 x i64>
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP5]]
+; SCALAR_TAIL_FOLDING-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> splat (i8 1), <vscale x 16 x ptr> [[TMP6]], i32 1, <vscale x 16 x i1> splat (i1 true))
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP7:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP8:%.*]] = or disjoint <vscale x 16 x i32> [[TMP4]], splat (i32 1)
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP9:%.*]] = zext nneg <vscale x 16 x i32> [[TMP8]] to <vscale x 16 x i64>
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP9]]
+; SCALAR_TAIL_FOLDING-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> splat (i8 2), <vscale x 16 x ptr> [[TMP10]], i32 1, <vscale x 16 x i1> [[TMP7]])
+; SCALAR_TAIL_FOLDING-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP2]]
; SCALAR_TAIL_FOLDING-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]]
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP14:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; SCALAR_TAIL_FOLDING-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP11:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; SCALAR_TAIL_FOLDING-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; SCALAR_TAIL_FOLDING: middle.block:
; SCALAR_TAIL_FOLDING-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_MOD_VF]], 0
; SCALAR_TAIL_FOLDING-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
@@ -219,35 +215,35 @@ define dso_local void @masked_strided2(ptr noalias nocapture readnone %p, ptr no
; PREDICATED_TAIL_FOLDING-NEXT: [[TMP1:%.*]] = shl nuw i32 [[TMP0]], 4
; PREDICATED_TAIL_FOLDING-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32()
; PREDICATED_TAIL_FOLDING-NEXT: [[TMP3:%.*]] = shl nuw i32 [[TMP2]], 4
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMPA:%.*]] = sub i32 1024, [[TMP3]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMPB:%.*]] = icmp ult i32 [[TMP2]], 64
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP4:%.*]] = select i1 [[TMPB]], i32 [[TMPA]], i32 0
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP4:%.*]] = sub i32 1024, [[TMP3]]
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP5:%.*]] = icmp ult i32 [[TMP2]], 64
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i32 [[TMP4]], i32 0
; PREDICATED_TAIL_FOLDING-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 0, i32 1024)
; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[CONV]], i64 0
; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP5:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP7:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP1]], i64 0
; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
; PREDICATED_TAIL_FOLDING-NEXT: br label [[VECTOR_BODY:%.*]]
; PREDICATED_TAIL_FOLDING: vector.body:
; PREDICATED_TAIL_FOLDING-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; PREDICATED_TAIL_FOLDING-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 16 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
-; PREDICATED_TAIL_FOLDING-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP5]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP6:%.*]] = shl nuw nsw <vscale x 16 x i32> [[VEC_IND]], splat (i32 1)
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP7:%.*]] = zext nneg <vscale x 16 x i32> [[TMP6]] to <vscale x 16 x i64>
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP7]]
-; PREDICATED_TAIL_FOLDING-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> splat (i8 1), <vscale x 16 x ptr> [[TMP8]], i32 1, <vscale x 16 x i1> [[ACTIVE_LANE_MASK]])
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP9:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP10:%.*]] = select <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i1> [[TMP9]], <vscale x 16 x i1> zeroinitializer
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP11:%.*]] = or disjoint <vscale x 16 x i32> [[TMP6]], splat (i32 1)
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP12:%.*]] = zext nneg <vscale x 16 x i32> [[TMP11]] to <vscale x 16 x i64>
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP12]]
-; PREDICATED_TAIL_FOLDING-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> splat (i8 2), <vscale x 16 x ptr> [[TMP13]], i32 1, <vscale x 16 x i1> [[TMP10]])
+; PREDICATED_TAIL_FOLDING-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP7]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP8:%.*]] = shl nuw nsw <vscale x 16 x i32> [[VEC_IND]], splat (i32 1)
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP9:%.*]] = zext nneg <vscale x 16 x i32> [[TMP8]] to <vscale x 16 x i64>
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP9]]
+; PREDICATED_TAIL_FOLDING-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> splat (i8 1), <vscale x 16 x ptr> [[TMP10]], i32 1, <vscale x 16 x i1> [[ACTIVE_LANE_MASK]])
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP11:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP12:%.*]] = select <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i1> [[TMP11]], <vscale x 16 x i1> zeroinitializer
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP13:%.*]] = or disjoint <vscale x 16 x i32> [[TMP8]], splat (i32 1)
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP14:%.*]] = zext nneg <vscale x 16 x i32> [[TMP13]] to <vscale x 16 x i64>
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP14]]
+; PREDICATED_TAIL_FOLDING-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> splat (i8 2), <vscale x 16 x ptr> [[TMP15]], i32 1, <vscale x 16 x i1> [[TMP12]])
; PREDICATED_TAIL_FOLDING-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], [[TMP1]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 [[INDEX]], i32 [[TMP4]])
+; PREDICATED_TAIL_FOLDING-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 [[INDEX]], i32 [[TMP6]])
; PREDICATED_TAIL_FOLDING-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP14:%.*]] = extractelement <vscale x 16 x i1> [[ACTIVE_LANE_MASK_NEXT]], i64 0
-; PREDICATED_TAIL_FOLDING-NEXT: br i1 [[TMP14]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP4:![0-9]+]]
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP16:%.*]] = extractelement <vscale x 16 x i1> [[ACTIVE_LANE_MASK_NEXT]], i64 0
+; PREDICATED_TAIL_FOLDING-NEXT: br i1 [[TMP16]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP4:![0-9]+]]
; PREDICATED_TAIL_FOLDING: middle.block:
; PREDICATED_TAIL_FOLDING-NEXT: br label [[FOR_END:%.*]]
; PREDICATED_TAIL_FOLDING: scalar.ph:
@@ -304,37 +300,35 @@ define dso_local void @masked_strided3(ptr noalias nocapture readnone %p, ptr no
; SCALAR_TAIL_FOLDING-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i32 [[TMP0]], 64
; SCALAR_TAIL_FOLDING-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; SCALAR_TAIL_FOLDING: vector.ph:
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32()
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP3:%.*]] = shl nuw i32 [[TMP2]], 4
-; SCALAR_TAIL_FOLDING-NEXT: [[N_MOD_VF:%.*]] = urem i32 1024, [[TMP3]]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP1:%.*]] = call i32 @llvm.vscale.i32()
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP2:%.*]] = shl nuw i32 [[TMP1]], 4
+; SCALAR_TAIL_FOLDING-NEXT: [[N_MOD_VF:%.*]] = urem i32 1024, [[TMP2]]
; SCALAR_TAIL_FOLDING-NEXT: [[N_VEC:%.*]] = sub nuw nsw i32 1024, [[N_MOD_VF]]
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP4:%.*]] = call i32 @llvm.vscale.i32()
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP5:%.*]] = shl nuw i32 [[TMP4]], 4
; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[CONV]], i64 0
; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[CONV3]], i64 0
; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP6:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
-; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP5]], i64 0
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP3:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
+; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP2]], i64 0
; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT3]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
; SCALAR_TAIL_FOLDING-NEXT: br label [[VECTOR_BODY:%.*]]
; SCALAR_TAIL_FOLDING: vector.body:
; SCALAR_TAIL_FOLDING-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALAR_TAIL_FOLDING-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP6]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP7:%.*]] = shl nuw nsw <vscale x 16 x i32> [[VEC_IND]], splat (i32 1)
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP8:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP9:%.*]] = zext nneg <vscale x 16 x i32> [[TMP7]] to <vscale x 16 x i64>
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP9]]
-; SCALAR_TAIL_FOLDING-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> splat (i8 1), <vscale x 16 x ptr> [[TMP10]], i32 1, <vscale x 16 x i1> [[TMP8]])
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP11:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]]
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP12:%.*]] = or disjoint <vscale x 16 x i32> [[TMP7]], splat (i32 1)
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP13:%.*]] = zext nneg <vscale x 16 x i32> [[TMP12]] to <vscale x 16 x i64>
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP13]]
-; SCALAR_TAIL_FOLDING-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> splat (i8 2), <vscale x 16 x ptr> [[TMP14]], i32 1, <vscale x 16 x i1> [[TMP11]])
-; SCALAR_TAIL_FOLDING-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP5]]
+; SCALAR_TAIL_FOLDING-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP3]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP4:%.*]] = shl nuw nsw <vscale x 16 x i32> [[VEC_IND]], splat (i32 1)
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP5:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP6:%.*]] = zext nneg <vscale x 16 x i32> [[TMP4]] to <vscale x 16 x i64>
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP6]]
+; SCALAR_TAIL_FOLDING-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> splat (i8 1), <vscale x 16 x ptr> [[TMP7]], i32 1, <vscale x 16 x i1> [[TMP5]])
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP8:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP9:%.*]] = or disjoint <vscale x 16 x i32> [[TMP4]], splat (i32 1)
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP10:%.*]] = zext nneg <vscale x 16 x i32> [[TMP9]] to <vscale x 16 x i64>
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP10]]
+; SCALAR_TAIL_FOLDING-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> splat (i8 2), <vscale x 16 x ptr> [[TMP11]], i32 1, <vscale x 16 x i1> [[TMP8]])
+; SCALAR_TAIL_FOLDING-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP2]]
; SCALAR_TAIL_FOLDING-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT4]]
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP15:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; SCALAR_TAIL_FOLDING-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; SCALAR_TAIL_FOLDING-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; SCALAR_TAIL_FOLDING: middle.block:
; SCALAR_TAIL_FOLDING-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_MOD_VF]], 0
; SCALAR_TAIL_FOLDING-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
@@ -351,39 +345,39 @@ define dso_local void @masked_strided3(ptr noalias nocapture readnone %p, ptr no
; PREDICATED_TAIL_FOLDING-NEXT: [[TMP1:%.*]] = shl nuw i32 [[TMP0]], 4
; PREDICATED_TAIL_FOLDING-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32()
; PREDICATED_TAIL_FOLDING-NEXT: [[TMP3:%.*]] = shl nuw i32 [[TMP2]], 4
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMPA:%.*]] = sub i32 1024, [[TMP3]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMPB:%.*]] = icmp ult i32 [[TMP2]], 64
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP4:%.*]] = select i1 [[TMPB]], i32 [[TMPA]], i32 0
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP4:%.*]] = sub i32 1024, [[TMP3]]
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP5:%.*]] = icmp ult i32 [[TMP2]], 64
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i32 [[TMP4]], i32 0
; PREDICATED_TAIL_FOLDING-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 0, i32 1024)
; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[CONV]], i64 0
; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[CONV3]], i64 0
; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP5:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP7:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP1]], i64 0
; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT3]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
; PREDICATED_TAIL_FOLDING-NEXT: br label [[VECTOR_BODY:%.*]]
; PREDICATED_TAIL_FOLDING: vector.body:
; PREDICATED_TAIL_FOLDING-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; PREDICATED_TAIL_FOLDING-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 16 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
-; PREDICATED_TAIL_FOLDING-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP5]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP6:%.*]] = shl nuw nsw <vscale x 16 x i32> [[VEC_IND]], splat (i32 1)
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP7:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP8:%.*]] = select <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i1> [[TMP7]], <vscale x 16 x i1> zeroinitializer
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP9:%.*]] = zext nneg <vscale x 16 x i32> [[TMP6]] to <vscale x 16 x i64>
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP9]]
-; PREDICATED_TAIL_FOLDING-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> splat (i8 1), <vscale x 16 x ptr> [[TMP10]], i32 1, <vscale x 16 x i1> [[TMP8]])
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP11:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP12:%.*]] = select <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i1> [[TMP11]], <vscale x 16 x i1> zeroinitializer
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP13:%.*]] = or disjoint <vscale x 16 x i32> [[TMP6]], splat (i32 1)
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP14:%.*]] = zext nneg <vscale x 16 x i32> [[TMP13]] to <vscale x 16 x i64>
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP14]]
-; PREDICATED_TAIL_FOLDING-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> splat (i8 2), <vscale x 16 x ptr> [[TMP15]], i32 1, <vscale x 16 x i1> [[TMP12]])
+; PREDICATED_TAIL_FOLDING-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP7]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP8:%.*]] = shl nuw nsw <vscale x 16 x i32> [[VEC_IND]], splat (i32 1)
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP9:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP10:%.*]] = select <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i1> [[TMP9]], <vscale x 16 x i1> zeroinitializer
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP11:%.*]] = zext nneg <vscale x 16 x i32> [[TMP8]] to <vscale x 16 x i64>
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP11]]
+; PREDICATED_TAIL_FOLDING-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> splat (i8 1), <vscale x 16 x ptr> [[TMP12]], i32 1, <vscale x 16 x i1> [[TMP10]])
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP13:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]]
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP14:%.*]] = select <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i1> [[TMP13]], <vscale x 16 x i1> zeroinitializer
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP15:%.*]] = or disjoint <vscale x 16 x i32> [[TMP8]], splat (i32 1)
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP16:%.*]] = zext nneg <vscale x 16 x i32> [[TMP15]] to <vscale x 16 x i64>
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP17:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP16]]
+; PREDICATED_TAIL_FOLDING-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> splat (i8 2), <vscale x 16 x ptr> [[TMP17]], i32 1, <vscale x 16 x i1> [[TMP14]])
; PREDICATED_TAIL_FOLDING-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], [[TMP1]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 [[INDEX]], i32 [[TMP4]])
+; PREDICATED_TAIL_FOLDING-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 [[INDEX]], i32 [[TMP6]])
; PREDICATED_TAIL_FOLDING-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT4]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP16:%.*]] = extractelement <vscale x 16 x i1> [[ACTIVE_LANE_MASK_NEXT]], i64 0
-; PREDICATED_TAIL_FOLDING-NEXT: br i1 [[TMP16]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP6:![0-9]+]]
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP18:%.*]] = extractelement <vscale x 16 x i1> [[ACTIVE_LANE_MASK_NEXT]], i64 0
+; PREDICATED_TAIL_FOLDING-NEXT: br i1 [[TMP18]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP6:![0-9]+]]
; PREDICATED_TAIL_FOLDING: middle.block:
; PREDICATED_TAIL_FOLDING-NEXT: br label [[FOR_END:%.*]]
; PREDICATED_TAIL_FOLDING: scalar.ph:
@@ -452,45 +446,43 @@ define dso_local void @masked_strided_factor4(ptr noalias nocapture readonly %p,
; SCALAR_TAIL_FOLDING-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i32 [[TMP0]], 64
; SCALAR_TAIL_FOLDING-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; SCALAR_TAIL_FOLDING: vector.ph:
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32()
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP3:%.*]] = shl nuw i32 [[TMP2]], 4
-; SCALAR_TAIL_FOLDING-NEXT: [[N_MOD_VF:%.*]] = urem i32 1024, [[TMP3]]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP1:%.*]] = call i32 @llvm.vscale.i32()
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP2:%.*]] = shl nuw i32 [[TMP1]], 4
+; SCALAR_TAIL_FOLDING-NEXT: [[N_MOD_VF:%.*]] = urem i32 1024, [[TMP2]]
; SCALAR_TAIL_FOLDING-NEXT: [[N_VEC:%.*]] = sub nuw nsw i32 1024, [[N_MOD_VF]]
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP4:%.*]] = call i32 @llvm.vscale.i32()
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP5:%.*]] = shl nuw i32 [[TMP4]], 4
; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[CONV]], i64 0
; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP6:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
-; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP5]], i64 0
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP3:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
+; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP2]], i64 0
; SCALAR_TAIL_FOLDING-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
; SCALAR_TAIL_FOLDING-NEXT: br label [[VECTOR_BODY:%.*]]
; SCALAR_TAIL_FOLDING: vector.body:
; SCALAR_TAIL_FOLDING-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALAR_TAIL_FOLDING-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP6]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP7:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP8:%.*]] = shl i32 [[INDEX]], 2
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP9:%.*]] = sext i32 [[TMP8]] to i64
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP9]]
-; SCALAR_TAIL_FOLDING-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 64 x i1> @llvm.vector.interleave4.nxv64i1(<vscale x 16 x i1> [[TMP7]], <vscale x 16 x i1> [[TMP7]], <vscale x 16 x i1> [[TMP7]], <vscale x 16 x i1> [[TMP7]])
-; SCALAR_TAIL_FOLDING-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 64 x i8> @llvm.masked.load.nxv64i8.p0(ptr [[TMP10]], i32 1, <vscale x 64 x i1> [[INTERLEAVED_MASK]], <vscale x 64 x i8> poison)
+; SCALAR_TAIL_FOLDING-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP3]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP4:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP5:%.*]] = shl i32 [[INDEX]], 2
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP6:%.*]] = sext i32 [[TMP5]] to i64
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP6]]
+; SCALAR_TAIL_FOLDING-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 64 x i1> @llvm.vector.interleave4.nxv64i1(<vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]])
+; SCALAR_TAIL_FOLDING-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 64 x i8> @llvm.masked.load.nxv64i8.p0(ptr [[TMP7]], i32 1, <vscale x 64 x i1> [[INTERLEAVED_MASK]], <vscale x 64 x i8> poison)
; SCALAR_TAIL_FOLDING-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.vector.deinterleave4.nxv64i8(<vscale x 64 x i8> [[WIDE_MASKED_VEC]])
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 0
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 1
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 2
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 3
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP15:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[TMP11]], <vscale x 16 x i8> [[TMP12]])
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP16:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP15]]
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP17:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[TMP13]], <vscale x 16 x i8> [[TMP14]])
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP18:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP17]]
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP19:%.*]] = sext i32 [[TMP8]] to i64
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP20:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP19]]
-; SCALAR_TAIL_FOLDING-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 64 x i8> @llvm.vector.interleave4.nxv64i8(<vscale x 16 x i8> [[TMP15]], <vscale x 16 x i8> [[TMP16]], <vscale x 16 x i8> [[TMP17]], <vscale x 16 x i8> [[TMP18]])
-; SCALAR_TAIL_FOLDING-NEXT: [[INTERLEAVED_MASK3:%.*]] = call <vscale x 64 x i1> @llvm.vector.interleave4.nxv64i1(<vscale x 16 x i1> [[TMP7]], <vscale x 16 x i1> [[TMP7]], <vscale x 16 x i1> [[TMP7]], <vscale x 16 x i1> [[TMP7]])
-; SCALAR_TAIL_FOLDING-NEXT: call void @llvm.masked.store.nxv64i8.p0(<vscale x 64 x i8> [[INTERLEAVED_VEC]], ptr [[TMP20]], i32 1, <vscale x 64 x i1> [[INTERLEAVED_MASK3]])
-; SCALAR_TAIL_FOLDING-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP5]]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 0
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 1
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 2
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 3
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP12:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[TMP8]], <vscale x 16 x i8> [[TMP9]])
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP13:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP12]]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP14:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[TMP10]], <vscale x 16 x i8> [[TMP11]])
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP15:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP14]]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP16:%.*]] = sext i32 [[TMP5]] to i64
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP16]]
+; SCALAR_TAIL_FOLDING-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 64 x i8> @llvm.vector.interleave4.nxv64i8(<vscale x 16 x i8> [[TMP12]], <vscale x 16 x i8> [[TMP13]], <vscale x 16 x i8> [[TMP14]], <vscale x 16 x i8> [[TMP15]])
+; SCALAR_TAIL_FOLDING-NEXT: [[INTERLEAVED_MASK3:%.*]] = call <vscale x 64 x i1> @llvm.vector.interleave4.nxv64i1(<vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]])
+; SCALAR_TAIL_FOLDING-NEXT: call void @llvm.masked.store.nxv64i8.p0(<vscale x 64 x i8> [[INTERLEAVED_VEC]], ptr [[TMP17]], i32 1, <vscale x 64 x i1> [[INTERLEAVED_MASK3]])
+; SCALAR_TAIL_FOLDING-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP2]]
; SCALAR_TAIL_FOLDING-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]]
-; SCALAR_TAIL_FOLDING-NEXT: [[TMP21:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; SCALAR_TAIL_FOLDING-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; SCALAR_TAIL_FOLDING-NEXT: [[TMP18:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; SCALAR_TAIL_FOLDING-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; SCALAR_TAIL_FOLDING: middle.block:
; SCALAR_TAIL_FOLDING-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_MOD_VF]], 0
; SCALAR_TAIL_FOLDING-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
@@ -506,46 +498,46 @@ define dso_local void @masked_strided_factor4(ptr noalias nocapture readonly %p,
; PREDICATED_TAIL_FOLDING-NEXT: [[TMP1:%.*]] = shl nuw i32 [[TMP0]], 4
; PREDICATED_TAIL_FOLDING-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32()
; PREDICATED_TAIL_FOLDING-NEXT: [[TMP3:%.*]] = shl nuw i32 [[TMP2]], 4
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMPA:%.*]] = sub i32 1024, [[TMP3]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMPB:%.*]] = icmp ult i32 [[TMP2]], 64
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP4:%.*]] = select i1 [[TMPB]], i32 [[TMPA]], i32 0
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP4:%.*]] = sub i32 1024, [[TMP3]]
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP5:%.*]] = icmp ult i32 [[TMP2]], 64
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i32 [[TMP4]], i32 0
; PREDICATED_TAIL_FOLDING-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 0, i32 1024)
; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[CONV]], i64 0
; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP5:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP7:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP1]], i64 0
; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
; PREDICATED_TAIL_FOLDING-NEXT: br label [[VECTOR_BODY:%.*]]
; PREDICATED_TAIL_FOLDING: vector.body:
; PREDICATED_TAIL_FOLDING-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; PREDICATED_TAIL_FOLDING-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 16 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
-; PREDICATED_TAIL_FOLDING-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP5]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP6:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP7:%.*]] = select <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1> zeroinitializer
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP8:%.*]] = shl i32 [[INDEX]], 2
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP9:%.*]] = sext i32 [[TMP8]] to i64
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP9]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 64 x i1> @llvm.vector.interleave4.nxv64i1(<vscale x 16 x i1> [[TMP7]], <vscale x 16 x i1> [[TMP7]], <vscale x 16 x i1> [[TMP7]], <vscale x 16 x i1> [[TMP7]])
-; PREDICATED_TAIL_FOLDING-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 64 x i8> @llvm.masked.load.nxv64i8.p0(ptr [[TMP10]], i32 1, <vscale x 64 x i1> [[INTERLEAVED_MASK]], <vscale x 64 x i8> poison)
+; PREDICATED_TAIL_FOLDING-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP7]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP8:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP9:%.*]] = select <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i1> [[TMP8]], <vscale x 16 x i1> zeroinitializer
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP10:%.*]] = shl i32 [[INDEX]], 2
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP11:%.*]] = sext i32 [[TMP10]] to i64
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP11]]
+; PREDICATED_TAIL_FOLDING-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 64 x i1> @llvm.vector.interleave4.nxv64i1(<vscale x 16 x i1> [[TMP9]], <vscale x 16 x i1> [[TMP9]], <vscale x 16 x i1> [[TMP9]], <vscale x 16 x i1> [[TMP9]])
+; PREDICATED_TAIL_FOLDING-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 64 x i8> @llvm.masked.load.nxv64i8.p0(ptr [[TMP12]], i32 1, <vscale x 64 x i1> [[INTERLEAVED_MASK]], <vscale x 64 x i8> poison)
; PREDICATED_TAIL_FOLDING-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.vector.deinterleave4.nxv64i8(<vscale x 64 x i8> [[WIDE_MASKED_VEC]])
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 0
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 1
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 2
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 3
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP15:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[TMP11]], <vscale x 16 x i8> [[TMP12]])
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP16:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP15]]
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 0
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 1
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 2
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP16:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 3
; PREDICATED_TAIL_FOLDING-NEXT: [[TMP17:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[TMP13]], <vscale x 16 x i8> [[TMP14]])
; PREDICATED_TAIL_FOLDING-NEXT: [[TMP18:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP17]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP19:%.*]] = sext i32 [[TMP8]] to i64
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP20:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP19]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 64 x i8> @llvm.vector.interleave4.nxv64i8(<vscale x 16 x i8> [[TMP15]], <vscale x 16 x i8> [[TMP16]], <vscale x 16 x i8> [[TMP17]], <vscale x 16 x i8> [[TMP18]])
-; PREDICATED_TAIL_FOLDING-NEXT: [[INTERLEAVED_MASK3:%.*]] = call <vscale x 64 x i1> @llvm.vector.interleave4.nxv64i1(<vscale x 16 x i1> [[TMP7]], <vscale x 16 x i1> [[TMP7]], <vscale x 16 x i1> [[TMP7]], <vscale x 16 x i1> [[TMP7]])
-; PREDICATED_TAIL_FOLDING-NEXT: call void @llvm.masked.store.nxv64i8.p0(<vscale x 64 x i8> [[INTERLEAVED_VEC]], ptr [[TMP20]], i32 1, <vscale x 64 x i1> [[INTERLEAVED_MASK3]])
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP19:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[TMP15]], <vscale x 16 x i8> [[TMP16]])
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP20:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP19]]
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP21:%.*]] = sext i32 [[TMP10]] to i64
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP22:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP21]]
+; PREDICATED_TAIL_FOLDING-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 64 x i8> @llvm.vector.interleave4.nxv64i8(<vscale x 16 x i8> [[TMP17]], <vscale x 16 x i8> [[TMP18]], <vscale x 16 x i8> [[TMP19]], <vscale x 16 x i8> [[TMP20]])
+; PREDICATED_TAIL_FOLDING-NEXT: [[INTERLEAVED_MASK3:%.*]] = call <vscale x 64 x i1> @llvm.vector.interleave4.nxv64i1(<vscale x 16 x i1> [[TMP9]], <vscale x 16 x i1> [[TMP9]], <vscale x 16 x i1> [[TMP9]], <vscale x 16 x i1> [[TMP9]])
+; PREDICATED_TAIL_FOLDING-NEXT: call void @llvm.masked.store.nxv64i8.p0(<vscale x 64 x i8> [[INTERLEAVED_VEC]], ptr [[TMP22]], i32 1, <vscale x 64 x i1> [[INTERLEAVED_MASK3]])
; PREDICATED_TAIL_FOLDING-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], [[TMP1]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 [[INDEX]], i32 [[TMP4]])
+; PREDICATED_TAIL_FOLDING-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 [[INDEX]], i32 [[TMP6]])
; PREDICATED_TAIL_FOLDING-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP21:%.*]] = extractelement <vscale x 16 x i1> [[ACTIVE_LANE_MASK_NEXT]], i64 0
-; PREDICATED_TAIL_FOLDING-NEXT: br i1 [[TMP21]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP8:![0-9]+]]
+; PREDICATED_TAIL_FOLDING-NEXT: [[TMP23:%.*]] = extractelement <vscale x 16 x i1> [[ACTIVE_LANE_MASK_NEXT]], i64 0
+; PREDICATED_TAIL_FOLDING-NEXT: br i1 [[TMP23]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP8:![0-9]+]]
; PREDICATED_TAIL_FOLDING: middle.block:
; PREDICATED_TAIL_FOLDING-NEXT: br label [[FOR_END:%.*]]
; PREDICATED_TAIL_FOLDING: scalar.ph:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-inv-store.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-inv-store.ll
index f0675a4..c8bbbdc 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-inv-store.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-inv-store.ll
@@ -15,8 +15,6 @@ define void @inv_store_i16(ptr noalias %dst, ptr noalias readonly %src, i64 %N)
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -27,7 +25,7 @@ define void @inv_store_i16(ptr noalias %dst, ptr noalias readonly %src, i64 %N)
; CHECK-NEXT: [[TMP11:%.*]] = sub i32 [[TMP10]], 1
; CHECK-NEXT: [[TMP12:%.*]] = extractelement <vscale x 4 x i16> [[WIDE_LOAD]], i32 [[TMP11]]
; CHECK-NEXT: store i16 [[TMP12]], ptr [[DST:%.*]], align 2
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
@@ -64,8 +62,6 @@ define void @cond_inv_store_i32(ptr noalias %dst, ptr noalias readonly %src, i64
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x ptr> poison, ptr [[DST:%.*]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x ptr> [[BROADCAST_SPLATINSERT]], <vscale x 4 x ptr> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -75,7 +71,7 @@ define void @cond_inv_store_i32(ptr noalias %dst, ptr noalias readonly %src, i64
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4
; CHECK-NEXT: [[TMP9:%.*]] = icmp sgt <vscale x 4 x i32> [[WIDE_LOAD]], zeroinitializer
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x ptr> [[BROADCAST_SPLAT]], i32 4, <vscale x 4 x i1> [[TMP9]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-live-out-pointer-induction.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-live-out-pointer-induction.ll
index 2b4aad1..76f33cf 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-live-out-pointer-induction.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-live-out-pointer-induction.ll
@@ -19,8 +19,6 @@ define ptr @test(ptr %start.1, ptr %start.2, ptr %end) {
; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP3]], [[TMP7]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 4
; CHECK-NEXT: [[TMP8:%.*]] = mul i64 [[N_VEC]], 8
; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[START_1:%.*]], i64 [[TMP8]]
; CHECK-NEXT: [[TMP9:%.*]] = mul i64 [[N_VEC]], 8
@@ -35,7 +33,7 @@ define ptr @test(ptr %start.1, ptr %start.2, ptr %end) {
; CHECK-NEXT: [[TMP35:%.*]] = getelementptr i64, ptr [[TMP30]], i64 [[TMP34]]
; CHECK-NEXT: store <vscale x 2 x i64> zeroinitializer, ptr [[TMP30]], align 8
; CHECK-NEXT: store <vscale x 2 x i64> zeroinitializer, ptr [[TMP35]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP11]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
; CHECK-NEXT: [[TMP36:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP36]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-low-trip-count.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-low-trip-count.ll
index ce7b78e..49f9870 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-low-trip-count.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-low-trip-count.ll
@@ -1,81 +1,94 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --filter-out-after "scalar.ph\:" --version 5
; RUN: opt -passes=loop-vectorize -S < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
define void @trip7_i64(ptr noalias nocapture noundef %dst, ptr noalias nocapture noundef readonly %src) #0 {
-; CHECK-LABEL: @trip7_i64(
-; CHECK: = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: = mul nuw i64
-; CHECK: [[VSCALE:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[VF:%.*]] = mul nuw i64 [[VSCALE]], 2
-; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ]
-; CHECK: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ {{%.*}}, %vector.ph ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %vector.body ]
-; CHECK: {{%.*}} = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr {{%.*}}, i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i64> poison)
-; CHECK: {{%.*}} = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr {{%.*}}, i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i64> poison)
-; CHECK: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> {{%.*}}, ptr {{%.*}}, i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[VF]]
+; CHECK-LABEL: define void @trip7_i64(
+; CHECK-SAME: ptr noalias noundef captures(none) [[DST:%.*]], ptr noalias noundef readonly captures(none) [[SRC:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
+; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 7)
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], %[[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr [[TMP5]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i64> poison)
+; CHECK-NEXT: [[TMP6:%.*]] = shl nsw <vscale x 2 x i64> [[WIDE_MASKED_LOAD]], splat (i64 1)
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr [[TMP7]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i64> poison)
+; CHECK-NEXT: [[TMP8:%.*]] = add nsw <vscale x 2 x i64> [[WIDE_MASKED_LOAD1]], [[TMP6]]
+; CHECK-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP8]], ptr [[TMP7]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
+; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]]
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 7)
-; CHECK-NEXT: [[ACTIVE_LANE_MASK_NOT:%.*]] = xor <vscale x 2 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
-; CHECK-NEXT: [[COND:%.*]] = extractelement <vscale x 2 x i1> [[ACTIVE_LANE_MASK_NOT]], i32 0
-; CHECK-NEXT: br i1 [[COND]], label %middle.block, label %vector.body
+; CHECK-NEXT: [[TMP9:%.*]] = xor <vscale x 2 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
+; CHECK-NEXT: [[TMP10:%.*]] = extractelement <vscale x 2 x i1> [[TMP9]], i32 0
+; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: br [[EXIT:label %.*]]
+; CHECK: [[SCALAR_PH]]:
;
entry:
- br label %for.body
+ br label %loop
-for.body: ; preds = %entry, %for.body
- %i.06 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i64, ptr %src, i64 %i.06
- %0 = load i64, ptr %arrayidx, align 8
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %gep.src = getelementptr inbounds i64, ptr %src, i64 %iv
+ %0 = load i64, ptr %gep.src, align 8
%mul = shl nsw i64 %0, 1
- %arrayidx1 = getelementptr inbounds i64, ptr %dst, i64 %i.06
- %1 = load i64, ptr %arrayidx1, align 8
+ %gep.dst = getelementptr inbounds i64, ptr %dst, i64 %iv
+ %1 = load i64, ptr %gep.dst, align 8
%add = add nsw i64 %1, %mul
- store i64 %add, ptr %arrayidx1, align 8
- %inc = add nuw nsw i64 %i.06, 1
- %exitcond.not = icmp eq i64 %inc, 7
- br i1 %exitcond.not, label %for.end, label %for.body
+ store i64 %add, ptr %gep.dst, align 8
+ %iv.next = add nuw nsw i64 %iv, 1
+ %ec = icmp eq i64 %iv.next, 7
+ br i1 %ec, label %exit, label %loop
-for.end: ; preds = %for.body
+exit:
ret void
}
define void @trip5_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture noundef readonly %src) #0 {
-; CHECK-LABEL: @trip5_i8(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 [[I_08]]
-; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-LABEL: define void @trip5_i8(
+; CHECK-SAME: ptr noalias noundef captures(none) [[DST:%.*]], ptr noalias noundef readonly captures(none) [[SRC:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[IV]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[GEP_SRC]], align 1
; CHECK-NEXT: [[MUL:%.*]] = shl i8 [[TMP0]], 1
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[DST:%.*]], i64 [[I_08]]
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
+; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[IV]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[GEP_DST]], align 1
; CHECK-NEXT: [[ADD:%.*]] = add i8 [[MUL]], [[TMP1]]
-; CHECK-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX1]], align 1
-; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 5
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]]
-; CHECK: for.end:
+; CHECK-NEXT: store i8 [[ADD]], ptr [[GEP_DST]], align 1
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 5
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]]
+; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
entry:
- br label %for.body
+ br label %loop
-for.body: ; preds = %entry, %for.body
- %i.08 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, ptr %src, i64 %i.08
- %0 = load i8, ptr %arrayidx, align 1
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %gep.src = getelementptr inbounds i8, ptr %src, i64 %iv
+ %0 = load i8, ptr %gep.src, align 1
%mul = shl i8 %0, 1
- %arrayidx1 = getelementptr inbounds i8, ptr %dst, i64 %i.08
- %1 = load i8, ptr %arrayidx1, align 1
+ %gep.dst = getelementptr inbounds i8, ptr %dst, i64 %iv
+ %1 = load i8, ptr %gep.dst, align 1
%add = add i8 %mul, %1
- store i8 %add, ptr %arrayidx1, align 1
- %inc = add nuw nsw i64 %i.08, 1
- %exitcond.not = icmp eq i64 %inc, 5
- br i1 %exitcond.not, label %for.end, label %for.body
+ store i8 %add, ptr %gep.dst, align 1
+ %iv.next = add nuw nsw i64 %iv, 1
+ %ec = icmp eq i64 %iv.next, 5
+ br i1 %ec, label %exit, label %loop
-for.end: ; preds = %for.body
+exit:
ret void
}
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-multiexit.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-multiexit.ll
index 993c048..26a1649 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-multiexit.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-multiexit.ll
@@ -33,8 +33,6 @@ define void @multiple_exits_unique_exit_block(ptr %A, ptr %B, i32 %N) #0 {
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i32 [[N_MOD_VF]], 0
; CHECK-NEXT: [[TMP10:%.*]] = select i1 [[TMP9]], i32 [[TMP8]], i32 [[N_MOD_VF]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP0]], [[TMP10]]
-; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vscale.i32()
-; CHECK-NEXT: [[TMP12:%.*]] = mul nuw i32 [[TMP11]], 8
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -50,7 +48,7 @@ define void @multiple_exits_unique_exit_block(ptr %A, ptr %B, i32 %N) #0 {
; CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds i32, ptr [[TMP25]], i64 [[TMP29]]
; CHECK-NEXT: store <vscale x 4 x i32> [[WIDE_LOAD]], ptr [[TMP25]], align 4
; CHECK-NEXT: store <vscale x 4 x i32> [[WIDE_LOAD3]], ptr [[TMP30]], align 4
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP12]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP8]]
; CHECK-NEXT: [[TMP31:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP31]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
@@ -105,8 +103,6 @@ define i32 @multiple_exits_multiple_exit_blocks(ptr %A, ptr %B, i32 %N) #0 {
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i32 [[N_MOD_VF]], 0
; CHECK-NEXT: [[TMP10:%.*]] = select i1 [[TMP9]], i32 [[TMP8]], i32 [[N_MOD_VF]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP0]], [[TMP10]]
-; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vscale.i32()
-; CHECK-NEXT: [[TMP12:%.*]] = mul nuw i32 [[TMP11]], 8
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -122,7 +118,7 @@ define i32 @multiple_exits_multiple_exit_blocks(ptr %A, ptr %B, i32 %N) #0 {
; CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds i32, ptr [[TMP25]], i64 [[TMP29]]
; CHECK-NEXT: store <vscale x 4 x i32> [[WIDE_LOAD]], ptr [[TMP25]], align 4
; CHECK-NEXT: store <vscale x 4 x i32> [[WIDE_LOAD3]], ptr [[TMP30]], align 4
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP12]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP8]]
; CHECK-NEXT: [[TMP31:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP31]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-runtime-check-size-based-threshold.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-runtime-check-size-based-threshold.ll
index 893ebef..b8f44f6 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-runtime-check-size-based-threshold.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-runtime-check-size-based-threshold.ll
@@ -46,8 +46,6 @@ define void @min_trip_count_due_to_runtime_checks_1(ptr %dst.1, ptr %dst.2, ptr
; CHECK-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[UMAX]], [[TMP16]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[UMAX]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP49:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP50:%.*]] = mul nuw i64 [[TMP49]], 4
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -77,7 +75,7 @@ define void @min_trip_count_due_to_runtime_checks_1(ptr %dst.1, ptr %dst.2, ptr
; CHECK-NEXT: [[TMP48:%.*]] = getelementptr i64, ptr [[TMP45]], i64 [[TMP47]]
; CHECK-NEXT: store <vscale x 2 x i64> [[TMP35]], ptr [[TMP45]], align 8
; CHECK-NEXT: store <vscale x 2 x i64> [[TMP36]], ptr [[TMP48]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP50]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP16]]
; CHECK-NEXT: [[TMP51:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP51]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-forced.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-forced.ll
index 1cda568..10fe67d 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-forced.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-forced.ll
@@ -46,12 +46,6 @@ define void @simple_memset(i32 %val, ptr %ptr, i64 %n) #0 {
; CHECK-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[N:%.*]], i64 1)
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[UMAX]], [[TMP4]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP14:%.*]] = mul nuw i64 [[TMP13]], 4
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-optsize.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-optsize.ll
index fb0447b..ed2c5cd 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-optsize.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-optsize.ll
@@ -10,12 +10,6 @@ define void @trip1025_i64(ptr noalias nocapture noundef %dst, ptr noalias nocapt
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP4]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1025)
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
@@ -28,7 +22,7 @@ define void @trip1025_i64(ptr noalias nocapture noundef %dst, ptr noalias nocapt
; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr [[TMP11]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i64> poison)
; CHECK-NEXT: [[TMP13:%.*]] = add nsw <vscale x 2 x i64> [[WIDE_MASKED_LOAD1]], [[TMP10]]
; CHECK-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP13]], ptr [[TMP11]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP6]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]]
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 1025)
; CHECK-NEXT: [[TMP14:%.*]] = xor <vscale x 2 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
; CHECK-NEXT: [[TMP15:%.*]] = extractelement <vscale x 2 x i1> [[TMP14]], i32 0
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll
index d6f8b8e..6b1b04a 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll
@@ -12,12 +12,6 @@ define i32 @add_reduction_i32(ptr %ptr, i64 %n) #0 {
; CHECK-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[N:%.*]], i64 1)
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[UMAX]], [[TMP4]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 4
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
@@ -65,12 +59,6 @@ define i32 @add_reduction_i32(ptr %ptr, i64 %n) #0 {
; CHECK-IN-LOOP-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[N:%.*]], i64 1)
; CHECK-IN-LOOP-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK-IN-LOOP: vector.ph:
-; CHECK-IN-LOOP-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-IN-LOOP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-IN-LOOP-NEXT: [[TMP4:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-IN-LOOP-NEXT: [[N_RND_UP:%.*]] = add i64 [[UMAX]], [[TMP4]]
-; CHECK-IN-LOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-IN-LOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-IN-LOOP-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-IN-LOOP-NEXT: [[TMP17:%.*]] = mul nuw i64 [[TMP16]], 4
; CHECK-IN-LOOP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
@@ -136,12 +124,6 @@ define float @add_reduction_f32(ptr %ptr, i64 %n) #0 {
; CHECK-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[N:%.*]], i64 1)
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[UMAX]], [[TMP4]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 4
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
@@ -188,12 +170,6 @@ define float @add_reduction_f32(ptr %ptr, i64 %n) #0 {
; CHECK-IN-LOOP-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[N:%.*]], i64 1)
; CHECK-IN-LOOP-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK-IN-LOOP: vector.ph:
-; CHECK-IN-LOOP-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-IN-LOOP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-IN-LOOP-NEXT: [[TMP4:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-IN-LOOP-NEXT: [[N_RND_UP:%.*]] = add i64 [[UMAX]], [[TMP4]]
-; CHECK-IN-LOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-IN-LOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-IN-LOOP-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-IN-LOOP-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 4
; CHECK-IN-LOOP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
@@ -257,17 +233,11 @@ define i32 @cond_xor_reduction(ptr noalias %a, ptr noalias %cond, i64 %N) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP4]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP21:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP22:%.*]] = mul nuw i64 [[TMP21]], 4
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4
-; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[N]], [[TMP6]]
+; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[N:%.*]], [[TMP6]]
; CHECK-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[N]], [[TMP6]]
; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 [[TMP7]], i64 0
; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 [[N]])
@@ -322,17 +292,11 @@ define i32 @cond_xor_reduction(ptr noalias %a, ptr noalias %cond, i64 %N) #0 {
; CHECK-IN-LOOP-NEXT: entry:
; CHECK-IN-LOOP-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK-IN-LOOP: vector.ph:
-; CHECK-IN-LOOP-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-IN-LOOP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-IN-LOOP-NEXT: [[TMP4:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-IN-LOOP-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP4]]
-; CHECK-IN-LOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-IN-LOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-IN-LOOP-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-IN-LOOP-NEXT: [[TMP21:%.*]] = mul nuw i64 [[TMP20]], 4
; CHECK-IN-LOOP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-IN-LOOP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4
-; CHECK-IN-LOOP-NEXT: [[TMP7:%.*]] = sub i64 [[N]], [[TMP6]]
+; CHECK-IN-LOOP-NEXT: [[TMP7:%.*]] = sub i64 [[N:%.*]], [[TMP6]]
; CHECK-IN-LOOP-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[N]], [[TMP6]]
; CHECK-IN-LOOP-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 [[TMP7]], i64 0
; CHECK-IN-LOOP-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 [[N]])
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-unroll.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-unroll.ll
index 4ec7d4d..01b864b 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-unroll.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-unroll.ll
@@ -10,12 +10,6 @@ define void @simple_memset(i32 %val, ptr %ptr, i64 %n) #0 {
; CHECK-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[N:%.*]], i64 1)
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 16
-; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[UMAX]], [[TMP4]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP61:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP62:%.*]] = mul nuw i64 [[TMP61]], 16
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
@@ -101,12 +95,6 @@ define void @cond_memset(i32 %val, ptr noalias readonly %cond_ptr, ptr noalias %
; CHECK-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[N:%.*]], i64 1)
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 16
-; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[UMAX]], [[TMP4]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP83:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP84:%.*]] = mul nuw i64 [[TMP83]], 16
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll
index 672523e..e996535 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll
@@ -12,12 +12,6 @@ define void @simple_memset(i32 %val, ptr %ptr, i64 %n) #0 {
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[UMAX]], [[TMP2]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4
; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[UMAX]], [[TMP6]]
@@ -32,7 +26,7 @@ define void @simple_memset(i32 %val, ptr %ptr, i64 %n) #0 {
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i64 [[INDEX1]]
; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[BROADCAST_SPLAT]], ptr [[TMP11]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]])
-; CHECK-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP4]]
+; CHECK-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP1]]
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]])
; CHECK-NEXT: [[TMP13:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
; CHECK-NEXT: [[TMP14:%.*]] = extractelement <vscale x 4 x i1> [[TMP13]], i32 0
@@ -63,9 +57,6 @@ define void @simple_memset_v4i32(i32 %val, ptr %ptr, i64 %n) #0 {
; CHECK-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[N:%.*]], i64 1)
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[UMAX]], 3
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 4
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP0:%.*]] = sub i64 [[UMAX]], 4
; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt i64 [[UMAX]], 4
; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i64 [[TMP0]], i64 0
@@ -111,12 +102,6 @@ define void @simple_memcpy(ptr noalias %dst, ptr noalias %src, i64 %n) #0 {
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[UMAX]], [[TMP2]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4
; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[UMAX]], [[TMP6]]
@@ -131,7 +116,7 @@ define void @simple_memcpy(ptr noalias %dst, ptr noalias %src, i64 %n) #0 {
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP11]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i32> poison)
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr [[DST:%.*]], i64 [[INDEX1]]
; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[WIDE_MASKED_LOAD]], ptr [[TMP13]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]])
-; CHECK-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP4]]
+; CHECK-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP1]]
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]])
; CHECK-NEXT: [[TMP15:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
; CHECK-NEXT: [[TMP16:%.*]] = extractelement <vscale x 4 x i1> [[TMP15]], i32 0
@@ -169,12 +154,6 @@ define void @copy_stride4(ptr noalias %dst, ptr noalias %src, i64 %n) #0 {
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
-; CHECK-NEXT: [[TMP5:%.*]] = sub i64 [[TMP4]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP2]], [[TMP5]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP4]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 4
; CHECK-NEXT: [[TMP10:%.*]] = sub i64 [[TMP2]], [[TMP9]]
@@ -184,7 +163,7 @@ define void @copy_stride4(ptr noalias %dst, ptr noalias %src, i64 %n) #0 {
; CHECK-NEXT: [[TMP13:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; CHECK-NEXT: [[TMP15:%.*]] = mul <vscale x 4 x i64> [[TMP13]], splat (i64 4)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP15]]
-; CHECK-NEXT: [[TMP18:%.*]] = mul i64 4, [[TMP7]]
+; CHECK-NEXT: [[TMP18:%.*]] = mul i64 4, [[TMP4]]
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP18]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -196,7 +175,7 @@ define void @copy_stride4(ptr noalias %dst, ptr noalias %src, i64 %n) #0 {
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP19]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i32> poison)
; CHECK-NEXT: [[TMP20:%.*]] = getelementptr i32, ptr [[DST:%.*]], <vscale x 4 x i64> [[VEC_IND]]
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[WIDE_MASKED_GATHER]], <vscale x 4 x ptr> [[TMP20]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]])
-; CHECK-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP7]]
+; CHECK-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP4]]
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP12]])
; CHECK-NEXT: [[TMP21:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
@@ -232,12 +211,6 @@ define void @simple_gather_scatter(ptr noalias %dst, ptr noalias %src, ptr noali
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[UMAX]], [[TMP2]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4
; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[UMAX]], [[TMP6]]
@@ -254,7 +227,7 @@ define void @simple_gather_scatter(ptr noalias %dst, ptr noalias %src, ptr noali
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP13]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i32> poison)
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i32, ptr [[DST:%.*]], <vscale x 4 x i32> [[WIDE_MASKED_LOAD]]
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[WIDE_MASKED_GATHER]], <vscale x 4 x ptr> [[TMP14]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]])
-; CHECK-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP4]]
+; CHECK-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP1]]
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]])
; CHECK-NEXT: [[TMP15:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
; CHECK-NEXT: [[TMP16:%.*]] = extractelement <vscale x 4 x i1> [[TMP15]], i32 0
@@ -292,15 +265,9 @@ define void @uniform_load(ptr noalias %dst, ptr noalias readonly %src, i64 %n) #
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP2]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4
-; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[N]], [[TMP6]]
+; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[N:%.*]], [[TMP6]]
; CHECK-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[N]], [[TMP6]]
; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 [[TMP7]], i64 0
; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 [[N]])
@@ -313,7 +280,7 @@ define void @uniform_load(ptr noalias %dst, ptr noalias readonly %src, i64 %n) #
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i64 [[INDEX]]
; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[BROADCAST_SPLAT]], ptr [[TMP12]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]]
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]])
; CHECK-NEXT: [[TMP14:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
; CHECK-NEXT: [[TMP15:%.*]] = extractelement <vscale x 4 x i1> [[TMP14]], i32 0
@@ -351,15 +318,9 @@ define void @cond_uniform_load(ptr noalias %dst, ptr noalias readonly %src, ptr
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP2]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4
-; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[N]], [[TMP6]]
+; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[N:%.*]], [[TMP6]]
; CHECK-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[N]], [[TMP6]]
; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 [[TMP7]], i64 0
; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 [[N]])
@@ -377,7 +338,7 @@ define void @cond_uniform_load(ptr noalias %dst, ptr noalias readonly %src, ptr
; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 4 x i1> [[TMP15]], <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i64 [[INDEX1]]
; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[PREDPHI]], ptr [[TMP16]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]])
-; CHECK-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP4]]
+; CHECK-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP1]]
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]])
; CHECK-NEXT: [[TMP18:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
; CHECK-NEXT: [[TMP19:%.*]] = extractelement <vscale x 4 x i1> [[TMP18]], i32 0
@@ -423,15 +384,9 @@ define void @uniform_store(ptr noalias %dst, ptr noalias readonly %src, i64 %n)
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP2]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4
-; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[N]], [[TMP6]]
+; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[N:%.*]], [[TMP6]]
; CHECK-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[N]], [[TMP6]]
; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 [[TMP7]], i64 0
; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 [[N]])
@@ -444,7 +399,7 @@ define void @uniform_store(ptr noalias %dst, ptr noalias readonly %src, i64 %n)
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[SRC:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP11]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i32> poison)
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[WIDE_MASKED_LOAD]], <vscale x 4 x ptr> [[BROADCAST_SPLAT]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]]
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]])
; CHECK-NEXT: [[TMP13:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
; CHECK-NEXT: [[TMP14:%.*]] = extractelement <vscale x 4 x i1> [[TMP13]], i32 0
@@ -479,12 +434,6 @@ define void @simple_fdiv(ptr noalias %dst, ptr noalias %src, i64 %n) #0 {
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[UMAX]], [[TMP2]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4
; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[UMAX]], [[TMP6]]
@@ -501,7 +450,7 @@ define void @simple_fdiv(ptr noalias %dst, ptr noalias %src, i64 %n) #0 {
; CHECK-NEXT: [[WIDE_MASKED_LOAD2:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP12]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x float> poison)
; CHECK-NEXT: [[TMP15:%.*]] = fdiv <vscale x 4 x float> [[WIDE_MASKED_LOAD]], [[WIDE_MASKED_LOAD2]]
; CHECK-NEXT: call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[TMP15]], ptr [[TMP12]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]])
-; CHECK-NEXT: [[INDEX_NEXT3]] = add i64 [[INDEX1]], [[TMP4]]
+; CHECK-NEXT: [[INDEX_NEXT3]] = add i64 [[INDEX1]], [[TMP1]]
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]])
; CHECK-NEXT: [[TMP16:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
; CHECK-NEXT: [[TMP17:%.*]] = extractelement <vscale x 4 x i1> [[TMP16]], i32 0
@@ -539,12 +488,6 @@ define void @simple_idiv(ptr noalias %dst, ptr noalias %src, i64 %n) #0 {
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[UMAX]], [[TMP2]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4
; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[UMAX]], [[TMP6]]
@@ -562,7 +505,7 @@ define void @simple_idiv(ptr noalias %dst, ptr noalias %src, i64 %n) #0 {
; CHECK-NEXT: [[TMP15:%.*]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i32> [[WIDE_MASKED_LOAD2]], <vscale x 4 x i32> splat (i32 1)
; CHECK-NEXT: [[TMP16:%.*]] = udiv <vscale x 4 x i32> [[WIDE_MASKED_LOAD]], [[TMP15]]
; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP16]], ptr [[TMP12]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]])
-; CHECK-NEXT: [[INDEX_NEXT3]] = add i64 [[INDEX1]], [[TMP4]]
+; CHECK-NEXT: [[INDEX_NEXT3]] = add i64 [[INDEX1]], [[TMP1]]
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]])
; CHECK-NEXT: [[TMP17:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
; CHECK-NEXT: [[TMP18:%.*]] = extractelement <vscale x 4 x i1> [[TMP17]], i32 0
@@ -601,8 +544,6 @@ define void @simple_memset_trip1024(i32 %val, ptr %ptr, i64 %n) #0 {
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[VAL:%.*]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -610,7 +551,7 @@ define void @simple_memset_trip1024(i32 %val, ptr %ptr, i64 %n) #0 {
; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT2:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i64 [[INDEX1]]
; CHECK-NEXT: store <vscale x 4 x i32> [[BROADCAST_SPLAT]], ptr [[TMP7]], align 4
-; CHECK-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX1]], [[TMP5]]
+; CHECK-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX1]], [[TMP3]]
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT2]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
; CHECK: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-vector-reverse.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-vector-reverse.ll
index 33fa360..9989209 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-vector-reverse.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-vector-reverse.ll
@@ -19,13 +19,11 @@ define void @vector_reverse_f64(i64 %N, ptr noalias %a, ptr noalias %b) #0{
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub nsw i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = shl nuw i64 [[TMP4]], 3
; CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[TMP4]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub nsw i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -98,13 +96,11 @@ define void @vector_reverse_i64(i64 %N, ptr %a, ptr %b) #0 {
; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP4]], [[TMP3]]
; CHECK-NEXT: br i1 [[DIFF_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP6:%.*]] = shl nuw i64 [[TMP5]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub nsw i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP8:%.*]] = shl nuw i64 [[TMP7]], 3
; CHECK-NEXT: [[TMP9:%.*]] = shl i64 [[TMP7]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP9]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub nsw i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-vscale-based-trip-counts.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-vscale-based-trip-counts.ll
index 352f4fe..db941a3 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-vscale-based-trip-counts.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-vscale-based-trip-counts.ll
@@ -15,8 +15,6 @@ define void @vscale_mul_4(ptr noalias noundef readonly captures(none) %a, ptr no
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP1]], [[TMP5]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP1]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[A]], align 4
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x float>, ptr [[B]], align 4
; CHECK-NEXT: [[TMP10:%.*]] = fmul <vscale x 4 x float> [[WIDE_LOAD]], [[WIDE_LOAD1]]
@@ -70,8 +68,6 @@ define void @vscale_mul_8(ptr noalias noundef readonly captures(none) %a, ptr n
; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[MUL1]], [[TMP4]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[MUL1]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8
; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[TMP10]]
@@ -141,8 +137,6 @@ define void @vscale_mul_12(ptr noalias noundef readonly captures(none) %a, ptr n
; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[MUL1]], [[TMP4]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[MUL1]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -152,7 +146,7 @@ define void @vscale_mul_12(ptr noalias noundef readonly captures(none) %a, ptr n
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x float>, ptr [[TMP9]], align 4
; CHECK-NEXT: [[TMP11:%.*]] = fmul <vscale x 4 x float> [[WIDE_LOAD]], [[WIDE_LOAD1]]
; CHECK-NEXT: store <vscale x 4 x float> [[TMP11]], ptr [[TMP9]], align 4
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
@@ -211,8 +205,6 @@ define void @vscale_mul_31(ptr noalias noundef readonly captures(none) %a, ptr n
; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[MUL1]], [[TMP4]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[MUL1]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -235,7 +227,7 @@ define void @vscale_mul_31(ptr noalias noundef readonly captures(none) %a, ptr n
; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw float, ptr [[TMP12]], i64 [[TMP20]]
; CHECK-NEXT: store <vscale x 4 x float> [[TMP17]], ptr [[TMP12]], align 4
; CHECK-NEXT: store <vscale x 4 x float> [[TMP18]], ptr [[TMP21]], align 4
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
@@ -294,8 +286,6 @@ define void @vscale_mul_64(ptr noalias noundef readonly captures(none) %a, ptr n
; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[MUL1]], [[TMP4]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[MUL1]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -318,7 +308,7 @@ define void @vscale_mul_64(ptr noalias noundef readonly captures(none) %a, ptr n
; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw float, ptr [[TMP12]], i64 [[TMP20]]
; CHECK-NEXT: store <vscale x 4 x float> [[TMP17]], ptr [[TMP12]], align 4
; CHECK-NEXT: store <vscale x 4 x float> [[TMP18]], ptr [[TMP21]], align 4
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
@@ -379,8 +369,6 @@ define void @trip_count_with_overflow(ptr noalias noundef readonly captures(none
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP1]], [[TMP5]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP1]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 8
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -403,7 +391,7 @@ define void @trip_count_with_overflow(ptr noalias noundef readonly captures(none
; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw float, ptr [[TMP13]], i64 [[TMP21]]
; CHECK-NEXT: store <vscale x 4 x float> [[TMP18]], ptr [[TMP13]], align 4
; CHECK-NEXT: store <vscale x 4 x float> [[TMP19]], ptr [[TMP22]], align 4
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; CHECK-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
@@ -460,8 +448,6 @@ define void @trip_count_too_big_for_element_count(ptr noalias noundef readonly c
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP1]], [[TMP5]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP1]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 8
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -484,7 +470,7 @@ define void @trip_count_too_big_for_element_count(ptr noalias noundef readonly c
; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw float, ptr [[TMP13]], i64 [[TMP21]]
; CHECK-NEXT: store <vscale x 4 x float> [[TMP18]], ptr [[TMP13]], align 4
; CHECK-NEXT: store <vscale x 4 x float> [[TMP19]], ptr [[TMP22]], align 4
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; CHECK-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-extractvalue.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-extractvalue.ll
index e214e82..b007db9 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-extractvalue.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-extractvalue.ll
@@ -16,8 +16,6 @@ define void @widen_extractvalue(ptr %dst, {i64, i64} %sv) #0 {
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP2]], 2
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 1000, [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 1000, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vscale.i32()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i32 [[TMP4]], 2
; CHECK-NEXT: [[EXTRACT0:%.*]] = extractvalue { i64, i64 } [[SV]], 0
; CHECK-NEXT: [[DOTSPLATINSERT1:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[EXTRACT0]], i64 0
; CHECK-NEXT: [[DOTSPLAT2:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT1]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
@@ -30,7 +28,7 @@ define void @widen_extractvalue(ptr %dst, {i64, i64} %sv) #0 {
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[DST]], i32 [[INDEX]]
; CHECK-NEXT: store <vscale x 2 x i64> [[TMP7]], ptr [[TMP8]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll
index 5c6328e..1012c10 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll
@@ -26,8 +26,6 @@ define void @pointer_induction_used_as_vector(ptr noalias %start.1, ptr noalias
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[N_VEC]], 8
; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[START_1:%.*]], i64 [[TMP4]]
; CHECK-NEXT: [[IND_END2:%.*]] = getelementptr i8, ptr [[START_2:%.*]], i64 [[N_VEC]]
@@ -35,19 +33,19 @@ define void @pointer_induction_used_as_vector(ptr noalias %start.1, ptr noalias
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[START_2]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP13:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
-; CHECK-NEXT: [[TMP15:%.*]] = mul <vscale x 2 x i64> [[TMP13]], splat (i64 1)
-; CHECK-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 2 x i64> [[TMP15]]
+; CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
+; CHECK-NEXT: [[TMP8:%.*]] = mul <vscale x 2 x i64> [[TMP7]], splat (i64 1)
+; CHECK-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 2 x i64> [[TMP8]]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8
; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START_1]], i64 [[OFFSET_IDX]]
-; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i8, <vscale x 2 x ptr> [[VECTOR_GEP]], i64 1
-; CHECK-NEXT: store <vscale x 2 x ptr> [[TMP16]], ptr [[NEXT_GEP]], align 8
-; CHECK-NEXT: [[TMP18:%.*]] = extractelement <vscale x 2 x ptr> [[VECTOR_GEP]], i32 0
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i8>, ptr [[TMP18]], align 1
-; CHECK-NEXT: [[TMP20:%.*]] = add <vscale x 2 x i8> [[WIDE_LOAD]], splat (i8 1)
-; CHECK-NEXT: store <vscale x 2 x i8> [[TMP20]], ptr [[TMP18]], align 1
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
-; CHECK-NEXT: [[TMP11:%.*]] = mul i64 1, [[TMP6]]
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, <vscale x 2 x ptr> [[VECTOR_GEP]], i64 1
+; CHECK-NEXT: store <vscale x 2 x ptr> [[TMP9]], ptr [[NEXT_GEP]], align 8
+; CHECK-NEXT: [[TMP10:%.*]] = extractelement <vscale x 2 x ptr> [[VECTOR_GEP]], i32 0
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i8>, ptr [[TMP10]], align 1
+; CHECK-NEXT: [[TMP12:%.*]] = add <vscale x 2 x i8> [[WIDE_LOAD]], splat (i8 1)
+; CHECK-NEXT: store <vscale x 2 x i8> [[TMP12]], ptr [[TMP10]], align 1
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; CHECK-NEXT: [[TMP11:%.*]] = mul i64 1, [[TMP3]]
; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP11]]
; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -112,8 +110,6 @@ define void @pointer_induction(ptr noalias %start, i64 %N) {
; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[START:%.*]], i64 [[N_VEC]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
@@ -126,8 +122,8 @@ define void @pointer_induction(ptr noalias %start, i64 %N) {
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i8>, ptr [[TMP15]], align 1
; CHECK-NEXT: [[TMP17:%.*]] = add <vscale x 2 x i8> [[WIDE_LOAD]], splat (i8 1)
; CHECK-NEXT: store <vscale x 2 x i8> [[TMP17]], ptr [[TMP15]], align 1
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX2]], [[TMP6]]
-; CHECK-NEXT: [[TMP10:%.*]] = mul i64 1, [[TMP6]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX2]], [[TMP4]]
+; CHECK-NEXT: [[TMP10:%.*]] = mul i64 1, [[TMP4]]
; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP10]]
; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll
index 2c88e0e..11eef23 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll
@@ -22,11 +22,10 @@ define void @widen_ptr_phi_unrolled(ptr noalias nocapture %a, ptr noalias nocapt
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -8
-; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 3
+; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP5]]
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNOT]]
; CHECK-NEXT: [[TMP26:%.*]] = shl i64 [[N_VEC]], 3
; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[C:%.*]], i64 [[TMP26]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -134,11 +133,10 @@ define void @widen_2ptrs_phi_unrolled(ptr noalias nocapture %dst, ptr noalias no
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -8
-; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]]
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP6:%.*]] = shl nuw nsw i64 [[TMP5]], 3
+; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP6]]
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNOT]]
; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[N_VEC]], 2
; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[SRC:%.*]], i64 [[TMP3]]
; CHECK-NEXT: [[TMP4:%.*]] = shl i64 [[N_VEC]], 2
@@ -225,11 +223,10 @@ define i32 @pointer_iv_mixed(ptr noalias %a, ptr noalias %b, i64 %n) #0 {
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp samesign ult i64 [[SMAX]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -2
-; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[SMAX]], [[DOTNEG]]
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP6:%.*]] = shl nuw nsw i64 [[TMP5]], 1
+; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP6]]
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[SMAX]], [[DOTNOT]]
; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[N_VEC]], 2
; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[TMP3]]
; CHECK-NEXT: [[TMP4:%.*]] = shl i64 [[N_VEC]], 3
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt-epilogue.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt-epilogue.ll
index 5848d31..c54511e 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt-epilogue.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt-epilogue.ll
@@ -22,8 +22,6 @@ define void @simple_histogram(ptr noalias %buckets, ptr readonly %indices, i64 %
; CHECK-NEXT: [[TMP7:%.*]] = shl nuw i64 [[TMP2]], 2
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP7]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = shl nuw i64 [[TMP4]], 2
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH1]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -32,7 +30,7 @@ define void @simple_histogram(ptr noalias %buckets, ptr readonly %indices, i64 %
; CHECK-NEXT: [[TMP14:%.*]] = zext <vscale x 4 x i32> [[WIDE_LOAD1]] to <vscale x 4 x i64>
; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[BUCKETS]], <vscale x 4 x i64> [[TMP14]]
; CHECK-NEXT: call void @llvm.experimental.vector.histogram.add.nxv4p0.i32(<vscale x 4 x ptr> [[TMP15]], i32 1, <vscale x 4 x i1> splat (i1 true))
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
@@ -49,8 +47,6 @@ define void @simple_histogram(ptr noalias %buckets, ptr readonly %indices, i64 %
; CHECK-NEXT: [[TMP25:%.*]] = shl nuw i64 [[TMP24]], 1
; CHECK-NEXT: [[N_MOD_VF2:%.*]] = urem i64 [[N]], [[TMP25]]
; CHECK-NEXT: [[N_VEC3:%.*]] = sub i64 [[N]], [[N_MOD_VF2]]
-; CHECK-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP17:%.*]] = shl nuw i64 [[TMP16]], 1
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: vec.epilog.vector.body:
; CHECK-NEXT: [[INDEX4:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT6:%.*]], [[FOR_BODY]] ]
@@ -59,7 +55,7 @@ define void @simple_histogram(ptr noalias %buckets, ptr readonly %indices, i64 %
; CHECK-NEXT: [[TMP19:%.*]] = zext <vscale x 2 x i32> [[WIDE_LOAD5]] to <vscale x 2 x i64>
; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[BUCKETS]], <vscale x 2 x i64> [[TMP19]]
; CHECK-NEXT: call void @llvm.experimental.vector.histogram.add.nxv2p0.i32(<vscale x 2 x ptr> [[TMP20]], i32 1, <vscale x 2 x i1> splat (i1 true))
-; CHECK-NEXT: [[INDEX_NEXT6]] = add nuw i64 [[INDEX4]], [[TMP17]]
+; CHECK-NEXT: [[INDEX_NEXT6]] = add nuw i64 [[INDEX4]], [[TMP25]]
; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT6]], [[N_VEC3]]
; CHECK-NEXT: br i1 [[TMP21]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: vec.epilog.middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt-too-many-deps.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt-too-many-deps.ll
index abee8b9..baf050c 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt-too-many-deps.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt-too-many-deps.ll
@@ -70,10 +70,8 @@ define void @many_deps(ptr noalias %buckets, ptr %array, ptr %indices, ptr %othe
; NORMAL_DEP_LIMIT-NEXT: [[TMP8:%.*]] = shl nuw i64 [[TMP4]], 2
; NORMAL_DEP_LIMIT-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]]
; NORMAL_DEP_LIMIT-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NORMAL_DEP_LIMIT-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; NORMAL_DEP_LIMIT-NEXT: [[TMP6:%.*]] = shl nuw i64 [[TMP5]], 2
; NORMAL_DEP_LIMIT-NEXT: [[TMP7:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
-; NORMAL_DEP_LIMIT-NEXT: [[TMP9:%.*]] = trunc i64 [[TMP6]] to i32
+; NORMAL_DEP_LIMIT-NEXT: [[TMP9:%.*]] = trunc i64 [[TMP8]] to i32
; NORMAL_DEP_LIMIT-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP9]], i64 0
; NORMAL_DEP_LIMIT-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; NORMAL_DEP_LIMIT-NEXT: br label [[FOR_BODY:%.*]]
@@ -91,7 +89,7 @@ define void @many_deps(ptr noalias %buckets, ptr %array, ptr %indices, ptr %othe
; NORMAL_DEP_LIMIT-NEXT: [[WIDE_LOAD10:%.*]] = load <vscale x 4 x i32>, ptr [[TMP14]], align 4, !alias.scope [[META7:![0-9]+]], !noalias [[META0]]
; NORMAL_DEP_LIMIT-NEXT: [[TMP15:%.*]] = add <vscale x 4 x i32> [[WIDE_LOAD10]], [[VEC_IND]]
; NORMAL_DEP_LIMIT-NEXT: store <vscale x 4 x i32> [[TMP15]], ptr [[TMP14]], align 4, !alias.scope [[META7]], !noalias [[META0]]
-; NORMAL_DEP_LIMIT-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP6]]
+; NORMAL_DEP_LIMIT-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP8]]
; NORMAL_DEP_LIMIT-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[DOTSPLAT]]
; NORMAL_DEP_LIMIT-NEXT: [[TMP16:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
; NORMAL_DEP_LIMIT-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll
index 9257e45..3b19e9ee 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll
@@ -36,11 +36,10 @@ define void @simple_histogram(ptr noalias %buckets, ptr readonly %indices, i64 %
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4
-; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 2
+; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP5]]
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNOT]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -86,11 +85,10 @@ define void @simple_histogram_inc_param(ptr noalias %buckets, ptr readonly %indi
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4
-; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 2
+; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP5]]
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNOT]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -136,11 +134,10 @@ define void @simple_histogram_sub(ptr noalias %buckets, ptr readonly %indices, i
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4
-; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 2
+; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP5]]
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNOT]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -186,11 +183,10 @@ define void @conditional_histogram(ptr noalias %buckets, ptr readonly %indices,
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP3]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4
-; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 2
+; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP5]]
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNOT]]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[FOR_BODY]] ]
@@ -248,11 +244,10 @@ define void @histogram_8bit(ptr noalias %buckets, ptr readonly %indices, i64 %N)
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP9]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4
-; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]]
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2
+; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP4]]
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNOT]]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
@@ -385,11 +380,10 @@ define void @simple_histogram_user_interleave(ptr noalias %buckets, ptr readonly
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -8
-; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 3
+; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP5]]
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNOT]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -446,11 +440,10 @@ define void @histogram_array_3op_gep(i64 noundef %N) #0 {
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4
-; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]]
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2
+; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP4]]
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNOT]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -500,11 +493,10 @@ define void @histogram_array_4op_gep_nonzero_const_idx(i64 noundef %N, ptr reado
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4
-; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]]
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2
+; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP4]]
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNOT]]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
@@ -611,11 +603,10 @@ define void @simple_histogram_rtdepcheck(ptr noalias %buckets, ptr %array, ptr %
; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP5]], [[TMP4]]
; CHECK-NEXT: br i1 [[DIFF_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP6]], -4
-; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]]
; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP8:%.*]] = shl nuw nsw i64 [[TMP7]], 2
+; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP8]]
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNOT]]
; CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
; CHECK-NEXT: [[TMP11:%.*]] = trunc nuw nsw i64 [[TMP8]] to i32
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP11]], i64 0
@@ -714,11 +705,10 @@ define void @simple_histogram_64b(ptr noalias %buckets, ptr readonly %indices, i
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -2
-; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]]
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 1
+; CHECK-NEXT: [[DOTNOT:%.*]] = sub nsw i64 0, [[TMP4]]
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNOT]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/tail-folding-styles.ll b/llvm/test/Transforms/LoopVectorize/AArch64/tail-folding-styles.ll
index 124abc6..cdd41a0 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/tail-folding-styles.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/tail-folding-styles.ll
@@ -22,8 +22,6 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features
; NONE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NONE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[UMAX]], [[TMP3]]
; NONE-NEXT: [[N_VEC:%.*]] = sub i64 [[UMAX]], [[N_MOD_VF]]
-; NONE-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; NONE-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; NONE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[VAL:%.*]], i64 0
; NONE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; NONE-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -31,7 +29,7 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features
; NONE-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT2:%.*]], [[VECTOR_BODY]] ]
; NONE-NEXT: [[TMP5:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i64 [[INDEX1]]
; NONE-NEXT: store <vscale x 4 x i32> [[BROADCAST_SPLAT]], ptr [[TMP5]], align 4
-; NONE-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX1]], [[TMP8]]
+; NONE-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX1]], [[TMP3]]
; NONE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT2]], [[N_VEC]]
; NONE-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; NONE: middle.block:
@@ -61,8 +59,6 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features
; DATA-NEXT: [[N_RND_UP:%.*]] = add i64 [[UMAX]], [[TMP8]]
; DATA-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
; DATA-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; DATA-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
-; DATA-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP12]], 4
; DATA-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[VAL:%.*]], i64 0
; DATA-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; DATA-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -71,7 +67,7 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features
; DATA-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[UMAX]])
; DATA-NEXT: [[TMP10:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i64 [[INDEX1]]
; DATA-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[BROADCAST_SPLAT]], ptr [[TMP10]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]])
-; DATA-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP13]]
+; DATA-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP5]]
; DATA-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT2]], [[N_VEC]]
; DATA-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; DATA: middle.block:
@@ -100,8 +96,6 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features
; DATA_NO_LANEMASK-NEXT: [[N_RND_UP:%.*]] = add i64 [[UMAX]], [[TMP8]]
; DATA_NO_LANEMASK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
; DATA_NO_LANEMASK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; DATA_NO_LANEMASK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
-; DATA_NO_LANEMASK-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 4
; DATA_NO_LANEMASK-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[UMAX]], 1
; DATA_NO_LANEMASK-NEXT: [[BROADCAST_SPLATINSERT5:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0
; DATA_NO_LANEMASK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT5]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
@@ -118,7 +112,7 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features
; DATA_NO_LANEMASK-NEXT: [[TMP12:%.*]] = icmp ule <vscale x 4 x i64> [[VEC_IV]], [[BROADCAST_SPLAT]]
; DATA_NO_LANEMASK-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i64 [[INDEX1]]
; DATA_NO_LANEMASK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[BROADCAST_SPLAT5]], ptr [[TMP13]], i32 4, <vscale x 4 x i1> [[TMP12]])
-; DATA_NO_LANEMASK-NEXT: [[INDEX_NEXT6]] = add i64 [[INDEX1]], [[TMP16]]
+; DATA_NO_LANEMASK-NEXT: [[INDEX_NEXT6]] = add i64 [[INDEX1]], [[TMP5]]
; DATA_NO_LANEMASK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT6]], [[N_VEC]]
; DATA_NO_LANEMASK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; DATA_NO_LANEMASK: middle.block:
@@ -143,12 +137,6 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features
; DATA_AND_CONTROL: vector.ph:
; DATA_AND_CONTROL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; DATA_AND_CONTROL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; DATA_AND_CONTROL-NEXT: [[TMP8:%.*]] = sub i64 [[TMP5]], 1
-; DATA_AND_CONTROL-NEXT: [[N_RND_UP:%.*]] = add i64 [[UMAX]], [[TMP8]]
-; DATA_AND_CONTROL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; DATA_AND_CONTROL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; DATA_AND_CONTROL-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
-; DATA_AND_CONTROL-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP12]], 4
; DATA_AND_CONTROL-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 [[UMAX]])
; DATA_AND_CONTROL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[VAL:%.*]], i64 0
; DATA_AND_CONTROL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
@@ -158,7 +146,7 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features
; DATA_AND_CONTROL-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
; DATA_AND_CONTROL-NEXT: [[TMP10:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i64 [[INDEX1]]
; DATA_AND_CONTROL-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[BROADCAST_SPLAT]], ptr [[TMP10]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]])
-; DATA_AND_CONTROL-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP13]]
+; DATA_AND_CONTROL-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP5]]
; DATA_AND_CONTROL-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX_NEXT2]], i64 [[UMAX]])
; DATA_AND_CONTROL-NEXT: [[TMP14:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
; DATA_AND_CONTROL-NEXT: [[TMP15:%.*]] = extractelement <vscale x 4 x i1> [[TMP14]], i32 0
@@ -185,12 +173,6 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features
; DATA_AND_CONTROL_NO_RT_CHECK: vector.ph:
; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP1]], 1
-; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[UMAX]], [[TMP4]]
-; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
-; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[TMP14:%.*]] = mul nuw i64 [[TMP13]], 4
; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4
; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[UMAX]], [[TMP6]]
@@ -205,7 +187,7 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features
; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i64 [[INDEX1]]
; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[BROADCAST_SPLAT]], ptr [[TMP11]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]])
-; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP14]]
+; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP1]]
; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]])
; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[TMP15:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[TMP16:%.*]] = extractelement <vscale x 4 x i1> [[TMP15]], i32 0
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/vplan-printing.ll b/llvm/test/Transforms/LoopVectorize/AArch64/vplan-printing.ll
index bba9293..4ed9580 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/vplan-printing.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/vplan-printing.ll
@@ -73,7 +73,6 @@ define i32 @print_partial_reduction(ptr %a, ptr %b) {
; CHECK-NEXT: }
; CHECK: VPlan 'Final VPlan for VF={8,16},UF={1}' {
; CHECK-NEXT: Live-in ir<[[EP_VFxUF:.+]]> = VF * UF
-; CHECK-NEXT: Live-in ir<[[EP_VEC_TC:.+]]> = vector-trip-count
; CHECK-NEXT: Live-in ir<1024> = original trip-count
; CHECK-EMPTY:
; CHECK-NEXT: ir-bb<entry>:
@@ -83,7 +82,9 @@ define i32 @print_partial_reduction(ptr %a, ptr %b) {
; CHECK-NEXT: Successor(s): ir-bb<scalar.ph>, ir-bb<vector.ph>
; CHECK-EMPTY:
; CHECK-NEXT: ir-bb<vector.ph>:
-; CHECK-NEXT: EMIT vp<[[RDX_START:%.+]]> = reduction-start-vector ir<0>, ir<0>, ir<4>
+; CHECK-NEXT: EMIT vp<%n.mod.vf> = urem ir<1024>, ir<16>
+; CHECK-NEXT: EMIT vp<[[VEC_TC:%.+]]> = sub ir<1024>, vp<%n.mod.vf>
+; CHECK-NEXT: EMIT vp<[[RDX_START:%.+]]> = reduction-start-vector ir<0>, ir<0>, ir<4>
; CHECK-NEXT: Successor(s): vector.body
; CHECK-EMPTY:
; CHECK-NEXT: vector.body:
@@ -98,12 +99,12 @@ define i32 @print_partial_reduction(ptr %a, ptr %b) {
; CHECK-NEXT: WIDEN ir<%mul> = mul ir<%ext.b>, ir<%ext.a>
; CHECK-NEXT: PARTIAL-REDUCE ir<%add> = add ir<%accum>, ir<%mul>
; CHECK-NEXT: EMIT vp<[[EP_IV_NEXT:%.+]]> = add nuw vp<[[EP_IV]]>, ir<16>
-; CHECK-NEXT: EMIT branch-on-count vp<[[EP_IV_NEXT]]>, ir<1024>
+; CHECK-NEXT: EMIT branch-on-count vp<[[EP_IV_NEXT]]>, vp<[[VEC_TC]]>
; CHECK-NEXT: Successor(s): middle.block, vector.body
; CHECK-EMPTY:
; CHECK-NEXT: middle.block:
; CHECK-NEXT: EMIT vp<[[RED_RESULT:%.+]]> = compute-reduction-result ir<%accum>, ir<%add>
-; CHECK-NEXT: EMIT vp<[[CMP:%.+]]> = icmp eq ir<1024>, ir<1024>
+; CHECK-NEXT: EMIT vp<[[CMP:%.+]]> = icmp eq ir<1024>, vp<[[VEC_TC]]>
; CHECK-NEXT: EMIT branch-on-cond vp<[[CMP]]>
; CHECK-NEXT: Successor(s): ir-bb<exit>, ir-bb<scalar.ph>
; CHECK-EMPTY:
@@ -112,7 +113,7 @@ define i32 @print_partial_reduction(ptr %a, ptr %b) {
; CHECK-NEXT: No successors
; CHECK-EMPTY:
; CHECK-NEXT: ir-bb<scalar.ph>:
-; CHECK-NEXT: EMIT-SCALAR vp<[[EP_RESUME:%.+]]> = phi [ ir<1024>, middle.block ], [ ir<0>, ir-bb<entry> ]
+; CHECK-NEXT: EMIT-SCALAR vp<[[EP_RESUME:%.+]]> = phi [ vp<[[VEC_TC]]>, middle.block ], [ ir<0>, ir-bb<entry> ]
; CHECK-NEXT: EMIT-SCALAR vp<[[EP_MERGE:%.+]]> = phi [ vp<[[RED_RESULT]]>, middle.block ], [ ir<0>, ir-bb<entry> ]
; CHECK-NEXT: Successor(s): ir-bb<for.body>
; CHECK-EMPTY:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/wider-VF-for-callinst.ll b/llvm/test/Transforms/LoopVectorize/AArch64/wider-VF-for-callinst.ll
index c1d4317..9edd6ce 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/wider-VF-for-callinst.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/wider-VF-for-callinst.ll
@@ -13,8 +13,6 @@ define void @test_widen(ptr noalias %a, ptr readnone %b) #1 {
; WIDE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; WIDE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]]
; WIDE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]]
-; WIDE-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
-; WIDE-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 4
; WIDE-NEXT: br label [[VECTOR_BODY:%.*]]
; WIDE: vector.body:
; WIDE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -24,7 +22,7 @@ define void @test_widen(ptr noalias %a, ptr readnone %b) #1 {
; WIDE-NEXT: [[TMP6:%.*]] = call <vscale x 4 x float> @foo_vector(<vscale x 4 x float> [[TMP5]], <vscale x 4 x i1> splat (i1 true))
; WIDE-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]]
; WIDE-NEXT: store <vscale x 4 x float> [[TMP6]], ptr [[TMP7]], align 4
-; WIDE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]]
+; WIDE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; WIDE-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; WIDE-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; WIDE: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-known-trip-count.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-known-trip-count.ll
index f4102ff..fe3504b 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/mve-known-trip-count.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-known-trip-count.ll
@@ -239,8 +239,8 @@ for.body: ; preds = %entry, %for.body
define dso_local i32 @predicated_test(i32 noundef %0, ptr %glob) #0 {
%2 = alloca [101 x i32], align 4
%3 = alloca [21 x i32], align 4
- call void @llvm.lifetime.start.p0(i64 404, ptr nonnull %2)
- call void @llvm.lifetime.start.p0(i64 84, ptr nonnull %3)
+ call void @llvm.lifetime.start.p0(ptr nonnull %2)
+ call void @llvm.lifetime.start.p0(ptr nonnull %3)
%4 = icmp sgt i32 %0, 0
br i1 %4, label %5, label %159
@@ -433,8 +433,8 @@ define dso_local i32 @predicated_test(i32 noundef %0, ptr %glob) #0 {
br label %159
159: ; preds = %158, %1
- call void @llvm.lifetime.end.p0(i64 84, ptr nonnull %3)
- call void @llvm.lifetime.end.p0(i64 404, ptr nonnull %2)
+ call void @llvm.lifetime.end.p0(ptr nonnull %3)
+ call void @llvm.lifetime.end.p0(ptr nonnull %2)
ret i32 0
}
@@ -472,7 +472,7 @@ while.end: ; preds = %while.end.loopexit,
}
-declare void @llvm.lifetime.start.p0(i64, ptr)
-declare void @llvm.lifetime.end.p0(i64, ptr)
+declare void @llvm.lifetime.start.p0(ptr)
+declare void @llvm.lifetime.end.p0(ptr)
attributes #0 = { "target-features"="+mve.fp" }
diff --git a/llvm/test/Transforms/LoopVectorize/Hexagon/minimum-vf.ll b/llvm/test/Transforms/LoopVectorize/Hexagon/minimum-vf.ll
index 1ac556a..26bab4d 100644
--- a/llvm/test/Transforms/LoopVectorize/Hexagon/minimum-vf.ll
+++ b/llvm/test/Transforms/LoopVectorize/Hexagon/minimum-vf.ll
@@ -11,14 +11,14 @@ target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i
@g0 = external dso_local local_unnamed_addr global ptr, align 4
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #0
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #0
+declare void @llvm.lifetime.start.p0(ptr nocapture) #0
+declare void @llvm.lifetime.end.p0(ptr nocapture) #0
; Function Attrs: nounwind
define hidden fastcc void @f0(ptr nocapture %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4, i8 zeroext %a5) unnamed_addr #1 {
b0:
%v0 = alloca [4 x [9 x i16]], align 8
- call void @llvm.lifetime.start.p0(i64 72, ptr nonnull %v0) #2
+ call void @llvm.lifetime.start.p0(ptr nonnull %v0) #2
%v2 = add i32 %a1, -2
%v3 = add i32 %a3, -9
%v4 = icmp ugt i32 %v2, %v3
@@ -147,7 +147,7 @@ b1: ; preds = %b1, %b0
br i1 %v120, label %b2, label %b1
b2: ; preds = %b1, %b0
- call void @llvm.lifetime.end.p0(i64 72, ptr nonnull %v0) #2
+ call void @llvm.lifetime.end.p0(ptr nonnull %v0) #2
ret void
}
diff --git a/llvm/test/Transforms/LoopVectorize/PowerPC/vectorize-only-for-real.ll b/llvm/test/Transforms/LoopVectorize/PowerPC/vectorize-only-for-real.ll
index 1bacb57..6b72f20 100644
--- a/llvm/test/Transforms/LoopVectorize/PowerPC/vectorize-only-for-real.ll
+++ b/llvm/test/Transforms/LoopVectorize/PowerPC/vectorize-only-for-real.ll
@@ -10,11 +10,11 @@ define zeroext i32 @test() #0 {
entry:
%a = alloca [1600 x i32], align 4
%c = alloca [1600 x i32], align 4
- call void @llvm.lifetime.start(i64 6400, ptr %a) #3
+ call void @llvm.lifetime.start(ptr %a) #3
br label %for.body
for.cond.cleanup: ; preds = %for.body
- call void @llvm.lifetime.start(i64 6400, ptr %c) #3
+ call void @llvm.lifetime.start(ptr %c) #3
%call = call signext i32 @bar(ptr %a, ptr %c) #3
br label %for.body6
@@ -28,8 +28,8 @@ for.body: ; preds = %for.body, %entry
br i1 %exitcond27, label %for.cond.cleanup, label %for.body
for.cond.cleanup5: ; preds = %for.body6
- call void @llvm.lifetime.end(i64 6400, ptr nonnull %c) #3
- call void @llvm.lifetime.end(i64 6400, ptr %a) #3
+ call void @llvm.lifetime.end(ptr nonnull %c) #3
+ call void @llvm.lifetime.end(ptr %a) #3
ret i32 %add
for.body6: ; preds = %for.body6, %for.cond.cleanup
@@ -44,10 +44,10 @@ for.body6: ; preds = %for.body6, %for.con
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start(ptr nocapture) #1
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end(ptr nocapture) #1
declare signext i32 @bar(ptr, ptr) #2
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll b/llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll
index 162440a..f3e0a5a 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 5
; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v -S | FileCheck %s -check-prefix=NO-ZVFBFMIN
+; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v -S -prefer-predicate-over-epilogue=scalar-epilogue | FileCheck %s -check-prefix=NO-ZVFBFMIN
; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v -S -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue | FileCheck %s -check-prefix=NO-ZVFBFMIN
; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v,+zvfbfmin -S | FileCheck %s -check-prefix=ZVFBFMIN
@@ -25,37 +26,33 @@ define void @fadd(ptr noalias %a, ptr noalias %b, i64 %n) {
; ZVFBFMIN-LABEL: define void @fadd(
; ZVFBFMIN-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
; ZVFBFMIN-NEXT: [[ENTRY:.*]]:
-; ZVFBFMIN-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; ZVFBFMIN-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 8
-; ZVFBFMIN-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP8]]
-; ZVFBFMIN-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; ZVFBFMIN-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; ZVFBFMIN: [[VECTOR_PH]]:
-; ZVFBFMIN-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; ZVFBFMIN-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 8
-; ZVFBFMIN-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP10]]
-; ZVFBFMIN-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; ZVFBFMIN-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
; ZVFBFMIN-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP12]], 8
; ZVFBFMIN-NEXT: br label %[[VECTOR_BODY:.*]]
; ZVFBFMIN: [[VECTOR_BODY]]:
-; ZVFBFMIN-NEXT: [[TMP0:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; ZVFBFMIN-NEXT: [[TMP0:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; ZVFBFMIN-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; ZVFBFMIN-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; ZVFBFMIN-NEXT: [[TMP1:%.*]] = getelementptr bfloat, ptr [[A]], i64 [[TMP0]]
; ZVFBFMIN-NEXT: [[TMP2:%.*]] = getelementptr bfloat, ptr [[B]], i64 [[TMP0]]
-; ZVFBFMIN-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x bfloat>, ptr [[TMP1]], align 2
-; ZVFBFMIN-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x bfloat>, ptr [[TMP2]], align 2
+; ZVFBFMIN-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x bfloat> @llvm.vp.load.nxv8bf16.p0(ptr align 2 [[TMP1]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP6]])
+; ZVFBFMIN-NEXT: [[WIDE_LOAD1:%.*]] = call <vscale x 8 x bfloat> @llvm.vp.load.nxv8bf16.p0(ptr align 2 [[TMP2]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP6]])
; ZVFBFMIN-NEXT: [[TMP11:%.*]] = fadd <vscale x 8 x bfloat> [[WIDE_LOAD]], [[WIDE_LOAD1]]
-; ZVFBFMIN-NEXT: store <vscale x 8 x bfloat> [[TMP11]], ptr [[TMP1]], align 2
-; ZVFBFMIN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP0]], [[TMP5]]
-; ZVFBFMIN-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; ZVFBFMIN-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; ZVFBFMIN-NEXT: call void @llvm.vp.store.nxv8bf16.p0(<vscale x 8 x bfloat> [[TMP11]], ptr align 2 [[TMP1]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP6]])
+; ZVFBFMIN-NEXT: [[TMP13:%.*]] = zext i32 [[TMP6]] to i64
+; ZVFBFMIN-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP13]], [[TMP0]]
+; ZVFBFMIN-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]]
+; ZVFBFMIN-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; ZVFBFMIN-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; ZVFBFMIN: [[MIDDLE_BLOCK]]:
-; ZVFBFMIN-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; ZVFBFMIN-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; ZVFBFMIN-NEXT: br label %[[EXIT:.*]]
; ZVFBFMIN: [[SCALAR_PH]]:
-; ZVFBFMIN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; ZVFBFMIN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; ZVFBFMIN-NEXT: br label %[[LOOP:.*]]
; ZVFBFMIN: [[LOOP]]:
-; ZVFBFMIN-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ]
+; ZVFBFMIN-NEXT: [[I:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ]
; ZVFBFMIN-NEXT: [[A_GEP:%.*]] = getelementptr bfloat, ptr [[A]], i64 [[I]]
; ZVFBFMIN-NEXT: [[B_GEP:%.*]] = getelementptr bfloat, ptr [[B]], i64 [[I]]
; ZVFBFMIN-NEXT: [[X:%.*]] = load bfloat, ptr [[A_GEP]], align 2
@@ -64,7 +61,7 @@ define void @fadd(ptr noalias %a, ptr noalias %b, i64 %n) {
; ZVFBFMIN-NEXT: store bfloat [[Z]], ptr [[A_GEP]], align 2
; ZVFBFMIN-NEXT: [[I_NEXT]] = add i64 [[I]], 1
; ZVFBFMIN-NEXT: [[DONE:%.*]] = icmp eq i64 [[I_NEXT]], [[N]]
-; ZVFBFMIN-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
+; ZVFBFMIN-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP4:![0-9]+]]
; ZVFBFMIN: [[EXIT]]:
; ZVFBFMIN-NEXT: ret void
;
@@ -137,41 +134,37 @@ define void @vfwmaccbf16.vv(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64
; ZVFBFMIN-LABEL: define void @vfwmaccbf16.vv(
; ZVFBFMIN-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; ZVFBFMIN-NEXT: [[ENTRY:.*]]:
-; ZVFBFMIN-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; ZVFBFMIN-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; ZVFBFMIN-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; ZVFBFMIN-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; ZVFBFMIN-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; ZVFBFMIN: [[VECTOR_PH]]:
-; ZVFBFMIN-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; ZVFBFMIN-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; ZVFBFMIN-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; ZVFBFMIN-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; ZVFBFMIN-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; ZVFBFMIN-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; ZVFBFMIN-NEXT: br label %[[VECTOR_BODY:.*]]
; ZVFBFMIN: [[VECTOR_BODY]]:
-; ZVFBFMIN-NEXT: [[TMP6:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; ZVFBFMIN-NEXT: [[TMP6:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; ZVFBFMIN-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; ZVFBFMIN-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; ZVFBFMIN-NEXT: [[TMP7:%.*]] = getelementptr bfloat, ptr [[A]], i64 [[TMP6]]
; ZVFBFMIN-NEXT: [[TMP8:%.*]] = getelementptr bfloat, ptr [[B]], i64 [[TMP6]]
; ZVFBFMIN-NEXT: [[TMP9:%.*]] = getelementptr float, ptr [[C]], i64 [[TMP6]]
-; ZVFBFMIN-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x bfloat>, ptr [[TMP7]], align 2
-; ZVFBFMIN-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x bfloat>, ptr [[TMP8]], align 2
-; ZVFBFMIN-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 4 x float>, ptr [[TMP9]], align 4
+; ZVFBFMIN-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x bfloat> @llvm.vp.load.nxv4bf16.p0(ptr align 2 [[TMP7]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; ZVFBFMIN-NEXT: [[WIDE_LOAD1:%.*]] = call <vscale x 4 x bfloat> @llvm.vp.load.nxv4bf16.p0(ptr align 2 [[TMP8]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; ZVFBFMIN-NEXT: [[WIDE_LOAD2:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP9]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; ZVFBFMIN-NEXT: [[TMP13:%.*]] = fpext <vscale x 4 x bfloat> [[WIDE_LOAD]] to <vscale x 4 x float>
; ZVFBFMIN-NEXT: [[TMP14:%.*]] = fpext <vscale x 4 x bfloat> [[WIDE_LOAD1]] to <vscale x 4 x float>
; ZVFBFMIN-NEXT: [[TMP15:%.*]] = call <vscale x 4 x float> @llvm.fmuladd.nxv4f32(<vscale x 4 x float> [[TMP13]], <vscale x 4 x float> [[TMP14]], <vscale x 4 x float> [[WIDE_LOAD2]])
-; ZVFBFMIN-NEXT: store <vscale x 4 x float> [[TMP15]], ptr [[TMP9]], align 4
-; ZVFBFMIN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP6]], [[TMP5]]
-; ZVFBFMIN-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; ZVFBFMIN-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; ZVFBFMIN-NEXT: call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> [[TMP15]], ptr align 4 [[TMP9]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; ZVFBFMIN-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64
+; ZVFBFMIN-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP12]], [[TMP6]]
+; ZVFBFMIN-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP12]]
+; ZVFBFMIN-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; ZVFBFMIN-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; ZVFBFMIN: [[MIDDLE_BLOCK]]:
-; ZVFBFMIN-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; ZVFBFMIN-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; ZVFBFMIN-NEXT: br label %[[EXIT:.*]]
; ZVFBFMIN: [[SCALAR_PH]]:
-; ZVFBFMIN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; ZVFBFMIN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; ZVFBFMIN-NEXT: br label %[[LOOP:.*]]
; ZVFBFMIN: [[LOOP]]:
-; ZVFBFMIN-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ]
+; ZVFBFMIN-NEXT: [[I:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ]
; ZVFBFMIN-NEXT: [[A_GEP:%.*]] = getelementptr bfloat, ptr [[A]], i64 [[I]]
; ZVFBFMIN-NEXT: [[B_GEP:%.*]] = getelementptr bfloat, ptr [[B]], i64 [[I]]
; ZVFBFMIN-NEXT: [[C_GEP:%.*]] = getelementptr float, ptr [[C]], i64 [[I]]
@@ -184,7 +177,7 @@ define void @vfwmaccbf16.vv(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64
; ZVFBFMIN-NEXT: store float [[FMULADD]], ptr [[C_GEP]], align 4
; ZVFBFMIN-NEXT: [[I_NEXT]] = add i64 [[I]], 1
; ZVFBFMIN-NEXT: [[DONE:%.*]] = icmp eq i64 [[I_NEXT]], [[N]]
-; ZVFBFMIN-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP5:![0-9]+]]
+; ZVFBFMIN-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP6:![0-9]+]]
; ZVFBFMIN: [[EXIT]]:
; ZVFBFMIN-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/blend-any-of-reduction-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/blend-any-of-reduction-cost.ll
index 75ae6df..9f7ac7a 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/blend-any-of-reduction-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/blend-any-of-reduction-cost.ll
@@ -62,50 +62,10 @@ define i32 @any_of_reduction_used_in_blend_with_multiple_phis(ptr %src, i64 %N,
; CHECK-LABEL: define i32 @any_of_reduction_used_in_blend_with_multiple_phis(
; CHECK-SAME: ptr [[SRC:%.*]], i64 [[N:%.*]], i1 [[C_0:%.*]], i1 [[C_1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
-; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 2 x i1> poison, i1 [[C_1]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 2 x i1> [[BROADCAST_SPLATINSERT1]], <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i1> poison, i1 [[C_0]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i1> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP6:%.*]] = xor <vscale x 2 x i1> [[BROADCAST_SPLAT]], splat (i1 true)
-; CHECK-NEXT: [[TMP7:%.*]] = xor <vscale x 2 x i1> [[BROADCAST_SPLAT2]], splat (i1 true)
-; CHECK-NEXT: [[TMP8:%.*]] = select <vscale x 2 x i1> [[TMP6]], <vscale x 2 x i1> [[TMP7]], <vscale x 2 x i1> zeroinitializer
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 2 x ptr> poison, ptr [[SRC]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 2 x ptr> [[BROADCAST_SPLATINSERT3]], <vscale x 2 x ptr> poison, <vscale x 2 x i32> zeroinitializer
-; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
-; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[PREDPHI:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x ptr> @llvm.masked.gather.nxv2p0.nxv2p0(<vscale x 2 x ptr> [[BROADCAST_SPLAT4]], i32 8, <vscale x 2 x i1> [[TMP8]], <vscale x 2 x ptr> poison)
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq <vscale x 2 x ptr> [[WIDE_MASKED_GATHER]], zeroinitializer
-; CHECK-NEXT: [[TMP10:%.*]] = or <vscale x 2 x i1> [[VEC_PHI]], [[TMP9]]
-; CHECK-NEXT: [[PREDPHI]] = select <vscale x 2 x i1> [[TMP8]], <vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
-; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP12:%.*]] = call i1 @llvm.vector.reduce.or.nxv2i1(<vscale x 2 x i1> [[PREDPHI]])
-; CHECK-NEXT: [[TMP13:%.*]] = freeze i1 [[TMP12]]
-; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP13]], i32 0, i32 0
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
-; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[ANY_OF_RED:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[ANY_OF_RED_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH]] ]
+; CHECK-NEXT: [[ANY_OF_RED:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[ANY_OF_RED_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH]] ]
; CHECK-NEXT: br i1 [[C_0]], label %[[X_1:.*]], label %[[ELSE_1:.*]]
; CHECK: [[ELSE_1]]:
; CHECK-NEXT: br i1 [[C_1]], label %[[X_1]], label %[[ELSE_2:.*]]
@@ -121,9 +81,9 @@ define i32 @any_of_reduction_used_in_blend_with_multiple_phis(ptr %src, i64 %N,
; CHECK-NEXT: [[ANY_OF_RED_NEXT]] = phi i32 [ [[P]], %[[X_1]] ], [ [[SEL]], %[[ELSE_2]] ]
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP_HEADER]]
; CHECK: [[EXIT]]:
-; CHECK-NEXT: [[RES:%.*]] = phi i32 [ [[ANY_OF_RED_NEXT]], %[[LOOP_LATCH]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[RES:%.*]] = phi i32 [ [[ANY_OF_RED_NEXT]], %[[LOOP_LATCH]] ]
; CHECK-NEXT: ret i32 [[RES]]
;
entry:
@@ -159,9 +119,3 @@ exit:
}
attributes #0 = { "target-cpu"="sifive-p670" }
-;.
-; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
-; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
-; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
-; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
-;.
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll b/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll
index aad9128..606b863 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll
@@ -11,45 +11,40 @@ define void @block_with_dead_inst_1(ptr %src, i64 %N) #0 {
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N]], -3
; CHECK-NEXT: [[TMP1:%.*]] = udiv i64 [[TMP0]], 3
; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
-; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[TMP2]], [[TMP4]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], [[TMP6]]
-; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
-; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i64 [[TMP6]], i64 [[N_MOD_VF]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[TMP8]]
; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 8
-; CHECK-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], 3
; CHECK-NEXT: [[TMP11:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
; CHECK-NEXT: [[TMP13:%.*]] = mul <vscale x 8 x i64> [[TMP11]], splat (i64 3)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 8 x i64> zeroinitializer, [[TMP13]]
-; CHECK-NEXT: [[TMP16:%.*]] = mul i64 3, [[TMP10]]
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP16]], i64 0
-; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 8 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP2]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
+; CHECK-NEXT: [[TMP17:%.*]] = zext i32 [[TMP12]] to i64
+; CHECK-NEXT: [[TMP16:%.*]] = mul i64 3, [[TMP17]]
+; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP16]], i64 0
+; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: [[TMP20:%.*]] = getelementptr i16, ptr [[SRC]], <vscale x 8 x i64> [[VEC_IND]]
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> [[TMP20]], i32 2, <vscale x 8 x i1> splat (i1 true))
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> align 2 [[TMP20]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP12]])
+; CHECK-NEXT: [[TMP14:%.*]] = zext i32 [[TMP12]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP14]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP2]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: br label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 0, %[[MIDDLE_BLOCK]] ], [ 1, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 1, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ 1, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
; CHECK-NEXT: [[XOR]] = xor i16 0, 0
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
@@ -62,7 +57,7 @@ define void @block_with_dead_inst_1(ptr %src, i64 %N) #0 {
; CHECK-NEXT: store i16 [[XOR]], ptr [[GEP]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3
; CHECK-NEXT: [[TMP25:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[TMP25]], label %[[EXIT:.*]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP25]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -96,45 +91,40 @@ define void @block_with_dead_inst_2(ptr %src) #0 {
; CHECK-LABEL: define void @block_with_dead_inst_2(
; CHECK-SAME: ptr [[SRC:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 333, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 333, [[TMP3]]
-; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
-; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i64 [[TMP3]], i64 [[N_MOD_VF]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 333, [[TMP5]]
; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
-; CHECK-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], 3
-; CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
-; CHECK-NEXT: [[TMP10:%.*]] = mul <vscale x 4 x i64> [[TMP8]], splat (i64 3)
-; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP10]]
-; CHECK-NEXT: [[TMP13:%.*]] = mul i64 3, [[TMP7]]
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP13]], i64 0
-; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP6]], 8
+; CHECK-NEXT: [[TMP5:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
+; CHECK-NEXT: [[TMP8:%.*]] = mul <vscale x 8 x i64> [[TMP5]], splat (i64 3)
+; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 8 x i64> zeroinitializer, [[TMP8]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i16, ptr [[SRC]], <vscale x 4 x i64> [[VEC_IND]]
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i16.nxv4p0(<vscale x 4 x i16> zeroinitializer, <vscale x 4 x ptr> [[TMP17]], i32 2, <vscale x 4 x i1> splat (i1 true))
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
-; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 8 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 333, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
+; CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP9]] to i64
+; CHECK-NEXT: [[TMP13:%.*]] = mul i64 3, [[TMP7]]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP13]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i16, ptr [[SRC]], <vscale x 8 x i64> [[VEC_IND]]
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> align 2 [[TMP10]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP9]])
+; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP9]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 333
+; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: br label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 0, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
; CHECK-NEXT: [[XOR]] = xor i16 0, 0
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
@@ -147,7 +137,7 @@ define void @block_with_dead_inst_2(ptr %src) #0 {
; CHECK-NEXT: store i16 [[XOR]], ptr [[GEP]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3
; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -181,45 +171,40 @@ define void @multiple_blocks_with_dead_insts_3(ptr %src) #0 {
; CHECK-LABEL: define void @multiple_blocks_with_dead_insts_3(
; CHECK-SAME: ptr [[SRC:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 333, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 333, [[TMP3]]
-; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
-; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i64 [[TMP3]], i64 [[N_MOD_VF]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 333, [[TMP5]]
; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
-; CHECK-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], 3
-; CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
-; CHECK-NEXT: [[TMP10:%.*]] = mul <vscale x 4 x i64> [[TMP8]], splat (i64 3)
-; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP10]]
-; CHECK-NEXT: [[TMP13:%.*]] = mul i64 3, [[TMP7]]
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP13]], i64 0
-; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP6]], 8
+; CHECK-NEXT: [[TMP5:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
+; CHECK-NEXT: [[TMP8:%.*]] = mul <vscale x 8 x i64> [[TMP5]], splat (i64 3)
+; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 8 x i64> zeroinitializer, [[TMP8]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i16, ptr [[SRC]], <vscale x 4 x i64> [[VEC_IND]]
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i16.nxv4p0(<vscale x 4 x i16> zeroinitializer, <vscale x 4 x ptr> [[TMP17]], i32 2, <vscale x 4 x i1> splat (i1 true))
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
-; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 8 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 333, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
+; CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP9]] to i64
+; CHECK-NEXT: [[TMP13:%.*]] = mul i64 3, [[TMP7]]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP13]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i16, ptr [[SRC]], <vscale x 8 x i64> [[VEC_IND]]
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> align 2 [[TMP10]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP9]])
+; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP9]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 333
+; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: br label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 0, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
; CHECK-NEXT: [[XOR]] = xor i16 0, 0
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
@@ -235,7 +220,7 @@ define void @multiple_blocks_with_dead_insts_3(ptr %src) #0 {
; CHECK-NEXT: store i16 [[XOR]], ptr [[GEP]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3
; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -276,45 +261,40 @@ define void @multiple_blocks_with_dead_insts_4(ptr %src, i64 %N) #0 {
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N]], -3
; CHECK-NEXT: [[TMP1:%.*]] = udiv i64 [[TMP0]], 3
; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
-; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[TMP2]], [[TMP4]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], [[TMP6]]
-; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
-; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i64 [[TMP6]], i64 [[N_MOD_VF]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[TMP8]]
; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 8
-; CHECK-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], 3
; CHECK-NEXT: [[TMP11:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
; CHECK-NEXT: [[TMP13:%.*]] = mul <vscale x 8 x i64> [[TMP11]], splat (i64 3)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 8 x i64> zeroinitializer, [[TMP13]]
-; CHECK-NEXT: [[TMP16:%.*]] = mul i64 3, [[TMP10]]
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP16]], i64 0
-; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 8 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP2]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
+; CHECK-NEXT: [[TMP17:%.*]] = zext i32 [[TMP12]] to i64
+; CHECK-NEXT: [[TMP16:%.*]] = mul i64 3, [[TMP17]]
+; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP16]], i64 0
+; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: [[TMP20:%.*]] = getelementptr i16, ptr [[SRC]], <vscale x 8 x i64> [[VEC_IND]]
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> [[TMP20]], i32 2, <vscale x 8 x i1> splat (i1 true))
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> align 2 [[TMP20]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP12]])
+; CHECK-NEXT: [[TMP14:%.*]] = zext i32 [[TMP12]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP14]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP2]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: br label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 0, %[[MIDDLE_BLOCK]] ], [ 1, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 1, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ 1, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
; CHECK-NEXT: [[XOR]] = xor i16 0, 0
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
@@ -332,7 +312,7 @@ define void @multiple_blocks_with_dead_insts_4(ptr %src, i64 %N) #0 {
; CHECK-NEXT: store i16 [[XOR]], ptr [[GEP]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3
; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -373,45 +353,40 @@ define void @multiple_blocks_with_dead_inst_multiple_successors_5(ptr %src) #0 {
; CHECK-LABEL: define void @multiple_blocks_with_dead_inst_multiple_successors_5(
; CHECK-SAME: ptr [[SRC:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 333, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 333, [[TMP3]]
-; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
-; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i64 [[TMP3]], i64 [[N_MOD_VF]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 333, [[TMP5]]
; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
-; CHECK-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], 3
-; CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
-; CHECK-NEXT: [[TMP10:%.*]] = mul <vscale x 4 x i64> [[TMP8]], splat (i64 3)
-; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP10]]
-; CHECK-NEXT: [[TMP13:%.*]] = mul i64 3, [[TMP7]]
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP13]], i64 0
-; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP6]], 8
+; CHECK-NEXT: [[TMP5:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
+; CHECK-NEXT: [[TMP8:%.*]] = mul <vscale x 8 x i64> [[TMP5]], splat (i64 3)
+; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 8 x i64> zeroinitializer, [[TMP8]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i16, ptr [[SRC]], <vscale x 4 x i64> [[VEC_IND]]
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i16.nxv4p0(<vscale x 4 x i16> zeroinitializer, <vscale x 4 x ptr> [[TMP17]], i32 2, <vscale x 4 x i1> splat (i1 true))
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
-; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 8 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 333, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
+; CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP9]] to i64
+; CHECK-NEXT: [[TMP13:%.*]] = mul i64 3, [[TMP7]]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP13]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i16, ptr [[SRC]], <vscale x 8 x i64> [[VEC_IND]]
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> align 2 [[TMP10]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP9]])
+; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP9]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 333
+; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: br label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 0, %[[MIDDLE_BLOCK]] ], [ 1, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 1, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ 1, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
; CHECK-NEXT: [[XOR]] = xor i16 0, 0
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
@@ -431,7 +406,7 @@ define void @multiple_blocks_with_dead_inst_multiple_successors_5(ptr %src) #0 {
; CHECK-NEXT: store i16 [[XOR]], ptr [[GEP]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3
; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -478,45 +453,56 @@ define void @multiple_blocks_with_dead_inst_multiple_successors_6(ptr %src, i1 %
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N]], -3
; CHECK-NEXT: [[TMP1:%.*]] = udiv i64 [[TMP0]], 3
; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
-; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[TMP2]], [[TMP4]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], [[TMP6]]
-; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
-; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i64 [[TMP6]], i64 [[N_MOD_VF]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[TMP8]]
; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 8
-; CHECK-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], 3
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i1> poison, i1 [[IC]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 8 x i1> [[BROADCAST_SPLATINSERT]], <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP8:%.*]] = xor <vscale x 8 x i1> [[BROADCAST_SPLAT]], splat (i1 true)
; CHECK-NEXT: [[TMP11:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
; CHECK-NEXT: [[TMP13:%.*]] = mul <vscale x 8 x i64> [[TMP11]], splat (i64 3)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 8 x i64> zeroinitializer, [[TMP13]]
-; CHECK-NEXT: [[TMP16:%.*]] = mul i64 3, [[TMP10]]
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP16]], i64 0
-; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 8 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP2]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP27:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 8 x i32> poison, i32 [[TMP27]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 8 x i32> [[BROADCAST_SPLATINSERT3]], <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP12:%.*]] = zext i32 [[TMP27]] to i64
+; CHECK-NEXT: [[TMP16:%.*]] = mul i64 3, [[TMP12]]
+; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP16]], i64 0
+; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP14:%.*]] = call <vscale x 8 x i32> @llvm.stepvector.nxv8i32()
+; CHECK-NEXT: [[TMP15:%.*]] = icmp ult <vscale x 8 x i32> [[TMP14]], [[BROADCAST_SPLAT4]]
; CHECK-NEXT: [[TMP20:%.*]] = getelementptr i16, ptr [[SRC]], <vscale x 8 x i64> [[VEC_IND]]
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> [[TMP20]], i32 2, <vscale x 8 x i1> splat (i1 true))
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
+; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 8 x i16> @llvm.vp.gather.nxv8i16.nxv8p0(<vscale x 8 x ptr> align 2 [[TMP20]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP27]])
+; CHECK-NEXT: [[TMP17:%.*]] = icmp eq <vscale x 8 x i16> [[WIDE_MASKED_GATHER]], zeroinitializer
+; CHECK-NEXT: [[TMP18:%.*]] = select <vscale x 8 x i1> [[TMP15]], <vscale x 8 x i1> [[TMP17]], <vscale x 8 x i1> zeroinitializer
+; CHECK-NEXT: [[TMP19:%.*]] = select <vscale x 8 x i1> [[TMP18]], <vscale x 8 x i1> [[TMP8]], <vscale x 8 x i1> zeroinitializer
+; CHECK-NEXT: [[TMP28:%.*]] = xor <vscale x 8 x i1> [[TMP17]], splat (i1 true)
+; CHECK-NEXT: [[TMP21:%.*]] = select <vscale x 8 x i1> [[TMP15]], <vscale x 8 x i1> [[TMP28]], <vscale x 8 x i1> zeroinitializer
+; CHECK-NEXT: [[TMP22:%.*]] = or <vscale x 8 x i1> [[TMP19]], [[TMP21]]
+; CHECK-NEXT: [[TMP23:%.*]] = select <vscale x 8 x i1> [[TMP18]], <vscale x 8 x i1> [[BROADCAST_SPLAT]], <vscale x 8 x i1> zeroinitializer
+; CHECK-NEXT: [[TMP24:%.*]] = or <vscale x 8 x i1> [[TMP22]], [[TMP23]]
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> align 2 [[TMP20]], <vscale x 8 x i1> [[TMP24]], i32 [[TMP27]])
+; CHECK-NEXT: [[TMP25:%.*]] = zext i32 [[TMP27]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP25]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP25]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP2]]
+; CHECK-NEXT: br i1 [[TMP26]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: br label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 0, %[[MIDDLE_BLOCK]] ], [ 1, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 1, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ 1, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
; CHECK-NEXT: [[XOR]] = xor i16 0, 0
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
@@ -536,7 +522,7 @@ define void @multiple_blocks_with_dead_inst_multiple_successors_6(ptr %src, i1 %
; CHECK-NEXT: store i16 [[XOR]], ptr [[GEP]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3
; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP13:![0-9]+]]
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -580,38 +566,39 @@ define void @empty_block_with_phi_1(ptr %src, i64 %N) #0 {
; CHECK-LABEL: define void @empty_block_with_phi_1(
; CHECK-SAME: ptr [[SRC:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[TMP9:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP9:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i32> poison, i32 [[TMP13]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 8 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 8 x i32> @llvm.stepvector.nxv8i32()
+; CHECK-NEXT: [[TMP7:%.*]] = icmp ult <vscale x 8 x i32> [[TMP6]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[TMP9]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i16>, ptr [[TMP10]], align 2
-; CHECK-NEXT: [[TMP12:%.*]] = icmp eq <vscale x 8 x i16> [[WIDE_LOAD]], zeroinitializer
-; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 8 x i1> [[TMP12]], <vscale x 8 x i16> splat (i16 99), <vscale x 8 x i16> [[WIDE_LOAD]]
-; CHECK-NEXT: store <vscale x 8 x i16> [[PREDPHI]], ptr [[TMP10]], align 2
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP9]], [[TMP5]]
-; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 8 x i16> @llvm.vp.load.nxv8i16.p0(ptr align 2 [[TMP10]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
+; CHECK-NEXT: [[TMP14:%.*]] = icmp ne <vscale x 8 x i16> [[VP_OP_LOAD]], zeroinitializer
+; CHECK-NEXT: [[TMP15:%.*]] = select <vscale x 8 x i1> [[TMP7]], <vscale x 8 x i1> [[TMP14]], <vscale x 8 x i1> zeroinitializer
+; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 8 x i1> [[TMP15]], <vscale x 8 x i16> [[VP_OP_LOAD]], <vscale x 8 x i16> splat (i16 99)
+; CHECK-NEXT: call void @llvm.vp.store.nxv8i16.p0(<vscale x 8 x i16> [[PREDPHI]], ptr align 2 [[TMP10]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
+; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP13]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP11]], [[TMP9]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ 0, %[[MIDDLE_BLOCK]] ], [ 1, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ 1, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[XOR1315:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[XOR1315:%.*]] = phi i32 [ 1, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
; CHECK-NEXT: [[XOR]] = xor i32 0, 0
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
@@ -624,7 +611,7 @@ define void @empty_block_with_phi_1(ptr %src, i64 %N) #0 {
; CHECK-NEXT: store i16 [[P]], ptr [[GEP]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1
; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[TMP17]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP15:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP17]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -658,38 +645,39 @@ define void @empty_block_with_phi_2(ptr %src, i64 %N) #0 {
; CHECK-LABEL: define void @empty_block_with_phi_2(
; CHECK-SAME: ptr [[SRC:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[TMP9:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP9:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i32> poison, i32 [[TMP13]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 8 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 8 x i32> @llvm.stepvector.nxv8i32()
+; CHECK-NEXT: [[TMP7:%.*]] = icmp ult <vscale x 8 x i32> [[TMP6]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[TMP9]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i16>, ptr [[TMP10]], align 2
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x i16> @llvm.vp.load.nxv8i16.p0(ptr align 2 [[TMP10]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq <vscale x 8 x i16> [[WIDE_LOAD]], zeroinitializer
-; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 8 x i1> [[TMP12]], <vscale x 8 x i16> [[WIDE_LOAD]], <vscale x 8 x i16> splat (i16 99)
-; CHECK-NEXT: store <vscale x 8 x i16> [[PREDPHI]], ptr [[TMP10]], align 2
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP9]], [[TMP5]]
-; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK-NEXT: [[TMP14:%.*]] = select <vscale x 8 x i1> [[TMP7]], <vscale x 8 x i1> [[TMP12]], <vscale x 8 x i1> zeroinitializer
+; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 8 x i1> [[TMP14]], <vscale x 8 x i16> [[WIDE_LOAD]], <vscale x 8 x i16> splat (i16 99)
+; CHECK-NEXT: call void @llvm.vp.store.nxv8i16.p0(<vscale x 8 x i16> [[PREDPHI]], ptr align 2 [[TMP10]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
+; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP13]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP11]], [[TMP9]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ 0, %[[MIDDLE_BLOCK]] ], [ 1, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ 1, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[XOR1315:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[XOR1315:%.*]] = phi i32 [ 1, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
; CHECK-NEXT: [[XOR]] = xor i32 0, 0
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
@@ -702,7 +690,7 @@ define void @empty_block_with_phi_2(ptr %src, i64 %N) #0 {
; CHECK-NEXT: store i16 [[P]], ptr [[GEP]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1
; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[TMP18]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP17:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP18]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP18:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -743,11 +731,7 @@ define void @dead_load_in_block(ptr %dst, ptr %src, i8 %N, i64 %x) #0 {
; CHECK-NEXT: [[TMP1:%.*]] = udiv i64 [[TMP0]], 3
; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[UMIN7]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[TMP2]], 1
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
-; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.umax.i64(i64 40, i64 [[TMP5]])
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP3]], [[TMP6]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[UMIN:%.*]] = call i64 @llvm.umin.i64(i64 [[N_EXT]], i64 1)
; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[N_EXT]], [[UMIN]]
@@ -770,34 +754,33 @@ define void @dead_load_in_block(ptr %dst, ptr %src, i8 %N, i64 %x) #0 {
; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[FOUND_CONFLICT]], [[FOUND_CONFLICT6]]
; CHECK-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP15:%.*]] = mul nuw i64 [[TMP14]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP3]], [[TMP15]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP17:%.*]] = mul nuw i64 [[TMP16]], 2
-; CHECK-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], 3
-; CHECK-NEXT: [[TMP18:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
-; CHECK-NEXT: [[TMP20:%.*]] = mul <vscale x 2 x i64> [[TMP18]], splat (i64 3)
-; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP20]]
-; CHECK-NEXT: [[TMP23:%.*]] = mul i64 3, [[TMP17]]
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP23]], i64 0
-; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP15:%.*]] = mul nuw i64 [[TMP16]], 4
+; CHECK-NEXT: [[TMP24:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
+; CHECK-NEXT: [[TMP25:%.*]] = mul <vscale x 4 x i64> [[TMP24]], splat (i64 3)
+; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP25]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP24:%.*]] = getelementptr i32, ptr [[DST]], <vscale x 2 x i64> [[VEC_IND]]
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv2i32.nxv2p0(<vscale x 2 x i32> zeroinitializer, <vscale x 2 x ptr> [[TMP24]], i32 4, <vscale x 2 x i1> splat (i1 true)), !alias.scope [[META18:![0-9]+]], !noalias [[META21:![0-9]+]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP17]]
-; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; CHECK-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP25]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP3]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP18:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; CHECK-NEXT: [[TMP17:%.*]] = zext i32 [[TMP18]] to i64
+; CHECK-NEXT: [[TMP23:%.*]] = mul i64 3, [[TMP17]]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP23]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr [[DST]], <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x ptr> align 4 [[TMP21]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP18]]), !alias.scope [[META19:![0-9]+]], !noalias [[META22:![0-9]+]]
+; CHECK-NEXT: [[TMP22:%.*]] = zext i32 [[TMP18]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP22]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP22]]
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP3]]
+; CHECK-NEXT: br i1 [[TMP26]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
@@ -813,7 +796,7 @@ define void @dead_load_in_block(ptr %dst, ptr %src, i8 %N, i64 %x) #0 {
; CHECK-NEXT: store i32 0, ptr [[GEP_DST]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 3
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[IV]], [[N_EXT]]
-; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP_HEADER]], label %[[EXIT]], !llvm.loop [[LOOP25:![0-9]+]]
+; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP_HEADER]], label %[[EXIT]], !llvm.loop [[LOOP26:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -845,30 +828,31 @@ exit:
attributes #0 = { "target-features"="+64bit,+v" }
;.
-; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]], [[META3:![0-9]+]]}
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
-; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
-; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
-; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
-; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META2]], [[META1]]}
-; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]}
-; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META2]], [[META1]]}
-; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]], [[META2]]}
-; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META2]], [[META1]]}
-; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META2]]}
-; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META2]], [[META1]]}
-; CHECK: [[LOOP12]] = distinct !{[[LOOP12]], [[META1]], [[META2]]}
-; CHECK: [[LOOP13]] = distinct !{[[LOOP13]], [[META2]], [[META1]]}
-; CHECK: [[LOOP14]] = distinct !{[[LOOP14]], [[META1]], [[META2]]}
-; CHECK: [[LOOP15]] = distinct !{[[LOOP15]], [[META2]], [[META1]]}
-; CHECK: [[LOOP16]] = distinct !{[[LOOP16]], [[META1]], [[META2]]}
-; CHECK: [[LOOP17]] = distinct !{[[LOOP17]], [[META2]], [[META1]]}
-; CHECK: [[META18]] = !{[[META19:![0-9]+]]}
-; CHECK: [[META19]] = distinct !{[[META19]], [[META20:![0-9]+]]}
-; CHECK: [[META20]] = distinct !{[[META20]], !"LVerDomain"}
-; CHECK: [[META21]] = !{[[META22:![0-9]+]], [[META23:![0-9]+]]}
-; CHECK: [[META22]] = distinct !{[[META22]], [[META20]]}
-; CHECK: [[META23]] = distinct !{[[META23]], [[META20]]}
-; CHECK: [[LOOP24]] = distinct !{[[LOOP24]], [[META1]], [[META2]]}
-; CHECK: [[LOOP25]] = distinct !{[[LOOP25]], [[META1]]}
+; CHECK: [[META2]] = !{!"llvm.loop.isvectorized.tailfoldingstyle", !"evl"}
+; CHECK: [[META3]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META3]], [[META1]]}
+; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]], [[META3]]}
+; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META3]], [[META1]]}
+; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]], [[META2]], [[META3]]}
+; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META3]], [[META1]]}
+; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]], [[META2]], [[META3]]}
+; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META3]], [[META1]]}
+; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META1]], [[META2]], [[META3]]}
+; CHECK: [[LOOP12]] = distinct !{[[LOOP12]], [[META3]], [[META1]]}
+; CHECK: [[LOOP13]] = distinct !{[[LOOP13]], [[META1]], [[META2]], [[META3]]}
+; CHECK: [[LOOP14]] = distinct !{[[LOOP14]], [[META3]], [[META1]]}
+; CHECK: [[LOOP15]] = distinct !{[[LOOP15]], [[META1]], [[META2]], [[META3]]}
+; CHECK: [[LOOP16]] = distinct !{[[LOOP16]], [[META3]], [[META1]]}
+; CHECK: [[LOOP17]] = distinct !{[[LOOP17]], [[META1]], [[META2]], [[META3]]}
+; CHECK: [[LOOP18]] = distinct !{[[LOOP18]], [[META3]], [[META1]]}
+; CHECK: [[META19]] = !{[[META20:![0-9]+]]}
+; CHECK: [[META20]] = distinct !{[[META20]], [[META21:![0-9]+]]}
+; CHECK: [[META21]] = distinct !{[[META21]], !"LVerDomain"}
+; CHECK: [[META22]] = !{[[META23:![0-9]+]], [[META24:![0-9]+]]}
+; CHECK: [[META23]] = distinct !{[[META23]], [[META21]]}
+; CHECK: [[META24]] = distinct !{[[META24]], [[META21]]}
+; CHECK: [[LOOP25]] = distinct !{[[LOOP25]], [[META1]], [[META2]], [[META3]]}
+; CHECK: [[LOOP26]] = distinct !{[[LOOP26]], [[META1]]}
;.
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll
index ab8875b..fcfd02b 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll
@@ -18,52 +18,46 @@ define void @dead_load(ptr %p, i16 %start) {
; CHECK-NEXT: [[TMP3:%.*]] = udiv i64 [[TMP2]], 3
; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[UMIN]], [[TMP3]]
; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[TMP4]], 1
-; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[TMP5]], [[TMP7]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP5]], [[TMP9]]
-; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
-; CHECK-NEXT: [[TMP11:%.*]] = select i1 [[TMP10]], i64 [[TMP9]], i64 [[N_MOD_VF]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP5]], [[TMP11]]
; CHECK-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP14:%.*]] = mul nuw i64 [[TMP13]], 8
-; CHECK-NEXT: [[TMP18:%.*]] = mul i64 [[N_VEC]], 3
-; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[START_EXT]], [[TMP18]]
; CHECK-NEXT: [[TMP15:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[START_EXT]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: [[TMP17:%.*]] = mul <vscale x 8 x i64> [[TMP15]], splat (i64 3)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 8 x i64> [[DOTSPLAT]], [[TMP17]]
-; CHECK-NEXT: [[TMP20:%.*]] = mul i64 3, [[TMP14]]
-; CHECK-NEXT: [[DOTSPLATINSERT1:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP20]], i64 0
-; CHECK-NEXT: [[DOTSPLAT2:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT1]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 8 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP5]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP16:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
+; CHECK-NEXT: [[TMP19:%.*]] = zext i32 [[TMP16]] to i64
+; CHECK-NEXT: [[TMP20:%.*]] = mul i64 3, [[TMP19]]
+; CHECK-NEXT: [[DOTSPLATINSERT1:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP20]], i64 0
+; CHECK-NEXT: [[DOTSPLAT2:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT1]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i16, ptr [[P]], <vscale x 8 x i64> [[VEC_IND]]
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> [[TMP21]], i32 2, <vscale x 8 x i1> splat (i1 true))
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP14]]
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> align 2 [[TMP21]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP16]])
+; CHECK-NEXT: [[TMP22:%.*]] = zext i32 [[TMP16]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP22]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP22]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[DOTSPLAT2]]
-; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP5]]
+; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: br label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ [[START_EXT]], %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[START_EXT]], %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[START_EXT]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[P]], i64 [[IV]]
; CHECK-NEXT: store i16 0, ptr [[GEP]], align 2
; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 3
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i64 [[IV]], 111
-; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -94,7 +88,7 @@ define i8 @dead_live_out_due_to_scalar_epilogue_required(ptr %src, ptr %dst) {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i32 [[TMP0]], 4
-; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.umax.i32(i32 8, i32 [[TMP1]])
+; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.umax.i32(i32 6, i32 [[TMP1]])
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i32 252, [[TMP2]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
@@ -111,13 +105,11 @@ define i8 @dead_live_out_due_to_scalar_epilogue_required(ptr %src, ptr %dst) {
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N_MOD_VF]], 0
; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i32 [[TMP4]], i32 [[N_MOD_VF]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 252, [[TMP6]]
-; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.vscale.i32()
-; CHECK-NEXT: [[TMP8:%.*]] = mul nuw i32 [[TMP7]], 4
; CHECK-NEXT: [[IND_END:%.*]] = mul i32 [[N_VEC]], 4
; CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
; CHECK-NEXT: [[TMP11:%.*]] = mul <vscale x 4 x i32> [[TMP9]], splat (i32 4)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i32> zeroinitializer, [[TMP11]]
-; CHECK-NEXT: [[TMP14:%.*]] = mul i32 4, [[TMP8]]
+; CHECK-NEXT: [[TMP14:%.*]] = mul i32 4, [[TMP4]]
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP14]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -126,11 +118,11 @@ define i8 @dead_live_out_due_to_scalar_epilogue_required(ptr %src, ptr %dst) {
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i32> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP15:%.*]] = sext <vscale x 4 x i32> [[VEC_IND]] to <vscale x 4 x i64>
; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[DST]], <vscale x 4 x i64> [[TMP15]]
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i8.nxv4p0(<vscale x 4 x i8> zeroinitializer, <vscale x 4 x ptr> [[TMP16]], i32 1, <vscale x 4 x i1> splat (i1 true)), !alias.scope [[META4:![0-9]+]], !noalias [[META7:![0-9]+]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP8]]
+; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i8.nxv4p0(<vscale x 4 x i8> zeroinitializer, <vscale x 4 x ptr> [[TMP16]], i32 1, <vscale x 4 x i1> splat (i1 true)), !alias.scope [[META5:![0-9]+]], !noalias [[META8:![0-9]+]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP4]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
@@ -145,7 +137,7 @@ define i8 @dead_live_out_due_to_scalar_epilogue_required(ptr %src, ptr %dst) {
; CHECK-NEXT: store i8 0, ptr [[GEP_DST]], align 1
; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 4
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[IV]], 1001
-; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: [[R:%.*]] = phi i8 [ [[L]], %[[LOOP]] ]
; CHECK-NEXT: ret i8 [[R]]
@@ -181,7 +173,7 @@ define i32 @cost_of_exit_branch_and_cond_insts(ptr %a, ptr %b, i1 %c, i16 %x) #0
; CHECK-NEXT: [[TMP1:%.*]] = sub i32 770, [[UMAX3]]
; CHECK-NEXT: [[SMAX4:%.*]] = call i32 @llvm.smax.i32(i32 [[TMP1]], i32 0)
; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i32 [[SMAX4]], 1
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i32 [[TMP2]], 24
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i32 [[TMP2]], 19
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 1
@@ -210,48 +202,48 @@ define i32 @cost_of_exit_branch_and_cond_insts(ptr %a, ptr %b, i1 %c, i16 %x) #0
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[B]], i32 [[INDEX]]
; CHECK-NEXT: br i1 [[C]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]]
; CHECK: [[PRED_STORE_IF]]:
-; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META11:![0-9]+]], !noalias [[META14:![0-9]+]]
+; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META12:![0-9]+]], !noalias [[META15:![0-9]+]]
; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE]]
; CHECK: [[PRED_STORE_CONTINUE]]:
; CHECK-NEXT: br i1 [[C]], label %[[PRED_STORE_IF5:.*]], label %[[PRED_STORE_CONTINUE6:.*]]
; CHECK: [[PRED_STORE_IF5]]:
-; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META11]], !noalias [[META14]]
+; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META12]], !noalias [[META15]]
; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE6]]
; CHECK: [[PRED_STORE_CONTINUE6]]:
; CHECK-NEXT: br i1 [[C]], label %[[PRED_STORE_IF7:.*]], label %[[PRED_STORE_CONTINUE8:.*]]
; CHECK: [[PRED_STORE_IF7]]:
-; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META11]], !noalias [[META14]]
+; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META12]], !noalias [[META15]]
; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE8]]
; CHECK: [[PRED_STORE_CONTINUE8]]:
; CHECK-NEXT: br i1 [[C]], label %[[PRED_STORE_IF9:.*]], label %[[PRED_STORE_CONTINUE10:.*]]
; CHECK: [[PRED_STORE_IF9]]:
-; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META11]], !noalias [[META14]]
+; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META12]], !noalias [[META15]]
; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE10]]
; CHECK: [[PRED_STORE_CONTINUE10]]:
; CHECK-NEXT: br i1 [[C]], label %[[PRED_STORE_IF11:.*]], label %[[PRED_STORE_CONTINUE12:.*]]
; CHECK: [[PRED_STORE_IF11]]:
-; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META11]], !noalias [[META14]]
+; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META12]], !noalias [[META15]]
; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE12]]
; CHECK: [[PRED_STORE_CONTINUE12]]:
; CHECK-NEXT: br i1 [[C]], label %[[PRED_STORE_IF13:.*]], label %[[PRED_STORE_CONTINUE14:.*]]
; CHECK: [[PRED_STORE_IF13]]:
-; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META11]], !noalias [[META14]]
+; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META12]], !noalias [[META15]]
; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE14]]
; CHECK: [[PRED_STORE_CONTINUE14]]:
; CHECK-NEXT: br i1 [[C]], label %[[PRED_STORE_IF15:.*]], label %[[PRED_STORE_CONTINUE16:.*]]
; CHECK: [[PRED_STORE_IF15]]:
-; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META11]], !noalias [[META14]]
+; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META12]], !noalias [[META15]]
; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE16]]
; CHECK: [[PRED_STORE_CONTINUE16]]:
; CHECK-NEXT: br i1 [[C]], label %[[PRED_STORE_IF17:.*]], label %[[PRED_STORE_CONTINUE18]]
; CHECK: [[PRED_STORE_IF17]]:
-; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META11]], !noalias [[META14]]
+; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META12]], !noalias [[META15]]
; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE18]]
; CHECK: [[PRED_STORE_CONTINUE18]]:
-; CHECK-NEXT: call void @llvm.masked.store.v8i32.p0(<8 x i32> zeroinitializer, ptr [[TMP11]], i32 4, <8 x i1> [[BROADCAST_SPLAT]]), !alias.scope [[META14]]
+; CHECK-NEXT: call void @llvm.masked.store.v8i32.p0(<8 x i32> zeroinitializer, ptr [[TMP11]], i32 4, <8 x i1> [[BROADCAST_SPLAT]]), !alias.scope [[META15]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
@@ -273,7 +265,7 @@ define i32 @cost_of_exit_branch_and_cond_insts(ptr %a, ptr %b, i1 %c, i16 %x) #0
; CHECK-NEXT: [[EC:%.*]] = icmp slt i32 [[IV]], [[SUB]]
; CHECK-NEXT: br i1 [[EC]], label %[[LOOP_LATCH]], label %[[EXIT:.*]]
; CHECK: [[LOOP_LATCH]]:
-; CHECK-NEXT: br label %[[LOOP_HEADER]], !llvm.loop [[LOOP17:![0-9]+]]
+; CHECK-NEXT: br label %[[LOOP_HEADER]], !llvm.loop [[LOOP18:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: br label %[[RETURN:.*]]
; CHECK: [[RETURN]]:
@@ -315,45 +307,49 @@ define void @test_phi_in_latch_redundant(ptr %dst, i32 %a) {
; CHECK-LABEL: define void @test_phi_in_latch_redundant(
; CHECK-SAME: ptr [[DST:%.*]], i32 [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 37, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 37, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 37, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[A]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[N_VEC]], 9
-; CHECK-NEXT: [[TMP10:%.*]] = xor <vscale x 2 x i32> [[BROADCAST_SPLAT]], splat (i32 -1)
-; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
-; CHECK-NEXT: [[TMP8:%.*]] = mul <vscale x 2 x i64> [[TMP6]], splat (i64 9)
-; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP8]]
-; CHECK-NEXT: [[TMP9:%.*]] = mul i64 9, [[TMP5]]
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP9]], i64 0
-; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP4]], 4
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[A]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP19:%.*]] = xor <vscale x 4 x i32> [[BROADCAST_SPLAT]], splat (i32 -1)
+; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
+; CHECK-NEXT: [[TMP7:%.*]] = mul <vscale x 4 x i64> [[TMP6]], splat (i64 9)
+; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP7]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[DST]], <vscale x 2 x i64> [[VEC_IND]]
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv2i32.nxv2p0(<vscale x 2 x i32> [[TMP10]], <vscale x 2 x ptr> [[TMP11]], i32 4, <vscale x 2 x i1> splat (i1 true))
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 37, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP8]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT3]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP8]] to i64
+; CHECK-NEXT: [[TMP9:%.*]] = mul i64 9, [[TMP5]]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP9]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT1]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP11:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; CHECK-NEXT: [[TMP12:%.*]] = icmp ult <vscale x 4 x i32> [[TMP11]], [[BROADCAST_SPLAT4]]
+; CHECK-NEXT: [[TMP13:%.*]] = select <vscale x 4 x i1> [[TMP12]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> zeroinitializer
+; CHECK-NEXT: [[TMP14:%.*]] = select <vscale x 4 x i1> [[TMP12]], <vscale x 4 x i1> zeroinitializer, <vscale x 4 x i1> zeroinitializer
+; CHECK-NEXT: [[TMP15:%.*]] = or <vscale x 4 x i1> [[TMP13]], [[TMP14]]
+; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 4 x i1> [[TMP14]], <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> [[TMP19]]
+; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[DST]], <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[PREDPHI]], <vscale x 4 x ptr> align 4 [[TMP16]], <vscale x 4 x i1> [[TMP15]], i32 [[TMP8]])
+; CHECK-NEXT: [[TMP17:%.*]] = zext i32 [[TMP8]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP17]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP17]]
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT2]]
+; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 37
+; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 37, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP7]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
; CHECK-NEXT: br i1 false, label %[[LOOP_LATCH]], label %[[THEN:.*]]
; CHECK: [[THEN]]:
; CHECK-NEXT: [[NOT_A:%.*]] = xor i32 [[A]], -1
@@ -364,7 +360,7 @@ define void @test_phi_in_latch_redundant(ptr %dst, i32 %a) {
; CHECK-NEXT: store i32 [[P]], ptr [[GEP]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 9
; CHECK-NEXT: [[EC:%.*]] = icmp slt i64 [[IV]], 322
-; CHECK-NEXT: br i1 [[EC]], label %[[LOOP_HEADER]], label %[[EXIT]], !llvm.loop [[LOOP19:![0-9]+]]
+; CHECK-NEXT: br i1 [[EC]], label %[[LOOP_HEADER]], label %[[EXIT]], !llvm.loop [[LOOP20:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -400,49 +396,50 @@ define void @gather_interleave_group_with_dead_insert_pos(i64 %N, ptr noalias %s
; CHECK-NEXT: [[TMP0:%.*]] = add nuw i64 [[SMAX]], 1
; CHECK-NEXT: [[TMP1:%.*]] = lshr i64 [[TMP0]], 1
; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
-; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], [[TMP4]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], [[TMP6]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
-; CHECK-NEXT: [[TMP13:%.*]] = mul i64 [[N_VEC]], 2
; CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; CHECK-NEXT: [[TMP11:%.*]] = mul <vscale x 4 x i64> [[TMP9]], splat (i64 2)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP11]]
-; CHECK-NEXT: [[TMP12:%.*]] = mul i64 2, [[TMP8]]
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP12]], i64 0
-; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[OFFSET_IDX]]
-; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i8>, ptr [[TMP15]], align 1
-; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.vector.deinterleave2.nxv8i8(<vscale x 8 x i8> [[WIDE_VEC]])
-; CHECK-NEXT: [[TMP16:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[STRIDED_VEC]], 0
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP2]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP10]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP16:%.*]] = zext i32 [[TMP10]] to i64
+; CHECK-NEXT: [[TMP12:%.*]] = mul i64 2, [[TMP16]]
+; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP12]], i64 0
+; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP13:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; CHECK-NEXT: [[TMP14:%.*]] = icmp ult <vscale x 4 x i32> [[TMP13]], [[BROADCAST_SPLAT2]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[EVL_BASED_IV]], 2
+; CHECK-NEXT: [[TMP22:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave2.nxv8i1(<vscale x 4 x i1> [[TMP14]], <vscale x 4 x i1> [[TMP14]])
+; CHECK-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[TMP22]], i32 1, <vscale x 8 x i1> [[INTERLEAVED_MASK]], <vscale x 8 x i8> poison)
+; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.vector.deinterleave2.nxv8i8(<vscale x 8 x i8> [[WIDE_MASKED_VEC]])
+; CHECK-NEXT: [[TMP23:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[STRIDED_VEC]], 0
; CHECK-NEXT: [[TMP17:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[STRIDED_VEC]], 1
; CHECK-NEXT: [[TMP18:%.*]] = zext <vscale x 4 x i8> [[TMP17]] to <vscale x 4 x i32>
; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i32, ptr [[DST]], <vscale x 4 x i64> [[VEC_IND]]
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP18]], <vscale x 4 x ptr> [[TMP19]], i32 4, <vscale x 4 x i1> splat (i1 true))
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP18]], <vscale x 4 x ptr> align 4 [[TMP19]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; CHECK-NEXT: [[TMP20:%.*]] = zext i32 [[TMP10]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP20]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP20]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
+; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP2]]
+; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP2]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP13]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; CHECK-NEXT: [[GEP_SRC_0:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: [[L_DEAD:%.*]] = load i8, ptr [[GEP_SRC_0]], align 1
; CHECK-NEXT: [[IV_1:%.*]] = add i64 [[IV]], 1
@@ -453,7 +450,7 @@ define void @gather_interleave_group_with_dead_insert_pos(i64 %N, ptr noalias %s
; CHECK-NEXT: store i32 [[EXT]], ptr [[GEP_DST]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 2
; CHECK-NEXT: [[EC:%.*]] = icmp slt i64 [[IV]], [[N]]
-; CHECK-NEXT: br i1 [[EC]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP21:![0-9]+]]
+; CHECK-NEXT: br i1 [[EC]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP22:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -481,26 +478,27 @@ exit:
attributes #0 = { "target-features"="+64bit,+v" }
;.
-; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]], [[META3:![0-9]+]]}
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
-; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
-; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
-; CHECK: [[META4]] = !{[[META5:![0-9]+]]}
-; CHECK: [[META5]] = distinct !{[[META5]], [[META6:![0-9]+]]}
-; CHECK: [[META6]] = distinct !{[[META6]], !"LVerDomain"}
-; CHECK: [[META7]] = !{[[META8:![0-9]+]]}
-; CHECK: [[META8]] = distinct !{[[META8]], [[META6]]}
-; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]], [[META2]]}
-; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]]}
-; CHECK: [[META11]] = !{[[META12:![0-9]+]]}
-; CHECK: [[META12]] = distinct !{[[META12]], [[META13:![0-9]+]]}
-; CHECK: [[META13]] = distinct !{[[META13]], !"LVerDomain"}
-; CHECK: [[META14]] = !{[[META15:![0-9]+]]}
-; CHECK: [[META15]] = distinct !{[[META15]], [[META13]]}
-; CHECK: [[LOOP16]] = distinct !{[[LOOP16]], [[META1]], [[META2]]}
-; CHECK: [[LOOP17]] = distinct !{[[LOOP17]], [[META1]]}
-; CHECK: [[LOOP18]] = distinct !{[[LOOP18]], [[META1]], [[META2]]}
-; CHECK: [[LOOP19]] = distinct !{[[LOOP19]], [[META2]], [[META1]]}
-; CHECK: [[LOOP20]] = distinct !{[[LOOP20]], [[META1]], [[META2]]}
-; CHECK: [[LOOP21]] = distinct !{[[LOOP21]], [[META2]], [[META1]]}
+; CHECK: [[META2]] = !{!"llvm.loop.isvectorized.tailfoldingstyle", !"evl"}
+; CHECK: [[META3]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META3]], [[META1]]}
+; CHECK: [[META5]] = !{[[META6:![0-9]+]]}
+; CHECK: [[META6]] = distinct !{[[META6]], [[META7:![0-9]+]]}
+; CHECK: [[META7]] = distinct !{[[META7]], !"LVerDomain"}
+; CHECK: [[META8]] = !{[[META9:![0-9]+]]}
+; CHECK: [[META9]] = distinct !{[[META9]], [[META7]]}
+; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META3]]}
+; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META1]]}
+; CHECK: [[META12]] = !{[[META13:![0-9]+]]}
+; CHECK: [[META13]] = distinct !{[[META13]], [[META14:![0-9]+]]}
+; CHECK: [[META14]] = distinct !{[[META14]], !"LVerDomain"}
+; CHECK: [[META15]] = !{[[META16:![0-9]+]]}
+; CHECK: [[META16]] = distinct !{[[META16]], [[META14]]}
+; CHECK: [[LOOP17]] = distinct !{[[LOOP17]], [[META1]], [[META3]]}
+; CHECK: [[LOOP18]] = distinct !{[[LOOP18]], [[META1]]}
+; CHECK: [[LOOP19]] = distinct !{[[LOOP19]], [[META1]], [[META2]], [[META3]]}
+; CHECK: [[LOOP20]] = distinct !{[[LOOP20]], [[META3]], [[META1]]}
+; CHECK: [[LOOP21]] = distinct !{[[LOOP21]], [[META1]], [[META2]], [[META3]]}
+; CHECK: [[LOOP22]] = distinct !{[[LOOP22]], [[META3]], [[META1]]}
;.
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/defaults.ll b/llvm/test/Transforms/LoopVectorize/RISCV/defaults.ll
index db3215a6..2a30724 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/defaults.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/defaults.ll
@@ -13,15 +13,8 @@ target triple = "riscv64"
define void @vector_add(ptr noalias nocapture %a, i64 %v) {
; CHECK-LABEL: @vector_add(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0
@@ -29,28 +22,31 @@ define void @vector_add(ptr noalias nocapture %a, i64 %v) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP7]], align 8
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
; CHECK-NEXT: [[TMP9:%.*]] = add <vscale x 2 x i64> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: store <vscale x 2 x i64> [[TMP9]], ptr [[TMP7]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP9]], ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
+; CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP10]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP8]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
+; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[ADD:%.*]] = add i64 [[ELEM]], [[V]]
; CHECK-NEXT: store i64 [[ADD]], ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -74,44 +70,41 @@ for.end:
define i64 @vector_add_reduce(ptr noalias nocapture %a) {
; CHECK-LABEL: @vector_add_reduce(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP7]], align 8
-; CHECK-NEXT: [[TMP9]] = add <vscale x 2 x i64> [[VEC_PHI]], [[WIDE_LOAD]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP8]])
+; CHECK-NEXT: [[TMP12:%.*]] = add <vscale x 2 x i64> [[VEC_PHI]], [[VP_OP_LOAD]]
+; CHECK-NEXT: [[TMP9]] = call <vscale x 2 x i64> @llvm.vp.merge.nxv2i64(<vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> [[TMP12]], <vscale x 2 x i64> [[VEC_PHI]], i32 [[TMP8]])
+; CHECK-NEXT: [[TMP13:%.*]] = zext i32 [[TMP8]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP13]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]]
+; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[TMP9]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[TMP11]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ 0, [[ENTRY]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM:%.*]] = phi i64 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[SUM_NEXT]] = add i64 [[SUM]], [[ELEM]]
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: [[SUM_NEXT_LCSSA:%.*]] = phi i64 [ [[SUM_NEXT]], [[FOR_BODY]] ], [ [[TMP11]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i64 [[SUM_NEXT_LCSSA]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll b/llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll
index f02e5de..d86eb91 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll
@@ -10,15 +10,8 @@ target triple = "riscv64"
define void @vector_udiv(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-LABEL: @vector_udiv(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0
@@ -26,28 +19,31 @@ define void @vector_udiv(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP7]], align 8
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
; CHECK-NEXT: [[TMP9:%.*]] = udiv <vscale x 2 x i64> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: store <vscale x 2 x i64> [[TMP9]], ptr [[TMP7]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP9]], ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
+; CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP10]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP8]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
+; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[DIVREM:%.*]] = udiv i64 [[ELEM]], [[V]]
; CHECK-NEXT: store i64 [[DIVREM]], ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -61,14 +57,10 @@ define void @vector_udiv(ptr noalias nocapture %a, i64 %v, i64 %n) {
; FIXED: vector.body:
; FIXED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; FIXED-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; FIXED-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i32 4
-; FIXED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8
-; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP3]], align 8
-; FIXED-NEXT: [[TMP4:%.*]] = udiv <4 x i64> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
+; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8
; FIXED-NEXT: [[TMP5:%.*]] = udiv <4 x i64> [[WIDE_LOAD1]], [[BROADCAST_SPLAT]]
-; FIXED-NEXT: store <4 x i64> [[TMP4]], ptr [[TMP1]], align 8
-; FIXED-NEXT: store <4 x i64> [[TMP5]], ptr [[TMP3]], align 8
-; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; FIXED-NEXT: store <4 x i64> [[TMP5]], ptr [[TMP1]], align 8
+; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; FIXED-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; FIXED: middle.block:
@@ -108,15 +100,8 @@ for.end:
define void @vector_sdiv(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-LABEL: @vector_sdiv(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0
@@ -124,28 +109,31 @@ define void @vector_sdiv(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP7]], align 8
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
; CHECK-NEXT: [[TMP9:%.*]] = sdiv <vscale x 2 x i64> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: store <vscale x 2 x i64> [[TMP9]], ptr [[TMP7]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP9]], ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
+; CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP10]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP8]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
+; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[DIVREM:%.*]] = sdiv i64 [[ELEM]], [[V]]
; CHECK-NEXT: store i64 [[DIVREM]], ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -159,14 +147,10 @@ define void @vector_sdiv(ptr noalias nocapture %a, i64 %v, i64 %n) {
; FIXED: vector.body:
; FIXED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; FIXED-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; FIXED-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i32 4
-; FIXED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8
-; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP3]], align 8
-; FIXED-NEXT: [[TMP4:%.*]] = sdiv <4 x i64> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
+; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8
; FIXED-NEXT: [[TMP5:%.*]] = sdiv <4 x i64> [[WIDE_LOAD1]], [[BROADCAST_SPLAT]]
-; FIXED-NEXT: store <4 x i64> [[TMP4]], ptr [[TMP1]], align 8
-; FIXED-NEXT: store <4 x i64> [[TMP5]], ptr [[TMP3]], align 8
-; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; FIXED-NEXT: store <4 x i64> [[TMP5]], ptr [[TMP1]], align 8
+; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; FIXED-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; FIXED: middle.block:
@@ -206,15 +190,8 @@ for.end:
define void @vector_urem(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-LABEL: @vector_urem(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0
@@ -222,28 +199,31 @@ define void @vector_urem(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP7]], align 8
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
; CHECK-NEXT: [[TMP9:%.*]] = urem <vscale x 2 x i64> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: store <vscale x 2 x i64> [[TMP9]], ptr [[TMP7]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP9]], ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
+; CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP10]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP8]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
+; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[DIVREM:%.*]] = urem i64 [[ELEM]], [[V]]
; CHECK-NEXT: store i64 [[DIVREM]], ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -257,14 +237,10 @@ define void @vector_urem(ptr noalias nocapture %a, i64 %v, i64 %n) {
; FIXED: vector.body:
; FIXED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; FIXED-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; FIXED-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i32 4
-; FIXED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8
-; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP3]], align 8
-; FIXED-NEXT: [[TMP4:%.*]] = urem <4 x i64> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
+; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8
; FIXED-NEXT: [[TMP5:%.*]] = urem <4 x i64> [[WIDE_LOAD1]], [[BROADCAST_SPLAT]]
-; FIXED-NEXT: store <4 x i64> [[TMP4]], ptr [[TMP1]], align 8
-; FIXED-NEXT: store <4 x i64> [[TMP5]], ptr [[TMP3]], align 8
-; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; FIXED-NEXT: store <4 x i64> [[TMP5]], ptr [[TMP1]], align 8
+; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; FIXED-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; FIXED: middle.block:
@@ -304,15 +280,8 @@ for.end:
define void @vector_srem(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-LABEL: @vector_srem(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0
@@ -320,28 +289,31 @@ define void @vector_srem(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP7]], align 8
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
; CHECK-NEXT: [[TMP9:%.*]] = srem <vscale x 2 x i64> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: store <vscale x 2 x i64> [[TMP9]], ptr [[TMP7]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP9]], ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
+; CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP10]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP8]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
+; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[DIVREM:%.*]] = srem i64 [[ELEM]], [[V]]
; CHECK-NEXT: store i64 [[DIVREM]], ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -355,14 +327,10 @@ define void @vector_srem(ptr noalias nocapture %a, i64 %v, i64 %n) {
; FIXED: vector.body:
; FIXED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; FIXED-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; FIXED-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i32 4
-; FIXED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8
-; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP3]], align 8
-; FIXED-NEXT: [[TMP4:%.*]] = srem <4 x i64> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
+; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8
; FIXED-NEXT: [[TMP5:%.*]] = srem <4 x i64> [[WIDE_LOAD1]], [[BROADCAST_SPLAT]]
-; FIXED-NEXT: store <4 x i64> [[TMP4]], ptr [[TMP1]], align 8
-; FIXED-NEXT: store <4 x i64> [[TMP5]], ptr [[TMP3]], align 8
-; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; FIXED-NEXT: store <4 x i64> [[TMP5]], ptr [[TMP1]], align 8
+; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; FIXED-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; FIXED: middle.block:
@@ -402,40 +370,41 @@ for.end:
define void @predicated_udiv(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-LABEL: @predicated_udiv(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = icmp ne <vscale x 2 x i64> [[BROADCAST_SPLAT]], zeroinitializer
-; CHECK-NEXT: [[TMP10:%.*]] = select <vscale x 2 x i1> [[TMP6]], <vscale x 2 x i64> [[BROADCAST_SPLAT]], <vscale x 2 x i64> splat (i64 1)
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP12]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
+; CHECK-NEXT: [[TMP15:%.*]] = icmp ult <vscale x 2 x i32> [[TMP7]], [[BROADCAST_SPLAT2]]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP8]], align 8
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]])
+; CHECK-NEXT: [[TMP16:%.*]] = select <vscale x 2 x i1> [[TMP15]], <vscale x 2 x i1> [[TMP6]], <vscale x 2 x i1> zeroinitializer
+; CHECK-NEXT: [[TMP10:%.*]] = select <vscale x 2 x i1> [[TMP16]], <vscale x 2 x i64> [[BROADCAST_SPLAT]], <vscale x 2 x i64> splat (i64 1)
; CHECK-NEXT: [[TMP11:%.*]] = udiv <vscale x 2 x i64> [[WIDE_LOAD]], [[TMP10]]
-; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP6]], <vscale x 2 x i64> [[TMP11]], <vscale x 2 x i64> [[WIDE_LOAD]]
-; CHECK-NEXT: store <vscale x 2 x i64> [[PREDPHI]], ptr [[TMP8]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP16]], <vscale x 2 x i64> [[TMP11]], <vscale x 2 x i64> [[WIDE_LOAD]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI]], ptr align 8 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]])
+; CHECK-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP13]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]]
+; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[C:%.*]] = icmp ne i64 [[V]], 0
@@ -448,7 +417,7 @@ define void @predicated_udiv(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -464,16 +433,11 @@ define void @predicated_udiv(ptr noalias nocapture %a, i64 %v, i64 %n) {
; FIXED: vector.body:
; FIXED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; FIXED-NEXT: [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; FIXED-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i32 4
-; FIXED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP2]], align 8
-; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP4]], align 8
-; FIXED-NEXT: [[TMP7:%.*]] = udiv <4 x i64> [[WIDE_LOAD]], [[TMP5]]
+; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP2]], align 8
; FIXED-NEXT: [[TMP8:%.*]] = udiv <4 x i64> [[WIDE_LOAD1]], [[TMP5]]
-; FIXED-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP0]], <4 x i64> [[TMP7]], <4 x i64> [[WIDE_LOAD]]
; FIXED-NEXT: [[PREDPHI2:%.*]] = select <4 x i1> [[TMP0]], <4 x i64> [[TMP8]], <4 x i64> [[WIDE_LOAD1]]
-; FIXED-NEXT: store <4 x i64> [[PREDPHI]], ptr [[TMP2]], align 8
-; FIXED-NEXT: store <4 x i64> [[PREDPHI2]], ptr [[TMP4]], align 8
-; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; FIXED-NEXT: store <4 x i64> [[PREDPHI2]], ptr [[TMP2]], align 8
+; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; FIXED-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; FIXED: middle.block:
@@ -525,40 +489,41 @@ for.end:
define void @predicated_sdiv(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-LABEL: @predicated_sdiv(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = icmp ne <vscale x 2 x i64> [[BROADCAST_SPLAT]], zeroinitializer
-; CHECK-NEXT: [[TMP10:%.*]] = select <vscale x 2 x i1> [[TMP6]], <vscale x 2 x i64> [[BROADCAST_SPLAT]], <vscale x 2 x i64> splat (i64 1)
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP12]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
+; CHECK-NEXT: [[TMP15:%.*]] = icmp ult <vscale x 2 x i32> [[TMP7]], [[BROADCAST_SPLAT2]]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP8]], align 8
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]])
+; CHECK-NEXT: [[TMP16:%.*]] = select <vscale x 2 x i1> [[TMP15]], <vscale x 2 x i1> [[TMP6]], <vscale x 2 x i1> zeroinitializer
+; CHECK-NEXT: [[TMP10:%.*]] = select <vscale x 2 x i1> [[TMP16]], <vscale x 2 x i64> [[BROADCAST_SPLAT]], <vscale x 2 x i64> splat (i64 1)
; CHECK-NEXT: [[TMP11:%.*]] = sdiv <vscale x 2 x i64> [[WIDE_LOAD]], [[TMP10]]
-; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP6]], <vscale x 2 x i64> [[TMP11]], <vscale x 2 x i64> [[WIDE_LOAD]]
-; CHECK-NEXT: store <vscale x 2 x i64> [[PREDPHI]], ptr [[TMP8]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP16]], <vscale x 2 x i64> [[TMP11]], <vscale x 2 x i64> [[WIDE_LOAD]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI]], ptr align 8 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]])
+; CHECK-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP13]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]]
+; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[C:%.*]] = icmp ne i64 [[V]], 0
@@ -571,7 +536,7 @@ define void @predicated_sdiv(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -587,16 +552,11 @@ define void @predicated_sdiv(ptr noalias nocapture %a, i64 %v, i64 %n) {
; FIXED: vector.body:
; FIXED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; FIXED-NEXT: [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; FIXED-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i32 4
-; FIXED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP2]], align 8
-; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP4]], align 8
-; FIXED-NEXT: [[TMP7:%.*]] = sdiv <4 x i64> [[WIDE_LOAD]], [[TMP5]]
+; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP2]], align 8
; FIXED-NEXT: [[TMP8:%.*]] = sdiv <4 x i64> [[WIDE_LOAD1]], [[TMP5]]
-; FIXED-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP0]], <4 x i64> [[TMP7]], <4 x i64> [[WIDE_LOAD]]
; FIXED-NEXT: [[PREDPHI2:%.*]] = select <4 x i1> [[TMP0]], <4 x i64> [[TMP8]], <4 x i64> [[WIDE_LOAD1]]
-; FIXED-NEXT: store <4 x i64> [[PREDPHI]], ptr [[TMP2]], align 8
-; FIXED-NEXT: store <4 x i64> [[PREDPHI2]], ptr [[TMP4]], align 8
-; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; FIXED-NEXT: store <4 x i64> [[PREDPHI2]], ptr [[TMP2]], align 8
+; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; FIXED-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; FIXED: middle.block:
@@ -648,37 +608,38 @@ for.end:
define void @predicated_udiv_by_constant(ptr noalias nocapture %a, i64 %n) {
; CHECK-LABEL: @predicated_udiv_by_constant(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP14]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
+; CHECK-NEXT: [[TMP15:%.*]] = icmp ult <vscale x 2 x i32> [[TMP6]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP7]], align 8
-; CHECK-NEXT: [[TMP9:%.*]] = icmp ne <vscale x 2 x i64> [[WIDE_LOAD]], splat (i64 42)
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP14]])
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq <vscale x 2 x i64> [[WIDE_LOAD]], splat (i64 42)
; CHECK-NEXT: [[TMP10:%.*]] = udiv <vscale x 2 x i64> [[WIDE_LOAD]], splat (i64 27)
-; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP9]], <vscale x 2 x i64> [[TMP10]], <vscale x 2 x i64> [[WIDE_LOAD]]
-; CHECK-NEXT: store <vscale x 2 x i64> [[PREDPHI]], ptr [[TMP7]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; CHECK-NEXT: [[TMP11:%.*]] = select <vscale x 2 x i1> [[TMP15]], <vscale x 2 x i1> [[TMP9]], <vscale x 2 x i1> zeroinitializer
+; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP11]], <vscale x 2 x i64> [[WIDE_LOAD]], <vscale x 2 x i64> [[TMP10]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI]], ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP14]])
+; CHECK-NEXT: [[TMP12:%.*]] = zext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP12]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP12]]
+; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[C:%.*]] = icmp ne i64 [[ELEM]], 42
@@ -691,7 +652,7 @@ define void @predicated_udiv_by_constant(ptr noalias nocapture %a, i64 %n) {
; CHECK-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -703,18 +664,12 @@ define void @predicated_udiv_by_constant(ptr noalias nocapture %a, i64 %n) {
; FIXED: vector.body:
; FIXED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; FIXED-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; FIXED-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i32 4
-; FIXED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8
-; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP3]], align 8
-; FIXED-NEXT: [[TMP4:%.*]] = icmp ne <4 x i64> [[WIDE_LOAD]], splat (i64 42)
+; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8
; FIXED-NEXT: [[TMP5:%.*]] = icmp ne <4 x i64> [[WIDE_LOAD1]], splat (i64 42)
-; FIXED-NEXT: [[TMP6:%.*]] = udiv <4 x i64> [[WIDE_LOAD]], splat (i64 27)
; FIXED-NEXT: [[TMP7:%.*]] = udiv <4 x i64> [[WIDE_LOAD1]], splat (i64 27)
-; FIXED-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP4]], <4 x i64> [[TMP6]], <4 x i64> [[WIDE_LOAD]]
; FIXED-NEXT: [[PREDPHI2:%.*]] = select <4 x i1> [[TMP5]], <4 x i64> [[TMP7]], <4 x i64> [[WIDE_LOAD1]]
-; FIXED-NEXT: store <4 x i64> [[PREDPHI]], ptr [[TMP1]], align 8
-; FIXED-NEXT: store <4 x i64> [[PREDPHI2]], ptr [[TMP3]], align 8
-; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; FIXED-NEXT: store <4 x i64> [[PREDPHI2]], ptr [[TMP1]], align 8
+; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; FIXED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; FIXED: middle.block:
@@ -766,37 +721,38 @@ for.end:
define void @predicated_sdiv_by_constant(ptr noalias nocapture %a, i64 %n) {
; CHECK-LABEL: @predicated_sdiv_by_constant(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP14]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
+; CHECK-NEXT: [[TMP15:%.*]] = icmp ult <vscale x 2 x i32> [[TMP6]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP7]], align 8
-; CHECK-NEXT: [[TMP9:%.*]] = icmp ne <vscale x 2 x i64> [[WIDE_LOAD]], splat (i64 42)
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP14]])
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq <vscale x 2 x i64> [[WIDE_LOAD]], splat (i64 42)
; CHECK-NEXT: [[TMP10:%.*]] = sdiv <vscale x 2 x i64> [[WIDE_LOAD]], splat (i64 27)
-; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP9]], <vscale x 2 x i64> [[TMP10]], <vscale x 2 x i64> [[WIDE_LOAD]]
-; CHECK-NEXT: store <vscale x 2 x i64> [[PREDPHI]], ptr [[TMP7]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK-NEXT: [[TMP11:%.*]] = select <vscale x 2 x i1> [[TMP15]], <vscale x 2 x i1> [[TMP9]], <vscale x 2 x i1> zeroinitializer
+; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP11]], <vscale x 2 x i64> [[WIDE_LOAD]], <vscale x 2 x i64> [[TMP10]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI]], ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP14]])
+; CHECK-NEXT: [[TMP12:%.*]] = zext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP12]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP12]]
+; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[C:%.*]] = icmp ne i64 [[ELEM]], 42
@@ -809,7 +765,7 @@ define void @predicated_sdiv_by_constant(ptr noalias nocapture %a, i64 %n) {
; CHECK-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -821,18 +777,12 @@ define void @predicated_sdiv_by_constant(ptr noalias nocapture %a, i64 %n) {
; FIXED: vector.body:
; FIXED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; FIXED-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; FIXED-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i32 4
-; FIXED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8
-; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP3]], align 8
-; FIXED-NEXT: [[TMP4:%.*]] = icmp ne <4 x i64> [[WIDE_LOAD]], splat (i64 42)
+; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8
; FIXED-NEXT: [[TMP5:%.*]] = icmp ne <4 x i64> [[WIDE_LOAD1]], splat (i64 42)
-; FIXED-NEXT: [[TMP6:%.*]] = sdiv <4 x i64> [[WIDE_LOAD]], splat (i64 27)
; FIXED-NEXT: [[TMP7:%.*]] = sdiv <4 x i64> [[WIDE_LOAD1]], splat (i64 27)
-; FIXED-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP4]], <4 x i64> [[TMP6]], <4 x i64> [[WIDE_LOAD]]
; FIXED-NEXT: [[PREDPHI2:%.*]] = select <4 x i1> [[TMP5]], <4 x i64> [[TMP7]], <4 x i64> [[WIDE_LOAD1]]
-; FIXED-NEXT: store <4 x i64> [[PREDPHI]], ptr [[TMP1]], align 8
-; FIXED-NEXT: store <4 x i64> [[PREDPHI2]], ptr [[TMP3]], align 8
-; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; FIXED-NEXT: store <4 x i64> [[PREDPHI2]], ptr [[TMP1]], align 8
+; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; FIXED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; FIXED: middle.block:
@@ -884,38 +834,39 @@ for.end:
define void @predicated_sdiv_by_minus_one(ptr noalias nocapture %a, i64 %n) {
; CHECK-LABEL: @predicated_sdiv_by_minus_one(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 16
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 16
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 16, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP12]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
+; CHECK-NEXT: [[TMP15:%.*]] = icmp ult <vscale x 16 x i32> [[TMP6]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[A:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP7]], align 1
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP7]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP12]])
; CHECK-NEXT: [[TMP9:%.*]] = icmp ne <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 -128)
-; CHECK-NEXT: [[TMP10:%.*]] = select <vscale x 16 x i1> [[TMP9]], <vscale x 16 x i8> splat (i8 -1), <vscale x 16 x i8> splat (i8 1)
+; CHECK-NEXT: [[TMP16:%.*]] = select <vscale x 16 x i1> [[TMP15]], <vscale x 16 x i1> [[TMP9]], <vscale x 16 x i1> zeroinitializer
+; CHECK-NEXT: [[TMP10:%.*]] = select <vscale x 16 x i1> [[TMP16]], <vscale x 16 x i8> splat (i8 -1), <vscale x 16 x i8> splat (i8 1)
; CHECK-NEXT: [[TMP11:%.*]] = sdiv <vscale x 16 x i8> [[WIDE_LOAD]], [[TMP10]]
-; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 16 x i1> [[TMP9]], <vscale x 16 x i8> [[TMP11]], <vscale x 16 x i8> [[WIDE_LOAD]]
-; CHECK-NEXT: store <vscale x 16 x i8> [[PREDPHI]], ptr [[TMP7]], align 1
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
+; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 16 x i1> [[TMP16]], <vscale x 16 x i8> [[TMP11]], <vscale x 16 x i8> [[WIDE_LOAD]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[PREDPHI]], ptr align 1 [[TMP7]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP12]])
+; CHECK-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP13]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]]
+; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[ELEM:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[C:%.*]] = icmp ne i8 [[ELEM]], -128
@@ -928,7 +879,7 @@ define void @predicated_sdiv_by_minus_one(ptr noalias nocapture %a, i64 %n) {
; CHECK-NEXT: store i8 [[PHI]], ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -940,20 +891,13 @@ define void @predicated_sdiv_by_minus_one(ptr noalias nocapture %a, i64 %n) {
; FIXED: vector.body:
; FIXED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; FIXED-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[A:%.*]], i64 [[INDEX]]
-; FIXED-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 32
-; FIXED-NEXT: [[WIDE_LOAD:%.*]] = load <32 x i8>, ptr [[TMP1]], align 1
-; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <32 x i8>, ptr [[TMP3]], align 1
-; FIXED-NEXT: [[TMP4:%.*]] = icmp ne <32 x i8> [[WIDE_LOAD]], splat (i8 -128)
+; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <32 x i8>, ptr [[TMP1]], align 1
; FIXED-NEXT: [[TMP5:%.*]] = icmp ne <32 x i8> [[WIDE_LOAD1]], splat (i8 -128)
-; FIXED-NEXT: [[TMP6:%.*]] = select <32 x i1> [[TMP4]], <32 x i8> splat (i8 -1), <32 x i8> splat (i8 1)
; FIXED-NEXT: [[TMP7:%.*]] = select <32 x i1> [[TMP5]], <32 x i8> splat (i8 -1), <32 x i8> splat (i8 1)
-; FIXED-NEXT: [[TMP8:%.*]] = sdiv <32 x i8> [[WIDE_LOAD]], [[TMP6]]
; FIXED-NEXT: [[TMP9:%.*]] = sdiv <32 x i8> [[WIDE_LOAD1]], [[TMP7]]
-; FIXED-NEXT: [[PREDPHI:%.*]] = select <32 x i1> [[TMP4]], <32 x i8> [[TMP8]], <32 x i8> [[WIDE_LOAD]]
; FIXED-NEXT: [[PREDPHI2:%.*]] = select <32 x i1> [[TMP5]], <32 x i8> [[TMP9]], <32 x i8> [[WIDE_LOAD1]]
-; FIXED-NEXT: store <32 x i8> [[PREDPHI]], ptr [[TMP1]], align 1
-; FIXED-NEXT: store <32 x i8> [[PREDPHI2]], ptr [[TMP3]], align 1
-; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 64
+; FIXED-NEXT: store <32 x i8> [[PREDPHI2]], ptr [[TMP1]], align 1
+; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; FIXED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; FIXED: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll b/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll
index 052e841..ea8af94 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll
@@ -10,12 +10,6 @@ define void @test_wide_integer_induction(ptr noalias %a, i64 %N) {
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
-; CHECK-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP6]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2
; CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
@@ -76,12 +70,6 @@ define void @test_wide_ptr_induction(ptr noalias %a, ptr noalias %b, i64 %N) {
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP2]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -110,8 +98,8 @@ define void @test_wide_ptr_induction(ptr noalias %a, ptr noalias %b, i64 %N) {
; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi ptr [ [[B]], [[ENTRY]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ADDR:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[ADDR:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[B]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[ADDR]], i64 8
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: store ptr [[ADDR]], ptr [[ARRAYIDX]], align 8
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/f16.ll b/llvm/test/Transforms/LoopVectorize/RISCV/f16.ll
index effaf57..1220459 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/f16.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/f16.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 5
; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v -S | FileCheck %s -check-prefix=NO-ZVFHMIN
+; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v -S -prefer-predicate-over-epilogue=scalar-epilogue | FileCheck %s -check-prefix=NO-ZVFHMIN
; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v -S -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue | FileCheck %s -check-prefix=NO-ZVFHMIN
; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v,+zvfhmin -S | FileCheck %s -check-prefix=ZVFHMIN
@@ -25,37 +26,33 @@ define void @fadd(ptr noalias %a, ptr noalias %b, i64 %n) {
; ZVFHMIN-LABEL: define void @fadd(
; ZVFHMIN-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
; ZVFHMIN-NEXT: [[ENTRY:.*]]:
-; ZVFHMIN-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; ZVFHMIN-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 8
-; ZVFHMIN-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP8]]
-; ZVFHMIN-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; ZVFHMIN-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; ZVFHMIN: [[VECTOR_PH]]:
-; ZVFHMIN-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; ZVFHMIN-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 8
-; ZVFHMIN-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP10]]
-; ZVFHMIN-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; ZVFHMIN-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
; ZVFHMIN-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP12]], 8
; ZVFHMIN-NEXT: br label %[[VECTOR_BODY:.*]]
; ZVFHMIN: [[VECTOR_BODY]]:
-; ZVFHMIN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; ZVFHMIN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; ZVFHMIN-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; ZVFHMIN-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; ZVFHMIN-NEXT: [[TMP1:%.*]] = getelementptr half, ptr [[A]], i64 [[INDEX]]
; ZVFHMIN-NEXT: [[TMP2:%.*]] = getelementptr half, ptr [[B]], i64 [[INDEX]]
-; ZVFHMIN-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x half>, ptr [[TMP1]], align 2
-; ZVFHMIN-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x half>, ptr [[TMP2]], align 2
+; ZVFHMIN-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 2 [[TMP1]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP6]])
+; ZVFHMIN-NEXT: [[WIDE_LOAD1:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 2 [[TMP2]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP6]])
; ZVFHMIN-NEXT: [[TMP11:%.*]] = fadd <vscale x 8 x half> [[WIDE_LOAD]], [[WIDE_LOAD1]]
-; ZVFHMIN-NEXT: store <vscale x 8 x half> [[TMP11]], ptr [[TMP1]], align 2
-; ZVFHMIN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; ZVFHMIN-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; ZVFHMIN-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; ZVFHMIN-NEXT: call void @llvm.vp.store.nxv8f16.p0(<vscale x 8 x half> [[TMP11]], ptr align 2 [[TMP1]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP6]])
+; ZVFHMIN-NEXT: [[TMP13:%.*]] = zext i32 [[TMP6]] to i64
+; ZVFHMIN-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP13]], [[INDEX]]
+; ZVFHMIN-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]]
+; ZVFHMIN-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; ZVFHMIN-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; ZVFHMIN: [[MIDDLE_BLOCK]]:
-; ZVFHMIN-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; ZVFHMIN-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; ZVFHMIN-NEXT: br label %[[EXIT:.*]]
; ZVFHMIN: [[SCALAR_PH]]:
-; ZVFHMIN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; ZVFHMIN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; ZVFHMIN-NEXT: br label %[[LOOP:.*]]
; ZVFHMIN: [[LOOP]]:
-; ZVFHMIN-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ]
+; ZVFHMIN-NEXT: [[I:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ]
; ZVFHMIN-NEXT: [[A_GEP:%.*]] = getelementptr half, ptr [[A]], i64 [[I]]
; ZVFHMIN-NEXT: [[B_GEP:%.*]] = getelementptr half, ptr [[B]], i64 [[I]]
; ZVFHMIN-NEXT: [[X:%.*]] = load half, ptr [[A_GEP]], align 2
@@ -64,7 +61,7 @@ define void @fadd(ptr noalias %a, ptr noalias %b, i64 %n) {
; ZVFHMIN-NEXT: store half [[Z]], ptr [[A_GEP]], align 2
; ZVFHMIN-NEXT: [[I_NEXT]] = add i64 [[I]], 1
; ZVFHMIN-NEXT: [[DONE:%.*]] = icmp eq i64 [[I_NEXT]], [[N]]
-; ZVFHMIN-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
+; ZVFHMIN-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP4:![0-9]+]]
; ZVFHMIN: [[EXIT]]:
; ZVFHMIN-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/first-order-recurrence-scalable-vf1.ll b/llvm/test/Transforms/LoopVectorize/RISCV/first-order-recurrence-scalable-vf1.ll
index ce58ae1..aca00a9 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/first-order-recurrence-scalable-vf1.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/first-order-recurrence-scalable-vf1.ll
@@ -15,7 +15,6 @@ define i64 @pr97452_scalable_vf1_for(ptr %src, ptr noalias %dst) #0 {
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 23, [[TMP1]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 23, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: [[TMP4:%.*]] = sub i32 [[TMP3]], 1
; CHECK-NEXT: [[VECTOR_RECUR_INIT:%.*]] = insertelement <vscale x 1 x i64> poison, i64 0, i32 [[TMP4]]
@@ -28,7 +27,7 @@ define i64 @pr97452_scalable_vf1_for(ptr %src, ptr noalias %dst) #0 {
; CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 1 x i64> @llvm.vector.splice.nxv1i64(<vscale x 1 x i64> [[VECTOR_RECUR]], <vscale x 1 x i64> [[WIDE_LOAD]], i32 -1)
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[INDEX]]
; CHECK-NEXT: store <vscale x 1 x i64> [[TMP7]], ptr [[TMP8]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]]
; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/fminimumnum.ll b/llvm/test/Transforms/LoopVectorize/RISCV/fminimumnum.ll
index c9ba2af..713105f 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/fminimumnum.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/fminimumnum.ll
@@ -12,7 +12,7 @@ define void @fmin32(ptr noundef readonly captures(none) %input1, ptr noundef rea
; CHECK-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64
; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP8]], 4
-; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP13]])
+; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 15, i64 [[TMP13]])
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP14]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
@@ -31,8 +31,6 @@ define void @fmin32(ptr noundef readonly captures(none) %input1, ptr noundef rea
; CHECK-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP9]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 4096, [[TMP18]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 4096, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP19:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP19]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -43,7 +41,7 @@ define void @fmin32(ptr noundef readonly captures(none) %input1, ptr noundef rea
; CHECK-NEXT: [[TMP17:%.*]] = call <vscale x 4 x float> @llvm.minimumnum.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[WIDE_LOAD5]])
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[OUTPUT]], i64 0, i64 [[INDEX]]
; CHECK-NEXT: store <vscale x 4 x float> [[TMP17]], ptr [[TMP10]], align 4
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP18]]
; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
@@ -75,7 +73,7 @@ define void @fmin32(ptr noundef readonly captures(none) %input1, ptr noundef rea
; ZVFHMIN-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64
; ZVFHMIN-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; ZVFHMIN-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; ZVFHMIN-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]])
+; ZVFHMIN-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 15, i64 [[TMP1]])
; ZVFHMIN-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP2]]
; ZVFHMIN-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; ZVFHMIN: [[VECTOR_MEMCHECK]]:
@@ -94,8 +92,6 @@ define void @fmin32(ptr noundef readonly captures(none) %input1, ptr noundef rea
; ZVFHMIN-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; ZVFHMIN-NEXT: [[N_MOD_VF:%.*]] = urem i64 4096, [[TMP10]]
; ZVFHMIN-NEXT: [[N_VEC:%.*]] = sub i64 4096, [[N_MOD_VF]]
-; ZVFHMIN-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
-; ZVFHMIN-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 4
; ZVFHMIN-NEXT: br label %[[VECTOR_BODY:.*]]
; ZVFHMIN: [[VECTOR_BODY]]:
; ZVFHMIN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -106,7 +102,7 @@ define void @fmin32(ptr noundef readonly captures(none) %input1, ptr noundef rea
; ZVFHMIN-NEXT: [[TMP17:%.*]] = call <vscale x 4 x float> @llvm.minimumnum.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[WIDE_LOAD5]])
; ZVFHMIN-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[OUTPUT]], i64 0, i64 [[INDEX]]
; ZVFHMIN-NEXT: store <vscale x 4 x float> [[TMP17]], ptr [[TMP18]], align 4
-; ZVFHMIN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]]
+; ZVFHMIN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
; ZVFHMIN-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; ZVFHMIN-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; ZVFHMIN: [[MIDDLE_BLOCK]]:
@@ -161,7 +157,7 @@ define void @fmax32(ptr noundef readonly captures(none) %input1, ptr noundef rea
; CHECK-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64
; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP8]], 4
-; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP13]])
+; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 15, i64 [[TMP13]])
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP14]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
@@ -180,8 +176,6 @@ define void @fmax32(ptr noundef readonly captures(none) %input1, ptr noundef rea
; CHECK-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP9]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 4096, [[TMP18]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 4096, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP19:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP19]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -192,7 +186,7 @@ define void @fmax32(ptr noundef readonly captures(none) %input1, ptr noundef rea
; CHECK-NEXT: [[TMP17:%.*]] = call <vscale x 4 x float> @llvm.maximumnum.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[WIDE_LOAD5]])
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[OUTPUT]], i64 0, i64 [[INDEX]]
; CHECK-NEXT: store <vscale x 4 x float> [[TMP17]], ptr [[TMP10]], align 4
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP18]]
; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
@@ -224,7 +218,7 @@ define void @fmax32(ptr noundef readonly captures(none) %input1, ptr noundef rea
; ZVFHMIN-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64
; ZVFHMIN-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; ZVFHMIN-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; ZVFHMIN-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]])
+; ZVFHMIN-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 15, i64 [[TMP1]])
; ZVFHMIN-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP2]]
; ZVFHMIN-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; ZVFHMIN: [[VECTOR_MEMCHECK]]:
@@ -243,8 +237,6 @@ define void @fmax32(ptr noundef readonly captures(none) %input1, ptr noundef rea
; ZVFHMIN-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; ZVFHMIN-NEXT: [[N_MOD_VF:%.*]] = urem i64 4096, [[TMP10]]
; ZVFHMIN-NEXT: [[N_VEC:%.*]] = sub i64 4096, [[N_MOD_VF]]
-; ZVFHMIN-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
-; ZVFHMIN-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 4
; ZVFHMIN-NEXT: br label %[[VECTOR_BODY:.*]]
; ZVFHMIN: [[VECTOR_BODY]]:
; ZVFHMIN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -255,7 +247,7 @@ define void @fmax32(ptr noundef readonly captures(none) %input1, ptr noundef rea
; ZVFHMIN-NEXT: [[TMP17:%.*]] = call <vscale x 4 x float> @llvm.maximumnum.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[WIDE_LOAD5]])
; ZVFHMIN-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[OUTPUT]], i64 0, i64 [[INDEX]]
; ZVFHMIN-NEXT: store <vscale x 4 x float> [[TMP17]], ptr [[TMP18]], align 4
-; ZVFHMIN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]]
+; ZVFHMIN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
; ZVFHMIN-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; ZVFHMIN-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; ZVFHMIN: [[MIDDLE_BLOCK]]:
@@ -310,7 +302,7 @@ define void @fmin64(ptr noundef readonly captures(none) %input1, ptr noundef rea
; CHECK-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64
; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP8]], 2
-; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP13]])
+; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 15, i64 [[TMP13]])
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP14]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
@@ -329,8 +321,6 @@ define void @fmin64(ptr noundef readonly captures(none) %input1, ptr noundef rea
; CHECK-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP9]], 2
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 4096, [[TMP18]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 4096, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP19:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP19]], 2
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -341,7 +331,7 @@ define void @fmin64(ptr noundef readonly captures(none) %input1, ptr noundef rea
; CHECK-NEXT: [[TMP17:%.*]] = call <vscale x 2 x double> @llvm.minimumnum.nxv2f64(<vscale x 2 x double> [[WIDE_LOAD]], <vscale x 2 x double> [[WIDE_LOAD5]])
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[OUTPUT]], i64 0, i64 [[INDEX]]
; CHECK-NEXT: store <vscale x 2 x double> [[TMP17]], ptr [[TMP10]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP18]]
; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
@@ -373,7 +363,7 @@ define void @fmin64(ptr noundef readonly captures(none) %input1, ptr noundef rea
; ZVFHMIN-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64
; ZVFHMIN-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; ZVFHMIN-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; ZVFHMIN-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]])
+; ZVFHMIN-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 15, i64 [[TMP1]])
; ZVFHMIN-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP2]]
; ZVFHMIN-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; ZVFHMIN: [[VECTOR_MEMCHECK]]:
@@ -392,8 +382,6 @@ define void @fmin64(ptr noundef readonly captures(none) %input1, ptr noundef rea
; ZVFHMIN-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 2
; ZVFHMIN-NEXT: [[N_MOD_VF:%.*]] = urem i64 4096, [[TMP10]]
; ZVFHMIN-NEXT: [[N_VEC:%.*]] = sub i64 4096, [[N_MOD_VF]]
-; ZVFHMIN-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
-; ZVFHMIN-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 2
; ZVFHMIN-NEXT: br label %[[VECTOR_BODY:.*]]
; ZVFHMIN: [[VECTOR_BODY]]:
; ZVFHMIN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -404,7 +392,7 @@ define void @fmin64(ptr noundef readonly captures(none) %input1, ptr noundef rea
; ZVFHMIN-NEXT: [[TMP17:%.*]] = call <vscale x 2 x double> @llvm.minimumnum.nxv2f64(<vscale x 2 x double> [[WIDE_LOAD]], <vscale x 2 x double> [[WIDE_LOAD5]])
; ZVFHMIN-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[OUTPUT]], i64 0, i64 [[INDEX]]
; ZVFHMIN-NEXT: store <vscale x 2 x double> [[TMP17]], ptr [[TMP18]], align 8
-; ZVFHMIN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]]
+; ZVFHMIN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
; ZVFHMIN-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; ZVFHMIN-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; ZVFHMIN: [[MIDDLE_BLOCK]]:
@@ -459,7 +447,7 @@ define void @fmax64(ptr noundef readonly captures(none) %input1, ptr noundef rea
; CHECK-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64
; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP8]], 2
-; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP13]])
+; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 15, i64 [[TMP13]])
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP14]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
@@ -478,8 +466,6 @@ define void @fmax64(ptr noundef readonly captures(none) %input1, ptr noundef rea
; CHECK-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP9]], 2
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 4096, [[TMP18]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 4096, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP19:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP19]], 2
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -490,7 +476,7 @@ define void @fmax64(ptr noundef readonly captures(none) %input1, ptr noundef rea
; CHECK-NEXT: [[TMP17:%.*]] = call <vscale x 2 x double> @llvm.maximumnum.nxv2f64(<vscale x 2 x double> [[WIDE_LOAD]], <vscale x 2 x double> [[WIDE_LOAD5]])
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[OUTPUT]], i64 0, i64 [[INDEX]]
; CHECK-NEXT: store <vscale x 2 x double> [[TMP17]], ptr [[TMP10]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP18]]
; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
@@ -522,7 +508,7 @@ define void @fmax64(ptr noundef readonly captures(none) %input1, ptr noundef rea
; ZVFHMIN-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64
; ZVFHMIN-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; ZVFHMIN-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; ZVFHMIN-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]])
+; ZVFHMIN-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 15, i64 [[TMP1]])
; ZVFHMIN-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP2]]
; ZVFHMIN-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; ZVFHMIN: [[VECTOR_MEMCHECK]]:
@@ -541,8 +527,6 @@ define void @fmax64(ptr noundef readonly captures(none) %input1, ptr noundef rea
; ZVFHMIN-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 2
; ZVFHMIN-NEXT: [[N_MOD_VF:%.*]] = urem i64 4096, [[TMP10]]
; ZVFHMIN-NEXT: [[N_VEC:%.*]] = sub i64 4096, [[N_MOD_VF]]
-; ZVFHMIN-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
-; ZVFHMIN-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 2
; ZVFHMIN-NEXT: br label %[[VECTOR_BODY:.*]]
; ZVFHMIN: [[VECTOR_BODY]]:
; ZVFHMIN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -553,7 +537,7 @@ define void @fmax64(ptr noundef readonly captures(none) %input1, ptr noundef rea
; ZVFHMIN-NEXT: [[TMP17:%.*]] = call <vscale x 2 x double> @llvm.maximumnum.nxv2f64(<vscale x 2 x double> [[WIDE_LOAD]], <vscale x 2 x double> [[WIDE_LOAD5]])
; ZVFHMIN-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[OUTPUT]], i64 0, i64 [[INDEX]]
; ZVFHMIN-NEXT: store <vscale x 2 x double> [[TMP17]], ptr [[TMP18]], align 8
-; ZVFHMIN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]]
+; ZVFHMIN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
; ZVFHMIN-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; ZVFHMIN-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; ZVFHMIN: [[MIDDLE_BLOCK]]:
@@ -606,11 +590,7 @@ define void @fmin16(ptr noundef readonly captures(none) %input1, ptr noundef rea
; CHECK-NEXT: [[INPUT23:%.*]] = ptrtoint ptr [[INPUT2]] to i64
; CHECK-NEXT: [[INPUT12:%.*]] = ptrtoint ptr [[INPUT1]] to i64
; CHECK-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64
-; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP6]], 8
-; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP13]])
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP14]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 8
@@ -623,30 +603,29 @@ define void @fmin16(ptr noundef readonly captures(none) %input1, ptr noundef rea
; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]]
; CHECK-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 4096, [[TMP10]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 4096, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 8
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 4096, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT1]], i64 0, i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x half>, ptr [[TMP2]], align 2
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 2 [[TMP2]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT2]], i64 0, i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 8 x half>, ptr [[TMP4]], align 2
+; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 2 [[TMP4]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
; CHECK-NEXT: [[TMP17:%.*]] = call <vscale x 8 x half> @llvm.minimumnum.nxv8f16(<vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[WIDE_LOAD5]])
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[OUTPUT]], i64 0, i64 [[INDEX]]
-; CHECK-NEXT: store <vscale x 8 x half> [[TMP17]], ptr [[TMP7]], align 2
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]]
-; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv8f16.p0(<vscale x 8 x half> [[TMP17]], ptr align 2 [[TMP7]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
+; CHECK-NEXT: [[TMP20:%.*]] = zext i32 [[TMP13]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP20]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP20]]
+; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 4096
+; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 4096, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
@@ -659,7 +638,7 @@ define void @fmin16(ptr noundef readonly captures(none) %input1, ptr noundef rea
; CHECK-NEXT: store half [[OUT]], ptr [[ARRAYIDX4]], align 2
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 4096
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -669,11 +648,7 @@ define void @fmin16(ptr noundef readonly captures(none) %input1, ptr noundef rea
; ZVFHMIN-NEXT: [[INPUT23:%.*]] = ptrtoint ptr [[INPUT2]] to i64
; ZVFHMIN-NEXT: [[INPUT12:%.*]] = ptrtoint ptr [[INPUT1]] to i64
; ZVFHMIN-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64
-; ZVFHMIN-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; ZVFHMIN-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
-; ZVFHMIN-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]])
-; ZVFHMIN-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP2]]
-; ZVFHMIN-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; ZVFHMIN-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; ZVFHMIN: [[VECTOR_MEMCHECK]]:
; ZVFHMIN-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; ZVFHMIN-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8
@@ -686,30 +661,29 @@ define void @fmin16(ptr noundef readonly captures(none) %input1, ptr noundef rea
; ZVFHMIN-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]]
; ZVFHMIN-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; ZVFHMIN: [[VECTOR_PH]]:
-; ZVFHMIN-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; ZVFHMIN-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 8
-; ZVFHMIN-NEXT: [[N_MOD_VF:%.*]] = urem i64 4096, [[TMP10]]
-; ZVFHMIN-NEXT: [[N_VEC:%.*]] = sub i64 4096, [[N_MOD_VF]]
; ZVFHMIN-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
; ZVFHMIN-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 8
; ZVFHMIN-NEXT: br label %[[VECTOR_BODY:.*]]
; ZVFHMIN: [[VECTOR_BODY]]:
-; ZVFHMIN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; ZVFHMIN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; ZVFHMIN-NEXT: [[AVL:%.*]] = phi i64 [ 4096, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; ZVFHMIN-NEXT: [[TMP19:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; ZVFHMIN-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT1]], i64 0, i64 [[INDEX]]
-; ZVFHMIN-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x half>, ptr [[TMP13]], align 2
+; ZVFHMIN-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 2 [[TMP13]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP19]])
; ZVFHMIN-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT2]], i64 0, i64 [[INDEX]]
-; ZVFHMIN-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 8 x half>, ptr [[TMP15]], align 2
+; ZVFHMIN-NEXT: [[WIDE_LOAD5:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 2 [[TMP15]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP19]])
; ZVFHMIN-NEXT: [[TMP17:%.*]] = call <vscale x 8 x half> @llvm.minimumnum.nxv8f16(<vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[WIDE_LOAD5]])
; ZVFHMIN-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[OUTPUT]], i64 0, i64 [[INDEX]]
-; ZVFHMIN-NEXT: store <vscale x 8 x half> [[TMP17]], ptr [[TMP18]], align 2
-; ZVFHMIN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]]
-; ZVFHMIN-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; ZVFHMIN-NEXT: call void @llvm.vp.store.nxv8f16.p0(<vscale x 8 x half> [[TMP17]], ptr align 2 [[TMP18]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP19]])
+; ZVFHMIN-NEXT: [[TMP16:%.*]] = zext i32 [[TMP19]] to i64
+; ZVFHMIN-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP16]], [[INDEX]]
+; ZVFHMIN-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
+; ZVFHMIN-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 4096
; ZVFHMIN-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; ZVFHMIN: [[MIDDLE_BLOCK]]:
-; ZVFHMIN-NEXT: [[CMP_N:%.*]] = icmp eq i64 4096, [[N_VEC]]
-; ZVFHMIN-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; ZVFHMIN-NEXT: br label %[[EXIT:.*]]
; ZVFHMIN: [[SCALAR_PH]]:
-; ZVFHMIN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
+; ZVFHMIN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; ZVFHMIN-NEXT: br label %[[FOR_BODY:.*]]
; ZVFHMIN: [[FOR_BODY]]:
; ZVFHMIN-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
@@ -722,7 +696,7 @@ define void @fmin16(ptr noundef readonly captures(none) %input1, ptr noundef rea
; ZVFHMIN-NEXT: store half [[OUT]], ptr [[ARRAYIDX4]], align 2
; ZVFHMIN-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; ZVFHMIN-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 4096
-; ZVFHMIN-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; ZVFHMIN-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; ZVFHMIN: [[EXIT]]:
; ZVFHMIN-NEXT: ret void
;
@@ -755,11 +729,7 @@ define void @fmax16(ptr noundef readonly captures(none) %input1, ptr noundef rea
; CHECK-NEXT: [[INPUT23:%.*]] = ptrtoint ptr [[INPUT2]] to i64
; CHECK-NEXT: [[INPUT12:%.*]] = ptrtoint ptr [[INPUT1]] to i64
; CHECK-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64
-; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP6]], 8
-; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP13]])
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP14]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 8
@@ -772,30 +742,29 @@ define void @fmax16(ptr noundef readonly captures(none) %input1, ptr noundef rea
; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]]
; CHECK-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 4096, [[TMP10]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 4096, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 8
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 4096, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT1]], i64 0, i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x half>, ptr [[TMP2]], align 2
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 2 [[TMP2]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT2]], i64 0, i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 8 x half>, ptr [[TMP4]], align 2
+; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 2 [[TMP4]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
; CHECK-NEXT: [[TMP17:%.*]] = call <vscale x 8 x half> @llvm.maximumnum.nxv8f16(<vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[WIDE_LOAD5]])
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[OUTPUT]], i64 0, i64 [[INDEX]]
-; CHECK-NEXT: store <vscale x 8 x half> [[TMP17]], ptr [[TMP7]], align 2
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]]
-; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv8f16.p0(<vscale x 8 x half> [[TMP17]], ptr align 2 [[TMP7]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
+; CHECK-NEXT: [[TMP20:%.*]] = zext i32 [[TMP13]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP20]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP20]]
+; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 4096
+; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 4096, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
@@ -808,7 +777,7 @@ define void @fmax16(ptr noundef readonly captures(none) %input1, ptr noundef rea
; CHECK-NEXT: store half [[OUT]], ptr [[ARRAYIDX4]], align 2
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 4096
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -818,11 +787,7 @@ define void @fmax16(ptr noundef readonly captures(none) %input1, ptr noundef rea
; ZVFHMIN-NEXT: [[INPUT23:%.*]] = ptrtoint ptr [[INPUT2]] to i64
; ZVFHMIN-NEXT: [[INPUT12:%.*]] = ptrtoint ptr [[INPUT1]] to i64
; ZVFHMIN-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64
-; ZVFHMIN-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; ZVFHMIN-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
-; ZVFHMIN-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]])
-; ZVFHMIN-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP2]]
-; ZVFHMIN-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; ZVFHMIN-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; ZVFHMIN: [[VECTOR_MEMCHECK]]:
; ZVFHMIN-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; ZVFHMIN-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8
@@ -835,30 +800,29 @@ define void @fmax16(ptr noundef readonly captures(none) %input1, ptr noundef rea
; ZVFHMIN-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]]
; ZVFHMIN-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; ZVFHMIN: [[VECTOR_PH]]:
-; ZVFHMIN-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; ZVFHMIN-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 8
-; ZVFHMIN-NEXT: [[N_MOD_VF:%.*]] = urem i64 4096, [[TMP10]]
-; ZVFHMIN-NEXT: [[N_VEC:%.*]] = sub i64 4096, [[N_MOD_VF]]
; ZVFHMIN-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
; ZVFHMIN-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 8
; ZVFHMIN-NEXT: br label %[[VECTOR_BODY:.*]]
; ZVFHMIN: [[VECTOR_BODY]]:
-; ZVFHMIN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; ZVFHMIN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; ZVFHMIN-NEXT: [[AVL:%.*]] = phi i64 [ 4096, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; ZVFHMIN-NEXT: [[TMP19:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; ZVFHMIN-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT1]], i64 0, i64 [[INDEX]]
-; ZVFHMIN-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x half>, ptr [[TMP13]], align 2
+; ZVFHMIN-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 2 [[TMP13]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP19]])
; ZVFHMIN-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT2]], i64 0, i64 [[INDEX]]
-; ZVFHMIN-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 8 x half>, ptr [[TMP15]], align 2
+; ZVFHMIN-NEXT: [[WIDE_LOAD5:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 2 [[TMP15]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP19]])
; ZVFHMIN-NEXT: [[TMP17:%.*]] = call <vscale x 8 x half> @llvm.maximumnum.nxv8f16(<vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[WIDE_LOAD5]])
; ZVFHMIN-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[OUTPUT]], i64 0, i64 [[INDEX]]
-; ZVFHMIN-NEXT: store <vscale x 8 x half> [[TMP17]], ptr [[TMP18]], align 2
-; ZVFHMIN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]]
-; ZVFHMIN-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; ZVFHMIN-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; ZVFHMIN-NEXT: call void @llvm.vp.store.nxv8f16.p0(<vscale x 8 x half> [[TMP17]], ptr align 2 [[TMP18]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP19]])
+; ZVFHMIN-NEXT: [[TMP16:%.*]] = zext i32 [[TMP19]] to i64
+; ZVFHMIN-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP16]], [[INDEX]]
+; ZVFHMIN-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
+; ZVFHMIN-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 4096
+; ZVFHMIN-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; ZVFHMIN: [[MIDDLE_BLOCK]]:
-; ZVFHMIN-NEXT: [[CMP_N:%.*]] = icmp eq i64 4096, [[N_VEC]]
-; ZVFHMIN-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; ZVFHMIN-NEXT: br label %[[EXIT:.*]]
; ZVFHMIN: [[SCALAR_PH]]:
-; ZVFHMIN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
+; ZVFHMIN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; ZVFHMIN-NEXT: br label %[[FOR_BODY:.*]]
; ZVFHMIN: [[FOR_BODY]]:
; ZVFHMIN-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
@@ -871,7 +835,7 @@ define void @fmax16(ptr noundef readonly captures(none) %input1, ptr noundef rea
; ZVFHMIN-NEXT: store half [[OUT]], ptr [[ARRAYIDX4]], align 2
; ZVFHMIN-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; ZVFHMIN-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 4096
-; ZVFHMIN-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; ZVFHMIN-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; ZVFHMIN: [[EXIT]]:
; ZVFHMIN-NEXT: ret void
;
@@ -907,10 +871,11 @@ declare half @llvm.maximumnum.f16(half, half)
; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]]}
; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]], [[META2]]}
; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]]}
-; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META2]]}
-; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META1]]}
-; CHECK: [[LOOP12]] = distinct !{[[LOOP12]], [[META1]], [[META2]]}
-; CHECK: [[LOOP13]] = distinct !{[[LOOP13]], [[META1]]}
+; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META11:![0-9]+]], [[META2]]}
+; CHECK: [[META11]] = !{!"llvm.loop.isvectorized.tailfoldingstyle", !"evl"}
+; CHECK: [[LOOP12]] = distinct !{[[LOOP12]], [[META1]]}
+; CHECK: [[LOOP13]] = distinct !{[[LOOP13]], [[META1]], [[META11]], [[META2]]}
+; CHECK: [[LOOP14]] = distinct !{[[LOOP14]], [[META1]]}
;.
; ZVFHMIN: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; ZVFHMIN: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
@@ -922,8 +887,9 @@ declare half @llvm.maximumnum.f16(half, half)
; ZVFHMIN: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]]}
; ZVFHMIN: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]], [[META2]]}
; ZVFHMIN: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]]}
-; ZVFHMIN: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META2]]}
-; ZVFHMIN: [[LOOP11]] = distinct !{[[LOOP11]], [[META1]]}
-; ZVFHMIN: [[LOOP12]] = distinct !{[[LOOP12]], [[META1]], [[META2]]}
-; ZVFHMIN: [[LOOP13]] = distinct !{[[LOOP13]], [[META1]]}
+; ZVFHMIN: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META11:![0-9]+]], [[META2]]}
+; ZVFHMIN: [[META11]] = !{!"llvm.loop.isvectorized.tailfoldingstyle", !"evl"}
+; ZVFHMIN: [[LOOP12]] = distinct !{[[LOOP12]], [[META1]]}
+; ZVFHMIN: [[LOOP13]] = distinct !{[[LOOP13]], [[META1]], [[META11]], [[META2]]}
+; ZVFHMIN: [[LOOP14]] = distinct !{[[LOOP14]], [[META1]]}
;.
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/force-vect-msg.ll b/llvm/test/Transforms/LoopVectorize/RISCV/force-vect-msg.ll
index 1ea70b6d..7819ae2 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/force-vect-msg.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/force-vect-msg.ll
@@ -3,8 +3,8 @@
; CHECK: LV: Loop hints: force=enabled
; CHECK: LV: Scalar loop costs: 4.
-; ChosenFactor.Cost is 4, but the real cost will be divided by the width, which is 2.
-; CHECK: Cost for VF 2: 4 (Estimated cost per lane: 2.0)
+; ChosenFactor.Cost is 9, but the real cost will be divided by the width, which is 2.2.
+; CHECK: Cost for VF vscale x 2: 9
; Regardless of force vectorization or not, this loop will eventually be vectorized because of the cost model.
; Therefore, the following message does not need to be printed even if vectorization is explicitly forced in the metadata.
; CHECK-NOT: LV: Vectorization seems to be not beneficial, but was forced by a user.
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll b/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll
index e6825fa..60f7c24 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll
@@ -20,11 +20,7 @@ define void @skip_free_iv_truncate(i16 %x, ptr %A) #0 {
; CHECK-NEXT: [[TMP3:%.*]] = udiv i64 [[TMP2]], 3
; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[UMIN21]], [[TMP3]]
; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[TMP4]], 1
-; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 8
-; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.umax.i64(i64 128, i64 [[TMP7]])
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[TMP5]], [[TMP8]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[TMP31:%.*]] = shl nsw i64 [[X_I64]], 1
; CHECK-NEXT: [[SCEVGEP9:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP31]]
@@ -57,42 +53,36 @@ define void @skip_free_iv_truncate(i16 %x, ptr %A) #0 {
; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[FOUND_CONFLICT]], [[FOUND_CONFLICT19]]
; CHECK-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP45:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP46:%.*]] = mul nuw i64 [[TMP45]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP5]], [[TMP46]]
-; CHECK-NEXT: [[TMP47:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
-; CHECK-NEXT: [[TMP48:%.*]] = select i1 [[TMP47]], i64 [[TMP46]], i64 [[N_MOD_VF]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP5]], [[TMP48]]
; CHECK-NEXT: [[TMP51:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP52:%.*]] = mul nuw i64 [[TMP51]], 8
-; CHECK-NEXT: [[TMP49:%.*]] = mul i64 [[N_VEC]], 3
-; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[X_I64]], [[TMP49]]
-; CHECK-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32
-; CHECK-NEXT: [[TMP50:%.*]] = mul i32 [[DOTCAST]], 3
-; CHECK-NEXT: [[IND_END22:%.*]] = add i32 [[X_I32]], [[TMP50]]
; CHECK-NEXT: [[TMP53:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[X_I64]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: [[TMP55:%.*]] = mul <vscale x 8 x i64> [[TMP53]], splat (i64 3)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 8 x i64> [[DOTSPLAT]], [[TMP55]]
-; CHECK-NEXT: [[TMP58:%.*]] = mul i64 3, [[TMP52]]
-; CHECK-NEXT: [[DOTSPLATINSERT24:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP58]], i64 0
-; CHECK-NEXT: [[DOTSPLAT25:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT24]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 8 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP5]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP27:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
+; CHECK-NEXT: [[TMP28:%.*]] = zext i32 [[TMP27]] to i64
+; CHECK-NEXT: [[TMP58:%.*]] = mul i64 3, [[TMP28]]
+; CHECK-NEXT: [[DOTSPLATINSERT24:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP58]], i64 0
+; CHECK-NEXT: [[DOTSPLAT25:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT24]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: [[TMP59:%.*]] = getelementptr i16, ptr [[A]], <vscale x 8 x i64> [[VEC_IND]]
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> [[TMP59]], i32 2, <vscale x 8 x i1> splat (i1 true)), !alias.scope [[META0:![0-9]+]], !noalias [[META3:![0-9]+]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP52]]
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> align 2 [[TMP59]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP27]]), !alias.scope [[META0:![0-9]+]], !noalias [[META3:![0-9]+]]
+; CHECK-NEXT: [[TMP47:%.*]] = zext i32 [[TMP27]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP47]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP47]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[DOTSPLAT25]]
-; CHECK-NEXT: [[TMP60:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP60]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: [[TMP48:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP5]]
+; CHECK-NEXT: br i1 [[TMP48]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: br label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ [[X_I64]], %[[ENTRY]] ], [ [[X_I64]], %[[VECTOR_MEMCHECK]] ]
-; CHECK-NEXT: [[BC_RESUME_VAL13:%.*]] = phi i32 [ [[IND_END22]], %[[MIDDLE_BLOCK]] ], [ [[X_I32]], %[[ENTRY]] ], [ [[X_I32]], %[[VECTOR_MEMCHECK]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[X_I64]], %[[ENTRY]] ], [ [[X_I64]], %[[VECTOR_MEMCHECK]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL13:%.*]] = phi i32 [ [[X_I32]], %[[ENTRY]] ], [ [[X_I32]], %[[VECTOR_MEMCHECK]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
@@ -107,7 +97,7 @@ define void @skip_free_iv_truncate(i16 %x, ptr %A) #0 {
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 3
; CHECK-NEXT: [[TMP64]] = trunc i64 [[IV_NEXT]] to i32
; CHECK-NEXT: [[C:%.*]] = icmp slt i64 [[IV]], 99
-; CHECK-NEXT: br i1 [[C]], label %[[LOOP]], label %[[EXIT:.*]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-NEXT: br i1 [[C]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -144,8 +134,9 @@ attributes #0 = { "target-features"="+64bit,+v,+zvl256b" }
; CHECK: [[META3]] = !{[[META4:![0-9]+]], [[META5:![0-9]+]]}
; CHECK: [[META4]] = distinct !{[[META4]], [[META2]]}
; CHECK: [[META5]] = distinct !{[[META5]], [[META2]]}
-; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META7:![0-9]+]], [[META8:![0-9]+]]}
+; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META7:![0-9]+]], [[META8:![0-9]+]], [[META9:![0-9]+]]}
; CHECK: [[META7]] = !{!"llvm.loop.isvectorized", i32 1}
-; CHECK: [[META8]] = !{!"llvm.loop.unroll.runtime.disable"}
-; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META7]]}
+; CHECK: [[META8]] = !{!"llvm.loop.isvectorized.tailfoldingstyle", !"evl"}
+; CHECK: [[META9]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META7]]}
;.
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll
index 1addff6..1b2ac80 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -mtriple riscv64-linux-gnu -mattr=+v,+d -passes=loop-vectorize < %s -S -o - | FileCheck %s -check-prefix=OUTLOOP
-; RUN: opt -mtriple riscv64-linux-gnu -mattr=+v,+d -passes=loop-vectorize -prefer-inloop-reductions < %s -S -o - | FileCheck %s -check-prefix=INLOOP
+; RUN: opt -mtriple riscv64-linux-gnu -mattr=+v,+d -prefer-predicate-over-epilogue=scalar-epilogue -passes=loop-vectorize < %s -S -o - | FileCheck %s -check-prefix=OUTLOOP
+; RUN: opt -mtriple riscv64-linux-gnu -mattr=+v,+d -prefer-predicate-over-epilogue=scalar-epilogue -passes=loop-vectorize -prefer-inloop-reductions < %s -S -o - | FileCheck %s -check-prefix=INLOOP
; RUN: opt -passes=loop-vectorize -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue -mtriple=riscv64 -mattr=+v -S < %s 2>&1 | FileCheck --check-prefix=IF-EVL-OUTLOOP %s
; RUN: opt -passes=loop-vectorize -prefer-inloop-reductions -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue -mtriple=riscv64 -mattr=+v -S < %s 2>&1 | FileCheck --check-prefix=IF-EVL-INLOOP %s
@@ -23,8 +23,6 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) {
; OUTLOOP-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP2]], 4
; OUTLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N]], [[TMP3]]
; OUTLOOP-NEXT: [[N_VEC:%.*]] = sub i32 [[N]], [[N_MOD_VF]]
-; OUTLOOP-NEXT: [[TMP4:%.*]] = call i32 @llvm.vscale.i32()
-; OUTLOOP-NEXT: [[TMP5:%.*]] = mul nuw i32 [[TMP4]], 4
; OUTLOOP-NEXT: br label [[VECTOR_BODY:%.*]]
; OUTLOOP: vector.body:
; OUTLOOP-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -33,7 +31,7 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) {
; OUTLOOP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i16>, ptr [[TMP7]], align 2
; OUTLOOP-NEXT: [[TMP9:%.*]] = sext <vscale x 4 x i16> [[WIDE_LOAD]] to <vscale x 4 x i32>
; OUTLOOP-NEXT: [[TMP10]] = add <vscale x 4 x i32> [[VEC_PHI]], [[TMP9]]
-; OUTLOOP-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP5]]
+; OUTLOOP-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP3]]
; OUTLOOP-NEXT: [[TMP11:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; OUTLOOP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; OUTLOOP: middle.block:
@@ -75,8 +73,6 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) {
; INLOOP-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP2]], 8
; INLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N]], [[TMP3]]
; INLOOP-NEXT: [[N_VEC:%.*]] = sub i32 [[N]], [[N_MOD_VF]]
-; INLOOP-NEXT: [[TMP4:%.*]] = call i32 @llvm.vscale.i32()
-; INLOOP-NEXT: [[TMP5:%.*]] = mul nuw i32 [[TMP4]], 8
; INLOOP-NEXT: br label [[VECTOR_BODY:%.*]]
; INLOOP: vector.body:
; INLOOP-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -86,7 +82,7 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) {
; INLOOP-NEXT: [[TMP9:%.*]] = sext <vscale x 8 x i16> [[WIDE_LOAD]] to <vscale x 8 x i32>
; INLOOP-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> [[TMP9]])
; INLOOP-NEXT: [[TMP11]] = add i32 [[VEC_PHI]], [[TMP10]]
-; INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP5]]
+; INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP3]]
; INLOOP-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; INLOOP-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; INLOOP: middle.block:
@@ -120,12 +116,6 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) {
; IF-EVL-OUTLOOP: for.body.preheader:
; IF-EVL-OUTLOOP-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL-OUTLOOP: vector.ph:
-; IF-EVL-OUTLOOP-NEXT: [[TMP0:%.*]] = call i32 @llvm.vscale.i32()
-; IF-EVL-OUTLOOP-NEXT: [[TMP1:%.*]] = mul nuw i32 [[TMP0]], 4
-; IF-EVL-OUTLOOP-NEXT: [[TMP2:%.*]] = sub i32 [[TMP1]], 1
-; IF-EVL-OUTLOOP-NEXT: [[N_RND_UP:%.*]] = add i32 [[N]], [[TMP2]]
-; IF-EVL-OUTLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N_RND_UP]], [[TMP1]]
-; IF-EVL-OUTLOOP-NEXT: [[N_VEC:%.*]] = sub i32 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-OUTLOOP-NEXT: [[TMP3:%.*]] = call i32 @llvm.vscale.i32()
; IF-EVL-OUTLOOP-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP3]], 4
; IF-EVL-OUTLOOP-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -174,12 +164,6 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) {
; IF-EVL-INLOOP: for.body.preheader:
; IF-EVL-INLOOP-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL-INLOOP: vector.ph:
-; IF-EVL-INLOOP-NEXT: [[TMP0:%.*]] = call i32 @llvm.vscale.i32()
-; IF-EVL-INLOOP-NEXT: [[TMP1:%.*]] = mul nuw i32 [[TMP0]], 8
-; IF-EVL-INLOOP-NEXT: [[TMP2:%.*]] = sub i32 [[TMP1]], 1
-; IF-EVL-INLOOP-NEXT: [[N_RND_UP:%.*]] = add i32 [[N]], [[TMP2]]
-; IF-EVL-INLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N_RND_UP]], [[TMP1]]
-; IF-EVL-INLOOP-NEXT: [[N_VEC:%.*]] = sub i32 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-INLOOP-NEXT: [[TMP3:%.*]] = call i32 @llvm.vscale.i32()
; IF-EVL-INLOOP-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP3]], 8
; IF-EVL-INLOOP-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -252,8 +236,6 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; OUTLOOP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; OUTLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; OUTLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; OUTLOOP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; OUTLOOP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; OUTLOOP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[START:%.*]], i64 0
; OUTLOOP-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; OUTLOOP-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -264,7 +246,7 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; OUTLOOP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4
; OUTLOOP-NEXT: [[TMP9:%.*]] = icmp slt <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
; OUTLOOP-NEXT: [[TMP10]] = select <vscale x 4 x i1> [[TMP9]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> [[VEC_PHI]]
-; OUTLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; OUTLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; OUTLOOP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; OUTLOOP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; OUTLOOP: middle.block:
@@ -300,8 +282,6 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; INLOOP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; INLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; INLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; INLOOP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; INLOOP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; INLOOP-NEXT: br label [[VECTOR_BODY:%.*]]
; INLOOP: vector.body:
; INLOOP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -310,7 +290,7 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; INLOOP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4
; INLOOP-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.smin.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
; INLOOP-NEXT: [[RDX_MINMAX]] = call i32 @llvm.smin.i32(i32 [[TMP9]], i32 [[VEC_PHI]])
-; INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; INLOOP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; INLOOP-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; INLOOP: middle.block:
@@ -338,12 +318,6 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: entry:
; IF-EVL-OUTLOOP-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL-OUTLOOP: vector.ph:
-; IF-EVL-OUTLOOP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-OUTLOOP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-OUTLOOP-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-OUTLOOP-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]]
-; IF-EVL-OUTLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-OUTLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-OUTLOOP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-OUTLOOP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-OUTLOOP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[START:%.*]], i64 0
@@ -352,7 +326,7 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP: vector.body:
; IF-EVL-OUTLOOP-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[BROADCAST_SPLAT]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-OUTLOOP-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-OUTLOOP-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-OUTLOOP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-OUTLOOP-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -389,19 +363,13 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: entry:
; IF-EVL-INLOOP-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL-INLOOP: vector.ph:
-; IF-EVL-INLOOP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-INLOOP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-INLOOP-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-INLOOP-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]]
-; IF-EVL-INLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-INLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-INLOOP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-INLOOP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-INLOOP-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL-INLOOP: vector.body:
; IF-EVL-INLOOP-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[RDX_MINMAX:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-INLOOP-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-INLOOP-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-INLOOP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-INLOOP-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-accesses.ll b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-accesses.ll
index e226eea..8046394 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-accesses.ll
@@ -6,41 +6,43 @@
define void @load_store_factor2_i32(ptr %p) {
; CHECK-LABEL: @load_store_factor2_i32(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP6]]
-; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP7]], align 4
-; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]])
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP7]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; CHECK-NEXT: [[TMP13:%.*]] = icmp ult <vscale x 4 x i32> [[TMP6]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP14:%.*]] = shl i64 [[INDEX]], 1
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP14]]
+; CHECK-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave2.nxv8i1(<vscale x 4 x i1> [[TMP13]], <vscale x 4 x i1> [[TMP13]])
+; CHECK-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 8 x i32> @llvm.masked.load.nxv8i32.p0(ptr [[TMP15]], i32 4, <vscale x 8 x i1> [[INTERLEAVED_MASK]], <vscale x 8 x i32> poison)
+; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_MASKED_VEC]])
; CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 1
; CHECK-NEXT: [[TMP10:%.*]] = add <vscale x 4 x i32> [[TMP8]], splat (i32 1)
; CHECK-NEXT: [[TMP11:%.*]] = add <vscale x 4 x i32> [[TMP9]], splat (i32 2)
; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> [[TMP10]], <vscale x 4 x i32> [[TMP11]])
-; CHECK-NEXT: store <vscale x 8 x i32> [[INTERLEAVED_VEC]], ptr [[TMP7]], align 4
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave2.nxv8i1(<vscale x 4 x i1> [[TMP13]], <vscale x 4 x i1> [[TMP13]])
+; CHECK-NEXT: call void @llvm.masked.store.nxv8i32.p0(<vscale x 8 x i32> [[INTERLEAVED_VEC]], ptr [[TMP15]], i32 4, <vscale x 8 x i1> [[INTERLEAVED_MASK1]])
+; CHECK-NEXT: [[TMP16:%.*]] = zext i32 [[TMP7]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP16]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
+; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1
; CHECK-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]]
; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
@@ -53,7 +55,7 @@ define void @load_store_factor2_i32(ptr %p) {
; CHECK-NEXT: store i32 [[Y1]], ptr [[Q1]], align 4
; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -102,41 +104,43 @@ define void @load_store_factor2_i32(ptr %p) {
;
; SCALABLE-LABEL: @load_store_factor2_i32(
; SCALABLE-NEXT: entry:
-; SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; SCALABLE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; SCALABLE: vector.ph:
-; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]]
; SCALABLE: vector.body:
; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[TMP6:%.*]] = shl i64 [[INDEX]], 1
-; SCALABLE-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP6]]
-; SCALABLE-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP7]], align 4
-; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]])
+; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP7]], i64 0
+; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; SCALABLE-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; SCALABLE-NEXT: [[TMP13:%.*]] = icmp ult <vscale x 4 x i32> [[TMP6]], [[BROADCAST_SPLAT]]
+; SCALABLE-NEXT: [[TMP14:%.*]] = shl i64 [[INDEX]], 1
+; SCALABLE-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP14]]
+; SCALABLE-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave2.nxv8i1(<vscale x 4 x i1> [[TMP13]], <vscale x 4 x i1> [[TMP13]])
+; SCALABLE-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 8 x i32> @llvm.masked.load.nxv8i32.p0(ptr [[TMP15]], i32 4, <vscale x 8 x i1> [[INTERLEAVED_MASK]], <vscale x 8 x i32> poison)
+; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_MASKED_VEC]])
; SCALABLE-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
; SCALABLE-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 1
; SCALABLE-NEXT: [[TMP10:%.*]] = add <vscale x 4 x i32> [[TMP8]], splat (i32 1)
; SCALABLE-NEXT: [[TMP11:%.*]] = add <vscale x 4 x i32> [[TMP9]], splat (i32 2)
; SCALABLE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> [[TMP10]], <vscale x 4 x i32> [[TMP11]])
-; SCALABLE-NEXT: store <vscale x 8 x i32> [[INTERLEAVED_VEC]], ptr [[TMP7]], align 4
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; SCALABLE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; SCALABLE-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave2.nxv8i1(<vscale x 4 x i1> [[TMP13]], <vscale x 4 x i1> [[TMP13]])
+; SCALABLE-NEXT: call void @llvm.masked.store.nxv8i32.p0(<vscale x 8 x i32> [[INTERLEAVED_VEC]], ptr [[TMP15]], i32 4, <vscale x 8 x i1> [[INTERLEAVED_MASK1]])
+; SCALABLE-NEXT: [[TMP16:%.*]] = zext i32 [[TMP7]] to i64
+; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP16]], [[INDEX]]
+; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
+; SCALABLE-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; SCALABLE-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; SCALABLE: middle.block:
-; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; SCALABLE-NEXT: br label [[EXIT:%.*]]
; SCALABLE: scalar.ph:
-; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; SCALABLE-NEXT: br label [[LOOP:%.*]]
; SCALABLE: loop:
-; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; SCALABLE-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1
; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]]
; SCALABLE-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
@@ -149,7 +153,7 @@ define void @load_store_factor2_i32(ptr %p) {
; SCALABLE-NEXT: store i32 [[Y1]], ptr [[Q1]], align 4
; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1
; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP4:![0-9]+]]
; SCALABLE: exit:
; SCALABLE-NEXT: ret void
;
@@ -180,41 +184,43 @@ exit:
define void @load_store_factor2_i64(ptr %p) {
; CHECK-LABEL: @load_store_factor2_i64(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP6]]
-; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 4 x i64>, ptr [[TMP7]], align 8
-; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave2.nxv4i64(<vscale x 4 x i64> [[WIDE_VEC]])
-; CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 0
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP7]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
+; CHECK-NEXT: [[TMP10:%.*]] = icmp ult <vscale x 2 x i32> [[TMP6]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP8:%.*]] = shl i64 [[INDEX]], 1
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP8]]
+; CHECK-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 4 x i1> @llvm.vector.interleave2.nxv4i1(<vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]])
+; CHECK-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 4 x i64> @llvm.masked.load.nxv4i64.p0(ptr [[TMP14]], i32 8, <vscale x 4 x i1> [[INTERLEAVED_MASK]], <vscale x 4 x i64> poison)
+; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave2.nxv4i64(<vscale x 4 x i64> [[WIDE_MASKED_VEC]])
+; CHECK-NEXT: [[TMP20:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 0
; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 1
-; CHECK-NEXT: [[TMP10:%.*]] = add <vscale x 2 x i64> [[TMP8]], splat (i64 1)
+; CHECK-NEXT: [[TMP13:%.*]] = add <vscale x 2 x i64> [[TMP20]], splat (i64 1)
; CHECK-NEXT: [[TMP11:%.*]] = add <vscale x 2 x i64> [[TMP9]], splat (i64 2)
-; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 4 x i64> @llvm.vector.interleave2.nxv4i64(<vscale x 2 x i64> [[TMP10]], <vscale x 2 x i64> [[TMP11]])
-; CHECK-NEXT: store <vscale x 4 x i64> [[INTERLEAVED_VEC]], ptr [[TMP7]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 4 x i64> @llvm.vector.interleave2.nxv4i64(<vscale x 2 x i64> [[TMP13]], <vscale x 2 x i64> [[TMP11]])
+; CHECK-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 4 x i1> @llvm.vector.interleave2.nxv4i1(<vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]])
+; CHECK-NEXT: call void @llvm.masked.store.nxv4i64.p0(<vscale x 4 x i64> [[INTERLEAVED_VEC]], ptr [[TMP14]], i32 8, <vscale x 4 x i1> [[INTERLEAVED_MASK1]])
+; CHECK-NEXT: [[TMP16:%.*]] = zext i32 [[TMP7]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP16]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
+; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1
; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
@@ -227,7 +233,7 @@ define void @load_store_factor2_i64(ptr %p) {
; CHECK-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8
; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -276,41 +282,43 @@ define void @load_store_factor2_i64(ptr %p) {
;
; SCALABLE-LABEL: @load_store_factor2_i64(
; SCALABLE-NEXT: entry:
-; SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; SCALABLE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; SCALABLE: vector.ph:
-; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]]
; SCALABLE: vector.body:
; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[TMP6:%.*]] = shl i64 [[INDEX]], 1
-; SCALABLE-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP6]]
-; SCALABLE-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 4 x i64>, ptr [[TMP7]], align 8
-; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave2.nxv4i64(<vscale x 4 x i64> [[WIDE_VEC]])
-; SCALABLE-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 0
+; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP7]], i64 0
+; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+; SCALABLE-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
+; SCALABLE-NEXT: [[TMP10:%.*]] = icmp ult <vscale x 2 x i32> [[TMP6]], [[BROADCAST_SPLAT]]
+; SCALABLE-NEXT: [[TMP8:%.*]] = shl i64 [[INDEX]], 1
+; SCALABLE-NEXT: [[TMP14:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP8]]
+; SCALABLE-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 4 x i1> @llvm.vector.interleave2.nxv4i1(<vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]])
+; SCALABLE-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 4 x i64> @llvm.masked.load.nxv4i64.p0(ptr [[TMP14]], i32 8, <vscale x 4 x i1> [[INTERLEAVED_MASK]], <vscale x 4 x i64> poison)
+; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave2.nxv4i64(<vscale x 4 x i64> [[WIDE_MASKED_VEC]])
+; SCALABLE-NEXT: [[TMP20:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 0
; SCALABLE-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 1
-; SCALABLE-NEXT: [[TMP10:%.*]] = add <vscale x 2 x i64> [[TMP8]], splat (i64 1)
+; SCALABLE-NEXT: [[TMP13:%.*]] = add <vscale x 2 x i64> [[TMP20]], splat (i64 1)
; SCALABLE-NEXT: [[TMP11:%.*]] = add <vscale x 2 x i64> [[TMP9]], splat (i64 2)
-; SCALABLE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 4 x i64> @llvm.vector.interleave2.nxv4i64(<vscale x 2 x i64> [[TMP10]], <vscale x 2 x i64> [[TMP11]])
-; SCALABLE-NEXT: store <vscale x 4 x i64> [[INTERLEAVED_VEC]], ptr [[TMP7]], align 8
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; SCALABLE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; SCALABLE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 4 x i64> @llvm.vector.interleave2.nxv4i64(<vscale x 2 x i64> [[TMP13]], <vscale x 2 x i64> [[TMP11]])
+; SCALABLE-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 4 x i1> @llvm.vector.interleave2.nxv4i1(<vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]])
+; SCALABLE-NEXT: call void @llvm.masked.store.nxv4i64.p0(<vscale x 4 x i64> [[INTERLEAVED_VEC]], ptr [[TMP14]], i32 8, <vscale x 4 x i1> [[INTERLEAVED_MASK1]])
+; SCALABLE-NEXT: [[TMP16:%.*]] = zext i32 [[TMP7]] to i64
+; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP16]], [[INDEX]]
+; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
+; SCALABLE-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; SCALABLE-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; SCALABLE: middle.block:
-; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; SCALABLE-NEXT: br label [[EXIT:%.*]]
; SCALABLE: scalar.ph:
-; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; SCALABLE-NEXT: br label [[LOOP:%.*]]
; SCALABLE: loop:
-; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; SCALABLE-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1
; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
@@ -323,7 +331,7 @@ define void @load_store_factor2_i64(ptr %p) {
; SCALABLE-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8
; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1
; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP5:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP6:![0-9]+]]
; SCALABLE: exit:
; SCALABLE-NEXT: ret void
;
@@ -354,24 +362,24 @@ exit:
define void @load_store_factor3_i32(ptr %p) {
; CHECK-LABEL: @load_store_factor3_i32(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP7]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = mul i64 [[INDEX]], 3
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP0]]
-; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 12 x i32>, ptr [[TMP1]], align 4
-; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave3.nxv12i32(<vscale x 12 x i32> [[WIDE_VEC]])
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP7]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; CHECK-NEXT: [[TMP14:%.*]] = icmp ult <vscale x 4 x i32> [[TMP6]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP16:%.*]] = mul i64 [[INDEX]], 3
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP16]]
+; CHECK-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 12 x i1> @llvm.vector.interleave3.nxv12i1(<vscale x 4 x i1> [[TMP14]], <vscale x 4 x i1> [[TMP14]], <vscale x 4 x i1> [[TMP14]])
+; CHECK-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 12 x i32> @llvm.masked.load.nxv12i32.p0(ptr [[TMP17]], i32 4, <vscale x 12 x i1> [[INTERLEAVED_MASK]], <vscale x 12 x i32> poison)
+; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave3.nxv12i32(<vscale x 12 x i32> [[WIDE_MASKED_VEC]])
; CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 1
; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 2
@@ -379,18 +387,20 @@ define void @load_store_factor3_i32(ptr %p) {
; CHECK-NEXT: [[TMP12:%.*]] = add <vscale x 4 x i32> [[TMP9]], splat (i32 2)
; CHECK-NEXT: [[TMP13:%.*]] = add <vscale x 4 x i32> [[TMP10]], splat (i32 3)
; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 12 x i32> @llvm.vector.interleave3.nxv12i32(<vscale x 4 x i32> [[TMP11]], <vscale x 4 x i32> [[TMP12]], <vscale x 4 x i32> [[TMP13]])
-; CHECK-NEXT: store <vscale x 12 x i32> [[INTERLEAVED_VEC]], ptr [[TMP1]], align 4
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 12 x i1> @llvm.vector.interleave3.nxv12i1(<vscale x 4 x i1> [[TMP14]], <vscale x 4 x i1> [[TMP14]], <vscale x 4 x i1> [[TMP14]])
+; CHECK-NEXT: call void @llvm.masked.store.nxv12i32.p0(<vscale x 12 x i32> [[INTERLEAVED_VEC]], ptr [[TMP17]], i32 4, <vscale x 12 x i1> [[INTERLEAVED_MASK1]])
+; CHECK-NEXT: [[TMP19:%.*]] = zext i32 [[TMP7]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP19]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]]
+; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 3
; CHECK-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]]
; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
@@ -408,7 +418,7 @@ define void @load_store_factor3_i32(ptr %p) {
; CHECK-NEXT: store i32 [[Y2]], ptr [[Q2]], align 4
; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -466,24 +476,24 @@ define void @load_store_factor3_i32(ptr %p) {
;
; SCALABLE-LABEL: @load_store_factor3_i32(
; SCALABLE-NEXT: entry:
-; SCALABLE-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
-; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP7]]
-; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; SCALABLE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; SCALABLE: vector.ph:
-; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]]
; SCALABLE: vector.body:
; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[TMP0:%.*]] = mul i64 [[INDEX]], 3
-; SCALABLE-NEXT: [[TMP1:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP0]]
-; SCALABLE-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 12 x i32>, ptr [[TMP1]], align 4
-; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave3.nxv12i32(<vscale x 12 x i32> [[WIDE_VEC]])
+; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP7]], i64 0
+; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; SCALABLE-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; SCALABLE-NEXT: [[TMP14:%.*]] = icmp ult <vscale x 4 x i32> [[TMP6]], [[BROADCAST_SPLAT]]
+; SCALABLE-NEXT: [[TMP16:%.*]] = mul i64 [[INDEX]], 3
+; SCALABLE-NEXT: [[TMP17:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP16]]
+; SCALABLE-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 12 x i1> @llvm.vector.interleave3.nxv12i1(<vscale x 4 x i1> [[TMP14]], <vscale x 4 x i1> [[TMP14]], <vscale x 4 x i1> [[TMP14]])
+; SCALABLE-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 12 x i32> @llvm.masked.load.nxv12i32.p0(ptr [[TMP17]], i32 4, <vscale x 12 x i1> [[INTERLEAVED_MASK]], <vscale x 12 x i32> poison)
+; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave3.nxv12i32(<vscale x 12 x i32> [[WIDE_MASKED_VEC]])
; SCALABLE-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
; SCALABLE-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 1
; SCALABLE-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 2
@@ -491,18 +501,20 @@ define void @load_store_factor3_i32(ptr %p) {
; SCALABLE-NEXT: [[TMP12:%.*]] = add <vscale x 4 x i32> [[TMP9]], splat (i32 2)
; SCALABLE-NEXT: [[TMP13:%.*]] = add <vscale x 4 x i32> [[TMP10]], splat (i32 3)
; SCALABLE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 12 x i32> @llvm.vector.interleave3.nxv12i32(<vscale x 4 x i32> [[TMP11]], <vscale x 4 x i32> [[TMP12]], <vscale x 4 x i32> [[TMP13]])
-; SCALABLE-NEXT: store <vscale x 12 x i32> [[INTERLEAVED_VEC]], ptr [[TMP1]], align 4
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; SCALABLE-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; SCALABLE-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 12 x i1> @llvm.vector.interleave3.nxv12i1(<vscale x 4 x i1> [[TMP14]], <vscale x 4 x i1> [[TMP14]], <vscale x 4 x i1> [[TMP14]])
+; SCALABLE-NEXT: call void @llvm.masked.store.nxv12i32.p0(<vscale x 12 x i32> [[INTERLEAVED_VEC]], ptr [[TMP17]], i32 4, <vscale x 12 x i1> [[INTERLEAVED_MASK1]])
+; SCALABLE-NEXT: [[TMP19:%.*]] = zext i32 [[TMP7]] to i64
+; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP19]], [[INDEX]]
+; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]]
+; SCALABLE-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; SCALABLE-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; SCALABLE: middle.block:
-; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; SCALABLE-NEXT: br label [[EXIT:%.*]]
; SCALABLE: scalar.ph:
-; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; SCALABLE-NEXT: br label [[LOOP:%.*]]
; SCALABLE: loop:
-; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; SCALABLE-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 3
; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]]
; SCALABLE-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
@@ -520,7 +532,7 @@ define void @load_store_factor3_i32(ptr %p) {
; SCALABLE-NEXT: store i32 [[Y2]], ptr [[Q2]], align 4
; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1
; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP7:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP8:![0-9]+]]
; SCALABLE: exit:
; SCALABLE-NEXT: ret void
;
@@ -557,43 +569,45 @@ exit:
define void @load_store_factor3_i64(ptr %p) {
; CHECK-LABEL: @load_store_factor3_i64(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP7]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = mul i64 [[INDEX]], 3
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP0]]
-; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 6 x i64>, ptr [[TMP1]], align 8
-; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave3.nxv6i64(<vscale x 6 x i64> [[WIDE_VEC]])
-; CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 0
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP7]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
+; CHECK-NEXT: [[TMP11:%.*]] = icmp ult <vscale x 2 x i32> [[TMP6]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP8:%.*]] = mul i64 [[INDEX]], 3
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP8]]
+; CHECK-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 6 x i1> @llvm.vector.interleave3.nxv6i1(<vscale x 2 x i1> [[TMP11]], <vscale x 2 x i1> [[TMP11]], <vscale x 2 x i1> [[TMP11]])
+; CHECK-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 6 x i64> @llvm.masked.load.nxv6i64.p0(ptr [[TMP14]], i32 8, <vscale x 6 x i1> [[INTERLEAVED_MASK]], <vscale x 6 x i64> poison)
+; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave3.nxv6i64(<vscale x 6 x i64> [[WIDE_MASKED_VEC]])
+; CHECK-NEXT: [[TMP23:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 0
; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 1
; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 2
-; CHECK-NEXT: [[TMP11:%.*]] = add <vscale x 2 x i64> [[TMP8]], splat (i64 1)
+; CHECK-NEXT: [[TMP25:%.*]] = add <vscale x 2 x i64> [[TMP23]], splat (i64 1)
; CHECK-NEXT: [[TMP12:%.*]] = add <vscale x 2 x i64> [[TMP9]], splat (i64 2)
; CHECK-NEXT: [[TMP13:%.*]] = add <vscale x 2 x i64> [[TMP10]], splat (i64 3)
-; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 6 x i64> @llvm.vector.interleave3.nxv6i64(<vscale x 2 x i64> [[TMP11]], <vscale x 2 x i64> [[TMP12]], <vscale x 2 x i64> [[TMP13]])
-; CHECK-NEXT: store <vscale x 6 x i64> [[INTERLEAVED_VEC]], ptr [[TMP1]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 6 x i64> @llvm.vector.interleave3.nxv6i64(<vscale x 2 x i64> [[TMP25]], <vscale x 2 x i64> [[TMP12]], <vscale x 2 x i64> [[TMP13]])
+; CHECK-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 6 x i1> @llvm.vector.interleave3.nxv6i1(<vscale x 2 x i1> [[TMP11]], <vscale x 2 x i1> [[TMP11]], <vscale x 2 x i1> [[TMP11]])
+; CHECK-NEXT: call void @llvm.masked.store.nxv6i64.p0(<vscale x 6 x i64> [[INTERLEAVED_VEC]], ptr [[TMP14]], i32 8, <vscale x 6 x i1> [[INTERLEAVED_MASK1]])
+; CHECK-NEXT: [[TMP19:%.*]] = zext i32 [[TMP7]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP19]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]]
+; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 3
; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
@@ -611,7 +625,7 @@ define void @load_store_factor3_i64(ptr %p) {
; CHECK-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8
; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -669,43 +683,45 @@ define void @load_store_factor3_i64(ptr %p) {
;
; SCALABLE-LABEL: @load_store_factor3_i64(
; SCALABLE-NEXT: entry:
-; SCALABLE-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 2
-; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP7]]
-; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; SCALABLE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; SCALABLE: vector.ph:
-; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]]
; SCALABLE: vector.body:
; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[TMP0:%.*]] = mul i64 [[INDEX]], 3
-; SCALABLE-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP0]]
-; SCALABLE-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 6 x i64>, ptr [[TMP1]], align 8
-; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave3.nxv6i64(<vscale x 6 x i64> [[WIDE_VEC]])
-; SCALABLE-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 0
+; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP7]], i64 0
+; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+; SCALABLE-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
+; SCALABLE-NEXT: [[TMP11:%.*]] = icmp ult <vscale x 2 x i32> [[TMP6]], [[BROADCAST_SPLAT]]
+; SCALABLE-NEXT: [[TMP8:%.*]] = mul i64 [[INDEX]], 3
+; SCALABLE-NEXT: [[TMP14:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP8]]
+; SCALABLE-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 6 x i1> @llvm.vector.interleave3.nxv6i1(<vscale x 2 x i1> [[TMP11]], <vscale x 2 x i1> [[TMP11]], <vscale x 2 x i1> [[TMP11]])
+; SCALABLE-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 6 x i64> @llvm.masked.load.nxv6i64.p0(ptr [[TMP14]], i32 8, <vscale x 6 x i1> [[INTERLEAVED_MASK]], <vscale x 6 x i64> poison)
+; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave3.nxv6i64(<vscale x 6 x i64> [[WIDE_MASKED_VEC]])
+; SCALABLE-NEXT: [[TMP23:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 0
; SCALABLE-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 1
; SCALABLE-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 2
-; SCALABLE-NEXT: [[TMP11:%.*]] = add <vscale x 2 x i64> [[TMP8]], splat (i64 1)
+; SCALABLE-NEXT: [[TMP25:%.*]] = add <vscale x 2 x i64> [[TMP23]], splat (i64 1)
; SCALABLE-NEXT: [[TMP12:%.*]] = add <vscale x 2 x i64> [[TMP9]], splat (i64 2)
; SCALABLE-NEXT: [[TMP13:%.*]] = add <vscale x 2 x i64> [[TMP10]], splat (i64 3)
-; SCALABLE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 6 x i64> @llvm.vector.interleave3.nxv6i64(<vscale x 2 x i64> [[TMP11]], <vscale x 2 x i64> [[TMP12]], <vscale x 2 x i64> [[TMP13]])
-; SCALABLE-NEXT: store <vscale x 6 x i64> [[INTERLEAVED_VEC]], ptr [[TMP1]], align 8
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; SCALABLE-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; SCALABLE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 6 x i64> @llvm.vector.interleave3.nxv6i64(<vscale x 2 x i64> [[TMP25]], <vscale x 2 x i64> [[TMP12]], <vscale x 2 x i64> [[TMP13]])
+; SCALABLE-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 6 x i1> @llvm.vector.interleave3.nxv6i1(<vscale x 2 x i1> [[TMP11]], <vscale x 2 x i1> [[TMP11]], <vscale x 2 x i1> [[TMP11]])
+; SCALABLE-NEXT: call void @llvm.masked.store.nxv6i64.p0(<vscale x 6 x i64> [[INTERLEAVED_VEC]], ptr [[TMP14]], i32 8, <vscale x 6 x i1> [[INTERLEAVED_MASK1]])
+; SCALABLE-NEXT: [[TMP19:%.*]] = zext i32 [[TMP7]] to i64
+; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP19]], [[INDEX]]
+; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]]
+; SCALABLE-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; SCALABLE-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; SCALABLE: middle.block:
-; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; SCALABLE-NEXT: br label [[EXIT:%.*]]
; SCALABLE: scalar.ph:
-; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; SCALABLE-NEXT: br label [[LOOP:%.*]]
; SCALABLE: loop:
-; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; SCALABLE-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 3
; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
@@ -723,7 +739,7 @@ define void @load_store_factor3_i64(ptr %p) {
; SCALABLE-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8
; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1
; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP9:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP10:![0-9]+]]
; SCALABLE: exit:
; SCALABLE-NEXT: ret void
;
@@ -760,45 +776,47 @@ exit:
define void @load_store_factor4(ptr %p) {
; CHECK-LABEL: @load_store_factor4(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[INDEX]], 4
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP6]]
-; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i64>, ptr [[TMP7]], align 8
-; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave4.nxv8i64(<vscale x 8 x i64> [[WIDE_VEC]])
-; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 0
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP7]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
+; CHECK-NEXT: [[TMP10:%.*]] = icmp ult <vscale x 2 x i32> [[TMP6]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP8:%.*]] = mul i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP8]]
+; CHECK-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave4.nxv8i1(<vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]])
+; CHECK-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 8 x i64> @llvm.masked.load.nxv8i64.p0(ptr [[TMP9]], i32 8, <vscale x 8 x i1> [[INTERLEAVED_MASK]], <vscale x 8 x i64> poison)
+; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave4.nxv8i64(<vscale x 8 x i64> [[WIDE_MASKED_VEC]])
+; CHECK-NEXT: [[TMP24:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 0
; CHECK-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 1
; CHECK-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 2
; CHECK-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 3
-; CHECK-NEXT: [[TMP14:%.*]] = add <vscale x 2 x i64> [[TMP10]], splat (i64 1)
+; CHECK-NEXT: [[TMP26:%.*]] = add <vscale x 2 x i64> [[TMP24]], splat (i64 1)
; CHECK-NEXT: [[TMP15:%.*]] = add <vscale x 2 x i64> [[TMP11]], splat (i64 2)
; CHECK-NEXT: [[TMP16:%.*]] = add <vscale x 2 x i64> [[TMP12]], splat (i64 3)
; CHECK-NEXT: [[TMP17:%.*]] = add <vscale x 2 x i64> [[TMP13]], splat (i64 4)
-; CHECK-NEXT: [[INTERLEAVED_VEC4:%.*]] = call <vscale x 8 x i64> @llvm.vector.interleave4.nxv8i64(<vscale x 2 x i64> [[TMP14]], <vscale x 2 x i64> [[TMP15]], <vscale x 2 x i64> [[TMP16]], <vscale x 2 x i64> [[TMP17]])
-; CHECK-NEXT: store <vscale x 8 x i64> [[INTERLEAVED_VEC4]], ptr [[TMP7]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 8 x i64> @llvm.vector.interleave4.nxv8i64(<vscale x 2 x i64> [[TMP26]], <vscale x 2 x i64> [[TMP15]], <vscale x 2 x i64> [[TMP16]], <vscale x 2 x i64> [[TMP17]])
+; CHECK-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave4.nxv8i1(<vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]])
+; CHECK-NEXT: call void @llvm.masked.store.nxv8i64.p0(<vscale x 8 x i64> [[INTERLEAVED_VEC]], ptr [[TMP9]], i32 8, <vscale x 8 x i1> [[INTERLEAVED_MASK1]])
+; CHECK-NEXT: [[TMP22:%.*]] = zext i32 [[TMP7]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP22]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP22]]
+; CHECK-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 4
; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
@@ -821,7 +839,7 @@ define void @load_store_factor4(ptr %p) {
; CHECK-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8
; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -886,45 +904,47 @@ define void @load_store_factor4(ptr %p) {
;
; SCALABLE-LABEL: @load_store_factor4(
; SCALABLE-NEXT: entry:
-; SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; SCALABLE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; SCALABLE: vector.ph:
-; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]]
; SCALABLE: vector.body:
; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[TMP6:%.*]] = mul i64 [[INDEX]], 4
-; SCALABLE-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP6]]
-; SCALABLE-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i64>, ptr [[TMP7]], align 8
-; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave4.nxv8i64(<vscale x 8 x i64> [[WIDE_VEC]])
-; SCALABLE-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 0
+; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP7]], i64 0
+; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+; SCALABLE-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
+; SCALABLE-NEXT: [[TMP10:%.*]] = icmp ult <vscale x 2 x i32> [[TMP6]], [[BROADCAST_SPLAT]]
+; SCALABLE-NEXT: [[TMP8:%.*]] = mul i64 [[INDEX]], 4
+; SCALABLE-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP8]]
+; SCALABLE-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave4.nxv8i1(<vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]])
+; SCALABLE-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 8 x i64> @llvm.masked.load.nxv8i64.p0(ptr [[TMP9]], i32 8, <vscale x 8 x i1> [[INTERLEAVED_MASK]], <vscale x 8 x i64> poison)
+; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave4.nxv8i64(<vscale x 8 x i64> [[WIDE_MASKED_VEC]])
+; SCALABLE-NEXT: [[TMP24:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 0
; SCALABLE-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 1
; SCALABLE-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 2
; SCALABLE-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 3
-; SCALABLE-NEXT: [[TMP14:%.*]] = add <vscale x 2 x i64> [[TMP10]], splat (i64 1)
+; SCALABLE-NEXT: [[TMP26:%.*]] = add <vscale x 2 x i64> [[TMP24]], splat (i64 1)
; SCALABLE-NEXT: [[TMP15:%.*]] = add <vscale x 2 x i64> [[TMP11]], splat (i64 2)
; SCALABLE-NEXT: [[TMP16:%.*]] = add <vscale x 2 x i64> [[TMP12]], splat (i64 3)
; SCALABLE-NEXT: [[TMP17:%.*]] = add <vscale x 2 x i64> [[TMP13]], splat (i64 4)
-; SCALABLE-NEXT: [[INTERLEAVED_VEC4:%.*]] = call <vscale x 8 x i64> @llvm.vector.interleave4.nxv8i64(<vscale x 2 x i64> [[TMP14]], <vscale x 2 x i64> [[TMP15]], <vscale x 2 x i64> [[TMP16]], <vscale x 2 x i64> [[TMP17]])
-; SCALABLE-NEXT: store <vscale x 8 x i64> [[INTERLEAVED_VEC4]], ptr [[TMP7]], align 8
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; SCALABLE-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; SCALABLE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 8 x i64> @llvm.vector.interleave4.nxv8i64(<vscale x 2 x i64> [[TMP26]], <vscale x 2 x i64> [[TMP15]], <vscale x 2 x i64> [[TMP16]], <vscale x 2 x i64> [[TMP17]])
+; SCALABLE-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave4.nxv8i1(<vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]])
+; SCALABLE-NEXT: call void @llvm.masked.store.nxv8i64.p0(<vscale x 8 x i64> [[INTERLEAVED_VEC]], ptr [[TMP9]], i32 8, <vscale x 8 x i1> [[INTERLEAVED_MASK1]])
+; SCALABLE-NEXT: [[TMP22:%.*]] = zext i32 [[TMP7]] to i64
+; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP22]], [[INDEX]]
+; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP22]]
+; SCALABLE-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; SCALABLE-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; SCALABLE: middle.block:
-; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; SCALABLE-NEXT: br label [[EXIT:%.*]]
; SCALABLE: scalar.ph:
-; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; SCALABLE-NEXT: br label [[LOOP:%.*]]
; SCALABLE: loop:
-; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; SCALABLE-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 4
; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
@@ -947,7 +967,7 @@ define void @load_store_factor4(ptr %p) {
; SCALABLE-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8
; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1
; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP11:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP12:![0-9]+]]
; SCALABLE: exit:
; SCALABLE-NEXT: ret void
;
@@ -990,44 +1010,48 @@ exit:
define void @load_store_factor5(ptr %p) {
; CHECK-LABEL: @load_store_factor5(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP3]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP4]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = mul i64 [[INDEX]], 5
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP0]]
-; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 5 x i64>, ptr [[TMP1]], align 8
-; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave5.nxv5i64(<vscale x 5 x i64> [[WIDE_VEC]])
-; CHECK-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
-; CHECK-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
-; CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
-; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
-; CHECK-NEXT: [[TMP10:%.*]] = add <vscale x 1 x i64> [[TMP5]], splat (i64 1)
-; CHECK-NEXT: [[TMP11:%.*]] = add <vscale x 1 x i64> [[TMP6]], splat (i64 2)
-; CHECK-NEXT: [[TMP12:%.*]] = add <vscale x 1 x i64> [[TMP7]], splat (i64 3)
-; CHECK-NEXT: [[TMP13:%.*]] = add <vscale x 1 x i64> [[TMP8]], splat (i64 4)
-; CHECK-NEXT: [[TMP14:%.*]] = add <vscale x 1 x i64> [[TMP9]], splat (i64 5)
-; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 5 x i64> @llvm.vector.interleave5.nxv5i64(<vscale x 1 x i64> [[TMP10]], <vscale x 1 x i64> [[TMP11]], <vscale x 1 x i64> [[TMP12]], <vscale x 1 x i64> [[TMP13]], <vscale x 1 x i64> [[TMP14]])
-; CHECK-NEXT: store <vscale x 5 x i64> [[INTERLEAVED_VEC]], ptr [[TMP1]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]]
-; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 1, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 1 x i32> poison, i32 [[TMP7]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 1 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP18:%.*]] = call <vscale x 1 x i32> @llvm.stepvector.nxv1i32()
+; CHECK-NEXT: [[TMP5:%.*]] = icmp ult <vscale x 1 x i32> [[TMP18]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[INDEX]], 5
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP6]]
+; CHECK-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 5 x i1> @llvm.vector.interleave5.nxv5i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; CHECK-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 5 x i64> @llvm.masked.load.nxv5i64.p0(ptr [[TMP19]], i32 8, <vscale x 5 x i1> [[INTERLEAVED_MASK]], <vscale x 5 x i64> poison)
+; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave5.nxv5i64(<vscale x 5 x i64> [[WIDE_MASKED_VEC]])
+; CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
+; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
+; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
+; CHECK-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
+; CHECK-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
+; CHECK-NEXT: [[TMP13:%.*]] = add <vscale x 1 x i64> [[TMP8]], splat (i64 1)
+; CHECK-NEXT: [[TMP14:%.*]] = add <vscale x 1 x i64> [[TMP9]], splat (i64 2)
+; CHECK-NEXT: [[TMP15:%.*]] = add <vscale x 1 x i64> [[TMP10]], splat (i64 3)
+; CHECK-NEXT: [[TMP16:%.*]] = add <vscale x 1 x i64> [[TMP11]], splat (i64 4)
+; CHECK-NEXT: [[TMP17:%.*]] = add <vscale x 1 x i64> [[TMP12]], splat (i64 5)
+; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 5 x i64> @llvm.vector.interleave5.nxv5i64(<vscale x 1 x i64> [[TMP13]], <vscale x 1 x i64> [[TMP14]], <vscale x 1 x i64> [[TMP15]], <vscale x 1 x i64> [[TMP16]], <vscale x 1 x i64> [[TMP17]])
+; CHECK-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 5 x i1> @llvm.vector.interleave5.nxv5i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; CHECK-NEXT: call void @llvm.masked.store.nxv5i64.p0(<vscale x 5 x i64> [[INTERLEAVED_VEC]], ptr [[TMP19]], i32 8, <vscale x 5 x i1> [[INTERLEAVED_MASK1]])
+; CHECK-NEXT: [[TMP25:%.*]] = zext i32 [[TMP7]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP25]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP25]]
+; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 5
; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
@@ -1055,7 +1079,7 @@ define void @load_store_factor5(ptr %p) {
; CHECK-NEXT: store i64 [[Y4]], ptr [[Q4]], align 8
; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP13:![0-9]+]]
+; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -1129,44 +1153,48 @@ define void @load_store_factor5(ptr %p) {
;
; SCALABLE-LABEL: @load_store_factor5(
; SCALABLE-NEXT: entry:
-; SCALABLE-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP3]]
-; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; SCALABLE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; SCALABLE: vector.ph:
-; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP4]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]]
; SCALABLE: vector.body:
; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[TMP0:%.*]] = mul i64 [[INDEX]], 5
-; SCALABLE-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP0]]
-; SCALABLE-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 5 x i64>, ptr [[TMP1]], align 8
-; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave5.nxv5i64(<vscale x 5 x i64> [[WIDE_VEC]])
-; SCALABLE-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
-; SCALABLE-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
-; SCALABLE-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
-; SCALABLE-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
-; SCALABLE-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
-; SCALABLE-NEXT: [[TMP10:%.*]] = add <vscale x 1 x i64> [[TMP5]], splat (i64 1)
-; SCALABLE-NEXT: [[TMP11:%.*]] = add <vscale x 1 x i64> [[TMP6]], splat (i64 2)
-; SCALABLE-NEXT: [[TMP12:%.*]] = add <vscale x 1 x i64> [[TMP7]], splat (i64 3)
-; SCALABLE-NEXT: [[TMP13:%.*]] = add <vscale x 1 x i64> [[TMP8]], splat (i64 4)
-; SCALABLE-NEXT: [[TMP14:%.*]] = add <vscale x 1 x i64> [[TMP9]], splat (i64 5)
-; SCALABLE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 5 x i64> @llvm.vector.interleave5.nxv5i64(<vscale x 1 x i64> [[TMP10]], <vscale x 1 x i64> [[TMP11]], <vscale x 1 x i64> [[TMP12]], <vscale x 1 x i64> [[TMP13]], <vscale x 1 x i64> [[TMP14]])
-; SCALABLE-NEXT: store <vscale x 5 x i64> [[INTERLEAVED_VEC]], ptr [[TMP1]], align 8
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]]
-; SCALABLE-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 1, i1 true)
+; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 1 x i32> poison, i32 [[TMP7]], i64 0
+; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 1 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+; SCALABLE-NEXT: [[TMP18:%.*]] = call <vscale x 1 x i32> @llvm.stepvector.nxv1i32()
+; SCALABLE-NEXT: [[TMP5:%.*]] = icmp ult <vscale x 1 x i32> [[TMP18]], [[BROADCAST_SPLAT]]
+; SCALABLE-NEXT: [[TMP6:%.*]] = mul i64 [[INDEX]], 5
+; SCALABLE-NEXT: [[TMP19:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP6]]
+; SCALABLE-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 5 x i1> @llvm.vector.interleave5.nxv5i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; SCALABLE-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 5 x i64> @llvm.masked.load.nxv5i64.p0(ptr [[TMP19]], i32 8, <vscale x 5 x i1> [[INTERLEAVED_MASK]], <vscale x 5 x i64> poison)
+; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave5.nxv5i64(<vscale x 5 x i64> [[WIDE_MASKED_VEC]])
+; SCALABLE-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
+; SCALABLE-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
+; SCALABLE-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
+; SCALABLE-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
+; SCALABLE-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
+; SCALABLE-NEXT: [[TMP13:%.*]] = add <vscale x 1 x i64> [[TMP8]], splat (i64 1)
+; SCALABLE-NEXT: [[TMP14:%.*]] = add <vscale x 1 x i64> [[TMP9]], splat (i64 2)
+; SCALABLE-NEXT: [[TMP15:%.*]] = add <vscale x 1 x i64> [[TMP10]], splat (i64 3)
+; SCALABLE-NEXT: [[TMP16:%.*]] = add <vscale x 1 x i64> [[TMP11]], splat (i64 4)
+; SCALABLE-NEXT: [[TMP17:%.*]] = add <vscale x 1 x i64> [[TMP12]], splat (i64 5)
+; SCALABLE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 5 x i64> @llvm.vector.interleave5.nxv5i64(<vscale x 1 x i64> [[TMP13]], <vscale x 1 x i64> [[TMP14]], <vscale x 1 x i64> [[TMP15]], <vscale x 1 x i64> [[TMP16]], <vscale x 1 x i64> [[TMP17]])
+; SCALABLE-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 5 x i1> @llvm.vector.interleave5.nxv5i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; SCALABLE-NEXT: call void @llvm.masked.store.nxv5i64.p0(<vscale x 5 x i64> [[INTERLEAVED_VEC]], ptr [[TMP19]], i32 8, <vscale x 5 x i1> [[INTERLEAVED_MASK1]])
+; SCALABLE-NEXT: [[TMP25:%.*]] = zext i32 [[TMP7]] to i64
+; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP25]], [[INDEX]]
+; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP25]]
+; SCALABLE-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; SCALABLE-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; SCALABLE: middle.block:
-; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; SCALABLE-NEXT: br label [[EXIT:%.*]]
; SCALABLE: scalar.ph:
-; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; SCALABLE-NEXT: br label [[LOOP:%.*]]
; SCALABLE: loop:
-; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; SCALABLE-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 5
; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
@@ -1194,7 +1222,7 @@ define void @load_store_factor5(ptr %p) {
; SCALABLE-NEXT: store i64 [[Y4]], ptr [[Q4]], align 8
; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1
; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP13:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP14:![0-9]+]]
; SCALABLE: exit:
; SCALABLE-NEXT: ret void
;
@@ -1243,46 +1271,50 @@ exit:
define void @load_store_factor6(ptr %p) {
; CHECK-LABEL: @load_store_factor6(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP3]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP4]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = mul i64 [[INDEX]], 6
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP0]]
-; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 6 x i64>, ptr [[TMP1]], align 8
-; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave6.nxv6i64(<vscale x 6 x i64> [[WIDE_VEC]])
-; CHECK-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
-; CHECK-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
-; CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
-; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
-; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 5
-; CHECK-NEXT: [[TMP11:%.*]] = add <vscale x 1 x i64> [[TMP5]], splat (i64 1)
-; CHECK-NEXT: [[TMP12:%.*]] = add <vscale x 1 x i64> [[TMP6]], splat (i64 2)
-; CHECK-NEXT: [[TMP13:%.*]] = add <vscale x 1 x i64> [[TMP7]], splat (i64 3)
-; CHECK-NEXT: [[TMP14:%.*]] = add <vscale x 1 x i64> [[TMP8]], splat (i64 4)
-; CHECK-NEXT: [[TMP15:%.*]] = add <vscale x 1 x i64> [[TMP9]], splat (i64 5)
-; CHECK-NEXT: [[TMP16:%.*]] = add <vscale x 1 x i64> [[TMP10]], splat (i64 6)
-; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 6 x i64> @llvm.vector.interleave6.nxv6i64(<vscale x 1 x i64> [[TMP11]], <vscale x 1 x i64> [[TMP12]], <vscale x 1 x i64> [[TMP13]], <vscale x 1 x i64> [[TMP14]], <vscale x 1 x i64> [[TMP15]], <vscale x 1 x i64> [[TMP16]])
-; CHECK-NEXT: store <vscale x 6 x i64> [[INTERLEAVED_VEC]], ptr [[TMP1]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]]
-; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 1, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 1 x i32> poison, i32 [[TMP7]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 1 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP20:%.*]] = call <vscale x 1 x i32> @llvm.stepvector.nxv1i32()
+; CHECK-NEXT: [[TMP5:%.*]] = icmp ult <vscale x 1 x i32> [[TMP20]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[INDEX]], 6
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP6]]
+; CHECK-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 6 x i1> @llvm.vector.interleave6.nxv6i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; CHECK-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 6 x i64> @llvm.masked.load.nxv6i64.p0(ptr [[TMP21]], i32 8, <vscale x 6 x i1> [[INTERLEAVED_MASK]], <vscale x 6 x i64> poison)
+; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave6.nxv6i64(<vscale x 6 x i64> [[WIDE_MASKED_VEC]])
+; CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
+; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
+; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
+; CHECK-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
+; CHECK-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
+; CHECK-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 5
+; CHECK-NEXT: [[TMP14:%.*]] = add <vscale x 1 x i64> [[TMP8]], splat (i64 1)
+; CHECK-NEXT: [[TMP15:%.*]] = add <vscale x 1 x i64> [[TMP9]], splat (i64 2)
+; CHECK-NEXT: [[TMP16:%.*]] = add <vscale x 1 x i64> [[TMP10]], splat (i64 3)
+; CHECK-NEXT: [[TMP17:%.*]] = add <vscale x 1 x i64> [[TMP11]], splat (i64 4)
+; CHECK-NEXT: [[TMP18:%.*]] = add <vscale x 1 x i64> [[TMP12]], splat (i64 5)
+; CHECK-NEXT: [[TMP19:%.*]] = add <vscale x 1 x i64> [[TMP13]], splat (i64 6)
+; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 6 x i64> @llvm.vector.interleave6.nxv6i64(<vscale x 1 x i64> [[TMP14]], <vscale x 1 x i64> [[TMP15]], <vscale x 1 x i64> [[TMP16]], <vscale x 1 x i64> [[TMP17]], <vscale x 1 x i64> [[TMP18]], <vscale x 1 x i64> [[TMP19]])
+; CHECK-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 6 x i1> @llvm.vector.interleave6.nxv6i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; CHECK-NEXT: call void @llvm.masked.store.nxv6i64.p0(<vscale x 6 x i64> [[INTERLEAVED_VEC]], ptr [[TMP21]], i32 8, <vscale x 6 x i1> [[INTERLEAVED_MASK1]])
+; CHECK-NEXT: [[TMP28:%.*]] = zext i32 [[TMP7]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP28]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP28]]
+; CHECK-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 6
; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
@@ -1315,7 +1347,7 @@ define void @load_store_factor6(ptr %p) {
; CHECK-NEXT: store i64 [[Y5]], ptr [[Q5]], align 8
; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP15:![0-9]+]]
+; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -1397,46 +1429,50 @@ define void @load_store_factor6(ptr %p) {
;
; SCALABLE-LABEL: @load_store_factor6(
; SCALABLE-NEXT: entry:
-; SCALABLE-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP3]]
-; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; SCALABLE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; SCALABLE: vector.ph:
-; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP4]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]]
; SCALABLE: vector.body:
; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[TMP0:%.*]] = mul i64 [[INDEX]], 6
-; SCALABLE-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP0]]
-; SCALABLE-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 6 x i64>, ptr [[TMP1]], align 8
-; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave6.nxv6i64(<vscale x 6 x i64> [[WIDE_VEC]])
-; SCALABLE-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
-; SCALABLE-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
-; SCALABLE-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
-; SCALABLE-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
-; SCALABLE-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
-; SCALABLE-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 5
-; SCALABLE-NEXT: [[TMP11:%.*]] = add <vscale x 1 x i64> [[TMP5]], splat (i64 1)
-; SCALABLE-NEXT: [[TMP12:%.*]] = add <vscale x 1 x i64> [[TMP6]], splat (i64 2)
-; SCALABLE-NEXT: [[TMP13:%.*]] = add <vscale x 1 x i64> [[TMP7]], splat (i64 3)
-; SCALABLE-NEXT: [[TMP14:%.*]] = add <vscale x 1 x i64> [[TMP8]], splat (i64 4)
-; SCALABLE-NEXT: [[TMP15:%.*]] = add <vscale x 1 x i64> [[TMP9]], splat (i64 5)
-; SCALABLE-NEXT: [[TMP16:%.*]] = add <vscale x 1 x i64> [[TMP10]], splat (i64 6)
-; SCALABLE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 6 x i64> @llvm.vector.interleave6.nxv6i64(<vscale x 1 x i64> [[TMP11]], <vscale x 1 x i64> [[TMP12]], <vscale x 1 x i64> [[TMP13]], <vscale x 1 x i64> [[TMP14]], <vscale x 1 x i64> [[TMP15]], <vscale x 1 x i64> [[TMP16]])
-; SCALABLE-NEXT: store <vscale x 6 x i64> [[INTERLEAVED_VEC]], ptr [[TMP1]], align 8
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]]
-; SCALABLE-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 1, i1 true)
+; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 1 x i32> poison, i32 [[TMP7]], i64 0
+; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 1 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+; SCALABLE-NEXT: [[TMP20:%.*]] = call <vscale x 1 x i32> @llvm.stepvector.nxv1i32()
+; SCALABLE-NEXT: [[TMP5:%.*]] = icmp ult <vscale x 1 x i32> [[TMP20]], [[BROADCAST_SPLAT]]
+; SCALABLE-NEXT: [[TMP6:%.*]] = mul i64 [[INDEX]], 6
+; SCALABLE-NEXT: [[TMP21:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP6]]
+; SCALABLE-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 6 x i1> @llvm.vector.interleave6.nxv6i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; SCALABLE-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 6 x i64> @llvm.masked.load.nxv6i64.p0(ptr [[TMP21]], i32 8, <vscale x 6 x i1> [[INTERLEAVED_MASK]], <vscale x 6 x i64> poison)
+; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave6.nxv6i64(<vscale x 6 x i64> [[WIDE_MASKED_VEC]])
+; SCALABLE-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
+; SCALABLE-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
+; SCALABLE-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
+; SCALABLE-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
+; SCALABLE-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
+; SCALABLE-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 5
+; SCALABLE-NEXT: [[TMP14:%.*]] = add <vscale x 1 x i64> [[TMP8]], splat (i64 1)
+; SCALABLE-NEXT: [[TMP15:%.*]] = add <vscale x 1 x i64> [[TMP9]], splat (i64 2)
+; SCALABLE-NEXT: [[TMP16:%.*]] = add <vscale x 1 x i64> [[TMP10]], splat (i64 3)
+; SCALABLE-NEXT: [[TMP17:%.*]] = add <vscale x 1 x i64> [[TMP11]], splat (i64 4)
+; SCALABLE-NEXT: [[TMP18:%.*]] = add <vscale x 1 x i64> [[TMP12]], splat (i64 5)
+; SCALABLE-NEXT: [[TMP19:%.*]] = add <vscale x 1 x i64> [[TMP13]], splat (i64 6)
+; SCALABLE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 6 x i64> @llvm.vector.interleave6.nxv6i64(<vscale x 1 x i64> [[TMP14]], <vscale x 1 x i64> [[TMP15]], <vscale x 1 x i64> [[TMP16]], <vscale x 1 x i64> [[TMP17]], <vscale x 1 x i64> [[TMP18]], <vscale x 1 x i64> [[TMP19]])
+; SCALABLE-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 6 x i1> @llvm.vector.interleave6.nxv6i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; SCALABLE-NEXT: call void @llvm.masked.store.nxv6i64.p0(<vscale x 6 x i64> [[INTERLEAVED_VEC]], ptr [[TMP21]], i32 8, <vscale x 6 x i1> [[INTERLEAVED_MASK1]])
+; SCALABLE-NEXT: [[TMP28:%.*]] = zext i32 [[TMP7]] to i64
+; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP28]], [[INDEX]]
+; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP28]]
+; SCALABLE-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; SCALABLE-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; SCALABLE: middle.block:
-; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; SCALABLE-NEXT: br label [[EXIT:%.*]]
; SCALABLE: scalar.ph:
-; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; SCALABLE-NEXT: br label [[LOOP:%.*]]
; SCALABLE: loop:
-; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; SCALABLE-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 6
; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
@@ -1469,7 +1505,7 @@ define void @load_store_factor6(ptr %p) {
; SCALABLE-NEXT: store i64 [[Y5]], ptr [[Q5]], align 8
; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1
; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP15:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP16:![0-9]+]]
; SCALABLE: exit:
; SCALABLE-NEXT: ret void
;
@@ -1524,48 +1560,52 @@ exit:
define void @load_store_factor7(ptr %p) {
; CHECK-LABEL: @load_store_factor7(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP3]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP4]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = mul i64 [[INDEX]], 7
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP0]]
-; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 7 x i64>, ptr [[TMP1]], align 8
-; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave7.nxv7i64(<vscale x 7 x i64> [[WIDE_VEC]])
-; CHECK-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
-; CHECK-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
-; CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
-; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
-; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 5
-; CHECK-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 6
-; CHECK-NEXT: [[TMP12:%.*]] = add <vscale x 1 x i64> [[TMP5]], splat (i64 1)
-; CHECK-NEXT: [[TMP13:%.*]] = add <vscale x 1 x i64> [[TMP6]], splat (i64 2)
-; CHECK-NEXT: [[TMP14:%.*]] = add <vscale x 1 x i64> [[TMP7]], splat (i64 3)
-; CHECK-NEXT: [[TMP15:%.*]] = add <vscale x 1 x i64> [[TMP8]], splat (i64 4)
-; CHECK-NEXT: [[TMP16:%.*]] = add <vscale x 1 x i64> [[TMP9]], splat (i64 5)
-; CHECK-NEXT: [[TMP17:%.*]] = add <vscale x 1 x i64> [[TMP10]], splat (i64 6)
-; CHECK-NEXT: [[TMP18:%.*]] = add <vscale x 1 x i64> [[TMP11]], splat (i64 7)
-; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 7 x i64> @llvm.vector.interleave7.nxv7i64(<vscale x 1 x i64> [[TMP12]], <vscale x 1 x i64> [[TMP13]], <vscale x 1 x i64> [[TMP14]], <vscale x 1 x i64> [[TMP15]], <vscale x 1 x i64> [[TMP16]], <vscale x 1 x i64> [[TMP17]], <vscale x 1 x i64> [[TMP18]])
-; CHECK-NEXT: store <vscale x 7 x i64> [[INTERLEAVED_VEC]], ptr [[TMP1]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]]
-; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 1, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 1 x i32> poison, i32 [[TMP7]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 1 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP22:%.*]] = call <vscale x 1 x i32> @llvm.stepvector.nxv1i32()
+; CHECK-NEXT: [[TMP5:%.*]] = icmp ult <vscale x 1 x i32> [[TMP22]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[INDEX]], 7
+; CHECK-NEXT: [[TMP23:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP6]]
+; CHECK-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 7 x i1> @llvm.vector.interleave7.nxv7i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; CHECK-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 7 x i64> @llvm.masked.load.nxv7i64.p0(ptr [[TMP23]], i32 8, <vscale x 7 x i1> [[INTERLEAVED_MASK]], <vscale x 7 x i64> poison)
+; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave7.nxv7i64(<vscale x 7 x i64> [[WIDE_MASKED_VEC]])
+; CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
+; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
+; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
+; CHECK-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
+; CHECK-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
+; CHECK-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 5
+; CHECK-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 6
+; CHECK-NEXT: [[TMP15:%.*]] = add <vscale x 1 x i64> [[TMP8]], splat (i64 1)
+; CHECK-NEXT: [[TMP16:%.*]] = add <vscale x 1 x i64> [[TMP9]], splat (i64 2)
+; CHECK-NEXT: [[TMP17:%.*]] = add <vscale x 1 x i64> [[TMP10]], splat (i64 3)
+; CHECK-NEXT: [[TMP18:%.*]] = add <vscale x 1 x i64> [[TMP11]], splat (i64 4)
+; CHECK-NEXT: [[TMP19:%.*]] = add <vscale x 1 x i64> [[TMP12]], splat (i64 5)
+; CHECK-NEXT: [[TMP20:%.*]] = add <vscale x 1 x i64> [[TMP13]], splat (i64 6)
+; CHECK-NEXT: [[TMP21:%.*]] = add <vscale x 1 x i64> [[TMP14]], splat (i64 7)
+; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 7 x i64> @llvm.vector.interleave7.nxv7i64(<vscale x 1 x i64> [[TMP15]], <vscale x 1 x i64> [[TMP16]], <vscale x 1 x i64> [[TMP17]], <vscale x 1 x i64> [[TMP18]], <vscale x 1 x i64> [[TMP19]], <vscale x 1 x i64> [[TMP20]], <vscale x 1 x i64> [[TMP21]])
+; CHECK-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 7 x i1> @llvm.vector.interleave7.nxv7i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; CHECK-NEXT: call void @llvm.masked.store.nxv7i64.p0(<vscale x 7 x i64> [[INTERLEAVED_VEC]], ptr [[TMP23]], i32 8, <vscale x 7 x i1> [[INTERLEAVED_MASK1]])
+; CHECK-NEXT: [[TMP31:%.*]] = zext i32 [[TMP7]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP31]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP31]]
+; CHECK-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 7
; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
@@ -1603,7 +1643,7 @@ define void @load_store_factor7(ptr %p) {
; CHECK-NEXT: store i64 [[Y6]], ptr [[Q6]], align 8
; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP17:![0-9]+]]
+; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP18:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -1694,48 +1734,52 @@ define void @load_store_factor7(ptr %p) {
;
; SCALABLE-LABEL: @load_store_factor7(
; SCALABLE-NEXT: entry:
-; SCALABLE-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP3]]
-; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; SCALABLE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; SCALABLE: vector.ph:
-; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP4]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]]
; SCALABLE: vector.body:
; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[TMP0:%.*]] = mul i64 [[INDEX]], 7
-; SCALABLE-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP0]]
-; SCALABLE-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 7 x i64>, ptr [[TMP1]], align 8
-; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave7.nxv7i64(<vscale x 7 x i64> [[WIDE_VEC]])
-; SCALABLE-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
-; SCALABLE-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
-; SCALABLE-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
-; SCALABLE-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
-; SCALABLE-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
-; SCALABLE-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 5
-; SCALABLE-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 6
-; SCALABLE-NEXT: [[TMP12:%.*]] = add <vscale x 1 x i64> [[TMP5]], splat (i64 1)
-; SCALABLE-NEXT: [[TMP13:%.*]] = add <vscale x 1 x i64> [[TMP6]], splat (i64 2)
-; SCALABLE-NEXT: [[TMP14:%.*]] = add <vscale x 1 x i64> [[TMP7]], splat (i64 3)
-; SCALABLE-NEXT: [[TMP15:%.*]] = add <vscale x 1 x i64> [[TMP8]], splat (i64 4)
-; SCALABLE-NEXT: [[TMP16:%.*]] = add <vscale x 1 x i64> [[TMP9]], splat (i64 5)
-; SCALABLE-NEXT: [[TMP17:%.*]] = add <vscale x 1 x i64> [[TMP10]], splat (i64 6)
-; SCALABLE-NEXT: [[TMP18:%.*]] = add <vscale x 1 x i64> [[TMP11]], splat (i64 7)
-; SCALABLE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 7 x i64> @llvm.vector.interleave7.nxv7i64(<vscale x 1 x i64> [[TMP12]], <vscale x 1 x i64> [[TMP13]], <vscale x 1 x i64> [[TMP14]], <vscale x 1 x i64> [[TMP15]], <vscale x 1 x i64> [[TMP16]], <vscale x 1 x i64> [[TMP17]], <vscale x 1 x i64> [[TMP18]])
-; SCALABLE-NEXT: store <vscale x 7 x i64> [[INTERLEAVED_VEC]], ptr [[TMP1]], align 8
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]]
-; SCALABLE-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 1, i1 true)
+; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 1 x i32> poison, i32 [[TMP7]], i64 0
+; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 1 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+; SCALABLE-NEXT: [[TMP22:%.*]] = call <vscale x 1 x i32> @llvm.stepvector.nxv1i32()
+; SCALABLE-NEXT: [[TMP5:%.*]] = icmp ult <vscale x 1 x i32> [[TMP22]], [[BROADCAST_SPLAT]]
+; SCALABLE-NEXT: [[TMP6:%.*]] = mul i64 [[INDEX]], 7
+; SCALABLE-NEXT: [[TMP23:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP6]]
+; SCALABLE-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 7 x i1> @llvm.vector.interleave7.nxv7i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; SCALABLE-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 7 x i64> @llvm.masked.load.nxv7i64.p0(ptr [[TMP23]], i32 8, <vscale x 7 x i1> [[INTERLEAVED_MASK]], <vscale x 7 x i64> poison)
+; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave7.nxv7i64(<vscale x 7 x i64> [[WIDE_MASKED_VEC]])
+; SCALABLE-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
+; SCALABLE-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
+; SCALABLE-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
+; SCALABLE-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
+; SCALABLE-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
+; SCALABLE-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 5
+; SCALABLE-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 6
+; SCALABLE-NEXT: [[TMP15:%.*]] = add <vscale x 1 x i64> [[TMP8]], splat (i64 1)
+; SCALABLE-NEXT: [[TMP16:%.*]] = add <vscale x 1 x i64> [[TMP9]], splat (i64 2)
+; SCALABLE-NEXT: [[TMP17:%.*]] = add <vscale x 1 x i64> [[TMP10]], splat (i64 3)
+; SCALABLE-NEXT: [[TMP18:%.*]] = add <vscale x 1 x i64> [[TMP11]], splat (i64 4)
+; SCALABLE-NEXT: [[TMP19:%.*]] = add <vscale x 1 x i64> [[TMP12]], splat (i64 5)
+; SCALABLE-NEXT: [[TMP20:%.*]] = add <vscale x 1 x i64> [[TMP13]], splat (i64 6)
+; SCALABLE-NEXT: [[TMP21:%.*]] = add <vscale x 1 x i64> [[TMP14]], splat (i64 7)
+; SCALABLE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 7 x i64> @llvm.vector.interleave7.nxv7i64(<vscale x 1 x i64> [[TMP15]], <vscale x 1 x i64> [[TMP16]], <vscale x 1 x i64> [[TMP17]], <vscale x 1 x i64> [[TMP18]], <vscale x 1 x i64> [[TMP19]], <vscale x 1 x i64> [[TMP20]], <vscale x 1 x i64> [[TMP21]])
+; SCALABLE-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 7 x i1> @llvm.vector.interleave7.nxv7i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; SCALABLE-NEXT: call void @llvm.masked.store.nxv7i64.p0(<vscale x 7 x i64> [[INTERLEAVED_VEC]], ptr [[TMP23]], i32 8, <vscale x 7 x i1> [[INTERLEAVED_MASK1]])
+; SCALABLE-NEXT: [[TMP31:%.*]] = zext i32 [[TMP7]] to i64
+; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP31]], [[INDEX]]
+; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP31]]
+; SCALABLE-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; SCALABLE-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; SCALABLE: middle.block:
-; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; SCALABLE-NEXT: br label [[EXIT:%.*]]
; SCALABLE: scalar.ph:
-; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; SCALABLE-NEXT: br label [[LOOP:%.*]]
; SCALABLE: loop:
-; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; SCALABLE-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 7
; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
@@ -1773,7 +1817,7 @@ define void @load_store_factor7(ptr %p) {
; SCALABLE-NEXT: store i64 [[Y6]], ptr [[Q6]], align 8
; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1
; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP17:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP18:![0-9]+]]
; SCALABLE: exit:
; SCALABLE-NEXT: ret void
;
@@ -1834,50 +1878,54 @@ exit:
define void @load_store_factor8(ptr %p) {
; CHECK-LABEL: @load_store_factor8(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP0]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[INDEX]], 3
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP3]]
-; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i64>, ptr [[TMP4]], align 8
-; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave8.nxv8i64(<vscale x 8 x i64> [[WIDE_VEC]])
-; CHECK-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
-; CHECK-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
-; CHECK-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
-; CHECK-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
-; CHECK-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
-; CHECK-NEXT: [[TMP16:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 5
-; CHECK-NEXT: [[TMP17:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 6
-; CHECK-NEXT: [[TMP18:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 7
-; CHECK-NEXT: [[TMP19:%.*]] = add <vscale x 1 x i64> [[TMP11]], splat (i64 1)
-; CHECK-NEXT: [[TMP20:%.*]] = add <vscale x 1 x i64> [[TMP12]], splat (i64 2)
-; CHECK-NEXT: [[TMP21:%.*]] = add <vscale x 1 x i64> [[TMP13]], splat (i64 3)
-; CHECK-NEXT: [[TMP22:%.*]] = add <vscale x 1 x i64> [[TMP14]], splat (i64 4)
-; CHECK-NEXT: [[TMP23:%.*]] = add <vscale x 1 x i64> [[TMP15]], splat (i64 5)
-; CHECK-NEXT: [[TMP24:%.*]] = add <vscale x 1 x i64> [[TMP16]], splat (i64 6)
-; CHECK-NEXT: [[TMP25:%.*]] = add <vscale x 1 x i64> [[TMP17]], splat (i64 7)
-; CHECK-NEXT: [[TMP26:%.*]] = add <vscale x 1 x i64> [[TMP18]], splat (i64 8)
-; CHECK-NEXT: [[INTERLEAVED_VEC12:%.*]] = call <vscale x 8 x i64> @llvm.vector.interleave8.nxv8i64(<vscale x 1 x i64> [[TMP19]], <vscale x 1 x i64> [[TMP20]], <vscale x 1 x i64> [[TMP21]], <vscale x 1 x i64> [[TMP22]], <vscale x 1 x i64> [[TMP23]], <vscale x 1 x i64> [[TMP24]], <vscale x 1 x i64> [[TMP25]], <vscale x 1 x i64> [[TMP26]])
-; CHECK-NEXT: store <vscale x 8 x i64> [[INTERLEAVED_VEC12]], ptr [[TMP4]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]]
-; CHECK-NEXT: [[TMP27:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP27]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 1, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 1 x i32> poison, i32 [[TMP7]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 1 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 1 x i32> @llvm.stepvector.nxv1i32()
+; CHECK-NEXT: [[TMP5:%.*]] = icmp ult <vscale x 1 x i32> [[TMP4]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[INDEX]], 3
+; CHECK-NEXT: [[TMP24:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP6]]
+; CHECK-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave8.nxv8i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; CHECK-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 8 x i64> @llvm.masked.load.nxv8i64.p0(ptr [[TMP24]], i32 8, <vscale x 8 x i1> [[INTERLEAVED_MASK]], <vscale x 8 x i64> poison)
+; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave8.nxv8i64(<vscale x 8 x i64> [[WIDE_MASKED_VEC]])
+; CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
+; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
+; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
+; CHECK-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
+; CHECK-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
+; CHECK-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 5
+; CHECK-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 6
+; CHECK-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 7
+; CHECK-NEXT: [[TMP16:%.*]] = add <vscale x 1 x i64> [[TMP8]], splat (i64 1)
+; CHECK-NEXT: [[TMP17:%.*]] = add <vscale x 1 x i64> [[TMP9]], splat (i64 2)
+; CHECK-NEXT: [[TMP18:%.*]] = add <vscale x 1 x i64> [[TMP10]], splat (i64 3)
+; CHECK-NEXT: [[TMP19:%.*]] = add <vscale x 1 x i64> [[TMP11]], splat (i64 4)
+; CHECK-NEXT: [[TMP20:%.*]] = add <vscale x 1 x i64> [[TMP12]], splat (i64 5)
+; CHECK-NEXT: [[TMP21:%.*]] = add <vscale x 1 x i64> [[TMP13]], splat (i64 6)
+; CHECK-NEXT: [[TMP22:%.*]] = add <vscale x 1 x i64> [[TMP14]], splat (i64 7)
+; CHECK-NEXT: [[TMP23:%.*]] = add <vscale x 1 x i64> [[TMP15]], splat (i64 8)
+; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 8 x i64> @llvm.vector.interleave8.nxv8i64(<vscale x 1 x i64> [[TMP16]], <vscale x 1 x i64> [[TMP17]], <vscale x 1 x i64> [[TMP18]], <vscale x 1 x i64> [[TMP19]], <vscale x 1 x i64> [[TMP20]], <vscale x 1 x i64> [[TMP21]], <vscale x 1 x i64> [[TMP22]], <vscale x 1 x i64> [[TMP23]])
+; CHECK-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave8.nxv8i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; CHECK-NEXT: call void @llvm.masked.store.nxv8i64.p0(<vscale x 8 x i64> [[INTERLEAVED_VEC]], ptr [[TMP24]], i32 8, <vscale x 8 x i1> [[INTERLEAVED_MASK1]])
+; CHECK-NEXT: [[TMP34:%.*]] = zext i32 [[TMP7]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP34]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP34]]
+; CHECK-NEXT: [[TMP35:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP35]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 3
; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
@@ -1920,7 +1968,7 @@ define void @load_store_factor8(ptr %p) {
; CHECK-NEXT: store i64 [[Y7]], ptr [[Q7]], align 8
; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP19:![0-9]+]]
+; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP20:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -2017,50 +2065,54 @@ define void @load_store_factor8(ptr %p) {
;
; SCALABLE-LABEL: @load_store_factor8(
; SCALABLE-NEXT: entry:
-; SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP0]]
-; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; SCALABLE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; SCALABLE: vector.ph:
-; SCALABLE-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP1]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]]
; SCALABLE: vector.body:
; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[TMP3:%.*]] = shl i64 [[INDEX]], 3
-; SCALABLE-NEXT: [[TMP4:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP3]]
-; SCALABLE-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i64>, ptr [[TMP4]], align 8
-; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave8.nxv8i64(<vscale x 8 x i64> [[WIDE_VEC]])
-; SCALABLE-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
-; SCALABLE-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
-; SCALABLE-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
-; SCALABLE-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
-; SCALABLE-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
-; SCALABLE-NEXT: [[TMP16:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 5
-; SCALABLE-NEXT: [[TMP17:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 6
-; SCALABLE-NEXT: [[TMP18:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 7
-; SCALABLE-NEXT: [[TMP19:%.*]] = add <vscale x 1 x i64> [[TMP11]], splat (i64 1)
-; SCALABLE-NEXT: [[TMP20:%.*]] = add <vscale x 1 x i64> [[TMP12]], splat (i64 2)
-; SCALABLE-NEXT: [[TMP21:%.*]] = add <vscale x 1 x i64> [[TMP13]], splat (i64 3)
-; SCALABLE-NEXT: [[TMP22:%.*]] = add <vscale x 1 x i64> [[TMP14]], splat (i64 4)
-; SCALABLE-NEXT: [[TMP23:%.*]] = add <vscale x 1 x i64> [[TMP15]], splat (i64 5)
-; SCALABLE-NEXT: [[TMP24:%.*]] = add <vscale x 1 x i64> [[TMP16]], splat (i64 6)
-; SCALABLE-NEXT: [[TMP25:%.*]] = add <vscale x 1 x i64> [[TMP17]], splat (i64 7)
-; SCALABLE-NEXT: [[TMP26:%.*]] = add <vscale x 1 x i64> [[TMP18]], splat (i64 8)
-; SCALABLE-NEXT: [[INTERLEAVED_VEC12:%.*]] = call <vscale x 8 x i64> @llvm.vector.interleave8.nxv8i64(<vscale x 1 x i64> [[TMP19]], <vscale x 1 x i64> [[TMP20]], <vscale x 1 x i64> [[TMP21]], <vscale x 1 x i64> [[TMP22]], <vscale x 1 x i64> [[TMP23]], <vscale x 1 x i64> [[TMP24]], <vscale x 1 x i64> [[TMP25]], <vscale x 1 x i64> [[TMP26]])
-; SCALABLE-NEXT: store <vscale x 8 x i64> [[INTERLEAVED_VEC12]], ptr [[TMP4]], align 8
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]]
-; SCALABLE-NEXT: [[TMP27:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP27]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
+; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 1, i1 true)
+; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 1 x i32> poison, i32 [[TMP7]], i64 0
+; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 1 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+; SCALABLE-NEXT: [[TMP4:%.*]] = call <vscale x 1 x i32> @llvm.stepvector.nxv1i32()
+; SCALABLE-NEXT: [[TMP5:%.*]] = icmp ult <vscale x 1 x i32> [[TMP4]], [[BROADCAST_SPLAT]]
+; SCALABLE-NEXT: [[TMP6:%.*]] = shl i64 [[INDEX]], 3
+; SCALABLE-NEXT: [[TMP24:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP6]]
+; SCALABLE-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave8.nxv8i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; SCALABLE-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 8 x i64> @llvm.masked.load.nxv8i64.p0(ptr [[TMP24]], i32 8, <vscale x 8 x i1> [[INTERLEAVED_MASK]], <vscale x 8 x i64> poison)
+; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave8.nxv8i64(<vscale x 8 x i64> [[WIDE_MASKED_VEC]])
+; SCALABLE-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
+; SCALABLE-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
+; SCALABLE-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
+; SCALABLE-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
+; SCALABLE-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
+; SCALABLE-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 5
+; SCALABLE-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 6
+; SCALABLE-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 7
+; SCALABLE-NEXT: [[TMP16:%.*]] = add <vscale x 1 x i64> [[TMP8]], splat (i64 1)
+; SCALABLE-NEXT: [[TMP17:%.*]] = add <vscale x 1 x i64> [[TMP9]], splat (i64 2)
+; SCALABLE-NEXT: [[TMP18:%.*]] = add <vscale x 1 x i64> [[TMP10]], splat (i64 3)
+; SCALABLE-NEXT: [[TMP19:%.*]] = add <vscale x 1 x i64> [[TMP11]], splat (i64 4)
+; SCALABLE-NEXT: [[TMP20:%.*]] = add <vscale x 1 x i64> [[TMP12]], splat (i64 5)
+; SCALABLE-NEXT: [[TMP21:%.*]] = add <vscale x 1 x i64> [[TMP13]], splat (i64 6)
+; SCALABLE-NEXT: [[TMP22:%.*]] = add <vscale x 1 x i64> [[TMP14]], splat (i64 7)
+; SCALABLE-NEXT: [[TMP23:%.*]] = add <vscale x 1 x i64> [[TMP15]], splat (i64 8)
+; SCALABLE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 8 x i64> @llvm.vector.interleave8.nxv8i64(<vscale x 1 x i64> [[TMP16]], <vscale x 1 x i64> [[TMP17]], <vscale x 1 x i64> [[TMP18]], <vscale x 1 x i64> [[TMP19]], <vscale x 1 x i64> [[TMP20]], <vscale x 1 x i64> [[TMP21]], <vscale x 1 x i64> [[TMP22]], <vscale x 1 x i64> [[TMP23]])
+; SCALABLE-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave8.nxv8i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; SCALABLE-NEXT: call void @llvm.masked.store.nxv8i64.p0(<vscale x 8 x i64> [[INTERLEAVED_VEC]], ptr [[TMP24]], i32 8, <vscale x 8 x i1> [[INTERLEAVED_MASK1]])
+; SCALABLE-NEXT: [[TMP34:%.*]] = zext i32 [[TMP7]] to i64
+; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP34]], [[INDEX]]
+; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP34]]
+; SCALABLE-NEXT: [[TMP35:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; SCALABLE-NEXT: br i1 [[TMP35]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; SCALABLE: middle.block:
-; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; SCALABLE-NEXT: br label [[EXIT:%.*]]
; SCALABLE: scalar.ph:
-; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; SCALABLE-NEXT: br label [[LOOP:%.*]]
; SCALABLE: loop:
-; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; SCALABLE-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 3
; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
@@ -2103,7 +2155,7 @@ define void @load_store_factor8(ptr %p) {
; SCALABLE-NEXT: store i64 [[Y7]], ptr [[Q7]], align 8
; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1
; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP19:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP20:![0-9]+]]
; SCALABLE: exit:
; SCALABLE-NEXT: ret void
;
@@ -2170,40 +2222,41 @@ exit:
define void @combine_load_factor2_i32(ptr noalias %p, ptr noalias %q) {
; CHECK-LABEL: @combine_load_factor2_i32(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP6]]
-; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP7]], align 4
-; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]])
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP7]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; CHECK-NEXT: [[TMP12:%.*]] = icmp ult <vscale x 4 x i32> [[TMP6]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP13:%.*]] = shl i64 [[INDEX]], 1
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP13]]
+; CHECK-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave2.nxv8i1(<vscale x 4 x i1> [[TMP12]], <vscale x 4 x i1> [[TMP12]])
+; CHECK-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 8 x i32> @llvm.masked.load.nxv8i32.p0(ptr [[TMP15]], i32 4, <vscale x 8 x i1> [[INTERLEAVED_MASK]], <vscale x 8 x i32> poison)
+; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_MASKED_VEC]])
; CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 1
; CHECK-NEXT: [[TMP10:%.*]] = add <vscale x 4 x i32> [[TMP8]], [[TMP9]]
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[Q:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: store <vscale x 4 x i32> [[TMP10]], ptr [[TMP11]], align 4
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP10]], ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
+; CHECK-NEXT: [[TMP16:%.*]] = zext i32 [[TMP7]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP16]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
+; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1
; CHECK-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]]
; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
@@ -2215,7 +2268,7 @@ define void @combine_load_factor2_i32(ptr noalias %p, ptr noalias %q) {
; CHECK-NEXT: store i32 [[RES]], ptr [[DST]], align 4
; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP21:![0-9]+]]
+; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP22:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -2226,24 +2279,15 @@ define void @combine_load_factor2_i32(ptr noalias %p, ptr noalias %q) {
; FIXED-NEXT: br label [[VECTOR_BODY:%.*]]
; FIXED: vector.body:
; FIXED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; FIXED-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 8
-; FIXED-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX]], 1
-; FIXED-NEXT: [[TMP2:%.*]] = shl i64 [[TMP0]], 1
-; FIXED-NEXT: [[TMP3:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP1]]
-; FIXED-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[P]], i64 [[TMP2]]
-; FIXED-NEXT: [[WIDE_VEC:%.*]] = load <16 x i32>, ptr [[TMP3]], align 4
-; FIXED-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <16 x i32> [[WIDE_VEC]], <16 x i32> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
-; FIXED-NEXT: [[STRIDED_VEC1:%.*]] = shufflevector <16 x i32> [[WIDE_VEC]], <16 x i32> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+; FIXED-NEXT: [[TMP2:%.*]] = shl i64 [[INDEX]], 1
+; FIXED-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP2]]
; FIXED-NEXT: [[WIDE_VEC2:%.*]] = load <16 x i32>, ptr [[TMP4]], align 4
; FIXED-NEXT: [[STRIDED_VEC3:%.*]] = shufflevector <16 x i32> [[WIDE_VEC2]], <16 x i32> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
; FIXED-NEXT: [[STRIDED_VEC4:%.*]] = shufflevector <16 x i32> [[WIDE_VEC2]], <16 x i32> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
-; FIXED-NEXT: [[TMP5:%.*]] = add <8 x i32> [[STRIDED_VEC]], [[STRIDED_VEC1]]
; FIXED-NEXT: [[TMP6:%.*]] = add <8 x i32> [[STRIDED_VEC3]], [[STRIDED_VEC4]]
; FIXED-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[Q:%.*]], i64 [[INDEX]]
-; FIXED-NEXT: [[TMP9:%.*]] = getelementptr i32, ptr [[TMP7]], i32 8
-; FIXED-NEXT: store <8 x i32> [[TMP5]], ptr [[TMP7]], align 4
-; FIXED-NEXT: store <8 x i32> [[TMP6]], ptr [[TMP9]], align 4
-; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
+; FIXED-NEXT: store <8 x i32> [[TMP6]], ptr [[TMP7]], align 4
+; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; FIXED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; FIXED: middle.block:
@@ -2270,40 +2314,41 @@ define void @combine_load_factor2_i32(ptr noalias %p, ptr noalias %q) {
;
; SCALABLE-LABEL: @combine_load_factor2_i32(
; SCALABLE-NEXT: entry:
-; SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; SCALABLE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; SCALABLE: vector.ph:
-; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]]
; SCALABLE: vector.body:
; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[TMP6:%.*]] = shl i64 [[INDEX]], 1
-; SCALABLE-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP6]]
-; SCALABLE-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP7]], align 4
-; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]])
+; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP7]], i64 0
+; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; SCALABLE-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; SCALABLE-NEXT: [[TMP12:%.*]] = icmp ult <vscale x 4 x i32> [[TMP6]], [[BROADCAST_SPLAT]]
+; SCALABLE-NEXT: [[TMP13:%.*]] = shl i64 [[INDEX]], 1
+; SCALABLE-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP13]]
+; SCALABLE-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave2.nxv8i1(<vscale x 4 x i1> [[TMP12]], <vscale x 4 x i1> [[TMP12]])
+; SCALABLE-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 8 x i32> @llvm.masked.load.nxv8i32.p0(ptr [[TMP15]], i32 4, <vscale x 8 x i1> [[INTERLEAVED_MASK]], <vscale x 8 x i32> poison)
+; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_MASKED_VEC]])
; SCALABLE-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
; SCALABLE-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 1
; SCALABLE-NEXT: [[TMP10:%.*]] = add <vscale x 4 x i32> [[TMP8]], [[TMP9]]
; SCALABLE-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[Q:%.*]], i64 [[INDEX]]
-; SCALABLE-NEXT: store <vscale x 4 x i32> [[TMP10]], ptr [[TMP11]], align 4
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; SCALABLE-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
+; SCALABLE-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP10]], ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
+; SCALABLE-NEXT: [[TMP16:%.*]] = zext i32 [[TMP7]] to i64
+; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP16]], [[INDEX]]
+; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
+; SCALABLE-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; SCALABLE-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
; SCALABLE: middle.block:
-; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; SCALABLE-NEXT: br label [[EXIT:%.*]]
; SCALABLE: scalar.ph:
-; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; SCALABLE-NEXT: br label [[LOOP:%.*]]
; SCALABLE: loop:
-; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; SCALABLE-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1
; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]]
; SCALABLE-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
@@ -2315,7 +2360,7 @@ define void @combine_load_factor2_i32(ptr noalias %p, ptr noalias %q) {
; SCALABLE-NEXT: store i32 [[RES]], ptr [[DST]], align 4
; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1
; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP21:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP22:![0-9]+]]
; SCALABLE: exit:
; SCALABLE-NEXT: ret void
;
@@ -2347,40 +2392,41 @@ exit:
define void @combine_load_factor2_i64(ptr noalias %p, ptr noalias %q) {
; CHECK-LABEL: @combine_load_factor2_i64(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP6]]
-; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 4 x i64>, ptr [[TMP7]], align 8
-; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave2.nxv4i64(<vscale x 4 x i64> [[WIDE_VEC]])
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP7]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
+; CHECK-NEXT: [[TMP12:%.*]] = icmp ult <vscale x 2 x i32> [[TMP6]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP13:%.*]] = shl i64 [[INDEX]], 1
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP13]]
+; CHECK-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 4 x i1> @llvm.vector.interleave2.nxv4i1(<vscale x 2 x i1> [[TMP12]], <vscale x 2 x i1> [[TMP12]])
+; CHECK-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 4 x i64> @llvm.masked.load.nxv4i64.p0(ptr [[TMP15]], i32 8, <vscale x 4 x i1> [[INTERLEAVED_MASK]], <vscale x 4 x i64> poison)
+; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave2.nxv4i64(<vscale x 4 x i64> [[WIDE_MASKED_VEC]])
; CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 0
; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 1
; CHECK-NEXT: [[TMP10:%.*]] = add <vscale x 2 x i64> [[TMP8]], [[TMP9]]
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i64, ptr [[Q:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: store <vscale x 2 x i64> [[TMP10]], ptr [[TMP11]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP10]], ptr align 8 [[TMP11]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]])
+; CHECK-NEXT: [[TMP16:%.*]] = zext i32 [[TMP7]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP16]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
+; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1
; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
@@ -2392,7 +2438,7 @@ define void @combine_load_factor2_i64(ptr noalias %p, ptr noalias %q) {
; CHECK-NEXT: store i64 [[RES]], ptr [[DST]], align 8
; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP23:![0-9]+]]
+; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP24:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -2403,24 +2449,15 @@ define void @combine_load_factor2_i64(ptr noalias %p, ptr noalias %q) {
; FIXED-NEXT: br label [[VECTOR_BODY:%.*]]
; FIXED: vector.body:
; FIXED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; FIXED-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 4
-; FIXED-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX]], 1
-; FIXED-NEXT: [[TMP2:%.*]] = shl i64 [[TMP0]], 1
-; FIXED-NEXT: [[TMP3:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP1]]
-; FIXED-NEXT: [[TMP4:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP2]]
-; FIXED-NEXT: [[WIDE_VEC:%.*]] = load <8 x i64>, ptr [[TMP3]], align 8
-; FIXED-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <8 x i64> [[WIDE_VEC]], <8 x i64> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
-; FIXED-NEXT: [[STRIDED_VEC1:%.*]] = shufflevector <8 x i64> [[WIDE_VEC]], <8 x i64> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+; FIXED-NEXT: [[TMP2:%.*]] = shl i64 [[INDEX]], 1
+; FIXED-NEXT: [[TMP4:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP2]]
; FIXED-NEXT: [[WIDE_VEC2:%.*]] = load <8 x i64>, ptr [[TMP4]], align 8
; FIXED-NEXT: [[STRIDED_VEC3:%.*]] = shufflevector <8 x i64> [[WIDE_VEC2]], <8 x i64> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
; FIXED-NEXT: [[STRIDED_VEC4:%.*]] = shufflevector <8 x i64> [[WIDE_VEC2]], <8 x i64> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
-; FIXED-NEXT: [[TMP5:%.*]] = add <4 x i64> [[STRIDED_VEC]], [[STRIDED_VEC1]]
; FIXED-NEXT: [[TMP6:%.*]] = add <4 x i64> [[STRIDED_VEC3]], [[STRIDED_VEC4]]
; FIXED-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[Q:%.*]], i64 [[INDEX]]
-; FIXED-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[TMP7]], i32 4
-; FIXED-NEXT: store <4 x i64> [[TMP5]], ptr [[TMP7]], align 8
-; FIXED-NEXT: store <4 x i64> [[TMP6]], ptr [[TMP9]], align 8
-; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; FIXED-NEXT: store <4 x i64> [[TMP6]], ptr [[TMP7]], align 8
+; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; FIXED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
; FIXED: middle.block:
@@ -2447,40 +2484,41 @@ define void @combine_load_factor2_i64(ptr noalias %p, ptr noalias %q) {
;
; SCALABLE-LABEL: @combine_load_factor2_i64(
; SCALABLE-NEXT: entry:
-; SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; SCALABLE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; SCALABLE: vector.ph:
-; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]]
; SCALABLE: vector.body:
; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[TMP6:%.*]] = shl i64 [[INDEX]], 1
-; SCALABLE-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP6]]
-; SCALABLE-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 4 x i64>, ptr [[TMP7]], align 8
-; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave2.nxv4i64(<vscale x 4 x i64> [[WIDE_VEC]])
+; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP7]], i64 0
+; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+; SCALABLE-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
+; SCALABLE-NEXT: [[TMP12:%.*]] = icmp ult <vscale x 2 x i32> [[TMP6]], [[BROADCAST_SPLAT]]
+; SCALABLE-NEXT: [[TMP13:%.*]] = shl i64 [[INDEX]], 1
+; SCALABLE-NEXT: [[TMP15:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP13]]
+; SCALABLE-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 4 x i1> @llvm.vector.interleave2.nxv4i1(<vscale x 2 x i1> [[TMP12]], <vscale x 2 x i1> [[TMP12]])
+; SCALABLE-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 4 x i64> @llvm.masked.load.nxv4i64.p0(ptr [[TMP15]], i32 8, <vscale x 4 x i1> [[INTERLEAVED_MASK]], <vscale x 4 x i64> poison)
+; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave2.nxv4i64(<vscale x 4 x i64> [[WIDE_MASKED_VEC]])
; SCALABLE-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 0
; SCALABLE-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 1
; SCALABLE-NEXT: [[TMP10:%.*]] = add <vscale x 2 x i64> [[TMP8]], [[TMP9]]
; SCALABLE-NEXT: [[TMP11:%.*]] = getelementptr i64, ptr [[Q:%.*]], i64 [[INDEX]]
-; SCALABLE-NEXT: store <vscale x 2 x i64> [[TMP10]], ptr [[TMP11]], align 8
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; SCALABLE-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
+; SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP10]], ptr align 8 [[TMP11]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]])
+; SCALABLE-NEXT: [[TMP16:%.*]] = zext i32 [[TMP7]] to i64
+; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP16]], [[INDEX]]
+; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
+; SCALABLE-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; SCALABLE-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
; SCALABLE: middle.block:
-; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; SCALABLE-NEXT: br label [[EXIT:%.*]]
; SCALABLE: scalar.ph:
-; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; SCALABLE-NEXT: br label [[LOOP:%.*]]
; SCALABLE: loop:
-; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; SCALABLE-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1
; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
@@ -2492,7 +2530,7 @@ define void @combine_load_factor2_i64(ptr noalias %p, ptr noalias %q) {
; SCALABLE-NEXT: store i64 [[RES]], ptr [[DST]], align 8
; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1
; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP23:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP24:![0-9]+]]
; SCALABLE: exit:
; SCALABLE-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-cost.ll
index d6f16bf..ee91f75 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-cost.ll
@@ -7,22 +7,31 @@
; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v,+optimized-nf6-segment-load-store -debug-only=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s --check-prefix=OPT-NF6
; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v,+optimized-nf7-segment-load-store -debug-only=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s --check-prefix=OPT-NF7
; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v,+optimized-nf8-segment-load-store -debug-only=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s --check-prefix=OPT-NF8
+; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v,-optimized-nf2-segment-load-store -scalable-vectorization=off -debug-only=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s --check-prefix=FIXED-NO-OPT
+; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v -scalable-vectorization=off -debug-only=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s --check-prefix=FIXED-OPT-NF2
+; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v,+optimized-nf3-segment-load-store -scalable-vectorization=off -debug-only=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s --check-prefix=FIXED-OPT-NF3
+; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v,+optimized-nf4-segment-load-store -scalable-vectorization=off -debug-only=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s --check-prefix=FIXED-OPT-NF4
+; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v,+optimized-nf5-segment-load-store -scalable-vectorization=off -debug-only=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s --check-prefix=FIXED-OPT-NF5
+; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v,+optimized-nf6-segment-load-store -scalable-vectorization=off -debug-only=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s --check-prefix=FIXED-OPT-NF6
+; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v,+optimized-nf7-segment-load-store -scalable-vectorization=off -debug-only=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s --check-prefix=FIXED-OPT-NF7
+; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v,+optimized-nf8-segment-load-store -scalable-vectorization=off -debug-only=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s --check-prefix=FIXED-OPT-NF8
%i8.2 = type {i8, i8}
define void @i8_factor_2(ptr %data, i64 %n) {
entry:
br label %for.body
+; FIXED-OPT-NF2-LABEL: Checking a loop in 'i8_factor_2'
+; FIXED-OPT-NF2: Cost of 3 for VF 2: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
+; FIXED-OPT-NF2: Cost of 3 for VF 2: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
+; FIXED-OPT-NF2: Cost of 3 for VF 4: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
+; FIXED-OPT-NF2: Cost of 3 for VF 4: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
+; FIXED-OPT-NF2: Cost of 3 for VF 8: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
+; FIXED-OPT-NF2: Cost of 3 for VF 8: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
+; FIXED-OPT-NF2: Cost of 4 for VF 16: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
+; FIXED-OPT-NF2: Cost of 4 for VF 16: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
+; FIXED-OPT-NF2: Cost of 8 for VF 32: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
+; FIXED-OPT-NF2: Cost of 8 for VF 32: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
; OPT-NF2-LABEL: Checking a loop in 'i8_factor_2'
-; OPT-NF2: Cost of 3 for VF 2: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
-; OPT-NF2: Cost of 3 for VF 2: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
-; OPT-NF2: Cost of 3 for VF 4: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
-; OPT-NF2: Cost of 3 for VF 4: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
-; OPT-NF2: Cost of 3 for VF 8: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
-; OPT-NF2: Cost of 3 for VF 8: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
-; OPT-NF2: Cost of 4 for VF 16: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
-; OPT-NF2: Cost of 4 for VF 16: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
-; OPT-NF2: Cost of 8 for VF 32: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
-; OPT-NF2: Cost of 8 for VF 32: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
; OPT-NF2: Cost of 3 for VF vscale x 1: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
; OPT-NF2: Cost of 3 for VF vscale x 1: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
; OPT-NF2: Cost of 3 for VF vscale x 2: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
@@ -33,17 +42,18 @@ entry:
; OPT-NF2: Cost of 4 for VF vscale x 8: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
; OPT-NF2: Cost of 8 for VF vscale x 16: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
; OPT-NF2: Cost of 8 for VF vscale x 16: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
+; FIXED-NO-OPT-LABEL: Checking a loop in 'i8_factor_2'
+; FIXED-NO-OPT: Cost of 4 for VF 2: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 4 for VF 2: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 8 for VF 4: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 8 for VF 4: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 16 for VF 8: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 16 for VF 8: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 32 for VF 16: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 32 for VF 16: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 64 for VF 32: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 64 for VF 32: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
; NO-OPT-LABEL: Checking a loop in 'i8_factor_2'
-; NO-OPT: Cost of 4 for VF 2: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
-; NO-OPT: Cost of 4 for VF 2: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
-; NO-OPT: Cost of 8 for VF 4: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
-; NO-OPT: Cost of 8 for VF 4: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
-; NO-OPT: Cost of 16 for VF 8: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
-; NO-OPT: Cost of 16 for VF 8: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
-; NO-OPT: Cost of 32 for VF 16: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
-; NO-OPT: Cost of 32 for VF 16: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
-; NO-OPT: Cost of 64 for VF 32: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
-; NO-OPT: Cost of 64 for VF 32: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
; NO-OPT: Cost of 4 for VF vscale x 1: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
; NO-OPT: Cost of 4 for VF vscale x 1: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
; NO-OPT: Cost of 8 for VF vscale x 2: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
@@ -76,28 +86,50 @@ for.end:
define void @i8_factor_3(ptr %data, i64 %n) {
entry:
br label %for.body
+; FIXED-OPT-NF3-LABEL: Checking a loop in 'i8_factor_3'
+; FIXED-OPT-NF3: Cost of 4 for VF 2: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
+; FIXED-OPT-NF3: Cost of 4 for VF 2: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
+; FIXED-OPT-NF3: Cost of 4 for VF 4: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
+; FIXED-OPT-NF3: Cost of 4 for VF 4: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
+; FIXED-OPT-NF3: Cost of 5 for VF 8: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
+; FIXED-OPT-NF3: Cost of 5 for VF 8: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
+; FIXED-OPT-NF3: Cost of 7 for VF 16: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
+; FIXED-OPT-NF3: Cost of 7 for VF 16: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
+; FIXED-OPT-NF3: Cost of 14 for VF 32: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
+; FIXED-OPT-NF3: Cost of 14 for VF 32: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
; OPT-NF3-LABEL: Checking a loop in 'i8_factor_3'
-; OPT-NF3: Cost of 4 for VF 2: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
-; OPT-NF3: Cost of 4 for VF 2: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
-; OPT-NF3: Cost of 4 for VF 4: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
-; OPT-NF3: Cost of 4 for VF 4: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
-; OPT-NF3: Cost of 5 for VF 8: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
-; OPT-NF3: Cost of 5 for VF 8: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
-; OPT-NF3: Cost of 7 for VF 16: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
-; OPT-NF3: Cost of 7 for VF 16: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
-; OPT-NF3: Cost of 14 for VF 32: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
-; OPT-NF3: Cost of 14 for VF 32: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
+; OPT-NF3: Cost of 4 for VF vscale x 1: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
+; OPT-NF3: Cost of 4 for VF vscale x 1: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
+; OPT-NF3: Cost of 4 for VF vscale x 2: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
+; OPT-NF3: Cost of 4 for VF vscale x 2: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
+; OPT-NF3: Cost of 5 for VF vscale x 4: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
+; OPT-NF3: Cost of 5 for VF vscale x 4: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
+; OPT-NF3: Cost of 7 for VF vscale x 8: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
+; OPT-NF3: Cost of 7 for VF vscale x 8: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
+; OPT-NF3: Cost of 14 for VF vscale x 16: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
+; OPT-NF3: Cost of 14 for VF vscale x 16: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
+; FIXED-NO-OPT-LABEL: Checking a loop in 'i8_factor_3'
+; FIXED-NO-OPT: Cost of 6 for VF 2: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 6 for VF 2: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 12 for VF 4: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 12 for VF 4: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 24 for VF 8: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 24 for VF 8: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 48 for VF 16: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 48 for VF 16: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 96 for VF 32: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 96 for VF 32: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
; NO-OPT-LABEL: Checking a loop in 'i8_factor_3'
-; NO-OPT: Cost of 6 for VF 2: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
-; NO-OPT: Cost of 6 for VF 2: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
-; NO-OPT: Cost of 12 for VF 4: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
-; NO-OPT: Cost of 12 for VF 4: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
-; NO-OPT: Cost of 24 for VF 8: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
-; NO-OPT: Cost of 24 for VF 8: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
-; NO-OPT: Cost of 48 for VF 16: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
-; NO-OPT: Cost of 48 for VF 16: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
-; NO-OPT: Cost of 96 for VF 32: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
-; NO-OPT: Cost of 96 for VF 32: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
+; NO-OPT: Cost of 6 for VF vscale x 1: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
+; NO-OPT: Cost of 6 for VF vscale x 1: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
+; NO-OPT: Cost of 12 for VF vscale x 2: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
+; NO-OPT: Cost of 12 for VF vscale x 2: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
+; NO-OPT: Cost of 24 for VF vscale x 4: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
+; NO-OPT: Cost of 24 for VF vscale x 4: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
+; NO-OPT: Cost of 48 for VF vscale x 8: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
+; NO-OPT: Cost of 48 for VF vscale x 8: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
+; NO-OPT: Cost of 96 for VF vscale x 16: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
+; NO-OPT: Cost of 96 for VF vscale x 16: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
for.body:
%i = phi i64 [ 0, %entry ], [ %i.next, %for.body ]
%p0 = getelementptr inbounds %i8.3, ptr %data, i64 %i, i32 0
@@ -124,28 +156,50 @@ for.end:
define void @i8_factor_4(ptr %data, i64 %n) {
entry:
br label %for.body
+; FIXED-OPT-NF4-LABEL: Checking a loop in 'i8_factor_4'
+; FIXED-OPT-NF4: Cost of 5 for VF 2: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
+; FIXED-OPT-NF4: Cost of 5 for VF 2: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
+; FIXED-OPT-NF4: Cost of 5 for VF 4: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
+; FIXED-OPT-NF4: Cost of 5 for VF 4: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
+; FIXED-OPT-NF4: Cost of 6 for VF 8: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
+; FIXED-OPT-NF4: Cost of 6 for VF 8: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
+; FIXED-OPT-NF4: Cost of 8 for VF 16: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
+; FIXED-OPT-NF4: Cost of 8 for VF 16: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
+; FIXED-OPT-NF4: Cost of 16 for VF 32: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
+; FIXED-OPT-NF4: Cost of 16 for VF 32: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
; OPT-NF4-LABEL: Checking a loop in 'i8_factor_4'
-; OPT-NF4: Cost of 5 for VF 2: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
-; OPT-NF4: Cost of 5 for VF 2: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
-; OPT-NF4: Cost of 5 for VF 4: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
-; OPT-NF4: Cost of 5 for VF 4: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
-; OPT-NF4: Cost of 6 for VF 8: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
-; OPT-NF4: Cost of 6 for VF 8: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
-; OPT-NF4: Cost of 8 for VF 16: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
-; OPT-NF4: Cost of 8 for VF 16: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
-; OPT-NF4: Cost of 16 for VF 32: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
-; OPT-NF4: Cost of 16 for VF 32: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
+; OPT-NF4: Cost of 5 for VF vscale x 1: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
+; OPT-NF4: Cost of 5 for VF vscale x 1: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
+; OPT-NF4: Cost of 5 for VF vscale x 2: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
+; OPT-NF4: Cost of 5 for VF vscale x 2: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
+; OPT-NF4: Cost of 6 for VF vscale x 4: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
+; OPT-NF4: Cost of 6 for VF vscale x 4: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
+; OPT-NF4: Cost of 8 for VF vscale x 8: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
+; OPT-NF4: Cost of 8 for VF vscale x 8: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
+; OPT-NF4: Cost of 16 for VF vscale x 16: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
+; OPT-NF4: Cost of 16 for VF vscale x 16: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
+; FIXED-NO-OPT-LABEL: Checking a loop in 'i8_factor_4'
+; FIXED-NO-OPT: Cost of 8 for VF 2: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 8 for VF 2: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 16 for VF 4: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 16 for VF 4: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 32 for VF 8: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 32 for VF 8: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 64 for VF 16: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 64 for VF 16: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 128 for VF 32: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 128 for VF 32: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
; NO-OPT-LABEL: Checking a loop in 'i8_factor_4'
-; NO-OPT: Cost of 8 for VF 2: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
-; NO-OPT: Cost of 8 for VF 2: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
-; NO-OPT: Cost of 16 for VF 4: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
-; NO-OPT: Cost of 16 for VF 4: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
-; NO-OPT: Cost of 32 for VF 8: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
-; NO-OPT: Cost of 32 for VF 8: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
-; NO-OPT: Cost of 64 for VF 16: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
-; NO-OPT: Cost of 64 for VF 16: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
-; NO-OPT: Cost of 128 for VF 32: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
-; NO-OPT: Cost of 128 for VF 32: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
+; NO-OPT: Cost of 8 for VF vscale x 1: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
+; NO-OPT: Cost of 8 for VF vscale x 1: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
+; NO-OPT: Cost of 16 for VF vscale x 2: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
+; NO-OPT: Cost of 16 for VF vscale x 2: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
+; NO-OPT: Cost of 32 for VF vscale x 4: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
+; NO-OPT: Cost of 32 for VF vscale x 4: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
+; NO-OPT: Cost of 64 for VF vscale x 8: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
+; NO-OPT: Cost of 64 for VF vscale x 8: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
+; NO-OPT: Cost of 128 for VF vscale x 16: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
+; NO-OPT: Cost of 128 for VF vscale x 16: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
for.body:
%i = phi i64 [ 0, %entry ], [ %i.next, %for.body ]
%p0 = getelementptr inbounds %i8.4, ptr %data, i64 %i, i32 0
@@ -176,24 +230,42 @@ for.end:
define void @i8_factor_5(ptr %data, i64 %n) {
entry:
br label %for.body
+; FIXED-OPT-NF5-LABEL: Checking a loop in 'i8_factor_5'
+; FIXED-OPT-NF5: Cost of 6 for VF 2: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
+; FIXED-OPT-NF5: Cost of 6 for VF 2: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
+; FIXED-OPT-NF5: Cost of 7 for VF 4: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
+; FIXED-OPT-NF5: Cost of 7 for VF 4: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
+; FIXED-OPT-NF5: Cost of 9 for VF 8: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
+; FIXED-OPT-NF5: Cost of 9 for VF 8: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
+; FIXED-OPT-NF5: Cost of 13 for VF 16: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
+; FIXED-OPT-NF5: Cost of 13 for VF 16: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
; OPT-NF5-LABEL: Checking a loop in 'i8_factor_5'
-; OPT-NF5: Cost of 6 for VF 2: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
-; OPT-NF5: Cost of 6 for VF 2: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
-; OPT-NF5: Cost of 7 for VF 4: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
-; OPT-NF5: Cost of 7 for VF 4: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
-; OPT-NF5: Cost of 9 for VF 8: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
-; OPT-NF5: Cost of 9 for VF 8: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
-; OPT-NF5: Cost of 13 for VF 16: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
-; OPT-NF5: Cost of 13 for VF 16: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
+; OPT-NF5: Cost of 6 for VF vscale x 1: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
+; OPT-NF5: Cost of 6 for VF vscale x 1: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
+; OPT-NF5: Cost of 7 for VF vscale x 2: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
+; OPT-NF5: Cost of 7 for VF vscale x 2: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
+; OPT-NF5: Cost of 9 for VF vscale x 4: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
+; OPT-NF5: Cost of 9 for VF vscale x 4: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
+; OPT-NF5: Cost of 13 for VF vscale x 8: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
+; OPT-NF5: Cost of 13 for VF vscale x 8: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
+; FIXED-NO-OPT-LABEL: Checking a loop in 'i8_factor_5'
+; FIXED-NO-OPT: Cost of 10 for VF 2: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 10 for VF 2: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 20 for VF 4: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 20 for VF 4: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 40 for VF 8: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 40 for VF 8: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 80 for VF 16: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 80 for VF 16: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
; NO-OPT-LABEL: Checking a loop in 'i8_factor_5'
-; NO-OPT: Cost of 10 for VF 2: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
-; NO-OPT: Cost of 10 for VF 2: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
-; NO-OPT: Cost of 20 for VF 4: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
-; NO-OPT: Cost of 20 for VF 4: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
-; NO-OPT: Cost of 40 for VF 8: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
-; NO-OPT: Cost of 40 for VF 8: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
-; NO-OPT: Cost of 80 for VF 16: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
-; NO-OPT: Cost of 80 for VF 16: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
+; NO-OPT: Cost of 10 for VF vscale x 1: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
+; NO-OPT: Cost of 10 for VF vscale x 1: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
+; NO-OPT: Cost of 20 for VF vscale x 2: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
+; NO-OPT: Cost of 20 for VF vscale x 2: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
+; NO-OPT: Cost of 40 for VF vscale x 4: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
+; NO-OPT: Cost of 40 for VF vscale x 4: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
+; NO-OPT: Cost of 80 for VF vscale x 8: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
+; NO-OPT: Cost of 80 for VF vscale x 8: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
for.body:
%i = phi i64 [ 0, %entry ], [ %i.next, %for.body ]
%p0 = getelementptr inbounds %i8.5, ptr %data, i64 %i, i32 0
@@ -228,24 +300,42 @@ for.end:
define void @i8_factor_6(ptr %data, i64 %n) {
entry:
br label %for.body
+; FIXED-OPT-NF6-LABEL: Checking a loop in 'i8_factor_6'
+; FIXED-OPT-NF6: Cost of 7 for VF 2: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
+; FIXED-OPT-NF6: Cost of 7 for VF 2: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
+; FIXED-OPT-NF6: Cost of 8 for VF 4: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
+; FIXED-OPT-NF6: Cost of 8 for VF 4: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
+; FIXED-OPT-NF6: Cost of 10 for VF 8: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
+; FIXED-OPT-NF6: Cost of 10 for VF 8: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
+; FIXED-OPT-NF6: Cost of 14 for VF 16: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
+; FIXED-OPT-NF6: Cost of 14 for VF 16: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
; OPT-NF6-LABEL: Checking a loop in 'i8_factor_6'
-; OPT-NF6: Cost of 7 for VF 2: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
-; OPT-NF6: Cost of 7 for VF 2: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
-; OPT-NF6: Cost of 8 for VF 4: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
-; OPT-NF6: Cost of 8 for VF 4: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
-; OPT-NF6: Cost of 10 for VF 8: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
-; OPT-NF6: Cost of 10 for VF 8: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
-; OPT-NF6: Cost of 14 for VF 16: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
-; OPT-NF6: Cost of 14 for VF 16: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
+; OPT-NF6: Cost of 7 for VF vscale x 1: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
+; OPT-NF6: Cost of 7 for VF vscale x 1: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
+; OPT-NF6: Cost of 8 for VF vscale x 2: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
+; OPT-NF6: Cost of 8 for VF vscale x 2: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
+; OPT-NF6: Cost of 10 for VF vscale x 4: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
+; OPT-NF6: Cost of 10 for VF vscale x 4: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
+; OPT-NF6: Cost of 14 for VF vscale x 8: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
+; OPT-NF6: Cost of 14 for VF vscale x 8: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
+; FIXED-NO-OPT-LABEL: Checking a loop in 'i8_factor_6'
+; FIXED-NO-OPT: Cost of 12 for VF 2: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 12 for VF 2: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 24 for VF 4: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 24 for VF 4: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 48 for VF 8: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 48 for VF 8: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 96 for VF 16: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 96 for VF 16: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
; NO-OPT-LABEL: Checking a loop in 'i8_factor_6'
-; NO-OPT: Cost of 12 for VF 2: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
-; NO-OPT: Cost of 12 for VF 2: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
-; NO-OPT: Cost of 24 for VF 4: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
-; NO-OPT: Cost of 24 for VF 4: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
-; NO-OPT: Cost of 48 for VF 8: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
-; NO-OPT: Cost of 48 for VF 8: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
-; NO-OPT: Cost of 96 for VF 16: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
-; NO-OPT: Cost of 96 for VF 16: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
+; NO-OPT: Cost of 12 for VF vscale x 1: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
+; NO-OPT: Cost of 12 for VF vscale x 1: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
+; NO-OPT: Cost of 24 for VF vscale x 2: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
+; NO-OPT: Cost of 24 for VF vscale x 2: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
+; NO-OPT: Cost of 48 for VF vscale x 4: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
+; NO-OPT: Cost of 48 for VF vscale x 4: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
+; NO-OPT: Cost of 96 for VF vscale x 8: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
+; NO-OPT: Cost of 96 for VF vscale x 8: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
for.body:
%i = phi i64 [ 0, %entry ], [ %i.next, %for.body ]
%p0 = getelementptr inbounds %i8.6, ptr %data, i64 %i, i32 0
@@ -284,24 +374,42 @@ for.end:
define void @i8_factor_7(ptr %data, i64 %n) {
entry:
br label %for.body
+; FIXED-OPT-NF7-LABEL: Checking a loop in 'i8_factor_7'
+; FIXED-OPT-NF7: Cost of 8 for VF 2: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
+; FIXED-OPT-NF7: Cost of 8 for VF 2: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
+; FIXED-OPT-NF7: Cost of 9 for VF 4: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
+; FIXED-OPT-NF7: Cost of 9 for VF 4: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
+; FIXED-OPT-NF7: Cost of 11 for VF 8: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
+; FIXED-OPT-NF7: Cost of 11 for VF 8: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
+; FIXED-OPT-NF7: Cost of 15 for VF 16: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
+; FIXED-OPT-NF7: Cost of 15 for VF 16: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
; OPT-NF7-LABEL: Checking a loop in 'i8_factor_7'
-; OPT-NF7: Cost of 8 for VF 2: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
-; OPT-NF7: Cost of 8 for VF 2: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
-; OPT-NF7: Cost of 9 for VF 4: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
-; OPT-NF7: Cost of 9 for VF 4: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
-; OPT-NF7: Cost of 11 for VF 8: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
-; OPT-NF7: Cost of 11 for VF 8: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
-; OPT-NF7: Cost of 15 for VF 16: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
-; OPT-NF7: Cost of 15 for VF 16: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
+; OPT-NF7: Cost of 8 for VF vscale x 1: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
+; OPT-NF7: Cost of 8 for VF vscale x 1: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
+; OPT-NF7: Cost of 9 for VF vscale x 2: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
+; OPT-NF7: Cost of 9 for VF vscale x 2: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
+; OPT-NF7: Cost of 11 for VF vscale x 4: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
+; OPT-NF7: Cost of 11 for VF vscale x 4: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
+; OPT-NF7: Cost of 15 for VF vscale x 8: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
+; OPT-NF7: Cost of 15 for VF vscale x 8: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
+; FIXED-NO-OPT-LABEL: Checking a loop in 'i8_factor_7'
+; FIXED-NO-OPT: Cost of 14 for VF 2: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 14 for VF 2: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 28 for VF 4: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 28 for VF 4: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 56 for VF 8: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 56 for VF 8: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 112 for VF 16: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 112 for VF 16: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
; NO-OPT-LABEL: Checking a loop in 'i8_factor_7'
-; NO-OPT: Cost of 14 for VF 2: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
-; NO-OPT: Cost of 14 for VF 2: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
-; NO-OPT: Cost of 28 for VF 4: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
-; NO-OPT: Cost of 28 for VF 4: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
-; NO-OPT: Cost of 56 for VF 8: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
-; NO-OPT: Cost of 56 for VF 8: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
-; NO-OPT: Cost of 112 for VF 16: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
-; NO-OPT: Cost of 112 for VF 16: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
+; NO-OPT: Cost of 14 for VF vscale x 1: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
+; NO-OPT: Cost of 14 for VF vscale x 1: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
+; NO-OPT: Cost of 28 for VF vscale x 2: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
+; NO-OPT: Cost of 28 for VF vscale x 2: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
+; NO-OPT: Cost of 56 for VF vscale x 4: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
+; NO-OPT: Cost of 56 for VF vscale x 4: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
+; NO-OPT: Cost of 112 for VF vscale x 8: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
+; NO-OPT: Cost of 112 for VF vscale x 8: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
for.body:
%i = phi i64 [ 0, %entry ], [ %i.next, %for.body ]
%p0 = getelementptr inbounds %i8.7, ptr %data, i64 %i, i32 0
@@ -344,24 +452,42 @@ for.end:
define void @i8_factor_8(ptr %data, i64 %n) {
entry:
br label %for.body
+; FIXED-OPT-NF8-LABEL: Checking a loop in 'i8_factor_8'
+; FIXED-OPT-NF8: Cost of 9 for VF 2: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
+; FIXED-OPT-NF8: Cost of 9 for VF 2: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
+; FIXED-OPT-NF8: Cost of 10 for VF 4: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
+; FIXED-OPT-NF8: Cost of 10 for VF 4: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
+; FIXED-OPT-NF8: Cost of 12 for VF 8: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
+; FIXED-OPT-NF8: Cost of 12 for VF 8: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
+; FIXED-OPT-NF8: Cost of 16 for VF 16: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
+; FIXED-OPT-NF8: Cost of 16 for VF 16: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
; OPT-NF8-LABEL: Checking a loop in 'i8_factor_8'
-; OPT-NF8: Cost of 9 for VF 2: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
-; OPT-NF8: Cost of 9 for VF 2: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
-; OPT-NF8: Cost of 10 for VF 4: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
-; OPT-NF8: Cost of 10 for VF 4: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
-; OPT-NF8: Cost of 12 for VF 8: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
-; OPT-NF8: Cost of 12 for VF 8: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
-; OPT-NF8: Cost of 16 for VF 16: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
-; OPT-NF8: Cost of 16 for VF 16: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
+; OPT-NF8: Cost of 9 for VF vscale x 1: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
+; OPT-NF8: Cost of 9 for VF vscale x 1: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
+; OPT-NF8: Cost of 10 for VF vscale x 2: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
+; OPT-NF8: Cost of 10 for VF vscale x 2: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
+; OPT-NF8: Cost of 12 for VF vscale x 4: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
+; OPT-NF8: Cost of 12 for VF vscale x 4: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
+; OPT-NF8: Cost of 16 for VF vscale x 8: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
+; OPT-NF8: Cost of 16 for VF vscale x 8: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
+; FIXED-NO-OPT-LABEL: Checking a loop in 'i8_factor_8'
+; FIXED-NO-OPT: Cost of 16 for VF 2: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 16 for VF 2: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 32 for VF 4: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 32 for VF 4: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 64 for VF 8: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 64 for VF 8: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 128 for VF 16: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 128 for VF 16: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
; NO-OPT-LABEL: Checking a loop in 'i8_factor_8'
-; NO-OPT: Cost of 16 for VF 2: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
-; NO-OPT: Cost of 16 for VF 2: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
-; NO-OPT: Cost of 32 for VF 4: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
-; NO-OPT: Cost of 32 for VF 4: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
-; NO-OPT: Cost of 64 for VF 8: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
-; NO-OPT: Cost of 64 for VF 8: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
-; NO-OPT: Cost of 128 for VF 16: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
-; NO-OPT: Cost of 128 for VF 16: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
+; NO-OPT: Cost of 16 for VF vscale x 1: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
+; NO-OPT: Cost of 16 for VF vscale x 1: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
+; NO-OPT: Cost of 32 for VF vscale x 2: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
+; NO-OPT: Cost of 32 for VF vscale x 2: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
+; NO-OPT: Cost of 64 for VF vscale x 4: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
+; NO-OPT: Cost of 64 for VF vscale x 4: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
+; NO-OPT: Cost of 128 for VF vscale x 8: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
+; NO-OPT: Cost of 128 for VF vscale x 8: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
for.body:
%i = phi i64 [ 0, %entry ], [ %i.next, %for.body ]
%p0 = getelementptr inbounds %i8.8, ptr %data, i64 %i, i32 0
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll
index 976ce77..1bceb87 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll
@@ -18,37 +18,35 @@ define void @masked_strided_factor2(ptr noalias nocapture readonly %p, ptr noali
; SCALAR_EPILOGUE-NEXT: [[TMP2:%.*]] = shl nuw i32 [[TMP1]], 4
; SCALAR_EPILOGUE-NEXT: [[N_MOD_VF:%.*]] = urem i32 1024, [[TMP2]]
; SCALAR_EPILOGUE-NEXT: [[N_VEC:%.*]] = sub nuw nsw i32 1024, [[N_MOD_VF]]
-; SCALAR_EPILOGUE-NEXT: [[TMP3:%.*]] = call i32 @llvm.vscale.i32()
-; SCALAR_EPILOGUE-NEXT: [[TMP4:%.*]] = shl nuw i32 [[TMP3]], 4
; SCALAR_EPILOGUE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[CONV]], i64 0
; SCALAR_EPILOGUE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-; SCALAR_EPILOGUE-NEXT: [[TMP5:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
-; SCALAR_EPILOGUE-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP4]], i64 0
+; SCALAR_EPILOGUE-NEXT: [[TMP3:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
+; SCALAR_EPILOGUE-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP2]], i64 0
; SCALAR_EPILOGUE-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
; SCALAR_EPILOGUE-NEXT: br label [[VECTOR_BODY:%.*]]
; SCALAR_EPILOGUE: vector.body:
; SCALAR_EPILOGUE-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALAR_EPILOGUE-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP5]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALAR_EPILOGUE-NEXT: [[TMP6:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; SCALAR_EPILOGUE-NEXT: [[TMP7:%.*]] = shl i32 [[INDEX]], 1
-; SCALAR_EPILOGUE-NEXT: [[TMP8:%.*]] = sext i32 [[TMP7]] to i64
-; SCALAR_EPILOGUE-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP8]]
-; SCALAR_EPILOGUE-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1> [[TMP6]])
-; SCALAR_EPILOGUE-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 32 x i8> @llvm.masked.load.nxv32i8.p0(ptr [[TMP9]], i32 1, <vscale x 32 x i1> [[INTERLEAVED_MASK]], <vscale x 32 x i8> poison)
+; SCALAR_EPILOGUE-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP3]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; SCALAR_EPILOGUE-NEXT: [[TMP4:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; SCALAR_EPILOGUE-NEXT: [[TMP5:%.*]] = shl i32 [[INDEX]], 1
+; SCALAR_EPILOGUE-NEXT: [[TMP6:%.*]] = sext i32 [[TMP5]] to i64
+; SCALAR_EPILOGUE-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP6]]
+; SCALAR_EPILOGUE-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]])
+; SCALAR_EPILOGUE-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 32 x i8> @llvm.masked.load.nxv32i8.p0(ptr [[TMP7]], i32 1, <vscale x 32 x i1> [[INTERLEAVED_MASK]], <vscale x 32 x i8> poison)
; SCALAR_EPILOGUE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.vector.deinterleave2.nxv32i8(<vscale x 32 x i8> [[WIDE_MASKED_VEC]])
-; SCALAR_EPILOGUE-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 0
-; SCALAR_EPILOGUE-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 1
-; SCALAR_EPILOGUE-NEXT: [[TMP12:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[TMP10]], <vscale x 16 x i8> [[TMP11]])
-; SCALAR_EPILOGUE-NEXT: [[TMP13:%.*]] = sext i32 [[TMP7]] to i64
-; SCALAR_EPILOGUE-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP13]]
-; SCALAR_EPILOGUE-NEXT: [[TMP15:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP12]]
-; SCALAR_EPILOGUE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 32 x i8> @llvm.vector.interleave2.nxv32i8(<vscale x 16 x i8> [[TMP12]], <vscale x 16 x i8> [[TMP15]])
-; SCALAR_EPILOGUE-NEXT: [[INTERLEAVED_MASK3:%.*]] = call <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1> [[TMP6]])
-; SCALAR_EPILOGUE-NEXT: call void @llvm.masked.store.nxv32i8.p0(<vscale x 32 x i8> [[INTERLEAVED_VEC]], ptr [[TMP14]], i32 1, <vscale x 32 x i1> [[INTERLEAVED_MASK3]])
-; SCALAR_EPILOGUE-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP4]]
+; SCALAR_EPILOGUE-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 0
+; SCALAR_EPILOGUE-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 1
+; SCALAR_EPILOGUE-NEXT: [[TMP10:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[TMP8]], <vscale x 16 x i8> [[TMP9]])
+; SCALAR_EPILOGUE-NEXT: [[TMP11:%.*]] = sext i32 [[TMP5]] to i64
+; SCALAR_EPILOGUE-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP11]]
+; SCALAR_EPILOGUE-NEXT: [[TMP13:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP10]]
+; SCALAR_EPILOGUE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 32 x i8> @llvm.vector.interleave2.nxv32i8(<vscale x 16 x i8> [[TMP10]], <vscale x 16 x i8> [[TMP13]])
+; SCALAR_EPILOGUE-NEXT: [[INTERLEAVED_MASK3:%.*]] = call <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]])
+; SCALAR_EPILOGUE-NEXT: call void @llvm.masked.store.nxv32i8.p0(<vscale x 32 x i8> [[INTERLEAVED_VEC]], ptr [[TMP12]], i32 1, <vscale x 32 x i1> [[INTERLEAVED_MASK3]])
+; SCALAR_EPILOGUE-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP2]]
; SCALAR_EPILOGUE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]]
-; SCALAR_EPILOGUE-NEXT: [[TMP16:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; SCALAR_EPILOGUE-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; SCALAR_EPILOGUE-NEXT: [[TMP14:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; SCALAR_EPILOGUE-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; SCALAR_EPILOGUE: middle.block:
; SCALAR_EPILOGUE-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_MOD_VF]], 0
; SCALAR_EPILOGUE-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
@@ -65,39 +63,37 @@ define void @masked_strided_factor2(ptr noalias nocapture readonly %p, ptr noali
; PREDICATED_DATA-NEXT: [[N_RND_UP:%.*]] = add i32 [[TMP1]], 1023
; PREDICATED_DATA-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N_RND_UP]], [[TMP1]]
; PREDICATED_DATA-NEXT: [[N_VEC:%.*]] = sub i32 [[N_RND_UP]], [[N_MOD_VF]]
-; PREDICATED_DATA-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32()
-; PREDICATED_DATA-NEXT: [[TMP3:%.*]] = shl nuw i32 [[TMP2]], 4
; PREDICATED_DATA-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[CONV]], i64 0
; PREDICATED_DATA-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-; PREDICATED_DATA-NEXT: [[TMP4:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
-; PREDICATED_DATA-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP3]], i64 0
+; PREDICATED_DATA-NEXT: [[TMP2:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
+; PREDICATED_DATA-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP1]], i64 0
; PREDICATED_DATA-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
; PREDICATED_DATA-NEXT: br label [[VECTOR_BODY:%.*]]
; PREDICATED_DATA: vector.body:
; PREDICATED_DATA-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; PREDICATED_DATA-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP4]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; PREDICATED_DATA-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP2]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; PREDICATED_DATA-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 [[INDEX]], i32 1024)
-; PREDICATED_DATA-NEXT: [[TMP5:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; PREDICATED_DATA-NEXT: [[TMP6:%.*]] = select <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i1> [[TMP5]], <vscale x 16 x i1> zeroinitializer
-; PREDICATED_DATA-NEXT: [[TMP7:%.*]] = shl i32 [[INDEX]], 1
-; PREDICATED_DATA-NEXT: [[TMP8:%.*]] = sext i32 [[TMP7]] to i64
-; PREDICATED_DATA-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP8]]
-; PREDICATED_DATA-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1> [[TMP6]])
-; PREDICATED_DATA-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 32 x i8> @llvm.masked.load.nxv32i8.p0(ptr [[TMP9]], i32 1, <vscale x 32 x i1> [[INTERLEAVED_MASK]], <vscale x 32 x i8> poison)
+; PREDICATED_DATA-NEXT: [[TMP3:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; PREDICATED_DATA-NEXT: [[TMP4:%.*]] = select <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i1> [[TMP3]], <vscale x 16 x i1> zeroinitializer
+; PREDICATED_DATA-NEXT: [[TMP5:%.*]] = shl i32 [[INDEX]], 1
+; PREDICATED_DATA-NEXT: [[TMP6:%.*]] = sext i32 [[TMP5]] to i64
+; PREDICATED_DATA-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP6]]
+; PREDICATED_DATA-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]])
+; PREDICATED_DATA-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 32 x i8> @llvm.masked.load.nxv32i8.p0(ptr [[TMP7]], i32 1, <vscale x 32 x i1> [[INTERLEAVED_MASK]], <vscale x 32 x i8> poison)
; PREDICATED_DATA-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.vector.deinterleave2.nxv32i8(<vscale x 32 x i8> [[WIDE_MASKED_VEC]])
-; PREDICATED_DATA-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 0
-; PREDICATED_DATA-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 1
-; PREDICATED_DATA-NEXT: [[TMP12:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[TMP10]], <vscale x 16 x i8> [[TMP11]])
-; PREDICATED_DATA-NEXT: [[TMP13:%.*]] = sext i32 [[TMP7]] to i64
-; PREDICATED_DATA-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP13]]
-; PREDICATED_DATA-NEXT: [[TMP15:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP12]]
-; PREDICATED_DATA-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 32 x i8> @llvm.vector.interleave2.nxv32i8(<vscale x 16 x i8> [[TMP12]], <vscale x 16 x i8> [[TMP15]])
-; PREDICATED_DATA-NEXT: [[INTERLEAVED_MASK3:%.*]] = call <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1> [[TMP6]])
-; PREDICATED_DATA-NEXT: call void @llvm.masked.store.nxv32i8.p0(<vscale x 32 x i8> [[INTERLEAVED_VEC]], ptr [[TMP14]], i32 1, <vscale x 32 x i1> [[INTERLEAVED_MASK3]])
-; PREDICATED_DATA-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP3]]
+; PREDICATED_DATA-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 0
+; PREDICATED_DATA-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 1
+; PREDICATED_DATA-NEXT: [[TMP10:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[TMP8]], <vscale x 16 x i8> [[TMP9]])
+; PREDICATED_DATA-NEXT: [[TMP11:%.*]] = sext i32 [[TMP5]] to i64
+; PREDICATED_DATA-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP11]]
+; PREDICATED_DATA-NEXT: [[TMP13:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP10]]
+; PREDICATED_DATA-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 32 x i8> @llvm.vector.interleave2.nxv32i8(<vscale x 16 x i8> [[TMP10]], <vscale x 16 x i8> [[TMP13]])
+; PREDICATED_DATA-NEXT: [[INTERLEAVED_MASK3:%.*]] = call <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]])
+; PREDICATED_DATA-NEXT: call void @llvm.masked.store.nxv32i8.p0(<vscale x 32 x i8> [[INTERLEAVED_VEC]], ptr [[TMP12]], i32 1, <vscale x 32 x i1> [[INTERLEAVED_MASK3]])
+; PREDICATED_DATA-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP1]]
; PREDICATED_DATA-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]]
-; PREDICATED_DATA-NEXT: [[TMP16:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; PREDICATED_DATA-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; PREDICATED_DATA-NEXT: [[TMP14:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; PREDICATED_DATA-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; PREDICATED_DATA: middle.block:
; PREDICATED_DATA-NEXT: br label [[FOR_END:%.*]]
; PREDICATED_DATA: scalar.ph:
@@ -197,41 +193,39 @@ define void @masked_strided_factor4(ptr noalias nocapture readonly %p, ptr noali
; SCALAR_EPILOGUE-NEXT: [[TMP2:%.*]] = shl nuw i32 [[TMP1]], 4
; SCALAR_EPILOGUE-NEXT: [[N_MOD_VF:%.*]] = urem i32 1024, [[TMP2]]
; SCALAR_EPILOGUE-NEXT: [[N_VEC:%.*]] = sub nuw nsw i32 1024, [[N_MOD_VF]]
-; SCALAR_EPILOGUE-NEXT: [[TMP3:%.*]] = call i32 @llvm.vscale.i32()
-; SCALAR_EPILOGUE-NEXT: [[TMP4:%.*]] = shl nuw i32 [[TMP3]], 4
; SCALAR_EPILOGUE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[CONV]], i64 0
; SCALAR_EPILOGUE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-; SCALAR_EPILOGUE-NEXT: [[TMP5:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
-; SCALAR_EPILOGUE-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP4]], i64 0
+; SCALAR_EPILOGUE-NEXT: [[TMP3:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
+; SCALAR_EPILOGUE-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP2]], i64 0
; SCALAR_EPILOGUE-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
; SCALAR_EPILOGUE-NEXT: br label [[VECTOR_BODY:%.*]]
; SCALAR_EPILOGUE: vector.body:
; SCALAR_EPILOGUE-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALAR_EPILOGUE-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP5]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALAR_EPILOGUE-NEXT: [[TMP6:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; SCALAR_EPILOGUE-NEXT: [[TMP7:%.*]] = shl i32 [[INDEX]], 2
-; SCALAR_EPILOGUE-NEXT: [[TMP8:%.*]] = sext i32 [[TMP7]] to i64
-; SCALAR_EPILOGUE-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP8]]
-; SCALAR_EPILOGUE-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 64 x i1> @llvm.vector.interleave4.nxv64i1(<vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1> [[TMP6]])
-; SCALAR_EPILOGUE-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 64 x i8> @llvm.masked.load.nxv64i8.p0(ptr [[TMP9]], i32 1, <vscale x 64 x i1> [[INTERLEAVED_MASK]], <vscale x 64 x i8> poison)
+; SCALAR_EPILOGUE-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP3]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; SCALAR_EPILOGUE-NEXT: [[TMP4:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; SCALAR_EPILOGUE-NEXT: [[TMP5:%.*]] = shl i32 [[INDEX]], 2
+; SCALAR_EPILOGUE-NEXT: [[TMP6:%.*]] = sext i32 [[TMP5]] to i64
+; SCALAR_EPILOGUE-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP6]]
+; SCALAR_EPILOGUE-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 64 x i1> @llvm.vector.interleave4.nxv64i1(<vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]])
+; SCALAR_EPILOGUE-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 64 x i8> @llvm.masked.load.nxv64i8.p0(ptr [[TMP7]], i32 1, <vscale x 64 x i1> [[INTERLEAVED_MASK]], <vscale x 64 x i8> poison)
; SCALAR_EPILOGUE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.vector.deinterleave4.nxv64i8(<vscale x 64 x i8> [[WIDE_MASKED_VEC]])
-; SCALAR_EPILOGUE-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 0
-; SCALAR_EPILOGUE-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 1
-; SCALAR_EPILOGUE-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 2
-; SCALAR_EPILOGUE-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 3
+; SCALAR_EPILOGUE-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 0
+; SCALAR_EPILOGUE-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 1
+; SCALAR_EPILOGUE-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 2
+; SCALAR_EPILOGUE-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 3
+; SCALAR_EPILOGUE-NEXT: [[TMP12:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[TMP8]], <vscale x 16 x i8> [[TMP9]])
+; SCALAR_EPILOGUE-NEXT: [[TMP13:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP12]]
; SCALAR_EPILOGUE-NEXT: [[TMP14:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[TMP10]], <vscale x 16 x i8> [[TMP11]])
; SCALAR_EPILOGUE-NEXT: [[TMP15:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP14]]
-; SCALAR_EPILOGUE-NEXT: [[TMP16:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[TMP12]], <vscale x 16 x i8> [[TMP13]])
-; SCALAR_EPILOGUE-NEXT: [[TMP17:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP16]]
-; SCALAR_EPILOGUE-NEXT: [[TMP18:%.*]] = sext i32 [[TMP7]] to i64
-; SCALAR_EPILOGUE-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP18]]
-; SCALAR_EPILOGUE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 64 x i8> @llvm.vector.interleave4.nxv64i8(<vscale x 16 x i8> [[TMP14]], <vscale x 16 x i8> [[TMP15]], <vscale x 16 x i8> [[TMP16]], <vscale x 16 x i8> [[TMP17]])
-; SCALAR_EPILOGUE-NEXT: [[INTERLEAVED_MASK3:%.*]] = call <vscale x 64 x i1> @llvm.vector.interleave4.nxv64i1(<vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1> [[TMP6]])
-; SCALAR_EPILOGUE-NEXT: call void @llvm.masked.store.nxv64i8.p0(<vscale x 64 x i8> [[INTERLEAVED_VEC]], ptr [[TMP19]], i32 1, <vscale x 64 x i1> [[INTERLEAVED_MASK3]])
-; SCALAR_EPILOGUE-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP4]]
+; SCALAR_EPILOGUE-NEXT: [[TMP16:%.*]] = sext i32 [[TMP5]] to i64
+; SCALAR_EPILOGUE-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP16]]
+; SCALAR_EPILOGUE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 64 x i8> @llvm.vector.interleave4.nxv64i8(<vscale x 16 x i8> [[TMP12]], <vscale x 16 x i8> [[TMP13]], <vscale x 16 x i8> [[TMP14]], <vscale x 16 x i8> [[TMP15]])
+; SCALAR_EPILOGUE-NEXT: [[INTERLEAVED_MASK3:%.*]] = call <vscale x 64 x i1> @llvm.vector.interleave4.nxv64i1(<vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]])
+; SCALAR_EPILOGUE-NEXT: call void @llvm.masked.store.nxv64i8.p0(<vscale x 64 x i8> [[INTERLEAVED_VEC]], ptr [[TMP17]], i32 1, <vscale x 64 x i1> [[INTERLEAVED_MASK3]])
+; SCALAR_EPILOGUE-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP2]]
; SCALAR_EPILOGUE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]]
-; SCALAR_EPILOGUE-NEXT: [[TMP20:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; SCALAR_EPILOGUE-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; SCALAR_EPILOGUE-NEXT: [[TMP18:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; SCALAR_EPILOGUE-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; SCALAR_EPILOGUE: middle.block:
; SCALAR_EPILOGUE-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_MOD_VF]], 0
; SCALAR_EPILOGUE-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
@@ -248,43 +242,41 @@ define void @masked_strided_factor4(ptr noalias nocapture readonly %p, ptr noali
; PREDICATED_DATA-NEXT: [[N_RND_UP:%.*]] = add i32 [[TMP1]], 1023
; PREDICATED_DATA-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N_RND_UP]], [[TMP1]]
; PREDICATED_DATA-NEXT: [[N_VEC:%.*]] = sub i32 [[N_RND_UP]], [[N_MOD_VF]]
-; PREDICATED_DATA-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32()
-; PREDICATED_DATA-NEXT: [[TMP3:%.*]] = shl nuw i32 [[TMP2]], 4
; PREDICATED_DATA-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[CONV]], i64 0
; PREDICATED_DATA-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-; PREDICATED_DATA-NEXT: [[TMP4:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
-; PREDICATED_DATA-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP3]], i64 0
+; PREDICATED_DATA-NEXT: [[TMP2:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
+; PREDICATED_DATA-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP1]], i64 0
; PREDICATED_DATA-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
; PREDICATED_DATA-NEXT: br label [[VECTOR_BODY:%.*]]
; PREDICATED_DATA: vector.body:
; PREDICATED_DATA-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; PREDICATED_DATA-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP4]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; PREDICATED_DATA-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP2]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; PREDICATED_DATA-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 [[INDEX]], i32 1024)
-; PREDICATED_DATA-NEXT: [[TMP5:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; PREDICATED_DATA-NEXT: [[TMP6:%.*]] = select <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i1> [[TMP5]], <vscale x 16 x i1> zeroinitializer
-; PREDICATED_DATA-NEXT: [[TMP7:%.*]] = shl i32 [[INDEX]], 2
-; PREDICATED_DATA-NEXT: [[TMP8:%.*]] = sext i32 [[TMP7]] to i64
-; PREDICATED_DATA-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP8]]
-; PREDICATED_DATA-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 64 x i1> @llvm.vector.interleave4.nxv64i1(<vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1> [[TMP6]])
-; PREDICATED_DATA-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 64 x i8> @llvm.masked.load.nxv64i8.p0(ptr [[TMP9]], i32 1, <vscale x 64 x i1> [[INTERLEAVED_MASK]], <vscale x 64 x i8> poison)
+; PREDICATED_DATA-NEXT: [[TMP3:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; PREDICATED_DATA-NEXT: [[TMP4:%.*]] = select <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i1> [[TMP3]], <vscale x 16 x i1> zeroinitializer
+; PREDICATED_DATA-NEXT: [[TMP5:%.*]] = shl i32 [[INDEX]], 2
+; PREDICATED_DATA-NEXT: [[TMP6:%.*]] = sext i32 [[TMP5]] to i64
+; PREDICATED_DATA-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP6]]
+; PREDICATED_DATA-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 64 x i1> @llvm.vector.interleave4.nxv64i1(<vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]])
+; PREDICATED_DATA-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 64 x i8> @llvm.masked.load.nxv64i8.p0(ptr [[TMP7]], i32 1, <vscale x 64 x i1> [[INTERLEAVED_MASK]], <vscale x 64 x i8> poison)
; PREDICATED_DATA-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.vector.deinterleave4.nxv64i8(<vscale x 64 x i8> [[WIDE_MASKED_VEC]])
-; PREDICATED_DATA-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 0
-; PREDICATED_DATA-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 1
-; PREDICATED_DATA-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 2
-; PREDICATED_DATA-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 3
+; PREDICATED_DATA-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 0
+; PREDICATED_DATA-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 1
+; PREDICATED_DATA-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 2
+; PREDICATED_DATA-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[STRIDED_VEC]], 3
+; PREDICATED_DATA-NEXT: [[TMP12:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[TMP8]], <vscale x 16 x i8> [[TMP9]])
+; PREDICATED_DATA-NEXT: [[TMP13:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP12]]
; PREDICATED_DATA-NEXT: [[TMP14:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[TMP10]], <vscale x 16 x i8> [[TMP11]])
; PREDICATED_DATA-NEXT: [[TMP15:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP14]]
-; PREDICATED_DATA-NEXT: [[TMP16:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[TMP12]], <vscale x 16 x i8> [[TMP13]])
-; PREDICATED_DATA-NEXT: [[TMP17:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP16]]
-; PREDICATED_DATA-NEXT: [[TMP18:%.*]] = sext i32 [[TMP7]] to i64
-; PREDICATED_DATA-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP18]]
-; PREDICATED_DATA-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 64 x i8> @llvm.vector.interleave4.nxv64i8(<vscale x 16 x i8> [[TMP14]], <vscale x 16 x i8> [[TMP15]], <vscale x 16 x i8> [[TMP16]], <vscale x 16 x i8> [[TMP17]])
-; PREDICATED_DATA-NEXT: [[INTERLEAVED_MASK3:%.*]] = call <vscale x 64 x i1> @llvm.vector.interleave4.nxv64i1(<vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1> [[TMP6]])
-; PREDICATED_DATA-NEXT: call void @llvm.masked.store.nxv64i8.p0(<vscale x 64 x i8> [[INTERLEAVED_VEC]], ptr [[TMP19]], i32 1, <vscale x 64 x i1> [[INTERLEAVED_MASK3]])
-; PREDICATED_DATA-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP3]]
+; PREDICATED_DATA-NEXT: [[TMP16:%.*]] = sext i32 [[TMP5]] to i64
+; PREDICATED_DATA-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP16]]
+; PREDICATED_DATA-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 64 x i8> @llvm.vector.interleave4.nxv64i8(<vscale x 16 x i8> [[TMP12]], <vscale x 16 x i8> [[TMP13]], <vscale x 16 x i8> [[TMP14]], <vscale x 16 x i8> [[TMP15]])
+; PREDICATED_DATA-NEXT: [[INTERLEAVED_MASK3:%.*]] = call <vscale x 64 x i1> @llvm.vector.interleave4.nxv64i1(<vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[TMP4]])
+; PREDICATED_DATA-NEXT: call void @llvm.masked.store.nxv64i8.p0(<vscale x 64 x i8> [[INTERLEAVED_VEC]], ptr [[TMP17]], i32 1, <vscale x 64 x i1> [[INTERLEAVED_MASK3]])
+; PREDICATED_DATA-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP1]]
; PREDICATED_DATA-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]]
-; PREDICATED_DATA-NEXT: [[TMP20:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; PREDICATED_DATA-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; PREDICATED_DATA-NEXT: [[TMP18:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; PREDICATED_DATA-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; PREDICATED_DATA: middle.block:
; PREDICATED_DATA-NEXT: br label [[FOR_END:%.*]]
; PREDICATED_DATA: scalar.ph:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/lmul.ll b/llvm/test/Transforms/LoopVectorize/RISCV/lmul.ll
index 93e0f90..f731d39 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/lmul.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/lmul.ll
@@ -15,7 +15,6 @@ define void @load_store(ptr %p) {
; LMUL1-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; LMUL1-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP1]]
; LMUL1-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; LMUL1-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
; LMUL1-NEXT: br label [[VECTOR_BODY:%.*]]
; LMUL1: vector.body:
; LMUL1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -23,7 +22,7 @@ define void @load_store(ptr %p) {
; LMUL1-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 1 x i64>, ptr [[TMP3]], align 8
; LMUL1-NEXT: [[TMP5:%.*]] = add <vscale x 1 x i64> [[WIDE_LOAD]], splat (i64 1)
; LMUL1-NEXT: store <vscale x 1 x i64> [[TMP5]], ptr [[TMP3]], align 8
-; LMUL1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
+; LMUL1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]]
; LMUL1-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; LMUL1-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; LMUL1: middle.block:
@@ -46,124 +45,112 @@ define void @load_store(ptr %p) {
;
; LMUL2-LABEL: @load_store(
; LMUL2-NEXT: entry:
-; LMUL2-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; LMUL2-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; LMUL2-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; LMUL2-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; LMUL2-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; LMUL2: vector.ph:
-; LMUL2-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; LMUL2-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; LMUL2-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; LMUL2-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; LMUL2-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; LMUL2-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 2
; LMUL2-NEXT: br label [[VECTOR_BODY:%.*]]
; LMUL2: vector.body:
; LMUL2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; LMUL2-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; LMUL2-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; LMUL2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[P:%.*]], i64 [[INDEX]]
-; LMUL2-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP5]], align 8
+; LMUL2-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP5]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP6]])
; LMUL2-NEXT: [[TMP7:%.*]] = add <vscale x 2 x i64> [[WIDE_LOAD]], splat (i64 1)
-; LMUL2-NEXT: store <vscale x 2 x i64> [[TMP7]], ptr [[TMP5]], align 8
-; LMUL2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]]
-; LMUL2-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; LMUL2-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; LMUL2-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP7]], ptr align 8 [[TMP5]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP6]])
+; LMUL2-NEXT: [[TMP10:%.*]] = zext i32 [[TMP6]] to i64
+; LMUL2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP10]], [[INDEX]]
+; LMUL2-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]]
+; LMUL2-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; LMUL2-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; LMUL2: middle.block:
-; LMUL2-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; LMUL2-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; LMUL2-NEXT: br label [[FOR_END:%.*]]
; LMUL2: scalar.ph:
-; LMUL2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; LMUL2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; LMUL2-NEXT: br label [[FOR_BODY:%.*]]
; LMUL2: for.body:
-; LMUL2-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; LMUL2-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; LMUL2-NEXT: [[Q:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 [[IV]]
; LMUL2-NEXT: [[V:%.*]] = load i64, ptr [[Q]], align 8
; LMUL2-NEXT: [[W:%.*]] = add i64 [[V]], 1
; LMUL2-NEXT: store i64 [[W]], ptr [[Q]], align 8
; LMUL2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; LMUL2-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; LMUL2-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; LMUL2-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; LMUL2: for.end:
; LMUL2-NEXT: ret void
;
; LMUL4-LABEL: @load_store(
; LMUL4-NEXT: entry:
-; LMUL4-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; LMUL4-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; LMUL4-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; LMUL4-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; LMUL4-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; LMUL4: vector.ph:
-; LMUL4-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; LMUL4-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; LMUL4-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; LMUL4-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; LMUL4-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; LMUL4-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 4
; LMUL4-NEXT: br label [[VECTOR_BODY:%.*]]
; LMUL4: vector.body:
; LMUL4-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; LMUL4-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; LMUL4-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; LMUL4-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[P:%.*]], i64 [[INDEX]]
-; LMUL4-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i64>, ptr [[TMP5]], align 8
+; LMUL4-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i64> @llvm.vp.load.nxv4i64.p0(ptr align 8 [[TMP5]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP6]])
; LMUL4-NEXT: [[TMP7:%.*]] = add <vscale x 4 x i64> [[WIDE_LOAD]], splat (i64 1)
-; LMUL4-NEXT: store <vscale x 4 x i64> [[TMP7]], ptr [[TMP5]], align 8
-; LMUL4-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]]
-; LMUL4-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; LMUL4-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; LMUL4-NEXT: call void @llvm.vp.store.nxv4i64.p0(<vscale x 4 x i64> [[TMP7]], ptr align 8 [[TMP5]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP6]])
+; LMUL4-NEXT: [[TMP10:%.*]] = zext i32 [[TMP6]] to i64
+; LMUL4-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP10]], [[INDEX]]
+; LMUL4-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]]
+; LMUL4-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; LMUL4-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; LMUL4: middle.block:
-; LMUL4-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; LMUL4-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; LMUL4-NEXT: br label [[FOR_END:%.*]]
; LMUL4: scalar.ph:
-; LMUL4-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; LMUL4-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; LMUL4-NEXT: br label [[FOR_BODY:%.*]]
; LMUL4: for.body:
-; LMUL4-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; LMUL4-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; LMUL4-NEXT: [[Q:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 [[IV]]
; LMUL4-NEXT: [[V:%.*]] = load i64, ptr [[Q]], align 8
; LMUL4-NEXT: [[W:%.*]] = add i64 [[V]], 1
; LMUL4-NEXT: store i64 [[W]], ptr [[Q]], align 8
; LMUL4-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; LMUL4-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; LMUL4-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; LMUL4-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; LMUL4: for.end:
; LMUL4-NEXT: ret void
;
; LMUL8-LABEL: @load_store(
; LMUL8-NEXT: entry:
-; LMUL8-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; LMUL8-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
-; LMUL8-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; LMUL8-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; LMUL8-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; LMUL8: vector.ph:
-; LMUL8-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; LMUL8-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
-; LMUL8-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; LMUL8-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; LMUL8-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; LMUL8-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 8
; LMUL8-NEXT: br label [[VECTOR_BODY:%.*]]
; LMUL8: vector.body:
; LMUL8-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; LMUL8-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; LMUL8-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; LMUL8-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[P:%.*]], i64 [[INDEX]]
-; LMUL8-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i64>, ptr [[TMP5]], align 8
+; LMUL8-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x i64> @llvm.vp.load.nxv8i64.p0(ptr align 8 [[TMP5]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP6]])
; LMUL8-NEXT: [[TMP7:%.*]] = add <vscale x 8 x i64> [[WIDE_LOAD]], splat (i64 1)
-; LMUL8-NEXT: store <vscale x 8 x i64> [[TMP7]], ptr [[TMP5]], align 8
-; LMUL8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]]
-; LMUL8-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; LMUL8-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; LMUL8-NEXT: call void @llvm.vp.store.nxv8i64.p0(<vscale x 8 x i64> [[TMP7]], ptr align 8 [[TMP5]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP6]])
+; LMUL8-NEXT: [[TMP10:%.*]] = zext i32 [[TMP6]] to i64
+; LMUL8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP10]], [[INDEX]]
+; LMUL8-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]]
+; LMUL8-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; LMUL8-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; LMUL8: middle.block:
-; LMUL8-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; LMUL8-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; LMUL8-NEXT: br label [[FOR_END:%.*]]
; LMUL8: scalar.ph:
-; LMUL8-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; LMUL8-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; LMUL8-NEXT: br label [[FOR_BODY:%.*]]
; LMUL8: for.body:
-; LMUL8-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; LMUL8-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; LMUL8-NEXT: [[Q:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 [[IV]]
; LMUL8-NEXT: [[V:%.*]] = load i64, ptr [[Q]], align 8
; LMUL8-NEXT: [[W:%.*]] = add i64 [[V]], 1
; LMUL8-NEXT: store i64 [[W]], ptr [[Q]], align 8
; LMUL8-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; LMUL8-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; LMUL8-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; LMUL8-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; LMUL8: for.end:
; LMUL8-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll b/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll
index 32cb426..3de3943 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll
@@ -123,12 +123,6 @@ define void @trip8_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 8, [[TMP2]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -185,13 +179,16 @@ define void @trip16_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
+; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP3]], 8
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1:%.*]], align 1
-; CHECK-NEXT: [[TMP2:%.*]] = shl <16 x i8> [[WIDE_LOAD]], splat (i8 1)
-; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP4:%.*]], align 1
-; CHECK-NEXT: [[TMP5:%.*]] = add <16 x i8> [[TMP2]], [[WIDE_LOAD1]]
-; CHECK-NEXT: store <16 x i8> [[TMP5]], ptr [[TMP4]], align 1
+; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 16, i32 8, i1 true)
+; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 8 x i8> @llvm.vp.load.nxv8i8.p0(ptr align 1 [[TMP1:%.*]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP5]])
+; CHECK-NEXT: [[TMP6:%.*]] = shl <vscale x 8 x i8> [[VP_OP_LOAD]], splat (i8 1)
+; CHECK-NEXT: [[VP_OP_LOAD1:%.*]] = call <vscale x 8 x i8> @llvm.vp.load.nxv8i8.p0(ptr align 1 [[TMP4:%.*]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP5]])
+; CHECK-NEXT: [[TMP11:%.*]] = add <vscale x 8 x i8> [[TMP6]], [[VP_OP_LOAD1]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP11]], ptr align 1 [[TMP4]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP5]])
; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[FOR_END:%.*]]
@@ -199,7 +196,7 @@ define void @trip16_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 [[I_08]]
; CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[MUL:%.*]] = shl i8 [[TMP7]], 1
@@ -239,13 +236,16 @@ define void @trip32_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
+; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP3]], 16
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <32 x i8>, ptr [[TMP1:%.*]], align 1
-; CHECK-NEXT: [[TMP2:%.*]] = shl <32 x i8> [[WIDE_LOAD]], splat (i8 1)
-; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <32 x i8>, ptr [[TMP4:%.*]], align 1
-; CHECK-NEXT: [[TMP5:%.*]] = add <32 x i8> [[TMP2]], [[WIDE_LOAD1]]
-; CHECK-NEXT: store <32 x i8> [[TMP5]], ptr [[TMP4]], align 1
+; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 32, i32 16, i1 true)
+; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP1:%.*]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP5]])
+; CHECK-NEXT: [[TMP6:%.*]] = shl <vscale x 16 x i8> [[VP_OP_LOAD]], splat (i8 1)
+; CHECK-NEXT: [[VP_OP_LOAD1:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP4:%.*]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP5]])
+; CHECK-NEXT: [[TMP11:%.*]] = add <vscale x 16 x i8> [[TMP6]], [[VP_OP_LOAD1]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[TMP11]], ptr align 1 [[TMP4]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP5]])
; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[FOR_END:%.*]]
@@ -253,7 +253,7 @@ define void @trip32_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 [[I_08]]
; CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[MUL:%.*]] = shl i8 [[TMP7]], 1
@@ -292,26 +292,24 @@ define void @trip24_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
+; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 16
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP1]], align 1
-; CHECK-NEXT: [[TMP3:%.*]] = shl <8 x i8> [[WIDE_LOAD]], splat (i8 1)
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[DST:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <8 x i8>, ptr [[TMP4]], align 1
-; CHECK-NEXT: [[TMP6:%.*]] = add <8 x i8> [[TMP3]], [[WIDE_LOAD1]]
-; CHECK-NEXT: store <8 x i8> [[TMP6]], ptr [[TMP4]], align 1
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
-; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 24
-; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 24, i32 16, i1 true)
+; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[SRC:%.*]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP5]])
+; CHECK-NEXT: [[TMP6:%.*]] = shl <vscale x 16 x i8> [[VP_OP_LOAD]], splat (i8 1)
+; CHECK-NEXT: [[VP_OP_LOAD1:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[DST:%.*]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP5]])
+; CHECK-NEXT: [[TMP7:%.*]] = add <vscale x 16 x i8> [[TMP6]], [[VP_OP_LOAD1]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[TMP7]], ptr align 1 [[DST]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP5]])
+; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[I_08]]
; CHECK-NEXT: [[TMP8:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[MUL:%.*]] = shl i8 [[TMP8]], 1
@@ -321,7 +319,7 @@ define void @trip24_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture
; CHECK-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX1]], align 1
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 24
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -360,35 +358,31 @@ define i8 @mul_non_pow_2_low_trip_count(ptr noalias %a) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <16 x i8> [ <i8 2, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, [[VECTOR_PH]] ], [ [[TMP2:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i64> poison, i64 [[INDEX]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i64> [[BROADCAST_SPLATINSERT]], <16 x i64> poison, <16 x i32> zeroinitializer
-; CHECK-NEXT: [[VEC_IV:%.*]] = add <16 x i64> [[BROADCAST_SPLAT]], <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15>
-; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = icmp ule <16 x i64> [[VEC_IV]], splat (i64 9)
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <8 x i8> [ <i8 2, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, [[VECTOR_PH]] ], [ [[TMP1:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[TMP0]], i32 1, <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i8> poison)
-; CHECK-NEXT: [[TMP2]] = mul <16 x i8> [[WIDE_MASKED_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i8> [[TMP2]], <16 x i8> [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
-; CHECK-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP0]], align 1
+; CHECK-NEXT: [[TMP1]] = mul <8 x i8> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], 8
+; CHECK-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[TMP4:%.*]] = call i8 @llvm.vector.reduce.mul.v16i8(<16 x i8> [[TMP3]])
-; CHECK-NEXT: br label [[FOR_END:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = call i8 @llvm.vector.reduce.mul.v8i8(<8 x i8> [[TMP1]])
+; CHECK-NEXT: br label [[SCALAR_PH]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i8 [ 2, [[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 8, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i8 [ [[TMP3]], [[MIDDLE_BLOCK]] ], [ 2, [[ENTRY]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[RDX:%.*]] = phi i8 [ 2, [[SCALAR_PH]] ], [ [[MUL:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[RDX:%.*]] = phi i8 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MUL:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[GEP]], align 1
; CHECK-NEXT: [[MUL]] = mul i8 [[TMP5]], [[RDX]]
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 10
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: for.end:
-; CHECK-NEXT: [[MUL_LCSSA:%.*]] = phi i8 [ [[MUL]], [[FOR_BODY]] ], [ [[TMP4]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[MUL_LCSSA:%.*]] = phi i8 [ [[MUL]], [[FOR_BODY]] ]
; CHECK-NEXT: ret i8 [[MUL_LCSSA]]
;
entry:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/mask-index-type.ll b/llvm/test/Transforms/LoopVectorize/RISCV/mask-index-type.ll
index 10ba208..58506f7 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/mask-index-type.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/mask-index-type.ll
@@ -11,15 +11,8 @@ target triple = "riscv64"
define void @test(ptr noalias nocapture %a, ptr noalias nocapture %b, i32 %v) {
; VLENUNK-LABEL: @test(
; VLENUNK-NEXT: entry:
-; VLENUNK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; VLENUNK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; VLENUNK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; VLENUNK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; VLENUNK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; VLENUNK: vector.ph:
-; VLENUNK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; VLENUNK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; VLENUNK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; VLENUNK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; VLENUNK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; VLENUNK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; VLENUNK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[V:%.*]], i64 0
@@ -27,32 +20,41 @@ define void @test(ptr noalias nocapture %a, ptr noalias nocapture %b, i32 %v) {
; VLENUNK-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; VLENUNK-NEXT: [[TMP8:%.*]] = mul <vscale x 4 x i64> [[TMP6]], splat (i64 1)
; VLENUNK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP8]]
-; VLENUNK-NEXT: [[TMP11:%.*]] = mul i64 1, [[TMP5]]
-; VLENUNK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP11]], i64 0
-; VLENUNK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; VLENUNK-NEXT: br label [[VECTOR_BODY:%.*]]
; VLENUNK: vector.body:
; VLENUNK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; VLENUNK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; VLENUNK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; VLENUNK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; VLENUNK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP7]], i64 0
+; VLENUNK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT3]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; VLENUNK-NEXT: [[TMP15:%.*]] = zext i32 [[TMP7]] to i64
+; VLENUNK-NEXT: [[TMP9:%.*]] = mul i64 1, [[TMP15]]
+; VLENUNK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP9]], i64 0
+; VLENUNK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT1]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; VLENUNK-NEXT: [[TMP10:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; VLENUNK-NEXT: [[TMP11:%.*]] = icmp ult <vscale x 4 x i32> [[TMP10]], [[BROADCAST_SPLAT4]]
; VLENUNK-NEXT: [[TMP13:%.*]] = icmp ult <vscale x 4 x i64> [[VEC_IND]], splat (i64 512)
+; VLENUNK-NEXT: [[TMP16:%.*]] = select <vscale x 4 x i1> [[TMP11]], <vscale x 4 x i1> [[TMP13]], <vscale x 4 x i1> zeroinitializer
; VLENUNK-NEXT: [[TMP14:%.*]] = getelementptr i32, ptr [[A:%.*]], i64 [[INDEX]]
-; VLENUNK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP14]], i32 4, <vscale x 4 x i1> [[TMP13]], <vscale x 4 x i32> poison)
-; VLENUNK-NEXT: [[PREDPHI:%.*]] = select <vscale x 4 x i1> [[TMP13]], <vscale x 4 x i32> [[WIDE_MASKED_LOAD]], <vscale x 4 x i32> zeroinitializer
+; VLENUNK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP14]], <vscale x 4 x i1> [[TMP13]], i32 [[TMP7]])
+; VLENUNK-NEXT: [[PREDPHI:%.*]] = select <vscale x 4 x i1> [[TMP16]], <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i32> zeroinitializer
; VLENUNK-NEXT: [[TMP17:%.*]] = add <vscale x 4 x i32> [[PREDPHI]], [[BROADCAST_SPLAT]]
; VLENUNK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]]
-; VLENUNK-NEXT: store <vscale x 4 x i32> [[TMP17]], ptr [[TMP18]], align 4
-; VLENUNK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; VLENUNK-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP17]], ptr align 4 [[TMP18]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
+; VLENUNK-NEXT: [[TMP19:%.*]] = zext i32 [[TMP7]] to i64
+; VLENUNK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP19]], [[INDEX]]
+; VLENUNK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]]
; VLENUNK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; VLENUNK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; VLENUNK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; VLENUNK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; VLENUNK: middle.block:
-; VLENUNK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; VLENUNK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; VLENUNK-NEXT: br label [[FOR_END:%.*]]
; VLENUNK: scalar.ph:
-; VLENUNK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; VLENUNK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; VLENUNK-NEXT: br label [[FOR_BODY:%.*]]
; VLENUNK: for.body:
-; VLENUNK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
+; VLENUNK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
; VLENUNK-NEXT: [[ICMP:%.*]] = icmp ult i64 [[IV]], 512
; VLENUNK-NEXT: br i1 [[ICMP]], label [[DO_LOAD:%.*]], label [[LATCH]]
; VLENUNK: do_load:
@@ -66,7 +68,7 @@ define void @test(ptr noalias nocapture %a, ptr noalias nocapture %b, i32 %v) {
; VLENUNK-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX2]], align 4
; VLENUNK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; VLENUNK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; VLENUNK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; VLENUNK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; VLENUNK: for.end:
; VLENUNK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll b/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll
index 6800a93..f853cf1 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll
@@ -16,11 +16,7 @@
define void @foo4(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture readonly %trigger) local_unnamed_addr #0 {
; RV32-LABEL: @foo4(
; RV32-NEXT: entry:
-; RV32-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; RV32-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; RV32-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]])
-; RV32-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 625, [[TMP2]]
-; RV32-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
+; RV32-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
; RV32: vector.memcheck:
; RV32-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i32 79880
; RV32-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[TRIGGER:%.*]], i32 39940
@@ -34,42 +30,41 @@ define void @foo4(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea
; RV32-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[FOUND_CONFLICT]], [[FOUND_CONFLICT5]]
; RV32-NEXT: br i1 [[CONFLICT_RDX]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; RV32: vector.ph:
-; RV32-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; RV32-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
-; RV32-NEXT: [[N_MOD_VF:%.*]] = urem i64 625, [[TMP4]]
-; RV32-NEXT: [[N_VEC:%.*]] = sub i64 625, [[N_MOD_VF]]
; RV32-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; RV32-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
-; RV32-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], 16
; RV32-NEXT: [[TMP7:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
; RV32-NEXT: [[TMP9:%.*]] = mul <vscale x 2 x i64> [[TMP7]], splat (i64 16)
; RV32-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP9]]
-; RV32-NEXT: [[TMP12:%.*]] = mul i64 16, [[TMP6]]
-; RV32-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP12]], i64 0
-; RV32-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; RV32-NEXT: br label [[VECTOR_BODY:%.*]]
; RV32: vector.body:
; RV32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; RV32-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; RV32-NEXT: [[AVL:%.*]] = phi i64 [ 625, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; RV32-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; RV32-NEXT: [[TMP8:%.*]] = zext i32 [[TMP10]] to i64
+; RV32-NEXT: [[TMP11:%.*]] = mul i64 16, [[TMP8]]
+; RV32-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP11]], i64 0
+; RV32-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; RV32-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TRIGGER]], <vscale x 2 x i64> [[VEC_IND]]
-; RV32-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32.nxv2p0(<vscale x 2 x ptr> [[TMP13]], i32 4, <vscale x 2 x i1> splat (i1 true), <vscale x 2 x i32> poison), !alias.scope [[META0:![0-9]+]]
+; RV32-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i32> @llvm.vp.gather.nxv2i32.nxv2p0(<vscale x 2 x ptr> align 4 [[TMP13]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]]), !alias.scope [[META0:![0-9]+]]
; RV32-NEXT: [[TMP14:%.*]] = icmp slt <vscale x 2 x i32> [[WIDE_MASKED_GATHER]], splat (i32 100)
; RV32-NEXT: [[TMP15:%.*]] = shl nuw nsw <vscale x 2 x i64> [[VEC_IND]], splat (i64 1)
; RV32-NEXT: [[TMP16:%.*]] = getelementptr inbounds double, ptr [[B]], <vscale x 2 x i64> [[TMP15]]
-; RV32-NEXT: [[WIDE_MASKED_GATHER6:%.*]] = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64.nxv2p0(<vscale x 2 x ptr> [[TMP16]], i32 8, <vscale x 2 x i1> [[TMP14]], <vscale x 2 x double> poison), !alias.scope [[META3:![0-9]+]]
+; RV32-NEXT: [[WIDE_MASKED_GATHER6:%.*]] = call <vscale x 2 x double> @llvm.vp.gather.nxv2f64.nxv2p0(<vscale x 2 x ptr> align 8 [[TMP16]], <vscale x 2 x i1> [[TMP14]], i32 [[TMP10]]), !alias.scope [[META3:![0-9]+]]
; RV32-NEXT: [[TMP17:%.*]] = sitofp <vscale x 2 x i32> [[WIDE_MASKED_GATHER]] to <vscale x 2 x double>
; RV32-NEXT: [[TMP18:%.*]] = fadd <vscale x 2 x double> [[WIDE_MASKED_GATHER6]], [[TMP17]]
; RV32-NEXT: [[TMP19:%.*]] = getelementptr inbounds double, ptr [[A]], <vscale x 2 x i64> [[VEC_IND]]
-; RV32-NEXT: call void @llvm.masked.scatter.nxv2f64.nxv2p0(<vscale x 2 x double> [[TMP18]], <vscale x 2 x ptr> [[TMP19]], i32 8, <vscale x 2 x i1> [[TMP14]]), !alias.scope [[META5:![0-9]+]], !noalias [[META7:![0-9]+]]
-; RV32-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
+; RV32-NEXT: call void @llvm.vp.scatter.nxv2f64.nxv2p0(<vscale x 2 x double> [[TMP18]], <vscale x 2 x ptr> align 8 [[TMP19]], <vscale x 2 x i1> [[TMP14]], i32 [[TMP10]]), !alias.scope [[META5:![0-9]+]], !noalias [[META7:![0-9]+]]
+; RV32-NEXT: [[TMP20:%.*]] = zext i32 [[TMP10]] to i64
+; RV32-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP20]], [[INDEX]]
+; RV32-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP20]]
; RV32-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; RV32-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; RV32-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; RV32-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], 625
+; RV32-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; RV32: middle.block:
-; RV32-NEXT: [[CMP_N:%.*]] = icmp eq i64 625, [[N_VEC]]
-; RV32-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; RV32-NEXT: br label [[FOR_END:%.*]]
; RV32: scalar.ph:
-; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
+; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
; RV32-NEXT: br label [[FOR_BODY:%.*]]
; RV32: for.body:
; RV32-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
@@ -89,17 +84,13 @@ define void @foo4(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea
; RV32: for.inc:
; RV32-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 16
; RV32-NEXT: [[CMP:%.*]] = icmp ult i64 [[INDVARS_IV_NEXT]], 10000
-; RV32-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END]], !llvm.loop [[LOOP11:![0-9]+]]
+; RV32-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END]], !llvm.loop [[LOOP12:![0-9]+]]
; RV32: for.end:
; RV32-NEXT: ret void
;
; RV64-LABEL: @foo4(
; RV64-NEXT: entry:
-; RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; RV64-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; RV64-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]])
-; RV64-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 625, [[TMP2]]
-; RV64-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
+; RV64-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
; RV64: vector.memcheck:
; RV64-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 79880
; RV64-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[TRIGGER:%.*]], i64 39940
@@ -113,42 +104,41 @@ define void @foo4(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea
; RV64-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[FOUND_CONFLICT]], [[FOUND_CONFLICT5]]
; RV64-NEXT: br i1 [[CONFLICT_RDX]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; RV64: vector.ph:
-; RV64-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; RV64-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
-; RV64-NEXT: [[N_MOD_VF:%.*]] = urem i64 625, [[TMP4]]
-; RV64-NEXT: [[N_VEC:%.*]] = sub i64 625, [[N_MOD_VF]]
; RV64-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; RV64-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
-; RV64-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], 16
; RV64-NEXT: [[TMP7:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
; RV64-NEXT: [[TMP9:%.*]] = mul <vscale x 2 x i64> [[TMP7]], splat (i64 16)
; RV64-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP9]]
-; RV64-NEXT: [[TMP12:%.*]] = mul i64 16, [[TMP6]]
-; RV64-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP12]], i64 0
-; RV64-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; RV64-NEXT: br label [[VECTOR_BODY:%.*]]
; RV64: vector.body:
; RV64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; RV64-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; RV64-NEXT: [[AVL:%.*]] = phi i64 [ 625, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; RV64-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; RV64-NEXT: [[TMP8:%.*]] = zext i32 [[TMP10]] to i64
+; RV64-NEXT: [[TMP11:%.*]] = mul i64 16, [[TMP8]]
+; RV64-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP11]], i64 0
+; RV64-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; RV64-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TRIGGER]], <vscale x 2 x i64> [[VEC_IND]]
-; RV64-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32.nxv2p0(<vscale x 2 x ptr> [[TMP13]], i32 4, <vscale x 2 x i1> splat (i1 true), <vscale x 2 x i32> poison), !alias.scope [[META0:![0-9]+]]
+; RV64-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i32> @llvm.vp.gather.nxv2i32.nxv2p0(<vscale x 2 x ptr> align 4 [[TMP13]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]]), !alias.scope [[META0:![0-9]+]]
; RV64-NEXT: [[TMP14:%.*]] = icmp slt <vscale x 2 x i32> [[WIDE_MASKED_GATHER]], splat (i32 100)
; RV64-NEXT: [[TMP15:%.*]] = shl nuw nsw <vscale x 2 x i64> [[VEC_IND]], splat (i64 1)
; RV64-NEXT: [[TMP16:%.*]] = getelementptr inbounds double, ptr [[B]], <vscale x 2 x i64> [[TMP15]]
-; RV64-NEXT: [[WIDE_MASKED_GATHER6:%.*]] = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64.nxv2p0(<vscale x 2 x ptr> [[TMP16]], i32 8, <vscale x 2 x i1> [[TMP14]], <vscale x 2 x double> poison), !alias.scope [[META3:![0-9]+]]
+; RV64-NEXT: [[WIDE_MASKED_GATHER6:%.*]] = call <vscale x 2 x double> @llvm.vp.gather.nxv2f64.nxv2p0(<vscale x 2 x ptr> align 8 [[TMP16]], <vscale x 2 x i1> [[TMP14]], i32 [[TMP10]]), !alias.scope [[META3:![0-9]+]]
; RV64-NEXT: [[TMP17:%.*]] = sitofp <vscale x 2 x i32> [[WIDE_MASKED_GATHER]] to <vscale x 2 x double>
; RV64-NEXT: [[TMP18:%.*]] = fadd <vscale x 2 x double> [[WIDE_MASKED_GATHER6]], [[TMP17]]
; RV64-NEXT: [[TMP19:%.*]] = getelementptr inbounds double, ptr [[A]], <vscale x 2 x i64> [[VEC_IND]]
-; RV64-NEXT: call void @llvm.masked.scatter.nxv2f64.nxv2p0(<vscale x 2 x double> [[TMP18]], <vscale x 2 x ptr> [[TMP19]], i32 8, <vscale x 2 x i1> [[TMP14]]), !alias.scope [[META5:![0-9]+]], !noalias [[META7:![0-9]+]]
-; RV64-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
+; RV64-NEXT: call void @llvm.vp.scatter.nxv2f64.nxv2p0(<vscale x 2 x double> [[TMP18]], <vscale x 2 x ptr> align 8 [[TMP19]], <vscale x 2 x i1> [[TMP14]], i32 [[TMP10]]), !alias.scope [[META5:![0-9]+]], !noalias [[META7:![0-9]+]]
+; RV64-NEXT: [[TMP20:%.*]] = zext i32 [[TMP10]] to i64
+; RV64-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP20]], [[INDEX]]
+; RV64-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP20]]
; RV64-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; RV64-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; RV64-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; RV64-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], 625
+; RV64-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; RV64: middle.block:
-; RV64-NEXT: [[CMP_N:%.*]] = icmp eq i64 625, [[N_VEC]]
-; RV64-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; RV64-NEXT: br label [[FOR_END:%.*]]
; RV64: scalar.ph:
-; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
+; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
; RV64-NEXT: br label [[FOR_BODY:%.*]]
; RV64: for.body:
; RV64-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
@@ -168,7 +158,7 @@ define void @foo4(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea
; RV64: for.inc:
; RV64-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 16
; RV64-NEXT: [[CMP:%.*]] = icmp ult i64 [[INDVARS_IV_NEXT]], 10000
-; RV64-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END]], !llvm.loop [[LOOP11:![0-9]+]]
+; RV64-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END]], !llvm.loop [[LOOP12:![0-9]+]]
; RV64: for.end:
; RV64-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll b/llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll
index ee6b950..bbd78a4 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll
@@ -1,8 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --filter-out-after "^scalar.ph:" --version 4
-; RUN: opt -passes=loop-vectorize -mattr=+v -S < %s | FileCheck %s --check-prefixes=CHECK,V
-; RUN: opt -passes=loop-vectorize -mattr=+v,+experimental-zvqdotq -S < %s | FileCheck %s --check-prefixes=CHECK,ZVQDOTQ
-; RUN: opt -passes=loop-vectorize -mattr=+v -scalable-vectorization=off -S < %s | FileCheck %s --check-prefixes=FIXED,FIXED-V
-; RUN: opt -passes=loop-vectorize -mattr=+v,+experimental-zvqdotq -scalable-vectorization=off -S < %s | FileCheck %s --check-prefixes=FIXED,FIXED-ZVQDOTQ
+; RUN: opt -passes=loop-vectorize -mattr=+v -prefer-predicate-over-epilogue=scalar-epilogue -S < %s | FileCheck %s --check-prefixes=CHECK,V
+; RUN: opt -passes=loop-vectorize -mattr=+v,+experimental-zvqdotq -prefer-predicate-over-epilogue=scalar-epilogue -S < %s | FileCheck %s --check-prefixes=CHECK,ZVQDOTQ
+; RUN: opt -passes=loop-vectorize -mattr=+v -scalable-vectorization=off -prefer-predicate-over-epilogue=scalar-epilogue -S < %s | FileCheck %s --check-prefixes=FIXED,FIXED-V
+; RUN: opt -passes=loop-vectorize -mattr=+v,+experimental-zvqdotq -scalable-vectorization=off -prefer-predicate-over-epilogue=scalar-epilogue -S < %s | FileCheck %s --check-prefixes=FIXED,FIXED-ZVQDOTQ
+
+; TODO: Remove -prefer-predicate-over-epilogue=scalar-epilogue when partial reductions with EVL tail folding is supported.
target triple = "riscv64-none-unknown-elf"
@@ -19,8 +21,6 @@ define i32 @vqdot(ptr %a, ptr %b) #0 {
; V-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; V-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; V-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; V-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; V-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; V-NEXT: br label [[VECTOR_BODY:%.*]]
; V: vector.body:
; V-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -33,7 +33,7 @@ define i32 @vqdot(ptr %a, ptr %b) #0 {
; V-NEXT: [[TMP11:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32>
; V-NEXT: [[TMP12:%.*]] = mul <vscale x 4 x i32> [[TMP11]], [[TMP8]]
; V-NEXT: [[TMP13]] = add <vscale x 4 x i32> [[TMP12]], [[VEC_PHI]]
-; V-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; V-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; V-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; V-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; V: middle.block:
@@ -54,8 +54,6 @@ define i32 @vqdot(ptr %a, ptr %b) #0 {
; ZVQDOTQ-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; ZVQDOTQ-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; ZVQDOTQ-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; ZVQDOTQ-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; ZVQDOTQ-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; ZVQDOTQ-NEXT: br label [[VECTOR_BODY:%.*]]
; ZVQDOTQ: vector.body:
; ZVQDOTQ-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -68,7 +66,7 @@ define i32 @vqdot(ptr %a, ptr %b) #0 {
; ZVQDOTQ-NEXT: [[TMP11:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32>
; ZVQDOTQ-NEXT: [[TMP12:%.*]] = mul <vscale x 4 x i32> [[TMP11]], [[TMP8]]
; ZVQDOTQ-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 1 x i32> @llvm.experimental.vector.partial.reduce.add.nxv1i32.nxv4i32(<vscale x 1 x i32> [[VEC_PHI]], <vscale x 4 x i32> [[TMP12]])
-; ZVQDOTQ-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; ZVQDOTQ-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; ZVQDOTQ-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; ZVQDOTQ-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; ZVQDOTQ: middle.block:
@@ -183,8 +181,6 @@ define i32 @vqdotu(ptr %a, ptr %b) #0 {
; V-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; V-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; V-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; V-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; V-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; V-NEXT: br label [[VECTOR_BODY:%.*]]
; V: vector.body:
; V-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -197,7 +193,7 @@ define i32 @vqdotu(ptr %a, ptr %b) #0 {
; V-NEXT: [[TMP11:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32>
; V-NEXT: [[TMP12:%.*]] = mul <vscale x 4 x i32> [[TMP11]], [[TMP8]]
; V-NEXT: [[TMP13]] = add <vscale x 4 x i32> [[TMP12]], [[VEC_PHI]]
-; V-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; V-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; V-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; V-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; V: middle.block:
@@ -218,8 +214,6 @@ define i32 @vqdotu(ptr %a, ptr %b) #0 {
; ZVQDOTQ-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; ZVQDOTQ-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; ZVQDOTQ-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; ZVQDOTQ-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; ZVQDOTQ-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; ZVQDOTQ-NEXT: br label [[VECTOR_BODY:%.*]]
; ZVQDOTQ: vector.body:
; ZVQDOTQ-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -232,7 +226,7 @@ define i32 @vqdotu(ptr %a, ptr %b) #0 {
; ZVQDOTQ-NEXT: [[TMP11:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32>
; ZVQDOTQ-NEXT: [[TMP12:%.*]] = mul <vscale x 4 x i32> [[TMP11]], [[TMP8]]
; ZVQDOTQ-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 1 x i32> @llvm.experimental.vector.partial.reduce.add.nxv1i32.nxv4i32(<vscale x 1 x i32> [[VEC_PHI]], <vscale x 4 x i32> [[TMP12]])
-; ZVQDOTQ-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; ZVQDOTQ-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; ZVQDOTQ-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; ZVQDOTQ-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; ZVQDOTQ: middle.block:
@@ -347,8 +341,6 @@ define i32 @vqdotsu(ptr %a, ptr %b) #0 {
; V-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; V-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; V-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; V-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; V-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; V-NEXT: br label [[VECTOR_BODY:%.*]]
; V: vector.body:
; V-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -361,7 +353,7 @@ define i32 @vqdotsu(ptr %a, ptr %b) #0 {
; V-NEXT: [[TMP11:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32>
; V-NEXT: [[TMP12:%.*]] = mul <vscale x 4 x i32> [[TMP11]], [[TMP8]]
; V-NEXT: [[TMP13]] = add <vscale x 4 x i32> [[TMP12]], [[VEC_PHI]]
-; V-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; V-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; V-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; V-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; V: middle.block:
@@ -382,8 +374,6 @@ define i32 @vqdotsu(ptr %a, ptr %b) #0 {
; ZVQDOTQ-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; ZVQDOTQ-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; ZVQDOTQ-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; ZVQDOTQ-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; ZVQDOTQ-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; ZVQDOTQ-NEXT: br label [[VECTOR_BODY:%.*]]
; ZVQDOTQ: vector.body:
; ZVQDOTQ-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -396,7 +386,7 @@ define i32 @vqdotsu(ptr %a, ptr %b) #0 {
; ZVQDOTQ-NEXT: [[TMP11:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32>
; ZVQDOTQ-NEXT: [[TMP12:%.*]] = mul <vscale x 4 x i32> [[TMP11]], [[TMP8]]
; ZVQDOTQ-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 1 x i32> @llvm.experimental.vector.partial.reduce.add.nxv1i32.nxv4i32(<vscale x 1 x i32> [[VEC_PHI]], <vscale x 4 x i32> [[TMP12]])
-; ZVQDOTQ-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; ZVQDOTQ-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; ZVQDOTQ-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; ZVQDOTQ-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; ZVQDOTQ: middle.block:
@@ -510,8 +500,6 @@ define i32 @vqdotsu2(ptr %a, ptr %b) #0 {
; V-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; V-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; V-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; V-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; V-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; V-NEXT: br label [[VECTOR_BODY:%.*]]
; V: vector.body:
; V-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -524,7 +512,7 @@ define i32 @vqdotsu2(ptr %a, ptr %b) #0 {
; V-NEXT: [[TMP11:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32>
; V-NEXT: [[TMP12:%.*]] = mul <vscale x 4 x i32> [[TMP11]], [[TMP8]]
; V-NEXT: [[TMP13]] = add <vscale x 4 x i32> [[TMP12]], [[VEC_PHI]]
-; V-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; V-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; V-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; V-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; V: middle.block:
@@ -545,8 +533,6 @@ define i32 @vqdotsu2(ptr %a, ptr %b) #0 {
; ZVQDOTQ-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; ZVQDOTQ-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; ZVQDOTQ-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; ZVQDOTQ-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; ZVQDOTQ-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; ZVQDOTQ-NEXT: br label [[VECTOR_BODY:%.*]]
; ZVQDOTQ: vector.body:
; ZVQDOTQ-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -559,7 +545,7 @@ define i32 @vqdotsu2(ptr %a, ptr %b) #0 {
; ZVQDOTQ-NEXT: [[TMP11:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32>
; ZVQDOTQ-NEXT: [[TMP12:%.*]] = mul <vscale x 4 x i32> [[TMP11]], [[TMP8]]
; ZVQDOTQ-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 1 x i32> @llvm.experimental.vector.partial.reduce.add.nxv1i32.nxv4i32(<vscale x 1 x i32> [[VEC_PHI]], <vscale x 4 x i32> [[TMP12]])
-; ZVQDOTQ-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; ZVQDOTQ-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; ZVQDOTQ-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; ZVQDOTQ-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; ZVQDOTQ: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/pr87378-vpinstruction-or-drop-poison-generating-flags.ll b/llvm/test/Transforms/LoopVectorize/RISCV/pr87378-vpinstruction-or-drop-poison-generating-flags.ll
index b5b62d0..c95adf2 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/pr87378-vpinstruction-or-drop-poison-generating-flags.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/pr87378-vpinstruction-or-drop-poison-generating-flags.ll
@@ -9,15 +9,8 @@ define void @pr87378_vpinstruction_or_drop_poison_generating_flags(ptr %arg, i64
; CHECK-LABEL: define void @pr87378_vpinstruction_or_drop_poison_generating_flags(
; CHECK-SAME: ptr [[ARG:%.*]], i64 [[A:%.*]], i64 [[B:%.*]], i64 [[C:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1001, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1001, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1001, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[A]], i64 0
@@ -29,39 +22,49 @@ define void @pr87378_vpinstruction_or_drop_poison_generating_flags(ptr %arg, i64
; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
; CHECK-NEXT: [[TMP7:%.*]] = mul <vscale x 8 x i64> [[TMP6]], splat (i64 1)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 8 x i64> zeroinitializer, [[TMP7]]
-; CHECK-NEXT: [[TMP8:%.*]] = mul i64 1, [[TMP5]]
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP8]], i64 0
-; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 8 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1001, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP25:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT7:%.*]] = insertelement <vscale x 8 x i32> poison, i32 [[TMP25]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT8:%.*]] = shufflevector <vscale x 8 x i32> [[BROADCAST_SPLATINSERT7]], <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP25]] to i64
+; CHECK-NEXT: [[TMP9:%.*]] = mul i64 1, [[TMP8]]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT5:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP9]], i64 0
+; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[BROADCAST_SPLATINSERT5]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP10:%.*]] = call <vscale x 8 x i32> @llvm.stepvector.nxv8i32()
+; CHECK-NEXT: [[TMP11:%.*]] = icmp ult <vscale x 8 x i32> [[TMP10]], [[BROADCAST_SPLAT8]]
; CHECK-NEXT: [[TMP13:%.*]] = icmp ule <vscale x 8 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP28:%.*]] = select <vscale x 8 x i1> [[TMP11]], <vscale x 8 x i1> [[TMP13]], <vscale x 8 x i1> zeroinitializer
; CHECK-NEXT: [[TMP14:%.*]] = icmp ule <vscale x 8 x i64> [[VEC_IND]], [[BROADCAST_SPLAT2]]
-; CHECK-NEXT: [[TMP15:%.*]] = select <vscale x 8 x i1> [[TMP13]], <vscale x 8 x i1> [[TMP14]], <vscale x 8 x i1> zeroinitializer
+; CHECK-NEXT: [[TMP15:%.*]] = select <vscale x 8 x i1> [[TMP28]], <vscale x 8 x i1> [[TMP14]], <vscale x 8 x i1> zeroinitializer
; CHECK-NEXT: [[TMP16:%.*]] = xor <vscale x 8 x i1> [[TMP13]], splat (i1 true)
-; CHECK-NEXT: [[TMP17:%.*]] = or <vscale x 8 x i1> [[TMP15]], [[TMP16]]
+; CHECK-NEXT: [[TMP29:%.*]] = select <vscale x 8 x i1> [[TMP11]], <vscale x 8 x i1> [[TMP16]], <vscale x 8 x i1> zeroinitializer
+; CHECK-NEXT: [[TMP17:%.*]] = or <vscale x 8 x i1> [[TMP15]], [[TMP29]]
; CHECK-NEXT: [[TMP18:%.*]] = icmp ule <vscale x 8 x i64> [[VEC_IND]], [[BROADCAST_SPLAT4]]
; CHECK-NEXT: [[TMP19:%.*]] = select <vscale x 8 x i1> [[TMP17]], <vscale x 8 x i1> [[TMP18]], <vscale x 8 x i1> zeroinitializer
; CHECK-NEXT: [[TMP20:%.*]] = xor <vscale x 8 x i1> [[TMP14]], splat (i1 true)
-; CHECK-NEXT: [[TMP21:%.*]] = select <vscale x 8 x i1> [[TMP13]], <vscale x 8 x i1> [[TMP20]], <vscale x 8 x i1> zeroinitializer
+; CHECK-NEXT: [[TMP21:%.*]] = select <vscale x 8 x i1> [[TMP28]], <vscale x 8 x i1> [[TMP20]], <vscale x 8 x i1> zeroinitializer
; CHECK-NEXT: [[TMP22:%.*]] = or <vscale x 8 x i1> [[TMP19]], [[TMP21]]
; CHECK-NEXT: [[TMP23:%.*]] = extractelement <vscale x 8 x i1> [[TMP21]], i32 0
; CHECK-NEXT: [[PREDPHI:%.*]] = select i1 [[TMP23]], i64 poison, i64 [[INDEX]]
; CHECK-NEXT: [[TMP24:%.*]] = getelementptr i16, ptr [[ARG]], i64 [[PREDPHI]]
-; CHECK-NEXT: call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> zeroinitializer, ptr [[TMP24]], i32 2, <vscale x 8 x i1> [[TMP22]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv8i16.p0(<vscale x 8 x i16> zeroinitializer, ptr align 2 [[TMP24]], <vscale x 8 x i1> [[TMP22]], i32 [[TMP25]])
+; CHECK-NEXT: [[TMP26:%.*]] = zext i32 [[TMP25]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP26]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP26]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: [[TMP27:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1001
+; CHECK-NEXT: br i1 [[TMP27]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1001, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
; CHECK: loop.header:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
; CHECK-NEXT: [[C_1:%.*]] = icmp ule i64 [[IV]], [[A]]
; CHECK-NEXT: br i1 [[C_1]], label [[THEN_1:%.*]], label [[ELSE_1:%.*]]
; CHECK: then.1:
@@ -80,7 +83,7 @@ define void @pr87378_vpinstruction_or_drop_poison_generating_flags(ptr %arg, i64
; CHECK: loop.latch:
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
; CHECK-NEXT: [[ICMP:%.*]] = icmp eq i64 [[IV]], 1000
-; CHECK-NEXT: br i1 [[ICMP]], label [[EXIT]], label [[LOOP_HEADER]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[ICMP]], label [[EXIT]], label [[LOOP_HEADER]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -118,8 +121,9 @@ exit:
ret void
}
;.
-; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]], [[META3:![0-9]+]]}
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
-; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
-; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
+; CHECK: [[META2]] = !{!"llvm.loop.isvectorized.tailfoldingstyle", !"evl"}
+; CHECK: [[META3]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META3]], [[META1]]}
;.
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll b/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll
index d41d47a..ca94fce 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll
@@ -7,12 +7,6 @@ define void @test(ptr %p, i64 %a, i8 %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH1:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.vscale.i32()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i32 [[TMP0]], 2
-; CHECK-NEXT: [[TMP2:%.*]] = sub i32 [[TMP1]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i32 9, [[TMP2]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N_RND_UP]], [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP3]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i8> poison, i8 [[B]], i64 0
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/reductions.ll b/llvm/test/Transforms/LoopVectorize/RISCV/reductions.ll
index 554ce7b..2028df7 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/reductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/reductions.ll
@@ -9,46 +9,43 @@ define i32 @add(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
; CHECK-LABEL: define i32 @add(
; CHECK-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ insertelement (<vscale x 4 x i32> zeroinitializer, i32 2, i32 0), %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ insertelement (<vscale x 4 x i32> zeroinitializer, i32 2, i32 0), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
-; CHECK-NEXT: [[TMP7]] = add <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP13]])
+; CHECK-NEXT: [[TMP7:%.*]] = add <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP8]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP7]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP13]])
+; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP13]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP7]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP8]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP9]], %[[MIDDLE_BLOCK]] ], [ 2, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 2, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ 2, %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD]] = add nsw i32 [[TMP10]], [[SUM_07]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP9]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP11]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[ADD_LCSSA]]
;
entry:
@@ -74,46 +71,43 @@ define i32 @or(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
; CHECK-LABEL: define i32 @or(
; CHECK-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ insertelement (<vscale x 4 x i32> zeroinitializer, i32 2, i32 0), %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ insertelement (<vscale x 4 x i32> zeroinitializer, i32 2, i32 0), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
-; CHECK-NEXT: [[TMP7]] = or <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP13]])
+; CHECK-NEXT: [[TMP7:%.*]] = or <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP8]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP7]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP13]])
+; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP13]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.or.nxv4i32(<vscale x 4 x i32> [[TMP7]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.or.nxv4i32(<vscale x 4 x i32> [[TMP8]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP9]], %[[MIDDLE_BLOCK]] ], [ 2, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 2, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[OR:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ 2, %[[SCALAR_PH]] ], [ [[OR:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[OR]] = or i32 [[TMP10]], [[SUM_07]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[OR_LCSSA:%.*]] = phi i32 [ [[OR]], %[[FOR_BODY]] ], [ [[TMP9]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[OR_LCSSA:%.*]] = phi i32 [ [[OR]], %[[FOR_BODY]] ], [ [[TMP11]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[OR_LCSSA]]
;
entry:
@@ -139,46 +133,43 @@ define i32 @and(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
; CHECK-LABEL: define i32 @and(
; CHECK-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ insertelement (<vscale x 4 x i32> splat (i32 -1), i32 2, i32 0), %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ insertelement (<vscale x 4 x i32> splat (i32 -1), i32 2, i32 0), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
-; CHECK-NEXT: [[TMP7]] = and <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP13]])
+; CHECK-NEXT: [[TMP7:%.*]] = and <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP8]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP7]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP13]])
+; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP13]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.and.nxv4i32(<vscale x 4 x i32> [[TMP7]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.and.nxv4i32(<vscale x 4 x i32> [[TMP8]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP9]], %[[MIDDLE_BLOCK]] ], [ 2, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 2, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[AND:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ 2, %[[SCALAR_PH]] ], [ [[AND:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[AND]] = and i32 [[TMP10]], [[SUM_07]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[AND_LCSSA:%.*]] = phi i32 [ [[AND]], %[[FOR_BODY]] ], [ [[TMP9]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[AND_LCSSA:%.*]] = phi i32 [ [[AND]], %[[FOR_BODY]] ], [ [[TMP11]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[AND_LCSSA]]
;
entry:
@@ -204,46 +195,43 @@ define i32 @xor(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
; CHECK-LABEL: define i32 @xor(
; CHECK-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ insertelement (<vscale x 4 x i32> zeroinitializer, i32 2, i32 0), %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ insertelement (<vscale x 4 x i32> zeroinitializer, i32 2, i32 0), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
-; CHECK-NEXT: [[TMP7]] = xor <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP13]])
+; CHECK-NEXT: [[TMP7:%.*]] = xor <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP8]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP7]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP13]])
+; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP13]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.xor.nxv4i32(<vscale x 4 x i32> [[TMP7]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.xor.nxv4i32(<vscale x 4 x i32> [[TMP8]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP9]], %[[MIDDLE_BLOCK]] ], [ 2, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 2, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ 2, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[XOR]] = xor i32 [[TMP10]], [[SUM_07]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[XOR_LCSSA:%.*]] = phi i32 [ [[XOR]], %[[FOR_BODY]] ], [ [[TMP9]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[XOR_LCSSA:%.*]] = phi i32 [ [[XOR]], %[[FOR_BODY]] ], [ [[TMP11]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[XOR_LCSSA]]
;
entry:
@@ -269,48 +257,45 @@ define i32 @smin(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
; CHECK-LABEL: define i32 @smin(
; CHECK-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ splat (i32 2), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ splat (i32 2), %[[VECTOR_PH]] ], [ [[TMP9:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
; CHECK-NEXT: [[TMP7:%.*]] = icmp slt <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[TMP8]] = select <vscale x 4 x i1> [[TMP7]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: [[TMP8:%.*]] = select <vscale x 4 x i1> [[TMP7]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> [[VEC_PHI]]
+; CHECK-NEXT: [[TMP9]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP8]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP14]])
+; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.smin.nxv4i32(<vscale x 4 x i32> [[TMP8]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP12:%.*]] = call i32 @llvm.vector.reduce.smin.nxv4i32(<vscale x 4 x i32> [[TMP9]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 2, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 2, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_010:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_010:%.*]] = phi i32 [ 2, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP11]], [[SUM_010]]
; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], i32 [[TMP11]], i32 [[SUM_010]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi i32 [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi i32 [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[DOTSROA_SPECULATED_LCSSA]]
;
entry:
@@ -337,48 +322,45 @@ define i32 @umax(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
; CHECK-LABEL: define i32 @umax(
; CHECK-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ splat (i32 2), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ splat (i32 2), %[[VECTOR_PH]] ], [ [[TMP9:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
; CHECK-NEXT: [[TMP7:%.*]] = icmp ugt <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[TMP8]] = select <vscale x 4 x i1> [[TMP7]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-NEXT: [[TMP8:%.*]] = select <vscale x 4 x i1> [[TMP7]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> [[VEC_PHI]]
+; CHECK-NEXT: [[TMP9]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP8]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP14]])
+; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.umax.nxv4i32(<vscale x 4 x i32> [[TMP8]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP12:%.*]] = call i32 @llvm.vector.reduce.umax.nxv4i32(<vscale x 4 x i32> [[TMP9]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 2, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 2, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_010:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_010:%.*]] = phi i32 [ 2, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[CMP_I:%.*]] = icmp ugt i32 [[TMP11]], [[SUM_010]]
; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], i32 [[TMP11]], i32 [[SUM_010]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi i32 [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi i32 [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[DOTSROA_SPECULATED_LCSSA]]
;
entry:
@@ -405,46 +387,43 @@ define float @fadd_fast(ptr noalias nocapture readonly %a, i64 %n) {
; CHECK-LABEL: define float @fadd_fast(
; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP6]], align 4
-; CHECK-NEXT: [[TMP7]] = fadd fast <vscale x 4 x float> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP13]])
+; CHECK-NEXT: [[TMP7:%.*]] = fadd fast <vscale x 4 x float> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP8]] = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x float> [[TMP7]], <vscale x 4 x float> [[VEC_PHI]], i32 [[TMP13]])
+; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP13]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP9:%.*]] = call fast float @llvm.vector.reduce.fadd.nxv4f32(float 0.000000e+00, <vscale x 4 x float> [[TMP7]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP11:%.*]] = call fast float @llvm.vector.reduce.fadd.nxv4f32(float 0.000000e+00, <vscale x 4 x float> [[TMP8]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP9]], %[[MIDDLE_BLOCK]] ], [ 0.000000e+00, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD]] = fadd fast float [[TMP10]], [[SUM_07]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP9]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP11]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret float [[ADD_LCSSA]]
;
entry:
@@ -468,46 +447,43 @@ define half @fadd_fast_half_zvfh(ptr noalias nocapture readonly %a, i64 %n) "tar
; CHECK-LABEL: define half @fadd_fast_half_zvfh(
; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR1:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x half> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x half> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x half>, ptr [[TMP6]], align 4
-; CHECK-NEXT: [[TMP7]] = fadd fast <vscale x 8 x half> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 4 [[TMP6]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
+; CHECK-NEXT: [[TMP7:%.*]] = fadd fast <vscale x 8 x half> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP8]] = call <vscale x 8 x half> @llvm.vp.merge.nxv8f16(<vscale x 8 x i1> splat (i1 true), <vscale x 8 x half> [[TMP7]], <vscale x 8 x half> [[VEC_PHI]], i32 [[TMP13]])
+; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP13]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP9:%.*]] = call fast half @llvm.vector.reduce.fadd.nxv8f16(half 0xH0000, <vscale x 8 x half> [[TMP7]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP11:%.*]] = call fast half @llvm.vector.reduce.fadd.nxv8f16(half 0xH0000, <vscale x 8 x half> [[TMP8]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi half [ [[TMP9]], %[[MIDDLE_BLOCK]] ], [ 0xH0000, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi half [ 0xH0000, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ 0xH0000, %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP10:%.*]] = load half, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD]] = fadd fast half [[TMP10]], [[SUM_07]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi half [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP9]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi half [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP11]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret half [[ADD_LCSSA]]
;
entry:
@@ -549,7 +525,7 @@ define half @fadd_fast_half_zvfhmin(ptr noalias nocapture readonly %a, i64 %n) "
; CHECK-NEXT: [[TMP3]] = fadd fast <16 x half> [[WIDE_LOAD2]], [[VEC_PHI1]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd fast <16 x half> [[TMP3]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call fast half @llvm.vector.reduce.fadd.v16f16(half 0xH0000, <16 x half> [[BIN_RDX]])
@@ -567,7 +543,7 @@ define half @fadd_fast_half_zvfhmin(ptr noalias nocapture readonly %a, i64 %n) "
; CHECK-NEXT: [[ADD]] = fadd fast half [[TMP6]], [[SUM_07]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; CHECK: [[FOR_END]]:
; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi half [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret half [[ADD_LCSSA]]
@@ -611,7 +587,7 @@ define bfloat @fadd_fast_bfloat(ptr noalias nocapture readonly %a, i64 %n) "targ
; CHECK-NEXT: [[TMP3]] = fadd fast <16 x bfloat> [[WIDE_LOAD2]], [[VEC_PHI1]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd fast <16 x bfloat> [[TMP3]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call fast bfloat @llvm.vector.reduce.fadd.v16bf16(bfloat 0xR0000, <16 x bfloat> [[BIN_RDX]])
@@ -629,7 +605,7 @@ define bfloat @fadd_fast_bfloat(ptr noalias nocapture readonly %a, i64 %n) "targ
; CHECK-NEXT: [[ADD]] = fadd fast bfloat [[TMP6]], [[SUM_07]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
; CHECK: [[FOR_END]]:
; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi bfloat [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret bfloat [[ADD_LCSSA]]
@@ -657,48 +633,45 @@ define float @fmin_fast(ptr noalias nocapture readonly %a, i64 %n) #0 {
; CHECK-LABEL: define float @fmin_fast(
; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR4:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP9:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
; CHECK-NEXT: [[TMP7:%.*]] = fcmp olt <vscale x 4 x float> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[TMP8]] = select <vscale x 4 x i1> [[TMP7]], <vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
+; CHECK-NEXT: [[TMP8:%.*]] = select <vscale x 4 x i1> [[TMP7]], <vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[VEC_PHI]]
+; CHECK-NEXT: [[TMP9]] = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x float> [[TMP8]], <vscale x 4 x float> [[VEC_PHI]], i32 [[TMP14]])
+; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP10:%.*]] = call float @llvm.vector.reduce.fmin.nxv4f32(<vscale x 4 x float> [[TMP8]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP12:%.*]] = call float @llvm.vector.reduce.fmin.nxv4f32(<vscale x 4 x float> [[TMP9]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0.000000e+00, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[CMP_I:%.*]] = fcmp olt float [[TMP11]], [[SUM_07]]
; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], float [[TMP11]], float [[SUM_07]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi float [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi float [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret float [[DOTSROA_SPECULATED_LCSSA]]
;
entry:
@@ -723,48 +696,45 @@ define half @fmin_fast_half_zvfhmin(ptr noalias nocapture readonly %a, i64 %n) #
; CHECK-LABEL: define half @fmin_fast_half_zvfhmin(
; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR5:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x half> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x half> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP9:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x half>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 4 [[TMP6]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP14]])
; CHECK-NEXT: [[TMP7:%.*]] = fcmp olt <vscale x 8 x half> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[TMP8]] = select <vscale x 8 x i1> [[TMP7]], <vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
+; CHECK-NEXT: [[TMP8:%.*]] = select <vscale x 8 x i1> [[TMP7]], <vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[VEC_PHI]]
+; CHECK-NEXT: [[TMP9]] = call <vscale x 8 x half> @llvm.vp.merge.nxv8f16(<vscale x 8 x i1> splat (i1 true), <vscale x 8 x half> [[TMP8]], <vscale x 8 x half> [[VEC_PHI]], i32 [[TMP14]])
+; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP10:%.*]] = call half @llvm.vector.reduce.fmin.nxv8f16(<vscale x 8 x half> [[TMP8]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP12:%.*]] = call half @llvm.vector.reduce.fmin.nxv8f16(<vscale x 8 x half> [[TMP9]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi half [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0xH0000, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi half [ 0xH0000, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ 0xH0000, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP11:%.*]] = load half, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[CMP_I:%.*]] = fcmp olt half [[TMP11]], [[SUM_07]]
; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], half [[TMP11]], half [[SUM_07]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi half [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi half [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret half [[DOTSROA_SPECULATED_LCSSA]]
;
entry:
@@ -789,48 +759,45 @@ define bfloat @fmin_fast_bfloat_zvfbfmin(ptr noalias nocapture readonly %a, i64
; CHECK-LABEL: define bfloat @fmin_fast_bfloat_zvfbfmin(
; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR6:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x bfloat> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x bfloat> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP9:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds bfloat, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x bfloat>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x bfloat> @llvm.vp.load.nxv8bf16.p0(ptr align 4 [[TMP6]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP14]])
; CHECK-NEXT: [[TMP7:%.*]] = fcmp olt <vscale x 8 x bfloat> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[TMP8]] = select <vscale x 8 x i1> [[TMP7]], <vscale x 8 x bfloat> [[WIDE_LOAD]], <vscale x 8 x bfloat> [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
+; CHECK-NEXT: [[TMP8:%.*]] = select <vscale x 8 x i1> [[TMP7]], <vscale x 8 x bfloat> [[WIDE_LOAD]], <vscale x 8 x bfloat> [[VEC_PHI]]
+; CHECK-NEXT: [[TMP9]] = call <vscale x 8 x bfloat> @llvm.vp.merge.nxv8bf16(<vscale x 8 x i1> splat (i1 true), <vscale x 8 x bfloat> [[TMP8]], <vscale x 8 x bfloat> [[VEC_PHI]], i32 [[TMP14]])
+; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP10:%.*]] = call bfloat @llvm.vector.reduce.fmin.nxv8bf16(<vscale x 8 x bfloat> [[TMP8]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP12:%.*]] = call bfloat @llvm.vector.reduce.fmin.nxv8bf16(<vscale x 8 x bfloat> [[TMP9]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi bfloat [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0xR0000, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi bfloat [ 0xR0000, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi bfloat [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi bfloat [ 0xR0000, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds bfloat, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP11:%.*]] = load bfloat, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[CMP_I:%.*]] = fcmp olt bfloat [[TMP11]], [[SUM_07]]
; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], bfloat [[TMP11]], bfloat [[SUM_07]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi bfloat [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi bfloat [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret bfloat [[DOTSROA_SPECULATED_LCSSA]]
;
entry:
@@ -857,48 +824,45 @@ define float @fmax_fast(ptr noalias nocapture readonly %a, i64 %n) #0 {
; CHECK-LABEL: define float @fmax_fast(
; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR4]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP9:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
; CHECK-NEXT: [[TMP7:%.*]] = fcmp fast ogt <vscale x 4 x float> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[TMP8]] = select <vscale x 4 x i1> [[TMP7]], <vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
+; CHECK-NEXT: [[TMP8:%.*]] = select <vscale x 4 x i1> [[TMP7]], <vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[VEC_PHI]]
+; CHECK-NEXT: [[TMP9]] = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x float> [[TMP8]], <vscale x 4 x float> [[VEC_PHI]], i32 [[TMP14]])
+; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP10:%.*]] = call fast float @llvm.vector.reduce.fmax.nxv4f32(<vscale x 4 x float> [[TMP8]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP12:%.*]] = call fast float @llvm.vector.reduce.fmax.nxv4f32(<vscale x 4 x float> [[TMP9]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0.000000e+00, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[CMP_I:%.*]] = fcmp fast ogt float [[TMP11]], [[SUM_07]]
; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], float [[TMP11]], float [[SUM_07]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi float [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi float [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret float [[DOTSROA_SPECULATED_LCSSA]]
;
entry:
@@ -923,48 +887,45 @@ define half @fmax_fast_half_zvfhmin(ptr noalias nocapture readonly %a, i64 %n) #
; CHECK-LABEL: define half @fmax_fast_half_zvfhmin(
; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR5]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x half> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x half> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP9:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x half>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 4 [[TMP6]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP14]])
; CHECK-NEXT: [[TMP7:%.*]] = fcmp fast ogt <vscale x 8 x half> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[TMP8]] = select <vscale x 8 x i1> [[TMP7]], <vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]]
+; CHECK-NEXT: [[TMP8:%.*]] = select <vscale x 8 x i1> [[TMP7]], <vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[VEC_PHI]]
+; CHECK-NEXT: [[TMP9]] = call <vscale x 8 x half> @llvm.vp.merge.nxv8f16(<vscale x 8 x i1> splat (i1 true), <vscale x 8 x half> [[TMP8]], <vscale x 8 x half> [[VEC_PHI]], i32 [[TMP14]])
+; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP10:%.*]] = call fast half @llvm.vector.reduce.fmax.nxv8f16(<vscale x 8 x half> [[TMP8]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP12:%.*]] = call fast half @llvm.vector.reduce.fmax.nxv8f16(<vscale x 8 x half> [[TMP9]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi half [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0xH0000, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi half [ 0xH0000, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ 0xH0000, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP11:%.*]] = load half, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[CMP_I:%.*]] = fcmp fast ogt half [[TMP11]], [[SUM_07]]
; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], half [[TMP11]], half [[SUM_07]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi half [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi half [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret half [[DOTSROA_SPECULATED_LCSSA]]
;
entry:
@@ -989,48 +950,45 @@ define bfloat @fmax_fast_bfloat_zvfbfmin(ptr noalias nocapture readonly %a, i64
; CHECK-LABEL: define bfloat @fmax_fast_bfloat_zvfbfmin(
; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR6]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x bfloat> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x bfloat> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP9:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds bfloat, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x bfloat>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x bfloat> @llvm.vp.load.nxv8bf16.p0(ptr align 4 [[TMP6]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP14]])
; CHECK-NEXT: [[TMP7:%.*]] = fcmp fast ogt <vscale x 8 x bfloat> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[TMP8]] = select <vscale x 8 x i1> [[TMP7]], <vscale x 8 x bfloat> [[WIDE_LOAD]], <vscale x 8 x bfloat> [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]]
+; CHECK-NEXT: [[TMP8:%.*]] = select <vscale x 8 x i1> [[TMP7]], <vscale x 8 x bfloat> [[WIDE_LOAD]], <vscale x 8 x bfloat> [[VEC_PHI]]
+; CHECK-NEXT: [[TMP9]] = call <vscale x 8 x bfloat> @llvm.vp.merge.nxv8bf16(<vscale x 8 x i1> splat (i1 true), <vscale x 8 x bfloat> [[TMP8]], <vscale x 8 x bfloat> [[VEC_PHI]], i32 [[TMP14]])
+; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP10:%.*]] = call fast bfloat @llvm.vector.reduce.fmax.nxv8bf16(<vscale x 8 x bfloat> [[TMP8]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP12:%.*]] = call fast bfloat @llvm.vector.reduce.fmax.nxv8bf16(<vscale x 8 x bfloat> [[TMP9]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi bfloat [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0xR0000, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi bfloat [ 0xR0000, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi bfloat [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi bfloat [ 0xR0000, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds bfloat, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP11:%.*]] = load bfloat, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[CMP_I:%.*]] = fcmp fast ogt bfloat [[TMP11]], [[SUM_07]]
; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], bfloat [[TMP11]], bfloat [[SUM_07]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP34:![0-9]+]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi bfloat [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi bfloat [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret bfloat [[DOTSROA_SPECULATED_LCSSA]]
;
entry:
@@ -1077,7 +1035,7 @@ define i32 @mul(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
; CHECK-NEXT: [[TMP3]] = mul <8 x i32> [[WIDE_LOAD2]], [[VEC_PHI1]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP34:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[BIN_RDX:%.*]] = mul <8 x i32> [[TMP3]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> [[BIN_RDX]])
@@ -1095,7 +1053,7 @@ define i32 @mul(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
; CHECK-NEXT: [[MUL]] = mul nsw i32 [[TMP6]], [[SUM_07]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]]
; CHECK: [[FOR_END]]:
; CHECK-NEXT: [[MUL_LCSSA:%.*]] = phi i32 [ [[MUL]], %[[FOR_BODY]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[MUL_LCSSA]]
@@ -1142,7 +1100,7 @@ define i32 @memory_dependence(ptr noalias nocapture %a, ptr noalias nocapture re
; CHECK-NEXT: [[TMP5]] = mul <8 x i32> [[WIDE_LOAD1]], [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP37:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> [[TMP5]])
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
@@ -1165,7 +1123,7 @@ define i32 @memory_dependence(ptr noalias nocapture %a, ptr noalias nocapture re
; CHECK-NEXT: [[MUL]] = mul nsw i32 [[TMP9]], [[SUM]]
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP37:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP38:![0-9]+]]
; CHECK: [[FOR_END]]:
; CHECK-NEXT: [[MUL_LCSSA:%.*]] = phi i32 [ [[MUL]], %[[FOR_BODY]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[MUL_LCSSA]]
@@ -1197,40 +1155,37 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n) {
; CHECK-LABEL: define float @fmuladd(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ insertelement (<vscale x 4 x float> splat (float -0.000000e+00), float 0.000000e+00, i32 0), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ insertelement (<vscale x 4 x float> splat (float -0.000000e+00), float 0.000000e+00, i32 0), %[[VECTOR_PH]] ], [ [[TMP9:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x float>, ptr [[TMP7]], align 4
-; CHECK-NEXT: [[TMP8]] = call reassoc <vscale x 4 x float> @llvm.fmuladd.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[WIDE_LOAD1]], <vscale x 4 x float> [[VEC_PHI]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP38:![0-9]+]]
+; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP7]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
+; CHECK-NEXT: [[TMP8:%.*]] = call reassoc <vscale x 4 x float> @llvm.fmuladd.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[WIDE_LOAD1]], <vscale x 4 x float> [[VEC_PHI]])
+; CHECK-NEXT: [[TMP9]] = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x float> [[TMP8]], <vscale x 4 x float> [[VEC_PHI]], i32 [[TMP14]])
+; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP39:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP10:%.*]] = call reassoc float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[TMP8]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP16:%.*]] = call reassoc float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[TMP9]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0.000000e+00, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[MULADD:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[MULADD:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
@@ -1238,9 +1193,9 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n) {
; CHECK-NEXT: [[MULADD]] = tail call reassoc float @llvm.fmuladd.f32(float [[TMP11]], float [[TMP12]], float [[SUM_07]])
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP39:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP40:![0-9]+]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], %[[FOR_BODY]] ], [ [[TMP16]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret float [[MULADD_LCSSA]]
;
entry:
@@ -1266,40 +1221,37 @@ define half @fmuladd_f16_zvfh(ptr %a, ptr %b, i64 %n) "target-features"="+zvfh"
; CHECK-LABEL: define half @fmuladd_f16_zvfh(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x half> [ insertelement (<vscale x 8 x half> splat (half 0xH8000), half 0xH0000, i32 0), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x half> [ insertelement (<vscale x 8 x half> splat (half 0xH8000), half 0xH0000, i32 0), %[[VECTOR_PH]] ], [ [[TMP9:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x half>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 4 [[TMP6]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP14]])
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds half, ptr [[B]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x half>, ptr [[TMP7]], align 4
-; CHECK-NEXT: [[TMP8]] = call reassoc <vscale x 8 x half> @llvm.fmuladd.nxv8f16(<vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[WIDE_LOAD1]], <vscale x 8 x half> [[VEC_PHI]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP40:![0-9]+]]
+; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 4 [[TMP7]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP14]])
+; CHECK-NEXT: [[TMP8:%.*]] = call reassoc <vscale x 8 x half> @llvm.fmuladd.nxv8f16(<vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[WIDE_LOAD1]], <vscale x 8 x half> [[VEC_PHI]])
+; CHECK-NEXT: [[TMP9]] = call <vscale x 8 x half> @llvm.vp.merge.nxv8f16(<vscale x 8 x i1> splat (i1 true), <vscale x 8 x half> [[TMP8]], <vscale x 8 x half> [[VEC_PHI]], i32 [[TMP14]])
+; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP41:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP10:%.*]] = call reassoc half @llvm.vector.reduce.fadd.nxv8f16(half 0xH8000, <vscale x 8 x half> [[TMP8]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP16:%.*]] = call reassoc half @llvm.vector.reduce.fadd.nxv8f16(half 0xH8000, <vscale x 8 x half> [[TMP9]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi half [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0xH0000, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi half [ 0xH0000, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[MULADD:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ 0xH0000, %[[SCALAR_PH]] ], [ [[MULADD:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP11:%.*]] = load half, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds half, ptr [[B]], i64 [[IV]]
@@ -1307,9 +1259,9 @@ define half @fmuladd_f16_zvfh(ptr %a, ptr %b, i64 %n) "target-features"="+zvfh"
; CHECK-NEXT: [[MULADD]] = tail call reassoc half @llvm.fmuladd.f16(half [[TMP11]], half [[TMP12]], half [[SUM_07]])
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP41:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP42:![0-9]+]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[MULADD_LCSSA:%.*]] = phi half [ [[MULADD]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[MULADD_LCSSA:%.*]] = phi half [ [[MULADD]], %[[FOR_BODY]] ], [ [[TMP16]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret half [[MULADD_LCSSA]]
;
entry:
@@ -1360,7 +1312,7 @@ define half @fmuladd_f16_zvfhmin(ptr %a, ptr %b, i64 %n) "target-features"="+zvf
; CHECK-NEXT: [[TMP5]] = call reassoc <16 x half> @llvm.fmuladd.v16f16(<16 x half> [[WIDE_LOAD2]], <16 x half> [[WIDE_LOAD4]], <16 x half> [[VEC_PHI1]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP42:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP43:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd reassoc <16 x half> [[TMP5]], [[TMP4]]
; CHECK-NEXT: [[TMP7:%.*]] = call reassoc half @llvm.vector.reduce.fadd.v16f16(half 0xH8000, <16 x half> [[BIN_RDX]])
@@ -1380,7 +1332,7 @@ define half @fmuladd_f16_zvfhmin(ptr %a, ptr %b, i64 %n) "target-features"="+zvf
; CHECK-NEXT: [[MULADD]] = tail call reassoc half @llvm.fmuladd.f16(half [[TMP8]], half [[TMP9]], half [[SUM_07]])
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP43:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP44:![0-9]+]]
; CHECK: [[FOR_END]]:
; CHECK-NEXT: [[MULADD_LCSSA:%.*]] = phi half [ [[MULADD]], %[[FOR_BODY]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret half [[MULADD_LCSSA]]
@@ -1430,7 +1382,7 @@ define bfloat @fmuladd_bf16(ptr %a, ptr %b, i64 %n) "target-features"="+zvfbfmin
; CHECK-NEXT: [[TMP5]] = call reassoc <16 x bfloat> @llvm.fmuladd.v16bf16(<16 x bfloat> [[WIDE_LOAD2]], <16 x bfloat> [[WIDE_LOAD4]], <16 x bfloat> [[VEC_PHI1]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP44:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP45:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd reassoc <16 x bfloat> [[TMP5]], [[TMP4]]
; CHECK-NEXT: [[TMP7:%.*]] = call reassoc bfloat @llvm.vector.reduce.fadd.v16bf16(bfloat 0xR8000, <16 x bfloat> [[BIN_RDX]])
@@ -1450,7 +1402,7 @@ define bfloat @fmuladd_bf16(ptr %a, ptr %b, i64 %n) "target-features"="+zvfbfmin
; CHECK-NEXT: [[MULADD]] = tail call reassoc bfloat @llvm.fmuladd.bf16(bfloat [[TMP8]], bfloat [[TMP9]], bfloat [[SUM_07]])
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP45:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP46:![0-9]+]]
; CHECK: [[FOR_END]]:
; CHECK-NEXT: [[MULADD_LCSSA:%.*]] = phi bfloat [ [[MULADD]], %[[FOR_BODY]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret bfloat [[MULADD_LCSSA]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-bf16.ll b/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-bf16.ll
index 5a67b54..346f1cb 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-bf16.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-bf16.ll
@@ -1,5 +1,9 @@
; REQUIRES: asserts
-; RUN: opt -passes=loop-vectorize -mtriple riscv64 -mattr=+v,+zvfbfmin -debug-only=loop-vectorize,vplan --disable-output -riscv-v-register-bit-width-lmul=1 -S < %s 2>&1 | FileCheck %s
+; RUN: opt -passes=loop-vectorize -mtriple riscv64 -mattr=+v,+zvfbfmin -prefer-predicate-over-epilogue=scalar-epilogue -debug-only=loop-vectorize,vplan --disable-output -riscv-v-register-bit-width-lmul=1 -S < %s 2>&1 | FileCheck %s
+
+; TODO: -prefer-predicate-over-epilogue=scalar-epilogue was added to allow
+; unrolling. Calculate register pressure for all VPlans, not just unrolled ones,
+; and remove.
define void @add(ptr noalias nocapture readonly %src1, ptr noalias nocapture readonly %src2, i32 signext %size, ptr noalias nocapture writeonly %result) {
; CHECK-LABEL: add
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-f16.ll b/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-f16.ll
index d4909fa..b25bc48 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-f16.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-f16.ll
@@ -1,6 +1,10 @@
; REQUIRES: asserts
-; RUN: opt -passes=loop-vectorize -mtriple riscv64 -mattr=+v,+zvfh -debug-only=loop-vectorize,vplan --disable-output -riscv-v-register-bit-width-lmul=1 -S < %s 2>&1 | FileCheck %s --check-prefix=ZVFH
-; RUN: opt -passes=loop-vectorize -mtriple riscv64 -mattr=+v,+zvfhmin -debug-only=loop-vectorize,vplan --disable-output -riscv-v-register-bit-width-lmul=1 -S < %s 2>&1 | FileCheck %s --check-prefix=ZVFHMIN
+; RUN: opt -passes=loop-vectorize -mtriple riscv64 -mattr=+v,+zvfh -prefer-predicate-over-epilogue=scalar-epilogue -debug-only=loop-vectorize,vplan --disable-output -riscv-v-register-bit-width-lmul=1 -S < %s 2>&1 | FileCheck %s --check-prefix=ZVFH
+; RUN: opt -passes=loop-vectorize -mtriple riscv64 -mattr=+v,+zvfhmin -prefer-predicate-over-epilogue=scalar-epilogue -debug-only=loop-vectorize,vplan --disable-output -riscv-v-register-bit-width-lmul=1 -S < %s 2>&1 | FileCheck %s --check-prefix=ZVFHMIN
+
+; TODO: -prefer-predicate-over-epilogue=scalar-epilogue was added to allow
+; unrolling. Calculate register pressure for all VPlans, not just unrolled ones,
+; and remove.
define void @add(ptr noalias nocapture readonly %src1, ptr noalias nocapture readonly %src2, i32 signext %size, ptr noalias nocapture writeonly %result) {
; CHECK-LABEL: add
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage.ll b/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage.ll
index 7037282..116ccc9 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage.ll
@@ -1,25 +1,29 @@
; REQUIRES: asserts
; RUN: opt -passes=loop-vectorize -mtriple riscv64-linux-gnu \
; RUN: -mattr=+v,+d -debug-only=loop-vectorize,vplan --disable-output \
-; RUN: -force-vector-width=1 \
+; RUN: -force-vector-width=1 -prefer-predicate-over-epilogue=scalar-epilogue \
; RUN: -S < %s 2>&1 | FileCheck %s --check-prefix=CHECK-SCALAR
; RUN: opt -passes=loop-vectorize -mtriple riscv64-linux-gnu \
; RUN: -mattr=+v,+d -debug-only=loop-vectorize,vplan --disable-output \
-; RUN: -riscv-v-register-bit-width-lmul=1 \
+; RUN: -riscv-v-register-bit-width-lmul=1 -prefer-predicate-over-epilogue=scalar-epilogue \
; RUN: -S < %s 2>&1 | FileCheck %s --check-prefix=CHECK-LMUL1
; RUN: opt -passes=loop-vectorize -mtriple riscv64-linux-gnu \
; RUN: -mattr=+v,+d -debug-only=loop-vectorize,vplan --disable-output \
-; RUN: -riscv-v-register-bit-width-lmul=2 \
+; RUN: -riscv-v-register-bit-width-lmul=2 -prefer-predicate-over-epilogue=scalar-epilogue \
; RUN: -S < %s 2>&1 | FileCheck %s --check-prefix=CHECK-LMUL2
; RUN: opt -passes=loop-vectorize -mtriple riscv64-linux-gnu \
; RUN: -mattr=+v,+d -debug-only=loop-vectorize,vplan --disable-output \
-; RUN: -riscv-v-register-bit-width-lmul=4 \
+; RUN: -riscv-v-register-bit-width-lmul=4 -prefer-predicate-over-epilogue=scalar-epilogue \
; RUN: -S < %s 2>&1 | FileCheck %s --check-prefix=CHECK-LMUL4
; RUN: opt -passes=loop-vectorize -mtriple riscv64-linux-gnu \
; RUN: -mattr=+v,+d -debug-only=loop-vectorize,vplan --disable-output \
-; RUN: -riscv-v-register-bit-width-lmul=8 \
+; RUN: -riscv-v-register-bit-width-lmul=8 -prefer-predicate-over-epilogue=scalar-epilogue \
; RUN: -S < %s 2>&1 | FileCheck %s --check-prefix=CHECK-LMUL8
+; TODO: -prefer-predicate-over-epilogue=scalar-epilogue was added to allow
+; unrolling. Calculate register pressure for all VPlans, not just unrolled ones,
+; and remove.
+
define void @add(ptr noalias nocapture readonly %src1, ptr noalias nocapture readonly %src2, i32 signext %size, ptr noalias nocapture writeonly %result) {
; CHECK-LABEL: add
; CHECK-SCALAR: LV(REG): VF = 1
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/remark-reductions.ll b/llvm/test/Transforms/LoopVectorize/RISCV/remark-reductions.ll
index 85163c7..6413ded3 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/remark-reductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/remark-reductions.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt < %s -mtriple=riscv64 -mattr=+v -p loop-vectorize -pass-remarks-analysis=loop-vectorize -S 2>&1 | FileCheck %s
; CHECK: remark: <unknown>:0:0: the cost-model indicates that interleaving is not beneficial
@@ -5,41 +6,36 @@ define float @s311(float %a_0, float %s311_sum) {
; CHECK-LABEL: define float @s311(
; CHECK-SAME: float [[A_0:%.*]], float [[S311_SUM:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.vscale.i32()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i32 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 1200, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 1200, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 1200, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i32 [[TMP4]], 4
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[A_0]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x float> [[BROADCAST_SPLATINSERT]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi float [ [[S311_SUM]], %[[VECTOR_PH]] ], [ [[TMP6:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP6]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[VEC_PHI]], <vscale x 4 x float> [[BROADCAST_SPLAT]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i32 [ 1200, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[AVL]], i32 4, i1 true)
+; CHECK-NEXT: [[TMP6]] = call float @llvm.vp.reduce.fadd.nxv4f32(float [[VEC_PHI]], <vscale x 4 x float> [[BROADCAST_SPLAT]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i32 [[TMP9]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP9]]
+; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_EVL_NEXT]], 1200
; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 1200, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP6]], %[[MIDDLE_BLOCK]] ], [ [[S311_SUM]], %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[S311_SUM]], %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[RED:%.*]] = phi float [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[RED:%.*]] = phi float [ [[S311_SUM]], %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[RED_NEXT]] = fadd float [[A_0]], [[RED]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[IV_NEXT]], 1200
-; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: [[RED_LCSSA:%.*]] = phi float [ [[RED_NEXT]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret float [[RED_LCSSA]]
@@ -60,8 +56,9 @@ exit:
ret float %red.lcssa
}
;.
-; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]], [[META3:![0-9]+]]}
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
-; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
-; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
+; CHECK: [[META2]] = !{!"llvm.loop.isvectorized.tailfoldingstyle", !"evl"}
+; CHECK: [[META3]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META3]], [[META1]]}
;.
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll b/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll
index 0b3dcf8..6e58fdf 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll
@@ -19,98 +19,88 @@ define void @vector_reverse_i32(ptr noalias %A, ptr noalias %B) {
; RV64-LABEL: define void @vector_reverse_i32(
; RV64-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0:[0-9]+]] {
; RV64-NEXT: [[ENTRY:.*]]:
-; RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; RV64-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; RV64-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1023, [[TMP1]]
-; RV64-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; RV64-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; RV64: [[VECTOR_PH]]:
-; RV64-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; RV64-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; RV64-NEXT: [[N_MOD_VF:%.*]] = urem i64 1023, [[TMP3]]
-; RV64-NEXT: [[N_VEC:%.*]] = sub i64 1023, [[N_MOD_VF]]
; RV64-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; RV64-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; RV64-NEXT: [[TMP6:%.*]] = sub i64 1023, [[N_VEC]]
; RV64-NEXT: br label %[[VECTOR_BODY:.*]]
; RV64: [[VECTOR_BODY]]:
-; RV64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV64-NEXT: [[AVL:%.*]] = phi i64 [ 1023, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV64-NEXT: [[TMP19:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; RV64-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]]
; RV64-NEXT: [[TMP7:%.*]] = add nsw i64 [[OFFSET_IDX]], -1
; RV64-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP7]]
-; RV64-NEXT: [[TMP9:%.*]] = mul i64 0, [[TMP5]]
-; RV64-NEXT: [[TMP10:%.*]] = sub i64 [[TMP5]], 1
+; RV64-NEXT: [[TMP24:%.*]] = zext i32 [[TMP19]] to i64
+; RV64-NEXT: [[TMP9:%.*]] = mul i64 0, [[TMP24]]
+; RV64-NEXT: [[TMP10:%.*]] = sub i64 [[TMP24]], 1
; RV64-NEXT: [[TMP11:%.*]] = mul i64 -1, [[TMP10]]
-; RV64-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i64 [[TMP9]]
-; RV64-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TMP12]], i64 [[TMP11]]
-; RV64-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP13]], align 4
-; RV64-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
+; RV64-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[TMP8]], i64 [[TMP9]]
+; RV64-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr [[TMP12]], i64 [[TMP11]]
+; RV64-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP13]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP19]])
+; RV64-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP19]])
; RV64-NEXT: [[TMP14:%.*]] = add <vscale x 4 x i32> [[REVERSE]], splat (i32 1)
; RV64-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP7]]
-; RV64-NEXT: [[TMP16:%.*]] = mul i64 0, [[TMP5]]
-; RV64-NEXT: [[TMP17:%.*]] = sub i64 [[TMP5]], 1
+; RV64-NEXT: [[TMP16:%.*]] = zext i32 [[TMP19]] to i64
+; RV64-NEXT: [[TMP25:%.*]] = mul i64 0, [[TMP16]]
+; RV64-NEXT: [[TMP17:%.*]] = sub i64 [[TMP16]], 1
; RV64-NEXT: [[TMP18:%.*]] = mul i64 -1, [[TMP17]]
-; RV64-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[TMP15]], i64 [[TMP16]]
-; RV64-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[TMP19]], i64 [[TMP18]]
-; RV64-NEXT: [[REVERSE1:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP14]])
-; RV64-NEXT: store <vscale x 4 x i32> [[REVERSE1]], ptr [[TMP20]], align 4
-; RV64-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; RV64-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; RV64-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; RV64-NEXT: [[TMP20:%.*]] = getelementptr i32, ptr [[TMP15]], i64 [[TMP25]]
+; RV64-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr [[TMP20]], i64 [[TMP18]]
+; RV64-NEXT: [[VP_REVERSE1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP19]])
+; RV64-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[VP_REVERSE1]], ptr align 4 [[TMP21]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP19]])
+; RV64-NEXT: [[TMP22:%.*]] = zext i32 [[TMP19]] to i64
+; RV64-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP22]], [[INDEX]]
+; RV64-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP22]]
+; RV64-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1023
+; RV64-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; RV64: [[MIDDLE_BLOCK]]:
-; RV64-NEXT: [[CMP_N:%.*]] = icmp eq i64 1023, [[N_VEC]]
-; RV64-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]]
+; RV64-NEXT: br [[EXIT:label %.*]]
; RV64: [[SCALAR_PH]]:
-; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP6]], %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ]
+; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 1023, %[[ENTRY]] ]
; RV64-NEXT: br label %[[FOR_BODY:.*]]
; RV64: [[FOR_BODY]]:
;
; RV32-LABEL: define void @vector_reverse_i32(
; RV32-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0:[0-9]+]] {
; RV32-NEXT: [[ENTRY:.*]]:
-; RV32-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; RV32-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; RV32-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1023, [[TMP1]]
-; RV32-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; RV32-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; RV32: [[VECTOR_PH]]:
-; RV32-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; RV32-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; RV32-NEXT: [[N_MOD_VF:%.*]] = urem i64 1023, [[TMP3]]
-; RV32-NEXT: [[N_VEC:%.*]] = sub i64 1023, [[N_MOD_VF]]
; RV32-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; RV32-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; RV32-NEXT: [[TMP6:%.*]] = sub i64 1023, [[N_VEC]]
; RV32-NEXT: br label %[[VECTOR_BODY:.*]]
; RV32: [[VECTOR_BODY]]:
-; RV32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV32-NEXT: [[AVL:%.*]] = phi i64 [ 1023, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV32-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; RV32-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]]
; RV32-NEXT: [[TMP7:%.*]] = add nsw i64 [[OFFSET_IDX]], -1
; RV32-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP7]]
-; RV32-NEXT: [[TMP9:%.*]] = trunc i64 [[TMP5]] to i32
; RV32-NEXT: [[TMP10:%.*]] = mul i32 0, [[TMP9]]
; RV32-NEXT: [[TMP11:%.*]] = sub i32 [[TMP9]], 1
; RV32-NEXT: [[TMP12:%.*]] = mul i32 -1, [[TMP11]]
-; RV32-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i32 [[TMP10]]
-; RV32-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[TMP13]], i32 [[TMP12]]
-; RV32-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP14]], align 4
-; RV32-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
+; RV32-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr [[TMP8]], i32 [[TMP10]]
+; RV32-NEXT: [[TMP14:%.*]] = getelementptr i32, ptr [[TMP13]], i32 [[TMP12]]
+; RV32-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
+; RV32-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
; RV32-NEXT: [[TMP15:%.*]] = add <vscale x 4 x i32> [[REVERSE]], splat (i32 1)
; RV32-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP7]]
-; RV32-NEXT: [[TMP17:%.*]] = trunc i64 [[TMP5]] to i32
-; RV32-NEXT: [[TMP18:%.*]] = mul i32 0, [[TMP17]]
-; RV32-NEXT: [[TMP19:%.*]] = sub i32 [[TMP17]], 1
+; RV32-NEXT: [[TMP17:%.*]] = mul i32 0, [[TMP9]]
+; RV32-NEXT: [[TMP19:%.*]] = sub i32 [[TMP9]], 1
; RV32-NEXT: [[TMP20:%.*]] = mul i32 -1, [[TMP19]]
-; RV32-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[TMP16]], i32 [[TMP18]]
-; RV32-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr [[TMP21]], i32 [[TMP20]]
-; RV32-NEXT: [[REVERSE1:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP15]])
-; RV32-NEXT: store <vscale x 4 x i32> [[REVERSE1]], ptr [[TMP22]], align 4
-; RV32-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; RV32-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; RV32-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; RV32-NEXT: [[TMP18:%.*]] = getelementptr i32, ptr [[TMP16]], i32 [[TMP17]]
+; RV32-NEXT: [[TMP22:%.*]] = getelementptr i32, ptr [[TMP18]], i32 [[TMP20]]
+; RV32-NEXT: [[VP_REVERSE1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[TMP15]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
+; RV32-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[VP_REVERSE1]], ptr align 4 [[TMP22]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
+; RV32-NEXT: [[TMP23:%.*]] = zext i32 [[TMP9]] to i64
+; RV32-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP23]], [[INDEX]]
+; RV32-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP23]]
+; RV32-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1023
+; RV32-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; RV32: [[MIDDLE_BLOCK]]:
-; RV32-NEXT: [[CMP_N:%.*]] = icmp eq i64 1023, [[N_VEC]]
-; RV32-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]]
+; RV32-NEXT: br [[EXIT:label %.*]]
; RV32: [[SCALAR_PH]]:
-; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP6]], %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ]
+; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 1023, %[[ENTRY]] ]
; RV32-NEXT: br label %[[FOR_BODY:.*]]
; RV32: [[FOR_BODY]]:
;
@@ -122,14 +112,12 @@ define void @vector_reverse_i32(ptr noalias %A, ptr noalias %B) {
; RV64-UF2-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1023, [[TMP1]]
; RV64-UF2-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; RV64-UF2: [[VECTOR_PH]]:
-; RV64-UF2-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; RV64-UF2-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
-; RV64-UF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 1023, [[TMP3]]
-; RV64-UF2-NEXT: [[N_VEC:%.*]] = sub i64 1023, [[N_MOD_VF]]
; RV64-UF2-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; RV64-UF2-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; RV64-UF2-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 2
+; RV64-UF2-NEXT: [[N_VEC:%.*]] = urem i64 1023, [[TMP6]]
; RV64-UF2-NEXT: [[TMP7:%.*]] = sub i64 1023, [[N_VEC]]
+; RV64-UF2-NEXT: [[TMP33:%.*]] = sub i64 1023, [[TMP7]]
; RV64-UF2-NEXT: br label %[[VECTOR_BODY:.*]]
; RV64-UF2: [[VECTOR_BODY]]:
; RV64-UF2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -168,13 +156,13 @@ define void @vector_reverse_i32(ptr noalias %A, ptr noalias %B) {
; RV64-UF2-NEXT: [[REVERSE4:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP21]])
; RV64-UF2-NEXT: store <vscale x 4 x i32> [[REVERSE4]], ptr [[TMP32]], align 4
; RV64-UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
-; RV64-UF2-NEXT: [[TMP33:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; RV64-UF2-NEXT: br i1 [[TMP33]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; RV64-UF2-NEXT: [[TMP34:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[TMP7]]
+; RV64-UF2-NEXT: br i1 [[TMP34]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; RV64-UF2: [[MIDDLE_BLOCK]]:
-; RV64-UF2-NEXT: [[CMP_N:%.*]] = icmp eq i64 1023, [[N_VEC]]
+; RV64-UF2-NEXT: [[CMP_N:%.*]] = icmp eq i64 1023, [[TMP7]]
; RV64-UF2-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]]
; RV64-UF2: [[SCALAR_PH]]:
-; RV64-UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP7]], %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ]
+; RV64-UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP33]], %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ]
; RV64-UF2-NEXT: br label %[[FOR_BODY:.*]]
; RV64-UF2: [[FOR_BODY]]:
;
@@ -206,10 +194,7 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur
; RV64-NEXT: br i1 [[CMP7]], label %[[FOR_BODY_PREHEADER:.*]], label %[[FOR_COND_CLEANUP:.*]]
; RV64: [[FOR_BODY_PREHEADER]]:
; RV64-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64
-; RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
-; RV64-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP1]], 4
-; RV64-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]]
-; RV64-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
+; RV64-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
; RV64: [[VECTOR_SCEVCHECK]]:
; RV64-NEXT: [[TMP3:%.*]] = add nsw i64 [[TMP0]], -1
; RV64-NEXT: [[TMP4:%.*]] = add i32 [[N]], -1
@@ -231,48 +216,46 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur
; RV64-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP14]], [[TMP13]]
; RV64-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; RV64: [[VECTOR_PH]]:
-; RV64-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
-; RV64-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 4
-; RV64-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP16]]
-; RV64-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
; RV64-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64()
; RV64-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 4
-; RV64-NEXT: [[TMP19:%.*]] = sub i64 [[TMP0]], [[N_VEC]]
-; RV64-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32
-; RV64-NEXT: [[TMP20:%.*]] = sub i32 [[N]], [[DOTCAST]]
; RV64-NEXT: br label %[[VECTOR_BODY:.*]]
; RV64: [[VECTOR_BODY]]:
-; RV64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV64-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP0]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV64-NEXT: [[TMP20:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; RV64-NEXT: [[DOTCAST3:%.*]] = trunc i64 [[INDEX]] to i32
; RV64-NEXT: [[OFFSET_IDX:%.*]] = sub i32 [[N]], [[DOTCAST3]]
; RV64-NEXT: [[TMP21:%.*]] = add nsw i32 [[OFFSET_IDX]], -1
; RV64-NEXT: [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
; RV64-NEXT: [[TMP23:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP22]]
-; RV64-NEXT: [[TMP24:%.*]] = mul i64 0, [[TMP18]]
-; RV64-NEXT: [[TMP25:%.*]] = sub i64 [[TMP18]], 1
+; RV64-NEXT: [[TMP24:%.*]] = zext i32 [[TMP20]] to i64
+; RV64-NEXT: [[TMP28:%.*]] = mul i64 0, [[TMP24]]
+; RV64-NEXT: [[TMP25:%.*]] = sub i64 [[TMP24]], 1
; RV64-NEXT: [[TMP26:%.*]] = mul i64 -1, [[TMP25]]
-; RV64-NEXT: [[TMP27:%.*]] = getelementptr inbounds i32, ptr [[TMP23]], i64 [[TMP24]]
-; RV64-NEXT: [[TMP28:%.*]] = getelementptr inbounds i32, ptr [[TMP27]], i64 [[TMP26]]
-; RV64-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP28]], align 4
-; RV64-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
+; RV64-NEXT: [[TMP38:%.*]] = getelementptr i32, ptr [[TMP23]], i64 [[TMP28]]
+; RV64-NEXT: [[TMP27:%.*]] = getelementptr i32, ptr [[TMP38]], i64 [[TMP26]]
+; RV64-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP27]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP20]])
+; RV64-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP20]])
; RV64-NEXT: [[TMP29:%.*]] = add <vscale x 4 x i32> [[REVERSE]], splat (i32 1)
; RV64-NEXT: [[TMP30:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP22]]
-; RV64-NEXT: [[TMP31:%.*]] = mul i64 0, [[TMP18]]
-; RV64-NEXT: [[TMP32:%.*]] = sub i64 [[TMP18]], 1
+; RV64-NEXT: [[TMP39:%.*]] = zext i32 [[TMP20]] to i64
+; RV64-NEXT: [[TMP31:%.*]] = mul i64 0, [[TMP39]]
+; RV64-NEXT: [[TMP32:%.*]] = sub i64 [[TMP39]], 1
; RV64-NEXT: [[TMP33:%.*]] = mul i64 -1, [[TMP32]]
-; RV64-NEXT: [[TMP34:%.*]] = getelementptr inbounds i32, ptr [[TMP30]], i64 [[TMP31]]
-; RV64-NEXT: [[TMP35:%.*]] = getelementptr inbounds i32, ptr [[TMP34]], i64 [[TMP33]]
-; RV64-NEXT: [[REVERSE4:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP29]])
-; RV64-NEXT: store <vscale x 4 x i32> [[REVERSE4]], ptr [[TMP35]], align 4
-; RV64-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP18]]
-; RV64-NEXT: [[TMP36:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; RV64-NEXT: br i1 [[TMP36]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; RV64-NEXT: [[TMP34:%.*]] = getelementptr i32, ptr [[TMP30]], i64 [[TMP31]]
+; RV64-NEXT: [[TMP35:%.*]] = getelementptr i32, ptr [[TMP34]], i64 [[TMP33]]
+; RV64-NEXT: [[VP_REVERSE3:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[TMP29]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP20]])
+; RV64-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[VP_REVERSE3]], ptr align 4 [[TMP35]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP20]])
+; RV64-NEXT: [[TMP36:%.*]] = zext i32 [[TMP20]] to i64
+; RV64-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP36]], [[INDEX]]
+; RV64-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP36]]
+; RV64-NEXT: [[TMP37:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP0]]
+; RV64-NEXT: br i1 [[TMP37]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; RV64: [[MIDDLE_BLOCK]]:
-; RV64-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
-; RV64-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]], label %[[SCALAR_PH]]
+; RV64-NEXT: br label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]]
; RV64: [[SCALAR_PH]]:
-; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP19]], %[[MIDDLE_BLOCK]] ], [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ]
-; RV64-NEXT: [[BC_RESUME_VAL5:%.*]] = phi i32 [ [[TMP20]], %[[MIDDLE_BLOCK]] ], [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_SCEVCHECK]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ]
+; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ]
+; RV64-NEXT: [[BC_RESUME_VAL4:%.*]] = phi i32 [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_SCEVCHECK]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ]
; RV64-NEXT: br label %[[FOR_BODY:.*]]
; RV64: [[FOR_COND_CLEANUP_LOOPEXIT]]:
; RV64-NEXT: br label %[[FOR_COND_CLEANUP]]
@@ -289,10 +272,7 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur
; RV32-NEXT: br i1 [[CMP7]], label %[[FOR_BODY_PREHEADER:.*]], label %[[FOR_COND_CLEANUP:.*]]
; RV32: [[FOR_BODY_PREHEADER]]:
; RV32-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64
-; RV32-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
-; RV32-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP1]], 4
-; RV32-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]]
-; RV32-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; RV32-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; RV32: [[VECTOR_MEMCHECK]]:
; RV32-NEXT: [[TMP3:%.*]] = call i32 @llvm.vscale.i32()
; RV32-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP3]], 4
@@ -301,50 +281,44 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur
; RV32-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i32 [[TMP6]], [[TMP5]]
; RV32-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; RV32: [[VECTOR_PH]]:
-; RV32-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; RV32-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
-; RV32-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP8]]
-; RV32-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
; RV32-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; RV32-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
-; RV32-NEXT: [[TMP11:%.*]] = sub i64 [[TMP0]], [[N_VEC]]
-; RV32-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32
-; RV32-NEXT: [[TMP12:%.*]] = sub i32 [[N]], [[DOTCAST]]
; RV32-NEXT: br label %[[VECTOR_BODY:.*]]
; RV32: [[VECTOR_BODY]]:
-; RV32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV32-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP0]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV32-NEXT: [[TMP16:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; RV32-NEXT: [[DOTCAST3:%.*]] = trunc i64 [[INDEX]] to i32
; RV32-NEXT: [[OFFSET_IDX:%.*]] = sub i32 [[N]], [[DOTCAST3]]
; RV32-NEXT: [[TMP13:%.*]] = add nsw i32 [[OFFSET_IDX]], -1
; RV32-NEXT: [[TMP14:%.*]] = zext i32 [[TMP13]] to i64
; RV32-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP14]]
-; RV32-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP10]] to i32
; RV32-NEXT: [[TMP17:%.*]] = mul i32 0, [[TMP16]]
; RV32-NEXT: [[TMP18:%.*]] = sub i32 [[TMP16]], 1
; RV32-NEXT: [[TMP19:%.*]] = mul i32 -1, [[TMP18]]
-; RV32-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[TMP15]], i32 [[TMP17]]
-; RV32-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[TMP20]], i32 [[TMP19]]
-; RV32-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP21]], align 4
-; RV32-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
+; RV32-NEXT: [[TMP20:%.*]] = getelementptr i32, ptr [[TMP15]], i32 [[TMP17]]
+; RV32-NEXT: [[TMP28:%.*]] = getelementptr i32, ptr [[TMP20]], i32 [[TMP19]]
+; RV32-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP28]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]])
+; RV32-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]])
; RV32-NEXT: [[TMP22:%.*]] = add <vscale x 4 x i32> [[REVERSE]], splat (i32 1)
; RV32-NEXT: [[TMP23:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP14]]
-; RV32-NEXT: [[TMP24:%.*]] = trunc i64 [[TMP10]] to i32
-; RV32-NEXT: [[TMP25:%.*]] = mul i32 0, [[TMP24]]
-; RV32-NEXT: [[TMP26:%.*]] = sub i32 [[TMP24]], 1
+; RV32-NEXT: [[TMP21:%.*]] = mul i32 0, [[TMP16]]
+; RV32-NEXT: [[TMP26:%.*]] = sub i32 [[TMP16]], 1
; RV32-NEXT: [[TMP27:%.*]] = mul i32 -1, [[TMP26]]
-; RV32-NEXT: [[TMP28:%.*]] = getelementptr inbounds i32, ptr [[TMP23]], i32 [[TMP25]]
-; RV32-NEXT: [[TMP29:%.*]] = getelementptr inbounds i32, ptr [[TMP28]], i32 [[TMP27]]
-; RV32-NEXT: [[REVERSE4:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP22]])
-; RV32-NEXT: store <vscale x 4 x i32> [[REVERSE4]], ptr [[TMP29]], align 4
-; RV32-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
-; RV32-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; RV32-NEXT: br i1 [[TMP30]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; RV32-NEXT: [[TMP24:%.*]] = getelementptr i32, ptr [[TMP23]], i32 [[TMP21]]
+; RV32-NEXT: [[TMP25:%.*]] = getelementptr i32, ptr [[TMP24]], i32 [[TMP27]]
+; RV32-NEXT: [[VP_REVERSE3:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[TMP22]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]])
+; RV32-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[VP_REVERSE3]], ptr align 4 [[TMP25]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]])
+; RV32-NEXT: [[TMP29:%.*]] = zext i32 [[TMP16]] to i64
+; RV32-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP29]], [[INDEX]]
+; RV32-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP29]]
+; RV32-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP0]]
+; RV32-NEXT: br i1 [[TMP30]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; RV32: [[MIDDLE_BLOCK]]:
-; RV32-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
-; RV32-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]], label %[[SCALAR_PH]]
+; RV32-NEXT: br label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]]
; RV32: [[SCALAR_PH]]:
-; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP11]], %[[MIDDLE_BLOCK]] ], [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ]
-; RV32-NEXT: [[BC_RESUME_VAL5:%.*]] = phi i32 [ [[TMP12]], %[[MIDDLE_BLOCK]] ], [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ]
+; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ]
+; RV32-NEXT: [[BC_RESUME_VAL4:%.*]] = phi i32 [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ]
; RV32-NEXT: br label %[[FOR_BODY:.*]]
; RV32: [[FOR_COND_CLEANUP_LOOPEXIT]]:
; RV32-NEXT: br label %[[FOR_COND_CLEANUP]]
@@ -386,15 +360,13 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur
; RV64-UF2-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP14]], [[TMP13]]
; RV64-UF2-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; RV64-UF2: [[VECTOR_PH]]:
-; RV64-UF2-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
-; RV64-UF2-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 8
-; RV64-UF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP16]]
-; RV64-UF2-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
; RV64-UF2-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64()
; RV64-UF2-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 4
; RV64-UF2-NEXT: [[TMP19:%.*]] = mul i64 [[TMP18]], 2
+; RV64-UF2-NEXT: [[N_VEC:%.*]] = urem i64 [[TMP0]], [[TMP19]]
; RV64-UF2-NEXT: [[TMP20:%.*]] = sub i64 [[TMP0]], [[N_VEC]]
-; RV64-UF2-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32
+; RV64-UF2-NEXT: [[TMP48:%.*]] = sub i64 [[TMP0]], [[TMP20]]
+; RV64-UF2-NEXT: [[DOTCAST:%.*]] = trunc i64 [[TMP20]] to i32
; RV64-UF2-NEXT: [[TMP21:%.*]] = sub i32 [[N]], [[DOTCAST]]
; RV64-UF2-NEXT: br label %[[VECTOR_BODY:.*]]
; RV64-UF2: [[VECTOR_BODY]]:
@@ -436,13 +408,13 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur
; RV64-UF2-NEXT: [[REVERSE7:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP36]])
; RV64-UF2-NEXT: store <vscale x 4 x i32> [[REVERSE7]], ptr [[TMP47]], align 4
; RV64-UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP19]]
-; RV64-UF2-NEXT: [[TMP48:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; RV64-UF2-NEXT: br i1 [[TMP48]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; RV64-UF2-NEXT: [[TMP49:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[TMP20]]
+; RV64-UF2-NEXT: br i1 [[TMP49]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; RV64-UF2: [[MIDDLE_BLOCK]]:
-; RV64-UF2-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
+; RV64-UF2-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[TMP20]]
; RV64-UF2-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]], label %[[SCALAR_PH]]
; RV64-UF2: [[SCALAR_PH]]:
-; RV64-UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP20]], %[[MIDDLE_BLOCK]] ], [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ]
+; RV64-UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP48]], %[[MIDDLE_BLOCK]] ], [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ]
; RV64-UF2-NEXT: [[BC_RESUME_VAL8:%.*]] = phi i32 [ [[TMP21]], %[[MIDDLE_BLOCK]] ], [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_SCEVCHECK]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ]
; RV64-UF2-NEXT: br label %[[FOR_BODY:.*]]
; RV64-UF2: [[FOR_COND_CLEANUP_LOOPEXIT]]:
@@ -487,10 +459,7 @@ define void @vector_reverse_f32(ptr nocapture noundef writeonly %A, ptr nocaptur
; RV64-NEXT: br i1 [[CMP7]], label %[[FOR_BODY_PREHEADER:.*]], label %[[FOR_COND_CLEANUP:.*]]
; RV64: [[FOR_BODY_PREHEADER]]:
; RV64-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64
-; RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
-; RV64-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP1]], 4
-; RV64-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]]
-; RV64-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
+; RV64-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
; RV64: [[VECTOR_SCEVCHECK]]:
; RV64-NEXT: [[TMP3:%.*]] = add nsw i64 [[TMP0]], -1
; RV64-NEXT: [[TMP4:%.*]] = add i32 [[N]], -1
@@ -512,48 +481,46 @@ define void @vector_reverse_f32(ptr nocapture noundef writeonly %A, ptr nocaptur
; RV64-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP14]], [[TMP13]]
; RV64-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; RV64: [[VECTOR_PH]]:
-; RV64-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
-; RV64-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 4
-; RV64-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP16]]
-; RV64-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
; RV64-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64()
; RV64-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 4
-; RV64-NEXT: [[TMP19:%.*]] = sub i64 [[TMP0]], [[N_VEC]]
-; RV64-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32
-; RV64-NEXT: [[TMP20:%.*]] = sub i32 [[N]], [[DOTCAST]]
; RV64-NEXT: br label %[[VECTOR_BODY:.*]]
; RV64: [[VECTOR_BODY]]:
-; RV64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV64-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP0]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV64-NEXT: [[TMP20:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; RV64-NEXT: [[DOTCAST3:%.*]] = trunc i64 [[INDEX]] to i32
; RV64-NEXT: [[OFFSET_IDX:%.*]] = sub i32 [[N]], [[DOTCAST3]]
; RV64-NEXT: [[TMP21:%.*]] = add nsw i32 [[OFFSET_IDX]], -1
; RV64-NEXT: [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
; RV64-NEXT: [[TMP23:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP22]]
-; RV64-NEXT: [[TMP24:%.*]] = mul i64 0, [[TMP18]]
-; RV64-NEXT: [[TMP25:%.*]] = sub i64 [[TMP18]], 1
+; RV64-NEXT: [[TMP24:%.*]] = zext i32 [[TMP20]] to i64
+; RV64-NEXT: [[TMP28:%.*]] = mul i64 0, [[TMP24]]
+; RV64-NEXT: [[TMP25:%.*]] = sub i64 [[TMP24]], 1
; RV64-NEXT: [[TMP26:%.*]] = mul i64 -1, [[TMP25]]
-; RV64-NEXT: [[TMP27:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i64 [[TMP24]]
-; RV64-NEXT: [[TMP28:%.*]] = getelementptr inbounds float, ptr [[TMP27]], i64 [[TMP26]]
-; RV64-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP28]], align 4
-; RV64-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]])
+; RV64-NEXT: [[TMP38:%.*]] = getelementptr float, ptr [[TMP23]], i64 [[TMP28]]
+; RV64-NEXT: [[TMP27:%.*]] = getelementptr float, ptr [[TMP38]], i64 [[TMP26]]
+; RV64-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP27]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP20]])
+; RV64-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x float> @llvm.experimental.vp.reverse.nxv4f32(<vscale x 4 x float> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP20]])
; RV64-NEXT: [[TMP29:%.*]] = fadd <vscale x 4 x float> [[REVERSE]], splat (float 1.000000e+00)
; RV64-NEXT: [[TMP30:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP22]]
-; RV64-NEXT: [[TMP31:%.*]] = mul i64 0, [[TMP18]]
-; RV64-NEXT: [[TMP32:%.*]] = sub i64 [[TMP18]], 1
+; RV64-NEXT: [[TMP39:%.*]] = zext i32 [[TMP20]] to i64
+; RV64-NEXT: [[TMP31:%.*]] = mul i64 0, [[TMP39]]
+; RV64-NEXT: [[TMP32:%.*]] = sub i64 [[TMP39]], 1
; RV64-NEXT: [[TMP33:%.*]] = mul i64 -1, [[TMP32]]
-; RV64-NEXT: [[TMP34:%.*]] = getelementptr inbounds float, ptr [[TMP30]], i64 [[TMP31]]
-; RV64-NEXT: [[TMP35:%.*]] = getelementptr inbounds float, ptr [[TMP34]], i64 [[TMP33]]
-; RV64-NEXT: [[REVERSE4:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[TMP29]])
-; RV64-NEXT: store <vscale x 4 x float> [[REVERSE4]], ptr [[TMP35]], align 4
-; RV64-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP18]]
-; RV64-NEXT: [[TMP36:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; RV64-NEXT: br i1 [[TMP36]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; RV64-NEXT: [[TMP34:%.*]] = getelementptr float, ptr [[TMP30]], i64 [[TMP31]]
+; RV64-NEXT: [[TMP35:%.*]] = getelementptr float, ptr [[TMP34]], i64 [[TMP33]]
+; RV64-NEXT: [[VP_REVERSE3:%.*]] = call <vscale x 4 x float> @llvm.experimental.vp.reverse.nxv4f32(<vscale x 4 x float> [[TMP29]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP20]])
+; RV64-NEXT: call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> [[VP_REVERSE3]], ptr align 4 [[TMP35]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP20]])
+; RV64-NEXT: [[TMP36:%.*]] = zext i32 [[TMP20]] to i64
+; RV64-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP36]], [[INDEX]]
+; RV64-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP36]]
+; RV64-NEXT: [[TMP37:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP0]]
+; RV64-NEXT: br i1 [[TMP37]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; RV64: [[MIDDLE_BLOCK]]:
-; RV64-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
-; RV64-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]], label %[[SCALAR_PH]]
+; RV64-NEXT: br label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]]
; RV64: [[SCALAR_PH]]:
-; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP19]], %[[MIDDLE_BLOCK]] ], [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ]
-; RV64-NEXT: [[BC_RESUME_VAL5:%.*]] = phi i32 [ [[TMP20]], %[[MIDDLE_BLOCK]] ], [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_SCEVCHECK]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ]
+; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ]
+; RV64-NEXT: [[BC_RESUME_VAL4:%.*]] = phi i32 [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_SCEVCHECK]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ]
; RV64-NEXT: br label %[[FOR_BODY:.*]]
; RV64: [[FOR_COND_CLEANUP_LOOPEXIT]]:
; RV64-NEXT: br label %[[FOR_COND_CLEANUP]]
@@ -570,10 +537,7 @@ define void @vector_reverse_f32(ptr nocapture noundef writeonly %A, ptr nocaptur
; RV32-NEXT: br i1 [[CMP7]], label %[[FOR_BODY_PREHEADER:.*]], label %[[FOR_COND_CLEANUP:.*]]
; RV32: [[FOR_BODY_PREHEADER]]:
; RV32-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64
-; RV32-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
-; RV32-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP1]], 4
-; RV32-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]]
-; RV32-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; RV32-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; RV32: [[VECTOR_MEMCHECK]]:
; RV32-NEXT: [[TMP3:%.*]] = call i32 @llvm.vscale.i32()
; RV32-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP3]], 4
@@ -582,50 +546,44 @@ define void @vector_reverse_f32(ptr nocapture noundef writeonly %A, ptr nocaptur
; RV32-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i32 [[TMP6]], [[TMP5]]
; RV32-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; RV32: [[VECTOR_PH]]:
-; RV32-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; RV32-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
-; RV32-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP8]]
-; RV32-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
; RV32-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; RV32-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
-; RV32-NEXT: [[TMP11:%.*]] = sub i64 [[TMP0]], [[N_VEC]]
-; RV32-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32
-; RV32-NEXT: [[TMP12:%.*]] = sub i32 [[N]], [[DOTCAST]]
; RV32-NEXT: br label %[[VECTOR_BODY:.*]]
; RV32: [[VECTOR_BODY]]:
-; RV32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV32-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP0]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV32-NEXT: [[TMP16:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; RV32-NEXT: [[DOTCAST3:%.*]] = trunc i64 [[INDEX]] to i32
; RV32-NEXT: [[OFFSET_IDX:%.*]] = sub i32 [[N]], [[DOTCAST3]]
; RV32-NEXT: [[TMP13:%.*]] = add nsw i32 [[OFFSET_IDX]], -1
; RV32-NEXT: [[TMP14:%.*]] = zext i32 [[TMP13]] to i64
; RV32-NEXT: [[TMP15:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP14]]
-; RV32-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP10]] to i32
; RV32-NEXT: [[TMP17:%.*]] = mul i32 0, [[TMP16]]
; RV32-NEXT: [[TMP18:%.*]] = sub i32 [[TMP16]], 1
; RV32-NEXT: [[TMP19:%.*]] = mul i32 -1, [[TMP18]]
-; RV32-NEXT: [[TMP20:%.*]] = getelementptr inbounds float, ptr [[TMP15]], i32 [[TMP17]]
-; RV32-NEXT: [[TMP21:%.*]] = getelementptr inbounds float, ptr [[TMP20]], i32 [[TMP19]]
-; RV32-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP21]], align 4
-; RV32-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]])
+; RV32-NEXT: [[TMP20:%.*]] = getelementptr float, ptr [[TMP15]], i32 [[TMP17]]
+; RV32-NEXT: [[TMP28:%.*]] = getelementptr float, ptr [[TMP20]], i32 [[TMP19]]
+; RV32-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP28]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]])
+; RV32-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x float> @llvm.experimental.vp.reverse.nxv4f32(<vscale x 4 x float> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]])
; RV32-NEXT: [[TMP22:%.*]] = fadd <vscale x 4 x float> [[REVERSE]], splat (float 1.000000e+00)
; RV32-NEXT: [[TMP23:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP14]]
-; RV32-NEXT: [[TMP24:%.*]] = trunc i64 [[TMP10]] to i32
-; RV32-NEXT: [[TMP25:%.*]] = mul i32 0, [[TMP24]]
-; RV32-NEXT: [[TMP26:%.*]] = sub i32 [[TMP24]], 1
+; RV32-NEXT: [[TMP21:%.*]] = mul i32 0, [[TMP16]]
+; RV32-NEXT: [[TMP26:%.*]] = sub i32 [[TMP16]], 1
; RV32-NEXT: [[TMP27:%.*]] = mul i32 -1, [[TMP26]]
-; RV32-NEXT: [[TMP28:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i32 [[TMP25]]
-; RV32-NEXT: [[TMP29:%.*]] = getelementptr inbounds float, ptr [[TMP28]], i32 [[TMP27]]
-; RV32-NEXT: [[REVERSE4:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[TMP22]])
-; RV32-NEXT: store <vscale x 4 x float> [[REVERSE4]], ptr [[TMP29]], align 4
-; RV32-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
-; RV32-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; RV32-NEXT: br i1 [[TMP30]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; RV32-NEXT: [[TMP24:%.*]] = getelementptr float, ptr [[TMP23]], i32 [[TMP21]]
+; RV32-NEXT: [[TMP25:%.*]] = getelementptr float, ptr [[TMP24]], i32 [[TMP27]]
+; RV32-NEXT: [[VP_REVERSE3:%.*]] = call <vscale x 4 x float> @llvm.experimental.vp.reverse.nxv4f32(<vscale x 4 x float> [[TMP22]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]])
+; RV32-NEXT: call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> [[VP_REVERSE3]], ptr align 4 [[TMP25]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]])
+; RV32-NEXT: [[TMP29:%.*]] = zext i32 [[TMP16]] to i64
+; RV32-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP29]], [[INDEX]]
+; RV32-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP29]]
+; RV32-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP0]]
+; RV32-NEXT: br i1 [[TMP30]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; RV32: [[MIDDLE_BLOCK]]:
-; RV32-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
-; RV32-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]], label %[[SCALAR_PH]]
+; RV32-NEXT: br label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]]
; RV32: [[SCALAR_PH]]:
-; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP11]], %[[MIDDLE_BLOCK]] ], [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ]
-; RV32-NEXT: [[BC_RESUME_VAL5:%.*]] = phi i32 [ [[TMP12]], %[[MIDDLE_BLOCK]] ], [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ]
+; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ]
+; RV32-NEXT: [[BC_RESUME_VAL4:%.*]] = phi i32 [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ]
; RV32-NEXT: br label %[[FOR_BODY:.*]]
; RV32: [[FOR_COND_CLEANUP_LOOPEXIT]]:
; RV32-NEXT: br label %[[FOR_COND_CLEANUP]]
@@ -667,15 +625,13 @@ define void @vector_reverse_f32(ptr nocapture noundef writeonly %A, ptr nocaptur
; RV64-UF2-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP14]], [[TMP13]]
; RV64-UF2-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; RV64-UF2: [[VECTOR_PH]]:
-; RV64-UF2-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
-; RV64-UF2-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 8
-; RV64-UF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP16]]
-; RV64-UF2-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
; RV64-UF2-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64()
; RV64-UF2-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 4
; RV64-UF2-NEXT: [[TMP19:%.*]] = mul i64 [[TMP18]], 2
+; RV64-UF2-NEXT: [[N_VEC:%.*]] = urem i64 [[TMP0]], [[TMP19]]
; RV64-UF2-NEXT: [[TMP20:%.*]] = sub i64 [[TMP0]], [[N_VEC]]
-; RV64-UF2-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32
+; RV64-UF2-NEXT: [[TMP48:%.*]] = sub i64 [[TMP0]], [[TMP20]]
+; RV64-UF2-NEXT: [[DOTCAST:%.*]] = trunc i64 [[TMP20]] to i32
; RV64-UF2-NEXT: [[TMP21:%.*]] = sub i32 [[N]], [[DOTCAST]]
; RV64-UF2-NEXT: br label %[[VECTOR_BODY:.*]]
; RV64-UF2: [[VECTOR_BODY]]:
@@ -717,13 +673,13 @@ define void @vector_reverse_f32(ptr nocapture noundef writeonly %A, ptr nocaptur
; RV64-UF2-NEXT: [[REVERSE7:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[TMP36]])
; RV64-UF2-NEXT: store <vscale x 4 x float> [[REVERSE7]], ptr [[TMP47]], align 4
; RV64-UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP19]]
-; RV64-UF2-NEXT: [[TMP48:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; RV64-UF2-NEXT: br i1 [[TMP48]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; RV64-UF2-NEXT: [[TMP49:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[TMP20]]
+; RV64-UF2-NEXT: br i1 [[TMP49]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; RV64-UF2: [[MIDDLE_BLOCK]]:
-; RV64-UF2-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
+; RV64-UF2-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[TMP20]]
; RV64-UF2-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]], label %[[SCALAR_PH]]
; RV64-UF2: [[SCALAR_PH]]:
-; RV64-UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP20]], %[[MIDDLE_BLOCK]] ], [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ]
+; RV64-UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP48]], %[[MIDDLE_BLOCK]] ], [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ]
; RV64-UF2-NEXT: [[BC_RESUME_VAL8:%.*]] = phi i32 [ [[TMP21]], %[[MIDDLE_BLOCK]] ], [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_SCEVCHECK]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ]
; RV64-UF2-NEXT: br label %[[FOR_BODY:.*]]
; RV64-UF2: [[FOR_COND_CLEANUP_LOOPEXIT]]:
@@ -762,98 +718,88 @@ define void @vector_reverse_f32_simplify(ptr noalias %A, ptr noalias %B) {
; RV64-LABEL: define void @vector_reverse_f32_simplify(
; RV64-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0]] {
; RV64-NEXT: [[ENTRY:.*]]:
-; RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; RV64-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; RV64-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1023, [[TMP1]]
-; RV64-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; RV64-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; RV64: [[VECTOR_PH]]:
-; RV64-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; RV64-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; RV64-NEXT: [[N_MOD_VF:%.*]] = urem i64 1023, [[TMP3]]
-; RV64-NEXT: [[N_VEC:%.*]] = sub i64 1023, [[N_MOD_VF]]
; RV64-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; RV64-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; RV64-NEXT: [[TMP6:%.*]] = sub i64 1023, [[N_VEC]]
; RV64-NEXT: br label %[[VECTOR_BODY:.*]]
; RV64: [[VECTOR_BODY]]:
-; RV64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV64-NEXT: [[AVL:%.*]] = phi i64 [ 1023, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV64-NEXT: [[TMP19:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; RV64-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]]
; RV64-NEXT: [[TMP7:%.*]] = add nsw i64 [[OFFSET_IDX]], -1
; RV64-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP7]]
-; RV64-NEXT: [[TMP9:%.*]] = mul i64 0, [[TMP5]]
-; RV64-NEXT: [[TMP10:%.*]] = sub i64 [[TMP5]], 1
+; RV64-NEXT: [[TMP24:%.*]] = zext i32 [[TMP19]] to i64
+; RV64-NEXT: [[TMP9:%.*]] = mul i64 0, [[TMP24]]
+; RV64-NEXT: [[TMP10:%.*]] = sub i64 [[TMP24]], 1
; RV64-NEXT: [[TMP11:%.*]] = mul i64 -1, [[TMP10]]
-; RV64-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 [[TMP9]]
-; RV64-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i64 [[TMP11]]
-; RV64-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP13]], align 4
-; RV64-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]])
+; RV64-NEXT: [[TMP12:%.*]] = getelementptr float, ptr [[TMP8]], i64 [[TMP9]]
+; RV64-NEXT: [[TMP13:%.*]] = getelementptr float, ptr [[TMP12]], i64 [[TMP11]]
+; RV64-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP13]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP19]])
+; RV64-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x float> @llvm.experimental.vp.reverse.nxv4f32(<vscale x 4 x float> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP19]])
; RV64-NEXT: [[TMP14:%.*]] = fadd <vscale x 4 x float> [[REVERSE]], splat (float 1.000000e+00)
; RV64-NEXT: [[TMP15:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP7]]
-; RV64-NEXT: [[TMP16:%.*]] = mul i64 0, [[TMP5]]
-; RV64-NEXT: [[TMP17:%.*]] = sub i64 [[TMP5]], 1
+; RV64-NEXT: [[TMP16:%.*]] = zext i32 [[TMP19]] to i64
+; RV64-NEXT: [[TMP25:%.*]] = mul i64 0, [[TMP16]]
+; RV64-NEXT: [[TMP17:%.*]] = sub i64 [[TMP16]], 1
; RV64-NEXT: [[TMP18:%.*]] = mul i64 -1, [[TMP17]]
-; RV64-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP15]], i64 [[TMP16]]
-; RV64-NEXT: [[TMP20:%.*]] = getelementptr inbounds float, ptr [[TMP19]], i64 [[TMP18]]
-; RV64-NEXT: [[REVERSE1:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[TMP14]])
-; RV64-NEXT: store <vscale x 4 x float> [[REVERSE1]], ptr [[TMP20]], align 4
-; RV64-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; RV64-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; RV64-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; RV64-NEXT: [[TMP20:%.*]] = getelementptr float, ptr [[TMP15]], i64 [[TMP25]]
+; RV64-NEXT: [[TMP21:%.*]] = getelementptr float, ptr [[TMP20]], i64 [[TMP18]]
+; RV64-NEXT: [[VP_REVERSE1:%.*]] = call <vscale x 4 x float> @llvm.experimental.vp.reverse.nxv4f32(<vscale x 4 x float> [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP19]])
+; RV64-NEXT: call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> [[VP_REVERSE1]], ptr align 4 [[TMP21]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP19]])
+; RV64-NEXT: [[TMP22:%.*]] = zext i32 [[TMP19]] to i64
+; RV64-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP22]], [[INDEX]]
+; RV64-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP22]]
+; RV64-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1023
+; RV64-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; RV64: [[MIDDLE_BLOCK]]:
-; RV64-NEXT: [[CMP_N:%.*]] = icmp eq i64 1023, [[N_VEC]]
-; RV64-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]]
+; RV64-NEXT: br [[EXIT:label %.*]]
; RV64: [[SCALAR_PH]]:
-; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP6]], %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ]
+; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 1023, %[[ENTRY]] ]
; RV64-NEXT: br label %[[FOR_BODY:.*]]
; RV64: [[FOR_BODY]]:
;
; RV32-LABEL: define void @vector_reverse_f32_simplify(
; RV32-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0]] {
; RV32-NEXT: [[ENTRY:.*]]:
-; RV32-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; RV32-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; RV32-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1023, [[TMP1]]
-; RV32-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; RV32-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; RV32: [[VECTOR_PH]]:
-; RV32-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; RV32-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; RV32-NEXT: [[N_MOD_VF:%.*]] = urem i64 1023, [[TMP3]]
-; RV32-NEXT: [[N_VEC:%.*]] = sub i64 1023, [[N_MOD_VF]]
; RV32-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; RV32-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; RV32-NEXT: [[TMP6:%.*]] = sub i64 1023, [[N_VEC]]
; RV32-NEXT: br label %[[VECTOR_BODY:.*]]
; RV32: [[VECTOR_BODY]]:
-; RV32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV32-NEXT: [[AVL:%.*]] = phi i64 [ 1023, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV32-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; RV32-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]]
; RV32-NEXT: [[TMP7:%.*]] = add nsw i64 [[OFFSET_IDX]], -1
; RV32-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP7]]
-; RV32-NEXT: [[TMP9:%.*]] = trunc i64 [[TMP5]] to i32
; RV32-NEXT: [[TMP10:%.*]] = mul i32 0, [[TMP9]]
; RV32-NEXT: [[TMP11:%.*]] = sub i32 [[TMP9]], 1
; RV32-NEXT: [[TMP12:%.*]] = mul i32 -1, [[TMP11]]
-; RV32-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 [[TMP10]]
-; RV32-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i32 [[TMP12]]
-; RV32-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP14]], align 4
-; RV32-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]])
+; RV32-NEXT: [[TMP13:%.*]] = getelementptr float, ptr [[TMP8]], i32 [[TMP10]]
+; RV32-NEXT: [[TMP14:%.*]] = getelementptr float, ptr [[TMP13]], i32 [[TMP12]]
+; RV32-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
+; RV32-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x float> @llvm.experimental.vp.reverse.nxv4f32(<vscale x 4 x float> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
; RV32-NEXT: [[TMP15:%.*]] = fadd <vscale x 4 x float> [[REVERSE]], splat (float 1.000000e+00)
; RV32-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP7]]
-; RV32-NEXT: [[TMP17:%.*]] = trunc i64 [[TMP5]] to i32
-; RV32-NEXT: [[TMP18:%.*]] = mul i32 0, [[TMP17]]
-; RV32-NEXT: [[TMP19:%.*]] = sub i32 [[TMP17]], 1
+; RV32-NEXT: [[TMP17:%.*]] = mul i32 0, [[TMP9]]
+; RV32-NEXT: [[TMP19:%.*]] = sub i32 [[TMP9]], 1
; RV32-NEXT: [[TMP20:%.*]] = mul i32 -1, [[TMP19]]
-; RV32-NEXT: [[TMP21:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i32 [[TMP18]]
-; RV32-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[TMP21]], i32 [[TMP20]]
-; RV32-NEXT: [[REVERSE1:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[TMP15]])
-; RV32-NEXT: store <vscale x 4 x float> [[REVERSE1]], ptr [[TMP22]], align 4
-; RV32-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; RV32-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; RV32-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; RV32-NEXT: [[TMP18:%.*]] = getelementptr float, ptr [[TMP16]], i32 [[TMP17]]
+; RV32-NEXT: [[TMP22:%.*]] = getelementptr float, ptr [[TMP18]], i32 [[TMP20]]
+; RV32-NEXT: [[VP_REVERSE1:%.*]] = call <vscale x 4 x float> @llvm.experimental.vp.reverse.nxv4f32(<vscale x 4 x float> [[TMP15]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
+; RV32-NEXT: call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> [[VP_REVERSE1]], ptr align 4 [[TMP22]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
+; RV32-NEXT: [[TMP23:%.*]] = zext i32 [[TMP9]] to i64
+; RV32-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP23]], [[INDEX]]
+; RV32-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP23]]
+; RV32-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1023
+; RV32-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; RV32: [[MIDDLE_BLOCK]]:
-; RV32-NEXT: [[CMP_N:%.*]] = icmp eq i64 1023, [[N_VEC]]
-; RV32-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]]
+; RV32-NEXT: br [[EXIT:label %.*]]
; RV32: [[SCALAR_PH]]:
-; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP6]], %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ]
+; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 1023, %[[ENTRY]] ]
; RV32-NEXT: br label %[[FOR_BODY:.*]]
; RV32: [[FOR_BODY]]:
;
@@ -865,14 +811,12 @@ define void @vector_reverse_f32_simplify(ptr noalias %A, ptr noalias %B) {
; RV64-UF2-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1023, [[TMP1]]
; RV64-UF2-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; RV64-UF2: [[VECTOR_PH]]:
-; RV64-UF2-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; RV64-UF2-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
-; RV64-UF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 1023, [[TMP3]]
-; RV64-UF2-NEXT: [[N_VEC:%.*]] = sub i64 1023, [[N_MOD_VF]]
; RV64-UF2-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; RV64-UF2-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; RV64-UF2-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 2
+; RV64-UF2-NEXT: [[N_VEC:%.*]] = urem i64 1023, [[TMP6]]
; RV64-UF2-NEXT: [[TMP7:%.*]] = sub i64 1023, [[N_VEC]]
+; RV64-UF2-NEXT: [[TMP33:%.*]] = sub i64 1023, [[TMP7]]
; RV64-UF2-NEXT: br label %[[VECTOR_BODY:.*]]
; RV64-UF2: [[VECTOR_BODY]]:
; RV64-UF2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -911,13 +855,13 @@ define void @vector_reverse_f32_simplify(ptr noalias %A, ptr noalias %B) {
; RV64-UF2-NEXT: [[REVERSE4:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[TMP21]])
; RV64-UF2-NEXT: store <vscale x 4 x float> [[REVERSE4]], ptr [[TMP32]], align 4
; RV64-UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
-; RV64-UF2-NEXT: [[TMP33:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; RV64-UF2-NEXT: br i1 [[TMP33]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; RV64-UF2-NEXT: [[TMP34:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[TMP7]]
+; RV64-UF2-NEXT: br i1 [[TMP34]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; RV64-UF2: [[MIDDLE_BLOCK]]:
-; RV64-UF2-NEXT: [[CMP_N:%.*]] = icmp eq i64 1023, [[N_VEC]]
+; RV64-UF2-NEXT: [[CMP_N:%.*]] = icmp eq i64 1023, [[TMP7]]
; RV64-UF2-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]]
; RV64-UF2: [[SCALAR_PH]]:
-; RV64-UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP7]], %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ]
+; RV64-UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP33]], %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ]
; RV64-UF2-NEXT: br label %[[FOR_BODY:.*]]
; RV64-UF2: [[FOR_BODY]]:
;
@@ -984,7 +928,7 @@ define void @vector_reverse_irregular_type(ptr noalias %A, ptr noalias %B) {
; RV64-NEXT: store i7 [[TMP28]], ptr [[TMP24]], align 1
; RV64-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; RV64-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1020
-; RV64-NEXT: br i1 [[TMP29]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; RV64-NEXT: br i1 [[TMP29]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; RV64: [[MIDDLE_BLOCK]]:
; RV64-NEXT: br label %[[SCALAR_PH]]
; RV64: [[SCALAR_PH]]:
@@ -1036,7 +980,7 @@ define void @vector_reverse_irregular_type(ptr noalias %A, ptr noalias %B) {
; RV32-NEXT: store i7 [[TMP28]], ptr [[TMP24]], align 1
; RV32-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; RV32-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1020
-; RV32-NEXT: br i1 [[TMP29]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; RV32-NEXT: br i1 [[TMP29]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; RV32: [[MIDDLE_BLOCK]]:
; RV32-NEXT: br label %[[SCALAR_PH]]
; RV32: [[SCALAR_PH]]:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/safe-dep-distance.ll b/llvm/test/Transforms/LoopVectorize/RISCV/safe-dep-distance.ll
index 3370e92..10d74c0 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/safe-dep-distance.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/safe-dep-distance.ll
@@ -10,36 +10,32 @@ target triple = "riscv64"
define void @test(ptr %p) {
; CHECK-LABEL: @test(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 200, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 200, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 200, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 200, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP7]], align 32
+; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 32 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP8]])
; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], 200
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP9]]
-; CHECK-NEXT: store <vscale x 2 x i64> [[WIDE_LOAD]], ptr [[TMP10]], align 32
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[VP_OP_LOAD]], ptr align 32 [[TMP10]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP8]])
+; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP8]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200
; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 200, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]]
; CHECK-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32
; CHECK-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 200
@@ -47,7 +43,7 @@ define void @test(ptr %p) {
; CHECK-NEXT: store i64 [[V]], ptr [[A2]], align 32
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199
-; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -86,7 +82,7 @@ define void @test_may_clobber(ptr %p) {
; CHECK-NEXT: store <4 x i64> [[WIDE_LOAD]], ptr [[TMP4]], align 32
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200
-; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
@@ -101,7 +97,7 @@ define void @test_may_clobber(ptr %p) {
; CHECK-NEXT: store i64 [[V]], ptr [[A2]], align 32
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199
-; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -127,36 +123,32 @@ exit:
define void @trivial_due_max_vscale(ptr %p) {
; CHECK-LABEL: @trivial_due_max_vscale(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 200, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 200, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 200, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 200, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP7]], align 32
+; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 32 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP8]])
; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], 8192
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP9]]
-; CHECK-NEXT: store <vscale x 2 x i64> [[WIDE_LOAD]], ptr [[TMP10]], align 32
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[VP_OP_LOAD]], ptr align 32 [[TMP10]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP8]])
+; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP8]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200
+; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 200, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]]
; CHECK-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32
; CHECK-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 8192
@@ -164,7 +156,7 @@ define void @trivial_due_max_vscale(ptr %p) {
; CHECK-NEXT: store i64 [[V]], ptr [[A2]], align 32
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199
-; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -190,36 +182,32 @@ exit:
define void @no_high_lmul_or_interleave(ptr %p) {
; CHECK-LABEL: @no_high_lmul_or_interleave(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 200, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 200, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 200, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 200, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP7]], align 32
+; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 32 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP8]])
; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], 1024
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP9]]
-; CHECK-NEXT: store <vscale x 2 x i64> [[WIDE_LOAD]], ptr [[TMP10]], align 32
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[VP_OP_LOAD]], ptr align 32 [[TMP10]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP8]])
+; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP8]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200
+; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 200, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]]
; CHECK-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32
; CHECK-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 1024
@@ -227,7 +215,7 @@ define void @no_high_lmul_or_interleave(ptr %p) {
; CHECK-NEXT: store i64 [[V]], ptr [[A2]], align 32
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199
-; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -277,7 +265,7 @@ define void @safe_load_store_distance_not_pow_of_2(i64 %N) {
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <8 x i64> [[VEC_IND]], splat (i64 24)
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[SCALAR_PH]]
; CHECK: scalar.ph:
@@ -291,7 +279,7 @@ define void @safe_load_store_distance_not_pow_of_2(i64 %N) {
; CHECK-NEXT: store i16 0, ptr [[GEP_OFF]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[IV]], [[N]]
-; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT:%.*]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT:%.*]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-basics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-basics.ll
index e51f6fa..8bfeac8 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-basics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-basics.ll
@@ -8,15 +8,8 @@
define void @vector_add(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-LABEL: @vector_add(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0
@@ -24,28 +17,31 @@ define void @vector_add(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP6]], align 8
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP6]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
; CHECK-NEXT: [[TMP8:%.*]] = add <vscale x 2 x i64> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: store <vscale x 2 x i64> [[TMP8]], ptr [[TMP6]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP8]], ptr align 8 [[TMP6]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
+; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[ADD:%.*]] = add i64 [[ELEM]], [[V]]
; CHECK-NEXT: store i64 [[ADD]], ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -72,15 +68,8 @@ for.end:
define void @vector_add_i32(ptr noalias nocapture %a, i32 %v, i64 %n) {
; CHECK-LABEL: @vector_add_i32(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[V:%.*]], i64 0
@@ -88,28 +77,31 @@ define void @vector_add_i32(ptr noalias nocapture %a, i32 %v, i64 %n) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
; CHECK-NEXT: [[TMP8:%.*]] = add <vscale x 4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: store <vscale x 4 x i32> [[TMP8]], ptr [[TMP6]], align 4
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP8]], ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[ELEM:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[ELEM]], [[V]]
; CHECK-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -174,15 +166,8 @@ for.end:
define void @indexed_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i64 %v, i64 %n) {
; CHECK-LABEL: @indexed_store(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0
@@ -190,28 +175,31 @@ define void @indexed_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP6]], align 8
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP6]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], <vscale x 2 x i64> [[WIDE_LOAD]]
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv2i64.nxv2p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], <vscale x 2 x ptr> [[TMP8]], i32 8, <vscale x 2 x i1> splat (i1 true))
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv2i64.nxv2p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], <vscale x 2 x ptr> align 8 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
+; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[BADDR:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV]]
; CHECK-NEXT: [[AIDX:%.*]] = load i64, ptr [[BADDR]], align 8
; CHECK-NEXT: [[AADDR:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[AIDX]]
; CHECK-NEXT: store i64 [[V]], ptr [[AADDR]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -235,40 +223,37 @@ for.end:
define i64 @indexed_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i64 %v, i64 %n) {
; CHECK-LABEL: @indexed_load(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP6]], align 8
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP6]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]])
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], <vscale x 2 x i64> [[WIDE_LOAD]]
-; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64.nxv2p0(<vscale x 2 x ptr> [[TMP8]], i32 8, <vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> poison)
-; CHECK-NEXT: [[TMP9]] = add <vscale x 2 x i64> [[VEC_PHI]], [[WIDE_MASKED_GATHER]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i64> @llvm.vp.gather.nxv2i64.nxv2p0(<vscale x 2 x ptr> align 8 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]])
+; CHECK-NEXT: [[TMP13:%.*]] = add <vscale x 2 x i64> [[VEC_PHI]], [[WIDE_MASKED_GATHER]]
+; CHECK-NEXT: [[TMP9]] = call <vscale x 2 x i64> @llvm.vp.merge.nxv2i64(<vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> [[TMP13]], <vscale x 2 x i64> [[VEC_PHI]], i32 [[TMP12]])
+; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP12]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP10]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]]
+; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[TMP9]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[TMP11]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ 0, [[ENTRY]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM:%.*]] = phi i64 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[BADDR:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV]]
; CHECK-NEXT: [[AIDX:%.*]] = load i64, ptr [[BADDR]], align 8
; CHECK-NEXT: [[AADDR:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[AIDX]]
@@ -276,7 +261,7 @@ define i64 @indexed_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i64
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[SUM_NEXT]] = add i64 [[SUM]], [[ELEM]]
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: [[SUM_NEXT_LCSSA:%.*]] = phi i64 [ [[SUM_NEXT]], [[FOR_BODY]] ], [ [[TMP11]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i64 [[SUM_NEXT_LCSSA]]
@@ -303,15 +288,8 @@ for.end:
define void @splat_int(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-LABEL: @splat_int(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0
@@ -319,24 +297,27 @@ define void @splat_int(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: store <vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP6]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr align 8 [[TMP6]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
+; CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP10]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP7]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]]
+; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -358,15 +339,8 @@ for.end:
define void @splat_ptr(ptr noalias nocapture %a, ptr %v, i64 %n) {
; CHECK-LABEL: @splat_ptr(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x ptr> poison, ptr [[V:%.*]], i64 0
@@ -374,24 +348,27 @@ define void @splat_ptr(ptr noalias nocapture %a, ptr %v, i64 %n) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: store <vscale x 2 x ptr> [[BROADCAST_SPLAT]], ptr [[TMP6]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv2p0.p0(<vscale x 2 x ptr> [[BROADCAST_SPLAT]], ptr align 8 [[TMP6]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
+; CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP10]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP7]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]]
+; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: store ptr [[V]], ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll
index c037b70..c807891 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll
@@ -11,12 +11,6 @@ define void @vector_add(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP4]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0
@@ -76,12 +70,6 @@ define void @indexed_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP4]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0
@@ -139,12 +127,6 @@ define i64 @indexed_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i64
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP4]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -210,12 +192,6 @@ define void @splat_int(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP4]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0
@@ -267,12 +243,6 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP4]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0
@@ -334,8 +304,6 @@ define i64 @uniform_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i64
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
@@ -344,7 +312,7 @@ define i64 @uniform_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i64
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[IV]]
; CHECK-NEXT: store <vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP5]]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP3]]
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK: middle.block:
@@ -387,12 +355,6 @@ define void @vector_add_trip1024(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP4]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/select-cmp-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/select-cmp-reduction.ll
index 5c6febc..384cba5d2 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/select-cmp-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/select-cmp-reduction.ll
@@ -5,50 +5,46 @@ define i32 @select_icmp(i32 %x, i32 %y, ptr nocapture readonly %c, i64 %n) {
; CHECK-LABEL: define i32 @select_icmp(
; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]], ptr readonly captures(none) [[C:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[X]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
; CHECK-NEXT: [[TMP7:%.*]] = icmp sge <vscale x 4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: [[TMP8]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP7]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: [[TMP8]] = call <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1> [[TMP7]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> [[VEC_PHI]], i32 [[TMP14]])
+; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP10:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP8]])
; CHECK-NEXT: [[TMP11:%.*]] = freeze i1 [[TMP10]]
; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP11]], i32 [[Y]], i32 0
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[A:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[COND:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[A:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[COND:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[TMP12]], [[X]]
; CHECK-NEXT: [[COND]] = select i1 [[CMP1]], i32 [[A]], i32 [[Y]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[FOR_END]]:
; CHECK-NEXT: [[COND_LCSSA:%.*]] = phi i32 [ [[COND]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[COND_LCSSA]]
@@ -75,50 +71,46 @@ define i32 @select_fcmp(float %x, i32 %y, ptr nocapture readonly %c, i64 %n) {
; CHECK-LABEL: define i32 @select_fcmp(
; CHECK-SAME: float [[X:%.*]], i32 [[Y:%.*]], ptr readonly captures(none) [[C:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[X]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x float> [[BROADCAST_SPLATINSERT]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[C]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
; CHECK-NEXT: [[TMP7:%.*]] = fcmp fast uge <vscale x 4 x float> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: [[TMP8]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP7]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: [[TMP8]] = call <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1> [[TMP7]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> [[VEC_PHI]], i32 [[TMP14]])
+; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP10:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP8]])
; CHECK-NEXT: [[TMP11:%.*]] = freeze i1 [[TMP10]]
; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP11]], i32 [[Y]], i32 0
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[A:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[COND:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[A:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[COND:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[C]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[CMP1:%.*]] = fcmp fast olt float [[TMP12]], [[X]]
; CHECK-NEXT: [[COND]] = select i1 [[CMP1]], i32 [[A]], i32 [[Y]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: [[FOR_END]]:
; CHECK-NEXT: [[COND_LCSSA:%.*]] = phi i32 [ [[COND]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[COND_LCSSA]]
@@ -145,48 +137,44 @@ define i32 @select_const_i32_from_icmp(ptr nocapture readonly %v, i64 %n) {
; CHECK-LABEL: define i32 @select_const_i32_from_icmp(
; CHECK-SAME: ptr readonly captures(none) [[V:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP21:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[V]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP21]])
; CHECK-NEXT: [[TMP7:%.*]] = icmp ne <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 3)
-; CHECK-NEXT: [[TMP8]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP7]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: [[TMP8]] = call <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1> [[TMP7]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> [[VEC_PHI]], i32 [[TMP21]])
+; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP21]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP10:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP8]])
; CHECK-NEXT: [[TMP11:%.*]] = freeze i1 [[TMP10]]
; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP11]], i32 7, i32 3
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ], [ 3, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 3, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[TMP12:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[TMP18:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[TMP13:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[TMP17:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP12:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[TMP18:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = phi i32 [ 3, %[[SCALAR_PH]] ], [ [[TMP17:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[V]], i64 [[TMP12]]
; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4
; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i32 [[TMP15]], 3
; CHECK-NEXT: [[TMP17]] = select i1 [[TMP16]], i32 [[TMP13]], i32 7
; CHECK-NEXT: [[TMP18]] = add nuw nsw i64 [[TMP12]], 1
; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[TMP18]], [[N]]
-; CHECK-NEXT: br i1 [[TMP19]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP19]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP17]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[DOTLCSSA]]
@@ -213,48 +201,44 @@ define i32 @select_i32_from_icmp(ptr nocapture readonly %v, i32 %a, i32 %b, i64
; CHECK-LABEL: define i32 @select_i32_from_icmp(
; CHECK-SAME: ptr readonly captures(none) [[V:%.*]], i32 [[A:%.*]], i32 [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP21:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[V]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP21]])
; CHECK-NEXT: [[TMP7:%.*]] = icmp ne <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 3)
-; CHECK-NEXT: [[TMP8]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP7]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: [[TMP8]] = call <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1> [[TMP7]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> [[VEC_PHI]], i32 [[TMP21]])
+; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP21]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP10:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP8]])
; CHECK-NEXT: [[TMP11:%.*]] = freeze i1 [[TMP10]]
; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP11]], i32 [[B]], i32 [[A]]
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ], [ [[A]], %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[A]], %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[TMP12:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[TMP18:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[TMP13:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[TMP17:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP12:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[TMP18:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = phi i32 [ [[A]], %[[SCALAR_PH]] ], [ [[TMP17:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[V]], i64 [[TMP12]]
; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4
; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i32 [[TMP15]], 3
; CHECK-NEXT: [[TMP17]] = select i1 [[TMP16]], i32 [[TMP13]], i32 [[B]]
; CHECK-NEXT: [[TMP18]] = add nuw nsw i64 [[TMP12]], 1
; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[TMP18]], [[N]]
-; CHECK-NEXT: br i1 [[TMP19]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP19]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP17]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[DOTLCSSA]]
@@ -281,48 +265,44 @@ define i32 @select_const_i32_from_fcmp(ptr nocapture readonly %v, i64 %n) {
; CHECK-LABEL: define i32 @select_const_i32_from_fcmp(
; CHECK-SAME: ptr readonly captures(none) [[V:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP21:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[V]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP21]])
; CHECK-NEXT: [[TMP7:%.*]] = fcmp fast one <vscale x 4 x float> [[WIDE_LOAD]], splat (float 3.000000e+00)
-; CHECK-NEXT: [[TMP8]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP7]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: [[TMP8]] = call <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1> [[TMP7]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> [[VEC_PHI]], i32 [[TMP21]])
+; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP21]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP10:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP8]])
; CHECK-NEXT: [[TMP11:%.*]] = freeze i1 [[TMP10]]
; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP11]], i32 1, i32 2
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ], [ 2, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 2, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[TMP12:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[TMP18:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[TMP13:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[TMP17:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP12:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[TMP18:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = phi i32 [ 2, %[[SCALAR_PH]] ], [ [[TMP17:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[V]], i64 [[TMP12]]
; CHECK-NEXT: [[TMP15:%.*]] = load float, ptr [[TMP14]], align 4
; CHECK-NEXT: [[TMP16:%.*]] = fcmp fast ueq float [[TMP15]], 3.000000e+00
; CHECK-NEXT: [[TMP17]] = select i1 [[TMP16]], i32 [[TMP13]], i32 1
; CHECK-NEXT: [[TMP18]] = add nuw nsw i64 [[TMP12]], 1
; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[TMP18]], [[N]]
-; CHECK-NEXT: br i1 [[TMP19]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP19]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP17]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[DOTLCSSA]]
@@ -386,45 +366,47 @@ define i32 @pred_select_const_i32_from_icmp(ptr noalias nocapture readonly %src1
; CHECK-LABEL: define i32 @pred_select_const_i32_from_icmp(
; CHECK-SAME: ptr noalias readonly captures(none) [[SRC1:%.*]], ptr noalias readonly captures(none) [[SRC2:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[PREDPHI:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP17:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP17]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP18:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; CHECK-NEXT: [[TMP19:%.*]] = icmp ult <vscale x 4 x i32> [[TMP18]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[SRC1]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP17]])
; CHECK-NEXT: [[TMP7:%.*]] = icmp sgt <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 35)
+; CHECK-NEXT: [[TMP20:%.*]] = select <vscale x 4 x i1> [[TMP19]], <vscale x 4 x i1> [[TMP7]], <vscale x 4 x i1> zeroinitializer
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[SRC2]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP8]], i32 4, <vscale x 4 x i1> [[TMP7]], <vscale x 4 x i32> poison)
+; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP8]], <vscale x 4 x i1> [[TMP7]], i32 [[TMP17]])
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq <vscale x 4 x i32> [[WIDE_MASKED_LOAD]], splat (i32 2)
; CHECK-NEXT: [[TMP10:%.*]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP9]]
-; CHECK-NEXT: [[PREDPHI]] = select <vscale x 4 x i1> [[TMP7]], <vscale x 4 x i1> [[TMP10]], <vscale x 4 x i1> [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-NEXT: [[PREDPHI1:%.*]] = select <vscale x 4 x i1> [[TMP20]], <vscale x 4 x i1> [[TMP10]], <vscale x 4 x i1> [[VEC_PHI]]
+; CHECK-NEXT: [[PREDPHI]] = call <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> [[PREDPHI1]], <vscale x 4 x i1> [[VEC_PHI]], i32 [[TMP17]])
+; CHECK-NEXT: [[TMP21:%.*]] = zext i32 [[TMP17]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP21]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP21]]
+; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP12:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[PREDPHI]])
; CHECK-NEXT: [[TMP13:%.*]] = freeze i1 [[TMP12]]
; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP13]], i32 1, i32 0
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END_LOOPEXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[FOR_END_LOOPEXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[I_013:%.*]] = phi i64 [ [[INC:%.*]], %[[FOR_INC:.*]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[R_012:%.*]] = phi i32 [ [[R_1:%.*]], %[[FOR_INC]] ], [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[I_013:%.*]] = phi i64 [ [[INC:%.*]], %[[FOR_INC:.*]] ], [ 0, %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[R_012:%.*]] = phi i32 [ [[R_1:%.*]], %[[FOR_INC]] ], [ 0, %[[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[SRC1]], i64 [[I_013]]
; CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[TMP14]], 35
@@ -439,7 +421,7 @@ define i32 @pred_select_const_i32_from_icmp(ptr noalias nocapture readonly %src1
; CHECK-NEXT: [[R_1]] = phi i32 [ [[R_012]], %[[FOR_BODY]] ], [ [[SPEC_SELECT]], %[[IF_THEN]] ]
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_013]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END_LOOPEXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END_LOOPEXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK: [[FOR_END_LOOPEXIT]]:
; CHECK-NEXT: [[R_1_LCSSA:%.*]] = phi i32 [ [[R_1]], %[[FOR_INC]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[R_1_LCSSA]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/short-trip-count.ll b/llvm/test/Transforms/LoopVectorize/RISCV/short-trip-count.ll
index 13a4b16..8c804e5 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/short-trip-count.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/short-trip-count.ll
@@ -4,28 +4,16 @@
define void @small_trip_count_min_vlen_128(ptr nocapture %a) nounwind vscale_range(4,1024) {
; CHECK-LABEL: @small_trip_count_min_vlen_128(
; CHECK-NEXT: entry:
-; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
-; CHECK: vector.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: vector.body:
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1:%.*]], align 4
-; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], splat (i32 1)
-; CHECK-NEXT: store <4 x i32> [[TMP2]], ptr [[TMP1]], align 4
-; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]]
-; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP1:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[LOOP1]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 [[IV]]
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[LOOP1]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[TMP1:%.*]], i32 [[IV]]
; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[GEP]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[V]], 1
; CHECK-NEXT: store i32 [[ADD]], ptr [[GEP]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
; CHECK-NEXT: [[COND:%.*]] = icmp eq i32 [[IV]], 3
-; CHECK-NEXT: br i1 [[COND]], label [[EXIT]], label [[LOOP1]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: br i1 [[COND]], label [[EXIT:%.*]], label [[LOOP1]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -49,28 +37,16 @@ exit:
define void @small_trip_count_min_vlen_32(ptr nocapture %a) nounwind vscale_range(1,1024) {
; CHECK-LABEL: @small_trip_count_min_vlen_32(
; CHECK-NEXT: entry:
-; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
-; CHECK: vector.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: vector.body:
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1:%.*]], align 4
-; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], splat (i32 1)
-; CHECK-NEXT: store <4 x i32> [[TMP2]], ptr [[TMP1]], align 4
-; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]]
-; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP1:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[LOOP1]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 [[IV]]
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[LOOP1]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[TMP1:%.*]], i32 [[IV]]
; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[GEP]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[V]], 1
; CHECK-NEXT: store i32 [[ADD]], ptr [[GEP]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
; CHECK-NEXT: [[COND:%.*]] = icmp eq i32 [[IV]], 3
-; CHECK-NEXT: br i1 [[COND]], label [[EXIT]], label [[LOOP1]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[COND]], label [[EXIT:%.*]], label [[LOOP1]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll b/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
index df907dc..aa90c8b 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
@@ -6,45 +6,41 @@
define void @single_constant_stride_int_scaled(ptr %p) {
; CHECK-LABEL: @single_constant_stride_int_scaled(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH1:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
-; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i64 [[TMP3]], i64 [[N_MOD_VF]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[TMP5]]
; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
; CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; CHECK-NEXT: [[TMP10:%.*]] = mul <vscale x 4 x i64> [[TMP8]], splat (i64 1)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP10]]
-; CHECK-NEXT: [[TMP13:%.*]] = mul i64 1, [[TMP7]]
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP13]], i64 0
-; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; CHECK-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64
+; CHECK-NEXT: [[TMP9:%.*]] = mul i64 1, [[TMP12]]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP9]], i64 0
+; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP14:%.*]] = mul nuw nsw <vscale x 4 x i64> [[VEC_IND]], splat (i64 8)
; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[P:%.*]], <vscale x 4 x i64> [[TMP14]]
-; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP15]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
+; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP15]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; CHECK-NEXT: [[TMP16:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], splat (i32 1)
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP16]], <vscale x 4 x ptr> [[TMP15]], i32 4, <vscale x 4 x i1> splat (i1 true))
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP16]], <vscale x 4 x ptr> align 4 [[TMP15]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; CHECK-NEXT: [[TMP13:%.*]] = zext i32 [[TMP11]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP13]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[SCALAR_PH:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH1]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[OFFSET:%.*]] = mul nuw nsw i64 [[I]], 8
; CHECK-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]]
; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
@@ -52,7 +48,7 @@ define void @single_constant_stride_int_scaled(ptr %p) {
; CHECK-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4
; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT:%.*]], label [[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[DONE]], label [[SCALAR_PH]], label [[LOOP]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -77,46 +73,42 @@ exit:
define void @single_constant_stride_int_iv(ptr %p) {
; CHECK-LABEL: @single_constant_stride_int_iv(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; CHECK-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], 64
; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; CHECK-NEXT: [[TMP8:%.*]] = mul <vscale x 4 x i64> [[TMP6]], splat (i64 64)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP8]]
-; CHECK-NEXT: [[TMP11:%.*]] = mul i64 64, [[TMP5]]
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP11]], i64 0
-; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP7]] to i64
+; CHECK-NEXT: [[TMP9:%.*]] = mul i64 64, [[TMP11]]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP9]], i64 0
+; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[P:%.*]], <vscale x 4 x i64> [[VEC_IND]]
-; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP12]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
+; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
; CHECK-NEXT: [[TMP13:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], splat (i32 1)
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP13]], <vscale x 4 x ptr> [[TMP12]], i32 4, <vscale x 4 x i1> splat (i1 true))
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP13]], <vscale x 4 x ptr> align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
+; CHECK-NEXT: [[TMP14:%.*]] = zext i32 [[TMP7]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP14]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ 0, [[ENTRY]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[OFFSET:%.*]] = phi i64 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[OFFSET_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[OFFSET:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[OFFSET_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]]
; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
; CHECK-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
@@ -124,7 +116,7 @@ define void @single_constant_stride_int_iv(ptr %p) {
; CHECK-NEXT: [[OFFSET_NEXT]] = add nuw nsw i64 [[OFFSET]], 64
; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -151,55 +143,46 @@ exit:
define void @single_constant_stride_ptr_iv(ptr %p) {
; CHECK-LABEL: @single_constant_stride_ptr_iv(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH1:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
-; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i64 [[TMP3]], i64 [[N_MOD_VF]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
-; CHECK-NEXT: [[TMP18:%.*]] = mul i64 [[N_VEC]], 8
-; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 [[TMP18]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[P]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[P:%.*]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP14:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; CHECK-NEXT: [[TMP16:%.*]] = mul <vscale x 4 x i64> [[TMP14]], splat (i64 8)
; CHECK-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 4 x i64> [[TMP16]]
-; CHECK-NEXT: [[TMP17:%.*]] = extractelement <vscale x 4 x ptr> [[VECTOR_GEP]], i32 0
-; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP17]], align 4
-; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]])
-; CHECK-NEXT: [[TMP19:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
+; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; CHECK-NEXT: [[TMP19:%.*]] = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[VECTOR_GEP]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; CHECK-NEXT: [[TMP20:%.*]] = add <vscale x 4 x i32> [[TMP19]], splat (i32 1)
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP20]], <vscale x 4 x ptr> [[VECTOR_GEP]], i32 4, <vscale x 4 x i1> splat (i1 true))
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
-; CHECK-NEXT: [[TMP12:%.*]] = mul i64 8, [[TMP8]]
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP20]], <vscale x 4 x ptr> align 4 [[VECTOR_GEP]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP11]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP9]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP11]] to i64
+; CHECK-NEXT: [[TMP12:%.*]] = mul i64 8, [[TMP10]]
; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP12]]
-; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[SCALAR_PH:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[P]], [[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi ptr [ [[P]], [[ENTRY]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[PTR:%.*]] = phi ptr [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[PTR_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH1]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[PTR:%.*]] = phi ptr [ [[P]], [[SCALAR_PH1]] ], [ [[PTR_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[PTR]], align 4
; CHECK-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
; CHECK-NEXT: store i32 [[Y0]], ptr [[PTR]], align 4
; CHECK-NEXT: [[PTR_NEXT]] = getelementptr inbounds i8, ptr [[PTR]], i64 8
; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT:%.*]], label [[LOOP]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK-NEXT: br i1 [[DONE]], label [[SCALAR_PH]], label [[LOOP]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -225,36 +208,31 @@ exit:
define void @single_stride_int_scaled(ptr %p, i64 %stride) {
; NOSTRIDED-LABEL: @single_stride_int_scaled(
; NOSTRIDED-NEXT: entry:
-; NOSTRIDED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; NOSTRIDED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; NOSTRIDED-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 8, i64 [[TMP1]])
-; NOSTRIDED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP2]]
-; NOSTRIDED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
+; NOSTRIDED-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
; NOSTRIDED: vector.scevcheck:
; NOSTRIDED-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE:%.*]], 1
; NOSTRIDED-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; NOSTRIDED: vector.ph:
-; NOSTRIDED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; NOSTRIDED-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
-; NOSTRIDED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP4]]
-; NOSTRIDED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; NOSTRIDED-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; NOSTRIDED-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4
; NOSTRIDED-NEXT: br label [[VECTOR_BODY:%.*]]
; NOSTRIDED: vector.body:
; NOSTRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; NOSTRIDED-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; NOSTRIDED-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; NOSTRIDED-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[INDEX]]
-; NOSTRIDED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP8]], align 4
+; NOSTRIDED-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP8]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
; NOSTRIDED-NEXT: [[TMP10:%.*]] = add <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 1)
-; NOSTRIDED-NEXT: store <vscale x 4 x i32> [[TMP10]], ptr [[TMP8]], align 4
-; NOSTRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
-; NOSTRIDED-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; NOSTRIDED-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; NOSTRIDED-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP10]], ptr align 4 [[TMP8]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
+; NOSTRIDED-NEXT: [[TMP11:%.*]] = zext i32 [[TMP7]] to i64
+; NOSTRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]]
+; NOSTRIDED-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; NOSTRIDED-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; NOSTRIDED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; NOSTRIDED: middle.block:
-; NOSTRIDED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; NOSTRIDED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; NOSTRIDED-NEXT: br label [[EXIT:%.*]]
; NOSTRIDED: scalar.ph:
-; NOSTRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
+; NOSTRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
; NOSTRIDED-NEXT: br label [[LOOP:%.*]]
; NOSTRIDED: loop:
; NOSTRIDED-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
@@ -265,7 +243,7 @@ define void @single_stride_int_scaled(ptr %p, i64 %stride) {
; NOSTRIDED-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4
; NOSTRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1
; NOSTRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; NOSTRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP9:![0-9]+]]
+; NOSTRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP10:![0-9]+]]
; NOSTRIDED: exit:
; NOSTRIDED-NEXT: ret void
;
@@ -306,37 +284,32 @@ exit:
define void @single_stride_int_iv(ptr %p, i64 %stride) {
; NOSTRIDED-LABEL: @single_stride_int_iv(
; NOSTRIDED-NEXT: entry:
-; NOSTRIDED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; NOSTRIDED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; NOSTRIDED-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 8, i64 [[TMP1]])
-; NOSTRIDED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP2]]
-; NOSTRIDED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
+; NOSTRIDED-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
; NOSTRIDED: vector.scevcheck:
; NOSTRIDED-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE:%.*]], 1
; NOSTRIDED-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; NOSTRIDED: vector.ph:
-; NOSTRIDED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; NOSTRIDED-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
-; NOSTRIDED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP4]]
-; NOSTRIDED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; NOSTRIDED-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; NOSTRIDED-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4
; NOSTRIDED-NEXT: br label [[VECTOR_BODY:%.*]]
; NOSTRIDED: vector.body:
; NOSTRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; NOSTRIDED-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; NOSTRIDED-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; NOSTRIDED-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[INDEX]]
-; NOSTRIDED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP8]], align 4
+; NOSTRIDED-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP8]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
; NOSTRIDED-NEXT: [[TMP10:%.*]] = add <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 1)
-; NOSTRIDED-NEXT: store <vscale x 4 x i32> [[TMP10]], ptr [[TMP8]], align 4
-; NOSTRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
-; NOSTRIDED-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; NOSTRIDED-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; NOSTRIDED-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP10]], ptr align 4 [[TMP8]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
+; NOSTRIDED-NEXT: [[TMP11:%.*]] = zext i32 [[TMP7]] to i64
+; NOSTRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]]
+; NOSTRIDED-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; NOSTRIDED-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; NOSTRIDED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; NOSTRIDED: middle.block:
-; NOSTRIDED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; NOSTRIDED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; NOSTRIDED-NEXT: br label [[EXIT:%.*]]
; NOSTRIDED: scalar.ph:
-; NOSTRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
-; NOSTRIDED-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
+; NOSTRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
+; NOSTRIDED-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
; NOSTRIDED-NEXT: br label [[LOOP:%.*]]
; NOSTRIDED: loop:
; NOSTRIDED-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
@@ -348,7 +321,7 @@ define void @single_stride_int_iv(ptr %p, i64 %stride) {
; NOSTRIDED-NEXT: [[OFFSET_NEXT]] = add nuw nsw i64 [[OFFSET]], [[STRIDE]]
; NOSTRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1
; NOSTRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; NOSTRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP11:![0-9]+]]
+; NOSTRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP12:![0-9]+]]
; NOSTRIDED: exit:
; NOSTRIDED-NEXT: ret void
;
@@ -429,11 +402,7 @@ define void @double_stride_int_scaled(ptr %p, ptr %p2, i64 %stride) {
; NOSTRIDED-NEXT: entry:
; NOSTRIDED-NEXT: [[P3:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; NOSTRIDED-NEXT: [[P21:%.*]] = ptrtoint ptr [[P2:%.*]] to i64
-; NOSTRIDED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; NOSTRIDED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; NOSTRIDED-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]])
-; NOSTRIDED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP2]]
-; NOSTRIDED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
+; NOSTRIDED-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
; NOSTRIDED: vector.scevcheck:
; NOSTRIDED-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE:%.*]], 1
; NOSTRIDED-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH]], label [[VECTOR_MEMCHECK:%.*]]
@@ -445,28 +414,27 @@ define void @double_stride_int_scaled(ptr %p, ptr %p2, i64 %stride) {
; NOSTRIDED-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]]
; NOSTRIDED-NEXT: br i1 [[DIFF_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; NOSTRIDED: vector.ph:
-; NOSTRIDED-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; NOSTRIDED-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
-; NOSTRIDED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP8]]
-; NOSTRIDED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; NOSTRIDED-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; NOSTRIDED-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; NOSTRIDED-NEXT: br label [[VECTOR_BODY:%.*]]
; NOSTRIDED: vector.body:
; NOSTRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; NOSTRIDED-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; NOSTRIDED-NEXT: [[TMP16:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; NOSTRIDED-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[P]], i64 [[INDEX]]
-; NOSTRIDED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP12]], align 4
+; NOSTRIDED-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]])
; NOSTRIDED-NEXT: [[TMP14:%.*]] = add <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 1)
; NOSTRIDED-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[P2]], i64 [[INDEX]]
-; NOSTRIDED-NEXT: store <vscale x 4 x i32> [[TMP14]], ptr [[TMP15]], align 4
-; NOSTRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
-; NOSTRIDED-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; NOSTRIDED-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; NOSTRIDED-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP14]], ptr align 4 [[TMP15]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]])
+; NOSTRIDED-NEXT: [[TMP13:%.*]] = zext i32 [[TMP16]] to i64
+; NOSTRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP13]], [[INDEX]]
+; NOSTRIDED-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]]
+; NOSTRIDED-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; NOSTRIDED-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; NOSTRIDED: middle.block:
-; NOSTRIDED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; NOSTRIDED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; NOSTRIDED-NEXT: br label [[EXIT:%.*]]
; NOSTRIDED: scalar.ph:
-; NOSTRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ], [ 0, [[VECTOR_MEMCHECK]] ]
+; NOSTRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ], [ 0, [[VECTOR_MEMCHECK]] ]
; NOSTRIDED-NEXT: br label [[LOOP:%.*]]
; NOSTRIDED: loop:
; NOSTRIDED-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
@@ -478,17 +446,13 @@ define void @double_stride_int_scaled(ptr %p, ptr %p2, i64 %stride) {
; NOSTRIDED-NEXT: store i32 [[Y0]], ptr [[Q1]], align 4
; NOSTRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1
; NOSTRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; NOSTRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP13:![0-9]+]]
+; NOSTRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP14:![0-9]+]]
; NOSTRIDED: exit:
; NOSTRIDED-NEXT: ret void
;
; STRIDED-LABEL: @double_stride_int_scaled(
; STRIDED-NEXT: entry:
-; STRIDED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; STRIDED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; STRIDED-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 80, i64 [[TMP1]])
-; STRIDED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP2]]
-; STRIDED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
+; STRIDED-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
; STRIDED: vector.scevcheck:
; STRIDED-NEXT: [[TMP24:%.*]] = shl i64 [[STRIDE:%.*]], 2
; STRIDED-NEXT: [[TMP25:%.*]] = mul i64 [[STRIDE]], -4
@@ -537,10 +501,6 @@ define void @double_stride_int_scaled(ptr %p, ptr %p2, i64 %stride) {
; STRIDED-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; STRIDED-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; STRIDED: vector.ph:
-; STRIDED-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
-; STRIDED-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 4
-; STRIDED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP9]]
-; STRIDED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; STRIDED-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
; STRIDED-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 4
; STRIDED-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[STRIDE]], i64 0
@@ -548,28 +508,32 @@ define void @double_stride_int_scaled(ptr %p, ptr %p2, i64 %stride) {
; STRIDED-NEXT: [[TMP12:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; STRIDED-NEXT: [[TMP14:%.*]] = mul <vscale x 4 x i64> [[TMP12]], splat (i64 1)
; STRIDED-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP14]]
-; STRIDED-NEXT: [[TMP17:%.*]] = mul i64 1, [[TMP11]]
-; STRIDED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP17]], i64 0
-; STRIDED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; STRIDED-NEXT: br label [[VECTOR_BODY:%.*]]
; STRIDED: vector.body:
; STRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; STRIDED-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; STRIDED-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; STRIDED-NEXT: [[TMP43:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; STRIDED-NEXT: [[TMP44:%.*]] = zext i32 [[TMP43]] to i64
+; STRIDED-NEXT: [[TMP45:%.*]] = mul i64 1, [[TMP44]]
+; STRIDED-NEXT: [[BROADCAST_SPLATINSERT9:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP45]], i64 0
+; STRIDED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT9]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; STRIDED-NEXT: [[TMP18:%.*]] = mul nuw nsw <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT1]]
; STRIDED-NEXT: [[TMP19:%.*]] = getelementptr i32, ptr [[P]], <vscale x 4 x i64> [[TMP18]]
-; STRIDED-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP19]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison), !alias.scope [[META8:![0-9]+]]
+; STRIDED-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP19]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP43]]), !alias.scope [[META9:![0-9]+]]
; STRIDED-NEXT: [[TMP20:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], splat (i32 1)
; STRIDED-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr [[P2]], <vscale x 4 x i64> [[TMP18]]
-; STRIDED-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP20]], <vscale x 4 x ptr> [[TMP21]], i32 4, <vscale x 4 x i1> splat (i1 true)), !alias.scope [[META11:![0-9]+]], !noalias [[META8]]
-; STRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP11]]
+; STRIDED-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP20]], <vscale x 4 x ptr> align 4 [[TMP21]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP43]]), !alias.scope [[META12:![0-9]+]], !noalias [[META9]]
+; STRIDED-NEXT: [[TMP46:%.*]] = zext i32 [[TMP43]] to i64
+; STRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP46]], [[INDEX]]
+; STRIDED-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP46]]
; STRIDED-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; STRIDED-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; STRIDED-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; STRIDED-NEXT: [[TMP41:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; STRIDED-NEXT: br i1 [[TMP41]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; STRIDED: middle.block:
-; STRIDED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; STRIDED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; STRIDED-NEXT: br label [[EXIT:%.*]]
; STRIDED: scalar.ph:
-; STRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ], [ 0, [[VECTOR_MEMCHECK1]] ]
+; STRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ], [ 0, [[VECTOR_MEMCHECK1]] ]
; STRIDED-NEXT: br label [[LOOP:%.*]]
; STRIDED: loop:
; STRIDED-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
@@ -581,7 +545,7 @@ define void @double_stride_int_scaled(ptr %p, ptr %p2, i64 %stride) {
; STRIDED-NEXT: store i32 [[Y0]], ptr [[Q1]], align 4
; STRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1
; STRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; STRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP14:![0-9]+]]
+; STRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP15:![0-9]+]]
; STRIDED: exit:
; STRIDED-NEXT: ret void
;
@@ -607,37 +571,32 @@ exit:
define void @double_stride_int_iv(ptr %p, ptr %p2, i64 %stride) {
; NOSTRIDED-LABEL: @double_stride_int_iv(
; NOSTRIDED-NEXT: entry:
-; NOSTRIDED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; NOSTRIDED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; NOSTRIDED-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 8, i64 [[TMP1]])
-; NOSTRIDED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP2]]
-; NOSTRIDED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
+; NOSTRIDED-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
; NOSTRIDED: vector.scevcheck:
; NOSTRIDED-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE:%.*]], 1
; NOSTRIDED-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; NOSTRIDED: vector.ph:
-; NOSTRIDED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; NOSTRIDED-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
-; NOSTRIDED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP4]]
-; NOSTRIDED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; NOSTRIDED-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; NOSTRIDED-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4
; NOSTRIDED-NEXT: br label [[VECTOR_BODY:%.*]]
; NOSTRIDED: vector.body:
; NOSTRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; NOSTRIDED-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; NOSTRIDED-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; NOSTRIDED-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[INDEX]]
-; NOSTRIDED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP8]], align 4
+; NOSTRIDED-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP8]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
; NOSTRIDED-NEXT: [[TMP10:%.*]] = add <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 1)
-; NOSTRIDED-NEXT: store <vscale x 4 x i32> [[TMP10]], ptr [[TMP8]], align 4
-; NOSTRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
-; NOSTRIDED-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; NOSTRIDED-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; NOSTRIDED-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP10]], ptr align 4 [[TMP8]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
+; NOSTRIDED-NEXT: [[TMP11:%.*]] = zext i32 [[TMP7]] to i64
+; NOSTRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]]
+; NOSTRIDED-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; NOSTRIDED-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; NOSTRIDED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; NOSTRIDED: middle.block:
-; NOSTRIDED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; NOSTRIDED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; NOSTRIDED-NEXT: br label [[EXIT:%.*]]
; NOSTRIDED: scalar.ph:
-; NOSTRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
-; NOSTRIDED-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
+; NOSTRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
+; NOSTRIDED-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
; NOSTRIDED-NEXT: br label [[LOOP:%.*]]
; NOSTRIDED: loop:
; NOSTRIDED-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
@@ -650,7 +609,7 @@ define void @double_stride_int_iv(ptr %p, ptr %p2, i64 %stride) {
; NOSTRIDED-NEXT: [[OFFSET_NEXT]] = add nuw nsw i64 [[OFFSET]], [[STRIDE]]
; NOSTRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1
; NOSTRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; NOSTRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP15:![0-9]+]]
+; NOSTRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP16:![0-9]+]]
; NOSTRIDED: exit:
; NOSTRIDED-NEXT: ret void
;
@@ -692,7 +651,6 @@ exit:
ret void
}
-
define void @double_stride_ptr_iv(ptr %p, ptr %p2, i64 %stride) {
; NOSTRIDED-LABEL: @double_stride_ptr_iv(
; NOSTRIDED-NEXT: entry:
@@ -714,11 +672,7 @@ define void @double_stride_ptr_iv(ptr %p, ptr %p2, i64 %stride) {
;
; STRIDED-LABEL: @double_stride_ptr_iv(
; STRIDED-NEXT: entry:
-; STRIDED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; STRIDED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; STRIDED-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 32, i64 [[TMP1]])
-; STRIDED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP2]]
-; STRIDED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
+; STRIDED-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
; STRIDED: vector.memcheck:
; STRIDED-NEXT: [[TMP3:%.*]] = mul i64 [[STRIDE:%.*]], 1023
; STRIDED-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[P2:%.*]], i64 [[TMP3]]
@@ -738,21 +692,14 @@ define void @double_stride_ptr_iv(ptr %p, ptr %p2, i64 %stride) {
; STRIDED-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; STRIDED-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; STRIDED: vector.ph:
-; STRIDED-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
-; STRIDED-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 4
-; STRIDED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP9]]
-; STRIDED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; STRIDED-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
; STRIDED-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP12]], 4
-; STRIDED-NEXT: [[TMP10:%.*]] = mul i64 [[N_VEC]], [[STRIDE]]
-; STRIDED-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP10]]
-; STRIDED-NEXT: [[TMP11:%.*]] = mul i64 [[N_VEC]], [[STRIDE]]
-; STRIDED-NEXT: [[IND_END7:%.*]] = getelementptr i8, ptr [[P2]], i64 [[TMP11]]
; STRIDED-NEXT: br label [[VECTOR_BODY:%.*]]
; STRIDED: vector.body:
; STRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; STRIDED-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[P]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
; STRIDED-NEXT: [[POINTER_PHI11:%.*]] = phi ptr [ [[P2]], [[VECTOR_PH]] ], [ [[PTR_IND12:%.*]], [[VECTOR_BODY]] ]
+; STRIDED-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; STRIDED-NEXT: [[TMP19:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; STRIDED-NEXT: [[DOTSPLATINSERT9:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[STRIDE]], i64 0
; STRIDED-NEXT: [[DOTSPLAT10:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT9]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
@@ -761,23 +708,27 @@ define void @double_stride_ptr_iv(ptr %p, ptr %p2, i64 %stride) {
; STRIDED-NEXT: [[TMP27:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; STRIDED-NEXT: [[TMP21:%.*]] = mul <vscale x 4 x i64> [[TMP27]], [[DOTSPLAT10]]
; STRIDED-NEXT: [[VECTOR_GEP7:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 4 x i64> [[TMP21]]
-; STRIDED-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[VECTOR_GEP7]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison), !alias.scope [[META15:![0-9]+]]
+; STRIDED-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; STRIDED-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[VECTOR_GEP7]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]]), !alias.scope [[META16:![0-9]+]]
; STRIDED-NEXT: [[TMP30:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], splat (i32 1)
-; STRIDED-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP30]], <vscale x 4 x ptr> [[VECTOR_GEP]], i32 4, <vscale x 4 x i1> splat (i1 true)), !alias.scope [[META18:![0-9]+]], !noalias [[META15]]
-; STRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP13]]
-; STRIDED-NEXT: [[TMP25:%.*]] = mul i64 [[STRIDE]], [[TMP13]]
+; STRIDED-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP30]], <vscale x 4 x ptr> align 4 [[VECTOR_GEP]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]]), !alias.scope [[META19:![0-9]+]], !noalias [[META16]]
+; STRIDED-NEXT: [[TMP16:%.*]] = zext i32 [[TMP14]] to i64
+; STRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP16]], [[INDEX]]
+; STRIDED-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
+; STRIDED-NEXT: [[TMP20:%.*]] = zext i32 [[TMP14]] to i64
+; STRIDED-NEXT: [[TMP25:%.*]] = mul i64 [[STRIDE]], [[TMP20]]
; STRIDED-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP25]]
-; STRIDED-NEXT: [[TMP17:%.*]] = mul i64 [[STRIDE]], [[TMP13]]
+; STRIDED-NEXT: [[TMP22:%.*]] = zext i32 [[TMP14]] to i64
+; STRIDED-NEXT: [[TMP17:%.*]] = mul i64 [[STRIDE]], [[TMP22]]
; STRIDED-NEXT: [[PTR_IND12]] = getelementptr i8, ptr [[POINTER_PHI11]], i64 [[TMP17]]
-; STRIDED-NEXT: [[TMP31:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; STRIDED-NEXT: br i1 [[TMP31]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
+; STRIDED-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; STRIDED-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
; STRIDED: middle.block:
-; STRIDED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; STRIDED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; STRIDED-NEXT: br label [[EXIT:%.*]]
; STRIDED: scalar.ph:
-; STRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
-; STRIDED-NEXT: [[BC_RESUME_VAL6:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[P]], [[ENTRY]] ], [ [[P]], [[VECTOR_MEMCHECK]] ]
-; STRIDED-NEXT: [[BC_RESUME_VAL8:%.*]] = phi ptr [ [[IND_END7]], [[MIDDLE_BLOCK]] ], [ [[P2]], [[ENTRY]] ], [ [[P2]], [[VECTOR_MEMCHECK]] ]
+; STRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
+; STRIDED-NEXT: [[BC_RESUME_VAL6:%.*]] = phi ptr [ [[P]], [[ENTRY]] ], [ [[P]], [[VECTOR_MEMCHECK]] ]
+; STRIDED-NEXT: [[BC_RESUME_VAL8:%.*]] = phi ptr [ [[P2]], [[ENTRY]] ], [ [[P2]], [[VECTOR_MEMCHECK]] ]
; STRIDED-NEXT: br label [[LOOP:%.*]]
; STRIDED: loop:
; STRIDED-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
@@ -790,7 +741,7 @@ define void @double_stride_ptr_iv(ptr %p, ptr %p2, i64 %stride) {
; STRIDED-NEXT: [[PTR2_NEXT]] = getelementptr inbounds i8, ptr [[PTR2]], i64 [[STRIDE]]
; STRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1
; STRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; STRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP21:![0-9]+]]
+; STRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP22:![0-9]+]]
; STRIDED: exit:
; STRIDED-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-bin-unary-ops-args.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-bin-unary-ops-args.ll
index 38e7832..f539ccf 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-bin-unary-ops-args.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-bin-unary-ops-args.ll
@@ -22,12 +22,6 @@ define void @test_and(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP2]], [[TMP1]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
-; IF-EVL-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 100, [[TMP7]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP6]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -84,8 +78,6 @@ define void @test_and(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -94,7 +86,7 @@ define void @test_and(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP10:%.*]] = and <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 1)
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -148,12 +140,6 @@ define void @test_or(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP2]], [[TMP1]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
-; IF-EVL-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 100, [[TMP7]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP6]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -210,8 +196,6 @@ define void @test_or(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -220,7 +204,7 @@ define void @test_or(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP10:%.*]] = or <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 1)
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -274,12 +258,6 @@ define void @test_xor(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP2]], [[TMP1]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
-; IF-EVL-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 100, [[TMP7]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP6]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -336,8 +314,6 @@ define void @test_xor(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -346,7 +322,7 @@ define void @test_xor(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP10:%.*]] = xor <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 1)
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -400,12 +376,6 @@ define void @test_shl(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP2]], [[TMP1]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
-; IF-EVL-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 100, [[TMP7]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP6]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -462,8 +432,6 @@ define void @test_shl(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -472,7 +440,7 @@ define void @test_shl(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP10:%.*]] = shl <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 1)
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -526,12 +494,6 @@ define void @test_lshr(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP2]], [[TMP1]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
-; IF-EVL-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 100, [[TMP7]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP6]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -588,8 +550,6 @@ define void @test_lshr(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -598,7 +558,7 @@ define void @test_lshr(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP10:%.*]] = lshr <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 1)
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -652,12 +612,6 @@ define void @test_ashr(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP2]], [[TMP1]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
-; IF-EVL-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 100, [[TMP7]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP6]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -714,8 +668,6 @@ define void @test_ashr(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -724,7 +676,7 @@ define void @test_ashr(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP10:%.*]] = ashr <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 1)
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -778,12 +730,6 @@ define void @test_add(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP2]], [[TMP1]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
-; IF-EVL-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 100, [[TMP7]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP6]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -840,8 +786,6 @@ define void @test_add(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -850,7 +794,7 @@ define void @test_add(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP10:%.*]] = add <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 1)
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -904,12 +848,6 @@ define void @test_sub(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP2]], [[TMP1]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
-; IF-EVL-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 100, [[TMP7]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP6]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -966,8 +904,6 @@ define void @test_sub(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -976,7 +912,7 @@ define void @test_sub(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP10:%.*]] = sub <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 1)
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -1030,12 +966,6 @@ define void @test_mul(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP2]], [[TMP1]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
-; IF-EVL-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 100, [[TMP7]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP6]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -1092,8 +1022,6 @@ define void @test_mul(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -1102,7 +1030,7 @@ define void @test_mul(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP10:%.*]] = mul <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 3)
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -1156,12 +1084,6 @@ define void @test_sdiv(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP2]], [[TMP1]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
-; IF-EVL-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 100, [[TMP7]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP6]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -1218,8 +1140,6 @@ define void @test_sdiv(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -1228,7 +1148,7 @@ define void @test_sdiv(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP10:%.*]] = sdiv <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 3)
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -1282,12 +1202,6 @@ define void @test_udiv(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP2]], [[TMP1]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
-; IF-EVL-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 100, [[TMP7]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP6]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -1344,8 +1258,6 @@ define void @test_udiv(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -1354,7 +1266,7 @@ define void @test_udiv(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP10:%.*]] = udiv <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 3)
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -1408,12 +1320,6 @@ define void @test_srem(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP2]], [[TMP1]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
-; IF-EVL-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 100, [[TMP7]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP6]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -1470,8 +1376,6 @@ define void @test_srem(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -1480,7 +1384,7 @@ define void @test_srem(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP10:%.*]] = srem <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 3)
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -1534,12 +1438,6 @@ define void @test_urem(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP2]], [[TMP1]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
-; IF-EVL-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 100, [[TMP7]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP6]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -1596,8 +1494,6 @@ define void @test_urem(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -1606,7 +1502,7 @@ define void @test_urem(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP10:%.*]] = urem <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 3)
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -1663,12 +1559,6 @@ define void @test_fadd(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP3]], [[TMP2]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
-; IF-EVL-NEXT: [[TMP8:%.*]] = sub i64 [[TMP7]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 100, [[TMP8]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP7]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -1726,8 +1616,6 @@ define void @test_fadd(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP8]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -1736,7 +1624,7 @@ define void @test_fadd(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP12:%.*]] = fadd fast <vscale x 4 x float> [[WIDE_LOAD]], splat (float 3.000000e+00)
; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 4 x float> [[TMP12]], ptr [[TMP13]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -1791,12 +1679,6 @@ define void @test_fsub(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP3]], [[TMP2]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
-; IF-EVL-NEXT: [[TMP8:%.*]] = sub i64 [[TMP7]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 100, [[TMP8]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP7]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -1854,8 +1736,6 @@ define void @test_fsub(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP8]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -1864,7 +1744,7 @@ define void @test_fsub(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP12:%.*]] = fsub fast <vscale x 4 x float> [[WIDE_LOAD]], splat (float 3.000000e+00)
; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 4 x float> [[TMP12]], ptr [[TMP13]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -1919,12 +1799,6 @@ define void @test_fmul(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP3]], [[TMP2]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
-; IF-EVL-NEXT: [[TMP8:%.*]] = sub i64 [[TMP7]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 100, [[TMP8]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP7]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -1982,8 +1856,6 @@ define void @test_fmul(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP8]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -1992,7 +1864,7 @@ define void @test_fmul(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP12:%.*]] = fmul fast <vscale x 4 x float> [[WIDE_LOAD]], splat (float 3.000000e+00)
; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 4 x float> [[TMP12]], ptr [[TMP13]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -2047,12 +1919,6 @@ define void @test_fdiv(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP3]], [[TMP2]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
-; IF-EVL-NEXT: [[TMP8:%.*]] = sub i64 [[TMP7]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 100, [[TMP8]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP7]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -2110,8 +1976,6 @@ define void @test_fdiv(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP8]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -2120,7 +1984,7 @@ define void @test_fdiv(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP12:%.*]] = fdiv fast <vscale x 4 x float> [[WIDE_LOAD]], splat (float 3.000000e+00)
; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 4 x float> [[TMP12]], ptr [[TMP13]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP34:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -2228,12 +2092,6 @@ define void @test_fneg(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP3]], [[TMP2]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
-; IF-EVL-NEXT: [[TMP8:%.*]] = sub i64 [[TMP7]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 100, [[TMP8]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP7]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -2291,8 +2149,6 @@ define void @test_fneg(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP8]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -2301,7 +2157,7 @@ define void @test_fneg(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[TMP12:%.*]] = fneg fast <vscale x 4 x float> [[WIDE_LOAD]]
; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 4 x float> [[TMP12]], ptr [[TMP13]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-call-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-call-intrinsics.ll
index f604745..d2f3355 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-call-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-call-intrinsics.ll
@@ -27,12 +27,6 @@ define void @vp_smax(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]]
; IF-EVL-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP27:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP28:%.*]] = mul nuw i64 [[TMP27]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP28]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP28]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -99,8 +93,6 @@ define void @vp_smax(ptr %a, ptr %b, ptr %c, i64 %N) {
; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP10]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 4
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -111,7 +103,7 @@ define void @vp_smax(ptr %a, ptr %b, ptr %c, i64 %N) {
; NO-VP-NEXT: [[TMP15:%.*]] = call <vscale x 4 x i32> @llvm.smax.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> [[WIDE_LOAD5]])
; NO-VP-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP15]], ptr [[TMP16]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
; NO-VP-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -176,12 +168,6 @@ define void @vp_smin(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]]
; IF-EVL-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP27:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP28:%.*]] = mul nuw i64 [[TMP27]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP28]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP28]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -248,8 +234,6 @@ define void @vp_smin(ptr %a, ptr %b, ptr %c, i64 %N) {
; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP10]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 4
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -260,7 +244,7 @@ define void @vp_smin(ptr %a, ptr %b, ptr %c, i64 %N) {
; NO-VP-NEXT: [[TMP15:%.*]] = call <vscale x 4 x i32> @llvm.smin.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> [[WIDE_LOAD5]])
; NO-VP-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP15]], ptr [[TMP16]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
; NO-VP-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -325,12 +309,6 @@ define void @vp_umax(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]]
; IF-EVL-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP27:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP28:%.*]] = mul nuw i64 [[TMP27]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP28]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP28]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -397,8 +375,6 @@ define void @vp_umax(ptr %a, ptr %b, ptr %c, i64 %N) {
; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP10]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 4
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -409,7 +385,7 @@ define void @vp_umax(ptr %a, ptr %b, ptr %c, i64 %N) {
; NO-VP-NEXT: [[TMP15:%.*]] = call <vscale x 4 x i32> @llvm.umax.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> [[WIDE_LOAD5]])
; NO-VP-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP15]], ptr [[TMP16]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
; NO-VP-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -474,12 +450,6 @@ define void @vp_umin(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]]
; IF-EVL-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP27:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP28:%.*]] = mul nuw i64 [[TMP27]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP28]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP28]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -546,8 +516,6 @@ define void @vp_umin(ptr %a, ptr %b, ptr %c, i64 %N) {
; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP10]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 4
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -558,7 +526,7 @@ define void @vp_umin(ptr %a, ptr %b, ptr %c, i64 %N) {
; NO-VP-NEXT: [[TMP15:%.*]] = call <vscale x 4 x i32> @llvm.umin.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> [[WIDE_LOAD5]])
; NO-VP-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP15]], ptr [[TMP16]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
; NO-VP-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -619,12 +587,6 @@ define void @vp_ctlz(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP21]], [[TMP20]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP22:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP23:%.*]] = mul nuw i64 [[TMP22]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP23]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP23]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -682,8 +644,6 @@ define void @vp_ctlz(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -692,7 +652,7 @@ define void @vp_ctlz(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP12:%.*]] = call <vscale x 4 x i32> @llvm.ctlz.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]], i1 true)
; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP12]], ptr [[TMP13]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP10]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]]
; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -748,12 +708,6 @@ define void @vp_cttz(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP7]], [[TMP6]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 4
-; IF-EVL-NEXT: [[TMP10:%.*]] = sub i64 [[TMP9]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP10]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP9]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -811,8 +765,6 @@ define void @vp_cttz(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -821,7 +773,7 @@ define void @vp_cttz(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP12:%.*]] = call <vscale x 4 x i32> @llvm.cttz.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]], i1 true)
; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP12]], ptr [[TMP13]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP10]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]]
; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -877,12 +829,6 @@ define void @vp_lrint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP24]], [[TMP23]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP25:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP26:%.*]] = mul nuw i64 [[TMP25]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP26]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP26]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -944,8 +890,6 @@ define void @vp_lrint(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -956,7 +900,7 @@ define void @vp_lrint(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP14:%.*]] = trunc <vscale x 4 x i64> [[TMP13]] to <vscale x 4 x i32>
; NO-VP-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP14]], ptr [[TMP15]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP10]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]]
; NO-VP-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -1016,12 +960,6 @@ define void @vp_llrint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP24]], [[TMP23]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP25:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP26:%.*]] = mul nuw i64 [[TMP25]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP26]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP26]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -1083,8 +1021,6 @@ define void @vp_llrint(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -1095,7 +1031,7 @@ define void @vp_llrint(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP14:%.*]] = trunc <vscale x 4 x i64> [[TMP13]] to <vscale x 4 x i32>
; NO-VP-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP14]], ptr [[TMP15]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP10]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]]
; NO-VP-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -1155,12 +1091,6 @@ define void @vp_abs(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP21]], [[TMP20]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP22:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP23:%.*]] = mul nuw i64 [[TMP22]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP23]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP23]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -1218,8 +1148,6 @@ define void @vp_abs(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -1228,7 +1156,7 @@ define void @vp_abs(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP12:%.*]] = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]], i1 true)
; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP12]], ptr [[TMP13]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP10]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]]
; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll
index 2be74e5..6db81b3 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll
@@ -22,12 +22,6 @@ define void @vp_sext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; IF-EVL-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[TMP8]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP9]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP8]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 2
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -85,8 +79,6 @@ define void @vp_sext(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -95,7 +87,7 @@ define void @vp_sext(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP10:%.*]] = sext <vscale x 2 x i32> [[WIDE_LOAD]] to <vscale x 2 x i64>
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: store <vscale x 2 x i64> [[TMP10]], ptr [[TMP11]], align 8, !alias.scope [[META3:![0-9]+]], !noalias [[META0]]
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP6]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -150,12 +142,6 @@ define void @vp_zext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; IF-EVL-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[TMP8]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP9]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP8]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 2
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -213,8 +199,6 @@ define void @vp_zext(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -223,7 +207,7 @@ define void @vp_zext(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP10:%.*]] = zext <vscale x 2 x i32> [[WIDE_LOAD]] to <vscale x 2 x i64>
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: store <vscale x 2 x i64> [[TMP10]], ptr [[TMP11]], align 8, !alias.scope [[META12:![0-9]+]], !noalias [[META9]]
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP6]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -278,12 +262,6 @@ define void @vp_trunc(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; IF-EVL-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[TMP8]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP9]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP8]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 2
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -341,8 +319,6 @@ define void @vp_trunc(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -351,7 +327,7 @@ define void @vp_trunc(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP10:%.*]] = trunc <vscale x 2 x i64> [[WIDE_LOAD]] to <vscale x 2 x i32>
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: store <vscale x 2 x i32> [[TMP10]], ptr [[TMP11]], align 4, !alias.scope [[META19:![0-9]+]], !noalias [[META16]]
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP6]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -406,12 +382,6 @@ define void @vp_fpext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; IF-EVL-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[TMP8]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP9]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP8]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 2
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -469,8 +439,6 @@ define void @vp_fpext(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -479,7 +447,7 @@ define void @vp_fpext(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP10:%.*]] = fpext <vscale x 2 x float> [[WIDE_LOAD]] to <vscale x 2 x double>
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds double, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: store <vscale x 2 x double> [[TMP10]], ptr [[TMP11]], align 8, !alias.scope [[META26:![0-9]+]], !noalias [[META23]]
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP6]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -534,12 +502,6 @@ define void @vp_fptrunc(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; IF-EVL-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[TMP8]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP9]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP8]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 2
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -597,8 +559,6 @@ define void @vp_fptrunc(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -607,7 +567,7 @@ define void @vp_fptrunc(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP10:%.*]] = fptrunc <vscale x 2 x double> [[WIDE_LOAD]] to <vscale x 2 x float>
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: store <vscale x 2 x float> [[TMP10]], ptr [[TMP11]], align 4, !alias.scope [[META33:![0-9]+]], !noalias [[META30]]
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP6]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -662,12 +622,6 @@ define void @vp_sitofp(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP8]], [[TMP7]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
-; IF-EVL-NEXT: [[TMP11:%.*]] = sub i64 [[TMP10]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP11]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP10]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP12]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -725,8 +679,6 @@ define void @vp_sitofp(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -735,7 +687,7 @@ define void @vp_sitofp(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP12:%.*]] = sitofp <vscale x 4 x i32> [[WIDE_LOAD]] to <vscale x 4 x float>
; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: store <vscale x 4 x float> [[TMP12]], ptr [[TMP13]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP10]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]]
; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP37:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -790,12 +742,6 @@ define void @vp_uitofp(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP8]], [[TMP7]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
-; IF-EVL-NEXT: [[TMP11:%.*]] = sub i64 [[TMP10]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP11]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP10]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP12]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -853,8 +799,6 @@ define void @vp_uitofp(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -863,7 +807,7 @@ define void @vp_uitofp(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP12:%.*]] = uitofp <vscale x 4 x i32> [[WIDE_LOAD]] to <vscale x 4 x float>
; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: store <vscale x 4 x float> [[TMP12]], ptr [[TMP13]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP10]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]]
; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP39:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -918,12 +862,6 @@ define void @vp_fptosi(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP8]], [[TMP7]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
-; IF-EVL-NEXT: [[TMP11:%.*]] = sub i64 [[TMP10]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP11]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP10]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP12]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -981,8 +919,6 @@ define void @vp_fptosi(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -991,7 +927,7 @@ define void @vp_fptosi(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP12:%.*]] = fptosi <vscale x 4 x float> [[WIDE_LOAD]] to <vscale x 4 x i32>
; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP12]], ptr [[TMP13]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP10]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]]
; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP41:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -1046,12 +982,6 @@ define void @vp_fptoui(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP8]], [[TMP7]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
-; IF-EVL-NEXT: [[TMP11:%.*]] = sub i64 [[TMP10]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP11]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP10]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP12]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -1109,8 +1039,6 @@ define void @vp_fptoui(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -1119,7 +1047,7 @@ define void @vp_fptoui(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP12:%.*]] = fptoui <vscale x 4 x float> [[WIDE_LOAD]] to <vscale x 4 x i32>
; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP12]], ptr [[TMP13]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP10]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]]
; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP43:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -1174,12 +1102,6 @@ define void @vp_inttoptr(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP8]], [[TMP7]]
; IF-EVL-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 2
-; IF-EVL-NEXT: [[TMP11:%.*]] = sub i64 [[TMP10]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP11]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP10]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP12]], 2
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -1237,8 +1159,6 @@ define void @vp_inttoptr(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 2
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -1247,7 +1167,7 @@ define void @vp_inttoptr(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP12:%.*]] = inttoptr <vscale x 2 x i64> [[WIDE_LOAD]] to <vscale x 2 x ptr>
; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds ptr, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: store <vscale x 2 x ptr> [[TMP12]], ptr [[TMP13]], align 8
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP10]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]]
; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP45:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -1293,12 +1213,6 @@ define void @vp_ptrtoint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[ENTRY:.*]]:
; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2
; IF-EVL-NEXT: [[TMP9:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
@@ -1353,12 +1267,10 @@ define void @vp_ptrtoint(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; NO-VP-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
; NO-VP-NEXT: [[TMP7:%.*]] = mul <vscale x 2 x i64> [[TMP6]], splat (i64 1)
; NO-VP-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP7]]
-; NO-VP-NEXT: [[TMP8:%.*]] = mul i64 1, [[TMP5]]
+; NO-VP-NEXT: [[TMP8:%.*]] = mul i64 1, [[TMP3]]
; NO-VP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP8]], i64 0
; NO-VP-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -1369,7 +1281,7 @@ define void @vp_ptrtoint(ptr %a, ptr %b, i64 %N) {
; NO-VP-NEXT: [[TMP10:%.*]] = ptrtoint <vscale x 2 x ptr> [[TMP9]] to <vscale x 2 x i64>
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 2 x i64> [[TMP10]], ptr [[TMP11]], align 8
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP47:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll
index 76a830a..0c67e47 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll
@@ -23,12 +23,6 @@ define i32 @cond_add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: entry:
; IF-EVL-OUTLOOP-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL-OUTLOOP: vector.ph:
-; IF-EVL-OUTLOOP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-OUTLOOP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-OUTLOOP-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-OUTLOOP-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP6]]
-; IF-EVL-OUTLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-OUTLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-OUTLOOP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-OUTLOOP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-OUTLOOP-NEXT: [[TMP9:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[START]], i32 0
@@ -76,12 +70,6 @@ define i32 @cond_add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: entry:
; IF-EVL-INLOOP-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL-INLOOP: vector.ph:
-; IF-EVL-INLOOP-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-INLOOP-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
-; IF-EVL-INLOOP-NEXT: [[TMP8:%.*]] = sub i64 [[TMP7]], 1
-; IF-EVL-INLOOP-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP8]]
-; IF-EVL-INLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP7]]
-; IF-EVL-INLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-INLOOP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-INLOOP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; IF-EVL-INLOOP-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -134,8 +122,6 @@ define i32 @cond_add(ptr %a, i64 %n, i32 %start) {
; NO-VP-OUTLOOP-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
; NO-VP-OUTLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP7]]
; NO-VP-OUTLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-OUTLOOP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-OUTLOOP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; NO-VP-OUTLOOP-NEXT: [[TMP11:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[START]], i32 0
; NO-VP-OUTLOOP-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP-OUTLOOP: vector.body:
@@ -146,7 +132,7 @@ define i32 @cond_add(ptr %a, i64 %n, i32 %start) {
; NO-VP-OUTLOOP-NEXT: [[TMP21:%.*]] = icmp sgt <vscale x 4 x i32> [[WIDE_MASKED_LOAD]], splat (i32 3)
; NO-VP-OUTLOOP-NEXT: [[TMP16:%.*]] = select <vscale x 4 x i1> [[TMP21]], <vscale x 4 x i32> [[WIDE_MASKED_LOAD]], <vscale x 4 x i32> zeroinitializer
; NO-VP-OUTLOOP-NEXT: [[TMP17]] = add <vscale x 4 x i32> [[TMP16]], [[VEC_PHI]]
-; NO-VP-OUTLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
+; NO-VP-OUTLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
; NO-VP-OUTLOOP-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-OUTLOOP-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; NO-VP-OUTLOOP: middle.block:
@@ -184,8 +170,6 @@ define i32 @cond_add(ptr %a, i64 %n, i32 %start) {
; NO-VP-INLOOP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-INLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-INLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-INLOOP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-INLOOP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-INLOOP-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP-INLOOP: vector.body:
; NO-VP-INLOOP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -196,7 +180,7 @@ define i32 @cond_add(ptr %a, i64 %n, i32 %start) {
; NO-VP-INLOOP-NEXT: [[TMP10:%.*]] = select <vscale x 4 x i1> [[TMP9]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> zeroinitializer
; NO-VP-INLOOP-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP10]])
; NO-VP-INLOOP-NEXT: [[TMP12]] = add i32 [[VEC_PHI]], [[TMP11]]
-; NO-VP-INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-INLOOP-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-INLOOP-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; NO-VP-INLOOP: middle.block:
@@ -246,12 +230,6 @@ define i32 @cond_add_pred(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: entry:
; IF-EVL-OUTLOOP-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL-OUTLOOP: vector.ph:
-; IF-EVL-OUTLOOP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-OUTLOOP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-OUTLOOP-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-OUTLOOP-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP6]]
-; IF-EVL-OUTLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-OUTLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-OUTLOOP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-OUTLOOP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-OUTLOOP-NEXT: [[TMP9:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[START]], i32 0
@@ -308,12 +286,6 @@ define i32 @cond_add_pred(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: entry:
; IF-EVL-INLOOP-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL-INLOOP: vector.ph:
-; IF-EVL-INLOOP-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-INLOOP-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
-; IF-EVL-INLOOP-NEXT: [[TMP8:%.*]] = sub i64 [[TMP7]], 1
-; IF-EVL-INLOOP-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP8]]
-; IF-EVL-INLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP7]]
-; IF-EVL-INLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-INLOOP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-INLOOP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; IF-EVL-INLOOP-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -369,8 +341,6 @@ define i32 @cond_add_pred(ptr %a, i64 %n, i32 %start) {
; NO-VP-OUTLOOP-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
; NO-VP-OUTLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP7]]
; NO-VP-OUTLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-OUTLOOP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-OUTLOOP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; NO-VP-OUTLOOP-NEXT: [[TMP11:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[START]], i32 0
; NO-VP-OUTLOOP-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP-OUTLOOP: vector.body:
@@ -381,7 +351,7 @@ define i32 @cond_add_pred(ptr %a, i64 %n, i32 %start) {
; NO-VP-OUTLOOP-NEXT: [[TMP18:%.*]] = icmp sgt <vscale x 4 x i32> [[WIDE_MASKED_LOAD]], splat (i32 3)
; NO-VP-OUTLOOP-NEXT: [[TMP16:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[WIDE_MASKED_LOAD]]
; NO-VP-OUTLOOP-NEXT: [[PREDPHI]] = select <vscale x 4 x i1> [[TMP18]], <vscale x 4 x i32> [[TMP16]], <vscale x 4 x i32> [[VEC_PHI]]
-; NO-VP-OUTLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
+; NO-VP-OUTLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
; NO-VP-OUTLOOP-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-OUTLOOP-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; NO-VP-OUTLOOP: middle.block:
@@ -423,8 +393,6 @@ define i32 @cond_add_pred(ptr %a, i64 %n, i32 %start) {
; NO-VP-INLOOP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-INLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-INLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-INLOOP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-INLOOP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-INLOOP-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP-INLOOP: vector.body:
; NO-VP-INLOOP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -435,7 +403,7 @@ define i32 @cond_add_pred(ptr %a, i64 %n, i32 %start) {
; NO-VP-INLOOP-NEXT: [[TMP10:%.*]] = select <vscale x 4 x i1> [[TMP9]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> zeroinitializer
; NO-VP-INLOOP-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP10]])
; NO-VP-INLOOP-NEXT: [[TMP12]] = add i32 [[VEC_PHI]], [[TMP11]]
-; NO-VP-INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-INLOOP-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-INLOOP-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; NO-VP-INLOOP: middle.block:
@@ -495,12 +463,6 @@ define i32 @step_cond_add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: entry:
; IF-EVL-OUTLOOP-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL-OUTLOOP: vector.ph:
-; IF-EVL-OUTLOOP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-OUTLOOP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-OUTLOOP-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-OUTLOOP-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP6]]
-; IF-EVL-OUTLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-OUTLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-OUTLOOP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-OUTLOOP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-OUTLOOP-NEXT: [[TMP9:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[START]], i32 0
@@ -557,12 +519,6 @@ define i32 @step_cond_add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: entry:
; IF-EVL-INLOOP-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL-INLOOP: vector.ph:
-; IF-EVL-INLOOP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-INLOOP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-INLOOP-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-INLOOP-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP6]]
-; IF-EVL-INLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-INLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-INLOOP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-INLOOP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-INLOOP-NEXT: [[TMP9:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
@@ -624,13 +580,11 @@ define i32 @step_cond_add(ptr %a, i64 %n, i32 %start) {
; NO-VP-OUTLOOP-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
; NO-VP-OUTLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP7]]
; NO-VP-OUTLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-OUTLOOP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-OUTLOOP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; NO-VP-OUTLOOP-NEXT: [[TMP11:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[START]], i32 0
; NO-VP-OUTLOOP-NEXT: [[TMP12:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
; NO-VP-OUTLOOP-NEXT: [[TMP14:%.*]] = mul <vscale x 4 x i32> [[TMP12]], splat (i32 1)
; NO-VP-OUTLOOP-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i32> zeroinitializer, [[TMP14]]
-; NO-VP-OUTLOOP-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP10]] to i32
+; NO-VP-OUTLOOP-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP7]] to i32
; NO-VP-OUTLOOP-NEXT: [[TMP17:%.*]] = mul i32 1, [[TMP16]]
; NO-VP-OUTLOOP-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP17]], i64 0
; NO-VP-OUTLOOP-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
@@ -644,7 +598,7 @@ define i32 @step_cond_add(ptr %a, i64 %n, i32 %start) {
; NO-VP-OUTLOOP-NEXT: [[TMP27:%.*]] = icmp sgt <vscale x 4 x i32> [[WIDE_MASKED_LOAD]], [[VEC_IND]]
; NO-VP-OUTLOOP-NEXT: [[TMP22:%.*]] = select <vscale x 4 x i1> [[TMP27]], <vscale x 4 x i32> [[WIDE_MASKED_LOAD]], <vscale x 4 x i32> zeroinitializer
; NO-VP-OUTLOOP-NEXT: [[TMP23]] = add <vscale x 4 x i32> [[TMP22]], [[VEC_PHI]]
-; NO-VP-OUTLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
+; NO-VP-OUTLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
; NO-VP-OUTLOOP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[DOTSPLAT]]
; NO-VP-OUTLOOP-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-OUTLOOP-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
@@ -684,12 +638,10 @@ define i32 @step_cond_add(ptr %a, i64 %n, i32 %start) {
; NO-VP-INLOOP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-INLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-INLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-INLOOP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-INLOOP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-INLOOP-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
; NO-VP-INLOOP-NEXT: [[TMP8:%.*]] = mul <vscale x 4 x i32> [[TMP6]], splat (i32 1)
; NO-VP-INLOOP-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i32> zeroinitializer, [[TMP8]]
-; NO-VP-INLOOP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP5]] to i32
+; NO-VP-INLOOP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP3]] to i32
; NO-VP-INLOOP-NEXT: [[TMP11:%.*]] = mul i32 1, [[TMP10]]
; NO-VP-INLOOP-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP11]], i64 0
; NO-VP-INLOOP-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
@@ -704,7 +656,7 @@ define i32 @step_cond_add(ptr %a, i64 %n, i32 %start) {
; NO-VP-INLOOP-NEXT: [[TMP16:%.*]] = select <vscale x 4 x i1> [[TMP15]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> zeroinitializer
; NO-VP-INLOOP-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP16]])
; NO-VP-INLOOP-NEXT: [[TMP18]] = add i32 [[VEC_PHI]], [[TMP17]]
-; NO-VP-INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-INLOOP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[DOTSPLAT]]
; NO-VP-INLOOP-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-INLOOP-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
@@ -757,12 +709,6 @@ define i32 @step_cond_add_pred(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: entry:
; IF-EVL-OUTLOOP-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL-OUTLOOP: vector.ph:
-; IF-EVL-OUTLOOP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-OUTLOOP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-OUTLOOP-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-OUTLOOP-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP6]]
-; IF-EVL-OUTLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-OUTLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-OUTLOOP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-OUTLOOP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-OUTLOOP-NEXT: [[TMP9:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[START]], i32 0
@@ -828,12 +774,6 @@ define i32 @step_cond_add_pred(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: entry:
; IF-EVL-INLOOP-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL-INLOOP: vector.ph:
-; IF-EVL-INLOOP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-INLOOP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-INLOOP-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-INLOOP-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP6]]
-; IF-EVL-INLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-INLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-INLOOP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-INLOOP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-INLOOP-NEXT: [[TMP9:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
@@ -898,13 +838,11 @@ define i32 @step_cond_add_pred(ptr %a, i64 %n, i32 %start) {
; NO-VP-OUTLOOP-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
; NO-VP-OUTLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP7]]
; NO-VP-OUTLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-OUTLOOP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-OUTLOOP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; NO-VP-OUTLOOP-NEXT: [[TMP11:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[START]], i32 0
; NO-VP-OUTLOOP-NEXT: [[TMP12:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
; NO-VP-OUTLOOP-NEXT: [[TMP14:%.*]] = mul <vscale x 4 x i32> [[TMP12]], splat (i32 1)
; NO-VP-OUTLOOP-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i32> zeroinitializer, [[TMP14]]
-; NO-VP-OUTLOOP-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP10]] to i32
+; NO-VP-OUTLOOP-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP7]] to i32
; NO-VP-OUTLOOP-NEXT: [[TMP17:%.*]] = mul i32 1, [[TMP16]]
; NO-VP-OUTLOOP-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP17]], i64 0
; NO-VP-OUTLOOP-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
@@ -918,7 +856,7 @@ define i32 @step_cond_add_pred(ptr %a, i64 %n, i32 %start) {
; NO-VP-OUTLOOP-NEXT: [[TMP28:%.*]] = icmp sgt <vscale x 4 x i32> [[WIDE_MASKED_LOAD]], [[VEC_IND]]
; NO-VP-OUTLOOP-NEXT: [[TMP22:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[WIDE_MASKED_LOAD]]
; NO-VP-OUTLOOP-NEXT: [[PREDPHI]] = select <vscale x 4 x i1> [[TMP28]], <vscale x 4 x i32> [[TMP22]], <vscale x 4 x i32> [[VEC_PHI]]
-; NO-VP-OUTLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
+; NO-VP-OUTLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
; NO-VP-OUTLOOP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[DOTSPLAT]]
; NO-VP-OUTLOOP-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-OUTLOOP-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
@@ -962,12 +900,10 @@ define i32 @step_cond_add_pred(ptr %a, i64 %n, i32 %start) {
; NO-VP-INLOOP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-INLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-INLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-INLOOP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-INLOOP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-INLOOP-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
; NO-VP-INLOOP-NEXT: [[TMP8:%.*]] = mul <vscale x 4 x i32> [[TMP6]], splat (i32 1)
; NO-VP-INLOOP-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i32> zeroinitializer, [[TMP8]]
-; NO-VP-INLOOP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP5]] to i32
+; NO-VP-INLOOP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP3]] to i32
; NO-VP-INLOOP-NEXT: [[TMP11:%.*]] = mul i32 1, [[TMP10]]
; NO-VP-INLOOP-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP11]], i64 0
; NO-VP-INLOOP-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
@@ -982,7 +918,7 @@ define i32 @step_cond_add_pred(ptr %a, i64 %n, i32 %start) {
; NO-VP-INLOOP-NEXT: [[TMP16:%.*]] = select <vscale x 4 x i1> [[TMP15]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> zeroinitializer
; NO-VP-INLOOP-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP16]])
; NO-VP-INLOOP-NEXT: [[TMP18]] = add i32 [[VEC_PHI]], [[TMP17]]
-; NO-VP-INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-INLOOP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[DOTSPLAT]]
; NO-VP-INLOOP-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-INLOOP-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll
index a216aa8..0d1d9a9 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll
@@ -13,12 +13,6 @@ define void @test_sdiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; IF-EVL-NEXT: [[LOOP_PREHEADER:.*]]:
; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; IF-EVL-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP2]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -71,8 +65,6 @@ define void @test_sdiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP13]], 2
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -83,7 +75,7 @@ define void @test_sdiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; NO-VP-NEXT: [[TMP8:%.*]] = sdiv <vscale x 2 x i64> [[WIDE_LOAD]], [[WIDE_LOAD1]]
; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[C]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 2 x i64> [[TMP8]], ptr [[TMP9]], align 8
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -134,12 +126,6 @@ define void @test_udiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; IF-EVL-NEXT: [[LOOP_PREHEADER:.*]]:
; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; IF-EVL-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP2]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -192,8 +178,6 @@ define void @test_udiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP13]], 2
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -204,7 +188,7 @@ define void @test_udiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; NO-VP-NEXT: [[TMP8:%.*]] = udiv <vscale x 2 x i64> [[WIDE_LOAD]], [[WIDE_LOAD1]]
; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[C]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 2 x i64> [[TMP8]], ptr [[TMP9]], align 8
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -254,12 +238,6 @@ define void @test_srem(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; IF-EVL-NEXT: [[LOOP_PREHEADER:.*]]:
; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; IF-EVL-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP2]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -312,8 +290,6 @@ define void @test_srem(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP13]], 2
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -324,7 +300,7 @@ define void @test_srem(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; NO-VP-NEXT: [[TMP8:%.*]] = srem <vscale x 2 x i64> [[WIDE_LOAD]], [[WIDE_LOAD1]]
; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[C]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 2 x i64> [[TMP8]], ptr [[TMP9]], align 8
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -374,12 +350,6 @@ define void @test_urem(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; IF-EVL-NEXT: [[LOOP_PREHEADER:.*]]:
; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; IF-EVL-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP2]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -432,8 +402,6 @@ define void @test_urem(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP13]], 2
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
; NO-VP: [[VECTOR_BODY]]:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -444,7 +412,7 @@ define void @test_urem(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; NO-VP-NEXT: [[TMP8:%.*]] = urem <vscale x 2 x i64> [[WIDE_LOAD]], [[WIDE_LOAD1]]
; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[C]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 2 x i64> [[TMP8]], ptr [[TMP9]], align 8
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll
index f92bf5a..76afbd45d 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll
@@ -15,12 +15,6 @@ define void @first_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
; IF-EVL-NEXT: [[ENTRY:.*]]:
; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[TC]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: [[TMP25:%.*]] = trunc i64 [[TMP8]] to i32
@@ -78,8 +72,6 @@ define void @first_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TC]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[TC]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: [[TMP6:%.*]] = call i32 @llvm.vscale.i32()
; NO-VP-NEXT: [[TMP7:%.*]] = mul nuw i32 [[TMP6]], 4
; NO-VP-NEXT: [[TMP8:%.*]] = sub i32 [[TMP7]], 1
@@ -94,7 +86,7 @@ define void @first_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
; NO-VP-NEXT: [[TMP13:%.*]] = add nsw <vscale x 4 x i32> [[TMP12]], [[WIDE_LOAD]]
; NO-VP-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP13]], ptr [[TMP14]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -147,12 +139,6 @@ define void @second_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
; IF-EVL-NEXT: [[ENTRY:.*]]:
; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[TC]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: [[TMP32:%.*]] = trunc i64 [[TMP8]] to i32
@@ -218,8 +204,6 @@ define void @second_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TC]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[TC]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: [[TMP6:%.*]] = call i32 @llvm.vscale.i32()
; NO-VP-NEXT: [[TMP7:%.*]] = mul nuw i32 [[TMP6]], 4
; NO-VP-NEXT: [[TMP8:%.*]] = sub i32 [[TMP7]], 1
@@ -240,7 +224,7 @@ define void @second_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
; NO-VP-NEXT: [[TMP17:%.*]] = add nsw <vscale x 4 x i32> [[TMP15]], [[TMP16]]
; NO-VP-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP17]], ptr [[TMP18]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -300,12 +284,6 @@ define void @third_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
; IF-EVL-NEXT: [[ENTRY:.*]]:
; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[TC]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: [[TMP39:%.*]] = trunc i64 [[TMP8]] to i32
@@ -381,8 +359,6 @@ define void @third_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TC]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[TC]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: [[TMP6:%.*]] = call i32 @llvm.vscale.i32()
; NO-VP-NEXT: [[TMP7:%.*]] = mul nuw i32 [[TMP6]], 4
; NO-VP-NEXT: [[TMP8:%.*]] = sub i32 [[TMP7]], 1
@@ -410,7 +386,7 @@ define void @third_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
; NO-VP-NEXT: [[TMP22:%.*]] = add <vscale x 4 x i32> [[TMP21]], [[TMP18]]
; NO-VP-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP22]], ptr [[TMP23]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP25]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -486,8 +462,6 @@ define i32 @FOR_reduction(ptr noalias %A, ptr noalias %B, i64 %TC) {
; IF-EVL-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TC]], [[TMP3]]
; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[TC]], [[N_MOD_VF]]
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; IF-EVL-NEXT: [[TMP6:%.*]] = call i32 @llvm.vscale.i32()
; IF-EVL-NEXT: [[TMP7:%.*]] = mul nuw i32 [[TMP6]], 4
; IF-EVL-NEXT: [[TMP8:%.*]] = sub i32 [[TMP7]], 1
@@ -502,7 +476,7 @@ define i32 @FOR_reduction(ptr noalias %A, ptr noalias %B, i64 %TC) {
; IF-EVL-NEXT: [[TMP11:%.*]] = add nsw <vscale x 4 x i32> [[TMP10]], [[WIDE_LOAD]]
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[INDVARS]]
; IF-EVL-NEXT: store <vscale x 4 x i32> [[TMP11]], ptr [[TMP12]], align 4
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDVARS]], [[TMP5]]
+; IF-EVL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDVARS]], [[TMP3]]
; IF-EVL-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; IF-EVL-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -547,8 +521,6 @@ define i32 @FOR_reduction(ptr noalias %A, ptr noalias %B, i64 %TC) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TC]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[TC]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: [[TMP6:%.*]] = call i32 @llvm.vscale.i32()
; NO-VP-NEXT: [[TMP7:%.*]] = mul nuw i32 [[TMP6]], 4
; NO-VP-NEXT: [[TMP8:%.*]] = sub i32 [[TMP7]], 1
@@ -563,7 +535,7 @@ define i32 @FOR_reduction(ptr noalias %A, ptr noalias %B, i64 %TC) {
; NO-VP-NEXT: [[TMP13:%.*]] = add nsw <vscale x 4 x i32> [[TMP12]], [[WIDE_LOAD]]
; NO-VP-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP13]], ptr [[TMP14]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; NO-VP: [[MIDDLE_BLOCK]]:
@@ -621,12 +593,6 @@ define void @first_order_recurrence_indvar(ptr noalias %A, i64 %TC) {
; IF-EVL-NEXT: [[ENTRY:.*]]:
; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; IF-EVL: [[VECTOR_PH]]:
-; IF-EVL-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; IF-EVL-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[TC]], [[TMP2]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP18]], 2
; IF-EVL-NEXT: [[TMP5:%.*]] = trunc i64 [[TMP4]] to i32
@@ -689,12 +655,10 @@ define void @first_order_recurrence_indvar(ptr noalias %A, i64 %TC) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TC]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[TC]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP4]], 2
; NO-VP-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
; NO-VP-NEXT: [[TMP7:%.*]] = mul <vscale x 2 x i64> [[TMP6]], splat (i64 1)
; NO-VP-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP7]]
-; NO-VP-NEXT: [[TMP10:%.*]] = mul i64 1, [[TMP9]]
+; NO-VP-NEXT: [[TMP10:%.*]] = mul i64 1, [[TMP3]]
; NO-VP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP10]], i64 0
; NO-VP-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; NO-VP-NEXT: [[TMP14:%.*]] = call i32 @llvm.vscale.i32()
@@ -710,7 +674,7 @@ define void @first_order_recurrence_indvar(ptr noalias %A, i64 %TC) {
; NO-VP-NEXT: [[TMP13:%.*]] = call <vscale x 2 x i64> @llvm.vector.splice.nxv2i64(<vscale x 2 x i64> [[VECTOR_RECUR]], <vscale x 2 x i64> [[TMP12]], i32 -1)
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i64, ptr [[A]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 2 x i64> [[TMP13]], ptr [[TMP11]], align 8
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
; NO-VP-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll
index da5aed9..47339e0 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll
@@ -12,12 +12,6 @@ define void @gather_scatter(ptr noalias %in, ptr noalias %out, ptr noalias %inde
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2
; IF-EVL-NEXT: [[TMP9:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
@@ -27,7 +21,7 @@ define void @gather_scatter(ptr noalias %in, ptr noalias %out, ptr noalias %inde
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[INDEX_EVL_NEXT:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], [[ENTRY]] ], [ [[VEC_IND_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[ENTRY]] ], [ [[AVL_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[ENTRY]] ], [ [[AVL_NEXT:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[TMP13:%.*]] = mul i64 1, [[TMP12]]
@@ -75,12 +69,10 @@ define void @gather_scatter(ptr noalias %in, ptr noalias %out, ptr noalias %inde
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; NO-VP-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
; NO-VP-NEXT: [[TMP7:%.*]] = mul <vscale x 2 x i64> [[TMP6]], splat (i64 1)
; NO-VP-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP7]]
-; NO-VP-NEXT: [[TMP8:%.*]] = mul i64 1, [[TMP5]]
+; NO-VP-NEXT: [[TMP8:%.*]] = mul i64 1, [[TMP3]]
; NO-VP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP8]], i64 0
; NO-VP-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; NO-VP-NEXT: br label [[FOR_BODY:%.*]]
@@ -93,7 +85,7 @@ define void @gather_scatter(ptr noalias %in, ptr noalias %out, ptr noalias %inde
; NO-VP-NEXT: [[WIDE_MASKED_GATHER2:%.*]] = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32.nxv2p0(<vscale x 2 x ptr> [[TMP10]], i32 4, <vscale x 2 x i1> splat (i1 true), <vscale x 2 x float> poison)
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[OUT:%.*]], <vscale x 2 x i64> [[WIDE_MASKED_GATHER]]
; NO-VP-NEXT: call void @llvm.masked.scatter.nxv2f32.nxv2p0(<vscale x 2 x float> [[WIDE_MASKED_GATHER2]], <vscale x 2 x ptr> [[TMP11]], i32 4, <vscale x 2 x i1> splat (i1 true))
-; NO-VP-NEXT: [[INDVARS_IV_NEXT]] = add nuw i64 [[INDVARS_IV]], [[TMP5]]
+; NO-VP-NEXT: [[INDVARS_IV_NEXT]] = add nuw i64 [[INDVARS_IV]], [[TMP3]]
; NO-VP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll
index 433d1e4..f425010 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll
@@ -14,19 +14,13 @@ define i32 @add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
@@ -67,8 +61,6 @@ define i32 @add(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP: vector.body:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -77,7 +69,7 @@ define i32 @add(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4
; NO-VP-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
; NO-VP-NEXT: [[TMP10]] = add i32 [[VEC_PHI]], [[TMP9]]
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; NO-VP: middle.block:
@@ -229,19 +221,13 @@ define i32 @or(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
@@ -282,8 +268,6 @@ define i32 @or(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP: vector.body:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -292,7 +276,7 @@ define i32 @or(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4
; NO-VP-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.or.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
; NO-VP-NEXT: [[TMP10]] = or i32 [[VEC_PHI]], [[TMP9]]
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; NO-VP: middle.block:
@@ -337,19 +321,13 @@ define i32 @and(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
@@ -390,8 +368,6 @@ define i32 @and(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP: vector.body:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -400,7 +376,7 @@ define i32 @and(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4
; NO-VP-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.and.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
; NO-VP-NEXT: [[TMP10]] = and i32 [[VEC_PHI]], [[TMP9]]
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; NO-VP: middle.block:
@@ -445,19 +421,13 @@ define i32 @xor(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
@@ -498,8 +468,6 @@ define i32 @xor(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP: vector.body:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -508,7 +476,7 @@ define i32 @xor(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4
; NO-VP-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.xor.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
; NO-VP-NEXT: [[TMP10]] = xor i32 [[VEC_PHI]], [[TMP9]]
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; NO-VP: middle.block:
@@ -553,19 +521,13 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[RDX_MINMAX:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
@@ -607,8 +569,6 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP: vector.body:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -617,7 +577,7 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4
; NO-VP-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.smin.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
; NO-VP-NEXT: [[RDX_MINMAX]] = call i32 @llvm.smin.i32(i32 [[TMP9]], i32 [[VEC_PHI]])
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; NO-VP: middle.block:
@@ -664,19 +624,13 @@ define i32 @smax(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[RDX_MINMAX:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
@@ -718,8 +672,6 @@ define i32 @smax(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP: vector.body:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -728,7 +680,7 @@ define i32 @smax(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4
; NO-VP-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.smax.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
; NO-VP-NEXT: [[RDX_MINMAX]] = call i32 @llvm.smax.i32(i32 [[TMP9]], i32 [[VEC_PHI]])
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; NO-VP: middle.block:
@@ -775,19 +727,13 @@ define i32 @umin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[RDX_MINMAX:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
@@ -829,8 +775,6 @@ define i32 @umin(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP: vector.body:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -839,7 +783,7 @@ define i32 @umin(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4
; NO-VP-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.umin.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
; NO-VP-NEXT: [[RDX_MINMAX]] = call i32 @llvm.umin.i32(i32 [[TMP9]], i32 [[VEC_PHI]])
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; NO-VP: middle.block:
@@ -886,19 +830,13 @@ define i32 @umax(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[RDX_MINMAX:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
@@ -940,8 +878,6 @@ define i32 @umax(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP: vector.body:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -950,7 +886,7 @@ define i32 @umax(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4
; NO-VP-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.umax.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
; NO-VP-NEXT: [[RDX_MINMAX]] = call i32 @llvm.umax.i32(i32 [[TMP9]], i32 [[VEC_PHI]])
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; NO-VP: middle.block:
@@ -997,19 +933,13 @@ define float @fadd(ptr %a, i64 %n, float %start) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi float [ [[START:%.*]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
@@ -1050,8 +980,6 @@ define float @fadd(ptr %a, i64 %n, float %start) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP: vector.body:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1060,7 +988,7 @@ define float @fadd(ptr %a, i64 %n, float %start) {
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP7]], align 4
; NO-VP-NEXT: [[TMP9:%.*]] = call reassoc float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[WIDE_LOAD]])
; NO-VP-NEXT: [[TMP10]] = fadd reassoc float [[VEC_PHI]], [[TMP9]]
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; NO-VP: middle.block:
@@ -1212,19 +1140,13 @@ define float @fmin(ptr %a, i64 %n, float %start) #0 {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi float [ [[START:%.*]], [[VECTOR_PH]] ], [ [[RDX_MINMAX_SELECT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
@@ -1267,8 +1189,6 @@ define float @fmin(ptr %a, i64 %n, float %start) #0 {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP: vector.body:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1278,7 +1198,7 @@ define float @fmin(ptr %a, i64 %n, float %start) #0 {
; NO-VP-NEXT: [[TMP9:%.*]] = call fast float @llvm.vector.reduce.fmin.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]])
; NO-VP-NEXT: [[RDX_MINMAX_CMP:%.*]] = fcmp fast olt float [[TMP9]], [[VEC_PHI]]
; NO-VP-NEXT: [[RDX_MINMAX_SELECT]] = select fast i1 [[RDX_MINMAX_CMP]], float [[TMP9]], float [[VEC_PHI]]
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
; NO-VP: middle.block:
@@ -1325,19 +1245,13 @@ define float @fmax(ptr %a, i64 %n, float %start) #0 {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi float [ [[START:%.*]], [[VECTOR_PH]] ], [ [[RDX_MINMAX_SELECT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
@@ -1380,8 +1294,6 @@ define float @fmax(ptr %a, i64 %n, float %start) #0 {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP: vector.body:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1391,7 +1303,7 @@ define float @fmax(ptr %a, i64 %n, float %start) #0 {
; NO-VP-NEXT: [[TMP9:%.*]] = call fast float @llvm.vector.reduce.fmax.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]])
; NO-VP-NEXT: [[RDX_MINMAX_CMP:%.*]] = fcmp fast ogt float [[TMP9]], [[VEC_PHI]]
; NO-VP-NEXT: [[RDX_MINMAX_SELECT]] = select fast i1 [[RDX_MINMAX_CMP]], float [[TMP9]], float [[VEC_PHI]]
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
; NO-VP: middle.block:
@@ -1654,19 +1566,13 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi float [ [[START:%.*]], [[VECTOR_PH]] ], [ [[TMP18:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP10]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -1712,8 +1618,6 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP: vector.body:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1725,7 +1629,7 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) {
; NO-VP-NEXT: [[TMP11:%.*]] = fmul reassoc <vscale x 4 x float> [[WIDE_LOAD]], [[WIDE_LOAD1]]
; NO-VP-NEXT: [[TMP12:%.*]] = call reassoc float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[TMP11]])
; NO-VP-NEXT: [[TMP13]] = fadd reassoc float [[VEC_PHI]], [[TMP12]]
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]]
; NO-VP: middle.block:
@@ -1774,19 +1678,13 @@ define i32 @anyof_icmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
@@ -1831,8 +1729,6 @@ define i32 @anyof_icmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP: vector.body:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1841,7 +1737,7 @@ define i32 @anyof_icmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4
; NO-VP-NEXT: [[TMP9:%.*]] = icmp slt <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 3)
; NO-VP-NEXT: [[TMP10]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP9]]
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP34:![0-9]+]]
; NO-VP: middle.block:
@@ -1891,19 +1787,13 @@ define i32 @anyof_fcmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
@@ -1948,8 +1838,6 @@ define i32 @anyof_fcmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP: vector.body:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1958,7 +1846,7 @@ define i32 @anyof_fcmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP7]], align 4
; NO-VP-NEXT: [[TMP9:%.*]] = fcmp fast olt <vscale x 4 x float> [[WIDE_LOAD]], splat (float 3.000000e+00)
; NO-VP-NEXT: [[TMP10]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP9]]
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]]
; NO-VP: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll
index c5d2739..8198409 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll
@@ -13,18 +13,12 @@ define void @interleave(ptr noalias %a, ptr noalias %b, i64 %N) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP2:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP2]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP11]], i64 0
; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
@@ -75,8 +69,6 @@ define void @interleave(ptr noalias %a, ptr noalias %b, i64 %N) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP: vector.body:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -88,7 +80,7 @@ define void @interleave(ptr noalias %a, ptr noalias %b, i64 %N) {
; NO-VP-NEXT: [[TMP21:%.*]] = add nsw <vscale x 4 x i32> [[TMP19]], [[TMP18]]
; NO-VP-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP21]], ptr [[TMP22]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; NO-VP: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-intermediate-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-intermediate-store.ll
index be6ae1d..84ea3b9 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-intermediate-store.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-intermediate-store.ll
@@ -31,12 +31,6 @@ define void @reduction_intermediate_store(ptr %a, i64 %n, i32 %start, ptr %addr)
; IF-EVL-OUTLOOP-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; IF-EVL-OUTLOOP-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[ENTRY:%.*]]
; IF-EVL-OUTLOOP: vector.ph:
-; IF-EVL-OUTLOOP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-OUTLOOP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4
-; IF-EVL-OUTLOOP-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1
-; IF-EVL-OUTLOOP-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP7]]
-; IF-EVL-OUTLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP6]]
-; IF-EVL-OUTLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-OUTLOOP-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-OUTLOOP-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 4
; IF-EVL-OUTLOOP-NEXT: [[TMP10:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[START]], i32 0
@@ -89,12 +83,6 @@ define void @reduction_intermediate_store(ptr %a, i64 %n, i32 %start, ptr %addr)
; IF-EVL-INLOOP-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; IF-EVL-INLOOP-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; IF-EVL-INLOOP: vector.ph:
-; IF-EVL-INLOOP-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-INLOOP-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 4
-; IF-EVL-INLOOP-NEXT: [[TMP10:%.*]] = sub i64 [[TMP9]], 1
-; IF-EVL-INLOOP-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP10]]
-; IF-EVL-INLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP9]]
-; IF-EVL-INLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-INLOOP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-INLOOP-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 4
; IF-EVL-INLOOP-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -152,8 +140,6 @@ define void @reduction_intermediate_store(ptr %a, i64 %n, i32 %start, ptr %addr)
; NO-VP-OUTLOOP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-OUTLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP5]]
; NO-VP-OUTLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-OUTLOOP-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-OUTLOOP-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
; NO-VP-OUTLOOP-NEXT: [[TMP8:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[START]], i32 0
; NO-VP-OUTLOOP-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP-OUTLOOP: vector.body:
@@ -162,7 +148,7 @@ define void @reduction_intermediate_store(ptr %a, i64 %n, i32 %start, ptr %addr)
; NO-VP-OUTLOOP-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
; NO-VP-OUTLOOP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP10]], align 4, !alias.scope [[META0:![0-9]+]]
; NO-VP-OUTLOOP-NEXT: [[TMP12]] = add <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
-; NO-VP-OUTLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
+; NO-VP-OUTLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; NO-VP-OUTLOOP-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-OUTLOOP-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; NO-VP-OUTLOOP: middle.block:
@@ -207,8 +193,6 @@ define void @reduction_intermediate_store(ptr %a, i64 %n, i32 %start, ptr %addr)
; NO-VP-INLOOP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-INLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP5]]
; NO-VP-INLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-INLOOP-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-INLOOP-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
; NO-VP-INLOOP-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP-INLOOP: vector.body:
; NO-VP-INLOOP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -217,7 +201,7 @@ define void @reduction_intermediate_store(ptr %a, i64 %n, i32 %start, ptr %addr)
; NO-VP-INLOOP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP9]], align 4, !alias.scope [[META0:![0-9]+]]
; NO-VP-INLOOP-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
; NO-VP-INLOOP-NEXT: [[TMP12]] = add i32 [[VEC_PHI]], [[TMP11]]
-; NO-VP-INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
+; NO-VP-INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; NO-VP-INLOOP-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-INLOOP-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; NO-VP-INLOOP: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll
index 62a4f73..acfad66 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll
@@ -12,18 +12,12 @@ define void @iv32(ptr noalias %a, ptr noalias %b, i32 %N) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i32 @llvm.vscale.i32()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i32 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP8:%.*]] = sub i32 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i32 [[N:%.*]], [[TMP8]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i32 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.vscale.i32()
; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i32 [[TMP9]], 4
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INDEX_EVL_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP11:%.*]] = phi i32 [ [[N]], [[ENTRY]] ], [ [[AVL_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP11:%.*]] = phi i32 [ [[N:%.*]], [[ENTRY]] ], [ [[AVL_NEXT:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[TMP11]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i32 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
@@ -61,8 +55,6 @@ define void @iv32(ptr noalias %a, ptr noalias %b, i32 %N) {
; NO-VP-NEXT: [[TMP11:%.*]] = mul nuw i32 [[TMP1]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N]], [[TMP11]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i32 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32()
-; NO-VP-NEXT: [[TMP12:%.*]] = mul nuw i32 [[TMP2]], 4
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP: vector.body:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -70,7 +62,7 @@ define void @iv32(ptr noalias %a, ptr noalias %b, i32 %N) {
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP4]], align 4
; NO-VP-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[INDEX]]
; NO-VP-NEXT: store <vscale x 4 x i32> [[WIDE_LOAD]], ptr [[TMP6]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP12]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP11]]
; NO-VP-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; NO-VP: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll
index 296405d..60e0aab 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll
@@ -16,12 +16,6 @@ define void @trip_count_max_1024(ptr %p, i64 %tc) vscale_range(2, 1024) {
; CHECK-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[TC]], i64 1)
; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
-; CHECK-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[UMAX]], [[TMP6]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -85,12 +79,6 @@ define void @overflow_at_0(ptr %p, i64 %tc) vscale_range(2, 1024) {
; CHECK: [[LOOP_PREHEADER]]:
; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
-; CHECK-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[TC]], [[TMP6]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -154,12 +142,6 @@ define void @no_overflow_at_0(ptr %p, i64 %tc) vscale_range(2, 1024) {
; CHECK: [[LOOP_PREHEADER]]:
; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[TC_ADD]], [[TMP2]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll
index e06bbe9..7a39502 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll
@@ -12,18 +12,12 @@ define void @masked_loadstore(ptr noalias %a, ptr noalias %b, i64 %n) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP9:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP15]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
@@ -72,8 +66,6 @@ define void @masked_loadstore(ptr noalias %a, ptr noalias %b, i64 %n) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[INC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: br label [[FOR_BODY:%.*]]
; NO-VP: vector.body:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[FOR_BODY]] ]
@@ -84,7 +76,7 @@ define void @masked_loadstore(ptr noalias %a, ptr noalias %b, i64 %n) {
; NO-VP-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP8]], i32 4, <vscale x 4 x i1> [[TMP7]], <vscale x 4 x i32> poison)
; NO-VP-NEXT: [[TMP9:%.*]] = add <vscale x 4 x i32> [[WIDE_LOAD]], [[WIDE_MASKED_LOAD]]
; NO-VP-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP9]], ptr [[TMP8]], i32 4, <vscale x 4 x i1> [[TMP7]])
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[INC]]
; NO-VP-NEXT: br i1 [[TMP10]], label [[FOR_INC:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; NO-VP: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll
index 775d9ca..8142154 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll
@@ -14,19 +14,13 @@ define float @fadd(ptr noalias nocapture readonly %a, i64 %n) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP9:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
@@ -66,8 +60,6 @@ define float @fadd(ptr noalias nocapture readonly %a, i64 %n) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N1]], [[TMP3]]
; NO-VP-NEXT: [[N:%.*]] = sub i64 [[N1]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: br label [[FOR_BODY:%.*]]
; NO-VP: vector.body:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
@@ -75,7 +67,7 @@ define float @fadd(ptr noalias nocapture readonly %a, i64 %n) {
; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[IV]]
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[ARRAYIDX]], align 4
; NO-VP-NEXT: [[ADD]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[SUM_07]], <vscale x 4 x float> [[WIDE_LOAD]])
-; NO-VP-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP5]]
+; NO-VP-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP3]]
; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; NO-VP: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll
index 464667d..afbed37a 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll
@@ -12,12 +12,6 @@ define i32 @add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: [[TMP9:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[START:%.*]], i32 0
@@ -25,7 +19,7 @@ define i32 @add(ptr %a, i64 %n, i32 %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[TMP9]], [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
@@ -67,8 +61,6 @@ define i32 @add(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: [[TMP6:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[START:%.*]], i32 0
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP: vector.body:
@@ -77,7 +69,7 @@ define i32 @add(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP8]], align 4
; NO-VP-NEXT: [[TMP10]] = add <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; NO-VP: middle.block:
@@ -230,12 +222,6 @@ define i32 @or(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: [[TMP9:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[START:%.*]], i32 0
@@ -243,7 +229,7 @@ define i32 @or(ptr %a, i64 %n, i32 %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[TMP9]], [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
@@ -285,8 +271,6 @@ define i32 @or(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: [[TMP6:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[START:%.*]], i32 0
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP: vector.body:
@@ -295,7 +279,7 @@ define i32 @or(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP8]], align 4
; NO-VP-NEXT: [[TMP10]] = or <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; NO-VP: middle.block:
@@ -341,12 +325,6 @@ define i32 @and(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: [[TMP9:%.*]] = insertelement <vscale x 4 x i32> splat (i32 -1), i32 [[START:%.*]], i32 0
@@ -354,7 +332,7 @@ define i32 @and(ptr %a, i64 %n, i32 %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[TMP9]], [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
@@ -396,8 +374,6 @@ define i32 @and(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: [[TMP6:%.*]] = insertelement <vscale x 4 x i32> splat (i32 -1), i32 [[START:%.*]], i32 0
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP: vector.body:
@@ -406,7 +382,7 @@ define i32 @and(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP8]], align 4
; NO-VP-NEXT: [[TMP10]] = and <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; NO-VP: middle.block:
@@ -452,12 +428,6 @@ define i32 @xor(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: [[TMP9:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[START:%.*]], i32 0
@@ -465,7 +435,7 @@ define i32 @xor(ptr %a, i64 %n, i32 %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[TMP9]], [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
@@ -507,8 +477,6 @@ define i32 @xor(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: [[TMP6:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[START:%.*]], i32 0
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP: vector.body:
@@ -517,7 +485,7 @@ define i32 @xor(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP8]], align 4
; NO-VP-NEXT: [[TMP10]] = xor <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; NO-VP: middle.block:
@@ -563,12 +531,6 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[START:%.*]], i64 0
@@ -577,7 +539,7 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[BROADCAST_SPLAT]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -621,8 +583,6 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[START:%.*]], i64 0
; NO-VP-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -633,7 +593,7 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4
; NO-VP-NEXT: [[TMP9:%.*]] = icmp slt <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
; NO-VP-NEXT: [[TMP10]] = select <vscale x 4 x i1> [[TMP9]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> [[VEC_PHI]]
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; NO-VP: middle.block:
@@ -681,12 +641,6 @@ define i32 @smax(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[START:%.*]], i64 0
@@ -695,7 +649,7 @@ define i32 @smax(ptr %a, i64 %n, i32 %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[BROADCAST_SPLAT]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -739,8 +693,6 @@ define i32 @smax(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[START:%.*]], i64 0
; NO-VP-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -751,7 +703,7 @@ define i32 @smax(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4
; NO-VP-NEXT: [[TMP9:%.*]] = icmp sgt <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
; NO-VP-NEXT: [[TMP10]] = select <vscale x 4 x i1> [[TMP9]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> [[VEC_PHI]]
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; NO-VP: middle.block:
@@ -799,12 +751,6 @@ define i32 @umin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[START:%.*]], i64 0
@@ -813,7 +759,7 @@ define i32 @umin(ptr %a, i64 %n, i32 %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[BROADCAST_SPLAT]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -857,8 +803,6 @@ define i32 @umin(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[START:%.*]], i64 0
; NO-VP-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -869,7 +813,7 @@ define i32 @umin(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4
; NO-VP-NEXT: [[TMP9:%.*]] = icmp ult <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
; NO-VP-NEXT: [[TMP10]] = select <vscale x 4 x i1> [[TMP9]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> [[VEC_PHI]]
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; NO-VP: middle.block:
@@ -917,12 +861,6 @@ define i32 @umax(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[START:%.*]], i64 0
@@ -931,7 +869,7 @@ define i32 @umax(ptr %a, i64 %n, i32 %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[BROADCAST_SPLAT]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -975,8 +913,6 @@ define i32 @umax(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[START:%.*]], i64 0
; NO-VP-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -987,7 +923,7 @@ define i32 @umax(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4
; NO-VP-NEXT: [[TMP9:%.*]] = icmp ugt <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
; NO-VP-NEXT: [[TMP10]] = select <vscale x 4 x i1> [[TMP9]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> [[VEC_PHI]]
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; NO-VP: middle.block:
@@ -1035,12 +971,6 @@ define float @fadd(ptr %a, i64 %n, float %start) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: [[TMP9:%.*]] = insertelement <vscale x 4 x float> splat (float -0.000000e+00), float [[START:%.*]], i32 0
@@ -1048,7 +978,7 @@ define float @fadd(ptr %a, i64 %n, float %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ [[TMP9]], [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
@@ -1090,8 +1020,6 @@ define float @fadd(ptr %a, i64 %n, float %start) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: [[TMP6:%.*]] = insertelement <vscale x 4 x float> splat (float -0.000000e+00), float [[START:%.*]], i32 0
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP: vector.body:
@@ -1100,7 +1028,7 @@ define float @fadd(ptr %a, i64 %n, float %start) {
; NO-VP-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]]
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP8]], align 4
; NO-VP-NEXT: [[TMP10]] = fadd reassoc <vscale x 4 x float> [[WIDE_LOAD]], [[VEC_PHI]]
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; NO-VP: middle.block:
@@ -1253,12 +1181,6 @@ define float @fmin(ptr %a, i64 %n, float %start) #0 {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[START:%.*]], i64 0
@@ -1267,7 +1189,7 @@ define float @fmin(ptr %a, i64 %n, float %start) #0 {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ [[BROADCAST_SPLAT]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -1311,8 +1233,6 @@ define float @fmin(ptr %a, i64 %n, float %start) #0 {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[START:%.*]], i64 0
; NO-VP-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x float> [[BROADCAST_SPLATINSERT]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -1323,7 +1243,7 @@ define float @fmin(ptr %a, i64 %n, float %start) #0 {
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP7]], align 4
; NO-VP-NEXT: [[TMP9:%.*]] = fcmp fast olt <vscale x 4 x float> [[WIDE_LOAD]], [[VEC_PHI]]
; NO-VP-NEXT: [[TMP10]] = select <vscale x 4 x i1> [[TMP9]], <vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[VEC_PHI]]
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
; NO-VP: middle.block:
@@ -1371,12 +1291,6 @@ define float @fmax(ptr %a, i64 %n, float %start) #0 {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[START:%.*]], i64 0
@@ -1385,7 +1299,7 @@ define float @fmax(ptr %a, i64 %n, float %start) #0 {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ [[BROADCAST_SPLAT]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -1429,8 +1343,6 @@ define float @fmax(ptr %a, i64 %n, float %start) #0 {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[START:%.*]], i64 0
; NO-VP-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x float> [[BROADCAST_SPLATINSERT]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -1441,7 +1353,7 @@ define float @fmax(ptr %a, i64 %n, float %start) #0 {
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP7]], align 4
; NO-VP-NEXT: [[TMP9:%.*]] = fcmp fast ogt <vscale x 4 x float> [[WIDE_LOAD]], [[VEC_PHI]]
; NO-VP-NEXT: [[TMP10]] = select <vscale x 4 x i1> [[TMP9]], <vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[VEC_PHI]]
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
; NO-VP: middle.block:
@@ -1705,12 +1617,6 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: [[TMP9:%.*]] = insertelement <vscale x 4 x float> splat (float -0.000000e+00), float [[START:%.*]], i32 0
@@ -1718,7 +1624,7 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ [[TMP9]], [[VECTOR_PH]] ], [ [[TMP17:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
@@ -1764,8 +1670,6 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: [[TMP6:%.*]] = insertelement <vscale x 4 x float> splat (float -0.000000e+00), float [[START:%.*]], i32 0
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP: vector.body:
@@ -1776,7 +1680,7 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) {
; NO-VP-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 [[INDEX]]
; NO-VP-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x float>, ptr [[TMP10]], align 4
; NO-VP-NEXT: [[TMP12]] = call reassoc <vscale x 4 x float> @llvm.fmuladd.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[WIDE_LOAD1]], <vscale x 4 x float> [[VEC_PHI]])
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]]
; NO-VP: middle.block:
@@ -1826,19 +1730,13 @@ define i32 @anyof_icmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -1883,8 +1781,6 @@ define i32 @anyof_icmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP: vector.body:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -1893,7 +1789,7 @@ define i32 @anyof_icmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4
; NO-VP-NEXT: [[TMP9:%.*]] = icmp slt <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 3)
; NO-VP-NEXT: [[TMP10]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP9]]
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP34:![0-9]+]]
; NO-VP: middle.block:
@@ -1943,19 +1839,13 @@ define i32 @anyof_fcmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -2000,8 +1890,6 @@ define i32 @anyof_fcmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP: vector.body:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -2010,7 +1898,7 @@ define i32 @anyof_fcmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP7]], align 4
; NO-VP-NEXT: [[TMP9:%.*]] = fcmp fast olt <vscale x 4 x float> [[WIDE_LOAD]], splat (float 3.000000e+00)
; NO-VP-NEXT: [[TMP10]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP9]]
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]]
; NO-VP: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll
index 397cb95..3f378c7 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll
@@ -12,12 +12,6 @@ define void @reverse_load_store(i64 %startval, ptr noalias %ptr, ptr noalias %pt
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; IF-EVL-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP2]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -81,8 +75,6 @@ define void @reverse_load_store(i64 %startval, ptr noalias %ptr, ptr noalias %pt
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[STARTVAL:%.*]], [[N_VEC]]
; NO-VP-NEXT: [[TMP7:%.*]] = trunc i64 [[N_VEC]] to i32
; NO-VP-NEXT: br label [[FOR_BODY:%.*]]
@@ -91,22 +83,22 @@ define void @reverse_load_store(i64 %startval, ptr noalias %ptr, ptr noalias %pt
; NO-VP-NEXT: [[OFFSET_IDX:%.*]] = sub i64 [[STARTVAL]], [[INDEX]]
; NO-VP-NEXT: [[TMP8:%.*]] = add i64 [[OFFSET_IDX]], -1
; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[PTR:%.*]], i64 [[TMP8]]
-; NO-VP-NEXT: [[TMP10:%.*]] = mul i64 0, [[TMP5]]
-; NO-VP-NEXT: [[TMP11:%.*]] = sub i64 [[TMP5]], 1
+; NO-VP-NEXT: [[TMP10:%.*]] = mul i64 0, [[TMP3]]
+; NO-VP-NEXT: [[TMP11:%.*]] = sub i64 [[TMP3]], 1
; NO-VP-NEXT: [[TMP12:%.*]] = mul i64 -1, [[TMP11]]
; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TMP9]], i64 [[TMP10]]
; NO-VP-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[TMP13]], i64 [[TMP12]]
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP14]], align 4
; NO-VP-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
; NO-VP-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[PTR2:%.*]], i64 [[TMP8]]
-; NO-VP-NEXT: [[TMP16:%.*]] = mul i64 0, [[TMP5]]
-; NO-VP-NEXT: [[TMP17:%.*]] = sub i64 [[TMP5]], 1
+; NO-VP-NEXT: [[TMP16:%.*]] = mul i64 0, [[TMP3]]
+; NO-VP-NEXT: [[TMP17:%.*]] = sub i64 [[TMP3]], 1
; NO-VP-NEXT: [[TMP18:%.*]] = mul i64 -1, [[TMP17]]
; NO-VP-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[TMP15]], i64 [[TMP16]]
; NO-VP-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[TMP19]], i64 [[TMP18]]
; NO-VP-NEXT: [[REVERSE1:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[REVERSE]])
; NO-VP-NEXT: store <vscale x 4 x i32> [[REVERSE1]], ptr [[TMP20]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; NO-VP: middle.block:
@@ -154,12 +146,6 @@ define void @reverse_load_store_masked(i64 %startval, ptr noalias %ptr, ptr noal
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; IF-EVL-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP2]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -236,8 +222,6 @@ define void @reverse_load_store_masked(i64 %startval, ptr noalias %ptr, ptr noal
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[STARTVAL1:%.*]], [[N_VEC]]
; NO-VP-NEXT: [[TMP7:%.*]] = trunc i64 [[N_VEC]] to i32
; NO-VP-NEXT: br label [[FOR_BODY:%.*]]
@@ -250,8 +234,8 @@ define void @reverse_load_store_masked(i64 %startval, ptr noalias %ptr, ptr noal
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP9]], align 4
; NO-VP-NEXT: [[TMP10:%.*]] = icmp slt <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 100)
; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[PTR1:%.*]], i64 [[TMP8]]
-; NO-VP-NEXT: [[TMP12:%.*]] = mul i64 0, [[TMP5]]
-; NO-VP-NEXT: [[TMP13:%.*]] = sub i64 [[TMP5]], 1
+; NO-VP-NEXT: [[TMP12:%.*]] = mul i64 0, [[TMP3]]
+; NO-VP-NEXT: [[TMP13:%.*]] = sub i64 [[TMP3]], 1
; NO-VP-NEXT: [[TMP14:%.*]] = mul i64 -1, [[TMP13]]
; NO-VP-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[TMP11]], i64 [[TMP12]]
; NO-VP-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[TMP15]], i64 [[TMP14]]
@@ -259,15 +243,15 @@ define void @reverse_load_store_masked(i64 %startval, ptr noalias %ptr, ptr noal
; NO-VP-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP16]], i32 4, <vscale x 4 x i1> [[REVERSE]], <vscale x 4 x i32> poison)
; NO-VP-NEXT: [[REVERSE2:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_MASKED_LOAD]])
; NO-VP-NEXT: [[TMP17:%.*]] = getelementptr i32, ptr [[PTR2:%.*]], i64 [[TMP8]]
-; NO-VP-NEXT: [[TMP18:%.*]] = mul i64 0, [[TMP5]]
-; NO-VP-NEXT: [[TMP19:%.*]] = sub i64 [[TMP5]], 1
+; NO-VP-NEXT: [[TMP18:%.*]] = mul i64 0, [[TMP3]]
+; NO-VP-NEXT: [[TMP19:%.*]] = sub i64 [[TMP3]], 1
; NO-VP-NEXT: [[TMP20:%.*]] = mul i64 -1, [[TMP19]]
; NO-VP-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr [[TMP17]], i64 [[TMP18]]
; NO-VP-NEXT: [[TMP22:%.*]] = getelementptr i32, ptr [[TMP21]], i64 [[TMP20]]
; NO-VP-NEXT: [[REVERSE3:%.*]] = call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> [[TMP10]])
; NO-VP-NEXT: [[REVERSE4:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[REVERSE2]])
; NO-VP-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[REVERSE4]], ptr [[TMP22]], i32 4, <vscale x 4 x i1> [[REVERSE3]])
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; NO-VP: middle.block:
@@ -334,12 +318,6 @@ define void @multiple_reverse_vector_pointer(ptr noalias %a, ptr noalias %b, ptr
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 16
-; IF-EVL-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP2]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 16
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll
index 2ec23b91..e32af06 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll
@@ -15,12 +15,6 @@ define void @test(ptr %p) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; IF-EVL-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 200, [[TMP2]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -67,8 +61,6 @@ define void @test(ptr %p) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 200, [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 200, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; NO-VP-NEXT: br label [[LOOP:%.*]]
; NO-VP: vector.body:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
@@ -77,7 +69,7 @@ define void @test(ptr %p) {
; NO-VP-NEXT: [[TMP7:%.*]] = add i64 [[IV]], 200
; NO-VP-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP7]]
; NO-VP-NEXT: store <vscale x 2 x i64> [[WIDE_LOAD]], ptr [[TMP8]], align 8
-; NO-VP-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP5]]
+; NO-VP-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP3]]
; NO-VP-NEXT: [[TMP9:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[LOOP]], !llvm.loop [[LOOP0:![0-9]+]]
; NO-VP: middle.block:
@@ -346,12 +338,6 @@ define void @trivial_due_max_vscale(ptr %p) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; IF-EVL-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 200, [[TMP2]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -398,8 +384,6 @@ define void @trivial_due_max_vscale(ptr %p) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 200, [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 200, [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; NO-VP-NEXT: br label [[LOOP:%.*]]
; NO-VP: vector.body:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
@@ -408,7 +392,7 @@ define void @trivial_due_max_vscale(ptr %p) {
; NO-VP-NEXT: [[TMP7:%.*]] = add i64 [[IV]], 8192
; NO-VP-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP7]]
; NO-VP-NEXT: store <vscale x 2 x i64> [[WIDE_LOAD]], ptr [[TMP8]], align 32
-; NO-VP-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP5]]
+; NO-VP-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP3]]
; NO-VP-NEXT: [[TMP9:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[LOOP]], !llvm.loop [[LOOP8:![0-9]+]]
; NO-VP: middle.block:
@@ -454,11 +438,6 @@ define void @no_high_lmul_or_interleave(ptr %p) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP1:%.*]] = sub i64 [[TMP7]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 3002, [[TMP1]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP7]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll
index ab05166..aad4dcd 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll
@@ -15,12 +15,6 @@ define void @lshift_significand(i32 %n, ptr nocapture writeonly %dst) {
; CHECK-NEXT: [[TMP0:%.*]] = sub i64 3, [[SPEC_SELECT]]
; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
-; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP0]], [[TMP7]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP6]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 2
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll
index 034b767..22c9b2e 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll
@@ -8,15 +8,8 @@ define void @test_pr98413_zext_removed(ptr %src, ptr noalias %dst, i64 %x) {
; CHECK-LABEL: define void @test_pr98413_zext_removed(
; CHECK-SAME: ptr [[SRC:%.*]], ptr noalias [[DST:%.*]], i64 [[X:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 97, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 97, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 97, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[X]], i64 0
@@ -24,24 +17,27 @@ define void @test_pr98413_zext_removed(ptr %src, ptr noalias %dst, i64 %x) {
; CHECK-NEXT: [[TMP6:%.*]] = trunc <vscale x 8 x i64> [[BROADCAST_SPLAT]] to <vscale x 8 x i8>
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[TMP7:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 97, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i16, ptr [[SRC]], i64 [[TMP7]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i16>, ptr [[TMP8]], align 8
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x i16> @llvm.vp.load.nxv8i16.p0(ptr align 8 [[TMP8]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
; CHECK-NEXT: [[TMP10:%.*]] = trunc <vscale x 8 x i16> [[WIDE_LOAD]] to <vscale x 8 x i8>
; CHECK-NEXT: [[TMP11:%.*]] = and <vscale x 8 x i8> [[TMP6]], [[TMP10]]
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[TMP7]]
-; CHECK-NEXT: store <vscale x 8 x i8> [[TMP11]], ptr [[TMP12]], align 1
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP7]], [[TMP5]]
-; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP11]], ptr align 1 [[TMP12]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
+; CHECK-NEXT: [[TMP14:%.*]] = zext i32 [[TMP13]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP14]], [[TMP7]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 97
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 97, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds i16, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP_SRC]], align 8
; CHECK-NEXT: [[EXT_L:%.*]] = zext i16 [[L]] to i64
@@ -51,7 +47,7 @@ define void @test_pr98413_zext_removed(ptr %src, ptr noalias %dst, i64 %x) {
; CHECK-NEXT: store i8 [[TRUNC_AND]], ptr [[GEP_DST]], align 1
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV]], 96
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -79,15 +75,8 @@ define void @test_pr98413_sext_removed(ptr %src, ptr noalias %dst, i64 %x) {
; CHECK-LABEL: define void @test_pr98413_sext_removed(
; CHECK-SAME: ptr [[SRC:%.*]], ptr noalias [[DST:%.*]], i64 [[X:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 97, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 97, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 97, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[X]], i64 0
@@ -95,24 +84,27 @@ define void @test_pr98413_sext_removed(ptr %src, ptr noalias %dst, i64 %x) {
; CHECK-NEXT: [[TMP6:%.*]] = trunc <vscale x 8 x i64> [[BROADCAST_SPLAT]] to <vscale x 8 x i8>
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[TMP7:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 97, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i16, ptr [[SRC]], i64 [[TMP7]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i16>, ptr [[TMP8]], align 8
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x i16> @llvm.vp.load.nxv8i16.p0(ptr align 8 [[TMP8]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
; CHECK-NEXT: [[TMP10:%.*]] = trunc <vscale x 8 x i16> [[WIDE_LOAD]] to <vscale x 8 x i8>
; CHECK-NEXT: [[TMP11:%.*]] = and <vscale x 8 x i8> [[TMP6]], [[TMP10]]
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[TMP7]]
-; CHECK-NEXT: store <vscale x 8 x i8> [[TMP11]], ptr [[TMP12]], align 1
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP7]], [[TMP5]]
-; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP11]], ptr align 1 [[TMP12]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
+; CHECK-NEXT: [[TMP14:%.*]] = zext i32 [[TMP13]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP14]], [[TMP7]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 97
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 97, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds i16, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP_SRC]], align 8
; CHECK-NEXT: [[EXT_L:%.*]] = sext i16 [[L]] to i64
@@ -122,7 +114,7 @@ define void @test_pr98413_sext_removed(ptr %src, ptr noalias %dst, i64 %x) {
; CHECK-NEXT: store i8 [[TRUNC_AND]], ptr [[GEP_DST]], align 1
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV]], 96
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -153,12 +145,6 @@ define void @truncate_to_i1_used_by_branch(i8 %x, ptr %dst) #0 {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP2]], 4
-; CHECK-NEXT: [[TMP12:%.*]] = sub i32 [[TMP3]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i32 9, [[TMP12]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N_RND_UP]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i32 [[TMP10]], 4
; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x ptr> poison, ptr [[DST]], i64 0
@@ -172,7 +158,7 @@ define void @truncate_to_i1_used_by_branch(i8 %x, ptr %dst) #0 {
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i32 [[TMP6]], [[EVL_BASED_IV]]
; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP6]]
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_EVL_NEXT]], 9
-; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
@@ -259,15 +245,8 @@ define void @icmp_only_first_op_truncated(ptr noalias %dst, i32 %x, i64 %N, i64
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[T:%.*]] = trunc i64 [[N]] to i32
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[V]], 1
-; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP1]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[N]], i64 0
@@ -284,20 +263,23 @@ define void @icmp_only_first_op_truncated(ptr noalias %dst, i32 %x, i64 %N, i64
; CHECK-NEXT: [[BROADCAST_SPLAT6:%.*]] = shufflevector <vscale x 2 x ptr> [[BROADCAST_SPLATINSERT3]], <vscale x 2 x ptr> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64.nxv2p0(<vscale x 2 x ptr> [[BROADCAST_SPLAT4]], i32 8, <vscale x 2 x i1> [[TMP8]], <vscale x 2 x double> poison)
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv2f64.nxv2p0(<vscale x 2 x double> [[WIDE_MASKED_GATHER]], <vscale x 2 x ptr> [[BROADCAST_SPLAT6]], i32 8, <vscale x 2 x i1> [[TMP8]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
-; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP0]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x double> @llvm.vp.gather.nxv2f64.nxv2p0(<vscale x 2 x ptr> align 8 [[BROADCAST_SPLAT4]], <vscale x 2 x i1> [[TMP8]], i32 [[TMP14]])
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv2f64.nxv2p0(<vscale x 2 x double> [[WIDE_MASKED_GATHER]], <vscale x 2 x ptr> align 8 [[BROADCAST_SPLAT6]], <vscale x 2 x i1> [[TMP8]], i32 [[TMP14]])
+; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP11]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP0]]
+; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
; CHECK-NEXT: [[T1:%.*]] = trunc i64 [[N]] to i32
; CHECK-NEXT: [[C:%.*]] = icmp eq i32 [[T1]], [[T]]
; CHECK-NEXT: br i1 [[C]], label %[[THEN:.*]], label %[[LOOP_LATCH]]
@@ -344,15 +326,15 @@ attributes #0 = { "target-features"="+64bit,+v,+zvl256b" }
attributes #1 = { "target-features"="+64bit,+v" }
;.
-; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]], [[META3:![0-9]+]]}
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
-; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
-; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
-; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
-; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META2]], [[META1]]}
-; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META7:![0-9]+]], [[META2]]}
-; CHECK: [[META7]] = !{!"llvm.loop.isvectorized.tailfoldingstyle", !"evl"}
-; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META2]], [[META1]]}
-; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]], [[META2]]}
-; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META2]], [[META1]]}
+; CHECK: [[META2]] = !{!"llvm.loop.isvectorized.tailfoldingstyle", !"evl"}
+; CHECK: [[META3]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META3]], [[META1]]}
+; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]], [[META3]]}
+; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META3]], [[META1]]}
+; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]], [[META2]], [[META3]]}
+; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META3]], [[META1]]}
+; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]], [[META2]], [[META3]]}
+; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META3]], [[META1]]}
;.
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll
index 01edeed..661fd28 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll
@@ -11,12 +11,6 @@ define void @truncate_to_minimal_bitwidths_widen_cast_recipe(ptr %src) {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
-; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 9, [[TMP2]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/type-info-cache-evl-crash.ll b/llvm/test/Transforms/LoopVectorize/RISCV/type-info-cache-evl-crash.ll
index 6476373..c1fedd9 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/type-info-cache-evl-crash.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/type-info-cache-evl-crash.ll
@@ -20,12 +20,6 @@ define void @type_info_cache_clobber(ptr %dstv, ptr %src, i64 %wide.trip.count)
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 8
-; CHECK-NEXT: [[TMP8:%.*]] = sub i64 [[TMP7]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP0]], [[TMP8]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP7]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 8
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x ptr> poison, ptr [[DSTV]], i64 0
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll
index d97e93d..ae894d1 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll
@@ -10,42 +10,38 @@ define void @uniform_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i6
; SCALABLE-LABEL: define void @uniform_load(
; SCALABLE-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
; SCALABLE-NEXT: [[ENTRY:.*]]:
-; SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]]
-; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; SCALABLE-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; SCALABLE: [[VECTOR_PH]]:
-; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]]
; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]]
; SCALABLE: [[VECTOR_BODY]]:
-; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; SCALABLE-NEXT: [[TMP7:%.*]] = load i64, ptr [[B]], align 8
; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP7]], i64 0
; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; SCALABLE-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]]
-; SCALABLE-NEXT: store <vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP8]], align 8
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; SCALABLE-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr align 8 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
+; SCALABLE-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
+; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]]
+; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; SCALABLE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
+; SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; SCALABLE: [[MIDDLE_BLOCK]]:
-; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; SCALABLE-NEXT: br label %[[FOR_END:.*]]
; SCALABLE: [[SCALAR_PH]]:
-; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
; SCALABLE: [[FOR_BODY]]:
-; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; SCALABLE-NEXT: [[V:%.*]] = load i64, ptr [[B]], align 8
; SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
-; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; SCALABLE: [[FOR_END]]:
; SCALABLE-NEXT: ret void
;
@@ -88,12 +84,6 @@ define void @uniform_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i6
; TF-SCALABLE-NEXT: [[ENTRY:.*]]:
; TF-SCALABLE-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; TF-SCALABLE: [[VECTOR_PH]]:
-; TF-SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; TF-SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; TF-SCALABLE-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; TF-SCALABLE-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP2]]
-; TF-SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; TF-SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; TF-SCALABLE-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; TF-SCALABLE-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
; TF-SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -156,8 +146,6 @@ define i64 @uniform_load_outside_use(ptr noalias nocapture %a, ptr noalias nocap
; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]]
; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]]
-; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]]
; SCALABLE: [[VECTOR_BODY]]:
; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -166,9 +154,9 @@ define i64 @uniform_load_outside_use(ptr noalias nocapture %a, ptr noalias nocap
; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; SCALABLE-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]]
; SCALABLE-NEXT: store <vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP8]], align 8
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; SCALABLE-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; SCALABLE: [[MIDDLE_BLOCK]]:
; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]]
; SCALABLE-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
@@ -182,7 +170,7 @@ define i64 @uniform_load_outside_use(ptr noalias nocapture %a, ptr noalias nocap
; SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
-; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; SCALABLE: [[FOR_END]]:
; SCALABLE-NEXT: [[V_LCSSA:%.*]] = phi i64 [ [[V]], %[[FOR_BODY]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ]
; SCALABLE-NEXT: ret i64 [[V_LCSSA]]
@@ -234,8 +222,6 @@ define i64 @uniform_load_outside_use(ptr noalias nocapture %a, ptr noalias nocap
; TF-SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
; TF-SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]]
; TF-SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]]
-; TF-SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; TF-SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; TF-SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]]
; TF-SCALABLE: [[VECTOR_BODY]]:
; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -244,7 +230,7 @@ define i64 @uniform_load_outside_use(ptr noalias nocapture %a, ptr noalias nocap
; TF-SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; TF-SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; TF-SCALABLE-NEXT: store <vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr [[ARRAYIDX]], align 8
-; TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP5]]
+; TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP3]]
; TF-SCALABLE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; TF-SCALABLE: [[MIDDLE_BLOCK]]:
@@ -286,15 +272,8 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca
; SCALABLE-LABEL: define void @conditional_uniform_load(
; SCALABLE-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; SCALABLE-NEXT: [[ENTRY:.*]]:
-; SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]]
-; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; SCALABLE-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; SCALABLE: [[VECTOR_PH]]:
-; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]]
; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x ptr> poison, ptr [[B]], i64 0
@@ -302,30 +281,39 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca
; SCALABLE-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; SCALABLE-NEXT: [[TMP7:%.*]] = mul <vscale x 4 x i64> [[TMP6]], splat (i64 1)
; SCALABLE-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP7]]
-; SCALABLE-NEXT: [[TMP9:%.*]] = mul i64 1, [[TMP5]]
-; SCALABLE-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP9]], i64 0
-; SCALABLE-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]]
; SCALABLE: [[VECTOR_BODY]]:
-; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; SCALABLE-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[TMP17:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP17]], i64 0
+; SCALABLE-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT3]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; SCALABLE-NEXT: [[TMP8:%.*]] = zext i32 [[TMP17]] to i64
+; SCALABLE-NEXT: [[TMP9:%.*]] = mul i64 1, [[TMP8]]
+; SCALABLE-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP9]], i64 0
+; SCALABLE-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; SCALABLE-NEXT: [[TMP18:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; SCALABLE-NEXT: [[TMP11:%.*]] = icmp ult <vscale x 4 x i32> [[TMP18]], [[BROADCAST_SPLAT4]]
; SCALABLE-NEXT: [[TMP10:%.*]] = icmp ugt <vscale x 4 x i64> [[VEC_IND]], splat (i64 10)
-; SCALABLE-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i64> @llvm.masked.gather.nxv4i64.nxv4p0(<vscale x 4 x ptr> [[BROADCAST_SPLAT]], i32 8, <vscale x 4 x i1> [[TMP10]], <vscale x 4 x i64> poison)
-; SCALABLE-NEXT: [[PREDPHI:%.*]] = select <vscale x 4 x i1> [[TMP10]], <vscale x 4 x i64> [[WIDE_MASKED_GATHER]], <vscale x 4 x i64> zeroinitializer
+; SCALABLE-NEXT: [[TMP13:%.*]] = select <vscale x 4 x i1> [[TMP11]], <vscale x 4 x i1> [[TMP10]], <vscale x 4 x i1> zeroinitializer
+; SCALABLE-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i64> @llvm.vp.gather.nxv4i64.nxv4p0(<vscale x 4 x ptr> align 8 [[BROADCAST_SPLAT]], <vscale x 4 x i1> [[TMP10]], i32 [[TMP17]])
+; SCALABLE-NEXT: [[PREDPHI:%.*]] = select <vscale x 4 x i1> [[TMP13]], <vscale x 4 x i64> [[WIDE_MASKED_GATHER]], <vscale x 4 x i64> zeroinitializer
; SCALABLE-NEXT: [[TMP12:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]]
-; SCALABLE-NEXT: store <vscale x 4 x i64> [[PREDPHI]], ptr [[TMP12]], align 8
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; SCALABLE-NEXT: call void @llvm.vp.store.nxv4i64.p0(<vscale x 4 x i64> [[PREDPHI]], ptr align 8 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP17]])
+; SCALABLE-NEXT: [[TMP15:%.*]] = zext i32 [[TMP17]] to i64
+; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP15]], [[INDEX]]
+; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]]
; SCALABLE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; SCALABLE-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; SCALABLE-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
+; SCALABLE-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; SCALABLE: [[MIDDLE_BLOCK]]:
-; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; SCALABLE-NEXT: br label %[[FOR_END:.*]]
; SCALABLE: [[SCALAR_PH]]:
-; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
; SCALABLE: [[FOR_BODY]]:
-; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ]
+; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ]
; SCALABLE-NEXT: [[CMP:%.*]] = icmp ugt i64 [[IV]], 10
; SCALABLE-NEXT: br i1 [[CMP]], label %[[DO_LOAD:.*]], label %[[LATCH]]
; SCALABLE: [[DO_LOAD]]:
@@ -337,7 +325,7 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca
; SCALABLE-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8
; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
-; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; SCALABLE: [[FOR_END]]:
; SCALABLE-NEXT: ret void
;
@@ -394,12 +382,6 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca
; TF-SCALABLE-NEXT: [[ENTRY:.*]]:
; TF-SCALABLE-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; TF-SCALABLE: [[VECTOR_PH]]:
-; TF-SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; TF-SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; TF-SCALABLE-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; TF-SCALABLE-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP2]]
-; TF-SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; TF-SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; TF-SCALABLE-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; TF-SCALABLE-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; TF-SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x ptr> poison, ptr [[B]], i64 0
@@ -482,42 +464,38 @@ define void @uniform_load_unaligned(ptr noalias nocapture %a, ptr noalias nocapt
; SCALABLE-LABEL: define void @uniform_load_unaligned(
; SCALABLE-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; SCALABLE-NEXT: [[ENTRY:.*]]:
-; SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]]
-; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; SCALABLE-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; SCALABLE: [[VECTOR_PH]]:
-; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]]
; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]]
; SCALABLE: [[VECTOR_BODY]]:
-; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; SCALABLE-NEXT: [[TMP6:%.*]] = load i64, ptr [[B]], align 1
; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP6]], i64 0
; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; SCALABLE-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]]
-; SCALABLE-NEXT: store <vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP8]], align 8
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; SCALABLE-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr align 8 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
+; SCALABLE-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
+; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]]
+; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; SCALABLE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
+; SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; SCALABLE: [[MIDDLE_BLOCK]]:
-; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; SCALABLE-NEXT: br label %[[FOR_END:.*]]
; SCALABLE: [[SCALAR_PH]]:
-; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
; SCALABLE: [[FOR_BODY]]:
-; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; SCALABLE-NEXT: [[V:%.*]] = load i64, ptr [[B]], align 1
; SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
-; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; SCALABLE: [[FOR_END]]:
; SCALABLE-NEXT: ret void
;
@@ -560,12 +538,6 @@ define void @uniform_load_unaligned(ptr noalias nocapture %a, ptr noalias nocapt
; TF-SCALABLE-NEXT: [[ENTRY:.*]]:
; TF-SCALABLE-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; TF-SCALABLE: [[VECTOR_PH]]:
-; TF-SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; TF-SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; TF-SCALABLE-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; TF-SCALABLE-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP2]]
-; TF-SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; TF-SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; TF-SCALABLE-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; TF-SCALABLE-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
; TF-SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -619,42 +591,38 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i
; SCALABLE-LABEL: define void @uniform_store(
; SCALABLE-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i64 [[V:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; SCALABLE-NEXT: [[ENTRY:.*]]:
-; SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]]
-; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; SCALABLE-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; SCALABLE: [[VECTOR_PH]]:
-; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]]
; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V]], i64 0
; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]]
; SCALABLE: [[VECTOR_BODY]]:
-; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 8
; SCALABLE-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]]
-; SCALABLE-NEXT: store <vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP7]], align 8
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; SCALABLE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP9]])
+; SCALABLE-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
+; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP10]], [[INDEX]]
+; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]]
+; SCALABLE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
+; SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; SCALABLE: [[MIDDLE_BLOCK]]:
-; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; SCALABLE-NEXT: br label %[[FOR_END:.*]]
; SCALABLE: [[SCALAR_PH]]:
-; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
; SCALABLE: [[FOR_BODY]]:
-; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 8
; SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
-; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; SCALABLE: [[FOR_END]]:
; SCALABLE-NEXT: ret void
;
@@ -697,12 +665,6 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i
; TF-SCALABLE-NEXT: [[ENTRY:.*]]:
; TF-SCALABLE-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; TF-SCALABLE: [[VECTOR_PH]]:
-; TF-SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; TF-SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; TF-SCALABLE-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; TF-SCALABLE-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP2]]
-; TF-SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; TF-SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; TF-SCALABLE-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; TF-SCALABLE-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
; TF-SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V]], i64 0
@@ -756,54 +718,49 @@ define void @uniform_store_of_loop_varying(ptr noalias nocapture %a, ptr noalias
; SCALABLE-LABEL: define void @uniform_store_of_loop_varying(
; SCALABLE-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i64 [[V:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; SCALABLE-NEXT: [[ENTRY:.*]]:
-; SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]]
-; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; SCALABLE-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; SCALABLE: [[VECTOR_PH]]:
-; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]]
; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
+; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 2 x ptr> poison, ptr [[B]], i64 0
+; SCALABLE-NEXT: [[BROADCAST_SPLAT1:%.*]] = shufflevector <vscale x 2 x ptr> [[BROADCAST_SPLATINSERT1]], <vscale x 2 x ptr> poison, <vscale x 2 x i32> zeroinitializer
; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V]], i64 0
; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+; SCALABLE-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
+; SCALABLE-NEXT: [[TMP13:%.*]] = mul <vscale x 2 x i64> [[TMP6]], splat (i64 1)
+; SCALABLE-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP13]]
; SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]]
; SCALABLE: [[VECTOR_BODY]]:
-; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
+; SCALABLE-NEXT: [[TMP10:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; SCALABLE-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
+; SCALABLE-NEXT: [[INDEX:%.*]] = mul i64 1, [[TMP8]]
; SCALABLE-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[INDEX]], i64 0
; SCALABLE-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-; SCALABLE-NEXT: [[TMP7:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP6]]
-; SCALABLE-NEXT: [[TMP8:%.*]] = mul <vscale x 2 x i64> [[TMP7]], splat (i64 1)
-; SCALABLE-NEXT: [[TMP9:%.*]] = add <vscale x 2 x i64> [[DOTSPLAT]], [[TMP8]]
-; SCALABLE-NEXT: [[TMP10:%.*]] = add i64 [[INDEX]], 0
-; SCALABLE-NEXT: [[TMP11:%.*]] = add i64 [[INDEX]], 1
-; SCALABLE-NEXT: [[TMP12:%.*]] = call i32 @llvm.vscale.i32()
-; SCALABLE-NEXT: [[TMP13:%.*]] = mul nuw i32 [[TMP12]], 2
-; SCALABLE-NEXT: [[TMP14:%.*]] = sub i32 [[TMP13]], 1
-; SCALABLE-NEXT: [[TMP15:%.*]] = extractelement <vscale x 2 x i64> [[TMP9]], i32 [[TMP14]]
-; SCALABLE-NEXT: store i64 [[TMP15]], ptr [[B]], align 8
+; SCALABLE-NEXT: call void @llvm.vp.scatter.nxv2i64.nxv2p0(<vscale x 2 x i64> [[VEC_IND]], <vscale x 2 x ptr> align 8 [[BROADCAST_SPLAT1]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]])
; SCALABLE-NEXT: [[TMP16:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[TMP10]]
-; SCALABLE-NEXT: store <vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP16]], align 8
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; SCALABLE-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr align 8 [[TMP16]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]])
+; SCALABLE-NEXT: [[TMP11:%.*]] = zext i32 [[TMP7]] to i64
+; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[TMP10]]
+; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; SCALABLE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[DOTSPLAT]]
+; SCALABLE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
+; SCALABLE-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; SCALABLE: [[MIDDLE_BLOCK]]:
-; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; SCALABLE-NEXT: br label %[[FOR_END:.*]]
; SCALABLE: [[SCALAR_PH]]:
-; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
; SCALABLE: [[FOR_BODY]]:
-; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; SCALABLE-NEXT: store i64 [[IV]], ptr [[B]], align 8
; SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
-; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; SCALABLE: [[FOR_END]]:
; SCALABLE-NEXT: ret void
;
@@ -850,12 +807,6 @@ define void @uniform_store_of_loop_varying(ptr noalias nocapture %a, ptr noalias
; TF-SCALABLE-NEXT: [[ENTRY:.*]]:
; TF-SCALABLE-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; TF-SCALABLE: [[VECTOR_PH]]:
-; TF-SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; TF-SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; TF-SCALABLE-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; TF-SCALABLE-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP2]]
-; TF-SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; TF-SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; TF-SCALABLE-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; TF-SCALABLE-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
; TF-SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x ptr> poison, ptr [[B]], i64 0
@@ -920,15 +871,8 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc
; SCALABLE-LABEL: define void @conditional_uniform_store(
; SCALABLE-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i64 [[V:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; SCALABLE-NEXT: [[ENTRY:.*]]:
-; SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]]
-; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; SCALABLE-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; SCALABLE: [[VECTOR_PH]]:
-; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]]
; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V]], i64 0
@@ -938,29 +882,33 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc
; SCALABLE-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
; SCALABLE-NEXT: [[TMP8:%.*]] = mul <vscale x 2 x i64> [[TMP6]], splat (i64 1)
; SCALABLE-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP8]]
-; SCALABLE-NEXT: [[TMP9:%.*]] = mul i64 1, [[TMP5]]
-; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP9]], i64 0
-; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]]
; SCALABLE: [[VECTOR_BODY]]:
-; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; SCALABLE-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; SCALABLE-NEXT: [[TMP14:%.*]] = zext i32 [[TMP7]] to i64
+; SCALABLE-NEXT: [[TMP9:%.*]] = mul i64 1, [[TMP14]]
+; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP9]], i64 0
+; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; SCALABLE-NEXT: [[TMP10:%.*]] = icmp ugt <vscale x 2 x i64> [[VEC_IND]], splat (i64 10)
-; SCALABLE-NEXT: call void @llvm.masked.scatter.nxv2i64.nxv2p0(<vscale x 2 x i64> [[BROADCAST_SPLAT1]], <vscale x 2 x ptr> [[BROADCAST_SPLAT2]], i32 8, <vscale x 2 x i1> [[TMP10]])
+; SCALABLE-NEXT: call void @llvm.vp.scatter.nxv2i64.nxv2p0(<vscale x 2 x i64> [[BROADCAST_SPLAT1]], <vscale x 2 x ptr> align 8 [[BROADCAST_SPLAT2]], <vscale x 2 x i1> [[TMP10]], i32 [[TMP7]])
; SCALABLE-NEXT: [[TMP12:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]]
-; SCALABLE-NEXT: store <vscale x 2 x i64> [[BROADCAST_SPLAT1]], ptr [[TMP12]], align 8
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT1]], ptr align 8 [[TMP12]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]])
+; SCALABLE-NEXT: [[TMP15:%.*]] = zext i32 [[TMP7]] to i64
+; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP15]], [[INDEX]]
+; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]]
; SCALABLE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; SCALABLE-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; SCALABLE-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
+; SCALABLE-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; SCALABLE: [[MIDDLE_BLOCK]]:
-; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; SCALABLE-NEXT: br label %[[FOR_END:.*]]
; SCALABLE: [[SCALAR_PH]]:
-; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
; SCALABLE: [[FOR_BODY]]:
-; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ]
+; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ]
; SCALABLE-NEXT: [[CMP:%.*]] = icmp ugt i64 [[IV]], 10
; SCALABLE-NEXT: br i1 [[CMP]], label %[[DO_STORE:.*]], label %[[LATCH]]
; SCALABLE: [[DO_STORE]]:
@@ -971,7 +919,7 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc
; SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
-; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; SCALABLE: [[FOR_END]]:
; SCALABLE-NEXT: ret void
;
@@ -1027,12 +975,6 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc
; TF-SCALABLE-NEXT: [[ENTRY:.*]]:
; TF-SCALABLE-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; TF-SCALABLE: [[VECTOR_PH]]:
-; TF-SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; TF-SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; TF-SCALABLE-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; TF-SCALABLE-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP2]]
-; TF-SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; TF-SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; TF-SCALABLE-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; TF-SCALABLE-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
; TF-SCALABLE-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V]], i64 0
@@ -1109,42 +1051,38 @@ define void @uniform_store_unaligned(ptr noalias nocapture %a, ptr noalias nocap
; SCALABLE-LABEL: define void @uniform_store_unaligned(
; SCALABLE-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i64 [[V:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; SCALABLE-NEXT: [[ENTRY:.*]]:
-; SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]]
-; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; SCALABLE-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; SCALABLE: [[VECTOR_PH]]:
-; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]]
; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V]], i64 0
; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]]
; SCALABLE: [[VECTOR_BODY]]:
-; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 1
; SCALABLE-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]]
-; SCALABLE-NEXT: store <vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP7]], align 8
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; SCALABLE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP9]])
+; SCALABLE-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
+; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP10]], [[INDEX]]
+; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]]
+; SCALABLE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
+; SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; SCALABLE: [[MIDDLE_BLOCK]]:
-; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; SCALABLE-NEXT: br label %[[FOR_END:.*]]
; SCALABLE: [[SCALAR_PH]]:
-; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
; SCALABLE: [[FOR_BODY]]:
-; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 1
; SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
-; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; SCALABLE: [[FOR_END]]:
; SCALABLE-NEXT: ret void
;
@@ -1187,12 +1125,6 @@ define void @uniform_store_unaligned(ptr noalias nocapture %a, ptr noalias nocap
; TF-SCALABLE-NEXT: [[ENTRY:.*]]:
; TF-SCALABLE-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; TF-SCALABLE: [[VECTOR_PH]]:
-; TF-SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; TF-SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; TF-SCALABLE-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; TF-SCALABLE-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP2]]
-; TF-SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; TF-SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; TF-SCALABLE-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; TF-SCALABLE-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
; TF-SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V]], i64 0
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vector-loop-backedge-elimination-with-evl.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vector-loop-backedge-elimination-with-evl.ll
index d93a5c0..2400198 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vector-loop-backedge-elimination-with-evl.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vector-loop-backedge-elimination-with-evl.ll
@@ -8,12 +8,6 @@ define void @foo(ptr %arg) #0 {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 3, [[TMP2]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll
index d3c3c6b..8d287fe 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll
@@ -12,18 +12,12 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; IF-EVL-NEXT: [[TMP8:%.*]] = sub i64 [[TMP5]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP8]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP11:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP11:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP11]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
@@ -68,8 +62,6 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP: vector.body:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -80,7 +72,7 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
; NO-VP-NEXT: [[TMP11:%.*]] = add nsw <vscale x 4 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD]]
; NO-VP-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP11]], ptr [[TMP12]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; NO-VP: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vf-will-not-generate-any-vector-insts.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vf-will-not-generate-any-vector-insts.ll
index bda9839..1f3bd45 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vf-will-not-generate-any-vector-insts.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vf-will-not-generate-any-vector-insts.ll
@@ -17,17 +17,22 @@ define void @vf_will_not_generate_any_vector_insts(ptr %src, ptr %dst) {
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <2 x ptr> poison, ptr [[DST]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector <2 x ptr> [[BROADCAST_SPLATINSERT2]], <2 x ptr> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP10]], 4
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x ptr> poison, ptr [[DST]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x ptr> [[BROADCAST_SPLATINSERT]], <vscale x 4 x ptr> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[SRC]], align 4, !alias.scope [[META0:![0-9]+]]
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT4:%.*]] = insertelement <2 x i32> poison, i32 [[TMP0]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT5:%.*]] = shufflevector <2 x i32> [[BROADCAST_SPLATINSERT4]], <2 x i32> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: call void @llvm.masked.scatter.v2i32.v2p0(<2 x i32> [[BROADCAST_SPLAT5]], <2 x ptr> [[BROADCAST_SPLAT3]], i32 4, <2 x i1> splat (i1 true)), !alias.scope [[META3:![0-9]+]], !noalias [[META0]]
-; CHECK-NEXT: call void @llvm.masked.scatter.v2i32.v2p0(<2 x i32> [[BROADCAST_SPLAT5]], <2 x ptr> [[BROADCAST_SPLAT3]], i32 4, <2 x i1> splat (i1 true)), !alias.scope [[META3]], !noalias [[META0]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[SRC]], align 4, !alias.scope [[META0:![0-9]+]]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP6]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT2]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[BROADCAST_SPLAT3]], <vscale x 4 x ptr> align 4 [[BROADCAST_SPLAT]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP5]]), !alias.scope [[META3:![0-9]+]], !noalias [[META0]]
+; CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP5]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP7]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]]
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100
; CHECK-NEXT: br i1 [[TMP1]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
@@ -41,7 +46,7 @@ define void @vf_will_not_generate_any_vector_insts(ptr %src, ptr %dst) {
; CHECK-NEXT: store i32 [[DOTPRE]], ptr [[DST]], align 4
; CHECK-NEXT: [[TMP3]] = add nuw i64 [[TMP2]], 1
; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[TMP3]], 100
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -65,8 +70,9 @@ exit:
; CHECK: [[META2]] = distinct !{[[META2]], !"LVerDomain"}
; CHECK: [[META3]] = !{[[META4:![0-9]+]]}
; CHECK: [[META4]] = distinct !{[[META4]], [[META2]]}
-; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META6:![0-9]+]], [[META7:![0-9]+]]}
+; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META6:![0-9]+]], [[META7:![0-9]+]], [[META8:![0-9]+]]}
; CHECK: [[META6]] = !{!"llvm.loop.isvectorized", i32 1}
-; CHECK: [[META7]] = !{!"llvm.loop.unroll.runtime.disable"}
-; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META6]]}
+; CHECK: [[META7]] = !{!"llvm.loop.isvectorized.tailfoldingstyle", !"evl"}
+; CHECK: [[META8]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META6]]}
;.
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-riscv-vector-reverse.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-riscv-vector-reverse.ll
index d7c9ce4..4669522 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-riscv-vector-reverse.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-riscv-vector-reverse.ll
@@ -10,8 +10,7 @@
; RUN: -disable-output < %s 2>&1 | FileCheck %s
define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocapture noundef readonly %B, i32 noundef signext %n) {
-; CHECK: VPlan 'Initial VPlan for VF={vscale x 1,vscale x 2,vscale x 4},UF>=1' {
-; CHECK-NEXT: Live-in vp<[[VF:%.+]]> = VF
+; CHECK: VPlan 'Initial VPlan for VF={vscale x 1,vscale x 2,vscale x 4},UF={1}' {
; CHECK-NEXT: Live-in vp<[[VFxUF:%.+]]> = VF * UF
; CHECK-NEXT: Live-in vp<[[VTC:%.+]]> = vector-trip-count
; CHECK-NEXT: vp<[[OTC:%.+]]> = original trip-count
@@ -21,41 +20,42 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur
; CHECK-NEXT: Successor(s): scalar.ph, vector.ph
; CHECK-EMPTY:
; CHECK-NEXT: vector.ph:
-; CHECK-NEXT: vp<[[RESUME_IV_A:%.+]]> = DERIVED-IV ir<%n> + vp<[[VTC]]> * ir<-1>
-; CHECK-NEXT: vp<[[RESUME_IV_B:%.+]]> = DERIVED-IV ir<%n> + vp<[[VTC]]> * ir<-1>
; CHECK-NEXT: Successor(s): vector loop
; CHECK-EMPTY:
; CHECK-NEXT: <x1> vector loop: {
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[INDUCTION:%.+]]> = CANONICAL-INDUCTION ir<0>, vp<[[INDEX_NEXT:%.+]]>
-; CHECK-NEXT: vp<[[DERIVED_IV:%.+]]> = DERIVED-IV ir<%n> + vp<[[INDUCTION]]> * ir<-1>
-; CHECK-NEXT: vp<[[SCALAR_STEPS:%.+]]> = SCALAR-STEPS vp<[[DERIVED_IV]]>, ir<-1>, vp<[[VF]]>
+; CHECK-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%.+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
+; CHECK-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ vp<[[OTC]]>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
+; CHECK-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
+; CHECK-NEXT: vp<[[DERIVED_IV:%.+]]> = DERIVED-IV ir<%n> + vp<[[EVL_PHI]]> * ir<-1>
+; CHECK-NEXT: vp<[[SCALAR_STEPS:%.+]]> = SCALAR-STEPS vp<[[DERIVED_IV]]>, ir<-1>, vp<[[EVL]]>
; CHECK-NEXT: CLONE ir<[[IDX:%.+]]> = add nsw vp<[[SCALAR_STEPS]]>, ir<-1>
; CHECK-NEXT: CLONE ir<[[IDX_PROM:%.+]]> = zext ir<[[IDX]]>
; CHECK-NEXT: CLONE ir<[[ARRAY_IDX_B:%.+]]> = getelementptr inbounds ir<[[B:%.+]]>, ir<[[IDX_PROM]]>
-; CHECK-NEXT: vp<[[VEC_END_PTR_B:%.+]]> = vector-end-pointer inbounds ir<[[ARRAY_IDX_B]]>, vp<[[VF]]>
-; CHECK-NEXT: WIDEN ir<[[VAL_B:%.+]]> = load vp<[[VEC_END_PTR_B]]>
+; CHECK-NEXT: vp<[[VEC_END_PTR_B:%.+]]> = vector-end-pointer ir<[[ARRAY_IDX_B]]>, vp<[[EVL]]>
+; CHECK-NEXT: WIDEN ir<[[VAL_B:%.+]]> = vp.load vp<[[VEC_END_PTR_B]]>, vp<[[EVL]]>
; CHECK-NEXT: WIDEN ir<[[ADD_RESULT:%.+]]> = add ir<[[VAL_B]]>, ir<1>
; CHECK-NEXT: CLONE ir<[[ARRAY_IDX_A:%.+]]> = getelementptr inbounds ir<[[A:%.+]]>, ir<[[IDX_PROM]]>
-; CHECK-NEXT: vp<[[VEC_END_PTR_A:%.+]]> = vector-end-pointer inbounds ir<[[ARRAY_IDX_A]]>, vp<[[VF]]>
-; CHECK-NEXT: WIDEN store vp<[[VEC_END_PTR_A]]>, ir<[[ADD_RESULT]]>
-; CHECK-NEXT: EMIT vp<[[INDEX_NEXT]]> = add nuw vp<[[INDUCTION]]>, vp<[[VFxUF]]>
+; CHECK-NEXT: vp<[[VEC_END_PTR_A:%.+]]> = vector-end-pointer ir<[[ARRAY_IDX_A]]>, vp<[[EVL]]>
+; CHECK-NEXT: WIDEN vp.store vp<[[VEC_END_PTR_A]]>, ir<[[ADD_RESULT]]>, vp<[[EVL]]>
+; CHECK-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[EVL]]>, vp<[[EVL_PHI]]>
+; CHECK-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[EVL]]>
+; CHECK-NEXT: EMIT vp<[[INDEX_NEXT]]> = add vp<[[INDUCTION]]>, vp<[[VFxUF]]>
; CHECK-NEXT: EMIT branch-on-count vp<[[INDEX_NEXT]]>, vp<[[VTC]]>
; CHECK-NEXT: No successors
; CHECK-NEXT: }
; CHECK-NEXT: Successor(s): middle.block
; CHECK-EMPTY:
; CHECK-NEXT: middle.block:
-; CHECK-NEXT: EMIT vp<[[CMP:%.+]]> = icmp eq vp<[[OTC]]>, vp<[[VTC]]>
-; CHECK-NEXT: EMIT branch-on-cond vp<[[CMP]]>
-; CHECK-NEXT: Successor(s): ir-bb<for.cond.cleanup>, scalar.ph
+; CHECK-NEXT: Successor(s): ir-bb<for.cond.cleanup>
; CHECK-EMPTY:
; CHECK-NEXT: ir-bb<for.cond.cleanup>:
; CHECK-NEXT: No successors
; CHECK-EMPTY:
; CHECK-NEXT: scalar.ph:
-; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[RESUME_IV_A]]>, middle.block ], [ ir<%n>, ir-bb<entry> ]
-; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val>.1 = phi [ vp<[[RESUME_IV_B]]>, middle.block ], [ ir<%n>, ir-bb<entry> ]
+; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ ir<%n>, ir-bb<entry> ]
+; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val>.1 = phi [ ir<%n>, ir-bb<entry> ]
; CHECK-NEXT: Successor(s): ir-bb<for.body>
;
entry:
diff --git a/llvm/test/Transforms/LoopVectorize/SystemZ/mem-interleaving-costs-03.ll b/llvm/test/Transforms/LoopVectorize/SystemZ/mem-interleaving-costs-03.ll
index 13c443c..b4eebcc 100644
--- a/llvm/test/Transforms/LoopVectorize/SystemZ/mem-interleaving-costs-03.ll
+++ b/llvm/test/Transforms/LoopVectorize/SystemZ/mem-interleaving-costs-03.ll
@@ -12,7 +12,7 @@
define noundef i32 @fun(i32 %argc, ptr nocapture readnone %argv) {
entry:
%l_4774.i = alloca [4 x [2 x i128]], align 8
- call void @llvm.lifetime.start.p0(i64 128, ptr nonnull %l_4774.i)
+ call void @llvm.lifetime.start.p0(ptr nonnull %l_4774.i)
br label %for.cond4.preheader.i
for.cond4.preheader.i: ; preds = %for.cond4.preheader.i, %entry
@@ -31,13 +31,13 @@ func_1.exit: ; preds = %for.cond4.preheader
%cmp200.i = icmp ne i128 %0, 0
%conv202.i = zext i1 %cmp200.i to i64
%call203.i = tail call i64 @safe_sub_func_int64_t_s_s(i64 noundef %conv202.i, i64 noundef 9139899272418802852)
- call void @llvm.lifetime.end.p0(i64 128, ptr nonnull %l_4774.i)
+ call void @llvm.lifetime.end.p0(ptr nonnull %l_4774.i)
br label %for.cond
for.cond: ; preds = %for.cond, %func_1.exit
br label %for.cond
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare dso_local i64 @safe_sub_func_int64_t_s_s(i64, i64)
diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr35432.ll b/llvm/test/Transforms/LoopVectorize/X86/pr35432.ll
index 639fb86..6fc7ed2 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/pr35432.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/pr35432.ll
@@ -13,9 +13,9 @@ define i32 @main(ptr %ptr) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[S:%.*]] = alloca i16, align 2
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[I]])
; CHECK-NEXT: store i32 0, ptr [[I]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr nonnull [[S]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[S]])
; CHECK-NEXT: [[CALL:%.*]] = call i32 (ptr, ...) @goo(ptr nonnull [[I]])
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[I]], align 4
; CHECK-NEXT: [[STOREMERGE6:%.*]] = trunc i32 [[TMP0]] to i16
@@ -111,16 +111,16 @@ define i32 @main(ptr %ptr) {
; CHECK-NEXT: br label [[FOR_END12]]
; CHECK: for.end12:
; CHECK-NEXT: [[CALL13:%.*]] = call i32 (ptr, ...) @foo(ptr nonnull [[S]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr nonnull [[S]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[S]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[I]])
; CHECK-NEXT: ret i32 0
;
entry:
%i = alloca i32, align 4
%s = alloca i16, align 2
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %i) #3
+ call void @llvm.lifetime.start.p0(ptr nonnull %i) #3
store i32 0, ptr %i, align 4
- call void @llvm.lifetime.start.p0(i64 2, ptr nonnull %s) #3
+ call void @llvm.lifetime.start.p0(ptr nonnull %s) #3
%call = call i32 (ptr, ...) @goo(ptr nonnull %i) #3
%0 = load i32, ptr %i, align 4
%storemerge6 = trunc i32 %0 to i16
@@ -174,17 +174,17 @@ for.cond.for.end12_crit_edge: ; preds = %for.inc9
for.end12: ; preds = %for.cond.for.end12_crit_edge, %entry
%call13 = call i32 (ptr, ...) @foo(ptr nonnull %s) #3
- call void @llvm.lifetime.end.p0(i64 2, ptr nonnull %s) #3
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %i) #3
+ call void @llvm.lifetime.end.p0(ptr nonnull %s) #3
+ call void @llvm.lifetime.end.p0(ptr nonnull %i) #3
ret i32 0
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare i32 @goo(...) local_unnamed_addr #2
declare i32 @foo(...) local_unnamed_addr #2
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
diff --git a/llvm/test/Transforms/LoopVectorize/X86/scev-checks-unprofitable.ll b/llvm/test/Transforms/LoopVectorize/X86/scev-checks-unprofitable.ll
index 4145967..864d221 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/scev-checks-unprofitable.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/scev-checks-unprofitable.ll
@@ -23,9 +23,6 @@ define void @value_defined_in_loop1_used_for_trip_counts(i32 %start, i1 %c, ptr
; CHECK: [[LOOP_3_PREHEADER]]:
; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[IV_1_LCSSA2]], 15
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 16
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[IV_1_LCSSA2]], 1
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i64> [[BROADCAST_SPLATINSERT]], <16 x i64> poison, <16 x i32> zeroinitializer
diff --git a/llvm/test/Transforms/LoopVectorize/dbg-outer-loop-vect.ll b/llvm/test/Transforms/LoopVectorize/dbg-outer-loop-vect.ll
index baa967c..53de252 100644
--- a/llvm/test/Transforms/LoopVectorize/dbg-outer-loop-vect.ll
+++ b/llvm/test/Transforms/LoopVectorize/dbg-outer-loop-vect.ll
@@ -13,7 +13,7 @@ define void @foo(ptr %h) !dbg !4 {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]], !dbg [[DBG21]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[FOR_COND_CLEANUP32:%.*]] ], !dbg [[DBG222:![0-9]+]]
-; CHECK-NEXT: br label [[FOR_COND5_PREHEADER1:%.*]], !dbg [[DBG21]]
+; CHECK-NEXT: br label [[FOR_COND5_PREHEADER1:%.*]]
; CHECK: for.cond5.preheader1:
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i64> [ zeroinitializer, [[VECTOR_BODY]] ], [ [[TMP4:%.*]], [[FOR_COND5_PREHEADER1]] ], !dbg [[DBG22:![0-9]+]]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i32, ptr [[H]], <4 x i64> [[VEC_PHI]]
diff --git a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-scalable-vf1.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-scalable-vf1.ll
index 2bafa6c..b266ddf 100644
--- a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-scalable-vf1.ll
+++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-scalable-vf1.ll
@@ -15,7 +15,6 @@ define i64 @pr97452_scalable_vf1_for_live_out(ptr %src) {
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 23, [[TMP1]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 23, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: [[TMP4:%.*]] = sub i32 [[TMP3]], 1
; CHECK-NEXT: [[VECTOR_RECUR_INIT:%.*]] = insertelement <vscale x 1 x i64> poison, i64 0, i32 [[TMP4]]
@@ -26,7 +25,7 @@ define i64 @pr97452_scalable_vf1_for_live_out(ptr %src) {
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD]] = load <vscale x 1 x i64>, ptr [[TMP5]], align 8
; CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 1 x i64> @llvm.vector.splice.nxv1i64(<vscale x 1 x i64> [[VECTOR_RECUR]], <vscale x 1 x i64> [[WIDE_LOAD]], i32 -1)
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]]
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
@@ -83,7 +82,6 @@ define void @pr97452_scalable_vf1_for_no_live_out(ptr %src, ptr noalias %dst) {
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 23, [[TMP1]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 23, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: [[TMP4:%.*]] = sub i32 [[TMP3]], 1
; CHECK-NEXT: [[VECTOR_RECUR_INIT:%.*]] = insertelement <vscale x 1 x i64> poison, i64 0, i32 [[TMP4]]
@@ -96,7 +94,7 @@ define void @pr97452_scalable_vf1_for_no_live_out(ptr %src, ptr noalias %dst) {
; CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 1 x i64> @llvm.vector.splice.nxv1i64(<vscale x 1 x i64> [[VECTOR_RECUR]], <vscale x 1 x i64> [[WIDE_LOAD]], i32 -1)
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[INDEX]]
; CHECK-NEXT: store <vscale x 1 x i64> [[TMP7]], ptr [[TMP8]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]]
; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
diff --git a/llvm/test/Transforms/LoopVectorize/histograms.ll b/llvm/test/Transforms/LoopVectorize/histograms.ll
index 1adc0bf..f0ceae7 100644
--- a/llvm/test/Transforms/LoopVectorize/histograms.ll
+++ b/llvm/test/Transforms/LoopVectorize/histograms.ll
@@ -1,25 +1,48 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3
; RUN: opt < %s -passes=loop-vectorize,instcombine -enable-histogram-loop-vectorization -force-vector-width=2 -S | FileCheck %s
-;; Currently we don't expect this to vectorize, since the generic cost model returns
-;; invalid for the histogram intrinsic.
define void @simple_histogram(ptr noalias %buckets, ptr readonly %indices, i64 %N) {
; CHECK-LABEL: define void @simple_histogram(
; CHECK-SAME: ptr noalias [[BUCKETS:%.*]], ptr readonly [[INDICES:%.*]], i64 [[N:%.*]]) {
; CHECK-NEXT: entry:
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 2
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], -2
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: vector.body:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[INDICES]], i64 [[IV]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP0]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = zext <2 x i32> [[WIDE_LOAD]] to <2 x i64>
+; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x i64> [[TMP1]], i64 0
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i32, ptr [[BUCKETS]], i64 [[TMP2]]
+; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP1]], i64 1
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw i32, ptr [[BUCKETS]], i64 [[TMP4]]
+; CHECK-NEXT: [[TMP6:%.*]] = insertelement <2 x ptr> poison, ptr [[TMP3]], i64 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x ptr> [[TMP6]], ptr [[TMP5]], i64 1
+; CHECK-NEXT: call void @llvm.experimental.vector.histogram.add.v2p0.i32(<2 x ptr> [[TMP7]], i32 1, <2 x i1> splat (i1 true))
+; CHECK-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 2
+; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: middle.block:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK: scalar.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1:%.*]] ]
+; CHECK-NEXT: br label [[FOR_BODY1:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[INDICES]], i64 [[IV]]
+; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[INDICES]], i64 [[IV1]]
; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[IDXPROM1:%.*]] = zext i32 [[TMP12]] to i64
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[BUCKETS]], i64 [[IDXPROM1]]
; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP13]], 1
; CHECK-NEXT: store i32 [[INC]], ptr [[ARRAYIDX2]], align 4
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_EXIT:%.*]], label [[FOR_BODY]]
+; CHECK-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_EXIT]], label [[FOR_BODY1]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: for.exit:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/intrinsic.ll b/llvm/test/Transforms/LoopVectorize/intrinsic.ll
index 9c910d7..10d83a4 100644
--- a/llvm/test/Transforms/LoopVectorize/intrinsic.ll
+++ b/llvm/test/Transforms/LoopVectorize/intrinsic.ll
@@ -324,6 +324,56 @@ for.end: ; preds = %for.body, %entry
declare double @llvm.exp2.f64(double)
+define void @ldexp_f32i32(i32 %n, ptr %y, ptr %x, i32 %exp) {
+; CHECK-LABEL: @ldexp_f32i32(
+; CHECK: llvm.ldexp.v4f32.v4i32
+; CHECK: ret void
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds float, ptr %y, i32 %iv
+ %0 = load float, ptr %arrayidx, align 4
+ %call = tail call float @llvm.ldexp.f32.i32(float %0, i32 %exp)
+ %arrayidx2 = getelementptr inbounds float, ptr %x, i32 %iv
+ store float %call, ptr %arrayidx2, align 4
+ %iv.next = add i32 %iv, 1
+ %exitcond = icmp eq i32 %iv.next, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare float @llvm.ldexp.f32.i32(float, i32)
+
+define void @ldexp_f64i32(i32 %n, ptr %y, ptr %x, i32 %exp) {
+; CHECK-LABEL: @ldexp_f64i32(
+; CHECK: llvm.ldexp.v4f64.v4i32
+; CHECK: ret void
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds double, ptr %y, i32 %iv
+ %0 = load double, ptr %arrayidx, align 8
+ %call = tail call double @llvm.ldexp.f64.i32(double %0, i32 %exp)
+ %arrayidx2 = getelementptr inbounds double, ptr %x, i32 %iv
+ store double %call, ptr %arrayidx2, align 8
+ %iv.next = add i32 %iv, 1
+ %exitcond = icmp eq i32 %iv.next, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare double @llvm.ldexp.f64.i32(double, i32)
+
define void @log_f32(i32 %n, ptr %y, ptr %x) {
; CHECK-LABEL: @log_f32(
; CHECK: llvm.log.v4f32
@@ -976,6 +1026,157 @@ for.end: ; preds = %for.body, %entry
declare double @llvm.roundeven.f64(double)
+
+define void @lround_i32f32(i32 %n, ptr %y, ptr %x) {
+; CHECK-LABEL: @lround_i32f32(
+; CHECK: llvm.lround.v4i32.v4f32
+; CHECK: ret void
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds float, ptr %y, i32 %iv
+ %0 = load float, ptr %arrayidx, align 4
+ %call = tail call i32 @llvm.lround.i32.f32(float %0)
+ %arrayidx2 = getelementptr inbounds i32, ptr %x, i32 %iv
+ store i32 %call, ptr %arrayidx2, align 4
+ %iv.next = add i32 %iv, 1
+ %exitcond = icmp eq i32 %iv.next, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare i32 @llvm.lround.i32.f32(float)
+
+define void @lround_i32f64(i32 %n, ptr %y, ptr %x) {
+; CHECK-LABEL: @lround_i32f64(
+; CHECK: llvm.lround.v4i32.v4f64
+; CHECK: ret void
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds double, ptr %y, i32 %iv
+ %0 = load double, ptr %arrayidx, align 8
+ %call = tail call i32 @llvm.lround.i32.f64(double %0)
+ %arrayidx2 = getelementptr inbounds i32, ptr %x, i32 %iv
+ store i32 %call, ptr %arrayidx2, align 8
+ %iv.next = add i32 %iv, 1
+ %exitcond = icmp eq i32 %iv.next, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare i32 @llvm.lround.i32.f64(double)
+
+define void @lround_i64f32(i32 %n, ptr %y, ptr %x) {
+; CHECK-LABEL: @lround_i64f32(
+; CHECK: llvm.lround.v4i64.v4f32
+; CHECK: ret void
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds float, ptr %y, i32 %iv
+ %0 = load float, ptr %arrayidx, align 4
+ %call = tail call i64 @llvm.lround.i64.f32(float %0)
+ %arrayidx2 = getelementptr inbounds i64, ptr %x, i32 %iv
+ store i64 %call, ptr %arrayidx2, align 4
+ %iv.next = add i32 %iv, 1
+ %exitcond = icmp eq i32 %iv.next, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare i64 @llvm.lround.i64.f32(float)
+
+define void @lround_i64f64(i32 %n, ptr %y, ptr %x) {
+; CHECK-LABEL: @lround_i64f64(
+; CHECK: llvm.lround.v4i64.v4f64
+; CHECK: ret void
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds double, ptr %y, i32 %iv
+ %0 = load double, ptr %arrayidx, align 8
+ %call = tail call i64 @llvm.lround.i64.f64(double %0)
+ %arrayidx2 = getelementptr inbounds i64, ptr %x, i32 %iv
+ store i64 %call, ptr %arrayidx2, align 8
+ %iv.next = add i32 %iv, 1
+ %exitcond = icmp eq i32 %iv.next, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare i64 @llvm.lround.i64.f64(double)
+
+define void @llround_i64f32(i32 %n, ptr %y, ptr %x) {
+; CHECK-LABEL: @llround_i64f32(
+; CHECK: llvm.llround.v4i64.v4f32
+; CHECK: ret void
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds float, ptr %y, i32 %iv
+ %0 = load float, ptr %arrayidx, align 4
+ %call = tail call i64 @llvm.llround.i64.f32(float %0)
+ %arrayidx2 = getelementptr inbounds i64, ptr %x, i32 %iv
+ store i64 %call, ptr %arrayidx2, align 4
+ %iv.next = add i32 %iv, 1
+ %exitcond = icmp eq i32 %iv.next, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare i64 @llvm.llround.i64.f32(float)
+
+define void @llround_i64f64(i32 %n, ptr %y, ptr %x) {
+; CHECK-LABEL: @llround_i64f64(
+; CHECK: llvm.llround.v4i64.v4f64
+; CHECK: ret void
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds double, ptr %y, i32 %iv
+ %0 = load double, ptr %arrayidx, align 8
+ %call = tail call i64 @llvm.llround.i64.f64(double %0)
+ %arrayidx2 = getelementptr inbounds i64, ptr %x, i32 %iv
+ store i64 %call, ptr %arrayidx2, align 8
+ %iv.next = add i32 %iv, 1
+ %exitcond = icmp eq i32 %iv.next, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare i64 @llvm.llround.i64.f64(double)
+
define void @fma_f32(i32 %n, ptr %y, ptr %x, ptr %z, ptr %w) {
; CHECK-LABEL: @fma_f32(
; CHECK: llvm.fma.v4f32
diff --git a/llvm/test/Transforms/LoopVectorize/lifetime.ll b/llvm/test/Transforms/LoopVectorize/lifetime.ll
index 3dd41b5..61e8635 100644
--- a/llvm/test/Transforms/LoopVectorize/lifetime.ll
+++ b/llvm/test/Transforms/LoopVectorize/lifetime.ll
@@ -12,23 +12,23 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
define void @test(ptr %d) {
entry:
%arr = alloca [1024 x i32], align 16
- call void @llvm.lifetime.start.p0(i64 4096, ptr %arr) #1
+ call void @llvm.lifetime.start.p0(ptr %arr) #1
br label %for.body
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- call void @llvm.lifetime.end.p0(i64 4096, ptr %arr) #1
+ call void @llvm.lifetime.end.p0(ptr %arr) #1
%arrayidx = getelementptr inbounds i32, ptr %d, i64 %indvars.iv
%0 = load i32, ptr %arrayidx, align 8
store i32 100, ptr %arrayidx, align 8
- call void @llvm.lifetime.start.p0(i64 4096, ptr %arr) #1
+ call void @llvm.lifetime.start.p0(ptr %arr) #1
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp ne i32 %lftr.wideiv, 128
br i1 %exitcond, label %for.body, label %for.end
for.end:
- call void @llvm.lifetime.end.p0(i64 4096, ptr %arr) #1
+ call void @llvm.lifetime.end.p0(ptr %arr) #1
ret void
}
@@ -40,26 +40,26 @@ for.end:
define void @testbitcast(ptr %d) {
entry:
%arr = alloca [1024 x i32], align 16
- call void @llvm.lifetime.start.p0(i64 4096, ptr %arr) #1
+ call void @llvm.lifetime.start.p0(ptr %arr) #1
br label %for.body
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- call void @llvm.lifetime.end.p0(i64 4096, ptr %arr) #1
+ call void @llvm.lifetime.end.p0(ptr %arr) #1
%arrayidx = getelementptr inbounds i32, ptr %d, i64 %indvars.iv
%0 = load i32, ptr %arrayidx, align 8
store i32 100, ptr %arrayidx, align 8
- call void @llvm.lifetime.start.p0(i64 4096, ptr %arr) #1
+ call void @llvm.lifetime.start.p0(ptr %arr) #1
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp ne i32 %lftr.wideiv, 128
br i1 %exitcond, label %for.body, label %for.end
for.end:
- call void @llvm.lifetime.end.p0(i64 4096, ptr %arr) #1
+ call void @llvm.lifetime.end.p0(ptr %arr) #1
ret void
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
diff --git a/llvm/test/Transforms/LoopVectorize/outer_loop_scalable.ll b/llvm/test/Transforms/LoopVectorize/outer_loop_scalable.ll
index 4fda9d3..31c3248 100644
--- a/llvm/test/Transforms/LoopVectorize/outer_loop_scalable.ll
+++ b/llvm/test/Transforms/LoopVectorize/outer_loop_scalable.ll
@@ -24,12 +24,10 @@ define void @foo() {
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP19:%.*]] = mul nuw i64 [[TMP18]], 4
; CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; CHECK-NEXT: [[TMP6:%.*]] = mul <vscale x 4 x i64> [[TMP4]], splat (i64 1)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP6]]
-; CHECK-NEXT: [[TMP9:%.*]] = mul i64 1, [[TMP19]]
+; CHECK-NEXT: [[TMP9:%.*]] = mul i64 1, [[TMP3]]
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP9]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -52,7 +50,7 @@ define void @foo() {
; CHECK: vector.latch:
; CHECK-NEXT: [[VEC_PHI5:%.*]] = phi <vscale x 4 x float> [ [[TMP12]], [[INNER_LOOP1]] ]
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4f32.nxv4p0(<vscale x 4 x float> [[VEC_PHI5]], <vscale x 4 x ptr> [[TMP10]], i32 4, <vscale x 4 x i1> splat (i1 true))
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP19]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/runtime-check.ll b/llvm/test/Transforms/LoopVectorize/runtime-check.ll
index c5838fe..8d9ee2a 100644
--- a/llvm/test/Transforms/LoopVectorize/runtime-check.ll
+++ b/llvm/test/Transforms/LoopVectorize/runtime-check.ll
@@ -26,7 +26,7 @@ define i32 @foo(ptr nocapture %a, ptr nocapture %b, i32 %n) nounwind uwtable ssp
; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP1]], 16, !dbg [[DBG9]]
; CHECK-NEXT: br i1 [[DIFF_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]], !dbg [[DBG9]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[TMP0]], 2147483644, !dbg [[DBG9]]
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[TMP0]], 2147483644
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]], !dbg [[DBG9]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ], !dbg [[DBG9]]
diff --git a/llvm/test/Transforms/LoopVectorize/scalable-assume.ll b/llvm/test/Transforms/LoopVectorize/scalable-assume.ll
index 8291164..83541f2 100644
--- a/llvm/test/Transforms/LoopVectorize/scalable-assume.ll
+++ b/llvm/test/Transforms/LoopVectorize/scalable-assume.ll
@@ -14,8 +14,6 @@ define void @test1(ptr noalias nocapture %a, ptr noalias nocapture readonly %b)
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1600, [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1600, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -39,7 +37,7 @@ define void @test1(ptr noalias nocapture %a, ptr noalias nocapture readonly %b)
; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP18]]
; CHECK-NEXT: store <vscale x 2 x float> [[TMP14]], ptr [[TMP16]], align 4
; CHECK-NEXT: store <vscale x 2 x float> [[TMP15]], ptr [[TMP19]], align 4
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
@@ -90,8 +88,6 @@ define void @test2(ptr %a, ptr noalias %b) {
; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1600, [[TMP7]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1600, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -113,7 +109,7 @@ define void @test2(ptr %a, ptr noalias %b) {
; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP18]]
; CHECK-NEXT: store <vscale x 2 x float> [[TMP14]], ptr [[TMP16]], align 4
; CHECK-NEXT: store <vscale x 2 x float> [[TMP15]], ptr [[TMP19]], align 4
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
@@ -159,13 +155,11 @@ define void @predicated_assume(ptr noalias nocapture readonly %a, ptr noalias no
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 2
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
; CHECK-NEXT: [[TMP8:%.*]] = mul <vscale x 2 x i64> [[TMP7]], splat (i64 1)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP8]]
diff --git a/llvm/test/Transforms/LoopVectorize/scalable-first-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/scalable-first-order-recurrence.ll
index bfc0a48..07402ab 100644
--- a/llvm/test/Transforms/LoopVectorize/scalable-first-order-recurrence.ll
+++ b/llvm/test/Transforms/LoopVectorize/scalable-first-order-recurrence.ll
@@ -36,8 +36,6 @@ define i32 @recurrence_1(ptr nocapture readonly %a, ptr nocapture %b, i32 %n) {
; CHECK-VF4UF1-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 4
; CHECK-VF4UF1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], [[TMP11]]
; CHECK-VF4UF1-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF]]
-; CHECK-VF4UF1-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-VF4UF1-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP12]], 4
; CHECK-VF4UF1-NEXT: [[TMP14:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-VF4UF1-NEXT: [[TMP15:%.*]] = mul nuw i32 [[TMP14]], 4
; CHECK-VF4UF1-NEXT: [[TMP16:%.*]] = sub i32 [[TMP15]], 1
@@ -53,7 +51,7 @@ define i32 @recurrence_1(ptr nocapture readonly %a, ptr nocapture %b, i32 %n) {
; CHECK-VF4UF1-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]]
; CHECK-VF4UF1-NEXT: [[TMP22:%.*]] = add <vscale x 4 x i32> [[WIDE_LOAD]], [[TMP20]]
; CHECK-VF4UF1-NEXT: store <vscale x 4 x i32> [[TMP22]], ptr [[TMP21]], align 4
-; CHECK-VF4UF1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP13]]
+; CHECK-VF4UF1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP11]]
; CHECK-VF4UF1-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-VF4UF1-NEXT: br i1 [[TMP24]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-VF4UF1: [[MIDDLE_BLOCK]]:
@@ -101,8 +99,6 @@ define i32 @recurrence_1(ptr nocapture readonly %a, ptr nocapture %b, i32 %n) {
; CHECK-VF4UF2-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 8
; CHECK-VF4UF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], [[TMP11]]
; CHECK-VF4UF2-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF]]
-; CHECK-VF4UF2-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-VF4UF2-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP12]], 8
; CHECK-VF4UF2-NEXT: [[TMP14:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-VF4UF2-NEXT: [[TMP15:%.*]] = mul nuw i32 [[TMP14]], 4
; CHECK-VF4UF2-NEXT: [[TMP16:%.*]] = sub i32 [[TMP15]], 1
@@ -128,7 +124,7 @@ define i32 @recurrence_1(ptr nocapture readonly %a, ptr nocapture %b, i32 %n) {
; CHECK-VF4UF2-NEXT: [[TMP31:%.*]] = getelementptr inbounds i32, ptr [[TMP25]], i64 [[TMP30]]
; CHECK-VF4UF2-NEXT: store <vscale x 4 x i32> [[TMP26]], ptr [[TMP25]], align 4
; CHECK-VF4UF2-NEXT: store <vscale x 4 x i32> [[TMP27]], ptr [[TMP31]], align 4
-; CHECK-VF4UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP13]]
+; CHECK-VF4UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP11]]
; CHECK-VF4UF2-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-VF4UF2-NEXT: br i1 [[TMP32]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-VF4UF2: [[MIDDLE_BLOCK]]:
@@ -198,8 +194,6 @@ define i32 @recurrence_2(ptr nocapture readonly %a, i32 %n) {
; CHECK-VF4UF1-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; CHECK-VF4UF1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]]
; CHECK-VF4UF1-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
-; CHECK-VF4UF1-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-VF4UF1-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4
; CHECK-VF4UF1-NEXT: [[TMP7:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-VF4UF1-NEXT: [[TMP8:%.*]] = mul nuw i32 [[TMP7]], 4
; CHECK-VF4UF1-NEXT: [[TMP9:%.*]] = sub i32 [[TMP8]], 1
@@ -217,7 +211,7 @@ define i32 @recurrence_2(ptr nocapture readonly %a, i32 %n) {
; CHECK-VF4UF1-NEXT: [[TMP15:%.*]] = select <vscale x 4 x i1> [[TMP14]], <vscale x 4 x i32> [[TMP13]], <vscale x 4 x i32> zeroinitializer
; CHECK-VF4UF1-NEXT: [[TMP16:%.*]] = icmp slt <vscale x 4 x i32> [[VEC_PHI]], [[TMP15]]
; CHECK-VF4UF1-NEXT: [[TMP17]] = select <vscale x 4 x i1> [[TMP16]], <vscale x 4 x i32> [[VEC_PHI]], <vscale x 4 x i32> [[TMP15]]
-; CHECK-VF4UF1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
+; CHECK-VF4UF1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
; CHECK-VF4UF1-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-VF4UF1-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK-VF4UF1: [[MIDDLE_BLOCK]]:
@@ -259,8 +253,6 @@ define i32 @recurrence_2(ptr nocapture readonly %a, i32 %n) {
; CHECK-VF4UF2-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8
; CHECK-VF4UF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]]
; CHECK-VF4UF2-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
-; CHECK-VF4UF2-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-VF4UF2-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8
; CHECK-VF4UF2-NEXT: [[TMP7:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-VF4UF2-NEXT: [[TMP8:%.*]] = mul nuw i32 [[TMP7]], 4
; CHECK-VF4UF2-NEXT: [[TMP9:%.*]] = sub i32 [[TMP8]], 1
@@ -289,7 +281,7 @@ define i32 @recurrence_2(ptr nocapture readonly %a, i32 %n) {
; CHECK-VF4UF2-NEXT: [[TMP24:%.*]] = icmp slt <vscale x 4 x i32> [[VEC_PHI1]], [[TMP22]]
; CHECK-VF4UF2-NEXT: [[TMP25]] = select <vscale x 4 x i1> [[TMP23]], <vscale x 4 x i32> [[VEC_PHI]], <vscale x 4 x i32> [[TMP21]]
; CHECK-VF4UF2-NEXT: [[TMP26]] = select <vscale x 4 x i1> [[TMP24]], <vscale x 4 x i32> [[VEC_PHI1]], <vscale x 4 x i32> [[TMP22]]
-; CHECK-VF4UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
+; CHECK-VF4UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
; CHECK-VF4UF2-NEXT: [[TMP27:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-VF4UF2-NEXT: br i1 [[TMP27]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK-VF4UF2: [[MIDDLE_BLOCK]]:
@@ -388,8 +380,6 @@ define void @recurrence_3(ptr nocapture readonly %a, ptr nocapture %b, i32 %n, f
; CHECK-VF4UF1-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 4
; CHECK-VF4UF1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], [[TMP12]]
; CHECK-VF4UF1-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF]]
-; CHECK-VF4UF1-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-VF4UF1-NEXT: [[TMP14:%.*]] = mul nuw i64 [[TMP13]], 4
; CHECK-VF4UF1-NEXT: [[TMP15:%.*]] = add i64 1, [[N_VEC]]
; CHECK-VF4UF1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x double> poison, double [[CONV1]], i64 0
; CHECK-VF4UF1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x double> [[BROADCAST_SPLATINSERT]], <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
@@ -411,7 +401,7 @@ define void @recurrence_3(ptr nocapture readonly %a, ptr nocapture %b, i32 %n, f
; CHECK-VF4UF1-NEXT: [[TMP25:%.*]] = fsub fast <vscale x 4 x double> [[TMP22]], [[TMP24]]
; CHECK-VF4UF1-NEXT: [[TMP26:%.*]] = getelementptr inbounds double, ptr [[B]], i64 [[OFFSET_IDX]]
; CHECK-VF4UF1-NEXT: store <vscale x 4 x double> [[TMP25]], ptr [[TMP26]], align 8, !alias.scope [[META9:![0-9]+]], !noalias [[META6]]
-; CHECK-VF4UF1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP14]]
+; CHECK-VF4UF1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]]
; CHECK-VF4UF1-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-VF4UF1-NEXT: br i1 [[TMP28]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK-VF4UF1: [[MIDDLE_BLOCK]]:
@@ -466,8 +456,6 @@ define void @recurrence_3(ptr nocapture readonly %a, ptr nocapture %b, i32 %n, f
; CHECK-VF4UF2-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 8
; CHECK-VF4UF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], [[TMP12]]
; CHECK-VF4UF2-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF]]
-; CHECK-VF4UF2-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-VF4UF2-NEXT: [[TMP14:%.*]] = mul nuw i64 [[TMP13]], 8
; CHECK-VF4UF2-NEXT: [[TMP15:%.*]] = add i64 1, [[N_VEC]]
; CHECK-VF4UF2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x double> poison, double [[CONV1]], i64 0
; CHECK-VF4UF2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x double> [[BROADCAST_SPLATINSERT]], <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
@@ -502,7 +490,7 @@ define void @recurrence_3(ptr nocapture readonly %a, ptr nocapture %b, i32 %n, f
; CHECK-VF4UF2-NEXT: [[TMP38:%.*]] = getelementptr inbounds double, ptr [[TMP34]], i64 [[TMP37]]
; CHECK-VF4UF2-NEXT: store <vscale x 4 x double> [[TMP32]], ptr [[TMP34]], align 8, !alias.scope [[META9:![0-9]+]], !noalias [[META6]]
; CHECK-VF4UF2-NEXT: store <vscale x 4 x double> [[TMP33]], ptr [[TMP38]], align 8, !alias.scope [[META9]], !noalias [[META6]]
-; CHECK-VF4UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP14]]
+; CHECK-VF4UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]]
; CHECK-VF4UF2-NEXT: [[TMP39:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-VF4UF2-NEXT: br i1 [[TMP39]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK-VF4UF2: [[MIDDLE_BLOCK]]:
@@ -568,12 +556,10 @@ define i64 @constant_folded_previous_value() {
; CHECK-VF4UF1-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-VF4UF1-NEXT: [[N_MOD_VF:%.*]] = urem i64 1000, [[TMP3]]
; CHECK-VF4UF1-NEXT: [[N_VEC:%.*]] = sub i64 1000, [[N_MOD_VF]]
-; CHECK-VF4UF1-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-VF4UF1-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-VF4UF1-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK-VF4UF1: [[VECTOR_BODY]]:
; CHECK-VF4UF1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-VF4UF1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-VF4UF1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-VF4UF1-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-VF4UF1-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK-VF4UF1: [[MIDDLE_BLOCK]]:
@@ -596,12 +582,10 @@ define i64 @constant_folded_previous_value() {
; CHECK-VF4UF2-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
; CHECK-VF4UF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 1000, [[TMP3]]
; CHECK-VF4UF2-NEXT: [[N_VEC:%.*]] = sub i64 1000, [[N_MOD_VF]]
-; CHECK-VF4UF2-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-VF4UF2-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-VF4UF2-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK-VF4UF2: [[VECTOR_BODY]]:
; CHECK-VF4UF2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-VF4UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-VF4UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-VF4UF2-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-VF4UF2-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK-VF4UF2: [[MIDDLE_BLOCK]]:
@@ -646,14 +630,12 @@ define i32 @extract_second_last_iteration(ptr %cval, i32 %x) {
; CHECK-VF4UF1-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP2]], 4
; CHECK-VF4UF1-NEXT: [[N_MOD_VF:%.*]] = urem i32 96, [[TMP3]]
; CHECK-VF4UF1-NEXT: [[N_VEC:%.*]] = sub i32 96, [[N_MOD_VF]]
-; CHECK-VF4UF1-NEXT: [[TMP4:%.*]] = call i32 @llvm.vscale.i32()
-; CHECK-VF4UF1-NEXT: [[TMP5:%.*]] = mul nuw i32 [[TMP4]], 4
; CHECK-VF4UF1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[X]], i64 0
; CHECK-VF4UF1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-VF4UF1-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
; CHECK-VF4UF1-NEXT: [[TMP7:%.*]] = mul <vscale x 4 x i32> [[TMP6]], splat (i32 1)
; CHECK-VF4UF1-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i32> zeroinitializer, [[TMP7]]
-; CHECK-VF4UF1-NEXT: [[TMP8:%.*]] = mul i32 1, [[TMP5]]
+; CHECK-VF4UF1-NEXT: [[TMP8:%.*]] = mul i32 1, [[TMP3]]
; CHECK-VF4UF1-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP8]], i64 0
; CHECK-VF4UF1-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-VF4UF1-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -661,7 +643,7 @@ define i32 @extract_second_last_iteration(ptr %cval, i32 %x) {
; CHECK-VF4UF1-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-VF4UF1-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i32> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-VF4UF1-NEXT: [[TMP9:%.*]] = add <vscale x 4 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; CHECK-VF4UF1-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP5]]
+; CHECK-VF4UF1-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP3]]
; CHECK-VF4UF1-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-VF4UF1-NEXT: [[TMP10:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-VF4UF1-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
@@ -690,13 +672,11 @@ define i32 @extract_second_last_iteration(ptr %cval, i32 %x) {
; CHECK-VF4UF2-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 96, [[TMP1]]
; CHECK-VF4UF2-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK-VF4UF2: [[VECTOR_PH]]:
-; CHECK-VF4UF2-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32()
-; CHECK-VF4UF2-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP2]], 8
-; CHECK-VF4UF2-NEXT: [[N_MOD_VF:%.*]] = urem i32 96, [[TMP3]]
-; CHECK-VF4UF2-NEXT: [[N_VEC:%.*]] = sub i32 96, [[N_MOD_VF]]
; CHECK-VF4UF2-NEXT: [[TMP4:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-VF4UF2-NEXT: [[TMP5:%.*]] = mul nuw i32 [[TMP4]], 4
; CHECK-VF4UF2-NEXT: [[TMP6:%.*]] = mul i32 [[TMP5]], 2
+; CHECK-VF4UF2-NEXT: [[N_MOD_VF:%.*]] = urem i32 96, [[TMP6]]
+; CHECK-VF4UF2-NEXT: [[N_VEC:%.*]] = sub i32 96, [[N_MOD_VF]]
; CHECK-VF4UF2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[X]], i64 0
; CHECK-VF4UF2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-VF4UF2-NEXT: [[TMP7:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
@@ -778,8 +758,6 @@ define void @sink_after(ptr %a, ptr %b, i64 %n) {
; CHECK-VF4UF1-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4
; CHECK-VF4UF1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]]
; CHECK-VF4UF1-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-VF4UF1-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-VF4UF1-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; CHECK-VF4UF1-NEXT: [[TMP9:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-VF4UF1-NEXT: [[TMP10:%.*]] = mul nuw i32 [[TMP9]], 4
; CHECK-VF4UF1-NEXT: [[TMP11:%.*]] = sub i32 [[TMP10]], 1
@@ -797,7 +775,7 @@ define void @sink_after(ptr %a, ptr %b, i64 %n) {
; CHECK-VF4UF1-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 4 x i32> [[TMP17]], [[TMP16]]
; CHECK-VF4UF1-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]]
; CHECK-VF4UF1-NEXT: store <vscale x 4 x i32> [[TMP18]], ptr [[TMP19]], align 4, !alias.scope [[META20:![0-9]+]], !noalias [[META17]]
-; CHECK-VF4UF1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; CHECK-VF4UF1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; CHECK-VF4UF1-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-VF4UF1-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
; CHECK-VF4UF1: [[MIDDLE_BLOCK]]:
@@ -837,8 +815,6 @@ define void @sink_after(ptr %a, ptr %b, i64 %n) {
; CHECK-VF4UF2-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8
; CHECK-VF4UF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]]
; CHECK-VF4UF2-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-VF4UF2-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-VF4UF2-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 8
; CHECK-VF4UF2-NEXT: [[TMP9:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-VF4UF2-NEXT: [[TMP10:%.*]] = mul nuw i32 [[TMP9]], 4
; CHECK-VF4UF2-NEXT: [[TMP11:%.*]] = sub i32 [[TMP10]], 1
@@ -868,7 +844,7 @@ define void @sink_after(ptr %a, ptr %b, i64 %n) {
; CHECK-VF4UF2-NEXT: [[TMP30:%.*]] = getelementptr inbounds i32, ptr [[TMP26]], i64 [[TMP29]]
; CHECK-VF4UF2-NEXT: store <vscale x 4 x i32> [[TMP24]], ptr [[TMP26]], align 4, !alias.scope [[META20:![0-9]+]], !noalias [[META17]]
; CHECK-VF4UF2-NEXT: store <vscale x 4 x i32> [[TMP25]], ptr [[TMP30]], align 4, !alias.scope [[META20]], !noalias [[META17]]
-; CHECK-VF4UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; CHECK-VF4UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; CHECK-VF4UF2-NEXT: [[TMP31:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-VF4UF2-NEXT: br i1 [[TMP31]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
; CHECK-VF4UF2: [[MIDDLE_BLOCK]]:
diff --git a/llvm/test/Transforms/LoopVectorize/scalable-inductions.ll b/llvm/test/Transforms/LoopVectorize/scalable-inductions.ll
index 6264941..ce4592c 100644
--- a/llvm/test/Transforms/LoopVectorize/scalable-inductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/scalable-inductions.ll
@@ -15,13 +15,11 @@ define void @add_ind64_unrolled(ptr noalias nocapture %a, ptr noalias nocapture
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP8:%.*]] = shl nuw i64 [[TMP4]], 1
; CHECK-NEXT: [[TMP5:%.*]] = shl i64 [[TMP4]], 2
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP5]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
@@ -97,17 +95,15 @@ define void @add_ind64_unrolled_nxv1i64(ptr noalias nocapture %a, ptr noalias no
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 1
+; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 1
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = shl i64 [[TMP4]], 1
; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 1 x i64> @llvm.stepvector.nxv1i64()
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 1 x i64> [ [[TMP6]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 1 x i64> poison, i64 [[TMP4]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 1 x i64> poison, i64 [[TMP2]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 1 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
; CHECK-NEXT: [[STEP_ADD:%.*]] = add <vscale x 1 x i64> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[INDEX]]
@@ -122,7 +118,7 @@ define void @add_ind64_unrolled_nxv1i64(ptr noalias nocapture %a, ptr noalias no
; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i64, ptr [[TMP13]], i64 [[TMP14]]
; CHECK-NEXT: store <vscale x 1 x i64> [[TMP11]], ptr [[TMP13]], align 8
; CHECK-NEXT: store <vscale x 1 x i64> [[TMP12]], ptr [[TMP15]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 1 x i64> [[STEP_ADD]], [[DOTSPLAT]]
; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
@@ -185,13 +181,11 @@ define void @add_unique_ind32(ptr noalias nocapture %a, i64 %n) {
; CHECK-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 2
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = shl nuw i64 [[TMP4]], 2
; CHECK-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32
; CHECK-NEXT: [[IND_END:%.*]] = shl i32 [[DOTCAST]], 1
; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
; CHECK-NEXT: [[TMP7:%.*]] = shl <vscale x 4 x i32> [[TMP6]], splat (i32 1)
-; CHECK-NEXT: [[TMP8:%.*]] = trunc i64 [[TMP5]] to i32
+; CHECK-NEXT: [[TMP8:%.*]] = trunc i64 [[TMP3]] to i32
; CHECK-NEXT: [[TMP9:%.*]] = shl i32 [[TMP8]], 1
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP9]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
@@ -201,7 +195,7 @@ define void @add_unique_ind32(ptr noalias nocapture %a, i64 %n) {
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i32> [ [[TMP7]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: store <vscale x 4 x i32> [[VEC_IND]], ptr [[TMP10]], align 4
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
@@ -262,8 +256,6 @@ define void @add_unique_indf32(ptr noalias nocapture %a, i64 %n) {
; CHECK-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 2
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP6:%.*]] = shl nuw i64 [[TMP5]], 2
; CHECK-NEXT: [[DOTCAST:%.*]] = sitofp i64 [[N_VEC]] to float
; CHECK-NEXT: [[TMP4:%.*]] = fmul float [[DOTCAST]], 2.000000e+00
; CHECK-NEXT: [[IND_END:%.*]] = fadd float [[TMP4]], 0.000000e+00
@@ -271,7 +263,7 @@ define void @add_unique_indf32(ptr noalias nocapture %a, i64 %n) {
; CHECK-NEXT: [[TMP8:%.*]] = uitofp <vscale x 4 x i32> [[TMP7]] to <vscale x 4 x float>
; CHECK-NEXT: [[TMP9:%.*]] = fmul <vscale x 4 x float> [[TMP8]], splat (float 2.000000e+00)
; CHECK-NEXT: [[INDUCTION:%.*]] = fadd <vscale x 4 x float> [[TMP9]], zeroinitializer
-; CHECK-NEXT: [[TMP12:%.*]] = uitofp i64 [[TMP6]] to float
+; CHECK-NEXT: [[TMP12:%.*]] = uitofp i64 [[TMP3]] to float
; CHECK-NEXT: [[TMP13:%.*]] = fmul float [[TMP12]], 2.000000e+00
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[TMP13]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x float> [[DOTSPLATINSERT]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
@@ -281,7 +273,7 @@ define void @add_unique_indf32(ptr noalias nocapture %a, i64 %n) {
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x float> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: store <vscale x 4 x float> [[VEC_IND]], ptr [[TMP14]], align 4
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = fadd <vscale x 4 x float> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/scalable-iv-outside-user.ll b/llvm/test/Transforms/LoopVectorize/scalable-iv-outside-user.ll
index 1ec2993..0467a58 100644
--- a/llvm/test/Transforms/LoopVectorize/scalable-iv-outside-user.ll
+++ b/llvm/test/Transforms/LoopVectorize/scalable-iv-outside-user.ll
@@ -12,13 +12,11 @@ define i32 @iv_live_out_wide(ptr %dst) {
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 2000, [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 2000, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 2000, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i32 [[TMP4]], 2
; CHECK-NEXT: [[TMP6:%.*]] = mul i32 [[TMP5]], 2
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 2000, [[TMP6]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 2000, [[N_MOD_VF]]
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[STEP_2]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
diff --git a/llvm/test/Transforms/LoopVectorize/scalable-lifetime.ll b/llvm/test/Transforms/LoopVectorize/scalable-lifetime.ll
index 7aac9d1..bf14c87 100644
--- a/llvm/test/Transforms/LoopVectorize/scalable-lifetime.ll
+++ b/llvm/test/Transforms/LoopVectorize/scalable-lifetime.ll
@@ -10,7 +10,7 @@ define void @test(ptr %d) {
; CHECK-SAME: (ptr [[D:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ARR:%.*]] = alloca [1024 x i32], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4096, ptr [[ARR]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ARR]])
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 128, [[TMP1]]
@@ -20,18 +20,16 @@ define void @test(ptr %d) {
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 128, [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 128, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4096, ptr [[ARR]])
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[D]], i64 [[INDEX]]
-; CHECK-NEXT: store <vscale x 2 x i32> splat (i32 100), ptr [[TMP6]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4096, ptr [[ARR]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ARR]])
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[D]], i64 [[INDEX]]
+; CHECK-NEXT: store <vscale x 2 x i32> splat (i32 100), ptr [[TMP4]], align 8
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ARR]])
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 128, [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
@@ -40,39 +38,39 @@ define void @test(ptr %d) {
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4096, ptr [[ARR]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ARR]])
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[D]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARRAYIDX]], align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: store i32 100, ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4096, ptr [[ARR]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ARR]])
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[LFTR_WIDEIV]], 128
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_END]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: for.end:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4096, ptr [[ARR]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ARR]])
; CHECK-NEXT: ret void
;
entry:
%arr = alloca [1024 x i32], align 16
- call void @llvm.lifetime.start.p0(i64 4096, ptr %arr) #1
+ call void @llvm.lifetime.start.p0(ptr %arr) #1
br label %for.body
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- call void @llvm.lifetime.end.p0(i64 4096, ptr %arr) #1
+ call void @llvm.lifetime.end.p0(ptr %arr) #1
%arrayidx = getelementptr inbounds i32, ptr %d, i64 %indvars.iv
%0 = load i32, ptr %arrayidx, align 8
store i32 100, ptr %arrayidx, align 8
- call void @llvm.lifetime.start.p0(i64 4096, ptr %arr) #1
+ call void @llvm.lifetime.start.p0(ptr %arr) #1
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp ne i32 %lftr.wideiv, 128
br i1 %exitcond, label %for.body, label %for.end, !llvm.loop !0
for.end:
- call void @llvm.lifetime.end.p0(i64 4096, ptr %arr) #1
+ call void @llvm.lifetime.end.p0(ptr %arr) #1
ret void
}
@@ -90,18 +88,16 @@ define void @testloopvariant(ptr %d) {
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 128, [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 128, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4096, ptr [[ARR]])
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[D]], i64 [[INDEX]]
-; CHECK-NEXT: store <vscale x 2 x i32> splat (i32 100), ptr [[TMP6]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4096, ptr [[ARR]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ARR]])
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[D]], i64 [[INDEX]]
+; CHECK-NEXT: store <vscale x 2 x i32> splat (i32 100), ptr [[TMP4]], align 8
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ARR]])
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 128, [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
@@ -110,12 +106,12 @@ define void @testloopvariant(ptr %d) {
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr [1024 x i32], ptr [[ARR]], i32 0, i64 [[INDVARS_IV]]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4096, ptr [[ARR]])
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr [1024 x i32], ptr [[ARR]], i32 0, i64 [[INDVARS_IV]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ARR]])
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[D]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[ARRAYIDX]], align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: store i32 100, ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4096, ptr [[ARR]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ARR]])
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[LFTR_WIDEIV]], 128
@@ -130,11 +126,11 @@ entry:
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%0 = getelementptr [1024 x i32], ptr %arr, i32 0, i64 %indvars.iv
- call void @llvm.lifetime.end.p0(i64 4096, ptr %arr) #1
+ call void @llvm.lifetime.end.p0(ptr %arr) #1
%arrayidx = getelementptr inbounds i32, ptr %d, i64 %indvars.iv
%1 = load i32, ptr %arrayidx, align 8
store i32 100, ptr %arrayidx, align 8
- call void @llvm.lifetime.start.p0(i64 4096, ptr %arr) #1
+ call void @llvm.lifetime.start.p0(ptr %arr) #1
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp ne i32 %lftr.wideiv, 128
@@ -144,9 +140,9 @@ for.end:
ret void
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
!0 = distinct !{!0, !1}
!1 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
diff --git a/llvm/test/Transforms/LoopVectorize/scalable-loop-unpredicated-body-scalar-tail.ll b/llvm/test/Transforms/LoopVectorize/scalable-loop-unpredicated-body-scalar-tail.ll
index f384d3c..901f228 100644
--- a/llvm/test/Transforms/LoopVectorize/scalable-loop-unpredicated-body-scalar-tail.ll
+++ b/llvm/test/Transforms/LoopVectorize/scalable-loop-unpredicated-body-scalar-tail.ll
@@ -12,8 +12,6 @@
; CHECKUF1-DAG: %[[VSCALEX4:.*]] = shl nuw i64 %[[VSCALE]], 2
; CHECKUF1-DAG: %n.mod.vf = urem i64 %wide.trip.count, %[[VSCALEX4]]
; CHECKUF1: %n.vec = sub nsw i64 %wide.trip.count, %n.mod.vf
-; CHECKUF1: %[[VSCALE:.*]] = call i64 @llvm.vscale.i64()
-; CHECKUF1: %[[VSCALEX4:.*]] = shl nuw i64 %[[VSCALE]], 2
; CHECKUF1: vector.body:
; CHECKUF1: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
@@ -41,8 +39,6 @@
; CHECKUF2-DAG: %[[VSCALEX8:.*]] = shl nuw i64 %[[VSCALE]], 3
; CHECKUF2-DAG: %n.mod.vf = urem i64 %wide.trip.count, %[[VSCALEX8]]
; CHECKUF2: %n.vec = sub nsw i64 %wide.trip.count, %n.mod.vf
-; CHECKUF2: %[[VSCALE:.*]] = call i64 @llvm.vscale.i64()
-; CHECKUF2: %[[VSCALEX8:.*]] = shl nuw i64 %[[VSCALE]], 3
; CHECKUF2: vector.body:
; CHECKUF2: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
diff --git a/llvm/test/Transforms/LoopVectorize/scalable-predication.ll b/llvm/test/Transforms/LoopVectorize/scalable-predication.ll
index a3a4c29..ffa2602 100644
--- a/llvm/test/Transforms/LoopVectorize/scalable-predication.ll
+++ b/llvm/test/Transforms/LoopVectorize/scalable-predication.ll
@@ -19,13 +19,11 @@ define void @foo(i32 %val, ptr dereferenceable(1024) %ptr) {
; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 256, [[TMP2]]
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT2:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 256)
-; CHECK-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP4]]
+; CHECK-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP1]]
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT2]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/scalable-reduction-inloop.ll b/llvm/test/Transforms/LoopVectorize/scalable-reduction-inloop.ll
index ba337aa..c2ae92e 100644
--- a/llvm/test/Transforms/LoopVectorize/scalable-reduction-inloop.ll
+++ b/llvm/test/Transforms/LoopVectorize/scalable-reduction-inloop.ll
@@ -15,8 +15,6 @@ define i8 @reduction_add_trunc(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP2]], 16
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 256, [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 256, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vscale.i32()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i32 [[TMP4]], 16
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -38,7 +36,7 @@ define i8 @reduction_add_trunc(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP35:%.*]] = trunc <vscale x 8 x i32> [[TMP29]] to <vscale x 8 x i8>
; CHECK-NEXT: [[TMP34]] = zext <vscale x 8 x i8> [[TMP33]] to <vscale x 8 x i32>
; CHECK-NEXT: [[TMP36]] = zext <vscale x 8 x i8> [[TMP35]] to <vscale x 8 x i32>
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/scalable-trunc-min-bitwidth.ll b/llvm/test/Transforms/LoopVectorize/scalable-trunc-min-bitwidth.ll
index 4b8ff86..4495ed6 100644
--- a/llvm/test/Transforms/LoopVectorize/scalable-trunc-min-bitwidth.ll
+++ b/llvm/test/Transforms/LoopVectorize/scalable-trunc-min-bitwidth.ll
@@ -13,8 +13,6 @@ define void @trunc_minimal_bitwidth(ptr %bptr, ptr noalias %hptr, i32 %val, i64
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[VAL:%.*]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = trunc <vscale x 4 x i32> [[BROADCAST_SPLAT]] to <vscale x 4 x i16>
@@ -23,7 +21,7 @@ define void @trunc_minimal_bitwidth(ptr %bptr, ptr noalias %hptr, i32 %val, i64
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i16, ptr [[HPTR:%.*]], i64 [[INDEX]]
; CHECK-NEXT: store <vscale x 4 x i16> [[TMP4]], ptr [[TMP5]], align 2
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
@@ -73,8 +71,6 @@ define void @trunc_minimal_bitwidths_shufflevector (ptr %p, i32 %arg1, i64 %len)
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[LEN]], [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[LEN]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 4
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[ARG1:%.*]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = trunc <vscale x 4 x i32> [[BROADCAST_SPLAT]] to <vscale x 4 x i8>
@@ -86,7 +82,7 @@ define void @trunc_minimal_bitwidths_shufflevector (ptr %p, i32 %arg1, i64 %len)
; CHECK-NEXT: [[TMP6:%.*]] = xor <vscale x 4 x i8> [[WIDE_LOAD]], [[TMP4]]
; CHECK-NEXT: [[TMP7:%.*]] = mul <vscale x 4 x i8> [[TMP6]], [[WIDE_LOAD]]
; CHECK-NEXT: store <vscale x 4 x i8> [[TMP7]], ptr [[TMP5]], align 1
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll b/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll
index 983f327..c86a404 100644
--- a/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll
+++ b/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll
@@ -152,9 +152,6 @@ define void @remove_loop_region_with_replicate_recipe(ptr %dst, i64 range(i64 5,
; VF8UF1-NEXT: [[TMP0:%.*]] = add nsw i64 [[N]], -2
; VF8UF1-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; VF8UF1: [[VECTOR_PH]]:
-; VF8UF1-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP0]], 7
-; VF8UF1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 8
-; VF8UF1-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; VF8UF1-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[TMP0]], 1
; VF8UF1-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <8 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0
; VF8UF1-NEXT: [[BROADCAST_SPLAT1:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT1]], <8 x i64> poison, <8 x i32> zeroinitializer
@@ -239,9 +236,6 @@ define void @remove_loop_region_with_replicate_recipe(ptr %dst, i64 range(i64 5,
; VF8UF2-NEXT: [[TMP0:%.*]] = add nsw i64 [[N]], -2
; VF8UF2-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; VF8UF2: [[VECTOR_PH]]:
-; VF8UF2-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP0]], 15
-; VF8UF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 16
-; VF8UF2-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; VF8UF2-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[TMP0]], 1
; VF8UF2-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <8 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0
; VF8UF2-NEXT: [[BROADCAST_SPLAT1:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT1]], <8 x i64> poison, <8 x i32> zeroinitializer
@@ -383,9 +377,6 @@ define void @remove_loop_region_with_replicate_recipe(ptr %dst, i64 range(i64 5,
; VF16UF1-NEXT: [[TMP0:%.*]] = add nsw i64 [[N]], -2
; VF16UF1-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; VF16UF1: [[VECTOR_PH]]:
-; VF16UF1-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP0]], 15
-; VF16UF1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 16
-; VF16UF1-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; VF16UF1-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[TMP0]], 1
; VF16UF1-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <16 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0
; VF16UF1-NEXT: [[BROADCAST_SPLAT1:%.*]] = shufflevector <16 x i64> [[BROADCAST_SPLATINSERT1]], <16 x i64> poison, <16 x i32> zeroinitializer
@@ -701,9 +692,6 @@ define void @scev_expand_step(i64 %x, ptr %dst) {
; VF8UF1-NEXT: [[TMP1:%.*]] = add nuw nsw i64 [[TMP0]], 1
; VF8UF1-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; VF8UF1: [[VECTOR_PH]]:
-; VF8UF1-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP1]], 7
-; VF8UF1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 8
-; VF8UF1-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; VF8UF1-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[TMP1]], 1
; VF8UF1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0
; VF8UF1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer
@@ -817,9 +805,6 @@ define void @scev_expand_step(i64 %x, ptr %dst) {
; VF8UF2-NEXT: [[TMP1:%.*]] = add nuw nsw i64 [[TMP0]], 1
; VF8UF2-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; VF8UF2: [[VECTOR_PH]]:
-; VF8UF2-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP1]], 15
-; VF8UF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 16
-; VF8UF2-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; VF8UF2-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[TMP1]], 1
; VF8UF2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0
; VF8UF2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer
@@ -1014,9 +999,6 @@ define void @scev_expand_step(i64 %x, ptr %dst) {
; VF16UF1-NEXT: [[TMP1:%.*]] = add nuw nsw i64 [[TMP0]], 1
; VF16UF1-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; VF16UF1: [[VECTOR_PH]]:
-; VF16UF1-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP1]], 15
-; VF16UF1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 16
-; VF16UF1-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; VF16UF1-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[TMP1]], 1
; VF16UF1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0
; VF16UF1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i64> [[BROADCAST_SPLATINSERT]], <16 x i64> poison, <16 x i32> zeroinitializer
diff --git a/llvm/test/Transforms/LoopVectorize/vectorize-force-tail-with-evl.ll b/llvm/test/Transforms/LoopVectorize/vectorize-force-tail-with-evl.ll
index d3f7794..4af9767 100644
--- a/llvm/test/Transforms/LoopVectorize/vectorize-force-tail-with-evl.ll
+++ b/llvm/test/Transforms/LoopVectorize/vectorize-force-tail-with-evl.ll
@@ -45,8 +45,6 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
; NO-VP-NEXT: [[TMP14:%.*]] = mul nuw i64 [[TMP1]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP14]]
; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP15:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP: vector.body:
; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -57,7 +55,7 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
; NO-VP-NEXT: [[TMP16:%.*]] = add nsw <vscale x 4 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD]]
; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP16]], ptr [[TMP9]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP15]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP14]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; NO-VP: middle.block:
@@ -90,7 +88,6 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
; NO-VP-DEF-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; NO-VP-DEF-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP1]]
; NO-VP-DEF-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-DEF-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; NO-VP-DEF-NEXT: br label [[VECTOR_BODY:%.*]]
; NO-VP-DEF: vector.body:
; NO-VP-DEF-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -101,7 +98,7 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
; NO-VP-DEF-NEXT: [[TMP8:%.*]] = add nsw <vscale x 1 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD]]
; NO-VP-DEF-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
; NO-VP-DEF-NEXT: store <vscale x 1 x i32> [[TMP8]], ptr [[TMP9]], align 4
-; NO-VP-DEF-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]]
+; NO-VP-DEF-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]]
; NO-VP-DEF-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-DEF-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; NO-VP-DEF: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/vplan-iv-transforms.ll b/llvm/test/Transforms/LoopVectorize/vplan-iv-transforms.ll
index 128594c..e779233 100644
--- a/llvm/test/Transforms/LoopVectorize/vplan-iv-transforms.ll
+++ b/llvm/test/Transforms/LoopVectorize/vplan-iv-transforms.ll
@@ -89,8 +89,8 @@ define void @iv_expand(ptr %p, i64 %n) {
; CHECK-NEXT: Successor(s): middle.block
; CHECK: VPlan 'Final VPlan for VF={8},UF={1}'
; CHECK: ir-bb<vector.ph>:
-; CHECK-NEXT: IR %n.mod.vf = urem i64 %n, 8
-; CHECK-NEXT: IR %n.vec = sub i64 %n, %n.mod.vf
+; CHECK-NEXT: EMIT vp<%n.mod.vf> = urem ir<%n>, ir<8>
+; CHECK-NEXT: EMIT vp<%n.vec> = sub ir<%n>, vp<%n.mod.vf>
; CHECK-NEXT: EMIT vp<[[STEP_VECTOR:%.+]]> = step-vector
; CHECK-NEXT: EMIT vp<[[BROADCAST_0:%.+]]> = broadcast ir<0>
; CHECK-NEXT: EMIT vp<[[BROADCAST_1:%.+]]> = broadcast ir<1>
@@ -109,7 +109,7 @@ define void @iv_expand(ptr %p, i64 %n) {
; CHECK-NEXT: WIDEN store ir<%q>, ir<%y>
; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[SCALAR_PHI]]>, ir<8>
; CHECK-NEXT: EMIT vp<%vec.ind.next> = add ir<%iv>, vp<[[BROADCAST_INC]]>
-; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, ir<%n.vec>
+; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<%n.vec>
; CHECK-NEXT: Successor(s): middle.block, vector.body
entry:
br label %loop
diff --git a/llvm/test/Transforms/LoopVectorize/vplan-predicate-switch.ll b/llvm/test/Transforms/LoopVectorize/vplan-predicate-switch.ll
index 46f9125..49d87a2 100644
--- a/llvm/test/Transforms/LoopVectorize/vplan-predicate-switch.ll
+++ b/llvm/test/Transforms/LoopVectorize/vplan-predicate-switch.ll
@@ -5,16 +5,15 @@ define void @switch4_default_common_dest_with_case(ptr %start, ptr %end) {
; CHECK: VPlan 'Final VPlan for VF={2},UF={1}' {
; CHECK-NEXT: Live-in ir<[[VF:.+]]> = VF
; CHECK-NEXT: Live-in ir<[[VFxUF:.+]]> = VF * UF
-; CHECK-NEXT: Live-in ir<[[VTC:%.+]]> = vector-trip-count
; CHECK-NEXT: ir<%0> = original trip-count
; CHECK-EMPTY:
; CHECK-NEXT: ir-bb<entry>:
; CHECK-NEXT: Successor(s): ir-bb<scalar.ph>, ir-bb<vector.ph>
; CHECK-EMPTY:
; CHECK-NEXT: ir-bb<vector.ph>:
-; CHECK-NEXT: IR %n.mod.vf = urem i64 %0, 2
-; CHECK-NEXT: IR %n.vec = sub i64 %0, %n.mod.vf
-; CHECK-NEXT: vp<[[END:%.+]]> = DERIVED-IV ir<%start> + ir<%n.vec> * ir<1>
+; CHECK-NEXT: EMIT vp<%n.mod.vf> = urem ir<%0>, ir<2>
+; CHECK-NEXT: EMIT vp<[[VTC:%.+]]> = sub ir<%0>, vp<%n.mod.vf>
+; CHECK-NEXT: vp<[[END:%.+]]> = DERIVED-IV ir<%start> + vp<[[VTC]]> * ir<1>
; CHECK-NEXT: Successor(s): vector.body
; CHECK-EMPTY:
; CHECK-NEXT: vector.body:
@@ -78,11 +77,11 @@ define void @switch4_default_common_dest_with_case(ptr %start, ptr %end) {
; CHECK-EMPTY:
; CHECK-NEXT: default.2:
; CHECK-NEXT: EMIT vp<[[CAN_IV_NEXT]]> = add nuw vp<[[CAN_IV]]>, ir<[[VFxUF]]>
-; CHECK-NEXT: EMIT branch-on-count vp<[[CAN_IV_NEXT]]>, ir<[[VTC]]>
+; CHECK-NEXT: EMIT branch-on-count vp<[[CAN_IV_NEXT]]>, vp<[[VTC]]>
; CHECK-NEXT: Successor(s): middle.block, vector.body
; CHECK-EMPTY:
; CHECK-NEXT: middle.block:
-; CHECK-NEXT: EMIT vp<[[MIDDLE_CMP:%.+]]> = icmp eq ir<%0>, ir<[[VTC]]>
+; CHECK-NEXT: EMIT vp<[[MIDDLE_CMP:%.+]]> = icmp eq ir<%0>, vp<[[VTC]]>
; CHECK-NEXT: EMIT branch-on-cond vp<[[MIDDLE_CMP]]>
; CHECK-NEXT: Successor(s): ir-bb<exit>, ir-bb<scalar.ph>
; CHECK-EMPTY:
diff --git a/llvm/test/Transforms/LoopVectorize/vplan-printing-before-execute.ll b/llvm/test/Transforms/LoopVectorize/vplan-printing-before-execute.ll
index d856387..d200359 100644
--- a/llvm/test/Transforms/LoopVectorize/vplan-printing-before-execute.ll
+++ b/llvm/test/Transforms/LoopVectorize/vplan-printing-before-execute.ll
@@ -62,18 +62,18 @@ define void @test_tc_less_than_16(ptr %A, i64 %N) {
;
; CHECK: Executing best plan with VF=8, UF=2
; CHECK-NEXT: VPlan 'Final VPlan for VF={8},UF={2}' {
-; CHECK-NEXT: Live-in ir<[[VTC:%.+]]> = vector-trip-count
-; CHECK-NEXT: ir<%and> = original trip-count
+; CHECK-NEXT: Live-in ir<16> = VF * UF
+; CHECK-NEXT: Live-in ir<%and> = original trip-count
; CHECK-EMPTY:
; CHECK-NEXT: ir-bb<entry>:
; CHECK-NEXT: IR %and = and i64 %N, 15
; CHECK-NEXT: Successor(s): ir-bb<scalar.ph>, ir-bb<vector.ph>
; CHECK-EMPTY:
; CHECK-NEXT: ir-bb<vector.ph>:
-; CHECK-NEXT: IR %n.mod.vf = urem i64 %and, 16
-; CHECK-NEXT: IR %n.vec = sub i64 %and, %n.mod.vf
-; CHECK-NEXT: vp<[[END1:%.+]]> = DERIVED-IV ir<%and> + ir<[[VTC]]> * ir<-1>
-; CHECK-NEXT: vp<[[END2:%.+]]> = DERIVED-IV ir<%A> + ir<[[VTC]]> * ir<1>
+; CHECK-NEXT: EMIT vp<%n.mod.vf> = urem ir<%and>, ir<16>
+; CHECK-NEXT: EMIT vp<[[VTC:%.+]]> = sub ir<%and>, vp<%n.mod.vf>
+; CHECK-NEXT: vp<[[END1:%.+]]> = DERIVED-IV ir<%and> + vp<[[VTC]]> * ir<-1>
+; CHECK-NEXT: vp<[[END2:%.+]]> = DERIVED-IV ir<%A> + vp<[[VTC]]> * ir<1>
; CHECK-NEXT: Successor(s): vector.body
; CHECK-EMPTY:
; CHECK-NEXT: vector.body:
@@ -88,7 +88,7 @@ define void @test_tc_less_than_16(ptr %A, i64 %N) {
; CHECK-NEXT: Successor(s): middle.block
; CHECK-EMPTY:
; CHECK-NEXT: middle.block:
-; CHECK-NEXT: EMIT vp<[[C:%.+]]> = icmp eq ir<%and>, ir<[[VTC]]>
+; CHECK-NEXT: EMIT vp<[[C:%.+]]> = icmp eq ir<%and>, vp<[[VTC]]>
; CHECK-NEXT: EMIT branch-on-cond vp<[[C]]>
; CHECK-NEXT: Successor(s): ir-bb<exit>, ir-bb<scalar.ph>
; CHECK-EMPTY:
diff --git a/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-lifetime-ends.ll b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-lifetime-ends.ll
index 7cc8458..612c96c 100644
--- a/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-lifetime-ends.ll
+++ b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-lifetime-ends.ll
@@ -73,7 +73,7 @@ define void @lifetime_for_first_arg_before_multiply(ptr noalias %B, ptr noalias
; CHECK-NEXT: store <2 x double> [[TMP13]], ptr [[TMP26]], align 8
; CHECK-NEXT: [[VEC_GEP28:%.*]] = getelementptr double, ptr [[TMP26]], i64 2
; CHECK-NEXT: store <2 x double> [[TMP25]], ptr [[VEC_GEP28]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
; CHECK-NEXT: ret void
;
entry:
@@ -81,7 +81,7 @@ entry:
call void @init(ptr %A)
%a = load <4 x double>, ptr %A, align 8
%b = load <4 x double>, ptr %B, align 8
- call void @llvm.lifetime.end(i64 -1, ptr %A)
+ call void @llvm.lifetime.end(ptr %A)
%c = call <4 x double> @llvm.matrix.multiply(<4 x double> %a, <4 x double> %b, i32 2, i32 2, i32 2)
store <4 x double> %c, ptr %C, align 8
ret void
@@ -154,7 +154,7 @@ define void @lifetime_for_second_arg_before_multiply(ptr noalias %A, ptr noalias
; CHECK-NEXT: store <2 x double> [[TMP13]], ptr [[TMP26]], align 8
; CHECK-NEXT: [[VEC_GEP28:%.*]] = getelementptr double, ptr [[TMP26]], i64 2
; CHECK-NEXT: store <2 x double> [[TMP25]], ptr [[VEC_GEP28]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B]])
; CHECK-NEXT: ret void
;
entry:
@@ -162,7 +162,7 @@ entry:
call void @init(ptr %B)
%a = load <4 x double>, ptr %A, align 8
%b = load <4 x double>, ptr %B, align 8
- call void @llvm.lifetime.end(i64 -1, ptr %B)
+ call void @llvm.lifetime.end(ptr %B)
%c = call <4 x double> @llvm.matrix.multiply(<4 x double> %a, <4 x double> %b, i32 2, i32 2, i32 2)
store <4 x double> %c, ptr %C, align 8
ret void
@@ -236,7 +236,7 @@ define void @lifetime_for_first_arg_before_multiply_load_from_offset(ptr noalias
; CHECK-NEXT: store <2 x double> [[TMP13]], ptr [[TMP26]], align 8
; CHECK-NEXT: [[VEC_GEP28:%.*]] = getelementptr double, ptr [[TMP26]], i64 2
; CHECK-NEXT: store <2 x double> [[TMP25]], ptr [[VEC_GEP28]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
; CHECK-NEXT: ret void
;
entry:
@@ -245,7 +245,7 @@ entry:
%gep.8 = getelementptr i8, ptr %A, i64 8
%a = load <4 x double>, ptr %gep.8, align 8
%b = load <4 x double>, ptr %B, align 8
- call void @llvm.lifetime.end(i64 -1, ptr %A)
+ call void @llvm.lifetime.end(ptr %A)
%c = call <4 x double> @llvm.matrix.multiply(<4 x double> %a, <4 x double> %b, i32 2, i32 2, i32 2)
store <4 x double> %c, ptr %C, align 8
ret void
@@ -332,7 +332,7 @@ entry:
br i1 %c.0, label %then, label %exit
then:
- call void @llvm.lifetime.end(i64 -1, ptr %A)
+ call void @llvm.lifetime.end(ptr %A)
br label %exit
exit:
@@ -422,7 +422,7 @@ entry:
br i1 %c.0, label %then, label %exit
then:
- call void @llvm.lifetime.end(i64 -1, ptr %B)
+ call void @llvm.lifetime.end(ptr %B)
br label %exit
exit:
@@ -442,8 +442,8 @@ define void @multiple_unrelated_lifetimes(ptr noalias %C, i1 %c.0) {
; CHECK-NEXT: call void @init(ptr [[B]])
; CHECK-NEXT: br i1 [[C:%.*]], label [[THEN:%.*]], label [[EXIT:%.*]]
; CHECK: then:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ALLOC_1]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ALLOC_2]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ALLOC_1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ALLOC_2]])
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr double, ptr [[A]], i64 0
@@ -522,10 +522,10 @@ entry:
br i1 %c.0, label %then, label %exit
then:
- call void @llvm.lifetime.end(i64 -1, ptr %B)
- call void @llvm.lifetime.end(i64 -1, ptr %alloc.1)
- call void @llvm.lifetime.end(i64 -1, ptr %A)
- call void @llvm.lifetime.end(i64 -1, ptr %alloc.2)
+ call void @llvm.lifetime.end(ptr %B)
+ call void @llvm.lifetime.end(ptr %alloc.1)
+ call void @llvm.lifetime.end(ptr %A)
+ call void @llvm.lifetime.end(ptr %alloc.2)
br label %exit
exit:
@@ -607,8 +607,8 @@ define void @lifetimes_for_args_in_different_blocks(ptr noalias %C, i1 %c.0) {
; CHECK-NEXT: store <2 x double> [[TMP25]], ptr [[VEC_GEP28]], align 8
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B]])
; CHECK-NEXT: ret void
;
entry:
@@ -626,8 +626,8 @@ then:
br label %exit
exit:
- call void @llvm.lifetime.end(i64 -1, ptr %A)
- call void @llvm.lifetime.end(i64 -1, ptr %B)
+ call void @llvm.lifetime.end(ptr %A)
+ call void @llvm.lifetime.end(ptr %B)
ret void
}
@@ -640,8 +640,8 @@ define void @lifetimes_for_args_in_different_blocks2(ptr noalias %C, i1 %c.0) {
; CHECK-NEXT: call void @init(ptr [[B]])
; CHECK-NEXT: br i1 [[C:%.*]], label [[THEN:%.*]], label [[EXIT:%.*]]
; CHECK: then:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B]])
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr double, ptr [[A]], i64 0
@@ -716,8 +716,8 @@ entry:
br i1 %c.0, label %then, label %exit
then:
- call void @llvm.lifetime.end(i64 -1, ptr %A)
- call void @llvm.lifetime.end(i64 -1, ptr %B)
+ call void @llvm.lifetime.end(ptr %A)
+ call void @llvm.lifetime.end(ptr %B)
br label %exit
exit:
@@ -809,7 +809,7 @@ entry:
call void @init(ptr %A)
call void @init(ptr %B)
%a = load <4 x double>, ptr %A, align 8
- call void @llvm.lifetime.end(i64 -1, ptr %A)
+ call void @llvm.lifetime.end(ptr %A)
br i1 %c.0, label %then, label %exit
then:
@@ -819,7 +819,7 @@ then:
br label %exit
exit:
- call void @llvm.lifetime.end(i64 -1, ptr %B)
+ call void @llvm.lifetime.end(ptr %B)
ret void
}
@@ -904,7 +904,7 @@ entry:
call void @init(ptr %A)
call void @init(ptr %B)
%b = load <4 x double>, ptr %B, align 8
- call void @llvm.lifetime.end(i64 -1, ptr %B)
+ call void @llvm.lifetime.end(ptr %B)
br i1 %c.0, label %then, label %exit
then:
@@ -914,11 +914,11 @@ then:
br label %exit
exit:
- call void @llvm.lifetime.end(i64 -1, ptr %A)
+ call void @llvm.lifetime.end(ptr %A)
ret void
}
declare void @init(ptr)
-declare void @llvm.lifetime.end(i64, ptr)
+declare void @llvm.lifetime.end(ptr)
declare <4 x double> @llvm.matrix.multiply(<4 x double>, <4 x double>, i32, i32, i32)
diff --git a/llvm/test/Transforms/Mem2Reg/alloca_addrspace.ll b/llvm/test/Transforms/Mem2Reg/alloca_addrspace.ll
index 87ff922..f7e84274 100644
--- a/llvm/test/Transforms/Mem2Reg/alloca_addrspace.ll
+++ b/llvm/test/Transforms/Mem2Reg/alloca_addrspace.ll
@@ -10,6 +10,6 @@ define amdgpu_kernel void @addressspace_alloca() {
; CHECK-NEXT: ret void
;
%alloca = alloca i8, align 8, addrspace(5)
- call void @llvm.lifetime.start(i64 2, ptr addrspace(5) %alloca)
+ call void @llvm.lifetime.start(ptr addrspace(5) %alloca)
ret void
}
diff --git a/llvm/test/Transforms/Mem2Reg/ignore-droppable.ll b/llvm/test/Transforms/Mem2Reg/ignore-droppable.ll
index d4bc097..a876319 100644
--- a/llvm/test/Transforms/Mem2Reg/ignore-droppable.ll
+++ b/llvm/test/Transforms/Mem2Reg/ignore-droppable.ll
@@ -2,8 +2,8 @@
; RUN: opt -passes=mem2reg -S -o - < %s | FileCheck %s
declare void @llvm.assume(i1)
-declare void @llvm.lifetime.start.p0(i64 %size, ptr nocapture %ptr)
-declare void @llvm.lifetime.end.p0(i64 %size, ptr nocapture %ptr)
+declare void @llvm.lifetime.start.p0(ptr nocapture %ptr)
+declare void @llvm.lifetime.end.p0(ptr nocapture %ptr)
define void @positive_assume_uses(ptr %arg) {
; CHECK-LABEL: @positive_assume_uses(
@@ -54,10 +54,10 @@ define void @positive_gep_assume_uses() {
;
%A = alloca {i8, i16}
%B = getelementptr {i8, i16}, ptr %A, i32 0, i32 0
- call void @llvm.lifetime.start.p0(i64 2, ptr %A)
+ call void @llvm.lifetime.start.p0(ptr %A)
call void @llvm.assume(i1 true) ["align"(ptr %B, i64 8), "align"(ptr %B, i64 16)]
store {i8, i16} zeroinitializer, ptr %A
- call void @llvm.lifetime.end.p0(i64 2, ptr %A)
+ call void @llvm.lifetime.end.p0(ptr %A)
call void @llvm.assume(i1 true) ["nonnull"(ptr %B), "align"(ptr %B, i64 2)]
ret void
}
@@ -70,10 +70,10 @@ define void @positive_mixed_assume_uses() {
; CHECK-NEXT: ret void
;
%A = alloca i8
- call void @llvm.lifetime.start.p0(i64 2, ptr %A)
+ call void @llvm.lifetime.start.p0(ptr %A)
call void @llvm.assume(i1 true) ["nonnull"(ptr %A), "align"(ptr %A, i64 8), "align"(ptr %A, i64 16)]
store i8 1, ptr %A
- call void @llvm.lifetime.end.p0(i64 2, ptr %A)
+ call void @llvm.lifetime.end.p0(ptr %A)
call void @llvm.assume(i1 true) ["nonnull"(ptr %A), "align"(ptr %A, i64 2), "nonnull"(ptr %A)]
call void @llvm.assume(i1 true) ["nonnull"(ptr %A), "align"(ptr %A, i64 2), "nonnull"(ptr %A)]
ret void
diff --git a/llvm/test/Transforms/Mem2Reg/ignore-lifetime.ll b/llvm/test/Transforms/Mem2Reg/ignore-lifetime.ll
index bcc9693..510fb2b 100644
--- a/llvm/test/Transforms/Mem2Reg/ignore-lifetime.ll
+++ b/llvm/test/Transforms/Mem2Reg/ignore-lifetime.ll
@@ -1,15 +1,15 @@
; RUN: opt -passes=mem2reg -S -o - < %s | FileCheck %s
-declare void @llvm.lifetime.start.p0(i64 %size, ptr nocapture %ptr)
-declare void @llvm.lifetime.end.p0(i64 %size, ptr nocapture %ptr)
+declare void @llvm.lifetime.start.p0(ptr nocapture %ptr)
+declare void @llvm.lifetime.end.p0(ptr nocapture %ptr)
define void @test1() {
; CHECK: test1
; CHECK-NOT: alloca
%A = alloca i32
- call void @llvm.lifetime.start.p0(i64 2, ptr %A)
+ call void @llvm.lifetime.start.p0(ptr %A)
store i32 1, ptr %A
- call void @llvm.lifetime.end.p0(i64 2, ptr %A)
+ call void @llvm.lifetime.end.p0(ptr %A)
ret void
}
@@ -17,8 +17,8 @@ define void @test2() {
; CHECK: test2
; CHECK-NOT: alloca
%A = alloca {i8, i16}
- call void @llvm.lifetime.start.p0(i64 2, ptr %A)
+ call void @llvm.lifetime.start.p0(ptr %A)
store {i8, i16} zeroinitializer, ptr %A
- call void @llvm.lifetime.end.p0(i64 2, ptr %A)
+ call void @llvm.lifetime.end.p0(ptr %A)
ret void
}
diff --git a/llvm/test/Transforms/MemCpyOpt/callslot_badaa.ll b/llvm/test/Transforms/MemCpyOpt/callslot_badaa.ll
index 601498e..a0c0e9f 100644
--- a/llvm/test/Transforms/MemCpyOpt/callslot_badaa.ll
+++ b/llvm/test/Transforms/MemCpyOpt/callslot_badaa.ll
@@ -5,7 +5,7 @@ declare void @use(ptr)
; Make sure callslot optimization merges alias.scope metadata correctly when it merges instructions.
; Merging here naively generates:
; call void @llvm.memcpy.p0.p0.i64(ptr align 8 %dst, ptr align 8 %src, i64 1, i1 false), !alias.scope !3
-; call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %src), !noalias !0
+; call void @llvm.lifetime.end.p0(ptr nonnull %src), !noalias !0
; ...
; !0 = !{!1}
; !1 = distinct !{!1, !2, !"callee1: %a"}
@@ -20,18 +20,18 @@ define i8 @test(i8 %input) {
%src = alloca i8
; NOTE: we're matching the full line and looking for the lack of !alias.scope here
; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 %dst, ptr align 8 %src, i64 1, i1 false)
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %src), !noalias !3
+ call void @llvm.lifetime.start.p0(ptr nonnull %src), !noalias !3
store i8 %input, ptr %src
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %tmp, ptr align 8 %src, i64 1, i1 false), !alias.scope !0
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %src), !noalias !3
+ call void @llvm.lifetime.end.p0(ptr nonnull %src), !noalias !3
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %dst, ptr align 8 %tmp, i64 1, i1 false), !alias.scope !3
%ret_value = load i8, ptr %dst
call void @use(ptr %src)
ret i8 %ret_value
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @llvm.memcpy.p0.p0.i64(ptr, ptr, i64, i1)
!0 = !{!1}
diff --git a/llvm/test/Transforms/MemCpyOpt/capturing-func.ll b/llvm/test/Transforms/MemCpyOpt/capturing-func.ll
index 47c4358..c08f60a 100644
--- a/llvm/test/Transforms/MemCpyOpt/capturing-func.ll
+++ b/llvm/test/Transforms/MemCpyOpt/capturing-func.ll
@@ -5,8 +5,8 @@ target datalayout = "e"
declare void @foo(ptr)
declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
; Check that the transformation isn't applied if the called function can
; capture the pointer argument (i.e. the nocapture attribute isn't present)
@@ -51,18 +51,18 @@ define void @test_lifetime_end() {
; CHECK-LABEL: define {{[^@]+}}@test_lifetime_end() {
; CHECK-NEXT: [[PTR1:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[PTR2:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[PTR2]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PTR2]])
; CHECK-NEXT: call void @foo(ptr [[PTR1]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[PTR2]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PTR2]])
; CHECK-NEXT: call void @foo(ptr [[PTR1]])
; CHECK-NEXT: ret void
;
%ptr1 = alloca i8
%ptr2 = alloca i8
- call void @llvm.lifetime.start.p0(i64 1, ptr %ptr2)
+ call void @llvm.lifetime.start.p0(ptr %ptr2)
call void @foo(ptr %ptr2)
call void @llvm.memcpy.p0.p0.i32(ptr %ptr1, ptr %ptr2, i32 1, i1 false)
- call void @llvm.lifetime.end.p0(i64 1, ptr %ptr2)
+ call void @llvm.lifetime.end.p0(ptr %ptr2)
call void @foo(ptr %ptr1)
ret void
}
diff --git a/llvm/test/Transforms/MemCpyOpt/lifetime-missing.ll b/llvm/test/Transforms/MemCpyOpt/lifetime-missing.ll
index 0626f09..06d9434 100644
--- a/llvm/test/Transforms/MemCpyOpt/lifetime-missing.ll
+++ b/llvm/test/Transforms/MemCpyOpt/lifetime-missing.ll
@@ -7,7 +7,7 @@ target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16
target triple = "x86_64-grtev4-linux-gnu"
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #0
+declare void @llvm.lifetime.start.p0(ptr nocapture) #0
declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg)
define void @test() {
@@ -26,7 +26,7 @@ entry:
%agg.tmp.sroa.14 = alloca [20 x i8], align 4
%agg.tmp.sroa.14.128.sroa_idx = getelementptr i8, ptr %agg.tmp.sroa.14, i64 4
call void @llvm.memset.p0.i64(ptr %agg.tmp.sroa.14.128.sroa_idx, i8 0, i64 1, i1 false)
- call void @llvm.lifetime.start.p0(i64 20, ptr %agg.tmp3.sroa.35)
+ call void @llvm.lifetime.start.p0(ptr %agg.tmp3.sroa.35)
call void @llvm.memcpy.p0.p0.i64(ptr %agg.tmp3.sroa.35, ptr %agg.tmp.sroa.14, i64 20, i1 false)
%agg.tmp3.sroa.35.128.sroa_idx = getelementptr i8, ptr %agg.tmp3.sroa.35, i64 4
call void @llvm.memcpy.p0.p0.i64(ptr inttoptr (i64 4 to ptr), ptr %agg.tmp3.sroa.35.128.sroa_idx, i64 1, i1 false)
diff --git a/llvm/test/Transforms/MemCpyOpt/lifetime.ll b/llvm/test/Transforms/MemCpyOpt/lifetime.ll
index e9fc06b..4eab12a 100644
--- a/llvm/test/Transforms/MemCpyOpt/lifetime.ll
+++ b/llvm/test/Transforms/MemCpyOpt/lifetime.ll
@@ -5,46 +5,46 @@
; @llvm.lifetime.start and @llvm.memcpy.
declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
define void @call_slot(ptr nocapture dereferenceable(16) %arg1) {
; CHECK-LABEL: @call_slot(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP:%.*]] = alloca [8 x i8], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[TMP]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP]])
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[TMP]], i64 7
; CHECK-NEXT: store i8 0, ptr [[TMP10]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[TMP]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP]])
; CHECK-NEXT: ret void
;
bb:
%tmp = alloca [8 x i8], align 8
- call void @llvm.lifetime.start.p0(i64 16, ptr %tmp)
+ call void @llvm.lifetime.start.p0(ptr %tmp)
%tmp10 = getelementptr inbounds i8, ptr %tmp, i64 7
store i8 0, ptr %tmp10, align 1
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %arg1, ptr align 8 %tmp, i64 16, i1 false)
- call void @llvm.lifetime.end.p0(i64 16, ptr %tmp)
+ call void @llvm.lifetime.end.p0(ptr %tmp)
ret void
}
define void @memcpy_memcpy_across_lifetime(ptr noalias %p1, ptr noalias %p2, ptr noalias %p3) {
; CHECK-LABEL: @memcpy_memcpy_across_lifetime(
; CHECK-NEXT: [[A:%.*]] = alloca [16 x i8], align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[A]], ptr [[P1:%.*]], i64 16, i1 false)
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[P1]], ptr [[P2:%.*]], i64 16, i1 false)
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[P2]], ptr [[A]], i64 16, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[P3:%.*]], ptr [[P2]], i64 16, i1 false)
; CHECK-NEXT: ret void
;
%a = alloca [16 x i8]
- call void @llvm.lifetime.start.p0(i64 16, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
call void @llvm.memcpy.p0.p0.i64(ptr %a, ptr %p1, i64 16, i1 false)
call void @llvm.memcpy.p0.p0.i64(ptr %p1, ptr %p2, i64 16, i1 false)
call void @llvm.memcpy.p0.p0.i64(ptr %p2, ptr %a, i64 16, i1 false)
- call void @llvm.lifetime.end.p0(i64 16, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
call void @llvm.memcpy.p0.p0.i64(ptr %p3, ptr %p2, i64 16, i1 false)
ret void
}
@@ -55,18 +55,18 @@ define i32 @call_slot_move_lifetime_start() {
; CHECK-LABEL: @call_slot_move_lifetime_start(
; CHECK-NEXT: [[TMP:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[DST:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DST]])
; CHECK-NEXT: call void @call(ptr [[DST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DST]])
; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[DST]], align 4
; CHECK-NEXT: ret i32 [[V]]
;
%tmp = alloca i32
%dst = alloca i32
call void @call(ptr %tmp)
- call void @llvm.lifetime.start.p0(i64 4, ptr %dst)
+ call void @llvm.lifetime.start.p0(ptr %dst)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dst, ptr align 4 %tmp, i64 4, i1 false)
- call void @llvm.lifetime.end.p0(i64 4, ptr %dst)
+ call void @llvm.lifetime.end.p0(ptr %dst)
%v = load i32, ptr %dst
ret i32 %v
}
@@ -76,20 +76,20 @@ define i32 @call_slot_two_lifetime_starts() {
; CHECK-NEXT: [[TMP:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[DST:%.*]] = alloca i32, align 4
; CHECK-NEXT: call void @call(ptr [[TMP]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DST]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DST]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DST]], ptr align 4 [[TMP]], i64 4, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DST]])
; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[DST]], align 4
; CHECK-NEXT: ret i32 [[V]]
;
%tmp = alloca i32
%dst = alloca i32
call void @call(ptr %tmp)
- call void @llvm.lifetime.start.p0(i64 4, ptr %dst)
- call void @llvm.lifetime.start.p0(i64 4, ptr %dst)
+ call void @llvm.lifetime.start.p0(ptr %dst)
+ call void @llvm.lifetime.start.p0(ptr %dst)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dst, ptr align 4 %tmp, i64 4, i1 false)
- call void @llvm.lifetime.end.p0(i64 4, ptr %dst)
+ call void @llvm.lifetime.end.p0(ptr %dst)
%v = load i32, ptr %dst
ret i32 %v
}
@@ -100,9 +100,9 @@ define i32 @call_slot_clobber_before_lifetime_start() {
; CHECK-NEXT: [[DST:%.*]] = alloca i32, align 4
; CHECK-NEXT: call void @call(ptr [[TMP]])
; CHECK-NEXT: store i32 0, ptr [[DST]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DST]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DST]], ptr align 4 [[TMP]], i64 4, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DST]])
; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[DST]], align 4
; CHECK-NEXT: ret i32 [[V]]
;
@@ -110,9 +110,9 @@ define i32 @call_slot_clobber_before_lifetime_start() {
%dst = alloca i32
call void @call(ptr %tmp)
store i32 0, ptr %dst
- call void @llvm.lifetime.start.p0(i64 4, ptr %dst)
+ call void @llvm.lifetime.start.p0(ptr %dst)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dst, ptr align 4 %tmp, i64 4, i1 false)
- call void @llvm.lifetime.end.p0(i64 4, ptr %dst)
+ call void @llvm.lifetime.end.p0(ptr %dst)
%v = load i32, ptr %dst
ret i32 %v
}
diff --git a/llvm/test/Transforms/MemCpyOpt/memcpy-byval-forwarding-clobbers.ll b/llvm/test/Transforms/MemCpyOpt/memcpy-byval-forwarding-clobbers.ll
index 383040c..e1b32cd 100644
--- a/llvm/test/Transforms/MemCpyOpt/memcpy-byval-forwarding-clobbers.ll
+++ b/llvm/test/Transforms/MemCpyOpt/memcpy-byval-forwarding-clobbers.ll
@@ -7,8 +7,8 @@ declare i1 @check(ptr readonly byval(i64) align 8) readonly argmemonly
declare void @clobber(ptr) argmemonly
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg)
; %a.2's lifetime ends before the call to @check. We must remove the call to
@@ -25,11 +25,11 @@ define i1 @alloca_forwarding_lifetime_end_clobber() {
entry:
%a.1 = alloca i64, align 8
%a.2 = alloca i64, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr %a.2)
+ call void @llvm.lifetime.start.p0(ptr %a.2)
call void @init(ptr sret(i64) align 8 %a.2)
store i8 0, ptr %a.2
call void @llvm.memcpy.p0.p0.i64(ptr %a.1, ptr %a.2, i64 8, i1 false)
- call void @llvm.lifetime.end.p0(i64 8, ptr %a.2)
+ call void @llvm.lifetime.end.p0(ptr %a.2)
;call void @clobber(ptr %a.2)
%call = call i1 @check(ptr byval(i64) align 8 %a.1)
ret i1 %call
@@ -42,7 +42,7 @@ define i1 @alloca_forwarding_call_clobber() {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A_1:%.*]] = alloca i64, align 8
; CHECK-NEXT: [[A_2:%.*]] = alloca i64, align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[A_2]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_2]])
; CHECK-NEXT: call void @init(ptr sret(i64) align 8 [[A_2]])
; CHECK-NEXT: store i8 0, ptr [[A_2]], align 1
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[A_1]], ptr [[A_2]], i64 8, i1 false)
@@ -53,7 +53,7 @@ define i1 @alloca_forwarding_call_clobber() {
entry:
%a.1 = alloca i64, align 8
%a.2 = alloca i64, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr %a.2)
+ call void @llvm.lifetime.start.p0(ptr %a.2)
call void @init(ptr sret(i64) align 8 %a.2)
store i8 0, ptr %a.2
call void @llvm.memcpy.p0.p0.i64(ptr %a.1, ptr %a.2, i64 8, i1 false)
@@ -67,7 +67,7 @@ define i1 @alloca_forwarding_call_clobber_after() {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A_1:%.*]] = alloca i64, align 8
; CHECK-NEXT: [[A_2:%.*]] = alloca i64, align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[A_2]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_2]])
; CHECK-NEXT: call void @init(ptr sret(i64) align 8 [[A_2]])
; CHECK-NEXT: store i8 0, ptr [[A_2]], align 1
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[A_1]], ptr [[A_2]], i64 8, i1 false)
@@ -78,7 +78,7 @@ define i1 @alloca_forwarding_call_clobber_after() {
entry:
%a.1 = alloca i64, align 8
%a.2 = alloca i64, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr %a.2)
+ call void @llvm.lifetime.start.p0(ptr %a.2)
call void @init(ptr sret(i64) align 8 %a.2)
store i8 0, ptr %a.2
call void @llvm.memcpy.p0.p0.i64(ptr %a.1, ptr %a.2, i64 8, i1 false)
@@ -102,7 +102,7 @@ entry:
%a.1 = alloca i64, align 8
%a.2 = alloca i64, align 8
%a.3 = alloca i64, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr %a.2)
+ call void @llvm.lifetime.start.p0(ptr %a.2)
call void @init(ptr sret(i64) align 8 %a.2)
store i8 0, ptr %a.2
call void @llvm.memcpy.p0.p0.i64(ptr %a.1, ptr %a.2, i64 8, i1 false)
diff --git a/llvm/test/Transforms/MemCpyOpt/memcpy-gep-modification.ll b/llvm/test/Transforms/MemCpyOpt/memcpy-gep-modification.ll
index ba6faf3..5e81c0d 100644
--- a/llvm/test/Transforms/MemCpyOpt/memcpy-gep-modification.ll
+++ b/llvm/test/Transforms/MemCpyOpt/memcpy-gep-modification.ll
@@ -3,8 +3,8 @@
%struct.MaskedType = type { i8, i8 }
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #0
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #0
+declare void @llvm.lifetime.start.p0(ptr nocapture) #0
+declare void @llvm.lifetime.end.p0(ptr nocapture) #0
declare void @MaskedFunction1(ptr, ptr addrspace(1))
declare void @MaskedFunction2(ptr, ptr)
@@ -13,11 +13,11 @@ define i8 @test_gep_not_modified(ptr %in0, ptr %in1) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[FUNCALLOC:%.*]] = alloca [[STRUCT_MASKEDTYPE:%.*]], align 4
; CHECK-NEXT: [[PTRALLOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[PTRALLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PTRALLOC]])
; CHECK-NEXT: [[ADDRSPACECAST:%.*]] = addrspacecast ptr [[PTRALLOC]] to ptr addrspace(1)
; CHECK-NEXT: call void @MaskedFunction1(ptr [[IN1:%.*]], ptr addrspace(1) [[ADDRSPACECAST]])
; CHECK-NEXT: [[LOAD1:%.*]] = load i8, ptr [[PTRALLOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[PTRALLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PTRALLOC]])
; CHECK-NEXT: [[GETELEMPTR1:%.*]] = getelementptr inbounds [[STRUCT_MASKEDTYPE]], ptr [[FUNCALLOC]], i32 0, i32 1
; CHECK-NEXT: store i8 [[LOAD1]], ptr [[GETELEMPTR1]], align 1
; CHECK-NEXT: ret i8 0
@@ -25,11 +25,11 @@ define i8 @test_gep_not_modified(ptr %in0, ptr %in1) {
entry:
%funcAlloc = alloca %struct.MaskedType, align 4
%ptrAlloc = alloca i8, align 1
- call void @llvm.lifetime.start.p0(i64 4, ptr %ptrAlloc) #0
+ call void @llvm.lifetime.start.p0(ptr %ptrAlloc) #0
%addrspaceCast = addrspacecast ptr %ptrAlloc to ptr addrspace(1)
call void @MaskedFunction1(ptr %in1, ptr addrspace(1) %addrspaceCast)
%load1 = load i8, ptr %ptrAlloc, align 1
- call void @llvm.lifetime.end.p0(i64 4, ptr %ptrAlloc) #0
+ call void @llvm.lifetime.end.p0(ptr %ptrAlloc) #0
%getElemPtr1 = getelementptr inbounds %struct.MaskedType, ptr %funcAlloc, i32 0, i32 1
store i8 %load1, ptr %getElemPtr1, align 1
ret i8 0
@@ -40,19 +40,19 @@ define i8 @test_gep_modified(ptr %in0, ptr %in1) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[FUNCALLOC:%.*]] = alloca [[STRUCT_MASKEDTYPE:%.*]], align 4
; CHECK-NEXT: [[PTRALLOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[PTRALLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PTRALLOC]])
; CHECK-NEXT: [[GETELEMPTR1:%.*]] = getelementptr inbounds [[STRUCT_MASKEDTYPE]], ptr [[FUNCALLOC]], i32 0, i32 1
; CHECK-NEXT: call void @MaskedFunction2(ptr [[IN1:%.*]], ptr [[GETELEMPTR1]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[PTRALLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PTRALLOC]])
; CHECK-NEXT: ret i8 0
;
entry:
%funcAlloc = alloca %struct.MaskedType, align 4
%ptrAlloc = alloca i8, align 1
- call void @llvm.lifetime.start.p0(i64 4, ptr %ptrAlloc) #0
+ call void @llvm.lifetime.start.p0(ptr %ptrAlloc) #0
call void @MaskedFunction2(ptr %in1, ptr %ptrAlloc)
%load1 = load i8, ptr %ptrAlloc, align 1
- call void @llvm.lifetime.end.p0(i64 4, ptr %ptrAlloc) #0
+ call void @llvm.lifetime.end.p0(ptr %ptrAlloc) #0
%getElemPtr1 = getelementptr inbounds %struct.MaskedType, ptr %funcAlloc, i32 0, i32 1
store i8 %load1, ptr %getElemPtr1, align 1
ret i8 0
diff --git a/llvm/test/Transforms/MemCpyOpt/memcpy-to-memset-with-lifetimes.ll b/llvm/test/Transforms/MemCpyOpt/memcpy-to-memset-with-lifetimes.ll
index 1771fe6..7a7f8e1 100644
--- a/llvm/test/Transforms/MemCpyOpt/memcpy-to-memset-with-lifetimes.ll
+++ b/llvm/test/Transforms/MemCpyOpt/memcpy-to-memset-with-lifetimes.ll
@@ -12,10 +12,10 @@ define void @foo(ptr noalias nocapture sret([8 x i64]) dereferenceable(64) %sret
;
entry-block:
%a = alloca [8 x i64], align 8
- call void @llvm.lifetime.start.p0(i64 64, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
call void @llvm.memset.p0.i64(ptr align 8 %a, i8 0, i64 64, i1 false)
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %sret, ptr align 8 %a, i64 64, i1 false)
- call void @llvm.lifetime.end.p0(i64 64, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
ret void
}
@@ -24,28 +24,28 @@ define void @bar(ptr noalias nocapture sret([8 x i64]) dereferenceable(64) %sret
; CHECK-LABEL: @bar(
; CHECK-NEXT: entry-block:
; CHECK-NEXT: [[A:%.*]] = alloca [8 x i64], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 64, ptr nonnull [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[A]])
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr noundef nonnull align 8 dereferenceable(64) [[A]], i8 0, i64 64, i1 false)
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr noundef nonnull align 8 dereferenceable(64) [[SRET:%.*]], i8 0, i64 64, i1 false)
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr noundef nonnull align 8 dereferenceable(32) [[A]], i8 42, i64 32, i1 false)
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 8 dereferenceable(64) [[OUT:%.*]], ptr noundef nonnull align 8 dereferenceable(64) [[A]], i64 64, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 64, ptr nonnull [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[A]])
; CHECK-NEXT: ret void
;
entry-block:
%a = alloca [8 x i64], align 8
- call void @llvm.lifetime.start.p0(i64 64, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
call void @llvm.memset.p0.i64(ptr align 8 %a, i8 0, i64 64, i1 false)
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %sret, ptr align 8 %a, i64 64, i1 false)
call void @llvm.memset.p0.i64(ptr align 8 %a, i8 42, i64 32, i1 false)
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %out, ptr align 8 %a, i64 64, i1 false)
- call void @llvm.lifetime.end.p0(i64 64, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
ret void
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind
+declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind
declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1) nounwind
declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind
diff --git a/llvm/test/Transforms/MemCpyOpt/memcpy-undef.ll b/llvm/test/Transforms/MemCpyOpt/memcpy-undef.ll
index 84253dc..6e28811 100644
--- a/llvm/test/Transforms/MemCpyOpt/memcpy-undef.ll
+++ b/llvm/test/Transforms/MemCpyOpt/memcpy-undef.ll
@@ -29,11 +29,11 @@ define i32 @test1(ptr nocapture %foobie) nounwind noinline ssp uwtable {
define void @test2(ptr sret(i8) noalias nocapture %out) nounwind noinline ssp uwtable {
; CHECK-LABEL: @test2(
; CHECK-NEXT: [[IN:%.*]] = alloca i64, align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[IN]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[IN]])
; CHECK-NEXT: ret void
;
%in = alloca i64
- call void @llvm.lifetime.start.p0(i64 8, ptr %in)
+ call void @llvm.lifetime.start.p0(ptr %in)
call void @llvm.memcpy.p0.p0.i64(ptr %out, ptr %in, i64 8, i1 false)
ret void
}
@@ -42,12 +42,12 @@ define void @test2(ptr sret(i8) noalias nocapture %out) nounwind noinline ssp uw
define void @test_lifetime_may_alias(ptr %src, ptr %dst) {
; CHECK-LABEL: @test_lifetime_may_alias(
; CHECK-NEXT: [[LIFETIME:%.*]] = alloca i64, align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[LIFETIME]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[LIFETIME]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[DST:%.*]], ptr [[SRC:%.*]], i64 8, i1 false)
; CHECK-NEXT: ret void
;
%lifetime = alloca i64
- call void @llvm.lifetime.start.p0(i64 8, ptr %lifetime)
+ call void @llvm.lifetime.start.p0(ptr %lifetime)
call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %src, i64 8, i1 false)
ret void
}
@@ -56,12 +56,12 @@ define void @test_lifetime_may_alias(ptr %src, ptr %dst) {
define void @test_lifetime_partial_alias_1(ptr noalias %dst) {
; CHECK-LABEL: @test_lifetime_partial_alias_1(
; CHECK-NEXT: [[A:%.*]] = alloca [16 x i8], align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[A]], i64 8
; CHECK-NEXT: ret void
;
%a = alloca [16 x i8]
- call void @llvm.lifetime.start.p0(i64 16, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
%gep = getelementptr i8, ptr %a, i64 8
call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %gep, i64 8, i1 false)
ret void
@@ -71,12 +71,12 @@ define void @test_lifetime_partial_alias_1(ptr noalias %dst) {
define void @test_lifetime_partial_alias_2(ptr noalias %dst) {
; CHECK-LABEL: @test_lifetime_partial_alias_2(
; CHECK-NEXT: [[A:%.*]] = alloca [16 x i8], align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[A]], i64 8
; CHECK-NEXT: ret void
;
%a = alloca [16 x i8]
- call void @llvm.lifetime.start.p0(i64 16, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
%gep = getelementptr i8, ptr %a, i64 8
call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %gep, i64 16, i1 false)
ret void
@@ -84,4 +84,4 @@ define void @test_lifetime_partial_alias_2(ptr noalias %dst) {
declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind
diff --git a/llvm/test/Transforms/MemCpyOpt/memset-memcpy-oversized.ll b/llvm/test/Transforms/MemCpyOpt/memset-memcpy-oversized.ll
index 343f951..2575d58 100644
--- a/llvm/test/Transforms/MemCpyOpt/memset-memcpy-oversized.ll
+++ b/llvm/test/Transforms/MemCpyOpt/memset-memcpy-oversized.ll
@@ -23,17 +23,17 @@ define void @test_alloca(ptr %result) {
define void @test_alloca_with_lifetimes(ptr %result) {
; CHECK-LABEL: @test_alloca_with_lifetimes(
; CHECK-NEXT: [[A:%.*]] = alloca [[T:%.*]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[A]], i8 0, i64 12, i1 false)
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr [[RESULT:%.*]], i8 0, i64 12, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
; CHECK-NEXT: ret void
;
%a = alloca %T, align 8
- call void @llvm.lifetime.start.p0(i64 16, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
call void @llvm.memset.p0.i64(ptr align 8 %a, i8 0, i64 12, i1 false)
call void @llvm.memcpy.p0.p0.i64(ptr %result, ptr align 8 %a, i64 16, i1 false)
- call void @llvm.lifetime.end.p0(i64 16, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
ret void
}
@@ -201,5 +201,5 @@ declare void @free(ptr)
declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1)
declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
diff --git a/llvm/test/Transforms/MemCpyOpt/pr29105.ll b/llvm/test/Transforms/MemCpyOpt/pr29105.ll
index d47bddd..f4538b9 100644
--- a/llvm/test/Transforms/MemCpyOpt/pr29105.ll
+++ b/llvm/test/Transforms/MemCpyOpt/pr29105.ll
@@ -7,34 +7,34 @@ define void @baz() unnamed_addr #0 {
; CHECK-LABEL: @baz(
; CHECK-NEXT: entry-block:
; CHECK-NEXT: [[TMP2:%.*]] = alloca [[FOO:%.*]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16384, ptr nonnull [[TMP2]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP2]])
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr noundef nonnull align 8 dereferenceable(16384) [[TMP2]], i8 0, i64 16384, i1 false)
; CHECK-NEXT: call void @bar(ptr noalias nonnull captures(none) dereferenceable(16384) [[TMP2]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16384, ptr nonnull [[TMP2]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP2]])
; CHECK-NEXT: ret void
;
entry-block:
%x.sroa.0 = alloca [2048 x i64], align 8
%tmp0 = alloca [2048 x i64], align 8
%tmp2 = alloca %Foo, align 8
- call void @llvm.lifetime.start.p0(i64 16384, ptr %x.sroa.0)
- call void @llvm.lifetime.start.p0(i64 16384, ptr %tmp0)
+ call void @llvm.lifetime.start.p0(ptr %x.sroa.0)
+ call void @llvm.lifetime.start.p0(ptr %tmp0)
call void @llvm.memset.p0.i64(ptr align 8 %tmp0, i8 0, i64 16384, i1 false)
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %x.sroa.0, ptr align 8 %tmp0, i64 16384, i1 false)
- call void @llvm.lifetime.end.p0(i64 16384, ptr %tmp0)
- call void @llvm.lifetime.start.p0(i64 16384, ptr %tmp2)
+ call void @llvm.lifetime.end.p0(ptr %tmp0)
+ call void @llvm.lifetime.start.p0(ptr %tmp2)
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %tmp2, ptr align 8 %x.sroa.0, i64 16384, i1 false)
call void @bar(ptr noalias nocapture nonnull dereferenceable(16384) %tmp2)
- call void @llvm.lifetime.end.p0(i64 16384, ptr %tmp2)
- call void @llvm.lifetime.end.p0(i64 16384, ptr %x.sroa.0)
+ call void @llvm.lifetime.end.p0(ptr %tmp2)
+ call void @llvm.lifetime.end.p0(ptr %x.sroa.0)
ret void
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1) #1
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
declare void @bar(ptr noalias nocapture readonly dereferenceable(16384)) unnamed_addr #0
diff --git a/llvm/test/Transforms/MemCpyOpt/preserve-memssa.ll b/llvm/test/Transforms/MemCpyOpt/preserve-memssa.ll
index ff36bf0..e1a6c3f 100644
--- a/llvm/test/Transforms/MemCpyOpt/preserve-memssa.ll
+++ b/llvm/test/Transforms/MemCpyOpt/preserve-memssa.ll
@@ -78,7 +78,7 @@ define void @test5(ptr %ptr) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[EARLY_DATA:%.*]] = alloca [128 x i8], align 8
; CHECK-NEXT: [[TMP:%.*]] = alloca [[T:%.*]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[EARLY_DATA]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[EARLY_DATA]])
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[PTR:%.*]], align 8
; CHECK-NEXT: call fastcc void @decompose(ptr [[TMP]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[EARLY_DATA]], ptr [[TMP]], i64 32, i1 false)
@@ -87,7 +87,7 @@ define void @test5(ptr %ptr) {
entry:
%early_data = alloca [128 x i8], align 8
%tmp = alloca %t, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %early_data)
+ call void @llvm.lifetime.start.p0(ptr %early_data)
%0 = load i32, ptr %ptr, align 8
call fastcc void @decompose(ptr %tmp)
call void @llvm.memcpy.p0.p0.i64(ptr %early_data, ptr %tmp, i64 32, i1 false)
@@ -131,7 +131,7 @@ define void @test8(ptr noalias %src, ptr %dst) {
declare void @clobber()
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #0
+declare void @llvm.lifetime.start.p0(ptr nocapture) #0
; Function Attrs: argmemonly nounwind willreturn
declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg) #0
diff --git a/llvm/test/Transforms/MemCpyOpt/stack-move.ll b/llvm/test/Transforms/MemCpyOpt/stack-move.ll
index 31e255b..940e30e 100644
--- a/llvm/test/Transforms/MemCpyOpt/stack-move.ll
+++ b/llvm/test/Transforms/MemCpyOpt/stack-move.ll
@@ -9,12 +9,12 @@ declare void @llvm.memcpy.p2.p1.i64(ptr addrspace(2) noalias nocapture writeonly
declare void @llvm.memmove.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1 immarg)
declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.start.p1(i64, ptr addrspace(1) nocapture)
-declare void @llvm.lifetime.end.p1(i64, ptr addrspace(1) nocapture)
-declare void @llvm.lifetime.start.p2(i64, ptr addrspace(2) nocapture)
-declare void @llvm.lifetime.end.p2(i64, ptr addrspace(2) nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
+declare void @llvm.lifetime.start.p1(ptr addrspace(1) nocapture)
+declare void @llvm.lifetime.end.p1(ptr addrspace(1) nocapture)
+declare void @llvm.lifetime.start.p2(ptr addrspace(2) nocapture)
+declare void @llvm.lifetime.end.p2(ptr addrspace(2) nocapture)
declare i32 @use_nocapture(ptr nocapture)
declare i32 @use_maycapture(ptr noundef)
@@ -31,8 +31,8 @@ define void @basic_memcpy() {
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src)
@@ -40,8 +40,8 @@ define void @basic_memcpy() {
%2 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -74,8 +74,8 @@ define void @basic_memmove() {
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src)
@@ -83,8 +83,8 @@ define void @basic_memmove() {
%2 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -99,8 +99,8 @@ define void @load_store() {
;
%src = alloca i32, align 4
%dest = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 4, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store i32 42, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src)
@@ -108,8 +108,8 @@ define void @load_store() {
store i32 %src.val, ptr %dest
%2 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 4, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 4, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -125,8 +125,8 @@ define void @load_store_scalable(<vscale x 4 x i32> %x) {
;
%src = alloca <vscale x 4 x i32>
%dest = alloca <vscale x 4 x i32>
- call void @llvm.lifetime.start.p0(i64 -1, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 -1, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store <vscale x 4 x i32> %x, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src)
@@ -135,8 +135,8 @@ define void @load_store_scalable(<vscale x 4 x i32> %x) {
%2 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 -1, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 -1, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -151,16 +151,16 @@ define void @align_up() {
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 8
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
%2 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -177,21 +177,21 @@ define void @remove_extra_lifetime_intrinsics() {
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
%2 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
%3 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -230,8 +230,8 @@ define void @alias_no_mod() {
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
%dest.alias = getelementptr %struct.Foo, ptr %dest, i32 0, i32 0
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src)
@@ -240,8 +240,8 @@ define void @alias_no_mod() {
%src.alias = getelementptr %struct.Foo, ptr %src, i32 0, i32 0
%2 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -265,16 +265,16 @@ define void @remove_scoped_noalias() {
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src), !alias.scope !2
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
%2 = call i32 @use_nocapture(ptr nocapture %dest), !noalias !2
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -289,16 +289,16 @@ define void @remove_alloca_metadata() {
;
%src = alloca %struct.Foo, align 4, !annotation !3
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src), !alias.scope !2
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
%2 = call i32 @use_nocapture(ptr nocapture %dest), !noalias !2
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -314,16 +314,16 @@ define void @noalias_on_lifetime() {
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src), !alias.scope !2
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src), !alias.scope !2
+ call void @llvm.lifetime.end.p0(ptr nocapture %src), !alias.scope !2
%2 = call i32 @use_nocapture(ptr nocapture %dest), !noalias !2
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest), !noalias !2
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest), !noalias !2
ret void
}
@@ -338,16 +338,16 @@ define void @src_ref_dest_ref_after_copy() {
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
%1 = call i32 @use_readonly(ptr nocapture %src)
%2 = call i32 @use_readonly(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -362,16 +362,16 @@ define void @src_mod_dest_mod_after_copy() {
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
%1 = call i32 @use_writeonly(ptr nocapture %src)
%2 = call i32 @use_writeonly(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -384,10 +384,10 @@ define void @avoid_memory_use_last_user_crash() {
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
%v = load i32, ptr %dest
ret void
}
@@ -409,14 +409,14 @@ define void @terminator_lastuse() personality i32 0 {
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
- call void @llvm.lifetime.end.p0(i64 12, ptr %src)
+ call void @llvm.lifetime.end.p0(ptr %src)
%rv = invoke i32 @use_nocapture(ptr %dest)
to label %suc unwind label %unw
unw:
@@ -441,8 +441,8 @@ define void @multi_bb_memcpy(i1 %b) {
;
%src = alloca i32, align 4
%dest = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 4, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store i32 42, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src)
br label %bb0
@@ -453,8 +453,8 @@ bb0:
bb1:
%2 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 4, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 4, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -471,8 +471,8 @@ define void @multi_bb_load_store(i1 %b) {
;
%src = alloca i32, align 4
%dest = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 4, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store i32 42, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src)
@@ -482,8 +482,8 @@ define void @multi_bb_load_store(i1 %b) {
bb0:
%2 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 4, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 4, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -494,8 +494,8 @@ define void @multi_bb_separated_load_store(i1 %b) {
; CHECK-SAME: (i1 [[B:%.*]]) {
; CHECK-NEXT: [[SRC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store i32 42, ptr [[SRC]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr captures(none) [[SRC]])
; CHECK-NEXT: [[SRC_VAL:%.*]] = load i32, ptr [[SRC]], align 4
@@ -505,14 +505,14 @@ define void @multi_bb_separated_load_store(i1 %b) {
; CHECK-NEXT: br label [[BB1:%.*]]
; CHECK: bb1:
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr captures(none) [[DEST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca i32, align 4
%dest = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 4, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store i32 42, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src)
@@ -525,8 +525,8 @@ bb0:
bb1:
%2 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 4, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 4, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -548,8 +548,8 @@ define void @multi_bb_simple_br(i1 %b) {
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr noundef nocapture %src)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
@@ -564,8 +564,8 @@ bb1:
br label %bb2
bb2:
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -598,7 +598,7 @@ bb1:
br label %bb2
bb2:
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
%1 = call i32 @use_nocapture(ptr noundef nocapture %dest)
@@ -620,7 +620,7 @@ define void @multi_bb_dom_test1(i1 %b) {
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 40, i32 50, i32 60 }, ptr [[SRC]], align 4
; CHECK-NEXT: br label [[BB2]]
; CHECK: bb2:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false)
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[DEST]])
; CHECK-NEXT: ret void
@@ -641,7 +641,7 @@ bb1:
br label %bb2
bb2:
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false); 1
%1 = call i32 @use_nocapture(ptr noundef nocapture %dest)
@@ -671,7 +671,7 @@ define void @multi_bb_pdom_test0(i1 %b) {
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false); 1
br i1 %b, label %bb0, label %bb1
@@ -686,7 +686,7 @@ bb1:
bb2:
%3 = call i32 @use_nocapture(ptr noundef nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
uselistorder ptr %dest, { 2, 3, 0, 1, 4, 5 }
@@ -711,7 +711,7 @@ define void @multi_bb_pdom_test1(i1 %b) {
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false); 1
br i1 %b, label %bb0, label %bb1
@@ -747,7 +747,7 @@ define void @multi_bb_pdom_test2(i1 %b) {
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false); 1
%1 = call i32 @use_nocapture(ptr noundef nocapture %dest)
@@ -784,8 +784,8 @@ entry:
%nlt1 = icmp slt i32 %n, 1
%src = alloca %struct.Foo, align 8
%dest = alloca %struct.Foo, align 8
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 0, i32 1, i32 42 }, ptr %src
br i1 %nlt1, label %loop_exit, label %loop_body
@@ -816,8 +816,8 @@ define void @multi_bb_unreachable_modref(i1 %b0) {
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr noundef nocapture %src)
br i1 %b0, label %bb0, label %exit
@@ -828,8 +828,8 @@ exit:
bb0:
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -850,8 +850,8 @@ define void @multi_bb_non_dominated(i1 %b0, i1 %b1) {
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr noundef nocapture %src)
br i1 %b0, label %bb0, label %bb1
@@ -865,8 +865,8 @@ bb1:
br label %bb2
bb2:
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -878,30 +878,30 @@ define void @memcpy_is_def() {
; CHECK-LABEL: define void @memcpy_is_def() {
; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[SRC]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false)
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[DEST]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[SRC]], ptr align 4 [[DEST]], i64 12, i1 false)
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr noundef nocapture %src)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
%2 = call i32 @use_nocapture(ptr noundef nocapture %dest)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %src, ptr align 4 %dest, i64 12, i1 false)
%3 = call i32 @use_nocapture(ptr noundef nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -912,30 +912,30 @@ define void @memset_is_def() {
; CHECK-LABEL: define void @memset_is_def() {
; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[SRC]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false)
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[DEST]])
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[SRC]], i8 42, i64 12, i1 false)
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr noundef nocapture %src)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
%2 = call i32 @use_nocapture(ptr noundef nocapture %dest)
call void @llvm.memset.p0.i64(ptr align 4 %src, i8 42, i64 12, i1 false)
%3 = call i32 @use_nocapture(ptr noundef nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -946,8 +946,8 @@ define void @store_is_def() {
; CHECK-LABEL: define void @store_is_def() {
; CHECK-NEXT: [[SRC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store i32 42, ptr [[SRC]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[SRC]])
; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[SRC]], align 4
@@ -955,14 +955,14 @@ define void @store_is_def() {
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[DEST]])
; CHECK-NEXT: store i32 64, ptr [[SRC]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca i32, align 4
%dest = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 4, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store i32 42, ptr %src
%1 = call i32 @use_nocapture(ptr noundef nocapture %src)
%2 = load i32, ptr %src
@@ -970,8 +970,8 @@ define void @store_is_def() {
%3 = call i32 @use_nocapture(ptr noundef nocapture %dest)
store i32 64, ptr %src
%4 = call i32 @use_nocapture(ptr noundef nocapture %src)
- call void @llvm.lifetime.end.p0(i64 4, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 4, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -982,8 +982,8 @@ define void @multi_bb_dataflow(i1 %b) {
; CHECK-SAME: (i1 [[B:%.*]]) {
; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[SRC]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false)
@@ -995,14 +995,14 @@ define void @multi_bb_dataflow(i1 %b) {
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[DEST]])
; CHECK-NEXT: br label [[BB2]]
; CHECK: bb2:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr noundef nocapture %src)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
@@ -1017,8 +1017,8 @@ bb1:
br label %bb2
bb2:
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -1031,26 +1031,26 @@ define void @incomplete_memcpy() {
; CHECK-LABEL: define void @incomplete_memcpy() {
; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[SRC]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 11, i1 false)
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[DEST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr noundef nocapture %src)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 11, i1 false)
%2 = call i32 @use_nocapture(ptr noundef nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -1060,28 +1060,28 @@ define void @incomplete_store() {
; CHECK-LABEL: define void @incomplete_store() {
; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[SRC]])
; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[SRC]], align 4
; CHECK-NEXT: store i32 [[TMP2]], ptr [[DEST]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[DEST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr noundef nocapture %src)
%2 = load i32, ptr %src
store i32 %2, ptr %dest
%3 = call i32 @use_nocapture(ptr noundef nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -1091,28 +1091,28 @@ define void @dynamically_sized_alloca(i64 %i) {
; CHECK-SAME: (i64 [[I:%.*]]) {
; CHECK-NEXT: [[SRC:%.*]] = alloca i8, i64 [[I]], align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca i8, i64 [[I]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO:%.*]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr captures(none) [[SRC]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false)
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr captures(none) [[DEST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca i8, i64 %i, align 4
%dest = alloca i8, i64 %i, align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 -1, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
%2 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 -1, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 -1, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -1122,28 +1122,28 @@ define void @inalloca() {
; CHECK-LABEL: define void @inalloca() {
; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca inalloca [[STRUCT_FOO]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr captures(none) [[SRC]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false)
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr captures(none) [[DEST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca %struct.Foo, align 4
%dest = alloca inalloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
%2 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -1153,28 +1153,28 @@ define void @dynamically_sized_memcpy(i64 %size) {
; CHECK-SAME: (i64 [[SIZE:%.*]]) {
; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr captures(none) [[SRC]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 [[SIZE]], i1 false)
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr captures(none) [[DEST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
; CHECK-NEXT: ret void
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 %size, i1 false)
%2 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
ret void
}
@@ -1183,28 +1183,28 @@ define void @mismatched_alloca_size() {
; CHECK-LABEL: define void @mismatched_alloca_size() {
; CHECK-NEXT: [[SRC:%.*]] = alloca i8, i64 24, align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca i8, i64 12, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO:%.*]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr captures(none) [[SRC]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false)
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr captures(none) [[DEST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca i8, i64 24, align 4
%dest = alloca i8, i64 12, align 4
- call void @llvm.lifetime.start.p0(i64 24, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
%2 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 24, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -1213,28 +1213,28 @@ define void @mismatched_alloca_addrspace() {
; CHECK-LABEL: define void @mismatched_alloca_addrspace() {
; CHECK-NEXT: [[SRC:%.*]] = alloca i8, i64 24, align 4, addrspace(1)
; CHECK-NEXT: [[DEST:%.*]] = alloca i8, i64 12, align 4, addrspace(2)
-; CHECK-NEXT: call void @llvm.lifetime.start.p1(i64 24, ptr addrspace(1) captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p2(i64 12, ptr addrspace(2) captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p1(ptr addrspace(1) captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p2(ptr addrspace(2) captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO:%.*]] { i32 10, i32 20, i32 30 }, ptr addrspace(1) [[SRC]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr addrspace(1) captures(none) [[SRC]])
; CHECK-NEXT: call void @llvm.memcpy.p2.p1.i64(ptr addrspace(2) align 4 [[DEST]], ptr addrspace(1) align 4 [[SRC]], i64 12, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.end.p1(i64 24, ptr addrspace(1) captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p1(ptr addrspace(1) captures(none) [[SRC]])
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr addrspace(2) captures(none) [[DEST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p2(i64 12, ptr addrspace(2) captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p2(ptr addrspace(2) captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca i8, i64 24, align 4, addrspace(1)
%dest = alloca i8, i64 12, align 4, addrspace(2)
- call void @llvm.lifetime.start.p1(i64 24, ptr addrspace(1) nocapture %src)
- call void @llvm.lifetime.start.p2(i64 12, ptr addrspace(2) nocapture %dest)
+ call void @llvm.lifetime.start.p1(ptr addrspace(1) nocapture %src)
+ call void @llvm.lifetime.start.p2(ptr addrspace(2) nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr addrspace(1) %src
%1 = call i32 @use_nocapture(ptr addrspace(1) nocapture %src)
call void @llvm.memcpy.p2.p1.i64(ptr addrspace(2) align 4 %dest, ptr addrspace(1) align 4 %src, i64 12, i1 false)
- call void @llvm.lifetime.end.p1(i64 24, ptr addrspace(1) nocapture %src)
+ call void @llvm.lifetime.end.p1(ptr addrspace(1) nocapture %src)
%2 = call i32 @use_nocapture(ptr addrspace(2) nocapture %dest)
- call void @llvm.lifetime.end.p2(i64 12, ptr addrspace(2) nocapture %dest)
+ call void @llvm.lifetime.end.p2(ptr addrspace(2) nocapture %dest)
ret void
}
@@ -1243,28 +1243,28 @@ define void @volatile_memcpy() {
; CHECK-LABEL: define void @volatile_memcpy() {
; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr captures(none) [[SRC]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 true)
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr captures(none) [[DEST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 true)
%2 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -1273,28 +1273,28 @@ define void @dest_captured() {
; CHECK-LABEL: define void @dest_captured() {
; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr captures(none) [[SRC]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false)
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_maycapture(ptr [[DEST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
%2 = call i32 @use_maycapture(ptr %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -1303,28 +1303,28 @@ define void @src_captured() {
; CHECK-LABEL: define void @src_captured() {
; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_maycapture(ptr [[SRC]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false)
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr captures(none) [[DEST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_maycapture(ptr %src)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
%2 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -1334,30 +1334,30 @@ define void @mod_ref_before_copy() {
; CHECK-LABEL: define void @mod_ref_before_copy() {
; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4
; CHECK-NEXT: [[R:%.*]] = call i32 @use_readonly(ptr captures(none) [[DEST]])
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr captures(none) [[SRC]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr captures(none) [[DEST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%r = call i32 @use_readonly(ptr nocapture %dest)
%1 = call i32 @use_nocapture(ptr nocapture %src)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
%2 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -1366,30 +1366,30 @@ define void @mod_dest_before_copy() {
; CHECK-LABEL: define void @mod_dest_before_copy() {
; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4
; CHECK-NEXT: store i32 13, ptr [[DEST]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr captures(none) [[SRC]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr captures(none) [[DEST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
store i32 13, ptr %dest
%1 = call i32 @use_nocapture(ptr nocapture %src)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
%2 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -1397,22 +1397,22 @@ define void @mod_src_before_store_after_load() {
; CHECK-LABEL: define void @mod_src_before_store_after_load() {
; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4
; CHECK-NEXT: store i32 13, ptr [[DEST]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr captures(none) [[SRC]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false)
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 13, i32 13, i32 13 }, ptr [[SRC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr captures(none) [[DEST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
store i32 13, ptr %dest
%1 = call i32 @use_nocapture(ptr nocapture %src)
@@ -1421,9 +1421,9 @@ define void @mod_src_before_store_after_load() {
store %struct.Foo { i32 13, i32 13, i32 13 }, ptr %src
store %struct.Foo %src.val, ptr %dest
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
%2 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -1433,28 +1433,28 @@ define void @src_mod_dest_ref_after_copy() {
; CHECK-LABEL: define void @src_mod_dest_ref_after_copy() {
; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false)
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 13, i32 13, i32 13 }, ptr [[SRC]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr captures(none) [[DEST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
store %struct.Foo { i32 13, i32 13, i32 13 }, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -1464,28 +1464,28 @@ define void @src_ref_dest_mod_after_copy() {
; CHECK-LABEL: define void @src_ref_dest_mod_after_copy() {
; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false)
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 13, i32 13, i32 13 }, ptr [[DEST]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
store %struct.Foo { i32 13, i32 13, i32 13 }, ptr %dest
%1 = call i32 @use_nocapture(ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -1494,22 +1494,22 @@ define void @dest_alias_mod_before_copy() {
; CHECK-LABEL: define void @dest_alias_mod_before_copy() {
; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4
; CHECK-NEXT: [[DEST_ALIAS:%.*]] = getelementptr inbounds [[STRUCT_FOO]], ptr [[DEST]], i64 0, i32 1
; CHECK-NEXT: store i32 13, ptr [[DEST_ALIAS]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr captures(none) [[SRC]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false)
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr captures(none) [[DEST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%dest.alias = getelementptr inbounds %struct.Foo, ptr %dest, i64 0, i32 1
store i32 13, ptr %dest.alias
@@ -1518,8 +1518,8 @@ define void @dest_alias_mod_before_copy() {
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
%2 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -1528,22 +1528,22 @@ define void @alias_src_ref_dest_mod_after_copy() {
; CHECK-LABEL: define void @alias_src_ref_dest_mod_after_copy() {
; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr captures(none) [[SRC]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false)
; CHECK-NEXT: [[DEST_ALIAS:%.*]] = getelementptr inbounds [[STRUCT_FOO]], ptr [[DEST]], i64 0, i32 1
; CHECK-NEXT: store i32 13, ptr [[DEST_ALIAS]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src)
@@ -1552,8 +1552,8 @@ define void @alias_src_ref_dest_mod_after_copy() {
%dest.alias = getelementptr inbounds %struct.Foo, ptr %dest, i64 0, i32 1
store i32 13, ptr %dest.alias
%2 = call i32 @use_nocapture(ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -1564,8 +1564,8 @@ define void @multi_bb_dataflow_conflict(i1 %b) {
; CHECK-SAME: (i1 [[B:%.*]]) {
; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[SRC]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false)
@@ -1578,14 +1578,14 @@ define void @multi_bb_dataflow_conflict(i1 %b) {
; CHECK-NEXT: br label [[BB2]]
; CHECK: bb2:
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[DEST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr noundef nocapture %src)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
@@ -1601,8 +1601,8 @@ bb1:
bb2:
%4 = call i32 @use_nocapture(ptr noundef nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -1614,8 +1614,8 @@ define void @multi_bb_loop_dest_mod_before_copy(i32 %n) {
; CHECK-NEXT: [[NLT1:%.*]] = icmp slt i32 [[N]], 1
; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 8
; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 0, i32 1, i32 42 }, ptr [[SRC]], align 4
; CHECK-NEXT: br i1 [[NLT1]], label [[LOOP_EXIT:%.*]], label [[LOOP_BODY:%.*]]
; CHECK: loop_body:
@@ -1632,8 +1632,8 @@ entry:
%nlt1 = icmp slt i32 %n, 1
%src = alloca %struct.Foo, align 8
%dest = alloca %struct.Foo, align 8
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 0, i32 1, i32 42 }, ptr %src
br i1 %nlt1, label %loop_exit, label %loop_body
@@ -1660,17 +1660,17 @@ define void @partial_lifetime() {
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 3, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
- call void @llvm.lifetime.end.p0(i64 3, ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
%2 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
diff --git a/llvm/test/Transforms/MergeICmps/X86/int64-and-ptr.ll b/llvm/test/Transforms/MergeICmps/X86/int64-and-ptr.ll
index 179d6e6..e2f5007 100644
--- a/llvm/test/Transforms/MergeICmps/X86/int64-and-ptr.ll
+++ b/llvm/test/Transforms/MergeICmps/X86/int64-and-ptr.ll
@@ -35,10 +35,10 @@ if.end5: ; preds = %if.then, %entry
ret i1 %rez.0
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
attributes #0 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { argmemonly nounwind }
diff --git a/llvm/test/Transforms/MoveAutoInit/clobber.ll b/llvm/test/Transforms/MoveAutoInit/clobber.ll
index 08ffb13..f52034d 100644
--- a/llvm/test/Transforms/MoveAutoInit/clobber.ll
+++ b/llvm/test/Transforms/MoveAutoInit/clobber.ll
@@ -10,9 +10,9 @@ define i32 @foo(i32 noundef %0, i32 noundef %1, i32 noundef %2) #0 {
; CHECK-NEXT: [[TMP4:%.*]] = alloca [100 x i8], align 16
; CHECK-NEXT: [[TMP5:%.*]] = alloca [2 x i8], align 1
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [100 x i8], ptr [[TMP4]], i64 0, i64 0
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 100, ptr nonnull [[TMP4]]) #[[ATTR3:[0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP4]]) #[[ATTR3:[0-9]+]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x i8], ptr [[TMP5]], i64 0, i64 0
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr nonnull [[TMP5]]) #[[ATTR3]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP5]]) #[[ATTR3]]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x i8], ptr [[TMP5]], i64 0, i64 1
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP1:%.*]], 0
; CHECK-NEXT: br i1 [[TMP9]], label [[TMP15:%.*]], label [[TMP10:%.*]]
@@ -38,19 +38,19 @@ define i32 @foo(i32 noundef %0, i32 noundef %1, i32 noundef %2) #0 {
; CHECK-NEXT: br label [[TMP22]]
; CHECK: 22:
; CHECK-NEXT: [[TMP23:%.*]] = phi i32 [ [[TMP14]], [[TMP10]] ], [ [[TMP21]], [[TMP17]] ], [ 0, [[TMP15]] ]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr nonnull [[TMP5]]) #[[ATTR3]]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 100, ptr nonnull [[TMP4]]) #[[ATTR3]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP5]]) #[[ATTR3]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP4]]) #[[ATTR3]]
; CHECK-NEXT: ret i32 [[TMP23]]
;
%4 = alloca [100 x i8], align 16
%5 = alloca [2 x i8], align 1
%6 = getelementptr inbounds [100 x i8], ptr %4, i64 0, i64 0
- call void @llvm.lifetime.start.p0(i64 100, ptr nonnull %4) #3
+ call void @llvm.lifetime.start.p0(ptr nonnull %4) #3
; This memset must move.
call void @llvm.memset.p0.i64(ptr noundef nonnull align 16 dereferenceable(100) %6, i8 -86, i64 100, i1 false), !annotation !0
%7 = getelementptr inbounds [2 x i8], ptr %5, i64 0, i64 0
- call void @llvm.lifetime.start.p0(i64 2, ptr nonnull %5) #3
+ call void @llvm.lifetime.start.p0(ptr nonnull %5) #3
; This store must move.
store i8 -86, ptr %7, align 1, !annotation !0
%8 = getelementptr inbounds [2 x i8], ptr %5, i64 0, i64 1
@@ -81,16 +81,16 @@ define i32 @foo(i32 noundef %0, i32 noundef %1, i32 noundef %2) #0 {
22:
%23 = phi i32 [ %14, %10 ], [ %21, %17 ], [ 0, %15 ]
- call void @llvm.lifetime.end.p0(i64 2, ptr nonnull %5) #3
- call void @llvm.lifetime.end.p0(i64 100, ptr nonnull %4) #3
+ call void @llvm.lifetime.end.p0(ptr nonnull %5) #3
+ call void @llvm.lifetime.end.p0(ptr nonnull %4) #3
ret i32 %23
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) #2
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
attributes #0 = { mustprogress nofree nosync nounwind readnone uwtable willreturn }
attributes #1 = { argmemonly mustprogress nofree nosync nounwind willreturn }
diff --git a/llvm/test/Transforms/NewGVN/coercion-different-ptr.ll b/llvm/test/Transforms/NewGVN/coercion-different-ptr.ll
index dfd6d7d..979aa69 100644
--- a/llvm/test/Transforms/NewGVN/coercion-different-ptr.ll
+++ b/llvm/test/Transforms/NewGVN/coercion-different-ptr.ll
@@ -11,7 +11,7 @@ define void @foo(ptr %arg) {
; CHECK-SAME: ptr [[ARG:%.*]]) {
; CHECK-NEXT: [[BB:.*:]]
; CHECK-NEXT: [[ALLOCA:%.*]] = alloca i8, align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[ALLOCA]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ALLOCA]])
; CHECK-NEXT: [[LOAD:%.*]] = load ptr, ptr [[ARG]], align 8
; CHECK-NEXT: [[LOAD1:%.*]] = load ptr, ptr [[LOAD]], align 8
; CHECK-NEXT: [[CALL:%.*]] = call ptr [[LOAD1]](ptr [[ALLOCA]])
@@ -19,14 +19,14 @@ define void @foo(ptr %arg) {
;
bb:
%alloca = alloca i8, align 16
- call void @llvm.lifetime.start.p0(i64 1, ptr %alloca)
+ call void @llvm.lifetime.start.p0(ptr %alloca)
%load = load ptr, ptr %arg, align 8
%load1 = load ptr, ptr %load, align 8
%call = call ptr %load1(ptr %alloca)
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr captures(none)) #0
+declare void @llvm.lifetime.start.p0(ptr captures(none)) #0
declare ptr @malloc(i64)
diff --git a/llvm/test/Transforms/NewGVN/cond_br2-xfail.ll b/llvm/test/Transforms/NewGVN/cond_br2-xfail.ll
index 017f608..8b2d662 100644
--- a/llvm/test/Transforms/NewGVN/cond_br2-xfail.ll
+++ b/llvm/test/Transforms/NewGVN/cond_br2-xfail.ll
@@ -18,7 +18,7 @@ define void @_Z4testv() #0 personality ptr @__gxx_personality_v0 {
entry:
%sv = alloca %"class.llvm::SmallVector", align 16
- call void @llvm.lifetime.start.p0(i64 64, ptr %sv) #1
+ call void @llvm.lifetime.start.p0(ptr %sv) #1
%FirstEl.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SmallVector", ptr %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 3
store ptr %FirstEl.i.i.i.i.i.i, ptr %sv, align 16, !tbaa !4
%EndX.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SmallVector", ptr %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 1
@@ -87,7 +87,7 @@ if.then.i.i.i20: ; preds = %invoke.cont3
br label %_ZN4llvm11SmallVectorIiLj8EED1Ev.exit21
_ZN4llvm11SmallVectorIiLj8EED1Ev.exit21: ; preds = %invoke.cont3, %if.then.i.i.i20
- call void @llvm.lifetime.end.p0(i64 64, ptr %sv) #1
+ call void @llvm.lifetime.end.p0(ptr %sv) #1
ret void
lpad: ; preds = %if.end.i14, %if.end.i, %invoke.cont2
@@ -106,14 +106,14 @@ eh.resume: ; preds = %if.then.i.i.i, %lpa
}
; Function Attrs: nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare i32 @__gxx_personality_v0(...)
declare void @_Z1gRN4llvm11SmallVectorIiLj8EEE(ptr) #2
; Function Attrs: nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
declare void @_ZN4llvm15SmallVectorBase8grow_podEmm(ptr, i64, i64) #2
diff --git a/llvm/test/Transforms/NewGVN/lifetime-simple.ll b/llvm/test/Transforms/NewGVN/lifetime-simple.ll
index 0a7bd33..7fe6649 100644
--- a/llvm/test/Transforms/NewGVN/lifetime-simple.ll
+++ b/llvm/test/Transforms/NewGVN/lifetime-simple.ll
@@ -9,21 +9,21 @@ define i8 @test() nounwind {
; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[P:%.*]] = alloca [32 x i8], align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[P]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[P]])
; CHECK-NEXT: store i8 1, ptr [[P]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[P]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[P]])
; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[P]], align 1
; CHECK-NEXT: ret i8 [[TMP0]]
;
entry:
%P = alloca [32 x i8]
- call void @llvm.lifetime.start.p0(i64 32, ptr %P)
+ call void @llvm.lifetime.start.p0(ptr %P)
%0 = load i8, ptr %P
store i8 1, ptr %P
- call void @llvm.lifetime.end.p0(i64 32, ptr %P)
+ call void @llvm.lifetime.end.p0(ptr %P)
%1 = load i8, ptr %P
ret i8 %1
}
-declare void @llvm.lifetime.start.p0(i64 %S, ptr nocapture %P) readonly
-declare void @llvm.lifetime.end.p0(i64 %S, ptr nocapture %P)
+declare void @llvm.lifetime.start.p0(ptr nocapture %P) readonly
+declare void @llvm.lifetime.end.p0(ptr nocapture %P)
diff --git a/llvm/test/Transforms/NewGVN/verify-memoryphi.ll b/llvm/test/Transforms/NewGVN/verify-memoryphi.ll
index a19a2a6..15bb1cb 100644
--- a/llvm/test/Transforms/NewGVN/verify-memoryphi.ll
+++ b/llvm/test/Transforms/NewGVN/verify-memoryphi.ll
@@ -5,7 +5,7 @@
; REQUIRES: asserts
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
define void @tinkywinky() {
; CHECK-LABEL: define void @tinkywinky() {
@@ -20,11 +20,11 @@ define void @tinkywinky() {
;
entry:
%a = alloca i8
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
br i1 false, label %body, label %end
body:
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
br label %end
end:
diff --git a/llvm/test/Transforms/NewGVN/vscale.ll b/llvm/test/Transforms/NewGVN/vscale.ll
index 7021172..64e22e1 100644
--- a/llvm/test/Transforms/NewGVN/vscale.ll
+++ b/llvm/test/Transforms/NewGVN/vscale.ll
@@ -579,7 +579,7 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 1
; CHECK-LABEL: @bigexample(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[REF_TMP:%.*]] = alloca { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> }, align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr nonnull [[REF_TMP]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[REF_TMP]])
; CHECK-NEXT: [[A_ELT:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[A:%.*]], 0
; CHECK-NEXT: store <vscale x 4 x i32> [[A_ELT]], ptr [[REF_TMP]], align 16
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
@@ -603,12 +603,12 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 1
; CHECK-NEXT: [[TMP12:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP9]], <vscale x 16 x i8> [[DOTUNPACK10]], 2
; CHECK-NEXT: [[DOTUNPACK12:%.*]] = load <vscale x 16 x i8>, ptr [[REF_TMP_REPACK5]], align 16
; CHECK-NEXT: [[TMP15:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP12]], <vscale x 16 x i8> [[DOTUNPACK12]], 3
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr nonnull [[REF_TMP]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[REF_TMP]])
; CHECK-NEXT: ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP15]]
;
entry:
%ref.tmp = alloca { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> }, align 16
- call void @llvm.lifetime.start.p0(i64 -1, ptr nonnull %ref.tmp)
+ call void @llvm.lifetime.start.p0(ptr nonnull %ref.tmp)
%a.elt = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %a, 0
store <vscale x 4 x i32> %a.elt, ptr %ref.tmp, align 16
%0 = call i64 @llvm.vscale.i64()
@@ -643,7 +643,7 @@ entry:
%.elt11 = getelementptr inbounds i8, ptr %ref.tmp, i64 %14
%.unpack12 = load <vscale x 16 x i8>, ptr %.elt11, align 16
%15 = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %12, <vscale x 16 x i8> %.unpack12, 3
- call void @llvm.lifetime.end.p0(i64 -1, ptr nonnull %ref.tmp)
+ call void @llvm.lifetime.end.p0(ptr nonnull %ref.tmp)
ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %15
}
diff --git a/llvm/test/Transforms/ObjCARC/inlined-autorelease-return-value.ll b/llvm/test/Transforms/ObjCARC/inlined-autorelease-return-value.ll
index 180fd0a..694deb3 100644
--- a/llvm/test/Transforms/ObjCARC/inlined-autorelease-return-value.ll
+++ b/llvm/test/Transforms/ObjCARC/inlined-autorelease-return-value.ll
@@ -7,8 +7,8 @@ declare ptr @llvm.objc.autoreleaseReturnValue(ptr)
declare ptr @llvm.objc.retainAutoreleasedReturnValue(ptr)
declare ptr @llvm.objc.unsafeClaimAutoreleasedReturnValue(ptr)
declare void @opaque()
-declare void @llvm.lifetime.start(i64, ptr nocapture)
-declare void @llvm.lifetime.end(i64, ptr nocapture)
+declare void @llvm.lifetime.start(ptr nocapture)
+declare void @llvm.lifetime.end(ptr nocapture)
; CHECK-LABEL: define ptr @elide_with_retainRV(
; CHECK-NEXT: entry:
@@ -81,16 +81,16 @@ entry:
; CHECK-LABEL: define ptr @elide_with_retainRV_splitByLifetime(
; CHECK-NEXT: entry:
; CHECK-NEXT: %x = alloca ptr
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr %x)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr %x)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %x)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %x)
; CHECK-NEXT: ret ptr %x
define ptr @elide_with_retainRV_splitByLifetime() nounwind {
entry:
; Cleanup should skip over lifetime intrinsics.
%x = alloca ptr
- call void @llvm.lifetime.start(i64 8, ptr %x)
+ call void @llvm.lifetime.start(ptr %x)
%b = call ptr @llvm.objc.autoreleaseReturnValue(ptr %x) nounwind
- call void @llvm.lifetime.end(i64 8, ptr %x)
+ call void @llvm.lifetime.end(ptr %x)
%d = call ptr @llvm.objc.retainAutoreleasedReturnValue(ptr %b) nounwind
ret ptr %d
}
@@ -221,17 +221,17 @@ entry:
; CHECK-LABEL: define ptr @elide_with_claimRV_splitByLifetime(
; CHECK-NEXT: entry:
; CHECK-NEXT: %x = alloca ptr
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr %x)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr %x)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %x)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %x)
; CHECK-NEXT: tail call void @llvm.objc.release(ptr %x)
; CHECK-NEXT: ret ptr %x
define ptr @elide_with_claimRV_splitByLifetime() nounwind {
entry:
; Cleanup should skip over lifetime intrinsics.
%x = alloca ptr
- call void @llvm.lifetime.start(i64 8, ptr %x)
+ call void @llvm.lifetime.start(ptr %x)
%b = call ptr @llvm.objc.autoreleaseReturnValue(ptr %x) nounwind
- call void @llvm.lifetime.end(i64 8, ptr %x)
+ call void @llvm.lifetime.end(ptr %x)
%d = call ptr @llvm.objc.unsafeClaimAutoreleasedReturnValue(ptr %b) nounwind
ret ptr %d
}
diff --git a/llvm/test/Transforms/ObjCARC/post-inlining.ll b/llvm/test/Transforms/ObjCARC/post-inlining.ll
index c15e089..b184bea 100644
--- a/llvm/test/Transforms/ObjCARC/post-inlining.ll
+++ b/llvm/test/Transforms/ObjCARC/post-inlining.ll
@@ -65,22 +65,22 @@ entry:
; 2) Lifetime markers.
-declare void @llvm.lifetime.start.p0(i64, ptr)
-declare void @llvm.lifetime.end.p0(i64, ptr)
+declare void @llvm.lifetime.start.p0(ptr)
+declare void @llvm.lifetime.end.p0(ptr)
; CHECK-LABEL: define ptr @testLifetime(
; CHECK: entry:
; CHECK-NEXT: %obj = alloca i8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr %obj)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr %obj)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %obj)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %obj)
; CHECK-NEXT: ret ptr %call.i
; CHECK-NEXT: }
define ptr @testLifetime(ptr %call.i) {
entry:
%obj = alloca i8
- call void @llvm.lifetime.start.p0(i64 8, ptr %obj)
+ call void @llvm.lifetime.start.p0(ptr %obj)
%0 = tail call ptr @llvm.objc.autoreleaseReturnValue(ptr %call.i) nounwind
- call void @llvm.lifetime.end.p0(i64 8, ptr %obj)
+ call void @llvm.lifetime.end.p0(ptr %obj)
%1 = tail call ptr @llvm.objc.retainAutoreleasedReturnValue(ptr %call.i) nounwind
ret ptr %call.i
}
diff --git a/llvm/test/Transforms/ObjCARC/related-check.ll b/llvm/test/Transforms/ObjCARC/related-check.ll
index 7c56b2d..045c001 100644
--- a/llvm/test/Transforms/ObjCARC/related-check.ll
+++ b/llvm/test/Transforms/ObjCARC/related-check.ll
@@ -52,9 +52,9 @@ for.cond.cleanup: ; preds = %for.cond.cleanup.lo
for.body: ; preds = %for.body.lr.ph, %if.end19
%i.032 = phi i32 [ 1, %for.body.lr.ph ], [ %inc, %if.end19 ]
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %persistent) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %persistent) #4
store i32 0, ptr %persistent, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %personalized) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %personalized) #4
store i32 0, ptr %personalized, align 4
%call = call zeroext i1 @lookupType(ptr noundef nonnull %persistent, ptr noundef nonnull %personalized) #8, !clang.arc.no_objc_arc_exceptions !15
br i1 %call, label %if.then, label %if.end19
@@ -110,18 +110,18 @@ if.end18: ; preds = %if.else, %if.then13
br label %if.end19
if.end19: ; preds = %if.end18, %for.body
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %personalized) #4
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %persistent) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %personalized) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %persistent) #4
%inc = add nuw nsw i32 %i.032, 1
%exitcond.not = icmp eq i32 %inc, %argc
br i1 %exitcond.not, label %for.cond.cleanup.loopexit, label %for.body
}
; Function Attrs: argmemonly mustprogress nocallback nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.start.p0(ptr nocapture) #2
; Function Attrs: argmemonly mustprogress nocallback nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.end.p0(ptr nocapture) #2
; Function Attrs: inaccessiblememonly mustprogress nocallback nofree nosync nounwind willreturn
declare void @llvm.objc.clang.arc.noop.use(...) #5
diff --git a/llvm/test/Transforms/OpenMP/custom_state_machines_remarks.ll b/llvm/test/Transforms/OpenMP/custom_state_machines_remarks.ll
index ad41639..60969ec 100644
--- a/llvm/test/Transforms/OpenMP/custom_state_machines_remarks.ll
+++ b/llvm/test/Transforms/OpenMP/custom_state_machines_remarks.ll
@@ -72,10 +72,10 @@ common.ret: ; preds = %entry, %user_code.e
user_code.entry: ; preds = %entry
%1 = call i32 @__kmpc_global_thread_num(ptr nonnull @3) #3
call void @unknown() #6, !dbg !20
- call void @llvm.lifetime.start.p0(i64 0, ptr nonnull %captured_vars_addrs.i.i) #3
+ call void @llvm.lifetime.start.p0(ptr nonnull %captured_vars_addrs.i.i) #3
%2 = call i32 @__kmpc_global_thread_num(ptr noundef nonnull @13) #3
call void @__kmpc_parallel_51(ptr noundef nonnull @13, i32 %2, i32 noundef 1, i32 noundef -1, i32 noundef -1, ptr noundef @__omp_outlined__2, ptr noundef @__omp_outlined__2_wrapper, ptr noundef nonnull %captured_vars_addrs.i.i, i64 noundef 0) #3, !dbg !23
- call void @llvm.lifetime.end.p0(i64 0, ptr nonnull %captured_vars_addrs.i.i) #3, !dbg !26
+ call void @llvm.lifetime.end.p0(ptr nonnull %captured_vars_addrs.i.i) #3, !dbg !26
call void @unknown() #6, !dbg !27
call void @__kmpc_target_deinit() #3, !dbg !28
br label %common.ret
@@ -116,18 +116,18 @@ common.ret: ; preds = %entry, %user_code.e
user_code.entry: ; preds = %entry
%1 = call i32 @__kmpc_global_thread_num(ptr nonnull @9) #3
- call void @llvm.lifetime.start.p0(i64 0, ptr nonnull %captured_vars_addrs.i2.i) #3
+ call void @llvm.lifetime.start.p0(ptr nonnull %captured_vars_addrs.i2.i) #3
%2 = call i32 @__kmpc_global_thread_num(ptr noundef nonnull @13) #3
call void @__kmpc_parallel_51(ptr noundef nonnull @13, i32 %2, i32 noundef 1, i32 noundef -1, i32 noundef -1, ptr noundef @__omp_outlined__2, ptr noundef @__omp_outlined__2_wrapper, ptr noundef nonnull %captured_vars_addrs.i2.i, i64 noundef 0) #3, !dbg !35
- call void @llvm.lifetime.end.p0(i64 0, ptr nonnull %captured_vars_addrs.i2.i) #3, !dbg !39
- call void @llvm.lifetime.start.p0(i64 0, ptr nonnull %captured_vars_addrs.i2.i) #3
+ call void @llvm.lifetime.end.p0(ptr nonnull %captured_vars_addrs.i2.i) #3, !dbg !39
+ call void @llvm.lifetime.start.p0(ptr nonnull %captured_vars_addrs.i2.i) #3
%3 = call i32 @__kmpc_global_thread_num(ptr noundef nonnull @13) #3
call void @__kmpc_parallel_51(ptr noundef nonnull @13, i32 %3, i32 noundef 1, i32 noundef -1, i32 noundef -1, ptr noundef @__omp_outlined__2, ptr noundef @__omp_outlined__2_wrapper, ptr noundef nonnull %captured_vars_addrs.i2.i, i64 noundef 0) #3, !dbg !40
- call void @llvm.lifetime.end.p0(i64 0, ptr nonnull %captured_vars_addrs.i2.i) #3, !dbg !42
- call void @llvm.lifetime.start.p0(i64 0, ptr nonnull %captured_vars_addrs.i2.i) #3
+ call void @llvm.lifetime.end.p0(ptr nonnull %captured_vars_addrs.i2.i) #3, !dbg !42
+ call void @llvm.lifetime.start.p0(ptr nonnull %captured_vars_addrs.i2.i) #3
%4 = call i32 @__kmpc_global_thread_num(ptr noundef nonnull @13) #3
call void @__kmpc_parallel_51(ptr noundef nonnull @13, i32 %4, i32 noundef 1, i32 noundef -1, i32 noundef -1, ptr noundef @__omp_outlined__2, ptr noundef @__omp_outlined__2_wrapper, ptr noundef nonnull %captured_vars_addrs.i2.i, i64 noundef 0) #3, !dbg !43
- call void @llvm.lifetime.end.p0(i64 0, ptr nonnull %captured_vars_addrs.i2.i) #3, !dbg !45
+ call void @llvm.lifetime.end.p0(ptr nonnull %captured_vars_addrs.i2.i) #3, !dbg !45
call void @no_openmp()
call void @no_parallelism()
call void @__kmpc_target_deinit() #3, !dbg !46
@@ -155,10 +155,10 @@ declare void @__kmpc_get_shared_variables(ptr) local_unnamed_addr
declare void @__kmpc_parallel_51(ptr, i32, i32, i32, i32, ptr, ptr, ptr, i64) local_unnamed_addr
; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #5
+declare void @llvm.lifetime.start.p0(ptr nocapture) #5
; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #5
+declare void @llvm.lifetime.end.p0(ptr nocapture) #5
declare void @no_openmp() #7
declare void @no_parallelism() #8
diff --git a/llvm/test/Transforms/OpenMP/nested_parallelism.ll b/llvm/test/Transforms/OpenMP/nested_parallelism.ll
index 412e5ea..5d96465 100644
--- a/llvm/test/Transforms/OpenMP/nested_parallelism.ll
+++ b/llvm/test/Transforms/OpenMP/nested_parallelism.ll
@@ -52,7 +52,7 @@ define weak_odr protected ptx_kernel void @__omp_offloading_10302_bd7e0_main_l13
; CHECK: common.ret:
; CHECK-NEXT: ret void
; CHECK: user_code.entry:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull [[CAPTURED_VARS_ADDRS_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[CAPTURED_VARS_ADDRS_I]])
; CHECK-NEXT: [[TMP1:%.*]] = tail call i32 @__kmpc_global_thread_num(ptr nonnull @[[GLOB1]]) #[[ATTR2:[0-9]+]]
; CHECK-NEXT: [[TMP2:%.*]] = tail call i32 @__kmpc_get_hardware_thread_id_in_block() #[[ATTR2]]
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[TMP2]], 0
@@ -66,7 +66,7 @@ define weak_odr protected ptx_kernel void @__omp_offloading_10302_bd7e0_main_l13
; CHECK-NEXT: [[TMP4:%.*]] = addrspacecast ptr [[CAPTURED_VARS_ADDRS_I]] to ptr addrspace(5)
; CHECK-NEXT: store ptr addrspacecast (ptr addrspace(3) @i_shared to ptr), ptr addrspace(5) [[TMP4]], align 8
; CHECK-NEXT: call void @__kmpc_parallel_51(ptr nonnull @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr nonnull @__omp_outlined__, ptr nonnull @__omp_outlined___wrapper, ptr nonnull [[CAPTURED_VARS_ADDRS_I]], i64 1)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull [[CAPTURED_VARS_ADDRS_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[CAPTURED_VARS_ADDRS_I]])
; CHECK-NEXT: call void @__kmpc_target_deinit()
; CHECK-NEXT: br label [[COMMON_RET]]
;
@@ -80,7 +80,7 @@ common.ret: ; preds = %entry, %_Z3fooi.int
ret void
user_code.entry: ; preds = %entry
- call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %captured_vars_addrs.i)
+ call void @llvm.lifetime.start.p0(ptr nonnull %captured_vars_addrs.i)
%1 = tail call i32 @__kmpc_global_thread_num(ptr nonnull @1) #6
%2 = tail call i32 @__kmpc_get_hardware_thread_id_in_block() #6
%3 = icmp eq i32 %2, 0
@@ -95,7 +95,7 @@ _Z3fooi.internalized.exit: ; preds = %user_code.entry, %r
tail call void @__kmpc_barrier_simple_spmd(ptr nonnull @1, i32 %2)
store ptr addrspacecast (ptr addrspace(3) @i_shared to ptr), ptr %captured_vars_addrs.i, align 8
call void @__kmpc_parallel_51(ptr nonnull @1, i32 %1, i32 1, i32 -1, i32 -1, ptr nonnull @__omp_outlined__, ptr nonnull @__omp_outlined___wrapper, ptr nonnull %captured_vars_addrs.i, i64 1) #6
- call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %captured_vars_addrs.i)
+ call void @llvm.lifetime.end.p0(ptr nonnull %captured_vars_addrs.i)
call void @__kmpc_target_deinit() #6
br label %common.ret
}
@@ -139,13 +139,13 @@ define weak_odr protected ptx_kernel void @__omp_offloading_10302_bd7e0_main_l16
; CHECK-NEXT: ret void
; CHECK: user_code.entry:
; CHECK-NEXT: [[I_ADDR_SROA_0_0_EXTRACT_TRUNC:%.*]] = trunc i64 [[I:%.*]] to i32
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull [[CAPTURED_VARS_ADDRS_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[CAPTURED_VARS_ADDRS_I]])
; CHECK-NEXT: [[TMP1:%.*]] = tail call i32 @__kmpc_global_thread_num(ptr nonnull @[[GLOB1]]) #[[ATTR2]]
; CHECK-NEXT: store i32 [[I_ADDR_SROA_0_0_EXTRACT_TRUNC]], ptr addrspace(3) @i.i_shared, align 16
; CHECK-NEXT: [[TMP2:%.*]] = addrspacecast ptr [[CAPTURED_VARS_ADDRS_I]] to ptr addrspace(5)
; CHECK-NEXT: store ptr addrspacecast (ptr addrspace(3) @i.i_shared to ptr), ptr addrspace(5) [[TMP2]], align 8
; CHECK-NEXT: call void @__kmpc_parallel_51(ptr nonnull @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr nonnull @__omp_outlined__1, ptr nonnull @__omp_outlined__1_wrapper, ptr nonnull [[CAPTURED_VARS_ADDRS_I]], i64 1)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull [[CAPTURED_VARS_ADDRS_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[CAPTURED_VARS_ADDRS_I]])
; CHECK-NEXT: call void @__kmpc_target_deinit()
; CHECK-NEXT: br label [[COMMON_RET]]
;
@@ -160,12 +160,12 @@ common.ret: ; preds = %entry, %user_code.e
user_code.entry: ; preds = %entry
%i.addr.sroa.0.0.extract.trunc = trunc i64 %i to i32
- call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %captured_vars_addrs.i)
+ call void @llvm.lifetime.start.p0(ptr nonnull %captured_vars_addrs.i)
%1 = tail call i32 @__kmpc_global_thread_num(ptr nonnull @1) #6
store i32 %i.addr.sroa.0.0.extract.trunc, ptr addrspacecast (ptr addrspace(3) @i.i_shared to ptr), align 16
store ptr addrspacecast (ptr addrspace(3) @i.i_shared to ptr), ptr %captured_vars_addrs.i, align 8
call void @__kmpc_parallel_51(ptr nonnull @1, i32 %1, i32 1, i32 -1, i32 -1, ptr nonnull @__omp_outlined__1, ptr nonnull @__omp_outlined__1_wrapper, ptr nonnull %captured_vars_addrs.i, i64 1) #6
- call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %captured_vars_addrs.i)
+ call void @llvm.lifetime.end.p0(ptr nonnull %captured_vars_addrs.i)
call void @__kmpc_target_deinit() #6
br label %common.ret
}
@@ -201,7 +201,7 @@ define internal void @__omp_outlined__(ptr noalias nocapture readnone %.global_t
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CAPTURED_VARS_ADDRS_I:%.*]] = alloca [1 x ptr], align 8
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[I:%.*]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull [[CAPTURED_VARS_ADDRS_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[CAPTURED_VARS_ADDRS_I]])
; CHECK-NEXT: [[TMP1:%.*]] = tail call i32 @__kmpc_global_thread_num(ptr nonnull @[[GLOB1]]) #[[ATTR2]]
; CHECK-NEXT: [[I_I:%.*]] = tail call align 16 dereferenceable_or_null(4) ptr @__kmpc_alloc_shared(i64 4) #[[ATTR2]]
; CHECK-NEXT: store i32 [[TMP0]], ptr [[I_I]], align 16
@@ -209,20 +209,20 @@ define internal void @__omp_outlined__(ptr noalias nocapture readnone %.global_t
; CHECK-NEXT: store ptr [[I_I]], ptr addrspace(5) [[TMP2]], align 8
; CHECK-NEXT: call void @__kmpc_parallel_51(ptr nonnull @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr nonnull @__omp_outlined__1, ptr nonnull @__omp_outlined__1_wrapper, ptr nonnull [[CAPTURED_VARS_ADDRS_I]], i64 1)
; CHECK-NEXT: call void @__kmpc_free_shared(ptr [[I_I]], i64 4) #[[ATTR2]]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull [[CAPTURED_VARS_ADDRS_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[CAPTURED_VARS_ADDRS_I]])
; CHECK-NEXT: ret void
;
entry:
%captured_vars_addrs.i = alloca [1 x ptr], align 8
%0 = load i32, ptr %i, align 4
- call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %captured_vars_addrs.i)
+ call void @llvm.lifetime.start.p0(ptr nonnull %captured_vars_addrs.i)
%1 = tail call i32 @__kmpc_global_thread_num(ptr nonnull @1) #6
%i.i = tail call align 16 dereferenceable_or_null(4) ptr @__kmpc_alloc_shared(i64 4) #6
store i32 %0, ptr %i.i, align 16
store ptr %i.i, ptr %captured_vars_addrs.i, align 8
call void @__kmpc_parallel_51(ptr nonnull @1, i32 %1, i32 1, i32 -1, i32 -1, ptr nonnull @__omp_outlined__1, ptr nonnull @__omp_outlined__1_wrapper, ptr nonnull %captured_vars_addrs.i, i64 1) #6
call void @__kmpc_free_shared(ptr %i.i, i64 4) #6
- call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %captured_vars_addrs.i)
+ call void @llvm.lifetime.end.p0(ptr nonnull %captured_vars_addrs.i)
ret void
}
@@ -236,7 +236,7 @@ define internal void @__omp_outlined___wrapper(i16 zeroext %0, i32 %1) #5 {
; CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr addrspace(5) [[TMP5]], align 8
; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP2]], align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull [[CAPTURED_VARS_ADDRS_I_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[CAPTURED_VARS_ADDRS_I_I]])
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_global_thread_num(ptr nonnull @[[GLOB1]]) #[[ATTR2]]
; CHECK-NEXT: [[I_I_I:%.*]] = call align 16 dereferenceable_or_null(4) ptr @__kmpc_alloc_shared(i64 4) #[[ATTR2]]
; CHECK-NEXT: store i32 [[TMP4]], ptr [[I_I_I]], align 16
@@ -244,7 +244,7 @@ define internal void @__omp_outlined___wrapper(i16 zeroext %0, i32 %1) #5 {
; CHECK-NEXT: store ptr [[I_I_I]], ptr addrspace(5) [[TMP7]], align 8
; CHECK-NEXT: call void @__kmpc_parallel_51(ptr nonnull @[[GLOB1]], i32 [[TMP6]], i32 1, i32 -1, i32 -1, ptr nonnull @__omp_outlined__1, ptr nonnull @__omp_outlined__1_wrapper, ptr nonnull [[CAPTURED_VARS_ADDRS_I_I]], i64 1)
; CHECK-NEXT: call void @__kmpc_free_shared(ptr [[I_I_I]], i64 4) #[[ATTR2]]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull [[CAPTURED_VARS_ADDRS_I_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[CAPTURED_VARS_ADDRS_I_I]])
; CHECK-NEXT: ret void
;
entry:
@@ -254,14 +254,14 @@ entry:
%2 = load ptr, ptr %global_args, align 8
%3 = load ptr, ptr %2, align 8
%4 = load i32, ptr %3, align 4
- call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %captured_vars_addrs.i.i)
+ call void @llvm.lifetime.start.p0(ptr nonnull %captured_vars_addrs.i.i)
%5 = call i32 @__kmpc_global_thread_num(ptr nonnull @1) #6
%i.i.i = call align 16 dereferenceable_or_null(4) ptr @__kmpc_alloc_shared(i64 4) #6
store i32 %4, ptr %i.i.i, align 16
store ptr %i.i.i, ptr %captured_vars_addrs.i.i, align 8
call void @__kmpc_parallel_51(ptr nonnull @1, i32 %5, i32 1, i32 -1, i32 -1, ptr nonnull @__omp_outlined__1, ptr nonnull @__omp_outlined__1_wrapper, ptr nonnull %captured_vars_addrs.i.i, i64 1) #6
call void @__kmpc_free_shared(ptr %i.i.i, i64 4) #6
- call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %captured_vars_addrs.i.i)
+ call void @llvm.lifetime.end.p0(ptr nonnull %captured_vars_addrs.i.i)
ret void
}
@@ -316,9 +316,9 @@ declare i32 @__kmpc_get_hardware_thread_id_in_block() local_unnamed_addr
declare void @__kmpc_barrier_simple_spmd(ptr, i32) local_unnamed_addr #10
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #11
+declare void @llvm.lifetime.start.p0(ptr nocapture) #11
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #11
+declare void @llvm.lifetime.end.p0(ptr nocapture) #11
!omp_offload.info = !{!0, !1}
diff --git a/llvm/test/Transforms/OpenMP/parallel_deletion.ll b/llvm/test/Transforms/OpenMP/parallel_deletion.ll
index 3e16d96..67970c4 100644
--- a/llvm/test/Transforms/OpenMP/parallel_deletion.ll
+++ b/llvm/test/Transforms/OpenMP/parallel_deletion.ll
@@ -282,46 +282,46 @@ define void @delete_parallel_2() {
; CHECK-LABEL: define {{[^@]+}}@delete_parallel_2() {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noundef nonnull align 4 dereferenceable(4) [[A]]) #[[ATTR18:[0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr noundef nonnull align 4 dereferenceable(4) [[A]]) #[[ATTR18:[0-9]+]]
; CHECK-NEXT: store i32 0, ptr [[A]], align 4
; CHECK-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB0]], i32 noundef 1, ptr noundef nonnull @.omp_outlined..3, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[A]])
; CHECK-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB0]], i32 noundef 1, ptr noundef nonnull @.omp_outlined..4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[A]])
; CHECK-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB0]], i32 noundef 1, ptr noundef nonnull @.omp_outlined..5, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[A]])
; CHECK-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB0]], i32 noundef 1, ptr noundef nonnull @.omp_outlined..6, ptr noundef nonnull align 4 captures(none) dereferenceable(4) [[A]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[A]])
; CHECK-NEXT: ret void
;
; CHECK1-LABEL: define {{[^@]+}}@delete_parallel_2() {
; CHECK1-NEXT: entry:
; CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4
-; CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noundef nonnull align 4 dereferenceable(4) [[A]]) #[[ATTR0]]
+; CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr noundef nonnull align 4 dereferenceable(4) [[A]]) #[[ATTR0]]
; CHECK1-NEXT: store i32 0, ptr [[A]], align 4
; CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB0]], i32 noundef 1, ptr noundef @.omp_outlined..3, ptr nocapture nofree noundef nonnull align 4 dereferenceable(4) [[A]])
; CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB0]], i32 noundef 1, ptr noundef @.omp_outlined..4, ptr nocapture nofree noundef nonnull align 4 dereferenceable(4) [[A]])
; CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB0]], i32 noundef 1, ptr noundef @.omp_outlined..5, ptr nocapture nofree noundef nonnull align 4 dereferenceable(4) [[A]])
; CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB0]], i32 noundef 1, ptr noundef @.omp_outlined..6, ptr nocapture noundef nonnull align 4 dereferenceable(4) [[A]])
-; CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[A]])
+; CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[A]])
; CHECK1-NEXT: ret void
; CHECK2-LABEL: define {{[^@]+}}@delete_parallel_2() {
; CHECK2-NEXT: entry:
; CHECK2-NEXT: [[A:%.*]] = alloca i32, align 4
-; CHECK2-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noundef nonnull align 4 dereferenceable(4) [[A]]) #[[ATTR0]]
+; CHECK2-NEXT: call void @llvm.lifetime.start.p0(ptr noundef nonnull align 4 dereferenceable(4) [[A]]) #[[ATTR0]]
; CHECK2-NEXT: store i32 0, ptr [[A]], align 4
; CHECK2-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB0]], i32 noundef 1, ptr noundef @.omp_outlined..3, ptr nocapture nofree noundef nonnull align 4 dereferenceable(4) [[A]])
; CHECK2-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB0]], i32 noundef 1, ptr noundef @.omp_outlined..4, ptr nocapture nofree noundef nonnull align 4 dereferenceable(4) [[A]])
; CHECK2-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB0]], i32 noundef 1, ptr noundef @.omp_outlined..5, ptr nocapture nofree noundef nonnull align 4 dereferenceable(4) [[A]])
; CHECK2-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB0]], i32 noundef 1, ptr noundef @.omp_outlined..6, ptr nocapture noundef nonnull align 4 dereferenceable(4) [[A]])
-; CHECK2-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[A]])
+; CHECK2-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[A]])
; CHECK2-NEXT: ret void
entry:
%a = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %a)
+ call void @llvm.lifetime.start.p0(ptr nonnull %a)
store i32 0, ptr %a, align 4
call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr nonnull @0, i32 1, ptr @.omp_outlined..3, ptr nonnull %a)
call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr nonnull @0, i32 1, ptr @.omp_outlined..4, ptr nonnull %a)
call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr nonnull @0, i32 1, ptr @.omp_outlined..5, ptr nonnull %a)
call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr nonnull @0, i32 1, ptr @.omp_outlined..6, ptr nonnull %a)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
ret void
}
@@ -445,7 +445,7 @@ omp_if.end: ; preds = %entry, %omp_if.then
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare i32 @omp_get_thread_num() inaccessiblememonly nofree nosync nounwind readonly
@@ -531,7 +531,7 @@ define internal void @.omp_outlined..6(ptr noalias %.global_tid., ptr noalias %.
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A1:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noundef nonnull align 4 [[A1]]) #[[ATTR20:[0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr noundef nonnull align 4 [[A1]]) #[[ATTR20:[0-9]+]]
; CHECK-NEXT: store i32 1, ptr [[A1]], align 4
; CHECK-NEXT: store ptr [[A1]], ptr [[DOTOMP_REDUCTION_RED_LIST]], align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4
@@ -552,7 +552,7 @@ define internal void @.omp_outlined..6(ptr noalias %.global_tid., ptr noalias %.
; CHECK-NEXT: [[TMP8:%.*]] = atomicrmw add ptr [[A]], i32 [[TMP7]] monotonic, align 4
; CHECK-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
; CHECK: .omp.reduction.default:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[A1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[A1]])
; CHECK-NEXT: ret void
;
; CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..6
@@ -560,7 +560,7 @@ define internal void @.omp_outlined..6(ptr noalias %.global_tid., ptr noalias %.
; CHECK1-NEXT: entry:
; CHECK1-NEXT: [[A1:%.*]] = alloca i32, align 4
; CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8
-; CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noundef nonnull align 4 [[A1]]) #[[ATTR0]]
+; CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr noundef nonnull align 4 [[A1]]) #[[ATTR0]]
; CHECK1-NEXT: store i32 1, ptr [[A1]], align 4
; CHECK1-NEXT: store ptr [[A1]], ptr [[DOTOMP_REDUCTION_RED_LIST]], align 8
; CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4
@@ -581,14 +581,14 @@ define internal void @.omp_outlined..6(ptr noalias %.global_tid., ptr noalias %.
; CHECK1-NEXT: [[TMP8:%.*]] = atomicrmw add ptr [[A]], i32 [[TMP7]] monotonic, align 4
; CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
; CHECK1: .omp.reduction.default:
-; CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[A1]])
+; CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[A1]])
; CHECK1-NEXT: ret void
; CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..6
; CHECK2-SAME: (ptr noalias nocapture nofree noundef nonnull readonly align 4 dereferenceable(4) [[DOTGLOBAL_TID_:%.*]], ptr noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]], ptr nocapture noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) {
; CHECK2-NEXT: entry:
; CHECK2-NEXT: [[A1:%.*]] = alloca i32, align 4
; CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8
-; CHECK2-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noundef nonnull align 4 [[A1]]) #[[ATTR0]]
+; CHECK2-NEXT: call void @llvm.lifetime.start.p0(ptr noundef nonnull align 4 [[A1]]) #[[ATTR0]]
; CHECK2-NEXT: store i32 1, ptr [[A1]], align 4
; CHECK2-NEXT: store ptr [[A1]], ptr [[DOTOMP_REDUCTION_RED_LIST]], align 8
; CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4
@@ -609,12 +609,12 @@ define internal void @.omp_outlined..6(ptr noalias %.global_tid., ptr noalias %.
; CHECK2-NEXT: [[TMP8:%.*]] = atomicrmw add ptr [[A]], i32 [[TMP7]] monotonic, align 4
; CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
; CHECK2: .omp.reduction.default:
-; CHECK2-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[A1]])
+; CHECK2-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[A1]])
; CHECK2-NEXT: ret void
entry:
%a1 = alloca i32, align 4
%.omp.reduction.red_list = alloca [1 x ptr], align 8
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %a1)
+ call void @llvm.lifetime.start.p0(ptr nonnull %a1)
store i32 1, ptr %a1, align 4
store ptr %a1, ptr %.omp.reduction.red_list, align 8
%tmp2 = load i32, ptr %.global_tid., align 4
@@ -638,7 +638,7 @@ entry:
br label %.omp.reduction.default
.omp.reduction.default: ; preds = %.omp.reduction.case2, %.omp.reduction.case1, %entry
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %a1)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a1)
ret void
}
@@ -696,7 +696,7 @@ declare i32 @__kmpc_reduce_nowait(ptr, i32, i32, i64, ptr, ptr, ptr)
declare void @__kmpc_end_reduce_nowait(ptr, i32, ptr)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare !callback !2 void @__kmpc_fork_call(ptr, i32, ptr, ...)
diff --git a/llvm/test/Transforms/OpenMP/parallel_region_merging.ll b/llvm/test/Transforms/OpenMP/parallel_region_merging.ll
index d587b9a..83452e7 100644
--- a/llvm/test/Transforms/OpenMP/parallel_region_merging.ll
+++ b/llvm/test/Transforms/OpenMP/parallel_region_merging.ll
@@ -433,11 +433,11 @@ entry:
%b = alloca i32, align 4
store i32 %a, ptr %a.addr, align 4
call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr nonnull @1, i32 1, ptr @.omp_outlined..14, ptr nonnull %a.addr)
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %b)
+ call void @llvm.lifetime.start.p0(ptr nonnull %b)
%0 = ptrtoint ptr %b to i64
%1 = trunc i64 %0 to i32
store i32 %1, ptr %b, align 4
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %b)
+ call void @llvm.lifetime.end.p0(ptr nonnull %b)
call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr nonnull @1, i32 1, ptr @.omp_outlined..15, ptr nonnull %a.addr)
ret void
}
@@ -449,9 +449,9 @@ entry:
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
define internal void @.omp_outlined..15(ptr noalias nocapture readnone %.global_tid., ptr noalias nocapture readnone %.bound_tid., ptr nocapture nonnull readonly align 4 dereferenceable(4) %a) {
entry:
@@ -466,12 +466,12 @@ entry:
%b = alloca i32, align 4
store i32 %a, ptr %a.addr, align 4
call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr nonnull @1, i32 1, ptr @.omp_outlined..16, ptr nonnull %a.addr)
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %b)
+ call void @llvm.lifetime.start.p0(ptr nonnull %b)
%0 = load i32, ptr %a.addr, align 4
%add = add nsw i32 %0, 1
store i32 %add, ptr %b, align 4
call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr nonnull @1, i32 2, ptr @.omp_outlined..17, ptr nonnull %a.addr, ptr nonnull %b)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %b)
+ call void @llvm.lifetime.end.p0(ptr nonnull %b)
ret void
}
@@ -1184,11 +1184,11 @@ entry:
; CHECK: omp_region.body:
; CHECK-NEXT: br label [[SEQ_PAR_MERGED:%.*]]
; CHECK: seq.par.merged:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr noundef nonnull [[B]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[B]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = trunc i64 [[TMP4]] to i32
; CHECK-NEXT: store i32 [[TMP5]], ptr [[B]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[B]])
; CHECK-NEXT: br label [[OMP_PAR_MERGED_SPLIT:%.*]]
; CHECK: omp.par.merged.split:
; CHECK-NEXT: br label [[OMP_REGION_BODY_SPLIT:%.*]]
@@ -1216,7 +1216,7 @@ entry:
; CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
; CHECK-NEXT: br label [[OMP_PARALLEL:%.*]]
; CHECK: omp_parallel:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B]])
; CHECK-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @merge_seq_par_use..omp_par, ptr [[A_ADDR]], ptr [[B]])
; CHECK-NEXT: br label [[OMP_PAR_OUTLINED_EXIT:%.*]]
; CHECK: omp.par.outlined.exit:
@@ -1224,7 +1224,7 @@ entry:
; CHECK: omp.par.exit.split:
; CHECK-NEXT: br label [[ENTRY_SPLIT_SPLIT:%.*]]
; CHECK: entry.split.split:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[B]])
; CHECK-NEXT: ret void
; CHECK-LABEL: define {{[^@]+}}@merge_seq_par_use..omp_par
; CHECK-SAME: (ptr noalias [[TID_ADDR:%.*]], ptr noalias [[ZERO_ADDR:%.*]], ptr [[A_ADDR:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
@@ -2155,11 +2155,11 @@ entry:
; CHECK: omp_region.body:
; CHECK-NEXT: br label [[SEQ_PAR_MERGED:%.*]]
; CHECK: seq.par.merged:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr noundef nonnull [[B]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[B]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = trunc i64 [[TMP4]] to i32
; CHECK-NEXT: store i32 [[TMP5]], ptr [[B]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[B]])
; CHECK-NEXT: br label [[OMP_PAR_MERGED_SPLIT:%.*]]
; CHECK: omp.par.merged.split:
; CHECK-NEXT: br label [[OMP_REGION_BODY_SPLIT:%.*]]
@@ -2187,7 +2187,7 @@ entry:
; CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
; CHECK-NEXT: br label [[OMP_PARALLEL:%.*]]
; CHECK: omp_parallel:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B]])
; CHECK-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @merge_seq_par_use..omp_par, ptr [[A_ADDR]], ptr [[B]])
; CHECK-NEXT: br label [[OMP_PAR_OUTLINED_EXIT:%.*]]
; CHECK: omp.par.outlined.exit:
@@ -2195,7 +2195,7 @@ entry:
; CHECK: omp.par.exit.split:
; CHECK-NEXT: br label [[ENTRY_SPLIT_SPLIT:%.*]]
; CHECK: entry.split.split:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[B]])
; CHECK-NEXT: ret void
; CHECK-LABEL: define {{[^@]+}}@merge_seq_par_use..omp_par
; CHECK-SAME: (ptr noalias [[TID_ADDR:%.*]], ptr noalias [[ZERO_ADDR:%.*]], ptr [[A_ADDR:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
@@ -3126,11 +3126,11 @@ entry:
; CHECK: omp_region.body:
; CHECK-NEXT: br label [[SEQ_PAR_MERGED:%.*]]
; CHECK: seq.par.merged:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr noundef nonnull [[B]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[B]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = trunc i64 [[TMP4]] to i32
; CHECK-NEXT: store i32 [[TMP5]], ptr [[B]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[B]])
; CHECK-NEXT: br label [[OMP_PAR_MERGED_SPLIT:%.*]]
; CHECK: omp.par.merged.split:
; CHECK-NEXT: br label [[OMP_REGION_BODY_SPLIT:%.*]]
@@ -3158,7 +3158,7 @@ entry:
; CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
; CHECK-NEXT: br label [[OMP_PARALLEL:%.*]]
; CHECK: omp_parallel:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B]])
; CHECK-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @merge_seq_par_use..omp_par, ptr [[A_ADDR]], ptr [[B]])
; CHECK-NEXT: br label [[OMP_PAR_OUTLINED_EXIT:%.*]]
; CHECK: omp.par.outlined.exit:
@@ -3166,7 +3166,7 @@ entry:
; CHECK: omp.par.exit.split:
; CHECK-NEXT: br label [[ENTRY_SPLIT_SPLIT:%.*]]
; CHECK: entry.split.split:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[B]])
; CHECK-NEXT: ret void
; CHECK-LABEL: define {{[^@]+}}@merge_seq_par_use..omp_par
; CHECK-SAME: (ptr noalias [[TID_ADDR:%.*]], ptr noalias [[ZERO_ADDR:%.*]], ptr [[A_ADDR:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
@@ -4097,11 +4097,11 @@ entry:
; CHECK: omp_region.body:
; CHECK-NEXT: br label [[SEQ_PAR_MERGED:%.*]]
; CHECK: seq.par.merged:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr noundef nonnull [[B]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[B]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = trunc i64 [[TMP4]] to i32
; CHECK-NEXT: store i32 [[TMP5]], ptr [[B]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[B]])
; CHECK-NEXT: br label [[OMP_PAR_MERGED_SPLIT:%.*]]
; CHECK: omp.par.merged.split:
; CHECK-NEXT: br label [[OMP_REGION_BODY_SPLIT:%.*]]
@@ -4129,7 +4129,7 @@ entry:
; CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
; CHECK-NEXT: br label [[OMP_PARALLEL:%.*]]
; CHECK: omp_parallel:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B]])
; CHECK-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @merge_seq_par_use..omp_par, ptr [[A_ADDR]], ptr [[B]])
; CHECK-NEXT: br label [[OMP_PAR_OUTLINED_EXIT:%.*]]
; CHECK: omp.par.outlined.exit:
@@ -4137,7 +4137,7 @@ entry:
; CHECK: omp.par.exit.split:
; CHECK-NEXT: br label [[ENTRY_SPLIT_SPLIT:%.*]]
; CHECK: entry.split.split:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[B]])
; CHECK-NEXT: ret void
; CHECK-LABEL: define {{[^@]+}}@merge_seq_par_use..omp_par
; CHECK-SAME: (ptr noalias [[TID_ADDR:%.*]], ptr noalias [[ZERO_ADDR:%.*]], ptr [[A_ADDR:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
@@ -5148,11 +5148,11 @@ entry:
; CHECK2: omp_region.body:
; CHECK2-NEXT: br label [[SEQ_PAR_MERGED:%.*]]
; CHECK2: seq.par.merged:
-; CHECK2-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK2-NEXT: call void @llvm.lifetime.start.p0(ptr noundef nonnull [[B]])
; CHECK2-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[B]] to i64
; CHECK2-NEXT: [[TMP5:%.*]] = trunc i64 [[TMP4]] to i32
; CHECK2-NEXT: store i32 [[TMP5]], ptr [[B]], align 4
-; CHECK2-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK2-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[B]])
; CHECK2-NEXT: br label [[OMP_PAR_MERGED_SPLIT:%.*]]
; CHECK2: omp.par.merged.split:
; CHECK2-NEXT: br label [[OMP_REGION_BODY_SPLIT:%.*]]
@@ -5197,13 +5197,13 @@ entry:
; CHECK2-NEXT: store ptr [[A_ADDR]], ptr [[GEP_A_ADDR]], align 8
; CHECK2-NEXT: [[GEP_B:%.*]] = getelementptr { ptr, ptr, ptr }, ptr [[STRUCTARG]], i32 0, i32 2
; CHECK2-NEXT: store ptr [[B]], ptr [[GEP_B]], align 8
-; CHECK2-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B]])
+; CHECK2-NEXT: call void @llvm.lifetime.start.p0(ptr [[B]])
; CHECK2-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @merge_seq_par_use..omp_par, ptr [[STRUCTARG]])
; CHECK2-NEXT: br label [[OMP_PAR_OUTLINED_EXIT:%.*]]
; CHECK2: omp.par.exit:
; CHECK2-NEXT: br label [[ENTRY_SPLIT_SPLIT:%.*]]
; CHECK2: entry.split.split:
-; CHECK2-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK2-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[B]])
; CHECK2-NEXT: ret void
;
;
diff --git a/llvm/test/Transforms/OpenMP/spmdization.ll b/llvm/test/Transforms/OpenMP/spmdization.ll
index e91f160..0272c41 100644
--- a/llvm/test/Transforms/OpenMP/spmdization.ll
+++ b/llvm/test/Transforms/OpenMP/spmdization.ll
@@ -2922,7 +2922,7 @@ declare void @unknown() #7
declare void @unknowni32p(ptr) #7
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr captures(none)) #8
+declare void @llvm.lifetime.start.p0(ptr captures(none)) #8
define weak i32 @__kmpc_target_init(ptr %0, ptr %1) {
; AMDGPU-LABEL: define {{[^@]+}}@__kmpc_target_init
@@ -2958,7 +2958,7 @@ declare void @__kmpc_get_shared_variables(ptr)
declare void @__kmpc_parallel_51(ptr, i32, i32, i32, i32, ptr, ptr, ptr, i64) #9
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr captures(none)) #8
+declare void @llvm.lifetime.end.p0(ptr captures(none)) #8
; Function Attrs: convergent
declare void @spmd_amenable() #6
diff --git a/llvm/test/Transforms/OpenMP/spmdization_constant_prop.ll b/llvm/test/Transforms/OpenMP/spmdization_constant_prop.ll
index 70c0d04..5a7d097 100644
--- a/llvm/test/Transforms/OpenMP/spmdization_constant_prop.ll
+++ b/llvm/test/Transforms/OpenMP/spmdization_constant_prop.ll
@@ -61,7 +61,7 @@ common.ret: ; preds = %user_code.entry, %e
}
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p5(i64 immarg, ptr addrspace(5) captures(none)) #1
+declare void @llvm.lifetime.start.p5(ptr addrspace(5) captures(none)) #1
; Function Attrs: alwaysinline mustprogress nofree norecurse nosync nounwind willreturn memory(none)
define internal void @__omp_outlined__(ptr noalias captures(none) %.global_tid., ptr noalias captures(none) %.bound_tid., ptr nonnull align 4 captures(none) %ng, ptr nonnull align 8 captures(none) %aa) #2 {
@@ -72,7 +72,7 @@ entry:
}
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p5(i64 immarg, ptr addrspace(5) captures(none)) #1
+declare void @llvm.lifetime.end.p5(ptr addrspace(5) captures(none)) #1
; Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(none)
define internal void @__omp_outlined___wrapper(i16 zeroext %0, i32 noundef %1) #3 {
diff --git a/llvm/test/Transforms/OpenMP/spmdization_indirect.ll b/llvm/test/Transforms/OpenMP/spmdization_indirect.ll
index 3c3e1d7..d1e006a 100644
--- a/llvm/test/Transforms/OpenMP/spmdization_indirect.ll
+++ b/llvm/test/Transforms/OpenMP/spmdization_indirect.ll
@@ -1017,7 +1017,7 @@ declare void @unknown() #5
declare void @unknowni32p(ptr) #5
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr captures(none)) #6
+declare void @llvm.lifetime.start.p0(ptr captures(none)) #6
define weak i32 @__kmpc_target_init(ptr %0, ptr %1) {
; AMDGPU-LABEL: define {{[^@]+}}@__kmpc_target_init
@@ -1037,7 +1037,7 @@ declare void @__kmpc_get_shared_variables(ptr)
declare void @__kmpc_parallel_51(ptr, i32, i32, i32, i32, ptr, ptr, ptr, i64) #7
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr captures(none)) #6
+declare void @llvm.lifetime.end.p0(ptr captures(none)) #6
; Function Attrs: convergent
declare void @spmd_amenable() #4
diff --git a/llvm/test/Transforms/OpenMP/spmdization_remarks.ll b/llvm/test/Transforms/OpenMP/spmdization_remarks.ll
index ef36937b..f30e827 100644
--- a/llvm/test/Transforms/OpenMP/spmdization_remarks.ll
+++ b/llvm/test/Transforms/OpenMP/spmdization_remarks.ll
@@ -75,10 +75,10 @@ common.ret: ; preds = %entry, %user_code.e
user_code.entry: ; preds = %entry
%1 = call i32 @__kmpc_global_thread_num(ptr nonnull @3) #3
call void @unknown() #6, !dbg !20
- call void @llvm.lifetime.start.p0(i64 0, ptr nonnull %captured_vars_addrs.i.i) #3
+ call void @llvm.lifetime.start.p0(ptr nonnull %captured_vars_addrs.i.i) #3
%2 = call i32 @__kmpc_global_thread_num(ptr noundef nonnull @13) #3
call void @__kmpc_parallel_51(ptr noundef nonnull @13, i32 %2, i32 noundef 1, i32 noundef -1, i32 noundef -1, ptr noundef @__omp_outlined__2, ptr noundef @__omp_outlined__2_wrapper, ptr noundef nonnull %captured_vars_addrs.i.i, i64 noundef 0) #3, !dbg !23
- call void @llvm.lifetime.end.p0(i64 0, ptr nonnull %captured_vars_addrs.i.i) #3, !dbg !26
+ call void @llvm.lifetime.end.p0(ptr nonnull %captured_vars_addrs.i.i) #3, !dbg !26
call void @unknown() #6, !dbg !27
call void @__kmpc_target_deinit() #3, !dbg !28
br label %common.ret
@@ -119,18 +119,18 @@ common.ret: ; preds = %entry, %user_code.e
user_code.entry: ; preds = %entry
%1 = call i32 @__kmpc_global_thread_num(ptr nonnull @9) #3
- call void @llvm.lifetime.start.p0(i64 0, ptr nonnull %captured_vars_addrs.i2.i) #3
+ call void @llvm.lifetime.start.p0(ptr nonnull %captured_vars_addrs.i2.i) #3
%2 = call i32 @__kmpc_global_thread_num(ptr noundef nonnull @13) #3
call void @__kmpc_parallel_51(ptr noundef nonnull @13, i32 %2, i32 noundef 1, i32 noundef -1, i32 noundef -1, ptr noundef @__omp_outlined__2, ptr noundef @__omp_outlined__2_wrapper, ptr noundef nonnull %captured_vars_addrs.i2.i, i64 noundef 0) #3, !dbg !35
- call void @llvm.lifetime.end.p0(i64 0, ptr nonnull %captured_vars_addrs.i2.i) #3, !dbg !39
- call void @llvm.lifetime.start.p0(i64 0, ptr nonnull %captured_vars_addrs.i2.i) #3
+ call void @llvm.lifetime.end.p0(ptr nonnull %captured_vars_addrs.i2.i) #3, !dbg !39
+ call void @llvm.lifetime.start.p0(ptr nonnull %captured_vars_addrs.i2.i) #3
%3 = call i32 @__kmpc_global_thread_num(ptr noundef nonnull @13) #3
call void @__kmpc_parallel_51(ptr noundef nonnull @13, i32 %3, i32 noundef 1, i32 noundef -1, i32 noundef -1, ptr noundef @__omp_outlined__2, ptr noundef @__omp_outlined__2_wrapper, ptr noundef nonnull %captured_vars_addrs.i2.i, i64 noundef 0) #3, !dbg !40
- call void @llvm.lifetime.end.p0(i64 0, ptr nonnull %captured_vars_addrs.i2.i) #3, !dbg !42
- call void @llvm.lifetime.start.p0(i64 0, ptr nonnull %captured_vars_addrs.i2.i) #3
+ call void @llvm.lifetime.end.p0(ptr nonnull %captured_vars_addrs.i2.i) #3, !dbg !42
+ call void @llvm.lifetime.start.p0(ptr nonnull %captured_vars_addrs.i2.i) #3
%4 = call i32 @__kmpc_global_thread_num(ptr noundef nonnull @13) #3
call void @__kmpc_parallel_51(ptr noundef nonnull @13, i32 %4, i32 noundef 1, i32 noundef -1, i32 noundef -1, ptr noundef @__omp_outlined__2, ptr noundef @__omp_outlined__2_wrapper, ptr noundef nonnull %captured_vars_addrs.i2.i, i64 noundef 0) #3, !dbg !43
- call void @llvm.lifetime.end.p0(i64 0, ptr nonnull %captured_vars_addrs.i2.i) #3, !dbg !45
+ call void @llvm.lifetime.end.p0(ptr nonnull %captured_vars_addrs.i2.i) #3, !dbg !45
call void @spmd_amenable()
call void @__kmpc_target_deinit() #3, !dbg !46
br label %common.ret
@@ -157,10 +157,10 @@ declare void @__kmpc_get_shared_variables(ptr) local_unnamed_addr
declare void @__kmpc_parallel_51(ptr, i32, i32, i32, i32, ptr, ptr, ptr, i64) local_unnamed_addr
; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #5
+declare void @llvm.lifetime.start.p0(ptr nocapture) #5
; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #5
+declare void @llvm.lifetime.end.p0(ptr nocapture) #5
declare void @spmd_amenable() #7
diff --git a/llvm/test/Transforms/PGOProfile/consecutive-zeros.ll b/llvm/test/Transforms/PGOProfile/consecutive-zeros.ll
index a7337d0..014f95f 100644
--- a/llvm/test/Transforms/PGOProfile/consecutive-zeros.ll
+++ b/llvm/test/Transforms/PGOProfile/consecutive-zeros.ll
@@ -48,11 +48,11 @@ for.end6:
ret void
}
-declare void @llvm.lifetime.start(i64, ptr nocapture)
+declare void @llvm.lifetime.start(ptr nocapture)
declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1)
declare i32 @memcmp(ptr, ptr, i64)
declare i32 @bcmp(ptr, ptr, i64)
-declare void @llvm.lifetime.end(i64, ptr nocapture)
+declare void @llvm.lifetime.end(ptr nocapture)
diff --git a/llvm/test/Transforms/PGOProfile/entry_alloca.ll b/llvm/test/Transforms/PGOProfile/entry_alloca.ll
index 580f055..c791e1d 100644
--- a/llvm/test/Transforms/PGOProfile/entry_alloca.ll
+++ b/llvm/test/Transforms/PGOProfile/entry_alloca.ll
@@ -19,8 +19,8 @@
define dso_local double @foo() {
%1 = alloca %struct.A, align 4
%2 = alloca %struct.B, align 8
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %1)
- call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %2)
+ call void @llvm.lifetime.start.p0(ptr nonnull %1)
+ call void @llvm.lifetime.start.p0(ptr nonnull %2)
call void @bar(ptr noundef nonnull %1, ptr noundef nonnull %2)
%3 = load i32, ptr %1, align 4
%4 = icmp sgt i32 %3, 0
@@ -48,8 +48,8 @@ define dso_local double @foo() {
21:
%22 = phi double [ 0.000000e+00, %0 ], [ %18, %9 ]
- call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %2)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %1)
+ call void @llvm.lifetime.end.p0(ptr nonnull %2)
+ call void @llvm.lifetime.end.p0(ptr nonnull %1)
ret double %22
}
diff --git a/llvm/test/Transforms/PGOProfile/memop_size_annotation.ll b/llvm/test/Transforms/PGOProfile/memop_size_annotation.ll
index 3ef185a..9122454 100644
--- a/llvm/test/Transforms/PGOProfile/memop_size_annotation.ll
+++ b/llvm/test/Transforms/PGOProfile/memop_size_annotation.ll
@@ -56,11 +56,11 @@ for.end6:
ret void
}
-declare void @llvm.lifetime.start(i64, ptr nocapture)
+declare void @llvm.lifetime.start(ptr nocapture)
declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1)
declare i32 @memcmp(ptr, ptr, i64)
declare i32 @bcmp(ptr, ptr, i64)
-declare void @llvm.lifetime.end(i64, ptr nocapture)
+declare void @llvm.lifetime.end(ptr nocapture)
diff --git a/llvm/test/Transforms/PGOProfile/memop_size_opt.ll b/llvm/test/Transforms/PGOProfile/memop_size_opt.ll
index c4f749b..f63989a 100644
--- a/llvm/test/Transforms/PGOProfile/memop_size_opt.ll
+++ b/llvm/test/Transforms/PGOProfile/memop_size_opt.ll
@@ -181,14 +181,14 @@ for.end6:
!30 = !{!"VP", i32 1, i64 556, i64 0, i64 99, i64 2, i64 88, i64 3, i64 77, i64 9, i64 72, i64 4, i64 66, i64 5, i64 55, i64 6, i64 44, i64 7, i64 33, i64 8, i64 22}
!31 = !{!"VP", i32 1, i64 556, i64 0, i64 99, i64 2, i64 88, i64 3, i64 77, i64 9, i64 72, i64 4, i64 66, i64 5, i64 55, i64 6, i64 44, i64 7, i64 33, i64 8, i64 22}
-declare void @llvm.lifetime.start(i64, ptr nocapture)
+declare void @llvm.lifetime.start(ptr nocapture)
declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1)
declare i32 @memcmp(ptr, ptr, i64)
declare i32 @bcmp(ptr, ptr, i64)
-declare void @llvm.lifetime.end(i64, ptr nocapture)
+declare void @llvm.lifetime.end(ptr nocapture)
; YAML: --- !Passed
; YAML-NEXT: Pass: pgo-memop-opt
diff --git a/llvm/test/Transforms/PGOProfile/misexpect-branch-correct.ll b/llvm/test/Transforms/PGOProfile/misexpect-branch-correct.ll
index f187d41..0a1efda 100644
--- a/llvm/test/Transforms/PGOProfile/misexpect-branch-correct.ll
+++ b/llvm/test/Transforms/PGOProfile/misexpect-branch-correct.ll
@@ -23,10 +23,10 @@ define i32 @bar() #0 {
entry:
%rando = alloca i32, align 4
%x = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %rando) #4
+ call void @llvm.lifetime.start.p0(ptr %rando) #4
%call = call i32 (...) @buzz()
store i32 %call, ptr %rando, align 4, !tbaa !3
- call void @llvm.lifetime.start.p0(i64 4, ptr %x) #4
+ call void @llvm.lifetime.start.p0(ptr %x) #4
store i32 0, ptr %x, align 4, !tbaa !3
%0 = load i32, ptr %rando, align 4, !tbaa !3
%rem = srem i32 %0, 200000
@@ -52,13 +52,13 @@ if.else: ; preds = %entry
if.end: ; preds = %if.else, %if.then
%2 = load i32, ptr %x, align 4, !tbaa !3
- call void @llvm.lifetime.end.p0(i64 4, ptr %x) #4
- call void @llvm.lifetime.end.p0(i64 4, ptr %rando) #4
+ call void @llvm.lifetime.end.p0(ptr %x) #4
+ call void @llvm.lifetime.end.p0(ptr %rando) #4
ret i32 %2
}
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare i32 @buzz(...) #2
@@ -70,7 +70,7 @@ declare i32 @baz(i32) #2
declare i32 @foo(i32) #2
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+cx8,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { argmemonly nounwind willreturn }
diff --git a/llvm/test/Transforms/PGOProfile/misexpect-branch-overflow.ll b/llvm/test/Transforms/PGOProfile/misexpect-branch-overflow.ll
index 146ad44..68b233e 100644
--- a/llvm/test/Transforms/PGOProfile/misexpect-branch-overflow.ll
+++ b/llvm/test/Transforms/PGOProfile/misexpect-branch-overflow.ll
@@ -20,10 +20,10 @@ define i32 @bar() #0 !dbg !6 {
entry:
%rando = alloca i32, align 4
%x = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %rando) #4, !dbg !9
+ call void @llvm.lifetime.start.p0(ptr %rando) #4, !dbg !9
%call = call i32 (...) @buzz(), !dbg !9
store i32 %call, ptr %rando, align 4, !dbg !9, !tbaa !10
- call void @llvm.lifetime.start.p0(i64 4, ptr %x) #4, !dbg !14
+ call void @llvm.lifetime.start.p0(ptr %x) #4, !dbg !14
store i32 0, ptr %x, align 4, !dbg !14, !tbaa !10
%0 = load i32, ptr %rando, align 4, !dbg !15, !tbaa !10
%rem = srem i32 %0, 200000, !dbg !15
@@ -49,13 +49,13 @@ if.else: ; preds = %entry
if.end: ; preds = %if.else, %if.then
%2 = load i32, ptr %x, align 4, !dbg !19, !tbaa !10
- call void @llvm.lifetime.end.p0(i64 4, ptr %x) #4, !dbg !20
- call void @llvm.lifetime.end.p0(i64 4, ptr %rando) #4, !dbg !20
+ call void @llvm.lifetime.end.p0(ptr %x) #4, !dbg !20
+ call void @llvm.lifetime.end.p0(ptr %rando) #4, !dbg !20
ret i32 %2, !dbg !19
}
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare i32 @buzz(...) #2
@@ -67,7 +67,7 @@ declare i32 @baz(i32) #2
declare i32 @foo(i32) #2
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+cx8,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { argmemonly nounwind willreturn }
diff --git a/llvm/test/Transforms/PGOProfile/misexpect-branch-stripped.ll b/llvm/test/Transforms/PGOProfile/misexpect-branch-stripped.ll
index 18677b7..2f188f5 100644
--- a/llvm/test/Transforms/PGOProfile/misexpect-branch-stripped.ll
+++ b/llvm/test/Transforms/PGOProfile/misexpect-branch-stripped.ll
@@ -33,10 +33,10 @@ define i32 @bar() #0 {
entry:
%rando = alloca i32, align 4
%x = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %rando) #4
+ call void @llvm.lifetime.start.p0(ptr %rando) #4
%call = call i32 (...) @buzz()
store i32 %call, ptr %rando, align 4, !tbaa !3
- call void @llvm.lifetime.start.p0(i64 4, ptr %x) #4
+ call void @llvm.lifetime.start.p0(ptr %x) #4
store i32 0, ptr %x, align 4, !tbaa !3
%0 = load i32, ptr %rando, align 4, !tbaa !3
%rem = srem i32 %0, 200000
@@ -62,13 +62,13 @@ if.else: ; preds = %entry
if.end: ; preds = %if.else, %if.then
%2 = load i32, ptr %x, align 4, !tbaa !3
- call void @llvm.lifetime.end.p0(i64 4, ptr %x) #4
- call void @llvm.lifetime.end.p0(i64 4, ptr %rando) #4
+ call void @llvm.lifetime.end.p0(ptr %x) #4
+ call void @llvm.lifetime.end.p0(ptr %rando) #4
ret i32 %2
}
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare i32 @buzz(...) #2
@@ -80,7 +80,7 @@ declare i32 @baz(i32) #2
declare i32 @foo(i32) #2
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+cx8,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { argmemonly nounwind willreturn }
diff --git a/llvm/test/Transforms/PGOProfile/misexpect-branch-unpredictable.ll b/llvm/test/Transforms/PGOProfile/misexpect-branch-unpredictable.ll
index 1e7e8c1..4add781 100644
--- a/llvm/test/Transforms/PGOProfile/misexpect-branch-unpredictable.ll
+++ b/llvm/test/Transforms/PGOProfile/misexpect-branch-unpredictable.ll
@@ -21,10 +21,10 @@ define i32 @bar() #0 {
entry:
%rando = alloca i32, align 4
%x = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %rando) #3
+ call void @llvm.lifetime.start.p0(ptr %rando) #3
%call = call i32 (...) @buzz()
store i32 %call, ptr %rando, align 4, !tbaa !2
- call void @llvm.lifetime.start.p0(i64 4, ptr %x) #3
+ call void @llvm.lifetime.start.p0(ptr %x) #3
store i32 0, ptr %x, align 4, !tbaa !2
%0 = load i32, ptr %rando, align 4, !tbaa !2
%rem = srem i32 %0, 200000
@@ -49,13 +49,13 @@ if.else: ; preds = %entry
if.end: ; preds = %if.else, %if.then
%2 = load i32, ptr %x, align 4, !tbaa !2
- call void @llvm.lifetime.end.p0(i64 4, ptr %x) #3
- call void @llvm.lifetime.end.p0(i64 4, ptr %rando) #3
+ call void @llvm.lifetime.end.p0(ptr %x) #3
+ call void @llvm.lifetime.end.p0(ptr %rando) #3
ret i32 %2
}
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare i32 @buzz(...) #2
@@ -64,7 +64,7 @@ declare i32 @baz(i32) #2
declare i32 @foo(i32) #2
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+cx8,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { argmemonly nounwind willreturn }
diff --git a/llvm/test/Transforms/PGOProfile/misexpect-branch.ll b/llvm/test/Transforms/PGOProfile/misexpect-branch.ll
index 10c3718..5a7731b 100644
--- a/llvm/test/Transforms/PGOProfile/misexpect-branch.ll
+++ b/llvm/test/Transforms/PGOProfile/misexpect-branch.ll
@@ -37,10 +37,10 @@ define i32 @bar() #0 !dbg !6 {
entry:
%rando = alloca i32, align 4
%x = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %rando) #4, !dbg !9
+ call void @llvm.lifetime.start.p0(ptr %rando) #4, !dbg !9
%call = call i32 (...) @buzz(), !dbg !9
store i32 %call, ptr %rando, align 4, !dbg !9, !tbaa !10
- call void @llvm.lifetime.start.p0(i64 4, ptr %x) #4, !dbg !14
+ call void @llvm.lifetime.start.p0(ptr %x) #4, !dbg !14
store i32 0, ptr %x, align 4, !dbg !14, !tbaa !10
%0 = load i32, ptr %rando, align 4, !dbg !15, !tbaa !10
%rem = srem i32 %0, 200000, !dbg !15
@@ -66,13 +66,13 @@ if.else: ; preds = %entry
if.end: ; preds = %if.else, %if.then
%2 = load i32, ptr %x, align 4, !dbg !19, !tbaa !10
- call void @llvm.lifetime.end.p0(i64 4, ptr %x) #4, !dbg !20
- call void @llvm.lifetime.end.p0(i64 4, ptr %rando) #4, !dbg !20
+ call void @llvm.lifetime.end.p0(ptr %x) #4, !dbg !20
+ call void @llvm.lifetime.end.p0(ptr %rando) #4, !dbg !20
ret i32 %2, !dbg !19
}
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare i32 @buzz(...) #2
@@ -84,7 +84,7 @@ declare i32 @baz(i32) #2
declare i32 @foo(i32) #2
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+cx8,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { argmemonly nounwind willreturn }
diff --git a/llvm/test/Transforms/PGOProfile/misexpect-switch-default.ll b/llvm/test/Transforms/PGOProfile/misexpect-switch-default.ll
index 6e21c08..859ba72 100644
--- a/llvm/test/Transforms/PGOProfile/misexpect-switch-default.ll
+++ b/llvm/test/Transforms/PGOProfile/misexpect-switch-default.ll
@@ -40,7 +40,7 @@ target triple = "x86_64-unknown-linux-gnu"
define dso_local void @init_arry() #0 {
entry:
%i = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #6
+ call void @llvm.lifetime.start.p0(ptr %i) #6
store i32 0, ptr %i, align 4, !tbaa !4
br label %for.cond
@@ -65,12 +65,12 @@ for.inc: ; preds = %for.body
br label %for.cond
for.end: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #6
+ call void @llvm.lifetime.end.p0(ptr %i) #6
ret void
}
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
; Function Attrs: nounwind readnone speculatable willreturn
declare void @llvm.dbg.declare(metadata, metadata, metadata) #2
@@ -79,7 +79,7 @@ declare void @llvm.dbg.declare(metadata, metadata, metadata) #2
declare dso_local i32 @rand() #3
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
; Function Attrs: nounwind uwtable
define dso_local i32 @main() #0 {
@@ -90,9 +90,9 @@ entry:
%condition = alloca i32, align 4
store i32 0, ptr %retval, align 4
call void @init_arry()
- call void @llvm.lifetime.start.p0(i64 4, ptr %val) #6
+ call void @llvm.lifetime.start.p0(ptr %val) #6
store i32 0, ptr %val, align 4, !tbaa !4
- call void @llvm.lifetime.start.p0(i64 4, ptr %j) #6
+ call void @llvm.lifetime.start.p0(ptr %j) #6
store i32 0, ptr %j, align 4, !tbaa !4
br label %for.cond
@@ -102,7 +102,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- call void @llvm.lifetime.start.p0(i64 4, ptr %condition) #6
+ call void @llvm.lifetime.start.p0(ptr %condition) #6
%call = call i32 @rand() #6
%rem = srem i32 %call, 5
store i32 %rem, ptr %condition, align 4, !tbaa !4
@@ -138,7 +138,7 @@ sw.default: ; preds = %for.body
unreachable
sw.epilog: ; preds = %sw.bb3, %sw.bb2, %sw.bb
- call void @llvm.lifetime.end.p0(i64 4, ptr %condition) #6
+ call void @llvm.lifetime.end.p0(ptr %condition) #6
br label %for.inc
for.inc: ; preds = %sw.epilog
@@ -148,8 +148,8 @@ for.inc: ; preds = %sw.epilog
br label %for.cond
for.end: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %j) #6
- call void @llvm.lifetime.end.p0(i64 4, ptr %val) #6
+ call void @llvm.lifetime.end.p0(ptr %j) #6
+ call void @llvm.lifetime.end.p0(ptr %val) #6
ret i32 0
}
diff --git a/llvm/test/Transforms/PGOProfile/misexpect-switch.ll b/llvm/test/Transforms/PGOProfile/misexpect-switch.ll
index ebecee1..242d5b8 100644
--- a/llvm/test/Transforms/PGOProfile/misexpect-switch.ll
+++ b/llvm/test/Transforms/PGOProfile/misexpect-switch.ll
@@ -43,7 +43,7 @@ target triple = "x86_64-unknown-linux-gnu"
define dso_local void @init_arry() #0 !dbg !21 {
entry:
%i = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #6, !dbg !26
+ call void @llvm.lifetime.start.p0(ptr %i) #6, !dbg !26
call void @llvm.dbg.declare(metadata ptr %i, metadata !25, metadata !DIExpression()), !dbg !27
store i32 0, ptr %i, align 4, !dbg !28, !tbaa !30
br label %for.cond, !dbg !34
@@ -69,12 +69,12 @@ for.inc: ; preds = %for.body
br label %for.cond, !dbg !47, !llvm.loop !48
for.end: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #6, !dbg !50
+ call void @llvm.lifetime.end.p0(ptr %i) #6, !dbg !50
ret void, !dbg !50
}
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
; Function Attrs: nounwind readnone speculatable willreturn
declare void @llvm.dbg.declare(metadata, metadata, metadata) #2
@@ -83,7 +83,7 @@ declare void @llvm.dbg.declare(metadata, metadata, metadata) #2
declare dso_local i32 @rand() #3
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
; Function Attrs: nounwind uwtable
define dso_local i32 @main() #0 !dbg !51 {
@@ -94,10 +94,10 @@ entry:
%condition = alloca i32, align 4
store i32 0, ptr %retval, align 4
call void @init_arry(), !dbg !62
- call void @llvm.lifetime.start.p0(i64 4, ptr %val) #6, !dbg !63
+ call void @llvm.lifetime.start.p0(ptr %val) #6, !dbg !63
call void @llvm.dbg.declare(metadata ptr %val, metadata !55, metadata !DIExpression()), !dbg !64
store i32 0, ptr %val, align 4, !dbg !64, !tbaa !30
- call void @llvm.lifetime.start.p0(i64 4, ptr %j) #6, !dbg !65
+ call void @llvm.lifetime.start.p0(ptr %j) #6, !dbg !65
call void @llvm.dbg.declare(metadata ptr %j, metadata !56, metadata !DIExpression()), !dbg !66
store i32 0, ptr %j, align 4, !dbg !67, !tbaa !30
br label %for.cond, !dbg !68
@@ -108,7 +108,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.end, !dbg !71
for.body: ; preds = %for.cond
- call void @llvm.lifetime.start.p0(i64 4, ptr %condition) #6, !dbg !72
+ call void @llvm.lifetime.start.p0(ptr %condition) #6, !dbg !72
call void @llvm.dbg.declare(metadata ptr %condition, metadata !57, metadata !DIExpression()), !dbg !73
%call = call i32 @rand() #6, !dbg !74
%rem = srem i32 %call, 5, !dbg !75
@@ -145,7 +145,7 @@ sw.default: ; preds = %for.body
unreachable, !dbg !87
sw.epilog: ; preds = %sw.bb3, %sw.bb2, %sw.bb
- call void @llvm.lifetime.end.p0(i64 4, ptr %condition) #6, !dbg !88
+ call void @llvm.lifetime.end.p0(ptr %condition) #6, !dbg !88
br label %for.inc, !dbg !89
for.inc: ; preds = %sw.epilog
@@ -155,8 +155,8 @@ for.inc: ; preds = %sw.epilog
br label %for.cond, !dbg !91, !llvm.loop !92
for.end: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %j) #6, !dbg !94
- call void @llvm.lifetime.end.p0(i64 4, ptr %val) #6, !dbg !94
+ call void @llvm.lifetime.end.p0(ptr %j) #6, !dbg !94
+ call void @llvm.lifetime.end.p0(ptr %val) #6, !dbg !94
ret i32 0, !dbg !95
}
diff --git a/llvm/test/Transforms/PartialInlining/switch_stmt.ll b/llvm/test/Transforms/PartialInlining/switch_stmt.ll
index 3f43369..d99fbba 100644
--- a/llvm/test/Transforms/PartialInlining/switch_stmt.ll
+++ b/llvm/test/Transforms/PartialInlining/switch_stmt.ll
@@ -62,7 +62,7 @@ define dso_local signext i32 @caller(i32 signext %c) !prof !30 {
; CHECK-LABEL: @caller(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RC_I:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[RC_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[RC_I]])
; CHECK-NEXT: store i32 0, ptr [[RC_I]], align 4
; CHECK-NEXT: switch i32 [[C:%.*]], label [[SW_DEFAULT_I:%.*]] [
; CHECK-NEXT: i32 0, label [[CODEREPL_I:%.*]]
@@ -83,7 +83,7 @@ define dso_local signext i32 @caller(i32 signext %c) !prof !30 {
; CHECK-NEXT: br label [[CALLEE_1_EXIT]]
; CHECK: callee.1.exit:
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[RC_I]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[RC_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[RC_I]])
;
entry:
%0 = call signext i32 @callee(i32 signext %c, i32 signext %c)
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/hoist-runtime-checks.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/hoist-runtime-checks.ll
index b4b12da..141503d 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/hoist-runtime-checks.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/hoist-runtime-checks.ll
@@ -68,9 +68,9 @@ entry:
store ptr %array, ptr %array.addr, align 8
store i32 %count, ptr %count.addr, align 4
store i32 %n, ptr %n.addr, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %sum) #3
+ call void @llvm.lifetime.start.p0(ptr %sum) #3
store i32 0, ptr %sum, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #3
+ call void @llvm.lifetime.start.p0(ptr %i) #3
store i32 0, ptr %i, align 4
br label %for.cond
@@ -81,7 +81,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.cond.cleanup
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #3
+ call void @llvm.lifetime.end.p0(ptr %i) #3
br label %for.end
for.body: ; preds = %for.cond
@@ -113,7 +113,7 @@ for.inc: ; preds = %if.end
for.end: ; preds = %for.cond.cleanup
%9 = load i32, ptr %sum, align 4
- call void @llvm.lifetime.end.p0(i64 4, ptr %sum)
+ call void @llvm.lifetime.end.p0(ptr %sum)
ret i32 %9
}
@@ -184,9 +184,9 @@ entry:
%1 = getelementptr inbounds { ptr, i64 }, ptr %s, i32 0, i32 1
store i64 %s.coerce1, ptr %1, align 8
store i64 %n, ptr %n.addr, align 8
- call void @llvm.lifetime.start.p0(i64 4, ptr %ret) #7
+ call void @llvm.lifetime.start.p0(ptr %ret) #7
store i32 0, ptr %ret, align 4
- call void @llvm.lifetime.start.p0(i64 8, ptr %i) #7
+ call void @llvm.lifetime.start.p0(ptr %i) #7
store i64 0, ptr %i, align 8
br label %for.cond
@@ -197,7 +197,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.cond.cleanup
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 8, ptr %i) #7
+ call void @llvm.lifetime.end.p0(ptr %i) #7
br label %for.end
for.body: ; preds = %for.cond
@@ -217,7 +217,7 @@ for.inc: ; preds = %for.body
for.end: ; preds = %for.cond.cleanup
%8 = load i32, ptr %ret, align 4
- call void @llvm.lifetime.end.p0(i64 4, ptr %ret)
+ call void @llvm.lifetime.end.p0(ptr %ret)
ret i32 %8
}
@@ -283,11 +283,11 @@ entry:
ret i64 %0
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @llvm.trap()
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
;.
; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/hoisting-sinking-required-for-vectorization.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/hoisting-sinking-required-for-vectorization.ll
index f583a61..e74bf592 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/hoisting-sinking-required-for-vectorization.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/hoisting-sinking-required-for-vectorization.ll
@@ -89,7 +89,7 @@ entry:
%i = alloca i32, align 4
store ptr %X, ptr %X.addr, align 8
store ptr %Y, ptr %Y.addr, align 8
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2
+ call void @llvm.lifetime.start.p0(ptr %i) #2
store i32 0, ptr %i, align 4
br label %for.cond
@@ -99,7 +99,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.cond.cleanup
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2
+ call void @llvm.lifetime.end.p0(ptr %i) #2
br label %for.end
for.body: ; preds = %for.cond
@@ -237,6 +237,6 @@ exit:
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll
index 089511d..9679806 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll
@@ -408,7 +408,7 @@ entry:
store i32 %i, ptr %i.addr, align 4
store ptr %A, ptr %A.addr, align 8
store ptr %B, ptr %B.addr, align 8
- call void @llvm.lifetime.start.p0(i64 4, ptr %j) #3
+ call void @llvm.lifetime.start.p0(ptr %j) #3
store i32 0, ptr %j, align 4
br label %for.cond
@@ -419,11 +419,11 @@ for.cond: ; preds = %for.inc12, %entry
for.cond.cleanup: ; preds = %for.cond
store i32 2, ptr %cleanup.dest.slot, align 4
- call void @llvm.lifetime.end.p0(i64 4, ptr %j) #3
+ call void @llvm.lifetime.end.p0(ptr %j) #3
br label %for.end14
for.body: ; preds = %for.cond
- call void @llvm.lifetime.start.p0(i64 4, ptr %k) #3
+ call void @llvm.lifetime.start.p0(ptr %k) #3
store i32 0, ptr %k, align 4
br label %for.cond1
@@ -435,7 +435,7 @@ for.cond1: ; preds = %for.inc, %for.body
for.cond.cleanup3: ; preds = %for.cond1
store i32 5, ptr %cleanup.dest.slot, align 4
- call void @llvm.lifetime.end.p0(i64 4, ptr %k) #3
+ call void @llvm.lifetime.end.p0(ptr %k) #3
br label %for.end
for.body4: ; preds = %for.cond1
@@ -501,13 +501,13 @@ for.end14: ; preds = %for.cond.cleanup
}
; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
; Function Attrs: inaccessiblememonly nofree nosync nounwind willreturn
declare void @llvm.assume(i1 noundef) #2
; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
; Function Attrs: nounwind ssp uwtable mustprogress
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/predicated-reduction.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/predicated-reduction.ll
index c7098d2..e8709a5 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/predicated-reduction.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/predicated-reduction.ll
@@ -108,13 +108,13 @@ entry:
store ptr %samples, ptr %samples.addr, align 8
store double %Y, ptr %Y.addr, align 8
store double %Z, ptr %Z.addr, align 8
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2
- call void @llvm.lifetime.start.p0(i64 4, ptr %block) #2
- call void @llvm.lifetime.start.p0(i64 8, ptr %rngVal) #2
- call void @llvm.lifetime.start.p0(i64 8, ptr %callValue) #2
- call void @llvm.lifetime.start.p0(i64 8, ptr %v0) #2
+ call void @llvm.lifetime.start.p0(ptr %i) #2
+ call void @llvm.lifetime.start.p0(ptr %block) #2
+ call void @llvm.lifetime.start.p0(ptr %rngVal) #2
+ call void @llvm.lifetime.start.p0(ptr %callValue) #2
+ call void @llvm.lifetime.start.p0(ptr %v0) #2
store double 0.000000e+00, ptr %v0, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr %v1) #2
+ call void @llvm.lifetime.start.p0(ptr %v1) #2
store double 0.000000e+00, ptr %v1, align 8
store i32 0, ptr %i, align 4
br label %for.cond
@@ -169,12 +169,12 @@ for.end: ; preds = %for.cond
%15 = load double, ptr %v0, align 8
%16 = load double, ptr %v1, align 8
%add5 = fadd fast double %15, %16
- call void @llvm.lifetime.end.p0(i64 8, ptr %v1) #2
- call void @llvm.lifetime.end.p0(i64 8, ptr %v0) #2
- call void @llvm.lifetime.end.p0(i64 8, ptr %callValue) #2
- call void @llvm.lifetime.end.p0(i64 8, ptr %rngVal) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr %block) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2
+ call void @llvm.lifetime.end.p0(ptr %v1) #2
+ call void @llvm.lifetime.end.p0(ptr %v0) #2
+ call void @llvm.lifetime.end.p0(ptr %callValue) #2
+ call void @llvm.lifetime.end.p0(ptr %rngVal) #2
+ call void @llvm.lifetime.end.p0(ptr %block) #2
+ call void @llvm.lifetime.end.p0(ptr %i) #2
ret double %add5
}
@@ -305,13 +305,13 @@ entry:
store ptr %samples, ptr %samples.addr, align 8
store double %Y, ptr %Y.addr, align 8
store double %Z, ptr %Z.addr, align 8
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #4
- call void @llvm.lifetime.start.p0(i64 4, ptr %block) #4
- call void @llvm.lifetime.start.p0(i64 8, ptr %rngVal) #4
- call void @llvm.lifetime.start.p0(i64 8, ptr %callValue) #4
- call void @llvm.lifetime.start.p0(i64 8, ptr %v0) #4
+ call void @llvm.lifetime.start.p0(ptr %i) #4
+ call void @llvm.lifetime.start.p0(ptr %block) #4
+ call void @llvm.lifetime.start.p0(ptr %rngVal) #4
+ call void @llvm.lifetime.start.p0(ptr %callValue) #4
+ call void @llvm.lifetime.start.p0(ptr %v0) #4
store double 0.000000e+00, ptr %v0, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr %v1) #4
+ call void @llvm.lifetime.start.p0(ptr %v1) #4
store double 0.000000e+00, ptr %v1, align 8
store i32 0, ptr %block, align 4
br label %for.cond
@@ -389,19 +389,19 @@ for.end10: ; preds = %for.cond
%21 = load double, ptr %v0, align 8
%22 = load double, ptr %v1, align 8
%add11 = fadd fast double %21, %22
- call void @llvm.lifetime.end.p0(i64 8, ptr %v1) #4
- call void @llvm.lifetime.end.p0(i64 8, ptr %v0) #4
- call void @llvm.lifetime.end.p0(i64 8, ptr %callValue) #4
- call void @llvm.lifetime.end.p0(i64 8, ptr %rngVal) #4
- call void @llvm.lifetime.end.p0(i64 4, ptr %block) #4
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #4
+ call void @llvm.lifetime.end.p0(ptr %v1) #4
+ call void @llvm.lifetime.end.p0(ptr %v0) #4
+ call void @llvm.lifetime.end.p0(ptr %callValue) #4
+ call void @llvm.lifetime.end.p0(ptr %rngVal) #4
+ call void @llvm.lifetime.end.p0(ptr %block) #4
+ call void @llvm.lifetime.end.p0(ptr %i) #4
ret double %add11
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @resample(i32 noundef, ptr noundef)
declare double @llvm.exp2.f64(double)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
;.
; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/quant_4x4.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/quant_4x4.ll
index d55559d..258ef63 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/quant_4x4.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/quant_4x4.ll
@@ -525,9 +525,9 @@ entry:
store ptr %dct, ptr %dct.addr, align 8
store ptr %mf, ptr %mf.addr, align 8
store ptr %bias, ptr %bias.addr, align 8
- call void @llvm.lifetime.start.p0(i64 4, ptr %nz) #2
+ call void @llvm.lifetime.start.p0(ptr %nz) #2
store i32 0, ptr %nz, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2
+ call void @llvm.lifetime.start.p0(ptr %i) #2
store i32 0, ptr %i, align 4
br label %for.cond
@@ -537,7 +537,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.cond.cleanup
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2
+ call void @llvm.lifetime.end.p0(ptr %i) #2
br label %for.end
for.body: ; preds = %for.cond
@@ -636,13 +636,13 @@ for.end: ; preds = %for.cond.cleanup
%lnot = xor i1 %tobool, true
%lnot34 = xor i1 %lnot, true
%lnot.ext = zext i1 %lnot34 to i32
- call void @llvm.lifetime.end.p0(i64 4, ptr %nz) #2
+ call void @llvm.lifetime.end.p0(ptr %nz) #2
ret i32 %lnot.ext
}
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/reduce_muladd.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/reduce_muladd.ll
new file mode 100644
index 0000000..ca78ff7
--- /dev/null
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/reduce_muladd.ll
@@ -0,0 +1,92 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
+; RUN: opt -S -O3 < %s | FileCheck %s
+
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32"
+target triple = "aarch64"
+
+; This function (a 16-element reduction of a[i] * b[i]) should be vectorized successfully.
+
+define dso_local nofpclass(nan inf) float @vmlaq(ptr noundef %0, ptr noundef %1) #0 {
+; CHECK-LABEL: define dso_local nofpclass(nan inf) float @vmlaq
+; CHECK-SAME: (ptr noundef readonly captures(none) [[TMP0:%.*]], ptr noundef readonly captures(none) [[TMP1:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x float>, ptr [[TMP0]], align 4, !tbaa [[TBAA4:![0-9]+]]
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x float>, ptr [[TMP1]], align 4, !tbaa [[TBAA4]]
+; CHECK-NEXT: [[TMP5:%.*]] = fmul fast <16 x float> [[TMP4]], [[TMP3]]
+; CHECK-NEXT: [[TMP6:%.*]] = tail call fast float @llvm.vector.reduce.fadd.v16f32(float 0.000000e+00, <16 x float> [[TMP5]])
+; CHECK-NEXT: ret float [[TMP6]]
+;
+ %3 = alloca ptr, align 8
+ %4 = alloca ptr, align 8
+ %5 = alloca float, align 4
+ %6 = alloca i32, align 4
+ store ptr %0, ptr %3, align 8, !tbaa !4
+ store ptr %1, ptr %4, align 8, !tbaa !4
+ call void @llvm.lifetime.start.p0(ptr %5) #2
+ store float 0.000000e+00, ptr %5, align 4, !tbaa !9
+ call void @llvm.lifetime.start.p0(ptr %6) #2
+ store i32 0, ptr %6, align 4, !tbaa !11
+ br label %7
+
+7: ; preds = %25, %2
+ %8 = load i32, ptr %6, align 4, !tbaa !11
+ %9 = icmp slt i32 %8, 16
+ br i1 %9, label %11, label %10
+
+10: ; preds = %7
+ call void @llvm.lifetime.end.p0(ptr %6) #2
+ br label %28
+
+11: ; preds = %7
+ %12 = load ptr, ptr %3, align 8, !tbaa !4
+ %13 = load i32, ptr %6, align 4, !tbaa !11
+ %14 = sext i32 %13 to i64
+ %15 = getelementptr inbounds float, ptr %12, i64 %14
+ %16 = load float, ptr %15, align 4, !tbaa !9
+ %17 = load ptr, ptr %4, align 8, !tbaa !4
+ %18 = load i32, ptr %6, align 4, !tbaa !11
+ %19 = sext i32 %18 to i64
+ %20 = getelementptr inbounds float, ptr %17, i64 %19
+ %21 = load float, ptr %20, align 4, !tbaa !9
+ %22 = fmul fast float %16, %21
+ %23 = load float, ptr %5, align 4, !tbaa !9
+ %24 = fadd fast float %23, %22
+ store float %24, ptr %5, align 4, !tbaa !9
+ br label %25
+
+25: ; preds = %11
+ %26 = load i32, ptr %6, align 4, !tbaa !11
+ %27 = add nsw i32 %26, 1
+ store i32 %27, ptr %6, align 4, !tbaa !11
+ br label %7, !llvm.loop !13
+
+28: ; preds = %10
+ %29 = load float, ptr %5, align 4, !tbaa !9
+ call void @llvm.lifetime.end.p0(ptr %5) #2
+ ret float %29
+}
+
+declare void @llvm.lifetime.start.p0(ptr captures(none)) #1
+declare void @llvm.lifetime.end.p0(ptr captures(none)) #1
+
+attributes #0 = { nounwind uwtable "approx-func-fp-math"="true" "frame-pointer"="non-leaf" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+fp-armv8,+neon,+v8a,-fmv" "unsafe-fp-math"="true" }
+attributes #1 = { nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) }
+attributes #2 = { nounwind }
+
+!llvm.module.flags = !{!0, !1, !2}
+!llvm.ident = !{!3}
+
+!0 = !{i32 1, !"wchar_size", i32 4}
+!1 = !{i32 7, !"uwtable", i32 2}
+!2 = !{i32 7, !"frame-pointer", i32 1}
+!3 = !{!"clang version 22.0.0git"}
+!4 = !{!5, !5, i64 0}
+!5 = !{!"p1 float", !6, i64 0}
+!6 = !{!"any pointer", !7, i64 0}
+!7 = !{!"omnipotent char", !8, i64 0}
+!8 = !{!"Simple C/C++ TBAA"}
+!9 = !{!10, !10, i64 0}
+!10 = !{!"float", !7, i64 0}
+!11 = !{!12, !12, i64 0}
+!12 = !{!"int", !7, i64 0}
+!13 = distinct !{!13, !14}
+!14 = !{!"llvm.loop.mustprogress"}
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/reduce_submuladd.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/reduce_submuladd.ll
new file mode 100644
index 0000000..fb958b8
--- /dev/null
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/reduce_submuladd.ll
@@ -0,0 +1,274 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
+; RUN: opt -S -O3 < %s | FileCheck %s
+
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32"
+target triple = "aarch64"
+
+; This function (a more complex reduction of (a[i] - b[i]) * (a[i] - b[i])) should be vectorized successfully.
+
+define dso_local noundef nofpclass(nan inf) float @_Z4testPKfS0_ii(ptr noundef %0, ptr noundef %1, i32 noundef %2, i32 noundef %3) #0 {
+; CHECK-LABEL: define dso_local noundef nofpclass(nan inf) float @_Z4testPKfS0_ii
+; CHECK-SAME: (ptr noundef readonly captures(none) [[TMP0:%.*]], ptr noundef readonly captures(none) [[TMP1:%.*]], i32 noundef [[TMP2:%.*]], i32 noundef [[TMP3:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: .preheader.i:
+; CHECK-NEXT: [[TMP4:%.*]] = sext i32 [[TMP3]] to i64
+; CHECK-NEXT: [[TMP5:%.*]] = sext i32 [[TMP2]] to i64
+; CHECK-NEXT: [[TMP6:%.*]] = load <20 x float>, ptr [[TMP0]], align 4, !tbaa [[TBAA4:![0-9]+]]
+; CHECK-NEXT: [[TMP7:%.*]] = load <20 x float>, ptr [[TMP1]], align 4, !tbaa [[TBAA4]]
+; CHECK-NEXT: [[TMP8:%.*]] = fsub fast <20 x float> [[TMP6]], [[TMP7]]
+; CHECK-NEXT: [[TMP9:%.*]] = fmul fast <20 x float> [[TMP8]], [[TMP8]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 80
+; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[TMP10]], align 4, !tbaa [[TBAA4]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP1]], i64 80
+; CHECK-NEXT: [[TMP13:%.*]] = load float, ptr [[TMP12]], align 4, !tbaa [[TBAA4]]
+; CHECK-NEXT: [[TMP14:%.*]] = fsub fast float [[TMP11]], [[TMP13]]
+; CHECK-NEXT: [[TMP15:%.*]] = fmul fast float [[TMP14]], [[TMP14]]
+; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[TMP0]], i64 [[TMP5]]
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i64 [[TMP4]]
+; CHECK-NEXT: [[OP_RDX:%.*]] = tail call fast float @llvm.vector.reduce.fadd.v20f32(float [[TMP15]], <20 x float> [[TMP9]])
+; CHECK-NEXT: [[TMP18:%.*]] = load <20 x float>, ptr [[TMP16]], align 4, !tbaa [[TBAA4]]
+; CHECK-NEXT: [[TMP19:%.*]] = load <20 x float>, ptr [[TMP17]], align 4, !tbaa [[TBAA4]]
+; CHECK-NEXT: [[TMP20:%.*]] = fsub fast <20 x float> [[TMP18]], [[TMP19]]
+; CHECK-NEXT: [[TMP21:%.*]] = fmul fast <20 x float> [[TMP20]], [[TMP20]]
+; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP16]], i64 80
+; CHECK-NEXT: [[TMP23:%.*]] = load float, ptr [[TMP22]], align 4, !tbaa [[TBAA4]]
+; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP17]], i64 80
+; CHECK-NEXT: [[TMP25:%.*]] = load float, ptr [[TMP24]], align 4, !tbaa [[TBAA4]]
+; CHECK-NEXT: [[TMP26:%.*]] = fsub fast float [[TMP23]], [[TMP25]]
+; CHECK-NEXT: [[TMP27:%.*]] = fmul fast float [[TMP26]], [[TMP26]]
+; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP5]]
+; CHECK-NEXT: [[TMP29:%.*]] = getelementptr inbounds float, ptr [[TMP17]], i64 [[TMP4]]
+; CHECK-NEXT: [[OP_RDX_1:%.*]] = tail call fast float @llvm.vector.reduce.fadd.v20f32(float [[TMP27]], <20 x float> [[TMP21]])
+; CHECK-NEXT: [[OP_RDX3_1:%.*]] = fadd fast float [[OP_RDX_1]], [[OP_RDX]]
+; CHECK-NEXT: [[TMP30:%.*]] = load <20 x float>, ptr [[TMP28]], align 4, !tbaa [[TBAA4]]
+; CHECK-NEXT: [[TMP31:%.*]] = load <20 x float>, ptr [[TMP29]], align 4, !tbaa [[TBAA4]]
+; CHECK-NEXT: [[TMP32:%.*]] = fsub fast <20 x float> [[TMP30]], [[TMP31]]
+; CHECK-NEXT: [[TMP33:%.*]] = fmul fast <20 x float> [[TMP32]], [[TMP32]]
+; CHECK-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP28]], i64 80
+; CHECK-NEXT: [[TMP35:%.*]] = load float, ptr [[TMP34]], align 4, !tbaa [[TBAA4]]
+; CHECK-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP29]], i64 80
+; CHECK-NEXT: [[TMP37:%.*]] = load float, ptr [[TMP36]], align 4, !tbaa [[TBAA4]]
+; CHECK-NEXT: [[TMP38:%.*]] = fsub fast float [[TMP35]], [[TMP37]]
+; CHECK-NEXT: [[TMP39:%.*]] = fmul fast float [[TMP38]], [[TMP38]]
+; CHECK-NEXT: [[TMP40:%.*]] = getelementptr inbounds float, ptr [[TMP28]], i64 [[TMP5]]
+; CHECK-NEXT: [[TMP41:%.*]] = getelementptr inbounds float, ptr [[TMP29]], i64 [[TMP4]]
+; CHECK-NEXT: [[OP_RDX_2:%.*]] = tail call fast float @llvm.vector.reduce.fadd.v20f32(float [[TMP39]], <20 x float> [[TMP33]])
+; CHECK-NEXT: [[OP_RDX3_2:%.*]] = fadd fast float [[OP_RDX_2]], [[OP_RDX3_1]]
+; CHECK-NEXT: [[TMP42:%.*]] = load <20 x float>, ptr [[TMP40]], align 4, !tbaa [[TBAA4]]
+; CHECK-NEXT: [[TMP43:%.*]] = load <20 x float>, ptr [[TMP41]], align 4, !tbaa [[TBAA4]]
+; CHECK-NEXT: [[TMP44:%.*]] = fsub fast <20 x float> [[TMP42]], [[TMP43]]
+; CHECK-NEXT: [[TMP45:%.*]] = fmul fast <20 x float> [[TMP44]], [[TMP44]]
+; CHECK-NEXT: [[TMP46:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP40]], i64 80
+; CHECK-NEXT: [[TMP47:%.*]] = load float, ptr [[TMP46]], align 4, !tbaa [[TBAA4]]
+; CHECK-NEXT: [[TMP48:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP41]], i64 80
+; CHECK-NEXT: [[TMP49:%.*]] = load float, ptr [[TMP48]], align 4, !tbaa [[TBAA4]]
+; CHECK-NEXT: [[TMP50:%.*]] = fsub fast float [[TMP47]], [[TMP49]]
+; CHECK-NEXT: [[TMP51:%.*]] = fmul fast float [[TMP50]], [[TMP50]]
+; CHECK-NEXT: [[TMP52:%.*]] = getelementptr inbounds float, ptr [[TMP40]], i64 [[TMP5]]
+; CHECK-NEXT: [[TMP53:%.*]] = getelementptr inbounds float, ptr [[TMP41]], i64 [[TMP4]]
+; CHECK-NEXT: [[OP_RDX_3:%.*]] = tail call fast float @llvm.vector.reduce.fadd.v20f32(float [[TMP51]], <20 x float> [[TMP45]])
+; CHECK-NEXT: [[OP_RDX3_3:%.*]] = fadd fast float [[OP_RDX_3]], [[OP_RDX3_2]]
+; CHECK-NEXT: [[TMP54:%.*]] = load <20 x float>, ptr [[TMP52]], align 4, !tbaa [[TBAA4]]
+; CHECK-NEXT: [[TMP55:%.*]] = load <20 x float>, ptr [[TMP53]], align 4, !tbaa [[TBAA4]]
+; CHECK-NEXT: [[TMP56:%.*]] = fsub fast <20 x float> [[TMP54]], [[TMP55]]
+; CHECK-NEXT: [[TMP57:%.*]] = fmul fast <20 x float> [[TMP56]], [[TMP56]]
+; CHECK-NEXT: [[TMP58:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP52]], i64 80
+; CHECK-NEXT: [[TMP59:%.*]] = load float, ptr [[TMP58]], align 4, !tbaa [[TBAA4]]
+; CHECK-NEXT: [[TMP60:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP53]], i64 80
+; CHECK-NEXT: [[TMP61:%.*]] = load float, ptr [[TMP60]], align 4, !tbaa [[TBAA4]]
+; CHECK-NEXT: [[TMP62:%.*]] = fsub fast float [[TMP59]], [[TMP61]]
+; CHECK-NEXT: [[TMP63:%.*]] = fmul fast float [[TMP62]], [[TMP62]]
+; CHECK-NEXT: [[TMP64:%.*]] = getelementptr inbounds float, ptr [[TMP52]], i64 [[TMP5]]
+; CHECK-NEXT: [[TMP65:%.*]] = getelementptr inbounds float, ptr [[TMP53]], i64 [[TMP4]]
+; CHECK-NEXT: [[OP_RDX_4:%.*]] = tail call fast float @llvm.vector.reduce.fadd.v20f32(float [[TMP63]], <20 x float> [[TMP57]])
+; CHECK-NEXT: [[OP_RDX3_4:%.*]] = fadd fast float [[OP_RDX_4]], [[OP_RDX3_3]]
+; CHECK-NEXT: [[TMP66:%.*]] = load <20 x float>, ptr [[TMP64]], align 4, !tbaa [[TBAA4]]
+; CHECK-NEXT: [[TMP67:%.*]] = load <20 x float>, ptr [[TMP65]], align 4, !tbaa [[TBAA4]]
+; CHECK-NEXT: [[TMP68:%.*]] = fsub fast <20 x float> [[TMP66]], [[TMP67]]
+; CHECK-NEXT: [[TMP69:%.*]] = fmul fast <20 x float> [[TMP68]], [[TMP68]]
+; CHECK-NEXT: [[TMP70:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP64]], i64 80
+; CHECK-NEXT: [[TMP71:%.*]] = load float, ptr [[TMP70]], align 4, !tbaa [[TBAA4]]
+; CHECK-NEXT: [[TMP72:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP65]], i64 80
+; CHECK-NEXT: [[TMP73:%.*]] = load float, ptr [[TMP72]], align 4, !tbaa [[TBAA4]]
+; CHECK-NEXT: [[TMP74:%.*]] = fsub fast float [[TMP71]], [[TMP73]]
+; CHECK-NEXT: [[TMP75:%.*]] = fmul fast float [[TMP74]], [[TMP74]]
+; CHECK-NEXT: [[TMP76:%.*]] = getelementptr inbounds float, ptr [[TMP64]], i64 [[TMP5]]
+; CHECK-NEXT: [[TMP77:%.*]] = getelementptr inbounds float, ptr [[TMP65]], i64 [[TMP4]]
+; CHECK-NEXT: [[OP_RDX_5:%.*]] = tail call fast float @llvm.vector.reduce.fadd.v20f32(float [[TMP75]], <20 x float> [[TMP69]])
+; CHECK-NEXT: [[OP_RDX3_5:%.*]] = fadd fast float [[OP_RDX_5]], [[OP_RDX3_4]]
+; CHECK-NEXT: [[TMP78:%.*]] = load <20 x float>, ptr [[TMP76]], align 4, !tbaa [[TBAA4]]
+; CHECK-NEXT: [[TMP79:%.*]] = load <20 x float>, ptr [[TMP77]], align 4, !tbaa [[TBAA4]]
+; CHECK-NEXT: [[TMP80:%.*]] = fsub fast <20 x float> [[TMP78]], [[TMP79]]
+; CHECK-NEXT: [[TMP81:%.*]] = fmul fast <20 x float> [[TMP80]], [[TMP80]]
+; CHECK-NEXT: [[TMP82:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP76]], i64 80
+; CHECK-NEXT: [[TMP83:%.*]] = load float, ptr [[TMP82]], align 4, !tbaa [[TBAA4]]
+; CHECK-NEXT: [[TMP84:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP77]], i64 80
+; CHECK-NEXT: [[TMP85:%.*]] = load float, ptr [[TMP84]], align 4, !tbaa [[TBAA4]]
+; CHECK-NEXT: [[TMP86:%.*]] = fsub fast float [[TMP83]], [[TMP85]]
+; CHECK-NEXT: [[TMP87:%.*]] = fmul fast float [[TMP86]], [[TMP86]]
+; CHECK-NEXT: [[OP_RDX_6:%.*]] = tail call fast float @llvm.vector.reduce.fadd.v20f32(float [[TMP87]], <20 x float> [[TMP81]])
+; CHECK-NEXT: [[OP_RDX3_6:%.*]] = fadd fast float [[OP_RDX_6]], [[OP_RDX3_5]]
+; CHECK-NEXT: ret float [[OP_RDX3_6]]
+;
+ %5 = alloca ptr, align 8
+ %6 = alloca ptr, align 8
+ %7 = alloca i32, align 4
+ %8 = alloca i32, align 4
+ store ptr %0, ptr %5, align 8, !tbaa !4
+ store ptr %1, ptr %6, align 8, !tbaa !4
+ store i32 %2, ptr %7, align 4, !tbaa !9
+ store i32 %3, ptr %8, align 4, !tbaa !9
+ %9 = load ptr, ptr %5, align 8, !tbaa !4
+ %10 = load ptr, ptr %6, align 8, !tbaa !4
+ %11 = load i32, ptr %7, align 4, !tbaa !9
+ %12 = load i32, ptr %8, align 4, !tbaa !9
+ %13 = call fast noundef nofpclass(nan inf) float @_ZL6reduceILi7EEfPKfS1_ii(ptr noundef %9, ptr noundef %10, i32 noundef %11, i32 noundef %12)
+ ret float %13
+}
+
+define internal noundef nofpclass(nan inf) float @_ZL6reduceILi7EEfPKfS1_ii(ptr noundef %0, ptr noundef %1, i32 noundef %2, i32 noundef %3) #1 {
+ %5 = alloca ptr, align 8
+ %6 = alloca ptr, align 8
+ %7 = alloca i32, align 4
+ %8 = alloca i32, align 4
+ %9 = alloca i32, align 4
+ %10 = alloca i32, align 4
+ %11 = alloca i32, align 4
+ %12 = alloca float, align 4
+ %13 = alloca i32, align 4
+ %14 = alloca i32, align 4
+ %15 = alloca float, align 4
+ %16 = alloca i32, align 4
+ %17 = alloca float, align 4
+ store ptr %0, ptr %5, align 8, !tbaa !4
+ store ptr %1, ptr %6, align 8, !tbaa !4
+ store i32 %2, ptr %7, align 4, !tbaa !9
+ store i32 %3, ptr %8, align 4, !tbaa !9
+ call void @llvm.lifetime.start.p0(ptr %9) #3
+ store i32 3, ptr %9, align 4, !tbaa !9
+ call void @llvm.lifetime.start.p0(ptr %10) #3
+ store i32 3, ptr %10, align 4, !tbaa !9
+ call void @llvm.lifetime.start.p0(ptr %11) #3
+ store i32 7, ptr %11, align 4, !tbaa !9
+ call void @llvm.lifetime.start.p0(ptr %12) #3
+ store float 0.000000e+00, ptr %12, align 4, !tbaa !11
+ call void @llvm.lifetime.start.p0(ptr %13) #3
+ store i32 0, ptr %13, align 4, !tbaa !9
+ br label %18
+
+18: ; preds = %59, %4
+ %19 = load i32, ptr %13, align 4, !tbaa !9
+ %20 = icmp slt i32 %19, 7
+ br i1 %20, label %22, label %21
+
+21: ; preds = %18
+ store i32 2, ptr %14, align 4
+ call void @llvm.lifetime.end.p0(ptr %13) #3
+ br label %62
+
+22: ; preds = %18
+ call void @llvm.lifetime.start.p0(ptr %15) #3
+ store float 0.000000e+00, ptr %15, align 4, !tbaa !11
+ call void @llvm.lifetime.start.p0(ptr %16) #3
+ store i32 0, ptr %16, align 4, !tbaa !9
+ br label %23
+
+23: ; preds = %44, %22
+ %24 = load i32, ptr %16, align 4, !tbaa !9
+ %25 = icmp slt i32 %24, 21
+ br i1 %25, label %27, label %26
+
+26: ; preds = %23
+ store i32 5, ptr %14, align 4
+ call void @llvm.lifetime.end.p0(ptr %16) #3
+ br label %47
+
+27: ; preds = %23
+ call void @llvm.lifetime.start.p0(ptr %17) #3
+ %28 = load ptr, ptr %5, align 8, !tbaa !4
+ %29 = load i32, ptr %16, align 4, !tbaa !9
+ %30 = sext i32 %29 to i64
+ %31 = getelementptr inbounds float, ptr %28, i64 %30
+ %32 = load float, ptr %31, align 4, !tbaa !11
+ %33 = load ptr, ptr %6, align 8, !tbaa !4
+ %34 = load i32, ptr %16, align 4, !tbaa !9
+ %35 = sext i32 %34 to i64
+ %36 = getelementptr inbounds float, ptr %33, i64 %35
+ %37 = load float, ptr %36, align 4, !tbaa !11
+ %38 = fsub fast float %32, %37
+ store float %38, ptr %17, align 4, !tbaa !11
+ %39 = load float, ptr %17, align 4, !tbaa !11
+ %40 = load float, ptr %17, align 4, !tbaa !11
+ %41 = fmul fast float %39, %40
+ %42 = load float, ptr %15, align 4, !tbaa !11
+ %43 = fadd fast float %42, %41
+ store float %43, ptr %15, align 4, !tbaa !11
+ call void @llvm.lifetime.end.p0(ptr %17) #3
+ br label %44
+
+44: ; preds = %27
+ %45 = load i32, ptr %16, align 4, !tbaa !9
+ %46 = add nsw i32 %45, 1
+ store i32 %46, ptr %16, align 4, !tbaa !9
+ br label %23, !llvm.loop !13
+
+47: ; preds = %26
+ %48 = load i32, ptr %7, align 4, !tbaa !9
+ %49 = load ptr, ptr %5, align 8, !tbaa !4
+ %50 = sext i32 %48 to i64
+ %51 = getelementptr inbounds float, ptr %49, i64 %50
+ store ptr %51, ptr %5, align 8, !tbaa !4
+ %52 = load i32, ptr %8, align 4, !tbaa !9
+ %53 = load ptr, ptr %6, align 8, !tbaa !4
+ %54 = sext i32 %52 to i64
+ %55 = getelementptr inbounds float, ptr %53, i64 %54
+ store ptr %55, ptr %6, align 8, !tbaa !4
+ %56 = load float, ptr %15, align 4, !tbaa !11
+ %57 = load float, ptr %12, align 4, !tbaa !11
+ %58 = fadd fast float %57, %56
+ store float %58, ptr %12, align 4, !tbaa !11
+ call void @llvm.lifetime.end.p0(ptr %15) #3
+ br label %59
+
+59: ; preds = %47
+ %60 = load i32, ptr %13, align 4, !tbaa !9
+ %61 = add nsw i32 %60, 1
+ store i32 %61, ptr %13, align 4, !tbaa !9
+ br label %18, !llvm.loop !15
+
+62: ; preds = %21
+ %63 = load float, ptr %12, align 4, !tbaa !11
+ store i32 1, ptr %14, align 4
+ call void @llvm.lifetime.end.p0(ptr %12) #3
+ call void @llvm.lifetime.end.p0(ptr %11) #3
+ call void @llvm.lifetime.end.p0(ptr %10) #3
+ call void @llvm.lifetime.end.p0(ptr %9) #3
+ ret float %63
+}
+
+declare void @llvm.lifetime.start.p0(ptr captures(none)) #2
+declare void @llvm.lifetime.end.p0(ptr captures(none)) #2
+
+attributes #0 = { mustprogress uwtable "approx-func-fp-math"="true" "frame-pointer"="non-leaf" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+fp-armv8,+neon,+v8a,-fmv" "unsafe-fp-math"="true" }
+attributes #1 = { inlinehint mustprogress nounwind uwtable "approx-func-fp-math"="true" "frame-pointer"="non-leaf" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+fp-armv8,+neon,+v8a,-fmv" "unsafe-fp-math"="true" }
+attributes #2 = { nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) }
+attributes #3 = { nounwind }
+
+!llvm.module.flags = !{!0, !1, !2}
+!llvm.ident = !{!3}
+
+!0 = !{i32 1, !"wchar_size", i32 4}
+!1 = !{i32 7, !"uwtable", i32 2}
+!2 = !{i32 7, !"frame-pointer", i32 1}
+!3 = !{!"clang version 22.0.0git"}
+!4 = !{!5, !5, i64 0}
+!5 = !{!"p1 float", !6, i64 0}
+!6 = !{!"any pointer", !7, i64 0}
+!7 = !{!"omnipotent char", !8, i64 0}
+!8 = !{!"Simple C++ TBAA"}
+!9 = !{!10, !10, i64 0}
+!10 = !{!"int", !7, i64 0}
+!11 = !{!12, !12, i64 0}
+!12 = !{!"float", !7, i64 0}
+!13 = distinct !{!13, !14}
+!14 = !{!"llvm.loop.mustprogress"}
+!15 = distinct !{!15, !14}
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/slpordering.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/slpordering.ll
index a201983..d19242f 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/slpordering.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/slpordering.ll
@@ -136,14 +136,14 @@ entry:
store i32 %ip1, ptr %ip1.addr, align 4, !tbaa !8
store ptr %p2, ptr %p2.addr, align 8, !tbaa !4
store i32 %ip2, ptr %ip2.addr, align 4, !tbaa !8
- call void @llvm.lifetime.start.p0(i64 64, ptr %emp) #2
- call void @llvm.lifetime.start.p0(i64 4, ptr %r0) #2
- call void @llvm.lifetime.start.p0(i64 4, ptr %r1) #2
- call void @llvm.lifetime.start.p0(i64 4, ptr %r2) #2
- call void @llvm.lifetime.start.p0(i64 4, ptr %r3) #2
- call void @llvm.lifetime.start.p0(i64 4, ptr %sum) #2
+ call void @llvm.lifetime.start.p0(ptr %emp) #2
+ call void @llvm.lifetime.start.p0(ptr %r0) #2
+ call void @llvm.lifetime.start.p0(ptr %r1) #2
+ call void @llvm.lifetime.start.p0(ptr %r2) #2
+ call void @llvm.lifetime.start.p0(ptr %r3) #2
+ call void @llvm.lifetime.start.p0(ptr %sum) #2
store i32 0, ptr %sum, align 4, !tbaa !8
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2
+ call void @llvm.lifetime.start.p0(ptr %i) #2
store i32 0, ptr %i, align 4, !tbaa !8
br label %for.cond
@@ -153,7 +153,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.cond.cleanup
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2
+ call void @llvm.lifetime.end.p0(ptr %i) #2
br label %for.end
for.body: ; preds = %for.cond
@@ -241,22 +241,22 @@ for.body: ; preds = %for.cond
%shl42 = shl i32 %sub41, 16
%rdd43 = add nsw i32 %sub36, %shl42
store i32 %rdd43, ptr %r3, align 4, !tbaa !8
- call void @llvm.lifetime.start.p0(i64 4, ptr %e0) #2
+ call void @llvm.lifetime.start.p0(ptr %e0) #2
%33 = load i32, ptr %r0, align 4, !tbaa !8
%34 = load i32, ptr %r1, align 4, !tbaa !8
%rdd44 = add i32 %33, %34
store i32 %rdd44, ptr %e0, align 4, !tbaa !8
- call void @llvm.lifetime.start.p0(i64 4, ptr %e1) #2
+ call void @llvm.lifetime.start.p0(ptr %e1) #2
%35 = load i32, ptr %r0, align 4, !tbaa !8
%36 = load i32, ptr %r1, align 4, !tbaa !8
%sub45 = sub i32 %35, %36
store i32 %sub45, ptr %e1, align 4, !tbaa !8
- call void @llvm.lifetime.start.p0(i64 4, ptr %e2) #2
+ call void @llvm.lifetime.start.p0(ptr %e2) #2
%37 = load i32, ptr %r2, align 4, !tbaa !8
%38 = load i32, ptr %r3, align 4, !tbaa !8
%rdd46 = add i32 %37, %38
store i32 %rdd46, ptr %e2, align 4, !tbaa !8
- call void @llvm.lifetime.start.p0(i64 4, ptr %e3) #2
+ call void @llvm.lifetime.start.p0(ptr %e3) #2
%39 = load i32, ptr %r2, align 4, !tbaa !8
%40 = load i32, ptr %r3, align 4, !tbaa !8
%sub47 = sub i32 %39, %40
@@ -293,10 +293,10 @@ for.body: ; preds = %for.cond
%rrrayidx61 = getelementptr inbounds [4 x [4 x i32]], ptr %emp, i64 0, i64 %idxprom60
%rrrayidx62 = getelementptr inbounds [4 x i32], ptr %rrrayidx61, i64 0, i64 3
store i32 %sub59, ptr %rrrayidx62, align 4, !tbaa !8
- call void @llvm.lifetime.end.p0(i64 4, ptr %e3) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr %e2) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr %e1) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr %e0) #2
+ call void @llvm.lifetime.end.p0(ptr %e3) #2
+ call void @llvm.lifetime.end.p0(ptr %e2) #2
+ call void @llvm.lifetime.end.p0(ptr %e1) #2
+ call void @llvm.lifetime.end.p0(ptr %e0) #2
br label %for.inc
for.inc: ; preds = %for.body
@@ -316,7 +316,7 @@ for.inc: ; preds = %for.body
br label %for.cond, !llvm.loop !11
for.end: ; preds = %for.cond.cleanup
- call void @llvm.lifetime.start.p0(i64 4, ptr %i65) #2
+ call void @llvm.lifetime.start.p0(ptr %i65) #2
store i32 0, ptr %i65, align 4, !tbaa !8
br label %for.cond66
@@ -326,11 +326,11 @@ for.cond66: ; preds = %for.inc114, %for.en
br i1 %cmp67, label %for.body70, label %for.cond.cleanup69
for.cond.cleanup69: ; preds = %for.cond66
- call void @llvm.lifetime.end.p0(i64 4, ptr %i65) #2
+ call void @llvm.lifetime.end.p0(ptr %i65) #2
br label %for.end116
for.body70: ; preds = %for.cond66
- call void @llvm.lifetime.start.p0(i64 4, ptr %e071) #2
+ call void @llvm.lifetime.start.p0(ptr %e071) #2
%rrrayidx72 = getelementptr inbounds [4 x [4 x i32]], ptr %emp, i64 0, i64 0
%59 = load i32, ptr %i65, align 4, !tbaa !8
%idxprom73 = sext i32 %59 to i64
@@ -343,7 +343,7 @@ for.body70: ; preds = %for.cond66
%62 = load i32, ptr %rrrayidx77, align 4, !tbaa !8
%rdd78 = add i32 %60, %62
store i32 %rdd78, ptr %e071, align 4, !tbaa !8
- call void @llvm.lifetime.start.p0(i64 4, ptr %e179) #2
+ call void @llvm.lifetime.start.p0(ptr %e179) #2
%rrrayidx80 = getelementptr inbounds [4 x [4 x i32]], ptr %emp, i64 0, i64 0
%63 = load i32, ptr %i65, align 4, !tbaa !8
%idxprom81 = sext i32 %63 to i64
@@ -356,7 +356,7 @@ for.body70: ; preds = %for.cond66
%66 = load i32, ptr %rrrayidx85, align 4, !tbaa !8
%sub86 = sub i32 %64, %66
store i32 %sub86, ptr %e179, align 4, !tbaa !8
- call void @llvm.lifetime.start.p0(i64 4, ptr %e287) #2
+ call void @llvm.lifetime.start.p0(ptr %e287) #2
%rrrayidx88 = getelementptr inbounds [4 x [4 x i32]], ptr %emp, i64 0, i64 2
%67 = load i32, ptr %i65, align 4, !tbaa !8
%idxprom89 = sext i32 %67 to i64
@@ -369,7 +369,7 @@ for.body70: ; preds = %for.cond66
%70 = load i32, ptr %rrrayidx93, align 4, !tbaa !8
%rdd94 = add i32 %68, %70
store i32 %rdd94, ptr %e287, align 4, !tbaa !8
- call void @llvm.lifetime.start.p0(i64 4, ptr %e395) #2
+ call void @llvm.lifetime.start.p0(ptr %e395) #2
%rrrayidx96 = getelementptr inbounds [4 x [4 x i32]], ptr %emp, i64 0, i64 2
%71 = load i32, ptr %i65, align 4, !tbaa !8
%idxprom97 = sext i32 %71 to i64
@@ -398,10 +398,10 @@ for.body70: ; preds = %for.cond66
%82 = load i32, ptr %e395, align 4, !tbaa !8
%sub106 = sub nsw i32 %81, %82
store i32 %sub106, ptr %r3, align 4, !tbaa !8
- call void @llvm.lifetime.end.p0(i64 4, ptr %e395) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr %e287) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr %e179) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr %e071) #2
+ call void @llvm.lifetime.end.p0(ptr %e395) #2
+ call void @llvm.lifetime.end.p0(ptr %e287) #2
+ call void @llvm.lifetime.end.p0(ptr %e179) #2
+ call void @llvm.lifetime.end.p0(ptr %e071) #2
%83 = load i32, ptr %r0, align 4, !tbaa !8
%call = call i32 @twoabs(i32 noundef %83)
%84 = load i32, ptr %r1, align 4, !tbaa !8
@@ -432,20 +432,20 @@ for.end116: ; preds = %for.cond.cleanup69
%shr = lshr i32 %90, 16
%rdd119 = add i32 %conv118, %shr
%shr120 = lshr i32 %rdd119, 1
- call void @llvm.lifetime.end.p0(i64 4, ptr %sum) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr %r3) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr %r2) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr %r1) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr %r0) #2
- call void @llvm.lifetime.end.p0(i64 64, ptr %emp) #2
+ call void @llvm.lifetime.end.p0(ptr %sum) #2
+ call void @llvm.lifetime.end.p0(ptr %r3) #2
+ call void @llvm.lifetime.end.p0(ptr %r2) #2
+ call void @llvm.lifetime.end.p0(ptr %r1) #2
+ call void @llvm.lifetime.end.p0(ptr %r0) #2
+ call void @llvm.lifetime.end.p0(ptr %emp) #2
ret i32 %shr120
}
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
; Function Attrs: nounwind uwtable
define internal i32 @twoabs(i32 noundef %r) #0 {
@@ -453,7 +453,7 @@ entry:
%r.addr = alloca i32, align 4
%s = alloca i32, align 4
store i32 %r, ptr %r.addr, align 4, !tbaa !8
- call void @llvm.lifetime.start.p0(i64 4, ptr %s) #2
+ call void @llvm.lifetime.start.p0(ptr %s) #2
%0 = load i32, ptr %r.addr, align 4, !tbaa !8
%shr = lshr i32 %0, 15
%rnd = and i32 %shr, 65537
@@ -464,7 +464,7 @@ entry:
%rdd = add i32 %1, %2
%3 = load i32, ptr %s, align 4, !tbaa !8
%xor = xor i32 %rdd, %3
- call void @llvm.lifetime.end.p0(i64 4, ptr %s) #2
+ call void @llvm.lifetime.end.p0(ptr %s) #2
ret i32 %xor
}
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/sve-interleave-vectorization.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/sve-interleave-vectorization.ll
index f40afbd..ff085fc 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/sve-interleave-vectorization.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/sve-interleave-vectorization.ll
@@ -24,8 +24,6 @@ define void @interleave_deinterleave(ptr noalias %dst, ptr %a, ptr %b) {
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -47,7 +45,7 @@ define void @interleave_deinterleave(ptr noalias %dst, ptr %a, ptr %b) {
; CHECK-NEXT: [[TMP23:%.*]] = shl <vscale x 4 x i32> [[TMP11]], [[TMP18]]
; CHECK-NEXT: [[TMP24:%.*]] = ashr <vscale x 4 x i32> [[TMP12]], [[TMP19]]
; CHECK-NEXT: call void @llvm.aarch64.sve.st4.nxv4i32(<vscale x 4 x i32> [[TMP20]], <vscale x 4 x i32> [[TMP22]], <vscale x 4 x i32> [[TMP23]], <vscale x 4 x i32> [[TMP24]], <vscale x 4 x i1> splat (i1 true), ptr [[TMP21]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/udotabd.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/udotabd.ll
index 3496520..0967736 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/udotabd.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/udotabd.ll
@@ -379,9 +379,9 @@ entry:
store i32 %s_p1, ptr %s_p1.addr, align 4, !tbaa !9
store ptr %p2, ptr %p2.addr, align 8, !tbaa !4
store i32 %s_p2, ptr %s_p2.addr, align 4, !tbaa !9
- call void @llvm.lifetime.start.p0(i64 4, ptr %i_sum) #3
+ call void @llvm.lifetime.start.p0(ptr %i_sum) #3
store i32 0, ptr %i_sum, align 4, !tbaa !9
- call void @llvm.lifetime.start.p0(i64 4, ptr %y) #3
+ call void @llvm.lifetime.start.p0(ptr %y) #3
store i32 0, ptr %y, align 4, !tbaa !9
br label %for.cond
@@ -392,11 +392,11 @@ for.cond: ; preds = %for.inc10, %entry
for.cond.cleanup: ; preds = %for.cond
store i32 2, ptr %cleanup.dest.slot, align 4
- call void @llvm.lifetime.end.p0(i64 4, ptr %y) #3
+ call void @llvm.lifetime.end.p0(ptr %y) #3
br label %for.end12
for.body: ; preds = %for.cond
- call void @llvm.lifetime.start.p0(i64 4, ptr %x) #3
+ call void @llvm.lifetime.start.p0(ptr %x) #3
store i32 0, ptr %x, align 4, !tbaa !9
br label %for.cond1
@@ -407,7 +407,7 @@ for.cond1: ; preds = %for.inc, %for.body
for.cond.cleanup3: ; preds = %for.cond1
store i32 5, ptr %cleanup.dest.slot, align 4
- call void @llvm.lifetime.end.p0(i64 4, ptr %x) #3
+ call void @llvm.lifetime.end.p0(ptr %x) #3
br label %for.end
for.body4: ; preds = %for.cond1
@@ -458,18 +458,18 @@ for.inc10: ; preds = %for.end
for.end12: ; preds = %for.cond.cleanup
%16 = load i32, ptr %i_sum, align 4, !tbaa !9
store i32 1, ptr %cleanup.dest.slot, align 4
- call void @llvm.lifetime.end.p0(i64 4, ptr %i_sum) #3
+ call void @llvm.lifetime.end.p0(ptr %i_sum) #3
ret i32 %16
}
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr) #1
+declare void @llvm.lifetime.start.p0(ptr) #1
; Function Attrs: nocallback nofree nosync nounwind speculatable willreturn memory(none)
declare i32 @llvm.abs.i32(i32, i1 immarg) #2
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr) #1
+declare void @llvm.lifetime.end.p0(ptr) #1
attributes #0 = { nounwind uwtable vscale_range(1,16) "frame-pointer"="non-leaf" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+bf16,+bti,+ccidx,+complxnum,+crc,+dit,+dotprod,+ete,+flagm,+fp-armv8,+fp16fml,+fullfp16,+i8mm,+jsconv,+lse,+mte,+neon,+pauth,+perfmon,+predres,+rand,+ras,+rcpc,+rdm,+sb,+spe,+ssbs,+sve,+sve-bitperm,+sve2,+trbe,+v8.1a,+v8.2a,+v8.3a,+v8.4a,+v8.5a,+v8a,+v9a,-fmv" }
attributes #1 = { nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) }
diff --git a/llvm/test/Transforms/PhaseOrdering/ARM/arm_add_q7.ll b/llvm/test/Transforms/PhaseOrdering/ARM/arm_add_q7.ll
index 76d9d14..0023dea 100644
--- a/llvm/test/Transforms/PhaseOrdering/ARM/arm_add_q7.ll
+++ b/llvm/test/Transforms/PhaseOrdering/ARM/arm_add_q7.ll
@@ -43,7 +43,7 @@ entry:
store ptr %pSrcB, ptr %pSrcB.addr, align 4
store ptr %pDst, ptr %pDst.addr, align 4
store i32 %blockSize, ptr %blockSize.addr, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %blkCnt)
+ call void @llvm.lifetime.start.p0(ptr %blkCnt)
%0 = load i32, ptr %blockSize.addr, align 4
store i32 %0, ptr %blkCnt, align 4
br label %while.cond
@@ -78,7 +78,7 @@ while.body: ; preds = %while.cond
br label %while.cond
while.end: ; preds = %while.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %blkCnt)
+ call void @llvm.lifetime.end.p0(ptr %blkCnt)
ret void
}
@@ -102,13 +102,13 @@ land.lhs.true: ; preds = %entry
br i1 %cmp1, label %if.then, label %if.end10
if.then: ; preds = %land.lhs.true
- call void @llvm.lifetime.start.p0(i64 4, ptr %max)
+ call void @llvm.lifetime.start.p0(ptr %max)
%2 = load i32, ptr %sat.addr, align 4
%sub = sub i32 %2, 1
%shl = shl i32 1, %sub
%sub2 = sub i32 %shl, 1
store i32 %sub2, ptr %max, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %min)
+ call void @llvm.lifetime.start.p0(ptr %min)
%3 = load i32, ptr %max, align 4
%sub3 = sub nsw i32 -1, %3
store i32 %sub3, ptr %min, align 4
@@ -143,8 +143,8 @@ if.end8: ; preds = %if.end
br label %cleanup
cleanup: ; preds = %if.end8, %if.then7, %if.then5
- call void @llvm.lifetime.end.p0(i64 4, ptr %min)
- call void @llvm.lifetime.end.p0(i64 4, ptr %max)
+ call void @llvm.lifetime.end.p0(ptr %min)
+ call void @llvm.lifetime.end.p0(ptr %max)
%cleanup.dest = load i32, ptr %cleanup.dest.slot, align 4
switch i32 %cleanup.dest, label %unreachable [
i32 0, label %cleanup.cont
@@ -167,8 +167,8 @@ unreachable: ; preds = %cleanup
unreachable
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
attributes #0 = { nounwind "frame-pointer"="all" "min-legal-vector-width"="0" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-m55" "target-features"="+armv8.1-m.main,+dsp,+fp-armv8d16,+fp-armv8d16sp,+fp16,+fp64,+fullfp16,+hwdiv,+lob,+mve,+mve.fp,+ras,+strict-align,+thumb-mode,+vfp2,+vfp2sp,+vfp3d16,+vfp3d16sp,+vfp4d16,+vfp4d16sp,-aes,-bf16,-cdecp0,-cdecp1,-cdecp2,-cdecp3,-cdecp4,-cdecp5,-cdecp6,-cdecp7,-crc,-crypto,-d32,-dotprod,-fp-armv8,-fp-armv8sp,-fp16fml,-hwdiv-arm,-i8mm,-neon,-sb,-sha2,-vfp3,-vfp3sp,-vfp4,-vfp4sp" "unsafe-fp-math"="true" }
attributes #1 = { alwaysinline nounwind "frame-pointer"="all" "min-legal-vector-width"="0" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-m55" "target-features"="+armv8.1-m.main,+dsp,+fp-armv8d16,+fp-armv8d16sp,+fp16,+fp64,+fullfp16,+hwdiv,+lob,+mve,+mve.fp,+ras,+strict-align,+thumb-mode,+vfp2,+vfp2sp,+vfp3d16,+vfp3d16sp,+vfp4d16,+vfp4d16sp,-aes,-bf16,-cdecp0,-cdecp1,-cdecp2,-cdecp3,-cdecp4,-cdecp5,-cdecp6,-cdecp7,-crc,-crypto,-d32,-dotprod,-fp-armv8,-fp-armv8sp,-fp16fml,-hwdiv-arm,-i8mm,-neon,-sb,-sha2,-vfp3,-vfp3sp,-vfp4,-vfp4sp" "unsafe-fp-math"="true" }
diff --git a/llvm/test/Transforms/PhaseOrdering/ARM/arm_fill_q7.ll b/llvm/test/Transforms/PhaseOrdering/ARM/arm_fill_q7.ll
index 2ab6f2b..436f848 100644
--- a/llvm/test/Transforms/PhaseOrdering/ARM/arm_fill_q7.ll
+++ b/llvm/test/Transforms/PhaseOrdering/ARM/arm_fill_q7.ll
@@ -59,8 +59,8 @@ entry:
store i8 %value, ptr %value.addr, align 1, !tbaa !3
store ptr %pDst, ptr %pDst.addr, align 4, !tbaa !6
store i32 %blockSize, ptr %blockSize.addr, align 4, !tbaa !8
- call void @llvm.lifetime.start.p0(i64 4, ptr %blkCnt) #3
- call void @llvm.lifetime.start.p0(i64 4, ptr %packedValue) #3
+ call void @llvm.lifetime.start.p0(ptr %blkCnt) #3
+ call void @llvm.lifetime.start.p0(ptr %packedValue) #3
%0 = load i8, ptr %value.addr, align 1, !tbaa !3
%conv = sext i8 %0 to i32
%shl = shl i32 %conv, 0
@@ -122,13 +122,13 @@ while.body16: ; preds = %while.cond13
br label %while.cond13, !llvm.loop !12
while.end18: ; preds = %while.cond13
- call void @llvm.lifetime.end.p0(i64 4, ptr %packedValue) #3
- call void @llvm.lifetime.end.p0(i64 4, ptr %blkCnt) #3
+ call void @llvm.lifetime.end.p0(ptr %packedValue) #3
+ call void @llvm.lifetime.end.p0(ptr %blkCnt) #3
ret void
}
; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
; Function Attrs: alwaysinline nounwind
define internal void @write_q7x4_ia(ptr %pQ7, i32 %value) #2 {
@@ -138,7 +138,7 @@ entry:
%val = alloca i32, align 4
store ptr %pQ7, ptr %pQ7.addr, align 4, !tbaa !6
store i32 %value, ptr %value.addr, align 4, !tbaa !8
- call void @llvm.lifetime.start.p0(i64 4, ptr %val) #3
+ call void @llvm.lifetime.start.p0(ptr %val) #3
%0 = load i32, ptr %value.addr, align 4, !tbaa !8
store i32 %0, ptr %val, align 4, !tbaa !8
%1 = load i32, ptr %val, align 4, !tbaa !8
@@ -175,12 +175,12 @@ entry:
%14 = load ptr, ptr %13, align 4, !tbaa !6
%add.ptr = getelementptr inbounds i8, ptr %14, i32 4
store ptr %add.ptr, ptr %13, align 4, !tbaa !6
- call void @llvm.lifetime.end.p0(i64 4, ptr %val) #3
+ call void @llvm.lifetime.end.p0(ptr %val) #3
ret void
}
; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
attributes #0 = { nounwind "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-m0plus" "target-features"="+armv6-m,+strict-align,+thumb-mode,-aes,-bf16,-cdecp0,-cdecp1,-cdecp2,-cdecp3,-cdecp4,-cdecp5,-cdecp6,-cdecp7,-crc,-crypto,-dotprod,-dsp,-fp16fml,-fullfp16,-hwdiv,-hwdiv-arm,-i8mm,-lob,-mve,-mve.fp,-ras,-sb,-sha2" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { argmemonly nofree nosync nounwind willreturn }
diff --git a/llvm/test/Transforms/PhaseOrdering/ARM/arm_mean_q7.ll b/llvm/test/Transforms/PhaseOrdering/ARM/arm_mean_q7.ll
index b932a69..6862d8b 100644
--- a/llvm/test/Transforms/PhaseOrdering/ARM/arm_mean_q7.ll
+++ b/llvm/test/Transforms/PhaseOrdering/ARM/arm_mean_q7.ll
@@ -60,9 +60,9 @@ entry:
store ptr %pSrc, ptr %pSrc.addr, align 4
store i32 %blockSize, ptr %blockSize.addr, align 4
store ptr %pResult, ptr %pResult.addr, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %blkCnt) #3
- call void @llvm.lifetime.start.p0(i64 16, ptr %vecSrc) #3
- call void @llvm.lifetime.start.p0(i64 4, ptr %sum) #3
+ call void @llvm.lifetime.start.p0(ptr %blkCnt) #3
+ call void @llvm.lifetime.start.p0(ptr %vecSrc) #3
+ call void @llvm.lifetime.start.p0(ptr %sum) #3
store i32 0, ptr %sum, align 4
%0 = load i32, ptr %blockSize.addr, align 4
%shr = lshr i32 %0, 4
@@ -123,15 +123,15 @@ while.end5: ; preds = %while.cond1
%conv6 = trunc i32 %div to i8
%18 = load ptr, ptr %pResult.addr, align 4
store i8 %conv6, ptr %18, align 1
- call void @llvm.lifetime.end.p0(i64 4, ptr %sum) #3
- call void @llvm.lifetime.end.p0(i64 16, ptr %vecSrc) #3
- call void @llvm.lifetime.end.p0(i64 4, ptr %blkCnt) #3
+ call void @llvm.lifetime.end.p0(ptr %sum) #3
+ call void @llvm.lifetime.end.p0(ptr %vecSrc) #3
+ call void @llvm.lifetime.end.p0(ptr %blkCnt) #3
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare i32 @llvm.arm.mve.addv.v16i8(<16 x i8>, i32) #2
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
attributes #0 = { nounwind "approx-func-fp-math"="true" "frame-pointer"="all" "min-legal-vector-width"="0" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-m55" "target-features"="+armv8.1-m.main,+dsp,+fp-armv8d16,+fp-armv8d16sp,+fp16,+fp64,+fullfp16,+hwdiv,+lob,+mve,+mve.fp,+ras,+strict-align,+thumb-mode,+vfp2,+vfp2sp,+vfp3d16,+vfp3d16sp,+vfp4d16,+vfp4d16sp,-aes,-bf16,-cdecp0,-cdecp1,-cdecp2,-cdecp3,-cdecp4,-cdecp5,-cdecp6,-cdecp7,-crc,-crypto,-d32,-dotprod,-fp-armv8,-fp-armv8sp,-fp16fml,-hwdiv-arm,-i8mm,-neon,-pacbti,-sb,-sha2,-vfp3,-vfp3sp,-vfp4,-vfp4sp" "unsafe-fp-math"="true" }
attributes #1 = { argmemonly nocallback nofree nosync nounwind willreturn }
diff --git a/llvm/test/Transforms/PhaseOrdering/ARM/arm_mult_q15.ll b/llvm/test/Transforms/PhaseOrdering/ARM/arm_mult_q15.ll
index 9d613b8..42fdafb 100644
--- a/llvm/test/Transforms/PhaseOrdering/ARM/arm_mult_q15.ll
+++ b/llvm/test/Transforms/PhaseOrdering/ARM/arm_mult_q15.ll
@@ -88,7 +88,7 @@ entry:
store ptr %pSrcB, ptr %pSrcB.addr, align 4
store ptr %pDst, ptr %pDst.addr, align 4
store i32 %blockSize, ptr %blockSize.addr, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %blkCnt) #3
+ call void @llvm.lifetime.start.p0(ptr %blkCnt) #3
%0 = load i32, ptr %blockSize.addr, align 4
store i32 %0, ptr %blkCnt, align 4
br label %while.cond
@@ -123,11 +123,11 @@ while.body: ; preds = %while.cond
br label %while.cond
while.end: ; preds = %while.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %blkCnt) #3
+ call void @llvm.lifetime.end.p0(ptr %blkCnt) #3
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
define internal i32 @__SSAT(i32 %val, i32 %sat) #2 {
entry:
@@ -149,13 +149,13 @@ land.lhs.true: ; preds = %entry
br i1 %cmp1, label %if.then, label %if.end10
if.then: ; preds = %land.lhs.true
- call void @llvm.lifetime.start.p0(i64 4, ptr %max) #3
+ call void @llvm.lifetime.start.p0(ptr %max) #3
%2 = load i32, ptr %sat.addr, align 4
%sub = sub i32 %2, 1
%shl = shl i32 1, %sub
%sub2 = sub i32 %shl, 1
store i32 %sub2, ptr %max, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %min) #3
+ call void @llvm.lifetime.start.p0(ptr %min) #3
%3 = load i32, ptr %max, align 4
%sub3 = sub nsw i32 -1, %3
store i32 %sub3, ptr %min, align 4
@@ -190,8 +190,8 @@ if.end8: ; preds = %if.end
br label %cleanup
cleanup: ; preds = %if.end8, %if.then7, %if.then5
- call void @llvm.lifetime.end.p0(i64 4, ptr %min) #3
- call void @llvm.lifetime.end.p0(i64 4, ptr %max) #3
+ call void @llvm.lifetime.end.p0(ptr %min) #3
+ call void @llvm.lifetime.end.p0(ptr %max) #3
%cleanup.dest = load i32, ptr %cleanup.dest.slot, align 4
switch i32 %cleanup.dest, label %unreachable [
i32 0, label %cleanup.cont
@@ -214,7 +214,7 @@ unreachable: ; preds = %cleanup
unreachable
}
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
attributes #0 = { nounwind "frame-pointer"="all" "min-legal-vector-width"="0" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-m55" "target-features"="+armv8.1-m.main,+dsp,+fp-armv8d16,+fp-armv8d16sp,+fp16,+fp64,+fullfp16,+hwdiv,+lob,+mve,+mve.fp,+ras,+strict-align,+thumb-mode,+vfp2,+vfp2sp,+vfp3d16,+vfp3d16sp,+vfp4d16,+vfp4d16sp,-aes,-bf16,-cdecp0,-cdecp1,-cdecp2,-cdecp3,-cdecp4,-cdecp5,-cdecp6,-cdecp7,-crc,-crypto,-d32,-dotprod,-fp-armv8,-fp-armv8sp,-fp16fml,-hwdiv-arm,-i8mm,-neon,-sb,-sha2,-vfp3,-vfp3sp,-vfp4,-vfp4sp" "unsafe-fp-math"="true" }
attributes #1 = { argmemonly nofree nosync nounwind willreturn }
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/SROA-after-final-loop-unrolling-2.ll b/llvm/test/Transforms/PhaseOrdering/X86/SROA-after-final-loop-unrolling-2.ll
index 5178e9f..7fe3f33 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/SROA-after-final-loop-unrolling-2.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/SROA-after-final-loop-unrolling-2.ll
@@ -60,9 +60,9 @@ bb:
%i5 = alloca ptr, align 8
store i32 %arg, ptr %i, align 4, !tbaa !5
store ptr %arg1, ptr %i2, align 8, !tbaa !9
- call void @llvm.lifetime.start.p0(i64 8, ptr %i3) #3
+ call void @llvm.lifetime.start.p0(ptr %i3) #3
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %i3, ptr align 4 @global, i64 8, i1 false)
- call void @llvm.lifetime.start.p0(i64 4, ptr %i4) #3
+ call void @llvm.lifetime.start.p0(ptr %i4) #3
store i32 0, ptr %i4, align 4, !tbaa !5
br label %bb6
@@ -75,11 +75,11 @@ bb6: ; preds = %bb22, %bb
br i1 %i11, label %bb13, label %bb12
bb12: ; preds = %bb6
- call void @llvm.lifetime.end.p0(i64 4, ptr %i4) #3
+ call void @llvm.lifetime.end.p0(ptr %i4) #3
br label %bb25
bb13: ; preds = %bb6
- call void @llvm.lifetime.start.p0(i64 8, ptr %i5) #3
+ call void @llvm.lifetime.start.p0(ptr %i5) #3
%i14 = load i32, ptr %i4, align 4, !tbaa !5
%i15 = srem i32 %i14, 2
%i16 = sext i32 %i15 to i64
@@ -90,7 +90,7 @@ bb13: ; preds = %bb6
%i20 = load i32, ptr %i19, align 4, !tbaa !5
%i21 = mul nsw i32 %i20, %i18
store i32 %i21, ptr %i19, align 4, !tbaa !5
- call void @llvm.lifetime.end.p0(i64 8, ptr %i5) #3
+ call void @llvm.lifetime.end.p0(ptr %i5) #3
br label %bb22
bb22: ; preds = %bb13
@@ -102,12 +102,12 @@ bb22: ; preds = %bb13
bb25: ; preds = %bb12
%i26 = load ptr, ptr %i2, align 8, !tbaa !9
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %i26, ptr align 4 %i3, i64 8, i1 false), !tbaa.struct !13
- call void @llvm.lifetime.end.p0(i64 8, ptr %i3) #3
+ call void @llvm.lifetime.end.p0(ptr %i3) #3
ret void
}
; Function Attrs: argmemonly nocallback nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
; Function Attrs: argmemonly nocallback nofree nounwind willreturn
declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg) #2
@@ -126,7 +126,7 @@ bb:
}
; Function Attrs: argmemonly nocallback nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
; Function Attrs: mustprogress nounwind uwtable
define linkonce_odr dso_local noundef nonnull align 4 dereferenceable(4) ptr @widget(ptr noundef nonnull align 4 dereferenceable(8) %arg, i64 noundef %arg1) #0 comdat($_ZNSt14__array_traitsIiLm2EE6_S_refERA2_Kim) align 2 {
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/SROA-after-loop-unrolling.ll b/llvm/test/Transforms/PhaseOrdering/X86/SROA-after-loop-unrolling.ll
index c6dc7b3..51f2a36 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/SROA-after-loop-unrolling.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/SROA-after-loop-unrolling.ll
@@ -50,14 +50,14 @@ entry:
%__end15 = alloca ptr
%elt11 = alloca ptr
store i32 %cnt, ptr %cnt.addr
- call void @llvm.lifetime.start.p0(i64 24, ptr %arr)
- call void @llvm.lifetime.start.p0(i64 8, ptr %__range1)
+ call void @llvm.lifetime.start.p0(ptr %arr)
+ call void @llvm.lifetime.start.p0(ptr %__range1)
store ptr %arr, ptr %__range1
- call void @llvm.lifetime.start.p0(i64 8, ptr %__begin1)
+ call void @llvm.lifetime.start.p0(ptr %__begin1)
%0 = load ptr, ptr %__range1
%call = call ptr @_ZNSt5arrayIiLm6EE5beginEv(ptr %0)
store ptr %call, ptr %__begin1
- call void @llvm.lifetime.start.p0(i64 8, ptr %__end1)
+ call void @llvm.lifetime.start.p0(ptr %__end1)
%1 = load ptr, ptr %__range1
%call1 = call ptr @_ZNSt5arrayIiLm6EE3endEv(ptr %1)
store ptr %call1, ptr %__end1
@@ -70,13 +70,13 @@ for.cond:
br i1 %cmp, label %for.body, label %for.cond.cleanup
for.cond.cleanup:
- call void @llvm.lifetime.end.p0(i64 8, ptr %__end1)
- call void @llvm.lifetime.end.p0(i64 8, ptr %__begin1)
- call void @llvm.lifetime.end.p0(i64 8, ptr %__range1)
+ call void @llvm.lifetime.end.p0(ptr %__end1)
+ call void @llvm.lifetime.end.p0(ptr %__begin1)
+ call void @llvm.lifetime.end.p0(ptr %__range1)
br label %for.end
for.body:
- call void @llvm.lifetime.start.p0(i64 8, ptr %elt)
+ call void @llvm.lifetime.start.p0(ptr %elt)
%4 = load ptr, ptr %__begin1
store ptr %4, ptr %elt
%5 = load i32, ptr %cnt.addr
@@ -84,7 +84,7 @@ for.body:
store i32 %inc, ptr %cnt.addr
%6 = load ptr, ptr %elt
store i32 %inc, ptr %6
- call void @llvm.lifetime.end.p0(i64 8, ptr %elt)
+ call void @llvm.lifetime.end.p0(ptr %elt)
br label %for.inc
for.inc:
@@ -94,13 +94,13 @@ for.inc:
br label %for.cond
for.end:
- call void @llvm.lifetime.start.p0(i64 8, ptr %__range12)
+ call void @llvm.lifetime.start.p0(ptr %__range12)
store ptr %arr, ptr %__range12
- call void @llvm.lifetime.start.p0(i64 8, ptr %__begin13)
+ call void @llvm.lifetime.start.p0(ptr %__begin13)
%8 = load ptr, ptr %__range12
%call4 = call ptr @_ZNSt5arrayIiLm6EE5beginEv(ptr %8)
store ptr %call4, ptr %__begin13
- call void @llvm.lifetime.start.p0(i64 8, ptr %__end15)
+ call void @llvm.lifetime.start.p0(ptr %__end15)
%9 = load ptr, ptr %__range12
%call6 = call ptr @_ZNSt5arrayIiLm6EE3endEv(ptr %9)
store ptr %call6, ptr %__end15
@@ -113,19 +113,19 @@ for.cond7:
br i1 %cmp8, label %for.body10, label %for.cond.cleanup9
for.cond.cleanup9:
- call void @llvm.lifetime.end.p0(i64 8, ptr %__end15)
- call void @llvm.lifetime.end.p0(i64 8, ptr %__begin13)
- call void @llvm.lifetime.end.p0(i64 8, ptr %__range12)
+ call void @llvm.lifetime.end.p0(ptr %__end15)
+ call void @llvm.lifetime.end.p0(ptr %__begin13)
+ call void @llvm.lifetime.end.p0(ptr %__range12)
br label %for.end14
for.body10:
- call void @llvm.lifetime.start.p0(i64 8, ptr %elt11)
+ call void @llvm.lifetime.start.p0(ptr %elt11)
%12 = load ptr, ptr %__begin13
store ptr %12, ptr %elt11
%13 = load ptr, ptr %elt11
%14 = load i32, ptr %13
call void @_Z3usei(i32 %14)
- call void @llvm.lifetime.end.p0(i64 8, ptr %elt11)
+ call void @llvm.lifetime.end.p0(ptr %elt11)
br label %for.inc12
for.inc12:
@@ -135,11 +135,11 @@ for.inc12:
br label %for.cond7
for.end14:
- call void @llvm.lifetime.end.p0(i64 24, ptr %arr)
+ call void @llvm.lifetime.end.p0(ptr %arr)
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
define linkonce_odr dso_local ptr @_ZNSt5arrayIiLm6EE5beginEv(ptr %this) {
entry:
@@ -160,7 +160,7 @@ entry:
ret ptr %add.ptr
}
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare dso_local void @_Z3usei(i32)
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/hoist-load-of-baseptr.ll b/llvm/test/Transforms/PhaseOrdering/X86/hoist-load-of-baseptr.ll
index dfad534..00453e7 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/hoist-load-of-baseptr.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/hoist-load-of-baseptr.ll
@@ -143,7 +143,7 @@ entry:
%j = alloca i64, align 8
store ptr %data, ptr %data.addr, align 8, !tbaa !3
store i64 %numElems, ptr %numElems.addr, align 8, !tbaa !7
- call void @llvm.lifetime.start.p0(i64 8, ptr %i)
+ call void @llvm.lifetime.start.p0(ptr %i)
store i64 0, ptr %i, align 8, !tbaa !7
br label %for.cond
@@ -154,11 +154,11 @@ for.cond:
for.cond.cleanup:
store i32 2, ptr %cleanup.dest.slot, align 4
- call void @llvm.lifetime.end.p0(i64 8, ptr %i)
+ call void @llvm.lifetime.end.p0(ptr %i)
br label %for.end8
for.body:
- call void @llvm.lifetime.start.p0(i64 8, ptr %j)
+ call void @llvm.lifetime.start.p0(ptr %j)
store i64 0, ptr %j, align 8, !tbaa !7
br label %for.cond1
@@ -170,7 +170,7 @@ for.cond1:
for.cond.cleanup3:
store i32 5, ptr %cleanup.dest.slot, align 4
- call void @llvm.lifetime.end.p0(i64 8, ptr %j)
+ call void @llvm.lifetime.end.p0(ptr %j)
br label %for.end
for.body4:
@@ -201,7 +201,7 @@ for.end8:
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
define linkonce_odr dso_local noundef nonnull align 4 dereferenceable(4) ptr @_ZNSt6vectorIiSaIiEEixEm(ptr noundef nonnull align 8 dereferenceable(24) %this, i64 noundef %__n) comdat align 2 {
entry:
@@ -217,7 +217,7 @@ entry:
ret ptr %add.ptr
}
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
!0 = !{i32 1, !"wchar_size", i32 4}
!1 = !{i32 7, !"uwtable", i32 2}
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/loop-vectorizer-noalias.ll b/llvm/test/Transforms/PhaseOrdering/X86/loop-vectorizer-noalias.ll
index 1a4af5a..3c54ed9 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/loop-vectorizer-noalias.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/loop-vectorizer-noalias.ll
@@ -63,10 +63,10 @@ for.end: ; preds = %for.cond.cleanup
}
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
attributes #0 = { "target-features"="+cmov,+cx8,+fxsr,+mmx,+sse,+sse2,+x87"}
;.
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/masked-memory-ops-with-cf.ll b/llvm/test/Transforms/PhaseOrdering/X86/masked-memory-ops-with-cf.ll
index 405a26d..c649f29e 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/masked-memory-ops-with-cf.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/masked-memory-ops-with-cf.ll
@@ -13,7 +13,7 @@ define void @basic(i1 %cond, ptr %b, ptr %p, ptr %q) {
; CHECK-NEXT: [[TMP5:%.*]] = call <1 x i64> @llvm.masked.load.v1i64.p0(ptr [[B:%.*]], i32 8, <1 x i1> [[TMP0]], <1 x i64> poison)
; CHECK-NEXT: [[TMP6:%.*]] = bitcast <1 x i64> [[TMP5]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = bitcast i16 [[TMP2]] to <1 x i16>
-; CHECK-NEXT: call void @llvm.masked.store.v1i16.p0(<1 x i16> [[TMP7]], ptr [[B]], i32 2, <1 x i1> [[TMP0]])
+; CHECK-NEXT: call void @llvm.masked.store.v1i16.p0(<1 x i16> [[TMP7]], ptr [[B]], i32 8, <1 x i1> [[TMP0]])
; CHECK-NEXT: [[TMP8:%.*]] = bitcast i32 [[TMP4]] to <1 x i32>
; CHECK-NEXT: call void @llvm.masked.store.v1i32.p0(<1 x i32> [[TMP8]], ptr [[P]], i32 4, <1 x i1> [[TMP0]])
; CHECK-NEXT: [[TMP9:%.*]] = bitcast i64 [[TMP6]] to <1 x i64>
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/pr61061.ll b/llvm/test/Transforms/PhaseOrdering/X86/pr61061.ll
index 362708b..0c58705 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/pr61061.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/pr61061.ll
@@ -13,7 +13,7 @@ define <2 x i64> @PR61061(<2 x i64> noundef %vect) {
; CHECK-NEXT: ret <2 x i64> [[TMP1]]
;
%ptr = alloca <2 x i64>, align 16
- call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %ptr)
+ call void @llvm.lifetime.start.p0(ptr nonnull %ptr)
%bc0 = bitcast <2 x i64> %vect to <16 x i8>
%bc1 = bitcast <2 x i64> %vect to <16 x i8>
%bc2 = bitcast <2 x i64> %vect to <16 x i8>
@@ -62,8 +62,8 @@ define <2 x i64> @PR61061(<2 x i64> noundef %vect) {
store i8 %elt2, ptr %ptr14, align 2
store i8 %elt3, ptr %ptr15, align 1
%base = load <2 x i64>, ptr %ptr, align 16
- call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %ptr)
+ call void @llvm.lifetime.end.p0(ptr nonnull %ptr)
ret <2 x i64> %base
}
-declare void @llvm.lifetime.start.p0(i64, ptr)
-declare void @llvm.lifetime.end.p0(i64, ptr)
+declare void @llvm.lifetime.start.p0(ptr)
+declare void @llvm.lifetime.end.p0(ptr)
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/preserve-access-group.ll b/llvm/test/Transforms/PhaseOrdering/X86/preserve-access-group.ll
index be7f4c2..cb37846 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/preserve-access-group.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/preserve-access-group.ll
@@ -79,9 +79,9 @@ entry:
store ptr %face_cell, ptr %face_cell.addr, align 8, !tbaa !10
store ptr %x, ptr %x.addr, align 8, !tbaa !10
store ptr %y, ptr %y.addr, align 8, !tbaa !10
- call void @llvm.lifetime.start.p0(i64 4, ptr %il) #3
- call void @llvm.lifetime.start.p0(i64 4, ptr %ir) #3
- call void @llvm.lifetime.start.p0(i64 4, ptr %iface) #3
+ call void @llvm.lifetime.start.p0(ptr %il) #3
+ call void @llvm.lifetime.start.p0(ptr %ir) #3
+ call void @llvm.lifetime.start.p0(ptr %iface) #3
store i32 0, ptr %iface, align 4, !tbaa !6
br label %for.cond
@@ -92,7 +92,7 @@ for.cond:
br i1 %cmp, label %for.body, label %for.cond.cleanup
for.cond.cleanup:
- call void @llvm.lifetime.end.p0(i64 4, ptr %iface) #3, !llvm.access.group !12
+ call void @llvm.lifetime.end.p0(ptr %iface) #3, !llvm.access.group !12
br label %for.end
for.body:
@@ -134,12 +134,12 @@ for.inc:
br label %for.cond, !llvm.loop !15
for.end:
- call void @llvm.lifetime.end.p0(i64 4, ptr %ir) #3
- call void @llvm.lifetime.end.p0(i64 4, ptr %il) #3
+ call void @llvm.lifetime.end.p0(ptr %ir) #3
+ call void @llvm.lifetime.end.p0(ptr %il) #3
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
define linkonce_odr noundef nonnull align 8 dereferenceable(8) ptr @max(ptr noundef nonnull align 8 dereferenceable(8) %__a, ptr noundef nonnull align 8 dereferenceable(8) %__b) #2 {
entry:
@@ -170,7 +170,7 @@ return:
ret ptr %6
}
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
attributes #0 = { mustprogress "target-cpu" = "skylake-avx512" }
attributes #1 = { nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) }
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/vdiv-nounroll.ll b/llvm/test/Transforms/PhaseOrdering/X86/vdiv-nounroll.ll
index f60bc26..69a46b2 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/vdiv-nounroll.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/vdiv-nounroll.ll
@@ -39,7 +39,7 @@ entry:
%i = alloca i32, align 4
store ptr %a, ptr %a.addr, align 8, !tbaa !3
store float %b, ptr %b.addr, align 4, !tbaa !7
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2
+ call void @llvm.lifetime.start.p0(ptr %i) #2
store i32 0, ptr %i, align 4, !tbaa !9
br label %for.cond
@@ -49,7 +49,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.cond.cleanup
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2
+ call void @llvm.lifetime.end.p0(ptr %i) #2
br label %for.end
for.body: ; preds = %for.cond
@@ -73,8 +73,8 @@ for.end: ; preds = %for.cond.cleanup
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
attributes #0 = { nounwind ssp uwtable "frame-pointer"="all" "min-legal-vector-width"="0" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+cx8,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "tune-cpu"="generic" "unsafe-fp-math"="true" }
attributes #1 = { argmemonly nofree nosync nounwind willreturn }
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/vec-load-combine.ll b/llvm/test/Transforms/PhaseOrdering/X86/vec-load-combine.ll
index 85f6fce..f6e8fcd 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/vec-load-combine.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/vec-load-combine.ll
@@ -75,7 +75,7 @@ entry:
%ref.tmp7 = alloca %union.ElementWiseAccess, align 16
%ref.tmp12 = alloca %union.ElementWiseAccess, align 16
store ptr %V, ptr %V.addr, align 8
- call void @llvm.lifetime.start.p0(i64 16, ptr %ref.tmp) #4
+ call void @llvm.lifetime.start.p0(ptr %ref.tmp) #4
%0 = load ptr, ptr %V.addr, align 8
%call = call { double, double } @castToElementWiseAccess_ByVal(ptr noundef nonnull align 16 dereferenceable(16) %0)
%coerce.dive = getelementptr inbounds %union.ElementWiseAccess, ptr %ref.tmp, i32 0, i32 0
@@ -87,7 +87,7 @@ entry:
store double %4, ptr %3, align 8
%call1 = call noundef float @ElementWiseAccess5getAt(ptr noundef nonnull align 16 dereferenceable(16) %ref.tmp, i32 noundef 0)
%vecinit = insertelement <4 x float> undef, float %call1, i32 0
- call void @llvm.lifetime.start.p0(i64 16, ptr %ref.tmp2) #4
+ call void @llvm.lifetime.start.p0(ptr %ref.tmp2) #4
%5 = load ptr, ptr %V.addr, align 8
%call3 = call { double, double } @castToElementWiseAccess_ByVal(ptr noundef nonnull align 16 dereferenceable(16) %5)
%coerce.dive4 = getelementptr inbounds %union.ElementWiseAccess, ptr %ref.tmp2, i32 0, i32 0
@@ -99,7 +99,7 @@ entry:
store double %9, ptr %8, align 8
%call5 = call noundef float @ElementWiseAccess5getAt(ptr noundef nonnull align 16 dereferenceable(16) %ref.tmp2, i32 noundef 1)
%vecinit6 = insertelement <4 x float> %vecinit, float %call5, i32 1
- call void @llvm.lifetime.start.p0(i64 16, ptr %ref.tmp7) #4
+ call void @llvm.lifetime.start.p0(ptr %ref.tmp7) #4
%10 = load ptr, ptr %V.addr, align 8
%call8 = call { double, double } @castToElementWiseAccess_ByVal(ptr noundef nonnull align 16 dereferenceable(16) %10)
%coerce.dive9 = getelementptr inbounds %union.ElementWiseAccess, ptr %ref.tmp7, i32 0, i32 0
@@ -111,7 +111,7 @@ entry:
store double %14, ptr %13, align 8
%call10 = call noundef float @ElementWiseAccess5getAt(ptr noundef nonnull align 16 dereferenceable(16) %ref.tmp7, i32 noundef 2)
%vecinit11 = insertelement <4 x float> %vecinit6, float %call10, i32 2
- call void @llvm.lifetime.start.p0(i64 16, ptr %ref.tmp12) #4
+ call void @llvm.lifetime.start.p0(ptr %ref.tmp12) #4
%15 = load ptr, ptr %V.addr, align 8
%call13 = call { double, double } @castToElementWiseAccess_ByVal(ptr noundef nonnull align 16 dereferenceable(16) %15)
%coerce.dive14 = getelementptr inbounds %union.ElementWiseAccess, ptr %ref.tmp12, i32 0, i32 0
@@ -125,10 +125,10 @@ entry:
%vecinit16 = insertelement <4 x float> %vecinit11, float %call15, i32 3
store <4 x float> %vecinit16, ptr %.compoundliteral, align 16
%20 = load <4 x float>, ptr %.compoundliteral, align 16
- call void @llvm.lifetime.end.p0(i64 16, ptr %ref.tmp12) #4
- call void @llvm.lifetime.end.p0(i64 16, ptr %ref.tmp7) #4
- call void @llvm.lifetime.end.p0(i64 16, ptr %ref.tmp2) #4
- call void @llvm.lifetime.end.p0(i64 16, ptr %ref.tmp) #4
+ call void @llvm.lifetime.end.p0(ptr %ref.tmp12) #4
+ call void @llvm.lifetime.end.p0(ptr %ref.tmp7) #4
+ call void @llvm.lifetime.end.p0(ptr %ref.tmp2) #4
+ call void @llvm.lifetime.end.p0(ptr %ref.tmp) #4
ret <4 x float> %20
}
@@ -144,8 +144,8 @@ entry:
ret { double, double } %1
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #2
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.start.p0(ptr nocapture) #2
+declare void @llvm.lifetime.end.p0(ptr nocapture) #2
declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg) #3
define internal noundef nonnull align 16 dereferenceable(16) ptr @castToElementWiseAccess_ByRef(ptr noundef nonnull align 16 dereferenceable(16) %0) #1 {
diff --git a/llvm/test/Transforms/PhaseOrdering/assume-explosion.ll b/llvm/test/Transforms/PhaseOrdering/assume-explosion.ll
index 4d0f039..dd9ead4 100644
--- a/llvm/test/Transforms/PhaseOrdering/assume-explosion.ll
+++ b/llvm/test/Transforms/PhaseOrdering/assume-explosion.ll
@@ -130,8 +130,8 @@ for.end34:
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
attributes #0 = { nounwind ssp uwtable "frame-pointer"="all" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+cx8,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "tune-cpu"="generic" }
attributes #1 = { argmemonly nofree nosync nounwind willreturn }
diff --git a/llvm/test/Transforms/PhaseOrdering/dse-ephemeral-value-captures.ll b/llvm/test/Transforms/PhaseOrdering/dse-ephemeral-value-captures.ll
index 1297dbe..9a6cad4 100644
--- a/llvm/test/Transforms/PhaseOrdering/dse-ephemeral-value-captures.ll
+++ b/llvm/test/Transforms/PhaseOrdering/dse-ephemeral-value-captures.ll
@@ -1,9 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
; RUN: opt -passes='function(dse),cgscc(inline),function(sroa,gvn,sccp)' -S %s | FileCheck %s
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @llvm.assume(i1 noundef)
@@ -35,10 +35,10 @@ define i32 @test() {
;
entry:
%a = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %a)
+ call void @llvm.lifetime.start.p0(ptr nonnull %a)
store i32 1, ptr %a, align 4
%res = call i1 @check_cond(ptr %a)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
call void @llvm.assume(i1 %res)
ret i32 0
}
diff --git a/llvm/test/Transforms/PhaseOrdering/gvn-replacement-vs-hoist.ll b/llvm/test/Transforms/PhaseOrdering/gvn-replacement-vs-hoist.ll
index ae98fe6..c6d1cbd 100644
--- a/llvm/test/Transforms/PhaseOrdering/gvn-replacement-vs-hoist.ll
+++ b/llvm/test/Transforms/PhaseOrdering/gvn-replacement-vs-hoist.ll
@@ -35,7 +35,7 @@ entry:
%i = alloca i32, align 4
store ptr %a, ptr %a.addr, align 8
store i32 %beam, ptr %beam.addr, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %i)
+ call void @llvm.lifetime.start.p0(ptr %i)
store i32 0, ptr %i, align 4
br label %for.cond
@@ -45,7 +45,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.cond.cleanup
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i)
+ call void @llvm.lifetime.end.p0(ptr %i)
br label %for.end
for.body: ; preds = %for.cond
@@ -85,6 +85,6 @@ for.end: ; preds = %for.cond.cleanup
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
diff --git a/llvm/test/Transforms/PhaseOrdering/instcombine-sroa-inttoptr.ll b/llvm/test/Transforms/PhaseOrdering/instcombine-sroa-inttoptr.ll
index cc20233a..84cbad3 100644
--- a/llvm/test/Transforms/PhaseOrdering/instcombine-sroa-inttoptr.ll
+++ b/llvm/test/Transforms/PhaseOrdering/instcombine-sroa-inttoptr.ll
@@ -73,23 +73,23 @@ bb:
%i = alloca %0, align 8
%i1 = alloca %0, align 8
%i2 = alloca %0, align 8
- call void @llvm.lifetime.start.p0(i64 24, ptr %i)
+ call void @llvm.lifetime.start.p0(ptr %i)
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %i1, ptr align 8 %arg, i64 24, i1 false)
call void @_Z3gen1S(ptr sret(%0) align 8 %i, ptr byval(%0) align 8 %i1)
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %i2, ptr align 8 %i, i64 24, i1 false)
call void @_Z7escape01S(ptr byval(%0) align 8 %i2)
%i9 = load ptr, ptr %i, align 8
- call void @llvm.lifetime.end.p0(i64 24, ptr %i)
+ call void @llvm.lifetime.end.p0(ptr %i)
ret ptr %i9
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg)
declare dso_local void @_Z7escape01S(ptr byval(%0) align 8)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
define dso_local ptr @_Z3bar1S(ptr byval(%0) align 8 %arg) {
; CHECK-LABEL: @_Z3bar1S(
@@ -112,7 +112,7 @@ define dso_local ptr @_Z3bar1S(ptr byval(%0) align 8 %arg) {
bb:
%i = alloca %0, align 8
%i1 = alloca %0, align 8
- call void @llvm.lifetime.start.p0(i64 24, ptr %i)
+ call void @llvm.lifetime.start.p0(ptr %i)
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %i1, ptr align 8 %arg, i64 24, i1 false)
call void @_Z3gen1S(ptr sret(%0) align 8 %i, ptr byval(%0) align 8 %i1)
%i5 = call i32 @_Z4condv()
@@ -133,7 +133,7 @@ bb10:
bb13:
%i15 = load ptr, ptr %i, align 8
- call void @llvm.lifetime.end.p0(i64 24, ptr %i)
+ call void @llvm.lifetime.end.p0(ptr %i)
ret ptr %i15
}
diff --git a/llvm/test/Transforms/PhaseOrdering/lifetime-sanitizer.ll b/llvm/test/Transforms/PhaseOrdering/lifetime-sanitizer.ll
index 1239b18..c5dbc42 100644
--- a/llvm/test/Transforms/PhaseOrdering/lifetime-sanitizer.ll
+++ b/llvm/test/Transforms/PhaseOrdering/lifetime-sanitizer.ll
@@ -7,8 +7,8 @@
; RUN: opt < %s -passes='default<O2>' -S | FileCheck %s --check-prefixes=CHECK,OPT
; RUN: opt < %s -passes="default<O3>" -S | FileCheck %s --check-prefixes=CHECK,OPT
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @foo(ptr nocapture)
define void @asan() sanitize_address {
@@ -16,8 +16,8 @@ entry:
; CHECK-LABEL: @asan(
%text = alloca i8, align 1
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %text)
+ call void @llvm.lifetime.end.p0(ptr %text)
; CHECK: call void @llvm.lifetime.start
; CHECK-NEXT: call void @llvm.lifetime.end
@@ -31,8 +31,8 @@ entry:
; CHECK-LABEL: @hwasan(
%text = alloca i8, align 1
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %text)
+ call void @llvm.lifetime.end.p0(ptr %text)
; CHECK: call void @llvm.lifetime.start
; CHECK-NEXT: call void @llvm.lifetime.end
@@ -46,8 +46,8 @@ entry:
; CHECK-LABEL: @msan(
%text = alloca i8, align 1
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %text)
+ call void @llvm.lifetime.end.p0(ptr %text)
; CHECK: call void @llvm.lifetime.start
; CHECK-NEXT: call void @llvm.lifetime.end
@@ -61,8 +61,8 @@ entry:
; CHECK-LABEL: @no_asan(
%text = alloca i8, align 1
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %text)
+ call void @llvm.lifetime.end.p0(ptr %text)
; OPT-NOT: call void @llvm.lifetime
; NOOPT: call void @llvm.lifetime.start
; NOOPT-NEXT: call void @llvm.lifetime.end
diff --git a/llvm/test/Transforms/PhaseOrdering/loop-access-checks.ll b/llvm/test/Transforms/PhaseOrdering/loop-access-checks.ll
index 45f18dd..ae0e591 100644
--- a/llvm/test/Transforms/PhaseOrdering/loop-access-checks.ll
+++ b/llvm/test/Transforms/PhaseOrdering/loop-access-checks.ll
@@ -52,7 +52,7 @@ entry:
%elems.coerce.fca.1.extract = extractvalue [2 x i64] %elems.coerce, 1
%elems.coerce.fca.1.gep = getelementptr inbounds [2 x i64], ptr %elems, i64 0, i64 1
store i64 %elems.coerce.fca.1.extract, ptr %elems.coerce.fca.1.gep, align 8
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %__begin1) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %__begin1) #6
%0 = load ptr, ptr %elems, align 8
%__size_.i.i = getelementptr inbounds %"class.std::__1::span", ptr %elems, i64 0, i32 1
%1 = load i64, ptr %__size_.i.i, align 8
@@ -66,7 +66,7 @@ entry:
br i1 %cmp.not.i.i.i.i, label %error, label %check.2
check.2:
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %__end1) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %__end1) #6
%l4 = load ptr, ptr %elems, align 8
%__size_.i.i4 = getelementptr inbounds %"class.std::__1::span", ptr %elems, i64 0, i32 1
%l5 = load i64, ptr %__size_.i.i4, align 8
@@ -90,8 +90,8 @@ for.cond:
br i1 %cmp.i, label %for.body, label %for.cond.cleanup
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %__end1)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %__begin1)
+ call void @llvm.lifetime.end.p0(ptr nonnull %__end1)
+ call void @llvm.lifetime.end.p0(ptr nonnull %__begin1)
ret void
for.body: ; preds = %for.cond
@@ -115,11 +115,11 @@ for.latch:
declare void @error()
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @use(ptr noundef nonnull align 4 dereferenceable(4))
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
; -------------------------------------------------------------------------
@@ -160,11 +160,11 @@ entry:
%count = alloca i64, align 8
%i = alloca i64, align 8
store ptr %vec, ptr %vec.addr, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr %count)
+ call void @llvm.lifetime.start.p0(ptr %count)
%0 = load ptr, ptr %vec.addr, align 8
%call = call noundef i64 @alloc(ptr noundef nonnull align 8 dereferenceable(24) %0)
store i64 %call, ptr %count, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr %i)
+ call void @llvm.lifetime.start.p0(ptr %i)
store i64 0, ptr %i, align 8
br label %for.cond
@@ -175,7 +175,7 @@ for.cond:
br i1 %cmp, label %for.body, label %for.cond.cleanup
for.cond.cleanup:
- call void @llvm.lifetime.end.p0(i64 8, ptr %i)
+ call void @llvm.lifetime.end.p0(ptr %i)
br label %for.end
for.body:
@@ -194,7 +194,7 @@ for.inc:
br label %for.cond
for.end:
- call void @llvm.lifetime.end.p0(i64 8, ptr %count) #5
+ call void @llvm.lifetime.end.p0(ptr %count) #5
ret void
}
@@ -299,11 +299,11 @@ entry:
%count = alloca i64, align 8
%i = alloca i64, align 8
store ptr %vec, ptr %vec.addr, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr %count)
+ call void @llvm.lifetime.start.p0(ptr %count)
%0 = load ptr, ptr %vec.addr, align 8
%call = call noundef i64 @alloc(ptr noundef nonnull align 8 dereferenceable(24) %0)
store i64 %call, ptr %count, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr %i)
+ call void @llvm.lifetime.start.p0(ptr %i)
store i64 0, ptr %i, align 8
br label %for.cond
@@ -314,7 +314,7 @@ for.cond:
br i1 %cmp, label %for.body, label %for.cond.cleanup
for.cond.cleanup:
- call void @llvm.lifetime.end.p0(i64 8, ptr %i)
+ call void @llvm.lifetime.end.p0(ptr %i)
br label %for.end
for.body:
@@ -333,7 +333,7 @@ for.inc:
br label %for.cond
for.end:
- call void @llvm.lifetime.end.p0(i64 8, ptr %count)
+ call void @llvm.lifetime.end.p0(ptr %count)
ret void
}
@@ -376,7 +376,7 @@ entry:
%k = alloca i32, align 4
store ptr %arr, ptr %arr.addr, align 8
store i32 %len, ptr %len.addr, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #3
+ call void @llvm.lifetime.start.p0(ptr %i) #3
store i32 1, ptr %i, align 4
br label %for.cond
@@ -388,11 +388,11 @@ for.cond: ; preds = %for.inc5, %entry
for.cond.cleanup: ; preds = %for.cond
store i32 2, ptr %cleanup.dest.slot, align 4
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #3
+ call void @llvm.lifetime.end.p0(ptr %i) #3
br label %for.end6
for.body: ; preds = %for.cond
- call void @llvm.lifetime.start.p0(i64 4, ptr %k) #3
+ call void @llvm.lifetime.start.p0(ptr %k) #3
%2 = load i32, ptr %i, align 4
store i32 %2, ptr %k, align 4
br label %for.cond1
@@ -404,7 +404,7 @@ for.cond1: ; preds = %for.inc, %for.body
for.cond.cleanup3: ; preds = %for.cond1
store i32 5, ptr %cleanup.dest.slot, align 4
- call void @llvm.lifetime.end.p0(i64 4, ptr %k) #3
+ call void @llvm.lifetime.end.p0(ptr %k) #3
br label %for.end
for.body4: ; preds = %for.cond1
diff --git a/llvm/test/Transforms/PhaseOrdering/loop-rotation-vs-common-code-hoisting.ll b/llvm/test/Transforms/PhaseOrdering/loop-rotation-vs-common-code-hoisting.ll
index c6b5e5f..5ff57ea 100644
--- a/llvm/test/Transforms/PhaseOrdering/loop-rotation-vs-common-code-hoisting.ll
+++ b/llvm/test/Transforms/PhaseOrdering/loop-rotation-vs-common-code-hoisting.ll
@@ -38,8 +38,8 @@ declare void @f0()
declare void @f1()
declare void @f2()
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
define void @_Z4loopi(i32 %width) {
; HOIST-LABEL: @_Z4loopi(
@@ -100,7 +100,7 @@ if.then:
br label %return
if.end:
- call void @llvm.lifetime.start.p0(i64 4, ptr %i)
+ call void @llvm.lifetime.start.p0(ptr %i)
store i32 0, ptr %i, align 4
br label %for.cond
@@ -112,7 +112,7 @@ for.cond:
br i1 %cmp1, label %for.body, label %for.cond.cleanup
for.cond.cleanup:
- call void @llvm.lifetime.end.p0(i64 4, ptr %i)
+ call void @llvm.lifetime.end.p0(ptr %i)
br label %for.end
for.body:
diff --git a/llvm/test/Transforms/PhaseOrdering/vector-select.ll b/llvm/test/Transforms/PhaseOrdering/vector-select.ll
index 1bdd135..c228723 100644
--- a/llvm/test/Transforms/PhaseOrdering/vector-select.ll
+++ b/llvm/test/Transforms/PhaseOrdering/vector-select.ll
@@ -19,9 +19,9 @@ define <3 x float> @PR52631(<3 x float> %a, <3 x float> %b, <3 x i32> %c) {
store <4 x float> %extractVec1, ptr %b.addr, align 16
%extractVec3 = shufflevector <3 x i32> %c, <3 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
store <4 x i32> %extractVec3, ptr %c.addr, align 16
- call void @llvm.lifetime.start.p0(i64 16, ptr %zero) #2
+ call void @llvm.lifetime.start.p0(ptr %zero) #2
store <4 x i32> <i32 0, i32 0, i32 0, i32 undef>, ptr %zero, align 16
- call void @llvm.lifetime.start.p0(i64 16, ptr %mask) #2
+ call void @llvm.lifetime.start.p0(ptr %mask) #2
%loadVec4 = load <4 x i32>, ptr %zero, align 16
%extractVec6 = shufflevector <4 x i32> %loadVec4, <4 x i32> poison, <3 x i32> <i32 0, i32 1, i32 2>
%loadVec48 = load <4 x i32>, ptr %c.addr, align 16
@@ -30,7 +30,7 @@ define <3 x float> @PR52631(<3 x float> %a, <3 x float> %b, <3 x i32> %c) {
%sext = sext <3 x i1> %cmp to <3 x i32>
%extractVec10 = shufflevector <3 x i32> %sext, <3 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
store <4 x i32> %extractVec10, ptr %mask, align 16
- call void @llvm.lifetime.start.p0(i64 16, ptr %res) #2
+ call void @llvm.lifetime.start.p0(ptr %res) #2
%loadVec413 = load <4 x i32>, ptr %mask, align 16
%extractVec14 = shufflevector <4 x i32> %loadVec413, <4 x i32> poison, <3 x i32> <i32 0, i32 1, i32 2>
%loadVec416 = load <4 x float>, ptr %b.addr, align 16
@@ -51,9 +51,9 @@ define <3 x float> @PR52631(<3 x float> %a, <3 x float> %b, <3 x i32> %c) {
%extractVec32 = shufflevector <4 x i32> %loadVec431, <4 x i32> poison, <3 x i32> <i32 0, i32 1, i32 2>
%or = or <3 x i32> %and29, %extractVec32
%astype33 = bitcast <3 x i32> %or to <3 x float>
- call void @llvm.lifetime.end.p0(i64 16, ptr %res) #2
- call void @llvm.lifetime.end.p0(i64 16, ptr %mask) #2
- call void @llvm.lifetime.end.p0(i64 16, ptr %zero) #2
+ call void @llvm.lifetime.end.p0(ptr %res) #2
+ call void @llvm.lifetime.end.p0(ptr %mask) #2
+ call void @llvm.lifetime.end.p0(ptr %zero) #2
ret <3 x float> %astype33
}
@@ -112,5 +112,5 @@ for.end:
ret <4 x i32> %min.addr.0
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/exp.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/exp.ll
new file mode 100644
index 0000000..301e5da
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/exp.ll
@@ -0,0 +1,279 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -passes=slp-vectorizer -mtriple=aarch64 < %s | FileCheck %s
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+
+define void @ldexp_f32i32(ptr %x, ptr %y, i32 %exp) {
+; CHECK-LABEL: @ldexp_f32i32(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, ptr [[X]], i64 1
+; CHECK-NEXT: [[L2:%.*]] = load float, ptr [[ARRAYIDX_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, ptr [[X]], i64 2
+; CHECK-NEXT: [[L4:%.*]] = load float, ptr [[ARRAYIDX_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, ptr [[X]], i64 3
+; CHECK-NEXT: [[L6:%.*]] = load float, ptr [[ARRAYIDX_3]], align 4
+; CHECK-NEXT: [[L1:%.*]] = tail call float @llvm.ldexp.f32.i32(float [[L0]], i32 [[EXP:%.*]])
+; CHECK-NEXT: [[L3:%.*]] = tail call float @llvm.ldexp.f32.i32(float [[L2]], i32 [[EXP]])
+; CHECK-NEXT: [[L5:%.*]] = tail call float @llvm.ldexp.f32.i32(float [[L4]], i32 [[EXP]])
+; CHECK-NEXT: [[L7:%.*]] = tail call float @llvm.ldexp.f32.i32(float [[L6]], i32 [[EXP]])
+; CHECK-NEXT: store float [[L1]], ptr [[Y:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 1
+; CHECK-NEXT: store float [[L3]], ptr [[ARRAYIDX2_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 2
+; CHECK-NEXT: store float [[L5]], ptr [[ARRAYIDX2_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 3
+; CHECK-NEXT: store float [[L7]], ptr [[ARRAYIDX2_3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %l0 = load float, ptr %x, align 4
+ %arrayidx.1 = getelementptr inbounds float, ptr %x, i64 1
+ %l2 = load float, ptr %arrayidx.1, align 4
+ %arrayidx.2 = getelementptr inbounds float, ptr %x, i64 2
+ %l4 = load float, ptr %arrayidx.2, align 4
+ %arrayidx.3 = getelementptr inbounds float, ptr %x, i64 3
+ %l6 = load float, ptr %arrayidx.3, align 4
+ %l1 = tail call float @llvm.ldexp.f32.i32(float %l0, i32 %exp)
+ %l3 = tail call float @llvm.ldexp.f32.i32(float %l2, i32 %exp)
+ %l5 = tail call float @llvm.ldexp.f32.i32(float %l4, i32 %exp)
+ %l7 = tail call float @llvm.ldexp.f32.i32(float %l6, i32 %exp)
+ store float %l1, ptr %y, align 4
+ %arrayidx2.1 = getelementptr inbounds float, ptr %y, i64 1
+ store float %l3, ptr %arrayidx2.1, align 4
+ %arrayidx2.2 = getelementptr inbounds float, ptr %y, i64 2
+ store float %l5, ptr %arrayidx2.2, align 4
+ %arrayidx2.3 = getelementptr inbounds float, ptr %y, i64 3
+ store float %l7, ptr %arrayidx2.3, align 4
+ ret void
+}
+
+define void @ldexp_f64i32(ptr %x, ptr %y, i32 %exp) {
+; CHECK-LABEL: @ldexp_f64i32(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[L0:%.*]] = load double, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds double, ptr [[X]], i64 1
+; CHECK-NEXT: [[L2:%.*]] = load double, ptr [[ARRAYIDX_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds double, ptr [[X]], i64 2
+; CHECK-NEXT: [[L4:%.*]] = load double, ptr [[ARRAYIDX_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds double, ptr [[X]], i64 3
+; CHECK-NEXT: [[L6:%.*]] = load double, ptr [[ARRAYIDX_3]], align 4
+; CHECK-NEXT: [[L1:%.*]] = tail call double @llvm.ldexp.f64.i32(double [[L0]], i32 [[EXP:%.*]])
+; CHECK-NEXT: [[L3:%.*]] = tail call double @llvm.ldexp.f64.i32(double [[L2]], i32 [[EXP]])
+; CHECK-NEXT: [[L5:%.*]] = tail call double @llvm.ldexp.f64.i32(double [[L4]], i32 [[EXP]])
+; CHECK-NEXT: [[L7:%.*]] = tail call double @llvm.ldexp.f64.i32(double [[L6]], i32 [[EXP]])
+; CHECK-NEXT: store double [[L1]], ptr [[Y:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 1
+; CHECK-NEXT: store double [[L3]], ptr [[ARRAYIDX2_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 2
+; CHECK-NEXT: store double [[L5]], ptr [[ARRAYIDX2_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 3
+; CHECK-NEXT: store double [[L7]], ptr [[ARRAYIDX2_3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %l0 = load double, ptr %x, align 4
+ %arrayidx.1 = getelementptr inbounds double, ptr %x, i64 1
+ %l2 = load double, ptr %arrayidx.1, align 4
+ %arrayidx.2 = getelementptr inbounds double, ptr %x, i64 2
+ %l4 = load double, ptr %arrayidx.2, align 4
+ %arrayidx.3 = getelementptr inbounds double, ptr %x, i64 3
+ %l6 = load double, ptr %arrayidx.3, align 4
+ %l1 = tail call double @llvm.ldexp.f64.i32(double %l0, i32 %exp)
+ %l3 = tail call double @llvm.ldexp.f64.i32(double %l2, i32 %exp)
+ %l5 = tail call double @llvm.ldexp.f64.i32(double %l4, i32 %exp)
+ %l7 = tail call double @llvm.ldexp.f64.i32(double %l6, i32 %exp)
+ store double %l1, ptr %y, align 4
+ %arrayidx2.1 = getelementptr inbounds double, ptr %y, i64 1
+ store double %l3, ptr %arrayidx2.1, align 4
+ %arrayidx2.2 = getelementptr inbounds double, ptr %y, i64 2
+ store double %l5, ptr %arrayidx2.2, align 4
+ %arrayidx2.3 = getelementptr inbounds double, ptr %y, i64 3
+ store double %l7, ptr %arrayidx2.3, align 4
+ ret void
+}
+
+define void @ldexp_f32i64(ptr %x, ptr %y, i64 %exp) {
+; CHECK-LABEL: @ldexp_f32i64(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, ptr [[X]], i64 1
+; CHECK-NEXT: [[L2:%.*]] = load float, ptr [[ARRAYIDX_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, ptr [[X]], i64 2
+; CHECK-NEXT: [[L4:%.*]] = load float, ptr [[ARRAYIDX_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, ptr [[X]], i64 3
+; CHECK-NEXT: [[L6:%.*]] = load float, ptr [[ARRAYIDX_3]], align 4
+; CHECK-NEXT: [[L1:%.*]] = tail call float @llvm.ldexp.f32.i64(float [[L0]], i64 [[EXP:%.*]])
+; CHECK-NEXT: [[L3:%.*]] = tail call float @llvm.ldexp.f32.i64(float [[L2]], i64 [[EXP]])
+; CHECK-NEXT: [[L5:%.*]] = tail call float @llvm.ldexp.f32.i64(float [[L4]], i64 [[EXP]])
+; CHECK-NEXT: [[L7:%.*]] = tail call float @llvm.ldexp.f32.i64(float [[L6]], i64 [[EXP]])
+; CHECK-NEXT: store float [[L1]], ptr [[Y:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 1
+; CHECK-NEXT: store float [[L3]], ptr [[ARRAYIDX2_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 2
+; CHECK-NEXT: store float [[L5]], ptr [[ARRAYIDX2_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 3
+; CHECK-NEXT: store float [[L7]], ptr [[ARRAYIDX2_3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %l0 = load float, ptr %x, align 4
+ %arrayidx.1 = getelementptr inbounds float, ptr %x, i64 1
+ %l2 = load float, ptr %arrayidx.1, align 4
+ %arrayidx.2 = getelementptr inbounds float, ptr %x, i64 2
+ %l4 = load float, ptr %arrayidx.2, align 4
+ %arrayidx.3 = getelementptr inbounds float, ptr %x, i64 3
+ %l6 = load float, ptr %arrayidx.3, align 4
+ %l1 = tail call float @llvm.ldexp.f32.i64(float %l0, i64 %exp)
+ %l3 = tail call float @llvm.ldexp.f32.i64(float %l2, i64 %exp)
+ %l5 = tail call float @llvm.ldexp.f32.i64(float %l4, i64 %exp)
+ %l7 = tail call float @llvm.ldexp.f32.i64(float %l6, i64 %exp)
+ store float %l1, ptr %y, align 4
+ %arrayidx2.1 = getelementptr inbounds float, ptr %y, i64 1
+ store float %l3, ptr %arrayidx2.1, align 4
+ %arrayidx2.2 = getelementptr inbounds float, ptr %y, i64 2
+ store float %l5, ptr %arrayidx2.2, align 4
+ %arrayidx2.3 = getelementptr inbounds float, ptr %y, i64 3
+ store float %l7, ptr %arrayidx2.3, align 4
+ ret void
+}
+
+define void @ldexp_f64i64(ptr %x, ptr %y, i64 %exp) {
+; CHECK-LABEL: @ldexp_f64i64(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[L0:%.*]] = load double, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds double, ptr [[X]], i64 1
+; CHECK-NEXT: [[L2:%.*]] = load double, ptr [[ARRAYIDX_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds double, ptr [[X]], i64 2
+; CHECK-NEXT: [[L4:%.*]] = load double, ptr [[ARRAYIDX_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds double, ptr [[X]], i64 3
+; CHECK-NEXT: [[L6:%.*]] = load double, ptr [[ARRAYIDX_3]], align 4
+; CHECK-NEXT: [[L1:%.*]] = tail call double @llvm.ldexp.f64.i64(double [[L0]], i64 [[EXP:%.*]])
+; CHECK-NEXT: [[L3:%.*]] = tail call double @llvm.ldexp.f64.i64(double [[L2]], i64 [[EXP]])
+; CHECK-NEXT: [[L5:%.*]] = tail call double @llvm.ldexp.f64.i64(double [[L4]], i64 [[EXP]])
+; CHECK-NEXT: [[L7:%.*]] = tail call double @llvm.ldexp.f64.i64(double [[L6]], i64 [[EXP]])
+; CHECK-NEXT: store double [[L1]], ptr [[Y:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 1
+; CHECK-NEXT: store double [[L3]], ptr [[ARRAYIDX2_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 2
+; CHECK-NEXT: store double [[L5]], ptr [[ARRAYIDX2_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 3
+; CHECK-NEXT: store double [[L7]], ptr [[ARRAYIDX2_3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %l0 = load double, ptr %x, align 4
+ %arrayidx.1 = getelementptr inbounds double, ptr %x, i64 1
+ %l2 = load double, ptr %arrayidx.1, align 4
+ %arrayidx.2 = getelementptr inbounds double, ptr %x, i64 2
+ %l4 = load double, ptr %arrayidx.2, align 4
+ %arrayidx.3 = getelementptr inbounds double, ptr %x, i64 3
+ %l6 = load double, ptr %arrayidx.3, align 4
+ %l1 = tail call double @llvm.ldexp.f64.i64(double %l0, i64 %exp)
+ %l3 = tail call double @llvm.ldexp.f64.i64(double %l2, i64 %exp)
+ %l5 = tail call double @llvm.ldexp.f64.i64(double %l4, i64 %exp)
+ %l7 = tail call double @llvm.ldexp.f64.i64(double %l6, i64 %exp)
+ store double %l1, ptr %y, align 4
+ %arrayidx2.1 = getelementptr inbounds double, ptr %y, i64 1
+ store double %l3, ptr %arrayidx2.1, align 4
+ %arrayidx2.2 = getelementptr inbounds double, ptr %y, i64 2
+ store double %l5, ptr %arrayidx2.2, align 4
+ %arrayidx2.3 = getelementptr inbounds double, ptr %y, i64 3
+ store double %l7, ptr %arrayidx2.3, align 4
+ ret void
+}
+
+define void @ldexp_f32i32_i64(ptr %x, ptr %y, i32 %exp32, i64 %exp64) {
+; CHECK-LABEL: @ldexp_f32i32_i64(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, ptr [[X]], i64 1
+; CHECK-NEXT: [[L2:%.*]] = load float, ptr [[ARRAYIDX_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, ptr [[X]], i64 2
+; CHECK-NEXT: [[L4:%.*]] = load float, ptr [[ARRAYIDX_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, ptr [[X]], i64 3
+; CHECK-NEXT: [[L6:%.*]] = load float, ptr [[ARRAYIDX_3]], align 4
+; CHECK-NEXT: [[L1:%.*]] = tail call float @llvm.ldexp.f32.i32(float [[L0]], i32 [[EXP32:%.*]])
+; CHECK-NEXT: [[L3:%.*]] = tail call float @llvm.ldexp.f32.i32(float [[L2]], i32 [[EXP32]])
+; CHECK-NEXT: [[L5:%.*]] = tail call float @llvm.ldexp.f32.i64(float [[L4]], i64 [[EXP64:%.*]])
+; CHECK-NEXT: [[L7:%.*]] = tail call float @llvm.ldexp.f32.i64(float [[L6]], i64 [[EXP64]])
+; CHECK-NEXT: store float [[L1]], ptr [[Y:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 1
+; CHECK-NEXT: store float [[L3]], ptr [[ARRAYIDX2_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 2
+; CHECK-NEXT: store float [[L5]], ptr [[ARRAYIDX2_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 3
+; CHECK-NEXT: store float [[L7]], ptr [[ARRAYIDX2_3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %l0 = load float, ptr %x, align 4
+ %arrayidx.1 = getelementptr inbounds float, ptr %x, i64 1
+ %l2 = load float, ptr %arrayidx.1, align 4
+ %arrayidx.2 = getelementptr inbounds float, ptr %x, i64 2
+ %l4 = load float, ptr %arrayidx.2, align 4
+ %arrayidx.3 = getelementptr inbounds float, ptr %x, i64 3
+ %l6 = load float, ptr %arrayidx.3, align 4
+ %l1 = tail call float @llvm.ldexp.f32.i32(float %l0, i32 %exp32)
+ %l3 = tail call float @llvm.ldexp.f32.i32(float %l2, i32 %exp32)
+ %l5 = tail call float @llvm.ldexp.f32.i64(float %l4, i64 %exp64)
+ %l7 = tail call float @llvm.ldexp.f32.i64(float %l6, i64 %exp64)
+ store float %l1, ptr %y, align 4
+ %arrayidx2.1 = getelementptr inbounds float, ptr %y, i64 1
+ store float %l3, ptr %arrayidx2.1, align 4
+ %arrayidx2.2 = getelementptr inbounds float, ptr %y, i64 2
+ store float %l5, ptr %arrayidx2.2, align 4
+ %arrayidx2.3 = getelementptr inbounds float, ptr %y, i64 3
+ store float %l7, ptr %arrayidx2.3, align 4
+ ret void
+}
+
+define void @ldexp_f64_i32_i64(ptr %x, ptr %y, i32 %exp32, i64 %exp64) {
+; CHECK-LABEL: @ldexp_f64_i32_i64(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[L0:%.*]] = load double, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds double, ptr [[X]], i64 1
+; CHECK-NEXT: [[L2:%.*]] = load double, ptr [[ARRAYIDX_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds double, ptr [[X]], i64 2
+; CHECK-NEXT: [[L4:%.*]] = load double, ptr [[ARRAYIDX_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds double, ptr [[X]], i64 3
+; CHECK-NEXT: [[L6:%.*]] = load double, ptr [[ARRAYIDX_3]], align 4
+; CHECK-NEXT: [[L1:%.*]] = tail call double @llvm.ldexp.f64.i32(double [[L0]], i32 [[EXP32:%.*]])
+; CHECK-NEXT: [[L3:%.*]] = tail call double @llvm.ldexp.f64.i32(double [[L2]], i32 [[EXP32]])
+; CHECK-NEXT: [[L5:%.*]] = tail call double @llvm.ldexp.f64.i64(double [[L4]], i64 [[EXP64:%.*]])
+; CHECK-NEXT: [[L7:%.*]] = tail call double @llvm.ldexp.f64.i64(double [[L6]], i64 [[EXP64]])
+; CHECK-NEXT: store double [[L1]], ptr [[Y:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 1
+; CHECK-NEXT: store double [[L3]], ptr [[ARRAYIDX2_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 2
+; CHECK-NEXT: store double [[L5]], ptr [[ARRAYIDX2_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 3
+; CHECK-NEXT: store double [[L7]], ptr [[ARRAYIDX2_3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %l0 = load double, ptr %x, align 4
+ %arrayidx.1 = getelementptr inbounds double, ptr %x, i64 1
+ %l2 = load double, ptr %arrayidx.1, align 4
+ %arrayidx.2 = getelementptr inbounds double, ptr %x, i64 2
+ %l4 = load double, ptr %arrayidx.2, align 4
+ %arrayidx.3 = getelementptr inbounds double, ptr %x, i64 3
+ %l6 = load double, ptr %arrayidx.3, align 4
+ %l1 = tail call double @llvm.ldexp.f64.i32(double %l0, i32 %exp32)
+ %l3 = tail call double @llvm.ldexp.f64.i32(double %l2, i32 %exp32)
+ %l5 = tail call double @llvm.ldexp.f64.i64(double %l4, i64 %exp64)
+ %l7 = tail call double @llvm.ldexp.f64.i64(double %l6, i64 %exp64)
+ store double %l1, ptr %y, align 4
+ %arrayidx2.1 = getelementptr inbounds double, ptr %y, i64 1
+ store double %l3, ptr %arrayidx2.1, align 4
+ %arrayidx2.2 = getelementptr inbounds double, ptr %y, i64 2
+ store double %l5, ptr %arrayidx2.2, align 4
+ %arrayidx2.3 = getelementptr inbounds double, ptr %y, i64 3
+ store double %l7, ptr %arrayidx2.3, align 4
+ ret void
+}
+
+declare float @llvm.ldexp.f32.i32(float, i32)
+declare double @llvm.ldexp.f64.i32(double, i32)
+declare float @llvm.ldexp.f32.i64(float, i64)
+declare double @llvm.ldexp.f64.i64(double, i64)
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/fround.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/fround.ll
new file mode 100644
index 0000000..07a3fe7
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/fround.ll
@@ -0,0 +1,280 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -passes=slp-vectorizer -mtriple=aarch64 < %s | FileCheck %s
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+
+define void @lround_i32f32(ptr %x, ptr %y, i32 %n) {
+; CHECK-LABEL: @lround_i32f32(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, ptr [[X]], i64 1
+; CHECK-NEXT: [[L2:%.*]] = load float, ptr [[ARRAYIDX_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, ptr [[X]], i64 2
+; CHECK-NEXT: [[L4:%.*]] = load float, ptr [[ARRAYIDX_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, ptr [[X]], i64 3
+; CHECK-NEXT: [[L6:%.*]] = load float, ptr [[ARRAYIDX_3]], align 4
+; CHECK-NEXT: [[L1:%.*]] = tail call i32 @llvm.lround.i32.f32(float [[L0]])
+; CHECK-NEXT: [[L3:%.*]] = tail call i32 @llvm.lround.i32.f32(float [[L2]])
+; CHECK-NEXT: [[L5:%.*]] = tail call i32 @llvm.lround.i32.f32(float [[L4]])
+; CHECK-NEXT: [[L7:%.*]] = tail call i32 @llvm.lround.i32.f32(float [[L6]])
+; CHECK-NEXT: store i32 [[L1]], ptr [[Y:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 1
+; CHECK-NEXT: store i32 [[L3]], ptr [[ARRAYIDX2_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 2
+; CHECK-NEXT: store i32 [[L5]], ptr [[ARRAYIDX2_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 3
+; CHECK-NEXT: store i32 [[L7]], ptr [[ARRAYIDX2_3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %l0 = load float, ptr %x, align 4
+ %arrayidx.1 = getelementptr inbounds float, ptr %x, i64 1
+ %l2 = load float, ptr %arrayidx.1, align 4
+ %arrayidx.2 = getelementptr inbounds float, ptr %x, i64 2
+ %l4 = load float, ptr %arrayidx.2, align 4
+ %arrayidx.3 = getelementptr inbounds float, ptr %x, i64 3
+ %l6 = load float, ptr %arrayidx.3, align 4
+ %l1 = tail call i32 @llvm.lround.i32.f32(float %l0)
+ %l3 = tail call i32 @llvm.lround.i32.f32(float %l2)
+ %l5 = tail call i32 @llvm.lround.i32.f32(float %l4)
+ %l7 = tail call i32 @llvm.lround.i32.f32(float %l6)
+ store i32 %l1, ptr %y, align 4
+ %arrayidx2.1 = getelementptr inbounds i32, ptr %y, i64 1
+ store i32 %l3, ptr %arrayidx2.1, align 4
+ %arrayidx2.2 = getelementptr inbounds i32, ptr %y, i64 2
+ store i32 %l5, ptr %arrayidx2.2, align 4
+ %arrayidx2.3 = getelementptr inbounds i32, ptr %y, i64 3
+ store i32 %l7, ptr %arrayidx2.3, align 4
+ ret void
+}
+
+define void @lround_i32f64(ptr %x, ptr %y, i32 %n) {
+; CHECK-LABEL: @lround_i32f64(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[L0:%.*]] = load double, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds double, ptr [[X]], i64 1
+; CHECK-NEXT: [[L2:%.*]] = load double, ptr [[ARRAYIDX_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds double, ptr [[X]], i64 2
+; CHECK-NEXT: [[L4:%.*]] = load double, ptr [[ARRAYIDX_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds double, ptr [[X]], i64 3
+; CHECK-NEXT: [[L6:%.*]] = load double, ptr [[ARRAYIDX_3]], align 4
+; CHECK-NEXT: [[L1:%.*]] = tail call i32 @llvm.lround.i32.f64(double [[L0]])
+; CHECK-NEXT: [[L3:%.*]] = tail call i32 @llvm.lround.i32.f64(double [[L2]])
+; CHECK-NEXT: [[L5:%.*]] = tail call i32 @llvm.lround.i32.f64(double [[L4]])
+; CHECK-NEXT: [[L7:%.*]] = tail call i32 @llvm.lround.i32.f64(double [[L6]])
+; CHECK-NEXT: store i32 [[L1]], ptr [[Y:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 1
+; CHECK-NEXT: store i32 [[L3]], ptr [[ARRAYIDX2_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 2
+; CHECK-NEXT: store i32 [[L5]], ptr [[ARRAYIDX2_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 3
+; CHECK-NEXT: store i32 [[L7]], ptr [[ARRAYIDX2_3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %l0 = load double, ptr %x, align 4
+ %arrayidx.1 = getelementptr inbounds double, ptr %x, i64 1
+ %l2 = load double, ptr %arrayidx.1, align 4
+ %arrayidx.2 = getelementptr inbounds double, ptr %x, i64 2
+ %l4 = load double, ptr %arrayidx.2, align 4
+ %arrayidx.3 = getelementptr inbounds double, ptr %x, i64 3
+ %l6 = load double, ptr %arrayidx.3, align 4
+ %l1 = tail call i32 @llvm.lround.i32.f64(double %l0)
+ %l3 = tail call i32 @llvm.lround.i32.f64(double %l2)
+ %l5 = tail call i32 @llvm.lround.i32.f64(double %l4)
+ %l7 = tail call i32 @llvm.lround.i32.f64(double %l6)
+ store i32 %l1, ptr %y, align 4
+ %arrayidx2.1 = getelementptr inbounds i32, ptr %y, i64 1
+ store i32 %l3, ptr %arrayidx2.1, align 4
+ %arrayidx2.2 = getelementptr inbounds i32, ptr %y, i64 2
+ store i32 %l5, ptr %arrayidx2.2, align 4
+ %arrayidx2.3 = getelementptr inbounds i32, ptr %y, i64 3
+ store i32 %l7, ptr %arrayidx2.3, align 4
+ ret void
+}
+
+define void @lround_i64f32(ptr %x, ptr %y, i64 %n) {
+; CHECK-LABEL: @lround_i64f32(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, ptr [[X]], i64 1
+; CHECK-NEXT: [[L2:%.*]] = load float, ptr [[ARRAYIDX_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, ptr [[X]], i64 2
+; CHECK-NEXT: [[L4:%.*]] = load float, ptr [[ARRAYIDX_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, ptr [[X]], i64 3
+; CHECK-NEXT: [[L6:%.*]] = load float, ptr [[ARRAYIDX_3]], align 4
+; CHECK-NEXT: [[L1:%.*]] = tail call i64 @llvm.lround.i64.f32(float [[L0]])
+; CHECK-NEXT: [[L3:%.*]] = tail call i64 @llvm.lround.i64.f32(float [[L2]])
+; CHECK-NEXT: [[L5:%.*]] = tail call i64 @llvm.lround.i64.f32(float [[L4]])
+; CHECK-NEXT: [[L7:%.*]] = tail call i64 @llvm.lround.i64.f32(float [[L6]])
+; CHECK-NEXT: store i64 [[L1]], ptr [[Y:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 1
+; CHECK-NEXT: store i64 [[L3]], ptr [[ARRAYIDX2_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 2
+; CHECK-NEXT: store i64 [[L5]], ptr [[ARRAYIDX2_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 3
+; CHECK-NEXT: store i64 [[L7]], ptr [[ARRAYIDX2_3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %l0 = load float, ptr %x, align 4
+ %arrayidx.1 = getelementptr inbounds float, ptr %x, i64 1
+ %l2 = load float, ptr %arrayidx.1, align 4
+ %arrayidx.2 = getelementptr inbounds float, ptr %x, i64 2
+ %l4 = load float, ptr %arrayidx.2, align 4
+ %arrayidx.3 = getelementptr inbounds float, ptr %x, i64 3
+ %l6 = load float, ptr %arrayidx.3, align 4
+ %l1 = tail call i64 @llvm.lround.i64.f32(float %l0)
+ %l3 = tail call i64 @llvm.lround.i64.f32(float %l2)
+ %l5 = tail call i64 @llvm.lround.i64.f32(float %l4)
+ %l7 = tail call i64 @llvm.lround.i64.f32(float %l6)
+ store i64 %l1, ptr %y, align 4
+ %arrayidx2.1 = getelementptr inbounds i64, ptr %y, i64 1
+ store i64 %l3, ptr %arrayidx2.1, align 4
+ %arrayidx2.2 = getelementptr inbounds i64, ptr %y, i64 2
+ store i64 %l5, ptr %arrayidx2.2, align 4
+ %arrayidx2.3 = getelementptr inbounds i64, ptr %y, i64 3
+ store i64 %l7, ptr %arrayidx2.3, align 4
+ ret void
+}
+
+define void @lround_i64f64(ptr %x, ptr %y, i64 %n) {
+; CHECK-LABEL: @lround_i64f64(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[L0:%.*]] = load double, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds double, ptr [[X]], i64 1
+; CHECK-NEXT: [[L2:%.*]] = load double, ptr [[ARRAYIDX_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds double, ptr [[X]], i64 2
+; CHECK-NEXT: [[L4:%.*]] = load double, ptr [[ARRAYIDX_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds double, ptr [[X]], i64 3
+; CHECK-NEXT: [[L6:%.*]] = load double, ptr [[ARRAYIDX_3]], align 4
+; CHECK-NEXT: [[L1:%.*]] = tail call i64 @llvm.lround.i64.f64(double [[L0]])
+; CHECK-NEXT: [[L3:%.*]] = tail call i64 @llvm.lround.i64.f64(double [[L2]])
+; CHECK-NEXT: [[L5:%.*]] = tail call i64 @llvm.lround.i64.f64(double [[L4]])
+; CHECK-NEXT: [[L7:%.*]] = tail call i64 @llvm.lround.i64.f64(double [[L6]])
+; CHECK-NEXT: store i64 [[L1]], ptr [[Y:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 1
+; CHECK-NEXT: store i64 [[L3]], ptr [[ARRAYIDX2_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 2
+; CHECK-NEXT: store i64 [[L5]], ptr [[ARRAYIDX2_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 3
+; CHECK-NEXT: store i64 [[L7]], ptr [[ARRAYIDX2_3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %l0 = load double, ptr %x, align 4
+ %arrayidx.1 = getelementptr inbounds double, ptr %x, i64 1
+ %l2 = load double, ptr %arrayidx.1, align 4
+ %arrayidx.2 = getelementptr inbounds double, ptr %x, i64 2
+ %l4 = load double, ptr %arrayidx.2, align 4
+ %arrayidx.3 = getelementptr inbounds double, ptr %x, i64 3
+ %l6 = load double, ptr %arrayidx.3, align 4
+ %l1 = tail call i64 @llvm.lround.i64.f64(double %l0)
+ %l3 = tail call i64 @llvm.lround.i64.f64(double %l2)
+ %l5 = tail call i64 @llvm.lround.i64.f64(double %l4)
+ %l7 = tail call i64 @llvm.lround.i64.f64(double %l6)
+ store i64 %l1, ptr %y, align 4
+ %arrayidx2.1 = getelementptr inbounds i64, ptr %y, i64 1
+ store i64 %l3, ptr %arrayidx2.1, align 4
+ %arrayidx2.2 = getelementptr inbounds i64, ptr %y, i64 2
+ store i64 %l5, ptr %arrayidx2.2, align 4
+ %arrayidx2.3 = getelementptr inbounds i64, ptr %y, i64 3
+ store i64 %l7, ptr %arrayidx2.3, align 4
+ ret void
+}
+
+define void @llround_i64f32(ptr %x, ptr %y, i64 %n) {
+; CHECK-LABEL: @llround_i64f32(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, ptr [[X]], i64 1
+; CHECK-NEXT: [[L2:%.*]] = load float, ptr [[ARRAYIDX_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, ptr [[X]], i64 2
+; CHECK-NEXT: [[L4:%.*]] = load float, ptr [[ARRAYIDX_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, ptr [[X]], i64 3
+; CHECK-NEXT: [[L6:%.*]] = load float, ptr [[ARRAYIDX_3]], align 4
+; CHECK-NEXT: [[L1:%.*]] = tail call i64 @llvm.llround.i64.f32(float [[L0]])
+; CHECK-NEXT: [[L3:%.*]] = tail call i64 @llvm.llround.i64.f32(float [[L2]])
+; CHECK-NEXT: [[L5:%.*]] = tail call i64 @llvm.llround.i64.f32(float [[L4]])
+; CHECK-NEXT: [[L7:%.*]] = tail call i64 @llvm.llround.i64.f32(float [[L6]])
+; CHECK-NEXT: store i64 [[L1]], ptr [[Y:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 1
+; CHECK-NEXT: store i64 [[L3]], ptr [[ARRAYIDX2_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 2
+; CHECK-NEXT: store i64 [[L5]], ptr [[ARRAYIDX2_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 3
+; CHECK-NEXT: store i64 [[L7]], ptr [[ARRAYIDX2_3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %l0 = load float, ptr %x, align 4
+ %arrayidx.1 = getelementptr inbounds float, ptr %x, i64 1
+ %l2 = load float, ptr %arrayidx.1, align 4
+ %arrayidx.2 = getelementptr inbounds float, ptr %x, i64 2
+ %l4 = load float, ptr %arrayidx.2, align 4
+ %arrayidx.3 = getelementptr inbounds float, ptr %x, i64 3
+ %l6 = load float, ptr %arrayidx.3, align 4
+ %l1 = tail call i64 @llvm.llround.i64.f32(float %l0)
+ %l3 = tail call i64 @llvm.llround.i64.f32(float %l2)
+ %l5 = tail call i64 @llvm.llround.i64.f32(float %l4)
+ %l7 = tail call i64 @llvm.llround.i64.f32(float %l6)
+ store i64 %l1, ptr %y, align 4
+ %arrayidx2.1 = getelementptr inbounds i64, ptr %y, i64 1
+ store i64 %l3, ptr %arrayidx2.1, align 4
+ %arrayidx2.2 = getelementptr inbounds i64, ptr %y, i64 2
+ store i64 %l5, ptr %arrayidx2.2, align 4
+ %arrayidx2.3 = getelementptr inbounds i64, ptr %y, i64 3
+ store i64 %l7, ptr %arrayidx2.3, align 4
+ ret void
+}
+
+define void @llround_i64f64(ptr %x, ptr %y, i64 %n) {
+; CHECK-LABEL: @llround_i64f64(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[L0:%.*]] = load double, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds double, ptr [[X]], i64 1
+; CHECK-NEXT: [[L2:%.*]] = load double, ptr [[ARRAYIDX_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds double, ptr [[X]], i64 2
+; CHECK-NEXT: [[L4:%.*]] = load double, ptr [[ARRAYIDX_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds double, ptr [[X]], i64 3
+; CHECK-NEXT: [[L6:%.*]] = load double, ptr [[ARRAYIDX_3]], align 4
+; CHECK-NEXT: [[L1:%.*]] = tail call i64 @llvm.llround.i64.f64(double [[L0]])
+; CHECK-NEXT: [[L3:%.*]] = tail call i64 @llvm.llround.i64.f64(double [[L2]])
+; CHECK-NEXT: [[L5:%.*]] = tail call i64 @llvm.llround.i64.f64(double [[L4]])
+; CHECK-NEXT: [[L7:%.*]] = tail call i64 @llvm.llround.i64.f64(double [[L6]])
+; CHECK-NEXT: store i64 [[L1]], ptr [[Y:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 1
+; CHECK-NEXT: store i64 [[L3]], ptr [[ARRAYIDX2_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 2
+; CHECK-NEXT: store i64 [[L5]], ptr [[ARRAYIDX2_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 3
+; CHECK-NEXT: store i64 [[L7]], ptr [[ARRAYIDX2_3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %l0 = load double, ptr %x, align 4
+ %arrayidx.1 = getelementptr inbounds double, ptr %x, i64 1
+ %l2 = load double, ptr %arrayidx.1, align 4
+ %arrayidx.2 = getelementptr inbounds double, ptr %x, i64 2
+ %l4 = load double, ptr %arrayidx.2, align 4
+ %arrayidx.3 = getelementptr inbounds double, ptr %x, i64 3
+ %l6 = load double, ptr %arrayidx.3, align 4
+ %l1 = tail call i64 @llvm.llround.i64.f64(double %l0)
+ %l3 = tail call i64 @llvm.llround.i64.f64(double %l2)
+ %l5 = tail call i64 @llvm.llround.i64.f64(double %l4)
+ %l7 = tail call i64 @llvm.llround.i64.f64(double %l6)
+ store i64 %l1, ptr %y, align 4
+ %arrayidx2.1 = getelementptr inbounds i64, ptr %y, i64 1
+ store i64 %l3, ptr %arrayidx2.1, align 4
+ %arrayidx2.2 = getelementptr inbounds i64, ptr %y, i64 2
+ store i64 %l5, ptr %arrayidx2.2, align 4
+ %arrayidx2.3 = getelementptr inbounds i64, ptr %y, i64 3
+ store i64 %l7, ptr %arrayidx2.3, align 4
+ ret void
+}
+
+declare i32 @llvm.lround.i32.f32(float)
+declare i64 @llvm.lround.i64.f32(float)
+declare i64 @llvm.lround.i64.f64(double)
+declare i64 @llvm.llround.i64.f32(float)
+declare i64 @llvm.llround.i64.f64(double)
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/pr35497.ll b/llvm/test/Transforms/SLPVectorizer/X86/pr35497.ll
index 9fbe0a5..ea637bb 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/pr35497.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/pr35497.ll
@@ -8,119 +8,134 @@
%"class.3" = type { %"struct.1", i64 }
%"struct.1" = type { [8 x i64] }
-$_ZN1C10SwitchModeEv = comdat any
-
; Function Attrs: uwtable
-define void @_ZN1C10SwitchModeEv() local_unnamed_addr #0 comdat align 2 {
+define void @_ZN1C10SwitchModeEv(ptr %p, i64 %c) {
; SSE-LABEL: @_ZN1C10SwitchModeEv(
; SSE-NEXT: for.body.lr.ph.i:
-; SSE-NEXT: [[OR_1:%.*]] = or i64 undef, 1
-; SSE-NEXT: store i64 [[OR_1]], ptr undef, align 8
-; SSE-NEXT: [[FOO_3:%.*]] = load i64, ptr undef, align 8
-; SSE-NEXT: [[FOO_2:%.*]] = getelementptr inbounds [[CLASS_1:%.*]], ptr undef, i64 0, i32 0, i32 0, i32 0, i32 0, i64 1
+; SSE-NEXT: [[BAR5:%.*]] = or i64 [[C:%.*]], 1
+; SSE-NEXT: store i64 [[BAR5]], ptr [[FOO_2:%.*]], align 8
; SSE-NEXT: [[FOO_4:%.*]] = load i64, ptr [[FOO_2]], align 8
-; SSE-NEXT: [[BAR5:%.*]] = load i64, ptr undef, align 8
-; SSE-NEXT: [[AND_2:%.*]] = and i64 [[OR_1]], [[FOO_3]]
+; SSE-NEXT: [[FOO_3:%.*]] = getelementptr inbounds [[CLASS_1:%.*]], ptr [[FOO_2]], i64 0, i32 0, i32 0, i32 0, i32 0, i64 1
+; SSE-NEXT: [[FOO_5:%.*]] = load i64, ptr [[FOO_3]], align 8
+; SSE-NEXT: [[BAR6:%.*]] = load i64, ptr [[FOO_2]], align 8
; SSE-NEXT: [[AND_1:%.*]] = and i64 [[BAR5]], [[FOO_4]]
-; SSE-NEXT: store i64 [[AND_2]], ptr undef, align 8
-; SSE-NEXT: [[BAR4:%.*]] = getelementptr inbounds [[CLASS_2:%.*]], ptr undef, i64 0, i32 0, i32 0, i32 0, i64 1
-; SSE-NEXT: store i64 [[AND_1]], ptr [[BAR4]], align 8
+; SSE-NEXT: [[AND_2:%.*]] = and i64 [[BAR6]], [[FOO_5]]
+; SSE-NEXT: store i64 [[AND_1]], ptr [[FOO_2]], align 8
+; SSE-NEXT: [[BAR4:%.*]] = getelementptr inbounds [[CLASS_2:%.*]], ptr [[FOO_2]], i64 0, i32 0, i32 0, i32 0, i64 1
+; SSE-NEXT: store i64 [[AND_2]], ptr [[BAR4]], align 8
; SSE-NEXT: ret void
;
; AVX-LABEL: @_ZN1C10SwitchModeEv(
; AVX-NEXT: for.body.lr.ph.i:
-; AVX-NEXT: [[OR_1:%.*]] = or i64 undef, 1
-; AVX-NEXT: store i64 [[OR_1]], ptr undef, align 8
-; AVX-NEXT: [[BAR5:%.*]] = load i64, ptr undef, align 8
-; AVX-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr undef, align 8
+; AVX-NEXT: [[OR_1:%.*]] = or i64 [[C:%.*]], 1
+; AVX-NEXT: store i64 [[OR_1]], ptr [[P:%.*]], align 8
+; AVX-NEXT: [[BAR5:%.*]] = load i64, ptr [[P]], align 8
+; AVX-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr [[P]], align 8
; AVX-NEXT: [[TMP1:%.*]] = insertelement <2 x i64> poison, i64 [[OR_1]], i32 0
; AVX-NEXT: [[TMP2:%.*]] = insertelement <2 x i64> [[TMP1]], i64 [[BAR5]], i32 1
; AVX-NEXT: [[TMP3:%.*]] = and <2 x i64> [[TMP2]], [[TMP0]]
-; AVX-NEXT: store <2 x i64> [[TMP3]], ptr undef, align 8
+; AVX-NEXT: store <2 x i64> [[TMP3]], ptr [[P]], align 8
; AVX-NEXT: ret void
;
for.body.lr.ph.i:
- %or.1 = or i64 undef, 1
- store i64 %or.1, ptr undef, align 8
- %foo.3 = load i64, ptr undef, align 8
- %foo.2 = getelementptr inbounds %class.1, ptr undef, i64 0, i32 0, i32 0, i32 0, i32 0, i64 1
+ %or.1 = or i64 %c, 1
+ store i64 %or.1, ptr %p, align 8
+ %foo.3 = load i64, ptr %p, align 8
+ %foo.2 = getelementptr inbounds %class.1, ptr %p, i64 0, i32 0, i32 0, i32 0, i32 0, i64 1
%foo.4 = load i64, ptr %foo.2, align 8
- %bar5 = load i64, ptr undef, align 8
+ %bar5 = load i64, ptr %p, align 8
%and.2 = and i64 %or.1, %foo.3
%and.1 = and i64 %bar5, %foo.4
- store i64 %and.2, ptr undef, align 8
- %bar4 = getelementptr inbounds %class.2, ptr undef, i64 0, i32 0, i32 0, i32 0, i64 1
+ store i64 %and.2, ptr %p, align 8
+ %bar4 = getelementptr inbounds %class.2, ptr %p, i64 0, i32 0, i32 0, i32 0, i64 1
store i64 %and.1, ptr %bar4, align 8
ret void
}
; Function Attrs: norecurse nounwind uwtable
-define void @pr35497() local_unnamed_addr #0 {
+define void @pr35497(ptr %p, i64 %c) {
; SSE-LABEL: @pr35497(
; SSE-NEXT: entry:
-; SSE-NEXT: [[TMP0:%.*]] = load i64, ptr undef, align 1
-; SSE-NEXT: [[ADD:%.*]] = add i64 undef, undef
-; SSE-NEXT: store i64 [[ADD]], ptr undef, align 1
-; SSE-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds [0 x i64], ptr undef, i64 0, i64 4
-; SSE-NEXT: [[TMP1:%.*]] = insertelement <2 x i64> <i64 poison, i64 undef>, i64 [[TMP0]], i32 0
-; SSE-NEXT: [[TMP2:%.*]] = shl <2 x i64> [[TMP1]], splat (i64 2)
-; SSE-NEXT: [[TMP3:%.*]] = and <2 x i64> [[TMP2]], splat (i64 20)
-; SSE-NEXT: [[TMP4:%.*]] = shufflevector <2 x i64> [[TMP3]], <2 x i64> poison, <2 x i32> <i32 1, i32 0>
-; SSE-NEXT: [[TMP5:%.*]] = add nuw nsw <2 x i64> [[TMP4]], zeroinitializer
-; SSE-NEXT: store <2 x i64> [[TMP5]], ptr undef, align 1
-; SSE-NEXT: [[TMP6:%.*]] = shufflevector <2 x i64> [[TMP5]], <2 x i64> poison, <2 x i32> <i32 1, i32 poison>
-; SSE-NEXT: [[TMP7:%.*]] = insertelement <2 x i64> [[TMP6]], i64 [[ADD]], i32 1
-; SSE-NEXT: [[TMP8:%.*]] = shl <2 x i64> [[TMP7]], splat (i64 2)
-; SSE-NEXT: [[TMP9:%.*]] = and <2 x i64> [[TMP8]], splat (i64 20)
-; SSE-NEXT: [[TMP10:%.*]] = lshr <2 x i64> [[TMP5]], splat (i64 6)
-; SSE-NEXT: [[TMP11:%.*]] = add nuw nsw <2 x i64> [[TMP9]], [[TMP10]]
-; SSE-NEXT: store <2 x i64> [[TMP11]], ptr [[ARRAYIDX2_2]], align 1
+; SSE-NEXT: [[TMP0:%.*]] = load i64, ptr [[P:%.*]], align 1
+; SSE-NEXT: [[AND:%.*]] = shl i64 [[TMP0]], 2
+; SSE-NEXT: [[SHL:%.*]] = and i64 [[AND]], 20
+; SSE-NEXT: [[ADD:%.*]] = add i64 [[C:%.*]], [[C]]
+; SSE-NEXT: store i64 [[ADD]], ptr [[P]], align 1
+; SSE-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds [0 x i64], ptr [[P]], i64 0, i64 5
+; SSE-NEXT: [[AND_1:%.*]] = shl i64 [[C]], 2
+; SSE-NEXT: [[SHL_1:%.*]] = and i64 [[AND_1]], 20
+; SSE-NEXT: [[SHR_1:%.*]] = lshr i64 [[C]], 6
+; SSE-NEXT: [[ADD_1:%.*]] = add nuw nsw i64 [[SHL]], [[SHR_1]]
+; SSE-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds [0 x i64], ptr [[P]], i64 0, i64 4
+; SSE-NEXT: [[SHR_2:%.*]] = lshr i64 [[C]], 6
+; SSE-NEXT: [[ADD_2:%.*]] = add nuw nsw i64 [[SHL_1]], [[SHR_2]]
+; SSE-NEXT: [[AND_4:%.*]] = shl i64 [[ADD]], 2
+; SSE-NEXT: [[SHL_4:%.*]] = and i64 [[AND_4]], 20
+; SSE-NEXT: [[ARRAYIDX2_5:%.*]] = getelementptr inbounds [0 x i64], ptr [[P]], i64 0, i64 1
+; SSE-NEXT: store i64 [[ADD_1]], ptr [[ARRAYIDX2_5]], align 1
+; SSE-NEXT: [[AND_5:%.*]] = shl nuw nsw i64 [[ADD_1]], 2
+; SSE-NEXT: [[SHL_5:%.*]] = and i64 [[AND_5]], 20
+; SSE-NEXT: [[SHR_5:%.*]] = lshr i64 [[ADD_1]], 6
+; SSE-NEXT: [[ADD_5:%.*]] = add nuw nsw i64 [[SHL_4]], [[SHR_5]]
+; SSE-NEXT: store i64 [[ADD_5]], ptr [[ARRAYIDX2_1]], align 1
+; SSE-NEXT: store i64 [[ADD_2]], ptr [[P]], align 1
+; SSE-NEXT: [[SHR_6:%.*]] = lshr i64 [[ADD_2]], 6
+; SSE-NEXT: [[ADD_6:%.*]] = add nuw nsw i64 [[SHL_5]], [[SHR_6]]
+; SSE-NEXT: store i64 [[ADD_6]], ptr [[ARRAYIDX2_2]], align 1
; SSE-NEXT: ret void
;
; AVX-LABEL: @pr35497(
; AVX-NEXT: entry:
-; AVX-NEXT: [[TMP0:%.*]] = load i64, ptr undef, align 1
-; AVX-NEXT: [[ADD:%.*]] = add i64 undef, undef
-; AVX-NEXT: store i64 [[ADD]], ptr undef, align 1
-; AVX-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds [0 x i64], ptr undef, i64 0, i64 4
-; AVX-NEXT: [[TMP1:%.*]] = insertelement <2 x i64> <i64 undef, i64 poison>, i64 [[TMP0]], i32 1
+; AVX-NEXT: [[TMP0:%.*]] = load i64, ptr [[P:%.*]], align 1
+; AVX-NEXT: [[TMP5:%.*]] = insertelement <2 x i64> poison, i64 [[C:%.*]], i32 0
+; AVX-NEXT: [[TMP11:%.*]] = shufflevector <2 x i64> [[TMP5]], <2 x i64> poison, <2 x i32> zeroinitializer
+; AVX-NEXT: [[TMP13:%.*]] = lshr <2 x i64> [[TMP11]], splat (i64 6)
+; AVX-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds [0 x i64], ptr [[P]], i64 0, i64 4
+; AVX-NEXT: [[ARRAYIDX2_5:%.*]] = getelementptr inbounds [0 x i64], ptr [[P]], i64 0, i64 1
+; AVX-NEXT: [[TMP1:%.*]] = insertelement <2 x i64> [[TMP11]], i64 [[TMP0]], i32 1
; AVX-NEXT: [[TMP2:%.*]] = shl <2 x i64> [[TMP1]], splat (i64 2)
; AVX-NEXT: [[TMP3:%.*]] = and <2 x i64> [[TMP2]], splat (i64 20)
-; AVX-NEXT: [[TMP4:%.*]] = add nuw nsw <2 x i64> [[TMP3]], zeroinitializer
-; AVX-NEXT: store <2 x i64> [[TMP4]], ptr undef, align 1
-; AVX-NEXT: [[TMP5:%.*]] = shufflevector <2 x i64> [[TMP4]], <2 x i64> poison, <2 x i32> <i32 1, i32 poison>
-; AVX-NEXT: [[TMP6:%.*]] = insertelement <2 x i64> [[TMP5]], i64 [[ADD]], i32 1
+; AVX-NEXT: [[TMP14:%.*]] = shufflevector <2 x i64> [[TMP3]], <2 x i64> [[TMP1]], <2 x i32> <i32 1, i32 2>
+; AVX-NEXT: [[TMP16:%.*]] = shufflevector <2 x i64> [[TMP13]], <2 x i64> [[TMP14]], <2 x i32> <i32 1, i32 3>
+; AVX-NEXT: [[TMP6:%.*]] = add <2 x i64> [[TMP14]], [[TMP16]]
+; AVX-NEXT: [[TMP17:%.*]] = extractelement <2 x i64> [[TMP6]], i32 1
+; AVX-NEXT: store i64 [[TMP17]], ptr [[P]], align 1
+; AVX-NEXT: [[TMP4:%.*]] = add nuw nsw <2 x i64> [[TMP3]], [[TMP13]]
+; AVX-NEXT: [[TMP12:%.*]] = extractelement <2 x i64> [[TMP6]], i32 0
+; AVX-NEXT: store i64 [[TMP12]], ptr [[ARRAYIDX2_5]], align 1
; AVX-NEXT: [[TMP7:%.*]] = shl <2 x i64> [[TMP6]], splat (i64 2)
; AVX-NEXT: [[TMP8:%.*]] = and <2 x i64> [[TMP7]], splat (i64 20)
+; AVX-NEXT: [[TMP15:%.*]] = extractelement <2 x i64> [[TMP4]], i32 0
+; AVX-NEXT: store i64 [[TMP15]], ptr [[P]], align 1
; AVX-NEXT: [[TMP9:%.*]] = lshr <2 x i64> [[TMP4]], splat (i64 6)
; AVX-NEXT: [[TMP10:%.*]] = add nuw nsw <2 x i64> [[TMP8]], [[TMP9]]
; AVX-NEXT: store <2 x i64> [[TMP10]], ptr [[ARRAYIDX2_2]], align 1
; AVX-NEXT: ret void
;
entry:
- %0 = load i64, ptr undef, align 1
+ %0 = load i64, ptr %p, align 1
%and = shl i64 %0, 2
%shl = and i64 %and, 20
- %add = add i64 undef, undef
- store i64 %add, ptr undef, align 1
- %arrayidx2.1 = getelementptr inbounds [0 x i64], ptr undef, i64 0, i64 5
- %and.1 = shl i64 undef, 2
+ %add = add i64 %c, %c
+ store i64 %add, ptr %p, align 1
+ %arrayidx2.1 = getelementptr inbounds [0 x i64], ptr %p, i64 0, i64 5
+ %and.1 = shl i64 %c, 2
%shl.1 = and i64 %and.1, 20
- %shr.1 = lshr i64 undef, 6
+ %shr.1 = lshr i64 %c, 6
%add.1 = add nuw nsw i64 %shl, %shr.1
- %arrayidx2.2 = getelementptr inbounds [0 x i64], ptr undef, i64 0, i64 4
- %shr.2 = lshr i64 undef, 6
+ %arrayidx2.2 = getelementptr inbounds [0 x i64], ptr %p, i64 0, i64 4
+ %shr.2 = lshr i64 %c, 6
%add.2 = add nuw nsw i64 %shl.1, %shr.2
%and.4 = shl i64 %add, 2
%shl.4 = and i64 %and.4, 20
- %arrayidx2.5 = getelementptr inbounds [0 x i64], ptr undef, i64 0, i64 1
+ %arrayidx2.5 = getelementptr inbounds [0 x i64], ptr %p, i64 0, i64 1
store i64 %add.1, ptr %arrayidx2.5, align 1
%and.5 = shl nuw nsw i64 %add.1, 2
%shl.5 = and i64 %and.5, 20
%shr.5 = lshr i64 %add.1, 6
%add.5 = add nuw nsw i64 %shl.4, %shr.5
store i64 %add.5, ptr %arrayidx2.1, align 1
- store i64 %add.2, ptr undef, align 1
+ store i64 %add.2, ptr %p, align 1
%shr.6 = lshr i64 %add.2, 6
%add.6 = add nuw nsw i64 %shl.5, %shr.6
store i64 %add.6, ptr %arrayidx2.2, align 1
diff --git a/llvm/test/Transforms/SROA/alloca-address-space.ll b/llvm/test/Transforms/SROA/alloca-address-space.ll
index 31305c8..941178f 100644
--- a/llvm/test/Transforms/SROA/alloca-address-space.ll
+++ b/llvm/test/Transforms/SROA/alloca-address-space.ll
@@ -140,7 +140,7 @@ define void @addressspace_alloca_lifetime() {
; CHECK-NEXT: ret void
;
%alloca = alloca i8, align 8, addrspace(2)
- call void @llvm.lifetime.start(i64 2, ptr addrspace(2) %alloca)
+ call void @llvm.lifetime.start(ptr addrspace(2) %alloca)
ret void
}
diff --git a/llvm/test/Transforms/SROA/basictest.ll b/llvm/test/Transforms/SROA/basictest.ll
index 3034aaa..15803f7 100644
--- a/llvm/test/Transforms/SROA/basictest.ll
+++ b/llvm/test/Transforms/SROA/basictest.ll
@@ -4,8 +4,8 @@
target datalayout = "e-p:64:64:64-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n8:16:32:64"
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
define i32 @test0() {
; CHECK-LABEL: @test0(
@@ -18,21 +18,21 @@ entry:
%a1 = alloca i32
%a2 = alloca float
- call void @llvm.lifetime.start.p0(i64 4, ptr %a1)
+ call void @llvm.lifetime.start.p0(ptr %a1)
store i32 0, ptr %a1
%v1 = load i32, ptr %a1
- call void @llvm.lifetime.end.p0(i64 4, ptr %a1)
+ call void @llvm.lifetime.end.p0(ptr %a1)
- call void @llvm.lifetime.start.p0(i64 4, ptr %a2)
+ call void @llvm.lifetime.start.p0(ptr %a2)
store float 0.0, ptr %a2
%v2 = load float , ptr %a2
%v2.int = bitcast float %v2 to i32
%sum1 = add i32 %v1, %v2.int
- call void @llvm.lifetime.end.p0(i64 4, ptr %a2)
+ call void @llvm.lifetime.end.p0(ptr %a2)
ret i32 %sum1
}
@@ -1102,7 +1102,7 @@ define void @PR14059.1(ptr %d) {
;
entry:
%X.sroa.0.i = alloca double, align 8
- call void @llvm.lifetime.start.p0(i64 -1, ptr %X.sroa.0.i)
+ call void @llvm.lifetime.start.p0(ptr %X.sroa.0.i)
; Store to the low 32-bits...
store i32 0, ptr %X.sroa.0.i, align 8
@@ -1126,7 +1126,7 @@ entry:
%accum.real.i = load double, ptr %d, align 8
%add.r.i = fadd double %accum.real.i, %X.sroa.0.0.load1.i
store double %add.r.i, ptr %d, align 8
- call void @llvm.lifetime.end.p0(i64 -1, ptr %X.sroa.0.i)
+ call void @llvm.lifetime.end.p0(ptr %X.sroa.0.i)
ret void
}
@@ -1812,7 +1812,7 @@ define void @PR25873(ptr %outData) {
;
entry:
%tmpData = alloca %struct.STest, align 8
- call void @llvm.lifetime.start.p0(i64 16, ptr %tmpData)
+ call void @llvm.lifetime.start.p0(ptr %tmpData)
store float 1.230000e+02, ptr %tmpData, align 8
%y = getelementptr inbounds %struct.STest, ptr %tmpData, i64 0, i32 0, i32 1
store float 4.560000e+02, ptr %y, align 4
@@ -1820,7 +1820,7 @@ entry:
%0 = load i64, ptr %tmpData, align 8
store i64 %0, ptr %m_posB, align 8
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %outData, ptr align 4 %tmpData, i64 16, i1 false)
- call void @llvm.lifetime.end.p0(i64 16, ptr %tmpData)
+ call void @llvm.lifetime.end.p0(ptr %tmpData)
ret void
}
@@ -1833,8 +1833,8 @@ define void @PR27999() unnamed_addr {
;
entry-block:
%0 = alloca [2 x i64], align 8
- call void @llvm.lifetime.start.p0(i64 16, ptr %0)
- call void @llvm.lifetime.end.p0(i64 8, ptr %0)
+ call void @llvm.lifetime.start.p0(ptr %0)
+ call void @llvm.lifetime.end.p0(ptr %0)
ret void
}
@@ -1846,7 +1846,7 @@ define void @PR29139() {
bb1:
%e.7.sroa.6.i = alloca i32, align 1
%e.7.sroa.6.0.load81.i = load i32, ptr %e.7.sroa.6.i, align 1
- call void @llvm.lifetime.end.p0(i64 2, ptr %e.7.sroa.6.i)
+ call void @llvm.lifetime.end.p0(ptr %e.7.sroa.6.i)
ret void
}
@@ -1898,8 +1898,8 @@ entry:
ret void
}
-declare void @llvm.lifetime.start.isVoid.i64.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.isVoid.i64.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.isVoid.i64.p0(ptr nocapture)
+declare void @llvm.lifetime.end.isVoid.i64.p0(ptr nocapture)
@array = dso_local global [10 x float] zeroinitializer, align 4
define void @test29(i32 %num, i32 %tid) {
@@ -1931,7 +1931,7 @@ define void @test29(i32 %num, i32 %tid) {
;
entry:
%ra = alloca [10 x float], align 4
- call void @llvm.lifetime.start.isVoid.i64.p0(i64 40, ptr nonnull %ra)
+ call void @llvm.lifetime.start.isVoid.i64.p0(ptr nonnull %ra)
%cmp1 = icmp sgt i32 %num, 0
br i1 %cmp1, label %bb1, label %bb7
@@ -1963,7 +1963,7 @@ bb6:
br label %bb7
bb7:
- call void @llvm.lifetime.end.isVoid.i64.p0(i64 40, ptr nonnull %ra)
+ call void @llvm.lifetime.end.isVoid.i64.p0(ptr nonnull %ra)
ret void
}
diff --git a/llvm/test/Transforms/SROA/dead-inst.ll b/llvm/test/Transforms/SROA/dead-inst.ll
index 44ae821..bf47722 100644
--- a/llvm/test/Transforms/SROA/dead-inst.ll
+++ b/llvm/test/Transforms/SROA/dead-inst.ll
@@ -47,7 +47,7 @@ define void @H(ptr noalias nocapture readnone, [2 x i64], ptr %ptr, i32 signext
; CHECK-NEXT: [[TMP21:%.*]] = phi i64 [ -1, [[TMP12]] ], [ [[TMP20]], [[TMP17]] ]
; CHECK-NEXT: [[TMP22:%.*]] = inttoptr i64 0 to ptr
; CHECK-NEXT: [[TMP23:%.*]] = sub nsw i64 [[TMP21]], [[TMP13]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[TMP3]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP3]])
; CHECK-NEXT: [[TMP24:%.*]] = icmp ult i64 [[TMP23]], 2
; CHECK-NEXT: br i1 [[TMP24]], label [[G_EXIT:%.*]], label [[TMP25:%.*]]
; CHECK: 25:
@@ -60,7 +60,7 @@ define void @H(ptr noalias nocapture readnone, [2 x i64], ptr %ptr, i32 signext
; CHECK-NEXT: call void @D(ptr nonnull sret([[CLASS_B]]) [[TMP3]], ptr nonnull dereferenceable(32) [[PTR2:%.*]])
; CHECK-NEXT: br label [[G_EXIT]]
; CHECK: G.exit:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[TMP3]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP3]])
; CHECK-NEXT: br label [[FOO]]
; CHECK: foo:
; CHECK-NEXT: ret void
@@ -105,7 +105,7 @@ a.exit:
%22 = phi i64 [ -1, %12 ], [ %21, %18 ]
%23 = load ptr, ptr %13, align 8
%24 = sub nsw i64 %22, %14
- call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %3)
+ call void @llvm.lifetime.start.p0(ptr nonnull %3)
%25 = icmp ult i64 %24, 2
br i1 %25, label %G.exit, label %26
@@ -122,7 +122,7 @@ a.exit:
br label %G.exit
G.exit:
- call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %3)
+ call void @llvm.lifetime.end.p0(ptr nonnull %3)
br label %foo
foo:
@@ -133,10 +133,10 @@ foo:
declare ptr @memchr(ptr, i32 signext, i64) local_unnamed_addr
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; CHECK-MODIFY-CFG: {{.*}}
; CHECK-PRESERVE-CFG: {{.*}}
diff --git a/llvm/test/Transforms/SROA/ignore-droppable.ll b/llvm/test/Transforms/SROA/ignore-droppable.ll
index 9c95dc0..ba581bb 100644
--- a/llvm/test/Transforms/SROA/ignore-droppable.ll
+++ b/llvm/test/Transforms/SROA/ignore-droppable.ll
@@ -3,8 +3,8 @@
; RUN: opt < %s -passes='sroa<modify-cfg>' -S | FileCheck %s --check-prefixes=CHECK,CHECK-MODIFY-CFG
declare void @llvm.assume(i1)
-declare void @llvm.lifetime.start.p0(i64 %size, ptr nocapture %ptr)
-declare void @llvm.lifetime.end.p0(i64 %size, ptr nocapture %ptr)
+declare void @llvm.lifetime.start.p0(ptr nocapture %ptr)
+declare void @llvm.lifetime.end.p0(ptr nocapture %ptr)
define void @positive_assume_uses(ptr %arg) {
; CHECK-LABEL: @positive_assume_uses(
@@ -55,10 +55,10 @@ define void @positive_gep_assume_uses() {
;
%A = alloca {i8, i16}
%B = getelementptr {i8, i16}, ptr %A, i32 0, i32 0
- call void @llvm.lifetime.start.p0(i64 2, ptr %A)
+ call void @llvm.lifetime.start.p0(ptr %A)
call void @llvm.assume(i1 true) ["align"(ptr %B, i64 8), "align"(ptr %B, i64 16)]
store {i8, i16} zeroinitializer, ptr %A
- call void @llvm.lifetime.end.p0(i64 2, ptr %A)
+ call void @llvm.lifetime.end.p0(ptr %A)
call void @llvm.assume(i1 true) ["nonnull"(ptr %B), "align"(ptr %B, i64 2)]
ret void
}
@@ -71,10 +71,10 @@ define void @positive_mixed_assume_uses() {
; CHECK-NEXT: ret void
;
%A = alloca i8
- call void @llvm.lifetime.start.p0(i64 2, ptr %A)
+ call void @llvm.lifetime.start.p0(ptr %A)
call void @llvm.assume(i1 true) ["nonnull"(ptr %A), "align"(ptr %A, i64 8), "align"(ptr %A, i64 16)]
store i8 1, ptr %A
- call void @llvm.lifetime.end.p0(i64 2, ptr %A)
+ call void @llvm.lifetime.end.p0(ptr %A)
call void @llvm.assume(i1 true) ["nonnull"(ptr %A), "align"(ptr %A, i64 2), "nonnull"(ptr %A)]
call void @llvm.assume(i1 true) ["nonnull"(ptr %A), "align"(ptr %A, i64 2), "nonnull"(ptr %A)]
ret void
diff --git a/llvm/test/Transforms/SROA/lifetime-intrinsic.ll b/llvm/test/Transforms/SROA/lifetime-intrinsic.ll
index b9e8873..668903d 100644
--- a/llvm/test/Transforms/SROA/lifetime-intrinsic.ll
+++ b/llvm/test/Transforms/SROA/lifetime-intrinsic.ll
@@ -18,14 +18,14 @@ define i16 @with_lifetime(i32 %a, i32 %b) #0 {
; CHECK-NEXT: ret i16 [[RET]]
;
%arr = alloca %i32x2, align 4
- call void @llvm.lifetime.start.p0(i64 8, ptr %arr)
+ call void @llvm.lifetime.start.p0(ptr %arr)
%p1 = getelementptr inbounds %i32x2, ptr %arr, i64 0, i32 0, i32 1
store i32 %a, ptr %arr, align 4
store i32 %b, ptr %p1, align 4
%s0 = load i16, ptr %arr, align 4
%s2 = load i16, ptr %p1, align 4
%ret = add i16 %s0, %s2
- call void @llvm.lifetime.end.p0(i64 8, ptr %arr)
+ call void @llvm.lifetime.end.p0(ptr %arr)
ret i16 %ret
}
@@ -50,9 +50,9 @@ define i16 @no_lifetime(i32 %a, i32 %b) #0 {
ret i16 %ret
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
attributes #0 = { alwaysinline nounwind }
attributes #1 = { argmemonly nounwind }
diff --git a/llvm/test/Transforms/SROA/non-capturing-call-readonly.ll b/llvm/test/Transforms/SROA/non-capturing-call-readonly.ll
index 13808b2..b86f41b 100644
--- a/llvm/test/Transforms/SROA/non-capturing-call-readonly.ll
+++ b/llvm/test/Transforms/SROA/non-capturing-call-readonly.ll
@@ -791,13 +791,13 @@ entry:
ret i32 0
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr)
+declare void @llvm.lifetime.start.p0(ptr)
define i64 @do_schedule_instrs_for_dce_after_fixups() {
; CHECK-LABEL: @do_schedule_instrs_for_dce_after_fixups(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[C:%.*]] = alloca i64, align 2
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[C]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[C]])
; CHECK-NEXT: store i64 0, ptr [[C]], align 4
; CHECK-NEXT: br label [[IF_END:%.*]]
; CHECK: if.end:
@@ -807,7 +807,7 @@ define i64 @do_schedule_instrs_for_dce_after_fixups() {
;
entry:
%c = alloca i64, align 2
- call void @llvm.lifetime.start.p0(i64 1, ptr %c)
+ call void @llvm.lifetime.start.p0(ptr %c)
store i64 0, ptr %c
br label %if.end
diff --git a/llvm/test/Transforms/SROA/pr26972.ll b/llvm/test/Transforms/SROA/pr26972.ll
index a2872c7..526db3c 100644
--- a/llvm/test/Transforms/SROA/pr26972.ll
+++ b/llvm/test/Transforms/SROA/pr26972.ll
@@ -12,11 +12,11 @@ define void @fn1() {
; CHECK-NEXT: ret void
;
%a = alloca [1073741825 x i32], align 16
- call void @llvm.lifetime.end.p0(i64 4294967300, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
ret void
}
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; CHECK-MODIFY-CFG: {{.*}}
; CHECK-PRESERVE-CFG: {{.*}}
diff --git a/llvm/test/Transforms/SROA/readonlynocapture.ll b/llvm/test/Transforms/SROA/readonlynocapture.ll
index 5752fadd..b6f7b1f 100644
--- a/llvm/test/Transforms/SROA/readonlynocapture.ll
+++ b/llvm/test/Transforms/SROA/readonlynocapture.ll
@@ -284,25 +284,25 @@ define void @incompletestruct(i1 %b, i1 %c) {
; CHECK-LABEL: @incompletestruct(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[LII:%.*]] = alloca [[STRUCT_LOADIMMEDIATEINFO:%.*]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[LII]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[LII]])
; CHECK-NEXT: [[BF_CLEAR4:%.*]] = and i32 undef, -262144
; CHECK-NEXT: [[BF_SET5:%.*]] = select i1 [[B:%.*]], i32 196608, i32 131072
; CHECK-NEXT: [[BF_SET12:%.*]] = or disjoint i32 [[BF_SET5]], [[BF_CLEAR4]]
; CHECK-NEXT: store i32 [[BF_SET12]], ptr [[LII]], align 4
; CHECK-NEXT: call void @callee(ptr [[LII]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[LII]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[LII]])
; CHECK-NEXT: ret void
;
entry:
%LII = alloca %struct.LoadImmediateInfo, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %LII)
+ call void @llvm.lifetime.start.p0(ptr nonnull %LII)
%bf.load = load i32, ptr %LII, align 4
%bf.clear4 = and i32 %bf.load, -262144
%bf.set5 = select i1 %b, i32 196608, i32 131072
%bf.set12 = or disjoint i32 %bf.set5, %bf.clear4
store i32 %bf.set12, ptr %LII, align 4
call void @callee(ptr %LII)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %LII)
+ call void @llvm.lifetime.end.p0(ptr nonnull %LII)
ret void
}
@@ -312,13 +312,13 @@ define void @incompletestruct_bb(i1 %b, i1 %c) {
; CHECK-NEXT: [[LII:%.*]] = alloca [[STRUCT_LOADIMMEDIATEINFO:%.*]], align 4
; CHECK-NEXT: br i1 [[C:%.*]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
; CHECK: if.then:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[LII]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[LII]])
; CHECK-NEXT: [[BF_CLEAR4:%.*]] = and i32 undef, -262144
; CHECK-NEXT: [[BF_SET5:%.*]] = select i1 [[B:%.*]], i32 196608, i32 131072
; CHECK-NEXT: [[BF_SET12:%.*]] = or disjoint i32 [[BF_SET5]], [[BF_CLEAR4]]
; CHECK-NEXT: store i32 [[BF_SET12]], ptr [[LII]], align 4
; CHECK-NEXT: call void @callee(ptr [[LII]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[LII]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[LII]])
; CHECK-NEXT: br label [[IF_END]]
; CHECK: if.end:
; CHECK-NEXT: ret void
@@ -328,14 +328,14 @@ entry:
br i1 %c, label %if.then, label %if.end
if.then: ; preds = %entry
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %LII)
+ call void @llvm.lifetime.start.p0(ptr nonnull %LII)
%bf.load = load i32, ptr %LII, align 4
%bf.clear4 = and i32 %bf.load, -262144
%bf.set5 = select i1 %b, i32 196608, i32 131072
%bf.set12 = or disjoint i32 %bf.set5, %bf.clear4
store i32 %bf.set12, ptr %LII, align 4
call void @callee(ptr %LII)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %LII)
+ call void @llvm.lifetime.end.p0(ptr nonnull %LII)
br label %if.end
if.end: ; preds = %if.then, %entry
@@ -459,35 +459,35 @@ define i32 @provenance_only_capture() {
define i32 @simple_with_lifetimes() {
; CHECK-LABEL: @simple_with_lifetimes(
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
; CHECK-NEXT: store i32 0, ptr [[A]], align 4
; CHECK-NEXT: call void @callee(ptr [[A]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
; CHECK-NEXT: ret i32 0
;
%a = alloca i32
- call void @llvm.lifetime.start(i64 4, ptr %a)
+ call void @llvm.lifetime.start(ptr %a)
store i32 0, ptr %a
call void @callee(ptr %a)
%l1 = load i32, ptr %a
- call void @llvm.lifetime.end(i64 4, ptr %a)
+ call void @llvm.lifetime.end(ptr %a)
ret i32 %l1
}
define i32 @twoalloc_with_lifetimes() {
; CHECK-LABEL: @twoalloc_with_lifetimes(
; CHECK-NEXT: [[A:%.*]] = alloca { i32, i32 }, align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
; CHECK-NEXT: store i32 0, ptr [[A]], align 4
; CHECK-NEXT: [[B:%.*]] = getelementptr i32, ptr [[A]], i32 1
; CHECK-NEXT: store i32 1, ptr [[B]], align 4
; CHECK-NEXT: call void @callee(ptr [[A]])
; CHECK-NEXT: [[R:%.*]] = add i32 0, 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
; CHECK-NEXT: ret i32 [[R]]
;
%a = alloca {i32, i32}
- call void @llvm.lifetime.start(i64 8, ptr %a)
+ call void @llvm.lifetime.start(ptr %a)
store i32 0, ptr %a
%b = getelementptr i32, ptr %a, i32 1
store i32 1, ptr %b
@@ -495,7 +495,7 @@ define i32 @twoalloc_with_lifetimes() {
%l1 = load i32, ptr %a
%l2 = load i32, ptr %b
%r = add i32 %l1, %l2
- call void @llvm.lifetime.end(i64 8, ptr %a)
+ call void @llvm.lifetime.end(ptr %a)
ret i32 %r
}
diff --git a/llvm/test/Transforms/SROA/select-load.ll b/llvm/test/Transforms/SROA/select-load.ll
index 9de7650..359ecaa 100644
--- a/llvm/test/Transforms/SROA/select-load.ll
+++ b/llvm/test/Transforms/SROA/select-load.ll
@@ -118,7 +118,7 @@ define i32 @interfering_lifetime(ptr %data, i64 %indvars.iv) {
%min = alloca i32, align 4
%arrayidx = getelementptr inbounds i32, ptr %data, i64 %indvars.iv
%i1 = load i32, ptr %arrayidx, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %min)
+ call void @llvm.lifetime.start.p0(ptr %min)
store i32 0, ptr %min, align 4
%cmp.i.i = icmp slt i32 %i1, 0
%__b.__a.i.i = select i1 %cmp.i.i, ptr %min, ptr %arrayidx
@@ -132,9 +132,9 @@ define i32 @clamp_load_to_constant_range(ptr %data, i64 %indvars.iv) {
; CHECK-PRESERVE-CFG-NEXT: [[MIN:%.*]] = alloca i32, align 4
; CHECK-PRESERVE-CFG-NEXT: [[MAX:%.*]] = alloca i32, align 4
; CHECK-PRESERVE-CFG-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[DATA:%.*]], i64 [[INDVARS_IV:%.*]]
-; CHECK-PRESERVE-CFG-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[MIN]])
+; CHECK-PRESERVE-CFG-NEXT: call void @llvm.lifetime.start.p0(ptr [[MIN]])
; CHECK-PRESERVE-CFG-NEXT: store i32 0, ptr [[MIN]], align 4
-; CHECK-PRESERVE-CFG-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[MAX]])
+; CHECK-PRESERVE-CFG-NEXT: call void @llvm.lifetime.start.p0(ptr [[MAX]])
; CHECK-PRESERVE-CFG-NEXT: store i32 4095, ptr [[MAX]], align 4
; CHECK-PRESERVE-CFG-NEXT: [[I1:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-PRESERVE-CFG-NEXT: [[CMP_I_I:%.*]] = icmp slt i32 [[I1]], 0
@@ -167,9 +167,9 @@ define i32 @clamp_load_to_constant_range(ptr %data, i64 %indvars.iv) {
%min = alloca i32, align 4
%max = alloca i32, align 4
%arrayidx = getelementptr inbounds i32, ptr %data, i64 %indvars.iv
- call void @llvm.lifetime.start.p0(i64 4, ptr %min)
+ call void @llvm.lifetime.start.p0(ptr %min)
store i32 0, ptr %min, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %max)
+ call void @llvm.lifetime.start.p0(ptr %max)
store i32 4095, ptr %max, align 4
%i1 = load i32, ptr %arrayidx, align 4
%cmp.i.i = icmp slt i32 %i1, 0
@@ -482,6 +482,6 @@ define void @load_of_select_with_noundef_nonnull(ptr %buffer, i1 %b) {
; Ensure that the branch metadata is reversed to match the reversals above.
-declare void @llvm.lifetime.start.p0(i64, ptr )
-declare void @llvm.lifetime.end.p0(i64, ptr)
+declare void @llvm.lifetime.start.p0(ptr )
+declare void @llvm.lifetime.end.p0(ptr)
declare i32 @llvm.smax.i32(i32, i32)
diff --git a/llvm/test/Transforms/SROA/vector-lifetime-intrinsic.ll b/llvm/test/Transforms/SROA/vector-lifetime-intrinsic.ll
index 561315b..60228a4 100644
--- a/llvm/test/Transforms/SROA/vector-lifetime-intrinsic.ll
+++ b/llvm/test/Transforms/SROA/vector-lifetime-intrinsic.ll
@@ -5,10 +5,10 @@
target datalayout = "e-p:64:32-i64:32-v32:32-n32-S64"
; Function Attrs: nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #0
+declare void @llvm.lifetime.start.p0(ptr nocapture) #0
; Function Attrs: nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #0
+declare void @llvm.lifetime.end.p0(ptr nocapture) #0
define void @wombat(<4 x float> %arg1) {
; CHECK-LABEL: @wombat(
@@ -19,10 +19,10 @@ define void @wombat(<4 x float> %arg1) {
;
bb:
%tmp = alloca <4 x float>, align 16
- call void @llvm.lifetime.start.p0(i64 16, ptr %tmp)
+ call void @llvm.lifetime.start.p0(ptr %tmp)
store <4 x float> %arg1, ptr %tmp, align 16
%tmp18 = load <3 x float>, ptr %tmp
- call void @llvm.lifetime.end.p0(i64 16, ptr %tmp)
+ call void @llvm.lifetime.end.p0(ptr %tmp)
call void @wombat3(<3 x float> %tmp18)
ret void
}
diff --git a/llvm/test/Transforms/SROA/vector-promotion.ll b/llvm/test/Transforms/SROA/vector-promotion.ll
index ffa758e..682e8e3 100644
--- a/llvm/test/Transforms/SROA/vector-promotion.ll
+++ b/llvm/test/Transforms/SROA/vector-promotion.ll
@@ -1534,7 +1534,7 @@ bb.5:
}
declare void @llvm.memcpy.p0.p0.i64(ptr, ptr, i64, i1)
-declare void @llvm.lifetime.end.p0(i64, ptr)
+declare void @llvm.lifetime.end.p0(ptr)
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; CHECK-MODIFY-CFG: {{.*}}
; CHECK-PRESERVE-CFG: {{.*}}
diff --git a/llvm/test/Transforms/SafeStack/ARM/debug.ll b/llvm/test/Transforms/SafeStack/ARM/debug.ll
index a8c534c..207475a 100644
--- a/llvm/test/Transforms/SafeStack/ARM/debug.ll
+++ b/llvm/test/Transforms/SafeStack/ARM/debug.ll
@@ -29,15 +29,15 @@ entry:
define void @f() local_unnamed_addr #1 !dbg !27 {
entry:
%c = alloca [16 x i8], align 1
- call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %c) #5, !dbg !35
+ call void @llvm.lifetime.start.p0(ptr nonnull %c) #5, !dbg !35
call void @llvm.dbg.declare(metadata ptr %c, metadata !31, metadata !DIExpression()), !dbg !36
call void @Capture(ptr nonnull %c) #5, !dbg !37
- call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %c) #5, !dbg !38
+ call void @llvm.lifetime.end.p0(ptr nonnull %c) #5, !dbg !38
ret void, !dbg !38
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #2
+declare void @llvm.lifetime.start.p0(ptr nocapture) #2
; Function Attrs: nounwind readnone speculatable
declare void @llvm.dbg.declare(metadata, metadata, metadata) #3
@@ -45,7 +45,7 @@ declare void @llvm.dbg.declare(metadata, metadata, metadata) #3
declare void @Capture(ptr) local_unnamed_addr #4
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #2
+declare void @llvm.lifetime.end.p0(ptr nocapture) #2
attributes #0 = { norecurse nounwind readonly safestack "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv7-a,+dsp,+neon,+vfp3,-thumb-mode" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind safestack "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv7-a,+dsp,+neon,+vfp3,-thumb-mode" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/llvm/test/Transforms/SafeStack/X86/call.ll b/llvm/test/Transforms/SafeStack/X86/call.ll
index 9592b33..f14e676 100644
--- a/llvm/test/Transforms/SafeStack/X86/call.ll
+++ b/llvm/test/Transforms/SafeStack/X86/call.ll
@@ -152,8 +152,8 @@ define void @call_lifetime(ptr %p) {
; CHECK: ret void
entry:
%q = alloca [100 x i8], align 16
- call void @llvm.lifetime.start.p0(i64 100, ptr %q)
- call void @llvm.lifetime.end.p0(i64 100, ptr %q)
+ call void @llvm.lifetime.start.p0(ptr %q)
+ call void @llvm.lifetime.end.p0(ptr %q)
ret void
}
@@ -167,5 +167,5 @@ declare void @readnone0(ptr nocapture readnone, ptr nocapture)
declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind argmemonly
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind argmemonly
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind argmemonly
+declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind argmemonly
+declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind argmemonly
diff --git a/llvm/test/Transforms/SafeStack/X86/coloring-ssp.ll b/llvm/test/Transforms/SafeStack/X86/coloring-ssp.ll
index 8ff369e..5192e47 100644
--- a/llvm/test/Transforms/SafeStack/X86/coloring-ssp.ll
+++ b/llvm/test/Transforms/SafeStack/X86/coloring-ssp.ll
@@ -14,19 +14,19 @@ entry:
%x = alloca i64, align 8
%y = alloca i64, align 8
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -16
call void @capture64(ptr %x)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -16
call void @capture64(ptr %y)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.end.p0(ptr %y)
ret void
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @capture64(ptr)
diff --git a/llvm/test/Transforms/SafeStack/X86/coloring.ll b/llvm/test/Transforms/SafeStack/X86/coloring.ll
index 22e1487..288ae00 100644
--- a/llvm/test/Transforms/SafeStack/X86/coloring.ll
+++ b/llvm/test/Transforms/SafeStack/X86/coloring.ll
@@ -11,30 +11,30 @@ entry:
%x = alloca i32, align 4
%x1 = alloca i32, align 4
%x2 = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
; CHECK: %[[A1:.*]] = getelementptr i8, ptr %[[USP]], i32 -4
; CHECK: call void @capture(ptr nonnull %[[A1]])
call void @capture(ptr nonnull %x)
- call void @llvm.lifetime.end.p0(i64 4, ptr %x)
- call void @llvm.lifetime.start.p0(i64 4, ptr %x1)
+ call void @llvm.lifetime.end.p0(ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x1)
; CHECK: %[[B1:.*]] = getelementptr i8, ptr %[[USP]], i32 -4
; CHECK: call void @capture(ptr nonnull %[[B1]])
call void @capture(ptr nonnull %x1)
- call void @llvm.lifetime.end.p0(i64 4, ptr %x1)
- call void @llvm.lifetime.start.p0(i64 4, ptr %x2)
+ call void @llvm.lifetime.end.p0(ptr %x1)
+ call void @llvm.lifetime.start.p0(ptr %x2)
; CHECK: %[[C1:.*]] = getelementptr i8, ptr %[[USP]], i32 -4
; CHECK: call void @capture(ptr nonnull %[[C1]])
call void @capture(ptr nonnull %x2)
- call void @llvm.lifetime.end.p0(i64 4, ptr %x2)
+ call void @llvm.lifetime.end.p0(ptr %x2)
ret void
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @capture(ptr)
diff --git a/llvm/test/Transforms/SafeStack/X86/coloring2.ll b/llvm/test/Transforms/SafeStack/X86/coloring2.ll
index ae5f375..a4157cb 100644
--- a/llvm/test/Transforms/SafeStack/X86/coloring2.ll
+++ b/llvm/test/Transforms/SafeStack/X86/coloring2.ll
@@ -14,21 +14,21 @@ entry:
%y = alloca i32, align 4
%z = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %z)
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %z)
+ call void @llvm.lifetime.start.p0(ptr %x)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -4
call void @capture32(ptr %x)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.end.p0(ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %y)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -4
call void @capture32(ptr %y)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.end.p0(ptr %y)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -8
call void @capture32(ptr %z)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %z)
+ call void @llvm.lifetime.end.p0(ptr %z)
ret void
}
@@ -42,11 +42,11 @@ entry:
%x = alloca i32, align 4
%y = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -4
call void @capture32(ptr %x)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -8
call void @capture32(ptr %y)
@@ -65,21 +65,21 @@ entry:
%y = alloca i32, align 4
%z = alloca i64, align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %y)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -4
call void @capture32(ptr %x)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -8
call void @capture32(ptr %y)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %y)
- call void @llvm.lifetime.start.p0(i64 -1, ptr %z)
+ call void @llvm.lifetime.end.p0(ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %z)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -8
call void @capture64(ptr %z)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %z)
+ call void @llvm.lifetime.end.p0(ptr %z)
ret void
}
@@ -95,9 +95,9 @@ entry:
%z = alloca i64, align 4
%y = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
- call void @llvm.lifetime.start.p0(i64 -1, ptr %z)
+ call void @llvm.lifetime.start.p0(ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %z)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -16
call void @capture32(ptr %x)
@@ -108,9 +108,9 @@ entry:
; CHECK: getelementptr i8, ptr %[[USP]], i32 -8
call void @capture64(ptr %z)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %y)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %z)
+ call void @llvm.lifetime.end.p0(ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %y)
+ call void @llvm.lifetime.end.p0(ptr %z)
ret void
}
@@ -147,8 +147,8 @@ entry:
%z = alloca i64, align 8
%z1 = alloca i64, align 8
%z2 = alloca i64, align 8
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x1)
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x2)
+ call void @llvm.lifetime.start.p0(ptr %x1)
+ call void @llvm.lifetime.start.p0(ptr %x2)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -8
; CHECK: call void @capture64(
call void @capture64(ptr nonnull %x1)
@@ -158,62 +158,62 @@ entry:
br i1 %a, label %if.then, label %if.else4
if.then: ; preds = %entry
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -24
; CHECK: call void @capture64(
call void @capture64(ptr nonnull %y)
br i1 %b, label %if.then3, label %if.else
if.then3: ; preds = %if.then
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y1)
+ call void @llvm.lifetime.start.p0(ptr %y1)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -32
; CHECK: call void @capture64(
call void @capture64(ptr nonnull %y1)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %y1)
+ call void @llvm.lifetime.end.p0(ptr %y1)
br label %if.end
if.else: ; preds = %if.then
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y2)
+ call void @llvm.lifetime.start.p0(ptr %y2)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -32
; CHECK: call void @capture64(
call void @capture64(ptr nonnull %y2)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %y2)
+ call void @llvm.lifetime.end.p0(ptr %y2)
br label %if.end
if.end: ; preds = %if.else, %if.then3
- call void @llvm.lifetime.end.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.end.p0(ptr %y)
br label %if.end9
if.else4: ; preds = %entry
- call void @llvm.lifetime.start.p0(i64 -1, ptr %z)
+ call void @llvm.lifetime.start.p0(ptr %z)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -24
; CHECK: call void @capture64(
call void @capture64(ptr nonnull %z)
br i1 %b, label %if.then6, label %if.else7
if.then6: ; preds = %if.else4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %z1)
+ call void @llvm.lifetime.start.p0(ptr %z1)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -32
; CHECK: call void @capture64(
call void @capture64(ptr nonnull %z1)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %z1)
+ call void @llvm.lifetime.end.p0(ptr %z1)
br label %if.end8
if.else7: ; preds = %if.else4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %z2)
+ call void @llvm.lifetime.start.p0(ptr %z2)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -32
; CHECK: call void @capture64(
call void @capture64(ptr nonnull %z2)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %z2)
+ call void @llvm.lifetime.end.p0(ptr %z2)
br label %if.end8
if.end8: ; preds = %if.else7, %if.then6
- call void @llvm.lifetime.end.p0(i64 -1, ptr %z)
+ call void @llvm.lifetime.end.p0(ptr %z)
br label %if.end9
if.end9: ; preds = %if.end8, %if.end
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x2)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x1)
+ call void @llvm.lifetime.end.p0(ptr %x2)
+ call void @llvm.lifetime.end.p0(ptr %x1)
ret void
}
@@ -225,21 +225,21 @@ entry:
; CHECK-NEXT: getelementptr i8, ptr %[[USP]], i32 -16
%x = alloca i32, align 4
%y = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -4
; CHECK: call void @capture32(
call void @capture32(ptr %x)
br i1 %d, label %bb2, label %bb3
bb2:
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -8
; CHECK: call void @capture32(
call void @capture32(ptr %y)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %y)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %y)
+ call void @llvm.lifetime.end.p0(ptr %x)
ret void
bb3:
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
ret void
}
@@ -250,18 +250,18 @@ entry:
; CHECK-NEXT: getelementptr i8, ptr %[[USP]], i32 -16
%x = alloca i32, align 4
%y = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -4
; CHECK: call void @capture32(
call void @capture32(ptr %x)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
br i1 %d, label %bb2, label %bb3
bb2:
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -4
; CHECK: call void @capture32(
call void @capture32(ptr %y)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.end.p0(ptr %y)
ret void
bb3:
ret void
@@ -275,14 +275,14 @@ entry:
; CHECK-NEXT: getelementptr i8, ptr %[[USP]], i32 -16
%x = alloca i32, align 4
%y = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -4
; CHECK: call void @capture32(
call void @capture32(ptr %x)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
br i1 %d, label %bb2, label %bb3
bb2:
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -4
; CHECK: call void @capture32(
call void @capture32(ptr %y)
@@ -299,14 +299,14 @@ entry:
; CHECK-NEXT: getelementptr i8, ptr %[[USP]], i32 -16
%x = alloca i32, align 4
%y = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -4
; CHECK: call void @capture32(
call void @capture32(ptr %x)
br i1 %d, label %bb2, label %bb3
bb2:
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.end.p0(ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %y)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -4
; CHECK: call void @capture32(
call void @capture32(ptr %y)
@@ -326,10 +326,10 @@ entry:
; CHECK: getelementptr i8, ptr %[[USP]], i32 -4
; CHECK: call void @capture32(
call void @capture32(ptr %x)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
br i1 %d, label %bb2, label %bb3
bb2:
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -8
; CHECK: call void @capture32(
call void @capture32(ptr %y)
@@ -347,26 +347,26 @@ entry:
%B.i2 = alloca [100 x i32], align 4
%A.i = alloca [100 x i32], align 4
%B.i = alloca [100 x i32], align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %A.i)
- call void @llvm.lifetime.start.p0(i64 -1, ptr %B.i)
+ call void @llvm.lifetime.start.p0(ptr %A.i)
+ call void @llvm.lifetime.start.p0(ptr %B.i)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -400
; CHECK: call void @capture100x32(
call void @capture100x32(ptr %A.i)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -800
; CHECK: call void @capture100x32(
call void @capture100x32(ptr %B.i)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %A.i)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %B.i)
- call void @llvm.lifetime.start.p0(i64 -1, ptr %A.i1)
- call void @llvm.lifetime.start.p0(i64 -1, ptr %B.i2)
+ call void @llvm.lifetime.end.p0(ptr %A.i)
+ call void @llvm.lifetime.end.p0(ptr %B.i)
+ call void @llvm.lifetime.start.p0(ptr %A.i1)
+ call void @llvm.lifetime.start.p0(ptr %B.i2)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -400
; CHECK: call void @capture100x32(
call void @capture100x32(ptr %A.i1)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -800
; CHECK: call void @capture100x32(
call void @capture100x32(ptr %B.i2)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %A.i1)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %B.i2)
+ call void @llvm.lifetime.end.p0(ptr %A.i1)
+ call void @llvm.lifetime.end.p0(ptr %B.i2)
ret void
}
@@ -378,11 +378,11 @@ entry:
%buf1 = alloca i8, i32 100000, align 16
%buf2 = alloca i8, i32 100000, align 16
- call void @llvm.lifetime.start.p0(i64 -1, ptr %buf1)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %buf1)
+ call void @llvm.lifetime.start.p0(ptr %buf1)
+ call void @llvm.lifetime.end.p0(ptr %buf1)
- call void @llvm.lifetime.start.p0(i64 -1, ptr %buf1)
- call void @llvm.lifetime.start.p0(i64 -1, ptr %buf2)
+ call void @llvm.lifetime.start.p0(ptr %buf1)
+ call void @llvm.lifetime.start.p0(ptr %buf2)
call void @capture8(ptr %buf1)
call void @capture8(ptr %buf2)
ret void
@@ -404,12 +404,12 @@ entry:
%B.i2 = alloca [100 x i32], align 4
%A.i = alloca [100 x i32], align 4
%B.i = alloca [100 x i32], align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %A.i) nounwind
- call void @llvm.lifetime.start.p0(i64 -1, ptr %B.i) nounwind
+ call void @llvm.lifetime.start.p0(ptr %A.i) nounwind
+ call void @llvm.lifetime.start.p0(ptr %B.i) nounwind
call void @capture100x32(ptr %A.i)
call void @capture100x32(ptr %B.i)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %A.i) nounwind
- call void @llvm.lifetime.end.p0(i64 -1, ptr %B.i) nounwind
+ call void @llvm.lifetime.end.p0(ptr %A.i) nounwind
+ call void @llvm.lifetime.end.p0(ptr %B.i) nounwind
br label %block2
block2:
@@ -429,13 +429,13 @@ entry:
%a.i = alloca [4 x %struct.Klass], align 16
%b.i = alloca [4 x %struct.Klass], align 16
; I am used outside the lifetime zone below:
- call void @llvm.lifetime.start.p0(i64 -1, ptr %a.i)
- call void @llvm.lifetime.start.p0(i64 -1, ptr %b.i)
+ call void @llvm.lifetime.start.p0(ptr %a.i)
+ call void @llvm.lifetime.start.p0(ptr %b.i)
call void @capture8(ptr %a.i)
call void @capture8(ptr %b.i)
%z3 = load i32, ptr %a.i, align 16
- call void @llvm.lifetime.end.p0(i64 -1, ptr %a.i)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %b.i)
+ call void @llvm.lifetime.end.p0(ptr %a.i)
+ call void @llvm.lifetime.end.p0(ptr %b.i)
ret i32 %z3
}
@@ -445,12 +445,12 @@ entry:
; CHECK: %[[USP:.*]] = load ptr, ptr @__safestack_unsafe_stack_ptr
; CHECK-NEXT: getelementptr i8, ptr %[[USP]], i32 -16
%x = alloca i8, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %x) nounwind
+ call void @llvm.lifetime.start.p0(ptr %x) nounwind
br label %l2
l2:
call void @capture8(ptr %x)
- call void @llvm.lifetime.end.p0(i64 4, ptr %x) nounwind
+ call void @llvm.lifetime.end.p0(ptr %x) nounwind
br label %l2
}
@@ -463,25 +463,25 @@ entry:
; CHECK-NEXT: getelementptr i8, ptr %[[USP]], i32 -16
%x = alloca i8, align 4
%y = alloca i8, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %x) nounwind
+ call void @llvm.lifetime.start.p0(ptr %x) nounwind
br label %l2
l2:
; CHECK: getelementptr i8, ptr %[[USP]], i32 -8
- call void @llvm.lifetime.start.p0(i64 4, ptr %y) nounwind
+ call void @llvm.lifetime.start.p0(ptr %y) nounwind
call void @capture8(ptr %y)
- call void @llvm.lifetime.end.p0(i64 4, ptr %y) nounwind
+ call void @llvm.lifetime.end.p0(ptr %y) nounwind
; CHECK: getelementptr i8, ptr %[[USP]], i32 -4
- call void @llvm.lifetime.start.p0(i64 4, ptr %x) nounwind
+ call void @llvm.lifetime.start.p0(ptr %x) nounwind
call void @capture8(ptr %x)
br label %l2
}
attributes #0 = { safestack }
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @capture8(ptr)
declare void @capture32(ptr)
declare void @capture64(ptr)
diff --git a/llvm/test/Transforms/SafeStack/X86/debug-loc2.ll b/llvm/test/Transforms/SafeStack/X86/debug-loc2.ll
index 7a1fdc0..e60522f 100644
--- a/llvm/test/Transforms/SafeStack/X86/debug-loc2.ll
+++ b/llvm/test/Transforms/SafeStack/X86/debug-loc2.ll
@@ -43,12 +43,12 @@ entry:
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @capture(ptr) #2
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
; Function Attrs: nounwind readnone
declare void @llvm.dbg.value(metadata, metadata, metadata) #3
diff --git a/llvm/test/Transforms/SafeStack/X86/layout-frag.ll b/llvm/test/Transforms/SafeStack/X86/layout-frag.ll
index b858fd6..8a5362b 100644
--- a/llvm/test/Transforms/SafeStack/X86/layout-frag.ll
+++ b/llvm/test/Transforms/SafeStack/X86/layout-frag.ll
@@ -13,16 +13,16 @@ entry:
%x2 = alloca i64, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr %x0)
+ call void @llvm.lifetime.start.p0(ptr %x0)
call void @capture64(ptr %x0)
- call void @llvm.lifetime.end.p0(i64 8, ptr %x0)
+ call void @llvm.lifetime.end.p0(ptr %x0)
- call void @llvm.lifetime.start.p0(i64 1, ptr %x1)
- call void @llvm.lifetime.start.p0(i64 8, ptr %x2)
+ call void @llvm.lifetime.start.p0(ptr %x1)
+ call void @llvm.lifetime.start.p0(ptr %x2)
call void @capture8(ptr %x1)
call void @capture64(ptr %x2)
- call void @llvm.lifetime.end.p0(i64 1, ptr %x1)
- call void @llvm.lifetime.end.p0(i64 8, ptr %x2)
+ call void @llvm.lifetime.end.p0(ptr %x1)
+ call void @llvm.lifetime.end.p0(ptr %x2)
; Test that i64 allocas share space.
; CHECK: getelementptr i8, ptr %unsafe_stack_ptr, i32 -8
@@ -32,7 +32,7 @@ entry:
ret void
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @capture8(ptr)
declare void @capture64(ptr)
diff --git a/llvm/test/Transforms/SafeStack/X86/no-crash-on-lifetime.ll b/llvm/test/Transforms/SafeStack/X86/no-crash-on-lifetime.ll
index 76c638e..c01ca2f 100644
--- a/llvm/test/Transforms/SafeStack/X86/no-crash-on-lifetime.ll
+++ b/llvm/test/Transforms/SafeStack/X86/no-crash-on-lifetime.ll
@@ -9,9 +9,9 @@ define dso_local void @_ZN1s1tE1F(ptr byval(%class.F) %g) local_unnamed_addr saf
entry:
%ref.tmp.i.i.i = alloca i64, align 1
call void undef(ptr %g)
- call void @llvm.lifetime.start.p0(i64 3, ptr %ref.tmp.i.i.i)
+ call void @llvm.lifetime.start.p0(ptr %ref.tmp.i.i.i)
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
diff --git a/llvm/test/Transforms/SampleProfile/csspgo-import-list-callee-samples.ll b/llvm/test/Transforms/SampleProfile/csspgo-import-list-callee-samples.ll
index ba66548..3ae0aea 100644
--- a/llvm/test/Transforms/SampleProfile/csspgo-import-list-callee-samples.ll
+++ b/llvm/test/Transforms/SampleProfile/csspgo-import-list-callee-samples.ll
@@ -61,10 +61,10 @@ entry:
}
; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #3
+declare void @llvm.lifetime.start.p0(ptr nocapture) #3
; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #3
+declare void @llvm.lifetime.end.p0(ptr nocapture) #3
; Function Attrs: nounwind uwtable
define dso_local i32 @main() #0 !dbg !28 {
diff --git a/llvm/test/Transforms/SampleProfile/entry_counts_cold.ll b/llvm/test/Transforms/SampleProfile/entry_counts_cold.ll
index 5fe80db..c7617c1 100644
--- a/llvm/test/Transforms/SampleProfile/entry_counts_cold.ll
+++ b/llvm/test/Transforms/SampleProfile/entry_counts_cold.ll
@@ -43,7 +43,7 @@ entry:
%a = alloca i32, align 4
store ptr %p, ptr %p.addr, align 8, !tbaa !15
call void @llvm.dbg.declare(metadata ptr %p.addr, metadata !33, metadata !DIExpression()), !dbg !35
- call void @llvm.lifetime.start.p0(i64 4, ptr %a) #4, !dbg !36
+ call void @llvm.lifetime.start.p0(ptr %a) #4, !dbg !36
call void @llvm.dbg.declare(metadata ptr %a, metadata !34, metadata !DIExpression()), !dbg !37
%0 = load ptr, ptr %p.addr, align 8, !dbg !38, !tbaa !15
%arrayidx = getelementptr inbounds i32, ptr %0, i64 3, !dbg !38
@@ -58,7 +58,7 @@ entry:
store i32 %call, ptr %a, align 4, !dbg !43, !tbaa !25
%5 = load i32, ptr %a, align 4, !dbg !44, !tbaa !25
%add2 = add nsw i32 %5, 1, !dbg !45
- call void @llvm.lifetime.end.p0(i64 4, ptr %a) #4, !dbg !46
+ call void @llvm.lifetime.end.p0(ptr %a) #4, !dbg !46
ret i32 %add2, !dbg !47
}
@@ -86,10 +86,10 @@ entry:
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #2
+declare void @llvm.lifetime.start.p0(ptr nocapture) #2
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #2
+declare void @llvm.lifetime.end.p0(ptr nocapture) #2
declare void @baz(...) #3
diff --git a/llvm/test/Transforms/SampleProfile/entry_counts_missing_dbginfo.ll b/llvm/test/Transforms/SampleProfile/entry_counts_missing_dbginfo.ll
index b8e1064..0e62921 100644
--- a/llvm/test/Transforms/SampleProfile/entry_counts_missing_dbginfo.ll
+++ b/llvm/test/Transforms/SampleProfile/entry_counts_missing_dbginfo.ll
@@ -53,7 +53,7 @@ entry:
%a = alloca i32, align 4
store ptr %p, ptr %p.addr, align 8, !tbaa !15
call void @llvm.dbg.declare(metadata ptr %p.addr, metadata !33, metadata !DIExpression()), !dbg !35
- call void @llvm.lifetime.start.p0(i64 4, ptr %a) #4, !dbg !36
+ call void @llvm.lifetime.start.p0(ptr %a) #4, !dbg !36
call void @llvm.dbg.declare(metadata ptr %a, metadata !34, metadata !DIExpression()), !dbg !37
%0 = load ptr, ptr %p.addr, align 8, !dbg !38, !tbaa !15
%arrayidx = getelementptr inbounds i32, ptr %0, i64 3, !dbg !38
@@ -68,7 +68,7 @@ entry:
store i32 %call, ptr %a, align 4, !dbg !43, !tbaa !25
%5 = load i32, ptr %a, align 4, !dbg !44, !tbaa !25
%add2 = add nsw i32 %5, 1, !dbg !45
- call void @llvm.lifetime.end.p0(i64 4, ptr %a) #4, !dbg !46
+ call void @llvm.lifetime.end.p0(ptr %a) #4, !dbg !46
ret i32 %add2, !dbg !47
}
@@ -96,10 +96,10 @@ entry:
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #2
+declare void @llvm.lifetime.start.p0(ptr nocapture) #2
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #2
+declare void @llvm.lifetime.end.p0(ptr nocapture) #2
declare void @baz(...) #3
diff --git a/llvm/test/Transforms/SampleProfile/non-probe-stale-profile-matching.ll b/llvm/test/Transforms/SampleProfile/non-probe-stale-profile-matching.ll
index 3ca94a4..2b091a1 100644
--- a/llvm/test/Transforms/SampleProfile/non-probe-stale-profile-matching.ll
+++ b/llvm/test/Transforms/SampleProfile/non-probe-stale-profile-matching.ll
@@ -151,10 +151,10 @@ for.end: ; preds = %cleanup, %if.then
}
; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #3
+declare void @llvm.lifetime.start.p0(ptr nocapture) #3
; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #3
+declare void @llvm.lifetime.end.p0(ptr nocapture) #3
attributes #0 = { noinline nounwind uwtable "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cmov,+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" "use-sample-profile" }
attributes #1 = { alwaysinline nounwind uwtable "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cmov,+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" "use-sample-profile" }
diff --git a/llvm/test/Transforms/SampleProfile/profile-mismatch.ll b/llvm/test/Transforms/SampleProfile/profile-mismatch.ll
index 42bc1b8..0a1b896 100644
--- a/llvm/test/Transforms/SampleProfile/profile-mismatch.ll
+++ b/llvm/test/Transforms/SampleProfile/profile-mismatch.ll
@@ -43,13 +43,13 @@ define dso_local i32 @foo(i32 noundef %x) #0 !dbg !12 {
entry:
%y = alloca i32, align 4
call void @llvm.dbg.value(metadata i32 %x, metadata !16, metadata !DIExpression()), !dbg !18
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %y), !dbg !19
+ call void @llvm.lifetime.start.p0(ptr nonnull %y), !dbg !19
call void @llvm.dbg.declare(metadata ptr %y, metadata !17, metadata !DIExpression()), !dbg !20
%add = add nsw i32 %x, 1, !dbg !21
store volatile i32 %add, ptr %y, align 4, !dbg !20, !tbaa !22
%y.0. = load volatile i32, ptr %y, align 4, !dbg !26, !tbaa !22
%add1 = add nsw i32 %y.0., 1, !dbg !27
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %y), !dbg !28
+ call void @llvm.lifetime.end.p0(ptr nonnull %y), !dbg !28
ret i32 %add1, !dbg !29
}
@@ -57,10 +57,10 @@ entry:
declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
; Function Attrs: argmemonly mustprogress nocallback nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.start.p0(ptr nocapture) #2
; Function Attrs: argmemonly mustprogress nocallback nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.end.p0(ptr nocapture) #2
; Function Attrs: noinline nounwind uwtable
define dso_local i32 @bar(i32 noundef %x) #3 !dbg !30 {
diff --git a/llvm/test/Transforms/SampleProfile/pseudo-probe-discriminator.ll b/llvm/test/Transforms/SampleProfile/pseudo-probe-discriminator.ll
index 6d4429b..26ae198 100644
--- a/llvm/test/Transforms/SampleProfile/pseudo-probe-discriminator.ll
+++ b/llvm/test/Transforms/SampleProfile/pseudo-probe-discriminator.ll
@@ -31,8 +31,8 @@ bb3:
}
declare void @_Z3barv() #1
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind argmemonly
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind argmemonly
+declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind argmemonly
+declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind argmemonly
attributes #0 = { uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/llvm/test/Transforms/SampleProfile/pseudo-probe-icp-factor.ll b/llvm/test/Transforms/SampleProfile/pseudo-probe-icp-factor.ll
index b662efa..383289e 100644
--- a/llvm/test/Transforms/SampleProfile/pseudo-probe-icp-factor.ll
+++ b/llvm/test/Transforms/SampleProfile/pseudo-probe-icp-factor.ll
@@ -60,12 +60,12 @@ bb:
%i3 = alloca i32, align 4
store i32 0, ptr %i, align 4
call void @llvm.pseudoprobe(i64 -2624081020897602054, i64 1, i32 0, i64 -1), !dbg !62
- call void @llvm.lifetime.start.p0(i64 8, ptr %i1), !dbg !62
+ call void @llvm.lifetime.start.p0(ptr %i1), !dbg !62
call void @llvm.dbg.declare(metadata ptr %i1, metadata !57, metadata !DIExpression()), !dbg !63
- call void @llvm.lifetime.start.p0(i64 4, ptr %i2), !dbg !64
+ call void @llvm.lifetime.start.p0(ptr %i2), !dbg !64
call void @llvm.dbg.declare(metadata ptr %i2, metadata !59, metadata !DIExpression()), !dbg !65
store i32 0, ptr %i2, align 4, !dbg !65, !tbaa !19
- call void @llvm.lifetime.start.p0(i64 4, ptr %i3), !dbg !66
+ call void @llvm.lifetime.start.p0(ptr %i3), !dbg !66
call void @llvm.dbg.declare(metadata ptr %i3, metadata !60, metadata !DIExpression()), !dbg !67
store i32 0, ptr %i3, align 4, !dbg !67, !tbaa !19
br label %bb7, !dbg !66
@@ -78,7 +78,7 @@ bb7: ; preds = %bb25, %bb
bb10: ; preds = %bb7
call void @llvm.pseudoprobe(i64 -2624081020897602054, i64 3, i32 0, i64 -1), !dbg !72
- call void @llvm.lifetime.end.p0(i64 4, ptr %i3), !dbg !72
+ call void @llvm.lifetime.end.p0(ptr %i3), !dbg !72
br label %bb28
bb12: ; preds = %bb7
@@ -119,16 +119,16 @@ bb28: ; preds = %bb10
call void @llvm.pseudoprobe(i64 -2624081020897602054, i64 9, i32 0, i64 -1), !dbg !92
%i29 = load i32, ptr %i2, align 4, !dbg !92, !tbaa !19
%i30 = call i32 (ptr, ...) @printf(ptr @.str, i32 %i29), !dbg !93
- call void @llvm.lifetime.end.p0(i64 4, ptr %i2), !dbg !95
- call void @llvm.lifetime.end.p0(i64 8, ptr %i1), !dbg !95
+ call void @llvm.lifetime.end.p0(ptr %i2), !dbg !95
+ call void @llvm.lifetime.end.p0(ptr %i1), !dbg !95
ret i32 0, !dbg !96
}
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.start.p0(ptr nocapture) #2
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.end.p0(ptr nocapture) #2
declare dso_local i32 @printf(ptr, ...)
diff --git a/llvm/test/Transforms/SampleProfile/pseudo-probe-profile-mismatch.ll b/llvm/test/Transforms/SampleProfile/pseudo-probe-profile-mismatch.ll
index 22317e6..e1d717c 100644
--- a/llvm/test/Transforms/SampleProfile/pseudo-probe-profile-mismatch.ll
+++ b/llvm/test/Transforms/SampleProfile/pseudo-probe-profile-mismatch.ll
@@ -70,14 +70,14 @@ define dso_local i32 @foo(i32 noundef %x) #0 !dbg !16 {
entry:
%y = alloca i32, align 4
call void @llvm.dbg.value(metadata i32 %x, metadata !20, metadata !DIExpression()), !dbg !22
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %y), !dbg !23
+ call void @llvm.lifetime.start.p0(ptr nonnull %y), !dbg !23
call void @llvm.dbg.declare(metadata ptr %y, metadata !21, metadata !DIExpression()), !dbg !24
call void @llvm.pseudoprobe(i64 6699318081062747564, i64 1, i32 0, i64 -1), !dbg !25
%add = add nsw i32 %x, 1, !dbg !26
store volatile i32 %add, ptr %y, align 4, !dbg !24, !tbaa !27
%y.0. = load volatile i32, ptr %y, align 4, !dbg !31, !tbaa !27
%add1 = add nsw i32 %y.0., 1, !dbg !32
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %y), !dbg !33
+ call void @llvm.lifetime.end.p0(ptr nonnull %y), !dbg !33
ret i32 %add1, !dbg !34
}
@@ -85,10 +85,10 @@ entry:
declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
; Function Attrs: argmemonly mustprogress nocallback nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.start.p0(ptr nocapture) #2
; Function Attrs: argmemonly mustprogress nocallback nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.end.p0(ptr nocapture) #2
; Function Attrs: noinline nounwind uwtable
define dso_local i32 @bar(i32 noundef %x) #3 !dbg !35 {
diff --git a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching-LCS.ll b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching-LCS.ll
index cdd365b..c0976de 100644
--- a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching-LCS.ll
+++ b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching-LCS.ll
@@ -119,10 +119,10 @@ if.end: ; preds = %if.else, %if.then
}
; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #3
+declare void @llvm.lifetime.start.p0(ptr nocapture) #3
; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #3
+declare void @llvm.lifetime.end.p0(ptr nocapture) #3
; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(inaccessiblemem: readwrite)
declare void @llvm.pseudoprobe(i64, i64, i32, i64) #4
diff --git a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching.ll b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching.ll
index 20be0c2..0c38d9c 100644
--- a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching.ll
+++ b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching.ll
@@ -217,10 +217,10 @@ for.end: ; preds = %cleanup, %if.then
}
; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
; Function Attrs: mustprogress nocallback nofree nosync nounwind speculatable willreturn memory(none)
declare void @llvm.dbg.assign(metadata, metadata, metadata, metadata, metadata, metadata) #1
diff --git a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-name-similarity.ll b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-name-similarity.ll
index 4e435f4..dbf3dda 100644
--- a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-name-similarity.ll
+++ b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-name-similarity.ll
@@ -57,10 +57,10 @@ for.body: ; preds = %for.cond
}
; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr captures(none)) #2
+declare void @llvm.lifetime.start.p0(ptr captures(none)) #2
; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr captures(none)) #2
+declare void @llvm.lifetime.end.p0(ptr captures(none)) #2
; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(inaccessiblemem: readwrite)
declare void @llvm.pseudoprobe(i64, i64, i32, i64) #3
diff --git a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-renaming-recursive.ll b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-renaming-recursive.ll
index d9db804..e246d26 100644
--- a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-renaming-recursive.ll
+++ b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-renaming-recursive.ll
@@ -70,13 +70,13 @@ for.body: ; preds = %for.cond
}
; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
; Function Attrs: mustprogress nocallback nofree nosync nounwind speculatable willreturn memory(none)
declare void @llvm.dbg.declare(metadata, metadata, metadata) #2
; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(inaccessiblemem: readwrite)
declare void @llvm.pseudoprobe(i64, i64, i32, i64) #3
diff --git a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-renaming.ll b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-renaming.ll
index 6bf09ce..d1c5a9d 100644
--- a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-renaming.ll
+++ b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-renaming.ll
@@ -175,10 +175,10 @@ for.body: ; preds = %for.cond
}
; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #3
+declare void @llvm.lifetime.start.p0(ptr nocapture) #3
; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #3
+declare void @llvm.lifetime.end.p0(ptr nocapture) #3
; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(inaccessiblemem: readwrite)
declare void @llvm.pseudoprobe(i64, i64, i32, i64) #4
diff --git a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-toplev-func.ll b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-toplev-func.ll
index c839364f..2ed1872 100644
--- a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-toplev-func.ll
+++ b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-toplev-func.ll
@@ -85,10 +85,10 @@ for.body: ; preds = %for.cond
}
; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.start.p0(ptr nocapture) #2
; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.end.p0(ptr nocapture) #2
; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(inaccessiblemem: readwrite)
declare void @llvm.pseudoprobe(i64, i64, i32, i64) #3
diff --git a/llvm/test/Transforms/SampleProfile/remarks.ll b/llvm/test/Transforms/SampleProfile/remarks.ll
index 9c0143a..3cb91b7 100644
--- a/llvm/test/Transforms/SampleProfile/remarks.ll
+++ b/llvm/test/Transforms/SampleProfile/remarks.ll
@@ -121,10 +121,10 @@ define i64 @_Z3foov() #0 !dbg !4 {
entry:
%sum = alloca i64, align 8
%i = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 8, ptr %sum) #4, !dbg !19
+ call void @llvm.lifetime.start.p0(ptr %sum) #4, !dbg !19
call void @llvm.dbg.declare(metadata ptr %sum, metadata !9, metadata !20), !dbg !21
store i64 0, ptr %sum, align 8, !dbg !21, !tbaa !22
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #4, !dbg !26
+ call void @llvm.lifetime.start.p0(ptr %i) #4, !dbg !26
call void @llvm.dbg.declare(metadata ptr %i, metadata !10, metadata !20), !dbg !27
store i32 0, ptr %i, align 4, !dbg !27, !tbaa !28
br label %for.cond, !dbg !26
@@ -135,7 +135,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.cond.cleanup, !dbg !35
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #4, !dbg !36
+ call void @llvm.lifetime.end.p0(ptr %i) #4, !dbg !36
br label %for.end
for.body: ; preds = %for.cond
@@ -173,12 +173,12 @@ for.inc: ; preds = %if.end
for.end: ; preds = %for.cond.cleanup
%7 = load i64, ptr %sum, align 8, !dbg !53, !tbaa !22
- call void @llvm.lifetime.end.p0(i64 8, ptr %sum) #4, !dbg !54
+ call void @llvm.lifetime.end.p0(ptr %sum) #4, !dbg !54
ret i64 %7, !dbg !55
}
; Function Attrs: nounwind argmemonly
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
; Function Attrs: nounwind readnone
declare void @llvm.dbg.declare(metadata, metadata, metadata) #2
@@ -189,7 +189,7 @@ define i32 @rand() #3 !dbg !59 {
}
; Function Attrs: nounwind argmemonly
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
; Function Attrs: nounwind uwtable
define i32 @main() #0 !dbg !13 {
diff --git a/llvm/test/Transforms/Scalarizer/intrinsics.ll b/llvm/test/Transforms/Scalarizer/intrinsics.ll
index cee44ef..070c765 100644
--- a/llvm/test/Transforms/Scalarizer/intrinsics.ll
+++ b/llvm/test/Transforms/Scalarizer/intrinsics.ll
@@ -8,6 +8,7 @@ declare <2 x float> @llvm.sqrt.v2f32(<2 x float>)
declare <2 x float> @llvm.minnum.v2f32(<2 x float>, <2 x float>)
declare <2 x float> @llvm.minimum.v2f32(<2 x float>, <2 x float>)
declare <2 x float> @llvm.maximum.v2f32(<2 x float>, <2 x float>)
+declare <2 x float> @llvm.ldexp.v2f32.v2i32(<2 x float>, <2 x i32>)
; Ternary fp
declare <2 x float> @llvm.fma.v2f32(<2 x float>, <2 x float>, <2 x float>)
@@ -32,6 +33,8 @@ declare <2 x i32> @llvm.fptoui.sat.v2i32.v2f32(<2 x float>)
; Unary fp operand, int return type
declare <2 x i32> @llvm.lrint.v2i32.v2f32(<2 x float>)
declare <2 x i32> @llvm.llrint.v2i32.v2f32(<2 x float>)
+declare <2 x i32> @llvm.lround.v2i32.v2f32(<2 x float>)
+declare <2 x i32> @llvm.llround.v2i32.v2f32(<2 x float>)
; Bool return type, overloaded on fp operand type
declare <2 x i1> @llvm.is.fpclass(<2 x float>, i32)
@@ -159,6 +162,22 @@ define <2 x float> @scalarize_powi_v2f32(<2 x float> %x, i32 %y) #0 {
ret <2 x float> %powi
}
+define <2 x float> @scalarize_ldexp_v2f32(<2 x float> %x, <2 x i32> %y) #0 {
+; CHECK-LABEL: @scalarize_ldexp_v2f32(
+; CHECK-NEXT: [[X_I0:%.*]] = extractelement <2 x float> [[X:%.*]], i64 0
+; CHECK-NEXT: [[Y:%.*]] = extractelement <2 x i32> [[Y1:%.*]], i64 0
+; CHECK-NEXT: [[POWI_I0:%.*]] = call float @llvm.ldexp.f32.i32(float [[X_I0]], i32 [[Y]])
+; CHECK-NEXT: [[X_I1:%.*]] = extractelement <2 x float> [[X]], i64 1
+; CHECK-NEXT: [[Y_I1:%.*]] = extractelement <2 x i32> [[Y1]], i64 1
+; CHECK-NEXT: [[POWI_I1:%.*]] = call float @llvm.ldexp.f32.i32(float [[X_I1]], i32 [[Y_I1]])
+; CHECK-NEXT: [[POWI_UPTO0:%.*]] = insertelement <2 x float> poison, float [[POWI_I0]], i64 0
+; CHECK-NEXT: [[POWI:%.*]] = insertelement <2 x float> [[POWI_UPTO0]], float [[POWI_I1]], i64 1
+; CHECK-NEXT: ret <2 x float> [[POWI]]
+;
+ %powi = call <2 x float> @llvm.ldexp.v2f32.v2i32(<2 x float> %x, <2 x i32> %y)
+ ret <2 x float> %powi
+}
+
define <2 x i32> @scalarize_smul_fix_sat_v2i32(<2 x i32> %x) #0 {
; CHECK-LABEL: @scalarize_smul_fix_sat_v2i32(
; CHECK-NEXT: [[X_I0:%.*]] = extractelement <2 x i32> [[X:%.*]], i64 0
@@ -243,6 +262,34 @@ define <2 x i32> @scalarize_llrint(<2 x float> %x) #0 {
ret <2 x i32> %rnd
}
+define <2 x i32> @scalarize_lround(<2 x float> %x) #0 {
+; CHECK-LABEL: @scalarize_lround(
+; CHECK-NEXT: [[X_I0:%.*]] = extractelement <2 x float> [[X:%.*]], i64 0
+; CHECK-NEXT: [[RND_I0:%.*]] = call i32 @llvm.lround.i32.f32(float [[X_I0]])
+; CHECK-NEXT: [[X_I1:%.*]] = extractelement <2 x float> [[X]], i64 1
+; CHECK-NEXT: [[RND_I1:%.*]] = call i32 @llvm.lround.i32.f32(float [[X_I1]])
+; CHECK-NEXT: [[RND_UPTO0:%.*]] = insertelement <2 x i32> poison, i32 [[RND_I0]], i64 0
+; CHECK-NEXT: [[RND:%.*]] = insertelement <2 x i32> [[RND_UPTO0]], i32 [[RND_I1]], i64 1
+; CHECK-NEXT: ret <2 x i32> [[RND]]
+;
+ %rnd = call <2 x i32> @llvm.lround.v2i32.v2f32(<2 x float> %x)
+ ret <2 x i32> %rnd
+}
+
+define <2 x i32> @scalarize_llround(<2 x float> %x) #0 {
+; CHECK-LABEL: @scalarize_llround(
+; CHECK-NEXT: [[X_I0:%.*]] = extractelement <2 x float> [[X:%.*]], i64 0
+; CHECK-NEXT: [[RND_I0:%.*]] = call i32 @llvm.llround.i32.f32(float [[X_I0]])
+; CHECK-NEXT: [[X_I1:%.*]] = extractelement <2 x float> [[X]], i64 1
+; CHECK-NEXT: [[RND_I1:%.*]] = call i32 @llvm.llround.i32.f32(float [[X_I1]])
+; CHECK-NEXT: [[RND_UPTO0:%.*]] = insertelement <2 x i32> poison, i32 [[RND_I0]], i64 0
+; CHECK-NEXT: [[RND:%.*]] = insertelement <2 x i32> [[RND_UPTO0]], i32 [[RND_I1]], i64 1
+; CHECK-NEXT: ret <2 x i32> [[RND]]
+;
+ %rnd = call <2 x i32> @llvm.llround.v2i32.v2f32(<2 x float> %x)
+ ret <2 x i32> %rnd
+}
+
define <2 x i1> @scalarize_is_fpclass(<2 x float> %x) #0 {
; CHECK-LABEL: @scalarize_is_fpclass(
; CHECK-NEXT: [[X_I0:%.*]] = extractelement <2 x float> [[X:%.*]], i64 0
diff --git a/llvm/test/Transforms/SimplifyCFG/X86/critedge-assume.ll b/llvm/test/Transforms/SimplifyCFG/X86/critedge-assume.ll
index 58ca8df..99e908e 100644
--- a/llvm/test/Transforms/SimplifyCFG/X86/critedge-assume.ll
+++ b/llvm/test/Transforms/SimplifyCFG/X86/critedge-assume.ll
@@ -56,7 +56,7 @@ while.end:
ret void
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare i32 @_ZNK1F5beginEv(ptr)
@@ -68,7 +68,7 @@ declare noalias nonnull ptr @_Znwm(i64)
declare void @_ZN1B6appendEv(ptr)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare i1 @llvm.type.test(ptr, metadata)
diff --git a/llvm/test/Transforms/SimplifyCFG/X86/empty-cleanuppad.ll b/llvm/test/Transforms/SimplifyCFG/X86/empty-cleanuppad.ll
index 162a3ab..1499eecb 100644
--- a/llvm/test/Transforms/SimplifyCFG/X86/empty-cleanuppad.ll
+++ b/llvm/test/Transforms/SimplifyCFG/X86/empty-cleanuppad.ll
@@ -437,7 +437,7 @@ define i32 @f9() personality ptr @__CxxFrameHandler3 {
; CHECK-LABEL: @f9(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[S:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr nonnull [[S]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[S]])
; CHECK-NEXT: invoke void @"\01??1S2@@QEAA@XZ"(ptr [[S]])
; CHECK-NEXT: to label [[TRY_CONT:%.*]] unwind label [[CATCH_DISPATCH:%.*]]
; CHECK: catch.dispatch:
@@ -450,13 +450,13 @@ define i32 @f9() personality ptr @__CxxFrameHandler3 {
;
entry:
%s = alloca i8, align 1
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %s)
+ call void @llvm.lifetime.start.p0(ptr nonnull %s)
invoke void @"\01??1S2@@QEAA@XZ"(ptr %s)
to label %try.cont unwind label %ehcleanup
ehcleanup:
%cleanup.pad = cleanuppad within none []
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %s)
+ call void @llvm.lifetime.end.p0(ptr nonnull %s)
cleanupret from %cleanup.pad unwind label %catch.dispatch
catch.dispatch:
@@ -534,7 +534,7 @@ invoke.cont2: ; preds = %invoke.cont
ehcleanup: ; preds = %invoke.cont, %entry
%0 = cleanuppad within none []
- call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %x)
+ call void @llvm.lifetime.end.p0(ptr nonnull %x)
cleanupret from %0 unwind label %catch.dispatch
catch.dispatch: ; preds = %ehcleanup, %invoke.cont
@@ -556,8 +556,8 @@ declare void @use_x(i32 %x)
declare i32 @__CxxFrameHandler3(...)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
;.
; CHECK: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) }
;.
diff --git a/llvm/test/Transforms/SimplifyCFG/X86/invalidate-dom.ll b/llvm/test/Transforms/SimplifyCFG/X86/invalidate-dom.ll
index a937d9c..ce58e936 100644
--- a/llvm/test/Transforms/SimplifyCFG/X86/invalidate-dom.ll
+++ b/llvm/test/Transforms/SimplifyCFG/X86/invalidate-dom.ll
@@ -79,10 +79,10 @@ for.body: ; preds = %for.cond
declare i32 @c(...) #0
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.start.p0(ptr nocapture) #2
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.end.p0(ptr nocapture) #2
attributes #0 = { "use-soft-float"="false" }
attributes #1 = { "target-cpu"="x86-64" }
diff --git a/llvm/test/Transforms/SimplifyCFG/X86/sink-common-code.ll b/llvm/test/Transforms/SimplifyCFG/X86/sink-common-code.ll
index 62351d7..6129e3b 100644
--- a/llvm/test/Transforms/SimplifyCFG/X86/sink-common-code.ll
+++ b/llvm/test/Transforms/SimplifyCFG/X86/sink-common-code.ll
@@ -1338,10 +1338,10 @@ define i32 @test_not_sink_lifetime_marker(i1 zeroext %flag, i32 %x) {
; CHECK-NEXT: [[Z:%.*]] = alloca i32, align 4
; CHECK-NEXT: br i1 [[FLAG:%.*]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
; CHECK: if.then:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[Y]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[Y]])
; CHECK-NEXT: br label [[IF_END:%.*]]
; CHECK: if.else:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[Z]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[Z]])
; CHECK-NEXT: br label [[IF_END]]
; CHECK: if.end:
; CHECK-NEXT: ret i32 1
@@ -1352,11 +1352,11 @@ entry:
br i1 %flag, label %if.then, label %if.else
if.then:
- call void @llvm.lifetime.end.p0(i64 4, ptr %y)
+ call void @llvm.lifetime.end.p0(ptr %y)
br label %if.end
if.else:
- call void @llvm.lifetime.end.p0(i64 4, ptr %z)
+ call void @llvm.lifetime.end.p0(ptr %z)
br label %if.end
if.end:
@@ -1468,8 +1468,8 @@ declare void @direct_callee()
declare void @direct_callee2()
declare void @direct_callee3()
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
define void @creating_too_many_phis(i1 %cond, i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h) {
; CHECK-LABEL: @creating_too_many_phis(
diff --git a/llvm/test/Transforms/SimplifyCFG/common-code-hoisting.ll b/llvm/test/Transforms/SimplifyCFG/common-code-hoisting.ll
index a430399..307501d 100644
--- a/llvm/test/Transforms/SimplifyCFG/common-code-hoisting.ll
+++ b/llvm/test/Transforms/SimplifyCFG/common-code-hoisting.ll
@@ -39,8 +39,8 @@ declare void @f0()
declare void @f1()
declare void @f2()
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
define void @_Z4loopi(i1 %cmp) {
; HOIST-LABEL: @_Z4loopi(
diff --git a/llvm/test/Transforms/SimplifyCFG/invoke_unwind_lifetime.ll b/llvm/test/Transforms/SimplifyCFG/invoke_unwind_lifetime.ll
index ea14b17..40e9a49 100644
--- a/llvm/test/Transforms/SimplifyCFG/invoke_unwind_lifetime.ll
+++ b/llvm/test/Transforms/SimplifyCFG/invoke_unwind_lifetime.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals
; RUN: opt < %s -passes=simplifycfg -simplifycfg-require-and-preserve-domtree=1 -S | FileCheck %s
-declare void @llvm.lifetime.start.p0(i64, ptr)
-declare void @llvm.lifetime.end.p0(i64, ptr)
+declare void @llvm.lifetime.start.p0(ptr)
+declare void @llvm.lifetime.end.p0(ptr)
declare void @escape(ptr)
@@ -15,16 +15,16 @@ define void @caller(i1 %c) personality ptr @__gxx_personality_v0 {
; CHECK-LABEL: @caller(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[I0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[I0]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[I0]])
; CHECK-NEXT: call void @escape(ptr [[I0]])
; CHECK-NEXT: [[I2:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[I2]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[I2]])
; CHECK-NEXT: call void @escape(ptr [[I2]])
; CHECK-NEXT: [[I4:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[I4]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[I4]])
; CHECK-NEXT: call void @escape(ptr [[I4]])
; CHECK-NEXT: [[I6:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[I6]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[I6]])
; CHECK-NEXT: call void @escape(ptr [[I6]])
; CHECK-NEXT: br i1 [[C:%.*]], label [[V0:%.*]], label [[V1:%.*]]
; CHECK: v0:
@@ -36,19 +36,19 @@ define void @caller(i1 %c) personality ptr @__gxx_personality_v0 {
;
entry:
%i0 = alloca i32
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %i0)
+ call void @llvm.lifetime.start.p0(ptr nonnull %i0)
call void @escape(ptr %i0)
%i2 = alloca i32
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %i2)
+ call void @llvm.lifetime.start.p0(ptr nonnull %i2)
call void @escape(ptr %i2)
%i4 = alloca i32
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %i4)
+ call void @llvm.lifetime.start.p0(ptr nonnull %i4)
call void @escape(ptr %i4)
%i6 = alloca i32
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %i6)
+ call void @llvm.lifetime.start.p0(ptr nonnull %i6)
call void @escape(ptr %i6)
br i1 %c, label %v0, label %v1
@@ -66,14 +66,14 @@ invoke.cont:
lpad.v0:
%i8 = landingpad { ptr, i32 } cleanup
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %i0)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %i4)
+ call void @llvm.lifetime.end.p0(ptr nonnull %i0)
+ call void @llvm.lifetime.end.p0(ptr nonnull %i4)
br label %end
lpad.v1:
%i9 = landingpad { ptr, i32 } cleanup
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %i2)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %i6)
+ call void @llvm.lifetime.end.p0(ptr nonnull %i2)
+ call void @llvm.lifetime.end.p0(ptr nonnull %i6)
br label %end
end:
diff --git a/llvm/test/Transforms/SimplifyCFG/lifetime-landingpad.ll b/llvm/test/Transforms/SimplifyCFG/lifetime-landingpad.ll
index 0174eb1..88395a0 100644
--- a/llvm/test/Transforms/SimplifyCFG/lifetime-landingpad.ll
+++ b/llvm/test/Transforms/SimplifyCFG/lifetime-landingpad.ll
@@ -5,32 +5,32 @@ define void @foo() personality ptr @__gxx_personality_v0 {
; CHECK-LABEL: @foo(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr nonnull [[A]]) #[[ATTR1:[0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[A]]) #[[ATTR1:[0-9]+]]
; CHECK-NEXT: call void @bar()
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr nonnull [[A]]) #[[ATTR1]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[A]]) #[[ATTR1]]
; CHECK-NEXT: ret void
;
entry:
%a = alloca i8
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %a) nounwind
+ call void @llvm.lifetime.start.p0(ptr nonnull %a) nounwind
invoke void @bar() to label %invoke.cont unwind label %lpad
invoke.cont:
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %a) nounwind
+ call void @llvm.lifetime.end.p0(ptr nonnull %a) nounwind
ret void
lpad:
%b = landingpad { ptr, i32 }
cleanup
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %a) nounwind
+ call void @llvm.lifetime.end.p0(ptr nonnull %a) nounwind
resume { ptr, i32 } %b
}
declare void @bar()
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) nounwind
+declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) nounwind
+declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind
declare i32 @__gxx_personality_v0(...)
;.
diff --git a/llvm/test/Transforms/SimplifyCFG/lifetime.ll b/llvm/test/Transforms/SimplifyCFG/lifetime.ll
index d6bba2c..fac0b61 100644
--- a/llvm/test/Transforms/SimplifyCFG/lifetime.ll
+++ b/llvm/test/Transforms/SimplifyCFG/lifetime.ll
@@ -10,11 +10,11 @@
define void @foo(i1 %x) {
entry:
%a = alloca i8
- call void @llvm.lifetime.start.p0(i64 -1, ptr %a) nounwind
+ call void @llvm.lifetime.start.p0(ptr %a) nounwind
br i1 %x, label %bb0, label %bb1
bb0:
- call void @llvm.lifetime.end.p0(i64 -1, ptr %a) nounwind
+ call void @llvm.lifetime.end.p0(ptr %a) nounwind
br label %bb1
bb1:
@@ -24,6 +24,6 @@ bb1:
declare void @f()
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind
diff --git a/llvm/test/Transforms/SimplifyCFG/pr50060-constantfold-loopid.ll b/llvm/test/Transforms/SimplifyCFG/pr50060-constantfold-loopid.ll
index 55f1c01..19e1c73 100644
--- a/llvm/test/Transforms/SimplifyCFG/pr50060-constantfold-loopid.ll
+++ b/llvm/test/Transforms/SimplifyCFG/pr50060-constantfold-loopid.ll
@@ -22,7 +22,7 @@ define dso_local void @_Z6test01v() addrspace(1) #0 {
; CHECK: do.body:
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @C, align 4, !tbaa [[TBAA2:![0-9]+]]
; CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP0]], 1
-; CHECK-NEXT: call addrspace(1) void @llvm.lifetime.start.p0(i64 4, ptr [[J]]) #[[ATTR2:[0-9]+]]
+; CHECK-NEXT: call addrspace(1) void @llvm.lifetime.start.p0(ptr [[J]]) #[[ATTR2:[0-9]+]]
; CHECK-NEXT: store i32 0, ptr [[J]], align 4, !tbaa [[TBAA2]]
; CHECK-NEXT: br label [[FOR_COND:%.*]]
; CHECK: for.cond:
@@ -30,11 +30,11 @@ define dso_local void @_Z6test01v() addrspace(1) #0 {
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP1]], 3
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK: for.cond.cleanup:
-; CHECK-NEXT: call addrspace(1) void @llvm.lifetime.end.p0(i64 4, ptr [[J]]) #[[ATTR2]]
+; CHECK-NEXT: call addrspace(1) void @llvm.lifetime.end.p0(ptr [[J]]) #[[ATTR2]]
; CHECK-NEXT: br label [[DO_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: for.body:
; CHECK-NEXT: store i32 undef, ptr [[I]], align 4
-; CHECK-NEXT: call addrspace(1) void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR2]]
+; CHECK-NEXT: call addrspace(1) void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR2]]
; CHECK-NEXT: store i32 0, ptr [[I]], align 4, !tbaa [[TBAA2]]
; CHECK-NEXT: br label [[FOR_COND1:%.*]]
; CHECK: for.cond1:
@@ -43,7 +43,7 @@ define dso_local void @_Z6test01v() addrspace(1) #0 {
; CHECK-NEXT: [[CMP2:%.*]] = icmp slt i32 [[TMP2]], [[TMP3]]
; CHECK-NEXT: br i1 [[CMP2]], label [[FOR_BODY4:%.*]], label [[FOR_COND_CLEANUP3:%.*]]
; CHECK: for.cond.cleanup3:
-; CHECK-NEXT: call addrspace(1) void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR2]]
+; CHECK-NEXT: call addrspace(1) void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR2]]
; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[J]], align 4, !tbaa [[TBAA2]]
; CHECK-NEXT: [[INC7:%.*]] = add nsw i32 [[TMP4]], 1
; CHECK-NEXT: store i32 [[INC7]], ptr [[J]], align 4, !tbaa [[TBAA2]]
@@ -64,7 +64,7 @@ entry:
do.body: ; preds = %do.cond, %entry
%0 = load i32, ptr @C, align 4, !tbaa !2
%inc = add nsw i32 %0, 1
- call addrspace(1) void @llvm.lifetime.start.p0(i64 4, ptr %j) #2
+ call addrspace(1) void @llvm.lifetime.start.p0(ptr %j) #2
store i32 0, ptr %j, align 4, !tbaa !2
br label %for.cond
@@ -74,12 +74,12 @@ for.cond: ; preds = %for.inc6, %do.body
br i1 %cmp, label %for.body, label %for.cond.cleanup
for.cond.cleanup: ; preds = %for.cond
- call addrspace(1) void @llvm.lifetime.end.p0(i64 4, ptr %j) #2
+ call addrspace(1) void @llvm.lifetime.end.p0(ptr %j) #2
br label %for.end8
for.body: ; preds = %for.cond
store i32 undef, ptr %i, align 4
- call addrspace(1) void @llvm.lifetime.start.p0(i64 4, ptr %i) #2
+ call addrspace(1) void @llvm.lifetime.start.p0(ptr %i) #2
store i32 0, ptr %i, align 4, !tbaa !2
br label %for.cond1
@@ -90,7 +90,7 @@ for.cond1: ; preds = %for.inc, %for.body
br i1 %cmp2, label %for.body4, label %for.cond.cleanup3
for.cond.cleanup3: ; preds = %for.cond1
- call addrspace(1) void @llvm.lifetime.end.p0(i64 4, ptr %i) #2
+ call addrspace(1) void @llvm.lifetime.end.p0(ptr %i) #2
br label %for.end
for.body4: ; preds = %for.cond1
@@ -124,10 +124,10 @@ do.end: ; preds = %do.cond
}
; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) addrspace(1) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) addrspace(1) #1
; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) addrspace(1) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) addrspace(1) #1
attributes #0 = { nounwind "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { argmemonly nofree nosync nounwind willreturn }
diff --git a/llvm/test/Transforms/SimplifyCFG/sink-and-convert-switch.ll b/llvm/test/Transforms/SimplifyCFG/sink-and-convert-switch.ll
index 87d6493..0014b91 100644
--- a/llvm/test/Transforms/SimplifyCFG/sink-and-convert-switch.ll
+++ b/llvm/test/Transforms/SimplifyCFG/sink-and-convert-switch.ll
@@ -8,16 +8,16 @@ define void @pr104567(i8 %x, ptr %f) {
; CHECK-SAME: i8 [[X:%.*]], ptr [[F:%.*]]) {
; CHECK-NEXT: [[START:.*:]]
; CHECK-NEXT: [[Y:%.*]] = alloca [1 x i8], align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr nonnull [[Y]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[Y]])
; CHECK-NEXT: [[SWITCH_OFFSET:%.*]] = add nsw i8 [[X]], 4
; CHECK-NEXT: store i8 [[SWITCH_OFFSET]], ptr [[Y]], align 1
; CHECK-NEXT: call void [[F]](ptr [[Y]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr nonnull [[Y]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[Y]])
; CHECK-NEXT: ret void
;
start:
%y = alloca [1 x i8], align 1
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %y)
+ call void @llvm.lifetime.start.p0(ptr nonnull %y)
switch i8 %x, label %default.unreachable [
i8 0, label %bb4
i8 1, label %bb3
@@ -41,7 +41,7 @@ bb2:
bb5:
call void %f(ptr %y)
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %y)
+ call void @llvm.lifetime.end.p0(ptr nonnull %y)
ret void
}
diff --git a/llvm/test/Transforms/SimplifyCFG/tail-merge-noreturn.ll b/llvm/test/Transforms/SimplifyCFG/tail-merge-noreturn.ll
index 10e4870..77ce730 100644
--- a/llvm/test/Transforms/SimplifyCFG/tail-merge-noreturn.ll
+++ b/llvm/test/Transforms/SimplifyCFG/tail-merge-noreturn.ll
@@ -315,8 +315,8 @@ cont3:
; from sharing stack slots for x and y.
declare void @escape_i32_ptr(ptr)
-declare void @llvm.lifetime.start(i64, ptr nocapture)
-declare void @llvm.lifetime.end(i64, ptr nocapture)
+declare void @llvm.lifetime.start(ptr nocapture)
+declare void @llvm.lifetime.end(ptr nocapture)
define void @dont_merge_lifetimes(i32 %c1, i32 %c2) {
; CHECK-LABEL: @dont_merge_lifetimes(
@@ -328,7 +328,7 @@ define void @dont_merge_lifetimes(i32 %c1, i32 %c2) {
; CHECK-NEXT: i32 42, label [[IF_THEN3:%.*]]
; CHECK-NEXT: ]
; CHECK: if.then:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]])
; CHECK-NEXT: store i32 0, ptr [[X]], align 4
; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[C2:%.*]], 0
; CHECK-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN1:%.*]]
@@ -336,11 +336,11 @@ define void @dont_merge_lifetimes(i32 %c1, i32 %c2) {
; CHECK-NEXT: call void @escape_i32_ptr(ptr nonnull [[X]])
; CHECK-NEXT: br label [[IF_END]]
; CHECK: if.end:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]])
; CHECK-NEXT: call void @abort()
; CHECK-NEXT: unreachable
; CHECK: if.then3:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[Y]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[Y]])
; CHECK-NEXT: store i32 0, ptr [[Y]], align 4
; CHECK-NEXT: [[TOBOOL5:%.*]] = icmp eq i32 [[C2]], 0
; CHECK-NEXT: br i1 [[TOBOOL5]], label [[IF_END7:%.*]], label [[IF_THEN6:%.*]]
@@ -348,7 +348,7 @@ define void @dont_merge_lifetimes(i32 %c1, i32 %c2) {
; CHECK-NEXT: call void @escape_i32_ptr(ptr nonnull [[Y]])
; CHECK-NEXT: br label [[IF_END7]]
; CHECK: if.end7:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[Y]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[Y]])
; CHECK-NEXT: call void @abort()
; CHECK-NEXT: unreachable
; CHECK: if.end9:
@@ -363,7 +363,7 @@ entry:
]
if.then: ; preds = %entry
- call void @llvm.lifetime.start(i64 4, ptr nonnull %x)
+ call void @llvm.lifetime.start(ptr nonnull %x)
store i32 0, ptr %x, align 4
%tobool = icmp eq i32 %c2, 0
br i1 %tobool, label %if.end, label %if.then1
@@ -373,12 +373,12 @@ if.then1: ; preds = %if.then
br label %if.end
if.end: ; preds = %if.then1, %if.then
- call void @llvm.lifetime.end(i64 4, ptr nonnull %x)
+ call void @llvm.lifetime.end(ptr nonnull %x)
call void @abort()
unreachable
if.then3: ; preds = %entry
- call void @llvm.lifetime.start(i64 4, ptr nonnull %y)
+ call void @llvm.lifetime.start(ptr nonnull %y)
store i32 0, ptr %y, align 4
%tobool5 = icmp eq i32 %c2, 0
br i1 %tobool5, label %if.end7, label %if.then6
@@ -388,7 +388,7 @@ if.then6: ; preds = %if.then3
br label %if.end7
if.end7: ; preds = %if.then6, %if.then3
- call void @llvm.lifetime.end(i64 4, ptr nonnull %y)
+ call void @llvm.lifetime.end(ptr nonnull %y)
call void @abort()
unreachable
diff --git a/llvm/test/Transforms/TailCallElim/tre-byval-parameter-2.ll b/llvm/test/Transforms/TailCallElim/tre-byval-parameter-2.ll
index 325db79..fa771ad 100644
--- a/llvm/test/Transforms/TailCallElim/tre-byval-parameter-2.ll
+++ b/llvm/test/Transforms/TailCallElim/tre-byval-parameter-2.ll
@@ -44,17 +44,17 @@ define dso_local void @_Z7dostuff1AS_i(ptr nocapture byval(%struct.A) align 8 %a
; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [[STRUCT_A]], ptr [[B]], i64 0, i32 0, i64 5
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr [[ARRAYIDX4]], align 8
; CHECK-NEXT: [[CALL:%.*]] = tail call i32 (ptr, ...) @printf(ptr nonnull dereferenceable(1) @.str, i64 [[INC]], i64 [[TMP1]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 80, ptr nonnull [[AGG_TMP]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[AGG_TMP]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(80) [[AGG_TMP]], ptr nonnull align 8 dereferenceable(80) [[B]], i64 80, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 80, ptr nonnull [[AGG_TMP5]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[AGG_TMP5]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(80) [[AGG_TMP5]], ptr nonnull align 8 dereferenceable(80) [[A]], i64 80, i1 false)
; CHECK-NEXT: [[ADD]] = add nsw i32 [[I_TR]], 1
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[AGG_TMP1]], ptr align 8 [[AGG_TMP]], i64 80, i1 false)
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[AGG_TMP52]], ptr align 8 [[AGG_TMP5]], i64 80, i1 false)
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[A]], ptr align 8 [[AGG_TMP1]], i64 80, i1 false)
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[B]], ptr align 8 [[AGG_TMP52]], i64 80, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 80, ptr nonnull [[AGG_TMP]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 80, ptr nonnull [[AGG_TMP5]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[AGG_TMP]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[AGG_TMP5]])
; CHECK-NEXT: br label [[TAILRECURSE]]
; CHECK: return:
; CHECK-NEXT: ret void
@@ -74,14 +74,14 @@ if.end: ; preds = %entry
%1 = load i64, ptr %arrayidx4, align 8
%call = call i32 (ptr, ...) @printf(ptr nonnull dereferenceable(1) @.str
, i64 %inc, i64 %1)
- call void @llvm.lifetime.start.p0(i64 80, ptr nonnull %agg.tmp)
+ call void @llvm.lifetime.start.p0(ptr nonnull %agg.tmp)
call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(80) %agg.tmp, ptr nonnull align 8 dereferenceable(80) %b, i64 80, i1 false)
- call void @llvm.lifetime.start.p0(i64 80, ptr nonnull %agg.tmp5)
+ call void @llvm.lifetime.start.p0(ptr nonnull %agg.tmp5)
call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(80) %agg.tmp5, ptr nonnull align 8 dereferenceable(80) %a, i64 80, i1 false)
%add = add nsw i32 %i, 1
call void @_Z7dostuff1AS_i(ptr nonnull byval(%struct.A) align 8 %agg.tmp, ptr nonnull byval(%struct.A) align 8 %agg.tmp5, i32 %add)
- call void @llvm.lifetime.end.p0(i64 80, ptr nonnull %agg.tmp)
- call void @llvm.lifetime.end.p0(i64 80, ptr nonnull %agg.tmp5)
+ call void @llvm.lifetime.end.p0(ptr nonnull %agg.tmp)
+ call void @llvm.lifetime.end.p0(ptr nonnull %agg.tmp5)
br label %return
return: ; preds = %entry, %if.end
@@ -95,10 +95,10 @@ declare dso_local noundef i32 @printf(ptr nocapture noundef readonly, ...) local
declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg) #2
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.start.p0(ptr nocapture) #2
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.end.p0(ptr nocapture) #2
; Function Attrs: noinline norecurse nounwind optnone uwtable
define dso_local i32 @main() local_unnamed_addr #3 {
diff --git a/llvm/test/Transforms/TailCallElim/tre-byval-parameter.ll b/llvm/test/Transforms/TailCallElim/tre-byval-parameter.ll
index 256fb04..dedd081 100644
--- a/llvm/test/Transforms/TailCallElim/tre-byval-parameter.ll
+++ b/llvm/test/Transforms/TailCallElim/tre-byval-parameter.ll
@@ -44,14 +44,14 @@ define dso_local i32 @_Z3fooi1S(i32 %count, ptr nocapture readonly byval(%struct
; CHECK: if.end:
; CHECK-NEXT: [[ADD]] = add nsw i32 [[COUNT_TR]], 1
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(20) [[AGG_TMP1]], ptr nonnull align 8 dereferenceable(20) [[P1]], i64 20, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 20, ptr nonnull [[AGG_TMP14]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 20, ptr nonnull [[AGG_TMP_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[AGG_TMP14]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[AGG_TMP_I]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(20) [[AGG_TMP14]], ptr nonnull align 8 dereferenceable(20) [[AGG_TMP1]], i64 20, i1 false)
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(20) [[AGG_TMP_I]], ptr nonnull align 8 dereferenceable(20) [[AGG_TMP14]], i64 20, i1 false)
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[AGG_TMP_I1]], ptr align 8 [[AGG_TMP_I]], i64 20, i1 false)
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[P1]], ptr align 8 [[AGG_TMP_I1]], i64 20, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 20, ptr nonnull [[AGG_TMP14]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 20, ptr nonnull [[AGG_TMP_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[AGG_TMP14]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[AGG_TMP_I]])
; CHECK-NEXT: br label [[TAILRECURSE]]
; CHECK: return:
; CHECK-NEXT: ret i32 [[CALL]]
@@ -72,13 +72,13 @@ if.then: ; preds = %entry
if.end: ; preds = %entry
%add = add nsw i32 %count, 1
call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(20) %agg.tmp1, ptr nonnull align 8 dereferenceable(20) %p1, i64 20, i1 false)
- call void @llvm.lifetime.start.p0(i64 20, ptr nonnull %agg.tmp14)
- call void @llvm.lifetime.start.p0(i64 20, ptr nonnull %agg.tmp.i)
+ call void @llvm.lifetime.start.p0(ptr nonnull %agg.tmp14)
+ call void @llvm.lifetime.start.p0(ptr nonnull %agg.tmp.i)
call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(20) %agg.tmp14, ptr nonnull align 8 dereferenceable(20) %agg.tmp1, i64 20, i1 false)
call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(20) %agg.tmp.i, ptr nonnull align 8 dereferenceable(20) %agg.tmp14, i64 20, i1 false)
%call.i = call i32 @_Z3fooi1S(i32 %add, ptr nonnull byval(%struct.S) align 8 %agg.tmp.i)
- call void @llvm.lifetime.end.p0(i64 20, ptr nonnull %agg.tmp14)
- call void @llvm.lifetime.end.p0(i64 20, ptr nonnull %agg.tmp.i)
+ call void @llvm.lifetime.end.p0(ptr nonnull %agg.tmp14)
+ call void @llvm.lifetime.end.p0(ptr nonnull %agg.tmp.i)
br label %return
return: ; preds = %if.end, %if.then
@@ -89,10 +89,10 @@ return: ; preds = %if.end, %if.then
declare dso_local i32 @_Z3zoo1S(ptr byval(%struct.S) align 8) local_unnamed_addr #1
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.start.p0(ptr nocapture) #2
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.end.p0(ptr nocapture) #2
; Function Attrs: argmemonly nounwind willreturn
declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg) #2
diff --git a/llvm/test/Transforms/TailCallElim/tre-multiple-exits.ll b/llvm/test/Transforms/TailCallElim/tre-multiple-exits.ll
index 293deca..b77ae9c 100644
--- a/llvm/test/Transforms/TailCallElim/tre-multiple-exits.ll
+++ b/llvm/test/Transforms/TailCallElim/tre-multiple-exits.ll
@@ -49,11 +49,11 @@ define dso_local void @_Z19test_multiple_exitsi(i32 %param) local_unnamed_addr #
; CHECK-NEXT: [[TMP0:%.*]] = icmp ult i32 [[PARAM_TR]], 10
; CHECK-NEXT: br i1 [[TMP0]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
; CHECK: if.then:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[TEMP]]) #1
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TEMP]]) #1
; CHECK-NEXT: call void @_Z11capture_argPi(ptr nonnull [[TEMP]])
; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i32 [[PARAM_TR]], 1
; CHECK-NEXT: call void @_Z19test_multiple_exitsi(i32 [[ADD]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[TEMP]]) #1
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TEMP]]) #1
; CHECK-NEXT: br label [[IF_END14:%.*]]
; CHECK: if.else:
; CHECK-NEXT: [[PARAM_OFF:%.*]] = add i32 [[PARAM_TR]], -10
@@ -80,11 +80,11 @@ entry:
br i1 %0, label %if.then, label %if.else
if.then: ; preds = %entry
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %temp) #2
+ call void @llvm.lifetime.start.p0(ptr nonnull %temp) #2
call void @_Z11capture_argPi(ptr nonnull %temp)
%add = add nuw nsw i32 %param, 1
call void @_Z19test_multiple_exitsi(i32 %add)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %temp) #2
+ call void @llvm.lifetime.end.p0(ptr nonnull %temp) #2
br label %if.end14
if.else: ; preds = %entry
@@ -113,10 +113,10 @@ if.end14: ; preds = %if.then5, %if.then1
}
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.start.p0(ptr nocapture) #2
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.end.p0(ptr nocapture) #2
attributes #0 = { nofree noinline norecurse nounwind uwtable }
attributes #1 = { nounwind uwtable }
diff --git a/llvm/test/Transforms/TailCallElim/tre-noncapturing-alloca-calls.ll b/llvm/test/Transforms/TailCallElim/tre-noncapturing-alloca-calls.ll
index c9ac9a5d..2f1aded 100644
--- a/llvm/test/Transforms/TailCallElim/tre-noncapturing-alloca-calls.ll
+++ b/llvm/test/Transforms/TailCallElim/tre-noncapturing-alloca-calls.ll
@@ -34,11 +34,11 @@ define dso_local void @_Z4testi(i32 %recurseCount) local_unnamed_addr #1 {
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[RECURSECOUNT_TR]], 0
; CHECK-NEXT: br i1 [[CMP]], label [[RETURN:%.*]], label [[IF_END]]
; CHECK: if.end:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[TEMP]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TEMP]])
; CHECK-NEXT: store i32 10, ptr [[TEMP]], align 4
; CHECK-NEXT: call void @_Z15globalIncrementPKi(ptr nonnull [[TEMP]])
; CHECK-NEXT: [[SUB]] = add nsw i32 [[RECURSECOUNT_TR]], -1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[TEMP]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TEMP]])
; CHECK-NEXT: br label [[TAILRECURSE]]
; CHECK: return:
; CHECK-NEXT: ret void
@@ -49,12 +49,12 @@ entry:
br i1 %cmp, label %return, label %if.end
if.end: ; preds = %entry
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %temp) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %temp) #6
store i32 10, ptr %temp, align 4
call void @_Z15globalIncrementPKi(ptr nonnull %temp)
%sub = add nsw i32 %recurseCount, -1
call void @_Z4testi(i32 %sub)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %temp) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %temp) #6
br label %return
return: ; preds = %entry, %if.end
@@ -62,10 +62,10 @@ return: ; preds = %entry, %if.end
}
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.start.p0(ptr nocapture) #2
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.end.p0(ptr nocapture) #2
attributes #0 = { nofree noinline norecurse nounwind uwtable }
attributes #1 = { nounwind uwtable }
diff --git a/llvm/test/Transforms/Util/PredicateInfo/pr33456.ll b/llvm/test/Transforms/Util/PredicateInfo/pr33456.ll
index 14bfbb1..36eaf6e 100644
--- a/llvm/test/Transforms/Util/PredicateInfo/pr33456.ll
+++ b/llvm/test/Transforms/Util/PredicateInfo/pr33456.ll
@@ -61,8 +61,8 @@ define i32 @main() {
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
diff --git a/llvm/test/Transforms/Util/dbg-call-bitcast.ll b/llvm/test/Transforms/Util/dbg-call-bitcast.ll
index d8d80ab..f0c579c 100644
--- a/llvm/test/Transforms/Util/dbg-call-bitcast.ll
+++ b/llvm/test/Transforms/Util/dbg-call-bitcast.ll
@@ -2,7 +2,7 @@
define dso_local void @_Z1fv() {
%1 = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %1)
+ call void @llvm.lifetime.start.p0(ptr nonnull %1)
call void @llvm.dbg.declare(metadata ptr %1, metadata !16, metadata !DIExpression()), !dbg !19
; CHECK: %[[A:.*]] = alloca i32, align 4
; CHECK: #dbg_value(ptr %[[A]], {{.*}}, !DIExpression(DW_OP_deref)
@@ -11,13 +11,13 @@ define dso_local void @_Z1fv() {
; CHECK-NOT: #dbg_value
; CHECK: call void @_Z1gPv
call void @_Z1gPv(ptr nonnull %1)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %1)
+ call void @llvm.lifetime.end.p0(ptr nonnull %1)
ret void, !dbg !21
}
define dso_local void @_Z2fv() {
%1 = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %1)
+ call void @llvm.lifetime.start.p0(ptr nonnull %1)
call void @llvm.dbg.declare(metadata ptr %1, metadata !16, metadata !DIExpression()), !dbg !19
; CHECK: %[[A:.*]] = alloca i32, align 4
; CHECK: #dbg_value(ptr %[[A]], {{.*}}, !DIExpression(DW_OP_deref)
@@ -29,14 +29,14 @@ block2:
; CHECK: #dbg_value(ptr %[[A]], {{.*}}, !DIExpression(DW_OP_deref)
; CHECK: call void @_Z1gPv
call void @_Z1gPv(ptr nonnull %1)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %1)
+ call void @llvm.lifetime.end.p0(ptr nonnull %1)
ret void, !dbg !21
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @llvm.dbg.declare(metadata, metadata, metadata)
declare dso_local void @_Z1gPv(ptr)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!8, !9, !10}
diff --git a/llvm/test/Verifier/intrinsic-immarg.ll b/llvm/test/Verifier/intrinsic-immarg.ll
index c1bb932..d5aef3d 100644
--- a/llvm/test/Verifier/intrinsic-immarg.ll
+++ b/llvm/test/Verifier/intrinsic-immarg.ll
@@ -163,26 +163,6 @@ define void @test_scatter_8i32(<8 x i32> %a1, <8 x ptr> %ptr, <8 x i1> %mask, i3
ret void
}
-declare void @llvm.lifetime.start.p0(i64, ptr)
-define void @test_lifetime_start(i64 %arg0) {
- ; CHECK: immarg operand has non-immediate parameter
- ; CHECK-NEXT: i64 %arg0
- ; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 %arg0, ptr %ptr)
- %ptr = alloca i64
- call void @llvm.lifetime.start.p0(i64 %arg0, ptr %ptr)
- ret void
-}
-
-declare void @llvm.lifetime.end.p0(i64, ptr)
-define void @test_lifetime_end(i64 %arg0) {
- ; CHECK: immarg operand has non-immediate parameter
- ; CHECK-NEXT: i64 %arg0
- ; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 %arg0, ptr %ptr)
- %ptr = alloca i64
- call void @llvm.lifetime.end.p0(i64 %arg0, ptr %ptr)
- ret void
-}
-
declare ptr @llvm.invariant.start.p0(i64, ptr)
define void @test_invariant_start(i64 %arg0, ptr %ptr) {
; CHECK: immarg operand has non-immediate parameter
diff --git a/llvm/test/Verifier/opaque-ptr.ll b/llvm/test/Verifier/opaque-ptr.ll
index 10e43a4..3ac9044 100644
--- a/llvm/test/Verifier/opaque-ptr.ll
+++ b/llvm/test/Verifier/opaque-ptr.ll
@@ -40,13 +40,13 @@ define void @atomicrmw(ptr %a, i32 %i) {
define void @opaque_mangle() {
; CHECK-LABEL: @opaque_mangle(
; CHECK-NEXT: [[A:%.*]] = alloca i64, align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[A]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
; CHECK-NEXT: ret void
;
%a = alloca i64
- call void @llvm.lifetime.start.p0(i64 8, ptr %a)
- call void @llvm.lifetime.end.p0(i64 8, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
ret void
}
@@ -65,10 +65,8 @@ define void @intrinsic_calls(ptr %a) {
ret void
}
-; CHECK: @llvm.lifetime.start.p0
-; CHECK: @llvm.lifetime.end.p0
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare <2 x i32> @llvm.masked.load.v2i32.p0(ptr, i32, <2 x i1>, <2 x i32>)
declare void @llvm.masked.store.v2i32.p0(<2 x i32>, ptr, i32, <2 x i1>)
diff --git a/llvm/test/lit.site.cfg.py.in b/llvm/test/lit.site.cfg.py.in
index 893e2cb..973e0ec9 100644
--- a/llvm/test/lit.site.cfg.py.in
+++ b/llvm/test/lit.site.cfg.py.in
@@ -66,7 +66,6 @@ config.spirv_tools_tests = @LLVM_INCLUDE_SPIRV_TOOLS_TESTS@
config.have_vc_rev = @LLVM_APPEND_VC_REV@
config.force_vc_rev = "@LLVM_FORCE_VC_REVISION@"
config.has_logf128 = @LLVM_HAS_LOGF128@
-config.has_key_instructions = @LLVM_EXPERIMENTAL_KEY_INSTRUCTIONS@
import lit.llvm
lit.llvm.initialize(lit_config, config)
diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll
index 96ff2d7..ef60118 100644
--- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll
+++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll
@@ -14,7 +14,7 @@ entry:
#dbg_assign(i1 undef, !13, !DIExpression(), !16, ptr %A.addr, !DIExpression(), !17)
store ptr %A, ptr %A.addr, align 8, !tbaa !18
#dbg_declare(ptr %A.addr, !13, !DIExpression(), !17)
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2, !dbg !22
+ call void @llvm.lifetime.start.p0(ptr %i) #2, !dbg !22
#dbg_declare(ptr %i, !14, !DIExpression(), !23)
store i32 0, ptr %i, align 4, !dbg !23, !tbaa !24
br label %for.cond, !dbg !22
@@ -27,7 +27,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.cond.cleanup, !dbg !31, !prof !32
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2, !dbg !33
+ call void @llvm.lifetime.end.p0(ptr %i) #2, !dbg !33
br label %for.end
for.body: ; preds = %for.cond
@@ -49,10 +49,10 @@ for.end: ; preds = %for.cond.cleanup
}
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
; Function Attrs: nounwind uwtable
define dso_local void @bar(ptr %A) #0 !dbg !41 {
@@ -61,7 +61,7 @@ entry:
%i = alloca i32, align 4
store ptr %A, ptr %A.addr, align 8, !tbaa !18
#dbg_declare(ptr %A.addr, !43, !DIExpression(), !46)
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2, !dbg !47
+ call void @llvm.lifetime.start.p0(ptr %i) #2, !dbg !47
#dbg_declare(ptr %i, !44, !DIExpression(), !48)
store i32 0, ptr %i, align 4, !dbg !48, !tbaa !24
br label %for.cond, !dbg !47
@@ -74,7 +74,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.cond.cleanup, !dbg !54
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2, !dbg !55
+ call void @llvm.lifetime.end.p0(ptr %i) #2, !dbg !55
br label %for.end
for.body: ; preds = %for.cond
diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.expected b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.expected
index 6504830..4bae52e 100644
--- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.expected
+++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.expected
@@ -16,7 +16,7 @@ define dso_local void @foo(ptr %A) #0 !dbg !7 {
; CHECK-NEXT: #dbg_assign(i1 undef, [[META13:![0-9]+]], !DIExpression(), [[DIASSIGNID16]], ptr [[A_ADDR]], !DIExpression(), [[META17:![0-9]+]])
; CHECK-NEXT: store ptr [[A:%.*]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA18:![0-9]+]]
; CHECK-NEXT: #dbg_declare(ptr [[A_ADDR]], [[META13]], !DIExpression(), [[META17]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR2:[0-9]+]], !dbg [[DBG22:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR2:[0-9]+]], !dbg [[DBG22:![0-9]+]]
; CHECK-NEXT: #dbg_declare(ptr [[I]], [[META14:![0-9]+]], !DIExpression(), [[META23:![0-9]+]])
; CHECK-NEXT: store i32 0, ptr [[I]], align 4, !dbg [[META23]], !tbaa [[TBAA24:![0-9]+]]
; CHECK-NEXT: br label [[FOR_COND:%.*]], !dbg [[DBG22]]
@@ -27,7 +27,7 @@ define dso_local void @foo(ptr %A) #0 !dbg !7 {
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], [[TMP2]], !dbg [[DBG30:![0-9]+]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]], !dbg [[DBG31:![0-9]+]], !prof [[PROF32:![0-9]+]]
; CHECK: for.cond.cleanup:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG33:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG33:![0-9]+]]
; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !dbg [[DBG34:![0-9]+]], !tbaa [[TBAA18]]
@@ -50,7 +50,7 @@ entry:
#dbg_assign(i1 undef, !13, !DIExpression(), !16, ptr %A.addr, !DIExpression(), !17)
store ptr %A, ptr %A.addr, align 8, !tbaa !18
#dbg_declare(ptr %A.addr, !13, !DIExpression(), !17)
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2, !dbg !22
+ call void @llvm.lifetime.start.p0(ptr %i) #2, !dbg !22
#dbg_declare(ptr %i, !14, !DIExpression(), !23)
store i32 0, ptr %i, align 4, !dbg !23, !tbaa !24
br label %for.cond, !dbg !22
@@ -63,7 +63,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.cond.cleanup, !dbg !31, !prof !32
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2, !dbg !33
+ call void @llvm.lifetime.end.p0(ptr %i) #2, !dbg !33
br label %for.end
for.body: ; preds = %for.cond
@@ -85,10 +85,10 @@ for.end: ; preds = %for.cond.cleanup
}
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
; Function Attrs: nounwind uwtable
define dso_local void @bar(ptr %A) #0 !dbg !41 {
@@ -98,7 +98,7 @@ define dso_local void @bar(ptr %A) #0 !dbg !41 {
; CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
; CHECK-NEXT: store ptr [[A:%.*]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA18]]
; CHECK-NEXT: #dbg_declare(ptr [[A_ADDR]], [[META43:![0-9]+]], !DIExpression(), [[META46:![0-9]+]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG47:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG47:![0-9]+]]
; CHECK-NEXT: #dbg_declare(ptr [[I]], [[META44:![0-9]+]], !DIExpression(), [[META48:![0-9]+]])
; CHECK-NEXT: store i32 0, ptr [[I]], align 4, !dbg [[META48]], !tbaa [[TBAA24]]
; CHECK-NEXT: br label [[FOR_COND:%.*]], !dbg [[DBG47]]
@@ -109,7 +109,7 @@ define dso_local void @bar(ptr %A) #0 !dbg !41 {
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], [[TMP2]], !dbg [[DBG53:![0-9]+]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]], !dbg [[DBG54:![0-9]+]]
; CHECK: for.cond.cleanup:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG55:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG55:![0-9]+]]
; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !dbg [[DBG56:![0-9]+]], !tbaa [[TBAA18]]
@@ -131,7 +131,7 @@ entry:
%i = alloca i32, align 4
store ptr %A, ptr %A.addr, align 8, !tbaa !18
#dbg_declare(ptr %A.addr, !43, !DIExpression(), !46)
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2, !dbg !47
+ call void @llvm.lifetime.start.p0(ptr %i) #2, !dbg !47
#dbg_declare(ptr %i, !44, !DIExpression(), !48)
store i32 0, ptr %i, align 4, !dbg !48, !tbaa !24
br label %for.cond, !dbg !47
@@ -144,7 +144,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.cond.cleanup, !dbg !54
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2, !dbg !55
+ call void @llvm.lifetime.end.p0(ptr %i) #2, !dbg !55
br label %for.end
for.body: ; preds = %for.cond
diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.expected b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.expected
index 7c1ea5e..12c6e4e 100644
--- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.expected
+++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.expected
@@ -17,7 +17,7 @@ define dso_local void @foo(ptr %A) #0 !dbg !7 {
; CHECK-NEXT: #dbg_assign(i1 undef, [[META13:![0-9]+]], !DIExpression(), [[DIASSIGNID16]], ptr [[A_ADDR]], !DIExpression(), [[META17:![0-9]+]])
; CHECK-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA18:![0-9]+]]
; CHECK-NEXT: #dbg_declare(ptr [[A_ADDR]], [[META13]], !DIExpression(), [[META17]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR2:[0-9]+]], !dbg [[DBG22:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR2:[0-9]+]], !dbg [[DBG22:![0-9]+]]
; CHECK-NEXT: #dbg_declare(ptr [[I]], [[META14:![0-9]+]], !DIExpression(), [[META23:![0-9]+]])
; CHECK-NEXT: store i32 0, ptr [[I]], align 4, !dbg [[META23]], !tbaa [[TBAA24:![0-9]+]]
; CHECK-NEXT: br label [[FOR_COND:%.*]], !dbg [[DBG22]]
@@ -28,7 +28,7 @@ define dso_local void @foo(ptr %A) #0 !dbg !7 {
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], [[TMP2]], !dbg [[DBG30:![0-9]+]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]], !dbg [[DBG31:![0-9]+]], !prof [[PROF32:![0-9]+]]
; CHECK: for.cond.cleanup:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG33:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG33:![0-9]+]]
; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !dbg [[DBG34:![0-9]+]], !tbaa [[TBAA18]]
@@ -51,7 +51,7 @@ entry:
#dbg_assign(i1 undef, !13, !DIExpression(), !16, ptr %A.addr, !DIExpression(), !17)
store ptr %A, ptr %A.addr, align 8, !tbaa !18
#dbg_declare(ptr %A.addr, !13, !DIExpression(), !17)
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2, !dbg !22
+ call void @llvm.lifetime.start.p0(ptr %i) #2, !dbg !22
#dbg_declare(ptr %i, !14, !DIExpression(), !23)
store i32 0, ptr %i, align 4, !dbg !23, !tbaa !24
br label %for.cond, !dbg !22
@@ -64,7 +64,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.cond.cleanup, !dbg !31, !prof !32
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2, !dbg !33
+ call void @llvm.lifetime.end.p0(ptr %i) #2, !dbg !33
br label %for.end
for.body: ; preds = %for.cond
@@ -86,10 +86,10 @@ for.end: ; preds = %for.cond.cleanup
}
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
; Function Attrs: nounwind uwtable
define dso_local void @bar(ptr %A) #0 !dbg !41 {
@@ -100,7 +100,7 @@ define dso_local void @bar(ptr %A) #0 !dbg !41 {
; CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
; CHECK-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA18]]
; CHECK-NEXT: #dbg_declare(ptr [[A_ADDR]], [[META43:![0-9]+]], !DIExpression(), [[META46:![0-9]+]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG47:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG47:![0-9]+]]
; CHECK-NEXT: #dbg_declare(ptr [[I]], [[META44:![0-9]+]], !DIExpression(), [[META48:![0-9]+]])
; CHECK-NEXT: store i32 0, ptr [[I]], align 4, !dbg [[META48]], !tbaa [[TBAA24]]
; CHECK-NEXT: br label [[FOR_COND:%.*]], !dbg [[DBG47]]
@@ -111,7 +111,7 @@ define dso_local void @bar(ptr %A) #0 !dbg !41 {
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], [[TMP2]], !dbg [[DBG53:![0-9]+]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]], !dbg [[DBG54:![0-9]+]]
; CHECK: for.cond.cleanup:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG55:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG55:![0-9]+]]
; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !dbg [[DBG56:![0-9]+]], !tbaa [[TBAA18]]
@@ -133,7 +133,7 @@ entry:
%i = alloca i32, align 4
store ptr %A, ptr %A.addr, align 8, !tbaa !18
#dbg_declare(ptr %A.addr, !43, !DIExpression(), !46)
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2, !dbg !47
+ call void @llvm.lifetime.start.p0(ptr %i) #2, !dbg !47
#dbg_declare(ptr %i, !44, !DIExpression(), !48)
store i32 0, ptr %i, align 4, !dbg !48, !tbaa !24
br label %for.cond, !dbg !47
@@ -146,7 +146,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.cond.cleanup, !dbg !54
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2, !dbg !55
+ call void @llvm.lifetime.end.p0(ptr %i) #2, !dbg !55
br label %for.end
for.body: ; preds = %for.cond
diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.globals.expected b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.globals.expected
index 94af952..d67a303 100644
--- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.globals.expected
+++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.globals.expected
@@ -17,7 +17,7 @@ define dso_local void @foo(ptr %A) #0 !dbg !7 {
; CHECK-NEXT: #dbg_assign(i1 undef, [[META13:![0-9]+]], !DIExpression(), [[DIASSIGNID16]], ptr [[A_ADDR]], !DIExpression(), [[META17:![0-9]+]])
; CHECK-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA18:![0-9]+]]
; CHECK-NEXT: #dbg_declare(ptr [[A_ADDR]], [[META13]], !DIExpression(), [[META17]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR2:[0-9]+]], !dbg [[DBG22:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR2:[0-9]+]], !dbg [[DBG22:![0-9]+]]
; CHECK-NEXT: #dbg_declare(ptr [[I]], [[META14:![0-9]+]], !DIExpression(), [[META23:![0-9]+]])
; CHECK-NEXT: store i32 0, ptr [[I]], align 4, !dbg [[META23]], !tbaa [[TBAA24:![0-9]+]]
; CHECK-NEXT: br label [[FOR_COND:%.*]], !dbg [[DBG22]]
@@ -28,7 +28,7 @@ define dso_local void @foo(ptr %A) #0 !dbg !7 {
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], [[TMP2]], !dbg [[DBG30:![0-9]+]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]], !dbg [[DBG31:![0-9]+]], !prof [[PROF32:![0-9]+]]
; CHECK: for.cond.cleanup:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG33:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG33:![0-9]+]]
; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !dbg [[DBG34:![0-9]+]], !tbaa [[TBAA18]]
@@ -51,7 +51,7 @@ entry:
#dbg_assign(i1 undef, !13, !DIExpression(), !16, ptr %A.addr, !DIExpression(), !17)
store ptr %A, ptr %A.addr, align 8, !tbaa !18
#dbg_declare(ptr %A.addr, !13, !DIExpression(), !17)
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2, !dbg !22
+ call void @llvm.lifetime.start.p0(ptr %i) #2, !dbg !22
#dbg_declare(ptr %i, !14, !DIExpression(), !23)
store i32 0, ptr %i, align 4, !dbg !23, !tbaa !24
br label %for.cond, !dbg !22
@@ -64,7 +64,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.cond.cleanup, !dbg !31, !prof !32
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2, !dbg !33
+ call void @llvm.lifetime.end.p0(ptr %i) #2, !dbg !33
br label %for.end
for.body: ; preds = %for.cond
@@ -86,10 +86,10 @@ for.end: ; preds = %for.cond.cleanup
}
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
; Function Attrs: nounwind uwtable
define dso_local void @bar(ptr %A) #0 !dbg !41 {
@@ -100,7 +100,7 @@ define dso_local void @bar(ptr %A) #0 !dbg !41 {
; CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
; CHECK-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA18]]
; CHECK-NEXT: #dbg_declare(ptr [[A_ADDR]], [[META43:![0-9]+]], !DIExpression(), [[META46:![0-9]+]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG47:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG47:![0-9]+]]
; CHECK-NEXT: #dbg_declare(ptr [[I]], [[META44:![0-9]+]], !DIExpression(), [[META48:![0-9]+]])
; CHECK-NEXT: store i32 0, ptr [[I]], align 4, !dbg [[META48]], !tbaa [[TBAA24]]
; CHECK-NEXT: br label [[FOR_COND:%.*]], !dbg [[DBG47]]
@@ -111,7 +111,7 @@ define dso_local void @bar(ptr %A) #0 !dbg !41 {
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], [[TMP2]], !dbg [[DBG53:![0-9]+]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]], !dbg [[DBG54:![0-9]+]]
; CHECK: for.cond.cleanup:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG55:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG55:![0-9]+]]
; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !dbg [[DBG56:![0-9]+]], !tbaa [[TBAA18]]
@@ -133,7 +133,7 @@ entry:
%i = alloca i32, align 4
store ptr %A, ptr %A.addr, align 8, !tbaa !18
#dbg_declare(ptr %A.addr, !43, !DIExpression(), !46)
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2, !dbg !47
+ call void @llvm.lifetime.start.p0(ptr %i) #2, !dbg !47
#dbg_declare(ptr %i, !44, !DIExpression(), !48)
store i32 0, ptr %i, align 4, !dbg !48, !tbaa !24
br label %for.cond, !dbg !47
@@ -146,7 +146,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.cond.cleanup, !dbg !54
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2, !dbg !55
+ call void @llvm.lifetime.end.p0(ptr %i) #2, !dbg !55
br label %for.end
for.body: ; preds = %for.cond
diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.noglobals.expected b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.noglobals.expected
index 6504830..4bae52e 100644
--- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.noglobals.expected
+++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.noglobals.expected
@@ -16,7 +16,7 @@ define dso_local void @foo(ptr %A) #0 !dbg !7 {
; CHECK-NEXT: #dbg_assign(i1 undef, [[META13:![0-9]+]], !DIExpression(), [[DIASSIGNID16]], ptr [[A_ADDR]], !DIExpression(), [[META17:![0-9]+]])
; CHECK-NEXT: store ptr [[A:%.*]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA18:![0-9]+]]
; CHECK-NEXT: #dbg_declare(ptr [[A_ADDR]], [[META13]], !DIExpression(), [[META17]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR2:[0-9]+]], !dbg [[DBG22:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR2:[0-9]+]], !dbg [[DBG22:![0-9]+]]
; CHECK-NEXT: #dbg_declare(ptr [[I]], [[META14:![0-9]+]], !DIExpression(), [[META23:![0-9]+]])
; CHECK-NEXT: store i32 0, ptr [[I]], align 4, !dbg [[META23]], !tbaa [[TBAA24:![0-9]+]]
; CHECK-NEXT: br label [[FOR_COND:%.*]], !dbg [[DBG22]]
@@ -27,7 +27,7 @@ define dso_local void @foo(ptr %A) #0 !dbg !7 {
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], [[TMP2]], !dbg [[DBG30:![0-9]+]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]], !dbg [[DBG31:![0-9]+]], !prof [[PROF32:![0-9]+]]
; CHECK: for.cond.cleanup:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG33:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG33:![0-9]+]]
; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !dbg [[DBG34:![0-9]+]], !tbaa [[TBAA18]]
@@ -50,7 +50,7 @@ entry:
#dbg_assign(i1 undef, !13, !DIExpression(), !16, ptr %A.addr, !DIExpression(), !17)
store ptr %A, ptr %A.addr, align 8, !tbaa !18
#dbg_declare(ptr %A.addr, !13, !DIExpression(), !17)
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2, !dbg !22
+ call void @llvm.lifetime.start.p0(ptr %i) #2, !dbg !22
#dbg_declare(ptr %i, !14, !DIExpression(), !23)
store i32 0, ptr %i, align 4, !dbg !23, !tbaa !24
br label %for.cond, !dbg !22
@@ -63,7 +63,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.cond.cleanup, !dbg !31, !prof !32
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2, !dbg !33
+ call void @llvm.lifetime.end.p0(ptr %i) #2, !dbg !33
br label %for.end
for.body: ; preds = %for.cond
@@ -85,10 +85,10 @@ for.end: ; preds = %for.cond.cleanup
}
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
; Function Attrs: nounwind uwtable
define dso_local void @bar(ptr %A) #0 !dbg !41 {
@@ -98,7 +98,7 @@ define dso_local void @bar(ptr %A) #0 !dbg !41 {
; CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
; CHECK-NEXT: store ptr [[A:%.*]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA18]]
; CHECK-NEXT: #dbg_declare(ptr [[A_ADDR]], [[META43:![0-9]+]], !DIExpression(), [[META46:![0-9]+]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG47:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG47:![0-9]+]]
; CHECK-NEXT: #dbg_declare(ptr [[I]], [[META44:![0-9]+]], !DIExpression(), [[META48:![0-9]+]])
; CHECK-NEXT: store i32 0, ptr [[I]], align 4, !dbg [[META48]], !tbaa [[TBAA24]]
; CHECK-NEXT: br label [[FOR_COND:%.*]], !dbg [[DBG47]]
@@ -109,7 +109,7 @@ define dso_local void @bar(ptr %A) #0 !dbg !41 {
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], [[TMP2]], !dbg [[DBG53:![0-9]+]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]], !dbg [[DBG54:![0-9]+]]
; CHECK: for.cond.cleanup:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG55:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG55:![0-9]+]]
; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !dbg [[DBG56:![0-9]+]], !tbaa [[TBAA18]]
@@ -131,7 +131,7 @@ entry:
%i = alloca i32, align 4
store ptr %A, ptr %A.addr, align 8, !tbaa !18
#dbg_declare(ptr %A.addr, !43, !DIExpression(), !46)
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2, !dbg !47
+ call void @llvm.lifetime.start.p0(ptr %i) #2, !dbg !47
#dbg_declare(ptr %i, !44, !DIExpression(), !48)
store i32 0, ptr %i, align 4, !dbg !48, !tbaa !24
br label %for.cond, !dbg !47
@@ -144,7 +144,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.cond.cleanup, !dbg !54
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2, !dbg !55
+ call void @llvm.lifetime.end.p0(ptr %i) #2, !dbg !55
br label %for.end
for.body: ; preds = %for.cond
diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.transitiveglobals.expected b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.transitiveglobals.expected
index a656c4ae..fb3a76f 100644
--- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.transitiveglobals.expected
+++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.transitiveglobals.expected
@@ -16,7 +16,7 @@ define dso_local void @foo(ptr %A) #0 !dbg !7 {
; CHECK-NEXT: #dbg_assign(i1 undef, [[META13:![0-9]+]], !DIExpression(), [[DIASSIGNID16]], ptr [[A_ADDR]], !DIExpression(), [[META17:![0-9]+]])
; CHECK-NEXT: store ptr [[A:%.*]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA18:![0-9]+]]
; CHECK-NEXT: #dbg_declare(ptr [[A_ADDR]], [[META13]], !DIExpression(), [[META17]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR2:[0-9]+]], !dbg [[DBG22:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR2:[0-9]+]], !dbg [[DBG22:![0-9]+]]
; CHECK-NEXT: #dbg_declare(ptr [[I]], [[META14:![0-9]+]], !DIExpression(), [[META23:![0-9]+]])
; CHECK-NEXT: store i32 0, ptr [[I]], align 4, !dbg [[META23]], !tbaa [[TBAA24:![0-9]+]]
; CHECK-NEXT: br label [[FOR_COND:%.*]], !dbg [[DBG22]]
@@ -27,7 +27,7 @@ define dso_local void @foo(ptr %A) #0 !dbg !7 {
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], [[TMP2]], !dbg [[DBG30:![0-9]+]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]], !dbg [[DBG31:![0-9]+]], !prof [[PROF32:![0-9]+]]
; CHECK: for.cond.cleanup:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG33:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG33:![0-9]+]]
; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !dbg [[DBG34:![0-9]+]], !tbaa [[TBAA18]]
@@ -50,7 +50,7 @@ entry:
#dbg_assign(i1 undef, !13, !DIExpression(), !16, ptr %A.addr, !DIExpression(), !17)
store ptr %A, ptr %A.addr, align 8, !tbaa !18
#dbg_declare(ptr %A.addr, !13, !DIExpression(), !17)
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2, !dbg !22
+ call void @llvm.lifetime.start.p0(ptr %i) #2, !dbg !22
#dbg_declare(ptr %i, !14, !DIExpression(), !23)
store i32 0, ptr %i, align 4, !dbg !23, !tbaa !24
br label %for.cond, !dbg !22
@@ -63,7 +63,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.cond.cleanup, !dbg !31, !prof !32
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2, !dbg !33
+ call void @llvm.lifetime.end.p0(ptr %i) #2, !dbg !33
br label %for.end
for.body: ; preds = %for.cond
@@ -85,10 +85,10 @@ for.end: ; preds = %for.cond.cleanup
}
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
; Function Attrs: nounwind uwtable
define dso_local void @bar(ptr %A) #0 !dbg !41 {
@@ -98,7 +98,7 @@ define dso_local void @bar(ptr %A) #0 !dbg !41 {
; CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
; CHECK-NEXT: store ptr [[A:%.*]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA18]]
; CHECK-NEXT: #dbg_declare(ptr [[A_ADDR]], [[META43:![0-9]+]], !DIExpression(), [[META46:![0-9]+]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG47:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG47:![0-9]+]]
; CHECK-NEXT: #dbg_declare(ptr [[I]], [[META44:![0-9]+]], !DIExpression(), [[META48:![0-9]+]])
; CHECK-NEXT: store i32 0, ptr [[I]], align 4, !dbg [[META48]], !tbaa [[TBAA24]]
; CHECK-NEXT: br label [[FOR_COND:%.*]], !dbg [[DBG47]]
@@ -109,7 +109,7 @@ define dso_local void @bar(ptr %A) #0 !dbg !41 {
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], [[TMP2]], !dbg [[DBG53:![0-9]+]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]], !dbg [[DBG54:![0-9]+]]
; CHECK: for.cond.cleanup:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG55:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG55:![0-9]+]]
; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !dbg [[DBG56:![0-9]+]], !tbaa [[TBAA18]]
@@ -131,7 +131,7 @@ entry:
%i = alloca i32, align 4
store ptr %A, ptr %A.addr, align 8, !tbaa !18
#dbg_declare(ptr %A.addr, !43, !DIExpression(), !46)
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2, !dbg !47
+ call void @llvm.lifetime.start.p0(ptr %i) #2, !dbg !47
#dbg_declare(ptr %i, !44, !DIExpression(), !48)
store i32 0, ptr %i, align 4, !dbg !48, !tbaa !24
br label %for.cond, !dbg !47
@@ -144,7 +144,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.cond.cleanup, !dbg !54
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2, !dbg !55
+ call void @llvm.lifetime.end.p0(ptr %i) #2, !dbg !55
br label %for.end
for.body: ; preds = %for.cond
diff --git a/llvm/test/tools/dxil-dis/lifetimes.ll b/llvm/test/tools/dxil-dis/lifetimes.ll
index cb3e629..3c1666f 100644
--- a/llvm/test/tools/dxil-dis/lifetimes.ll
+++ b/llvm/test/tools/dxil-dis/lifetimes.ll
@@ -6,17 +6,17 @@ define void @test_lifetimes() {
; CHECK-NEXT: [[ALLOCA:%.*]] = alloca [2 x i32], align 4
; CHECK-NEXT: [[GEP:%.*]] = getelementptr [2 x i32], [2 x i32]* [[ALLOCA]], i32 0, i32 0
; CHECK-NEXT: [[BITCAST:%.*]] = bitcast [2 x i32]* [[ALLOCA]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start(i64 4, i8* nonnull [[BITCAST]])
+; CHECK-NEXT: call void @llvm.lifetime.start(i64 8, i8* nonnull [[BITCAST]])
; CHECK-NEXT: store i32 0, i32* [[GEP]], align 4
; CHECK-NEXT: [[BITCAST:%.*]] = bitcast [2 x i32]* [[ALLOCA]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.end(i64 4, i8* nonnull [[BITCAST]])
+; CHECK-NEXT: call void @llvm.lifetime.end(i64 8, i8* nonnull [[BITCAST]])
; CHECK-NEXT: ret void
;
%a = alloca [2 x i32], align 4
%gep = getelementptr [2 x i32], ptr %a, i32 0, i32 0
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %a)
+ call void @llvm.lifetime.start.p0(ptr nonnull %a)
store i32 0, ptr %gep, align 4
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
ret void
}
@@ -29,10 +29,10 @@ define void @test_lifetimes() {
; CHECK-DAG: declare void @llvm.lifetime.end(i64, i8* nocapture) [[LIFETIME_ATTRS]]
; Function Attrs: nounwind memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64, ptr) #0
+declare void @llvm.lifetime.end.p0(ptr) #0
; Function Attrs: nounwind memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64, ptr) #0
+declare void @llvm.lifetime.start.p0(ptr) #0
attributes #0 = { nounwind memory(argmem: readwrite) }
diff --git a/llvm/test/tools/llvm-ir2vec/entities.ll b/llvm/test/tools/llvm-ir2vec/entities.ll
index 737044c..4ed6400 100644
--- a/llvm/test/tools/llvm-ir2vec/entities.ll
+++ b/llvm/test/tools/llvm-ir2vec/entities.ll
@@ -1,6 +1,6 @@
; RUN: llvm-ir2vec entities | FileCheck %s
-CHECK: 92
+CHECK: 93
CHECK-NEXT: Ret 0
CHECK-NEXT: Br 1
CHECK-NEXT: Switch 2
@@ -48,48 +48,49 @@ CHECK-NEXT: SIToFP 43
CHECK-NEXT: FPTrunc 44
CHECK-NEXT: FPExt 45
CHECK-NEXT: PtrToInt 46
-CHECK-NEXT: IntToPtr 47
-CHECK-NEXT: BitCast 48
-CHECK-NEXT: AddrSpaceCast 49
-CHECK-NEXT: CleanupPad 50
-CHECK-NEXT: CatchPad 51
-CHECK-NEXT: ICmp 52
-CHECK-NEXT: FCmp 53
-CHECK-NEXT: PHI 54
-CHECK-NEXT: Call 55
-CHECK-NEXT: Select 56
-CHECK-NEXT: UserOp1 57
-CHECK-NEXT: UserOp2 58
-CHECK-NEXT: VAArg 59
-CHECK-NEXT: ExtractElement 60
-CHECK-NEXT: InsertElement 61
-CHECK-NEXT: ShuffleVector 62
-CHECK-NEXT: ExtractValue 63
-CHECK-NEXT: InsertValue 64
-CHECK-NEXT: LandingPad 65
-CHECK-NEXT: Freeze 66
-CHECK-NEXT: FloatTy 67
+CHECK-NEXT: PtrToAddr 47
+CHECK-NEXT: IntToPtr 48
+CHECK-NEXT: BitCast 49
+CHECK-NEXT: AddrSpaceCast 50
+CHECK-NEXT: CleanupPad 51
+CHECK-NEXT: CatchPad 52
+CHECK-NEXT: ICmp 53
+CHECK-NEXT: FCmp 54
+CHECK-NEXT: PHI 55
+CHECK-NEXT: Call 56
+CHECK-NEXT: Select 57
+CHECK-NEXT: UserOp1 58
+CHECK-NEXT: UserOp2 59
+CHECK-NEXT: VAArg 60
+CHECK-NEXT: ExtractElement 61
+CHECK-NEXT: InsertElement 62
+CHECK-NEXT: ShuffleVector 63
+CHECK-NEXT: ExtractValue 64
+CHECK-NEXT: InsertValue 65
+CHECK-NEXT: LandingPad 66
+CHECK-NEXT: Freeze 67
CHECK-NEXT: FloatTy 68
CHECK-NEXT: FloatTy 69
CHECK-NEXT: FloatTy 70
CHECK-NEXT: FloatTy 71
CHECK-NEXT: FloatTy 72
CHECK-NEXT: FloatTy 73
-CHECK-NEXT: VoidTy 74
-CHECK-NEXT: LabelTy 75
-CHECK-NEXT: MetadataTy 76
-CHECK-NEXT: UnknownTy 77
-CHECK-NEXT: TokenTy 78
-CHECK-NEXT: IntegerTy 79
-CHECK-NEXT: FunctionTy 80
-CHECK-NEXT: PointerTy 81
-CHECK-NEXT: StructTy 82
-CHECK-NEXT: ArrayTy 83
-CHECK-NEXT: VectorTy 84
+CHECK-NEXT: FloatTy 74
+CHECK-NEXT: VoidTy 75
+CHECK-NEXT: LabelTy 76
+CHECK-NEXT: MetadataTy 77
+CHECK-NEXT: UnknownTy 78
+CHECK-NEXT: TokenTy 79
+CHECK-NEXT: IntegerTy 80
+CHECK-NEXT: FunctionTy 81
+CHECK-NEXT: PointerTy 82
+CHECK-NEXT: StructTy 83
+CHECK-NEXT: ArrayTy 84
CHECK-NEXT: VectorTy 85
-CHECK-NEXT: PointerTy 86
-CHECK-NEXT: UnknownTy 87
-CHECK-NEXT: Function 88
-CHECK-NEXT: Pointer 89
-CHECK-NEXT: Constant 90
-CHECK-NEXT: Variable 91
+CHECK-NEXT: VectorTy 86
+CHECK-NEXT: PointerTy 87
+CHECK-NEXT: UnknownTy 88
+CHECK-NEXT: Function 89
+CHECK-NEXT: Pointer 90
+CHECK-NEXT: Constant 91
+CHECK-NEXT: Variable 92
diff --git a/llvm/test/tools/llvm-ir2vec/triplets.ll b/llvm/test/tools/llvm-ir2vec/triplets.ll
index a7fd9e4..6f64bab 100644
--- a/llvm/test/tools/llvm-ir2vec/triplets.ll
+++ b/llvm/test/tools/llvm-ir2vec/triplets.ll
@@ -25,41 +25,41 @@ entry:
}
; TRIPLETS: MAX_RELATION=3
-; TRIPLETS-NEXT: 12 79 0
-; TRIPLETS-NEXT: 12 91 2
-; TRIPLETS-NEXT: 12 91 3
+; TRIPLETS-NEXT: 12 80 0
+; TRIPLETS-NEXT: 12 92 2
+; TRIPLETS-NEXT: 12 92 3
; TRIPLETS-NEXT: 12 0 1
-; TRIPLETS-NEXT: 0 74 0
-; TRIPLETS-NEXT: 0 91 2
-; TRIPLETS-NEXT: 16 79 0
-; TRIPLETS-NEXT: 16 91 2
-; TRIPLETS-NEXT: 16 91 3
+; TRIPLETS-NEXT: 0 75 0
+; TRIPLETS-NEXT: 0 92 2
+; TRIPLETS-NEXT: 16 80 0
+; TRIPLETS-NEXT: 16 92 2
+; TRIPLETS-NEXT: 16 92 3
; TRIPLETS-NEXT: 16 0 1
-; TRIPLETS-NEXT: 0 74 0
-; TRIPLETS-NEXT: 0 91 2
-; TRIPLETS-NEXT: 30 81 0
-; TRIPLETS-NEXT: 30 90 2
+; TRIPLETS-NEXT: 0 75 0
+; TRIPLETS-NEXT: 0 92 2
+; TRIPLETS-NEXT: 30 82 0
+; TRIPLETS-NEXT: 30 91 2
; TRIPLETS-NEXT: 30 30 1
-; TRIPLETS-NEXT: 30 81 0
-; TRIPLETS-NEXT: 30 90 2
+; TRIPLETS-NEXT: 30 82 0
+; TRIPLETS-NEXT: 30 91 2
; TRIPLETS-NEXT: 30 32 1
-; TRIPLETS-NEXT: 32 74 0
-; TRIPLETS-NEXT: 32 91 2
-; TRIPLETS-NEXT: 32 89 3
+; TRIPLETS-NEXT: 32 75 0
+; TRIPLETS-NEXT: 32 92 2
+; TRIPLETS-NEXT: 32 90 3
; TRIPLETS-NEXT: 32 32 1
-; TRIPLETS-NEXT: 32 74 0
-; TRIPLETS-NEXT: 32 91 2
-; TRIPLETS-NEXT: 32 89 3
+; TRIPLETS-NEXT: 32 75 0
+; TRIPLETS-NEXT: 32 92 2
+; TRIPLETS-NEXT: 32 90 3
; TRIPLETS-NEXT: 32 31 1
-; TRIPLETS-NEXT: 31 79 0
-; TRIPLETS-NEXT: 31 89 2
+; TRIPLETS-NEXT: 31 80 0
+; TRIPLETS-NEXT: 31 90 2
; TRIPLETS-NEXT: 31 31 1
-; TRIPLETS-NEXT: 31 79 0
-; TRIPLETS-NEXT: 31 89 2
+; TRIPLETS-NEXT: 31 80 0
+; TRIPLETS-NEXT: 31 90 2
; TRIPLETS-NEXT: 31 12 1
-; TRIPLETS-NEXT: 12 79 0
-; TRIPLETS-NEXT: 12 91 2
-; TRIPLETS-NEXT: 12 91 3
+; TRIPLETS-NEXT: 12 80 0
+; TRIPLETS-NEXT: 12 92 2
+; TRIPLETS-NEXT: 12 92 3
; TRIPLETS-NEXT: 12 0 1
-; TRIPLETS-NEXT: 0 74 0
-; TRIPLETS-NEXT: 0 91 2
+; TRIPLETS-NEXT: 0 75 0
+; TRIPLETS-NEXT: 0 92 2
diff --git a/llvm/test/tools/llvm-mca/RISCV/SpacemitX60/rvv-arithmetic.s b/llvm/test/tools/llvm-mca/RISCV/SpacemitX60/rvv-arithmetic.s
index 5cf5ed5..234a3e2 100644
--- a/llvm/test/tools/llvm-mca/RISCV/SpacemitX60/rvv-arithmetic.s
+++ b/llvm/test/tools/llvm-mca/RISCV/SpacemitX60/rvv-arithmetic.s
@@ -3002,357 +3002,357 @@ vwsub.wx v8, v16, x30
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu
# CHECK-NEXT: 1 8 4.00 8 SMX60_VIEU[4] VWSUB_VX vwsub.vx v8, v16, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VAADDU_VV vaaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VAADDU_VV vaaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VAADDU_VV vaaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VAADDU_VV vaaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VAADDU_VV vaaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VAADDU_VV vaaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VAADDU_VV vaaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VAADDU_VV vaaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VAADDU_VV vaaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VAADDU_VV vaaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VAADDU_VV vaaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VV vaaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VAADDU_VV vaaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VAADDU_VX vaaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VAADDU_VX vaaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VAADDU_VX vaaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VAADDU_VX vaaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VAADDU_VX vaaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VAADDU_VX vaaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VAADDU_VX vaaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VAADDU_VX vaaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VAADDU_VX vaaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VAADDU_VX vaaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VAADDU_VX vaaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADDU_VX vaaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VAADDU_VX vaaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VAADD_VV vaadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VAADD_VV vaadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VAADD_VV vaadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VAADD_VV vaadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VAADD_VV vaadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VAADD_VV vaadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VAADD_VV vaadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VAADD_VV vaadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VAADD_VV vaadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VAADD_VV vaadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VAADD_VV vaadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VV vaadd.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VAADD_VV vaadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VAADD_VX vaadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VAADD_VX vaadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VAADD_VX vaadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VAADD_VX vaadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VAADD_VX vaadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VAADD_VX vaadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VAADD_VX vaadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VAADD_VX vaadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VAADD_VX vaadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VAADD_VX vaadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VAADD_VX vaadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VAADD_VX vaadd.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VAADD_VX vaadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VASUBU_VV vasubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VASUBU_VV vasubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VASUBU_VV vasubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VASUBU_VV vasubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VASUBU_VV vasubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VASUBU_VV vasubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VASUBU_VV vasubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VASUBU_VV vasubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VASUBU_VV vasubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VASUBU_VV vasubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VASUBU_VV vasubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VV vasubu.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VASUBU_VV vasubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VASUBU_VX vasubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VASUBU_VX vasubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VASUBU_VX vasubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VASUBU_VX vasubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VASUBU_VX vasubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VASUBU_VX vasubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VASUBU_VX vasubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VASUBU_VX vasubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VASUBU_VX vasubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VASUBU_VX vasubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VASUBU_VX vasubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUBU_VX vasubu.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VASUBU_VX vasubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VASUB_VV vasub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VASUB_VV vasub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VASUB_VV vasub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VASUB_VV vasub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VASUB_VV vasub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VASUB_VV vasub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VASUB_VV vasub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VASUB_VV vasub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VASUB_VV vasub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VASUB_VV vasub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VASUB_VV vasub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VV vasub.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VASUB_VV vasub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VASUB_VX vasub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VASUB_VX vasub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VASUB_VX vasub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VASUB_VX vasub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VASUB_VX vasub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VASUB_VX vasub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VASUB_VX vasub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VASUB_VX vasub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VASUB_VX vasub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VASUB_VX vasub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VASUB_VX vasub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VASUB_VX vasub.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VASUB_VX vasub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu
# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VMADC_VI vmadc.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu
@@ -3882,445 +3882,445 @@ vwsub.wx v8, v16, x30
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu
# CHECK-NEXT: 1 16 4.00 16 SMX60_VIEU[4] VRSUB_VX vrsub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADDU_VI vsaddu.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADDU_VI vsaddu.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADDU_VI vsaddu.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADDU_VI vsaddu.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADDU_VI vsaddu.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADDU_VI vsaddu.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADDU_VI vsaddu.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADDU_VI vsaddu.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADDU_VI vsaddu.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADDU_VI vsaddu.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADDU_VI vsaddu.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VI vsaddu.vi v8, v8, 12
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADDU_VI vsaddu.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADDU_VV vsaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADDU_VV vsaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADDU_VV vsaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADDU_VV vsaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADDU_VV vsaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADDU_VV vsaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADDU_VV vsaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADDU_VV vsaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADDU_VV vsaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADDU_VV vsaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADDU_VV vsaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VV vsaddu.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADDU_VV vsaddu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADDU_VX vsaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADDU_VX vsaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADDU_VX vsaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADDU_VX vsaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADDU_VX vsaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADDU_VX vsaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADDU_VX vsaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADDU_VX vsaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADDU_VX vsaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADDU_VX vsaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADDU_VX vsaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADDU_VX vsaddu.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADDU_VX vsaddu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADD_VI vsadd.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADD_VI vsadd.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADD_VI vsadd.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADD_VI vsadd.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADD_VI vsadd.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADD_VI vsadd.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADD_VI vsadd.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADD_VI vsadd.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADD_VI vsadd.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADD_VI vsadd.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADD_VI vsadd.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VI vsadd.vi v8, v8, 12
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADD_VI vsadd.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADD_VV vsadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADD_VV vsadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADD_VV vsadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADD_VV vsadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADD_VV vsadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADD_VV vsadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADD_VV vsadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADD_VV vsadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADD_VV vsadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADD_VV vsadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADD_VV vsadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VV vsadd.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADD_VV vsadd.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADD_VX vsadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADD_VX vsadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADD_VX vsadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADD_VX vsadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADD_VX vsadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADD_VX vsadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADD_VX vsadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADD_VX vsadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADD_VX vsadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSADD_VX vsadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSADD_VX vsadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSADD_VX vsadd.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSADD_VX vsadd.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSUBU_VV vssubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSSUBU_VV vssubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSUBU_VV vssubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSUBU_VV vssubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSSUBU_VV vssubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSUBU_VV vssubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSUBU_VV vssubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSSUBU_VV vssubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSUBU_VV vssubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSUBU_VV vssubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSSUBU_VV vssubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VV vssubu.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSUBU_VV vssubu.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSUBU_VX vssubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSSUBU_VX vssubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSUBU_VX vssubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSUBU_VX vssubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSSUBU_VX vssubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSUBU_VX vssubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSUBU_VX vssubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSSUBU_VX vssubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSUBU_VX vssubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSUBU_VX vssubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSSUBU_VX vssubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUBU_VX vssubu.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSUBU_VX vssubu.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSUB_VV vssub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSSUB_VV vssub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSUB_VV vssub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSUB_VV vssub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSSUB_VV vssub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSUB_VV vssub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSUB_VV vssub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSSUB_VV vssub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSUB_VV vssub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSUB_VV vssub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSSUB_VV vssub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VV vssub.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSUB_VV vssub.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSUB_VX vssub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSSUB_VX vssub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSUB_VX vssub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSUB_VX vssub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSSUB_VX vssub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSUB_VX vssub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSUB_VX vssub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSSUB_VX vssub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSUB_VX vssub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSUB_VX vssub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5
+# CHECK-NEXT: 1 5 4.00 5 SMX60_VIEU[4] VSSUB_VX vssub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSUB_VX vssub.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSUB_VX vssub.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu
# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VWADDU_WV vwaddu.wv v8, v16, v24
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu
@@ -4574,7 +4574,7 @@ vwsub.wx v8, v16, x30
# CHECK: Resource pressure per iteration:
# CHECK-NEXT: [0] [1] [2] [3.0] [3.1] [4] [5] [6]
-# CHECK-NEXT: - 1120.00 - - - - 3292.00 -
+# CHECK-NEXT: - 1120.00 - - - - 4084.00 -
# CHECK: Resource pressure by instruction:
# CHECK-NEXT: [0] [1] [2] [3.0] [3.1] [4] [5] [6] Instructions:
@@ -5267,11 +5267,11 @@ vwsub.wx v8, v16, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 2.00 - vaaddu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vaaddu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vaaddu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu
@@ -5279,29 +5279,29 @@ vwsub.wx v8, v16, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 2.00 - vaaddu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vaaddu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vaaddu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 2.00 - vaaddu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vaaddu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vaaddu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 2.00 - vaaddu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vaaddu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vaaddu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu
@@ -5311,11 +5311,11 @@ vwsub.wx v8, v16, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 2.00 - vaaddu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vaaddu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vaaddu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu
@@ -5323,29 +5323,29 @@ vwsub.wx v8, v16, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 2.00 - vaaddu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vaaddu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vaaddu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 2.00 - vaaddu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vaaddu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vaaddu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 2.00 - vaaddu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vaaddu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaaddu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vaaddu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vaadd.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu
@@ -5355,11 +5355,11 @@ vwsub.wx v8, v16, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vaadd.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaadd.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 2.00 - vaadd.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaadd.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vaadd.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaadd.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vaadd.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vaadd.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu
@@ -5367,29 +5367,29 @@ vwsub.wx v8, v16, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vaadd.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaadd.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 2.00 - vaadd.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaadd.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vaadd.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaadd.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vaadd.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vaadd.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vaadd.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaadd.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 2.00 - vaadd.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaadd.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vaadd.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaadd.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vaadd.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vaadd.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaadd.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 2.00 - vaadd.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaadd.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vaadd.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaadd.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vaadd.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vaadd.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu
@@ -5399,11 +5399,11 @@ vwsub.wx v8, v16, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vaadd.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaadd.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 2.00 - vaadd.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaadd.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vaadd.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaadd.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vaadd.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vaadd.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu
@@ -5411,29 +5411,29 @@ vwsub.wx v8, v16, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vaadd.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaadd.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 2.00 - vaadd.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaadd.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vaadd.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaadd.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vaadd.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vaadd.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vaadd.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaadd.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 2.00 - vaadd.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaadd.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vaadd.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaadd.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vaadd.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vaadd.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaadd.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 2.00 - vaadd.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaadd.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vaadd.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vaadd.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vaadd.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vasubu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu
@@ -5443,11 +5443,11 @@ vwsub.wx v8, v16, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vasubu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasubu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 2.00 - vasubu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasubu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vasubu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasubu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vasubu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vasubu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu
@@ -5455,29 +5455,29 @@ vwsub.wx v8, v16, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vasubu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasubu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 2.00 - vasubu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasubu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vasubu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasubu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vasubu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vasubu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vasubu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasubu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 2.00 - vasubu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasubu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vasubu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasubu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vasubu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vasubu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasubu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 2.00 - vasubu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasubu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vasubu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasubu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vasubu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vasubu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu
@@ -5487,11 +5487,11 @@ vwsub.wx v8, v16, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vasubu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasubu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 2.00 - vasubu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasubu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vasubu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasubu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vasubu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vasubu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu
@@ -5499,29 +5499,29 @@ vwsub.wx v8, v16, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vasubu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasubu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 2.00 - vasubu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasubu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vasubu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasubu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vasubu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vasubu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vasubu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasubu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 2.00 - vasubu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasubu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vasubu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasubu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vasubu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vasubu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasubu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 2.00 - vasubu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasubu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vasubu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasubu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vasubu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vasub.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu
@@ -5531,11 +5531,11 @@ vwsub.wx v8, v16, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vasub.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasub.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 2.00 - vasub.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasub.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vasub.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasub.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vasub.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vasub.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu
@@ -5543,29 +5543,29 @@ vwsub.wx v8, v16, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vasub.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasub.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 2.00 - vasub.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasub.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vasub.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasub.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vasub.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vasub.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vasub.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasub.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 2.00 - vasub.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasub.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vasub.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasub.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vasub.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vasub.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasub.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 2.00 - vasub.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasub.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vasub.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasub.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vasub.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vasub.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu
@@ -5575,11 +5575,11 @@ vwsub.wx v8, v16, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vasub.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasub.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 2.00 - vasub.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasub.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vasub.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasub.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vasub.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vasub.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu
@@ -5587,29 +5587,29 @@ vwsub.wx v8, v16, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vasub.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasub.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 2.00 - vasub.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasub.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vasub.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasub.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vasub.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vasub.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vasub.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasub.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 2.00 - vasub.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasub.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vasub.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasub.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vasub.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vasub.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasub.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 2.00 - vasub.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasub.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vasub.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vasub.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vasub.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu
# CHECK-NEXT: - - - - - - 4.00 - vmadc.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu
@@ -6147,11 +6147,11 @@ vwsub.wx v8, v16, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 2.00 - vsaddu.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 4.00 - vsaddu.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 8.00 - vsaddu.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu
@@ -6159,29 +6159,29 @@ vwsub.wx v8, v16, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 2.00 - vsaddu.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 4.00 - vsaddu.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 8.00 - vsaddu.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 2.00 - vsaddu.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 4.00 - vsaddu.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 8.00 - vsaddu.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 2.00 - vsaddu.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 4.00 - vsaddu.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 8.00 - vsaddu.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu
@@ -6191,11 +6191,11 @@ vwsub.wx v8, v16, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 2.00 - vsaddu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vsaddu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vsaddu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu
@@ -6203,29 +6203,29 @@ vwsub.wx v8, v16, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 2.00 - vsaddu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vsaddu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vsaddu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 2.00 - vsaddu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vsaddu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vsaddu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 2.00 - vsaddu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vsaddu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vsaddu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu
@@ -6235,11 +6235,11 @@ vwsub.wx v8, v16, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 2.00 - vsaddu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vsaddu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vsaddu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu
@@ -6247,29 +6247,29 @@ vwsub.wx v8, v16, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 2.00 - vsaddu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vsaddu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vsaddu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 2.00 - vsaddu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vsaddu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vsaddu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 2.00 - vsaddu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vsaddu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsaddu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vsaddu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsadd.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu
@@ -6279,11 +6279,11 @@ vwsub.wx v8, v16, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsadd.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsadd.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 2.00 - vsadd.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsadd.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 4.00 - vsadd.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsadd.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 8.00 - vsadd.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsadd.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu
@@ -6291,29 +6291,29 @@ vwsub.wx v8, v16, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsadd.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsadd.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 2.00 - vsadd.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsadd.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 4.00 - vsadd.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsadd.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 8.00 - vsadd.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsadd.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsadd.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsadd.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 2.00 - vsadd.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsadd.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 4.00 - vsadd.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsadd.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 8.00 - vsadd.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsadd.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsadd.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 2.00 - vsadd.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsadd.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 4.00 - vsadd.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsadd.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 8.00 - vsadd.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsadd.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu
@@ -6323,11 +6323,11 @@ vwsub.wx v8, v16, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsadd.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsadd.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 2.00 - vsadd.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsadd.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vsadd.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsadd.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vsadd.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsadd.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu
@@ -6335,29 +6335,29 @@ vwsub.wx v8, v16, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsadd.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsadd.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 2.00 - vsadd.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsadd.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vsadd.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsadd.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vsadd.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsadd.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsadd.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsadd.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 2.00 - vsadd.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsadd.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vsadd.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsadd.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vsadd.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsadd.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsadd.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 2.00 - vsadd.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsadd.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vsadd.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsadd.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vsadd.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsadd.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu
@@ -6367,11 +6367,11 @@ vwsub.wx v8, v16, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsadd.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsadd.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 2.00 - vsadd.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsadd.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vsadd.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsadd.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vsadd.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsadd.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu
@@ -6379,29 +6379,29 @@ vwsub.wx v8, v16, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsadd.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsadd.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 2.00 - vsadd.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsadd.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vsadd.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsadd.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vsadd.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsadd.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsadd.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsadd.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 2.00 - vsadd.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsadd.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vsadd.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsadd.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vsadd.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsadd.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsadd.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 2.00 - vsadd.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsadd.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vsadd.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsadd.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vsadd.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssubu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu
@@ -6411,11 +6411,11 @@ vwsub.wx v8, v16, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssubu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssubu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 2.00 - vssubu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssubu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vssubu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssubu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vssubu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssubu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu
@@ -6423,29 +6423,29 @@ vwsub.wx v8, v16, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssubu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssubu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 2.00 - vssubu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssubu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vssubu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssubu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vssubu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssubu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssubu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssubu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 2.00 - vssubu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssubu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vssubu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssubu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vssubu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssubu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssubu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 2.00 - vssubu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssubu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vssubu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssubu.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vssubu.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssubu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu
@@ -6455,11 +6455,11 @@ vwsub.wx v8, v16, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssubu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssubu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 2.00 - vssubu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssubu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vssubu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssubu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vssubu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssubu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu
@@ -6467,29 +6467,29 @@ vwsub.wx v8, v16, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssubu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssubu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 2.00 - vssubu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssubu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vssubu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssubu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vssubu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssubu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssubu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssubu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 2.00 - vssubu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssubu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vssubu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssubu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vssubu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssubu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssubu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 2.00 - vssubu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssubu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vssubu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssubu.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vssubu.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssub.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu
@@ -6499,11 +6499,11 @@ vwsub.wx v8, v16, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssub.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssub.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 2.00 - vssub.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssub.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vssub.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssub.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vssub.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssub.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu
@@ -6511,29 +6511,29 @@ vwsub.wx v8, v16, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssub.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssub.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 2.00 - vssub.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssub.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vssub.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssub.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vssub.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssub.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssub.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssub.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 2.00 - vssub.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssub.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vssub.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssub.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vssub.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssub.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssub.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 2.00 - vssub.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssub.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vssub.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssub.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vssub.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssub.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu
@@ -6543,11 +6543,11 @@ vwsub.wx v8, v16, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssub.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssub.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 2.00 - vssub.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssub.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vssub.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssub.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vssub.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssub.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu
@@ -6555,29 +6555,29 @@ vwsub.wx v8, v16, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssub.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssub.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 2.00 - vssub.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssub.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vssub.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssub.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vssub.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssub.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssub.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssub.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 2.00 - vssub.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssub.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vssub.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssub.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vssub.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssub.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssub.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 2.00 - vssub.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssub.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vssub.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssub.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vssub.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu
# CHECK-NEXT: - - - - - - 4.00 - vwaddu.wv v8, v16, v24
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu
diff --git a/llvm/test/tools/llvm-mca/RISCV/SpacemitX60/rvv-bitwise.s b/llvm/test/tools/llvm-mca/RISCV/SpacemitX60/rvv-bitwise.s
index 89d3872..5a5f366 100644
--- a/llvm/test/tools/llvm-mca/RISCV/SpacemitX60/rvv-bitwise.s
+++ b/llvm/test/tools/llvm-mca/RISCV/SpacemitX60/rvv-bitwise.s
@@ -2630,269 +2630,269 @@ vssrl.vx v8, v8, x30
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu
# CHECK-NEXT: 1 16 4.00 16 SMX60_VIEU[4] VSRL_VX vsrl.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRA_VI vssra.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12
+# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRA_VI vssra.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRA_VI vssra.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12
+# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRA_VI vssra.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRA_VI vssra.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12
+# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRA_VI vssra.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRA_VI vssra.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12
+# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRA_VI vssra.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRA_VI vssra.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12
+# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRA_VI vssra.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRA_VI vssra.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12
+# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRA_VI vssra.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRA_VI vssra.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12
+# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRA_VI vssra.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRA_VI vssra.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VI vssra.vi v8, v8, 12
+# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRA_VI vssra.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRA_VV vssra.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8
+# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRA_VV vssra.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRA_VV vssra.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8
+# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRA_VV vssra.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRA_VV vssra.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8
+# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRA_VV vssra.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRA_VV vssra.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8
+# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRA_VV vssra.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRA_VV vssra.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8
+# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRA_VV vssra.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRA_VV vssra.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8
+# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRA_VV vssra.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRA_VV vssra.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8
+# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRA_VV vssra.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRA_VV vssra.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VV vssra.vv v8, v8, v8
+# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRA_VV vssra.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRA_VX vssra.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5
+# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRA_VX vssra.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRA_VX vssra.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5
+# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRA_VX vssra.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRA_VX vssra.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5
+# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRA_VX vssra.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRA_VX vssra.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5
+# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRA_VX vssra.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRA_VX vssra.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5
+# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRA_VX vssra.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRA_VX vssra.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5
+# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRA_VX vssra.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRA_VX vssra.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5
+# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRA_VX vssra.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRA_VX vssra.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRA_VX vssra.vx v8, v8, t5
+# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRA_VX vssra.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRL_VI vssrl.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12
+# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRL_VI vssrl.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRL_VI vssrl.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12
+# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRL_VI vssrl.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRL_VI vssrl.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12
+# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRL_VI vssrl.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRL_VI vssrl.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12
+# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRL_VI vssrl.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRL_VI vssrl.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12
+# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRL_VI vssrl.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRL_VI vssrl.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12
+# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRL_VI vssrl.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRL_VI vssrl.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12
+# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRL_VI vssrl.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRL_VI vssrl.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VI vssrl.vi v8, v8, 12
+# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRL_VI vssrl.vi v8, v8, 12
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRL_VV vssrl.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8
+# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRL_VV vssrl.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRL_VV vssrl.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8
+# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRL_VV vssrl.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRL_VV vssrl.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8
+# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRL_VV vssrl.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRL_VV vssrl.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8
+# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRL_VV vssrl.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRL_VV vssrl.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8
+# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRL_VV vssrl.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRL_VV vssrl.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8
+# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRL_VV vssrl.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRL_VV vssrl.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8
+# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRL_VV vssrl.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRL_VV vssrl.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VV vssrl.vv v8, v8, v8
+# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRL_VV vssrl.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRL_VX vssrl.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5
+# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRL_VX vssrl.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRL_VX vssrl.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5
+# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRL_VX vssrl.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRL_VX vssrl.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5
+# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRL_VX vssrl.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRL_VX vssrl.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5
+# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRL_VX vssrl.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5
+# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRL_VX vssrl.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5
+# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRL_VX vssrl.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRL_VX vssrl.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5
+# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRL_VX vssrl.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5
+# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSSRL_VX vssrl.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5
+# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSSRL_VX vssrl.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSSRL_VX vssrl.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSSRL_VX vssrl.vx v8, v8, t5
+# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSSRL_VX vssrl.vx v8, v8, t5
# CHECK: Resources:
# CHECK-NEXT: [0] - SMX60_FP
@@ -2906,7 +2906,7 @@ vssrl.vx v8, v8, x30
# CHECK: Resource pressure per iteration:
# CHECK-NEXT: [0] [1] [2] [3.0] [3.1] [4] [5] [6]
-# CHECK-NEXT: - 708.00 - - - - 2436.00 -
+# CHECK-NEXT: - 708.00 - - - - 3060.00 -
# CHECK: Resource pressure by instruction:
# CHECK-NEXT: [0] [1] [2] [3.0] [3.1] [4] [5] [6] Instructions:
@@ -4069,43 +4069,43 @@ vssrl.vx v8, v8, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf8, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssra.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 2.00 - vssra.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 4.00 - vssra.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 8.00 - vssra.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 16.00 - vssra.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssra.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssra.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 2.00 - vssra.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 4.00 - vssra.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 8.00 - vssra.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 16.00 - vssra.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssra.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 2.00 - vssra.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 4.00 - vssra.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 8.00 - vssra.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 16.00 - vssra.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 2.00 - vssra.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 4.00 - vssra.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 8.00 - vssra.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 16.00 - vssra.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssra.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu
@@ -4113,43 +4113,43 @@ vssrl.vx v8, v8, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf8, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssra.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 2.00 - vssra.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vssra.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vssra.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 16.00 - vssra.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssra.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssra.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 2.00 - vssra.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vssra.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vssra.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 16.00 - vssra.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssra.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 2.00 - vssra.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vssra.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vssra.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 16.00 - vssra.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 2.00 - vssra.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vssra.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vssra.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 16.00 - vssra.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssra.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu
@@ -4157,43 +4157,43 @@ vssrl.vx v8, v8, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf8, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssra.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 2.00 - vssra.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vssra.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vssra.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 16.00 - vssra.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssra.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssra.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 2.00 - vssra.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vssra.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vssra.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 16.00 - vssra.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssra.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 2.00 - vssra.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vssra.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vssra.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 16.00 - vssra.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 2.00 - vssra.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vssra.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vssra.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssra.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 16.00 - vssra.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssrl.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu
@@ -4201,43 +4201,43 @@ vssrl.vx v8, v8, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf8, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssrl.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 2.00 - vssrl.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 4.00 - vssrl.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 8.00 - vssrl.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 16.00 - vssrl.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssrl.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssrl.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 2.00 - vssrl.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 4.00 - vssrl.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 8.00 - vssrl.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 16.00 - vssrl.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssrl.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 2.00 - vssrl.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 4.00 - vssrl.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 8.00 - vssrl.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 16.00 - vssrl.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 2.00 - vssrl.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 4.00 - vssrl.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 8.00 - vssrl.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vi v8, v8, 12
+# CHECK-NEXT: - - - - - - 16.00 - vssrl.vi v8, v8, 12
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssrl.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu
@@ -4245,43 +4245,43 @@ vssrl.vx v8, v8, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf8, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssrl.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 2.00 - vssrl.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vssrl.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vssrl.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 16.00 - vssrl.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssrl.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssrl.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 2.00 - vssrl.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vssrl.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vssrl.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 16.00 - vssrl.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssrl.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 2.00 - vssrl.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vssrl.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vssrl.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 16.00 - vssrl.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 2.00 - vssrl.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vssrl.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vssrl.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 16.00 - vssrl.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssrl.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu
@@ -4289,40 +4289,40 @@ vssrl.vx v8, v8, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf8, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssrl.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 2.00 - vssrl.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vssrl.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vssrl.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 16.00 - vssrl.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssrl.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssrl.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 2.00 - vssrl.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vssrl.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vssrl.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 16.00 - vssrl.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vssrl.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 2.00 - vssrl.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vssrl.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vssrl.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 16.00 - vssrl.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 2.00 - vssrl.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vssrl.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vssrl.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vssrl.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 16.00 - vssrl.vx v8, v8, t5
diff --git a/llvm/test/tools/llvm-mca/RISCV/SpacemitX60/rvv-mul-div.s b/llvm/test/tools/llvm-mca/RISCV/SpacemitX60/rvv-mul-div.s
index 572ebf2..a166f15 100644
--- a/llvm/test/tools/llvm-mca/RISCV/SpacemitX60/rvv-mul-div.s
+++ b/llvm/test/tools/llvm-mca/RISCV/SpacemitX60/rvv-mul-div.s
@@ -1906,93 +1906,93 @@ vsmul.vx v8, v8, x30
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu
# CHECK-NEXT: 1 8 4.00 8 SMX60_VIEU[4] VWMULSU_VX vwmulsu.vx v8, v16, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8
+# CHECK-NEXT: 1 7 1.00 7 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8
+# CHECK-NEXT: 1 7 1.00 7 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8
+# CHECK-NEXT: 1 7 1.00 7 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8
+# CHECK-NEXT: 1 7 4.00 7 SMX60_VIEU[4] VSMUL_VV vsmul.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSMUL_VV vsmul.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8
+# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSMUL_VV vsmul.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8
+# CHECK-NEXT: 1 32 32.00 32 SMX60_VIEU[32] VSMUL_VV vsmul.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8
+# CHECK-NEXT: 1 7 1.00 7 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8
+# CHECK-NEXT: 1 7 1.00 7 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8
+# CHECK-NEXT: 1 7 4.00 7 SMX60_VIEU[4] VSMUL_VV vsmul.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSMUL_VV vsmul.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8
+# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSMUL_VV vsmul.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8
+# CHECK-NEXT: 1 32 32.00 32 SMX60_VIEU[32] VSMUL_VV vsmul.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8
+# CHECK-NEXT: 1 7 1.00 7 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8
+# CHECK-NEXT: 1 7 4.00 7 SMX60_VIEU[4] VSMUL_VV vsmul.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSMUL_VV vsmul.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8
+# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSMUL_VV vsmul.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8
+# CHECK-NEXT: 1 32 32.00 32 SMX60_VIEU[32] VSMUL_VV vsmul.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8
+# CHECK-NEXT: 1 7 4.00 7 SMX60_VIEU[4] VSMUL_VV vsmul.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSMUL_VV vsmul.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8
+# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSMUL_VV vsmul.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VV vsmul.vv v8, v8, v8
+# CHECK-NEXT: 1 32 32.00 32 SMX60_VIEU[32] VSMUL_VV vsmul.vv v8, v8, v8
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5
+# CHECK-NEXT: 1 7 1.00 7 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5
+# CHECK-NEXT: 1 7 1.00 7 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5
+# CHECK-NEXT: 1 7 1.00 7 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5
+# CHECK-NEXT: 1 7 4.00 7 SMX60_VIEU[4] VSMUL_VX vsmul.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSMUL_VX vsmul.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5
+# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSMUL_VX vsmul.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5
+# CHECK-NEXT: 1 32 32.00 32 SMX60_VIEU[32] VSMUL_VX vsmul.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5
+# CHECK-NEXT: 1 7 1.00 7 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5
+# CHECK-NEXT: 1 7 1.00 7 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5
+# CHECK-NEXT: 1 7 4.00 7 SMX60_VIEU[4] VSMUL_VX vsmul.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSMUL_VX vsmul.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5
+# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSMUL_VX vsmul.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5
+# CHECK-NEXT: 1 32 32.00 32 SMX60_VIEU[32] VSMUL_VX vsmul.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5
+# CHECK-NEXT: 1 7 1.00 7 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5
+# CHECK-NEXT: 1 7 4.00 7 SMX60_VIEU[4] VSMUL_VX vsmul.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSMUL_VX vsmul.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5
+# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSMUL_VX vsmul.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5
+# CHECK-NEXT: 1 32 32.00 32 SMX60_VIEU[32] VSMUL_VX vsmul.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5
+# CHECK-NEXT: 1 7 4.00 7 SMX60_VIEU[4] VSMUL_VX vsmul.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5
+# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSMUL_VX vsmul.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5
+# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSMUL_VX vsmul.vx v8, v8, t5
# CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSMUL_VX vsmul.vx v8, v8, t5
+# CHECK-NEXT: 1 32 32.00 32 SMX60_VIEU[32] VSMUL_VX vsmul.vx v8, v8, t5
# CHECK: Resources:
# CHECK-NEXT: [0] - SMX60_FP
@@ -2006,7 +2006,7 @@ vsmul.vx v8, v8, x30
# CHECK: Resource pressure per iteration:
# CHECK-NEXT: [0] [1] [2] [3.0] [3.1] [4] [5] [6]
-# CHECK-NEXT: - 486.00 - - - - 3748.00 -
+# CHECK-NEXT: - 486.00 - - - - 4196.00 -
# CHECK: Resource pressure by instruction:
# CHECK-NEXT: [0] [1] [2] [3.0] [3.1] [4] [5] [6] Instructions:
@@ -2901,43 +2901,43 @@ vsmul.vx v8, v8, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf8, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsmul.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsmul.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vsmul.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsmul.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vsmul.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsmul.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 16.00 - vsmul.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsmul.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 32.00 - vsmul.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsmul.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsmul.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsmul.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vsmul.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsmul.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vsmul.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsmul.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 16.00 - vsmul.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsmul.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 32.00 - vsmul.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsmul.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsmul.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vsmul.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsmul.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vsmul.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsmul.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 16.00 - vsmul.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsmul.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 32.00 - vsmul.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsmul.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 4.00 - vsmul.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsmul.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 8.00 - vsmul.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsmul.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 16.00 - vsmul.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsmul.vv v8, v8, v8
+# CHECK-NEXT: - - - - - - 32.00 - vsmul.vv v8, v8, v8
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsmul.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu
@@ -2945,40 +2945,40 @@ vsmul.vx v8, v8, x30
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf8, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsmul.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsmul.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vsmul.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsmul.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vsmul.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsmul.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 16.00 - vsmul.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsmul.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 32.00 - vsmul.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsmul.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsmul.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsmul.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vsmul.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsmul.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vsmul.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsmul.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 16.00 - vsmul.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsmul.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 32.00 - vsmul.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu
# CHECK-NEXT: - - - - - - 1.00 - vsmul.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsmul.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vsmul.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsmul.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vsmul.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsmul.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 16.00 - vsmul.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsmul.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 32.00 - vsmul.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsmul.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 4.00 - vsmul.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsmul.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 8.00 - vsmul.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsmul.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 16.00 - vsmul.vx v8, v8, t5
# CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu
-# CHECK-NEXT: - - - - - - 1.00 - vsmul.vx v8, v8, t5
+# CHECK-NEXT: - - - - - - 32.00 - vsmul.vx v8, v8, t5
diff --git a/llvm/test/tools/llvm-profdata/merge-traces.proftext b/llvm/test/tools/llvm-profdata/merge-traces.proftext
index bcf29ba..3512f33 100644
--- a/llvm/test/tools/llvm-profdata/merge-traces.proftext
+++ b/llvm/test/tools/llvm-profdata/merge-traces.proftext
@@ -1,24 +1,36 @@
-# RUN: llvm-profdata merge --temporal-profile-trace-reservoir-size=2 %s -o %t.profdata
-# RUN: llvm-profdata show --temporal-profile-traces %t.profdata | FileCheck %s --check-prefixes=SAMPLE1,SEEN1
-# RUN: llvm-profdata merge --temporal-profile-trace-reservoir-size=2 %s %t.profdata -o %t.profdata
-# RUN: llvm-profdata show --temporal-profile-traces %t.profdata | FileCheck %s --check-prefixes=SAMPLE2,SEEN2
-# RUN: llvm-profdata merge --temporal-profile-trace-reservoir-size=2 %s %t.profdata -o %t.profdata
-# RUN: llvm-profdata show --temporal-profile-traces %t.profdata | FileCheck %s --check-prefixes=SAMPLE2,SEEN3
-# RUN: llvm-profdata merge --temporal-profile-trace-reservoir-size=2 %s %t.profdata -o %t.profdata
-# RUN: llvm-profdata show --temporal-profile-traces %t.profdata | FileCheck %s --check-prefixes=SAMPLE2,SEEN4
-
-# SEEN1: Temporal Profile Traces (samples=1 seen=1):
-# SEEN2: Temporal Profile Traces (samples=2 seen=2):
-# SEEN3: Temporal Profile Traces (samples=2 seen=3):
-# SEEN4: Temporal Profile Traces (samples=2 seen=4):
-# SAMPLE1: Temporal Profile Trace 0 (weight=1 count=3):
-# SAMPLE1: a
-# SAMPLE1: b
-# SAMPLE1: c
-# SAMPLE2: Temporal Profile Trace 1 (weight=1 count=3):
-# SAMPLE2: a
-# SAMPLE2: b
-# SAMPLE2: c
+# RUN: llvm-profdata merge --temporal-profile-trace-reservoir-size=2 %s --text | FileCheck %s --check-prefixes=CHECK,SEEN1,SAMPLE1
+
+# Merge %s twice so it has two traces
+# RUN: llvm-profdata merge --temporal-profile-trace-reservoir-size=2 %s %s --text | FileCheck %s --check-prefixes=CHECK,SEEN2,SAMPLE2
+# RUN: llvm-profdata merge --temporal-profile-trace-reservoir-size=2 %s %s -o %t-2.profdata
+
+# RUN: llvm-profdata merge --temporal-profile-trace-reservoir-size=2 %s %s %s --text | FileCheck %s --check-prefixes=CHECK,SEEN3,SAMPLE2
+# RUN: llvm-profdata merge --temporal-profile-trace-reservoir-size=2 %t-2.profdata %s --text | FileCheck %s --check-prefixes=CHECK,SEEN3,SAMPLE2
+# RUN: llvm-profdata merge --temporal-profile-trace-reservoir-size=2 %s %t-2.profdata --text | FileCheck %s --check-prefixes=CHECK,SEEN3,SAMPLE2
+
+# RUN: llvm-profdata merge --temporal-profile-trace-reservoir-size=2 %s %s %s %s --text | FileCheck %s --check-prefixes=CHECK,SEEN4,SAMPLE2
+# RUN: llvm-profdata merge --temporal-profile-trace-reservoir-size=2 %t-2.profdata %s %s --text | FileCheck %s --check-prefixes=CHECK,SEEN4,SAMPLE2
+# RUN: llvm-profdata merge --temporal-profile-trace-reservoir-size=2 %t-2.profdata %t-2.profdata --text | FileCheck %s --check-prefixes=CHECK,SEEN4,SAMPLE2
+
+# Test that we can increase the reservoir size, even if inputs are sampled
+# RUN: llvm-profdata merge --temporal-profile-trace-reservoir-size=2 %s %s %s %s -o %t-4.profdata
+# RUN: llvm-profdata merge --temporal-profile-trace-reservoir-size=4 %t-4.profdata %t-4.profdata --text | FileCheck %s --check-prefixes=CHECK,SEEN8,SAMPLE4
+
+# Test that decreasing the reservoir size truncates traces
+# RUN: llvm-profdata merge --temporal-profile-trace-reservoir-size=1 %t-4.profdata --text | FileCheck %s --check-prefixes=CHECK,SEEN4,SAMPLE1
+
+# CHECK: :temporal_prof_traces
+# CHECK: # Num Temporal Profile Traces:
+# SAMPLE1: 1
+# SAMPLE2: 2
+# SAMPLE4: 4
+# CHECK: # Temporal Profile Trace Stream Size:
+# SEEN1: 1
+# SEEN2: 2
+# SEEN3: 3
+# SEEN4: 4
+# SEEN8: 8
+# CHECK: a,b,c,
# Header
:ir
diff --git a/llvm/test/tools/llvm-profdata/read-traces.proftext b/llvm/test/tools/llvm-profdata/read-traces.proftext
index 87f69fe..5e822a9 100644
--- a/llvm/test/tools/llvm-profdata/read-traces.proftext
+++ b/llvm/test/tools/llvm-profdata/read-traces.proftext
@@ -3,19 +3,16 @@
# RUN: llvm-profdata merge -text %t.2.profdata -o %t.3.proftext
# RUN: diff %t.1.proftext %t.3.proftext
-# RUN: llvm-profdata show --temporal-profile-traces %t.1.proftext | FileCheck %s
+# RUN: llvm-profdata merge -text %s | FileCheck %s
-# CHECK: Temporal Profile Traces (samples=3 seen=3):
-# CHECK: Temporal Profile Trace 0 (weight=1 count=3):
-# CHECK: foo
-# CHECK: bar
-# CHECK: goo
-# CHECK: Temporal Profile Trace 1 (weight=3 count=3):
-# CHECK: foo
-# CHECK: goo
-# CHECK: bar
-# CHECK: Temporal Profile Trace 2 (weight=1 count=1):
-# CHECK: goo
+# CHECK: :temporal_prof_traces
+# CHECK: # Num Temporal Profile Traces:
+# CHECK-NEXT: 3
+# CHECK: # Temporal Profile Trace Stream Size:
+# CHECK-NEXT: 3
+# CHECK-DAG: foo,bar,goo,
+# CHECK-DAG: foo,goo,bar,
+# CHECK-DAG: goo,
# Header
:ir
diff --git a/llvm/test/tools/llvm-profdata/trace-limit.proftext b/llvm/test/tools/llvm-profdata/trace-limit.proftext
index e246ee8..6b4f974 100644
--- a/llvm/test/tools/llvm-profdata/trace-limit.proftext
+++ b/llvm/test/tools/llvm-profdata/trace-limit.proftext
@@ -11,7 +11,7 @@
# RUN: llvm-profdata merge --temporal-profile-max-trace-length=1000 %s -o %t.profdata
# RUN: llvm-profdata show --temporal-profile-traces %t.profdata | FileCheck %s --check-prefixes=CHECK,ALL
-# NONE: Temporal Profile Traces (samples=0
+# NONE: Temporal Profile Traces (samples=0 seen=0):
# CHECK: Temporal Profile Traces (samples=1 seen=1):
# SOME: Trace 0 (weight=1 count=2):
# ALL: Trace 0 (weight=1 count=3):
diff --git a/llvm/test/tools/llvm-reduce/operands-to-args-lifetimes.ll b/llvm/test/tools/llvm-reduce/operands-to-args-lifetimes.ll
index d9ed9df..5db1989 100644
--- a/llvm/test/tools/llvm-reduce/operands-to-args-lifetimes.ll
+++ b/llvm/test/tools/llvm-reduce/operands-to-args-lifetimes.ll
@@ -4,15 +4,15 @@
; INTERESTING: store
; REDUCED: define void @test(ptr %a) {
; REDUCED-NEXT: %a1 = alloca i32
-; REDUCED-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr %a1)
+; REDUCED-NEXT: call void @llvm.lifetime.start.p0(ptr %a1)
; REDUCED-NEXT: store i32 0, ptr %a
; REDUCED-NEXT: store i32 1, ptr %a
-; REDUCED-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr %a1)
+; REDUCED-NEXT: call void @llvm.lifetime.end.p0(ptr %a1)
define void @test() {
%a = alloca i32
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
store i32 0, ptr %a
store i32 1, ptr %a
- call void @llvm.lifetime.end.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
ret void
}
diff --git a/llvm/test/tools/llvm-reduce/reduce-operands-alloca.ll b/llvm/test/tools/llvm-reduce/reduce-operands-alloca.ll
index b68f718..75b152f 100644
--- a/llvm/test/tools/llvm-reduce/reduce-operands-alloca.ll
+++ b/llvm/test/tools/llvm-reduce/reduce-operands-alloca.ll
@@ -69,13 +69,13 @@ define void @alloca_constexpr_elt() {
}
; CHECK-LABEL: @alloca_lifetimes(
-; ZERO: call void @llvm.lifetime.start.p0(i64 4, ptr %alloca)
-; ONE: call void @llvm.lifetime.start.p0(i64 4, ptr %alloca)
-; POISON: call void @llvm.lifetime.start.p0(i64 4, ptr %alloca)
+; ZERO: call void @llvm.lifetime.start.p0(ptr %alloca)
+; ONE: call void @llvm.lifetime.start.p0(ptr %alloca)
+; POISON: call void @llvm.lifetime.start.p0(ptr %alloca)
define void @alloca_lifetimes() {
%alloca = alloca i32
- call void @llvm.lifetime.start.p0(i64 4, ptr %alloca)
+ call void @llvm.lifetime.start.p0(ptr %alloca)
store i32 0, ptr %alloca
- call void @llvm.lifetime.end.p0(i64 4, ptr %alloca)
+ call void @llvm.lifetime.end.p0(ptr %alloca)
ret void
}
diff --git a/llvm/tools/llvm-objdump/MachODump.cpp b/llvm/tools/llvm-objdump/MachODump.cpp
index d6e29a3..6dccf21 100644
--- a/llvm/tools/llvm-objdump/MachODump.cpp
+++ b/llvm/tools/llvm-objdump/MachODump.cpp
@@ -1749,7 +1749,7 @@ static void DumpLiteralPointerSection(MachOObjectFile *O,
StringRef BytesStr = unwrapOrError(Sect->getContents(), O->getFileName());
- const char *Contents = reinterpret_cast<const char *>(BytesStr.data());
+ const char *Contents = BytesStr.data();
switch (section_type) {
case MachO::S_CSTRING_LITERALS:
@@ -1965,7 +1965,7 @@ static void DumpSectionContents(StringRef Filename, MachOObjectFile *O,
StringRef BytesStr =
unwrapOrError(Section.getContents(), O->getFileName());
- const char *sect = reinterpret_cast<const char *>(BytesStr.data());
+ const char *sect = BytesStr.data();
uint32_t sect_size = BytesStr.size();
uint64_t sect_addr = Section.getAddress();
@@ -2049,7 +2049,7 @@ static void DumpInfoPlistSectionContents(StringRef Filename,
outs() << "Contents of (" << SegName << "," << SectName << ") section\n";
StringRef BytesStr =
unwrapOrError(Section.getContents(), O->getFileName());
- const char *sect = reinterpret_cast<const char *>(BytesStr.data());
+ const char *sect = BytesStr.data();
outs() << format("%.*s", BytesStr.size(), sect) << "\n";
return;
}
@@ -3237,7 +3237,7 @@ static const char *GuessCstringPointer(uint64_t ReferenceValue,
uint64_t object_offset = Sec.offset + sect_offset;
StringRef MachOContents = info->O->getData();
uint64_t object_size = MachOContents.size();
- const char *object_addr = (const char *)MachOContents.data();
+ const char *object_addr = MachOContents.data();
if (object_offset < object_size) {
const char *name = object_addr + object_offset;
return name;
@@ -3258,7 +3258,7 @@ static const char *GuessCstringPointer(uint64_t ReferenceValue,
uint64_t object_offset = Sec.offset + sect_offset;
StringRef MachOContents = info->O->getData();
uint64_t object_size = MachOContents.size();
- const char *object_addr = (const char *)MachOContents.data();
+ const char *object_addr = MachOContents.data();
if (object_offset < object_size) {
const char *name = object_addr + object_offset;
return name;
@@ -3447,7 +3447,7 @@ static uint64_t GuessPointerPointer(uint64_t ReferenceValue,
uint64_t object_offset = Sec.offset + sect_offset;
StringRef MachOContents = info->O->getData();
uint64_t object_size = MachOContents.size();
- const char *object_addr = (const char *)MachOContents.data();
+ const char *object_addr = MachOContents.data();
if (object_offset < object_size) {
uint64_t pointer_value;
memcpy(&pointer_value, object_addr + object_offset,
@@ -4350,7 +4350,7 @@ walk_pointer_list_64(const char *listname, const SectionRef S,
outs() << "Contents of (" << SegName << "," << SectName << ") section\n";
StringRef BytesStr = unwrapOrError(S.getContents(), O->getFileName());
- const char *Contents = reinterpret_cast<const char *>(BytesStr.data());
+ const char *Contents = BytesStr.data();
for (uint32_t i = 0; i < S.getSize(); i += sizeof(uint64_t)) {
uint32_t left = S.getSize() - i;
@@ -4399,7 +4399,7 @@ walk_pointer_list_32(const char *listname, const SectionRef S,
outs() << "Contents of (" << SegName << "," << SectName << ") section\n";
StringRef BytesStr = unwrapOrError(S.getContents(), O->getFileName());
- const char *Contents = reinterpret_cast<const char *>(BytesStr.data());
+ const char *Contents = BytesStr.data();
for (uint32_t i = 0; i < S.getSize(); i += sizeof(uint32_t)) {
uint32_t left = S.getSize() - i;
diff --git a/llvm/tools/llvm-objdump/llvm-objdump.cpp b/llvm/tools/llvm-objdump/llvm-objdump.cpp
index c19c698..815759d 100644
--- a/llvm/tools/llvm-objdump/llvm-objdump.cpp
+++ b/llvm/tools/llvm-objdump/llvm-objdump.cpp
@@ -636,8 +636,14 @@ static bool isCSKYElf(const ObjectFile &Obj) {
return Elf && Elf->getEMachine() == ELF::EM_CSKY;
}
+static bool isRISCVElf(const ObjectFile &Obj) {
+ const auto *Elf = dyn_cast<ELFObjectFileBase>(&Obj);
+ return Elf && Elf->getEMachine() == ELF::EM_RISCV;
+}
+
static bool hasMappingSymbols(const ObjectFile &Obj) {
- return isArmElf(Obj) || isAArch64Elf(Obj) || isCSKYElf(Obj);
+ return isArmElf(Obj) || isAArch64Elf(Obj) || isCSKYElf(Obj) ||
+ isRISCVElf(Obj);
}
static void printRelocation(formatted_raw_ostream &OS, StringRef FileName,
diff --git a/llvm/unittests/ADT/StringRefTest.cpp b/llvm/unittests/ADT/StringRefTest.cpp
index ec9cdc1..d5f8dc4 100644
--- a/llvm/unittests/ADT/StringRefTest.cpp
+++ b/llvm/unittests/ADT/StringRefTest.cpp
@@ -619,6 +619,19 @@ TEST(StringRefTest, Hashing) {
hash_value(StringRef("hello world").slice(1, -1)));
}
+TEST(StringRefTest, getAutoSenseRadix) {
+ struct RadixPair {
+ const char *Str;
+ unsigned Expected;
+ } RadixNumbers[] = {{"123", 10}, {"1", 10}, {"0b1", 2}, {"01", 8}, {"0o1", 8},
+ {"0x1", 16}, {"0", 10}, {"00", 8}, {"", 10}};
+ for (size_t i = 0; i < std::size(RadixNumbers); ++i) {
+ StringRef number = RadixNumbers[i].Str;
+ unsigned radix = getAutoSenseRadix(number);
+ EXPECT_EQ(radix, RadixNumbers[i].Expected);
+ }
+}
+
struct UnsignedPair {
const char *Str;
uint64_t Expected;
diff --git a/llvm/unittests/Analysis/IR2VecTest.cpp b/llvm/unittests/Analysis/IR2VecTest.cpp
index e288585..f7838cc4 100644
--- a/llvm/unittests/Analysis/IR2VecTest.cpp
+++ b/llvm/unittests/Analysis/IR2VecTest.cpp
@@ -320,11 +320,13 @@ TEST_F(IR2VecTestFixture, GetInstVecMap) {
EXPECT_TRUE(InstMap.count(AddInst));
EXPECT_TRUE(InstMap.count(RetInst));
- EXPECT_EQ(InstMap.at(AddInst).size(), 2u);
- EXPECT_EQ(InstMap.at(RetInst).size(), 2u);
+ const auto &AddEmb = InstMap.at(AddInst);
+ const auto &RetEmb = InstMap.at(RetInst);
+ EXPECT_EQ(AddEmb.size(), 2u);
+ EXPECT_EQ(RetEmb.size(), 2u);
- EXPECT_TRUE(InstMap.at(AddInst).approximatelyEquals(Embedding(2, 27.6)));
- EXPECT_TRUE(InstMap.at(RetInst).approximatelyEquals(Embedding(2, 16.8)));
+ EXPECT_TRUE(AddEmb.approximatelyEquals(Embedding(2, 27.9)));
+ EXPECT_TRUE(RetEmb.approximatelyEquals(Embedding(2, 17.0)));
}
TEST_F(IR2VecTestFixture, GetBBVecMap) {
@@ -337,9 +339,9 @@ TEST_F(IR2VecTestFixture, GetBBVecMap) {
EXPECT_TRUE(BBMap.count(BB));
EXPECT_EQ(BBMap.at(BB).size(), 2u);
- // BB vector should be sum of add and ret: {27.6, 27.6} + {16.8, 16.8} =
- // {44.4, 44.4}
- EXPECT_TRUE(BBMap.at(BB).approximatelyEquals(Embedding(2, 44.4)));
+ // BB vector should be sum of add and ret: {27.9, 27.9} + {17.0, 17.0} =
+ // {44.9, 44.9}
+ EXPECT_TRUE(BBMap.at(BB).approximatelyEquals(Embedding(2, 44.9)));
}
TEST_F(IR2VecTestFixture, GetBBVector) {
@@ -349,7 +351,7 @@ TEST_F(IR2VecTestFixture, GetBBVector) {
const auto &BBVec = Emb->getBBVector(*BB);
EXPECT_EQ(BBVec.size(), 2u);
- EXPECT_TRUE(BBVec.approximatelyEquals(Embedding(2, 44.4)));
+ EXPECT_TRUE(BBVec.approximatelyEquals(Embedding(2, 44.9)));
}
TEST_F(IR2VecTestFixture, GetFunctionVector) {
@@ -360,8 +362,8 @@ TEST_F(IR2VecTestFixture, GetFunctionVector) {
EXPECT_EQ(FuncVec.size(), 2u);
- // Function vector should match BB vector (only one BB): {44.4, 44.4}
- EXPECT_TRUE(FuncVec.approximatelyEquals(Embedding(2, 44.4)));
+ // Function vector should match BB vector (only one BB): {44.9, 44.9}
+ EXPECT_TRUE(FuncVec.approximatelyEquals(Embedding(2, 44.9)));
}
static constexpr unsigned MaxOpcodes = Vocabulary::MaxOpcodes;
diff --git a/llvm/unittests/Analysis/MemorySSATest.cpp b/llvm/unittests/Analysis/MemorySSATest.cpp
index 1a9296f..fceefbc 100644
--- a/llvm/unittests/Analysis/MemorySSATest.cpp
+++ b/llvm/unittests/Analysis/MemorySSATest.cpp
@@ -1092,8 +1092,8 @@ TEST_F(MemorySSATest, LifetimeMarkersAreClobbers) {
// %baz = getelementptr i8, ptr %foo, i64 2
// store i8 0, ptr %foo
// store i8 0, ptr %bar
- // call void @llvm.lifetime.end.p0(i64 3, ptr %foo)
- // call void @llvm.lifetime.start.p0(i64 3, ptr %foo)
+ // call void @llvm.lifetime.end.p0(ptr %foo)
+ // call void @llvm.lifetime.start.p0(ptr %foo)
// store i8 0, ptr %foo
// store i8 0, ptr %bar
// call void @llvm.memset.p0i8(ptr %baz, i8 0, i64 1)
diff --git a/llvm/unittests/Analysis/ValueTrackingTest.cpp b/llvm/unittests/Analysis/ValueTrackingTest.cpp
index 6af2006..559a0b7 100644
--- a/llvm/unittests/Analysis/ValueTrackingTest.cpp
+++ b/llvm/unittests/Analysis/ValueTrackingTest.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/FloatingPointPredicateUtils.h"
#include "llvm/AsmParser/Parser.h"
@@ -2208,6 +2209,41 @@ TEST_F(ComputeKnownFPClassTest, Constants) {
}
}
+TEST_F(ComputeKnownFPClassTest, fcmpImpliesClass_fabs_zero) {
+ parseAssembly("define float @test(float %x) {\n"
+ " %A = call float @llvm.fabs.f32(float %x)\n"
+ " ret float %A\n"
+ "}\n");
+ EXPECT_EQ(std::get<1>(fcmpImpliesClass(FCmpInst::FCMP_OEQ, *F, A, fcZero)),
+ fcZero);
+ EXPECT_EQ(std::get<1>(fcmpImpliesClass(FCmpInst::FCMP_UEQ, *F, A, fcZero)),
+ fcZero | fcNan);
+ EXPECT_EQ(std::get<1>(fcmpImpliesClass(FCmpInst::FCMP_UNE, *F, A, fcZero)),
+ ~fcZero);
+ EXPECT_EQ(std::get<1>(fcmpImpliesClass(FCmpInst::FCMP_ONE, *F, A, fcZero)),
+ ~fcNan & ~fcZero);
+ EXPECT_EQ(std::get<1>(fcmpImpliesClass(FCmpInst::FCMP_ORD, *F, A, fcZero)),
+ ~fcNan);
+ EXPECT_EQ(std::get<1>(fcmpImpliesClass(FCmpInst::FCMP_UNO, *F, A, fcZero)),
+ fcNan);
+ EXPECT_EQ(std::get<1>(fcmpImpliesClass(FCmpInst::FCMP_OGT, *F, A, fcZero)),
+ fcSubnormal | fcNormal | fcInf);
+ EXPECT_EQ(std::get<1>(fcmpImpliesClass(FCmpInst::FCMP_UGT, *F, A, fcZero)),
+ fcSubnormal | fcNormal | fcInf | fcNan);
+ EXPECT_EQ(std::get<1>(fcmpImpliesClass(FCmpInst::FCMP_OGE, *F, A, fcZero)),
+ ~fcNan);
+ EXPECT_EQ(std::get<1>(fcmpImpliesClass(FCmpInst::FCMP_UGE, *F, A, fcZero)),
+ fcAllFlags);
+ EXPECT_EQ(std::get<1>(fcmpImpliesClass(FCmpInst::FCMP_OLT, *F, A, fcZero)),
+ fcNone);
+ EXPECT_EQ(std::get<1>(fcmpImpliesClass(FCmpInst::FCMP_ULT, *F, A, fcZero)),
+ fcNan);
+ EXPECT_EQ(std::get<1>(fcmpImpliesClass(FCmpInst::FCMP_OLE, *F, A, fcZero)),
+ fcZero);
+ EXPECT_EQ(std::get<1>(fcmpImpliesClass(FCmpInst::FCMP_ULE, *F, A, fcZero)),
+ fcZero | fcNan);
+}
+
TEST_F(ValueTrackingTest, isNonZeroRecurrence) {
parseAssembly(R"(
define i1 @test(i8 %n, i8 %r) {
diff --git a/llvm/unittests/CodeGen/SelectionDAGTestBase.h b/llvm/unittests/CodeGen/SelectionDAGTestBase.h
index edc730d..8a0a05f 100644
--- a/llvm/unittests/CodeGen/SelectionDAGTestBase.h
+++ b/llvm/unittests/CodeGen/SelectionDAGTestBase.h
@@ -7,7 +7,6 @@
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
-#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
@@ -72,12 +71,8 @@ protected:
if (!DAG)
reportFatalUsageError("Failed to create SelectionDAG?");
OptimizationRemarkEmitter ORE(F);
- FunctionAnalysisManager FAM;
- FAM.registerPass([&] { return TM->getTargetIRAnalysis(); });
-
- TargetTransformInfo TTI = TM->getTargetIRAnalysis().run(*F, FAM);
DAG->init(*MF, ORE, nullptr, nullptr, nullptr, nullptr, nullptr, MMI,
- nullptr, TTI.hasBranchDivergence(F));
+ nullptr);
}
TargetLoweringBase::LegalizeTypeAction getTypeAction(EVT VT) {
diff --git a/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp b/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp
index d6b578a..b7a060b 100644
--- a/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp
+++ b/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp
@@ -23,6 +23,7 @@
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
+#include <cstdlib>
#include <optional>
using namespace llvm;
@@ -5360,6 +5361,144 @@ TEST_F(OpenMPIRBuilderTest, CreateReductions) {
EXPECT_TRUE(findGEPZeroOne(ReductionFn->getArg(1), FirstRHS, SecondRHS));
}
+static void createScan(llvm::Value *scanVar, llvm::Type *scanType,
+ OpenMPIRBuilder &OMPBuilder, IRBuilder<> &Builder,
+ OpenMPIRBuilder::LocationDescription Loc,
+ OpenMPIRBuilder::InsertPointTy &allocaIP,
+ ScanInfo *&ScanRedInfo) {
+ using InsertPointTy = OpenMPIRBuilder::InsertPointTy;
+ ASSERT_EXPECTED_INIT(InsertPointTy, retIp,
+ OMPBuilder.createScan(Loc, allocaIP, {scanVar},
+ {scanType}, true, ScanRedInfo));
+ Builder.restoreIP(retIp);
+}
+/*
+ The following is pseudocode for the code generated by this test case:
+ <declare pointer to buffer> ptr
+ size num_iters = 100
+ // temp buffer allocation
+ omp masked {
+ buff = malloc(num_iters*scanvarstype)
+ *ptr = buff
+ }
+ barrier;
+ // input phase loop
+ for (i: 0..<num_iters>) {
+ <input phase>;
+ buffer = *ptr;
+ buffer[i] = red;
+ }
+ // scan reduction
+ omp masked
+ {
+ for (int k = 0; k != ceil(log2(num_iters)); ++k) {
+ i=pow(2,k)
+ for (size cnt = last_iter; cnt >= i; --cnt) {
+ buffer = *ptr;
+ buffer[cnt] op= buffer[cnt-i];
+ }
+ }
+ }
+ barrier;
+ // scan phase loop
+  for (i: 0..<num_iters>) {
+ buffer = *ptr;
+ red = buffer[i] ;
+ <scan phase>;
+ }
+ // temp buffer deletion
+ omp masked {
+ free(*ptr)
+ }
+ barrier;
+*/
+TEST_F(OpenMPIRBuilderTest, ScanReduction) {
+ using InsertPointTy = OpenMPIRBuilder::InsertPointTy;
+ OpenMPIRBuilder OMPBuilder(*M);
+ OMPBuilder.initialize();
+ IRBuilder<> Builder(BB);
+ OpenMPIRBuilder::LocationDescription Loc({Builder.saveIP(), DL});
+ Value *TripCount = F->getArg(0);
+ Type *LCTy = TripCount->getType();
+ Value *StartVal = ConstantInt::get(LCTy, 1);
+ Value *StopVal = ConstantInt::get(LCTy, 100);
+ Value *Step = ConstantInt::get(LCTy, 1);
+ auto AllocaIP = Builder.saveIP();
+
+ llvm::Value *ScanVar = Builder.CreateAlloca(Builder.getFloatTy());
+ llvm::Value *OrigVar = Builder.CreateAlloca(Builder.getFloatTy());
+ unsigned NumBodiesGenerated = 0;
+ ScanInfo *ScanRedInfo;
+ ASSERT_EXPECTED_INIT(ScanInfo *, ScanInformation,
+ OMPBuilder.scanInfoInitialize());
+ ScanRedInfo = ScanInformation;
+ auto LoopBodyGenCB = [&](InsertPointTy CodeGenIP, llvm::Value *LC) {
+ NumBodiesGenerated += 1;
+ Builder.restoreIP(CodeGenIP);
+ createScan(ScanVar, Builder.getFloatTy(), OMPBuilder, Builder, Loc,
+ AllocaIP, ScanRedInfo);
+ return Error::success();
+ };
+ llvm::SmallVector<CanonicalLoopInfo *> loops;
+ ASSERT_EXPECTED_INIT(llvm::SmallVector<CanonicalLoopInfo *>, loopvec,
+ OMPBuilder.createCanonicalScanLoops(
+ Loc, LoopBodyGenCB, StartVal, StopVal, Step, false,
+ false, Builder.saveIP(), "scan", ScanRedInfo));
+ loops = loopvec;
+ CanonicalLoopInfo *InputLoop = loops.front();
+ CanonicalLoopInfo *ScanLoop = loops.back();
+ Builder.restoreIP(ScanLoop->getAfterIP());
+ InputLoop->assertOK();
+ ScanLoop->assertOK();
+
+ EXPECT_EQ(ScanLoop->getAfter(), Builder.GetInsertBlock());
+ EXPECT_EQ(NumBodiesGenerated, 2U);
+ SmallVector<OpenMPIRBuilder::ReductionInfo> ReductionInfos = {
+ {Builder.getFloatTy(), OrigVar, ScanVar,
+ /*EvaluationKind=*/OpenMPIRBuilder::EvalKind::Scalar, sumReduction,
+ /*ReductionGenClang=*/nullptr, sumAtomicReduction}};
+ OpenMPIRBuilder::LocationDescription RedLoc({InputLoop->getAfterIP(), DL});
+ llvm::BasicBlock *Cont = splitBB(Builder, false, "omp.scan.loop.cont");
+ ASSERT_EXPECTED_INIT(
+ InsertPointTy, retIp,
+ OMPBuilder.emitScanReduction(RedLoc, ReductionInfos, ScanRedInfo));
+ Builder.restoreIP(retIp);
+ Builder.CreateBr(Cont);
+ Builder.SetInsertPoint(Cont);
+ unsigned NumMallocs = 0;
+ unsigned NumFrees = 0;
+ unsigned NumMasked = 0;
+ unsigned NumEndMasked = 0;
+ unsigned NumLog = 0;
+ unsigned NumCeil = 0;
+ for (Instruction &I : instructions(F)) {
+ if (!isa<CallInst>(I))
+ continue;
+ CallInst *Call = dyn_cast<CallInst>(&I);
+ StringRef Name = Call->getCalledFunction()->getName();
+ if (Name.equals_insensitive("malloc")) {
+ NumMallocs += 1;
+ } else if (Name.equals_insensitive("free")) {
+ NumFrees += 1;
+ } else if (Name.equals_insensitive("__kmpc_masked")) {
+ NumMasked += 1;
+ } else if (Name.equals_insensitive("__kmpc_end_masked")) {
+ NumEndMasked += 1;
+ } else if (Name.equals_insensitive("llvm.log2.f64")) {
+ NumLog += 1;
+ } else if (Name.equals_insensitive("llvm.ceil.f64")) {
+ NumCeil += 1;
+ }
+ }
+ EXPECT_EQ(NumBodiesGenerated, 2U);
+ EXPECT_EQ(NumMasked, 3U);
+ EXPECT_EQ(NumEndMasked, 3U);
+ EXPECT_EQ(NumMallocs, 1U);
+ EXPECT_EQ(NumFrees, 1U);
+ EXPECT_EQ(NumLog, 1U);
+ EXPECT_EQ(NumCeil, 1U);
+}
+
TEST_F(OpenMPIRBuilderTest, CreateTwoReductions) {
using InsertPointTy = OpenMPIRBuilder::InsertPointTy;
OpenMPIRBuilder OMPBuilder(*M);
diff --git a/llvm/unittests/IR/BasicBlockDbgInfoTest.cpp b/llvm/unittests/IR/BasicBlockDbgInfoTest.cpp
index aac0f96..7780bba 100644
--- a/llvm/unittests/IR/BasicBlockDbgInfoTest.cpp
+++ b/llvm/unittests/IR/BasicBlockDbgInfoTest.cpp
@@ -189,11 +189,7 @@ TEST(BasicBlockDbgInfoTest, DropSourceAtomOnSplit) {
ASSERT_TRUE(After);
const DebugLoc &OrigTerminatorDL = After->getTerminator()->getDebugLoc();
ASSERT_TRUE(OrigTerminatorDL);
-#ifdef EXPERIMENTAL_KEY_INSTRUCTIONS
EXPECT_EQ(OrigTerminatorDL->getAtomGroup(), 1u);
-#else
- EXPECT_EQ(OrigTerminatorDL->getAtomGroup(), 0u);
-#endif
}
// Test splitBasicBlock.
@@ -204,11 +200,7 @@ TEST(BasicBlockDbgInfoTest, DropSourceAtomOnSplit) {
const DebugLoc &OrigTerminatorDL = After->getTerminator()->getDebugLoc();
ASSERT_TRUE(OrigTerminatorDL);
-#ifdef EXPERIMENTAL_KEY_INSTRUCTIONS
EXPECT_EQ(OrigTerminatorDL->getAtomGroup(), 1u);
-#else
- EXPECT_EQ(OrigTerminatorDL->getAtomGroup(), 0u);
-#endif
BasicBlock *Before = After->getSinglePredecessor();
ASSERT_TRUE(Before);
diff --git a/llvm/unittests/IR/IRBuilderTest.cpp b/llvm/unittests/IR/IRBuilderTest.cpp
index 4f2ede3..773c32e 100644
--- a/llvm/unittests/IR/IRBuilderTest.cpp
+++ b/llvm/unittests/IR/IRBuilderTest.cpp
@@ -466,15 +466,11 @@ TEST_F(IRBuilderTest, Lifetime) {
CallInst *Start1 = Builder.CreateLifetimeStart(Var1);
CallInst *Start2 = Builder.CreateLifetimeStart(Var2);
- CallInst *Start3 = Builder.CreateLifetimeStart(Var3, Builder.getInt64(100));
+ CallInst *Start3 = Builder.CreateLifetimeStart(Var3);
- EXPECT_EQ(Start1->getArgOperand(0), Builder.getInt64(-1));
- EXPECT_EQ(Start2->getArgOperand(0), Builder.getInt64(-1));
- EXPECT_EQ(Start3->getArgOperand(0), Builder.getInt64(100));
-
- EXPECT_EQ(Start1->getArgOperand(1), Var1);
- EXPECT_EQ(Start2->getArgOperand(1)->stripPointerCasts(), Var2);
- EXPECT_EQ(Start3->getArgOperand(1), Var3);
+ EXPECT_EQ(Start1->getArgOperand(0), Var1);
+ EXPECT_EQ(Start2->getArgOperand(0), Var2);
+ EXPECT_EQ(Start3->getArgOperand(0), Var3);
Value *End1 = Builder.CreateLifetimeEnd(Var1);
Builder.CreateLifetimeEnd(Var2);
diff --git a/llvm/unittests/IR/MetadataTest.cpp b/llvm/unittests/IR/MetadataTest.cpp
index ba8367f..7425703 100644
--- a/llvm/unittests/IR/MetadataTest.cpp
+++ b/llvm/unittests/IR/MetadataTest.cpp
@@ -1470,17 +1470,10 @@ TEST_F(DILocationTest, Merge) {
PickMergedSourceLocations = false;
}
-#ifdef EXPERIMENTAL_KEY_INSTRUCTIONS
#define EXPECT_ATOM(Loc, Group, Rank) \
EXPECT_EQ(Group, M->getAtomGroup()); \
EXPECT_EQ(Rank, M->getAtomRank());
-#else
-#define EXPECT_ATOM(Loc, Group, Rank) \
- EXPECT_EQ(0u, M->getAtomGroup()); \
- EXPECT_EQ(0u, M->getAtomRank()); \
- (void)Group; \
- (void)Rank;
-#endif
+
// Identical, including source atom numbers.
{
auto *A = DILocation::get(Context, 2, 7, N, nullptr, false, /*AtomGroup*/ 1,
@@ -1753,15 +1746,8 @@ TEST_F(DILocationTest, KeyInstructions) {
EXPECT_EQ(Context.pImpl->NextAtomGroup, 1u);
DILocation *A1 =
DILocation::get(Context, 1, 0, getSubprogram(), nullptr, false, 1, 2);
- // The group is only applied to the DILocation if we've built LLVM with
- // EXPERIMENTAL_KEY_INSTRUCTIONS.
-#ifdef EXPERIMENTAL_KEY_INSTRUCTIONS
EXPECT_EQ(A1->getAtomGroup(), 1u);
EXPECT_EQ(A1->getAtomRank(), 2u);
-#else
- EXPECT_EQ(A1->getAtomGroup(), 0u);
- EXPECT_EQ(A1->getAtomRank(), 0u);
-#endif
// Group number 1 has been "used" so next available is 2.
EXPECT_EQ(Context.pImpl->NextAtomGroup, 2u);
diff --git a/llvm/unittests/Support/DebugLogTest.cpp b/llvm/unittests/Support/DebugLogTest.cpp
index c24d1a5..b28c59c 100644
--- a/llvm/unittests/Support/DebugLogTest.cpp
+++ b/llvm/unittests/Support/DebugLogTest.cpp
@@ -6,11 +6,6 @@
//
//===----------------------------------------------------------------------===//
-// This macro is defined in the LLVM build system, but we undefine it here
-// so that we test at least once in-tree the case where __SHORT_FILE__ is not
-// defined.
-#undef __SHORT_FILE__
-
#include "llvm/Support/DebugLog.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/Support/raw_ostream.h"
diff --git a/llvm/unittests/Target/AArch64/AArch64SelectionDAGTest.cpp b/llvm/unittests/Target/AArch64/AArch64SelectionDAGTest.cpp
index 28d69f3..f06f03b 100644
--- a/llvm/unittests/Target/AArch64/AArch64SelectionDAGTest.cpp
+++ b/llvm/unittests/Target/AArch64/AArch64SelectionDAGTest.cpp
@@ -8,7 +8,6 @@
#include "AArch64SelectionDAGInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
-#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
@@ -63,12 +62,8 @@ protected:
if (!DAG)
report_fatal_error("DAG?");
OptimizationRemarkEmitter ORE(F);
- FunctionAnalysisManager FAM;
- FAM.registerPass([&] { return TM->getTargetIRAnalysis(); });
-
- TargetTransformInfo TTI = TM->getTargetIRAnalysis().run(*F, FAM);
DAG->init(*MF, ORE, nullptr, nullptr, nullptr, nullptr, nullptr, MMI,
- nullptr, TTI.hasBranchDivergence(F));
+ nullptr);
}
TargetLoweringBase::LegalizeTypeAction getTypeAction(EVT VT) {
diff --git a/llvm/unittests/TextAPI/TextStubV5Tests.cpp b/llvm/unittests/TextAPI/TextStubV5Tests.cpp
index 24577b3..f6689f7 100644
--- a/llvm/unittests/TextAPI/TextStubV5Tests.cpp
+++ b/llvm/unittests/TextAPI/TextStubV5Tests.cpp
@@ -1167,6 +1167,33 @@ TEST(TBDv5, InvalidMinOS) {
EXPECT_EQ("invalid min_deployment section\n", ErrorMessage);
}
+TEST(TBDv5, RISCV) {
+ static const char TBDv5File[] = R"({
+"tapi_tbd_version": 5,
+"main_library": {
+ "target_info": [
+ {
+ "target": "riscv32-ios",
+ "min_deployment": "34.1"
+ }
+ ],
+ "install_names":[
+ { "name":"/S/L/F/Foo.framework/Foo" }
+ ]
+}})";
+
+ Expected<TBDFile> Result =
+ TextAPIReader::get(MemoryBufferRef(TBDv5File, "Test.tbd"));
+ EXPECT_TRUE(!!Result);
+ Target ExpectedTarget = Target(AK_riscv32, PLATFORM_IOS, VersionTuple(34, 1));
+ TBDFile ReadFile = std::move(Result.get());
+ EXPECT_EQ(FileType::TBD_V5, ReadFile->getFileType());
+ EXPECT_EQ(std::string("/S/L/F/Foo.framework/Foo"),
+ ReadFile->getInstallName());
+ EXPECT_TRUE(ReadFile->targets().begin() != ReadFile->targets().end());
+ EXPECT_EQ(*ReadFile->targets().begin(), ExpectedTarget);
+}
+
TEST(TBDv5, SimSupport) {
static const char TBDv5File[] = R"({
"tapi_tbd_version": 5,
diff --git a/llvm/unittests/Transforms/Utils/CloningTest.cpp b/llvm/unittests/Transforms/Utils/CloningTest.cpp
index b8b0357..fe81986 100644
--- a/llvm/unittests/Transforms/Utils/CloningTest.cpp
+++ b/llvm/unittests/Transforms/Utils/CloningTest.cpp
@@ -1203,13 +1203,9 @@ TEST_F(CloneInstruction, cloneKeyInstructions) {
ASSERT_FALSE(verifyModule(*M, &errs()));
-#ifdef EXPERIMENTAL_KEY_INSTRUCTIONS
#define EXPECT_ATOM(Inst, G) \
EXPECT_TRUE(Inst->getDebugLoc()); \
EXPECT_EQ(Inst->getDebugLoc()->getAtomGroup(), uint64_t(G));
-#else
-#define EXPECT_ATOM(Inst, G) (void)Inst;
-#endif
Function *F = M->getFunction("test");
BasicBlock *BB = &*F->begin();
diff --git a/llvm/unittests/Transforms/Vectorize/VPlanTestBase.h b/llvm/unittests/Transforms/Vectorize/VPlanTestBase.h
index 7dfd11a..56f6858 100644
--- a/llvm/unittests/Transforms/Vectorize/VPlanTestBase.h
+++ b/llvm/unittests/Transforms/Vectorize/VPlanTestBase.h
@@ -72,10 +72,13 @@ protected:
Loop *L = LI->getLoopFor(LoopHeader);
PredicatedScalarEvolution PSE(*SE, *L);
- auto Plan = VPlanTransforms::buildPlainCFG(L, *LI);
+ auto Plan = VPlanTransforms::buildVPlan0(L, *LI, IntegerType::get(*Ctx, 64),
+ {}, PSE);
+
VFRange R(ElementCount::getFixed(1), ElementCount::getFixed(2));
- VPlanTransforms::prepareForVectorization(*Plan, IntegerType::get(*Ctx, 64),
- PSE, true, false, L, {}, false, R);
+ VPlanTransforms::handleEarlyExits(*Plan, false, R);
+ VPlanTransforms::addMiddleCheck(*Plan, true, false);
+
VPlanTransforms::createLoopRegions(*Plan);
return Plan;
}
diff --git a/llvm/utils/TableGen/Basic/IntrinsicEmitter.cpp b/llvm/utils/TableGen/Basic/IntrinsicEmitter.cpp
index 49f7194..293e64e 100644
--- a/llvm/utils/TableGen/Basic/IntrinsicEmitter.cpp
+++ b/llvm/utils/TableGen/Basic/IntrinsicEmitter.cpp
@@ -561,7 +561,8 @@ static AttributeSet getIntrinsicArgAttributeSet(LLVMContext &C, unsigned ID,
} // getIntrinsicArgAttributeSet
)";
- // Compute unique function attribute sets.
+ // Compute unique function attribute sets. Note that ID 255 will be used for
+ // intrinsics with no function attributes.
std::map<const CodeGenIntrinsic *, unsigned, FnAttributeComparator>
UniqFnAttributes;
OS << R"(
@@ -570,6 +571,8 @@ static AttributeSet getIntrinsicFnAttributeSet(LLVMContext &C, unsigned ID) {
default: llvm_unreachable("Invalid attribute set number");)";
for (const CodeGenIntrinsic &Int : Ints) {
+ if (!hasFnAttributes(Int))
+ continue;
unsigned ID = UniqFnAttributes.size();
if (!UniqFnAttributes.try_emplace(&Int, ID).second)
continue;
@@ -621,9 +624,7 @@ static AttributeSet getIntrinsicFnAttributeSet(LLVMContext &C, unsigned ID) {
static constexpr uint16_t IntrinsicsToAttributesMap[] = {)";
- // Compute the maximum number of attribute arguments and the map. For function
- // attributes, we only consider whether the intrinsics has any function
- // arguments or not.
+ // Compute unique argument attributes.
std::map<const CodeGenIntrinsic *, unsigned, AttributeComparator>
UniqAttributes;
for (const CodeGenIntrinsic &Int : Ints) {
@@ -631,86 +632,153 @@ static constexpr uint16_t IntrinsicsToAttributesMap[] = {)";
UniqAttributes.try_emplace(&Int, ID);
}
- // Emit an array of AttributeList. Most intrinsics will have at least one
- // entry, for the function itself (index ~1), which is usually nounwind.
- for (const CodeGenIntrinsic &Int : Ints) {
- uint16_t FnAttrIndex = UniqFnAttributes[&Int];
- OS << formatv("\n {} << 8 | {}, // {}", FnAttrIndex,
- UniqAttributes[&Int], Int.Name);
- }
+ constexpr uint16_t NoFunctionAttrsID = 255;
+ if (UniqAttributes.size() > 256)
+ PrintFatalError("Too many unique argument attributes for table!");
+ // Note, ID 255 is used to indicate no function attributes.
+ if (UniqFnAttributes.size() > 255)
+ PrintFatalError("Too many unique function attributes for table!");
// Assign a 16-bit packed ID for each intrinsic. The lower 8-bits will be its
// "argument attribute ID" (index in UniqAttributes) and upper 8 bits will be
// its "function attribute ID" (index in UniqFnAttributes).
- if (UniqAttributes.size() > 256)
- PrintFatalError("Too many unique argument attributes for table!");
- if (UniqFnAttributes.size() > 256)
- PrintFatalError("Too many unique function attributes for table!");
+ for (const CodeGenIntrinsic &Int : Ints) {
+ uint16_t FnAttrIndex =
+ hasFnAttributes(Int) ? UniqFnAttributes[&Int] : NoFunctionAttrsID;
+ OS << formatv("\n {} << 8 | {}, // {}", FnAttrIndex,
+ UniqAttributes[&Int], Int.Name);
+ }
OS << R"(
-};
+}; // IntrinsicsToAttributesMap
+)";
+
+ // For a given intrinsic, its attributes are constructed by populating the
+ // local array `AS` below with its non-empty argument attributes followed by
+ // function attributes if any. Each argument attribute is constructed as:
+ //
+ // getIntrinsicArgAttributeSet(C, ArgAttrID, FT->getContainedType(ArgNo));
+ //
+  // Create a table that records, for each unique argument attribute set, the
+  // <ArgNo, ArgAttrID> pairs that are needed to construct its argument
+  // attributes. These tables for all intrinsics will be concatenated into one
+  // large table, and for each intrinsic we remember the starting index and the
+  // size of its slice of entries (i.e., the number of arguments with
+  // non-empty attributes), so that we can build the attribute list for an
+  // intrinsic without using a switch-case.
+
+ // Find the max number of attributes to create the local array and create
+ // a concatenated list of <ArgNo, AttrID> pairs.
+ struct ArgNoAttrIDPair {
+ uint16_t ArgNo, ArgAttrID;
+ ArgNoAttrIDPair(uint16_t ArgNo, uint16_t ArgAttrID)
+ : ArgNo(ArgNo), ArgAttrID(ArgAttrID) {}
+ };
+
+  // For each unique ID in UniqAttributes, record the starting index in the
+ // flattened ArgNoAttrIDPair table, and the number of non-empty arg
+ // attributes.
+ struct ArgAttributesInfo {
+ uint16_t StartIndex;
+ uint16_t NumAttrs;
+ ArgAttributesInfo(uint16_t StartIndex, uint16_t NumAttrs)
+ : StartIndex(StartIndex), NumAttrs(NumAttrs) {}
+ };
+ SmallVector<ArgNoAttrIDPair> ArgAttrIdTable;
+ SmallVector<ArgAttributesInfo> ArgAttributesInfoTable(UniqAttributes.size(),
+ {0, 0});
-AttributeList Intrinsic::getAttributes(LLVMContext &C, ID id,
- FunctionType *FT) {)";
- // Find the max number of attributes to create the local array.
unsigned MaxNumAttrs = 0;
for (const auto [IntPtr, UniqueID] : UniqAttributes) {
const CodeGenIntrinsic &Int = *IntPtr;
- unsigned NumAttrs =
- llvm::count_if(Int.ArgumentAttributes,
- [](const auto &Attrs) { return !Attrs.empty(); });
+ unsigned NumAttrs = 0;
+ unsigned StartIndex = ArgAttrIdTable.size();
+
+ for (const auto &[ArgNo, Attrs] : enumerate(Int.ArgumentAttributes)) {
+ if (Attrs.empty())
+ continue;
+
+ uint16_t ArgAttrID = UniqArgAttributes.find(Attrs)->second;
+ ArgAttrIdTable.emplace_back((uint16_t)ArgNo, ArgAttrID);
+ ++NumAttrs;
+ }
+
+ // Record the start index and size of the list for this unique ID.
+ if (NumAttrs)
+ ArgAttributesInfoTable[UniqueID] =
+ ArgAttributesInfo(StartIndex, NumAttrs);
+
NumAttrs += hasFnAttributes(Int);
MaxNumAttrs = std::max(MaxNumAttrs, NumAttrs);
}
+ if (ArgAttrIdTable.size() >= std::numeric_limits<uint16_t>::max())
+ PrintFatalError("Size of ArgAttrIdTable exceeds supported limit");
+
+  // Emit the two tables: the flattened ArgNoAttrIDPair table (ArgAttrIdTable)
+  // and ArgAttributesInfoTable.
+ OS << R"(
+namespace {
+struct ArgNoAttrIDPair {
+ uint16_t ArgNo, ArgAttrID;
+};
+} // namespace
+
+static constexpr ArgNoAttrIDPair ArgAttrIdTable[] = {
+)";
+ for (const auto &[ArgNo, ArgAttrID] : ArgAttrIdTable)
+ OS << formatv(" {{{}, {}},\n", ArgNo, ArgAttrID);
+ OS << R"(}; // ArgAttrIdTable
+
+namespace {
+struct ArgAttributesInfo {
+ uint16_t StartIndex;
+ uint16_t NumAttrs;
+};
+} // namespace
+
+static constexpr ArgAttributesInfo ArgAttributesInfoTable[] = {
+)";
+ for (const auto &[StartIndex, NumAttrs] : ArgAttributesInfoTable)
+ OS << formatv(" {{{}, {}},\n", StartIndex, NumAttrs);
+ OS << "}; // ArgAttributesInfoTable\n";
+
+ // Now emit the Intrinsic::getAttributes function. This will first map
+  // from intrinsic ID -> unique arg/function attr ID (using the
+  // IntrinsicsToAttributesMap table). Then it will use the unique arg ID to
+ // construct all the argument attributes (using the ArgAttributesInfoTable and
+ // ArgAttrIdTable) and then add on the function attributes if any.
OS << formatv(R"(
+AttributeList Intrinsic::getAttributes(LLVMContext &C, ID id,
+ FunctionType *FT) {{
if (id == 0)
return AttributeList();
uint16_t PackedID = IntrinsicsToAttributesMap[id - 1];
uint8_t FnAttrID = PackedID >> 8;
+ uint8_t ArgAttrID = PackedID & 0xFF;
std::pair<unsigned, AttributeSet> AS[{}];
- unsigned NumAttrs = 0;
- bool HasFnAttr = false;
- switch(PackedID & 0xFF) {{
- default: llvm_unreachable("Invalid attribute number");
-)",
- MaxNumAttrs);
- for (const auto [IntPtr, UniqueID] : UniqAttributes) {
- OS << formatv(" case {}:\n", UniqueID);
- const CodeGenIntrinsic &Int = *IntPtr;
-
- unsigned NumAttrs = 0;
-
- for (const auto &[AttrIdx, Attrs] : enumerate(Int.ArgumentAttributes)) {
- if (Attrs.empty())
- continue;
+ // Construct an ArrayRef for easier range checking.
+ ArrayRef<ArgAttributesInfo> ArgAttributesInfoTableAR(ArgAttributesInfoTable);
+ if (ArgAttrID >= ArgAttributesInfoTableAR.size())
+ llvm_unreachable("Invalid arguments attribute ID");
- unsigned ArgAttrID = UniqArgAttributes.find(Attrs)->second;
- OS << formatv(" AS[{}] = {{{}, getIntrinsicArgAttributeSet(C, {}, "
- "FT->getContainedType({}))};\n",
- NumAttrs++, AttrIdx, ArgAttrID, AttrIdx);
- }
-
- if (hasFnAttributes(Int))
- OS << " HasFnAttr = true;\n";
-
- if (NumAttrs)
- OS << formatv(" NumAttrs = {};\n", NumAttrs);
- OS << " break;\n";
+ auto [StartIndex, NumAttrs] = ArgAttributesInfoTableAR[ArgAttrID];
+ for (unsigned Idx = 0; Idx < NumAttrs; ++Idx) {{
+ auto [ArgNo, ArgAttrID] = ArgAttrIdTable[StartIndex + Idx];
+ AS[Idx] = {{ArgNo,
+ getIntrinsicArgAttributeSet(C, ArgAttrID, FT->getContainedType(ArgNo))};
}
-
- OS << R"( }
- if (HasFnAttr) {
- AS[NumAttrs++] = {AttributeList::FunctionIndex,
+ if (FnAttrID != {}) {{
+ AS[NumAttrs++] = {{AttributeList::FunctionIndex,
getIntrinsicFnAttributeSet(C, FnAttrID)};
}
return AttributeList::get(C, ArrayRef(AS, NumAttrs));
}
#endif // GET_INTRINSIC_ATTRIBUTES
-)";
+)",
+ MaxNumAttrs, NoFunctionAttrsID);
}
void IntrinsicEmitter::EmitIntrinsicToBuiltinMap(
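To make the two-table encoding described in the comments above concrete, here is a hedged, standalone C++ sketch of the lookup that the emitted `Intrinsic::getAttributes` performs. The table contents are made up for illustration and do not correspond to any real intrinsic.

```c++
#include <cstdint>
#include <cstdio>

// Flattened (ArgNo, ArgAttrID) pairs, shared by all unique attribute IDs.
struct ArgNoAttrIDPair { uint16_t ArgNo, ArgAttrID; };
// Per-unique-ID slice into the flattened table.
struct ArgAttributesInfo { uint16_t StartIndex, NumAttrs; };

static constexpr ArgNoAttrIDPair ArgAttrIdTable[] = {{0, 3}, {2, 1}, {1, 4}};
static constexpr ArgAttributesInfo ArgAttributesInfoTable[] = {{0, 2}, {2, 1}};

int main() {
  // In the generated code this comes from the packed IntrinsicsToAttributesMap
  // entry (low byte = argument-attribute ID, high byte = function-attribute ID).
  uint8_t ArgAttrID = 0;
  auto [Start, Num] = ArgAttributesInfoTable[ArgAttrID];
  for (unsigned I = 0; I < Num; ++I) {
    auto [ArgNo, AttrSetID] = ArgAttrIdTable[Start + I];
    std::printf("argument %u uses attribute set %u\n", (unsigned)ArgNo,
                (unsigned)AttrSetID);
  }
  return 0;
}
```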
diff --git a/llvm/utils/gn/secondary/clang-tools-extra/clangd/BUILD.gn b/llvm/utils/gn/secondary/clang-tools-extra/clangd/BUILD.gn
index b609d4a..f8c4838 100644
--- a/llvm/utils/gn/secondary/clang-tools-extra/clangd/BUILD.gn
+++ b/llvm/utils/gn/secondary/clang-tools-extra/clangd/BUILD.gn
@@ -122,6 +122,7 @@ static_library("clangd") {
"SemanticHighlighting.cpp",
"SemanticSelection.cpp",
"SourceCode.cpp",
+ "SymbolDocumentation.cpp",
"SystemIncludeExtractor.cpp",
"TUScheduler.cpp",
"TidyProvider.cpp",
diff --git a/llvm/utils/gn/secondary/clang-tools-extra/clangd/unittests/BUILD.gn b/llvm/utils/gn/secondary/clang-tools-extra/clangd/unittests/BUILD.gn
index ad32aa9..8aba04a 100644
--- a/llvm/utils/gn/secondary/clang-tools-extra/clangd/unittests/BUILD.gn
+++ b/llvm/utils/gn/secondary/clang-tools-extra/clangd/unittests/BUILD.gn
@@ -108,6 +108,7 @@ unittest("ClangdTests") {
"SourceCodeTests.cpp",
"StdLibTests.cpp",
"SymbolCollectorTests.cpp",
+ "SymbolDocumentationTests.cpp",
"SymbolInfoTests.cpp",
"SyncAPI.cpp",
"TUSchedulerTests.cpp",
diff --git a/llvm/utils/gn/secondary/clang/test/BUILD.gn b/llvm/utils/gn/secondary/clang/test/BUILD.gn
index ed68387..020e35e 100644
--- a/llvm/utils/gn/secondary/clang/test/BUILD.gn
+++ b/llvm/utils/gn/secondary/clang/test/BUILD.gn
@@ -60,7 +60,6 @@ write_lit_config("lit_site_cfg") {
"CLANG_VENDOR_UTI=org.llvm.clang",
"ENABLE_BACKTRACES=1",
"ENABLE_SHARED=0",
- "LLVM_EXPERIMENTAL_KEY_INSTRUCTIONS=0",
"LLVM_EXTERNAL_LIT=",
"LLVM_HOST_TRIPLE=$llvm_current_triple",
"LLVM_INCLUDE_SPIRV_TOOLS_TESTS=0",
diff --git a/llvm/utils/gn/secondary/clang/unittests/Interpreter/BUILD.gn b/llvm/utils/gn/secondary/clang/unittests/Interpreter/BUILD.gn
index 103954e5..4e485b1 100644
--- a/llvm/utils/gn/secondary/clang/unittests/Interpreter/BUILD.gn
+++ b/llvm/utils/gn/secondary/clang/unittests/Interpreter/BUILD.gn
@@ -27,3 +27,11 @@ unittest("ClangReplInterpreterTests") {
ldflags = [ "-rdynamic" ]
}
}
+
+# FIXME: This is included conditionally in the CMake build if
+# compiler-rt is available. The test currently fails in the GN
+# build. Figure out if we can get it to pass.
+group("dummy_sources") {
+ sources = [ "OutOfProcessInterpreterTests.cpp" ]
+ not_needed(sources)
+}
diff --git a/llvm/utils/gn/secondary/libcxx/include/BUILD.gn b/llvm/utils/gn/secondary/libcxx/include/BUILD.gn
index d270686..5f2817c 100644
--- a/llvm/utils/gn/secondary/libcxx/include/BUILD.gn
+++ b/llvm/utils/gn/secondary/libcxx/include/BUILD.gn
@@ -1104,14 +1104,12 @@ if (current_toolchain == default_toolchain) {
"__fwd/get.h",
"__fwd/ios.h",
"__fwd/istream.h",
- "__fwd/map.h",
"__fwd/mdspan.h",
"__fwd/memory.h",
"__fwd/memory_resource.h",
"__fwd/ostream.h",
"__fwd/pair.h",
"__fwd/queue.h",
- "__fwd/set.h",
"__fwd/span.h",
"__fwd/sstream.h",
"__fwd/stack.h",
diff --git a/llvm/utils/gn/secondary/lldb/tools/lldb-dap/BUILD.gn b/llvm/utils/gn/secondary/lldb/tools/lldb-dap/BUILD.gn
index 30a1e03..a7099aa 100644
--- a/llvm/utils/gn/secondary/lldb/tools/lldb-dap/BUILD.gn
+++ b/llvm/utils/gn/secondary/lldb/tools/lldb-dap/BUILD.gn
@@ -5,6 +5,7 @@ static_library("lib") {
configs += [ "//llvm/utils/gn/build:lldb_code" ]
deps = [
"//lldb/source/API:liblldb",
+ "//lldb/source/Host",
"//llvm/lib/Option",
"//llvm/lib/Support",
]
@@ -14,7 +15,6 @@ static_library("lib") {
include_dirs = [ "." ]
- # FIXME: link in //lldb/source/Host on Win (and netbsd)
# FIXME: link in pthread
# FIXME: rpath/install_name stuff on macOS for framework on macOS
@@ -72,6 +72,7 @@ static_library("lib") {
"LLDBUtils.cpp",
"OutputRedirector.cpp",
"ProgressEvent.cpp",
+ "Protocol/DAPTypes.cpp",
"Protocol/ProtocolBase.cpp",
"Protocol/ProtocolEvents.cpp",
"Protocol/ProtocolRequests.cpp",
diff --git a/llvm/utils/gn/secondary/llvm/lib/Target/AVR/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/Target/AVR/BUILD.gn
index 2838901..449adf4 100644
--- a/llvm/utils/gn/secondary/llvm/lib/Target/AVR/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/lib/Target/AVR/BUILD.gn
@@ -49,6 +49,7 @@ static_library("LLVMAVRCodeGen") {
"AVRSubtarget.cpp",
"AVRTargetMachine.cpp",
"AVRTargetObjectFile.cpp",
+ "AVRTargetTransformInfo.cpp",
]
}
diff --git a/llvm/utils/gn/secondary/llvm/test/BUILD.gn b/llvm/utils/gn/secondary/llvm/test/BUILD.gn
index 08cddc1..aaac823 100644
--- a/llvm/utils/gn/secondary/llvm/test/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/test/BUILD.gn
@@ -65,7 +65,6 @@ write_lit_config("lit_site_cfg") {
"LLVM_ENABLE_FFI=0",
"LLVM_ENABLE_HTTPLIB=0",
"LLVM_ENABLE_PROFCHECK=0",
- "LLVM_EXPERIMENTAL_KEY_INSTRUCTIONS=0",
"LLVM_FORCE_VC_REVISION=",
"LLVM_HAS_LOGF128=0",
"LLVM_HAVE_OPT_VIEWER_MODULES=0",
diff --git a/llvm/utils/lit/lit/llvm/config.py b/llvm/utils/lit/lit/llvm/config.py
index 649636d..b04fb25 100644
--- a/llvm/utils/lit/lit/llvm/config.py
+++ b/llvm/utils/lit/lit/llvm/config.py
@@ -107,6 +107,8 @@ class LLVMConfig(object):
features.add("system-solaris")
elif platform.system() == "OS/390":
features.add("system-zos")
+ elif sys.platform == "cygwin":
+ features.add("system-cygwin")
# Native compilation: host arch == default triple arch
# Both of these values should probably be in every site config (e.g. as
diff --git a/mlir/cmake/modules/AddMLIR.cmake b/mlir/cmake/modules/AddMLIR.cmake
index ff4269e..14eefb5 100644
--- a/mlir/cmake/modules/AddMLIR.cmake
+++ b/mlir/cmake/modules/AddMLIR.cmake
@@ -388,6 +388,9 @@ function(add_mlir_library name)
if(TARGET ${name})
target_link_libraries(${name} INTERFACE ${LLVM_COMMON_LIBS})
+ if(ARG_INSTALL_WITH_TOOLCHAIN)
+ set_target_properties(${name} PROPERTIES MLIR_INSTALL_WITH_TOOLCHAIN TRUE)
+ endif()
if(NOT ARG_DISABLE_INSTALL)
add_mlir_library_install(${name})
endif()
@@ -617,28 +620,29 @@ endfunction(add_mlir_aggregate)
# This is usually done as part of add_mlir_library but is broken out for cases
# where non-standard library builds can be installed.
function(add_mlir_library_install name)
- if (NOT LLVM_INSTALL_TOOLCHAIN_ONLY)
- get_target_export_arg(${name} MLIR export_to_mlirtargets UMBRELLA mlir-libraries)
- install(TARGETS ${name}
- COMPONENT ${name}
- ${export_to_mlirtargets}
- LIBRARY DESTINATION lib${LLVM_LIBDIR_SUFFIX}
- ARCHIVE DESTINATION lib${LLVM_LIBDIR_SUFFIX}
- RUNTIME DESTINATION "${CMAKE_INSTALL_BINDIR}"
- # Note that CMake will create a directory like:
- # objects-${CMAKE_BUILD_TYPE}/obj.LibName
- # and put object files there.
- OBJECTS DESTINATION lib${LLVM_LIBDIR_SUFFIX}
- )
+ get_target_property(_install_with_toolchain ${name} MLIR_INSTALL_WITH_TOOLCHAIN)
+ if (NOT LLVM_INSTALL_TOOLCHAIN_ONLY OR _install_with_toolchain)
+ get_target_export_arg(${name} MLIR export_to_mlirtargets UMBRELLA mlir-libraries)
+ install(TARGETS ${name}
+ COMPONENT ${name}
+ ${export_to_mlirtargets}
+ LIBRARY DESTINATION lib${LLVM_LIBDIR_SUFFIX}
+ ARCHIVE DESTINATION lib${LLVM_LIBDIR_SUFFIX}
+ RUNTIME DESTINATION "${CMAKE_INSTALL_BINDIR}"
+ # Note that CMake will create a directory like:
+ # objects-${CMAKE_BUILD_TYPE}/obj.LibName
+ # and put object files there.
+ OBJECTS DESTINATION lib${LLVM_LIBDIR_SUFFIX}
+ )
- if (NOT LLVM_ENABLE_IDE)
- add_llvm_install_targets(install-${name}
- DEPENDS ${name}
- COMPONENT ${name})
- endif()
- set_property(GLOBAL APPEND PROPERTY MLIR_ALL_LIBS ${name})
+ if (NOT LLVM_ENABLE_IDE)
+ add_llvm_install_targets(install-${name}
+ DEPENDS ${name}
+ COMPONENT ${name})
+ endif()
+ set_property(GLOBAL APPEND PROPERTY MLIR_ALL_LIBS ${name})
+ set_property(GLOBAL APPEND PROPERTY MLIR_EXPORTS ${name})
endif()
- set_property(GLOBAL APPEND PROPERTY MLIR_EXPORTS ${name})
endfunction()
# Declare an mlir library which is part of the public C-API.
diff --git a/mlir/docs/DialectConversion.md b/mlir/docs/DialectConversion.md
index cf577ec..556e73c2 100644
--- a/mlir/docs/DialectConversion.md
+++ b/mlir/docs/DialectConversion.md
@@ -202,17 +202,62 @@ struct MyConversionPattern : public ConversionPattern {
#### Type Safety
-The types of the remapped operands provided to a conversion pattern must be of a
-type expected by the pattern. The expected types of a pattern are determined by
-a provided [TypeConverter](#type-converter). If no type converter is provided,
-the types of the remapped operands are expected to match the types of the
-original operands. If a type converter is provided, the types of the remapped
-operands are expected to be legal as determined by the converter. If the
-remapped operand types are not of an expected type, and a materialization to the
-expected type could not be performed, the pattern fails application before the
-`matchAndRewrite` hook is invoked. This ensures that patterns do not have to
-explicitly ensure type safety, or sanitize the types of the incoming remapped
-operands. More information on type conversion is detailed in the
+The types of the remapped operands provided to a conversion pattern (through
+the adaptor or `ArrayRef` of operands) depend on type conversion rules.
+
+If the pattern was initialized with a [type converter](#type-converter), the
+conversion driver passes values whose types match the legalized types of the
+operands of the matched operation as per the type converter. To that end, the
+conversion driver may insert target materializations to convert the most
+recently mapped values to the expected legalized types. The driver tries to
+reuse existing materializations on a best-effort basis, but this is not
+guaranteed by the infrastructure. If the operand types of the matched op could
+not be legalized, the pattern fails to apply before the `matchAndRewrite` hook
+is invoked.
+
+Example:
+```c++
+// Type converter that converts all FloatTypes to IntegerTypes.
+TypeConverter converter;
+converter.addConversion([](FloatType t) {
+ return IntegerType::get(t.getContext(), t.getWidth());
+});
+
+// Assuming that `MyConversionPattern` was initialized with `converter`.
+struct MyConversionPattern : public ConversionPattern {
+ virtual LogicalResult
+ matchAndRewrite(Operation *op, ArrayRef<Value> operands, /* ... */) const {
+// ^^^^^^^^
+// If `op` has a FloatType operand, the respective value in `operands`
+// is guaranteed to have the legalized IntegerType. If another pattern
+// previously replaced the operand SSA value with an SSA value of the
+// legalized type (via "replaceOp" or "applySignatureConversion"), you
+// will get that SSA value directly (unless the replacement value was
+// also replaced). Otherwise, you will get a materialization to the
+// legalized type.
+```
+
+If the pattern was initialized without a type converter, the conversion driver
+passes the most recently mapped values to the pattern, excluding any
+materializations. If a value with the same type as the original operand is
+desired, users can directly take the respective operand from the matched
+operation.
+
+Example: When initializing the pattern from the above example without a type
+converter, `operands` contains the most recent replacement values, regardless
+of their types.
+
+Note: When running without a type converter, materializations are intentionally
+excluded from the lookup process because their presence may depend on other
+patterns. Passing materializations would make the conversion infrastructure
+fragile and unpredictable. Moreover, there could be multiple materializations
+to different types. (This can be the case when multiple patterns are running
+with different type converters.) In such a case, it would be unclear which
+materialization to pass.
+
+The above rules ensure that patterns do not have to explicitly ensure type
+safety, or sanitize the types of the incoming remapped operands. More
+information on type conversion is detailed in the
[dedicated section](#type-conversion) below.
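To complement the example above, the following is a minimal, hedged sketch of the no-type-converter case. The pattern and op names are illustrative only; the snippet just shows how the remapped `operands` relate to the original operand types, and it assumes the matched op is a single-operand, single-result pass-through.

```c++
#include "mlir/Transforms/DialectConversion.h"

using namespace mlir;

// Sketch of a conversion pattern constructed *without* a type converter.
struct LowerMyOp : public ConversionPattern {
  LowerMyOp(MLIRContext *ctx)
      : ConversionPattern("mydialect.my_op", /*benefit=*/1, ctx) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    // `operands[0]` is the most recent replacement value and may have a
    // different type than the original operand; the original type is still
    // available on the matched op itself.
    Type originalType = op->getOperand(0).getType();
    (void)originalType;
    // Assumed pass-through semantics: forward the remapped operand.
    rewriter.replaceOp(op, operands.front());
    return success();
  }
};
```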
## Type Conversion
diff --git a/mlir/include/mlir/Dialect/AMDGPU/IR/AMDGPU.td b/mlir/include/mlir/Dialect/AMDGPU/IR/AMDGPU.td
index 92aacda..2c64693 100644
--- a/mlir/include/mlir/Dialect/AMDGPU/IR/AMDGPU.td
+++ b/mlir/include/mlir/Dialect/AMDGPU/IR/AMDGPU.td
@@ -907,7 +907,8 @@ def AMDGPU_GatherToLDSOp :
The elements gathered by the subgroup will be written contiguously in order of lane ID
starting at `$dst[$dstIndices]`. Byte-sized (ex. i8) or short-sized (ex. i16)
types will be zero-padded/extended to 32 bits before being written. 96-bit types
- (ex. vector<3xf32>) will be zero-padded to 128 bits before being written.
+ (ex. vector<3xf32>) will be zero-padded to 128 bits before being written. Only the
+ offsets held by lane 0 are used.
* `$transferType`: type of the data to be transferred by each thread. This is used to determine
the size of the data to be transferred and the number of threads in the subgroup.
The transfer type must be a scalar type or a vector type with a single element type.
diff --git a/mlir/include/mlir/Dialect/CommonFolders.h b/mlir/include/mlir/Dialect/CommonFolders.h
index b5a1242..1137651 100644
--- a/mlir/include/mlir/Dialect/CommonFolders.h
+++ b/mlir/include/mlir/Dialect/CommonFolders.h
@@ -15,10 +15,16 @@
#ifndef MLIR_DIALECT_COMMONFOLDERS_H
#define MLIR_DIALECT_COMMONFOLDERS_H
+#include "mlir/IR/Attributes.h"
+#include "mlir/IR/BuiltinAttributeInterfaces.h"
#include "mlir/IR/BuiltinAttributes.h"
-#include "mlir/IR/BuiltinTypes.h"
+#include "mlir/IR/BuiltinTypeInterfaces.h"
+#include "mlir/IR/Types.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
+
+#include <cassert>
+#include <cstddef>
#include <optional>
namespace mlir {
@@ -30,11 +36,13 @@ class PoisonAttr;
/// Uses `resultType` for the type of the returned attribute.
/// Optional PoisonAttr template argument allows to specify 'poison' attribute
/// which will be directly propagated to result.
-template <class AttrElementT,
+template <class AttrElementT, //
class ElementValueT = typename AttrElementT::ValueType,
class PoisonAttr = ub::PoisonAttr,
+ class ResultAttrElementT = AttrElementT,
+ class ResultElementValueT = typename ResultAttrElementT::ValueType,
class CalculationT = function_ref<
- std::optional<ElementValueT>(ElementValueT, ElementValueT)>>
+ std::optional<ResultElementValueT>(ElementValueT, ElementValueT)>>
Attribute constFoldBinaryOpConditional(ArrayRef<Attribute> operands,
Type resultType,
CalculationT &&calculate) {
@@ -65,7 +73,7 @@ Attribute constFoldBinaryOpConditional(ArrayRef<Attribute> operands,
if (!calRes)
return {};
- return AttrElementT::get(resultType, *calRes);
+ return ResultAttrElementT::get(resultType, *calRes);
}
if (isa<SplatElementsAttr>(operands[0]) &&
@@ -99,7 +107,7 @@ Attribute constFoldBinaryOpConditional(ArrayRef<Attribute> operands,
return {};
auto lhsIt = *maybeLhsIt;
auto rhsIt = *maybeRhsIt;
- SmallVector<ElementValueT, 4> elementResults;
+ SmallVector<ResultElementValueT, 4> elementResults;
elementResults.reserve(lhs.getNumElements());
for (size_t i = 0, e = lhs.getNumElements(); i < e; ++i, ++lhsIt, ++rhsIt) {
auto elementResult = calculate(*lhsIt, *rhsIt);
@@ -119,11 +127,13 @@ Attribute constFoldBinaryOpConditional(ArrayRef<Attribute> operands,
/// attribute.
/// Optional PoisonAttr template argument allows to specify 'poison' attribute
/// which will be directly propagated to result.
-template <class AttrElementT,
+template <class AttrElementT, //
class ElementValueT = typename AttrElementT::ValueType,
class PoisonAttr = ub::PoisonAttr,
+ class ResultAttrElementT = AttrElementT,
+ class ResultElementValueT = typename ResultAttrElementT::ValueType,
class CalculationT = function_ref<
- std::optional<ElementValueT>(ElementValueT, ElementValueT)>>
+ std::optional<ResultElementValueT>(ElementValueT, ElementValueT)>>
Attribute constFoldBinaryOpConditional(ArrayRef<Attribute> operands,
CalculationT &&calculate) {
assert(operands.size() == 2 && "binary op takes two operands");
@@ -139,64 +149,73 @@ Attribute constFoldBinaryOpConditional(ArrayRef<Attribute> operands,
return operands[1];
}
- auto getResultType = [](Attribute attr) -> Type {
+ auto getAttrType = [](Attribute attr) -> Type {
if (auto typed = dyn_cast_or_null<TypedAttr>(attr))
return typed.getType();
return {};
};
- Type lhsType = getResultType(operands[0]);
- Type rhsType = getResultType(operands[1]);
+ Type lhsType = getAttrType(operands[0]);
+ Type rhsType = getAttrType(operands[1]);
if (!lhsType || !rhsType)
return {};
if (lhsType != rhsType)
return {};
return constFoldBinaryOpConditional<AttrElementT, ElementValueT, PoisonAttr,
+ ResultAttrElementT, ResultElementValueT,
CalculationT>(
operands, lhsType, std::forward<CalculationT>(calculate));
}
template <class AttrElementT,
class ElementValueT = typename AttrElementT::ValueType,
- class PoisonAttr = void,
+ class PoisonAttr = void, //
+ class ResultAttrElementT = AttrElementT,
+ class ResultElementValueT = typename ResultAttrElementT::ValueType,
class CalculationT =
- function_ref<ElementValueT(ElementValueT, ElementValueT)>>
+ function_ref<ResultElementValueT(ElementValueT, ElementValueT)>>
Attribute constFoldBinaryOp(ArrayRef<Attribute> operands, Type resultType,
CalculationT &&calculate) {
- return constFoldBinaryOpConditional<AttrElementT, ElementValueT, PoisonAttr>(
+ return constFoldBinaryOpConditional<AttrElementT, ElementValueT, PoisonAttr,
+ ResultAttrElementT>(
operands, resultType,
- [&](ElementValueT a, ElementValueT b) -> std::optional<ElementValueT> {
- return calculate(a, b);
- });
+ [&](ElementValueT a, ElementValueT b)
+ -> std::optional<ResultElementValueT> { return calculate(a, b); });
}
-template <class AttrElementT,
+template <class AttrElementT, //
class ElementValueT = typename AttrElementT::ValueType,
class PoisonAttr = ub::PoisonAttr,
+ class ResultAttrElementT = AttrElementT,
+ class ResultElementValueT = typename ResultAttrElementT::ValueType,
class CalculationT =
- function_ref<ElementValueT(ElementValueT, ElementValueT)>>
+ function_ref<ResultElementValueT(ElementValueT, ElementValueT)>>
Attribute constFoldBinaryOp(ArrayRef<Attribute> operands,
CalculationT &&calculate) {
- return constFoldBinaryOpConditional<AttrElementT, ElementValueT, PoisonAttr>(
+ return constFoldBinaryOpConditional<AttrElementT, ElementValueT, PoisonAttr,
+ ResultAttrElementT>(
operands,
- [&](ElementValueT a, ElementValueT b) -> std::optional<ElementValueT> {
- return calculate(a, b);
- });
+ [&](ElementValueT a, ElementValueT b)
+ -> std::optional<ResultElementValueT> { return calculate(a, b); });
}
/// Performs constant folding `calculate` with element-wise behavior on the one
/// attributes in `operands` and returns the result if possible.
+/// Uses `resultType` for the type of the returned attribute.
/// Optional PoisonAttr template argument allows to specify 'poison' attribute
/// which will be directly propagated to result.
-template <class AttrElementT,
+template <class AttrElementT, //
class ElementValueT = typename AttrElementT::ValueType,
class PoisonAttr = ub::PoisonAttr,
+ class ResultAttrElementT = AttrElementT,
+ class ResultElementValueT = typename ResultAttrElementT::ValueType,
class CalculationT =
- function_ref<std::optional<ElementValueT>(ElementValueT)>>
+ function_ref<std::optional<ResultElementValueT>(ElementValueT)>>
Attribute constFoldUnaryOpConditional(ArrayRef<Attribute> operands,
+ Type resultType,
CalculationT &&calculate) {
- if (!llvm::getSingleElement(operands))
+ if (!resultType || !llvm::getSingleElement(operands))
return {};
static_assert(
@@ -214,7 +233,7 @@ Attribute constFoldUnaryOpConditional(ArrayRef<Attribute> operands,
auto res = calculate(op.getValue());
if (!res)
return {};
- return AttrElementT::get(op.getType(), *res);
+ return ResultAttrElementT::get(resultType, *res);
}
if (isa<SplatElementsAttr>(operands[0])) {
// Both operands are splats so we can avoid expanding the values out and
@@ -224,7 +243,7 @@ Attribute constFoldUnaryOpConditional(ArrayRef<Attribute> operands,
auto elementResult = calculate(op.getSplatValue<ElementValueT>());
if (!elementResult)
return {};
- return DenseElementsAttr::get(op.getType(), *elementResult);
+ return DenseElementsAttr::get(cast<ShapedType>(resultType), *elementResult);
} else if (isa<ElementsAttr>(operands[0])) {
// Operands are ElementsAttr-derived; perform an element-wise fold by
// expanding the values.
@@ -234,7 +253,7 @@ Attribute constFoldUnaryOpConditional(ArrayRef<Attribute> operands,
if (!maybeOpIt)
return {};
auto opIt = *maybeOpIt;
- SmallVector<ElementValueT> elementResults;
+ SmallVector<ResultElementValueT> elementResults;
elementResults.reserve(op.getNumElements());
for (size_t i = 0, e = op.getNumElements(); i < e; ++i, ++opIt) {
auto elementResult = calculate(*opIt);
@@ -242,19 +261,81 @@ Attribute constFoldUnaryOpConditional(ArrayRef<Attribute> operands,
return {};
elementResults.push_back(*elementResult);
}
- return DenseElementsAttr::get(op.getShapedType(), elementResults);
+ return DenseElementsAttr::get(cast<ShapedType>(resultType), elementResults);
}
return {};
}
-template <class AttrElementT,
+/// Performs constant folding `calculate` with element-wise behavior on the
+/// single attribute in `operands` and returns the result if possible.
+/// Uses the operand element type for the element type of the returned
+/// attribute.
+/// Optional PoisonAttr template argument allows to specify 'poison' attribute
+/// which will be directly propagated to result.
+template <class AttrElementT, //
+ class ElementValueT = typename AttrElementT::ValueType,
+ class PoisonAttr = ub::PoisonAttr,
+ class ResultAttrElementT = AttrElementT,
+ class ResultElementValueT = typename ResultAttrElementT::ValueType,
+ class CalculationT =
+ function_ref<std::optional<ResultElementValueT>(ElementValueT)>>
+Attribute constFoldUnaryOpConditional(ArrayRef<Attribute> operands,
+ CalculationT &&calculate) {
+ if (!llvm::getSingleElement(operands))
+ return {};
+
+ static_assert(
+ std::is_void_v<PoisonAttr> || !llvm::is_incomplete_v<PoisonAttr>,
+ "PoisonAttr is undefined, either add a dependency on UB dialect or pass "
+ "void as template argument to opt-out from poison semantics.");
+ if constexpr (!std::is_void_v<PoisonAttr>) {
+ if (isa<PoisonAttr>(operands[0]))
+ return operands[0];
+ }
+
+ auto getAttrType = [](Attribute attr) -> Type {
+ if (auto typed = dyn_cast_or_null<TypedAttr>(attr))
+ return typed.getType();
+ return {};
+ };
+
+ Type operandType = getAttrType(operands[0]);
+ if (!operandType)
+ return {};
+
+ return constFoldUnaryOpConditional<AttrElementT, ElementValueT, PoisonAttr,
+ ResultAttrElementT, ResultElementValueT,
+ CalculationT>(
+ operands, operandType, std::forward<CalculationT>(calculate));
+}
+
+template <class AttrElementT, //
+ class ElementValueT = typename AttrElementT::ValueType,
+ class PoisonAttr = ub::PoisonAttr,
+ class ResultAttrElementT = AttrElementT,
+ class ResultElementValueT = typename ResultAttrElementT::ValueType,
+ class CalculationT = function_ref<ResultElementValueT(ElementValueT)>>
+Attribute constFoldUnaryOp(ArrayRef<Attribute> operands, Type resultType,
+ CalculationT &&calculate) {
+ return constFoldUnaryOpConditional<AttrElementT, ElementValueT, PoisonAttr,
+ ResultAttrElementT>(
+ operands, resultType,
+ [&](ElementValueT a) -> std::optional<ResultElementValueT> {
+ return calculate(a);
+ });
+}
+
+template <class AttrElementT, //
class ElementValueT = typename AttrElementT::ValueType,
class PoisonAttr = ub::PoisonAttr,
- class CalculationT = function_ref<ElementValueT(ElementValueT)>>
+ class ResultAttrElementT = AttrElementT,
+ class ResultElementValueT = typename ResultAttrElementT::ValueType,
+ class CalculationT = function_ref<ResultElementValueT(ElementValueT)>>
Attribute constFoldUnaryOp(ArrayRef<Attribute> operands,
CalculationT &&calculate) {
- return constFoldUnaryOpConditional<AttrElementT, ElementValueT, PoisonAttr>(
- operands, [&](ElementValueT a) -> std::optional<ElementValueT> {
+ return constFoldUnaryOpConditional<AttrElementT, ElementValueT, PoisonAttr,
+ ResultAttrElementT>(
+ operands, [&](ElementValueT a) -> std::optional<ResultElementValueT> {
return calculate(a);
});
}
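As a hedged, self-contained sketch (not part of this patch) of how the new `Result*` template parameters can be used, the snippet below folds a hypothetical float-to-signed-integer cast, where the result attribute element type (`IntegerAttr`/`APInt`) differs from the operand element type (`FloatAttr`/`APFloat`). It assumes `resultType` is (or is a shaped type of) a 32-bit integer.

```c++
#include "mlir/Dialect/CommonFolders.h"
#include "llvm/ADT/APSInt.h"

using namespace mlir;

static Attribute foldFloatToIntCast(ArrayRef<Attribute> operands,
                                    Type resultType) {
  // PoisonAttr is set to void to opt out of poison semantics and avoid a
  // dependency on the UB dialect in this sketch.
  return constFoldUnaryOp<FloatAttr, llvm::APFloat, /*PoisonAttr=*/void,
                          IntegerAttr, llvm::APInt>(
      operands, resultType, [](const llvm::APFloat &value) -> llvm::APInt {
        llvm::APSInt result(/*BitWidth=*/32, /*isUnsigned=*/false);
        bool isExact = false;
        value.convertToInteger(result, llvm::APFloat::rmTowardZero, &isExact);
        return llvm::APInt(result);
      });
}
```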
diff --git a/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td b/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td
index 2ed7d38..f946bb7 100644
--- a/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td
+++ b/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td
@@ -804,8 +804,8 @@ def GPU_LaunchOp : GPU_Op<"launch", [
Optional<Index>:$clusterSizeY,
Optional<Index>:$clusterSizeZ,
Optional<I32>:$dynamicSharedMemorySize,
- OptionalAttr<SymbolRefAttr>:$kernelFunc,
- OptionalAttr<SymbolRefAttr>:$kernelModule)>,
+ OptionalAttr<FlatSymbolRefAttr>:$module,
+ OptionalAttr<FlatSymbolRefAttr>:$function)>,
Results<(outs Optional<GPU_AsyncToken>:$asyncToken)> {
let summary = "GPU kernel launch operation";
@@ -839,7 +839,7 @@ def GPU_LaunchOp : GPU_Op<"launch", [
- a variadic number of Workgroup memory attributions.
- a variadic number of Private memory attributions.
- The `kernelFunc` and `kernelModule` attributes are optional and specifies
+ The `function` and `module` attributes are optional and specify
the kernel name and a module in which the kernel should be outlined.
Syntax:
@@ -850,6 +850,8 @@ def GPU_LaunchOp : GPU_Op<"launch", [
`blocks` `(` ssa-id-list `)` `in` ssa-reassignment
`threads` `(` ssa-id-list `)` `in` ssa-reassignment
(dynamic_shared_memory_size ssa-use)?
+ (`module(` symbol-ref-id `)`)?
+ (`function(` symbol-ref-id `)`)?
memory-attribution
region attr-dict?
ssa-reassignment ::= `(` ssa-id `=` ssa-use (`,` ssa-id `=` ssa-use)* `)`
@@ -907,6 +909,14 @@ def GPU_LaunchOp : GPU_Op<"launch", [
// sizes are immediately usable inside body region.
"some_op"(%cx, %bx, %tx) : (index, index, index) -> ()
}
+
+ // Launch with module and function attributes.
+ gpu.launch blocks(%bx, %by, %bz) in (%sz_bx = %0, %sz_by = %1, %sz_bz = %2)
+ threads(%tx, %ty, %tz) in (%sz_tx = %3, %sz_ty = %4, %sz_tz = %5)
+ module(@kernel_module) function(@kernel_func) {
+ "some_op"(%bx, %tx) : (index, index) -> ()
+ %42 = load %val1[%bx] : memref<?xf32, 1>
+ }
```
Rationale: using operation/block arguments gives analyses a clear way of
@@ -931,7 +941,9 @@ def GPU_LaunchOp : GPU_Op<"launch", [
CArg<"TypeRange", "{}">:$privateAttributions,
CArg<"Value", "nullptr">:$clusterSizeX,
CArg<"Value", "nullptr">:$clusterSizeY,
- CArg<"Value", "nullptr">:$clusterSizeZ)>
+ CArg<"Value", "nullptr">:$clusterSizeZ,
+ CArg<"FlatSymbolRefAttr", "nullptr">:$module,
+ CArg<"FlatSymbolRefAttr", "nullptr">:$function)>,
];
let extraClassDeclaration = [{
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td
index d38298f..76b08e6 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td
@@ -348,15 +348,11 @@ def LLVM_PtrMaskOp
// Memory marker intrinsics.
//
-/// Base operation for lifetime markers. The LLVM intrinsics require the size
-/// operand to be an immediate. In MLIR it is encoded as an attribute.
-class LLVM_LifetimeBaseOp<string opName> : LLVM_ZeroResultIntrOp<opName, [1],
- [DeclareOpInterfaceMethods<PromotableOpInterface>],
- /*requiresAccessGroup=*/0, /*requiresAliasAnalysis=*/0,
- /*requiresArgAndResultAttrs=*/0, /*requiresOpBundles=*/0,
- /*immArgPositions=*/[0], /*immArgAttrNames=*/["size"]> {
- let arguments = (ins I64Attr:$size, LLVM_AnyPointer:$ptr);
- let assemblyFormat = "$size `,` $ptr attr-dict `:` qualified(type($ptr))";
+/// Base operation for lifetime markers.
+class LLVM_LifetimeBaseOp<string opName> : LLVM_ZeroResultIntrOp<opName, [0],
+ [DeclareOpInterfaceMethods<PromotableOpInterface>]> {
+ let arguments = (ins LLVM_AnyPointer:$ptr);
+ let assemblyFormat = "$ptr attr-dict `:` qualified(type($ptr))";
}
def LLVM_LifetimeStartOp : LLVM_LifetimeBaseOp<"lifetime.start">;
diff --git a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
index 30df3b7..8d50726 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
@@ -402,6 +402,44 @@ def NVVM_ReduxOp :
}
//===----------------------------------------------------------------------===//
+// NVVM Performance Monitor events
+//===----------------------------------------------------------------------===//
+
+def NVVM_PMEventOp : NVVM_PTXBuilder_Op<"pmevent">,
+ Arguments<(ins OptionalAttr<I16Attr>:$maskedEventId,
+ OptionalAttr<I32Attr>:$eventId)> {
+ let summary = "Trigger one or more Performance Monitor events.";
+
+ let description = [{
+ Triggers one or more of a fixed number of performance monitor events, with
+ event index or mask specified by immediate operand.
+
+ Without `mask` it triggers a single performance monitor event indexed by
+ immediate operand a, in the range 0..15.
+
+ With `mask` it triggers one or more of the performance monitor events. Each
+ bit in the 16-bit immediate operand controls an event.
+
+ [For more information, see PTX ISA](https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#miscellaneous-instructions-pmevent)
+ }];
+
+ string llvmBuilder = [{
+ llvm::Value *mId = builder.getInt16(* $maskedEventId);
+ createIntrinsicCall(builder, llvm::Intrinsic::nvvm_pm_event_mask, {mId});
+ }];
+
+ let assemblyFormat = "attr-dict (`id` `=` $eventId^)? (`mask` `=` $maskedEventId^)?";
+
+ let extraClassDeclaration = [{
+ bool hasIntrinsic() { return !getEventId(); }
+ }];
+ let extraClassDefinition = [{
+ std::string $cppClass::getPtx() { return std::string("pmevent %0;"); }
+ }];
+ let hasVerifier = 1;
+}
+
+//===----------------------------------------------------------------------===//
// NVVM Split arrive/wait barrier
//===----------------------------------------------------------------------===//
diff --git a/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td b/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td
index a2354e2..90da243 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td
@@ -189,6 +189,20 @@ def ROCDL_BallotOp :
let assemblyFormat = "$pred attr-dict `:` type($res)";
}
+def ROCDL_ReadfirstlaneOp : ROCDL_IntrOp<"readfirstlane", [], [0], [AllTypesMatch<["res", "src"]>], 1>,
+ Arguments<(ins LLVM_Type:$src)> {
+ let results = (outs LLVM_Type:$res);
+ let summary = "Get the value in the first active lane.";
+
+ let description = [{
+ Returns the value in the lowest active lane of the input operand.
+ }];
+
+ let assemblyFormat = [{
+ $src attr-dict `:` type($res)
+ }];
+}
+
def ROCDL_ReadlaneOp : ROCDL_IntrOp<"readlane", [], [0], [AllTypesMatch<["res", "src0"]>], 1>,
Arguments<(ins LLVM_Type:$src0,
I32:$src1)> {
@@ -201,7 +215,7 @@ def ROCDL_ReadlaneOp : ROCDL_IntrOp<"readlane", [], [0], [AllTypesMatch<["res",
let assemblyFormat = [{
$src0 `,` $src1 attr-dict `:` `(` type($src0) `,` type($src1) `)` `->` type($res)
- }];
+ }];
}
//===----------------------------------------------------------------------===//
diff --git a/mlir/include/mlir/Dialect/Linalg/IR/Linalg.h b/mlir/include/mlir/Dialect/Linalg/IR/Linalg.h
index 62c04bb..eb4e381 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/Linalg.h
+++ b/mlir/include/mlir/Dialect/Linalg/IR/Linalg.h
@@ -145,8 +145,7 @@ std::pair<int64_t, int64_t> getFmrFromWinogradConv2DFmr(WinogradConv2DFmr fmr);
#define GET_OP_CLASSES
#include "mlir/Dialect/Linalg/IR/LinalgRelayoutOps.h.inc"
-namespace mlir {
-namespace linalg {
+namespace mlir::linalg {
/// Returns the outer shape in the packed domain before applying the
/// transposition.
@@ -155,7 +154,194 @@ template <typename OpTy,
std::is_same_v<OpTy, linalg::UnPackOp>>>
SmallVector<int64_t> getPackedOuterShapeWithoutTransposition(OpTy packOrUnPack);
-} // namespace linalg
-} // namespace mlir
+/// Specialization of `linalg.matmul` op that has a transpose map on A
+class MatmulTransposeAOp : public MatmulOp {
+ /// Create an affine map for a transpose-A matmul. Used only in the builders.
+ static SmallVector<AffineMap> getDefaultIndexingMaps(OpBuilder &builder);
+
+public:
+ using MatmulOp::MatmulOp;
+ static ::mlir::TypeID resolveTypeID() { return TypeID::get<MatmulOp>(); }
+
+ /// Build a transpose A matmul.
+ static void build(OpBuilder &builder, OperationState &result,
+ ValueRange inputs, ValueRange outputs,
+ ArrayRef<NamedAttribute> attributes = {});
+
+ static MatmulTransposeAOp create(OpBuilder &builder, Location location,
+ ValueRange inputs, ValueRange outputs,
+ ArrayRef<NamedAttribute> attributes = {});
+
+ /// Build a transpose A matmul with a specific result type.
+ static void build(OpBuilder &builder, OperationState &result,
+ TypeRange resultTensorTypes, ValueRange inputs,
+ ValueRange outputs,
+ ArrayRef<NamedAttribute> attributes = {});
+
+ static MatmulTransposeAOp create(OpBuilder &builder, Location location,
+ TypeRange resultTensorTypes,
+ ValueRange inputs, ValueRange outputs,
+ ArrayRef<NamedAttribute> attributes = {});
+
+ /// Build a transpose A matmul with a specific result type and a cast type.
+ static void build(OpBuilder &builder, OperationState &result,
+ TypeRange resultTensorTypes, ValueRange inputs,
+ ValueRange outputs, Attribute cast,
+ ArrayRef<NamedAttribute> attributes = {});
+
+ static MatmulTransposeAOp create(OpBuilder &builder, Location location,
+ TypeRange resultTensorTypes,
+ ValueRange inputs, ValueRange outputs,
+ Attribute cast,
+ ArrayRef<NamedAttribute> attributes = {});
+
+ /// Checks if the affine map is the expected one for this operation
+ static bool isDefaultIndexingMaps(Attribute attr);
+
+ static bool classof(Operation *op);
+};
+
+/// Specialization of `linalg.matmul` op that has a transpose map on B
+class MatmulTransposeBOp : public MatmulOp {
+ /// Create an affine map for a transpose-B matmul. Used only in the builders.
+ static SmallVector<AffineMap> getDefaultIndexingMaps(OpBuilder &builder);
+
+public:
+ using MatmulOp::MatmulOp;
+ static ::mlir::TypeID resolveTypeID() { return TypeID::get<MatmulOp>(); }
+
+ /// Build a transpose B matmul.
+ static void build(OpBuilder &builder, OperationState &result,
+ ValueRange inputs, ValueRange outputs,
+ ArrayRef<NamedAttribute> attributes = {});
+
+ static MatmulTransposeBOp create(OpBuilder &builder, Location location,
+ ValueRange inputs, ValueRange outputs,
+ ArrayRef<NamedAttribute> attributes = {});
+
+ /// Build a transpose B matmul with a specific result type.
+ static void build(OpBuilder &builder, OperationState &result,
+ TypeRange resultTensorTypes, ValueRange inputs,
+ ValueRange outputs,
+ ArrayRef<NamedAttribute> attributes = {});
+
+ static MatmulTransposeBOp create(OpBuilder &builder, Location location,
+ TypeRange resultTensorTypes,
+ ValueRange inputs, ValueRange outputs,
+ ArrayRef<NamedAttribute> attributes = {});
+
+ /// Build a transpose B matmul with a specific result type and a cast type.
+ static void build(OpBuilder &builder, OperationState &result,
+ TypeRange resultTensorTypes, ValueRange inputs,
+ ValueRange outputs, Attribute cast,
+ ArrayRef<NamedAttribute> attributes = {});
+
+ static MatmulTransposeBOp create(OpBuilder &builder, Location location,
+ TypeRange resultTensorTypes,
+ ValueRange inputs, ValueRange outputs,
+ Attribute cast,
+ ArrayRef<NamedAttribute> attributes = {});
+
+ /// Checks if the affine map is the expected one for this operation
+ static bool isDefaultIndexingMaps(Attribute attr);
+
+ static bool classof(Operation *op);
+};
+
+/// Specialization of `linalg.batch_matmul` op that has a transpose map on A
+class BatchMatmulTransposeAOp : public BatchMatmulOp {
+ /// Create an affine map for a transpose-A batch_matmul. Used only in the
+ /// builders.
+ static SmallVector<AffineMap> getDefaultIndexingMaps(OpBuilder &builder);
+
+public:
+ using BatchMatmulOp::BatchMatmulOp;
+ static ::mlir::TypeID resolveTypeID() { return TypeID::get<BatchMatmulOp>(); }
+
+ /// Build a transpose A matmul.
+ static void build(OpBuilder &builder, OperationState &result,
+ ValueRange inputs, ValueRange outputs,
+ ArrayRef<NamedAttribute> attributes = {});
+
+ static BatchMatmulTransposeAOp
+ create(OpBuilder &builder, Location location, ValueRange inputs,
+ ValueRange outputs, ArrayRef<NamedAttribute> attributes = {});
+
+ /// Build a transpose A matmul with a specific result type.
+ static void build(OpBuilder &builder, OperationState &result,
+ TypeRange resultTensorTypes, ValueRange inputs,
+ ValueRange outputs,
+ ArrayRef<NamedAttribute> attributes = {});
+
+ static BatchMatmulTransposeAOp
+ create(OpBuilder &builder, Location location, TypeRange resultTensorTypes,
+ ValueRange inputs, ValueRange outputs,
+ ArrayRef<NamedAttribute> attributes = {});
+
+ /// Build a transpose A matmul with a specific result type and a cast type.
+ static void build(OpBuilder &builder, OperationState &result,
+ TypeRange resultTensorTypes, ValueRange inputs,
+ ValueRange outputs, Attribute cast,
+ ArrayRef<NamedAttribute> attributes = {});
+
+ static BatchMatmulTransposeAOp
+ create(OpBuilder &builder, Location location, TypeRange resultTensorTypes,
+ ValueRange inputs, ValueRange outputs, Attribute cast,
+ ArrayRef<NamedAttribute> attributes = {});
+
+ /// Checks if the affine map is the expected one for this operation
+ static bool isDefaultIndexingMaps(Attribute attr);
+
+ static bool classof(Operation *op);
+};
+
+/// Specialization of `linalg.batch_matmul` op that has a transpose map on B
+class BatchMatmulTransposeBOp : public BatchMatmulOp {
+ /// Create an affine map for a transpose-B batch_matmul. Used only in the
+ /// builders.
+ static SmallVector<AffineMap> getDefaultIndexingMaps(OpBuilder &builder);
+
+public:
+ using BatchMatmulOp::BatchMatmulOp;
+ static ::mlir::TypeID resolveTypeID() { return TypeID::get<BatchMatmulOp>(); }
+
+ /// Build a transpose B matmul.
+ static void build(OpBuilder &builder, OperationState &result,
+ ValueRange inputs, ValueRange outputs,
+ ArrayRef<NamedAttribute> attributes = {});
+
+ static BatchMatmulTransposeBOp
+ create(OpBuilder &builder, Location location, ValueRange inputs,
+ ValueRange outputs, ArrayRef<NamedAttribute> attributes = {});
+
+ /// Build a transpose B matmul with a specific result type.
+ static void build(OpBuilder &builder, OperationState &result,
+ TypeRange resultTensorTypes, ValueRange inputs,
+ ValueRange outputs,
+ ArrayRef<NamedAttribute> attributes = {});
+
+ static BatchMatmulTransposeBOp
+ create(OpBuilder &builder, Location location, TypeRange resultTensorTypes,
+ ValueRange inputs, ValueRange outputs,
+ ArrayRef<NamedAttribute> attributes = {});
+
+ /// Build a transpose B matmul with a specific result type and a cast type.
+ static void build(OpBuilder &builder, OperationState &result,
+ TypeRange resultTensorTypes, ValueRange inputs,
+ ValueRange outputs, Attribute cast,
+ ArrayRef<NamedAttribute> attributes = {});
+
+ static BatchMatmulTransposeBOp
+ create(OpBuilder &builder, Location location, TypeRange resultTensorTypes,
+ ValueRange inputs, ValueRange outputs, Attribute cast,
+ ArrayRef<NamedAttribute> attributes = {});
+
+ /// Checks if the affine map is the expected one for this operation
+ static bool isDefaultIndexingMaps(Attribute attr);
+
+ static bool classof(Operation *op);
+};
+
+} // namespace mlir::linalg
#endif // MLIR_DIALECT_LINALG_IR_LINALG_H
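For illustration, here is a hedged sketch (not part of this patch) of constructing one of the specializations declared above via its new `create` method; the builder, location, and SSA values are assumed to be supplied by the caller.

```c++
#include "mlir/Dialect/Linalg/IR/Linalg.h"

static mlir::linalg::MatmulTransposeAOp
createTransposeAMatmul(mlir::OpBuilder &builder, mlir::Location loc,
                       mlir::Value lhs, mlir::Value rhs, mlir::Value acc) {
  // Uses the inferred-result-type overload; attributes are left at defaults.
  return mlir::linalg::MatmulTransposeAOp::create(
      builder, loc, mlir::ValueRange{lhs, rhs}, mlir::ValueRange{acc});
}
```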
diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td
index ba73cfb..9f1e88a0 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td
@@ -474,7 +474,7 @@ def LinalgStructuredInterface
int64_t resultIndex =
opOperand->getOperandNumber() - $_op.getNumDpsInputs();
assert(resultIndex >= 0 &&
- resultIndex < this->getOperation()->getNumResults());
+ resultIndex < $_op.getNumDpsInits());
Operation *yieldOp = getBlock()->getTerminator();
return &yieldOp->getOpOperand(resultIndex);
}]
diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgNamedStructuredOps.yaml b/mlir/include/mlir/Dialect/Linalg/IR/LinalgNamedStructuredOps.yaml
index 3637147..9aae1b8 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgNamedStructuredOps.yaml
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgNamedStructuredOps.yaml
@@ -1056,152 +1056,6 @@ structured_op: !LinalgStructuredOpConfig
scalar_arg: BZp
--- !LinalgOpConfig
metadata: !LinalgOpMetadata
- name: matmul_transpose_a
- cpp_class_name: MatmulTransposeAOp
- doc: |-
- Performs a matrix multiplication of two 2D inputs with lhs operand
- transposed.
-
- Numeric casting is performed on the operands to the inner multiply, promoting
- them to the same data type as the accumulator/output.
- implements:
- - LinalgContractionOpInterface
-structured_op: !LinalgStructuredOpConfig
- args:
- - !LinalgOperandDefConfig
- name: A
- kind: input_tensor
- type_var: T1
- shape_map: affine_map<()[s0, s1, s2] -> (s0, s1)>
- - !LinalgOperandDefConfig
- name: B
- kind: input_tensor
- type_var: T2
- shape_map: affine_map<()[s0, s1, s2] -> (s0, s2)>
- - !LinalgOperandDefConfig
- name: C
- kind: output_tensor
- type_var: U
- shape_map: affine_map<()[s0, s1, s2] -> (s2, s1)>
- - !LinalgOperandDefConfig
- name: cast
- kind: type_fn_attr
- default_fn: cast_signed
- indexing_maps: !LinalgIndexingMapsConfig
- static_indexing_maps:
- - affine_map<(d0, d1, d2)[s0, s1, s2] -> (d2, d0)>
- - affine_map<(d0, d1, d2)[s0, s1, s2] -> (d2, d1)>
- - affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0, d1)>
- iterator_types:
- - parallel
- - parallel
- - reduction
- assignments:
- - !ScalarAssign
- arg: C
- value: !ScalarExpression
- scalar_fn:
- kind: binary
- fn_name: add
- operands:
- - !ScalarExpression
- scalar_arg: C
- - !ScalarExpression
- scalar_fn:
- kind: binary
- fn_name: mul
- operands:
- - !ScalarExpression
- scalar_fn:
- kind: type
- attr_name: cast
- type_var: U
- operands:
- - !ScalarExpression
- scalar_arg: A
- - !ScalarExpression
- scalar_fn:
- kind: type
- attr_name: cast
- type_var: U
- operands:
- - !ScalarExpression
- scalar_arg: B
---- !LinalgOpConfig
-metadata: !LinalgOpMetadata
- name: matmul_transpose_b
- cpp_class_name: MatmulTransposeBOp
- doc: |-
- Performs a matrix multiplication of two 2D inputs with rhs operand
- transposed.
-
- Numeric casting is performed on the operands to the inner multiply, promoting
- them to the same data type as the accumulator/output.
- implements:
- - LinalgContractionOpInterface
-structured_op: !LinalgStructuredOpConfig
- args:
- - !LinalgOperandDefConfig
- name: A
- kind: input_tensor
- type_var: T1
- shape_map: affine_map<()[s0, s1, s2] -> (s0, s1)>
- - !LinalgOperandDefConfig
- name: B
- kind: input_tensor
- type_var: T2
- shape_map: affine_map<()[s0, s1, s2] -> (s2, s1)>
- - !LinalgOperandDefConfig
- name: C
- kind: output_tensor
- type_var: U
- shape_map: affine_map<()[s0, s1, s2] -> (s0, s2)>
- - !LinalgOperandDefConfig
- name: cast
- kind: type_fn_attr
- default_fn: cast_signed
- indexing_maps: !LinalgIndexingMapsConfig
- static_indexing_maps:
- - affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0, d2)>
- - affine_map<(d0, d1, d2)[s0, s1, s2] -> (d1, d2)>
- - affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0, d1)>
- iterator_types:
- - parallel
- - parallel
- - reduction
- assignments:
- - !ScalarAssign
- arg: C
- value: !ScalarExpression
- scalar_fn:
- kind: binary
- fn_name: add
- operands:
- - !ScalarExpression
- scalar_arg: C
- - !ScalarExpression
- scalar_fn:
- kind: binary
- fn_name: mul
- operands:
- - !ScalarExpression
- scalar_fn:
- kind: type
- attr_name: cast
- type_var: U
- operands:
- - !ScalarExpression
- scalar_arg: A
- - !ScalarExpression
- scalar_fn:
- kind: type
- attr_name: cast
- type_var: U
- operands:
- - !ScalarExpression
- scalar_arg: B
---- !LinalgOpConfig
-metadata: !LinalgOpMetadata
name: mmt4d
cpp_class_name: Mmt4DOp
doc: |-
@@ -1359,146 +1213,6 @@ structured_op: !LinalgStructuredOpConfig
scalar_arg: rhs
--- !LinalgOpConfig
metadata: !LinalgOpMetadata
- name: batch_matmul_transpose_a
- cpp_class_name: BatchMatmulTransposeAOp
- doc: |-
- Performs a batched matrix multiplication of two 3D inputs where lhs operand
- has its non-batch dimensions transposed.
-
- Numeric casting is performed on the operands to the inner multiply, promoting
- them to the same data type as the accumulator/output.
- implements:
- - LinalgContractionOpInterface
-structured_op: !LinalgStructuredOpConfig
- args:
- - !LinalgOperandDefConfig
- name: A
- kind: input_tensor
- type_var: T1
- shape_map: affine_map<()[s0, s1, s2, s3] -> (s0, s1, s2)>
- - !LinalgOperandDefConfig
- name: B
- kind: input_tensor
- type_var: T2
- shape_map: affine_map<()[s0, s1, s2, s3] -> (s0, s1, s3)>
- - !LinalgOperandDefConfig
- name: C
- kind: output_tensor
- type_var: U
- shape_map: affine_map<()[s0, s1, s2, s3] -> (s0, s2, s3)>
- indexing_maps: !LinalgIndexingMapsConfig
- static_indexing_maps:
- - affine_map<(d0, d1, d2, d3)[s0, s1, s2, s3] -> (d0, d3, d1)>
- - affine_map<(d0, d1, d2, d3)[s0, s1, s2, s3] -> (d0, d3, d2)>
- - affine_map<(d0, d1, d2, d3)[s0, s1, s2, s3] -> (d0, d1, d2)>
- iterator_types:
- - parallel
- - parallel
- - parallel
- - reduction
- assignments:
- - !ScalarAssign
- arg: C
- value: !ScalarExpression
- scalar_fn:
- kind: binary
- fn_name: add
- operands:
- - !ScalarExpression
- scalar_arg: C
- - !ScalarExpression
- scalar_fn:
- kind: binary
- fn_name: mul
- operands:
- - !ScalarExpression
- scalar_fn:
- kind: type
- fn_name: cast_signed
- type_var: U
- operands:
- - !ScalarExpression
- scalar_arg: A
- - !ScalarExpression
- scalar_fn:
- kind: type
- fn_name: cast_signed
- type_var: U
- operands:
- - !ScalarExpression
- scalar_arg: B
---- !LinalgOpConfig
-metadata: !LinalgOpMetadata
- name: batch_matmul_transpose_b
- cpp_class_name: BatchMatmulTransposeBOp
- doc: |-
- Performs a batched matrix multiplication of two 3D inputs where rhs operand
- has its non-batch dimensions transposed.
-
- Numeric casting is performed on the operands to the inner multiply, promoting
- them to the same data type as the accumulator/output.
- implements:
- - LinalgContractionOpInterface
-structured_op: !LinalgStructuredOpConfig
- args:
- - !LinalgOperandDefConfig
- name: A
- kind: input_tensor
- type_var: T1
- shape_map: affine_map<()[s0, s1, s2, s3] -> (s0, s1, s2)>
- - !LinalgOperandDefConfig
- name: B
- kind: input_tensor
- type_var: T2
- shape_map: affine_map<()[s0, s1, s2, s3] -> (s0, s3, s2)>
- - !LinalgOperandDefConfig
- name: C
- kind: output_tensor
- type_var: U
- shape_map: affine_map<()[s0, s1, s2, s3] -> (s0, s1, s3)>
- indexing_maps: !LinalgIndexingMapsConfig
- static_indexing_maps:
- - affine_map<(d0, d1, d2, d3)[s0, s1, s2, s3] -> (d0, d1, d3)>
- - affine_map<(d0, d1, d2, d3)[s0, s1, s2, s3] -> (d0, d2, d3)>
- - affine_map<(d0, d1, d2, d3)[s0, s1, s2, s3] -> (d0, d1, d2)>
- iterator_types:
- - parallel
- - parallel
- - parallel
- - reduction
- assignments:
- - !ScalarAssign
- arg: C
- value: !ScalarExpression
- scalar_fn:
- kind: binary
- fn_name: add
- operands:
- - !ScalarExpression
- scalar_arg: C
- - !ScalarExpression
- scalar_fn:
- kind: binary
- fn_name: mul
- operands:
- - !ScalarExpression
- scalar_fn:
- kind: type
- fn_name: cast_signed
- type_var: U
- operands:
- - !ScalarExpression
- scalar_arg: A
- - !ScalarExpression
- scalar_fn:
- kind: type
- fn_name: cast_signed
- type_var: U
- operands:
- - !ScalarExpression
- scalar_arg: B
---- !LinalgOpConfig
-metadata: !LinalgOpMetadata
name: quantized_batch_matmul
cpp_class_name: QuantizedBatchMatmulOp
doc: |-
diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
index ca0cc03..e32d3d0 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
@@ -785,6 +785,9 @@ def MatmulOp : LinalgStructuredBase_Op<"matmul", [
/// Returns a list of AffineMap with the default matmul indexing charactristic.
static SmallVector<AffineMap> getDefaultIndexingMaps(MLIRContext *context);
+ /// Returns true if the AffineMap is the default matmul indexing characteristic.
+ static bool isDefaultIndexingMaps(Attribute attr);
+
/// Returns true if the given broadcast map \p bcastMap is valid for this op.
bool isValidLhsRhsBroadcastMap(AffineMap bcastMap);
@@ -1057,6 +1060,9 @@ def BatchMatmulOp : LinalgStructuredBase_Op<"batch_matmul", !listconcat([AttrSiz
/// Returns a list with default AffineMap(s), i.e. without broadcasts and transpositions.
static SmallVector<AffineMap> getDefaultIndexingMaps(MLIRContext *context);
+ /// Returns true if the AffineMap is the default batch matmul indexing characteristic.
+ static bool isDefaultIndexingMaps(Attribute attr);
+
/// Returns true if the given broadcast map \p bcastMap is valid for this op.
bool isValidLhsRhsBroadcastMap(AffineMap bcastMap, bool isLHS = true);
@@ -1181,6 +1187,9 @@ def BatchReduceMatmulOp : LinalgStructuredBase_Op<"batch_reduce_matmul", [
/// Returns a list of AffineMap with the default batch_reduce_matmul indexing charactristic.
static SmallVector<AffineMap> getDefaultIndexingMaps(MLIRContext *context);
+ /// Returns true if the AffineMap is the default batch reduce matmul indexing characteristic.
+ static bool isDefaultIndexingMaps(Attribute attr);
+
/// Returns true if the given broadcast map \p bcastMap is valid for this op.
bool isValidLhsRhsBroadcastMap(AffineMap bcastMap, bool isLHS = true);
diff --git a/mlir/include/mlir/Dialect/Linalg/Passes.td b/mlir/include/mlir/Dialect/Linalg/Passes.td
index 373842c..f236629 100644
--- a/mlir/include/mlir/Dialect/Linalg/Passes.td
+++ b/mlir/include/mlir/Dialect/Linalg/Passes.td
@@ -89,6 +89,45 @@ def LinalgInlineScalarOperandsPass : Pass<"linalg-inline-scalar-operands"> {
];
}
+def LinalgMorphOpsPass : Pass<"linalg-morph-ops"> {
+ let summary = "Convert named op to category ops or generic and vice-versa";
+
+ let description = [{
+ Convert a linalg op from one representation to an equivalent one.
+ For example, a linalg named op `linalg.add` can also be written as a
+ category op `linalg.elementwise`, and can also be re-written as
+ a `linalg.generic`, giving the morphism:
+
+ named-op <--> category-op (elementwise, contraction, ..) <--> generic
+
+ Note that the set of `linalg.generic` subsumes named and category ops,
+ and therefore not every `linalg.generic` can be converted to a named or
+ category op. Similarly, category ops subsume named ops.
+
+ Note:
+ Legacy converters:
+ `--linalg-generalize-named-ops` is the path `named-op --> generic-op`
+ `--linalg-specialize-generic-ops` is the path `named-op <-- generic-op`
+ }];
+ let dependentDialects = ["linalg::LinalgDialect"];
+
+ let options = [
+ // named-op <--> category <--> generic
+
+ // Lowering options
+ Option<"namedToCategory", "named-to-category", "bool", /*default=*/"false",
+ "convert named ops to category op e.g. `linalg.elementwise`">,
+ Option<"categoryToGeneric", "category-to-generic", "bool", /*default=*/"false",
+ "convert category ops e.g. `linalg.elementwise` to `linalg.generic`">,
+ Option<"namedToGeneric", "named-to-generic", "bool", /*default=*/"false",
+ "convert named ops e.g. `linalg.add` to `linalg.generic`">,
+
+ // Lifting options
+ // TODOs: `generic-to-category`, `category-to-named`
+ Option<"genericToNamed", "generic-to-named", "bool", /*default=*/"false",
+ "convert linalg.generic to equivalent named ops"> ];
+}
+
def LinalgGeneralizeNamedOpsPass : Pass<"linalg-generalize-named-ops"> {
let summary = "Convert named ops into generic ops";
let dependentDialects = ["linalg::LinalgDialect"];
diff --git a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
index 61ce23f..a19cce4 100644
--- a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
@@ -2348,6 +2348,9 @@ def VectorizeChildrenAndApplyPatternsOp :
operation that is contained inside the vectorization target.
This transformation supports the following attributes:
+ - `fold_type_extensions_into_contract`: a `UnitAttr` to enable the folding of
+ type extension operations into `vector.contract` to create a mixed precision
+ operation.
- `vectorize_padding`: a `UnitAttr` to activate the vectorization of
`tensor.pad` ops. Different pipelines may prefer to lower such ops to
loops.
@@ -2368,6 +2371,7 @@ def VectorizeChildrenAndApplyPatternsOp :
}];
let arguments = (ins TransformHandleTypeInterface:$target,
+ UnitAttr:$fold_type_extensions_into_contract,
UnitAttr:$vectorize_padding,
UnitAttr:$vectorize_nd_extract,
UnitAttr:$flatten_1d_depthwise_conv,
@@ -2381,6 +2385,7 @@ def VectorizeChildrenAndApplyPatternsOp :
let builders = [
OpBuilder<(ins "Value":$target,
+ CArg<"bool", "false">:$foldTypeExtensionsIntoContract,
CArg<"bool", "false">:$vectorizePadding,
CArg<"bool", "false">:$vectorizeNDExtract,
CArg<"bool", "false">:$flatten1DDepthwise)>
diff --git a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
index d4ffe0a..1e5b5d4 100644
--- a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
+++ b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
@@ -1831,6 +1831,10 @@ void populateLinalgNamedOpsGeneralizationPatterns(RewritePatternSet &patterns);
void populateLinalgGenericOpsSpecializationPatterns(
RewritePatternSet &patterns);
+/// Populates `patterns` that convert linalg named ops e.g. `linalg.add`
+/// to equivalent `linalg.elementwise`.
+void populateLinalgNamedToElementwisePatterns(RewritePatternSet &patterns);
+
/// Populates `patterns` with patterns that fold operations like
/// `linalg.transform` into elementwise op map.
void populateLinalgFoldIntoElementwisePatterns(RewritePatternSet &patterns);
diff --git a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.h b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.h
index 364c172..63410b8 100644
--- a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.h
+++ b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.h
@@ -32,6 +32,7 @@
#include "mlir/Interfaces/ViewLikeInterface.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/Alignment.h"
// Pull in all enum type definitions and utility function declarations.
#include "mlir/Dialect/Vector/IR/VectorEnums.h.inc"
diff --git a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
index dc55704..b3b8afd 100644
--- a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
+++ b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
@@ -1729,18 +1729,18 @@ def Vector_LoadOp : Vector_Op<"load", [
"Value":$base,
"ValueRange":$indices,
CArg<"bool", "false">:$nontemporal,
- CArg<"uint64_t", "0">:$alignment), [{
+ CArg<"llvm::MaybeAlign", "llvm::MaybeAlign()">:$alignment), [{
return build($_builder, $_state, resultType, base, indices, nontemporal,
- alignment != 0 ? $_builder.getI64IntegerAttr(alignment) :
+ alignment.has_value() ? $_builder.getI64IntegerAttr(alignment->value()) :
nullptr);
}]>,
OpBuilder<(ins "TypeRange":$resultTypes,
"Value":$base,
"ValueRange":$indices,
CArg<"bool", "false">:$nontemporal,
- CArg<"uint64_t", "0">:$alignment), [{
+ CArg<"llvm::MaybeAlign", "llvm::MaybeAlign()">:$alignment), [{
return build($_builder, $_state, resultTypes, base, indices, nontemporal,
- alignment != 0 ? $_builder.getI64IntegerAttr(alignment) :
+ alignment.has_value() ? $_builder.getI64IntegerAttr(alignment->value()) :
nullptr);
}]>
];
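A hedged usage sketch (not part of this patch) of the updated builder: the alignment is now passed as `llvm::MaybeAlign` rather than a raw integer. The builder, location, and operands are assumed to exist in the surrounding lowering code.

```c++
#include "mlir/Dialect/Vector/IR/VectorOps.h"

static mlir::Value createAlignedLoad(mlir::OpBuilder &b, mlir::Location loc,
                                     mlir::VectorType vecTy, mlir::Value memref,
                                     mlir::ValueRange indices) {
  // MaybeAlign() (the default) means "no alignment attribute".
  auto load = b.create<mlir::vector::LoadOp>(
      loc, vecTy, memref, indices,
      /*nontemporal=*/false, /*alignment=*/llvm::MaybeAlign(16));
  return load.getResult();
}
```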
@@ -1847,9 +1847,9 @@ def Vector_StoreOp : Vector_Op<"store", [
"Value":$base,
"ValueRange":$indices,
CArg<"bool", "false">:$nontemporal,
- CArg<"uint64_t", "0">:$alignment), [{
+ CArg<"llvm::MaybeAlign", "llvm::MaybeAlign()">:$alignment), [{
return build($_builder, $_state, valueToStore, base, indices, nontemporal,
- alignment != 0 ? $_builder.getI64IntegerAttr(alignment) :
+ alignment.has_value() ? $_builder.getI64IntegerAttr(alignment->value()) :
nullptr);
}]>
];
@@ -1876,7 +1876,9 @@ def Vector_MaskedLoadOp :
Arguments<(ins Arg<AnyMemRef, "", [MemRead]>:$base,
Variadic<Index>:$indices,
VectorOfNonZeroRankOf<[I1]>:$mask,
- AnyVectorOfNonZeroRank:$pass_thru)>,
+ AnyVectorOfNonZeroRank:$pass_thru,
+ ConfinedAttr<OptionalAttr<I64Attr>,
+ [AllAttrOf<[IntPositive, IntPowerOf2]>]>:$alignment)>,
Results<(outs AnyVectorOfNonZeroRank:$result)> {
let summary = "loads elements from memory into a vector as defined by a mask vector";
@@ -1912,6 +1914,12 @@ def Vector_MaskedLoadOp :
%1 = vector.maskedload %base[%i, %j], %mask, %pass_thru
: memref<?x?xf32>, vector<16xi1>, vector<16xf32> into vector<16xf32>
```
+
+  An optional `alignment` attribute allows specifying the byte alignment of the
+  load operation. It must be a positive power of 2. The operation must access
+  memory at an address aligned to this boundary. Violations may lead to
+  architecture-specific faults or performance penalties.
+  A value of 0 indicates no specific alignment requirement.
}];
let extraClassDeclaration = [{
MemRefType getMemRefType() {
@@ -1932,6 +1940,29 @@ def Vector_MaskedLoadOp :
let hasCanonicalizer = 1;
let hasFolder = 1;
let hasVerifier = 1;
+
+ let builders = [
+ OpBuilder<(ins "VectorType":$resultType,
+ "Value":$base,
+ "ValueRange":$indices,
+ "Value":$mask,
+ "Value":$passthrough,
+ CArg<"llvm::MaybeAlign", "llvm::MaybeAlign()">:$alignment), [{
+ return build($_builder, $_state, resultType, base, indices, mask, passthrough,
+ alignment.has_value() ? $_builder.getI64IntegerAttr(alignment->value()) :
+ nullptr);
+ }]>,
+ OpBuilder<(ins "TypeRange":$resultTypes,
+ "Value":$base,
+ "ValueRange":$indices,
+ "Value":$mask,
+ "Value":$passthrough,
+ CArg<"llvm::MaybeAlign", "llvm::MaybeAlign()">:$alignment), [{
+ return build($_builder, $_state, resultTypes, base, indices, mask, passthrough,
+ alignment.has_value() ? $_builder.getI64IntegerAttr(alignment->value()) :
+ nullptr);
+ }]>
+ ];
}
def Vector_MaskedStoreOp :
@@ -1939,7 +1970,9 @@ def Vector_MaskedStoreOp :
Arguments<(ins Arg<AnyMemRef, "", [MemWrite]>:$base,
Variadic<Index>:$indices,
VectorOfNonZeroRankOf<[I1]>:$mask,
- AnyVectorOfNonZeroRank:$valueToStore)> {
+ AnyVectorOfNonZeroRank:$valueToStore,
+ ConfinedAttr<OptionalAttr<I64Attr>,
+ [AllAttrOf<[IntPositive, IntPowerOf2]>]>:$alignment)> {
let summary = "stores elements from a vector into memory as defined by a mask vector";
@@ -1974,6 +2007,12 @@ def Vector_MaskedStoreOp :
vector.maskedstore %base[%i, %j], %mask, %value
: memref<?x?xf32>, vector<16xi1>, vector<16xf32>
```
+
+  An optional `alignment` attribute allows specifying the byte alignment of the
+  store operation. It must be a positive power of 2. The operation must access
+  memory at an address aligned to this boundary. Violations may lead to
+  architecture-specific faults or performance penalties.
+  A value of 0 indicates no specific alignment requirement.
}];
let extraClassDeclaration = [{
MemRefType getMemRefType() {
@@ -1992,6 +2031,18 @@ def Vector_MaskedStoreOp :
let hasCanonicalizer = 1;
let hasFolder = 1;
let hasVerifier = 1;
+
+ let builders = [
+ OpBuilder<(ins "Value":$base,
+ "ValueRange":$indices,
+ "Value":$mask,
+ "Value":$valueToStore,
+ CArg<"llvm::MaybeAlign", "llvm::MaybeAlign()">:$alignment), [{
+ return build($_builder, $_state, base, indices, mask, valueToStore,
+ alignment.has_value() ? $_builder.getI64IntegerAttr(alignment->value()) :
+ nullptr);
+ }]>
+ ];
}
def Vector_GatherOp :
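
A hedged C++ sketch of the builder changes above: `vector.load`/`vector.store` now take `llvm::MaybeAlign` instead of a raw integer, and the masked ops gain builders that accept the same optional alignment. Operands and types here are placeholders assumed to be created elsewhere:

```cpp
#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "llvm/Support/Alignment.h"

using namespace mlir;

// Sketch only: an unset MaybeAlign simply omits the alignment attribute.
static void buildAlignedVectorOps(OpBuilder &b, Location loc, VectorType vecTy,
                                  Value base, ValueRange indices, Value mask,
                                  Value passThru) {
  // No alignment requested: no `alignment` attribute is attached.
  vector::LoadOp::create(b, loc, vecTy, base, indices);
  // Request 16-byte alignment on a masked load via the new builder.
  vector::MaskedLoadOp::create(b, loc, vecTy, base, indices, mask, passThru,
                               llvm::MaybeAlign(16));
}
```
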
diff --git a/mlir/include/mlir/Dialect/WasmSSA/IR/WasmSSAOps.td b/mlir/include/mlir/Dialect/WasmSSA/IR/WasmSSAOps.td
index 676621b..b80ee2c 100644
--- a/mlir/include/mlir/Dialect/WasmSSA/IR/WasmSSAOps.td
+++ b/mlir/include/mlir/Dialect/WasmSSA/IR/WasmSSAOps.td
@@ -329,11 +329,11 @@ def WasmSSA_IfOp : WasmSSA_Op<"if", [Terminator,
```mlir
  // Runs the if clause if %a is non-zero
- "wasmssa.if"(%a)[^bb1] ({
+ wasmssa.if %a {
// Execute if %a is non-zero
- },{
+ } else {
// else clause
- }) : (i32) -> ()
+ }
```
}];
let arguments = (ins I32:$condition, Variadic<WasmSSA_ValType>: $inputs);
@@ -359,6 +359,7 @@ def WasmSSA_IfOp : WasmSSA_Op<"if", [Terminator,
return createBlock(getElse());
}
}];
+ let assemblyFormat = "$condition (`(`$inputs^`)` `:` type($inputs))? attr-dict `:` $if custom<ElseRegion>($else) `>` $target";
}
def WasmSSA_LocalOp : WasmSSA_Op<"local", [
@@ -445,7 +446,7 @@ def WasmSSA_MemOp : WasmSSA_Op<"memory", [Symbol]> {
```mlir
// Define the `mem_0` memory with defined bounds of 0 -> 65536
- "wasmssa.memory"() <{limits = !wasmssa<limit[0:65536]>, sym_name = "mem_0"}> : () -> ()
+ wasmssa.memory @mem_0 !wasmssa<limit[0:65536]>
```
}];
let arguments = (ins SymbolNameAttr: $sym_name,
@@ -456,6 +457,8 @@ def WasmSSA_MemOp : WasmSSA_Op<"memory", [Symbol]> {
"::llvm::StringRef":$symbol,
"wasmssa::LimitType":$limit)>
];
+
+ let assemblyFormat = "$sym_name custom<WasmVisibility>($sym_visibility) $limits attr-dict";
}
def WasmSSA_MemImportOp : WasmSSA_Op<"import_mem", [Symbol, ImportOpInterface]> {
@@ -494,6 +497,7 @@ def WasmSSA_TableOp : WasmSSA_Op<"table", [Symbol]> {
let builders = [OpBuilder<(ins
"::llvm::StringRef":$symbol,
"wasmssa::TableType":$type)>];
+ let assemblyFormat = "$sym_name custom<WasmVisibility>($sym_visibility) $type attr-dict";
}
def WasmSSA_TableImportOp : WasmSSA_Op<"import_table", [Symbol, ImportOpInterface]> {
diff --git a/mlir/include/mlir/Dialect/XeGPU/IR/CMakeLists.txt b/mlir/include/mlir/Dialect/XeGPU/IR/CMakeLists.txt
index 3f8cac4..728f1aa 100644
--- a/mlir/include/mlir/Dialect/XeGPU/IR/CMakeLists.txt
+++ b/mlir/include/mlir/Dialect/XeGPU/IR/CMakeLists.txt
@@ -12,3 +12,9 @@ mlir_tablegen(XeGPUEnums.h.inc -gen-enum-decls)
mlir_tablegen(XeGPUEnums.cpp.inc -gen-enum-defs)
add_public_tablegen_target(MLIRXeGPUEnumsIncGen)
add_dependencies(mlir-headers MLIRXeGPUEnumsIncGen)
+
+set(LLVM_TARGET_DEFINITIONS XeGPUAttrs.td)
+mlir_tablegen(XeGPUAttrInterface.h.inc -gen-attr-interface-decls)
+mlir_tablegen(XeGPUAttrInterface.cpp.inc -gen-attr-interface-defs)
+add_public_tablegen_target(MLIRXeGPUAttrInterfaceIncGen)
+add_dependencies(mlir-headers MLIRXeGPUAttrInterfaceIncGen)
diff --git a/mlir/include/mlir/Dialect/XeGPU/IR/XeGPU.h b/mlir/include/mlir/Dialect/XeGPU/IR/XeGPU.h
index 8e2784f..3592da4 100644
--- a/mlir/include/mlir/Dialect/XeGPU/IR/XeGPU.h
+++ b/mlir/include/mlir/Dialect/XeGPU/IR/XeGPU.h
@@ -15,6 +15,7 @@
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Dialect.h"
#include "mlir/IR/TypeUtilities.h"
+#include "mlir/IR/Value.h"
#include "mlir/Interfaces/ShapedOpInterfaces.h"
#include "mlir/Interfaces/SideEffectInterfaces.h"
#include "mlir/Interfaces/ViewLikeInterface.h"
@@ -22,17 +23,19 @@
namespace mlir {
namespace xegpu {
class TensorDescType;
+class LayoutAttr;
+class SliceAttr;
} // namespace xegpu
} // namespace mlir
+#include <mlir/Dialect/XeGPU/IR/XeGPUAttrInterface.h.inc>
+#include <mlir/Dialect/XeGPU/IR/XeGPUDialect.h.inc>
#include <mlir/Dialect/XeGPU/IR/XeGPUEnums.h.inc>
+
#define GET_ATTRDEF_CLASSES
#include <mlir/Dialect/XeGPU/IR/XeGPUAttrs.h.inc>
#define GET_TYPEDEF_CLASSES
#include <mlir/Dialect/XeGPU/IR/XeGPUTypes.h.inc>
-
-#include <mlir/Dialect/XeGPU/IR/XeGPUDialect.h.inc>
-
#define GET_OP_CLASSES
#include <mlir/Dialect/XeGPU/IR/XeGPU.h.inc>
diff --git a/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUAttrs.td b/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUAttrs.td
index 64eb21c..1f420c1 100644
--- a/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUAttrs.td
+++ b/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUAttrs.td
@@ -175,7 +175,38 @@ def XeGPU_FenceScopeAttr:
let assemblyFormat = "$value";
}
-def XeGPU_LayoutAttr : XeGPUAttr<"Layout", "layout"> {
+def LayoutTrait: AttrInterface<"LayoutTrait"> {
+ let cppNamespace = "::mlir::xegpu";
+ let description = [{
+ Common trait for all XeGPU layouts.
+ }];
+
+ let methods = [
+ InterfaceMethod<"Get the rank of attribute",
+ "int64_t",
+ "getRank">,
+ InterfaceMethod<"Get the SgLayout field of the attribute as integer array",
+ "std::optional<SmallVector<int64_t>>",
+ "getSgLayoutAsInt">,
+ InterfaceMethod<"Get the SgData field of the attribute as integer array",
+ "std::optional<SmallVector<int64_t>>",
+ "getSgDataAsInt">,
+ InterfaceMethod<[{Delinearizes a linear subgroup ID into its multidimensional
+ indices based on the effective subgroup layout.}],
+ "FailureOr<SmallVector<Value>>",
+ "delinearizeSubgroupId",
+ (ins "OpBuilder &": $builder, "Location":$loc, "Value":$linearId)>,
+ InterfaceMethod<[{Generates instructions to compute multidimensional offsets for blocks
+ assigned to a subgroup identified by linearId. The shape parameter
+ represents the workgroup-level problem size. Each subgroup may access
+ multiple blocks according to round-robin distribution rules.}],
+ "FailureOr<SmallVector<SmallVector<Value>>>",
+ "getOffsets",
+ (ins "OpBuilder &": $builder, "Location":$loc, "Value":$linearId, "ArrayRef<int64_t>":$shape)>
+ ];
+}
+
+def XeGPU_LayoutAttr : XeGPUAttr<"Layout", "layout", [LayoutTrait]> {
let summary = [{
Describes the data distribution to subgroups and work-items for a tensor
specified by the tensor descriptor.
@@ -330,12 +361,143 @@ def XeGPU_LayoutAttr : XeGPUAttr<"Layout", "layout"> {
return LayoutAttr::get(getContext(), getSgLayout(), getSgData(), nullptr,
getLaneLayout(), getLaneData(), getOrder());
}
+
+ std::optional<SmallVector<int64_t>> getSgLayoutAsInt() const {
+ if (DenseI32ArrayAttr layout = getSgLayout())
+ return llvm::to_vector_of<int64_t>(layout.asArrayRef());
+ return std::nullopt;
+ }
+
+ std::optional<SmallVector<int64_t>> getSgDataAsInt() const {
+ if (DenseI32ArrayAttr data = getSgData())
+ return llvm::to_vector_of<int64_t>(data.asArrayRef());
+ return std::nullopt;
+ }
+
+ /// Delinearizes a linear subgroup ID into its multidimensional indices
+ /// based on the effective subgroup layout.
+ FailureOr<SmallVector<Value>>
+ delinearizeSubgroupId(OpBuilder &builder, Location loc, Value linearId);
+
+ /// Generates instructions to compute multidimensional offsets for blocks
+ /// assigned to a subgroup identified by linearId. The shape parameter
+ /// represents the workgroup-level problem size. Each subgroup may access
+ /// multiple blocks according to round-robin distribution rules.
+ FailureOr<SmallVector<SmallVector<Value>>>
+ getOffsets(OpBuilder &builder, Location loc, Value linearId, ArrayRef<int64_t> shape);
+
}];
let assemblyFormat = "`<` struct(params) `>`";
let genVerifyDecl = 1;
}
+
+def XeGPU_SliceAttr : XeGPUAttr<"Slice", "slice", [LayoutTrait]> {
+ let summary = [{Describes the data distribution and sharing among subgroups or work-items.}];
+
+ let description = [{
+ Like LayoutAttr, SliceAttr describes data distribution among subgroups or work-items.
+ However, whereas LayoutAttr requires the data to have the same rank as the attribute,
+ SliceAttr permits the data to have a lower rank. In this case, compute units in the
+ specified dimensions (given by `$dims`) share the data, provided that the remaining
+ ranks match the data rank. SliceAttr is commonly used by operations such as
+ vector.multi_reduction and vector.broadcast.
+
+ Example:
+ ```
+ #l = #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 32]>
+ #r = #xegpu.slice<#l, dim = [0]>
+
+ %exp = math.exp %input {layout_result_0 = #l}: vector<256x128xf32>
+ %red = vector.multi_reduction<add>, %exp, %acc [0] {layout_result_0 = #r}: vector<256x128xf32> to vector<128xf32>
+ %bcast = vector.broadcast %red {layout_result_0 = #l} : vector<128xf32> to vector<256x128xf32>
+ ```
+ In this example, %red is conceptually divided into 4 vectors of type vector<32xf32>, each assigned to
+ a group of subgroups. Each group consists of 8 subgroups from the same column of sg_layout, sharing a
+ single reduction result of type vector<32xf32>.
+
+ }];
+
+ let parameters = (ins
+ "xegpu::LayoutTrait": $parent,
+ "DenseI64ArrayAttr": $dims
+ );
+
+ let extraClassDeclaration = [{
+
+ int64_t getRank() const {
+ SliceAttr attr = flatten();
+ auto parent = dyn_cast<LayoutAttr>(attr.getParent());
+ return parent.getRank() - attr.getDims().size();
+ }
+
+ DenseI32ArrayAttr getOrder() const {
+ SliceAttr attr = flatten();
+ auto parent = dyn_cast<LayoutAttr>(attr.getParent());
+ return parent.getOrder();
+ }
+
+ bool isWgLayout() const {
+ SliceAttr attr = flatten();
+ auto parent = dyn_cast<LayoutAttr>(attr.getParent());
+ return parent.isWgLayout();
+ }
+
+ bool isSgLayout() const {
+ SliceAttr attr = flatten();
+ auto parent = dyn_cast<LayoutAttr>(attr.getParent());
+ return parent.isSgLayout();
+ }
+
+ /// Returns the SgLayout of the attribute, computed by applying
+ /// the slice dimensions to the underlying LayoutAttr.
+ std::optional<SmallVector<int64_t>> getSgLayoutAsInt() const {
+ SliceAttr attr = flatten();
+ auto parent = dyn_cast<LayoutAttr>(attr.getParent());
+ if (auto layout = parent.getSgLayoutAsInt()) {
+ ArrayRef<int64_t> dims = attr.getDims().asArrayRef();
+ return XeGPUDialect::slice(llvm::ArrayRef<int64_t>(*layout), dims);
+ }
+ return std::nullopt;
+ }
+
+ /// Returns the SgData of the attribute, computed by applying
+ /// the slice dimensions to the underlying LayoutAttr.
+ std::optional<SmallVector<int64_t>> getSgDataAsInt() const {
+ SliceAttr attr = flatten();
+ auto parent = dyn_cast<LayoutAttr>(attr.getParent());
+ if (auto data = parent.getSgDataAsInt()) {
+ ArrayRef<int64_t> dims = attr.getDims().asArrayRef();
+ return XeGPUDialect::slice(llvm::ArrayRef<int64_t>(*data), dims);
+ }
+ return std::nullopt;
+ }
+
+ /// flatten a nested SliceAttr, e.g., for 2-level nested SliceAttr
+ /// #xegpu.slice<#xegpu.slice<#xegpu.layout<sg_layout = [4, 8, 12]>, dims = [0]>, dims = [0]>
+  /// it will coalesce the two slice operations and return a simplified SliceAttr
+ /// #xegpu.slice<#xegpu.layout<sg_layout = [4, 8, 12]>, dims = [0, 1]>
+ SliceAttr flatten() const;
+
+ /// Delinearizes a linear subgroup ID into its multidimensional indices
+ /// based on the effective subgroup layout.
+ FailureOr<SmallVector<Value>>
+ delinearizeSubgroupId(OpBuilder &builder, Location loc, Value linearId);
+
+ /// Generates instructions to compute multidimensional offsets for blocks
+ /// assigned to a subgroup identified by linearId. The shape parameter
+ /// represents the workgroup-level problem size. Each subgroup may access
+ /// multiple blocks according to round-robin distribution rules.
+ FailureOr<SmallVector<SmallVector<Value>>>
+ getOffsets(OpBuilder &builder, Location loc, Value linearId, ArrayRef<int64_t> shape);
+
+ }];
+
+ let assemblyFormat = "`<` qualified($parent) `,` `dims` `=` $dims `>`";
+ let genVerifyDecl = 1;
+}
+
def XeGPU_RangeAttr : XeGPUAttr<"Range", "range"> {
let summary = [{Specifies a half-open range}];
let description = [{
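
A hedged C++ sketch of consuming the new `LayoutTrait` interface uniformly for both `#xegpu.layout` and `#xegpu.slice`; the attribute value and the diagnostic output are illustrative:

```cpp
#include "mlir/Dialect/XeGPU/IR/XeGPU.h"
#include "llvm/Support/raw_ostream.h"

// Sketch only: both LayoutAttr and SliceAttr implement LayoutTrait, so rank
// and subgroup-layout queries no longer need to branch on the concrete class.
static void inspectXeGPULayout(mlir::Attribute attr) {
  auto layout = mlir::dyn_cast<mlir::xegpu::LayoutTrait>(attr);
  if (!layout)
    return;
  llvm::errs() << "rank: " << layout.getRank() << "\n";
  if (auto sgLayout = layout.getSgLayoutAsInt())
    llvm::errs() << "sg_layout dims: " << sgLayout->size() << "\n";
}
```
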
diff --git a/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUDialect.td b/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUDialect.td
index 549018b..76d58e5 100644
--- a/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUDialect.td
+++ b/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUDialect.td
@@ -41,6 +41,18 @@ def XeGPU_Dialect : Dialect {
/// Checks if the given shape can be evenly distributed based on the layout
/// and data factors provided by the LayoutAttr.
static bool isEvenlyDistributable(llvm::ArrayRef<int64_t> shape, xegpu::LayoutAttr attr);
+
+ /// drops/slices the shape in the specified dims, and return the rest. e.g.,
+ /// for shape = [32, 64, 8], dims = [0, 2], it will return [64]
+ template<typename T, typename U>
+ static llvm::SmallVector<T> slice(llvm::ArrayRef<T> shape, llvm::ArrayRef<U> dims) {
+ llvm::SmallVector<T> result;
+ for (auto [i, v]: llvm::enumerate(shape)) {
+ if (!llvm::is_contained(dims, i))
+ result.push_back(v);
+ }
+ return result;
+ }
}];
}
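
A minimal sketch of the `slice` helper declared above, reproducing the example from its comment (dims listed in `dims` are dropped, the rest is kept); the wrapper function is illustrative:

```cpp
#include "mlir/Dialect/XeGPU/IR/XeGPU.h"
#include <cassert>

// Sketch only: [32, 64, 8] sliced at dims [0, 2] leaves just [64].
static void sliceExample() {
  llvm::SmallVector<int64_t> rest = mlir::xegpu::XeGPUDialect::slice(
      llvm::ArrayRef<int64_t>{32, 64, 8}, llvm::ArrayRef<int64_t>{0, 2});
  assert(rest.size() == 1 && rest[0] == 64 && "only dim 1 remains");
  (void)rest;
}
```
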
diff --git a/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUOps.td b/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUOps.td
index 75b16a87..1a6a34c 100644
--- a/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUOps.td
+++ b/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUOps.td
@@ -29,7 +29,7 @@ class XeGPU_Op<string mnemonic, list<Trait> traits = []>:
void printProperties(::mlir::MLIRContext *ctx,
::mlir::OpAsmPrinter &p, const Properties &prop,
::mlir::ArrayRef<::llvm::StringRef> elidedProps) {
-
+
DictionaryAttr propAttr = dyn_cast_if_present<mlir::DictionaryAttr>(getPropertiesAsAttr(ctx, prop));
// filter out the elidedProps from propAttr, and get the resultAttr
@@ -43,7 +43,7 @@ class XeGPU_Op<string mnemonic, list<Trait> traits = []>:
}
if (!filteredAttrs.empty()) {
- p << "<" << DictionaryAttr::get(ctx, filteredAttrs) << ">";
+ p << "<" << DictionaryAttr::get(ctx, filteredAttrs) << ">";
}
}
@@ -60,8 +60,7 @@ class XeGPU_Op<string mnemonic, list<Trait> traits = []>:
}
-def XeGPU_CreateNdDescOp: XeGPU_Op<"create_nd_tdesc", [Pure, ViewLikeOpInterface,
- AttrSizedOperandSegments, OffsetSizeAndStrideOpInterface]> {
+def XeGPU_CreateNdDescOp: XeGPU_Op<"create_nd_tdesc", [Pure, ViewLikeOpInterface, AttrSizedOperandSegments]> {
let summary = "Create nd-tensor descriptor operation";
let description = [{
@@ -181,82 +180,38 @@ def XeGPU_CreateNdDescOp: XeGPU_Op<"create_nd_tdesc", [Pure, ViewLikeOpInterface
return getType().getShape();
}
- /// wrapper for matching with OffsetSizeAndStrideOpInterface
- OperandRange getSizes() {
- return getShape();
+ SmallVector<OpFoldResult> getMixedOffsets() {
+ auto statics = getConstOffsets().value_or(SmallVector<int64_t>());
+ auto dynamics = getOffsets();
+ if (statics.size() == 0 && dynamics.size() == 0)
+ return {};
+ return getMixedValues(statics, dynamics, getContext());
}
- ArrayRef<int64_t> getStaticOffsets(){
- auto attr = getConstOffsetsAttr();
-
- if (attr)
- return attr;
+ SmallVector<OpFoldResult> getMixedSizes() {
+ SmallVector<int64_t> statics;
- int64_t rank = getMixedSizes().size();
-
- setConstOffsets(llvm::SmallVector<int64_t, 4>(rank, 0));
+      /// Get the static sizes/shape; the value passed to `const_shape`
+      /// overrides the shape from the memref type.
+ if (auto memrefTy = llvm::dyn_cast<MemRefType>(getSourceType()))
+ statics = llvm::to_vector(memrefTy.getShape());
+ if (auto attr = getConstShapeAttr())
+ statics = llvm::to_vector(attr.asArrayRef());
- attr = getConstOffsetsAttr();
- return attr;
+ return getMixedValues(statics, getShape(), getContext());
}
- /// wrapper for matching with OffsetSizeAndStrideOpInterface
- /// If source is IntegerType or `const_shape` is filled,
- /// it will return `const_shape`, such that mixes of `shape`
- /// and `const_shape` will be used to represent the shape of
- /// source operand. They overide static shape from source memref type.
- ArrayRef<int64_t> getStaticSizes() {
- /// To be compatible with OffsetSizeAndStrideOpInterface, which expects valid return value and perform checks
- static llvm::SmallVector<int64_t, 4> emptyShape;
-
- auto attr = getConstShapeAttr();
- if (attr)
- return attr;
-
- if (llvm::isa<IntegerType>(getSourceType()))
- return emptyShape;
-
- auto memrefType = llvm::dyn_cast<MemRefType>(getSourceType());
- assert(memrefType && "Incorrect use of getStaticSizes");
- return memrefType.getShape();
- }
+ SmallVector<OpFoldResult> getMixedStrides() {
+ SmallVector<int64_t> statics;
- /// wrapper for matching with OffsetSizeAndStrideOpInterface
- /// If source is IntegerType or `const_strides` is filled, it
- /// will return `const_strides`, such that mixes of `strides`
- /// and `const_strides` will be used to represent the strides of
- /// source operand. They overide static strides from source memref type.
- ArrayRef<int64_t> getStaticStrides() {
- /// To be compatible with OffsetSizeAndStrideOpInterface, which expects valid return value and perform checks
- static llvm::SmallVector<int64_t, 4> emptyStrides;
-
- auto attr = getConstStridesAttr();
- if (attr)
- return attr;
-
- if (llvm::isa<IntegerType>(getSourceType()))
- return emptyStrides;
-
- auto memrefType = llvm::dyn_cast<MemRefType>(getSourceType());
- assert(memrefType && "Incorrect use of getStaticStrides");
- auto [strides, _] = memrefType.getStridesAndOffset();
- // reuse the storage of ConstStridesAttr since strides from
- // memref is not persistant
- setConstStrides(strides);
- attr = getConstStridesAttr();
- return attr;
- }
+      /// Get the static strides; the value passed to `const_strides`
+      /// overrides the strides from the memref type.
+ if (auto memrefTy = llvm::dyn_cast<MemRefType>(getSourceType()))
+ statics = memrefTy.getStridesAndOffset().first;
+ if (auto attr = getConstStridesAttr())
+ statics = llvm::to_vector(attr.asArrayRef());
- /// Return the expected rank of each of the`static_offsets`,
- /// `static_shape` and `static_strides` attributes.
- std::array<unsigned, 3> getArrayAttrMaxRanks() {
- unsigned rank;
- if (auto ty = llvm::dyn_cast<MemRefType>(getSourceType())) {
- rank = ty.getRank();
- } else {
- rank = (unsigned)getMixedOffsets().size();
- }
- return {rank, rank, rank};
+ return getMixedValues(statics, getStrides(), getContext());
}
/// Return the number of leading operands before the `offsets`,
@@ -314,15 +269,15 @@ def XeGPU_PrefetchNdOp : XeGPU_Op<"prefetch_nd", []> {
}];
let assemblyFormat = [{
- $TensorDesc ``
- custom<OptionalDynamicIndexList>($offsets, $const_offsets)
+ $TensorDesc ``
+ custom<OptionalDynamicIndexList>($offsets, $const_offsets)
prop-dict attr-dict `:` qualified(type($TensorDesc))
}];
let builders = [
- OpBuilder<(ins "Value": $TensorDesc,
- "xegpu::CachePolicyAttr": $l1_hint,
- "xegpu::CachePolicyAttr": $l2_hint,
+ OpBuilder<(ins "Value": $TensorDesc,
+ "xegpu::CachePolicyAttr": $l1_hint,
+ "xegpu::CachePolicyAttr": $l2_hint,
"xegpu::CachePolicyAttr": $l3_hint)>
];
@@ -370,7 +325,7 @@ def XeGPU_LoadNdOp : XeGPU_Op<"load_nd", [
let arguments = (ins XeGPU_TensorDesc: $TensorDesc,
Variadic<Index>: $offsets,
- OptionalAttr<DenseI64ArrayAttr>: $const_offsets,
+ OptionalAttr<DenseI64ArrayAttr>: $const_offsets,
OptionalAttr<UnitAttr>: $packed,
OptionalAttr<DenseI64ArrayAttr>: $transpose,
OptionalAttr<XeGPU_CacheHintAttr>: $l1_hint,
@@ -390,16 +345,16 @@ def XeGPU_LoadNdOp : XeGPU_Op<"load_nd", [
}];
let assemblyFormat = [{
- $TensorDesc ``
- custom<OptionalDynamicIndexList>($offsets, $const_offsets)
+ $TensorDesc ``
+ custom<OptionalDynamicIndexList>($offsets, $const_offsets)
prop-dict attr-dict `:` qualified(type($TensorDesc)) `->` type($value)
}];
let builders = [
- OpBuilder<(ins "Type": $value, "Value": $TensorDesc,
+ OpBuilder<(ins "Type": $value, "Value": $TensorDesc,
"UnitAttr": $packed, "DenseI64ArrayAttr": $transpose,
- "xegpu::CachePolicyAttr": $l1_hint,
- "xegpu::CachePolicyAttr": $l2_hint,
+ "xegpu::CachePolicyAttr": $l1_hint,
+ "xegpu::CachePolicyAttr": $l2_hint,
"xegpu::CachePolicyAttr": $l3_hint)>
];
@@ -442,7 +397,7 @@ def XeGPU_StoreNdOp : XeGPU_Op<"store_nd", [
let arguments = (ins XeGPU_ValueType: $value,
XeGPU_TensorDesc: $TensorDesc,
Variadic<Index>: $offsets,
- OptionalAttr<DenseI64ArrayAttr>: $const_offsets,
+ OptionalAttr<DenseI64ArrayAttr>: $const_offsets,
OptionalAttr<XeGPU_CacheHintAttr>: $l1_hint,
OptionalAttr<XeGPU_CacheHintAttr>: $l2_hint,
OptionalAttr<XeGPU_CacheHintAttr>: $l3_hint);
@@ -458,16 +413,16 @@ def XeGPU_StoreNdOp : XeGPU_Op<"store_nd", [
}];
let assemblyFormat = [{
- $value `,`
- $TensorDesc ``
- custom<OptionalDynamicIndexList>($offsets, $const_offsets)
+ $value `,`
+ $TensorDesc ``
+ custom<OptionalDynamicIndexList>($offsets, $const_offsets)
prop-dict attr-dict `:` type($value) `,` qualified(type($TensorDesc))
}];
let builders = [
- OpBuilder<(ins "Value": $value, "Value": $TensorDesc,
- "xegpu::CachePolicyAttr": $l1_hint,
- "xegpu::CachePolicyAttr": $l2_hint,
+ OpBuilder<(ins "Value": $value, "Value": $TensorDesc,
+ "xegpu::CachePolicyAttr": $l1_hint,
+ "xegpu::CachePolicyAttr": $l2_hint,
"xegpu::CachePolicyAttr": $l3_hint)>
];
@@ -635,12 +590,12 @@ def XeGPU_PrefetchOp : XeGPU_Op<"prefetch", []> {
l3_hint = #xegpu.cache_hint<cached>}
: !xegpu.tensor_desc<16xf16>
```
-
+
Example 2:
A variant accepts memref as base pointer and an offset instead of scattered TensorTdesc.
It combines "create scattered TensorTdesc" and "prefetch with scattered TensorTdesc".
The source operand could be a raw pointer (uint64_t).
- Please refer to create_tdesc for the restriction of memref.
+ Please refer to create_tdesc for the restriction of memref.
```mlir
%a = memref.alloc() : memref<1024xf32>
%0 = arith.constant dense<[0, 16, 32, 64]> : vector<4xindex>
@@ -676,16 +631,16 @@ def XeGPU_PrefetchOp : XeGPU_Op<"prefetch", []> {
}];
let assemblyFormat = [{
- $source
+ $source
(`[` $offsets^ `]`)?
prop-dict
- attr-dict `:` type(operands)
+ attr-dict `:` type(operands)
}];
-
+
let builders = [
OpBuilder<(ins "Value": $source,
- "xegpu::CachePolicyAttr": $l1_hint,
- "xegpu::CachePolicyAttr": $l2_hint,
+ "xegpu::CachePolicyAttr": $l1_hint,
+ "xegpu::CachePolicyAttr": $l2_hint,
"xegpu::CachePolicyAttr": $l3_hint)>
];
@@ -723,7 +678,7 @@ def XeGPU_LoadGatherOp : XeGPU_Op<"load", [MemoryEffects<[MemRead]>]> {
: !xegpu.tensor_desc<16x8xf32, #xegpu.scatter_tdesc_attr<memory_space=global, chunk_size=8>>,
vector<16xi1> -> vector<16x8xf32>
```
-
+
Example 3 (SIMT mode):
```mlir
%2 = xegpu.load %1, %0 <{l1_hint = #xegpu.cache_hint<cached>,
@@ -732,12 +687,12 @@ def XeGPU_LoadGatherOp : XeGPU_Op<"load", [MemoryEffects<[MemRead]>]> {
: !xegpu.tensor_desc<16x8xf32, #xegpu.scatter_tdesc_attr<memory_space=global, chunk_size=8>>
vector<16xi1> -> vector<8xf32>
```
-
+
Example 4:
A variant accepts memref as base pointer and an offset instead of scattered TensorTdesc.
It combines "create scattered TensorTdesc" and "load with scattered TensorTdesc".
The source operand could be a raw pointer (uint64_t). Please refer to create_tdesc
- for the restriction of memref.
+ for the restriction of memref.
```mlir
%a = memref.alloc() : memref<1024xf32>
%offsets = vector.step : vector<16xindex>
@@ -794,14 +749,14 @@ def XeGPU_LoadGatherOp : XeGPU_Op<"load", [MemoryEffects<[MemRead]>]> {
let assemblyFormat = [{
$source
(`[` $offsets^ `]`)? `,`
- $mask prop-dict
+ $mask prop-dict
attr-dict `:` type(operands) `->` type($value)
}];
let builders = [
OpBuilder<(ins "Type": $value, "Value": $source, "Value": $mask,
- "xegpu::CachePolicyAttr": $l1_hint,
- "xegpu::CachePolicyAttr": $l2_hint,
+ "xegpu::CachePolicyAttr": $l1_hint,
+ "xegpu::CachePolicyAttr": $l2_hint,
"xegpu::CachePolicyAttr": $l3_hint)>
];
@@ -848,7 +803,7 @@ def XeGPU_StoreScatterOp : XeGPU_Op<"store", [MemoryEffects<[MemWrite]>]> {
A variant accepts memref as base pointer and an offset instead of scattered TensorTdesc.
It combines "create scattered TensorTdesc" and "store with scattered TensorTdesc".
The dest operand could be a raw pointer (uint64_t).
- Please refer to create_tdesc for the restriction of memref.
+ Please refer to create_tdesc for the restriction of memref.
```mlir
%a = memref.alloc() : memref<1024xf32>
%val = arith.constant dense<0.0> : vector<16xf32>
@@ -901,15 +856,15 @@ def XeGPU_StoreScatterOp : XeGPU_Op<"store", [MemoryEffects<[MemWrite]>]> {
$value `,`
$dest
(`[` $offsets^ `]`)? `,`
- $mask
- prop-dict
+ $mask
+ prop-dict
attr-dict `:` type(operands)
}];
let builders = [
OpBuilder<(ins "Value": $value, "Value": $dest, "Value": $mask,
- "xegpu::CachePolicyAttr": $l1_hint,
- "xegpu::CachePolicyAttr": $l2_hint,
+ "xegpu::CachePolicyAttr": $l1_hint,
+ "xegpu::CachePolicyAttr": $l2_hint,
"xegpu::CachePolicyAttr": $l3_hint)>
];
diff --git a/mlir/include/mlir/IR/PatternMatch.h b/mlir/include/mlir/IR/PatternMatch.h
index b5a93a0..57e73c1 100644
--- a/mlir/include/mlir/IR/PatternMatch.h
+++ b/mlir/include/mlir/IR/PatternMatch.h
@@ -311,14 +311,14 @@ struct OpOrInterfaceRewritePatternBase : public RewritePattern {
/// opposed to a raw Operation.
template <typename SourceOp>
struct OpRewritePattern
- : public detail::OpOrInterfaceRewritePatternBase<SourceOp> {
+ : public mlir::detail::OpOrInterfaceRewritePatternBase<SourceOp> {
/// Patterns must specify the root operation name they match against, and can
/// also specify the benefit of the pattern matching and a list of generated
/// ops.
OpRewritePattern(MLIRContext *context, PatternBenefit benefit = 1,
ArrayRef<StringRef> generatedNames = {})
- : detail::OpOrInterfaceRewritePatternBase<SourceOp>(
+ : mlir::detail::OpOrInterfaceRewritePatternBase<SourceOp>(
SourceOp::getOperationName(), benefit, context, generatedNames) {}
};
@@ -327,10 +327,10 @@ struct OpRewritePattern
/// of a raw Operation.
template <typename SourceOp>
struct OpInterfaceRewritePattern
- : public detail::OpOrInterfaceRewritePatternBase<SourceOp> {
+ : public mlir::detail::OpOrInterfaceRewritePatternBase<SourceOp> {
OpInterfaceRewritePattern(MLIRContext *context, PatternBenefit benefit = 1)
- : detail::OpOrInterfaceRewritePatternBase<SourceOp>(
+ : mlir::detail::OpOrInterfaceRewritePatternBase<SourceOp>(
Pattern::MatchInterfaceOpTypeTag(), SourceOp::getInterfaceID(),
benefit, context) {}
};
diff --git a/mlir/include/mlir/Pass/PassOptions.h b/mlir/include/mlir/Pass/PassOptions.h
index e1f16c6..0c71f78 100644
--- a/mlir/include/mlir/Pass/PassOptions.h
+++ b/mlir/include/mlir/Pass/PassOptions.h
@@ -377,7 +377,7 @@ private:
/// ListOption<int> someListFlag{*this, "flag-name", llvm::cl::desc("...")};
/// };
template <typename T>
-class PassPipelineOptions : public detail::PassOptions {
+class PassPipelineOptions : public virtual detail::PassOptions {
public:
/// Factory that parses the provided options and returns a unique_ptr to the
/// struct.
diff --git a/mlir/include/mlir/Transforms/DialectConversion.h b/mlir/include/mlir/Transforms/DialectConversion.h
index f643765..ea82848 100644
--- a/mlir/include/mlir/Transforms/DialectConversion.h
+++ b/mlir/include/mlir/Transforms/DialectConversion.h
@@ -728,6 +728,9 @@ class ConversionPatternRewriter final : public PatternRewriter {
public:
~ConversionPatternRewriter() override;
+ /// Return the configuration of the current dialect conversion.
+ const ConversionConfig &getConfig() const;
+
/// Apply a signature conversion to given block. This replaces the block with
/// a new block containing the updated signature. The operations of the given
/// block are inlined into the newly-created block, which is returned.
@@ -1158,6 +1161,16 @@ public:
// ConversionConfig
//===----------------------------------------------------------------------===//
+/// An enum to control folding behavior during dialect conversion.
+enum class DialectConversionFoldingMode {
+ /// Never attempt to fold.
+ Never,
+  /// Only attempt to fold not-yet-legal operations before applying patterns.
+ BeforePatterns,
+  /// Only attempt to fold not-yet-legal operations after applying patterns.
+ AfterPatterns,
+};
+
/// Dialect conversion configuration.
struct ConversionConfig {
/// An optional callback used to notify about match failure diagnostics during
@@ -1240,6 +1253,10 @@ struct ConversionConfig {
/// your patterns do not trigger any IR rollbacks. For details, see
/// https://discourse.llvm.org/t/rfc-a-new-one-shot-dialect-conversion-driver/79083.
bool allowPatternRollback = true;
+
+ /// The folding mode to use during conversion.
+ DialectConversionFoldingMode foldingMode =
+ DialectConversionFoldingMode::BeforePatterns;
};
//===----------------------------------------------------------------------===//
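
A short sketch of opting out of the new folding behavior through the `ConversionConfig` member added above; the target, patterns, and root operation are assumed to be configured elsewhere in the surrounding pass:

```cpp
#include "mlir/Transforms/DialectConversion.h"

using namespace mlir;

// Sketch only: the default mode is BeforePatterns; Never disables folding of
// not-yet-legal operations entirely.
static LogicalResult convertWithoutFolding(Operation *root,
                                           const ConversionTarget &target,
                                           const FrozenRewritePatternSet &patterns) {
  ConversionConfig config;
  config.foldingMode = DialectConversionFoldingMode::Never;
  return applyPartialConversion(root, target, patterns, config);
}
```
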
diff --git a/mlir/lib/Conversion/MemRefToSPIRV/MemRefToSPIRV.cpp b/mlir/lib/Conversion/MemRefToSPIRV/MemRefToSPIRV.cpp
index c7ecd83..2e00b42 100644
--- a/mlir/lib/Conversion/MemRefToSPIRV/MemRefToSPIRV.cpp
+++ b/mlir/lib/Conversion/MemRefToSPIRV/MemRefToSPIRV.cpp
@@ -22,6 +22,7 @@
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Visitors.h"
#include <cassert>
+#include <limits>
#include <optional>
#define DEBUG_TYPE "memref-to-spirv-pattern"
@@ -475,7 +476,12 @@ struct MemoryRequirements {
/// Given an accessed SPIR-V pointer, calculates its alignment requirements, if
/// any.
static FailureOr<MemoryRequirements>
-calculateMemoryRequirements(Value accessedPtr, bool isNontemporal) {
+calculateMemoryRequirements(Value accessedPtr, bool isNontemporal,
+ uint64_t preferredAlignment) {
+ if (preferredAlignment >= std::numeric_limits<uint32_t>::max()) {
+ return failure();
+ }
+
MLIRContext *ctx = accessedPtr.getContext();
auto memoryAccess = spirv::MemoryAccess::None;
@@ -484,7 +490,10 @@ calculateMemoryRequirements(Value accessedPtr, bool isNontemporal) {
}
auto ptrType = cast<spirv::PointerType>(accessedPtr.getType());
- if (ptrType.getStorageClass() != spirv::StorageClass::PhysicalStorageBuffer) {
+ bool mayOmitAlignment =
+ !preferredAlignment &&
+ ptrType.getStorageClass() != spirv::StorageClass::PhysicalStorageBuffer;
+ if (mayOmitAlignment) {
if (memoryAccess == spirv::MemoryAccess::None) {
return MemoryRequirements{spirv::MemoryAccessAttr{}, IntegerAttr{}};
}
@@ -493,6 +502,7 @@ calculateMemoryRequirements(Value accessedPtr, bool isNontemporal) {
}
// PhysicalStorageBuffers require the `Aligned` attribute.
+  // Other storage classes may also carry an `Aligned` attribute.
auto pointeeType = dyn_cast<spirv::ScalarType>(ptrType.getPointeeType());
if (!pointeeType)
return failure();
@@ -504,7 +514,8 @@ calculateMemoryRequirements(Value accessedPtr, bool isNontemporal) {
memoryAccess = memoryAccess | spirv::MemoryAccess::Aligned;
auto memAccessAttr = spirv::MemoryAccessAttr::get(ctx, memoryAccess);
- auto alignment = IntegerAttr::get(IntegerType::get(ctx, 32), *sizeInBytes);
+ auto alignmentValue = preferredAlignment ? preferredAlignment : *sizeInBytes;
+ auto alignment = IntegerAttr::get(IntegerType::get(ctx, 32), alignmentValue);
return MemoryRequirements{memAccessAttr, alignment};
}
@@ -518,16 +529,9 @@ calculateMemoryRequirements(Value accessedPtr, LoadOrStoreOp loadOrStoreOp) {
llvm::is_one_of<LoadOrStoreOp, memref::LoadOp, memref::StoreOp>::value,
"Must be called on either memref::LoadOp or memref::StoreOp");
- Operation *memrefAccessOp = loadOrStoreOp.getOperation();
- auto memrefMemAccess = memrefAccessOp->getAttrOfType<spirv::MemoryAccessAttr>(
- spirv::attributeName<spirv::MemoryAccess>());
- auto memrefAlignment =
- memrefAccessOp->getAttrOfType<IntegerAttr>("alignment");
- if (memrefMemAccess && memrefAlignment)
- return MemoryRequirements{memrefMemAccess, memrefAlignment};
-
return calculateMemoryRequirements(accessedPtr,
- loadOrStoreOp.getNontemporal());
+ loadOrStoreOp.getNontemporal(),
+ loadOrStoreOp.getAlignment().value_or(0));
}
LogicalResult
diff --git a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
index 17a79e3..f9e2a01 100644
--- a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
+++ b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
@@ -247,8 +247,9 @@ public:
MemRefType memRefTy = loadOrStoreOp.getMemRefType();
// Resolve alignment.
- unsigned align;
- if (failed(getVectorToLLVMAlignment(*this->getTypeConverter(), vectorTy,
+ unsigned align = loadOrStoreOp.getAlignment().value_or(0);
+ if (!align &&
+ failed(getVectorToLLVMAlignment(*this->getTypeConverter(), vectorTy,
memRefTy, align, useVectorAlignment)))
return rewriter.notifyMatchFailure(loadOrStoreOp,
"could not resolve alignment");
diff --git a/mlir/lib/Dialect/AMDGPU/IR/AMDGPUDialect.cpp b/mlir/lib/Dialect/AMDGPU/IR/AMDGPUDialect.cpp
index 9a0a230..d7ffdcb 100644
--- a/mlir/lib/Dialect/AMDGPU/IR/AMDGPUDialect.cpp
+++ b/mlir/lib/Dialect/AMDGPU/IR/AMDGPUDialect.cpp
@@ -518,8 +518,8 @@ LogicalResult GatherToLDSOp::verify() {
MemRefType srcType = cast<MemRefType>(getSrc().getType());
MemRefType dstType = cast<MemRefType>(getDst().getType());
- if (!dstType.areTrailingDimsContiguous(dstType.getRank()))
- return emitOpError("destination types must be contiguous");
+ if (!dstType.areTrailingDimsContiguous(1))
+    return emitOpError("destination type innermost dim must be contiguous");
auto elemType = srcType.getElementType();
// Check $src and $dst element types are the same.
diff --git a/mlir/lib/Dialect/AMDGPU/Transforms/FoldMemRefsOps.cpp b/mlir/lib/Dialect/AMDGPU/Transforms/FoldMemRefsOps.cpp
index a3fdc7e..d547510 100644
--- a/mlir/lib/Dialect/AMDGPU/Transforms/FoldMemRefsOps.cpp
+++ b/mlir/lib/Dialect/AMDGPU/Transforms/FoldMemRefsOps.cpp
@@ -28,62 +28,79 @@ struct AmdgpuFoldMemRefOpsPass final
}
};
+static LogicalResult foldMemrefViewOp(PatternRewriter &rewriter, Location loc,
+ Value view, mlir::OperandRange indices,
+ SmallVectorImpl<Value> &resolvedIndices,
+ Value &memrefBase, StringRef role) {
+ Operation *defOp = view.getDefiningOp();
+ if (!defOp) {
+ return failure();
+ }
+ return llvm::TypeSwitch<Operation *, LogicalResult>(defOp)
+ .Case<memref::SubViewOp>([&](memref::SubViewOp subviewOp) {
+ mlir::affine::resolveIndicesIntoOpWithOffsetsAndStrides(
+ rewriter, loc, subviewOp.getMixedOffsets(),
+ subviewOp.getMixedStrides(), subviewOp.getDroppedDims(), indices,
+ resolvedIndices);
+ memrefBase = subviewOp.getSource();
+ return success();
+ })
+ .Case<memref::ExpandShapeOp>([&](memref::ExpandShapeOp expandShapeOp) {
+ if (failed(mlir::memref::resolveSourceIndicesExpandShape(
+ loc, rewriter, expandShapeOp, indices, resolvedIndices,
+ false))) {
+ return failure();
+ }
+ memrefBase = expandShapeOp.getViewSource();
+ return success();
+ })
+ .Case<memref::CollapseShapeOp>(
+ [&](memref::CollapseShapeOp collapseShapeOp) {
+ if (failed(mlir::memref::resolveSourceIndicesCollapseShape(
+ loc, rewriter, collapseShapeOp, indices,
+ resolvedIndices))) {
+ return failure();
+ }
+ memrefBase = collapseShapeOp.getViewSource();
+ return success();
+ })
+ .Default([&](Operation *op) {
+ return rewriter.notifyMatchFailure(
+ op, (role + " producer is not one of SubViewOp, ExpandShapeOp, or "
+ "CollapseShapeOp")
+ .str());
+ });
+}
+
struct FoldMemRefOpsIntoGatherToLDSOp final : OpRewritePattern<GatherToLDSOp> {
using OpRewritePattern::OpRewritePattern;
LogicalResult matchAndRewrite(GatherToLDSOp op,
PatternRewriter &rewriter) const override {
Location loc = op.getLoc();
- Value memrefSource;
- SmallVector<Value> sourceIndices;
- auto foldResult =
- llvm::TypeSwitch<Operation *, LogicalResult>(
- op.getSrc().getDefiningOp())
- .Case<memref::SubViewOp>([&](memref::SubViewOp subviewOp) {
- // If the source is a SubViewOp, we can directly rewrite the
- // GatherToLDSOp.
- mlir::affine::resolveIndicesIntoOpWithOffsetsAndStrides(
- rewriter, loc, subviewOp.getMixedOffsets(),
- subviewOp.getMixedStrides(), subviewOp.getDroppedDims(),
- op.getSrcIndices(), sourceIndices);
- memrefSource = subviewOp.getSource();
- return success();
- })
- .Case<memref::ExpandShapeOp>(
- [&](memref::ExpandShapeOp expandShapeOp) {
- if (failed(mlir::memref::resolveSourceIndicesExpandShape(
- loc, rewriter, expandShapeOp, op.getSrcIndices(),
- sourceIndices, false))) {
- return failure();
- }
- memrefSource = expandShapeOp.getViewSource();
- return success();
- })
- .Case<memref::CollapseShapeOp>(
- [&](memref::CollapseShapeOp collapseShapeOp) {
- if (failed(mlir::memref::resolveSourceIndicesCollapseShape(
- loc, rewriter, collapseShapeOp, op.getSrcIndices(),
- sourceIndices))) {
- return failure();
- }
- memrefSource = collapseShapeOp.getViewSource();
- return success();
- })
- .Default([&](Operation *op) {
- // If the source is not a SubViewOp, ExpandShapeOp, or
- // CollapseShapeOp, we cannot fold the GatherToLDSOp.
- return rewriter.notifyMatchFailure(
- op,
- "source producer is not one of SubViewOp, ExpandShapeOp, or "
- "CollapseShapeOp");
- });
+ SmallVector<Value> sourceIndices, destIndices;
+ Value memrefSource, memrefDest;
+
+ auto foldSrcResult =
+ foldMemrefViewOp(rewriter, loc, op.getSrc(), op.getSrcIndices(),
+ sourceIndices, memrefSource, "source");
+
+ if (failed(foldSrcResult)) {
+ memrefSource = op.getSrc();
+ sourceIndices = op.getSrcIndices();
+ }
+
+ auto foldDstResult =
+ foldMemrefViewOp(rewriter, loc, op.getDst(), op.getDstIndices(),
+ destIndices, memrefDest, "destination");
- if (failed(foldResult)) {
- return failure();
+ if (failed(foldDstResult)) {
+ memrefDest = op.getDst();
+ destIndices = op.getDstIndices();
}
rewriter.replaceOpWithNewOp<GatherToLDSOp>(op, memrefSource, sourceIndices,
- op.getDst(), op.getDstIndices(),
+ memrefDest, destIndices,
op.getTransferType());
return success();
diff --git a/mlir/lib/Dialect/EmitC/IR/EmitC.cpp b/mlir/lib/Dialect/EmitC/IR/EmitC.cpp
index e6a3154..e79da92 100644
--- a/mlir/lib/Dialect/EmitC/IR/EmitC.cpp
+++ b/mlir/lib/Dialect/EmitC/IR/EmitC.cpp
@@ -114,11 +114,8 @@ bool mlir::emitc::isIntegerIndexOrOpaqueType(Type type) {
bool mlir::emitc::isSupportedFloatType(Type type) {
if (auto floatType = llvm::dyn_cast<FloatType>(type)) {
switch (floatType.getWidth()) {
- case 16: {
- if (llvm::isa<Float16Type, BFloat16Type>(type))
- return true;
- return false;
- }
+ case 16:
+ return llvm::isa<Float16Type, BFloat16Type>(type);
case 32:
case 64:
return true;
diff --git a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
index 5a72ef1..2503ccb 100644
--- a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
+++ b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
@@ -756,7 +756,8 @@ void LaunchOp::build(OpBuilder &builder, OperationState &result,
Type asyncTokenType, ValueRange asyncDependencies,
TypeRange workgroupAttributions,
TypeRange privateAttributions, Value clusterSizeX,
- Value clusterSizeY, Value clusterSizeZ) {
+ Value clusterSizeY, Value clusterSizeZ,
+ FlatSymbolRefAttr module, FlatSymbolRefAttr function) {
OpBuilder::InsertionGuard g(builder);
// Add a WorkGroup attribution attribute. This attribute is required to
@@ -781,6 +782,12 @@ void LaunchOp::build(OpBuilder &builder, OperationState &result,
if (dynamicSharedMemorySize)
result.addOperands(dynamicSharedMemorySize);
+ // Add optional module and function attributes.
+ if (module)
+ result.addAttribute(getModuleAttrName(result.name), module);
+ if (function)
+ result.addAttribute(getFunctionAttrName(result.name), function);
+
// Create a kernel body region with kNumConfigRegionAttributes + N memory
// attributions, where the first kNumConfigRegionAttributes arguments have
// `index` type and the rest have the same types as the data operands.
@@ -944,6 +951,21 @@ void LaunchOp::print(OpAsmPrinter &p) {
p << ' ' << getDynamicSharedMemorySizeKeyword() << ' '
<< getDynamicSharedMemorySize();
+ // Print optional module attribute.
+ StringRef moduleAttrName = getModuleAttrName();
+ if (auto module = getModule()) {
+ p << ' ' << moduleAttrName << '(';
+ p.printSymbolName(*module);
+ p << ')';
+ }
+ // Print optional function attribute.
+ StringRef functionAttrName = getFunctionAttrName();
+ if (auto function = getFunction()) {
+ p << ' ' << functionAttrName << '(';
+ p.printSymbolName(*function);
+ p << ')';
+ }
+
printAttributions(p, getWorkgroupKeyword(), getWorkgroupAttributions());
printAttributions(p, getPrivateKeyword(), getPrivateAttributions());
@@ -952,7 +974,8 @@ void LaunchOp::print(OpAsmPrinter &p) {
p.printRegion(getBody(), /*printEntryBlockArgs=*/false);
p.printOptionalAttrDict((*this)->getAttrs(), /*elidedAttrs=*/{
LaunchOp::getOperandSegmentSizeAttr(),
- getNumWorkgroupAttributionsAttrName()});
+ getNumWorkgroupAttributionsAttrName(),
+ moduleAttrName, functionAttrName});
}
// Parse the size assignment blocks for blocks and threads. These have the form
@@ -990,6 +1013,9 @@ parseSizeAssignment(OpAsmParser &parser,
/// `clusters` `(` ssa-id-list `)` `in` ssa-reassignment (Optional)
/// `blocks` `(` ssa-id-list `)` `in` ssa-reassignment
/// `threads` `(` ssa-id-list `)` `in` ssa-reassignment
+/// (`dynamic_shared_memory_size` ssa-use)?
+/// (`module(` symbol-ref-id `)`)?
+/// (`function(` symbol-ref-id `)`)?
/// memory-attribution
/// region attr-dict?
/// ssa-reassignment ::= `(` ssa-id `=` ssa-use (`,` ssa-id `=` ssa-use)* `)`
@@ -1060,6 +1086,27 @@ ParseResult LaunchOp::parse(OpAsmParser &parser, OperationState &result) {
return failure();
}
+ // Parse optional module attribute.
+ StringRef moduleAttrName = getModuleAttrName(result.name);
+ if (succeeded(parser.parseOptionalKeyword(moduleAttrName))) {
+ FlatSymbolRefAttr moduleSymbol;
+ if (parser.parseLParen() ||
+ parser.parseAttribute(moduleSymbol, Type(), moduleAttrName,
+ result.attributes) ||
+ parser.parseRParen())
+ return failure();
+ }
+ // Parse optional function attribute.
+ StringRef functionAttrName = getFunctionAttrName(result.name);
+ if (succeeded(parser.parseOptionalKeyword(functionAttrName))) {
+ FlatSymbolRefAttr funcSymbol;
+ if (parser.parseLParen() ||
+ parser.parseAttribute(funcSymbol, Type(), functionAttrName,
+ result.attributes) ||
+ parser.parseRParen())
+ return failure();
+ }
+
// Create the region arguments, it has kNumConfigRegionAttributes arguments
// that correspond to block/thread identifiers and grid/block sizes, all
// having `index` type, a variadic number of WorkGroup Attributions and
diff --git a/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp b/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp
index 99f5c5b..d4978ca 100644
--- a/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp
+++ b/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp
@@ -356,8 +356,8 @@ public:
auto funcWalkResult = func.walk([&](gpu::LaunchOp op) {
SetVector<Value> operands;
std::string kernelFnName;
- if (op.getKernelFunc()) {
- kernelFnName = op.getKernelFunc()->getRootReference().str();
+ if (op.getFunction()) {
+ kernelFnName = op.getFunction()->str();
} else {
kernelFnName =
Twine(op->getParentOfType<SymbolOpInterface>().getName(),
@@ -403,9 +403,8 @@ private:
OpBuilder builder(context);
std::string kernelModuleName;
gpu::GPUModuleOp kernelModule;
- if (gpuLaunchOp.getKernelModule()) {
- kernelModuleName =
- gpuLaunchOp.getKernelModule()->getRootReference().str();
+ if (gpuLaunchOp.getModule()) {
+ kernelModuleName = gpuLaunchOp.getModule()->str();
kernelModule =
parentSymbolTable.lookup<gpu::GPUModuleOp>(kernelModuleName);
} else {
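
The `gpu.launch` accessors used in the hunk above (`getModule()`, `getFunction()`) return optional symbol names. A minimal sketch of the same preference logic, with an illustrative fallback name:

```cpp
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include <string>

// Sketch only: prefer the new optional `function` hint on gpu.launch when
// choosing the outlined kernel's name; the fallback string is a placeholder.
static std::string pickKernelName(mlir::gpu::LaunchOp op) {
  if (op.getFunction())
    return op.getFunction()->str();
  return "outlined_kernel";
}
```
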
diff --git a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
index e0977f5..7ad429e 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
@@ -189,6 +189,26 @@ LogicalResult BulkStoreOp::verify() {
return success();
}
+LogicalResult PMEventOp::verify() {
+ auto eventId = getEventId();
+ auto maskedEventId = getMaskedEventId();
+ if (!maskedEventId && !eventId) {
+ return emitOpError() << "either `id` or `mask` must be set";
+ }
+
+ if (maskedEventId && eventId) {
+ return emitOpError() << "`id` and `mask` cannot be set at the same time";
+ }
+
+ if (eventId) {
+ if (eventId < 0 || eventId > 15) {
+ return emitOpError() << "`id` must be between 0 and 15";
+ }
+ }
+
+ return llvm::success();
+}
+
// Given the element type of an operand and whether or not it is an accumulator,
// this function returns the PTX type (`NVVM::MMATypes`) that corresponds to the
// operand's element type.
diff --git a/mlir/lib/Dialect/LLVMIR/Transforms/InlinerInterfaceImpl.cpp b/mlir/lib/Dialect/LLVMIR/Transforms/InlinerInterfaceImpl.cpp
index b951df8..18f85b6 100644
--- a/mlir/lib/Dialect/LLVMIR/Transforms/InlinerInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/LLVMIR/Transforms/InlinerInterfaceImpl.cpp
@@ -129,7 +129,6 @@ handleInlinedAllocas(Operation *call,
OpBuilder::InsertionGuard insertionGuard(builder);
builder.setInsertionPoint(allocaOp);
LLVM::LifetimeStartOp::create(builder, allocaOp.getLoc(),
- arraySize.getValue().getLimitedValue(),
allocaOp.getResult());
}
allocaOp->moveAfter(newConstant);
@@ -147,7 +146,6 @@ handleInlinedAllocas(Operation *call,
for (auto &[allocaOp, arraySize, shouldInsertLifetime] : allocasToMove) {
if (shouldInsertLifetime)
LLVM::LifetimeEndOp::create(builder, allocaOp.getLoc(),
- arraySize.getValue().getLimitedValue(),
allocaOp.getResult());
}
}
diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
index 34c63d3..9d7fb18 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -194,9 +194,10 @@ static void buildMatmulOp(OpBuilder &b, OperationState &state,
ArrayRef<AffineMap> indexingMaps) {
// Initialize indexingMaps attribute, for MatmulOp.
SmallVector<Attribute, 3> indexingMapsAttrVal;
- indexingMapsAttrVal = llvm::map_to_vector(
- MatmulOp::getDefaultIndexingMaps(b.getContext()),
- [](AffineMap map) -> Attribute { return AffineMapAttr::get(map); });
+ indexingMapsAttrVal =
+ llvm::map_to_vector(indexingMaps, [](AffineMap map) -> Attribute {
+ return AffineMapAttr::get(map);
+ });
state.addAttribute("indexing_maps", b.getArrayAttr(indexingMapsAttrVal));
return buildStructuredOp(b, state, resultTensorTypes, inputs, outputs,
attributes, regionBuilder);
@@ -3749,6 +3750,25 @@ std::pair<int64_t, int64_t> getFmrFromWinogradConv2DFmr(WinogradConv2DFmr fmr) {
// MatMulOp
//===----------------------------------------------------------------------===//
+static FailureOr<SmallVector<SmallVector<int64_t>>>
+getAffineResultPositions(ArrayAttr maps) {
+ SmallVector<SmallVector<int64_t>> positions;
+ for (auto map : maps) {
+ AffineMapAttr attr = dyn_cast<AffineMapAttr>(map);
+ if (!attr)
+ return failure();
+ SmallVector<int64_t> pos;
+ for (auto result : attr.getAffineMap().getResults()) {
+ auto dim = dyn_cast<AffineDimExpr>(result);
+ if (!dim)
+ return failure();
+ pos.push_back(dim.getPosition());
+ }
+ positions.push_back(pos);
+ }
+ return positions;
+}
+
 /// Returns a list of AffineMap with the typical matmul indexing characteristic.
SmallVector<AffineMap> MatmulOp::getDefaultIndexingMaps(MLIRContext *context) {
AffineExpr d0, d1, d2;
@@ -3760,6 +3780,20 @@ SmallVector<AffineMap> MatmulOp::getDefaultIndexingMaps(MLIRContext *context) {
return indexingMaps;
}
+bool MatmulOp::isDefaultIndexingMaps(Attribute attr) {
+ ArrayAttr maps = dyn_cast<ArrayAttr>(attr);
+ if (!maps)
+ return false;
+ if (maps.size() != 3)
+ return false;
+ auto positions = getAffineResultPositions(maps);
+ if (failed(positions))
+ return false;
+ return (*positions)[0] == SmallVector<int64_t>{0, 2} &&
+ (*positions)[1] == SmallVector<int64_t>{2, 1} &&
+ (*positions)[2] == SmallVector<int64_t>{0, 1};
+}
+
SmallVector<utils::IteratorType> MatmulOp::getIteratorTypesArray() {
return SmallVector<utils::IteratorType>{utils::IteratorType::parallel,
utils::IteratorType::parallel,
@@ -3912,6 +3946,380 @@ Speculation::Speculatability MatmulOp::getSpeculatability() {
return getGenericSpeculatabilityImpl(cast<LinalgOp>(getOperation()));
}
+SmallVector<AffineMap>
+MatmulTransposeAOp::getDefaultIndexingMaps(OpBuilder &builder) {
+ AffineExpr d0, d1, d2;
+ MLIRContext *context = builder.getContext();
+ bindDims(context, d0, d1, d2);
+ AffineMap mapLHS = AffineMap::get(3, 0, {d2, d0}, context);
+ AffineMap mapRHS = AffineMap::get(3, 0, {d2, d1}, context);
+ AffineMap mapOut = AffineMap::get(3, 0, {d0, d1}, context);
+ return {mapLHS, mapRHS, mapOut};
+}
+
+bool MatmulTransposeAOp::isDefaultIndexingMaps(Attribute attr) {
+ ArrayAttr maps = dyn_cast<ArrayAttr>(attr);
+ if (!maps)
+ return false;
+ if (maps.size() != 3)
+ return false;
+ auto positions = getAffineResultPositions(maps);
+ if (failed(positions))
+ return false;
+ return (*positions)[0] == SmallVector<int64_t>{2, 0} &&
+ (*positions)[1] == SmallVector<int64_t>{2, 1} &&
+ (*positions)[2] == SmallVector<int64_t>{0, 1};
+}
+
+void linalg::MatmulTransposeAOp::build(OpBuilder &builder,
+ OperationState &result,
+ ValueRange inputs, ValueRange outputs,
+ ArrayRef<NamedAttribute> attributes) {
+ buildMatmulOp(builder, result, std::nullopt, inputs, outputs, attributes,
+ MatmulOp::getRegionBuilder(), getDefaultIndexingMaps(builder));
+}
+
+MatmulTransposeAOp
+MatmulTransposeAOp::create(OpBuilder &builder, Location location,
+ ValueRange inputs, ValueRange outputs,
+ ArrayRef<NamedAttribute> attributes) {
+ OperationState state(location, getOperationName());
+ build(builder, state, inputs, outputs, attributes);
+ auto res = dyn_cast<MatmulTransposeAOp>(builder.create(state));
+ assert(res && "builder didn't return the right type");
+ return res;
+}
+
+void linalg::MatmulTransposeAOp::build(OpBuilder &builder,
+ OperationState &result,
+ TypeRange resultTensorTypes,
+ ValueRange inputs, ValueRange outputs,
+ ArrayRef<NamedAttribute> attributes) {
+ buildMatmulOp(builder, result, resultTensorTypes, inputs, outputs, attributes,
+ MatmulOp::getRegionBuilder(), getDefaultIndexingMaps(builder));
+}
+
+MatmulTransposeAOp
+MatmulTransposeAOp::create(OpBuilder &builder, Location location,
+ TypeRange resultTensorTypes, ValueRange inputs,
+ ValueRange outputs,
+ ArrayRef<NamedAttribute> attributes) {
+ OperationState state(location, getOperationName());
+ build(builder, state, resultTensorTypes, inputs, outputs, attributes);
+ auto res = dyn_cast<MatmulTransposeAOp>(builder.create(state));
+ assert(res && "builder didn't return the right type");
+ return res;
+}
+
+void linalg::MatmulTransposeAOp::build(OpBuilder &builder,
+ OperationState &result,
+ TypeRange resultTensorTypes,
+ ValueRange inputs, ValueRange outputs,
+ Attribute cast,
+ ArrayRef<NamedAttribute> attributes) {
+ result.addAttribute("cast", cast);
+ buildMatmulOp(builder, result, resultTensorTypes, inputs, outputs, attributes,
+ MatmulOp::getRegionBuilder(), getDefaultIndexingMaps(builder));
+}
+
+MatmulTransposeAOp
+MatmulTransposeAOp::create(OpBuilder &builder, Location location,
+ TypeRange resultTensorTypes, ValueRange inputs,
+ ValueRange outputs, Attribute cast,
+ ArrayRef<NamedAttribute> attributes) {
+ OperationState state(location, getOperationName());
+ build(builder, state, resultTensorTypes, inputs, outputs, cast, attributes);
+ auto res = dyn_cast<MatmulTransposeAOp>(builder.create(state));
+ assert(res && "builder didn't return the right type");
+ return res;
+}
+
+bool MatmulTransposeAOp::classof(Operation *op) {
+ return dyn_cast_or_null<linalg::MatmulOp>(op) &&
+ MatmulTransposeAOp::isDefaultIndexingMaps(
+ op->getAttr("indexing_maps"));
+}
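
Because the `classof` above keys purely off `linalg.matmul`'s `indexing_maps`, the transposed-A form can be matched on existing IR without a separately registered op. A minimal sketch, with the walk and diagnostic chosen for illustration:

```cpp
#include "mlir/Dialect/Linalg/IR/Linalg.h"

// Sketch only: flag matmuls that are structurally recognized as the
// transposed-A variant via the indexing_maps-based classof above.
static void flagTransposedMatmuls(mlir::Operation *root) {
  root->walk([](mlir::linalg::MatmulOp op) {
    if (mlir::isa<mlir::linalg::MatmulTransposeAOp>(op.getOperation()))
      op->emitRemark("matmul with transposed A operand");
  });
}
```
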
+
+SmallVector<AffineMap>
+MatmulTransposeBOp::getDefaultIndexingMaps(OpBuilder &builder) {
+ AffineExpr d0, d1, d2;
+ MLIRContext *context = builder.getContext();
+ bindDims(context, d0, d1, d2);
+ AffineMap mapLHS = AffineMap::get(3, 0, {d0, d2}, context);
+ AffineMap mapRHS = AffineMap::get(3, 0, {d1, d2}, context);
+ AffineMap mapOut = AffineMap::get(3, 0, {d0, d1}, context);
+ return {mapLHS, mapRHS, mapOut};
+}
+
+bool MatmulTransposeBOp::isDefaultIndexingMaps(Attribute attr) {
+ ArrayAttr maps = dyn_cast<ArrayAttr>(attr);
+ if (!maps)
+ return false;
+ if (maps.size() != 3)
+ return false;
+ auto positions = getAffineResultPositions(maps);
+ if (failed(positions))
+ return false;
+ return (*positions)[0] == SmallVector<int64_t>{0, 2} &&
+ (*positions)[1] == SmallVector<int64_t>{1, 2} &&
+ (*positions)[2] == SmallVector<int64_t>{0, 1};
+}
+
+void linalg::MatmulTransposeBOp::build(OpBuilder &builder,
+ OperationState &result,
+ ValueRange inputs, ValueRange outputs,
+ ArrayRef<NamedAttribute> attributes) {
+ buildMatmulOp(builder, result, std::nullopt, inputs, outputs, attributes,
+ MatmulOp::getRegionBuilder(), getDefaultIndexingMaps(builder));
+}
+
+MatmulTransposeBOp
+MatmulTransposeBOp::create(OpBuilder &builder, Location location,
+ ValueRange inputs, ValueRange outputs,
+ ArrayRef<NamedAttribute> attributes) {
+ OperationState state(location, getOperationName());
+ build(builder, state, inputs, outputs, attributes);
+ auto res = dyn_cast<MatmulTransposeBOp>(builder.create(state));
+ assert(res && "builder didn't return the right type");
+ return res;
+}
+
+void linalg::MatmulTransposeBOp::build(OpBuilder &builder,
+ OperationState &result,
+ TypeRange resultTensorTypes,
+ ValueRange inputs, ValueRange outputs,
+ ArrayRef<NamedAttribute> attributes) {
+ buildMatmulOp(builder, result, resultTensorTypes, inputs, outputs, attributes,
+ MatmulOp::getRegionBuilder(), getDefaultIndexingMaps(builder));
+}
+
+MatmulTransposeBOp
+MatmulTransposeBOp::create(OpBuilder &builder, Location location,
+ TypeRange resultTensorTypes, ValueRange inputs,
+ ValueRange outputs,
+ ArrayRef<NamedAttribute> attributes) {
+ OperationState state(location, getOperationName());
+ build(builder, state, resultTensorTypes, inputs, outputs, attributes);
+ auto res = dyn_cast<MatmulTransposeBOp>(builder.create(state));
+ assert(res && "builder didn't return the right type");
+ return res;
+}
+
+void linalg::MatmulTransposeBOp::build(OpBuilder &builder,
+ OperationState &result,
+ TypeRange resultTensorTypes,
+ ValueRange inputs, ValueRange outputs,
+ Attribute cast,
+ ArrayRef<NamedAttribute> attributes) {
+ result.addAttribute("cast", cast);
+ buildMatmulOp(builder, result, resultTensorTypes, inputs, outputs, attributes,
+ MatmulOp::getRegionBuilder(), getDefaultIndexingMaps(builder));
+}
+
+MatmulTransposeBOp
+MatmulTransposeBOp::create(OpBuilder &builder, Location location,
+ TypeRange resultTensorTypes, ValueRange inputs,
+ ValueRange outputs, Attribute cast,
+ ArrayRef<NamedAttribute> attributes) {
+ OperationState state(location, getOperationName());
+ build(builder, state, resultTensorTypes, inputs, outputs, cast, attributes);
+ auto res = dyn_cast<MatmulTransposeBOp>(builder.create(state));
+ assert(res && "builder didn't return the right type");
+ return res;
+}
+
+bool MatmulTransposeBOp::classof(Operation *op) {
+ return dyn_cast_or_null<linalg::MatmulOp>(op) &&
+ MatmulTransposeBOp::isDefaultIndexingMaps(
+ op->getAttr("indexing_maps"));
+}
+
+SmallVector<AffineMap>
+BatchMatmulTransposeAOp::getDefaultIndexingMaps(OpBuilder &builder) {
+ AffineExpr d0, d1, d2, d3;
+ MLIRContext *context = builder.getContext();
+ bindDims(context, d0, d1, d2, d3);
+ AffineMap mapLHS = AffineMap::get(4, 0, {d0, d3, d1}, context);
+ AffineMap mapRHS = AffineMap::get(4, 0, {d0, d3, d2}, context);
+ AffineMap mapOut = AffineMap::get(4, 0, {d0, d1, d2}, context);
+ return {mapLHS, mapRHS, mapOut};
+}
+
+bool BatchMatmulTransposeAOp::isDefaultIndexingMaps(Attribute attr) {
+ ArrayAttr maps = dyn_cast<ArrayAttr>(attr);
+ if (!maps)
+ return false;
+ if (maps.size() != 3)
+ return false;
+ auto positions = getAffineResultPositions(maps);
+ if (failed(positions))
+ return false;
+ return (*positions)[0] == SmallVector<int64_t>{0, 3, 1} &&
+ (*positions)[1] == SmallVector<int64_t>{0, 3, 2} &&
+ (*positions)[2] == SmallVector<int64_t>{0, 1, 2};
+}
+
+void linalg::BatchMatmulTransposeAOp::build(
+ OpBuilder &builder, OperationState &result, ValueRange inputs,
+ ValueRange outputs, ArrayRef<NamedAttribute> attributes) {
+ buildMatmulOp(builder, result, std::nullopt, inputs, outputs, attributes,
+ BatchMatmulOp::getRegionBuilder(),
+ getDefaultIndexingMaps(builder));
+}
+
+BatchMatmulTransposeAOp
+BatchMatmulTransposeAOp::create(OpBuilder &builder, Location location,
+ ValueRange inputs, ValueRange outputs,
+ ArrayRef<NamedAttribute> attributes) {
+ OperationState state(location, getOperationName());
+ build(builder, state, inputs, outputs, attributes);
+ auto res = dyn_cast<BatchMatmulTransposeAOp>(builder.create(state));
+ assert(res && "builder didn't return the right type");
+ return res;
+}
+
+void linalg::BatchMatmulTransposeAOp::build(
+ OpBuilder &builder, OperationState &result, TypeRange resultTensorTypes,
+ ValueRange inputs, ValueRange outputs,
+ ArrayRef<NamedAttribute> attributes) {
+ buildMatmulOp(builder, result, resultTensorTypes, inputs, outputs, attributes,
+ BatchMatmulOp::getRegionBuilder(),
+ getDefaultIndexingMaps(builder));
+}
+
+BatchMatmulTransposeAOp
+BatchMatmulTransposeAOp::create(OpBuilder &builder, Location location,
+ TypeRange resultTensorTypes, ValueRange inputs,
+ ValueRange outputs,
+ ArrayRef<NamedAttribute> attributes) {
+ OperationState state(location, getOperationName());
+ build(builder, state, resultTensorTypes, inputs, outputs, attributes);
+ auto res = dyn_cast<BatchMatmulTransposeAOp>(builder.create(state));
+ assert(res && "builder didn't return the right type");
+ return res;
+}
+
+void linalg::BatchMatmulTransposeAOp::build(
+ OpBuilder &builder, OperationState &result, TypeRange resultTensorTypes,
+ ValueRange inputs, ValueRange outputs, Attribute cast,
+ ArrayRef<NamedAttribute> attributes) {
+ result.addAttribute("cast", cast);
+ buildMatmulOp(builder, result, resultTensorTypes, inputs, outputs, attributes,
+ BatchMatmulOp::getRegionBuilder(),
+ getDefaultIndexingMaps(builder));
+}
+
+BatchMatmulTransposeAOp
+BatchMatmulTransposeAOp::create(OpBuilder &builder, Location location,
+ TypeRange resultTensorTypes, ValueRange inputs,
+ ValueRange outputs, Attribute cast,
+ ArrayRef<NamedAttribute> attributes) {
+ OperationState state(location, getOperationName());
+ build(builder, state, resultTensorTypes, inputs, outputs, cast, attributes);
+ auto res = dyn_cast<BatchMatmulTransposeAOp>(builder.create(state));
+ assert(res && "builder didn't return the right type");
+ return res;
+}
+
+bool BatchMatmulTransposeAOp::classof(Operation *op) {
+ return dyn_cast_or_null<linalg::BatchMatmulOp>(op) &&
+ BatchMatmulTransposeAOp::isDefaultIndexingMaps(
+ op->getAttr("indexing_maps"));
+}
+
+SmallVector<AffineMap>
+BatchMatmulTransposeBOp::getDefaultIndexingMaps(OpBuilder &builder) {
+ AffineExpr d0, d1, d2, d3;
+ MLIRContext *context = builder.getContext();
+ bindDims(context, d0, d1, d2, d3);
+ AffineMap mapLHS = AffineMap::get(4, 0, {d0, d1, d3}, context);
+ AffineMap mapRHS = AffineMap::get(4, 0, {d0, d2, d3}, context);
+ AffineMap mapOut = AffineMap::get(4, 0, {d0, d1, d2}, context);
+ return {mapLHS, mapRHS, mapOut};
+}
+
+bool BatchMatmulTransposeBOp::isDefaultIndexingMaps(Attribute attr) {
+ ArrayAttr maps = dyn_cast<ArrayAttr>(attr);
+ if (!maps)
+ return false;
+ if (maps.size() != 3)
+ return false;
+ auto positions = getAffineResultPositions(maps);
+ if (failed(positions))
+ return false;
+ return (*positions)[0] == SmallVector<int64_t>{0, 1, 3} &&
+ (*positions)[1] == SmallVector<int64_t>{0, 2, 3} &&
+ (*positions)[2] == SmallVector<int64_t>{0, 1, 2};
+}
+
+void linalg::BatchMatmulTransposeBOp::build(
+ OpBuilder &builder, OperationState &result, ValueRange inputs,
+ ValueRange outputs, ArrayRef<NamedAttribute> attributes) {
+ buildMatmulOp(builder, result, std::nullopt, inputs, outputs, attributes,
+ BatchMatmulOp::getRegionBuilder(),
+ getDefaultIndexingMaps(builder));
+}
+
+BatchMatmulTransposeBOp
+BatchMatmulTransposeBOp::create(OpBuilder &builder, Location location,
+ ValueRange inputs, ValueRange outputs,
+ ArrayRef<NamedAttribute> attributes) {
+ OperationState state(location, getOperationName());
+ build(builder, state, inputs, outputs, attributes);
+ auto res = dyn_cast<BatchMatmulTransposeBOp>(builder.create(state));
+ assert(res && "builder didn't return the right type");
+ return res;
+}
+
+void linalg::BatchMatmulTransposeBOp::build(
+ OpBuilder &builder, OperationState &result, TypeRange resultTensorTypes,
+ ValueRange inputs, ValueRange outputs,
+ ArrayRef<NamedAttribute> attributes) {
+ buildMatmulOp(builder, result, resultTensorTypes, inputs, outputs, attributes,
+ BatchMatmulOp::getRegionBuilder(),
+ getDefaultIndexingMaps(builder));
+}
+
+BatchMatmulTransposeBOp
+BatchMatmulTransposeBOp::create(OpBuilder &builder, Location location,
+ TypeRange resultTensorTypes, ValueRange inputs,
+ ValueRange outputs,
+ ArrayRef<NamedAttribute> attributes) {
+ OperationState state(location, getOperationName());
+ build(builder, state, resultTensorTypes, inputs, outputs, attributes);
+ auto res = dyn_cast<BatchMatmulTransposeBOp>(builder.create(state));
+ assert(res && "builder didn't return the right type");
+ return res;
+}
+
+void linalg::BatchMatmulTransposeBOp::build(
+ OpBuilder &builder, OperationState &result, TypeRange resultTensorTypes,
+ ValueRange inputs, ValueRange outputs, Attribute cast,
+ ArrayRef<NamedAttribute> attributes) {
+ result.addAttribute("cast", cast);
+ buildMatmulOp(builder, result, resultTensorTypes, inputs, outputs, attributes,
+ BatchMatmulOp::getRegionBuilder(),
+ getDefaultIndexingMaps(builder));
+}
+
+BatchMatmulTransposeBOp
+BatchMatmulTransposeBOp::create(OpBuilder &builder, Location location,
+ TypeRange resultTensorTypes, ValueRange inputs,
+ ValueRange outputs, Attribute cast,
+ ArrayRef<NamedAttribute> attributes) {
+ OperationState state(location, getOperationName());
+ build(builder, state, resultTensorTypes, inputs, outputs, cast, attributes);
+ auto res = dyn_cast<BatchMatmulTransposeBOp>(builder.create(state));
+ assert(res && "builder didn't return the right type");
+ return res;
+}
+
+bool BatchMatmulTransposeBOp::classof(Operation *op) {
+ return dyn_cast_or_null<linalg::BatchMatmulOp>(op) &&
+ BatchMatmulTransposeBOp::isDefaultIndexingMaps(
+ op->getAttr("indexing_maps"));
+}
+
//===----------------------------------------------------------------------===//
// ContractOp
//===----------------------------------------------------------------------===//
@@ -4120,6 +4528,20 @@ BatchMatmulOp::getDefaultIndexingMaps(MLIRContext *context) {
return indexingMaps;
}
+bool BatchMatmulOp::isDefaultIndexingMaps(Attribute attr) {
+ ArrayAttr maps = dyn_cast<ArrayAttr>(attr);
+ if (!maps)
+ return false;
+ if (maps.size() != 3)
+ return false;
+ auto positions = getAffineResultPositions(maps);
+ if (failed(positions))
+ return false;
+ return (*positions)[0] == SmallVector<int64_t>{0, 1, 3} &&
+ (*positions)[1] == SmallVector<int64_t>{0, 3, 2} &&
+ (*positions)[2] == SmallVector<int64_t>{0, 1, 2};
+}
+
SmallVector<utils::IteratorType> BatchMatmulOp::getIteratorTypesArray() {
return SmallVector<utils::IteratorType>{
utils::IteratorType::parallel, utils::IteratorType::parallel,
@@ -5646,6 +6068,19 @@ BatchReduceMatmulOp::getDefaultIndexingMaps(MLIRContext *context) {
return indexingMaps;
}
+bool BatchReduceMatmulOp::isDefaultIndexingMaps(Attribute attr) {
+ ArrayAttr maps = dyn_cast<ArrayAttr>(attr);
+ if (!maps)
+ return false;
+ if (maps.size() != 3)
+ return false;
+ auto positions = getAffineResultPositions(maps);
+ if (failed(positions))
+ return false;
+ return (*positions)[0] == SmallVector<int64_t>{0, 1, 3} &&
+ (*positions)[1] == SmallVector<int64_t>{0, 3, 2} &&
+ (*positions)[2] == SmallVector<int64_t>{1, 2};
+}
unsigned BatchReduceMatmulOp::getNumRegionArgs() { return 3; }
std::string BatchReduceMatmulOp::getLibraryCallName() {
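For illustration, a minimal sketch (not part of the patch) of how the new create/classof hooks interact: the transpose variants are now ordinary linalg.matmul operations distinguished only by their indexing maps, so isa<> succeeds through classof whenever the default transposed maps are present. The helper and value names below are hypothetical, and the usual Linalg includes are assumed.

static Operation *buildTransposedMatmul(OpBuilder &b, Location loc, Value lhsT,
                                        Value rhs, Value init) {
  // lhsT: KxM, rhs: KxN, init: MxN (tensor semantics assumed).
  auto op = linalg::MatmulTransposeAOp::create(
      b, loc, TypeRange{init.getType()}, ValueRange{lhsT, rhs},
      ValueRange{init});
  Operation *generic = op.getOperation();
  // The same operation satisfies both checks: it is a linalg.matmul, and the
  // classof hook recognizes the default transpose-A indexing maps.
  assert(isa<linalg::MatmulOp>(generic) &&
         isa<linalg::MatmulTransposeAOp>(generic) &&
         "unexpected classification");
  return generic;
}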
diff --git a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
index bdfc8d0..639e0fe 100644
--- a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
+++ b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
@@ -15,6 +15,7 @@
#include "mlir/Dialect/Arith/Utils/Utils.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h"
+#include "mlir/Dialect/CommonFolders.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/Linalg/TransformOps/GPUHeuristics.h"
@@ -27,6 +28,7 @@
#include "mlir/Dialect/Transform/IR/TransformTypes.h"
#include "mlir/Dialect/Transform/Interfaces/TransformInterfaces.h"
#include "mlir/Dialect/Transform/Utils/Utils.h"
+#include "mlir/Dialect/UB/IR/UBOps.h"
#include "mlir/Dialect/Utils/IndexingUtils.h"
#include "mlir/Dialect/Utils/StaticValueUtils.h"
#include "mlir/Dialect/Vector/Transforms/LoweringPatterns.h"
@@ -1985,14 +1987,19 @@ transform::PadOp::apply(transform::TransformRewriter &rewriter,
// Convert the padding values to attributes.
SmallVector<Attribute> paddingValues;
- for (auto const &it :
+ for (auto const &[untypedAttr, elementOrTensorType] :
llvm::zip(getPaddingValues(), linalgTarget->getOperandTypes())) {
- auto attr = dyn_cast<TypedAttr>(std::get<0>(it));
+
+ if (isa<ub::PoisonAttr>(untypedAttr)) {
+ paddingValues.push_back(untypedAttr);
+ continue;
+ }
+ auto attr = dyn_cast<TypedAttr>(untypedAttr);
if (!attr) {
- emitOpError("expects padding values to be typed attributes");
+ emitOpError("expects padding values to be typed attributes or poison");
return DiagnosedSilenceableFailure::definiteFailure();
}
- Type elementType = getElementTypeOrSelf(std::get<1>(it));
+ Type elementType = getElementTypeOrSelf(elementOrTensorType);
// Try to parse string attributes to obtain an attribute of element type.
if (auto stringAttr = dyn_cast<StringAttr>(attr)) {
auto parsedAttr = dyn_cast_if_present<TypedAttr>(parseAttribute(
@@ -2000,7 +2007,7 @@ transform::PadOp::apply(transform::TransformRewriter &rewriter,
/*numRead=*/nullptr, /*isKnownNullTerminated=*/true));
if (!parsedAttr || parsedAttr.getType() != elementType) {
auto diag = this->emitOpError("expects a padding that parses to ")
- << elementType << ", got " << std::get<0>(it);
+ << elementType << ", got " << untypedAttr;
diag.attachNote(linalgTarget.getLoc()) << "when applied to this op";
return DiagnosedSilenceableFailure::definiteFailure();
}
@@ -2235,8 +2242,13 @@ transform::PadTilingInterfaceOp::apply(transform::TransformRewriter &rewriter,
llvm::zip(getPaddingValues(), targetOp->getOperandTypes())) {
auto attr = dyn_cast<TypedAttr>(untypedAttr);
Type elementType = getElementTypeOrSelf(elementOrTensorType);
+
+ if (isa<ub::PoisonAttr>(untypedAttr)) {
+ paddingValues.push_back(untypedAttr);
+ continue;
+ }
if (!attr) {
- emitOpError("expects padding values to be typed attributes");
+ emitOpError("expects padding values to be typed attributes or poison");
return DiagnosedSilenceableFailure::definiteFailure();
}
// Try to parse string attributes to obtain an attribute of element type.
@@ -3783,8 +3795,15 @@ LogicalResult TileUsingForallOp::verify() {
void transform::VectorizeChildrenAndApplyPatternsOp::build(
OpBuilder &builder, OperationState &result, Value target,
- bool vectorizePadding, bool vectorizeExtract, bool flatten1DDepthwiseConv) {
+ bool foldTypeExtensionsIntoContract, bool vectorizePadding,
+ bool vectorizeExtract, bool flatten1DDepthwiseConv) {
result.addOperands(target);
+ if (foldTypeExtensionsIntoContract) {
+ result.addAttribute(
+ VectorizeChildrenAndApplyPatternsOp::
+ getFoldTypeExtensionsIntoContractAttrName(result.name),
+ builder.getUnitAttr());
+ }
if (vectorizePadding) {
result.addAttribute(
VectorizeChildrenAndApplyPatternsOp::getVectorizePaddingAttrName(
@@ -3875,6 +3894,9 @@ transform::VectorizeChildrenAndApplyPatternsOp::applyToOne(
patterns.add<CopyVectorizationPattern>(ctx);
+ if (getFoldTypeExtensionsIntoContract())
+ vector::populateFoldArithExtensionPatterns(patterns);
+
if (getVectorizePadding()) {
linalg::populatePadOpVectorizationPatterns(patterns);
// This creates an alternative path for lowering tensor.pad - by
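A minimal sketch of driving the extended vectorize builder from C++, assuming the usual transform-dialect includes; the helper name and the `target` handle value are hypothetical, and only the new leading flag differs from the previous signature.

static void vectorizeWithContractFolding(OpBuilder &b, Location loc,
                                         Value target) {
  // Forwards to the build() overload above; the remaining flags keep their
  // prior meaning.
  b.create<transform::VectorizeChildrenAndApplyPatternsOp>(
      loc, target,
      /*foldTypeExtensionsIntoContract=*/true,
      /*vectorizePadding=*/false,
      /*vectorizeExtract=*/false,
      /*flatten1DDepthwiseConv=*/false);
}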
diff --git a/mlir/lib/Dialect/Linalg/Transforms/BlockPackMatmul.cpp b/mlir/lib/Dialect/Linalg/Transforms/BlockPackMatmul.cpp
index 3908d73..b4507a9 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/BlockPackMatmul.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/BlockPackMatmul.cpp
@@ -320,10 +320,6 @@ void linalg::populateBlockPackMatmulPatterns(
RewritePatternSet &patterns, const ControlBlockPackMatmulFn &controlFn) {
patterns.add<BlockPackMatmul<linalg::GenericOp>,
BlockPackMatmul<linalg::MatmulOp>,
- BlockPackMatmul<linalg::BatchMatmulOp>,
- BlockPackMatmul<linalg::MatmulTransposeAOp>,
- BlockPackMatmul<linalg::BatchMatmulTransposeAOp>,
- BlockPackMatmul<linalg::MatmulTransposeBOp>,
- BlockPackMatmul<linalg::BatchMatmulTransposeBOp>>(
- patterns.getContext(), controlFn);
+ BlockPackMatmul<linalg::BatchMatmulOp>>(patterns.getContext(),
+ controlFn);
}
diff --git a/mlir/lib/Dialect/Linalg/Transforms/CMakeLists.txt b/mlir/lib/Dialect/Linalg/Transforms/CMakeLists.txt
index 70f846e..6ec2e9fd 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/CMakeLists.txt
+++ b/mlir/lib/Dialect/Linalg/Transforms/CMakeLists.txt
@@ -23,9 +23,11 @@ add_mlir_dialect_library(MLIRLinalgTransforms
InlineScalarOperands.cpp
Interchange.cpp
Loops.cpp
+ MorphOps.cpp
TransposeMatmul.cpp
ShardingInterfaceImpl.cpp
NamedOpConversions.cpp
+ NamedToElementwise.cpp
BlockPackMatmul.cpp
PackAndUnpackPatterns.cpp
Padding.cpp
diff --git a/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp b/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
index bf66ed0..d565069 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
@@ -1052,12 +1052,8 @@ struct RankReduceMatmul : RankReduceContractionOps<FromOpTy, ToOpTy> {
static bool constexpr reduceLeft =
(std::is_same_v<FromOpTy, BatchMatmulOp> &&
std::is_same_v<ToOpTy, BatchVecmatOp>) ||
- (std::is_same_v<FromOpTy, BatchMatmulTransposeAOp> &&
- std::is_same_v<ToOpTy, BatchVecmatOp>) ||
(std::is_same_v<FromOpTy, MatmulOp> &&
std::is_same_v<ToOpTy, VecmatOp>) ||
- (std::is_same_v<FromOpTy, MatmulTransposeAOp> &&
- std::is_same_v<ToOpTy, VecmatOp>) ||
(std::is_same_v<FromOpTy, MatvecOp> && std::is_same_v<ToOpTy, DotOp>);
/// Look for non-batch spatial dims to collapse.
@@ -1113,27 +1109,15 @@ void mlir::linalg::populateContractionOpRankReducingPatterns(
MLIRContext *context = patterns.getContext();
// Unbatching patterns for unit batch size
patterns.add<RankReduceToUnBatched<BatchMatmulOp, MatmulOp>>(context);
- patterns
- .add<RankReduceToUnBatched<BatchMatmulTransposeAOp, MatmulTransposeAOp>>(
- context);
- patterns
- .add<RankReduceToUnBatched<BatchMatmulTransposeBOp, MatmulTransposeBOp>>(
- context);
patterns.add<RankReduceToUnBatched<BatchMatvecOp, MatvecOp>>(context);
patterns.add<RankReduceToUnBatched<BatchVecmatOp, VecmatOp>>(context);
// Non-batch rank 1 reducing patterns
patterns.add<RankReduceMatmul<MatmulOp, VecmatOp>>(context);
patterns.add<RankReduceMatmul<MatmulOp, MatvecOp>>(context);
- patterns.add<RankReduceMatmul<MatmulTransposeAOp, VecmatOp>>(context);
- patterns.add<RankReduceMatmul<MatmulTransposeBOp, MatvecOp>>(context);
// Batch rank 1 reducing patterns
patterns.add<RankReduceMatmul<BatchMatmulOp, BatchVecmatOp>>(context);
patterns.add<RankReduceMatmul<BatchMatmulOp, BatchMatvecOp>>(context);
- patterns.add<RankReduceMatmul<BatchMatmulTransposeAOp, BatchVecmatOp>>(
- context);
- patterns.add<RankReduceMatmul<BatchMatmulTransposeBOp, BatchMatvecOp>>(
- context);
// Non-batch rank 0 reducing patterns
patterns.add<RankReduceMatmul<MatvecOp, DotOp>>(context);
diff --git a/mlir/lib/Dialect/Linalg/Transforms/MorphOps.cpp b/mlir/lib/Dialect/Linalg/Transforms/MorphOps.cpp
new file mode 100644
index 0000000..f261ccb
--- /dev/null
+++ b/mlir/lib/Dialect/Linalg/Transforms/MorphOps.cpp
@@ -0,0 +1,62 @@
+//===- MorphOps.cpp - conversion between named, category and generic ops --===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements conversions between linalg ops:
+// named <--> category (elementwise, contraction, ...) <--> generic.
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Dialect/Complex/IR/Complex.h"
+#include "mlir/Dialect/Linalg/IR/Linalg.h"
+#include "mlir/Dialect/Linalg/IR/LinalgInterfaces.h"
+#include "mlir/Dialect/Linalg/Passes.h"
+#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
+#include "mlir/Dialect/Math/IR/Math.h"
+#include "mlir/IR/PatternMatch.h"
+#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
+
+namespace mlir {
+#define GEN_PASS_DEF_LINALGMORPHOPSPASS
+#include "mlir/Dialect/Linalg/Passes.h.inc"
+} // namespace mlir
+
+#define DEBUG_TYPE "linalg-morphism"
+
+using namespace mlir;
+using namespace mlir::linalg;
+
+namespace {
+struct LinalgMorphOpsPass
+ : public impl::LinalgMorphOpsPassBase<LinalgMorphOpsPass> {
+
+ using impl::LinalgMorphOpsPassBase<
+ LinalgMorphOpsPass>::LinalgMorphOpsPassBase;
+
+ void runOnOperation() override;
+};
+
+void LinalgMorphOpsPass::runOnOperation() {
+
+ RewritePatternSet patterns(&getContext());
+
+ // Lowering paths (named -> category -> generic)
+ if (namedToCategory) {
+ populateLinalgNamedToElementwisePatterns(patterns);
+ }
+ if (namedToGeneric || categoryToGeneric) {
+ populateLinalgNamedOpsGeneralizationPatterns(patterns);
+ }
+
+ // Lifting paths (named <- category <- generic)
+ if (genericToNamed) {
+ populateLinalgGenericOpsSpecializationPatterns(patterns);
+ }
+
+ if (failed(applyPatternsGreedily(getOperation(), std::move(patterns))))
+ signalPassFailure();
+}
+} // namespace
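The pass above only wires existing pattern sets together; the same populate entry points can also be applied directly from custom code. A minimal sketch, with a hypothetical helper name and assuming the usual Linalg/Transforms includes and `using namespace mlir`:

static LogicalResult morphNamedLinalgToGeneric(Operation *root) {
  RewritePatternSet patterns(root->getContext());
  linalg::populateLinalgNamedToElementwisePatterns(patterns);     // named -> category
  linalg::populateLinalgNamedOpsGeneralizationPatterns(patterns); // -> generic
  return applyPatternsGreedily(root, std::move(patterns));
}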
diff --git a/mlir/lib/Dialect/Linalg/Transforms/NamedToElementwise.cpp b/mlir/lib/Dialect/Linalg/Transforms/NamedToElementwise.cpp
new file mode 100644
index 0000000..00a076b
--- /dev/null
+++ b/mlir/lib/Dialect/Linalg/Transforms/NamedToElementwise.cpp
@@ -0,0 +1,98 @@
+//===- NamedToElementwise.cpp - convert linalg named op into elementwise --===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements rewriting linalg named ops that are essentially
+// elementwise, e.g. `linalg.exp`, to `linalg.elementwise`. This allows
+// further optimization on `linalg.elementwise`, such as folding transpose
+// and broadcast.
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Dialect/Linalg/IR/Linalg.h"
+#include "mlir/Dialect/Linalg/Passes.h"
+#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
+#include "mlir/IR/PatternMatch.h"
+#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/TypeSwitch.h"
+
+using namespace mlir;
+using namespace mlir::linalg;
+
+#define DEBUG_TYPE "linalg-named-to-elementwise"
+
+namespace {
+ElementwiseKind getKind(Operation *op) {
+ return llvm::TypeSwitch<Operation *, ElementwiseKind>(op)
+ .Case([](SelectOp) { return ElementwiseKind::select; })
+ .Case([](AddOp) { return ElementwiseKind::add; })
+ .Case([](SubOp) { return ElementwiseKind::sub; })
+ .Case([](MulOp) { return ElementwiseKind::mul; })
+ .Case([](DivOp) { return ElementwiseKind::div; })
+ .Case([](DivUnsignedOp) { return ElementwiseKind::div_unsigned; })
+ .Case([](PowFOp) { return ElementwiseKind::powf; })
+ .Case([](ExpOp) { return ElementwiseKind::exp; })
+ .Case([](LogOp) { return ElementwiseKind::log; })
+ .Case([](AbsOp) { return ElementwiseKind::abs; })
+ .Case([](CeilOp) { return ElementwiseKind::ceil; })
+ .Case([](FloorOp) { return ElementwiseKind::floor; })
+ .Case([](NegFOp) { return ElementwiseKind::negf; })
+ .Case([](ReciprocalOp) { return ElementwiseKind::reciprocal; })
+ .Case([](RoundOp) { return ElementwiseKind::round; })
+ .Case([](SqrtOp) { return ElementwiseKind::sqrt; })
+ .Case([](RsqrtOp) { return ElementwiseKind::rsqrt; })
+ .Case([](SquareOp) { return ElementwiseKind::square; })
+ .Case([](TanhOp) { return ElementwiseKind::tanh; })
+ .Case([](ErfOp) { return ElementwiseKind::erf; })
+ .Default([&](Operation *op) {
+ llvm_unreachable("unhandled case in named to elementwise");
+ return ElementwiseKind::sub;
+ });
+}
+
+template <typename NamedOpTy>
+struct NamedToElementwisePattern : public OpRewritePattern<NamedOpTy> {
+ using OpRewritePattern<NamedOpTy>::OpRewritePattern;
+
+ LogicalResult matchAndRewrite(NamedOpTy op,
+ PatternRewriter &rewriter) const override {
+ SmallVector<NamedAttribute> attrs;
+ auto kindAttr = ElementwiseKindAttr::get(op.getContext(), getKind(op));
+ attrs.push_back(rewriter.getNamedAttr("kind", kindAttr));
+ attrs.push_back(
+ rewriter.getNamedAttr("indexing_maps", op.getIndexingMaps()));
+
+ rewriter.replaceOpWithNewOp<ElementwiseOp>(op, op.getDpsInputs(),
+ op.getDpsInits(), attrs);
+ return success();
+ }
+};
+} // namespace
+
+void mlir::linalg::populateLinalgNamedToElementwisePatterns(
+ RewritePatternSet &patterns) {
+ patterns.add<NamedToElementwisePattern<SelectOp>>(patterns.getContext());
+ patterns.add<NamedToElementwisePattern<AddOp>>(patterns.getContext());
+ patterns.add<NamedToElementwisePattern<SubOp>>(patterns.getContext());
+ patterns.add<NamedToElementwisePattern<MulOp>>(patterns.getContext());
+ patterns.add<NamedToElementwisePattern<DivOp>>(patterns.getContext());
+ patterns.add<NamedToElementwisePattern<DivUnsignedOp>>(patterns.getContext());
+ patterns.add<NamedToElementwisePattern<PowFOp>>(patterns.getContext());
+ patterns.add<NamedToElementwisePattern<ExpOp>>(patterns.getContext());
+ patterns.add<NamedToElementwisePattern<LogOp>>(patterns.getContext());
+ patterns.add<NamedToElementwisePattern<AbsOp>>(patterns.getContext());
+ patterns.add<NamedToElementwisePattern<CeilOp>>(patterns.getContext());
+ patterns.add<NamedToElementwisePattern<FloorOp>>(patterns.getContext());
+ patterns.add<NamedToElementwisePattern<NegFOp>>(patterns.getContext());
+ patterns.add<NamedToElementwisePattern<ReciprocalOp>>(patterns.getContext());
+ patterns.add<NamedToElementwisePattern<RoundOp>>(patterns.getContext());
+ patterns.add<NamedToElementwisePattern<SqrtOp>>(patterns.getContext());
+ patterns.add<NamedToElementwisePattern<RsqrtOp>>(patterns.getContext());
+ patterns.add<NamedToElementwisePattern<SquareOp>>(patterns.getContext());
+ patterns.add<NamedToElementwisePattern<TanhOp>>(patterns.getContext());
+ patterns.add<NamedToElementwisePattern<ErfOp>>(patterns.getContext());
+}
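As a side note on the registration style, the per-op add calls above could equivalently be collapsed into one variadic registration, since RewritePatternSet::add accepts a pack of pattern types; a sketch of that alternative (the helper name is hypothetical):

template <typename... OpTys>
static void addNamedToElementwisePatterns(RewritePatternSet &patterns) {
  patterns.add<NamedToElementwisePattern<OpTys>...>(patterns.getContext());
}
// e.g. addNamedToElementwisePatterns<SelectOp, AddOp, SubOp, MulOp, DivOp,
//                                    ExpOp, LogOp, TanhOp, ErfOp>(patterns);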
diff --git a/mlir/lib/Dialect/Linalg/Transforms/PadTilingInterface.cpp b/mlir/lib/Dialect/Linalg/Transforms/PadTilingInterface.cpp
index 2e62523..3d12bc3 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/PadTilingInterface.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/PadTilingInterface.cpp
@@ -11,6 +11,7 @@
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Complex/IR/Complex.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
+#include "mlir/Dialect/UB/IR/UBOps.h"
#include "mlir/Dialect/Utils/StaticValueUtils.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/BuiltinAttributes.h"
@@ -230,13 +231,18 @@ static Value padOperand(RewriterBase &rewriter, TilingInterface opToPad,
Value paddingValue;
if (auto complexTy =
dyn_cast<ComplexType>(getElementTypeOrSelf(v.getType()))) {
- auto complexAttr = cast<ArrayAttr>(paddingValueAttr);
- paddingValue = complex::ConstantOp::create(rewriter, opToPad.getLoc(),
- complexTy, complexAttr);
- } else {
- paddingValue = arith::ConstantOp::create(rewriter, opToPad.getLoc(),
- cast<TypedAttr>(paddingValueAttr));
+ if (auto complexAttr = dyn_cast<ArrayAttr>(paddingValueAttr)) {
+ paddingValue = complex::ConstantOp::create(rewriter, opToPad.getLoc(),
+ complexTy, complexAttr);
+ }
+ } else if (isa<ub::PoisonAttr>(paddingValueAttr)) {
+ paddingValue = ub::PoisonOp::create(rewriter, opToPad.getLoc(),
+ getElementTypeOrSelf(v.getType()));
+ } else if (auto typedAttr = dyn_cast<TypedAttr>(paddingValueAttr)) {
+ paddingValue =
+ arith::ConstantOp::create(rewriter, opToPad.getLoc(), typedAttr);
}
+ assert(paddingValue && "failed to create value from padding attribute");
// Pad the operand to the bounding box defined by `paddedShape`.
SmallVector<int64_t> tensorShape;
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Specialize.cpp b/mlir/lib/Dialect/Linalg/Transforms/Specialize.cpp
index 455e1a6..35ba4f15 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Specialize.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Specialize.cpp
@@ -234,19 +234,8 @@ static FailureOr<LinalgOp> specializeLinalgContractions(RewriterBase &rewriter,
/// Codegen the different matmul variants.
if (numOfBatchDims) {
- if (a == IndexMatchResult::Transposed)
- return replaceWithMatmulVariant<BatchMatmulTransposeAOp>(rewriter,
- genericOp);
- if (b == IndexMatchResult::Transposed)
- return replaceWithMatmulVariant<BatchMatmulTransposeBOp>(rewriter,
- genericOp);
return replaceWithMatmulVariant<BatchMatmulOp>(rewriter, genericOp);
}
-
- if (a == IndexMatchResult::Transposed)
- return replaceWithMatmulVariant<MatmulTransposeAOp>(rewriter, genericOp);
- if (b == IndexMatchResult::Transposed)
- return replaceWithMatmulVariant<MatmulTransposeBOp>(rewriter, genericOp);
return replaceWithMatmulVariant<MatmulOp>(rewriter, genericOp);
}
diff --git a/mlir/lib/Dialect/Linalg/Transforms/TransposeMatmul.cpp b/mlir/lib/Dialect/Linalg/Transforms/TransposeMatmul.cpp
index a2a4335..9ec4af6 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/TransposeMatmul.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/TransposeMatmul.cpp
@@ -52,19 +52,19 @@ FailureOr<Operation *> mlir::linalg::transposeMatmul(RewriterBase &rewriter,
dynamicDims.push_back(tensor::DimOp::create(rewriter, loc, input, 0));
ArrayRef<int64_t> shape = type.getShape();
- Value empty = tensor::EmptyOp::create(rewriter, loc,
- ArrayRef<int64_t>{shape[1], shape[0]},
- type.getElementType(), dynamicDims);
- auto transposeOp = linalg::TransposeOp::create(rewriter, loc, input, empty,
- ArrayRef<int64_t>{1, 0});
+ Value empty = rewriter.create<tensor::EmptyOp>(
+ loc, ArrayRef<int64_t>{shape[1], shape[0]}, type.getElementType(),
+ dynamicDims);
+ auto transposeOp = rewriter.create<linalg::TransposeOp>(
+ loc, input, empty, ArrayRef<int64_t>{1, 0});
Operation *newMatmulOp;
if (transposeLHS) {
- newMatmulOp = linalg::MatmulTransposeAOp::create(
+ newMatmulOp = MatmulTransposeAOp::create(
rewriter, loc, matmulOp.getResultTypes(),
ValueRange{transposeOp->getResult(0), matmulOp.getInputs()[1]},
matmulOp.getOutputs());
} else {
- newMatmulOp = linalg::MatmulTransposeBOp::create(
+ newMatmulOp = MatmulTransposeBOp::create(
rewriter, loc, matmulOp.getResultTypes(),
ValueRange{matmulOp.getInputs()[0], transposeOp->getResult(0)},
matmulOp.getOutputs());
@@ -112,16 +112,16 @@ mlir::linalg::transposeBatchMatmul(RewriterBase &rewriter,
Value empty = tensor::EmptyOp::create(
rewriter, loc, ArrayRef<int64_t>{shape[0], shape[2], shape[1]},
type.getElementType(), dynamicDims);
- auto transposeOp = linalg::TransposeOp::create(rewriter, loc, input, empty,
- ArrayRef<int64_t>{0, 2, 1});
+ auto transposeOp = rewriter.create<linalg::TransposeOp>(
+ loc, input, empty, ArrayRef<int64_t>{0, 2, 1});
Operation *newMatmulOp;
if (transposeLHS) {
- newMatmulOp = linalg::BatchMatmulTransposeAOp::create(
+ newMatmulOp = BatchMatmulTransposeAOp::create(
rewriter, loc, batchMatmulOp.getResultTypes(),
ValueRange{transposeOp->getResult(0), batchMatmulOp.getInputs()[1]},
batchMatmulOp.getOutputs());
} else {
- newMatmulOp = linalg::BatchMatmulTransposeBOp::create(
+ newMatmulOp = BatchMatmulTransposeBOp::create(
rewriter, loc, batchMatmulOp.getResultTypes(),
ValueRange{batchMatmulOp.getInputs()[0], transposeOp->getResult(0)},
batchMatmulOp.getOutputs());
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
index cf65e67..80fbe3c 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -2563,7 +2563,7 @@ vectorizeScalableVectorPrecondition(Operation *op,
"vectorization";
return failure();
}
- if (isa<linalg::MatmulOp>(op) || isa<linalg::MatmulTransposeAOp>(op)) {
+ if (isa<linalg::MatmulOp>(op)) {
LDBG()
<< "Scalable vectorization of the reduction dim in Matmul-like ops "
"is not supported";
@@ -2604,15 +2604,9 @@ vectorizeScalableVectorPrecondition(Operation *op,
return failure();
}
- // Check to not let go the matmul with extended semantic, through this
- // transform.
- if (linalgOp.hasUserDefinedMaps())
- return failure();
-
// Cond 4: Only the following ops are supported in the
// presence of scalable vectors
return success(isElementwise(linalgOp) || isa<linalg::MatmulOp>(op) ||
- isa<linalg::MatmulTransposeAOp>(op) ||
isa<linalg::DepthwiseConv1DNwcWcOp>(op) ||
isa<linalg::MatvecOp>(op) || isa<linalg::Mmt4DOp>(op) ||
hasReductionIterator(linalgOp));
diff --git a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
index a450056..cb4783d 100644
--- a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
+++ b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
@@ -2841,9 +2841,47 @@ LogicalResult BroadcastOp::verify() {
llvm_unreachable("unexpected vector.broadcast op error");
}
+// Fold broadcast(shape_cast(x)) into broadcast(x) if x's type is compatible
+// with broadcast's result type and the shape_cast only adds or removes
+// leading unit dimensions.
+static LogicalResult foldBroadcastOfShapeCast(BroadcastOp broadcastOp) {
+ auto srcShapeCast = broadcastOp.getSource().getDefiningOp<ShapeCastOp>();
+ if (!srcShapeCast)
+ return failure();
+
+ VectorType srcType = srcShapeCast.getSourceVectorType();
+ VectorType destType = broadcastOp.getResultVectorType();
+ // Check type compatibility.
+ if (vector::isBroadcastableTo(srcType, destType) !=
+ BroadcastableToResult::Success)
+ return failure();
+
+ ArrayRef<int64_t> srcShape = srcType.getShape();
+ ArrayRef<int64_t> shapecastShape =
+ srcShapeCast.getResultVectorType().getShape();
+ // Trailing dimensions should be the same if shape_cast only alters the
+ // leading dimensions.
+ unsigned numTrailingDims = std::min(srcShape.size(), shapecastShape.size());
+ if (!llvm::equal(srcShape.take_back(numTrailingDims),
+ shapecastShape.take_back(numTrailingDims)))
+ return failure();
+
+ assert(all_of(srcShape.drop_back(numTrailingDims),
+ [](int64_t E) { return E == 1; }) &&
+ all_of(shapecastShape.drop_back(numTrailingDims),
+ [](int64_t E) { return E == 1; }) &&
+ "ill-formed shape_cast");
+
+ broadcastOp.getSourceMutable().assign(srcShapeCast.getSource());
+ return success();
+}
+
OpFoldResult BroadcastOp::fold(FoldAdaptor adaptor) {
if (getSourceType() == getResultVectorType())
return getSource();
+ if (succeeded(foldBroadcastOfShapeCast(*this)))
+ return getResult();
+
if (!adaptor.getSource())
return {};
auto vectorType = getResultVectorType();
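A worked example of the new broadcast(shape_cast) fold, with illustrative shapes:

//   %0 = vector.shape_cast %x : vector<4xf32> to vector<1x4xf32>
//   %1 = vector.broadcast  %0 : vector<1x4xf32> to vector<2x3x4xf32>
// The single trailing dimension (4) matches between the source and the
// shape_cast result, the extra leading dimension is 1, and vector<4xf32> is
// itself broadcastable to vector<2x3x4xf32>, so the fold rewires the
// broadcast to read %x directly.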
diff --git a/mlir/lib/Dialect/WasmSSA/IR/WasmSSAOps.cpp b/mlir/lib/Dialect/WasmSSA/IR/WasmSSAOps.cpp
index 2ce32fe..89b62a2 100644
--- a/mlir/lib/Dialect/WasmSSA/IR/WasmSSAOps.cpp
+++ b/mlir/lib/Dialect/WasmSSA/IR/WasmSSAOps.cpp
@@ -22,6 +22,47 @@
// TableGen'd op method definitions
//===----------------------------------------------------------------------===//
+using namespace mlir;
+namespace {
+ParseResult parseElseRegion(OpAsmParser &opParser, Region &elseRegion) {
+ std::string keyword;
+ std::ignore = opParser.parseOptionalKeywordOrString(&keyword);
+ if (keyword == "else")
+ return opParser.parseRegion(elseRegion);
+ return ParseResult::success();
+}
+
+void printElseRegion(OpAsmPrinter &opPrinter, Operation *op,
+ Region &elseRegion) {
+ if (elseRegion.empty())
+ return;
+ opPrinter.printKeywordOrString("else ");
+ opPrinter.printRegion(elseRegion);
+}
+
+ParseResult parseWasmVisibility(OpAsmParser &opParser, StringAttr &visibility) {
+ std::string keyword;
+ auto initLocation = opParser.getCurrentLocation();
+ std::ignore = opParser.parseOptionalKeywordOrString(&keyword);
+ if (keyword == "nested" || keyword.empty()) {
+ visibility = StringAttr::get(opParser.getContext(), "nested");
+ return ParseResult::success();
+ }
+
+ if (keyword == "public" || keyword == "private") {
+ visibility = StringAttr::get(opParser.getContext(), keyword);
+ return ParseResult::success();
+ }
+ opParser.emitError(initLocation, "expecting symbol visibility");
+ return ParseResult::failure();
+}
+
+void printWasmVisibility(OpAsmPrinter &opPrinter, Operation *op,
+ Attribute visibility) {
+ opPrinter.printKeywordOrString(cast<StringAttr>(visibility).strref());
+}
+} // namespace
+
#define GET_OP_CLASSES
#include "mlir/Dialect/WasmSSA/IR/WasmSSAOps.cpp.inc"
@@ -29,7 +70,6 @@
#include "mlir/IR/Types.h"
#include "llvm/Support/LogicalResult.h"
-using namespace mlir;
using namespace wasmssa;
namespace {
diff --git a/mlir/lib/Dialect/XeGPU/IR/CMakeLists.txt b/mlir/lib/Dialect/XeGPU/IR/CMakeLists.txt
index 242a97c..7c6a4f3 100644
--- a/mlir/lib/Dialect/XeGPU/IR/CMakeLists.txt
+++ b/mlir/lib/Dialect/XeGPU/IR/CMakeLists.txt
@@ -7,11 +7,14 @@ add_mlir_dialect_library(MLIRXeGPUDialect
DEPENDS
MLIRXeGPUIncGen
+ MLIRXeGPUAttrInterfaceIncGen
MLIRXeGPUAttrsIncGen
MLIRXeGPUEnumsIncGen
LINK_LIBS PUBLIC
MLIRArithDialect
+ MLIRIndexDialect
+ MLIRAffineUtils
MLIRArithUtils
MLIRDialectUtils
MLIRIR
diff --git a/mlir/lib/Dialect/XeGPU/IR/XeGPUDialect.cpp b/mlir/lib/Dialect/XeGPU/IR/XeGPUDialect.cpp
index 3c0ca114..d997296 100644
--- a/mlir/lib/Dialect/XeGPU/IR/XeGPUDialect.cpp
+++ b/mlir/lib/Dialect/XeGPU/IR/XeGPUDialect.cpp
@@ -6,12 +6,16 @@
//
//===----------------------------------------------------------------------===//
+#include "mlir/Dialect/Affine/Utils.h"
+#include "mlir/Dialect/Arith/Utils/Utils.h"
+#include "mlir/Dialect/Index/IR/IndexOps.h"
#include "mlir/Dialect/Utils/IndexingUtils.h"
#include "mlir/Dialect/XeGPU/IR/XeGPU.h"
#include "mlir/Dialect/XeGPU/IR/XeGPUTargetInfo.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/DialectImplementation.h"
#include "llvm/ADT/TypeSwitch.h"
+#include "llvm/Support/Debug.h"
using std::optional;
@@ -33,6 +37,57 @@ void XeGPUDialect::initialize() {
>();
}
+/// Generates instructions to compute offsets for a subgroup identified by
+/// its multidimensional indices (sgId), using the specified subgroup layout
+/// (sgLayout), subgroup data dimensions (sizePerSg), and the overall data
+/// dimensions (sizePerWg).
+static SmallVector<SmallVector<Value>>
+genOffsetsComputingInsts(OpBuilder &builder, Location loc,
+ SmallVector<Value> sgId, ArrayRef<int64_t> sgLayout,
+ ArrayRef<int64_t> sizePerSg,
+ ArrayRef<int64_t> sizePerWg) {
+
+ SmallVector<SmallVector<Value>> offsets;
+
+ // nd local offset, localOffset[i] = sgId[i] * sizePerSg[i]
+ SmallVector<Value> localOffsets = llvm::map_to_vector(
+ llvm::zip(sgId, sizePerSg), [&](const auto &t) -> Value {
+ return builder.createOrFold<index::MulOp>(
+ loc, std::get<0>(t),
+ builder.createOrFold<arith::ConstantIndexOp>(loc, std::get<1>(t)));
+ });
+
+ // distUnit[i] is the minimum value between sizePerWg[i] and
+ // sgLayout[i] * sizePerSg[i]
+ SmallVector<int64_t> distUnit = llvm::map_to_vector(
+ llvm::zip_equal(sizePerWg, computeElementwiseMul(sgLayout, sizePerSg)),
+ [](const auto &t) { return std::min(std::get<0>(t), std::get<1>(t)); });
+
+ for (SmallVector<int64_t> unitOffs :
+ StaticTileOffsetRange(sizePerWg, distUnit)) {
+ SmallVector<Value> base =
+ llvm::map_to_vector(unitOffs, [&](int64_t d) -> Value {
+ return builder.create<arith::ConstantIndexOp>(loc, d);
+ });
+
+ SmallVector<Value> adds = llvm::map_to_vector(
+ llvm::zip_equal(base, localOffsets), [&](const auto &t) -> Value {
+ return builder.createOrFold<arith::AddIOp>(loc, std::get<0>(t),
+ std::get<1>(t));
+ });
+
+ SmallVector<Value> mods = llvm::map_to_vector(
+ llvm::zip_equal(adds, sizePerWg), [&](const auto &t) -> Value {
+ return builder.createOrFold<index::RemUOp>(
+ loc, std::get<0>(t),
+ builder.create<arith::ConstantIndexOp>(loc, std::get<1>(t)));
+ });
+
+ offsets.push_back(mods);
+ }
+ return offsets;
+}
+
// Checks if the given shape can be evenly distributed based on the layout
// and data factors provided by the LayoutAttr.
bool XeGPUDialect::isEvenlyDistributable(llvm::ArrayRef<int64_t> shape,
@@ -211,6 +266,148 @@ LayoutAttr::verify(llvm::function_ref<mlir::InFlightDiagnostic()> emitError,
return success();
}
+FailureOr<SmallVector<Value>>
+LayoutAttr::delinearizeSubgroupId(OpBuilder &builder, Location loc,
+ Value linearId) {
+ // delinearizeSubgroupId is only available for
+ // workgroup-level layout attributes.
+ if (!isWgLayout())
+ return failure();
+
+ // TODO: handle order attribute
+ auto hasDefaultOrder = [&]() {
+ DenseI32ArrayAttr order = getOrder();
+ return !order || isIdentityPermutation(llvm::to_vector_of<int64_t>(
+ llvm::reverse(order.asArrayRef())));
+ };
+ if (!hasDefaultOrder())
+ return mlir::emitError(loc, "order attribute is currently not supported.");
+
+ auto dims = llvm::map_to_vector(*getSgLayoutAsInt(), [&](int64_t d) -> Value {
+ return builder.createOrFold<arith::ConstantIndexOp>(loc, d);
+ });
+
+ return affine::delinearizeIndex(builder, loc, linearId, dims);
+}
+
+/// Implements LayoutTrait::getOffsets to generate instructions for
+/// computing multi-dimensional offsets when distributed by LayoutAttr.
+FailureOr<SmallVector<SmallVector<Value>>>
+LayoutAttr::getOffsets(OpBuilder &builder, Location loc, Value linearId,
+ ArrayRef<int64_t> shape) {
+ if (!isWgLayout())
+ return failure();
+
+ SmallVector<int64_t> sgLayout = getSgLayoutAsInt().value();
+ SmallVector<int64_t> sgShape;
+ if (auto maybeSgShape = getSgDataAsInt())
+ sgShape = maybeSgShape.value();
+ else if (auto derivedShape = computeShapeRatio(shape, sgLayout))
+ sgShape = derivedShape.value();
+ else
+ return failure();
+
+ // delinearize Ids
+ auto maybeIds = delinearizeSubgroupId(builder, loc, linearId);
+ if (failed(maybeIds))
+ return failure();
+ SmallVector<Value> sgIds = *maybeIds;
+
+ return genOffsetsComputingInsts(builder, loc, sgIds, sgLayout, sgShape,
+ shape);
+}
+
+//===----------------------------------------------------------------------===//
+// XeGPU_SliceAttr
+//===----------------------------------------------------------------------===//
+LogicalResult
+SliceAttr::verify(llvm::function_ref<InFlightDiagnostic()> emitError,
+ xegpu::LayoutTrait parent, DenseI64ArrayAttr dims) {
+ if (!parent || !dims)
+ return emitError() << "expected parent layout and dims attribute";
+
+ int64_t rank = parent.getRank();
+
+ // check every element in dims is unique and smaller than rank
+ llvm::SmallDenseSet<int64_t> seen;
+ for (int64_t dim : dims.asArrayRef()) {
+ if (dim < 0 || dim >= rank)
+ return emitError() << "invalid dim (" << dim << ") in slice attribute.";
+ if (!seen.insert(dim).second)
+ return emitError() << "repeated dim (" << dim << ") in slice attribute.";
+ }
+ return success();
+}
+
+SliceAttr SliceAttr::flatten() const {
+ xegpu::LayoutTrait parent = getParent();
+ SmallVector<DenseI64ArrayAttr> slicedDims({getDims()});
+
+ while (auto sliceAttr = dyn_cast<xegpu::SliceAttr>(parent)) {
+ parent = sliceAttr.getParent();
+ slicedDims.push_back(sliceAttr.getDims());
+ }
+
+ auto layoutAttr = dyn_cast<xegpu::LayoutAttr>(parent);
+ SmallVector<int64_t> indices =
+ llvm::to_vector(llvm::seq<int64_t>(0, layoutAttr.getRank()));
+
+ // get remaining dims (flattened) by applying slice ops with all slicedDims
+ SmallVector<int64_t> remainingDims(indices);
+ for (auto dim : llvm::reverse(slicedDims))
+ remainingDims = XeGPUDialect::slice(llvm::ArrayRef<int64_t>(remainingDims),
+ dim.asArrayRef());
+
+ // get flattened sliced dims by applying slice ops with the remaining dims
+ SmallVector<int64_t> flattendDims = XeGPUDialect::slice(
+ llvm::ArrayRef<int64_t>(indices), llvm::ArrayRef<int64_t>(remainingDims));
+
+ return xegpu::SliceAttr::get(
+ getContext(), layoutAttr,
+ DenseI64ArrayAttr::get(getContext(), flattendDims));
+}
+
+FailureOr<SmallVector<Value>>
+SliceAttr::delinearizeSubgroupId(OpBuilder &builder, Location loc,
+ Value linearId) {
+ SliceAttr attr = flatten();
+ auto parent = dyn_cast<LayoutAttr>(attr.getParent());
+ return parent.delinearizeSubgroupId(builder, loc, linearId);
+}
+
+/// Implements LayoutTrait::getOffsets to generate instructions for
+/// computing multi-dimensional offsets when distributed by SliceAttr.
+FailureOr<SmallVector<SmallVector<Value>>>
+SliceAttr::getOffsets(OpBuilder &builder, Location loc, Value linearId,
+ ArrayRef<int64_t> shape) {
+ assert(getRank() == static_cast<int64_t>(shape.size()) && "invalid shape.");
+ if (!isWgLayout())
+ return failure();
+
+ SmallVector<int64_t> sgLayout = getSgLayoutAsInt().value();
+ SmallVector<int64_t> sgShape;
+ if (auto maybeSgShape = getSgDataAsInt())
+ sgShape = maybeSgShape.value();
+ else if (auto derivedShape = computeShapeRatio(shape, sgLayout))
+ sgShape = derivedShape.value();
+ else
+ return failure();
+
+ // delinearize Ids
+ auto maybeIds = delinearizeSubgroupId(builder, loc, linearId);
+ if (failed(maybeIds))
+ return failure();
+
+ // The effective sgIds for offset computation correspond to
+ // the dims that are not sliced.
+ ArrayRef<int64_t> dims = flatten().getDims().asArrayRef();
+ SmallVector<Value> sgIds =
+ XeGPUDialect::slice(ArrayRef<Value>(*maybeIds), dims);
+
+ return genOffsetsComputingInsts(builder, loc, sgIds, sgLayout, sgShape,
+ shape);
+}
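To make the subgroup offset computation above concrete, a small worked example (numbers are illustrative):

// sg_layout = [2, 2], sg_data = [16, 16], workgroup shape = [32, 64].
// sizePerSg = [16, 16]; sgLayout * sizePerSg = [32, 32];
// distUnit  = [min(32, 32), min(64, 32)] = [32, 32].
// StaticTileOffsetRange([32, 64], [32, 32]) yields base offsets [0, 0] and
// [0, 32], so every subgroup owns two tiles. For the subgroup with
// delinearized id [1, 0]: localOffset = [1 * 16, 0 * 16] = [16, 0], and the
// generated offsets are [(0 + 16) % 32, (0 + 0) % 64]  = [16, 0] and
//                       [(0 + 16) % 32, (32 + 0) % 64] = [16, 32].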
+
//===----------------------------------------------------------------------===//
// XeGPU_RangeAttr
//===----------------------------------------------------------------------===//
diff --git a/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp b/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp
index 33450f3..2cd086f 100644
--- a/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp
+++ b/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp
@@ -265,8 +265,8 @@ void CreateNdDescOp::build(OpBuilder &builder, OperationState &state,
}
LogicalResult CreateNdDescOp::verify() {
- auto rank = (int64_t)getMixedOffsets().size();
- bool invalidRank = false;
+ size_t rank = getMixedSizes().size();
+ bool invalidRank = rank != getMixedStrides().size();
bool invalidElemTy = false;
// Memory space of created TensorDesc should match with the source.
@@ -280,31 +280,28 @@ LogicalResult CreateNdDescOp::verify() {
<< " Source: " << srcMemorySpace
<< ", TensorDesc: " << tdescMemorySpace;
+ if (size_t offsetRank = getMixedOffsets().size())
+ invalidRank |= (offsetRank != rank);
+
// check source type matches the rank if it is a memref.
// It also should have the same ElementType as TensorDesc.
- auto memrefTy = dyn_cast<MemRefType>(getSourceType());
- if (memrefTy) {
- invalidRank |= (memrefTy.getRank() != rank);
+ if (auto memrefTy = dyn_cast<MemRefType>(getSourceType()))
invalidElemTy |= memrefTy.getElementType() != getElementType();
- }
if (llvm::isa<IntegerType>(getSourceType())) {
// strides and shape must present for integer source.
if (getMixedStrides().empty() || getMixedSizes().empty())
- return emitOpError("Expecting strides and shape to be present for "
+ return emitOpError("expecting strides and shape to be present for "
"integer source.");
}
- // mismatches among shape, strides, and offsets are
- // already handeled by OffsetSizeAndStrideOpInterface.
- // So they are not check here.
if (invalidRank)
return emitOpError(
"Expecting the rank of shape, strides, offsets, and source (if source "
"is a memref) should match with each other.");
// check result TensorDesc rank
- if (getType().getRank() > rank)
+ if (getType().getRank() > (int64_t)rank)
return emitOpError(
"Expecting the TensorDesc rank is not greater than the "
"ranks of shape, strides, offsets or the memref source.");
@@ -931,6 +928,9 @@ void ConvertLayoutOp::getCanonicalizationPatterns(RewritePatternSet &patterns,
} // namespace xegpu
} // namespace mlir
+namespace mlir {
+#include <mlir/Dialect/XeGPU/IR/XeGPUAttrInterface.cpp.inc>
+} // namespace mlir
#include <mlir/Dialect/XeGPU/IR/XeGPUEnums.cpp.inc>
#define GET_OP_CLASSES
#include <mlir/Dialect/XeGPU/IR/XeGPU.cpp.inc>
diff --git a/mlir/lib/Dialect/XeGPU/Transforms/XeGPUWgToSgDistribute.cpp b/mlir/lib/Dialect/XeGPU/Transforms/XeGPUWgToSgDistribute.cpp
index 850f70c..4a5525c 100644
--- a/mlir/lib/Dialect/XeGPU/Transforms/XeGPUWgToSgDistribute.cpp
+++ b/mlir/lib/Dialect/XeGPU/Transforms/XeGPUWgToSgDistribute.cpp
@@ -125,39 +125,6 @@ getSgShapeAndCount(ArrayRef<int64_t> shape, xegpu::LayoutAttr layout) {
struct WgToSgCreateNdOp : public OpConversionPattern<xegpu::CreateNdDescOp> {
using OpConversionPattern<xegpu::CreateNdDescOp>::OpConversionPattern;
- // Calculate offset for each subgroup
- static SmallVector<OpFoldResult>
- calculateGlobalOffsets(ConversionPatternRewriter &rewriter, Location loc,
- const SmallVector<OpFoldResult> &originalOffsets,
- const SmallVector<Value> &localOffset,
- const SmallVector<int64_t> &distUnitBaseAddr,
- const SmallVector<int64_t> &distUnitShape) {
- assert(localOffset.size() == distUnitBaseAddr.size() &&
- "localOffset and distUnitBaseAddr must have the same rank");
-
- SmallVector<OpFoldResult> globalOffsets(originalOffsets.begin(),
- originalOffsets.end());
- size_t rank = localOffset.size();
- for (size_t i = 0; i < rank; ++i) {
- size_t dimIdx = originalOffsets.size() - rank + i;
- Value constOffset =
- arith::ConstantIndexOp::create(rewriter, loc, distUnitBaseAddr[i]);
- Value offset =
- rewriter.createOrFold<index::AddOp>(loc, localOffset[i], constOffset);
- Value modValue =
- arith::ConstantIndexOp::create(rewriter, loc, distUnitShape[i]);
- Value offsetMod =
- rewriter.createOrFold<index::RemUOp>(loc, offset, modValue);
- Value origOffset = getValueOrCreateConstantIndexOp(
- rewriter, loc, originalOffsets[dimIdx]);
- Value globalOffset =
- rewriter.createOrFold<index::AddOp>(loc, origOffset, offsetMod);
- globalOffsets[dimIdx] = globalOffset;
- }
-
- return globalOffsets;
- }
-
LogicalResult
matchAndRewrite(xegpu::CreateNdDescOp op, OneToNOpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
@@ -177,74 +144,56 @@ struct WgToSgCreateNdOp : public OpConversionPattern<xegpu::CreateNdDescOp> {
return rewriter.notifyMatchFailure(
op, "sgLayout attribute is required in layout");
- SmallVector<int64_t> sgShape = getSgShapeAndCount(wgShape, layout).first;
-
- // TODO : Handle order attribute
// Get the subgroup ID
- auto linearSgId =
+ Value linearSgId =
gpu::SubgroupIdOp::create(rewriter, loc, /*upper_bound=*/nullptr);
- // Create constants for layout dimensions
- SmallVector<Value> sgLayoutDim(sgLayout.size());
- SmallVector<Value> sgDataDim(sgShape.size());
-
- for (size_t i = 0; i < sgLayout.size(); i++) {
- sgLayoutDim[i] =
- arith::ConstantIndexOp::create(rewriter, loc, sgLayout[i]);
- sgDataDim[i] = arith::ConstantIndexOp::create(rewriter, loc, sgShape[i]);
- }
-
int64_t startOfRange = -1, endOfRange = -1;
bool sgIdRangeSpecified =
isSgIdRangeSpecified(op, startOfRange, endOfRange);
- Value adjustedSgId = linearSgId;
if (sgIdRangeSpecified) {
int64_t sgCount = endOfRange - startOfRange;
if (computeProduct(sgLayout) != sgCount)
return rewriter.notifyMatchFailure(
op, "sg_layout size must match the sg_id_range");
- // Subtract startOfRange from the original subgroup id to get the adjusted
- // sg id
+ // Subtract startOfRange from the original subgroup id to get
+ // the adjusted sg id
Value startOfRangeVal =
- arith::ConstantIndexOp::create(rewriter, loc, startOfRange);
- adjustedSgId =
+ rewriter.create<arith::ConstantIndexOp>(loc, startOfRange);
+ linearSgId =
rewriter.createOrFold<index::SubOp>(loc, linearSgId, startOfRangeVal);
}
- auto deLinearizeSgId =
- affine::delinearizeIndex(rewriter, loc, adjustedSgId, sgLayoutDim);
- if (failed(deLinearizeSgId))
+ auto maybeTdescOffsets =
+ layout.getOffsets(rewriter, loc, linearSgId, wgShape);
+ if (failed(maybeTdescOffsets))
return failure();
- SmallVector<Value> sgIds = *deLinearizeSgId;
-
- // Calculate distribution unit shape and local offsets for subgroup
- SmallVector<int64_t> distUnitShape(sgLayout.size());
- SmallVector<Value> localOffset(sgLayout.size());
- for (size_t i = 0; i < sgLayout.size(); i++) {
- distUnitShape[i] = std::min(sgLayout[i] * sgShape[i], wgShape[i]);
- localOffset[i] =
- rewriter.createOrFold<index::MulOp>(loc, sgIds[i], sgDataDim[i]);
- }
-
- SmallVector<OpFoldResult> originalOffsets = op.getMixedOffsets();
+ SmallVector<int64_t> sgShape = getSgShapeAndCount(wgShape, layout).first;
xegpu::TensorDescType newTdescTy =
xegpu::TensorDescType::get(ctx, sgShape, elemTy, tdescTy.getEncoding(),
layout.dropSgLayoutAndData());
+
SmallVector<Value> newCreateNdOps;
- for (SmallVector<int64_t> distUnitBaseAddr :
- StaticTileOffsetRange(wgShape, distUnitShape)) {
- SmallVector<OpFoldResult> globalOffsets =
- calculateGlobalOffsets(rewriter, loc, originalOffsets, localOffset,
- distUnitBaseAddr, distUnitShape);
-
- auto newCreateNdOp = xegpu::CreateNdDescOp::create(
- rewriter, loc, newTdescTy, op.getSource(), globalOffsets,
+ SmallVector<OpFoldResult> wgOffsets = op.getMixedOffsets();
+
+ for (auto tdescOffsets : *maybeTdescOffsets) {
+ SmallVector<OpFoldResult> sgOffsets;
+ size_t rank = tdescOffsets.size();
+ for (size_t i = 0; i < rank; i++) {
+ size_t idx = wgOffsets.size() - rank + i;
+ Value add = rewriter.createOrFold<index::AddOp>(
+ loc, tdescOffsets[i],
+ getValueOrCreateConstantIndexOp(rewriter, loc, wgOffsets[idx]));
+ sgOffsets.push_back(add);
+ }
+
+ auto newOp = xegpu::CreateNdDescOp::create(
+ rewriter, loc, newTdescTy, op.getSource(), sgOffsets,
op.getMixedSizes(), op.getMixedStrides());
- newCreateNdOps.push_back(newCreateNdOp);
+ newCreateNdOps.push_back(newOp);
}
-
rewriter.replaceOpWithMultiple(op, {newCreateNdOps});
return success();
}
diff --git a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
index 762cc88..2cdd502 100644
--- a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
@@ -3205,6 +3205,23 @@ llvm::AtomicRMWInst::BinOp convertBinOpToAtomic(Operation &op) {
.Default(llvm::AtomicRMWInst::BinOp::BAD_BINOP);
}
+void extractAtomicControlFlags(omp::AtomicUpdateOp atomicUpdateOp,
+ bool &isIgnoreDenormalMode,
+ bool &isFineGrainedMemory,
+ bool &isRemoteMemory) {
+ isIgnoreDenormalMode = false;
+ isFineGrainedMemory = false;
+ isRemoteMemory = false;
+ if (atomicUpdateOp &&
+ atomicUpdateOp->hasAttr(atomicUpdateOp.getAtomicControlAttrName())) {
+ mlir::omp::AtomicControlAttr atomicControlAttr =
+ atomicUpdateOp.getAtomicControlAttr();
+ isIgnoreDenormalMode = atomicControlAttr.getIgnoreDenormalMode();
+ isFineGrainedMemory = atomicControlAttr.getFineGrainedMemory();
+ isRemoteMemory = atomicControlAttr.getRemoteMemory();
+ }
+}
+
/// Converts an OpenMP atomic update operation using OpenMPIRBuilder.
static LogicalResult
convertOmpAtomicUpdate(omp::AtomicUpdateOp &opInst,
@@ -3269,13 +3286,19 @@ convertOmpAtomicUpdate(omp::AtomicUpdateOp &opInst,
return moduleTranslation.lookupValue(yieldop.getResults()[0]);
};
+ bool isIgnoreDenormalMode;
+ bool isFineGrainedMemory;
+ bool isRemoteMemory;
+ extractAtomicControlFlags(opInst, isIgnoreDenormalMode, isFineGrainedMemory,
+ isRemoteMemory);
// Handle ambiguous alloca, if any.
auto allocaIP = findAllocaInsertPoint(builder, moduleTranslation);
llvm::OpenMPIRBuilder::LocationDescription ompLoc(builder);
llvm::OpenMPIRBuilder::InsertPointOrErrorTy afterIP =
ompBuilder->createAtomicUpdate(ompLoc, allocaIP, llvmAtomicX, llvmExpr,
atomicOrdering, binop, updateFn,
- isXBinopExpr);
+ isXBinopExpr, isIgnoreDenormalMode,
+ isFineGrainedMemory, isRemoteMemory);
if (failed(handleError(afterIP, *opInst)))
return failure();
@@ -3364,13 +3387,19 @@ convertOmpAtomicCapture(omp::AtomicCaptureOp atomicCaptureOp,
return moduleTranslation.lookupValue(yieldop.getResults()[0]);
};
+ bool isIgnoreDenormalMode;
+ bool isFineGrainedMemory;
+ bool isRemoteMemory;
+ extractAtomicControlFlags(atomicUpdateOp, isIgnoreDenormalMode,
+ isFineGrainedMemory, isRemoteMemory);
// Handle ambiguous alloca, if any.
auto allocaIP = findAllocaInsertPoint(builder, moduleTranslation);
llvm::OpenMPIRBuilder::LocationDescription ompLoc(builder);
llvm::OpenMPIRBuilder::InsertPointOrErrorTy afterIP =
ompBuilder->createAtomicCapture(
ompLoc, allocaIP, llvmAtomicX, llvmAtomicV, llvmExpr, atomicOrdering,
- binop, updateFn, atomicUpdateOp, isPostfixUpdate, isXBinopExpr);
+ binop, updateFn, atomicUpdateOp, isPostfixUpdate, isXBinopExpr,
+ isIgnoreDenormalMode, isFineGrainedMemory, isRemoteMemory);
if (failed(handleError(afterIP, *atomicCaptureOp)))
return failure();
diff --git a/mlir/lib/Target/SPIRV/Deserialization/Deserializer.cpp b/mlir/lib/Target/SPIRV/Deserialization/Deserializer.cpp
index c967e86..d8c54ec 100644
--- a/mlir/lib/Target/SPIRV/Deserialization/Deserializer.cpp
+++ b/mlir/lib/Target/SPIRV/Deserialization/Deserializer.cpp
@@ -1560,7 +1560,19 @@ spirv::Deserializer::processConstantComposite(ArrayRef<uint32_t> operands) {
}
auto resultID = operands[1];
- if (auto shapedType = dyn_cast<ShapedType>(resultType)) {
+ if (auto tensorType = dyn_cast<TensorArmType>(resultType)) {
+ SmallVector<Attribute> flattenedElems;
+ for (Attribute element : elements) {
+ if (auto denseElemAttr = dyn_cast<DenseElementsAttr>(element)) {
+ for (auto value : denseElemAttr.getValues<Attribute>())
+ flattenedElems.push_back(value);
+ } else {
+ flattenedElems.push_back(element);
+ }
+ }
+ auto attr = DenseElementsAttr::get(tensorType, flattenedElems);
+ constantMap.try_emplace(resultID, attr, tensorType);
+ } else if (auto shapedType = dyn_cast<ShapedType>(resultType)) {
auto attr = DenseElementsAttr::get(shapedType, elements);
// For normal constants, we just record the attribute (and its type) for
// later materialization at use sites.
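A worked example of the new ARM-tensor path (values are illustrative): an OpConstantComposite whose result is a 2x3 i32 ARM tensor may arrive with two constituents that were already materialized as dense tensors of three i32 values each; the loop above flattens them into six scalar IntegerAttrs and rebuilds one DenseElementsAttr over the full 2x3 type before recording it in constantMap.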
diff --git a/mlir/lib/Target/SPIRV/Serialization/Serializer.cpp b/mlir/lib/Target/SPIRV/Serialization/Serializer.cpp
index c049574..7c007de 100644
--- a/mlir/lib/Target/SPIRV/Serialization/Serializer.cpp
+++ b/mlir/lib/Target/SPIRV/Serialization/Serializer.cpp
@@ -956,6 +956,11 @@ Serializer::prepareDenseElementsConstant(Location loc, Type constType,
uint32_t resultID = getNextID();
SmallVector<uint32_t, 4> operands = {typeID, resultID};
auto elementType = cast<spirv::CompositeType>(constType).getElementType(0);
+ if (auto tensorArmType = dyn_cast<spirv::TensorArmType>(constType)) {
+ ArrayRef<int64_t> innerShape = tensorArmType.getShape().drop_front();
+ if (!innerShape.empty())
+ elementType = spirv::TensorArmType::get(innerShape, elementType);
+ }
// "If the Result Type is a cooperative matrix type, then there must be only
// one Constituent, with scalar type matching the cooperative matrix Component
@@ -979,30 +984,10 @@ Serializer::prepareDenseElementsConstant(Location loc, Type constType,
} else {
return 0;
}
- } else if (isa<spirv::TensorArmType>(constType)) {
- if (isZeroValue(valueAttr)) {
- encodeInstructionInto(typesGlobalValues, spirv::Opcode::OpConstantNull,
- {typeID, resultID});
- return resultID;
- }
- numberOfConstituents = shapedType.getNumElements();
- operands.reserve(numberOfConstituents + 2);
- for (int i = 0; i < numberOfConstituents; ++i) {
- uint32_t elementID = 0;
- if (auto attr = dyn_cast<DenseIntElementsAttr>(valueAttr)) {
- elementID =
- elementType.isInteger(1)
- ? prepareConstantBool(loc, attr.getValues<BoolAttr>()[i])
- : prepareConstantInt(loc, attr.getValues<IntegerAttr>()[i]);
- }
- if (auto attr = dyn_cast<DenseFPElementsAttr>(valueAttr)) {
- elementID = prepareConstantFp(loc, attr.getValues<FloatAttr>()[i]);
- }
- if (!elementID) {
- return 0;
- }
- operands.push_back(elementID);
- }
+ } else if (isa<spirv::TensorArmType>(constType) && isZeroValue(valueAttr)) {
+ encodeInstructionInto(typesGlobalValues, spirv::Opcode::OpConstantNull,
+ {typeID, resultID});
+ return resultID;
} else {
operands.reserve(numberOfConstituents + 2);
for (int i = 0; i < numberOfConstituents; ++i) {
diff --git a/mlir/lib/Transforms/Utils/DialectConversion.cpp b/mlir/lib/Transforms/Utils/DialectConversion.cpp
index f23c619..2470f2b 100644
--- a/mlir/lib/Transforms/Utils/DialectConversion.cpp
+++ b/mlir/lib/Transforms/Utils/DialectConversion.cpp
@@ -121,17 +121,8 @@ struct ConversionValueMapping {
/// false positives.
bool isMappedTo(Value value) const { return mappedTo.contains(value); }
- /// Lookup the most recently mapped values with the desired types in the
- /// mapping.
- ///
- /// Special cases:
- /// - If the desired type range is empty, simply return the most recently
- /// mapped values.
- /// - If there is no mapping to the desired types, also return the most
- /// recently mapped values.
- /// - If there is no mapping for the given values at all, return the given
- /// value.
- ValueVector lookupOrDefault(Value from, TypeRange desiredTypes = {}) const;
+ /// Lookup a value in the mapping.
+ ValueVector lookup(const ValueVector &from) const;
template <typename T>
struct IsValueVector : std::is_same<std::decay_t<T>, ValueVector> {};
@@ -185,54 +176,31 @@ private:
};
} // namespace
-ValueVector
-ConversionValueMapping::lookupOrDefault(Value from,
- TypeRange desiredTypes) const {
- // Try to find the deepest values that have the desired types. If there is no
- // such mapping, simply return the deepest values.
- ValueVector desiredValue;
- ValueVector current{from};
- do {
- // Store the current value if the types match.
- if (TypeRange(ValueRange(current)) == desiredTypes)
- desiredValue = current;
-
- // If possible, Replace each value with (one or multiple) mapped values.
- ValueVector next;
- for (Value v : current) {
- auto it = mapping.find({v});
- if (it != mapping.end()) {
- llvm::append_range(next, it->second);
- } else {
- next.push_back(v);
- }
- }
- if (next != current) {
- // If at least one value was replaced, continue the lookup from there.
- current = std::move(next);
- continue;
- }
-
- // Otherwise: Check if there is a mapping for the entire vector. Such
- // mappings are materializations. (N:M mapping are not supported for value
- // replacements.)
- //
- // Note: From a correctness point of view, materializations do not have to
- // be stored (and looked up) in the mapping. But for performance reasons,
- // we choose to reuse existing IR (when possible) instead of creating it
- // multiple times.
- auto it = mapping.find(current);
- if (it == mapping.end()) {
- // No mapping found: The lookup stops here.
- break;
- }
- current = it->second;
- } while (true);
+/// Marker attribute for pure type conversions, i.e., mappings whose only
+/// purpose is to resolve a type mismatch. (In contrast, mappings that point to
+/// the replacement values of a "replaceOp" call, etc., are not pure type
+/// conversions.)
+static const StringRef kPureTypeConversionMarker = "__pure_type_conversion__";
+
+/// A vector of values is a pure type conversion if all values are defined by
+/// the same operation and the operation has the `kPureTypeConversionMarker`
+/// attribute.
+static bool isPureTypeConversion(const ValueVector &values) {
+ assert(!values.empty() && "expected non-empty value vector");
+ Operation *op = values.front().getDefiningOp();
+ for (Value v : llvm::drop_begin(values))
+ if (v.getDefiningOp() != op)
+ return false;
+ return op && op->hasAttr(kPureTypeConversionMarker);
+}
- // If the desired values were found use them, otherwise default to the leaf
- // values.
- // Note: If `desiredTypes` is empty, this function always returns `current`.
- return !desiredValue.empty() ? std::move(desiredValue) : std::move(current);
+ValueVector ConversionValueMapping::lookup(const ValueVector &from) const {
+ auto it = mapping.find(from);
+ if (it == mapping.end()) {
+ // No mapping found: The lookup stops here.
+ return {};
+ }
+ return it->second;
}
//===----------------------------------------------------------------------===//
@@ -930,7 +898,11 @@ struct ConversionPatternRewriterImpl : public RewriterBase::Listener {
/// recently mapped values.
/// - If there is no mapping for the given values at all, return the given
/// value.
- ValueVector lookupOrDefault(Value from, TypeRange desiredTypes = {}) const;
+ ///
+ /// If `skipPureTypeConversions` is "true", materializations that are pure
+ /// type conversions are not considered.
+ ValueVector lookupOrDefault(Value from, TypeRange desiredTypes = {},
+ bool skipPureTypeConversions = false) const;
/// Lookup the given value within the map, or return an empty vector if the
/// value is not mapped. If it is mapped, this follows the same behavior
@@ -993,11 +965,19 @@ struct ConversionPatternRewriterImpl : public RewriterBase::Listener {
/// If `valuesToMap` is set to a non-null Value, then that value is mapped to
/// the results of the unresolved materialization in the conversion value
/// mapping.
+ ///
+ /// If `isPureTypeConversion` is "true", the materialization is created only
+ /// to resolve a type mismatch. That means it is not a regular value
+ /// replacement issued by the user. (Replacement values that are created
+ /// "out of thin air" appear like unresolved materializations because they are
+ /// unrealized_conversion_cast ops. However, they must be treated like
+ /// regular value replacements.)
ValueRange buildUnresolvedMaterialization(
MaterializationKind kind, OpBuilder::InsertPoint ip, Location loc,
ValueVector valuesToMap, ValueRange inputs, TypeRange outputTypes,
Type originalType, const TypeConverter *converter,
- UnrealizedConversionCastOp *castOp = nullptr);
+ UnrealizedConversionCastOp *castOp = nullptr,
+ bool isPureTypeConversion = true);
/// Find a replacement value for the given SSA value in the conversion value
/// mapping. The replacement value must have the same type as the given SSA
@@ -1264,10 +1244,77 @@ void ConversionPatternRewriterImpl::applyRewrites() {
// State Management
//===----------------------------------------------------------------------===//
-ValueVector
-ConversionPatternRewriterImpl::lookupOrDefault(Value from,
- TypeRange desiredTypes) const {
- return mapping.lookupOrDefault(from, desiredTypes);
+ValueVector ConversionPatternRewriterImpl::lookupOrDefault(
+ Value from, TypeRange desiredTypes, bool skipPureTypeConversions) const {
+ // Helper function that looks up each value in `values` individually and then
+ // composes the results. If that fails, it tries to look up the entire vector
+ // at once.
+ auto composedLookup = [&](const ValueVector &values) -> ValueVector {
+ // If possible, replace each value with (one or multiple) mapped values.
+ ValueVector next;
+ for (Value v : values) {
+ ValueVector r = mapping.lookup({v});
+ if (!r.empty()) {
+ llvm::append_range(next, r);
+ } else {
+ next.push_back(v);
+ }
+ }
+ if (next != values) {
+ // At least one value was replaced.
+ return next;
+ }
+
+ // Otherwise: Check if there is a mapping for the entire vector. Such
+ // mappings are materializations. (N:M mappings are not supported for value
+ // replacements.)
+ //
+ // Note: From a correctness point of view, materializations do not have to
+ // be stored (and looked up) in the mapping. But for performance reasons,
+ // we choose to reuse existing IR (when possible) instead of creating it
+ // multiple times.
+ ValueVector r = mapping.lookup(values);
+ if (r.empty()) {
+ // No mapping found: The lookup stops here.
+ return {};
+ }
+ return r;
+ };
+
+ // Try to find the deepest values that have the desired types. If there is no
+ // such mapping, simply return the deepest values.
+ ValueVector desiredValue;
+ ValueVector current{from};
+ ValueVector lastNonMaterialization{from};
+ do {
+ // Store the current value if the types match.
+ bool match = TypeRange(ValueRange(current)) == desiredTypes;
+ if (skipPureTypeConversions) {
+ // Skip pure type conversions, if requested.
+ bool pureConversion = isPureTypeConversion(current);
+ match &= !pureConversion;
+ // Keep track of the last mapped value that was not a pure type
+ // conversion.
+ if (!pureConversion)
+ lastNonMaterialization = current;
+ }
+ if (match)
+ desiredValue = current;
+
+ // Lookup next value in the mapping.
+ ValueVector next = composedLookup(current);
+ if (next.empty())
+ break;
+ current = std::move(next);
+ } while (true);
+
+ // If the desired values were found, use them; otherwise default to the leaf
+ // values. (Skip pure type conversions, if requested.)
+ if (!desiredTypes.empty())
+ return desiredValue;
+ if (skipPureTypeConversions)
+ return lastNonMaterialization;
+ return current;
}
ValueVector
@@ -1324,10 +1371,13 @@ LogicalResult ConversionPatternRewriterImpl::remapValues(
Location operandLoc = inputLoc ? *inputLoc : operand.getLoc();
if (!currentTypeConverter) {
- // The current pattern does not have a type converter. I.e., it does not
- // distinguish between legal and illegal types. For each operand, simply
- // pass through the most recently mapped values.
- remapped.push_back(lookupOrDefault(operand));
+ // The current pattern does not have a type converter. Pass the most
+ // recently mapped values, excluding materializations. Materializations
+ // are intentionally excluded because their presence may depend on other
+ // patterns. Including materializations would make the lookup fragile
+ // and unpredictable.
+ remapped.push_back(lookupOrDefault(operand, /*desiredTypes=*/{},
+ /*skipPureTypeConversions=*/true));
continue;
}
@@ -1356,7 +1406,8 @@ LogicalResult ConversionPatternRewriterImpl::remapValues(
}
// Create a materialization for the most recently mapped values.
- repl = lookupOrDefault(operand);
+ repl = lookupOrDefault(operand, /*desiredTypes=*/{},
+ /*skipPureTypeConversions=*/true);
ValueRange castValues = buildUnresolvedMaterialization(
MaterializationKind::Target, computeInsertPoint(repl), operandLoc,
/*valuesToMap=*/repl, /*inputs=*/repl, /*outputTypes=*/legalTypes,
@@ -1482,7 +1533,8 @@ Block *ConversionPatternRewriterImpl::applySignatureConversion(
OpBuilder::InsertPoint(newBlock, newBlock->begin()),
origArg.getLoc(),
/*valuesToMap=*/{}, /*inputs=*/ValueRange(),
- /*outputTypes=*/origArgType, /*originalType=*/Type(), converter)
+ /*outputTypes=*/origArgType, /*originalType=*/Type(), converter,
+ /*castOp=*/nullptr, /*isPureTypeConversion=*/false)
.front();
replaceUsesOfBlockArgument(origArg, mat, converter);
continue;
@@ -1523,7 +1575,7 @@ ValueRange ConversionPatternRewriterImpl::buildUnresolvedMaterialization(
MaterializationKind kind, OpBuilder::InsertPoint ip, Location loc,
ValueVector valuesToMap, ValueRange inputs, TypeRange outputTypes,
Type originalType, const TypeConverter *converter,
- UnrealizedConversionCastOp *castOp) {
+ UnrealizedConversionCastOp *castOp, bool isPureTypeConversion) {
assert((!originalType || kind == MaterializationKind::Target) &&
"original type is valid only for target materializations");
assert(TypeRange(inputs) != outputTypes &&
@@ -1535,6 +1587,8 @@ ValueRange ConversionPatternRewriterImpl::buildUnresolvedMaterialization(
builder.setInsertionPoint(ip.getBlock(), ip.getPoint());
auto convertOp =
UnrealizedConversionCastOp::create(builder, loc, outputTypes, inputs);
+ if (isPureTypeConversion)
+ convertOp->setAttr(kPureTypeConversionMarker, builder.getUnitAttr());
if (!valuesToMap.empty())
mapping.map(valuesToMap, convertOp.getResults());
if (castOp)
@@ -1650,7 +1704,8 @@ void ConversionPatternRewriterImpl::replaceOp(
MaterializationKind::Source, computeInsertPoint(result),
result.getLoc(), /*valuesToMap=*/{result}, /*inputs=*/ValueRange(),
/*outputTypes=*/result.getType(), /*originalType=*/Type(),
- currentTypeConverter);
+ currentTypeConverter, /*castOp=*/nullptr,
+ /*isPureTypeConversion=*/false);
continue;
}
@@ -1754,6 +1809,10 @@ ConversionPatternRewriter::ConversionPatternRewriter(
ConversionPatternRewriter::~ConversionPatternRewriter() = default;
+const ConversionConfig &ConversionPatternRewriter::getConfig() const {
+ return impl->config;
+}
+
void ConversionPatternRewriter::replaceOp(Operation *op, Operation *newOp) {
assert(op && newOp && "expected non-null op");
replaceOp(op, newOp->getResults());
@@ -1895,7 +1954,7 @@ void ConversionPatternRewriter::inlineBlockBefore(Block *source, Block *dest,
// ops should be moved one-by-one ("slow path"), so that a separate
// `MoveOperationRewrite` is enqueued for each moved op. Moving ops in bulk is
// a bit more efficient, so we try to do that when possible.
- bool fastPath = !impl->config.listener;
+ bool fastPath = !getConfig().listener;
if (fastPath)
impl->inlineBlockBefore(source, dest, before);
@@ -2018,8 +2077,7 @@ public:
using LegalizationAction = ConversionTarget::LegalizationAction;
OperationLegalizer(const ConversionTarget &targetInfo,
- const FrozenRewritePatternSet &patterns,
- const ConversionConfig &config);
+ const FrozenRewritePatternSet &patterns);
/// Returns true if the given operation is known to be illegal on the target.
bool isIllegal(Operation *op) const;
@@ -2116,16 +2174,12 @@ private:
/// The pattern applicator to use for conversions.
PatternApplicator applicator;
-
- /// Dialect conversion configuration.
- const ConversionConfig &config;
};
} // namespace
OperationLegalizer::OperationLegalizer(const ConversionTarget &targetInfo,
- const FrozenRewritePatternSet &patterns,
- const ConversionConfig &config)
- : target(targetInfo), applicator(patterns), config(config) {
+ const FrozenRewritePatternSet &patterns)
+ : target(targetInfo), applicator(patterns) {
// The set of patterns that can be applied to illegal operations to transform
// them into legal ones.
DenseMap<OperationName, LegalizationPatterns> legalizerPatterns;
@@ -2203,15 +2257,17 @@ OperationLegalizer::legalize(Operation *op,
return success();
}
- // If the operation isn't legal, try to fold it in-place.
- // TODO: Should we always try to do this, even if the op is
- // already legal?
- if (succeeded(legalizeWithFold(op, rewriter))) {
- LLVM_DEBUG({
- logSuccess(logger, "operation was folded");
- logger.startLine() << logLineComment;
- });
- return success();
+ // If the operation is not legal, try to fold it in-place when the folding mode
+ // is 'BeforePatterns'. With 'Never', folding is skipped entirely.
+ const ConversionConfig &config = rewriter.getConfig();
+ if (config.foldingMode == DialectConversionFoldingMode::BeforePatterns) {
+ if (succeeded(legalizeWithFold(op, rewriter))) {
+ LLVM_DEBUG({
+ logSuccess(logger, "operation was folded");
+ logger.startLine() << logLineComment;
+ });
+ return success();
+ }
}
// Otherwise, we need to apply a legalization pattern to this operation.
@@ -2223,6 +2279,18 @@ OperationLegalizer::legalize(Operation *op,
return success();
}
+ // If the operation can't be legalized via patterns, try to fold it in-place
+ // if the folding mode is 'AfterPatterns'.
+ if (config.foldingMode == DialectConversionFoldingMode::AfterPatterns) {
+ if (succeeded(legalizeWithFold(op, rewriter))) {
+ LLVM_DEBUG({
+ logSuccess(logger, "operation was folded");
+ logger.startLine() << logLineComment;
+ });
+ return success();
+ }
+ }
+
LLVM_DEBUG({
logFailure(logger, "no matched legalization pattern");
logger.startLine() << logLineComment;
@@ -2286,7 +2354,7 @@ OperationLegalizer::legalizeWithFold(Operation *op,
LLVM_DEBUG(logFailure(rewriterImpl.logger,
"failed to legalize generated constant '{0}'",
newOp->getName()));
- if (!config.allowPatternRollback) {
+ if (!rewriter.getConfig().allowPatternRollback) {
// Rolling back a folder is like rolling back a pattern.
llvm::report_fatal_error(
"op '" + opName +
@@ -2302,10 +2370,36 @@ OperationLegalizer::legalizeWithFold(Operation *op,
return success();
}
+/// Report a fatal error indicating that newly produced or modified IR could
+/// not be legalized.
+static void
+reportNewIrLegalizationFatalError(const Pattern &pattern,
+ const SetVector<Operation *> &newOps,
+ const SetVector<Operation *> &modifiedOps,
+ const SetVector<Block *> &insertedBlocks) {
+ auto newOpNames = llvm::map_range(
+ newOps, [](Operation *op) { return op->getName().getStringRef(); });
+ auto modifiedOpNames = llvm::map_range(
+ modifiedOps, [](Operation *op) { return op->getName().getStringRef(); });
+ StringRef detachedBlockStr = "(detached block)";
+ auto insertedBlockNames = llvm::map_range(insertedBlocks, [&](Block *block) {
+ if (block->getParentOp())
+ return block->getParentOp()->getName().getStringRef();
+ return detachedBlockStr;
+ });
+ llvm::report_fatal_error(
+ "pattern '" + pattern.getDebugName() +
+ "' produced IR that could not be legalized. " + "new ops: {" +
+ llvm::join(newOpNames, ", ") + "}, " + "modified ops: {" +
+ llvm::join(modifiedOpNames, ", ") + "}, " + "inserted block into ops: {" +
+ llvm::join(insertedBlockNames, ", ") + "}");
+}
+
LogicalResult
OperationLegalizer::legalizeWithPattern(Operation *op,
ConversionPatternRewriter &rewriter) {
auto &rewriterImpl = rewriter.getImpl();
+ const ConversionConfig &config = rewriter.getConfig();
#if MLIR_ENABLE_EXPENSIVE_PATTERN_API_CHECKS
Operation *checkOp;
@@ -2389,8 +2483,8 @@ OperationLegalizer::legalizeWithPattern(Operation *op,
appliedPatterns.erase(&pattern);
if (failed(result)) {
if (!rewriterImpl.config.allowPatternRollback)
- llvm::report_fatal_error("pattern '" + pattern.getDebugName() +
- "' produced IR that could not be legalized");
+ reportNewIrLegalizationFatalError(pattern, newOps, modifiedOps,
+ insertedBlocks);
rewriterImpl.resetState(curState, pattern.getDebugName());
}
if (config.listener)
@@ -2749,8 +2843,7 @@ struct OperationConverter {
const FrozenRewritePatternSet &patterns,
const ConversionConfig &config,
OpConversionMode mode)
- : config(config), opLegalizer(target, patterns, this->config),
- mode(mode) {}
+ : config(config), opLegalizer(target, patterns), mode(mode) {}
/// Converts the given operations to the conversion target.
LogicalResult convertOperations(ArrayRef<Operation *> ops);
@@ -2902,6 +2995,10 @@ LogicalResult OperationConverter::convertOperations(ArrayRef<Operation *> ops) {
SmallVector<UnrealizedConversionCastOp> remainingCastOps;
reconcileUnrealizedCasts(allCastOps, &remainingCastOps);
+ // Drop markers.
+ for (UnrealizedConversionCastOp castOp : remainingCastOps)
+ castOp->removeAttr(kPureTypeConversionMarker);
+
// Try to legalize all unresolved materializations.
if (config.buildMaterializations) {
IRRewriter rewriter(rewriterImpl.context, config.listener);
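The folding changes above route all configuration through `rewriter.getConfig()`. As a hedged sketch (not part of this patch), a pass could opt into post-pattern folding roughly as follows, assuming the `DialectConversionFoldingMode` enum and `ConversionConfig::foldingMode` field introduced here and the existing `applyPartialConversion` overload that accepts a `ConversionConfig`:

    // Inside a conversion pass, with `target` and `patterns` already set up.
    ConversionConfig config;
    // Fold an illegal op only after no pattern managed to legalize it.
    config.foldingMode = DialectConversionFoldingMode::AfterPatterns;
    // Keep rollback enabled; with it disabled, pattern failures become fatal errors.
    config.allowPatternRollback = true;
    if (failed(applyPartialConversion(op, target, std::move(patterns), config)))
      signalPassFailure();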
diff --git a/mlir/python/mlir/dialects/linalg/opdsl/ops/core_named_ops.py b/mlir/python/mlir/dialects/linalg/opdsl/ops/core_named_ops.py
index 1b359da..fd4a5a8 100644
--- a/mlir/python/mlir/dialects/linalg/opdsl/ops/core_named_ops.py
+++ b/mlir/python/mlir/dialects/linalg/opdsl/ops/core_named_ops.py
@@ -374,42 +374,6 @@ def quantized_matmul(
@linalg_structured_op
-def matmul_transpose_a(
- A=TensorDef(T1, S.K, S.N),
- B=TensorDef(T2, S.K, S.M),
- C=TensorDef(U, S.M, S.N, output=True),
- cast=TypeFnAttrDef(default=TypeFn.cast_signed),
-):
- """Performs a matrix multiplication of two 2D inputs with lhs operand
- transposed.
-
- Numeric casting is performed on the operands to the inner multiply, promoting
- them to the same data type as the accumulator/output.
- """
- domain(D.m, D.n, D.k)
- implements(ContractionOpInterface)
- C[D.m, D.n] += cast(U, A[D.k, D.m]) * cast(U, B[D.k, D.n])
-
-
-@linalg_structured_op
-def matmul_transpose_b(
- A=TensorDef(T1, S.M, S.K),
- B=TensorDef(T2, S.N, S.K),
- C=TensorDef(U, S.M, S.N, output=True),
- cast=TypeFnAttrDef(default=TypeFn.cast_signed),
-):
- """Performs a matrix multiplication of two 2D inputs with rhs operand
- transposed.
-
- Numeric casting is performed on the operands to the inner multiply, promoting
- them to the same data type as the accumulator/output.
- """
- domain(D.m, D.n, D.k)
- implements(ContractionOpInterface)
- C[D.m, D.n] += cast(U, A[D.m, D.k]) * cast(U, B[D.n, D.k])
-
-
-@linalg_structured_op
def mmt4d(
lhs=TensorDef(TV.LhsType, S.M, S.K, S.M0, S.K0),
rhs=TensorDef(TV.RhsType, S.N, S.K, S.N0, S.K0),
@@ -454,44 +418,6 @@ def batch_mmt4d(
@linalg_structured_op
-def batch_matmul_transpose_a(
- A=TensorDef(T1, Batch, S.K, S.M),
- B=TensorDef(T2, Batch, S.K, S.N),
- C=TensorDef(U, Batch, S.M, S.N, output=True),
-):
- """Performs a batched matrix multiplication of two 3D inputs where lhs operand
- has its non-batch dimensions transposed.
-
- Numeric casting is performed on the operands to the inner multiply, promoting
- them to the same data type as the accumulator/output.
- """
- domain(D.b, D.m, D.n, D.k)
- implements(ContractionOpInterface)
- C[D.b, D.m, D.n] += TypeFn.cast_signed(U, A[D.b, D.k, D.m]) * TypeFn.cast_signed(
- U, B[D.b, D.k, D.n]
- )
-
-
-@linalg_structured_op
-def batch_matmul_transpose_b(
- A=TensorDef(T1, Batch, S.M, S.K),
- B=TensorDef(T2, Batch, S.N, S.K),
- C=TensorDef(U, Batch, S.M, S.N, output=True),
-):
- """Performs a batched matrix multiplication of two 3D inputs where rhs operand
- has its non-batch dimensions transposed.
-
- Numeric casting is performed on the operands to the inner multiply, promoting
- them to the same data type as the accumulator/output.
- """
- domain(D.b, D.m, D.n, D.k)
- implements(ContractionOpInterface)
- C[D.b, D.m, D.n] += TypeFn.cast_signed(U, A[D.b, D.m, D.k]) * TypeFn.cast_signed(
- U, B[D.b, D.n, D.k]
- )
-
-
-@linalg_structured_op
def quantized_batch_matmul(
A=TensorDef(T1, Batch, S.M, S.K),
B=TensorDef(T2, Batch, S.K, S.N),
@@ -513,25 +439,6 @@ def quantized_batch_matmul(
@linalg_structured_op
-def batch_reduce_matmul(
- A=TensorDef(T1, Batch, S.M, S.K),
- B=TensorDef(T2, Batch, S.K, S.N),
- C=TensorDef(U, S.M, S.N, output=True),
-):
- """Performs a batch-reduce matrix multiplication of two 3D inputs.
- The partial multiplication results are reduced into a 2D output.
-
- Numeric casting is performed on the operands to the inner multiply, promoting
- them to the same data type as the accumulator/output.
- """
- domain(D.b, D.m, D.n, D.k)
- implements(ContractionOpInterface)
- C[D.m, D.n] += TypeFn.cast_signed(U, A[D.b, D.m, D.k]) * TypeFn.cast_signed(
- U, B[D.b, D.k, D.n]
- )
-
-
-@linalg_structured_op
def matvec(
A=TensorDef(T1, S.M, S.N), y=TensorDef(T2, S.N), x=TensorDef(U, S.M, output=True)
):
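The opdsl definitions removed above (`matmul_transpose_a/b`, `batch_matmul_transpose_a/b`, `batch_reduce_matmul`) are expressible through the named matmul ops with explicit indexing maps. A hedged sketch of the replacement spelling for the transposed-LHS case, assuming the `linalg.matmul` form that accepts an `indexing_maps` attribute (tensor shapes are illustrative):

    %0 = linalg.matmul
           indexing_maps = [affine_map<(d0, d1, d2) -> (d2, d0)>,  // A indexed as KxM (transposed LHS)
                            affine_map<(d0, d1, d2) -> (d2, d1)>,  // B indexed as KxN
                            affine_map<(d0, d1, d2) -> (d0, d1)>]  // C indexed as MxN
           ins(%A, %B : tensor<128x64xf32>, tensor<128x64xf32>)
           outs(%C : tensor<64x64xf32>) -> tensor<64x64xf32>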
diff --git a/mlir/test/Conversion/MemRefToSPIRV/memref-to-spirv.mlir b/mlir/test/Conversion/MemRefToSPIRV/memref-to-spirv.mlir
index 2a7be0b..e6321e9 100644
--- a/mlir/test/Conversion/MemRefToSPIRV/memref-to-spirv.mlir
+++ b/mlir/test/Conversion/MemRefToSPIRV/memref-to-spirv.mlir
@@ -85,6 +85,28 @@ func.func @load_i1(%src: memref<4xi1, #spirv.storage_class<StorageBuffer>>, %i :
return %0: i1
}
+// CHECK-LABEL: func @load_aligned
+// CHECK-SAME: (%[[SRC:.+]]: memref<4xi1, #spirv.storage_class<StorageBuffer>>, %[[IDX:.+]]: index)
+func.func @load_aligned(%src: memref<4xi1, #spirv.storage_class<StorageBuffer>>, %i : index) -> i1 {
+ // CHECK: spirv.Load "StorageBuffer" {{.*}} ["Aligned", 32] : i8
+ %0 = memref.load %src[%i] { alignment = 32 } : memref<4xi1, #spirv.storage_class<StorageBuffer>>
+ return %0: i1
+}
+
+// CHECK-LABEL: func @load_aligned_nontemporal
+func.func @load_aligned_nontemporal(%src: memref<4xi1, #spirv.storage_class<StorageBuffer>>, %i : index) -> i1 {
+ // CHECK: spirv.Load "StorageBuffer" {{.*}} ["Aligned|Nontemporal", 32] : i8
+ %0 = memref.load %src[%i] { alignment = 32, nontemporal = true } : memref<4xi1, #spirv.storage_class<StorageBuffer>>
+ return %0: i1
+}
+
+// CHECK-LABEL: func @load_aligned_psb
+func.func @load_aligned_psb(%src: memref<4xi1, #spirv.storage_class<PhysicalStorageBuffer>>, %i : index) -> i1 {
+ // CHECK: %[[VAL:.+]] = spirv.Load "PhysicalStorageBuffer" {{.*}} ["Aligned", 32] : i8
+ %0 = memref.load %src[%i] { alignment = 32 } : memref<4xi1, #spirv.storage_class<PhysicalStorageBuffer>>
+ return %0: i1
+}
+
// CHECK-LABEL: func @store_i1
// CHECK-SAME: %[[DST:.+]]: memref<4xi1, #spirv.storage_class<StorageBuffer>>,
// CHECK-SAME: %[[IDX:.+]]: index
diff --git a/mlir/test/Conversion/NVVMToLLVM/nvvm-to-llvm.mlir b/mlir/test/Conversion/NVVMToLLVM/nvvm-to-llvm.mlir
index 580b09d..e505767 100644
--- a/mlir/test/Conversion/NVVMToLLVM/nvvm-to-llvm.mlir
+++ b/mlir/test/Conversion/NVVMToLLVM/nvvm-to-llvm.mlir
@@ -681,3 +681,17 @@ llvm.func @ex2(%input : f32, %pred : i1) {
%1 = nvvm.inline_ptx "ex2.approx.ftz.f32 $0, $1;" (%input), predicate = %pred : f32, i1 -> f32
llvm.return
}
+
+// -----
+
+// CHECK-LABEL: @nvvm_pmevent
+llvm.func @nvvm_pmevent() {
+ // CHECK: %[[S0:.+]] = llvm.mlir.constant(10 : i32) : i32
+ // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "pmevent $0;", "n" %[[S0]] : (i32) -> ()
+
+ nvvm.pmevent id = 10
+ // CHECK: %[[S1:.+]] = llvm.mlir.constant(4 : i32) : i32
+ // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "pmevent $0;", "n" %[[S1]] : (i32) -> ()
+ nvvm.pmevent id = 4
+ llvm.return
+}
diff --git a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm-interface.mlir b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm-interface.mlir
index 31e17fb..5a424a8 100644
--- a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm-interface.mlir
+++ b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm-interface.mlir
@@ -1679,6 +1679,16 @@ func.func @load_0d(%memref : memref<200x100xf32>, %i : index, %j : index) -> vec
// -----
+func.func @load_with_alignment(%memref : memref<200x100xf32>, %i : index, %j : index) -> vector<8xf32> {
+ %0 = vector.load %memref[%i, %j] { alignment = 8 } : memref<200x100xf32>, vector<8xf32>
+ return %0 : vector<8xf32>
+}
+
+// CHECK-LABEL: func @load_with_alignment
+// CHECK: llvm.load {{.*}} {alignment = 8 : i64} : !llvm.ptr -> vector<8xf32>
+
+// -----
+
//===----------------------------------------------------------------------===//
// vector.store
//===----------------------------------------------------------------------===//
@@ -1785,6 +1795,16 @@ func.func @store_0d(%memref : memref<200x100xf32>, %i : index, %j : index) {
// -----
+func.func @store_with_alignment(%memref : memref<200x100xf32>, %i : index, %j : index, %val : vector<4xf32>) {
+ vector.store %val, %memref[%i, %j] {alignment = 8} : memref<200x100xf32>, vector<4xf32>
+ return
+}
+
+// CHECK-LABEL: func @store_with_alignment
+// CHECK: llvm.store %{{.*}} {alignment = 8 : i64} : vector<4xf32>, !llvm.ptr
+
+// -----
+
//===----------------------------------------------------------------------===//
// vector.maskedload
//===----------------------------------------------------------------------===//
@@ -1839,6 +1859,16 @@ func.func @masked_load_index_scalable(%arg0: memref<?xindex>, %arg1: vector<[16]
// -----
+func.func @masked_load_with_alignment(%arg0: memref<?xf32>, %arg1: vector<16xi1>, %arg2: vector<16xf32>, %arg3: index) -> vector<16xf32> {
+ %0 = vector.maskedload %arg0[%arg3], %arg1, %arg2 { alignment = 2 } : memref<?xf32>, vector<16xi1>, vector<16xf32> into vector<16xf32>
+ return %0 : vector<16xf32>
+}
+
+// CHECK-LABEL: func @masked_load_with_alignment
+// CHECK: llvm.intr.masked.load %{{.*}} {alignment = 2 : i32} : (!llvm.ptr, vector<16xi1>, vector<16xf32>) -> vector<16xf32>
+
+// -----
+
//===----------------------------------------------------------------------===//
// vector.maskedstore
//===----------------------------------------------------------------------===//
@@ -1891,6 +1921,16 @@ func.func @masked_store_index_scalable(%arg0: memref<?xindex>, %arg1: vector<[16
// -----
+func.func @masked_store_with_alignment(%arg0: memref<?xf32>, %arg1: vector<16xi1>, %arg2: vector<16xf32>, %arg3: index) {
+ vector.maskedstore %arg0[%arg3], %arg1, %arg2 { alignment = 2 } : memref<?xf32>, vector<16xi1>, vector<16xf32>
+ return
+}
+
+// CHECK-LABEL: func @masked_store_with_alignment
+// CHECK: llvm.intr.masked.store %{{.*}} {alignment = 2 : i32} : vector<16xf32>, vector<16xi1> into !llvm.ptr
+
+// -----
+
//===----------------------------------------------------------------------===//
// vector.gather
//===----------------------------------------------------------------------===//
diff --git a/mlir/test/Dialect/AMDGPU/amdgpu-fold-memrefs.mlir b/mlir/test/Dialect/AMDGPU/amdgpu-fold-memrefs.mlir
index 57afa12..8ca3dd6 100644
--- a/mlir/test/Dialect/AMDGPU/amdgpu-fold-memrefs.mlir
+++ b/mlir/test/Dialect/AMDGPU/amdgpu-fold-memrefs.mlir
@@ -54,18 +54,20 @@ func.func @subview_folding_offset(%offset_i: index, %offset_j: index) {
// CHECK: func @test_expand_shape
// CHECK-SAME: %[[ARG0:.*]]: index, %[[ARG1:.*]]: index
func.func @test_expand_shape(%offset_i: index, %offset_j: index) {
- // CHECK: %[[LOCAL:.*]] = memref.alloc() : memref<64x64xf16, 3>
+ // CHECK: %[[LOCAL:.*]] = memref.alloc() : memref<4096xf16, 3>
// CHECK: %[[MEM:.*]] = memref.alloc() : memref<8192xf16>
// CHECK: %[[C0:.*]] = arith.constant 0 : index
- // CHECK: %[[IDX:.*]] = affine.linearize_index [%[[ARG0]], %[[ARG1]]] by (64, 128) : index
- // CHECK: amdgpu.gather_to_lds %[[MEM]][%[[IDX]]], %[[LOCAL]][%[[C0]], %[[C0]]]
- // CHECK-SAME: vector<8xf16>, memref<8192xf16>, memref<64x64xf16, 3>
+ // CHECK: %[[IDXM:.*]] = affine.linearize_index [%[[ARG0]], %[[ARG1]]] by (64, 128) : index
+ // CHECK: %[[IDXL:.*]] = affine.linearize_index [%[[C0]], %[[C0]]] by (64, 64) : index
+ // CHECK: amdgpu.gather_to_lds %[[MEM]][%[[IDXM]]], %[[LOCAL]][%[[IDXL]]]
+ // CHECK-SAME: vector<8xf16>, memref<8192xf16>, memref<4096xf16, 3>
- %alloc = memref.alloc() : memref<64x64xf16, #gpu_lds_addrspace>
+ %alloc = memref.alloc() : memref<4096xf16, #gpu_lds_addrspace>
%mem = memref.alloc() : memref<8192xf16>
- %expand = memref.expand_shape %mem [[0, 1]] output_shape [64, 128] : memref<8192xf16> into memref<64x128xf16>
+ %expand_mem = memref.expand_shape %mem [[0, 1]] output_shape [64, 128] : memref<8192xf16> into memref<64x128xf16>
+ %expand_alloc = memref.expand_shape %alloc [[0, 1]] output_shape [64, 64] : memref<4096xf16, #gpu_lds_addrspace> into memref<64x64xf16, #gpu_lds_addrspace>
%c0 = arith.constant 0 : index
- amdgpu.gather_to_lds %expand[%offset_i, %offset_j], %alloc[%c0, %c0]
+ amdgpu.gather_to_lds %expand_mem[%offset_i, %offset_j], %expand_alloc[%c0, %c0]
: vector<8xf16>, memref<64x128xf16>, memref<64x64xf16, #gpu_lds_addrspace>
func.return
}
@@ -80,15 +82,82 @@ func.func @test_collapse_shape(%offset_i: index, %offset_j: index) {
// CHECK: %[[LOCAL:.*]] = memref.alloc() : memref<64x64xf16, 3>
// CHECK: %[[MEM:.*]] = memref.alloc() : memref<64x128xf16>
// CHECK: %[[C0:.*]] = arith.constant 0 : index
- // CHECK: %[[INDICES:.*]]:2 = affine.delinearize_index %[[ARG0]] into (64, 128) : index, index
- // CHECK: amdgpu.gather_to_lds %[[MEM]][%[[INDICES]]#0, %[[INDICES]]#1], %[[LOCAL]][%[[C0]], %[[C0]]]
+ // CHECK: %[[INDICES_MEM:.*]]:2 = affine.delinearize_index %[[ARG0]] into (64, 128) : index, index
+ // CHECK: %[[INDICES_LDS:.*]]:2 = affine.delinearize_index %[[ARG1]] into (64, 64) : index, index
+ // CHECK: amdgpu.gather_to_lds %[[MEM]][%[[INDICES_MEM]]#0, %[[INDICES_MEM]]#1], %[[LOCAL]][%[[INDICES_LDS]]#0, %[[INDICES_LDS]]#1]
// CHECK-SAME: vector<8xf16>, memref<64x128xf16>, memref<64x64xf16, 3>
%alloc = memref.alloc() : memref<64x64xf16, #gpu_lds_addrspace>
+ %collapse_alloc = memref.collapse_shape %alloc [[0, 1]] : memref<64x64xf16, #gpu_lds_addrspace> into memref<4096xf16, #gpu_lds_addrspace>
%mem = memref.alloc() : memref<64x128xf16>
- %collapse = memref.collapse_shape %mem [[0, 1]] : memref<64x128xf16> into memref<8192xf16>
+ %collapse_mem = memref.collapse_shape %mem [[0, 1]] : memref<64x128xf16> into memref<8192xf16>
%c0 = arith.constant 0 : index
- amdgpu.gather_to_lds %collapse[%offset_i], %alloc[%c0, %c0]
+ amdgpu.gather_to_lds %collapse_mem[%offset_i], %collapse_alloc[%offset_j]
+ : vector<8xf16>, memref<8192xf16>, memref<4096xf16, #gpu_lds_addrspace>
+ func.return
+}
+
+
+// -----
+
+#gpu_lds_addrspace = 3
+
+
+// CHECK: func @test_expand_shape_src_raw_buffer
+// CHECK-SAME: %[[ARG0:.*]]: memref<8192xf16, #amdgpu.address_space<fat_raw_buffer>>, %[[ARG1:.*]]: index, %[[ARG2:.*]]: index
+func.func @test_expand_shape_src_raw_buffer(%mem : memref<8192xf16, #amdgpu.address_space<fat_raw_buffer>>, %offset_i: index, %offset_j: index) {
+ // CHECK: %[[LOCAL:.*]] = memref.alloc() : memref<4096xf16, 3>
+ // CHECK: %[[C0:.*]] = arith.constant 0 : index
+ // CHECK: %[[IDXM:.*]] = affine.linearize_index [%[[ARG1]], %[[ARG2]]] by (64, 128) : index
+ // CHECK: amdgpu.gather_to_lds %[[ARG0]][%[[IDXM]]], %[[LOCAL]][%[[C0]]]
+ // CHECK-SAME: vector<8xf16>, memref<8192xf16, #amdgpu.address_space<fat_raw_buffer>>, memref<4096xf16, 3>
+
+ %alloc = memref.alloc() : memref<4096xf16, #gpu_lds_addrspace>
+ %expand_mem = memref.expand_shape %mem [[0, 1]] output_shape [64, 128] : memref<8192xf16, #amdgpu.address_space<fat_raw_buffer>> into memref<64x128xf16, #amdgpu.address_space<fat_raw_buffer>>
+
+ %c0 = arith.constant 0 : index
+ amdgpu.gather_to_lds %expand_mem[%offset_i, %offset_j], %alloc[%c0]
+ : vector<8xf16>, memref<64x128xf16, #amdgpu.address_space<fat_raw_buffer>>, memref<4096xf16, #gpu_lds_addrspace>
+ func.return
+}
+
+// -----
+
+#gpu_lds_addrspace = 3
+
+// CHECK: func @test_expand_shape_dst_only
+// CHECK-SAME: %[[ARG0:.*]]: index, %[[ARG1:.*]]: index
+func.func @test_expand_shape_dst_only(%offset_i: index, %offset_j: index) {
+ // CHECK: %[[LOCAL:.*]] = memref.alloc() : memref<4096xf16, 3>
+ // CHECK: %[[MEM:.*]] = memref.alloc() : memref<8192xf16>
+ // CHECK: %[[C0:.*]] = arith.constant 0 : index
+ // CHECK: %[[IDX_LDS:.*]] = affine.linearize_index [%[[ARG1]], %[[C0]]] by (64, 64) : index
+ // CHECK: amdgpu.gather_to_lds %[[MEM]][%[[ARG0]]], %[[LOCAL]][%[[IDX_LDS]]]
+ // CHECK-SAME: vector<8xf16>, memref<8192xf16>, memref<4096xf16, 3>
+
+ %alloc = memref.alloc() : memref<4096xf16, #gpu_lds_addrspace>
+ %mem = memref.alloc() : memref<8192xf16>
+ %expand_alloc = memref.expand_shape %alloc [[0, 1]] output_shape [64, 64] : memref<4096xf16, #gpu_lds_addrspace> into memref<64x64xf16, #gpu_lds_addrspace>
+
+ %c0 = arith.constant 0 : index
+ amdgpu.gather_to_lds %mem[%offset_i], %expand_alloc[%offset_j, %c0]
: vector<8xf16>, memref<8192xf16>, memref<64x64xf16, #gpu_lds_addrspace>
func.return
}
+
+// -----
+
+#gpu_lds_addrspace = 3
+
+// CHECK: func @test_nop
+// CHECK-SAME: %[[ARG0:.*]]: memref<8192xf16, #amdgpu.address_space<fat_raw_buffer>>, %[[ARG1:.*]]: index, %[[ARG2:.*]]: index
+func.func @test_nop(%mem : memref<8192xf16, #amdgpu.address_space<fat_raw_buffer>>, %offset_i: index, %offset_j: index) {
+ // CHECK: %[[LOCAL:.*]] = memref.alloc() : memref<4096xf16, 3>
+ // CHECK: amdgpu.gather_to_lds %[[ARG0]][%[[ARG1]]], %[[LOCAL]][%[[ARG2]]]
+ // CHECK-SAME: vector<8xf16>, memref<8192xf16, #amdgpu.address_space<fat_raw_buffer>>, memref<4096xf16, 3>
+
+ %alloc = memref.alloc() : memref<4096xf16, #gpu_lds_addrspace>
+ amdgpu.gather_to_lds %mem[%offset_i], %alloc[%offset_j]
+ : vector<8xf16>, memref<8192xf16, #amdgpu.address_space<fat_raw_buffer>>, memref<4096xf16, #gpu_lds_addrspace>
+ func.return
+}
diff --git a/mlir/test/Dialect/AMDGPU/invalid.mlir b/mlir/test/Dialect/AMDGPU/invalid.mlir
index 0d2fd24..66e7dd4 100644
--- a/mlir/test/Dialect/AMDGPU/invalid.mlir
+++ b/mlir/test/Dialect/AMDGPU/invalid.mlir
@@ -230,3 +230,11 @@ func.func @gather_to_lds_non_lds(%idx1 : index, %mem1 : memref<32xf16>, %mem2 :
amdgpu.gather_to_lds %mem1[%idx1], %mem2[%idx1] : vector<2xf16>, memref<32xf16>, memref<32xf16>
func.return
}
+
+// -----
+
+func.func @gather_to_lds_non_lds(%idx1 : index, %mem1 : memref<32xf16>, %mem2 : memref<32xf16, strided<[?]>, #gpu.address_space<workgroup>>) {
+ // expected-error@+1 {{'amdgpu.gather_to_lds' op destination type inner most dim must be contiguous}}
+ amdgpu.gather_to_lds %mem1[%idx1], %mem2[%idx1] : vector<2xf16>, memref<32xf16>, memref<32xf16, strided<[?]>, #gpu.address_space<workgroup>>
+ func.return
+}
diff --git a/mlir/test/Dialect/AMDGPU/ops.mlir b/mlir/test/Dialect/AMDGPU/ops.mlir
index fe78b53..87e11c0 100644
--- a/mlir/test/Dialect/AMDGPU/ops.mlir
+++ b/mlir/test/Dialect/AMDGPU/ops.mlir
@@ -539,13 +539,15 @@ func.func @transpose_load(%idx1 : index, %idx2 : index, %mem : memref<128x32xf16
}
// CHECK-LABEL: func @gather_to_lds
-func.func @gather_to_lds(%idx1 : index, %idx2 : index, %mem1 : memref<32xf16>, %mem2 : memref<32x32xf16>, %smem1 : memref<32xf16, #gpu.address_space<workgroup>>, %smem2 : memref<32x32xf16, #gpu.address_space<workgroup>>) {
+func.func @gather_to_lds(%idx1 : index, %idx2 : index, %mem1 : memref<32xf16>, %mem2 : memref<32x32xf16>, %smem1 : memref<32xf16, #gpu.address_space<workgroup>>, %smem2 : memref<32x32xf16, #gpu.address_space<workgroup>>, %smem3 : memref<?x?xf16, strided<[?, 1]>, #gpu.address_space<workgroup>>) {
// CHECK: amdgpu.gather_to_lds %{{.*}}[%{{.*}}, %{{.*}}], %{{.*}}[%{{.*}}, %{{.*}}]
// CHECK: amdgpu.gather_to_lds %{{.*}}[%{{.*}}, %{{.*}}], %{{.*}}[%{{.*}}]
// CHECK: amdgpu.gather_to_lds %{{.*}}[%{{.*}}], %{{.*}}[%{{.*}}, %{{.*}}]
+ // CHECK: amdgpu.gather_to_lds %{{.*}}[%{{.*}}], %{{.*}}[%{{.*}}, %{{.*}}]
amdgpu.gather_to_lds %mem2[%idx1, %idx2], %smem2[%idx1, %idx2] : vector<2xf16>, memref<32x32xf16>, memref<32x32xf16, #gpu.address_space<workgroup>>
amdgpu.gather_to_lds %mem2[%idx1, %idx2], %smem1[%idx1] : vector<2xf16>, memref<32x32xf16>, memref<32xf16, #gpu.address_space<workgroup>>
amdgpu.gather_to_lds %mem1[%idx1], %smem2[%idx1, %idx2] : vector<2xf16>, memref<32xf16>, memref<32x32xf16, #gpu.address_space<workgroup>>
+ amdgpu.gather_to_lds %mem1[%idx1], %smem3[%idx1, %idx2] : vector<2xf16>, memref<32xf16>, memref<?x?xf16, strided<[?, 1]>, #gpu.address_space<workgroup>>
func.return
}
diff --git a/mlir/test/Dialect/Arith/canonicalize.mlir b/mlir/test/Dialect/Arith/canonicalize.mlir
index 3d5a46d..78f6782 100644
--- a/mlir/test/Dialect/Arith/canonicalize.mlir
+++ b/mlir/test/Dialect/Arith/canonicalize.mlir
@@ -654,7 +654,7 @@ func.func @signExtendConstant() -> i16 {
// CHECK: return %[[cres]]
func.func @signExtendConstantSplat() -> vector<4xi16> {
%c-2 = arith.constant -2 : i8
- %splat = vector.splat %c-2 : vector<4xi8>
+ %splat = vector.broadcast %c-2 : i8 to vector<4xi8>
%ext = arith.extsi %splat : vector<4xi8> to vector<4xi16>
return %ext : vector<4xi16>
}
@@ -682,7 +682,7 @@ func.func @unsignedExtendConstant() -> i16 {
// CHECK: return %[[cres]]
func.func @unsignedExtendConstantSplat() -> vector<4xi16> {
%c2 = arith.constant 2 : i8
- %splat = vector.splat %c2 : vector<4xi8>
+ %splat = vector.broadcast %c2 : i8 to vector<4xi8>
%ext = arith.extui %splat : vector<4xi8> to vector<4xi16>
return %ext : vector<4xi16>
}
@@ -866,7 +866,7 @@ func.func @truncExtsiVector(%arg0: vector<2xi32>) -> vector<2xi16> {
// CHECK: return %[[cres]]
func.func @truncConstantSplat() -> vector<4xi8> {
%c-2 = arith.constant -2 : i16
- %splat = vector.splat %c-2 : vector<4xi16>
+ %splat = vector.broadcast %c-2 : i16 to vector<4xi16>
%trunc = arith.trunci %splat : vector<4xi16> to vector<4xi8>
return %trunc : vector<4xi8>
}
@@ -2334,7 +2334,7 @@ func.func @constant_FPtoUI_splat() -> vector<4xi32> {
// CHECK: %[[C0:.+]] = arith.constant dense<2> : vector<4xi32>
// CHECK: return %[[C0]]
%c0 = arith.constant 2.0 : f32
- %splat = vector.splat %c0 : vector<4xf32>
+ %splat = vector.broadcast %c0 : f32 to vector<4xf32>
%res = arith.fptoui %splat : vector<4xf32> to vector<4xi32>
return %res : vector<4xi32>
}
@@ -2374,7 +2374,7 @@ func.func @constant_FPtoSI_splat() -> vector<4xi32> {
// CHECK: %[[C0:.+]] = arith.constant dense<-2> : vector<4xi32>
// CHECK: return %[[C0]]
%c0 = arith.constant -2.0 : f32
- %splat = vector.splat %c0 : vector<4xf32>
+ %splat = vector.broadcast %c0 : f32 to vector<4xf32>
%res = arith.fptosi %splat : vector<4xf32> to vector<4xi32>
return %res : vector<4xi32>
}
@@ -2413,7 +2413,7 @@ func.func @constant_SItoFP_splat() -> vector<4xf32> {
// CHECK: %[[C0:.+]] = arith.constant dense<2.000000e+00> : vector<4xf32>
// CHECK: return %[[C0]]
%c0 = arith.constant 2 : i32
- %splat = vector.splat %c0 : vector<4xi32>
+ %splat = vector.broadcast %c0 : i32 to vector<4xi32>
%res = arith.sitofp %splat : vector<4xi32> to vector<4xf32>
return %res : vector<4xf32>
}
@@ -2442,7 +2442,7 @@ func.func @constant_UItoFP_splat() -> vector<4xf32> {
// CHECK: %[[C0:.+]] = arith.constant dense<2.000000e+00> : vector<4xf32>
// CHECK: return %[[C0]]
%c0 = arith.constant 2 : i32
- %splat = vector.splat %c0 : vector<4xi32>
+ %splat = vector.broadcast %c0 : i32 to vector<4xi32>
%res = arith.uitofp %splat : vector<4xi32> to vector<4xf32>
return %res : vector<4xf32>
}
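The arith canonicalization tests above only swap the splat spelling; the folds themselves are unchanged. For reference, both forms below produce the same splat vector, the second being the spelling the tests now use (a sketch, not part of this diff):

    %c = arith.constant 2.0 : f32
    %v0 = vector.splat %c : vector<4xf32>               // previous spelling
    %v1 = vector.broadcast %c : f32 to vector<4xf32>    // equivalent broadcast form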
diff --git a/mlir/test/Dialect/GPU/ops.mlir b/mlir/test/Dialect/GPU/ops.mlir
index ee1fdfa..9cc0bf8 100644
--- a/mlir/test/Dialect/GPU/ops.mlir
+++ b/mlir/test/Dialect/GPU/ops.mlir
@@ -17,6 +17,18 @@ module attributes {gpu.container_module} {
return
}
+ // CHECK-LABEL:func @launch_with_module_func_attr(%{{.*}}: index)
+ func.func @launch_with_module_func_attr(%sz : index) {
+ // CHECK: gpu.launch blocks(%{{.*}}, %{{.*}}, %{{.*}}) in (%{{.*}} = %{{.*}}, %{{.*}} = %{{.*}}, %{{.*}} = %{{.*}}) threads(%{{.*}}, %{{.*}}, %{{.*}}) in (%{{.*}} = %{{.*}}, %{{.*}} = %{{.*}}, %{{.*}} = %{{.*}}) module(@test_module) function(@test_kernel_func)
+ gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %sz, %grid_y = %sz, %grid_z = %sz)
+ threads(%tx, %ty, %tz) in (%block_x = %sz, %block_y = %sz, %block_z = %sz)
+ module(@test_module) function(@test_kernel_func) {
+ // CHECK: gpu.terminator
+ gpu.terminator
+ }
+ return
+ }
+
// CHECK-LABEL:func @args(%{{.*}}: index, %{{.*}}: index, %{{.*}}: f32, %{{.*}}: memref<?xf32, 1>) {
func.func @args(%blk : index, %thrd : index, %float : f32, %data : memref<?xf32,1>) {
// CHECK: gpu.launch blocks(%{{.*}}, %{{.*}}, %{{.*}}) in (%{{.*}} = %{{.*}}, %{{.*}} = %{{.*}}, %{{.*}} = %{{.*}}) threads(%{{.*}}, %{{.*}}, %{{.*}}) in (%{{.*}} = %{{.*}}, %{{.*}} = %{{.*}}, %{{.*}} = %{{.*}})
diff --git a/mlir/test/Dialect/GPU/outlining.mlir b/mlir/test/Dialect/GPU/outlining.mlir
index d48fa05..0490118 100644
--- a/mlir/test/Dialect/GPU/outlining.mlir
+++ b/mlir/test/Dialect/GPU/outlining.mlir
@@ -509,7 +509,7 @@ func.func @launch_cluster() {
// CHECK-NEXT: = memref.load %[[KERNEL_ARG1]][%[[TID]]] : memref<?xf32, 1>
// -----
-// This test tests the two optional attributes kernelModule and kernelFunc for gpu.launch
+// This test checks the two optional attributes `module` and `function` on gpu.launch
// CHECK-LABEL: func.func @testKernelAttributes()
// CHECK: gpu.launch_func @test_module::@test_kernel_func blocks in (%[[GRID_X:.*]], %[[GRID_Y:.*]], %[[GRID_Z:.*]]) threads in (%[[BLOCK_X:.*]], %[[BLOCK_Y:.*]], %[[BLOCK_Z:.*]])
// CHECK: gpu.module @test_module
@@ -523,15 +523,16 @@ func.func @testKernelAttributes() {
%bDimZ = arith.constant 8 : index
gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %gDimX, %grid_y = %gDimY, %grid_z = %gDimZ)
- threads(%tx, %ty, %tz) in (%block_x = %bDimX, %block_y = %bDimY, %block_z = %bDimZ) {
+ threads(%tx, %ty, %tz) in (%block_x = %bDimX, %block_y = %bDimY, %block_z = %bDimZ)
+ module(@test_module) function(@test_kernel_func) {
"some_op"(%bx, %tx) : (index, index) -> ()
gpu.terminator
- } {kernelModule = @test_module, kernelFunc = @test_kernel_func}
+ }
return
}
// -----
-// This test tests the two optional attributes kernelModule and kernelFunc for gpu.launch, when kernelModule already exists.
+// This test checks the two optional attributes `module` and `function` on gpu.launch when the target module already exists.
// CHECK-LABEL: gpu.module @existing_module
// CHECK: gpu.func @test_kernel_func()
@@ -556,15 +557,16 @@ func.func @testExistingModule() {
%bDimZ = arith.constant 8 : index
gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %gDimX, %grid_y = %gDimY, %grid_z = %gDimZ)
- threads(%tx, %ty, %tz) in (%block_x = %bDimX, %block_y = %bDimY, %block_z = %bDimZ) {
+ threads(%tx, %ty, %tz) in (%block_x = %bDimX, %block_y = %bDimY, %block_z = %bDimZ)
+ module(@existing_module) function(@test_kernel_func) {
"some_op"(%bx, %tx) : (index, index) -> ()
gpu.terminator
- } {kernelModule = @existing_module, kernelFunc = @test_kernel_func}
+ }
return
}
// -----
-// This test tests the optional attribute kernelModule for gpu.launch.
+// This test checks the optional `module` attribute on gpu.launch.
// CHECK-LABEL: func.func @testKernelModuleOnly()
// CHECK: gpu.launch_func @test_module::@testKernelModuleOnly_kernel blocks in (%[[GRID_X:.*]], %[[GRID_Y:.*]], %[[GRID_Z:.*]]) threads in (%[[BLOCK_X:.*]], %[[BLOCK_Y:.*]], %[[BLOCK_Z:.*]])
// CHECK: gpu.module @test_module
@@ -578,15 +580,16 @@ func.func @testKernelModuleOnly() {
%bDimZ = arith.constant 8 : index
gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %gDimX, %grid_y = %gDimY, %grid_z = %gDimZ)
- threads(%tx, %ty, %tz) in (%block_x = %bDimX, %block_y = %bDimY, %block_z = %bDimZ) {
+ threads(%tx, %ty, %tz) in (%block_x = %bDimX, %block_y = %bDimY, %block_z = %bDimZ)
+ module(@test_module) {
"some_op"(%bx, %tx) : (index, index) -> ()
gpu.terminator
- } {kernelModule = @test_module}
+ }
return
}
// -----
-// This test tests the optional attribute kernelFunc for gpu.launch.
+// This test checks the optional `function` attribute on gpu.launch.
// CHECK-LABEL: func.func @testKernelFuncOnly()
// CHECK: gpu.launch_func @test_kernel_func::@test_kernel_func blocks in (%[[GRID_X:.*]], %[[GRID_Y:.*]], %[[GRID_Z:.*]]) threads in (%[[BLOCK_X:.*]], %[[BLOCK_Y:.*]], %[[BLOCK_Z:.*]])
@@ -601,15 +604,16 @@ func.func @testKernelFuncOnly() {
%bDimZ = arith.constant 8 : index
gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %gDimX, %grid_y = %gDimY, %grid_z = %gDimZ)
- threads(%tx, %ty, %tz) in (%block_x = %bDimX, %block_y = %bDimY, %block_z = %bDimZ) {
+ threads(%tx, %ty, %tz) in (%block_x = %bDimX, %block_y = %bDimY, %block_z = %bDimZ)
+ function(@test_kernel_func) {
"some_op"(%bx, %tx) : (index, index) -> ()
gpu.terminator
- } {kernelFunc = @test_kernel_func}
+ }
return
}
// -----
-// This test tests gpu.launch when optional attributes kernelModule and kernelFunc are not specified.
+// This test checks gpu.launch when the optional `module` and `function` attributes are not specified.
// CHECK-LABEL: func.func @testNoAttributes()
// CHECK: gpu.launch_func @testNoAttributes_kernel::@testNoAttributes_kernel blocks in (%[[GRID_X:.*]], %[[GRID_Y:.*]], %[[GRID_Z:.*]]) threads in (%[[BLOCK_X:.*]], %[[BLOCK_Y:.*]], %[[BLOCK_Z:.*]])
diff --git a/mlir/test/Dialect/LLVMIR/call-intrin.mlir b/mlir/test/Dialect/LLVMIR/call-intrin.mlir
index b8d845d..bf11e07 100644
--- a/mlir/test/Dialect/LLVMIR/call-intrin.mlir
+++ b/mlir/test/Dialect/LLVMIR/call-intrin.mlir
@@ -27,14 +27,13 @@ llvm.func @round_overloaded() -> f32 {
// CHECK: define void @lifetime_start() {
// CHECK: %1 = alloca float, i8 1, align 4
-// CHECK: call void @llvm.lifetime.start.p0(i64 4, ptr %1)
+// CHECK: call void @llvm.lifetime.start.p0(ptr %1)
// CHECK: ret void
// CHECK: }
llvm.func @lifetime_start() {
- %0 = llvm.mlir.constant(4 : i64) : i64
- %1 = llvm.mlir.constant(1 : i8) : i8
- %2 = llvm.alloca %1 x f32 : (i8) -> !llvm.ptr
- llvm.call_intrinsic "llvm.lifetime.start"(%0, %2) {} : (i64, !llvm.ptr) -> ()
+ %0 = llvm.mlir.constant(1 : i8) : i8
+ %1 = llvm.alloca %0 x f32 : (i8) -> !llvm.ptr
+ llvm.call_intrinsic "llvm.lifetime.start"(%1) {} : (!llvm.ptr) -> ()
llvm.return
}
diff --git a/mlir/test/Dialect/LLVMIR/inlining.mlir b/mlir/test/Dialect/LLVMIR/inlining.mlir
index 551e0c9..8e292f4 100644
--- a/mlir/test/Dialect/LLVMIR/inlining.mlir
+++ b/mlir/test/Dialect/LLVMIR/inlining.mlir
@@ -299,7 +299,7 @@ llvm.func @test_inline(%cond0 : i1, %cond1 : i1, %funcArg : f32) -> f32 {
^bb1:
// Make sure the lifetime begin intrinsic has been inserted where the call
// used to be, even though the alloca has been moved to the entry block.
- // CHECK-NEXT: llvm.intr.lifetime.start 4, %[[PTR]]
+ // CHECK-NEXT: llvm.intr.lifetime.start %[[PTR]]
%0 = llvm.call @static_alloca(%cond1) : (i1) -> f32
// CHECK: llvm.cond_br %{{.+}}, ^[[BB2:.+]], ^[[BB3:.+]]
llvm.br ^bb3(%0: f32)
@@ -307,9 +307,9 @@ llvm.func @test_inline(%cond0 : i1, %cond1 : i1, %funcArg : f32) -> f32 {
// return sites of the callee.
// CHECK: ^[[BB2]]:
// CHECK-NEXT: llvm.load
- // CHECK-NEXT: llvm.intr.lifetime.end 4, %[[PTR]]
+ // CHECK-NEXT: llvm.intr.lifetime.end %[[PTR]]
// CHECK: ^[[BB3]]:
- // CHECK-NEXT: llvm.intr.lifetime.end 4, %[[PTR]]
+ // CHECK-NEXT: llvm.intr.lifetime.end %[[PTR]]
^bb2:
llvm.br ^bb3(%funcArg: f32)
^bb3(%blockArg: f32):
@@ -334,9 +334,9 @@ llvm.func @test_inline(%cond0 : i1) {
// CHECK: "test.one_region_op"() ({
"test.one_region_op"() ({
%0 = llvm.call @static_alloca() : () -> f32
- // CHECK-NEXT: llvm.intr.lifetime.start 4, %[[ALLOCA]]
+ // CHECK-NEXT: llvm.intr.lifetime.start %[[ALLOCA]]
// CHECK-NEXT: %[[RES:.+]] = llvm.load %[[ALLOCA]]
- // CHECK-NEXT: llvm.intr.lifetime.end 4, %[[ALLOCA]]
+ // CHECK-NEXT: llvm.intr.lifetime.end %[[ALLOCA]]
// CHECK-NEXT: test.region_yield %[[RES]]
test.region_yield %0 : f32
}) : () -> ()
@@ -368,9 +368,9 @@ llvm.func @test_inline(%cond0 : i1) {
llvm.func @alloca_with_lifetime(%cond: i1) -> f32 {
%0 = llvm.mlir.constant(4 : i32) : i32
%1 = llvm.alloca %0 x f32 : (i32) -> !llvm.ptr
- llvm.intr.lifetime.start 4, %1 : !llvm.ptr
+ llvm.intr.lifetime.start %1 : !llvm.ptr
%2 = llvm.load %1 : !llvm.ptr -> f32
- llvm.intr.lifetime.end 4, %1 : !llvm.ptr
+ llvm.intr.lifetime.end %1 : !llvm.ptr
%3 = llvm.fadd %2, %2 : f32
llvm.return %3 : f32
}
@@ -385,9 +385,9 @@ llvm.func @test_inline(%cond0 : i1, %cond1 : i1, %funcArg : f32) -> f32 {
^bb1:
// Make sure the original lifetime intrinsic has been preserved, rather than
// inserting a new one with a larger scope.
- // CHECK: llvm.intr.lifetime.start 4, %[[PTR]]
+ // CHECK: llvm.intr.lifetime.start %[[PTR]]
// CHECK-NEXT: llvm.load %[[PTR]]
- // CHECK-NEXT: llvm.intr.lifetime.end 4, %[[PTR]]
+ // CHECK-NEXT: llvm.intr.lifetime.end %[[PTR]]
// CHECK: llvm.fadd
// CHECK-NOT: llvm.intr.lifetime.end
%0 = llvm.call @alloca_with_lifetime(%cond1) : (i1) -> f32
diff --git a/mlir/test/Dialect/LLVMIR/mem2reg.mlir b/mlir/test/Dialect/LLVMIR/mem2reg.mlir
index 56634cf..716a586 100644
--- a/mlir/test/Dialect/LLVMIR/mem2reg.mlir
+++ b/mlir/test/Dialect/LLVMIR/mem2reg.mlir
@@ -304,10 +304,9 @@ llvm.func @g()
// CHECK-NOT: = llvm.alloca
llvm.func amdgpu_kernelcc @addrspace_discard() {
%0 = llvm.mlir.constant(1 : i32) : i32
- %1 = llvm.mlir.constant(2 : i64) : i64
- %2 = llvm.alloca %0 x i8 {alignment = 8 : i64} : (i32) -> !llvm.ptr<5>
- %3 = llvm.addrspacecast %2 : !llvm.ptr<5> to !llvm.ptr
- llvm.intr.lifetime.start 2, %3 : !llvm.ptr
+ %1 = llvm.alloca %0 x i8 {alignment = 8 : i64} : (i32) -> !llvm.ptr<5>
+ %2 = llvm.addrspacecast %1 : !llvm.ptr<5> to !llvm.ptr
+ llvm.intr.lifetime.start %2 : !llvm.ptr
llvm.return
}
@@ -406,9 +405,9 @@ llvm.func @unreachable_jumps_to_merge_point(%arg0: i1) -> i32 {
llvm.func @ignore_lifetime() {
%0 = llvm.mlir.constant(1 : i32) : i32
%1 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i32) -> !llvm.ptr
- llvm.intr.lifetime.start 2, %1 : !llvm.ptr
+ llvm.intr.lifetime.start %1 : !llvm.ptr
llvm.store %0, %1 {alignment = 4 : i64} : i32, !llvm.ptr
- llvm.intr.lifetime.end 2, %1 : !llvm.ptr
+ llvm.intr.lifetime.end %1 : !llvm.ptr
llvm.return
}
@@ -437,9 +436,9 @@ llvm.func @ignore_discardable_tree() {
%5 = llvm.insertvalue %1, %4[1] : !llvm.struct<(i8, i16)>
%6 = llvm.alloca %0 x !llvm.struct<(i8, i16)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
%7 = llvm.getelementptr %6[0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(i8, i16)>
- llvm.intr.lifetime.start 2, %7 : !llvm.ptr
+ llvm.intr.lifetime.start %7 : !llvm.ptr
llvm.store %5, %6 {alignment = 2 : i64} : !llvm.struct<(i8, i16)>, !llvm.ptr
- llvm.intr.lifetime.end 2, %7 : !llvm.ptr
+ llvm.intr.lifetime.end %7 : !llvm.ptr
llvm.return
}
@@ -517,8 +516,8 @@ llvm.func @discardable_use_tree() {
%2 = llvm.alloca %0 x i8 {alignment = 8 : i64} : (i32) -> !llvm.ptr
%3 = llvm.bitcast %2 : !llvm.ptr to !llvm.ptr
%4 = llvm.bitcast %3 : !llvm.ptr to !llvm.ptr
- llvm.intr.lifetime.start 2, %3 : !llvm.ptr
- llvm.intr.lifetime.start 2, %4 : !llvm.ptr
+ llvm.intr.lifetime.start %3 : !llvm.ptr
+ llvm.intr.lifetime.start %4 : !llvm.ptr
%5 = llvm.intr.invariant.start 2, %3 : !llvm.ptr
llvm.intr.invariant.end %5, 2, %3 : !llvm.ptr
llvm.return
@@ -534,8 +533,8 @@ llvm.func @non_discardable_use_tree() {
%2 = llvm.alloca %0 x i8 {alignment = 8 : i64} : (i32) -> !llvm.ptr
%3 = llvm.bitcast %2 : !llvm.ptr to !llvm.ptr
%4 = llvm.bitcast %3 : !llvm.ptr to !llvm.ptr
- llvm.intr.lifetime.start 2, %3 : !llvm.ptr
- llvm.intr.lifetime.start 2, %4 : !llvm.ptr
+ llvm.intr.lifetime.start %3 : !llvm.ptr
+ llvm.intr.lifetime.start %4 : !llvm.ptr
llvm.call @use(%4) : (!llvm.ptr) -> i1
llvm.return
}
@@ -551,8 +550,8 @@ llvm.func @trivial_get_element_ptr() {
%2 = llvm.alloca %0 x i8 {alignment = 8 : i64} : (i32) -> !llvm.ptr
%3 = llvm.bitcast %2 : !llvm.ptr to !llvm.ptr
%4 = llvm.getelementptr %3[0] : (!llvm.ptr) -> !llvm.ptr, i8
- llvm.intr.lifetime.start 2, %3 : !llvm.ptr
- llvm.intr.lifetime.start 2, %4 : !llvm.ptr
+ llvm.intr.lifetime.start %3 : !llvm.ptr
+ llvm.intr.lifetime.start %4 : !llvm.ptr
llvm.return
}
@@ -565,8 +564,8 @@ llvm.func @nontrivial_get_element_ptr() {
// CHECK: = llvm.alloca
%2 = llvm.alloca %0 x i8 {alignment = 8 : i64} : (i32) -> !llvm.ptr
%4 = llvm.getelementptr %2[1] : (!llvm.ptr) -> !llvm.ptr, i8
- llvm.intr.lifetime.start 2, %2 : !llvm.ptr
- llvm.intr.lifetime.start 2, %4 : !llvm.ptr
+ llvm.intr.lifetime.start %2 : !llvm.ptr
+ llvm.intr.lifetime.start %4 : !llvm.ptr
llvm.return
}
@@ -580,8 +579,8 @@ llvm.func @dynamic_get_element_ptr() {
%2 = llvm.alloca %0 x i8 {alignment = 8 : i64} : (i32) -> !llvm.ptr
%3 = llvm.bitcast %2 : !llvm.ptr to !llvm.ptr
%4 = llvm.getelementptr %3[%0] : (!llvm.ptr, i32) -> !llvm.ptr, i8
- llvm.intr.lifetime.start 2, %3 : !llvm.ptr
- llvm.intr.lifetime.start 2, %4 : !llvm.ptr
+ llvm.intr.lifetime.start %3 : !llvm.ptr
+ llvm.intr.lifetime.start %4 : !llvm.ptr
llvm.return
}
diff --git a/mlir/test/Dialect/LLVMIR/rocdl.mlir b/mlir/test/Dialect/LLVMIR/rocdl.mlir
index a2b2f84..db5271c 100644
--- a/mlir/test/Dialect/LLVMIR/rocdl.mlir
+++ b/mlir/test/Dialect/LLVMIR/rocdl.mlir
@@ -981,6 +981,13 @@ llvm.func @rocdl.s.wait.expcnt() {
// -----
+llvm.func @rocdl.readfirstlane(%src : f32) -> f32 {
+ // CHECK-LABEL: rocdl.readfirstlane
+ // CHECK: rocdl.readfirstlane %{{.*}} : f32
+ %ret = rocdl.readfirstlane %src : f32
+ llvm.return %ret : f32
+}
+
llvm.func @rocdl.readlane(%src : f32) -> f32 {
%cst0 = llvm.mlir.constant(0 : i32) : i32
diff --git a/mlir/test/Dialect/LLVMIR/roundtrip.mlir b/mlir/test/Dialect/LLVMIR/roundtrip.mlir
index a0273fb..7344797 100644
--- a/mlir/test/Dialect/LLVMIR/roundtrip.mlir
+++ b/mlir/test/Dialect/LLVMIR/roundtrip.mlir
@@ -685,10 +685,10 @@ func.func @fastmathFlags(%arg0: f32, %arg1: f32, %arg2: i32, %arg3: vector<2 x f
// CHECK-LABEL: @lifetime
// CHECK-SAME: %[[P:.*]]: !llvm.ptr
llvm.func @lifetime(%p: !llvm.ptr) {
- // CHECK: llvm.intr.lifetime.start 16, %[[P]]
- llvm.intr.lifetime.start 16, %p : !llvm.ptr
- // CHECK: llvm.intr.lifetime.end 16, %[[P]]
- llvm.intr.lifetime.end 16, %p : !llvm.ptr
+ // CHECK: llvm.intr.lifetime.start %[[P]]
+ llvm.intr.lifetime.start %p : !llvm.ptr
+ // CHECK: llvm.intr.lifetime.end %[[P]]
+ llvm.intr.lifetime.end %p : !llvm.ptr
llvm.return
}
diff --git a/mlir/test/Dialect/LLVMIR/sroa.mlir b/mlir/test/Dialect/LLVMIR/sroa.mlir
index fe1531d..1674bbd 100644
--- a/mlir/test/Dialect/LLVMIR/sroa.mlir
+++ b/mlir/test/Dialect/LLVMIR/sroa.mlir
@@ -177,7 +177,7 @@ llvm.func @direct_promotable_use_is_fine() -> i32 {
// CHECK: %[[RES:.*]] = llvm.load %[[ALLOCA]]
%3 = llvm.load %2 : !llvm.ptr -> i32
// This is a direct use of the slot but it can be removed because it implements PromotableOpInterface.
- llvm.intr.lifetime.start 2, %1 : !llvm.ptr
+ llvm.intr.lifetime.start %1 : !llvm.ptr
// CHECK: llvm.return %[[RES]] : i32
llvm.return %3 : i32
}
diff --git a/mlir/test/Dialect/Linalg/block-pack-matmul-layout.mlir b/mlir/test/Dialect/Linalg/block-pack-matmul-layout.mlir
index 4ba4b09..2f30e8b 100644
--- a/mlir/test/Dialect/Linalg/block-pack-matmul-layout.mlir
+++ b/mlir/test/Dialect/Linalg/block-pack-matmul-layout.mlir
@@ -20,20 +20,6 @@ func.func @block_matmul(
return %0 : tensor<64x64xf32>
}
-func.func @block_matmul_transpose_a(
- %A: tensor<128x64xf32>, %B: tensor<128x64xf32>, %C: tensor<64x64xf32>) -> tensor<64x64xf32> {
- %0 = linalg.matmul_transpose_a ins(%A, %B : tensor<128x64xf32>, tensor<128x64xf32>)
- outs(%C : tensor<64x64xf32>) -> tensor<64x64xf32>
- return %0 : tensor<64x64xf32>
-}
-
-func.func @block_matmul_transpose_b(
- %A: tensor<64x128xf32>, %B: tensor<64x128xf32>, %C: tensor<64x64xf32>) -> tensor<64x64xf32> {
- %0 = linalg.matmul_transpose_b ins(%A, %B : tensor<64x128xf32>, tensor<64x128xf32>)
- outs(%C : tensor<64x64xf32>) -> tensor<64x64xf32>
- return %0 : tensor<64x64xf32>
-}
-
// MMT4D-DAG: #[[$MAP:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d2, d3, d5)>
// MMT4D-DAG: #[[$MAP1:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d1, d2, d4, d5)>
// MMT4D-DAG: #[[$MAP2:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d3, d4)>
@@ -43,18 +29,6 @@ func.func @block_matmul_transpose_b(
// MMT4D-SAME: indexing_maps = [#[[$MAP]], #[[$MAP1]], #[[$MAP2]]]
// MMT4D-SAME: iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]
// MMT4D-COUNT-1: linalg.unpack
-// MMT4D-LABEL: func @block_matmul_transpose_a
-// MMT4D-COUNT-3: linalg.pack
-// MMT4D: linalg.generic
-// MMT4D-SAME: indexing_maps = [#[[$MAP]], #[[$MAP1]], #[[$MAP2]]]
-// MMT4D-SAME: iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]
-// MMT4D-COUNT-1: linalg.unpack
-// MMT4D-LABEL: func @block_matmul_transpose_b
-// MMT4D-COUNT-3: linalg.pack
-// MMT4D: linalg.generic
-// MMT4D-SAME: indexing_maps = [#[[$MAP]], #[[$MAP1]], #[[$MAP2]]]
-// MMT4D-SAME: iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]
-// MMT4D-COUNT-1: linalg.unpack
// MM4D-DAG: #[[$MAP:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d2, d3, d5)>
// MM4D-DAG: #[[$MAP1:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d2, d1, d5, d4)>
@@ -65,18 +39,6 @@ func.func @block_matmul_transpose_b(
// MM4D-SAME: indexing_maps = [#[[$MAP]], #[[$MAP1]], #[[$MAP2]]]
// MM4D-SAME: iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]
// MM4D-COUNT-1: linalg.unpack
-// MM4D-LABEL: func @block_matmul_transpose_a
-// MM4D-COUNT-3: linalg.pack
-// MM4D: linalg.generic
-// MM4D-SAME: indexing_maps = [#[[$MAP]], #[[$MAP1]], #[[$MAP2]]]
-// MM4D-SAME: iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]
-// MM4D-COUNT-1: linalg.unpack
-// MM4D-LABEL: func @block_matmul_transpose_b
-// MM4D-COUNT-3: linalg.pack
-// MM4D: linalg.generic
-// MM4D-SAME: indexing_maps = [#[[$MAP]], #[[$MAP1]], #[[$MAP2]]]
-// MM4D-SAME: iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]
-// MM4D-COUNT-1: linalg.unpack
// MTM4D-DAG: #[[$MAP:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d2, d0, d5, d3)>
// MTM4D-DAG: #[[$MAP1:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d2, d1, d5, d4)>
@@ -87,15 +49,3 @@ func.func @block_matmul_transpose_b(
// MTM4D-SAME: indexing_maps = [#[[$MAP]], #[[$MAP1]], #[[$MAP2]]]
// MTM4D-SAME: iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]
// MTM4D-COUNT-1: linalg.unpack
-// MTM4D-LABEL: func @block_matmul_transpose_a
-// MTM4D-COUNT-3: linalg.pack
-// MTM4D: linalg.generic
-// MTM4D-SAME: indexing_maps = [#[[$MAP]], #[[$MAP1]], #[[$MAP2]]]
-// MTM4D-SAME: iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]
-// MTM4D-COUNT-1: linalg.unpack
-// MTM4D-LABEL: func @block_matmul_transpose_b
-// MTM4D-COUNT-3: linalg.pack
-// MTM4D: linalg.generic
-// MTM4D-SAME: indexing_maps = [#[[$MAP]], #[[$MAP1]], #[[$MAP2]]]
-// MTM4D-SAME: iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]
-// MTM4D-COUNT-1: linalg.unpack
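
The deletions in this file track the removal of the linalg.matmul_transpose_a / linalg.matmul_transpose_b named ops; the same computation is now spelled as linalg.matmul with explicit indexing_maps, as the transpose-matmul.mlir hunks later in this diff show. A hedged sketch of the transpose-A form, reusing the shapes of the deleted @block_matmul_transpose_a test (map aliases are illustrative):

#map_a = affine_map<(d0, d1, d2) -> (d2, d0)>  // LHS carried as K x M
#map_b = affine_map<(d0, d1, d2) -> (d2, d1)>  // RHS as K x N
#map_c = affine_map<(d0, d1, d2) -> (d0, d1)>  // result as M x N

func.func @matmul_transpose_a_sketch(
    %A: tensor<128x64xf32>, %B: tensor<128x64xf32>, %C: tensor<64x64xf32>) -> tensor<64x64xf32> {
  %0 = linalg.matmul indexing_maps = [#map_a, #map_b, #map_c]
         ins(%A, %B : tensor<128x64xf32>, tensor<128x64xf32>)
         outs(%C : tensor<64x64xf32>) -> tensor<64x64xf32>
  return %0 : tensor<64x64xf32>
}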
diff --git a/mlir/test/Dialect/Linalg/block-pack-matmul.mlir b/mlir/test/Dialect/Linalg/block-pack-matmul.mlir
index aa860db..e16af1f 100644
--- a/mlir/test/Dialect/Linalg/block-pack-matmul.mlir
+++ b/mlir/test/Dialect/Linalg/block-pack-matmul.mlir
@@ -197,150 +197,6 @@ func.func @block_batch_matmul(
// -----
-func.func @block_matmul_transpose_a(
- %A: tensor<128x64xf32>, %B: tensor<128x64xf32>, %C: tensor<64x64xf32>) -> tensor<64x64xf32> {
- %0 = linalg.matmul_transpose_a ins(%A, %B : tensor<128x64xf32>, tensor<128x64xf32>)
- outs(%C : tensor<64x64xf32>) -> tensor<64x64xf32>
- return %0 : tensor<64x64xf32>
-}
-
-// CHECK-DAG: #[[$MAP:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d2, d3, d5)>
-// CHECK-DAG: #[[$MAP1:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d1, d2, d4, d5)>
-// CHECK-DAG: #[[$MAP2:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d3, d4)>
-
-// CHECK-LABEL: func @block_matmul_transpose_a(
-// CHECK-SAME: %[[A:[0-9a-z]+]]: tensor<128x64xf32>, %[[B:[0-9a-z]+]]: tensor<128x64xf32>, %[[C:[0-9a-z]+]]: tensor<64x64xf32>
-// CHECK: %[[PACK_DST_0:.+]] = tensor.empty() : tensor<2x2x32x64xf32>
-// CHECK: %[[A_PACKED:.+]] = linalg.pack %[[A]]
-// CHECK-SAME: outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [32, 64]
-// CHECK-SAME: into %[[PACK_DST_0]] : tensor<128x64xf32> -> tensor<2x2x32x64xf32>
-// CHECK: %[[PACK_DST_1:.+]] = tensor.empty() : tensor<4x2x16x64xf32>
-// CHECK: %[[B_PACKED:.+]] = linalg.pack %[[B]]
-// CHECK-SAME: outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 64]
-// CHECK-SAME: into %[[PACK_DST_1]] : tensor<128x64xf32> -> tensor<4x2x16x64xf32>
-// CHECK: %[[PACK_DST_2:.+]] = tensor.empty() : tensor<2x4x32x16xf32>
-// CHECK: %[[C_PACKED:.+]] = linalg.pack %[[C]]
-// CHECK-SAME: inner_dims_pos = [0, 1] inner_tiles = [32, 16]
-// CHECK-SAME: into %[[PACK_DST_2]] : tensor<64x64xf32> -> tensor<2x4x32x16xf32>
-// CHECK: %[[GEMM_RES_PACKED:.+]] = linalg.generic
-// CHECK-SAME: indexing_maps = [#[[$MAP]], #[[$MAP1]], #[[$MAP2]]]
-// CHECK-SAME: iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]
-// CHECK-SAME: ins(%[[A_PACKED]], %[[B_PACKED]] : tensor<2x2x32x64xf32>, tensor<4x2x16x64xf32>) outs(%[[C_PACKED]] : tensor<2x4x32x16xf32>)
-// CHECK: %[[RES_UNPACKED:.+]] = linalg.unpack %[[GEMM_RES_PACKED]]
-// CHECK-SAME: inner_dims_pos = [0, 1] inner_tiles = [32, 16]
-// CHECK-SAME: into %[[C]] : tensor<2x4x32x16xf32> -> tensor<64x64xf32>
-// CHECK: return %[[RES_UNPACKED]] : tensor<64x64xf32>
-
-// -----
-
-func.func @block_batch_matmul_transpose_a(
- %A: tensor<512x128x64xf32>, %B: tensor<512x128x64xf32>, %C: tensor<512x64x64xf32>) -> tensor<512x64x64xf32> {
- %0 = linalg.batch_matmul_transpose_a ins(%A, %B : tensor<512x128x64xf32>, tensor<512x128x64xf32>)
- outs(%C : tensor<512x64x64xf32>) -> tensor<512x64x64xf32>
- return %0 : tensor<512x64x64xf32>
-}
-
-// CHECK-DAG: #[[$MAP:.+]] = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1, d3, d4, d6)>
-// CHECK-DAG: #[[$MAP1:.+]] = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d2, d3, d5, d6)>
-// CHECK-DAG: #[[$MAP2:.+]] = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1, d2, d4, d5)>
-
-// CHECK-LABEL: func @block_batch_matmul_transpose_a(
-// CHECK-SAME: %[[A:.+]]: tensor<512x128x64xf32>, %[[B:.+]]: tensor<512x128x64xf32>, %[[C:.+]]: tensor<512x64x64xf32>
-// CHECK: %[[PACK_DST_0:.+]] = tensor.empty() : tensor<512x2x2x32x64xf32>
-// CHECK: %[[A_PACKED:.+]] = linalg.pack %[[A]]
-// CHECK-SAME: outer_dims_perm = [0, 2, 1] inner_dims_pos = [2, 1] inner_tiles = [32, 64]
-// CHECK-SAME: into %[[PACK_DST_0]] : tensor<512x128x64xf32> -> tensor<512x2x2x32x64xf32>
-// CHECK: %[[PACK_DST_1:.+]] = tensor.empty() : tensor<512x4x2x16x64xf32>
-// CHECK: %[[B_PACKED:.+]] = linalg.pack %[[B]]
-// CHECK-SAME: outer_dims_perm = [0, 2, 1] inner_dims_pos = [2, 1] inner_tiles = [16, 64]
-// CHECK-SAME: into %[[PACK_DST_1]] : tensor<512x128x64xf32> -> tensor<512x4x2x16x64xf32>
-// CHECK: %[[PACK_DST_2:.+]] = tensor.empty() : tensor<512x2x4x32x16xf32>
-// CHECK: %[[C_PACKED:.+]] = linalg.pack %[[C]]
-// CHECK-SAME: inner_dims_pos = [1, 2] inner_tiles = [32, 16]
-// CHECK-SAME: into %[[PACK_DST_2]] : tensor<512x64x64xf32> -> tensor<512x2x4x32x16xf32>
-// CHECK: %[[GEMM_RES_PACKED:.+]] = linalg.generic
-// CHECK-SAME: indexing_maps = [#[[$MAP]], #[[$MAP1]], #[[$MAP2]]]
-// CHECK-SAME: iterator_types = ["parallel", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]
-// CHECK-SAME: ins(%[[A_PACKED]], %[[B_PACKED]] : tensor<512x2x2x32x64xf32>, tensor<512x4x2x16x64xf32>) outs(%[[C_PACKED]] : tensor<512x2x4x32x16xf32>)
-// CHECK: %[[RES_UNPACKED:.+]] = linalg.unpack %[[GEMM_RES_PACKED]]
-// CHECK-SAME: inner_dims_pos = [1, 2] inner_tiles = [32, 16]
-// CHECK-SAME: into %[[C]] : tensor<512x2x4x32x16xf32> -> tensor<512x64x64xf32>
-// CHECK: return %[[RES_UNPACKED]] : tensor<512x64x64xf32>
-
-// -----
-
-func.func @block_matmul_transpose_b(
- %A: tensor<64x128xf32>, %B: tensor<64x128xf32>, %C: tensor<64x64xf32>) -> tensor<64x64xf32> {
- %0 = linalg.matmul_transpose_b ins(%A, %B : tensor<64x128xf32>, tensor<64x128xf32>)
- outs(%C : tensor<64x64xf32>) -> tensor<64x64xf32>
- return %0 : tensor<64x64xf32>
-}
-
-// CHECK-DAG: #[[$MAP:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d2, d3, d5)>
-// CHECK-DAG: #[[$MAP1:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d1, d2, d4, d5)>
-// CHECK-DAG: #[[$MAP2:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d3, d4)>
-
-// CHECK-LABEL: func @block_matmul_transpose_b(
-// CHECK-SAME: %[[A:[0-9a-z]+]]: tensor<64x128xf32>, %[[B:[0-9a-z]+]]: tensor<64x128xf32>, %[[C:[0-9a-z]+]]: tensor<64x64xf32>
-// CHECK: %[[PACK_DST_0:.+]] = tensor.empty() : tensor<2x2x32x64xf32>
-// CHECK: %[[A_PACKED:.+]] = linalg.pack %[[A]]
-// CHECK-SAME: outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 64]
-// CHECK-SAME: into %[[PACK_DST_0]] : tensor<64x128xf32> -> tensor<2x2x32x64xf32>
-// CHECK: %[[PACK_DST_1:.+]] = tensor.empty() : tensor<4x2x16x64xf32>
-// CHECK: %[[B_PACKED:.+]] = linalg.pack %[[B]]
-// CHECK-SAME: outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 64]
-// CHECK-SAME: into %[[PACK_DST_1]] : tensor<64x128xf32> -> tensor<4x2x16x64xf32>
-// CHECK: %[[PACK_DST_2:.+]] = tensor.empty() : tensor<2x4x32x16xf32>
-// CHECK: %[[C_PACKED:.+]] = linalg.pack %[[C]]
-// CHECK-SAME: inner_dims_pos = [0, 1] inner_tiles = [32, 16]
-// CHECK-SAME: into %[[PACK_DST_2]] : tensor<64x64xf32> -> tensor<2x4x32x16xf32>
-// CHECK: %[[GEMM_RES_PACKED:.+]] = linalg.generic
-// CHECK-SAME: indexing_maps = [#[[$MAP]], #[[$MAP1]], #[[$MAP2]]]
-// CHECK-SAME: iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]
-// CHECK-SAME: ins(%[[A_PACKED]], %[[B_PACKED]] : tensor<2x2x32x64xf32>, tensor<4x2x16x64xf32>) outs(%[[C_PACKED]] : tensor<2x4x32x16xf32>)
-// CHECK: %[[RES_UNPACKED:.+]] = linalg.unpack %[[GEMM_RES_PACKED]]
-// CHECK-SAME: inner_dims_pos = [0, 1] inner_tiles = [32, 16]
-// CHECK-SAME: into %[[C]] : tensor<2x4x32x16xf32> -> tensor<64x64xf32>
-// CHECK: return %[[RES_UNPACKED]] : tensor<64x64xf32>
-
-// -----
-
-func.func @block_batch_matmul_transpose_b(
- %A: tensor<512x64x128xf32>, %B: tensor<512x64x128xf32>, %C: tensor<512x64x64xf32>) -> tensor<512x64x64xf32> {
- %0 = linalg.batch_matmul_transpose_b ins(%A, %B : tensor<512x64x128xf32>, tensor<512x64x128xf32>)
- outs(%C : tensor<512x64x64xf32>) -> tensor<512x64x64xf32>
- return %0 : tensor<512x64x64xf32>
-}
-
-// CHECK-DAG: #[[$MAP:.+]] = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1, d3, d4, d6)>
-// CHECK-DAG: #[[$MAP1:.+]] = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d2, d3, d5, d6)>
-// CHECK-DAG: #[[$MAP2:.+]] = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1, d2, d4, d5)>
-
-// CHECK-LABEL: func @block_batch_matmul_transpose_b(
-// CHECK-SAME: %[[A:.+]]: tensor<512x64x128xf32>, %[[B:.+]]: tensor<512x64x128xf32>, %[[C:.+]]: tensor<512x64x64xf32>
-// CHECK: %[[PACK_DST_0:.+]] = tensor.empty() : tensor<512x2x2x32x64xf32>
-// CHECK: %[[A_PACKED:.+]] = linalg.pack %[[A]]
-// CHECK-SAME: outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [32, 64]
-// CHECK-SAME: into %[[PACK_DST_0]] : tensor<512x64x128xf32> -> tensor<512x2x2x32x64xf32>
-// CHECK: %[[PACK_DST_1:.+]] = tensor.empty() : tensor<512x4x2x16x64xf32>
-// CHECK: %[[B_PACKED:.+]] = linalg.pack %[[B]]
-// CHECK-SAME: outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 64]
-// CHECK-SAME: into %[[PACK_DST_1]] : tensor<512x64x128xf32> -> tensor<512x4x2x16x64xf32>
-// CHECK: %[[PACK_DST_2:.+]] = tensor.empty() : tensor<512x2x4x32x16xf32>
-// CHECK: %[[C_PACKED:.+]] = linalg.pack %[[C]]
-// CHECK-SAME: inner_dims_pos = [1, 2] inner_tiles = [32, 16]
-// CHECK-SAME: into %[[PACK_DST_2]] : tensor<512x64x64xf32> -> tensor<512x2x4x32x16xf32>
-// CHECK: %[[GEMM_RES_PACKED:.+]] = linalg.generic
-// CHECK-SAME: indexing_maps = [#[[$MAP]], #[[$MAP1]], #[[$MAP2]]]
-// CHECK-SAME: iterator_types = ["parallel", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]
-// CHECK-SAME: ins(%[[A_PACKED]], %[[B_PACKED]] : tensor<512x2x2x32x64xf32>, tensor<512x4x2x16x64xf32>) outs(%[[C_PACKED]] : tensor<512x2x4x32x16xf32>)
-// CHECK: %[[RES_UNPACKED:.+]] = linalg.unpack %[[GEMM_RES_PACKED]]
-// CHECK-SAME: inner_dims_pos = [1, 2] inner_tiles = [32, 16]
-// CHECK-SAME: into %[[C]] : tensor<512x2x4x32x16xf32> -> tensor<512x64x64xf32>
-// CHECK: return %[[RES_UNPACKED]] : tensor<512x64x64xf32>
-
-// -----
-
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
diff --git a/mlir/test/Dialect/Linalg/elementwise/named-to-elementwise.mlir b/mlir/test/Dialect/Linalg/elementwise/named-to-elementwise.mlir
new file mode 100644
index 0000000..2332b28
--- /dev/null
+++ b/mlir/test/Dialect/Linalg/elementwise/named-to-elementwise.mlir
@@ -0,0 +1,56 @@
+// RUN: mlir-opt %s -linalg-morph-ops=named-to-category -split-input-file | FileCheck %s
+
+// CHECK: @exp(%[[A:.+]]: tensor<16x8xf32>, %[[B:.+]]: tensor<16x8xf32>) -> tensor<16x8xf32> {
+// CHECK: {{.*}} = linalg.elementwise
+// CHECK-SAME: kind=#linalg.elementwise_kind<exp>
+// CHECK-SAME: ins(%[[A]] : tensor<16x8xf32>)
+// CHECK-SAME: outs(%[[B]] : tensor<16x8xf32>) -> tensor<16x8xf32>
+//
+func.func @exp(%A : tensor<16x8xf32>, %B : tensor<16x8xf32>) -> tensor<16x8xf32> {
+ %exp = linalg.exp ins(%A : tensor<16x8xf32>) outs(%B : tensor<16x8xf32>) -> tensor<16x8xf32>
+ return %exp : tensor<16x8xf32>
+}
+
+// -----
+
+// CHECK: @add(%[[A:.+]]: tensor<16x8xf32>, %[[B:.+]]: tensor<16x8xf32>, %[[C:.+]]: tensor<16x8xf32>) -> tensor<16x8xf32> {
+// CHECK: {{.*}} = linalg.elementwise
+// CHECK-SAME: kind=#linalg.elementwise_kind<add>
+// CHECK-SAME: ins(%[[A]], %[[B]] : tensor<16x8xf32>, tensor<16x8xf32>)
+// CHECK-SAME: outs(%[[C]] : tensor<16x8xf32>) -> tensor<16x8xf32>
+//
+func.func @add(%A : tensor<16x8xf32>, %B: tensor<16x8xf32>, %C : tensor<16x8xf32>) -> tensor<16x8xf32> {
+ %add = linalg.add ins(%A, %B : tensor<16x8xf32>, tensor<16x8xf32>) outs(%C : tensor<16x8xf32>) -> tensor<16x8xf32>
+ return %add : tensor<16x8xf32>
+}
+
+// -----
+
+// CHECK: @sub(%[[A:.+]]: tensor<16x8xf32>, %[[B:.+]]: tensor<16x8xf32>, %[[C:.+]]: tensor<16x8xf32>) -> tensor<16x8xf32> {
+// CHECK: {{.*}} = linalg.elementwise
+// CHECK-SAME: kind=#linalg.elementwise_kind<sub>
+// CHECK-SAME: ins(%[[A]], %[[B]] : tensor<16x8xf32>, tensor<16x8xf32>)
+// CHECK-SAME: outs(%[[C]] : tensor<16x8xf32>)
+//
+func.func @sub(%A : tensor<16x8xf32>, %B: tensor<16x8xf32>, %C : tensor<16x8xf32>) -> tensor<16x8xf32> {
+ %sub = linalg.sub ins(%A, %B : tensor<16x8xf32>, tensor<16x8xf32>) outs(%C : tensor<16x8xf32>) -> tensor<16x8xf32>
+ return %sub : tensor<16x8xf32>
+}
+
+// -----
+
+// CHECK: @ternary_select(%[[A:.+]]: tensor<4x8x16xi1>, %[[B:.+]]: tensor<4x8x16xf32>, %[[C:.+]]: tensor<4x8x16xf32>)
+// CHECK: %[[E:.+]] = tensor.empty() : tensor<4x8x16xf32>
+// CHECK: {{.*}} = linalg.elementwise
+// CHECK-SAME: kind=#linalg.elementwise_kind<select>
+// CHECK-SAME: ins(%[[A]], %[[B]], %[[C]] : tensor<4x8x16xi1>, tensor<4x8x16xf32>, tensor<4x8x16xf32>)
+// CHECK-SAME: outs(%[[E]] : tensor<4x8x16xf32>) -> tensor<4x8x16xf32>
+//
+func.func @ternary_select(%A: tensor<4x8x16xi1>, %B: tensor<4x8x16xf32>, %C: tensor<4x8x16xf32>)
+ -> tensor<4x8x16xf32> {
+ %empty = tensor.empty() : tensor<4x8x16xf32>
+ %select = linalg.select
+ ins(%A, %B, %C : tensor<4x8x16xi1>, tensor<4x8x16xf32>, tensor<4x8x16xf32>)
+ outs(%empty: tensor<4x8x16xf32>) -> tensor<4x8x16xf32>
+ return %select : tensor<4x8x16xf32>
+}
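
For reference, the linalg.elementwise form that -linalg-morph-ops=named-to-category should produce for the @exp case above, assembled from the CHECK lines (a sketch, not verbatim pass output):

func.func @exp(%A : tensor<16x8xf32>, %B : tensor<16x8xf32>) -> tensor<16x8xf32> {
  %0 = linalg.elementwise kind=#linalg.elementwise_kind<exp>
         ins(%A : tensor<16x8xf32>) outs(%B : tensor<16x8xf32>) -> tensor<16x8xf32>
  return %0 : tensor<16x8xf32>
}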
diff --git a/mlir/test/Dialect/Linalg/fold-add-into-dest.mlir b/mlir/test/Dialect/Linalg/fold-add-into-dest.mlir
index d8e92e4..e90247d 100644
--- a/mlir/test/Dialect/Linalg/fold-add-into-dest.mlir
+++ b/mlir/test/Dialect/Linalg/fold-add-into-dest.mlir
@@ -158,36 +158,6 @@ module attributes {transform.with_named_sequence} {
// -----
!type = tensor<2048x2048xf32>
-func.func @fold_add_on_transposed_matmuls(%arg0: !type, %arg1: !type) -> !type {
- %0 = arith.constant dense<1.111111e+00> : !type
- %cst = arith.constant 0.000000e+00 : f32
- %1 = tensor.empty() : !type
- %2 = linalg.fill ins(%cst : f32) outs(%1 : !type) -> !type
- %3 = linalg.matmul_transpose_a ins(%arg0, %0 : !type, !type) outs(%2 : !type) -> !type
- %4 = linalg.matmul_transpose_b ins(%arg1, %0 : !type, !type) outs(%2 : !type) -> !type
- %5 = linalg.add ins(%3, %4 : !type, !type) outs(%1 : !type) -> !type
- return %5 : !type
-}
-
-// CHECK-LABEL: func.func @fold_add_on_transposed_matmuls
-// CHECK: %[[ACC:.+]] = linalg.matmul_transpose_a
-// CHECK-NEXT: %[[RES:.+]] = linalg.matmul_transpose_b ins({{.+}}) outs(%[[ACC]]
-// CHECK-NOT: linalg.add
-// CHECK-NEXT: return %[[RES]]
-
-module attributes {transform.with_named_sequence} {
- transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
- %func = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
- transform.apply_patterns to %func {
- transform.apply_patterns.linalg.fold_add_into_dest
- } : !transform.any_op
- transform.yield
- }
-}
-
-// -----
-
-!type = tensor<2048x2048xf32>
func.func @expect_no_fold_of_add_as_dominated_op_is_not_a_contraction(%arg0: !type, %arg1: !type) -> !type {
%0 = arith.constant dense<1.111111e+00> : !type
%cst = arith.constant 0.000000e+00 : f32
diff --git a/mlir/test/Dialect/Linalg/linalg-morph-category-ops.mlir b/mlir/test/Dialect/Linalg/linalg-morph-category-ops.mlir
new file mode 100644
index 0000000..00602c4
--- /dev/null
+++ b/mlir/test/Dialect/Linalg/linalg-morph-category-ops.mlir
@@ -0,0 +1,15 @@
+// Forward path `named -> category -> generic`
+// RUN: mlir-opt %s -linalg-morph-ops=named-to-category | FileCheck %s --check-prefix=NAMED_TO_CATEGORY
+
+// RUN: mlir-opt %s -linalg-morph-ops=named-to-category | \
+// RUN:   mlir-opt -linalg-morph-ops=category-to-generic | FileCheck %s --check-prefix=CATEGORY_TO_GENERIC
+
+func.func @exp(%A : tensor<16x8xf32>, %B : tensor<16x8xf32>) -> tensor<16x8xf32> {
+ %exp = linalg.exp ins(%A : tensor<16x8xf32>) outs(%B : tensor<16x8xf32>) -> tensor<16x8xf32>
+ return %exp : tensor<16x8xf32>
+}
+// NAMED_TO_CATEGORY: linalg.elementwise
+// NAMED_TO_CATEGORY-NOT: linalg.exp
+
+// CATEGORY_TO_GENERIC: linalg.generic
+// CATEGORY_TO_GENERIC-NOT: linalg.elementwise
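
The CATEGORY_TO_GENERIC prefix only asserts that a linalg.generic appears; a plausible shape of that generic for the @exp case, assuming an identity-indexed payload built on math.exp (names, maps, and attributes here are assumptions, not pass output):

#id = affine_map<(d0, d1) -> (d0, d1)>

func.func @exp_as_generic(%A : tensor<16x8xf32>, %B : tensor<16x8xf32>) -> tensor<16x8xf32> {
  %0 = linalg.generic
         {indexing_maps = [#id, #id], iterator_types = ["parallel", "parallel"]}
         ins(%A : tensor<16x8xf32>) outs(%B : tensor<16x8xf32>) {
  ^bb0(%in: f32, %out: f32):
    %e = math.exp %in : f32
    linalg.yield %e : f32
  } -> tensor<16x8xf32>
  return %0 : tensor<16x8xf32>
}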
diff --git a/mlir/test/Dialect/Linalg/linalg-morph-multi-step.mlir b/mlir/test/Dialect/Linalg/linalg-morph-multi-step.mlir
new file mode 100644
index 0000000..bdd29b9
--- /dev/null
+++ b/mlir/test/Dialect/Linalg/linalg-morph-multi-step.mlir
@@ -0,0 +1,14 @@
+// RUN: mlir-opt %s -linalg-morph-ops=named-to-generic | FileCheck %s --check-prefix=NAMED_TO_GENERIC
+// RUN: mlir-opt %s -linalg-morph-ops=named-to-generic | mlir-opt -linalg-morph-ops=generic-to-named | \
+// RUN: FileCheck %s --check-prefix=ROUND_TRIP
+
+func.func @exp(%A : tensor<16x8xf32>, %B : tensor<16x8xf32>) -> tensor<16x8xf32> {
+ %exp = linalg.exp ins(%A : tensor<16x8xf32>) outs(%B : tensor<16x8xf32>) -> tensor<16x8xf32>
+ return %exp : tensor<16x8xf32>
+}
+
+// NAMED_TO_GENERIC: linalg.generic
+// NAMED_TO_GENERIC-NOT: linalg.exp
+
+// ROUND_TRIP: linalg.exp
+// ROUND_TRIP-NOT: linalg.generic
diff --git a/mlir/test/Dialect/Linalg/named-ops.mlir b/mlir/test/Dialect/Linalg/named-ops.mlir
index 412f40d..a93e979 100644
--- a/mlir/test/Dialect/Linalg/named-ops.mlir
+++ b/mlir/test/Dialect/Linalg/named-ops.mlir
@@ -1222,17 +1222,6 @@ func.func @batch_reduce_matmul(%arg0: memref<?x?x?xf32>, %arg1: memref<?x?x?xf32
// -----
-// CHECK-LABEL: func @matmul_transpose_a
-// CHECK: linalg.matmul_transpose_a
-// CHECK-SAME: ins(%{{.+}}, %{{.+}} : memref<5x3xf32>, memref<5x7xf32>)
-// CHECK-SAME: outs(%{{.+}} : memref<3x7xf32>)
-func.func @matmul_transpose_a(%arg0: memref<5x3xf32>, %arg1: memref<5x7xf32>, %arg2: memref<3x7xf32>) {
- linalg.matmul_transpose_a ins(%arg0, %arg1 : memref<5x3xf32>, memref<5x7xf32>) outs(%arg2: memref<3x7xf32>)
- return
-}
-
-// -----
-
// CHECK-LABEL: func @matmul_transpose_a_explicit
// CHECK: linalg.matmul
// CHECK-SAME: ins(%{{.+}}, %{{.+}} : memref<5x3xf32>, memref<5x7xf32>)
@@ -1478,17 +1467,6 @@ func.func @matmul_bcast_b_transpose_a(%arg0: memref<5x3xf32>, %arg1: memref<5xf3
// -----
-// CHECK-LABEL: func @matmul_transpose_b
-// CHECK: linalg.matmul_transpose_b
-// CHECK-SAME: ins(%{{.+}}, %{{.+}} : memref<3x5xf32>, memref<7x5xf32>)
-// CHECK-SAME: outs(%{{.+}} : memref<3x7xf32>)
-func.func @matmul_transpose_b(%arg0: memref<3x5xf32>, %arg1: memref<7x5xf32>, %arg2: memref<3x7xf32>) {
- linalg.matmul_transpose_b ins(%arg0, %arg1 : memref<3x5xf32>, memref<7x5xf32>) outs(%arg2: memref<3x7xf32>)
- return
-}
-
-// -----
-
// CHECK: #[[$ATTR_0:.+]] = affine_map<(d0, d1, d2, d3) -> (d3)>
// CHECK: #[[$ATTR_1:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d3, d2)>
// CHECK: #[[$ATTR_2:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
@@ -1806,28 +1784,6 @@ func.func @bcast_A_transpose_B(%A: memref<3x5xf32>, %B: memref<2x7x5xf32>, %C: m
// -----
-// CHECK-LABEL: func @batchmatmul_transpose_a
-// CHECK: linalg.batch_matmul_transpose_a
-// CHECK-SAME: ins(%{{.+}}, %{{.+}} : memref<2x5x3xf32>, memref<2x5x7xf32>)
-// CHECK-SAME: outs(%{{.+}} : memref<2x3x7xf32>)
-func.func @batchmatmul_transpose_a(%arg0: memref<2x5x3xf32>, %arg1: memref<2x5x7xf32>, %arg2: memref<2x3x7xf32>) {
- linalg.batch_matmul_transpose_a ins(%arg0, %arg1 : memref<2x5x3xf32>, memref<2x5x7xf32>) outs(%arg2: memref<2x3x7xf32>)
- return
-}
-
-// -----
-
-// CHECK-LABEL: func @batchmatmul_transpose_b
-// CHECK: linalg.batch_matmul_transpose_b
-// CHECK-SAME: ins(%{{.+}}, %{{.+}} : memref<2x3x5xf32>, memref<2x7x5xf32>)
-// CHECK-SAME: outs(%{{.+}} : memref<2x3x7xf32>)
-func.func @batchmatmul_transpose_b(%arg0: memref<2x3x5xf32>, %arg1: memref<2x7x5xf32>, %arg2: memref<2x3x7xf32>) {
- linalg.batch_matmul_transpose_b ins(%arg0, %arg1 : memref<2x3x5xf32>, memref<2x7x5xf32>) outs(%arg2: memref<2x3x7xf32>)
- return
-}
-
-// -----
-
// CHECK: #[[$ATTR_0:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>
// CHECK: #[[$ATTR_1:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d3, d2)>
// CHECK: #[[$ATTR_2:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
diff --git a/mlir/test/Dialect/Linalg/rank-reduce-contraction-ops.mlir b/mlir/test/Dialect/Linalg/rank-reduce-contraction-ops.mlir
index 43bddb0..704576d 100644
--- a/mlir/test/Dialect/Linalg/rank-reduce-contraction-ops.mlir
+++ b/mlir/test/Dialect/Linalg/rank-reduce-contraction-ops.mlir
@@ -92,38 +92,6 @@ func.func @singleton_batch_vecmat(%arg0 : tensor<1x?xf32>, %arg1 : tensor<1x?x?x
// -----
-func.func @singleton_batchmatmul_transpose_a(%arg0: memref<1x5x3xf32>, %arg1: memref<1x5x7xf32>, %arg2: memref<1x3x7xf32>) {
- // CHECK-LABEL: @singleton_batchmatmul_transpose_a
- // CHECK-SAME: %[[LHS:[a-zA-Z0-9]+]]: memref<1x5x3xf32>
- // CHECK-SAME: %[[RHS:[a-zA-Z0-9]+]]: memref<1x5x7xf32>
- // CHECK-SAME: %[[INIT:[a-zA-Z0-9]+]]: memref<1x3x7xf32>
- // CHECK-NEXT: %[[COLLAPSED_LHS:.*]] = memref.collapse_shape %[[LHS]] {{\[}}[0, 1], [2]]
- // CHECK-NEXT: %[[COLLAPSED_RHS:.*]] = memref.collapse_shape %[[RHS]] {{\[}}[0, 1], [2]]
- // CHECK-NEXT: %[[COLLAPSED_INIT:.*]] = memref.collapse_shape %[[INIT]] {{\[}}[0, 1], [2]]
- // CHECK-NEXT: linalg.matmul_transpose_a ins(%[[COLLAPSED_LHS]], %[[COLLAPSED_RHS]] : memref<5x3xf32>, memref<5x7xf32>) outs(%[[COLLAPSED_INIT]] : memref<3x7xf32>)
- // CHECK-NEXT: return
- linalg.batch_matmul_transpose_a ins(%arg0, %arg1 : memref<1x5x3xf32>, memref<1x5x7xf32>) outs(%arg2: memref<1x3x7xf32>)
- return
-}
-
-// -----
-
-func.func @singleton_batchmatmul_transpose_b(%arg0: memref<1x3x5xf32>, %arg1: memref<1x7x5xf32>, %arg2: memref<1x3x7xf32>) {
- // CHECK-LABEL: @singleton_batchmatmul_transpose_b
- // CHECK-SAME: %[[LHS:[a-zA-Z0-9]+]]: memref<1x3x5xf32>
- // CHECK-SAME: %[[RHS:[a-zA-Z0-9]+]]: memref<1x7x5xf32>
- // CHECK-SAME: %[[INIT:[a-zA-Z0-9]+]]: memref<1x3x7xf32>
- // CHECK-NEXT: %[[COLLAPSED_LHS:.*]] = memref.collapse_shape %[[LHS]] {{\[}}[0, 1], [2]]
- // CHECK-NEXT: %[[COLLAPSED_RHS:.*]] = memref.collapse_shape %[[RHS]] {{\[}}[0, 1], [2]]
- // CHECK-NEXT: %[[COLLAPSED_INIT:.*]] = memref.collapse_shape %[[INIT]] {{\[}}[0, 1], [2]]
- // CHECK-NEXT: linalg.matmul_transpose_b ins(%[[COLLAPSED_LHS]], %[[COLLAPSED_RHS]] : memref<3x5xf32>, memref<7x5xf32>) outs(%[[COLLAPSED_INIT]] : memref<3x7xf32>)
- // CHECK-NEXT: return
- linalg.batch_matmul_transpose_b ins(%arg0, %arg1 : memref<1x3x5xf32>, memref<1x7x5xf32>) outs(%arg2: memref<1x3x7xf32>)
- return
-}
-
-// -----
-
func.func @matmul_to_matvec_tensor(%arg0: tensor<?x?xf32>, %arg1: tensor<?x1xf32>, %arg2: tensor<?x1xf32>) -> tensor<?x1xf32> {
// CHECK-LABEL: @matmul_to_matvec_tensor
// CHECK-SAME: %[[LHS:[a-zA-Z0-9]+]]: tensor<?x?xf32>
@@ -226,59 +194,6 @@ func.func @matvec_to_dot_tensor(%arg0: tensor<1x?xf32>, %arg1: tensor<?xf32>, %a
// -----
-func.func @matmul_transpose_a_to_vecmat(%arg0: tensor<256x1xf32>, %arg1: tensor<256x512xf32>, %arg2: tensor<1x512xf32>) -> tensor<1x512xf32> {
- // CHECK-LABEL: @matmul_transpose_a_to_vecmat
- // CHECK: collapse_shape {{.*}} into tensor<256xf32>
- // CHECK: collapse_shape {{.*}} into tensor<512xf32>
- // CHECK: linalg.vecmat
- // CHECK: expand_shape {{.*}} into tensor<1x512xf32>
- %0 = linalg.matmul_transpose_a ins(%arg0, %arg1: tensor<256x1xf32>, tensor<256x512xf32>) outs(%arg2: tensor<1x512xf32>) -> tensor<1x512xf32>
- return %0 : tensor<1x512xf32>
-}
-
-// -----
-
-func.func @batch_matmul_transpose_a_to_batch_vecmat(%arg0: tensor<64x256x1xf32>, %arg1: tensor<64x256x512xf32>, %arg2: tensor<64x1x512xf32>) -> tensor<64x1x512xf32> {
- // CHECK-LABEL: @batch_matmul_transpose_a_to_batch_vecmat
- // CHECK: collapse_shape {{.*}} into tensor<64x256xf32>
- // CHECK: collapse_shape {{.*}} into tensor<64x512xf32>
- // CHECK: linalg.batch_vecmat
- // CHECK: expand_shape {{.*}} into tensor<64x1x512xf32>
- %0 = linalg.batch_matmul_transpose_a ins(%arg0, %arg1: tensor<64x256x1xf32>, tensor<64x256x512xf32>) outs(%arg2: tensor<64x1x512xf32>) -> tensor<64x1x512xf32>
- return %0 : tensor<64x1x512xf32>
-}
-
-// -----
-
-func.func @matmul_transpose_b_to_matvec(%arg0: memref<?x?xf32>, %arg1: memref<1x?xf32>, %arg2: memref<?x1xf32>) {
- // CHECK-LABEL: @matmul_transpose_b_to_matvec
- // CHECK: linalg.matvec
- linalg.matmul_transpose_b ins(%arg0, %arg1: memref<?x?xf32>, memref<1x?xf32>) outs(%arg2: memref<?x1xf32>)
- return
-}
-
-// -----
-
-func.func @batchmatmul_transpose_b_to_batchmatvec_tensor(%arg0: tensor<64x128x256xf32>, %arg1: tensor<64x1x256xf32>, %arg2: tensor<64x128x1xf32>) -> tensor<64x128x1xf32> {
- // CHECK: collapse_shape {{.*}} into tensor<64x256xf32>
- // CHECK: collapse_shape {{.*}} into tensor<64x128xf32>
- // CHECK: linalg.batch_matvec
- // CHECK: expand_shape {{.*}} into tensor<64x128x1xf32>
- %0 = linalg.batch_matmul_transpose_b ins(%arg0, %arg1: tensor<64x128x256xf32>, tensor<64x1x256xf32>) outs(%arg2: tensor<64x128x1xf32>) -> tensor<64x128x1xf32>
- return %0 : tensor<64x128x1xf32>
-}
-
-// -----
-
-func.func @batchmatmul_transpose_b_to_to_dot(%arg0: tensor<1x1x?xf32>, %arg1: tensor<1x1x?xf32>, %arg2: tensor<1x1x1xf32>) -> tensor<1x1x1xf32> {
- // CHECK-LABEL: @batchmatmul_transpose_b_to_to_dot
- // CHECK: linalg.dot
- %0 = linalg.batch_matmul_transpose_b ins(%arg0, %arg1: tensor<1x1x?xf32>, tensor<1x1x?xf32>) outs(%arg2: tensor<1x1x1xf32>) -> tensor<1x1x1xf32>
- return %0 : tensor<1x1x1xf32>
-}
-
-// -----
-
func.func @nonsingleton_batch_matmul(%arg0 : tensor<2x?x?xf32>, %arg1 : tensor<2x?x?xf32>, %arg2: tensor<2x?x?xf32>) -> tensor<2x?x?xf32> {
// CHECK-LABEL: @nonsingleton_batch_matmul
// CHECK-NOT: collapse_shape
diff --git a/mlir/test/Dialect/Linalg/tile-to-forall.mlir b/mlir/test/Dialect/Linalg/tile-to-forall.mlir
index 778d5bb..1b0bade 100644
--- a/mlir/test/Dialect/Linalg/tile-to-forall.mlir
+++ b/mlir/test/Dialect/Linalg/tile-to-forall.mlir
@@ -504,7 +504,7 @@ func.func @matmul_tile_size_dynamic(%A: tensor<?x?xf32>, %B: tensor<?x?xf32>, %C
module attributes {transform.with_named_sequence} {
transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
- %0 = transform.structured.match ops{["linalg.matmul_transpose_b"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
%c10 = transform.param.constant 10 : i64 -> !transform.param<i64>
%c20 = transform.param.constant 20 : i64 -> !transform.param<i64>
%sz = transform.merge_handles %c10, %c20 : !transform.param<i64>
diff --git a/mlir/test/Dialect/Linalg/transform-op-pad-tiling-interface.mlir b/mlir/test/Dialect/Linalg/transform-op-pad-tiling-interface.mlir
index f741876..9a3dcf0 100644
--- a/mlir/test/Dialect/Linalg/transform-op-pad-tiling-interface.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-pad-tiling-interface.mlir
@@ -14,11 +14,11 @@ module attributes {transform.with_named_sequence} {
: (!transform.any_op) -> !transform.any_op
// Tile to 5 then pad to 8
- %fill_l1, %loops_l1 = transform.structured.tile_using_for %fill tile_sizes [5]
+ %fill_l1, %loops_l1 = transform.structured.tile_using_for %fill tile_sizes [5]
: (!transform.any_op) -> (!transform.any_op, !transform.any_op)
%fill_padded, %_ = transform.structured.pad_tiling_interface %fill_l1 to padding_sizes [8] {
- padding_values=[0.0 : f32, 0.0 : f32]
+ padding_values= [#ub.poison, 0.0 : f32]
} : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
transform.yield
@@ -33,9 +33,9 @@ func.func @pad_lhs(
-> tensor<24x25xf32>
{
// CHECK: scf.for %{{.*}} -> (tensor<24x25xf32>)
- // CHECK: tensor.pad %{{.*}}
+ // CHECK: tensor.pad %{{.*}}
// CHECK: : tensor<?x12xf32> to tensor<8x12xf32>
- // CHECK: tensor.pad %{{.*}}
+ // CHECK: tensor.pad %{{.*}}
// CHECK: : tensor<?x25xf32> to tensor<8x25xf32>
// CHECK: linalg.matmul ins(%{{.*}}, %{{.*}} : tensor<8x12xf32>, tensor<12x25xf32>) outs(%{{.*}} : tensor<8x25xf32>) -> tensor<8x25xf32>
// CHECK: tensor.extract_slice %{{.*}}[0, 0] [%{{.*}}, 25] [1, 1]
@@ -92,7 +92,7 @@ module {
%padded, %pad = transform.structured.pad_tiling_interface %0 to padding_sizes [8, 0, 14] {
padding_values = [0.000000e+00 : f32, 0.000000e+00 : f32, 0.000000e+00 : f32]
} : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
- transform.yield
+ transform.yield
}
}
}
@@ -147,7 +147,7 @@ module {
%padded, %pad = transform.structured.pad_tiling_interface %0 to padding_sizes [8, 0, 14] {
padding_values = [0.000000e+00 : f32, 0.000000e+00 : f32, 0.000000e+00 : f32]
} : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
- transform.yield
+ transform.yield
}
}
}
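
One of the padding_values above is switched to #ub.poison, i.e. the padded region is filled with a poison value instead of a numeric constant. A self-contained sketch of what poison-valued padding looks like when materialized as tensor.pad (hypothetical function, independent of this test's CHECK lines):

func.func @pad_with_poison(%t: tensor<?x12xf32>) -> tensor<8x12xf32> {
  %c0 = arith.constant 0 : index
  %c8 = arith.constant 8 : index
  %d0 = tensor.dim %t, %c0 : tensor<?x12xf32>
  // Pad the dynamic dimension up to 8 rows, yielding poison in the new rows.
  %h = arith.subi %c8, %d0 : index
  %pad = ub.poison : f32
  %padded = tensor.pad %t low[0, 0] high[%h, 0] {
  ^bb0(%i: index, %j: index):
    tensor.yield %pad : f32
  } : tensor<?x12xf32> to tensor<8x12xf32>
  return %padded : tensor<8x12xf32>
}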
diff --git a/mlir/test/Dialect/Linalg/transform-op-pad.mlir b/mlir/test/Dialect/Linalg/transform-op-pad.mlir
index f91eb9c..51bf4a2 100644
--- a/mlir/test/Dialect/Linalg/transform-op-pad.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-pad.mlir
@@ -465,14 +465,14 @@ module attributes {transform.with_named_sequence} {
// CHECK: %[[RHS:.*]] = tensor.pad
// CHECK: scf.for
// CHECK-DAG: tensor.extract_slice %[[LHS]][0, %{{.*}}] [%{{.*}}, 32]
-// CHECK-DAG: tensor.extract_slice %[[RHS]][0, %{{.*}}] [%{{.*}}, 32]
+// CHECK-DAG: tensor.extract_slice %[[RHS]][%{{.*}}, 0] [32, %{{.*}}]
func.func @dyn_pad_tiling(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>, %arg2: tensor<?x?xf32>) -> tensor<?x?xf32> {
- %0 = linalg.matmul_transpose_b ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%arg2 : tensor<?x?xf32>) -> tensor<?x?xf32>
+ %0 = linalg.matmul ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%arg2 : tensor<?x?xf32>) -> tensor<?x?xf32>
return %0 : tensor<?x?xf32>
}
module attributes {transform.with_named_sequence} {
transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
- %0 = transform.structured.match ops{["linalg.matmul_transpose_b"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+ %0 = transform.structured.match ops{["linalg.matmul"]} in %arg0 : (!transform.any_op) -> !transform.any_op
%padded, %pad, %copy = transform.structured.pad %0 pad_to_multiple_of [32] use_prescribed_tensor_shapes {padding_dimensions = [2], padding_values = [0.000000e+00 : f32, 0.000000e+00 : f32, 0.000000e+00 : f32]} : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
%tiled_linalg_op, %loops = transform.structured.tile_using_for %padded tile_sizes [0, 0, 32] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
%1 = transform.structured.match ops{["func.func"]} in %arg0 : (!transform.any_op) -> !transform.any_op
diff --git a/mlir/test/Dialect/Linalg/transform-op-specialize-matmul.mlir b/mlir/test/Dialect/Linalg/transform-op-specialize-matmul.mlir
index f64953b..bd4c655 100644
--- a/mlir/test/Dialect/Linalg/transform-op-specialize-matmul.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-specialize-matmul.mlir
@@ -30,66 +30,6 @@ module attributes {transform.with_named_sequence} {
// -----
-#map = affine_map<(d0, d1, d2) -> (d2, d0)>
-#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
-#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
-func.func @matmul_transpose_a(%arg0: memref<5x3xf32>, %arg1: memref<5x7xf32>, %arg2: memref<3x7xf32>) {
- linalg.generic
- {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]}
- ins(%arg0, %arg1 : memref<5x3xf32>, memref<5x7xf32>) outs(%arg2 : memref<3x7xf32>) {
- ^bb0(%in: f32, %in_0: f32, %out: f32):
- %0 = arith.mulf %in, %in_0 : f32
- %1 = arith.addf %out, %0 : f32
- linalg.yield %1 : f32
- }
- return
-}
-
-// CHECK-LABEL: @matmul_transpose_a
-// CHECK-SAME: %[[ARG0:.+]]: memref<5x3xf32>, %[[ARG1:.+]]: memref<5x7xf32>, %[[ARG2:.+]]: memref<3x7xf32>) {
-// CHECK-NOT: linalg.generic
-// CHECK: linalg.matmul_transpose_a ins(%[[ARG0]], %[[ARG1]] : memref<5x3xf32>, memref<5x7xf32>) outs(%[[ARG2]] : memref<3x7xf32>)
-
-module attributes {transform.with_named_sequence} {
- transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
- %0 = transform.structured.match interface{LinalgOp} in %arg0 : (!transform.any_op) -> !transform.any_op
- %1 = transform.structured.specialize %0 : (!transform.any_op) -> !transform.any_op
- transform.yield
- }
-}
-
-// -----
-
-#map = affine_map<(d0, d1, d2) -> (d0, d2)>
-#map1 = affine_map<(d0, d1, d2) -> (d1, d2)>
-#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
-func.func @matmul_transpose_b(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>, %arg2: tensor<?x?xf32>) -> tensor<?x?xf32> {
- %0 = linalg.generic
- {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]}
- ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%arg2 : tensor<?x?xf32>) {
- ^bb0(%in: f32, %in_0: f32, %out: f32):
- %1 = arith.mulf %in, %in_0 : f32
- %2 = arith.addf %out, %1 : f32
- linalg.yield %2 : f32
- } -> tensor<?x?xf32>
- return %0 : tensor<?x?xf32>
-}
-
-// CHECK-LABEL: @matmul_transpose_b
-// CHECK-SAME: %[[ARG0:.+]]: tensor<?x?xf32>, %[[ARG1:.+]]: tensor<?x?xf32>, %[[ARG2:.+]]: tensor<?x?xf32>) -> tensor<?x?xf32>
-// CHECK-NOT: linalg.generic
-// CHECK: linalg.matmul_transpose_b ins(%[[ARG0]], %[[ARG1]] : tensor<?x?xf32>, tensor<?x?xf32>) outs(%[[ARG2]] : tensor<?x?xf32>) -> tensor<?x?xf32>
-
-module attributes {transform.with_named_sequence} {
- transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
- %0 = transform.structured.match interface{LinalgOp} in %arg0 : (!transform.any_op) -> !transform.any_op
- %1 = transform.structured.specialize %0 : (!transform.any_op) -> !transform.any_op
- transform.yield
- }
-}
-
-// -----
-
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d3, d2)>
#map2 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
@@ -117,32 +57,3 @@ module attributes {transform.with_named_sequence} {
transform.yield
}
}
-
-// -----
-#map = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>
-#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>
-#map2 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
-func.func @batch_matmul_transpose_b(%arg0: tensor<?x?x?xf32>, %arg1: tensor<?x?x?xf32>, %arg2: tensor<?x?x?xf32>) -> tensor<?x?x?xf32> {
- %0 = linalg.generic
- {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "parallel", "reduction"]}
- ins(%arg0, %arg1 : tensor<?x?x?xf32>, tensor<?x?x?xf32>) outs(%arg2 : tensor<?x?x?xf32>) {
- ^bb0(%in: f32, %in_0: f32, %out: f32):
- %1 = arith.mulf %in, %in_0 : f32
- %2 = arith.addf %out, %1 : f32
- linalg.yield %2 : f32
- } -> tensor<?x?x?xf32>
- return %0 : tensor<?x?x?xf32>
-}
-
-// CHECK-LABEL: @batch_matmul_transpose_b
-// CHECK-SAME: %[[ARG0:.+]]: tensor<?x?x?xf32>, %[[ARG1:.+]]: tensor<?x?x?xf32>, %[[ARG2:.+]]: tensor<?x?x?xf32>) -> tensor<?x?x?xf32>
-// CHECK-NOT: linalg.generic
-// CHECK: linalg.batch_matmul_transpose_b ins(%[[ARG0]], %[[ARG1]] : tensor<?x?x?xf32>, tensor<?x?x?xf32>) outs(%[[ARG2]] : tensor<?x?x?xf32>) -> tensor<?x?x?xf32>
-
-module attributes {transform.with_named_sequence} {
- transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
- %0 = transform.structured.match interface{LinalgOp} in %arg0 : (!transform.any_op) -> !transform.any_op
- %1 = transform.structured.specialize %0 : (!transform.any_op) -> !transform.any_op
- transform.yield
- }
-}
diff --git a/mlir/test/Dialect/Linalg/transpose-matmul.mlir b/mlir/test/Dialect/Linalg/transpose-matmul.mlir
index d2b7e9f..4ee87fb 100644
--- a/mlir/test/Dialect/Linalg/transpose-matmul.mlir
+++ b/mlir/test/Dialect/Linalg/transpose-matmul.mlir
@@ -1,6 +1,20 @@
// RUN: mlir-opt -transform-preload-library='transform-library-paths=%p/transpose-matmul-a.mlir' -transform-interpreter -split-input-file %s | FileCheck %s --check-prefixes=CHECK,TRANSPOSE-A
// RUN: mlir-opt -transform-preload-library='transform-library-paths=%p/transpose-matmul-b.mlir' -transform-interpreter -split-input-file %s | FileCheck %s --check-prefixes=CHECK,TRANSPOSE-B
+// TRANSPOSE-A-DAG: #[[$MA:.*]] = affine_map<(d0, d1, d2) -> (d2, d0)>
+// TRANSPOSE-A-DAG: #[[$MB:.*]] = affine_map<(d0, d1, d2) -> (d2, d1)>
+// TRANSPOSE-A-DAG: #[[$MC:.*]] = affine_map<(d0, d1, d2) -> (d0, d1)>
+// TRANSPOSE-A-DAG: #[[$BMA:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d3, d1)>
+// TRANSPOSE-A-DAG: #[[$BMB:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d3, d2)>
+// TRANSPOSE-A-DAG: #[[$BMC:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
+
+// TRANSPOSE-B-DAG: #[[$MA:.*]] = affine_map<(d0, d1, d2) -> (d0, d2)>
+// TRANSPOSE-B-DAG: #[[$MB:.*]] = affine_map<(d0, d1, d2) -> (d1, d2)>
+// TRANSPOSE-B-DAG: #[[$MC:.*]] = affine_map<(d0, d1, d2) -> (d0, d1)>
+// TRANSPOSE-B-DAG: #[[$BMA:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>
+// TRANSPOSE-B-DAG: #[[$BMB:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>
+// TRANSPOSE-B-DAG: #[[$BMC:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
+
// CHECK-LABEL: func.func @matmul_static(
// CHECK-SAME: %[[A:.*]]: tensor<16x8xf32>,
// CHECK-SAME: %[[B:.*]]: tensor<8x16xf32>) -> tensor<16x16xf32> {
@@ -9,10 +23,10 @@
// CHECK: %[[C_ZERO:.*]] = linalg.fill ins(%[[C0_F32]] : f32) outs(%[[C_INIT]] : tensor<16x16xf32>) -> tensor<16x16xf32>
// TRANSPOSE-A: %[[A_TRANSP_INIT:.*]] = tensor.empty() : tensor<8x16xf32>
// TRANSPOSE-A: %[[A_TRANSP:.*]] = linalg.transpose ins(%[[A]] : tensor<16x8xf32>) outs(%[[A_TRANSP_INIT]] : tensor<8x16xf32>) permutation = [1, 0]
-// TRANSPOSE-A: %[[C:.*]] = linalg.matmul_transpose_a ins(%[[A_TRANSP]], %[[B]] : tensor<8x16xf32>, tensor<8x16xf32>) outs(%[[C_ZERO]] : tensor<16x16xf32>) -> tensor<16x16xf32>
+// TRANSPOSE-A: %[[C:.*]] = linalg.matmul indexing_maps = [#[[$MA]], #[[$MB]], #[[$MC]]] ins(%[[A_TRANSP]], %[[B]] : tensor<8x16xf32>, tensor<8x16xf32>) outs(%[[C_ZERO]] : tensor<16x16xf32>) -> tensor<16x16xf32>
// TRANSPOSE-B: %[[B_TRANSP_INIT:.*]] = tensor.empty() : tensor<16x8xf32>
// TRANSPOSE-B: %[[B_TRANSP:.*]] = linalg.transpose ins(%[[B]] : tensor<8x16xf32>) outs(%[[B_TRANSP_INIT]] : tensor<16x8xf32>) permutation = [1, 0]
-// TRANSPOSE-B: %[[C:.*]] = linalg.matmul_transpose_b ins(%[[A]], %[[B_TRANSP]] : tensor<16x8xf32>, tensor<16x8xf32>) outs(%[[C_ZERO]] : tensor<16x16xf32>) -> tensor<16x16xf32>
+// TRANSPOSE-B: %[[C:.*]] = linalg.matmul indexing_maps = [#[[$MA]], #[[$MB]], #[[$MC]]] ins(%[[A]], %[[B_TRANSP]] : tensor<16x8xf32>, tensor<16x8xf32>) outs(%[[C_ZERO]] : tensor<16x16xf32>) -> tensor<16x16xf32>
// CHECK: return %[[C]] : tensor<16x16xf32>
// CHECK: }
func.func @matmul_static(%A: tensor<16x8xf32>, %B: tensor<8x16xf32>) -> (tensor<16x16xf32>) {
@@ -38,11 +52,11 @@ func.func @matmul_static(%A: tensor<16x8xf32>, %B: tensor<8x16xf32>) -> (tensor<
// TRANSPOSE-A: %[[A_DIM1:.*]] = tensor.dim %[[A]], %[[C1]] : tensor<?x?xf32>
// TRANSPOSE-A: %[[A_TRANSP_INIT:.*]] = tensor.empty(%[[A_DIM1]], %[[A_DIM0]]) : tensor<?x?xf32>
// TRANSPOSE-A: %[[A_TRANSP:.*]] = linalg.transpose ins(%[[A]] : tensor<?x?xf32>) outs(%[[A_TRANSP_INIT]] : tensor<?x?xf32>) permutation = [1, 0]
-// TRANSPOSE-A: %[[C:.*]] = linalg.matmul_transpose_a ins(%[[A_TRANSP]], %[[B]] : tensor<?x?xf32>, tensor<?x?xf32>) outs(%[[C_ZERO]] : tensor<?x?xf32>) -> tensor<?x?xf32>
+// TRANSPOSE-A: %[[C:.*]] = linalg.matmul indexing_maps = [#[[$MA]], #[[$MB]], #[[$MC]]] ins(%[[A_TRANSP]], %[[B]] : tensor<?x?xf32>, tensor<?x?xf32>) outs(%[[C_ZERO]] : tensor<?x?xf32>) -> tensor<?x?xf32>
// TRANSPOSE-B: %[[B_DIM0:.*]] = tensor.dim %[[B]], %[[C0]] : tensor<?x?xf32>
// TRANSPOSE-B: %[[B_TRANSP_INIT:.*]] = tensor.empty(%[[B_DIM1]], %[[B_DIM0]]) : tensor<?x?xf32>
// TRANSPOSE-B: %[[B_TRANSP:.*]] = linalg.transpose ins(%[[B]] : tensor<?x?xf32>) outs(%[[B_TRANSP_INIT]] : tensor<?x?xf32>) permutation = [1, 0]
-// TRANSPOSE-B: %[[C:.*]] = linalg.matmul_transpose_b ins(%[[A]], %[[B_TRANSP]] : tensor<?x?xf32>, tensor<?x?xf32>) outs(%[[C_ZERO]] : tensor<?x?xf32>) -> tensor<?x?xf32>
+// TRANSPOSE-B: %[[C:.*]] = linalg.matmul indexing_maps = [#[[$MA]], #[[$MB]], #[[$MC]]] ins(%[[A]], %[[B_TRANSP]] : tensor<?x?xf32>, tensor<?x?xf32>) outs(%[[C_ZERO]] : tensor<?x?xf32>) -> tensor<?x?xf32>
// CHECK: return %[[C]] : tensor<?x?xf32>
// CHECK: }
func.func @matmul_dynamic(%A: tensor<?x?xf32>, %B: tensor<?x?xf32>) -> (tensor<?x?xf32>) {
@@ -69,10 +83,10 @@ func.func @matmul_dynamic(%A: tensor<?x?xf32>, %B: tensor<?x?xf32>) -> (tensor<?
// CHECK: %[[C_ZERO:.*]] = linalg.fill ins(%[[C0_F32]] : f32) outs(%[[C_INIT]] : tensor<?x16xf32>) -> tensor<?x16xf32>
// TRANSPOSE-A: %[[A_TRANSP_INIT:.*]] = tensor.empty(%[[A_DIM0]]) : tensor<8x?xf32>
// TRANSPOSE-A: %[[A_TRANSP:.*]] = linalg.transpose ins(%[[A]] : tensor<?x8xf32>) outs(%[[A_TRANSP_INIT]] : tensor<8x?xf32>) permutation = [1, 0]
-// TRANSPOSE-A: %[[B0:.*]] = linalg.matmul_transpose_a ins(%[[A_TRANSP]], %[[B]] : tensor<8x?xf32>, tensor<8x16xf32>) outs(%[[C_ZERO]] : tensor<?x16xf32>) -> tensor<?x16xf32>
+// TRANSPOSE-A: %[[B0:.*]] = linalg.matmul indexing_maps = [#[[$MA]], #[[$MB]], #[[$MC]]] ins(%[[A_TRANSP]], %[[B]] : tensor<8x?xf32>, tensor<8x16xf32>) outs(%[[C_ZERO]] : tensor<?x16xf32>) -> tensor<?x16xf32>
// TRANSPOSE-B: %[[B_TRANSP_INIT:.*]] = tensor.empty() : tensor<16x8xf32>
// TRANSPOSE-B: %[[B_TRANSP:.*]] = linalg.transpose ins(%[[B]] : tensor<8x16xf32>) outs(%[[B_TRANSP_INIT]] : tensor<16x8xf32>) permutation = [1, 0]
-// TRANSPOSE-B: %[[B0:.*]] = linalg.matmul_transpose_b ins(%[[A]], %[[B_TRANSP]] : tensor<?x8xf32>, tensor<16x8xf32>) outs(%[[C_ZERO]] : tensor<?x16xf32>) -> tensor<?x16xf32>
+// TRANSPOSE-B: %[[B0:.*]] = linalg.matmul indexing_maps = [#[[$MA]], #[[$MB]], #[[$MC]]] ins(%[[A]], %[[B_TRANSP]] : tensor<?x8xf32>, tensor<16x8xf32>) outs(%[[C_ZERO]] : tensor<?x16xf32>) -> tensor<?x16xf32>
// CHECK: return %[[B0]] : tensor<?x16xf32>
// CHECK: }
func.func @matmul_mixed(%A: tensor<?x8xf32>, %B: tensor<8x16xf32>) -> (tensor<?x16xf32>) {
@@ -96,10 +110,10 @@ func.func @matmul_mixed(%A: tensor<?x8xf32>, %B: tensor<8x16xf32>) -> (tensor<?x
// CHECK: %[[C_ZERO:.*]] = linalg.fill ins(%[[C0_F32]] : f32) outs(%[[C_INIT]] : tensor<2x16x16xf32>) -> tensor<2x16x16xf32>
// TRANSPOSE-A: %[[A_TRANSP_INIT:.*]] = tensor.empty() : tensor<2x8x16xf32>
// TRANSPOSE-A: %[[A_TRANSP:.*]] = linalg.transpose ins(%[[A]] : tensor<2x16x8xf32>) outs(%[[A_TRANSP_INIT]] : tensor<2x8x16xf32>) permutation = [0, 2, 1]
-// TRANSPOSE-A: %[[C:.*]] = linalg.batch_matmul_transpose_a ins(%[[A_TRANSP]], %[[B]] : tensor<2x8x16xf32>, tensor<2x8x16xf32>) outs(%[[C_ZERO]] : tensor<2x16x16xf32>) -> tensor<2x16x16xf32>
+// TRANSPOSE-A: %[[C:.*]] = linalg.batch_matmul indexing_maps = [#[[$BMA]], #[[$BMB]], #[[$BMC]]] ins(%[[A_TRANSP]], %[[B]] : tensor<2x8x16xf32>, tensor<2x8x16xf32>) outs(%[[C_ZERO]] : tensor<2x16x16xf32>) -> tensor<2x16x16xf32>
// TRANSPOSE-B: %[[B_TRANSP_INIT:.*]] = tensor.empty() : tensor<2x16x8xf32>
// TRANSPOSE-B: %[[B_TRANSP:.*]] = linalg.transpose ins(%[[B]] : tensor<2x8x16xf32>) outs(%[[B_TRANSP_INIT]] : tensor<2x16x8xf32>) permutation = [0, 2, 1]
-// TRANSPOSE-B: %[[C:.*]] = linalg.batch_matmul_transpose_b ins(%[[A]], %[[B_TRANSP]] : tensor<2x16x8xf32>, tensor<2x16x8xf32>) outs(%[[C_ZERO]] : tensor<2x16x16xf32>) -> tensor<2x16x16xf32>
+// TRANSPOSE-B: %[[C:.*]] = linalg.batch_matmul indexing_maps = [#[[$BMA]], #[[$BMB]], #[[$BMC]]] ins(%[[A]], %[[B_TRANSP]] : tensor<2x16x8xf32>, tensor<2x16x8xf32>) outs(%[[C_ZERO]] : tensor<2x16x16xf32>) -> tensor<2x16x16xf32>
// CHECK: return %[[C]] : tensor<2x16x16xf32>
// CHECK: }
func.func @batch_matmul_static(%A: tensor<2x16x8xf32>, %B: tensor<2x8x16xf32>) -> (tensor<2x16x16xf32>) {
@@ -127,12 +141,12 @@ func.func @batch_matmul_static(%A: tensor<2x16x8xf32>, %B: tensor<2x8x16xf32>) -
// TRANSPOSE-A: %[[A_DIM2:.*]] = tensor.dim %[[A]], %[[C2]] : tensor<?x?x?xf32>
// TRANSPOSE-A: %[[A_TRANSP_INIT:.*]] = tensor.empty(%[[A_DIM0]], %[[A_DIM2]], %[[A_DIM1]]) : tensor<?x?x?xf32>
// TRANSPOSE-A: %[[A_TRANSP:.*]] = linalg.transpose ins(%[[A]] : tensor<?x?x?xf32>) outs(%[[A_TRANSP_INIT]] : tensor<?x?x?xf32>) permutation = [0, 2, 1]
-// TRANSPOSE-A: %[[C:.*]] = linalg.batch_matmul_transpose_a ins(%[[A_TRANSP]], %[[B]] : tensor<?x?x?xf32>, tensor<?x?x?xf32>) outs(%[[C_ZERO]] : tensor<?x?x?xf32>) -> tensor<?x?x?xf32>
+// TRANSPOSE-A: %[[C:.*]] = linalg.batch_matmul indexing_maps = [#[[$BMA]], #[[$BMB]], #[[$BMC]]] ins(%[[A_TRANSP]], %[[B]] : tensor<?x?x?xf32>, tensor<?x?x?xf32>) outs(%[[C_ZERO]] : tensor<?x?x?xf32>) -> tensor<?x?x?xf32>
// TRANSPOSE-B: %[[B_DIM0:.*]] = tensor.dim %[[B]], %[[C0]] : tensor<?x?x?xf32>
// TRANSPOSE-B: %[[B_DIM1:.*]] = tensor.dim %[[B]], %[[C1]] : tensor<?x?x?xf32>
// TRANSPOSE-B: %[[B_TRANSP_INIT:.*]] = tensor.empty(%[[B_DIM0]], %[[B_DIM2]], %[[B_DIM1]]) : tensor<?x?x?xf32>
// TRANSPOSE-B: %[[B_TRANSP:.*]] = linalg.transpose ins(%[[B]] : tensor<?x?x?xf32>) outs(%[[B_TRANSP_INIT]] : tensor<?x?x?xf32>) permutation = [0, 2, 1]
-// TRANSPOSE-B: %[[C:.*]] = linalg.batch_matmul_transpose_b ins(%[[A]], %[[B_TRANSP]] : tensor<?x?x?xf32>, tensor<?x?x?xf32>) outs(%[[C_ZERO]] : tensor<?x?x?xf32>) -> tensor<?x?x?xf32>
+// TRANSPOSE-B: %[[C:.*]] = linalg.batch_matmul indexing_maps = [#[[$BMA]], #[[$BMB]], #[[$BMC]]] ins(%[[A]], %[[B_TRANSP]] : tensor<?x?x?xf32>, tensor<?x?x?xf32>) outs(%[[C_ZERO]] : tensor<?x?x?xf32>) -> tensor<?x?x?xf32>
// CHECK: return %[[C]] : tensor<?x?x?xf32>
// CHECK: }
func.func @batch_matmul_dynamic(%A: tensor<?x?x?xf32>, %B: tensor<?x?x?xf32>) -> (tensor<?x?x?xf32>) {
@@ -161,10 +175,10 @@ func.func @batch_matmul_dynamic(%A: tensor<?x?x?xf32>, %B: tensor<?x?x?xf32>) ->
// CHECK: %[[C_ZERO:.*]] = linalg.fill ins(%[[C0_F32]] : f32) outs(%[[C_INIT]] : tensor<2x?x16xf32>) -> tensor<2x?x16xf32>
// TRANSPOSE-A: %[[A_TRANSP_INIT:.*]] = tensor.empty(%[[A_DIM1]]) : tensor<2x8x?xf32>
// TRANSPOSE-A: %[[A_TRANSP:.*]] = linalg.transpose ins(%[[A]] : tensor<2x?x8xf32>) outs(%[[A_TRANSP_INIT]] : tensor<2x8x?xf32>) permutation = [0, 2, 1]
-// TRANSPOSE-A: %[[B0:.*]] = linalg.batch_matmul_transpose_a ins(%[[A_TRANSP]], %[[B]] : tensor<2x8x?xf32>, tensor<2x8x16xf32>) outs(%[[C_ZERO]] : tensor<2x?x16xf32>) -> tensor<2x?x16xf32>
+// TRANSPOSE-A: %[[B0:.*]] = linalg.batch_matmul indexing_maps = [#[[$BMA]], #[[$BMB]], #[[$BMC]]] ins(%[[A_TRANSP]], %[[B]] : tensor<2x8x?xf32>, tensor<2x8x16xf32>) outs(%[[C_ZERO]] : tensor<2x?x16xf32>) -> tensor<2x?x16xf32>
// TRANSPOSE-B: %[[B_TRANSP_INIT:.*]] = tensor.empty() : tensor<2x16x8xf32>
// TRANSPOSE-B: %[[B_TRANSP:.*]] = linalg.transpose ins(%[[B]] : tensor<2x8x16xf32>) outs(%[[B_TRANSP_INIT]] : tensor<2x16x8xf32>) permutation = [0, 2, 1]
-// TRANSPOSE-B: %[[B0:.*]] = linalg.batch_matmul_transpose_b ins(%[[A]], %[[B_TRANSP]] : tensor<2x?x8xf32>, tensor<2x16x8xf32>) outs(%[[C_ZERO]] : tensor<2x?x16xf32>) -> tensor<2x?x16xf32>
+// TRANSPOSE-B: %[[B0:.*]] = linalg.batch_matmul indexing_maps = [#[[$BMA]], #[[$BMB]], #[[$BMC]]] ins(%[[A]], %[[B_TRANSP]] : tensor<2x?x8xf32>, tensor<2x16x8xf32>) outs(%[[C_ZERO]] : tensor<2x?x16xf32>) -> tensor<2x?x16xf32>
// CHECK: return %[[B0]] : tensor<2x?x16xf32>
// CHECK: }
func.func @batch_matmul_mixed(%A: tensor<2x?x8xf32>, %B: tensor<2x8x16xf32>) -> (tensor<2x?x16xf32>) {
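
The batch cases follow the same pattern: linalg.batch_matmul_transpose_a/_b are replaced by linalg.batch_matmul with explicit indexing_maps. A sketch of the transpose-B form using the shapes from @batch_matmul_static above (map aliases are illustrative):

#bm_a = affine_map<(b, m, n, k) -> (b, m, k)>
#bm_b = affine_map<(b, m, n, k) -> (b, n, k)>  // RHS carried transposed
#bm_c = affine_map<(b, m, n, k) -> (b, m, n)>

func.func @batch_matmul_transpose_b_sketch(
    %A: tensor<2x16x8xf32>, %B: tensor<2x16x8xf32>, %C: tensor<2x16x16xf32>) -> tensor<2x16x16xf32> {
  %0 = linalg.batch_matmul indexing_maps = [#bm_a, #bm_b, #bm_c]
         ins(%A, %B : tensor<2x16x8xf32>, tensor<2x16x8xf32>)
         outs(%C : tensor<2x16x16xf32>) -> tensor<2x16x16xf32>
  return %0 : tensor<2x16x16xf32>
}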
diff --git a/mlir/test/Dialect/Linalg/vectorization/linalg-ops-with-patterns.mlir b/mlir/test/Dialect/Linalg/vectorization/linalg-ops-with-patterns.mlir
index 4eeae4c..25cbceb 100644
--- a/mlir/test/Dialect/Linalg/vectorization/linalg-ops-with-patterns.mlir
+++ b/mlir/test/Dialect/Linalg/vectorization/linalg-ops-with-patterns.mlir
@@ -61,6 +61,83 @@ module attributes {transform.with_named_sequence} {
// -----
+// CHECK-LABEL: @float_mixed_precision_matmul
+// CHECK-COUNT-3: vector.transfer_read
+// CHECK-NOT: arith.extf
+// CHECK: vector.contract {{.*}} : vector<1584x1584xbf16>, vector<1584x1584xbf16> into vector<1584x1584xf32>
+func.func @float_mixed_precision_matmul(%A: memref<1584x1584xbf16>, %B: memref<1584x1584xbf16>, %C: memref<1584x1584xf32>) {
+ linalg.matmul ins(%A, %B: memref<1584x1584xbf16>, memref<1584x1584xbf16>)
+ outs(%C: memref<1584x1584xf32>)
+ return
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+ %2 = transform.structured.vectorize_children_and_apply_patterns %1 { fold_type_extensions_into_contract } : (!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
+
+// -----
+
+// CHECK-LABEL: func @vectorization_test_2
+func.func @vectorization_test_2(%A: memref<8x16xf32>, %B: memref<16x32xf32>,
+ %C: memref<8x32xf32>) {
+ // CHECK: arith.mulf %{{.*}}, %{{.*}} : vector<8x32x16xf32>
+ // CHECK: vector.multi_reduction <add>, %{{.*}}, {{.*}} [2] : vector<8x32x16xf32> to vector<8x32xf32>
+ linalg.matmul
+ ins(%A, %B: memref<8x16xf32>, memref<16x32xf32>)
+ outs(%C: memref<8x32xf32>)
+ return
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+ %2 = transform.structured.vectorize_children_and_apply_patterns %1 { disable_multi_reduction_to_contract_patterns } : (!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
+
+// -----
+
+// CHECK-LABEL: func @matmul_tensors
+// CHECK-SAME: (%[[ARG0:.*]]: tensor<8x4xf32>, %[[ARG1:.*]]: tensor<4x12xf32>,
+// CHECK-SAME: %[[ARG2:.*]]: tensor<8x12xf32>) -> tensor<8x12xf32>
+func.func @matmul_tensors(
+ %arg0: tensor<8x4xf32>, %arg1: tensor<4x12xf32>, %arg2: tensor<8x12xf32>)
+ -> tensor<8x12xf32> {
+ // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
+ // CHECK-DAG: %[[V0:.*]] = vector.transfer_read %[[ARG0]][%[[C0]], %[[C0]]], {{.*}} : tensor<8x4xf32>, vector<8x12x4xf32>
+ // CHECK-DAG: %[[V1:.*]] = vector.transfer_read %[[ARG1]][%[[C0]], %[[C0]]], {{.*}} : tensor<4x12xf32>, vector<8x12x4xf32>
+ // CHECK-DAG: %[[V2:.*]] = vector.transfer_read %[[ARG2]][%[[C0]], %[[C0]]], {{.*}} : tensor<8x12xf32>, vector<8x12xf32>
+ //
+  // The linalg.matmul lowering gets expanded to a 3-D reduction; canonicalization later
+  // converts it to a 2-D contract.
+ // CHECK: %[[MUL:.*]] = arith.mulf %[[V0]], %[[V1]] : vector<8x12x4xf32>
+ // CHECK: %[[R:.*]] = vector.multi_reduction <add>, %[[MUL]], %[[V2]] [2] : vector<8x12x4xf32> to vector<8x12xf32>
+ // CHECK: %[[W:.*]] = vector.transfer_write %[[R]], %[[ARG2]][%[[C0]], %[[C0]]] {in_bounds = [true, true]} : vector<8x12xf32>, tensor<8x12xf32>
+ %0 = linalg.matmul ins(%arg0, %arg1: tensor<8x4xf32>, tensor<4x12xf32>)
+ outs(%arg2: tensor<8x12xf32>)
+ -> tensor<8x12xf32>
+ // CHECK: return %[[W]] : tensor<8x12xf32>
+ return %0 : tensor<8x12xf32>
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+ %2 = transform.structured.vectorize_children_and_apply_patterns %1 { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
+
+// -----
+
// CHECK-LABEL: contraction_batch_matmul
func.func @contraction_batch_matmul(%A: memref<1584x1584x1584xf32>, %B: memref<1584x1584x1584xf32>, %C: memref<1584x1584x1584xf32>) {
// CHECK: arith.mulf %{{.*}}, %{{.*}} : vector<1584x1584x1584x1584xf32>
@@ -115,6 +192,265 @@ module attributes {transform.with_named_sequence} {
// -----
+// CHECK-LABEL: @float_mixed_precision_matmul_as_contract
+// CHECK-COUNT-3: vector.transfer_read
+// CHECK-NOT: arith.extf
+// CHECK: vector.contract {{.*}} : vector<24x12xbf16>, vector<12x25xbf16> into vector<24x25xf32>
+// CHECK: vector.transfer_write
+func.func @float_mixed_precision_matmul_as_contract(%A: tensor<24x12xbf16>,
+ %B: tensor<12x25xbf16>,
+ %C: tensor<24x25xf32>) -> tensor<24x25xf32> {
+ %0 = linalg.contract
+ indexing_maps = [affine_map<(m, n, k) -> (m, k)>,
+ affine_map<(m, n, k) -> (k, n)>,
+ affine_map<(m, n, k) -> (m, n)>]
+ ins(%A, %B : tensor<24x12xbf16>, tensor<12x25xbf16>)
+ outs(%C : tensor<24x25xf32>) -> tensor<24x25xf32>
+ func.return %0 : tensor<24x25xf32>
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["linalg.contract"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+ %2 = transform.structured.vectorize_children_and_apply_patterns %1 { fold_type_extensions_into_contract } : (!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
+
+// -----
+
+// CHECK-LABEL: func @test_vectorize_fill
+func.func @test_vectorize_fill(%A : memref<8x16xf32>, %arg0 : f32) {
+ // CHECK: %[[V:.*]] = vector.broadcast {{.*}} : f32 to vector<8x16xf32>
+ // CHECK: vector.transfer_write %[[V]], {{.*}} : vector<8x16xf32>, memref<8x16xf32>
+ linalg.fill ins(%arg0 : f32) outs(%A : memref<8x16xf32>)
+ return
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+ %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
+
+// -----
+
+// CHECK-LABEL: func @test_vectorize_fill_0d
+func.func @test_vectorize_fill_0d(%A : memref<f32>, %arg0 : f32) {
+ // CHECK-SAME: (%[[M:.*]]: memref<f32>, %[[val:.*]]: f32)
+ // CHECK: %[[VEC:.*]] = vector.broadcast %[[val]] : f32 to vector<f32>
+ // CHECK: vector.transfer_write %[[VEC]], %[[M]][] : vector<f32>, memref<f32>
+ linalg.fill ins(%arg0 : f32) outs(%A : memref<f32>)
+ return
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+ %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
+
+// -----
+
+// CHECK-LABEL: func @test_vectorize_copy
+func.func @test_vectorize_copy(%A : memref<8x16xf32>, %B : memref<8x16xf32>) {
+ // CHECK: %[[V:.*]] = vector.transfer_read {{.*}} : memref<8x16xf32>, vector<8x16xf32>
+ // CHECK: vector.transfer_write %[[V]], {{.*}} : vector<8x16xf32>, memref<8x16xf32>
+ memref.copy %A, %B : memref<8x16xf32> to memref<8x16xf32>
+ return
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["memref.copy"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+ %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
+
+// -----
+
+// CHECK-LABEL: func @test_vectorize_copy_0d
+func.func @test_vectorize_copy_0d(%A : memref<f32>, %B : memref<f32>) {
+ // CHECK-SAME: (%[[A:.*]]: memref<f32>, %[[B:.*]]: memref<f32>)
+ // CHECK: %[[V:.*]] = vector.transfer_read %[[A]][]{{.*}} : memref<f32>, vector<f32>
+ // CHECK: %[[val:.*]] = vector.extract %[[V]][] : f32 from vector<f32>
+ // CHECK: %[[VV:.*]] = vector.broadcast %[[val]] : f32 to vector<f32>
+ // CHECK: vector.transfer_write %[[VV]], %[[B]][] : vector<f32>, memref<f32>
+ memref.copy %A, %B : memref<f32> to memref<f32>
+ return
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["memref.copy"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+ %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
+
+// -----
+
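+// Vectorization does not apply to complex element types, so no vector ops are
+// expected in the output.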
+// CHECK-LABEL: func @test_vectorize_copy_complex
+// CHECK-NOT: vector<
+func.func @test_vectorize_copy_complex(%A : memref<8x16xcomplex<f32>>, %B : memref<8x16xcomplex<f32>>) {
+ memref.copy %A, %B : memref<8x16xcomplex<f32>> to memref<8x16xcomplex<f32>>
+ return
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["memref.copy"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+ %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
+
+// -----
+
+// Input identical to the test in vectorization.mlir. The output is different -
+// vector sizes are inferred (rather than user-specified) and hence _no_
+// masking is used.
+
+func.func @test_vectorize_pack(%arg0: tensor<32x8x16xf32>, %arg1: tensor<4x1x32x16x2xf32>) -> tensor<4x1x32x16x2xf32> {
+ %pack = linalg.pack %arg0 outer_dims_perm = [1, 2, 0] inner_dims_pos = [2, 1] inner_tiles = [16, 2] into %arg1 : tensor<32x8x16xf32> -> tensor<4x1x32x16x2xf32>
+ return %pack : tensor<4x1x32x16x2xf32>
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["linalg.pack"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+ %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+ %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
+
+// CHECK-LABEL: func.func @test_vectorize_pack(
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x8x16xf32>,
+// CHECK-SAME: %[[VAL_1:.*]]: tensor<4x1x32x16x2xf32>) -> tensor<4x1x32x16x2xf32> {
+// CHECK: %[[VAL_2:.*]] = arith.constant 0.000000e+00 : f32
+// CHECK: %[[VAL_3:.*]] = arith.constant 0 : index
+// CHECK: %[[VAL_4:.*]] = vector.transfer_read %[[VAL_0]]{{\[}}%[[VAL_3]], %[[VAL_3]], %[[VAL_3]]], %[[VAL_2]] {in_bounds = [true, true, true]} : tensor<32x8x16xf32>, vector<32x8x16xf32>
+// CHECK: %[[VAL_5:.*]] = vector.shape_cast %[[VAL_4]] : vector<32x8x16xf32> to vector<32x4x2x1x16xf32>
+// CHECK: %[[VAL_6:.*]] = vector.transpose %[[VAL_5]], [1, 3, 0, 4, 2] : vector<32x4x2x1x16xf32> to vector<4x1x32x16x2xf32>
+// CHECK: %[[VAL_7:.*]] = tensor.empty() : tensor<4x1x32x16x2xf32>
+// CHECK: %[[VAL_8:.*]] = vector.transfer_write %[[VAL_6]], %[[VAL_7]]{{\[}}%[[VAL_3]], %[[VAL_3]], %[[VAL_3]], %[[VAL_3]], %[[VAL_3]]] {in_bounds = [true, true, true, true, true]} : vector<4x1x32x16x2xf32>, tensor<4x1x32x16x2xf32>
+// CHECK: return %[[VAL_8]] : tensor<4x1x32x16x2xf32>
+
+// -----
+
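+// As above, vector sizes are inferred rather than user-specified, so no
+// masking is used; the padded dims only show up as out-of-bounds dims on the
+// transfer_read.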
+func.func @test_vectorize_padded_pack(%arg0: tensor<32x7x15xf32>, %arg1: tensor<32x4x1x16x2xf32>) -> tensor<32x4x1x16x2xf32> {
+ %pad = arith.constant 0.000000e+00 : f32
+ %pack = linalg.pack %arg0 padding_value(%pad : f32) inner_dims_pos = [2, 1] inner_tiles = [16, 2] into %arg1 : tensor<32x7x15xf32> -> tensor<32x4x1x16x2xf32>
+ return %pack : tensor<32x4x1x16x2xf32>
+}
+
+// CHECK-LABEL: func.func @test_vectorize_padded_pack(
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x7x15xf32>,
+// CHECK-SAME: %[[VAL_1:.*]]: tensor<32x4x1x16x2xf32>) -> tensor<32x4x1x16x2xf32> {
+// CHECK: %[[VAL_2:.*]] = arith.constant 0.000000e+00 : f32
+// CHECK: %[[VAL_3:.*]] = arith.constant 0 : index
+// CHECK: %[[VAL_4:.*]] = vector.transfer_read %[[VAL_0]]{{\[}}%[[VAL_3]], %[[VAL_3]], %[[VAL_3]]], %[[VAL_2]] {in_bounds = [true, false, false]} : tensor<32x7x15xf32>, vector<32x8x16xf32>
+// CHECK: %[[VAL_5:.*]] = vector.shape_cast %[[VAL_4]] : vector<32x8x16xf32> to vector<32x4x2x1x16xf32>
+// CHECK: %[[VAL_6:.*]] = vector.transpose %[[VAL_5]], [0, 1, 3, 4, 2] : vector<32x4x2x1x16xf32> to vector<32x4x1x16x2xf32>
+// CHECK: %[[VAL_7:.*]] = tensor.empty() : tensor<32x4x1x16x2xf32>
+// CHECK: %[[VAL_8:.*]] = vector.transfer_write %[[VAL_6]], %[[VAL_7]]{{\[}}%[[VAL_3]], %[[VAL_3]], %[[VAL_3]], %[[VAL_3]], %[[VAL_3]]] {in_bounds = [true, true, true, true, true]} : vector<32x4x1x16x2xf32>, tensor<32x4x1x16x2xf32>
+// CHECK: return %[[VAL_8]] : tensor<32x4x1x16x2xf32>
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["linalg.pack"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+ %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+ %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
+
+// -----
+
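+// Element-wise linalg.map lowers to vector transfer_reads of both operands
+// followed by a vector arith.addf.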
+func.func @vectorize_map(%arg0: memref<64xf32>,
+ %arg1: memref<64xf32>, %arg2: memref<64xf32>) {
+ linalg.map ins(%arg0, %arg1 : memref<64xf32>, memref<64xf32>)
+ outs(%arg2 : memref<64xf32>)
+ (%in: f32, %in_0: f32) {
+ %0 = arith.addf %in, %in_0 : f32
+ linalg.yield %0 : f32
+ }
+ return
+}
+// CHECK-LABEL: func @vectorize_map
+// CHECK: %[[LHS:.*]] = vector.transfer_read
+// CHECK-NEXT: %[[RHS:.*]] = vector.transfer_read
+// CHECK-NEXT: arith.addf %[[LHS]], %[[RHS]] : vector<64xf32>
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["linalg.map"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+ %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
+
+// -----
+
+func.func @vectorize_transpose(%arg0: memref<16x32x64xf32>,
+ %arg1: memref<32x64x16xf32>) {
+ linalg.transpose ins(%arg0 : memref<16x32x64xf32>)
+ outs(%arg1 : memref<32x64x16xf32>) permutation = [1, 2, 0]
+ return
+}
+// CHECK-LABEL: func @vectorize_transpose
+// CHECK: vector.transpose
+// CHECK-SAME: [1, 2, 0] : vector<16x32x64xf32> to vector<32x64x16xf32>
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["linalg.transpose"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+ %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
+
+// -----
+
+func.func @vectorize_reduce(%arg0: memref<16x32x64xf32>,
+ %arg1: memref<16x64xf32>) {
+ linalg.reduce ins(%arg0 : memref<16x32x64xf32>)
+ outs(%arg1 : memref<16x64xf32>) dimensions = [1]
+ (%in: f32, %init: f32) {
+ %0 = arith.addf %in, %init : f32
+ linalg.yield %0 : f32
+ }
+ return
+}
+// CHECK-LABEL: func @vectorize_reduce
+// CHECK: vector.multi_reduction <add>
+// CHECK-SAME: : vector<16x32x64xf32> to vector<16x64xf32>
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["linalg.reduce"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+ %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
+
+// -----
+
#matmul_trait = {
indexing_maps = [
affine_map<(m, n, k) -> (m, k)>,
@@ -306,27 +642,6 @@ module attributes {transform.with_named_sequence} {
// -----
-// CHECK-LABEL: func @vectorization_test_2
-func.func @vectorization_test_2(%A: memref<8x16xf32>, %B: memref<16x32xf32>,
- %C: memref<8x32xf32>) {
- // CHECK: arith.mulf %{{.*}}, %{{.*}} : vector<8x32x16xf32>
- // CHECK: vector.multi_reduction <add>, %{{.*}}, {{.*}} [2] : vector<8x32x16xf32> to vector<8x32xf32>
- linalg.matmul
- ins(%A, %B: memref<8x16xf32>, memref<16x32xf32>)
- outs(%C: memref<8x32xf32>)
- return
-}
-
-module attributes {transform.with_named_sequence} {
- transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
- %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
- %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
- %2 = transform.structured.vectorize_children_and_apply_patterns %1 { disable_multi_reduction_to_contract_patterns } : (!transform.any_op) -> !transform.any_op
- transform.yield
- }
-}
-
-// -----
// CHECK-LABEL: func @test_vectorize_scalar_input
func.func @test_vectorize_scalar_input(%A : memref<8x16xf32>, %arg0 : f32) {
@@ -427,104 +742,6 @@ module attributes {transform.with_named_sequence} {
// -----
-// CHECK-LABEL: func @test_vectorize_fill
-func.func @test_vectorize_fill(%A : memref<8x16xf32>, %arg0 : f32) {
- // CHECK: %[[V:.*]] = vector.broadcast {{.*}} : f32 to vector<8x16xf32>
- // CHECK: vector.transfer_write %[[V]], {{.*}} : vector<8x16xf32>, memref<8x16xf32>
- linalg.fill ins(%arg0 : f32) outs(%A : memref<8x16xf32>)
- return
-}
-
-module attributes {transform.with_named_sequence} {
- transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
- %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
- %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
- %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
- transform.yield
- }
-}
-
-// -----
-
-// CHECK-LABEL: func @test_vectorize_fill
-func.func @test_vectorize_fill_0d(%A : memref<f32>, %arg0 : f32) {
- // CHECK-SAME: (%[[M:.*]]: memref<f32>, %[[val:.*]]: f32)
- // CHECK: %[[VEC:.*]] = vector.broadcast %[[val]] : f32 to vector<f32>
- // CHECK: vector.transfer_write %[[VEC]], %[[M]][] : vector<f32>, memref<f32>
- linalg.fill ins(%arg0 : f32) outs(%A : memref<f32>)
- return
-}
-
-module attributes {transform.with_named_sequence} {
- transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
- %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
- %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
- %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
- transform.yield
- }
-}
-
-// -----
-
-// CHECK-LABEL: func @test_vectorize_copy
-func.func @test_vectorize_copy(%A : memref<8x16xf32>, %B : memref<8x16xf32>) {
- // CHECK: %[[V:.*]] = vector.transfer_read {{.*}} : memref<8x16xf32>, vector<8x16xf32>
- // CHECK: vector.transfer_write %[[V]], {{.*}} : vector<8x16xf32>, memref<8x16xf32>
- memref.copy %A, %B : memref<8x16xf32> to memref<8x16xf32>
- return
-}
-
-module attributes {transform.with_named_sequence} {
- transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
- %0 = transform.structured.match ops{["memref.copy"]} in %arg1 : (!transform.any_op) -> !transform.any_op
- %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
- %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
- transform.yield
- }
-}
-
-// -----
-
-// CHECK-LABEL: func @test_vectorize_copy_0d
-func.func @test_vectorize_copy_0d(%A : memref<f32>, %B : memref<f32>) {
- // CHECK-SAME: (%[[A:.*]]: memref<f32>, %[[B:.*]]: memref<f32>)
- // CHECK: %[[V:.*]] = vector.transfer_read %[[A]][]{{.*}} : memref<f32>, vector<f32>
- // CHECK: %[[val:.*]] = vector.extract %[[V]][] : f32 from vector<f32>
- // CHECK: %[[VV:.*]] = vector.broadcast %[[val]] : f32 to vector<f32>
- // CHECK: vector.transfer_write %[[VV]], %[[B]][] : vector<f32>, memref<f32>
- memref.copy %A, %B : memref<f32> to memref<f32>
- return
-}
-
-module attributes {transform.with_named_sequence} {
- transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
- %0 = transform.structured.match ops{["memref.copy"]} in %arg1 : (!transform.any_op) -> !transform.any_op
- %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
- %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
- transform.yield
- }
-}
-
-// -----
-
-// CHECK-LABEL: func @test_vectorize_copy_complex
-// CHECK-NOT: vector<
-func.func @test_vectorize_copy_complex(%A : memref<8x16xcomplex<f32>>, %B : memref<8x16xcomplex<f32>>) {
- memref.copy %A, %B : memref<8x16xcomplex<f32>> to memref<8x16xcomplex<f32>>
- return
-}
-
-module attributes {transform.with_named_sequence} {
- transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
- %0 = transform.structured.match ops{["memref.copy"]} in %arg1 : (!transform.any_op) -> !transform.any_op
- %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
- %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
- transform.yield
- }
-}
-
-// -----
-
// CHECK-LABEL: func @test_vectorize_trailing_index
// CHECK-SAME: (%[[ARG0:.*]]: memref<1x2x4x8xindex>)
func.func @test_vectorize_trailing_index(%arg0: memref<1x2x4x8xindex>) {
@@ -855,40 +1072,6 @@ module attributes {transform.with_named_sequence} {
// -----
-// CHECK-LABEL: func @matmul_tensors
-// CHECK-SAME: (%[[ARG0:.*]]: tensor<8x4xf32>, %[[ARG1:.*]]: tensor<4x12xf32>,
-// CHECK-SAME: %[[ARG2:.*]]: tensor<8x12xf32>) -> tensor<8x12xf32>
-func.func @matmul_tensors(
- %arg0: tensor<8x4xf32>, %arg1: tensor<4x12xf32>, %arg2: tensor<8x12xf32>)
- -> tensor<8x12xf32> {
- // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
- // CHECK-DAG: %[[V0:.*]] = vector.transfer_read %[[ARG0]][%[[C0]], %[[C0]]], {{.*}} : tensor<8x4xf32>, vector<8x12x4xf32>
- // CHECK-DAG: %[[V1:.*]] = vector.transfer_read %[[ARG1]][%[[C0]], %[[C0]]], {{.*}} : tensor<4x12xf32>, vector<8x12x4xf32>
- // CHECK-DAG: %[[V2:.*]] = vector.transfer_read %[[ARG2]][%[[C0]], %[[C0]]], {{.*}} : tensor<8x12xf32>, vector<8x12xf32>
- //
- // linalg matmul lowers gets expanded to a 3D reduction, canonicalization later
- // convert it to a 2D contract.
- // CHECK: %[[MUL:.*]] = arith.mulf %[[V0]], %[[V1]] : vector<8x12x4xf32>
- // CHECK: %[[R:.*]] = vector.multi_reduction <add>, %[[MUL]], %[[V2]] [2] : vector<8x12x4xf32> to vector<8x12xf32>
- // CHECK: %[[W:.*]] = vector.transfer_write %[[R]], %[[ARG2]][%[[C0]], %[[C0]]] {in_bounds = [true, true]} : vector<8x12xf32>, tensor<8x12xf32>
- %0 = linalg.matmul ins(%arg0, %arg1: tensor<8x4xf32>, tensor<4x12xf32>)
- outs(%arg2: tensor<8x12xf32>)
- -> tensor<8x12xf32>
- // CHECK: return %[[W]] : tensor<8x12xf32>
- return %0 : tensor<8x12xf32>
-}
-
-module attributes {transform.with_named_sequence} {
- transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
- %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
- %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
- %2 = transform.structured.vectorize_children_and_apply_patterns %1 { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op
- transform.yield
- }
-}
-
-// -----
-
// CHECK-LABEL: func @sum_exp
func.func @sum_exp(%input: tensor<4x16x8xf32>, %output: tensor<4x16xf32>)
-> tensor<4x16xf32>
@@ -914,7 +1097,6 @@ func.func @sum_exp(%input: tensor<4x16x8xf32>, %output: tensor<4x16xf32>)
return %0 : tensor<4x16xf32>
}
-
module attributes {transform.with_named_sequence} {
transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
%3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
@@ -993,7 +1175,6 @@ func.func @red_maximumf_2d(%arg0: tensor<4x4xf32>) -> tensor<4xf32> {
return %red : tensor<4xf32>
}
-
module attributes {transform.with_named_sequence} {
transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
%3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
@@ -1428,78 +1609,6 @@ module attributes {transform.with_named_sequence} {
// -----
-func.func @vectorize_map(%arg0: memref<64xf32>,
- %arg1: memref<64xf32>, %arg2: memref<64xf32>) {
- linalg.map ins(%arg0, %arg1 : memref<64xf32>, memref<64xf32>)
- outs(%arg2 : memref<64xf32>)
- (%in: f32, %in_0: f32) {
- %0 = arith.addf %in, %in_0 : f32
- linalg.yield %0 : f32
- }
- return
-}
-// CHECK-LABEL: func @vectorize_map
-// CHECK: %[[LHS:.*]] = vector.transfer_read
-// CHECK-NEXT: %[[RHS:.*]] = vector.transfer_read
-// CHECK-NEXT: arith.addf %[[LHS]], %[[RHS]] : vector<64xf32>
-
-module attributes {transform.with_named_sequence} {
- transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
- %0 = transform.structured.match ops{["linalg.map"]} in %arg1 : (!transform.any_op) -> !transform.any_op
- %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
- %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
- transform.yield
- }
-}
-
-// -----
-
-func.func @vectorize_transpose(%arg0: memref<16x32x64xf32>,
- %arg1: memref<32x64x16xf32>) {
- linalg.transpose ins(%arg0 : memref<16x32x64xf32>)
- outs(%arg1 : memref<32x64x16xf32>) permutation = [1, 2, 0]
- return
-}
-// CHECK-LABEL: func @vectorize_transpose
-// CHECK: vector.transpose
-// CHECK-SAME: [1, 2, 0] : vector<16x32x64xf32> to vector<32x64x16xf32>
-
-module attributes {transform.with_named_sequence} {
- transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
- %0 = transform.structured.match ops{["linalg.transpose"]} in %arg1 : (!transform.any_op) -> !transform.any_op
- %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
- %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
- transform.yield
- }
-}
-
-// -----
-
-func.func @vectorize_reduce(%arg0: memref<16x32x64xf32>,
- %arg1: memref<16x64xf32>) {
- linalg.reduce ins(%arg0 : memref<16x32x64xf32>)
- outs(%arg1 : memref<16x64xf32>) dimensions = [1]
- (%in: f32, %init: f32) {
- %0 = arith.addf %in, %init : f32
- linalg.yield %0 : f32
- }
- return
-}
-// CHECK-LABEL: func @vectorize_reduce
-// CHECK: vector.multi_reduction <add>
-// CHECK-SAME: : vector<16x32x64xf32> to vector<16x64xf32>
-
-module attributes {transform.with_named_sequence} {
- transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
- %0 = transform.structured.match ops{["linalg.reduce"]} in %arg1 : (!transform.any_op) -> !transform.any_op
- %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
- %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
- transform.yield
- }
-}
-
-// -----
-
// This is a regression test. This IR cannot be vectorized, but
// structured.vectorize_children_and_apply_patterns should nevertheless succeed.
@@ -1715,65 +1824,77 @@ module attributes {transform.with_named_sequence} {
// -----
-// Input identical as the test in vectorization.mlir. Output is different -
-// vector sizes are inferred (rather than user-specified) and hence _no_
-// masking was used.
-
-func.func @test_vectorize_pack(%arg0: tensor<32x8x16xf32>, %arg1: tensor<4x1x32x16x2xf32>) -> tensor<4x1x32x16x2xf32> {
- %pack = linalg.pack %arg0 outer_dims_perm = [1, 2, 0] inner_dims_pos = [2, 1] inner_tiles = [16, 2] into %arg1 : tensor<32x8x16xf32> -> tensor<4x1x32x16x2xf32>
- return %pack : tensor<4x1x32x16x2xf32>
+// CHECK-LABEL: func @float_mixed_precision_matmul_as_generic
+// CHECK-COUNT-3: vector.transfer_read
+// CHECK-NOT: arith.extf
+// CHECK: vector.contract {{.*}} : vector<8x16xbf16>, vector<16x32xbf16> into vector<8x32xf32>
+// CHECK: vector.transfer_write
+func.func @float_mixed_precision_matmul_as_generic(%A: memref<8x16xbf16>, %B: memref<16x32xbf16>,
+ %C: memref<8x32xf32>) {
+ linalg.generic {
+ indexing_maps = [
+ affine_map<(m, n, k) -> (m, k)>,
+ affine_map<(m, n, k) -> (k, n)>,
+ affine_map<(m, n, k) -> (m, n)>
+ ],
+ iterator_types = ["parallel", "parallel", "reduction"]
+ }
+ ins(%A, %B : memref<8x16xbf16>, memref<16x32xbf16>)
+ outs(%C : memref<8x32xf32>) {
+ ^bb(%in: bf16, %in_0: bf16, %c: f32) :
+ %a = arith.extf %in : bf16 to f32
+ %b = arith.extf %in_0 : bf16 to f32
+ %d = arith.mulf %a, %b: f32
+ %e = arith.addf %c, %d: f32
+ linalg.yield %e : f32
+ }
+ return
}
module attributes {transform.with_named_sequence} {
- transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
- %0 = transform.structured.match ops{["linalg.pack"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
%1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
- %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+ %2 = transform.structured.vectorize_children_and_apply_patterns %1 { fold_type_extensions_into_contract } : (!transform.any_op) -> !transform.any_op
transform.yield
}
}
-// CHECK-LABEL: func.func @test_vectorize_pack(
-// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x8x16xf32>,
-// CHECK-SAME: %[[VAL_1:.*]]: tensor<4x1x32x16x2xf32>) -> tensor<4x1x32x16x2xf32> {
-// CHECK: %[[VAL_2:.*]] = arith.constant 0.000000e+00 : f32
-// CHECK: %[[VAL_3:.*]] = arith.constant 0 : index
-// CHECK: %[[VAL_4:.*]] = vector.transfer_read %[[VAL_0]]{{\[}}%[[VAL_3]], %[[VAL_3]], %[[VAL_3]]], %[[VAL_2]] {in_bounds = [true, true, true]} : tensor<32x8x16xf32>, vector<32x8x16xf32>
-// CHECK: %[[VAL_5:.*]] = vector.shape_cast %[[VAL_4]] : vector<32x8x16xf32> to vector<32x4x2x1x16xf32>
-// CHECK: %[[VAL_6:.*]] = vector.transpose %[[VAL_5]], [1, 3, 0, 4, 2] : vector<32x4x2x1x16xf32> to vector<4x1x32x16x2xf32>
-// CHECK: %[[VAL_7:.*]] = tensor.empty() : tensor<4x1x32x16x2xf32>
-// CHECK: %[[VAL_8:.*]] = vector.transfer_write %[[VAL_6]], %[[VAL_7]]{{\[}}%[[VAL_3]], %[[VAL_3]], %[[VAL_3]], %[[VAL_3]], %[[VAL_3]]] {in_bounds = [true, true, true, true, true]} : vector<4x1x32x16x2xf32>, tensor<4x1x32x16x2xf32>
-// CHECK: return %[[VAL_8]] : tensor<4x1x32x16x2xf32>
-
// -----
-// Input identical as the test in vectorization.mlir. Output is different -
-// vector sizes are inferred (rather than user-specified) and hence _no_
-// masking was used.
-
-func.func @test_vectorize_padded_pack(%arg0: tensor<32x7x15xf32>, %arg1: tensor<32x4x1x16x2xf32>) -> tensor<32x4x1x16x2xf32> {
- %pad = arith.constant 0.000000e+00 : f32
- %pack = linalg.pack %arg0 padding_value(%pad : f32) inner_dims_pos = [2, 1] inner_tiles = [16, 2] into %arg1 : tensor<32x7x15xf32> -> tensor<32x4x1x16x2xf32>
- return %pack : tensor<32x4x1x16x2xf32>
+// CHECK-LABEL: func @integer_mixed_precision_matmul_as_generic
+// CHECK-COUNT-3: vector.transfer_read
+// CHECK-NOT: arith.extsi
+// CHECK: vector.contract {{.*}} : vector<8x16xi8>, vector<16x32xi8> into vector<8x32xi32>
+// CHECK: vector.transfer_write
+func.func @integer_mixed_precision_matmul_as_generic(%A: memref<8x16xi8>, %B: memref<16x32xi8>,
+ %C: memref<8x32xi32>) {
+ linalg.generic {
+ indexing_maps = [
+ affine_map<(m, n, k) -> (m, k)>,
+ affine_map<(m, n, k) -> (k, n)>,
+ affine_map<(m, n, k) -> (m, n)>
+ ],
+ iterator_types = ["parallel", "parallel", "reduction"]
+ }
+ ins(%A, %B : memref<8x16xi8>, memref<16x32xi8>)
+ outs(%C : memref<8x32xi32>) {
+ ^bb(%in: i8, %in_0: i8, %c: i32) :
+ %a = arith.extsi %in : i8 to i32
+ %b = arith.extsi %in_0 : i8 to i32
+ %d = arith.muli %a, %b: i32
+ %e = arith.addi %c, %d: i32
+ linalg.yield %e : i32
+ }
+ return
}
-// CHECK-LABEL: func.func @test_vectorize_padded_pack(
-// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x7x15xf32>,
-// CHECK-SAME: %[[VAL_1:.*]]: tensor<32x4x1x16x2xf32>) -> tensor<32x4x1x16x2xf32> {
-// CHECK: %[[VAL_2:.*]] = arith.constant 0.000000e+00 : f32
-// CHECK: %[[VAL_3:.*]] = arith.constant 0 : index
-// CHECK: %[[VAL_4:.*]] = vector.transfer_read %[[VAL_0]]{{\[}}%[[VAL_3]], %[[VAL_3]], %[[VAL_3]]], %[[VAL_2]] {in_bounds = [true, false, false]} : tensor<32x7x15xf32>, vector<32x8x16xf32>
-// CHECK: %[[VAL_5:.*]] = vector.shape_cast %[[VAL_4]] : vector<32x8x16xf32> to vector<32x4x2x1x16xf32>
-// CHECK: %[[VAL_6:.*]] = vector.transpose %[[VAL_5]], [0, 1, 3, 4, 2] : vector<32x4x2x1x16xf32> to vector<32x4x1x16x2xf32>
-// CHECK: %[[VAL_7:.*]] = tensor.empty() : tensor<32x4x1x16x2xf32>
-// CHECK: %[[VAL_8:.*]] = vector.transfer_write %[[VAL_6]], %[[VAL_7]]{{\[}}%[[VAL_3]], %[[VAL_3]], %[[VAL_3]], %[[VAL_3]], %[[VAL_3]]] {in_bounds = [true, true, true, true, true]} : vector<32x4x1x16x2xf32>, tensor<32x4x1x16x2xf32>
-// CHECK: return %[[VAL_8]] : tensor<32x4x1x16x2xf32>
-
module attributes {transform.with_named_sequence} {
- transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
- %0 = transform.structured.match ops{["linalg.pack"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
%1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
- %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+ %2 = transform.structured.vectorize_children_and_apply_patterns %1 { fold_type_extensions_into_contract } : (!transform.any_op) -> !transform.any_op
transform.yield
}
}
+
diff --git a/mlir/test/Dialect/Vector/canonicalize.mlir b/mlir/test/Dialect/Vector/canonicalize.mlir
index f86fb38..4a7176e 100644
--- a/mlir/test/Dialect/Vector/canonicalize.mlir
+++ b/mlir/test/Dialect/Vector/canonicalize.mlir
@@ -1168,6 +1168,106 @@ func.func @canonicalize_broadcast_shapecast_both_possible(%arg0: vector<1xf32>)
// -----
+// CHECK-LABEL: func @canonicalize_shapecast_broadcast_to_broadcast_prepend_dim
+// CHECK-NOT: vector.shape_cast
+// CHECK: vector.broadcast {{.+}} : vector<2xf32> to vector<32x2xf32>
+func.func @canonicalize_shapecast_broadcast_to_broadcast_prepend_dim(%arg0 : vector<2xf32>) -> vector<32x2xf32> {
+ %0 = vector.shape_cast %arg0 : vector<2xf32> to vector<1x2xf32>
+ %1 = vector.broadcast %0 : vector<1x2xf32> to vector<32x2xf32>
+ return %1 : vector<32x2xf32>
+}
+
+// -----
+
+// CHECK-LABEL: func.func @canonicalize_shapecast_broadcast_to_broadcast_prepend_dim2(
+// CHECK-SAME: %[[ARG0:.*]]: vector<2x1xf32>) -> vector<32x2x1xf32> {
+// CHECK: %[[VAL_0:.*]] = vector.broadcast %[[ARG0]] : vector<2x1xf32> to vector<32x2x1xf32>
+// CHECK: return %[[VAL_0]] : vector<32x2x1xf32>
+// CHECK: }
+func.func @canonicalize_shapecast_broadcast_to_broadcast_prepend_dim2(%arg0 : vector<2x1xf32>) -> vector<32x2x1xf32> {
+ %0 = vector.shape_cast %arg0 : vector<2x1xf32> to vector<1x2x1xf32>
+ %1 = vector.broadcast %0 : vector<1x2x1xf32> to vector<32x2x1xf32>
+ return %1 : vector<32x2x1xf32>
+}
+
+// -----
+
+// CHECK-LABEL: func.func @canonicalize_shapecast_broadcast_to_broadcast_prepend_dim3(
+// CHECK-SAME: %[[ARG0:.*]]: vector<2x1xf32>) -> vector<32x2x4xf32> {
+// CHECK: %[[VAL_0:.*]] = vector.broadcast %[[ARG0]] : vector<2x1xf32> to vector<32x2x4xf32>
+// CHECK: return %[[VAL_0]] : vector<32x2x4xf32>
+// CHECK: }
+func.func @canonicalize_shapecast_broadcast_to_broadcast_prepend_dim3(%arg0 : vector<2x1xf32>) -> vector<32x2x4xf32> {
+ %0 = vector.shape_cast %arg0 : vector<2x1xf32> to vector<1x2x1xf32>
+ %1 = vector.broadcast %0 : vector<1x2x1xf32> to vector<32x2x4xf32>
+ return %1 : vector<32x2x4xf32>
+}
+
+// -----
+
+// CHECK-LABEL: func.func @canonicalize_shapecast_broadcast_to_broadcast_remove_leading_dim(
+// CHECK-SAME: %[[ARG0:.*]]: vector<1x2xf32>) -> vector<32x2xf32> {
+// CHECK: %[[VAL_0:.*]] = vector.broadcast %[[ARG0]] : vector<1x2xf32> to vector<32x2xf32>
+// CHECK: return %[[VAL_0]] : vector<32x2xf32>
+// CHECK: }
+func.func @canonicalize_shapecast_broadcast_to_broadcast_remove_leading_dim(%arg0 : vector<1x2xf32>) -> vector<32x2xf32> {
+ %0 = vector.shape_cast %arg0 : vector<1x2xf32> to vector<2xf32>
+ %1 = vector.broadcast %0 : vector<2xf32> to vector<32x2xf32>
+ return %1 : vector<32x2xf32>
+}
+
+// -----
+
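+// The shape_cast->broadcast folding only fires when the shape_cast merely adds
+// or removes leading unit dims; the cases below must be left unchanged.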
+// CHECK-LABEL: func @negative_canonicalize_shapecast_broadcast_invalid_shape
+// CHECK: vector.shape_cast {{.+}} : vector<64xf32> to vector<4x16xf32>
+// CHECK: vector.broadcast {{.+}} : vector<4x16xf32> to vector<2x4x16xf32>
+func.func @negative_canonicalize_shapecast_broadcast_invalid_shape(%arg0 : vector<64xf32>) -> vector<2x4x16xf32> {
+ %0 = vector.shape_cast %arg0 : vector<64xf32> to vector<4x16xf32>
+ %1 = vector.broadcast %0 : vector<4x16xf32> to vector<2x4x16xf32>
+ return %1 : vector<2x4x16xf32>
+}
+
+// -----
+
+// CHECK-LABEL: func @negative_canonicalize_shapecast_broadcast_invalid_broadcasted_dims
+// CHECK: vector.shape_cast {{.+}} : vector<2x1xf32> to vector<1x2xf32>
+// CHECK: vector.broadcast {{.+}} : vector<1x2xf32> to vector<2x2xf32>
+func.func @negative_canonicalize_shapecast_broadcast_invalid_broadcasted_dims(%arg0 : vector<2x1xf32>) -> vector<2x2xf32> {
+ %0 = vector.shape_cast %arg0 : vector<2x1xf32> to vector<1x2xf32>
+ %1 = vector.broadcast %0 : vector<1x2xf32> to vector<2x2xf32>
+ return %1 : vector<2x2xf32>
+}
+
+// -----
+
+// CHECK-LABEL: func.func @negative_canonicalize_shapecast_broadcast_to_broadcast_append_dim(
+// CHECK-SAME: %[[ARG0:.*]]: vector<2xf32>) -> vector<2x4xf32> {
+// CHECK: %[[VAL_0:.*]] = vector.shape_cast %[[ARG0]] : vector<2xf32> to vector<2x1xf32>
+// CHECK: %[[VAL_1:.*]] = vector.broadcast %[[VAL_0]] : vector<2x1xf32> to vector<2x4xf32>
+// CHECK: return %[[VAL_1]] : vector<2x4xf32>
+// CHECK: }
+func.func @negative_canonicalize_shapecast_broadcast_to_broadcast_append_dim(%arg0 : vector<2xf32>) -> vector<2x4xf32> {
+ %0 = vector.shape_cast %arg0 : vector<2xf32> to vector<2x1xf32>
+ %1 = vector.broadcast %0 : vector<2x1xf32> to vector<2x4xf32>
+ return %1 : vector<2x4xf32>
+}
+
+// -----
+
+// CHECK-LABEL: func.func @negative_canonicalize_shapecast_broadcast_to_broadcast_remove_trailing_dim(
+// CHECK-SAME: %[[ARG0:.*]]: vector<2x1xf32>) -> vector<32x2xf32> {
+// CHECK: %[[VAL_0:.*]] = vector.shape_cast %[[ARG0]] : vector<2x1xf32> to vector<2xf32>
+// CHECK: %[[VAL_1:.*]] = vector.broadcast %[[VAL_0]] : vector<2xf32> to vector<32x2xf32>
+// CHECK: return %[[VAL_1]] : vector<32x2xf32>
+// CHECK: }
+func.func @negative_canonicalize_shapecast_broadcast_to_broadcast_remove_trailing_dim(%arg0 : vector<2x1xf32>) -> vector<32x2xf32> {
+ %0 = vector.shape_cast %arg0 : vector<2x1xf32> to vector<2xf32>
+ %1 = vector.broadcast %0 : vector<2xf32> to vector<32x2xf32>
+ return %1 : vector<32x2xf32>
+}
+
+// -----
+
// CHECK-LABEL: fold_vector_transfer_masks
func.func @fold_vector_transfer_masks(%A: memref<?x?xf32>) -> (vector<4x8xf32>, vector<4x[4]xf32>) {
// CHECK: %[[C0:.+]] = arith.constant 0 : index
diff --git a/mlir/test/Dialect/Vector/invalid.mlir b/mlir/test/Dialect/Vector/invalid.mlir
index c21de56..211e16d 100644
--- a/mlir/test/Dialect/Vector/invalid.mlir
+++ b/mlir/test/Dialect/Vector/invalid.mlir
@@ -1305,6 +1305,26 @@ func.func @store_memref_index_mismatch(%base : memref<?xf32>, %value : vector<16
// -----
+//===----------------------------------------------------------------------===//
+// vector.maskedload
+//===----------------------------------------------------------------------===//
+
+func.func @maskedload_negative_alignment(%base: memref<4xi32>, %mask: vector<32xi1>, %pass: vector<1xi32>, %index: index) {
+ // expected-error@below {{'vector.maskedload' op attribute 'alignment' failed to satisfy constraint: 64-bit signless integer attribute whose value is positive and whose value is a power of two > 0}}
+ %val = vector.maskedload %base[%index], %mask, %pass { alignment = -1 } : memref<4xi32>, vector<32xi1>, vector<1xi32> into vector<1xi32>
+ return
+}
+
+// -----
+
+func.func @maskedload_nonpoweroftwo_alignment(%base: memref<4xi32>, %mask: vector<32xi1>, %pass: vector<1xi32>, %index: index) {
+ // expected-error@below {{'vector.maskedload' op attribute 'alignment' failed to satisfy constraint: 64-bit signless integer attribute whose value is positive and whose value is a power of two > 0}}
+ %val = vector.maskedload %base[%index], %mask, %pass { alignment = 3 } : memref<4xi32>, vector<32xi1>, vector<1xi32> into vector<1xi32>
+ return
+}
+
+// -----
+
func.func @maskedload_base_type_mismatch(%base: memref<?xf64>, %mask: vector<16xi1>, %pass: vector<16xf32>) {
%c0 = arith.constant 0 : index
// expected-error@+1 {{'vector.maskedload' op base and result element type should match}}
@@ -1336,6 +1356,26 @@ func.func @maskedload_memref_mismatch(%base: memref<?xf32>, %mask: vector<16xi1>
// -----
+//===----------------------------------------------------------------------===//
+// vector.maskedstore
+//===----------------------------------------------------------------------===//
+
+func.func @maskedstore_negative_alignment(%base: memref<4xi32>, %mask: vector<32xi1>, %value: vector<1xi32>, %index: index) {
+ // expected-error@below {{'vector.maskedstore' op attribute 'alignment' failed to satisfy constraint: 64-bit signless integer attribute whose value is positive and whose value is a power of two > 0}}
+ vector.maskedstore %base[%index], %mask, %value { alignment = -1 } : memref<4xi32>, vector<32xi1>, vector<1xi32> into vector<1xi32>
+ return
+}
+
+// -----
+
+func.func @maskedstore_nonpoweroftwo_alignment(%base: memref<4xi32>, %mask: vector<32xi1>, %value: vector<1xi32>, %index: index) {
+ // expected-error@below {{'vector.maskedstore' op attribute 'alignment' failed to satisfy constraint: 64-bit signless integer attribute whose value is positive and whose value is a power of two > 0}}
+ vector.maskedstore %base[%index], %mask, %value { alignment = 3 } : memref<4xi32>, vector<32xi1>, vector<1xi32> into vector<1xi32>
+ return
+}
+
+// -----
+
func.func @maskedstore_base_type_mismatch(%base: memref<?xf64>, %mask: vector<16xi1>, %value: vector<16xf32>) {
%c0 = arith.constant 0 : index
// expected-error@+1 {{'vector.maskedstore' op base and valueToStore element type should match}}
@@ -1912,8 +1952,7 @@ func.func @vector_load(%src : memref<?xi8>) {
// -----
-func.func @invalid_load_alignment(%memref: memref<4xi32>) {
- %c0 = arith.constant 0 : index
+func.func @invalid_load_alignment(%memref: memref<4xi32>, %c0: index) {
// expected-error @below {{'vector.load' op attribute 'alignment' failed to satisfy constraint: 64-bit signless integer attribute whose value is positive and whose value is a power of two > 0}}
%val = vector.load %memref[%c0] { alignment = -1 } : memref<4xi32>, vector<4xi32>
return
@@ -1921,6 +1960,14 @@ func.func @invalid_load_alignment(%memref: memref<4xi32>) {
// -----
+func.func @invalid_load_alignment(%memref: memref<4xi32>, %c0: index) {
+ // expected-error @below {{'vector.load' op attribute 'alignment' failed to satisfy constraint: 64-bit signless integer attribute whose value is positive and whose value is a power of two > 0}}
+ %val = vector.load %memref[%c0] { alignment = 3 } : memref<4xi32>, vector<4xi32>
+ return
+}
+
+// -----
+
//===----------------------------------------------------------------------===//
// vector.store
//===----------------------------------------------------------------------===//
@@ -1934,8 +1981,15 @@ func.func @vector_store(%dest : memref<?xi8>, %vec : vector<16x16xi8>) {
// -----
-func.func @invalid_store_alignment(%memref: memref<4xi32>, %val: vector<4xi32>) {
- %c0 = arith.constant 0 : index
+func.func @invalid_store_alignment(%memref: memref<4xi32>, %val: vector<4xi32>, %c0: index) {
+ // expected-error @below {{'vector.store' op attribute 'alignment' failed to satisfy constraint: 64-bit signless integer attribute whose value is positive and whose value is a power of two > 0}}
+ vector.store %val, %memref[%c0] { alignment = -1 } : memref<4xi32>, vector<4xi32>
+ return
+}
+
+// -----
+
+func.func @invalid_store_alignment(%memref: memref<4xi32>, %val: vector<4xi32>, %c0: index) {
// expected-error @below {{'vector.store' op attribute 'alignment' failed to satisfy constraint: 64-bit signless integer attribute whose value is positive and whose value is a power of two > 0}}
vector.store %val, %memref[%c0] { alignment = 3 } : memref<4xi32>, vector<4xi32>
return
diff --git a/mlir/test/Dialect/Vector/vector-sink.mlir b/mlir/test/Dialect/Vector/vector-sink.mlir
index ef881ba..577b06d 100644
--- a/mlir/test/Dialect/Vector/vector-sink.mlir
+++ b/mlir/test/Dialect/Vector/vector-sink.mlir
@@ -40,7 +40,7 @@ func.func @broadcast_scalar_with_bcast_scalable(%arg1: index, %arg2: index) -> v
// CHECK: %[[BCAST:.*]] = vector.broadcast %[[ADD]] : index to vector<1x4xindex>
// CHECK: return %[[BCAST]] : vector<1x4xindex>
func.func @broadcast_scalar_with_bcast_and_splat(%arg1: index, %arg2: index) -> vector<1x4xindex> {
- %0 = vector.splat %arg1 : vector<1x4xindex>
+ %0 = vector.broadcast %arg1 : index to vector<1x4xindex>
%1 = vector.broadcast %arg2 : index to vector<1x4xindex>
%2 = arith.addi %0, %1 : vector<1x4xindex>
return %2 : vector<1x4xindex>
@@ -53,7 +53,7 @@ func.func @broadcast_scalar_with_bcast_and_splat(%arg1: index, %arg2: index) ->
// CHECK: %[[BCAST:.*]] = vector.broadcast %[[ADD]] : index to vector<1x[4]xindex>
// CHECK: return %[[BCAST]] : vector<1x[4]xindex>
func.func @broadcast_scalar_with_bcast_and_splat_scalable(%arg1: index, %arg2: index) -> vector<1x[4]xindex> {
- %0 = vector.splat %arg1 : vector<1x[4]xindex>
+ %0 = vector.broadcast %arg1 : index to vector<1x[4]xindex>
%1 = vector.broadcast %arg2 : index to vector<1x[4]xindex>
%2 = arith.addi %0, %1 : vector<1x[4]xindex>
return %2 : vector<1x[4]xindex>
@@ -94,12 +94,12 @@ func.func @broadcast_vector_scalable(%arg1: vector<[4]xf32>, %arg2: vector<[4]xf
// CHECK-LABEL: func.func @broadcast_scalar_and_vec(
// CHECK-SAME: %[[ARG1:.*]]: index,
// CHECK-SAME: %[[ARG2:.*]]: vector<4xindex>) -> vector<1x4xindex> {
-// CHECK: %[[SPLAT:.*]] = vector.splat %[[ARG1]] : vector<1x4xindex>
+// CHECK: %[[SPLAT:.*]] = vector.broadcast %[[ARG1]] : index to vector<1x4xindex>
// CHECK: %[[BCAST:.*]] = vector.broadcast %[[ARG2]] : vector<4xindex> to vector<1x4xindex>
// CHECK: %[[ADD:.*]] = arith.addi %[[SPLAT]], %[[BCAST]] : vector<1x4xindex>
// CHECK: return %[[ADD]] : vector<1x4xindex>
func.func @broadcast_scalar_and_vec(%arg1: index, %arg2: vector<4xindex>) -> vector<1x4xindex> {
- %0 = vector.splat %arg1 : vector<1x4xindex>
+ %0 = vector.broadcast %arg1 : index to vector<1x4xindex>
%1 = vector.broadcast %arg2 : vector<4xindex> to vector<1x4xindex>
%2 = arith.addi %0, %1 : vector<1x4xindex>
return %2 : vector<1x4xindex>
@@ -108,12 +108,12 @@ func.func @broadcast_scalar_and_vec(%arg1: index, %arg2: vector<4xindex>) -> vec
// CHECK-LABEL: func.func @broadcast_scalar_and_vec_scalable(
// CHECK-SAME: %[[ARG1:.*]]: index,
// CHECK-SAME: %[[ARG2:.*]]: vector<[4]xindex>) -> vector<1x[4]xindex> {
-// CHECK: %[[SPLAT:.*]] = vector.splat %[[ARG1]] : vector<1x[4]xindex>
+// CHECK: %[[SPLAT:.*]] = vector.broadcast %[[ARG1]] : index to vector<1x[4]xindex>
// CHECK: %[[BCAST:.*]] = vector.broadcast %[[ARG2]] : vector<[4]xindex> to vector<1x[4]xindex>
// CHECK: %[[ADD:.*]] = arith.addi %[[SPLAT]], %[[BCAST]] : vector<1x[4]xindex>
// CHECK: return %[[ADD]] : vector<1x[4]xindex>
func.func @broadcast_scalar_and_vec_scalable(%arg1: index, %arg2: vector<[4]xindex>) -> vector<1x[4]xindex> {
- %0 = vector.splat %arg1 : vector<1x[4]xindex>
+ %0 = vector.broadcast %arg1 : index to vector<1x[4]xindex>
%1 = vector.broadcast %arg2 : vector<[4]xindex> to vector<1x[4]xindex>
%2 = arith.addi %0, %1 : vector<1x[4]xindex>
return %2 : vector<1x[4]xindex>
@@ -787,7 +787,7 @@ func.func @negative_extract_load_scalable(%arg0: memref<?xf32>, %arg1: index) ->
// CHECK-SAME: (%[[ARG0:.*]]: memref<?xf32>, %[[ARG1:.*]]: index, %[[ARG2:.*]]: f32)
func.func @store_splat(%arg0: memref<?xf32>, %arg1: index, %arg2: f32) {
// CHECK: memref.store %[[ARG2]], %[[ARG0]][%[[ARG1]]] : memref<?xf32>
- %0 = vector.splat %arg2 : vector<1xf32>
+ %0 = vector.broadcast %arg2 : f32 to vector<1xf32>
vector.store %0, %arg0[%arg1] : memref<?xf32>, vector<1xf32>
return
}
@@ -813,9 +813,9 @@ func.func @store_broadcast_1d_to_2d(%arg0: memref<?x?xf32>, %arg1: index, %arg2:
// CHECK-LABEL: @negative_store_scalable
// CHECK-SAME: (%[[ARG0:.*]]: memref<?xf32>, %[[ARG1:.*]]: index, %[[ARG2:.*]]: f32)
func.func @negative_store_scalable(%arg0: memref<?xf32>, %arg1: index, %arg2: f32) {
-// CHECK: %[[RES:.*]] = vector.splat %[[ARG2]] : vector<[1]xf32>
+// CHECK: %[[RES:.*]] = vector.broadcast %[[ARG2]] : f32 to vector<[1]xf32>
// CHECK: vector.store %[[RES]], %[[ARG0]][%[[ARG1]]] : memref<?xf32>, vector<[1]xf32>
- %0 = vector.splat %arg2 : vector<[1]xf32>
+ %0 = vector.broadcast %arg2 : f32 to vector<[1]xf32>
vector.store %0, %arg0[%arg1] : memref<?xf32>, vector<[1]xf32>
return
}
@@ -823,9 +823,9 @@ func.func @negative_store_scalable(%arg0: memref<?xf32>, %arg1: index, %arg2: f3
// CHECK-LABEL: @negative_store_memref_of_vec
// CHECK-SAME: (%[[ARG0:.*]]: memref<?xvector<1xf32>>, %[[ARG1:.*]]: index, %[[ARG2:.*]]: f32)
func.func @negative_store_memref_of_vec(%arg0: memref<?xvector<1xf32>>, %arg1: index, %arg2: f32) {
-// CHECK: %[[RES:.*]] = vector.splat %[[ARG2]] : vector<1xf32>
+// CHECK: %[[RES:.*]] = vector.broadcast %[[ARG2]] : f32 to vector<1xf32>
// CHECK: vector.store %[[RES]], %[[ARG0]][%[[ARG1]]] : memref<?xvector<1xf32>>, vector<1xf32>
- %0 = vector.splat %arg2 : vector<1xf32>
+ %0 = vector.broadcast %arg2 : f32 to vector<1xf32>
vector.store %0, %arg0[%arg1] : memref<?xvector<1xf32>>, vector<1xf32>
return
}
@@ -833,9 +833,9 @@ func.func @negative_store_memref_of_vec(%arg0: memref<?xvector<1xf32>>, %arg1: i
// CHECK-LABEL: @negative_store_more_than_one_element
// CHECK-SAME: (%[[ARG0:.*]]: memref<?xf32>, %[[ARG1:.*]]: index, %[[ARG2:.*]]: f32)
func.func @negative_store_more_than_one_element(%arg0: memref<?xf32>, %arg1: index, %arg2: f32) {
-// CHECK: %[[RES:.*]] = vector.splat %[[ARG2]] : vector<4xf32>
+// CHECK: %[[RES:.*]] = vector.broadcast %[[ARG2]] : f32 to vector<4xf32>
// CHECK: vector.store %[[RES]], %[[ARG0]][%[[ARG1]]] : memref<?xf32>, vector<4xf32>
- %0 = vector.splat %arg2 : vector<4xf32>
+ %0 = vector.broadcast %arg2 : f32 to vector<4xf32>
vector.store %0, %arg0[%arg1] : memref<?xf32>, vector<4xf32>
return
}
@@ -843,10 +843,10 @@ func.func @negative_store_more_than_one_element(%arg0: memref<?xf32>, %arg1: ind
// CHECK-LABEL: @negative_store_no_single_use
// CHECK-SAME: (%[[ARG0:.*]]: memref<?xf32>, %[[ARG1:.*]]: index, %[[ARG2:.*]]: f32)
func.func @negative_store_no_single_use(%arg0: memref<?xf32>, %arg1: index, %arg2: f32) -> vector<1xf32> {
-// CHECK: %[[RES:.*]] = vector.splat %[[ARG2]] : vector<1xf32>
+// CHECK: %[[RES:.*]] = vector.broadcast %[[ARG2]] : f32 to vector<1xf32>
// CHECK: vector.store %[[RES]], %[[ARG0]][%[[ARG1]]] : memref<?xf32>, vector<1xf32>
// CHECK: return %[[RES:.*]] : vector<1xf32>
- %0 = vector.splat %arg2 : vector<1xf32>
+ %0 = vector.broadcast %arg2 : f32 to vector<1xf32>
vector.store %0, %arg0[%arg1] : memref<?xf32>, vector<1xf32>
return %0 : vector<1xf32>
}
diff --git a/mlir/test/Dialect/Vector/vector-transfer-to-vector-load-store.mlir b/mlir/test/Dialect/Vector/vector-transfer-to-vector-load-store.mlir
index 1b54d54..45afbff 100644
--- a/mlir/test/Dialect/Vector/vector-transfer-to-vector-load-store.mlir
+++ b/mlir/test/Dialect/Vector/vector-transfer-to-vector-load-store.mlir
@@ -285,19 +285,19 @@ func.func @transfer_read_permutations(%mem_0 : memref<?x?xf32>, %mem_1 : memref<
%c0 = arith.constant 0 : index
// CHECK: %[[MASK0:.*]] = vector.broadcast %{{.*}} : i1 to vector<14x7xi1>
- %mask0 = vector.splat %m : vector<14x7xi1>
+ %mask0 = vector.broadcast %m : i1 to vector<14x7xi1>
%0 = vector.transfer_read %mem_1[%c0, %c0, %c0, %c0], %cst, %mask0 {in_bounds = [true, false, true, true], permutation_map = #map0} : memref<?x?x?x?xf32>, vector<7x14x8x16xf32>
// CHECK: vector.transfer_read {{.*}} %[[MASK0]] {in_bounds = [false, true, true, true], permutation_map = #[[$MAP0]]} : memref<?x?x?x?xf32>, vector<14x7x8x16xf32>
// CHECK: vector.transpose %{{.*}}, [1, 0, 2, 3] : vector<14x7x8x16xf32> to vector<7x14x8x16xf32>
// CHECK: %[[MASK1:.*]] = vector.broadcast %{{.*}} : i1 to vector<16x14xi1>
- %mask1 = vector.splat %m : vector<16x14xi1>
+ %mask1 = vector.broadcast %m : i1 to vector<16x14xi1>
%1 = vector.transfer_read %mem_1[%c0, %c0, %c0, %c0], %cst, %mask1 {in_bounds = [true, false, true, false], permutation_map = #map1} : memref<?x?x?x?xf32>, vector<7x14x8x16xf32>
// CHECK: vector.transfer_read {{.*}} %[[MASK1]] {in_bounds = [false, false, true, true], permutation_map = #[[$MAP0]]} : memref<?x?x?x?xf32>, vector<16x14x7x8xf32>
// CHECK: vector.transpose %{{.*}}, [2, 1, 3, 0] : vector<16x14x7x8xf32> to vector<7x14x8x16xf32>
// CHECK: %[[MASK3:.*]] = vector.broadcast %{{.*}} : i1 to vector<14x7xi1>
- %mask2 = vector.splat %m : vector<14x7xi1>
+ %mask2 = vector.broadcast %m : i1 to vector<14x7xi1>
%2 = vector.transfer_read %mem_1[%c0, %c0, %c0, %c0], %cst, %mask2 {in_bounds = [true, false, true, true], permutation_map = #map2} : memref<?x?x?x?xf32>, vector<7x14x8x16xf32>
// CHECK: vector.transfer_read {{.*}} %[[MASK3]] {in_bounds = [false, true, true], permutation_map = #[[$MAP1]]} : memref<?x?x?x?xf32>, vector<14x16x7xf32>
// CHECK: vector.broadcast %{{.*}} : vector<14x16x7xf32> to vector<8x14x16x7xf32>
@@ -337,7 +337,7 @@ func.func @transfer_write_permutations_tensor_masked(
%c0 = arith.constant 0 : index
// CHECK: %[[MASK:.*]] = vector.broadcast %[[M]] : i1 to vector<16x14x7x8xi1>
- %mask0 = vector.splat %m : vector<16x14x7x8xi1>
+ %mask0 = vector.broadcast %m : i1 to vector<16x14x7x8xi1>
%res = vector.transfer_write %vec, %dst[%c0, %c0, %c0, %c0], %mask0 {in_bounds = [true, false, false, true], permutation_map = affine_map<(d0, d1, d2, d3) -> (d2, d1, d3, d0)>} : vector<7x14x8x16xf32>, tensor<?x?x?x?xf32>
// CHECK: %[[NEW_VEC0:.*]] = vector.transpose %{{.*}} [3, 1, 0, 2] : vector<7x14x8x16xf32> to vector<16x14x7x8xf32>
// CHECK: %[[NEW_RES0:.*]] = vector.transfer_write %[[NEW_VEC0]], %[[DST]][%c0, %c0, %c0, %c0], %[[MASK]] {in_bounds = [true, false, true, false]} : vector<16x14x7x8xf32>, tensor<?x?x?x?xf32>
diff --git a/mlir/test/Dialect/WasmSSA/custom_parser/if.mlir b/mlir/test/Dialect/WasmSSA/custom_parser/if.mlir
new file mode 100644
index 0000000..01068cb
--- /dev/null
+++ b/mlir/test/Dialect/WasmSSA/custom_parser/if.mlir
@@ -0,0 +1,53 @@
+// RUN: mlir-opt %s | FileCheck %s
+
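+// Round-trip parse/print test for wasmssa.if, with and without an else clause.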
+// CHECK-LABEL: wasmssa.func nested @func_0(
+// CHECK-SAME: %[[ARG0:.*]]: !wasmssa<local ref to i32>) -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.local_get %[[ARG0]] : ref to i32
+// CHECK: wasmssa.if %[[VAL_0]] : {
+// CHECK: %[[VAL_1:.*]] = wasmssa.const 5.000000e-01 : f32
+// CHECK: wasmssa.block_return %[[VAL_1]] : f32
+// CHECK: } "else "{
+// CHECK: %[[VAL_2:.*]] = wasmssa.const 2.500000e-01 : f32
+// CHECK: wasmssa.block_return %[[VAL_2]] : f32
+// CHECK: }> ^bb1
+// CHECK: ^bb1(%[[VAL_3:.*]]: f32):
+// CHECK: wasmssa.return %[[VAL_3]] : f32
+wasmssa.func nested @func_0(%arg0 : !wasmssa<local ref to i32>) -> i32 {
+ %cond = wasmssa.local_get %arg0 : ref to i32
+ wasmssa.if %cond : {
+ %c0 = wasmssa.const 0.5 : f32
+ wasmssa.block_return %c0 : f32
+ } else {
+ %c1 = wasmssa.const 0.25 : f32
+ wasmssa.block_return %c1 : f32
+ } >^bb1
+ ^bb1(%retVal: f32):
+ wasmssa.return %retVal : f32
+}
+
+// CHECK-LABEL: wasmssa.func nested @func_1(
+// CHECK-SAME: %[[ARG0:.*]]: !wasmssa<local ref to i32>) -> i32 {
+// CHECK: %[[VAL_0:.*]] = wasmssa.local_get %[[ARG0]] : ref to i32
+// CHECK: %[[VAL_1:.*]] = wasmssa.local of type i32
+// CHECK: %[[VAL_2:.*]] = wasmssa.const 0 : i64
+// CHECK: wasmssa.if %[[VAL_0]] : {
+// CHECK: %[[VAL_3:.*]] = wasmssa.const 1 : i32
+// CHECK: wasmssa.local_set %[[VAL_1]] : ref to i32 to %[[VAL_3]] : i32
+// CHECK: wasmssa.block_return
+// CHECK: } > ^bb1
+// CHECK: ^bb1:
+// CHECK: %[[VAL_4:.*]] = wasmssa.local_get %[[VAL_1]] : ref to i32
+// CHECK: wasmssa.return %[[VAL_4]] : i32
+wasmssa.func nested @func_1(%arg0 : !wasmssa<local ref to i32>) -> i32 {
+ %cond = wasmssa.local_get %arg0 : ref to i32
+ %var = wasmssa.local of type i32
+ %zero = wasmssa.const 0
+ wasmssa.if %cond : {
+ %c1 = wasmssa.const 1 : i32
+ wasmssa.local_set %var : ref to i32 to %c1 : i32
+ wasmssa.block_return
+ } >^bb1
+ ^bb1:
+ %res = wasmssa.local_get %var : ref to i32
+ wasmssa.return %res : i32
+}
diff --git a/mlir/test/Dialect/WasmSSA/custom_parser/memory.mlir b/mlir/test/Dialect/WasmSSA/custom_parser/memory.mlir
new file mode 100644
index 0000000..47551db
--- /dev/null
+++ b/mlir/test/Dialect/WasmSSA/custom_parser/memory.mlir
@@ -0,0 +1,7 @@
+// RUN: mlir-opt %s | FileCheck %s
+
+// CHECK: wasmssa.memory @mem0 public !wasmssa<limit[0: 65536]>
+wasmssa.memory @mem0 public !wasmssa<limit[0:65536]>
+
+// CHECK: wasmssa.memory @mem1 nested !wasmssa<limit[512:]>
+wasmssa.memory @mem1 !wasmssa<limit[512:]>
diff --git a/mlir/test/Dialect/WasmSSA/custom_parser/table.mlir b/mlir/test/Dialect/WasmSSA/custom_parser/table.mlir
new file mode 100644
index 0000000..5a874f4
--- /dev/null
+++ b/mlir/test/Dialect/WasmSSA/custom_parser/table.mlir
@@ -0,0 +1,7 @@
+// RUN: mlir-opt %s | FileCheck %s
+
+// CHECK: wasmssa.table @tab0 public !wasmssa<tabletype !wasmssa.externref [0: 65536]>
+wasmssa.table @tab0 public !wasmssa<tabletype !wasmssa.externref [0:65536]>
+
+// CHECK: wasmssa.table @tab1 nested !wasmssa<tabletype !wasmssa.funcref [348:]>
+wasmssa.table @tab1 !wasmssa<tabletype !wasmssa.funcref [348:]>
diff --git a/mlir/test/Dialect/XeGPU/invalid.mlir b/mlir/test/Dialect/XeGPU/invalid.mlir
index dff3ffa..44e15dd 100644
--- a/mlir/test/Dialect/XeGPU/invalid.mlir
+++ b/mlir/test/Dialect/XeGPU/invalid.mlir
@@ -52,14 +52,14 @@ func.func @create_nd_tdesc_7(%src: memref<128x128xf32>) {
// -----
func.func @create_nd_tdesc_8(%src: ui64) {
- // expected-error@+1 {{'xegpu.create_nd_tdesc' op Expecting strides and shape to be present for integer source}}
+ // expected-error@+1 {{'xegpu.create_nd_tdesc' op expecting strides and shape to be present for integer source}}
%1 = xegpu.create_nd_tdesc %src : ui64-> !xegpu.tensor_desc<128x128xf32>
return
}
// -----
func.func @create_nd_tdesc_9(%src: ui64) {
- // expected-error@+1 {{expected mixed offsets rank to match mixed sizes rank}}
+ // expected-error@+1 {{expecting strides and shape to be present for integer source}}
%1 = xegpu.create_nd_tdesc %src[0, 0] : ui64-> !xegpu.tensor_desc<128x128xf32>
return
}
@@ -149,7 +149,7 @@ func.func @subgroup_load_nd_offset_2(%src: memref<4x8x16xf16>, %x : index) {
}
// -----
-func.func @subgroup_load_nd_offset_3(%src: memref<4x8x16xf16>, %x : index) {
+func.func @subgroup_load_nd_offset_3(%src: memref<4x8x16xf16>, %x : index) {
%3 = xegpu.create_nd_tdesc %src: memref<4x8x16xf16> -> !xegpu.tensor_desc<8x16xf16>
%5 = xegpu.load_nd %3[0, 0] : !xegpu.tensor_desc<8x16xf16> -> vector<8x16xf16>
// expected-error@+1 {{Mismatched ranks between offsets and tensor descriptor}}
@@ -418,7 +418,7 @@ func.func @store_scatter_offset_wi_1(%src: memref<?xf16>) {
%offsets = arith.constant dense<[0]> : vector<1xindex>
%mask = arith.constant dense<1>: vector<1xi1>
// expected-error@+1 {{value elements must match chunk size}}
- xegpu.store %val, %src[%offsets], %mask
+ xegpu.store %val, %src[%offsets], %mask
: vector<4xf16>, memref<?xf16>, vector<1xindex>, vector<1xi1>
return
}
@@ -429,7 +429,7 @@ func.func @store_scatter_offset_wi_2(%src: memref<4x4xf16>) {
%offsets = arith.constant dense<[0]> : vector<1xindex>
%mask = arith.constant dense<1>: vector<1xi1>
// expected-error@+1 {{Expecting the dest is a 1D memref or pointer}}
- xegpu.store %val, %src[%offsets], %mask
+ xegpu.store %val, %src[%offsets], %mask
: vector<4xf16>, memref<4x4xf16>, vector<1xindex>, vector<1xi1>
return
}
@@ -743,3 +743,22 @@ func.func @tensor_desc_invalid_sg_data(%src: ui64, %offsets: vector<16xindex>) {
#xegpu.layout<lane_layout = [8, 1], lane_data = [1, 2], order = [0, 1, 2]>>
return
}
+
+// -----
+#l = #xegpu.layout<sg_layout = [16, 1, 1], sg_data = [1, 8, 2]>
+// expected-error@+1 {{repeated dim (2) in slice attribute}}
+#s = #xegpu.slice<#l, dims = [2, 2]>
+func.func @slice_attr_repeat_dim() {
+ %offsets = arith.constant {layout_result_0 = #s} dense<0.8> : vector<16x8xindex>
+ return
+}
+
+// -----
+#l = #xegpu.layout<sg_layout = [16, 1, 1], sg_data = [1, 8, 2]>
+// expected-error@+1 {{invalid dim (3) in slice attribute}}
+#s = #xegpu.slice<#l, dims = [3]>
+func.func @slice_attr_invalid_dim() {
+ %offsets = arith.constant {layout_result_0 = #s} dense<0.8> : vector<16x8xindex>
+ return
+}
+
diff --git a/mlir/test/Dialect/XeGPU/layout.mlir b/mlir/test/Dialect/XeGPU/layout.mlir
index 017dacc..e4b4e22 100644
--- a/mlir/test/Dialect/XeGPU/layout.mlir
+++ b/mlir/test/Dialect/XeGPU/layout.mlir
@@ -50,4 +50,27 @@ gpu.func @convert_layout_wg(%a: vector<32x64xf16>) {
gpu.return
}
+gpu.func @slice_attr() {
+ //CHECK: arith.constant {layout_result_0 = #xegpu.slice<#xegpu.layout<sg_layout = [16, 1, 1], sg_data = [1, 8, 2]>, dims = [2]>} dense<8> : vector<16x8xindex>
+ %cst = arith.constant {layout_result_0 = #xegpu.slice<#xegpu.layout<sg_layout = [16, 1, 1], sg_data = [1, 8, 2]>, dims = [2]>} dense<8> : vector<16x8xindex>
+ gpu.return
+}
+
+gpu.func @nested_slice_attr() {
+ //CHECK: arith.constant {layout_result_0 = #xegpu.slice<#xegpu.slice<#xegpu.layout<sg_layout = [16, 1, 1], sg_data = [1, 8, 2]>, dims = [2]>, dims = [1]>} dense<8> : vector<16xindex>
+ %cst = arith.constant {layout_result_0 = #xegpu.slice<#xegpu.slice<#xegpu.layout<sg_layout = [16, 1, 1], sg_data = [1, 8, 2]>, dims = [2]>, dims = [1]>} dense<8> : vector<16xindex>
+ gpu.return
+}
+
+gpu.func @softmax_dim_0(%arg0: vector<256x128xf32>) -> vector<256x128xf32> {
+ %cst = arith.constant dense<0.000000e+00> : vector<128xf32>
+ %0 = math.exp %arg0 {layout_result_0 = #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 32]>} : vector<256x128xf32>
+ //CHECK: vector.multi_reduction <add>, {{.*}} {layout_result_0 = #xegpu.slice<#xegpu.layout<sg_layout = [8, 4], sg_data = [32, 32]>, dims = [0]>} [0] : vector<256x128xf32> to vector<128xf32>
+ %1 = vector.multi_reduction <add>, %0, %cst {layout_result_0 = #xegpu.slice<#xegpu.layout<sg_layout = [8, 4], sg_data = [32, 32]>, dims = [0]>} [0] : vector<256x128xf32> to vector<128xf32>
+ //CHECK: vector.broadcast {{.*}} {layout_result_0 = #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 32]>} : vector<128xf32> to vector<256x128xf32>
+ %2 = vector.broadcast %1 {layout_result_0 = #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 32]>} : vector<128xf32> to vector<256x128xf32>
+ %3 = arith.divf %0, %2 {layout_result_0 = #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 32]>} : vector<256x128xf32>
+ gpu.return %3 : vector<256x128xf32>
+}
+
}
diff --git a/mlir/test/Dialect/XeGPU/ops.mlir b/mlir/test/Dialect/XeGPU/ops.mlir
index 6be2371..67c00f5 100644
--- a/mlir/test/Dialect/XeGPU/ops.mlir
+++ b/mlir/test/Dialect/XeGPU/ops.mlir
@@ -62,28 +62,28 @@ gpu.func @create_nd_tdesc_7(%src: memref<8x24x32x48x64xf32>) {
}
-// CHECK: gpu.func @test_create_nd_tdesc_7(%[[arg0:.*]]: ui64, %[[arg1:.*]]: index, %[[arg2:.*]]: index, %[[arg3:.*]]: index, %[[arg4:.*]]: index, %[[arg5:.*]]: memref<24x32xf32>)
+// CHECK: gpu.func @test_create_nd_tdesc_7(%[[arg0:.*]]: ui64, %[[arg1:.*]]: index, %[[arg2:.*]]: index, %[[arg3:.*]]: index, %[[arg4:.*]]: index, %[[arg5:.*]]: memref<24x32xf32>)
gpu.func @test_create_nd_tdesc_7(%src: ui64, %w : index, %h : index, %x : index, %y : index, %src2: memref<24x32xf32>) {
//CHECK: %[[C:.*]] = arith.constant 1 : index
%c1 = arith.constant 1 : index
-
- // CHECK: %[[REG:.*]] = xegpu.create_nd_tdesc %[[arg5]][0, 0] : memref<24x32xf32> -> !xegpu.tensor_desc<8x16xf32>
+
+ // CHECK: %[[REG:.*]] = xegpu.create_nd_tdesc %[[arg5]] : memref<24x32xf32> -> !xegpu.tensor_desc<8x16xf32>
%3 = xegpu.create_nd_tdesc %src2 : memref<24x32xf32> -> !xegpu.tensor_desc<8x16xf32>
-
+
gpu.return
}
-// CHECK: gpu.func @test_create_nd_tdesc_8(%[[arg0:.*]]: ui64, %[[arg1:.*]]: index, %[[arg2:.*]]: index, %[[arg3:.*]]: index, %[[arg4:.*]]: index)
+// CHECK: gpu.func @test_create_nd_tdesc_8(%[[arg0:.*]]: ui64, %[[arg1:.*]]: index, %[[arg2:.*]]: index, %[[arg3:.*]]: index, %[[arg4:.*]]: index)
gpu.func @test_create_nd_tdesc_8(%src: ui64, %w : index, %h : index, %x : index, %y : index) {
-
- %c1 = arith.constant 1 : index
- // CHECK: %[[REG:.*]] = xegpu.create_nd_tdesc %arg0[0, 0], shape : [%arg2, %arg1], strides : [%arg1, %c1] : ui64 -> !xegpu.tensor_desc<8x16xf32>
+
+ %c1 = arith.constant 1 : index
+ // CHECK: %[[REG:.*]] = xegpu.create_nd_tdesc %arg0, shape : [%arg2, %arg1], strides : [%arg1, %c1] : ui64 -> !xegpu.tensor_desc<8x16xf32>
%2 = xegpu.create_nd_tdesc %src, shape : [%h, %w], strides : [%w, %c1] : ui64 -> !xegpu.tensor_desc<8x16xf32>
-
+
gpu.return
}
-// CHECK-LABEL: func @test_create_nd_tdesc_9({{.*}})
+// CHECK-LABEL: func @test_create_nd_tdesc_9({{.*}})
gpu.func @test_create_nd_tdesc_9(%src: memref<?x?xf16>, %w : index, %h : index, %x : index, %y : index) {
@@ -94,10 +94,10 @@ gpu.func @test_create_nd_tdesc_9(%src: memref<?x?xf16>, %w : index, %h : index,
gpu.return
}
-// CHECK-LABEL: func @test_create_nd_tdesc_10({{.*}})
-gpu.func @test_create_nd_tdesc_10(%src: memref<?x?xf16>, %w : index, %h : index, %x : index, %y : index) {
+// CHECK-LABEL: func @test_create_nd_tdesc_10({{.*}})
+gpu.func @test_create_nd_tdesc_10(%src: memref<?x?xf16>, %w : index, %h : index, %x : index, %y : index) {
%c1 = arith.constant 1 : index
- // CHECK: %[[REG:.*]] = xegpu.create_nd_tdesc %arg0[0, 0], shape : [%arg2, %arg1], strides : [%arg1, %c1] : memref<?x?xf16> -> !xegpu.tensor_desc<8x16xf16>
+ // CHECK: %[[REG:.*]] = xegpu.create_nd_tdesc %arg0, shape : [%arg2, %arg1], strides : [%arg1, %c1] : memref<?x?xf16> -> !xegpu.tensor_desc<8x16xf16>
%2 = xegpu.create_nd_tdesc %src, shape:[%h, %w], strides:[%w, %c1] : memref<?x?xf16> -> !xegpu.tensor_desc<8x16xf16>
gpu.return
@@ -123,7 +123,7 @@ gpu.func @prefetch_nd_2(%src: memref<48x64xf16>) {
// CHECK: gpu.func @prefetch_nd_offset_1(%[[arg0:.*]]: memref<48x64xf16>, %arg1: index, %arg2: index) {
gpu.func @prefetch_nd_offset_1(%src: memref<48x64xf16>, %x : index, %y : index) {
- // CHECK: %[[R0:.*]] = xegpu.create_nd_tdesc %[[arg0]][0, 0] : memref<48x64xf16> -> !xegpu.tensor_desc<8x16xf16>
+ // CHECK: %[[R0:.*]] = xegpu.create_nd_tdesc %[[arg0]] : memref<48x64xf16> -> !xegpu.tensor_desc<8x16xf16>
%1 = xegpu.create_nd_tdesc %src : memref<48x64xf16> -> !xegpu.tensor_desc<8x16xf16>
// CHECK: xegpu.prefetch_nd %[[R0]][%arg1, %arg2] <{l1_hint = #xegpu.cache_hint<cached>, l2_hint = #xegpu.cache_hint<uncached>}> : !xegpu.tensor_desc<8x16xf16>
xegpu.prefetch_nd %1[%x, %y] <{l1_hint = #xegpu.cache_hint<cached>, l2_hint = #xegpu.cache_hint<uncached>}>: !xegpu.tensor_desc<8x16xf16>
@@ -271,7 +271,7 @@ gpu.func @subgroup_load_nd_8(%src: memref<24x32xf32>) {
// CHECK: func @subgroup_load_nd_offset_1(%[[arg0:.*]]: memref<24x32xf32>, %arg1: index, %arg2: index) {
gpu.func @subgroup_load_nd_offset_1(%src: memref<24x32xf32>, %x : index, %y : index) {
- // CHECK: %[[R0:.*]] = xegpu.create_nd_tdesc %arg0[0, 0] : memref<24x32xf32> -> !xegpu.tensor_desc<16x8xf32>
+ // CHECK: %[[R0:.*]] = xegpu.create_nd_tdesc %arg0 : memref<24x32xf32> -> !xegpu.tensor_desc<16x8xf32>
%1 = xegpu.create_nd_tdesc %src : memref<24x32xf32> -> !xegpu.tensor_desc<16x8xf32>
// CHECK: %[[R1:.*]] = xegpu.load_nd %[[R0]][%arg1, %arg2] <{l1_hint = #xegpu.cache_hint<cached>, l2_hint = #xegpu.cache_hint<uncached>, transpose = array<i64: 1, 0>}> : !xegpu.tensor_desc<16x8xf32> -> vector<8x16xf32>
%2 = xegpu.load_nd %1[%x, %y] <{l1_hint = #xegpu.cache_hint<cached>, l2_hint = #xegpu.cache_hint<uncached>, transpose = array<i64: 1, 0>}> : !xegpu.tensor_desc<16x8xf32> -> vector<8x16xf32>
@@ -290,7 +290,7 @@ gpu.func @simt_load_nd_8(%src: memref<24x32xf32>) {
// CHECK: func @simt_load_nd_offset_1(%[[arg0:.*]]: memref<24x32xf32>) {
gpu.func @simt_load_nd_offset_1(%src: memref<24x32xf32>) {
- // CHECK: %[[R0:.*]] = xegpu.create_nd_tdesc %arg0[0, 0] : memref<24x32xf32> -> !xegpu.tensor_desc<16x8xf32>
+ // CHECK: %[[R0:.*]] = xegpu.create_nd_tdesc %arg0 : memref<24x32xf32> -> !xegpu.tensor_desc<16x8xf32>
%1 = xegpu.create_nd_tdesc %src : memref<24x32xf32> -> !xegpu.tensor_desc<16x8xf32>
// CHECK: %[[R1:.*]] = xegpu.load_nd %[[R0]][0, 0] <{l1_hint = #xegpu.cache_hint<cached>, l2_hint = #xegpu.cache_hint<uncached>, transpose = array<i64: 1, 0>}> : !xegpu.tensor_desc<16x8xf32> -> vector<8xf32>
%2 = xegpu.load_nd %1[0, 0] <{l1_hint = #xegpu.cache_hint<cached>, l2_hint = #xegpu.cache_hint<uncached>, transpose = array<i64: 1, 0>}> : !xegpu.tensor_desc<16x8xf32> -> vector<8xf32>
@@ -323,7 +323,7 @@ gpu.func @simt_store_nd(%src: memref<24x32xf16>) {
gpu.func @subgroup_store_nd_2(%dst: memref<24x32xf16>, %x : index) {
// CHECK: %[[C:.*]] = arith.constant dense<1.000000e+00> : vector<32xf16>
%1 = arith.constant dense<1.0>: vector<32xf16>
- // CHECK: %[[R0:.*]] = xegpu.create_nd_tdesc %[[arg0]][0, 0] : memref<24x32xf16> -> !xegpu.tensor_desc<32xf16>
+ // CHECK: %[[R0:.*]] = xegpu.create_nd_tdesc %[[arg0]] : memref<24x32xf16> -> !xegpu.tensor_desc<32xf16>
%2 = xegpu.create_nd_tdesc %dst : memref<24x32xf16> -> !xegpu.tensor_desc<32xf16>
// CHECK: xegpu.store_nd %[[C]], %[[R0]][%arg1] <{l1_hint = #xegpu.cache_hint<write_back>, l2_hint = #xegpu.cache_hint<uncached>}> : vector<32xf16>, !xegpu.tensor_desc<32xf16>
xegpu.store_nd %1, %2[%x] <{l1_hint = #xegpu.cache_hint<write_back>, l2_hint = #xegpu.cache_hint<uncached>}>: vector<32xf16>, !xegpu.tensor_desc<32xf16>
@@ -356,7 +356,7 @@ gpu.func @simt_store_nd_2(%src: memref<24x32xf16>) {
gpu.func @simt_store_nd_offset_1(%src: memref<24x32xf16>) {
// CHECK: %[[C:.*]] = arith.constant dense<1.000000e+00> : vector<2xf16>
%1 = arith.constant dense<1.0>: vector<2xf16>
- // CHECK: %[[R0:.*]] = xegpu.create_nd_tdesc %arg0[0, 0] : memref<24x32xf16> -> !xegpu.tensor_desc<32xf16>
+ // CHECK: %[[R0:.*]] = xegpu.create_nd_tdesc %arg0 : memref<24x32xf16> -> !xegpu.tensor_desc<32xf16>
%2 = xegpu.create_nd_tdesc %src : memref<24x32xf16> -> !xegpu.tensor_desc<32xf16>
// CHECK: xegpu.store_nd %[[C]], %[[R0]][0] <{l1_hint = #xegpu.cache_hint<write_back>, l2_hint = #xegpu.cache_hint<uncached>}> : vector<2xf16>, !xegpu.tensor_desc<32xf16>
xegpu.store_nd %1, %2[0] <{l1_hint = #xegpu.cache_hint<write_back>, l2_hint = #xegpu.cache_hint<uncached>}>: vector<2xf16>, !xegpu.tensor_desc<32xf16>
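The common thread in these ops.mlir updates is that offsets move off of xegpu.create_nd_tdesc and onto the consuming load/store/prefetch, so the descriptor only captures the base memref and shape. A sketch assembled from the forms exercised above (value names are illustrative):

  %t = xegpu.create_nd_tdesc %src : memref<24x32xf32> -> !xegpu.tensor_desc<8x16xf32>
  %v = xegpu.load_nd %t[%x, %y] <{l1_hint = #xegpu.cache_hint<cached>}> : !xegpu.tensor_desc<8x16xf32> -> vector<8x16xf32>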
diff --git a/mlir/test/Dialect/XeGPU/xegpu-attr-interface.mlir b/mlir/test/Dialect/XeGPU/xegpu-attr-interface.mlir
new file mode 100644
index 0000000..547c735
--- /dev/null
+++ b/mlir/test/Dialect/XeGPU/xegpu-attr-interface.mlir
@@ -0,0 +1,37 @@
+// RUN: mlir-opt --test-xegpu-layout-interface --cse -split-input-file %s | FileCheck %s
+
+//CHECK: #map = affine_map<()[s0] -> (s0 floordiv 8)>
+gpu.module @test {
+ gpu.func @slice_attr() -> vector<128xindex> {
+ //CHECK: [[sgId:%.+]] = gpu.subgroup_id : index
+ //CHECK: [[IDY:%.+]] = affine.apply #map()[[[sgId]]]
+ //CHECK: [[c32:%.+]] = arith.constant 32 : index
+ //CHECK: [[LOCALY:%.+]] = index.mul [[IDY]], [[c32]]
+ //CHECK: [[c0:%.+]] = arith.constant 0 : index
+ //CHECK: [[Y:%.+]] = arith.addi [[LOCALY]], [[c0]] : index
+ //CHECK: [[c128:%.+]] = arith.constant 128 : index
+ //CHECK: [[MODY:%.+]] = index.remu [[Y]], [[c128]]
+ //CHECK: [[BASE:%.+]] = vector.step : vector<32xindex>
+ //CHECK: [[CAST:%.+]] = vector.broadcast [[MODY]] : index to vector<32xindex>
+ //CHECK: [[ADD:%.+]] = arith.addi [[BASE]], [[CAST]] : vector<32xindex>
+ %step = vector.step {layout_result_0 = #xegpu.slice<#xegpu.layout<sg_layout = [4, 8], sg_data = [32, 32]>, dims = [1]>}: vector<128xindex>
+ gpu.return %step : vector<128xindex>
+ }
+
+ gpu.func @nested_slice_attr() -> vector<128xindex> {
+ //CHECK: [[sgId:%.+]] = gpu.subgroup_id : index
+ //CHECK: [[IDY:%.+]] = affine.apply #map()[[[sgId]]]
+ //CHECK: [[c32:%.+]] = arith.constant 32 : index
+ //CHECK: [[LOCALY:%.+]] = index.mul [[IDY]], [[c32]]
+ //CHECK: [[c0:%.+]] = arith.constant 0 : index
+ //CHECK: [[Y:%.+]] = arith.addi [[LOCALY]], [[c0]] : index
+ //CHECK: [[c128:%.+]] = arith.constant 128 : index
+ //CHECK: [[MODY:%.+]] = index.remu [[Y]], [[c128]]
+ //CHECK: [[BASE:%.+]] = vector.step : vector<32xindex>
+ //CHECK: [[CAST:%.+]] = vector.broadcast [[MODY]] : index to vector<32xindex>
+ //CHECK: [[ADD:%.+]] = arith.addi [[BASE]], [[CAST]] : vector<32xindex>
+ %0 = vector.step {layout_result_0 = #xegpu.slice<#xegpu.slice<#xegpu.layout<sg_layout = [4, 8, 1], sg_data = [32, 32, 1]>, dims = [2]>, dims = [1]>} : vector<128xindex>
+ gpu.return %0 : vector<128xindex>
+ }
+
+}
\ No newline at end of file
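As a worked example of the lowering checked above (the arithmetic is mine, not part of the test): with sg_layout = [4, 8], sg_data = [32, 32] and dims = [1] sliced away, subgroup 19 gets IDY = 19 floordiv 8 = 2, a local offset of 2 * 32 = 64, and 64 remu 128 = 64, so its 32-element chunk of the vector<128xindex> result starts at element 64.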
diff --git a/mlir/test/Dialect/XeGPU/xegpu-wg-to-sg-rr.mlir b/mlir/test/Dialect/XeGPU/xegpu-wg-to-sg-rr.mlir
index 628a485..e5cc65e 100644
--- a/mlir/test/Dialect/XeGPU/xegpu-wg-to-sg-rr.mlir
+++ b/mlir/test/Dialect/XeGPU/xegpu-wg-to-sg-rr.mlir
@@ -1,5 +1,8 @@
// RUN: mlir-opt --xegpu-wg-to-sg-distribute -split-input-file %s | FileCheck %s
+#map = affine_map<()[s0] -> (s0 floordiv 4)>
+#map1 = affine_map<()[s0] -> (s0 mod 4)>
+
gpu.module @test_round_robin_assignment {
// CHECK-LABEL: create_nd_tdesc
// CHECK-SAME: %[[ARG_0:.*]]: memref<256x128xf32>
@@ -12,6 +15,30 @@ gpu.module @test_round_robin_assignment {
gpu.return
}
+ // CHECK-LABEL: create_nd_tdesc_with_shared_data
+ // CHECK-SAME: [[ARG_0:%.*]]: memref<256x128xf32>
+ gpu.func @create_nd_tdesc_with_shared_data(%src: memref<256x128xf32>) {
+ //CHECK: [[sgId:%.+]] = gpu.subgroup_id : index
+ //CHECK: [[IdY:%.+]] = affine.apply #map()[[[sgId]]]
+ //CHECK: [[IdX:%.+]] = affine.apply #map1()[[[sgId]]]
+ //CHECK: [[C16:%.+]] = arith.constant 16 : index
+ //CHECK: [[LY:%.+]] = index.mul [[IdY]], [[C16]]
+ //CHECK: [[C64:%.+]] = arith.constant 64 : index
+ //CHECK: [[LX:%.+]] = index.mul [[IdX]], [[C64]]
+ //CHECK: [[C0:%.+]] = arith.constant 0 : index
+ //CHECK: [[C0_1:%.+]] = arith.constant 0 : index
+ //CHECK: [[ADDY:%.+]] = arith.addi [[LY]], [[C0]] : index
+ //CHECK: [[ADDX:%.+]] = arith.addi [[LX]], [[C0_1]] : index
+ //CHECK: [[C128:%.+]] = arith.constant 128 : index
+ //CHECK: [[offY:%.+]] = index.remu [[ADDY]], [[C128]]
+ //CHECK: [[C64_2:%.+]] = arith.constant 64 : index
+ //CHECK: [[offX:%.+]] = index.remu [[ADDX]], [[C64_2]]
+ //CHECK: xegpu.create_nd_tdesc [[ARG_0]][[[offY]], [[offX]]] : memref<256x128xf32> -> !xegpu.tensor_desc<16x64xf32>
+ %tdesc = xegpu.create_nd_tdesc %src[0, 0] : memref<256x128xf32>
+ -> !xegpu.tensor_desc<128x64xf32, #xegpu.layout<sg_layout = [8, 4], sg_data = [16, 64]>>
+ gpu.return
+ }
+
// CHECK-LABEL: load_nd_tdesc
// CHECK-SAME: %[[ARG_0:.*]]: memref<256x128xf32>
gpu.func @load_nd_tdesc(%src: memref<256x128xf32>) {
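To make the shared-data case concrete (again my own arithmetic, not a CHECK line): for subgroup 5, #map gives IdY = 5 floordiv 4 = 1 and #map1 gives IdX = 5 mod 4 = 1, so LY = 16 and LX = 64; after wrapping with remu against 128 and 64, that subgroup's descriptor covers rows [16, 32) and columns [0, 64) of the source.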
diff --git a/mlir/test/Dialect/XeGPU/xegpu-wg-to-sg.mlir b/mlir/test/Dialect/XeGPU/xegpu-wg-to-sg.mlir
index d4b0037..180ba8a 100644
--- a/mlir/test/Dialect/XeGPU/xegpu-wg-to-sg.mlir
+++ b/mlir/test/Dialect/XeGPU/xegpu-wg-to-sg.mlir
@@ -4,34 +4,26 @@
//CHECK: #map1 = affine_map<()[s0] -> (s0 mod 4)>
gpu.module @test_1_1_assignment {
// CHECK-LABEL: create_nd_tdesc
- // CHECK-SAME: %[[ARG_0:.*]]: memref<256x128xf32>
+ // CHECK-SAME: [[ARG_0:%.*]]: memref<256x128xf32>
gpu.func @create_nd_tdesc(%src: memref<256x128xf32>) {
- // CHECK: %[[SGID:.*]] = gpu.subgroup_id
- // CHECK: %[[C8:.*]] = arith.constant 8 : index
- // CHECK: %[[C32:.*]] = arith.constant 32 : index
- // CHECK: %[[C4:.*]] = arith.constant 4 : index
- // CHECK: %[[C32_0:.*]] = arith.constant 32 : index
- // CHECK: %[[C4_1:.*]] = arith.constant 4 : index
- // CHECK: %[[DIV:.*]] = affine.apply #map()[%[[SGID]]]
- // CHECK: %[[REM:.*]] = affine.apply #map1()[%[[SGID]]]
- // CHECK: %[[MUL1:.*]] = index.mul %[[DIV]], %[[C32]]
- // CHECK: %[[MUL2:.*]] = index.mul %[[REM]], %[[C32_0]]
- // CHECK: %[[C0:.*]] = arith.constant 0 : index
- // CHECK: %[[C256:.*]] = arith.constant 256 : index
- // CHECK: %[[MOD:.*]] = index.remu %[[MUL1]], %[[C256]]
- // CHECK: %[[C0_2:.*]] = arith.constant 0 : index
- // CHECK: %[[ADD1:.*]] = index.add %[[MOD]], %[[C0_2]]
- // CHECK: %[[C0_3:.*]] = arith.constant 0 : index
- // CHECK: %[[C128:.*]] = arith.constant 128 : index
- // CHECK: %[[MOD1:.*]] = index.remu %[[MUL2]], %[[C128]]
- // CHECK: %[[C0_4:.*]] = arith.constant 0 : index
- // CHECK: %[[ADD2:.*]] = index.add %[[MOD1]], %[[C0_4]]
- // CHECK: %[[TDESC:.*]] = xegpu.create_nd_tdesc %[[ARG_0]][%[[ADD1]], %[[ADD2]]] : memref<256x128xf32>
- // CHECK-SAME: -> !xegpu.tensor_desc<32x32xf32, #xegpu.layout<lane_layout = [1, 16], lane_data = [1, 1]>>
- // CHECK: gpu.return
- %tdesc = xegpu.create_nd_tdesc %src[0, 0] : memref<256x128xf32>
- -> !xegpu.tensor_desc<256x128xf32, #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 32], lane_layout = [1, 16], lane_data = [1, 1]>>
- gpu.return
+ //CHECK: [[SGID:%.+]] = gpu.subgroup_id : index
+ //CHECK: [[SGIDY:%.+]] = affine.apply #map()[[[SGID]]]
+ //CHECK: [[SGIDX:%.+]] = affine.apply #map1()[[[SGID]]]
+ //CHECK: [[C32:%.+]] = arith.constant 32 : index
+ //CHECK: [[LY:%.+]] = index.mul [[SGIDY]], [[C32]]
+ //CHECK: [[LX:%.+]] = index.mul [[SGIDX]], [[C32]]
+ //CHECK: [[C0:%.+]] = arith.constant 0 : index
+ //CHECK: [[C0_1:%.+]] = arith.constant 0 : index
+ //CHECK: [[UY:%.+]] = arith.addi [[LY]], [[C0]] : index
+ //CHECK: [[UX:%.+]] = arith.addi [[LX]], [[C0_1]] : index
+ //CHECK: [[C256:%.+]] = arith.constant 256 : index
+ //CHECK: [[Y:%.+]] = index.remu [[UY]], [[C256]]
+ //CHECK: [[C128:%.+]] = arith.constant 128 : index
+ //CHECK: [[X:%.+]] = index.remu [[UX]], [[C128]]
+ //CHECK: [[TDESC:%.+]] = xegpu.create_nd_tdesc [[ARG_0]][[[Y]], [[X]]] : memref<256x128xf32> -> !xegpu.tensor_desc<32x32xf32, #xegpu.layout<lane_layout = [1, 16], lane_data = [1, 1]>>
+ %tdesc = xegpu.create_nd_tdesc %src[0, 0] : memref<256x128xf32>
+ -> !xegpu.tensor_desc<256x128xf32, #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 32], lane_layout = [1, 16], lane_data = [1, 1]>>
+ gpu.return
}
// CHECK-LABEL: load_nd_tdesc
@@ -347,7 +339,7 @@ gpu.func @dpas_no_sg_data(%a: memref<128x128xf16>, %b: memref<128x128xf16>) {
// CHECK-LABEL: @subgroup_id_range_nested_if
gpu.func @subgroup_id_range_nested_if(%src: memref<256x128xf32>, %src1: memref<128x64xf32>) {
%sg_id = gpu.subgroup_id : index
- %c1 = arith.constant 1 : i1
+ %c1 = arith.constant 1 : i1
%c3 = arith.constant 3 : index
%c32 = arith.constant 32 : index
%tdesc = xegpu.create_nd_tdesc %src[0, 0] : memref<256x128xf32>
diff --git a/mlir/test/Dialect/common_folders.mlir b/mlir/test/Dialect/common_folders.mlir
new file mode 100644
index 0000000..92598b4
--- /dev/null
+++ b/mlir/test/Dialect/common_folders.mlir
@@ -0,0 +1,22 @@
+// RUN: mlir-opt %s --test-fold-type-converting-op --split-input-file | FileCheck %s
+
+// CHECK-LABEL: @test_fold_unary_op_f32_to_si32(
+func.func @test_fold_unary_op_f32_to_si32() -> tensor<4x2xsi32> {
+ // CHECK-NEXT: %[[POSITIVE_ONE:.*]] = arith.constant dense<1> : tensor<4x2xsi32>
+ // CHECK-NEXT: return %[[POSITIVE_ONE]] : tensor<4x2xsi32>
+ %operand = arith.constant dense<5.1> : tensor<4x2xf32>
+ %sign = test.sign %operand : (tensor<4x2xf32>) -> tensor<4x2xsi32>
+ return %sign : tensor<4x2xsi32>
+}
+
+// -----
+
+// CHECK-LABEL: @test_fold_binary_op_f32_to_i1(
+func.func @test_fold_binary_op_f32_to_i1() -> tensor<8xi1> {
+ // CHECK-NEXT: %[[FALSE:.*]] = arith.constant dense<false> : tensor<8xi1>
+ // CHECK-NEXT: return %[[FALSE]] : tensor<8xi1>
+ %lhs = arith.constant dense<5.1> : tensor<8xf32>
+ %rhs = arith.constant dense<4.2> : tensor<8xf32>
+ %less_than = test.less_than %lhs, %rhs : (tensor<8xf32>, tensor<8xf32>) -> tensor<8xi1>
+ return %less_than : tensor<8xi1>
+}
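The expected constants follow directly from the inputs: sign(5.1) is +1, so the unary fold materializes dense<1> : tensor<4x2xsi32>, and 5.1 < 4.2 is false, so the binary fold materializes dense<false> : tensor<8xi1>.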
diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/matmul-transpose-a.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/matmul-transpose-a.mlir
index 06a6e22..9d04357 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/matmul-transpose-a.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/matmul-transpose-a.mlir
@@ -9,7 +9,12 @@
// RUN: FileCheck %s
func.func @matmul_transpose_a(%A : tensor<?x?xf32>, %B : tensor<?x?xf32>, %C : tensor<?x?xf32>) {
- %res = linalg.matmul_transpose_a ins(%A, %B: tensor<?x?xf32>, tensor<?x?xf32>)
+ %res = linalg.matmul
+ indexing_maps = [
+ affine_map<(d0, d1, d2) -> (d2, d0)>,
+ affine_map<(d0, d1, d2) -> (d2, d1)>,
+ affine_map<(d0, d1, d2) -> (d0, d1)>]
+ ins(%A, %B: tensor<?x?xf32>, tensor<?x?xf32>)
outs(%C: tensor<?x?xf32>) -> tensor<?x?xf32>
%xf = tensor.cast %res : tensor<?x?xf32> to tensor<*xf32>
call @printMemrefF32(%xf) : (tensor<*xf32>) -> ()
@@ -56,7 +61,7 @@ func.func @main() {
module attributes {transform.with_named_sequence} {
transform.named_sequence @__transform_main(%module : !transform.any_op {transform.readonly}) {
- %matmul_transpose_a = transform.structured.match ops{["linalg.matmul_transpose_a"]} in %module
+ %matmul_transpose_a = transform.structured.match ops{["linalg.matmul"]} in %module
: (!transform.any_op) -> !transform.any_op
// Step 1: Tile for size [4] x [4], which corresponds to SVLs x SVLs, where
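Read as a formula, the three affine maps encode C(d0, d1) += A(d2, d0) * B(d2, d1), i.e. A is indexed with the reduction dimension first, which is what the retired linalg.matmul_transpose_a spelled out implicitly; the transform script then only needs to match the generic linalg.matmul name.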
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/outerproduct-f32.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/outerproduct-f32.mlir
index 0ee0166..219367a 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/outerproduct-f32.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/outerproduct-f32.mlir
@@ -46,7 +46,7 @@ func.func @test_outerproduct_with_accumulator_4x4xf32() {
%c0 = arith.constant 0 : index
%f10 = arith.constant 10.0 : f32
- %acc = vector.splat %f10 : vector<[4]x[4]xf32>
+ %acc = vector.broadcast %f10 : f32 to vector<[4]x[4]xf32>
%vector_i32 = llvm.intr.stepvector : vector<[4]xi32>
%vector = arith.sitofp %vector_i32 : vector<[4]xi32> to vector<[4]xf32>
%tile = vector.outerproduct %vector, %vector, %acc : vector<[4]xf32>, vector<[4]xf32>
@@ -103,7 +103,7 @@ func.func @test_masked_outerproduct_with_accumulator_4x4xf32() {
%ones = arith.constant dense<1> : vector<[4]xi32>
%f10 = arith.constant 10.0 : f32
- %acc = vector.splat %f10 : vector<[4]x[4]xf32>
+ %acc = vector.broadcast %f10 : f32 to vector<[4]x[4]xf32>
%step_vector = llvm.intr.stepvector : vector<[4]xi32>
%vector_i32 = arith.addi %step_vector, %ones : vector<[4]xi32>
%vector = arith.sitofp %vector_i32 : vector<[4]xi32> to vector<[4]xf32>
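The vector.splat to vector.broadcast rewrites in this and the following integration tests are mechanical; the broadcast form simply spells out the source scalar type. A sketch of the general shape of the change:

  %v = vector.splat %f : vector<[4]xf32>
  // becomes
  %v = vector.broadcast %f : f32 to vector<[4]xf32>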
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/outerproduct-f64.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/outerproduct-f64.mlir
index 8e81210..059f24a 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/outerproduct-f64.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/outerproduct-f64.mlir
@@ -52,7 +52,7 @@ func.func @test_outerproduct_with_accumulator_2x2xf64() {
%ones = arith.constant dense<1> : vector<[2]xi32>
%f10 = arith.constant 10.0 : f64
- %acc = vector.splat %f10 : vector<[2]x[2]xf64>
+ %acc = vector.broadcast %f10 : f64 to vector<[2]x[2]xf64>
%step_vector = llvm.intr.stepvector : vector<[2]xi32>
%vector_i32 = arith.addi %step_vector, %ones : vector<[2]xi32>
%vector = arith.sitofp %vector_i32 : vector<[2]xi32> to vector<[2]xf64>
@@ -108,7 +108,7 @@ func.func @test_masked_outerproduct_with_accumulator_2x2xf64() {
%ones = arith.constant dense<1> : vector<[2]xi32>
%f10 = arith.constant 10.0 : f64
- %acc = vector.splat %f10 : vector<[2]x[2]xf64>
+ %acc = vector.broadcast %f10 : f64 to vector<[2]x[2]xf64>
%step_vector = llvm.intr.stepvector : vector<[2]xi32>
%vector_i32 = arith.addi %step_vector, %ones : vector<[2]xi32>
%vector = arith.sitofp %vector_i32 : vector<[2]xi32> to vector<[2]xf64>
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/transfer-write-2d.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/transfer-write-2d.mlir
index c3bf379..bf6900c 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/transfer-write-2d.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/transfer-write-2d.mlir
@@ -10,7 +10,7 @@
// Vector store.
func.func @transfer_write_2d(%A : memref<?x?xf32>, %base1: index, %base2: index) {
%c0 = arith.constant 0.0 : f32
- %zero = vector.splat %c0 : vector<[4]x[4]xf32>
+ %zero = vector.broadcast %c0 : f32 to vector<[4]x[4]xf32>
vector.transfer_write %zero, %A[%base1, %base2] {in_bounds=[true, true]} :
vector<[4]x[4]xf32>, memref<?x?xf32>
return
@@ -22,7 +22,7 @@ func.func @transfer_write_2d_mask(%A : memref<?x?xf32>, %base1: index, %base2: i
%c2 = arith.constant 2 : index
%c3 = arith.constant 3 : index
%mask = vector.create_mask %c2, %c3 : vector<[4]x[4]xi1>
- %zero = vector.splat %c0 : vector<[4]x[4]xf32>
+ %zero = vector.broadcast %c0 : f32 to vector<[4]x[4]xf32>
vector.transfer_write %zero, %A[%base1, %base2], %mask {in_bounds=[true, true]} :
vector<[4]x[4]xf32>, memref<?x?xf32>
return
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction.mlir
index c990432..192f291 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction.mlir
@@ -106,7 +106,7 @@ func.func @matvec_i32() {
// val = (123 * 314) * 4 * vscale
// so ...
%vscale = vector.vscale
- %vscale_v = vector.splat %vscale : vector<3xindex>
+ %vscale_v = vector.broadcast %vscale : index to vector<3xindex>
%vscale_i32 = arith.index_cast %vscale_v : vector<3xindex> to vector<3xi32>
%mv1_div = arith.divui %mv1, %vscale_i32 : vector<3xi32>
// ... val / vscale = 123 * 314 * 4 = 154488
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/scalable-interleave.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/scalable-interleave.mlir
index d3b1fa4..2d8180a 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/scalable-interleave.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/scalable-interleave.mlir
@@ -7,8 +7,8 @@
func.func @entry() {
%f1 = arith.constant 1.0 : f32
%f2 = arith.constant 2.0 : f32
- %v1 = vector.splat %f1 : vector<[4]xf32>
- %v2 = vector.splat %f2 : vector<[4]xf32>
+ %v1 = vector.broadcast %f1 : f32 to vector<[4]xf32>
+ %v2 = vector.broadcast %f2 : f32 to vector<[4]xf32>
vector.print %v1 : vector<[4]xf32>
vector.print %v2 : vector<[4]xf32>
//
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/interleave.mlir b/mlir/test/Integration/Dialect/Vector/CPU/interleave.mlir
index f812c25..740c742 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/interleave.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/interleave.mlir
@@ -6,8 +6,8 @@
func.func @entry() {
%f1 = arith.constant 1.0 : f32
%f2 = arith.constant 2.0 : f32
- %v1 = vector.splat %f1 : vector<2x4xf32>
- %v2 = vector.splat %f2 : vector<2x4xf32>
+ %v1 = vector.broadcast %f1 : f32 to vector<2x4xf32>
+ %v2 = vector.broadcast %f2 : f32 to vector<2x4xf32>
vector.print %v1 : vector<2x4xf32>
vector.print %v2 : vector<2x4xf32>
//
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/outerproduct-f32.mlir b/mlir/test/Integration/Dialect/Vector/CPU/outerproduct-f32.mlir
index f7e2229..e25795a 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/outerproduct-f32.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/outerproduct-f32.mlir
@@ -14,9 +14,9 @@
!vector_type_R = vector<7xf32>
func.func @vector_outerproduct_splat_8x8(%fa: f32, %fb: f32, %fc: f32) -> !vector_type_C {
- %a = vector.splat %fa: !vector_type_A
- %b = vector.splat %fb: !vector_type_B
- %c = vector.splat %fc: !vector_type_C
+ %a = vector.broadcast %fa: f32 to !vector_type_A
+ %b = vector.broadcast %fb: f32 to !vector_type_B
+ %c = vector.broadcast %fc: f32 to !vector_type_C
%d = vector.outerproduct %a, %b, %c : !vector_type_A, !vector_type_B
return %d: !vector_type_C
}
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/outerproduct-i64.mlir b/mlir/test/Integration/Dialect/Vector/CPU/outerproduct-i64.mlir
index a19dfa1..0675102 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/outerproduct-i64.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/outerproduct-i64.mlir
@@ -14,9 +14,9 @@
!vector_type_R = vector<7xi64>
func.func @vector_outerproduct_splat_8x8(%ia: i64, %ib: i64, %ic: i64) -> !vector_type_C {
- %a = vector.splat %ia: !vector_type_A
- %b = vector.splat %ib: !vector_type_B
- %c = vector.splat %ic: !vector_type_C
+ %a = vector.broadcast %ia: i64 to !vector_type_A
+ %b = vector.broadcast %ib: i64 to !vector_type_B
+ %c = vector.broadcast %ic: i64 to !vector_type_C
%d = vector.outerproduct %a, %b, %c : !vector_type_A, !vector_type_B
return %d: !vector_type_C
}
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/transfer-read-1d.mlir b/mlir/test/Integration/Dialect/Vector/CPU/transfer-read-1d.mlir
index 639eed4..895b881 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/transfer-read-1d.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/transfer-read-1d.mlir
@@ -137,7 +137,7 @@ func.func @transfer_read_1d_mask_in_bounds(
// Non-contiguous, strided store.
func.func @transfer_write_1d(%A : memref<?x?xf32>, %base1 : index, %base2 : index) {
%fn1 = arith.constant -1.0 : f32
- %vf0 = vector.splat %fn1 : vector<7xf32>
+ %vf0 = vector.broadcast %fn1 : f32 to vector<7xf32>
vector.transfer_write %vf0, %A[%base1, %base2]
{permutation_map = affine_map<(d0, d1) -> (d0)>}
: vector<7xf32>, memref<?x?xf32>
@@ -147,7 +147,7 @@ func.func @transfer_write_1d(%A : memref<?x?xf32>, %base1 : index, %base2 : inde
// Non-contiguous, strided store.
func.func @transfer_write_1d_mask(%A : memref<?x?xf32>, %base1 : index, %base2 : index) {
%fn1 = arith.constant -2.0 : f32
- %vf0 = vector.splat %fn1 : vector<7xf32>
+ %vf0 = vector.broadcast %fn1 : f32 to vector<7xf32>
%mask = arith.constant dense<[1, 0, 1, 0, 1, 1, 1]> : vector<7xi1>
vector.transfer_write %vf0, %A[%base1, %base2], %mask
{permutation_map = affine_map<(d0, d1) -> (d0)>}
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/transfer-read-2d.mlir b/mlir/test/Integration/Dialect/Vector/CPU/transfer-read-2d.mlir
index 009c137..80dff9d 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/transfer-read-2d.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/transfer-read-2d.mlir
@@ -100,7 +100,7 @@ func.func @transfer_read_2d_broadcast(
// Vector store.
func.func @transfer_write_2d(%A : memref<?x?xf32>, %base1: index, %base2: index) {
%fn1 = arith.constant -1.0 : f32
- %vf0 = vector.splat %fn1 : vector<1x4xf32>
+ %vf0 = vector.broadcast %fn1 : f32 to vector<1x4xf32>
vector.transfer_write %vf0, %A[%base1, %base2]
{permutation_map = affine_map<(d0, d1) -> (d0, d1)>} :
vector<1x4xf32>, memref<?x?xf32>
@@ -111,7 +111,7 @@ func.func @transfer_write_2d(%A : memref<?x?xf32>, %base1: index, %base2: index)
func.func @transfer_write_2d_mask(%A : memref<?x?xf32>, %base1: index, %base2: index) {
%fn1 = arith.constant -2.0 : f32
%mask = arith.constant dense<[[1, 0, 1, 0]]> : vector<1x4xi1>
- %vf0 = vector.splat %fn1 : vector<1x4xf32>
+ %vf0 = vector.broadcast %fn1 : f32 to vector<1x4xf32>
vector.transfer_write %vf0, %A[%base1, %base2], %mask
{permutation_map = affine_map<(d0, d1) -> (d0, d1)>} :
vector<1x4xf32>, memref<?x?xf32>
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/transfer-read-3d.mlir b/mlir/test/Integration/Dialect/Vector/CPU/transfer-read-3d.mlir
index d41d9c9..93e6a12 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/transfer-read-3d.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/transfer-read-3d.mlir
@@ -62,7 +62,7 @@ func.func @transfer_read_3d_transposed(%A : memref<?x?x?x?xf32>,
func.func @transfer_write_3d(%A : memref<?x?x?x?xf32>,
%o: index, %a: index, %b: index, %c: index) {
%fn1 = arith.constant -1.0 : f32
- %vf0 = vector.splat %fn1 : vector<2x9x3xf32>
+ %vf0 = vector.broadcast %fn1 : f32 to vector<2x9x3xf32>
vector.transfer_write %vf0, %A[%o, %a, %b, %c]
: vector<2x9x3xf32>, memref<?x?x?x?xf32>
return
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/transfer-read.mlir b/mlir/test/Integration/Dialect/Vector/CPU/transfer-read.mlir
index d1a2790..18084e3 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/transfer-read.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/transfer-read.mlir
@@ -45,7 +45,7 @@ func.func @transfer_read_mask_inbounds_4(%A : memref<?xf32>, %base: index) {
func.func @transfer_write_1d(%A : memref<?xf32>, %base: index) {
%f0 = arith.constant 0.0 : f32
- %vf0 = vector.splat %f0 : vector<4xf32>
+ %vf0 = vector.broadcast %f0 : f32 to vector<4xf32>
vector.transfer_write %vf0, %A[%base]
{permutation_map = affine_map<(d0) -> (d0)>} :
vector<4xf32>, memref<?xf32>
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/transfer-write.mlir b/mlir/test/Integration/Dialect/Vector/CPU/transfer-write.mlir
index def7081..2251738 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/transfer-write.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/transfer-write.mlir
@@ -5,7 +5,7 @@
func.func @transfer_write16_inbounds_1d(%A : memref<?xf32>, %base: index) {
%f = arith.constant 16.0 : f32
- %v = vector.splat %f : vector<16xf32>
+ %v = vector.broadcast %f : f32 to vector<16xf32>
vector.transfer_write %v, %A[%base]
{permutation_map = affine_map<(d0) -> (d0)>, in_bounds = [true]}
: vector<16xf32>, memref<?xf32>
@@ -14,7 +14,7 @@ func.func @transfer_write16_inbounds_1d(%A : memref<?xf32>, %base: index) {
func.func @transfer_write13_1d(%A : memref<?xf32>, %base: index) {
%f = arith.constant 13.0 : f32
- %v = vector.splat %f : vector<13xf32>
+ %v = vector.broadcast %f : f32 to vector<13xf32>
vector.transfer_write %v, %A[%base]
{permutation_map = affine_map<(d0) -> (d0)>}
: vector<13xf32>, memref<?xf32>
@@ -23,7 +23,7 @@ func.func @transfer_write13_1d(%A : memref<?xf32>, %base: index) {
func.func @transfer_write17_1d(%A : memref<?xf32>, %base: index) {
%f = arith.constant 17.0 : f32
- %v = vector.splat %f : vector<17xf32>
+ %v = vector.broadcast %f : f32 to vector<17xf32>
vector.transfer_write %v, %A[%base]
{permutation_map = affine_map<(d0) -> (d0)>}
: vector<17xf32>, memref<?xf32>
@@ -42,7 +42,7 @@ func.func @transfer_read_1d(%A : memref<?xf32>) -> vector<32xf32> {
func.func @transfer_write_inbounds_3d(%A : memref<4x4x4xf32>) {
%c0 = arith.constant 0: index
%f = arith.constant 0.0 : f32
- %v0 = vector.splat %f : vector<2x3x4xf32>
+ %v0 = vector.broadcast %f : f32 to vector<2x3x4xf32>
%f1 = arith.constant 1.0 : f32
%f2 = arith.constant 2.0 : f32
%f3 = arith.constant 3.0 : f32
diff --git a/mlir/test/Pass/pipeline-options-parsing.mlir b/mlir/test/Pass/pipeline-options-parsing.mlir
index 9385d35..03ac38e 100644
--- a/mlir/test/Pass/pipeline-options-parsing.mlir
+++ b/mlir/test/Pass/pipeline-options-parsing.mlir
@@ -13,6 +13,7 @@
// RUN: mlir-opt %s -verify-each=false -pass-pipeline='builtin.module(builtin.module(func.func(test-options-pass{list=3}), func.func(test-options-pass{enum=one list=1,2,3,4 string=foo"bar"baz})))' -dump-pass-pipeline 2>&1 | FileCheck --check-prefix=CHECK_6 %s
// RUN: mlir-opt %s -verify-each=false '-test-options-super-pass-pipeline=super-list={{enum=zero list=1 string=foo},{enum=one list=2 string="bar"},{enum=two list=3 string={baz}}}' -dump-pass-pipeline 2>&1 | FileCheck --check-prefix=CHECK_7 %s
// RUN: mlir-opt %s -verify-each=false -pass-pipeline='builtin.module(func.func(test-options-super-pass{list={{enum=zero list={1} string=foo },{enum=one list={2} string=bar },{enum=two list={3} string=baz }}}))' -dump-pass-pipeline 2>&1 | FileCheck --check-prefix=CHECK_7 %s
+// RUN: mlir-opt %s -verify-each=false -test-options-super-set-ab-pipeline='foo=true bar=false' -dump-pass-pipeline 2>&1 | FileCheck --check-prefix=CHECK_11 %s
// This test checks that lists-of-nested-options like 'option1={...},{....}' can be parsed
@@ -106,3 +107,12 @@
// CHECK_10-NEXT: test-options-pass{enum=zero string= string-list={,}}
// CHECK_10-NEXT: )
// CHECK_10-NEXT: )
+
+// CHECK_11: builtin.module(
+// CHECK_11-NEXT: func.func(
+// CHECK_11-NEXT: test-options-pass-a
+// CHECK_11-NEXT: )
+// CHECK_11-NEXT: func.func(
+// CHECK_11-NEXT: test-options-pass-b
+// CHECK_11-NEXT: )
+// CHECK_11-NEXT: )
diff --git a/mlir/test/Target/LLVMIR/Import/intrinsic-prefer-unregistered.ll b/mlir/test/Target/LLVMIR/Import/intrinsic-prefer-unregistered.ll
index 797a75c..18c9319 100644
--- a/mlir/test/Target/LLVMIR/Import/intrinsic-prefer-unregistered.ll
+++ b/mlir/test/Target/LLVMIR/Import/intrinsic-prefer-unregistered.ll
@@ -3,9 +3,9 @@
; CHECK-LABEL: llvm.func @lifetime
define void @lifetime() {
%a = alloca [16 x i8]
- ; CHECK: llvm.call_intrinsic "llvm.lifetime.start.p0"({{.*}}, %[[ptr:.*]]) : (i64, !llvm.ptr {llvm.nonnull}) -> ()
- call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %a)
- ; CHECK: llvm.call_intrinsic "llvm.lifetime.end.p0"({{.*}}, %[[ptr]]) : (i64, !llvm.ptr {llvm.nonnull}) -> ()
- call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %a)
+ ; CHECK: llvm.call_intrinsic "llvm.lifetime.start.p0"(%[[ptr:.*]]) : (!llvm.ptr {llvm.nonnull}) -> ()
+ call void @llvm.lifetime.start.p0(ptr nonnull %a)
+ ; CHECK: llvm.call_intrinsic "llvm.lifetime.end.p0"(%[[ptr]]) : (!llvm.ptr {llvm.nonnull}) -> ()
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
ret void
}
diff --git a/mlir/test/Target/LLVMIR/Import/intrinsic.ll b/mlir/test/Target/LLVMIR/Import/intrinsic.ll
index a419d75..9f882ad 100644
--- a/mlir/test/Target/LLVMIR/Import/intrinsic.ll
+++ b/mlir/test/Target/LLVMIR/Import/intrinsic.ll
@@ -876,10 +876,10 @@ define void @stack_restore(ptr %0, ptr addrspace(1) %1) {
; CHECK-LABEL: llvm.func @lifetime
define void @lifetime() {
%a = alloca [16 x i8]
- ; CHECK: llvm.intr.lifetime.start 16, %{{.*}} : !llvm.ptr
- call void @llvm.lifetime.start.p0(i64 16, ptr %a)
- ; CHECK: llvm.intr.lifetime.end 32, %{{.*}} : !llvm.ptr
- call void @llvm.lifetime.end.p0(i64 32, ptr %a)
+ ; CHECK: llvm.intr.lifetime.start %{{.*}} : !llvm.ptr
+ call void @llvm.lifetime.start.p0(ptr %a)
+ ; CHECK: llvm.intr.lifetime.end %{{.*}} : !llvm.ptr
+ call void @llvm.lifetime.end.p0(ptr %a)
ret void
}
@@ -1353,8 +1353,8 @@ declare <8 x i64> @llvm.vp.fptoui.v8i64.v8f64(<8 x double>, <8 x i1>, i32)
declare <8 x i64> @llvm.vp.fptosi.v8i64.v8f64(<8 x double>, <8 x i1>, i32)
declare <8 x i64> @llvm.vp.ptrtoint.v8i64.v8p0(<8 x ptr>, <8 x i1>, i32)
declare <8 x ptr> @llvm.vp.inttoptr.v8p0.v8i64(<8 x i64>, <8 x i1>, i32)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare ptr @llvm.invariant.start.p0(i64 immarg, ptr nocapture)
declare void @llvm.invariant.end.p0(ptr, i64 immarg, ptr nocapture)
declare ptr @llvm.launder.invariant.group.p0(ptr nocapture)
diff --git a/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir b/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir
index eb3510c..2b420ed 100644
--- a/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir
+++ b/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir
@@ -1104,9 +1104,9 @@ llvm.func @lifetime() {
%c = llvm.mlir.constant(16 : i64) : i64
%a = llvm.alloca %c x i8 : (i64) -> !llvm.ptr
// CHECK: call void @llvm.lifetime.start
- llvm.intr.lifetime.start 16, %a : !llvm.ptr
+ llvm.intr.lifetime.start %a : !llvm.ptr
// CHECK: call void @llvm.lifetime.end
- llvm.intr.lifetime.end 16, %a : !llvm.ptr
+ llvm.intr.lifetime.end %a : !llvm.ptr
llvm.return
}
@@ -1418,8 +1418,8 @@ llvm.func @experimental_constrained_fpext(%s: f32, %v: vector<4xf32>) {
// CHECK-DAG: declare <2 x i32> @llvm.vector.extract.v2i32.v8i32(<8 x i32>, i64 immarg)
// CHECK-DAG: declare { <2 x double>, <2 x double> } @llvm.vector.deinterleave2.v4f64(<4 x double>)
// CHECK-DAG: declare { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32>)
-// CHECK-DAG: declare void @llvm.lifetime.start.p0(i64 immarg, ptr captures(none))
-// CHECK-DAG: declare void @llvm.lifetime.end.p0(i64 immarg, ptr captures(none))
+// CHECK-DAG: declare void @llvm.lifetime.start.p0(ptr captures(none))
+// CHECK-DAG: declare void @llvm.lifetime.end.p0(ptr captures(none))
// CHECK-DAG: declare ptr @llvm.invariant.start.p0(i64 immarg, ptr captures(none))
// CHECK-DAG: declare void @llvm.invariant.end.p0(ptr, i64 immarg, ptr captures(none))
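Taken together, the lifetime hunks track an LLVM intrinsic signature change: llvm.lifetime.start/end no longer take an explicit size, so the corresponding MLIR ops drop it as well. A before/after sketch of the MLIR form exercised above:

  llvm.intr.lifetime.start 16, %a : !llvm.ptr   // old form
  llvm.intr.lifetime.start %a : !llvm.ptr       // new form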
diff --git a/mlir/test/Target/LLVMIR/nvvmir-invalid.mlir b/mlir/test/Target/LLVMIR/nvvmir-invalid.mlir
index 85478cc..991222c 100644
--- a/mlir/test/Target/LLVMIR/nvvmir-invalid.mlir
+++ b/mlir/test/Target/LLVMIR/nvvmir-invalid.mlir
@@ -1,5 +1,24 @@
// RUN: mlir-translate -verify-diagnostics -split-input-file -mlir-to-llvmir %s
+llvm.func @pmevent_no_id() {
+ // expected-error @below {{either `id` or `mask` must be set}}
+ nvvm.pmevent
+}
+
+// -----
+
+llvm.func @pmevent_bigger15() {
+ // expected-error @below {{`id` must be between 0 and 15}}
+ nvvm.pmevent id = 141
+}
+
+// -----
+
+llvm.func @pmevent_many_ids() {
+ // expected-error @below {{`id` and `mask` cannot be set at the same time}}
+ nvvm.pmevent id = 1 mask = 1
+}
+
// -----
llvm.func @kernel_func(%numberOfThreads : i32) {
diff --git a/mlir/test/Target/LLVMIR/nvvmir.mlir b/mlir/test/Target/LLVMIR/nvvmir.mlir
index 5c2cfa4..b1800e8 100644
--- a/mlir/test/Target/LLVMIR/nvvmir.mlir
+++ b/mlir/test/Target/LLVMIR/nvvmir.mlir
@@ -918,3 +918,14 @@ llvm.func @nvvm_dot_accumulate_2way(%a: vector<2xi16>, %b: vector<4xi8>, %c: i32
%7 = nvvm.dot.accumulate.2way %a <signed>, %b <signed>, %c {b_hi = true}: vector<2xi16>, vector<4xi8>
llvm.return
}
+
+// -----
+
+// CHECK-LABEL: @nvvm_pmevent
+llvm.func @nvvm_pmevent() {
+ // CHECK: call void @llvm.nvvm.pm.event.mask(i16 15000)
+ nvvm.pmevent mask = 15000
+ // CHECK: call void @llvm.nvvm.pm.event.mask(i16 4)
+ nvvm.pmevent mask = 4
+ llvm.return
+}
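Combining the negative tests with the translation test above, a valid nvvm.pmevent sets exactly one of the two attributes, with id limited to 0 through 15; for example:

  nvvm.pmevent id = 3
  nvvm.pmevent mask = 15000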
diff --git a/mlir/test/Target/LLVMIR/omptarget-atomic-capture-control-options.mlir b/mlir/test/Target/LLVMIR/omptarget-atomic-capture-control-options.mlir
new file mode 100644
index 0000000..3553907
--- /dev/null
+++ b/mlir/test/Target/LLVMIR/omptarget-atomic-capture-control-options.mlir
@@ -0,0 +1,44 @@
+// RUN: mlir-translate -mlir-to-llvmir %s | FileCheck %s
+
+// CHECK: atomicrmw add ptr %loadgep_, i32 1 monotonic, align 4, !amdgpu.no.remote.memory !{{.*}}
+
+module attributes {dlti.dl_spec = #dlti.dl_spec<!llvm.ptr = dense<64> : vector<4xi64>, !llvm.ptr<1> = dense<64> : vector<4xi64>, !llvm.ptr<2> = dense<32> : vector<4xi64>, !llvm.ptr<3> = dense<32> : vector<4xi64>, !llvm.ptr<4> = dense<64> : vector<4xi64>, !llvm.ptr<5> = dense<32> : vector<4xi64>, !llvm.ptr<6> = dense<32> : vector<4xi64>, !llvm.ptr<7> = dense<[160, 256, 256, 32]> : vector<4xi64>, !llvm.ptr<8> = dense<[128, 128, 128, 48]> : vector<4xi64>, !llvm.ptr<9> = dense<[192, 256, 256, 32]> : vector<4xi64>, i64 = dense<64> : vector<2xi64>, i1 = dense<8> : vector<2xi64>, i8 = dense<8> : vector<2xi64>, i16 = dense<16> : vector<2xi64>, i32 = dense<32> : vector<2xi64>, f16 = dense<16> : vector<2xi64>, f64 = dense<64> : vector<2xi64>, f128 = dense<128> : vector<2xi64>, "dlti.endianness" = "little", "dlti.legal_int_widths" = array<i32: 32, 64>, "dlti.stack_alignment" = 32 : i64, "dlti.alloca_memory_space" = 5 : ui64, "dlti.global_memory_space" = 1 : ui64>, fir.atomic_fine_grained_memory, fir.defaultkind = "a1c4d8i4l4r4", fir.kindmap = "", fir.target_cpu = "generic-hsa", llvm.data_layout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:160:256:256:32-p8:128:128:128:48-p9:192:256:256:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7:8:9", llvm.target_triple = "amdgcn-amd-amdhsa", omp.flags = #omp.flags<openmp_device_version = 31>, omp.is_gpu = true, omp.is_target_device = true, omp.requires = #omp<clause_requires none>, omp.target_triples = [], omp.version = #omp.version<version = 31>} {
+ llvm.func @_QQmain() attributes {fir.bindc_name = "TEST", omp.declare_target = #omp.declaretarget<device_type = (host), capture_clause = (to)>, target_cpu = "generic-hsa"} {
+ %0 = llvm.mlir.constant(1 : i64) : i64
+ %1 = llvm.alloca %0 x i32 {bindc_name = "threads"} : (i64) -> !llvm.ptr<5>
+ %2 = llvm.addrspacecast %1 : !llvm.ptr<5> to !llvm.ptr
+ %3 = llvm.mlir.constant(1 : i64) : i64
+ %4 = llvm.alloca %3 x i32 {bindc_name = "capture"} : (i64) -> !llvm.ptr<5>
+ %5 = llvm.addrspacecast %4 : !llvm.ptr<5> to !llvm.ptr
+ %6 = llvm.mlir.constant(1 : i64) : i64
+ %7 = llvm.alloca %6 x i32 {bindc_name = "a"} : (i64) -> !llvm.ptr<5>
+ %8 = llvm.addrspacecast %7 : !llvm.ptr<5> to !llvm.ptr
+ %9 = llvm.mlir.constant(0 : i32) : i32
+ %10 = llvm.mlir.constant(128 : i32) : i32
+ %11 = llvm.mlir.constant(1 : i64) : i64
+ %12 = llvm.mlir.constant(1 : i64) : i64
+ %13 = llvm.mlir.constant(1 : i64) : i64
+ llvm.store %10, %2 : i32, !llvm.ptr
+ llvm.store %9, %8 : i32, !llvm.ptr
+ %14 = omp.map.info var_ptr(%2 : !llvm.ptr, i32) map_clauses(implicit, exit_release_or_enter_alloc) capture(ByCopy) -> !llvm.ptr {name = "threads"}
+ %15 = omp.map.info var_ptr(%5 : !llvm.ptr, i32) map_clauses(implicit, exit_release_or_enter_alloc) capture(ByCopy) -> !llvm.ptr {name = "capture"}
+ %16 = omp.map.info var_ptr(%8 : !llvm.ptr, i32) map_clauses(implicit, exit_release_or_enter_alloc) capture(ByCopy) -> !llvm.ptr {name = "a"}
+ omp.target map_entries(%14 -> %arg0, %15 -> %arg1, %16 -> %arg2 : !llvm.ptr, !llvm.ptr, !llvm.ptr) {
+ %17 = llvm.mlir.constant(1 : i32) : i32
+ %18 = llvm.load %arg0 : !llvm.ptr -> i32
+ omp.parallel num_threads(%18 : i32) {
+ omp.atomic.capture {
+ omp.atomic.read %arg1 = %arg2 : !llvm.ptr, !llvm.ptr, i32
+ omp.atomic.update %arg2 : !llvm.ptr {
+ ^bb0(%arg3: i32):
+ %19 = llvm.add %arg3, %17 : i32
+ omp.yield(%19 : i32)
+ } {atomic_control = #omp.atomic_control<fine_grained_memory = true>}
+ }
+ omp.terminator
+ }
+ omp.terminator
+ }
+ llvm.return
+ }
+}
diff --git a/mlir/test/Target/LLVMIR/omptarget-atomic-update-control-options.mlir b/mlir/test/Target/LLVMIR/omptarget-atomic-update-control-options.mlir
new file mode 100644
index 0000000..3b0005b
--- /dev/null
+++ b/mlir/test/Target/LLVMIR/omptarget-atomic-update-control-options.mlir
@@ -0,0 +1,36 @@
+// RUN: mlir-translate -mlir-to-llvmir %s | FileCheck %s
+
+// CHECK: atomicrmw add ptr %loadgep_, i32 1 monotonic, align 4, !amdgpu.ignore.denormal.mode !{{.*}}, !amdgpu.no.fine.grained.memory !{{.*}}, !amdgpu.no.remote.memory !{{.*}}
+
+module attributes {dlti.dl_spec = #dlti.dl_spec<!llvm.ptr = dense<64> : vector<4xi64>, !llvm.ptr<1> = dense<64> : vector<4xi64>, !llvm.ptr<2> = dense<32> : vector<4xi64>, !llvm.ptr<3> = dense<32> : vector<4xi64>, !llvm.ptr<4> = dense<64> : vector<4xi64>, !llvm.ptr<5> = dense<32> : vector<4xi64>, !llvm.ptr<6> = dense<32> : vector<4xi64>, !llvm.ptr<7> = dense<[160, 256, 256, 32]> : vector<4xi64>, !llvm.ptr<8> = dense<[128, 128, 128, 48]> : vector<4xi64>, !llvm.ptr<9> = dense<[192, 256, 256, 32]> : vector<4xi64>, i64 = dense<64> : vector<2xi64>, i1 = dense<8> : vector<2xi64>, i8 = dense<8> : vector<2xi64>, i16 = dense<16> : vector<2xi64>, i32 = dense<32> : vector<2xi64>, f16 = dense<16> : vector<2xi64>, f64 = dense<64> : vector<2xi64>, f128 = dense<128> : vector<2xi64>, "dlti.endianness" = "little", "dlti.legal_int_widths" = array<i32: 32, 64>, "dlti.stack_alignment" = 32 : i64, "dlti.alloca_memory_space" = 5 : ui64, "dlti.global_memory_space" = 1 : ui64>, fir.atomic_ignore_denormal_mode, fir.defaultkind = "a1c4d8i4l4r4", fir.kindmap = "", fir.target_cpu = "generic-hsa", llvm.data_layout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:160:256:256:32-p8:128:128:128:48-p9:192:256:256:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7:8:9", llvm.target_triple = "amdgcn-amd-amdhsa", omp.flags = #omp.flags<openmp_device_version = 31>, omp.is_gpu = true, omp.is_target_device = true, omp.requires = #omp<clause_requires none>, omp.target_triples = [], omp.version = #omp.version<version = 31>} {
+ llvm.func @_QQmain() attributes {fir.bindc_name = "TEST", omp.declare_target = #omp.declaretarget<device_type = (host), capture_clause = (to)>, target_cpu = "generic-hsa"} {
+ %0 = llvm.mlir.constant(1 : i64) : i64
+ %1 = llvm.alloca %0 x i32 {bindc_name = "threads"} : (i64) -> !llvm.ptr<5>
+ %2 = llvm.addrspacecast %1 : !llvm.ptr<5> to !llvm.ptr
+ %3 = llvm.mlir.constant(1 : i64) : i64
+ %4 = llvm.alloca %3 x i32 {bindc_name = "a"} : (i64) -> !llvm.ptr<5>
+ %5 = llvm.addrspacecast %4 : !llvm.ptr<5> to !llvm.ptr
+ %6 = llvm.mlir.constant(0 : i32) : i32
+ %7 = llvm.mlir.constant(128 : i32) : i32
+ %8 = llvm.mlir.constant(1 : i64) : i64
+ %9 = llvm.mlir.constant(1 : i64) : i64
+ llvm.store %7, %2 : i32, !llvm.ptr
+ llvm.store %6, %5 : i32, !llvm.ptr
+ %10 = omp.map.info var_ptr(%2 : !llvm.ptr, i32) map_clauses(implicit, exit_release_or_enter_alloc) capture(ByCopy) -> !llvm.ptr {name = "threads"}
+ %11 = omp.map.info var_ptr(%5 : !llvm.ptr, i32) map_clauses(implicit, exit_release_or_enter_alloc) capture(ByCopy) -> !llvm.ptr {name = "a"}
+ omp.target map_entries(%10 -> %arg0, %11 -> %arg1 : !llvm.ptr, !llvm.ptr) {
+ %12 = llvm.mlir.constant(1 : i32) : i32
+ %13 = llvm.load %arg0 : !llvm.ptr -> i32
+ omp.parallel num_threads(%13 : i32) {
+ omp.atomic.update %arg1 : !llvm.ptr {
+ ^bb0(%arg2: i32):
+ %14 = llvm.add %arg2, %12 : i32
+ omp.yield(%14 : i32)
+ } {atomic_control = #omp.atomic_control<ignore_denormal_mode = true>}
+ omp.terminator
+ }
+ omp.terminator
+ }
+ llvm.return
+ }
+}
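Both new OpenMP target tests hinge on the #omp.atomic_control attribute: its flags determine which !amdgpu.* metadata the translated atomicrmw carries, as the two CHECK lines show. A trimmed sketch of the controlling region, assuming %ptr and %c1 are defined as in the tests above:

  omp.atomic.update %ptr : !llvm.ptr {
  ^bb0(%arg: i32):
    %v = llvm.add %arg, %c1 : i32
    omp.yield(%v : i32)
  } {atomic_control = #omp.atomic_control<ignore_denormal_mode = true>}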
diff --git a/mlir/test/Target/LLVMIR/rocdl.mlir b/mlir/test/Target/LLVMIR/rocdl.mlir
index 740990a..ce43941 100644
--- a/mlir/test/Target/LLVMIR/rocdl.mlir
+++ b/mlir/test/Target/LLVMIR/rocdl.mlir
@@ -125,6 +125,23 @@ llvm.func @rocdl.ballot64(%pred : i1) -> i64 {
llvm.return %0 : i64
}
+llvm.func @rocdl.readfirstlane(%src0 : f32, %src1: f64, %src2: i32, %src3: vector<2 x f32>) -> f32 {
+ // CHECK-LABEL: rocdl.readfirstlane
+ // CHECK: call float @llvm.amdgcn.readfirstlane.f32(float %{{.*}})
+ %0 = rocdl.readfirstlane %src0 : f32
+
+ // CHECK: call double @llvm.amdgcn.readfirstlane.f64(double %{{.*}})
+ %1 = rocdl.readfirstlane %src1 : f64
+
+ // CHECK: call i32 @llvm.amdgcn.readfirstlane.i32(i32 %{{.*}})
+ %2 = rocdl.readfirstlane %src2 : i32
+
+ // CHECK: call <2 x float> @llvm.amdgcn.readfirstlane.v2f32(<2 x float> %{{.*}})
+ %3 = rocdl.readfirstlane %src3 : vector<2 x f32>
+
+ llvm.return %0 : f32
+}
+
llvm.func @rocdl.readlane(%src0 : f32, %src1: f64, %src2: i32, %src3: vector<2 x f32>) -> f32 {
%idx = llvm.mlir.constant(0 : i32) : i32
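As the CHECK lines show, rocdl.readfirstlane is type-polymorphic and lowers to the element-type-suffixed @llvm.amdgcn.readfirstlane.* intrinsic, mirroring the existing rocdl.readlane test that follows; a minimal use is simply:

  %r = rocdl.readfirstlane %val : f32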
diff --git a/mlir/test/Target/SPIRV/arm-tensor-constant.mlir b/mlir/test/Target/SPIRV/arm-tensor-constant.mlir
index 275e586..7fb8af1 100644
--- a/mlir/test/Target/SPIRV/arm-tensor-constant.mlir
+++ b/mlir/test/Target/SPIRV/arm-tensor-constant.mlir
@@ -1,17 +1,36 @@
// RUN: mlir-translate --no-implicit-module --test-spirv-roundtrip %s | FileCheck %s
-// DISABLED: %if spirv-tools %{ mlir-translate --no-implicit-module --serialize-spirv %s | spirv-val %}
-
-// FIXME(#152012): Fix arm tensor constant validation errors and reenable spirv-val tests.
+// RUN: %if spirv-tools %{ mlir-translate --no-implicit-module --serialize-spirv %s | spirv-val %}
spirv.module Logical Vulkan requires #spirv.vce<v1.3,
[VulkanMemoryModel, Shader, TensorsARM, Linkage], [SPV_KHR_vulkan_memory_model, SPV_ARM_tensors]> {
- // CHECK-LABEL: @arm_tensor_of_i32
- spirv.func @arm_tensor_of_i32() -> (!spirv.arm.tensor<2x3xi32>) "None" {
+ // CHECK-LABEL: @rank_1_arm_tensor_of_i32
+ spirv.func @rank_1_arm_tensor_of_i32() -> (!spirv.arm.tensor<3xi32>) "None" {
+ // CHECK: {{%.*}} = spirv.Constant dense<[1, 2, 3]> : !spirv.arm.tensor<3xi32>
+ %0 = spirv.Constant dense<[1, 2, 3]> : !spirv.arm.tensor<3xi32>
+ spirv.ReturnValue %0 : !spirv.arm.tensor<3xi32>
+ }
+
+ // CHECK-LABEL: @rank_2_arm_tensor_of_i32
+ spirv.func @rank_2_arm_tensor_of_i32() -> (!spirv.arm.tensor<2x3xi32>) "None" {
// CHECK: {{%.*}} = spirv.Constant dense<{{\[}}[1, 2, 3], [4, 5, 6]]> : !spirv.arm.tensor<2x3xi32>
%0 = spirv.Constant dense<[[1, 2, 3], [4, 5, 6]]> : !spirv.arm.tensor<2x3xi32>
spirv.ReturnValue %0 : !spirv.arm.tensor<2x3xi32>
}
+ // CHECK-LABEL: @rank_3_arm_tensor_of_i32
+ spirv.func @rank_3_arm_tensor_of_i32() -> (!spirv.arm.tensor<2x2x3xi32>) "None" {
+ // CHECK: {{%.*}} = spirv.Constant dense<{{\[}}{{\[}}[1, 2, 3], [4, 5, 6]], {{\[}}[7, 8, 9], [10, 11, 12]]]> : !spirv.arm.tensor<2x2x3xi32>
+ %0 = spirv.Constant dense<[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]> : !spirv.arm.tensor<2x2x3xi32>
+ spirv.ReturnValue %0 : !spirv.arm.tensor<2x2x3xi32>
+ }
+
+ // CHECK-LABEL: @rank_4_arm_tensor_of_i32
+ spirv.func @rank_4_arm_tensor_of_i32() -> (!spirv.arm.tensor<2x3x4x5xi32>) "None" {
+ // CHECK: {{%.*}} = spirv.Constant dense<5> : !spirv.arm.tensor<2x3x4x5xi32>
+ %0 = spirv.Constant dense<5> : !spirv.arm.tensor<2x3x4x5xi32>
+ spirv.ReturnValue %0 : !spirv.arm.tensor<2x3x4x5xi32>
+ }
+
// CHECK-LABEL: @splat_arm_tensor_of_i32
spirv.func @splat_arm_tensor_of_i32() -> (!spirv.arm.tensor<2x3xi32>) "None" {
// CHECK: {{%.*}} = spirv.Constant dense<2> : !spirv.arm.tensor<2x3xi32>
@@ -19,13 +38,34 @@ spirv.module Logical Vulkan requires #spirv.vce<v1.3,
spirv.ReturnValue %0 : !spirv.arm.tensor<2x3xi32>
}
- // CHECK-LABEL: @arm_tensor_of_f32
- spirv.func @arm_tensor_of_f32() -> (!spirv.arm.tensor<2x3xf32>) "None" {
+ // CHECK-LABEL: @rank_1_arm_tensor_of_f32
+ spirv.func @rank_1_arm_tensor_of_f32() -> (!spirv.arm.tensor<3xf32>) "None" {
+ // CHECK: {{%.*}} = spirv.Constant dense<[1.000000e+00, 2.000000e+00, 3.000000e+00]> : !spirv.arm.tensor<3xf32>
+ %0 = spirv.Constant dense<[1.0, 2.0, 3.0]> : !spirv.arm.tensor<3xf32>
+ spirv.ReturnValue %0 : !spirv.arm.tensor<3xf32>
+ }
+
+ // CHECK-LABEL: @rank_2_arm_tensor_of_f32
+ spirv.func @rank_2_arm_tensor_of_f32() -> (!spirv.arm.tensor<2x3xf32>) "None" {
// CHECK: {{%.*}} = spirv.Constant dense<{{\[}}[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : !spirv.arm.tensor<2x3xf32>
- %0 = spirv.Constant dense<[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]>: !spirv.arm.tensor<2x3xf32>
+ %0 = spirv.Constant dense<[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]> : !spirv.arm.tensor<2x3xf32>
spirv.ReturnValue %0 : !spirv.arm.tensor<2x3xf32>
}
+ // CHECK-LABEL: @rank_3_arm_tensor_of_f32
+ spirv.func @rank_3_arm_tensor_of_f32() -> (!spirv.arm.tensor<2x2x3xf32>) "None" {
+ // CHECK: {{%.*}} = spirv.Constant dense<{{\[}}{{\[}}[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]], {{\[}}[7.000000e+00, 8.000000e+00, 9.000000e+00], [1.000000e+01, 1.100000e+01, 1.200000e+01]]]> : !spirv.arm.tensor<2x2x3xf32>
+ %0 = spirv.Constant dense<[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], [[7.0, 8.0, 9.0], [10.0, 11.0, 12.0]]]> : !spirv.arm.tensor<2x2x3xf32>
+ spirv.ReturnValue %0 : !spirv.arm.tensor<2x2x3xf32>
+ }
+
+ // CHECK-LABEL: @rank_4_arm_tensor_of_f32
+ spirv.func @rank_4_arm_tensor_of_f32() -> (!spirv.arm.tensor<2x3x4x5xf32>) "None" {
+ // CHECK: {{%.*}} = spirv.Constant dense<5.000000e+00> : !spirv.arm.tensor<2x3x4x5xf32>
+ %0 = spirv.Constant dense<5.0> : !spirv.arm.tensor<2x3x4x5xf32>
+ spirv.ReturnValue %0 : !spirv.arm.tensor<2x3x4x5xf32>
+ }
+
// CHECK-LABEL: @splat_arm_tensor_of_f32
spirv.func @splat_arm_tensor_of_f32() -> (!spirv.arm.tensor<2x3xf32>) "None" {
// CHECK: {{%.*}} = spirv.Constant dense<2.000000e+00> : !spirv.arm.tensor<2x3xf32>
diff --git a/mlir/test/Transforms/test-legalizer-fold-after.mlir b/mlir/test/Transforms/test-legalizer-fold-after.mlir
new file mode 100644
index 0000000..7f80252
--- /dev/null
+++ b/mlir/test/Transforms/test-legalizer-fold-after.mlir
@@ -0,0 +1,9 @@
+// RUN: mlir-opt %s -test-legalize-patterns="test-legalize-folding-mode=after-patterns" | FileCheck %s
+
+// CHECK-LABEL: @fold_legalization
+func.func @fold_legalization() -> i32 {
+ // CHECK-NOT: op_in_place_self_fold
+ // CHECK: 97
+ %1 = "test.op_in_place_self_fold"() : () -> (i32)
+ "test.return"(%1) : (i32) -> ()
+}
diff --git a/mlir/test/Transforms/test-legalizer-fold-before.mlir b/mlir/test/Transforms/test-legalizer-fold-before.mlir
new file mode 100644
index 0000000..fe6e293
--- /dev/null
+++ b/mlir/test/Transforms/test-legalizer-fold-before.mlir
@@ -0,0 +1,9 @@
+// RUN: mlir-opt %s -test-legalize-patterns="test-legalize-folding-mode=before-patterns" | FileCheck %s
+
+// CHECK-LABEL: @fold_legalization
+func.func @fold_legalization() -> i32 {
+ // CHECK: op_in_place_self_fold
+ // CHECK-SAME: folded
+ %1 = "test.op_in_place_self_fold"() : () -> (i32)
+ "test.return"(%1) : (i32) -> ()
+}
diff --git a/mlir/test/Transforms/test-legalizer-no-fold.mlir b/mlir/test/Transforms/test-legalizer-no-fold.mlir
new file mode 100644
index 0000000..720d17f
--- /dev/null
+++ b/mlir/test/Transforms/test-legalizer-no-fold.mlir
@@ -0,0 +1,12 @@
+// RUN: mlir-opt %s -allow-unregistered-dialect -test-legalize-patterns="test-legalize-folding-mode=never" | FileCheck %s
+
+// CHECK-LABEL: @remove_foldable_op(
+func.func @remove_foldable_op(%arg0 : i32) -> (i32) {
+ // Check that op was not folded.
+ // CHECK: "test.op_with_region_fold"
+ %0 = "test.op_with_region_fold"(%arg0) ({
+ "foo.op_with_region_terminator"() : () -> ()
+ }) : (i32) -> (i32)
+ "test.return"(%0) : (i32) -> ()
+}
+
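The three new legalizer tests cover the test-legalize-folding-mode knob end to end: with before-patterns the in-place fold runs first and the op survives carrying its folded marker, with after-patterns the rewrite pattern wins and the op becomes the constant 97, and with never foldable ops are left untouched.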
diff --git a/mlir/test/Transforms/test-legalizer.mlir b/mlir/test/Transforms/test-legalizer.mlir
index e4406e6..5630d15 100644
--- a/mlir/test/Transforms/test-legalizer.mlir
+++ b/mlir/test/Transforms/test-legalizer.mlir
@@ -415,3 +415,20 @@ func.func @test_multiple_1_to_n_replacement() {
%0 = "test.multiple_1_to_n_replacement"() : () -> (f16)
"test.invalid"(%0) : (f16) -> ()
}
+
+// -----
+
+// CHECK-LABEL: func @test_lookup_without_converter
+// CHECK: %[[producer:.*]] = "test.valid_producer"() : () -> i16
+// CHECK: %[[cast:.*]] = "test.cast"(%[[producer]]) : (i16) -> f64
+// CHECK: "test.valid_consumer"(%[[cast]]) : (f64) -> ()
+// CHECK: "test.valid_consumer"(%[[producer]]) : (i16) -> ()
+func.func @test_lookup_without_converter() {
+ %0 = "test.replace_with_valid_producer"() {type = i16} : () -> (i64)
+ "test.replace_with_valid_consumer"(%0) {with_converter} : (i64) -> ()
+ // Make sure that the second "replace_with_valid_consumer" lowering does not
+ // lookup the materialization that was created for the above op.
+ "test.replace_with_valid_consumer"(%0) : (i64) -> ()
+ // expected-remark@+1 {{op 'func.return' is not legalizable}}
+ return
+}
diff --git a/mlir/test/lib/Dialect/Test/TestOps.td b/mlir/test/lib/Dialect/Test/TestOps.td
index 2eaad55..231400e 100644
--- a/mlir/test/lib/Dialect/Test/TestOps.td
+++ b/mlir/test/lib/Dialect/Test/TestOps.td
@@ -1169,6 +1169,26 @@ def OpP : TEST_Op<"op_p"> {
let results = (outs I32);
}
+// Test constant-folding a pattern that maps `(F32) -> SI32`.
+def SignOp : TEST_Op<"sign", [SameOperandsAndResultShape]> {
+ let arguments = (ins RankedTensorOf<[F32]>:$operand);
+ let results = (outs RankedTensorOf<[SI32]>:$result);
+
+ let assemblyFormat = [{
+ $operand attr-dict `:` functional-type(operands, results)
+ }];
+}
+
+// Test constant-folding a pattern that maps `(F32, F32) -> I1`.
+def LessThanOp : TEST_Op<"less_than", [SameOperandsAndResultShape]> {
+ let arguments = (ins RankedTensorOf<[F32]>:$lhs, RankedTensorOf<[F32]>:$rhs);
+ let results = (outs RankedTensorOf<[I1]>:$result);
+
+ let assemblyFormat = [{
+ $lhs `,` $rhs attr-dict `:` functional-type(operands, results)
+ }];
+}
+
// Test same operand name enforces equality condition check.
def TestEqualArgsPattern : Pat<(OpN $a, $a), (OpO $a)>;
@@ -1478,6 +1498,8 @@ def TestOpInPlaceSelfFold : TEST_Op<"op_in_place_self_fold"> {
let results = (outs I32);
let hasFolder = 1;
}
+def : Pat<(TestOpInPlaceSelfFold:$op $_),
+ (TestOpConstant ConstantAttr<I32Attr, "97">)>;
// Test op that simply returns success.
def TestOpInPlaceFoldSuccess : TEST_Op<"op_in_place_fold_success"> {
@@ -2104,6 +2126,10 @@ def TestInvalidOp : TEST_Op<"invalid", [Terminator]>,
Arguments<(ins Variadic<AnyType>)>;
def TestTypeProducerOp : TEST_Op<"type_producer">,
Results<(outs AnyType)>;
+def TestValidProducerOp : TEST_Op<"valid_producer">,
+ Results<(outs AnyType)>;
+def TestValidConsumerOp : TEST_Op<"valid_consumer">,
+ Arguments<(ins AnyType)>;
def TestAnotherTypeProducerOp : TEST_Op<"another_type_producer">,
Results<(outs AnyType)>;
def TestTypeConsumerOp : TEST_Op<"type_consumer">,
diff --git a/mlir/test/lib/Dialect/Test/TestPatterns.cpp b/mlir/test/lib/Dialect/Test/TestPatterns.cpp
index eda618f..ff958d9 100644
--- a/mlir/test/lib/Dialect/Test/TestPatterns.cpp
+++ b/mlir/test/lib/Dialect/Test/TestPatterns.cpp
@@ -10,6 +10,7 @@
#include "TestOps.h"
#include "TestTypes.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
+#include "mlir/Dialect/CommonFolders.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Func/Transforms/FuncConversions.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
@@ -202,6 +203,66 @@ struct HoistEligibleOps : public OpRewritePattern<test::OneRegionOp> {
}
};
+struct FoldSignOpF32ToSI32 : public OpRewritePattern<test::SignOp> {
+ using OpRewritePattern<test::SignOp>::OpRewritePattern;
+
+ LogicalResult matchAndRewrite(test::SignOp op,
+ PatternRewriter &rewriter) const override {
+ if (op->getNumOperands() != 1 || op->getNumResults() != 1)
+ return failure();
+
+ TypedAttr operandAttr;
+ matchPattern(op->getOperand(0), m_Constant(&operandAttr));
+ if (!operandAttr)
+ return failure();
+
+ TypedAttr res = cast_or_null<TypedAttr>(
+ constFoldUnaryOp<FloatAttr, FloatAttr::ValueType, void, IntegerAttr>(
+ operandAttr, op.getType(), [](APFloat operand) -> APSInt {
+ static const APFloat zero(0.0f);
+ int operandSign = 0;
+ if (operand != zero)
+ operandSign = (operand < zero) ? -1 : +1;
+ return APSInt(APInt(32, operandSign), false);
+ }));
+ if (!res)
+ return failure();
+
+ rewriter.replaceOpWithNewOp<arith::ConstantOp>(op, res);
+ return success();
+ }
+};
+
+struct FoldLessThanOpF32ToI1 : public OpRewritePattern<test::LessThanOp> {
+ using OpRewritePattern<test::LessThanOp>::OpRewritePattern;
+
+ LogicalResult matchAndRewrite(test::LessThanOp op,
+ PatternRewriter &rewriter) const override {
+ if (op->getNumOperands() != 2 || op->getNumResults() != 1)
+ return failure();
+
+ TypedAttr lhsAttr;
+ TypedAttr rhsAttr;
+ matchPattern(op->getOperand(0), m_Constant(&lhsAttr));
+ matchPattern(op->getOperand(1), m_Constant(&rhsAttr));
+
+ if (!lhsAttr || !rhsAttr)
+ return failure();
+
+ Attribute operandAttrs[2] = {lhsAttr, rhsAttr};
+ TypedAttr res = cast_or_null<TypedAttr>(
+ constFoldBinaryOp<FloatAttr, FloatAttr::ValueType, void, IntegerAttr>(
+ operandAttrs, op.getType(), [](APFloat lhs, APFloat rhs) -> APInt {
+ return APInt(1, lhs < rhs);
+ }));
+ if (!res)
+ return failure();
+
+ rewriter.replaceOpWithNewOp<arith::ConstantOp>(op, res);
+ return success();
+ }
+};
+
/// This pattern moves "test.move_before_parent_op" before the parent op.
struct MoveBeforeParentOp : public RewritePattern {
MoveBeforeParentOp(MLIRContext *context)
@@ -1198,6 +1259,47 @@ public:
}
};
+/// Pattern that replaces test.replace_with_valid_producer with a
+/// test.valid_producer op of the specified type.
+class TestReplaceWithValidProducer : public ConversionPattern {
+public:
+ TestReplaceWithValidProducer(MLIRContext *ctx)
+ : ConversionPattern("test.replace_with_valid_producer", 1, ctx) {}
+ LogicalResult
+ matchAndRewrite(Operation *op, ArrayRef<Value> operands,
+ ConversionPatternRewriter &rewriter) const final {
+ auto attr = op->getAttrOfType<TypeAttr>("type");
+ if (!attr)
+ return failure();
+ rewriter.replaceOpWithNewOp<TestValidProducerOp>(op, attr.getValue());
+ return success();
+ }
+};
+
+/// Pattern that replaces test.replace_with_valid_consumer with
+/// test.valid_consumer. Can be used either with or without a type converter.
+class TestReplaceWithValidConsumer : public ConversionPattern {
+public:
+ TestReplaceWithValidConsumer(MLIRContext *ctx, const TypeConverter &converter)
+ : ConversionPattern(converter, "test.replace_with_valid_consumer", 1,
+ ctx) {}
+ TestReplaceWithValidConsumer(MLIRContext *ctx)
+ : ConversionPattern("test.replace_with_valid_consumer", 1, ctx) {}
+
+ LogicalResult
+ matchAndRewrite(Operation *op, ArrayRef<Value> operands,
+ ConversionPatternRewriter &rewriter) const final {
+ // with_converter present: pattern must have been initialized with a type
+ // converter.
+ // with_converter absent: pattern must have been initialized without a type
+ // converter.
+ if (op->hasAttr("with_converter") != static_cast<bool>(getTypeConverter()))
+ return failure();
+ rewriter.replaceOpWithNewOp<TestValidConsumerOp>(op, operands[0]);
+ return success();
+ }
+};
+
/// This pattern matches a test.convert_block_args op. It either:
/// a) Duplicates all block arguments,
/// b) or: drops all block arguments and replaces each with 2x the first
@@ -1314,6 +1416,7 @@ struct TestTypeConverter : public TypeConverter {
TestTypeConverter() {
addConversion(convertType);
addSourceMaterialization(materializeCast);
+ addTargetMaterialization(materializeCast);
}
static LogicalResult convertType(Type t, SmallVectorImpl<Type> &results) {
@@ -1389,10 +1492,12 @@ struct TestLegalizePatternDriver
TestBoundedRecursiveRewrite, TestNestedOpCreationUndoRewrite,
TestReplaceEraseOp, TestCreateUnregisteredOp, TestUndoMoveOpBefore,
TestUndoPropertiesModification, TestEraseOp,
+ TestReplaceWithValidProducer, TestReplaceWithValidConsumer,
TestRepetitive1ToNConsumer>(&getContext());
patterns.add<TestDropOpSignatureConversion, TestDropAndReplaceInvalidOp,
TestPassthroughInvalidOp, TestMultiple1ToNReplacement,
- TestBlockArgReplace>(&getContext(), converter);
+ TestBlockArgReplace, TestReplaceWithValidConsumer>(
+ &getContext(), converter);
patterns.add<TestConvertBlockArgs>(converter, &getContext());
mlir::populateAnyFunctionOpInterfaceTypeConversionPattern(patterns,
converter);
@@ -1402,7 +1507,8 @@ struct TestLegalizePatternDriver
ConversionTarget target(getContext());
target.addLegalOp<ModuleOp>();
target.addLegalOp<LegalOpA, LegalOpB, LegalOpC, TestCastOp, TestValidOp,
- TerminatorOp, OneRegionOp>();
+ TerminatorOp, TestOpConstant, OneRegionOp,
+ TestValidProducerOp, TestValidConsumerOp>();
target.addLegalOp(OperationName("test.legal_op", &getContext()));
target
.addIllegalOp<ILLegalOpF, TestRegionBuilderOp, TestOpWithRegionFold>();
@@ -1457,6 +1563,7 @@ struct TestLegalizePatternDriver
DumpNotifications dumpNotifications;
config.listener = &dumpNotifications;
config.unlegalizedOps = &unlegalizedOps;
+ config.foldingMode = foldingMode;
if (failed(applyPartialConversion(getOperation(), target,
std::move(patterns), config))) {
getOperation()->emitRemark() << "applyPartialConversion failed";
@@ -1476,6 +1583,7 @@ struct TestLegalizePatternDriver
ConversionConfig config;
DumpNotifications dumpNotifications;
+ config.foldingMode = foldingMode;
config.listener = &dumpNotifications;
if (failed(applyFullConversion(getOperation(), target,
std::move(patterns), config))) {
@@ -1490,6 +1598,7 @@ struct TestLegalizePatternDriver
// Analyze the convertible operations.
DenseSet<Operation *> legalizedOps;
ConversionConfig config;
+ config.foldingMode = foldingMode;
config.legalizableOps = &legalizedOps;
if (failed(applyAnalysisConversion(getOperation(), target,
std::move(patterns), config)))
@@ -1510,6 +1619,21 @@ struct TestLegalizePatternDriver
clEnumValN(ConversionMode::Full, "full", "Perform a full conversion"),
clEnumValN(ConversionMode::Partial, "partial",
"Perform a partial conversion"))};
+
+ Option<DialectConversionFoldingMode> foldingMode{
+ *this, "test-legalize-folding-mode",
+ llvm::cl::desc("The folding mode to use with the test driver"),
+ llvm::cl::init(DialectConversionFoldingMode::BeforePatterns),
+ llvm::cl::values(clEnumValN(DialectConversionFoldingMode::Never, "never",
+ "Never attempt to fold"),
+ clEnumValN(DialectConversionFoldingMode::BeforePatterns,
+ "before-patterns",
+ "Only attempt to fold not legal operations "
+ "before applying patterns"),
+ clEnumValN(DialectConversionFoldingMode::AfterPatterns,
+ "after-patterns",
+ "Only attempt to fold not legal operations "
+ "after applying patterns"))};
};
} // namespace
@@ -2181,6 +2305,24 @@ struct TestSelectiveReplacementPatternDriver
(void)applyPatternsGreedily(getOperation(), std::move(patterns));
}
};
+
+struct TestFoldTypeConvertingOp
+ : public PassWrapper<TestFoldTypeConvertingOp, OperationPass<>> {
+ MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestFoldTypeConvertingOp)
+
+ StringRef getArgument() const final { return "test-fold-type-converting-op"; }
+ StringRef getDescription() const final {
+ return "Test helper functions for folding ops whose input and output types "
+ "differ, e.g. float comparisons of the form `(f32, f32) -> i1`.";
+ }
+ void runOnOperation() override {
+ MLIRContext *context = &getContext();
+ mlir::RewritePatternSet patterns(context);
+ patterns.add<FoldSignOpF32ToSI32, FoldLessThanOpF32ToI1>(context);
+ if (failed(applyPatternsGreedily(getOperation(), std::move(patterns))))
+ signalPassFailure();
+ }
+};
} // namespace
//===----------------------------------------------------------------------===//
@@ -2211,6 +2353,8 @@ void registerPatternsTestPass() {
PassRegistration<TestMergeBlocksPatternDriver>();
PassRegistration<TestSelectiveReplacementPatternDriver>();
+
+ PassRegistration<TestFoldTypeConvertingOp>();
}
} // namespace test
} // namespace mlir
diff --git a/mlir/test/lib/Dialect/XeGPU/TestXeGPUTransforms.cpp b/mlir/test/lib/Dialect/XeGPU/TestXeGPUTransforms.cpp
index c6245b6..3bea8ef 100644
--- a/mlir/test/lib/Dialect/XeGPU/TestXeGPUTransforms.cpp
+++ b/mlir/test/lib/Dialect/XeGPU/TestXeGPUTransforms.cpp
@@ -7,11 +7,14 @@
//===----------------------------------------------------------------------===//
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
+#include "mlir/Dialect/Index/IR/IndexDialect.h"
#include "mlir/Dialect/Vector/Transforms/VectorTransforms.h"
#include "mlir/Dialect/XeGPU/IR/XeGPU.h"
#include "mlir/Dialect/XeGPU/Transforms/Transforms.h"
+#include "mlir/Dialect/XeGPU/Utils/XeGPUUtils.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
+#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
using namespace mlir;
@@ -147,12 +150,118 @@ struct TestXeGPUUnrollingPatterns
}
};
+#undef DEBUG_TYPE
+#define DEBUG_TYPE "test-xegpu-layout-interface"
+#define DBGS() (llvm::dbgs() << "[" DEBUG_TYPE "]: ")
+#define LDBG(X) LLVM_DEBUG(DBGS() << X << "\n")
+
+// Test pattern for distributing vector::StepOp from workgroup to subgroup.
+// Validates the LayoutTrait interface that abstracts offset computation over
+// LayoutAttr and SliceAttr.
+class TestStepOpPattern : public OpConversionPattern<vector::StepOp> {
+ using OpConversionPattern<vector::StepOp>::OpConversionPattern;
+
+ LogicalResult
+ matchAndRewrite(vector::StepOp op, OneToNOpAdaptor adaptor,
+ ConversionPatternRewriter &rewriter) const override {
+
+ auto layoutName = xegpu::getLayoutName(op->getResult(0));
+ auto sliceAttr = op->getAttrOfType<xegpu::SliceAttr>(layoutName);
+ if (!sliceAttr || sliceAttr.getRank() != 1)
+ return failure();
+
+ std::optional<SmallVector<int64_t>> sgShape = sliceAttr.getSgDataAsInt();
+ if (!sgShape)
+ return failure();
+
+ Location loc = op.getLoc();
+ VectorType type = op.getResult().getType();
+ auto wgShape = type.getShape();
+
+ Value sgId =
+ gpu::SubgroupIdOp::create(rewriter, loc, /*upper_bound=*/nullptr);
+ auto maybeOffsets = sliceAttr.getOffsets(rewriter, loc, sgId, wgShape);
+ if (failed(maybeOffsets))
+ return failure();
+
+ VectorType newTy = type.cloneWith(*sgShape, type.getElementType());
+ Value base = vector::StepOp::create(rewriter, loc, newTy);
+ SmallVector<Value> newOps;
+ for (auto offsets : *maybeOffsets) {
+ Value bcast =
+ vector::BroadcastOp::create(rewriter, loc, newTy, offsets[0]);
+ Value add = arith::AddIOp::create(rewriter, loc, base, bcast);
+ newOps.push_back(add);
+ }
+ rewriter.replaceOpWithMultiple(op, {newOps});
+ return success();
+ }
+};
+
+struct TestXeGPULayoutInterface
+ : public PassWrapper<TestXeGPULayoutInterface,
+ OperationPass<gpu::GPUModuleOp>> {
+ MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestXeGPULayoutInterface)
+
+ StringRef getArgument() const final { return "test-xegpu-layout-interface"; }
+
+ StringRef getDescription() const final {
+ return "Test the implementation of XeGPU Layout interfaces";
+ }
+
+ void getDependentDialects(::mlir::DialectRegistry &registry) const override {
+ registry.insert<arith::ArithDialect>();
+ registry.insert<memref::MemRefDialect>();
+ registry.insert<xegpu::XeGPUDialect>();
+ registry.insert<vector::VectorDialect>();
+ registry.insert<index::IndexDialect>();
+ }
+
+ TestXeGPULayoutInterface() = default;
+ TestXeGPULayoutInterface(const TestXeGPULayoutInterface &pass)
+ : PassWrapper(pass) {}
+
+ void runOnOperation() override {
+ MLIRContext *ctx = &getContext();
+
+ TypeConverter typeConverter;
+ auto materializeCast = [&](mlir::OpBuilder &builder, mlir::Type type,
+ mlir::ValueRange inputs,
+ mlir::Location loc) -> mlir::Value {
+ return builder.create<UnrealizedConversionCastOp>(loc, type, inputs)
+ .getResult(0);
+ };
+ typeConverter.addSourceMaterialization(materializeCast);
+ typeConverter.addTargetMaterialization(materializeCast);
+
+ RewritePatternSet patterns(ctx);
+ patterns.add<TestStepOpPattern>(typeConverter, ctx);
+
+ ConversionTarget target(*ctx);
+ auto isLegal = [&](xegpu::SliceAttr layout) -> bool {
+ return !layout || !layout.isWgLayout();
+ };
+
+ target.addDynamicallyLegalOp<vector::StepOp>(
+ [&](vector::StepOp op) -> bool {
+ auto layoutName = xegpu::getLayoutName(op->getResult(0));
+ auto sliceAttr = op->getAttrOfType<xegpu::SliceAttr>(layoutName);
+ return isLegal(sliceAttr);
+ });
+
+ target.markUnknownOpDynamicallyLegal([](Operation *op) { return true; });
+
+ (void)applyPartialConversion(getOperation(), target, std::move(patterns));
+ }
+};
+
} // namespace
namespace mlir {
namespace test {
void registerTestXeGPULowerings() {
PassRegistration<TestXeGPUUnrollingPatterns>();
+ PassRegistration<TestXeGPULayoutInterface>();
}
} // namespace test
} // namespace mlir
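As a rough worked example of what TestStepOpPattern produces (hedged; the exact offsets come from SliceAttr::getOffsets): with a 1-D workgroup shape of 128 and sg_data = [16], every subgroup keeps a local vector.step of type vector<16xindex> producing 0..15 and adds a broadcast of its slice offset, so in the simplest one-chunk-per-subgroup distribution subgroup i covers the values 16*i .. 16*i+15.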
diff --git a/mlir/test/lib/Pass/TestPassManager.cpp b/mlir/test/lib/Pass/TestPassManager.cpp
index 25c8e53..df2736b 100644
--- a/mlir/test/lib/Pass/TestPassManager.cpp
+++ b/mlir/test/lib/Pass/TestPassManager.cpp
@@ -133,6 +133,51 @@ struct TestOptionsSuperPass
llvm::cl::desc("Example list of PassPipelineOptions option")};
};
+struct TestOptionsPassA
+ : public PassWrapper<TestOptionsPassA, OperationPass<func::FuncOp>> {
+ MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestOptionsPassA)
+
+ struct Options : public PassPipelineOptions<Options> {
+ Option<bool> foo{*this, "foo", llvm::cl::desc("Example boolean option")};
+ };
+
+ TestOptionsPassA() = default;
+ TestOptionsPassA(const TestOptionsPassA &) : PassWrapper() {}
+ TestOptionsPassA(const Options &options) { this->options.foo = options.foo; }
+
+ void runOnOperation() final {}
+ StringRef getArgument() const final { return "test-options-pass-a"; }
+ StringRef getDescription() const final {
+ return "Test superset options parsing capabilities - subset A";
+ }
+
+ Options options;
+};
+
+struct TestOptionsPassB
+ : public PassWrapper<TestOptionsPassB, OperationPass<func::FuncOp>> {
+ MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestOptionsPassB)
+
+ struct Options : public PassPipelineOptions<Options> {
+ Option<bool> bar{*this, "bar", llvm::cl::desc("Example boolean option")};
+ };
+
+ TestOptionsPassB() = default;
+ TestOptionsPassB(const TestOptionsPassB &) : PassWrapper() {}
+ TestOptionsPassB(const Options &options) { this->options.bar = options.bar; }
+
+ void runOnOperation() final {}
+ StringRef getArgument() const final { return "test-options-pass-b"; }
+ StringRef getDescription() const final {
+ return "Test superset options parsing capabilities - subset B";
+ }
+
+ Options options;
+};
+
+struct TestPipelineOptionsSuperSetAB : TestOptionsPassA::Options,
+ TestOptionsPassB::Options {};
+
/// A test pass that always aborts to enable testing the crash recovery
/// mechanism of the pass manager.
struct TestCrashRecoveryPass
@@ -270,6 +315,9 @@ void registerPassManagerTestPass() {
PassRegistration<TestOptionsPass>();
PassRegistration<TestOptionsSuperPass>();
+ PassRegistration<TestOptionsPassA>();
+ PassRegistration<TestOptionsPassB>();
+
PassRegistration<TestModulePass>();
PassRegistration<TestFunctionPass>();
@@ -306,5 +354,16 @@ void registerPassManagerTestPass() {
[](OpPassManager &pm, const TestOptionsSuperPass::Options &options) {
pm.addPass(std::make_unique<TestOptionsSuperPass>(options));
});
+
+ PassPipelineRegistration<TestPipelineOptionsSuperSetAB>
+ registerPipelineOptionsSuperSetABPipeline(
+ "test-options-super-set-ab-pipeline",
+ "Parses options of PassPipelineOptions using pass pipeline "
+ "registration",
+ [](OpPassManager &pm, const TestPipelineOptionsSuperSetAB &options) {
+ // Pass superset AB options to subset options A and B
+ pm.addPass(std::make_unique<TestOptionsPassA>(options));
+ pm.addPass(std::make_unique<TestOptionsPassB>(options));
+ });
}
} // namespace mlir
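A hedged sketch of the same superset wiring done programmatically (the func.func anchor below is only an illustration; the option assignments mirror the pass constructors above):

    // Sketch only: TestPipelineOptionsSuperSetAB inherits the option members
    // of both subsets, so one parsed struct can seed both passes.
    TestPipelineOptionsSuperSetAB options;
    options.foo = true;  // consumed by TestOptionsPassA
    options.bar = false; // consumed by TestOptionsPassB
    OpPassManager pm("func.func");
    pm.addPass(std::make_unique<TestOptionsPassA>(options));
    pm.addPass(std::make_unique<TestOptionsPassB>(options));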
diff --git a/mlir/test/python/dialects/linalg/opdsl/test_core_named_ops.py b/mlir/test/python/dialects/linalg/opdsl/test_core_named_ops.py
index ee76b6d..bc273bf 100644
--- a/mlir/test/python/dialects/linalg/opdsl/test_core_named_ops.py
+++ b/mlir/test/python/dialects/linalg/opdsl/test_core_named_ops.py
@@ -1,7 +1,7 @@
# RUN: %PYTHON -m mlir.dialects.linalg.opdsl.dump_oplib .ops.core_named_ops | FileCheck %s
# Just verify that at least one known op is generated.
-# CHECK: name: matmul
+# CHECK: name: copy
# verify some special cases: negf->NegFOp, powf->PowFOp
# CHECK cpp_class_name: NegFOp
diff --git a/mlir/utils/tree-sitter-mlir/dialect/linalg.js b/mlir/utils/tree-sitter-mlir/dialect/linalg.js
index ddde92b..f465808 100644
--- a/mlir/utils/tree-sitter-mlir/dialect/linalg.js
+++ b/mlir/utils/tree-sitter-mlir/dialect/linalg.js
@@ -4,7 +4,6 @@ module.exports = {
linalg_dialect : $ => prec.right(choice(
seq(choice(
'linalg.batch_matmul',
- 'linalg.batch_matmul_transpose_b',
'linalg.batch_matvec',
'linalg.batch_reduce_matmul', 'linalg.broadcast',
'linalg.conv_1d_ncw_fcw', 'linalg.conv_1d_nwc_wcf',
@@ -27,7 +26,6 @@ module.exports = {
'linalg.dot', 'linalg.elemwise_binary',
'linalg.elemwise_unary', 'linalg.fill',
'linalg.fill_rng_2d', 'linalg.matmul',
- 'linalg.matmul_transpose_b',
'linalg.matmul_unsigned', 'linalg.matvec',
'linalg.mmt4d', 'linalg.pooling_nchw_max',
'linalg.pooling_nchw_sum',
diff --git a/mlir/utils/tree-sitter-mlir/queries/highlights.scm b/mlir/utils/tree-sitter-mlir/queries/highlights.scm
index 4cbea7b..59e280b 100644
--- a/mlir/utils/tree-sitter-mlir/queries/highlights.scm
+++ b/mlir/utils/tree-sitter-mlir/queries/highlights.scm
@@ -213,7 +213,6 @@
"bufferization.to_tensor"
"linalg.batch_matmul"
- "linalg.batch_matmul_transpose_b"
"linalg.batch_matvec"
"linalg.batch_reduce_matmul"
"linalg.broadcast"
@@ -244,7 +243,6 @@
"linalg.fill"
"linalg.fill_rng_2d"
"linalg.matmul"
- "linalg.matmul_transpose_b"
"linalg.matmul_unsigned"
"linalg.matvec"
"linalg.mmt4d"
diff --git a/offload/include/Shared/APITypes.h b/offload/include/Shared/APITypes.h
index f376c7d..8c150b6 100644
--- a/offload/include/Shared/APITypes.h
+++ b/offload/include/Shared/APITypes.h
@@ -21,6 +21,7 @@
#include <cstddef>
#include <cstdint>
+#include <mutex>
extern "C" {
@@ -76,6 +77,9 @@ struct __tgt_async_info {
/// should be freed after finalization.
llvm::SmallVector<void *, 2> AssociatedAllocations;
+ /// Mutex to guard access to AssociatedAllocations and the Queue.
+ std::mutex Mutex;
+
/// The kernel launch environment used to issue a kernel. Stored here to
/// ensure it is a valid location while the transfer to the device is
/// happening.
diff --git a/offload/liboffload/API/Queue.td b/offload/liboffload/API/Queue.td
index 7e4c5b8..1d9f6f2 100644
--- a/offload/liboffload/API/Queue.td
+++ b/offload/liboffload/API/Queue.td
@@ -65,7 +65,8 @@ def : Enum {
let desc = "Supported queue info.";
let is_typed = 1;
let etors = [
- TaggedEtor<"DEVICE", "ol_device_handle_t", "The handle of the device associated with the queue.">
+ TaggedEtor<"DEVICE", "ol_device_handle_t", "The handle of the device associated with the queue.">,
+ TaggedEtor<"EMPTY", "bool", "True if the queue is known to be empty. May be unconditionally false if the device does not support status queries.">,
];
}
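A hedged usage sketch of the new property from client code; the olGetQueueInfo call below assumes the usual (handle, property, size, value) shape of the other liboffload info queries, and error handling is elided:

    // Assumption: olGetQueueInfo mirrors the other *GetInfo entry points.
    // EMPTY may be unconditionally false on devices that cannot report status.
    bool Empty = false;
    (void)olGetQueueInfo(Queue, OL_QUEUE_INFO_EMPTY, sizeof(Empty), &Empty);
    if (Empty) {
      // The queue had no outstanding work when queried.
    }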
diff --git a/offload/liboffload/src/OffloadImpl.cpp b/offload/liboffload/src/OffloadImpl.cpp
index 6486b2b..f5365ca 100644
--- a/offload/liboffload/src/OffloadImpl.cpp
+++ b/offload/liboffload/src/OffloadImpl.cpp
@@ -72,6 +72,8 @@ struct ol_queue_impl_t {
struct ol_event_impl_t {
ol_event_impl_t(void *EventInfo, ol_queue_handle_t Queue)
: EventInfo(EventInfo), Queue(Queue) {}
+ // EventInfo may be null, in which case the event should be considered always
+ // complete.
void *EventInfo;
ol_queue_handle_t Queue;
};
@@ -206,7 +208,7 @@ Error initPlugins(OffloadContext &Context) {
}
Error olInit_impl() {
- std::lock_guard<std::mutex> Lock{OffloadContextValMutex};
+ std::lock_guard<std::mutex> Lock(OffloadContextValMutex);
if (isOffloadInitialized()) {
OffloadContext::get().RefCount++;
@@ -224,7 +226,7 @@ Error olInit_impl() {
}
Error olShutDown_impl() {
- std::lock_guard<std::mutex> Lock{OffloadContextValMutex};
+ std::lock_guard<std::mutex> Lock(OffloadContextValMutex);
if (--OffloadContext::get().RefCount != 0)
return Error::success();
@@ -485,16 +487,13 @@ Error olSyncQueue_impl(ol_queue_handle_t Queue) {
// Host plugin doesn't have a queue set so it's not safe to call synchronize
// on it, but we have nothing to synchronize in that situation anyway.
if (Queue->AsyncInfo->Queue) {
- if (auto Err = Queue->Device->Device->synchronize(Queue->AsyncInfo))
+ // We don't need to release the queue, and we want other offload threads to
+ // be able to submit work concurrently, so pass "false" so the underlying
+ // queue object is not released.
+ if (auto Err = Queue->Device->Device->synchronize(Queue->AsyncInfo, false))
return Err;
}
- // Recreate the stream resource so the queue can be reused
- // TODO: Would be easier for the synchronization to (optionally) not release
- // it to begin with.
- if (auto Res = Queue->Device->Device->initAsyncInfo(&Queue->AsyncInfo))
- return Res;
-
return Error::success();
}
@@ -509,8 +508,8 @@ Error olWaitEvents_impl(ol_queue_handle_t Queue, ol_event_handle_t *Events,
return Plugin::error(ErrorCode::INVALID_NULL_HANDLE,
"olWaitEvents asked to wait on a NULL event");
- // Do nothing if the event is for this queue
- if (Event->Queue == Queue)
+ // Do nothing if the event is for this queue or the event is always complete
+ if (Event->Queue == Queue || !Event->EventInfo)
continue;
if (auto Err = Device->waitEvent(Event->EventInfo, Queue->AsyncInfo))
@@ -528,6 +527,12 @@ Error olGetQueueInfoImplDetail(ol_queue_handle_t Queue,
switch (PropName) {
case OL_QUEUE_INFO_DEVICE:
return Info.write<ol_device_handle_t>(Queue->Device);
+ case OL_QUEUE_INFO_EMPTY: {
+ auto Pending = Queue->Device->Device->hasPendingWork(Queue->AsyncInfo);
+ if (auto Err = Pending.takeError())
+ return Err;
+ return Info.write<bool>(!*Pending);
+ }
default:
return createOffloadError(ErrorCode::INVALID_ENUMERATION,
"olGetQueueInfo enum '%i' is invalid", PropName);
@@ -548,6 +553,10 @@ Error olGetQueueInfoSize_impl(ol_queue_handle_t Queue, ol_queue_info_t PropName,
}
Error olSyncEvent_impl(ol_event_handle_t Event) {
+ if (!Event->EventInfo)
+ // Event always complete
+ return Plugin::success();
+
if (auto Res = Event->Queue->Device->Device->syncEvent(Event->EventInfo))
return Res;
@@ -555,8 +564,9 @@ Error olSyncEvent_impl(ol_event_handle_t Event) {
}
Error olDestroyEvent_impl(ol_event_handle_t Event) {
- if (auto Res = Event->Queue->Device->Device->destroyEvent(Event->EventInfo))
- return Res;
+ if (Event->EventInfo)
+ if (auto Res = Event->Queue->Device->Device->destroyEvent(Event->EventInfo))
+ return Res;
return olDestroy(Event);
}
@@ -590,7 +600,16 @@ Error olGetEventInfoSize_impl(ol_event_handle_t Event, ol_event_info_t PropName,
}
Error olCreateEvent_impl(ol_queue_handle_t Queue, ol_event_handle_t *EventOut) {
+ auto Pending = Queue->Device->Device->hasPendingWork(Queue->AsyncInfo);
+ if (auto Err = Pending.takeError())
+ return Err;
+
*EventOut = new ol_event_impl_t(nullptr, Queue);
+ if (!*Pending)
+ // The queue is empty; don't record an event and treat the event as always
+ // complete.
+ return Plugin::success();
+
if (auto Res = Queue->Device->Device->createEvent(&(*EventOut)->EventInfo))
return Res;
@@ -725,7 +744,7 @@ Error olGetSymbol_impl(ol_program_handle_t Program, const char *Name,
ol_symbol_kind_t Kind, ol_symbol_handle_t *Symbol) {
auto &Device = Program->Image->getDevice();
- std::lock_guard<std::mutex> Lock{Program->SymbolListMutex};
+ std::lock_guard<std::mutex> Lock(Program->SymbolListMutex);
switch (Kind) {
case OL_SYMBOL_KIND_KERNEL: {
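The event changes above all rely on one contract: a null EventInfo marks an event that is already complete. A minimal sketch of the shared fast path (mirroring olSyncEvent_impl and olDestroyEvent_impl):

    // Events created on an empty queue carry a null EventInfo and never reach
    // the plugin's event machinery.
    if (!Event->EventInfo)
      return Plugin::success(); // nothing to wait for or destroy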
diff --git a/offload/libomptarget/interface.cpp b/offload/libomptarget/interface.cpp
index ea35440..e9b148d 100644
--- a/offload/libomptarget/interface.cpp
+++ b/offload/libomptarget/interface.cpp
@@ -116,7 +116,7 @@ targetData(ident_t *Loc, int64_t DeviceId, int32_t ArgNum, void **ArgsBase,
TargetDataFuncPtrTy TargetDataFunction, const char *RegionTypeMsg,
const char *RegionName) {
assert(PM && "Runtime not initialized");
- static_assert(std::is_convertible_v<TargetAsyncInfoTy, AsyncInfoTy>,
+ static_assert(std::is_convertible_v<TargetAsyncInfoTy &, AsyncInfoTy &>,
"TargetAsyncInfoTy must be convertible to AsyncInfoTy.");
TIMESCOPE_WITH_DETAILS_AND_IDENT("Runtime: Data Copy",
@@ -311,7 +311,7 @@ static inline int targetKernel(ident_t *Loc, int64_t DeviceId, int32_t NumTeams,
int32_t ThreadLimit, void *HostPtr,
KernelArgsTy *KernelArgs) {
assert(PM && "Runtime not initialized");
- static_assert(std::is_convertible_v<TargetAsyncInfoTy, AsyncInfoTy>,
+ static_assert(std::is_convertible_v<TargetAsyncInfoTy &, AsyncInfoTy &>,
"Target AsyncInfoTy must be convertible to AsyncInfoTy.");
DP("Entering target region for device %" PRId64 " with entry point " DPxMOD
"\n",
diff --git a/offload/plugins-nextgen/amdgpu/src/rtl.cpp b/offload/plugins-nextgen/amdgpu/src/rtl.cpp
index 82deeb4..7961820 100644
--- a/offload/plugins-nextgen/amdgpu/src/rtl.cpp
+++ b/offload/plugins-nextgen/amdgpu/src/rtl.cpp
@@ -2232,16 +2232,11 @@ struct AMDGPUDeviceTy : public GenericDeviceTy, AMDGenericDeviceTy {
/// Get the stream of the asynchronous info structure or get a new one.
Error getStream(AsyncInfoWrapperTy &AsyncInfoWrapper,
AMDGPUStreamTy *&Stream) {
- // Get the stream (if any) from the async info.
- Stream = AsyncInfoWrapper.getQueueAs<AMDGPUStreamTy *>();
- if (!Stream) {
- // There was no stream; get an idle one.
- if (auto Err = AMDGPUStreamManager.getResource(Stream))
- return Err;
-
- // Modify the async info's stream.
- AsyncInfoWrapper.setQueueAs<AMDGPUStreamTy *>(Stream);
- }
+ auto WrapperStream =
+ AsyncInfoWrapper.getOrInitQueue<AMDGPUStreamTy *>(AMDGPUStreamManager);
+ if (!WrapperStream)
+ return WrapperStream.takeError();
+ Stream = *WrapperStream;
return Plugin::success();
}
@@ -2296,7 +2291,8 @@ struct AMDGPUDeviceTy : public GenericDeviceTy, AMDGenericDeviceTy {
}
/// Synchronize current thread with the pending operations on the async info.
- Error synchronizeImpl(__tgt_async_info &AsyncInfo) override {
+ Error synchronizeImpl(__tgt_async_info &AsyncInfo,
+ bool ReleaseQueue) override {
AMDGPUStreamTy *Stream =
reinterpret_cast<AMDGPUStreamTy *>(AsyncInfo.Queue);
assert(Stream && "Invalid stream");
@@ -2307,8 +2303,11 @@ struct AMDGPUDeviceTy : public GenericDeviceTy, AMDGenericDeviceTy {
// Once the stream is synchronized, return it to stream pool and reset
// AsyncInfo. This is to make sure the synchronization only works for its
// own tasks.
- AsyncInfo.Queue = nullptr;
- return AMDGPUStreamManager.returnResource(Stream);
+ if (ReleaseQueue) {
+ AsyncInfo.Queue = nullptr;
+ return AMDGPUStreamManager.returnResource(Stream);
+ }
+ return Plugin::success();
}
/// Query for the completion of the pending operations on the async info.
@@ -2591,6 +2590,17 @@ struct AMDGPUDeviceTy : public GenericDeviceTy, AMDGenericDeviceTy {
return Event->wait(*Stream);
}
+ Expected<bool> hasPendingWorkImpl(AsyncInfoWrapperTy &AsyncInfo) override {
+ auto Stream = AsyncInfo.getQueueAs<AMDGPUStreamTy *>();
+ if (!Stream)
+ return false;
+
+ auto Query = Stream->query();
+ if (Query)
+ return !*Query;
+ return Query.takeError();
+ }
+
/// Synchronize the current thread with the event.
Error syncEventImpl(void *EventPtr) override {
AMDGPUEventTy *Event = reinterpret_cast<AMDGPUEventTy *>(EventPtr);
diff --git a/offload/plugins-nextgen/common/include/PluginInterface.h b/offload/plugins-nextgen/common/include/PluginInterface.h
index 4b7d410..c9ab34b 100644
--- a/offload/plugins-nextgen/common/include/PluginInterface.h
+++ b/offload/plugins-nextgen/common/include/PluginInterface.h
@@ -60,6 +60,7 @@ struct GenericPluginTy;
struct GenericKernelTy;
struct GenericDeviceTy;
struct RecordReplayTy;
+template <typename ResourceRef> class GenericDeviceResourceManagerTy;
namespace Plugin {
/// Create a success error. This is the same as calling Error::success(), but
@@ -127,6 +128,20 @@ struct AsyncInfoWrapperTy {
AsyncInfoPtr->Queue = Queue;
}
+ /// Get the queue, using the provided resource manager to initialise it if it
+ /// doesn't exist.
+ template <typename Ty, typename RMTy>
+ Expected<Ty>
+ getOrInitQueue(GenericDeviceResourceManagerTy<RMTy> &ResourceManager) {
+ std::lock_guard<std::mutex> Lock(AsyncInfoPtr->Mutex);
+ if (!AsyncInfoPtr->Queue) {
+ if (auto Err = ResourceManager.getResource(
+ *reinterpret_cast<Ty *>(&AsyncInfoPtr->Queue)))
+ return Err;
+ }
+ return getQueueAs<Ty>();
+ }
+
/// Synchronize with the __tgt_async_info's pending operations if it's the
/// internal async info. The error associated to the asynchronous operations
/// issued in this queue must be provided in \p Err. This function will update
@@ -138,6 +153,7 @@ struct AsyncInfoWrapperTy {
/// Register \p Ptr as an associated allocation that is freed after
/// finalization.
void freeAllocationAfterSynchronization(void *Ptr) {
+ std::lock_guard<std::mutex> AllocationGuard(AsyncInfoPtr->Mutex);
AsyncInfoPtr->AssociatedAllocations.push_back(Ptr);
}
@@ -827,9 +843,12 @@ struct GenericDeviceTy : public DeviceAllocatorTy {
Error setupRPCServer(GenericPluginTy &Plugin, DeviceImageTy &Image);
/// Synchronize the current thread with the pending operations on the
- /// __tgt_async_info structure.
- Error synchronize(__tgt_async_info *AsyncInfo);
- virtual Error synchronizeImpl(__tgt_async_info &AsyncInfo) = 0;
+ /// __tgt_async_info structure. If ReleaseQueue is false, the underlying
+ /// queue will not be released. In this case, additional work may be
+ /// submitted to the queue while a synchronize is running.
+ Error synchronize(__tgt_async_info *AsyncInfo, bool ReleaseQueue = true);
+ virtual Error synchronizeImpl(__tgt_async_info &AsyncInfo,
+ bool ReleaseQueue) = 0;
/// Invokes any global constructors on the device if present and is required
/// by the target.
@@ -973,6 +992,14 @@ struct GenericDeviceTy : public DeviceAllocatorTy {
Error printInfo();
virtual Expected<InfoTreeNode> obtainInfoImpl() = 0;
+ /// Return true if the device has work that is either queued or currently
+ /// running.
+ ///
+ /// Devices which cannot report this information should always return true.
+ Expected<bool> hasPendingWork(__tgt_async_info *AsyncInfo);
+ virtual Expected<bool>
+ hasPendingWorkImpl(AsyncInfoWrapperTy &AsyncInfoWrapper) = 0;
+
/// Getters of the grid values.
uint32_t getWarpSize() const { return GridValues.GV_Warp_Size; }
uint32_t getThreadLimit() const { return GridValues.GV_Max_WG_Size; }
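A short sketch of how a plugin's getStream collapses onto the new helper (mirrors the AMDGPU/CUDA changes later in this diff); the queue is created lazily under __tgt_async_info::Mutex and reused afterwards:

    // CUDA-flavoured sketch, matching the plugin change below.
    CUstream Stream;
    auto WrapperStream =
        AsyncInfoWrapper.getOrInitQueue<CUstream>(CUDAStreamManager);
    if (!WrapperStream)
      return WrapperStream.takeError();
    Stream = *WrapperStream;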
diff --git a/offload/plugins-nextgen/common/src/PluginInterface.cpp b/offload/plugins-nextgen/common/src/PluginInterface.cpp
index 0a57046..083d416 100644
--- a/offload/plugins-nextgen/common/src/PluginInterface.cpp
+++ b/offload/plugins-nextgen/common/src/PluginInterface.cpp
@@ -1335,18 +1335,25 @@ Error PinnedAllocationMapTy::unlockUnmappedHostBuffer(void *HstPtr) {
return eraseEntry(*Entry);
}
-Error GenericDeviceTy::synchronize(__tgt_async_info *AsyncInfo) {
- if (!AsyncInfo || !AsyncInfo->Queue)
- return Plugin::error(ErrorCode::INVALID_ARGUMENT,
- "invalid async info queue");
+Error GenericDeviceTy::synchronize(__tgt_async_info *AsyncInfo,
+ bool ReleaseQueue) {
+ if (!AsyncInfo || !AsyncInfo->Queue)
+ return Plugin::error(ErrorCode::INVALID_ARGUMENT,
+ "invalid async info queue");
+
+ SmallVector<void *> AllocsToDelete{};
+ {
+ std::lock_guard<std::mutex> AllocationGuard{AsyncInfo->Mutex};
- if (auto Err = synchronizeImpl(*AsyncInfo))
- return Err;
+ if (auto Err = synchronizeImpl(*AsyncInfo, ReleaseQueue))
+ return Err;
+
+ std::swap(AllocsToDelete, AsyncInfo->AssociatedAllocations);
+ }
- for (auto *Ptr : AsyncInfo->AssociatedAllocations)
+ for (auto *Ptr : AllocsToDelete)
if (auto Err = dataDelete(Ptr, TargetAllocTy::TARGET_ALLOC_DEVICE))
return Err;
- AsyncInfo->AssociatedAllocations.clear();
return Plugin::success();
}
@@ -1626,6 +1633,21 @@ Error GenericDeviceTy::waitEvent(void *EventPtr, __tgt_async_info *AsyncInfo) {
return Err;
}
+Expected<bool> GenericDeviceTy::hasPendingWork(__tgt_async_info *AsyncInfo) {
+ AsyncInfoWrapperTy AsyncInfoWrapper(*this, AsyncInfo);
+ auto Res = hasPendingWorkImpl(AsyncInfoWrapper);
+ if (auto Err = Res.takeError()) {
+ AsyncInfoWrapper.finalize(Err);
+ return Err;
+ }
+
+ auto Err = Plugin::success();
+ AsyncInfoWrapper.finalize(Err);
+ if (Err)
+ return Err;
+ return Res;
+}
+
Error GenericDeviceTy::syncEvent(void *EventPtr) {
return syncEventImpl(EventPtr);
}
diff --git a/offload/plugins-nextgen/cuda/src/rtl.cpp b/offload/plugins-nextgen/cuda/src/rtl.cpp
index c5f3167..e94f3f6 100644
--- a/offload/plugins-nextgen/cuda/src/rtl.cpp
+++ b/offload/plugins-nextgen/cuda/src/rtl.cpp
@@ -522,16 +522,11 @@ struct CUDADeviceTy : public GenericDeviceTy {
/// Get the stream of the asynchronous info structure or get a new one.
Error getStream(AsyncInfoWrapperTy &AsyncInfoWrapper, CUstream &Stream) {
- // Get the stream (if any) from the async info.
- Stream = AsyncInfoWrapper.getQueueAs<CUstream>();
- if (!Stream) {
- // There was no stream; get an idle one.
- if (auto Err = CUDAStreamManager.getResource(Stream))
- return Err;
-
- // Modify the async info's stream.
- AsyncInfoWrapper.setQueueAs<CUstream>(Stream);
- }
+ auto WrapperStream =
+ AsyncInfoWrapper.getOrInitQueue<CUstream>(CUDAStreamManager);
+ if (!WrapperStream)
+ return WrapperStream.takeError();
+ Stream = *WrapperStream;
return Plugin::success();
}
@@ -642,17 +637,20 @@ struct CUDADeviceTy : public GenericDeviceTy {
}
/// Synchronize current thread with the pending operations on the async info.
- Error synchronizeImpl(__tgt_async_info &AsyncInfo) override {
+ Error synchronizeImpl(__tgt_async_info &AsyncInfo,
+ bool ReleaseQueue) override {
CUstream Stream = reinterpret_cast<CUstream>(AsyncInfo.Queue);
CUresult Res;
Res = cuStreamSynchronize(Stream);
- // Once the stream is synchronized, return it to stream pool and reset
- // AsyncInfo. This is to make sure the synchronization only works for its
- // own tasks.
- AsyncInfo.Queue = nullptr;
- if (auto Err = CUDAStreamManager.returnResource(Stream))
- return Err;
+ // Once the stream is synchronized and we want to release the queue, return
+ // it to stream pool and reset AsyncInfo. This is to make sure the
+ // synchronization only works for its own tasks.
+ if (ReleaseQueue) {
+ AsyncInfo.Queue = nullptr;
+ if (auto Err = CUDAStreamManager.returnResource(Stream))
+ return Err;
+ }
return Plugin::check(Res, "error in cuStreamSynchronize: %s");
}
@@ -916,6 +914,11 @@ struct CUDADeviceTy : public GenericDeviceTy {
return Plugin::check(Res, "error in cuStreamWaitEvent: %s");
}
+ // TODO: This should be implementable on CUDA
+ Expected<bool> hasPendingWorkImpl(AsyncInfoWrapperTy &AsyncInfo) override {
+ return true;
+ }
+
/// Synchronize the current thread with the event.
Error syncEventImpl(void *EventPtr) override {
CUevent Event = reinterpret_cast<CUevent>(EventPtr);
@@ -1314,9 +1317,10 @@ Error CUDAKernelTy::launchImpl(GenericDeviceTy &GenericDevice,
if (MaxDynCGroupMem >= MaxDynCGroupMemLimit) {
CUresult AttrResult = cuFuncSetAttribute(
Func, CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, MaxDynCGroupMem);
- return Plugin::check(
- AttrResult,
- "Error in cuLaunchKernel while setting the memory limits: %s");
+ if (auto Err = Plugin::check(
+ AttrResult,
+ "Error in cuLaunchKernel while setting the memory limits: %s"))
+ return Err;
MaxDynCGroupMemLimit = MaxDynCGroupMem;
}
diff --git a/offload/plugins-nextgen/host/src/rtl.cpp b/offload/plugins-nextgen/host/src/rtl.cpp
index d950572..ed52135 100644
--- a/offload/plugins-nextgen/host/src/rtl.cpp
+++ b/offload/plugins-nextgen/host/src/rtl.cpp
@@ -297,7 +297,8 @@ struct GenELF64DeviceTy : public GenericDeviceTy {
/// All functions are already synchronous. No need to do anything on this
/// synchronization function.
- Error synchronizeImpl(__tgt_async_info &AsyncInfo) override {
+ Error synchronizeImpl(__tgt_async_info &AsyncInfo,
+ bool ReleaseQueue) override {
return Plugin::success();
}
@@ -333,6 +334,9 @@ struct GenELF64DeviceTy : public GenericDeviceTy {
AsyncInfoWrapperTy &AsyncInfoWrapper) override {
return Plugin::success();
}
+ Expected<bool> hasPendingWorkImpl(AsyncInfoWrapperTy &AsyncInfo) override {
+ return true;
+ }
Error syncEventImpl(void *EventPtr) override { return Plugin::success(); }
/// Print information about the device.
diff --git a/offload/unittests/CMakeLists.txt b/offload/unittests/CMakeLists.txt
index 726d3ee..308849a 100644
--- a/offload/unittests/CMakeLists.txt
+++ b/offload/unittests/CMakeLists.txt
@@ -18,6 +18,9 @@ endif ()
set(OFFLOAD_UNITTESTS_DIR ${CMAKE_CURRENT_SOURCE_DIR})
function(add_offload_test_device_code test_filename test_name)
+ cmake_parse_arguments(
+ "OFFLOAD_TESTS" "WITH_DEVICE_MATH_LIBS" "" "" ${ARGN})
+
set(SRC_PATH ${CMAKE_CURRENT_SOURCE_DIR}/${test_filename})
set(CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY)
@@ -37,13 +40,25 @@ function(add_offload_test_device_code test_filename test_name)
endif()
if(nvptx_arch AND CUDAToolkit_FOUND)
+ set(nvptx_compile_flags ${OFFLOAD_TESTS_UNPARSED_ARGUMENTS})
+
+ if(OFFLOAD_TESTS_WITH_DEVICE_MATH_LIBS)
+ file(GLOB libdevice_paths "${CUDAToolkit_LIBRARY_ROOT}/nvvm/libdevice/libdevice.*.bc")
+ if(libdevice_paths)
+ list(GET libdevice_paths 0 libdevice_path)
+ list(APPEND nvptx_compile_flags "-Xclang" "-mlink-builtin-bitcode")
+ list(APPEND nvptx_compile_flags "-Xclang" "${libdevice_path}")
+ list(APPEND nvptx_compile_flags "-DCUDA_MATH_FOUND=1")
+ endif()
+ endif()
+
set(output_file "${CMAKE_CURRENT_BINARY_DIR}/${test_name}.nvptx64.bin")
add_custom_command(
OUTPUT ${output_file}
COMMAND ${CMAKE_CXX_COMPILER}
-I${OFFLOAD_UNITTESTS_DIR}
--target=nvptx64-nvidia-cuda -march=${nvptx_arch}
- -nogpulib --cuda-path=${cuda_path} -flto ${ARGN}
+ -nogpulib --cuda-path=${cuda_path} -flto ${nvptx_compile_flags}
${SRC_PATH} -o ${output_file}
DEPENDS ${SRC_PATH}
)
@@ -62,13 +77,25 @@ function(add_offload_test_device_code test_filename test_name)
endif()
if(amdgpu_arch)
+ set(amdgpu_compile_flags ${OFFLOAD_TESTS_UNPARSED_ARGUMENTS})
+
+ if(OFFLOAD_TESTS_WITH_DEVICE_MATH_LIBS)
+ find_package(AMDDeviceLibs QUIET HINTS ${CMAKE_INSTALL_PREFIX} PATHS /opt/rocm)
+ if(AMDDeviceLibs_FOUND)
+ get_target_property(ocml_path ocml IMPORTED_LOCATION)
+ list(APPEND amdgpu_compile_flags "-Xclang" "-mlink-builtin-bitcode")
+ list(APPEND amdgpu_compile_flags "-Xclang" "${ocml_path}")
+ list(APPEND amdgpu_compile_flags "-DHIP_MATH_FOUND=1")
+ endif()
+ endif()
+
set(output_file "${CMAKE_CURRENT_BINARY_DIR}/${test_name}.amdgpu.bin")
add_custom_command(
OUTPUT ${output_file}
COMMAND ${CMAKE_CXX_COMPILER}
-I${OFFLOAD_UNITTESTS_DIR}
--target=amdgcn-amd-amdhsa -mcpu=${amdgpu_arch}
- -nogpulib -flto ${ARGN} ${SRC_PATH} -o ${output_file}
+ -nogpulib -flto ${amdgpu_compile_flags} ${SRC_PATH} -o ${output_file}
DEPENDS ${SRC_PATH}
)
add_custom_target(${test_name}.amdgpu DEPENDS ${output_file})
@@ -104,11 +131,6 @@ function(add_conformance_test test_name)
list(TRANSFORM ARGN PREPEND "${CMAKE_CURRENT_SOURCE_DIR}/" OUTPUT_VARIABLE files)
- if(NOT TARGET libc)
- message(WARNING "Cannot run conformance tests without the LLVM C library")
- return()
- endif()
-
add_executable(${target_name} ${files})
add_dependencies(${target_name} conformance_device_binaries)
target_compile_definitions(${target_name}
diff --git a/offload/unittests/Conformance/device_code/CMakeLists.txt b/offload/unittests/Conformance/device_code/CMakeLists.txt
index 789dd16..a0c5369 100644
--- a/offload/unittests/Conformance/device_code/CMakeLists.txt
+++ b/offload/unittests/Conformance/device_code/CMakeLists.txt
@@ -1,4 +1,10 @@
+add_offload_test_device_code(CUDAMath.cpp cuda-math WITH_DEVICE_MATH_LIBS -O3 -stdlib -fno-builtin)
+add_offload_test_device_code(HIPMath.cpp hip-math WITH_DEVICE_MATH_LIBS -O3 -stdlib -fno-builtin)
add_offload_test_device_code(LLVMLibm.cpp llvm-libm -O3 -stdlib -fno-builtin)
-add_custom_target(conformance_device_binaries DEPENDS llvm-libm.bin)
+add_custom_target(conformance_device_binaries DEPENDS
+ cuda-math.bin
+ hip-math.bin
+ llvm-libm.bin
+)
set(OFFLOAD_CONFORMANCE_DEVICE_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR} PARENT_SCOPE)
diff --git a/offload/unittests/Conformance/device_code/CUDAMath.cpp b/offload/unittests/Conformance/device_code/CUDAMath.cpp
new file mode 100644
index 0000000..a351e92
--- /dev/null
+++ b/offload/unittests/Conformance/device_code/CUDAMath.cpp
@@ -0,0 +1,178 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains the implementation of the device kernels that wrap the
+/// math functions from the cuda-math provider.
+///
+//===----------------------------------------------------------------------===//
+
+#ifdef CUDA_MATH_FOUND
+
+#include "Conformance/device_code/DeviceAPIs.hpp"
+#include "Conformance/device_code/KernelRunner.hpp"
+
+#include <gpuintrin.h>
+#include <stddef.h>
+
+using namespace kernels;
+
+//===----------------------------------------------------------------------===//
+// Helpers
+//===----------------------------------------------------------------------===//
+
+static inline float sincosfSin(float X) {
+ float SinX, CosX;
+ __nv_sincosf(X, &SinX, &CosX);
+ return SinX;
+}
+
+static inline float sincosfCos(float X) {
+ float SinX, CosX;
+ __nv_sincosf(X, &SinX, &CosX);
+ return CosX;
+}
+
+//===----------------------------------------------------------------------===//
+// Kernels
+//===----------------------------------------------------------------------===//
+
+extern "C" {
+
+__gpu_kernel void acosfKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__nv_acosf>(NumElements, Out, X);
+}
+
+__gpu_kernel void acoshfKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__nv_acoshf>(NumElements, Out, X);
+}
+
+__gpu_kernel void asinfKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__nv_asinf>(NumElements, Out, X);
+}
+
+__gpu_kernel void asinhfKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__nv_asinhf>(NumElements, Out, X);
+}
+
+__gpu_kernel void atanfKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__nv_atanf>(NumElements, Out, X);
+}
+
+__gpu_kernel void atanhfKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__nv_atanhf>(NumElements, Out, X);
+}
+
+__gpu_kernel void cbrtfKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__nv_cbrtf>(NumElements, Out, X);
+}
+
+__gpu_kernel void cosfKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__nv_cosf>(NumElements, Out, X);
+}
+
+__gpu_kernel void coshfKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__nv_coshf>(NumElements, Out, X);
+}
+
+__gpu_kernel void cospifKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__nv_cospif>(NumElements, Out, X);
+}
+
+__gpu_kernel void erffKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__nv_erff>(NumElements, Out, X);
+}
+
+__gpu_kernel void expfKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__nv_expf>(NumElements, Out, X);
+}
+
+__gpu_kernel void exp10fKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__nv_exp10f>(NumElements, Out, X);
+}
+
+__gpu_kernel void exp2fKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__nv_exp2f>(NumElements, Out, X);
+}
+
+__gpu_kernel void expm1fKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__nv_expm1f>(NumElements, Out, X);
+}
+
+__gpu_kernel void logfKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__nv_logf>(NumElements, Out, X);
+}
+
+__gpu_kernel void log10fKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__nv_log10f>(NumElements, Out, X);
+}
+
+__gpu_kernel void log1pfKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__nv_log1pf>(NumElements, Out, X);
+}
+
+__gpu_kernel void log2fKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__nv_log2f>(NumElements, Out, X);
+}
+
+__gpu_kernel void sinfKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__nv_sinf>(NumElements, Out, X);
+}
+
+__gpu_kernel void sincosfSinKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<sincosfSin>(NumElements, Out, X);
+}
+
+__gpu_kernel void sincosfCosKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<sincosfCos>(NumElements, Out, X);
+}
+
+__gpu_kernel void sinhfKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__nv_sinhf>(NumElements, Out, X);
+}
+
+__gpu_kernel void sinpifKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__nv_sinpif>(NumElements, Out, X);
+}
+
+__gpu_kernel void tanfKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__nv_tanf>(NumElements, Out, X);
+}
+
+__gpu_kernel void tanhfKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__nv_tanhf>(NumElements, Out, X);
+}
+} // extern "C"
+
+#endif // CUDA_MATH_FOUND
diff --git a/offload/unittests/Conformance/device_code/DeviceAPIs.hpp b/offload/unittests/Conformance/device_code/DeviceAPIs.hpp
new file mode 100644
index 0000000..8476dcb
--- /dev/null
+++ b/offload/unittests/Conformance/device_code/DeviceAPIs.hpp
@@ -0,0 +1,113 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains platform-specific definitions and forward declarations
+/// for device-side APIs used by the kernels.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef CONFORMANCE_DEVICE_CODE_DEVICEAPIS_HPP
+#define CONFORMANCE_DEVICE_CODE_DEVICEAPIS_HPP
+
+#include <stdint.h>
+
+typedef _Float16 float16;
+
+#ifdef __AMDGPU__
+
+// The ROCm device library uses control globals to alter codegen for the
+// different targets. To avoid needing to link them in manually, we simply
+// define them here.
+extern "C" {
+extern const inline uint8_t __oclc_unsafe_math_opt = 0;
+extern const inline uint8_t __oclc_daz_opt = 0;
+extern const inline uint8_t __oclc_correctly_rounded_sqrt32 = 1;
+extern const inline uint8_t __oclc_finite_only_opt = 0;
+extern const inline uint32_t __oclc_ISA_version = 9000;
+}
+
+// These aliases cause Clang to emit the control constants with ODR linkage.
+// This allows us to link against the symbols without preventing them from being
+// optimized out or causing symbol collisions.
+[[gnu::alias("__oclc_unsafe_math_opt")]] const uint8_t __oclc_unsafe_math_opt__;
+[[gnu::alias("__oclc_daz_opt")]] const uint8_t __oclc_daz_opt__;
+[[gnu::alias("__oclc_correctly_rounded_sqrt32")]] const uint8_t
+ __oclc_correctly_rounded_sqrt32__;
+[[gnu::alias("__oclc_finite_only_opt")]] const uint8_t __oclc_finite_only_opt__;
+[[gnu::alias("__oclc_ISA_version")]] const uint32_t __oclc_ISA_version__;
+
+#endif // __AMDGPU__
+
+#ifdef CUDA_MATH_FOUND
+
+extern "C" {
+
+float __nv_acosf(float);
+float __nv_acoshf(float);
+float __nv_asinf(float);
+float __nv_asinhf(float);
+float __nv_atanf(float);
+float __nv_atanhf(float);
+float __nv_cbrtf(float);
+float __nv_cosf(float);
+float __nv_coshf(float);
+float __nv_cospif(float);
+float __nv_erff(float);
+float __nv_expf(float);
+float __nv_exp10f(float);
+float __nv_exp2f(float);
+float __nv_expm1f(float);
+float __nv_logf(float);
+float __nv_log10f(float);
+float __nv_log1pf(float);
+float __nv_log2f(float);
+float __nv_sinf(float);
+void __nv_sincosf(float, float *, float *);
+float __nv_sinhf(float);
+float __nv_sinpif(float);
+float __nv_tanf(float);
+float __nv_tanhf(float);
+} // extern "C"
+
+#endif // CUDA_MATH_FOUND
+
+#ifdef HIP_MATH_FOUND
+
+extern "C" {
+
+float __ocml_acos_f32(float);
+float __ocml_acosh_f32(float);
+float __ocml_asin_f32(float);
+float __ocml_asinh_f32(float);
+float __ocml_atan_f32(float);
+float __ocml_atanh_f32(float);
+float __ocml_cbrt_f32(float);
+float __ocml_cos_f32(float);
+float __ocml_cosh_f32(float);
+float __ocml_cospi_f32(float);
+float __ocml_erf_f32(float);
+float __ocml_exp_f32(float);
+float __ocml_exp10_f32(float);
+float __ocml_exp2_f32(float);
+float __ocml_expm1_f32(float);
+float __ocml_log_f32(float);
+float __ocml_log10_f32(float);
+float __ocml_log1p_f32(float);
+float __ocml_log2_f32(float);
+float __ocml_sin_f32(float);
+float __ocml_sincos_f32(float, float *);
+float __ocml_sinh_f32(float);
+float __ocml_sinpi_f32(float);
+float __ocml_tan_f32(float);
+float __ocml_tanh_f32(float);
+} // extern "C"
+
+#endif // HIP_MATH_FOUND
+
+#endif // CONFORMANCE_DEVICE_CODE_DEVICEAPIS_HPP
diff --git a/offload/unittests/Conformance/device_code/HIPMath.cpp b/offload/unittests/Conformance/device_code/HIPMath.cpp
new file mode 100644
index 0000000..36efe6b
--- /dev/null
+++ b/offload/unittests/Conformance/device_code/HIPMath.cpp
@@ -0,0 +1,178 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains the implementation of the device kernels that wrap the
+/// math functions from the hip-math provider.
+///
+//===----------------------------------------------------------------------===//
+
+#ifdef HIP_MATH_FOUND
+
+#include "Conformance/device_code/DeviceAPIs.hpp"
+#include "Conformance/device_code/KernelRunner.hpp"
+
+#include <gpuintrin.h>
+#include <stddef.h>
+
+using namespace kernels;
+
+//===----------------------------------------------------------------------===//
+// Helpers
+//===----------------------------------------------------------------------===//
+
+static inline float sincosfSin(float X) {
+ float CosX;
+ float SinX = __ocml_sincos_f32(X, &CosX);
+ return SinX;
+}
+
+static inline float sincosfCos(float X) {
+ float CosX;
+ float SinX = __ocml_sincos_f32(X, &CosX);
+ return CosX;
+}
+
+//===----------------------------------------------------------------------===//
+// Kernels
+//===----------------------------------------------------------------------===//
+
+extern "C" {
+
+__gpu_kernel void acosfKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__ocml_acos_f32>(NumElements, Out, X);
+}
+
+__gpu_kernel void acoshfKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__ocml_acosh_f32>(NumElements, Out, X);
+}
+
+__gpu_kernel void asinfKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__ocml_asin_f32>(NumElements, Out, X);
+}
+
+__gpu_kernel void asinhfKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__ocml_asinh_f32>(NumElements, Out, X);
+}
+
+__gpu_kernel void atanfKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__ocml_atan_f32>(NumElements, Out, X);
+}
+
+__gpu_kernel void atanhfKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__ocml_atanh_f32>(NumElements, Out, X);
+}
+
+__gpu_kernel void cbrtfKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__ocml_cbrt_f32>(NumElements, Out, X);
+}
+
+__gpu_kernel void cosfKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__ocml_cos_f32>(NumElements, Out, X);
+}
+
+__gpu_kernel void coshfKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__ocml_cosh_f32>(NumElements, Out, X);
+}
+
+__gpu_kernel void cospifKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__ocml_cospi_f32>(NumElements, Out, X);
+}
+
+__gpu_kernel void erffKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__ocml_erf_f32>(NumElements, Out, X);
+}
+
+__gpu_kernel void expfKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__ocml_exp_f32>(NumElements, Out, X);
+}
+
+__gpu_kernel void exp10fKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__ocml_exp10_f32>(NumElements, Out, X);
+}
+
+__gpu_kernel void exp2fKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__ocml_exp2_f32>(NumElements, Out, X);
+}
+
+__gpu_kernel void expm1fKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__ocml_expm1_f32>(NumElements, Out, X);
+}
+
+__gpu_kernel void logfKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__ocml_log_f32>(NumElements, Out, X);
+}
+
+__gpu_kernel void log10fKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__ocml_log10_f32>(NumElements, Out, X);
+}
+
+__gpu_kernel void log1pfKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__ocml_log1p_f32>(NumElements, Out, X);
+}
+
+__gpu_kernel void log2fKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__ocml_log2_f32>(NumElements, Out, X);
+}
+
+__gpu_kernel void sinfKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__ocml_sin_f32>(NumElements, Out, X);
+}
+
+__gpu_kernel void sincosfSinKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<sincosfSin>(NumElements, Out, X);
+}
+
+__gpu_kernel void sincosfCosKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<sincosfCos>(NumElements, Out, X);
+}
+
+__gpu_kernel void sinhfKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__ocml_sinh_f32>(NumElements, Out, X);
+}
+
+__gpu_kernel void sinpifKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__ocml_sinpi_f32>(NumElements, Out, X);
+}
+
+__gpu_kernel void tanfKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__ocml_tan_f32>(NumElements, Out, X);
+}
+
+__gpu_kernel void tanhfKernel(const float *X, float *Out,
+ size_t NumElements) noexcept {
+ runKernelBody<__ocml_tanh_f32>(NumElements, Out, X);
+}
+} // extern "C"
+
+#endif // HIP_MATH_FOUND
diff --git a/offload/unittests/Conformance/device_code/Common.hpp b/offload/unittests/Conformance/device_code/KernelRunner.hpp
index bcf3ac6..e64a62f 100644
--- a/offload/unittests/Conformance/device_code/Common.hpp
+++ b/offload/unittests/Conformance/device_code/KernelRunner.hpp
@@ -7,21 +7,19 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// This file contains common utilities for defining device kernel wrappers to
-/// math functions.
+/// This file contains the definition of runKernelBody, a template helper
+/// that executes the per-thread logic of a math function's kernel wrapper.
///
//===----------------------------------------------------------------------===//
-#ifndef CONFORMANCE_DEVICE_CODE_COMMON_HPP
-#define CONFORMANCE_DEVICE_CODE_COMMON_HPP
+#ifndef CONFORMANCE_DEVICE_CODE_KERNELRUNNER_HPP
+#define CONFORMANCE_DEVICE_CODE_KERNELRUNNER_HPP
#include <gpuintrin.h>
#include <stddef.h>
#include <stdint.h>
-namespace common {
-
-typedef _Float16 float16;
+namespace kernels {
template <auto Func, typename OutType, typename... InTypes>
void runKernelBody(size_t NumElements, OutType *Out, const InTypes *...Ins) {
@@ -32,6 +30,6 @@ void runKernelBody(size_t NumElements, OutType *Out, const InTypes *...Ins) {
Out[Index] = Func(Ins[Index]...);
}
}
-} // namespace common
+} // namespace kernels
-#endif // CONFORMANCE_DEVICE_CODE_COMMON_HPP
+#endif // CONFORMANCE_DEVICE_CODE_KERNELRUNNER_HPP
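
For orientation, a kernel wrapper built on this helper follows the same shape throughout the device code in this patch; a minimal sketch using names taken from the HIPMath.cpp hunk above (no new API is introduced here):

    using namespace kernels;

    extern "C" __gpu_kernel void sinfKernel(const float *X, float *Out,
                                            size_t NumElements) noexcept {
      runKernelBody<__ocml_sin_f32>(NumElements, Out, X);
    }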
diff --git a/offload/unittests/Conformance/device_code/LLVMLibm.cpp b/offload/unittests/Conformance/device_code/LLVMLibm.cpp
index f137ba3..8869d87 100644
--- a/offload/unittests/Conformance/device_code/LLVMLibm.cpp
+++ b/offload/unittests/Conformance/device_code/LLVMLibm.cpp
@@ -12,13 +12,14 @@
///
//===----------------------------------------------------------------------===//
-#include "Conformance/device_code/Common.hpp"
+#include "Conformance/device_code/DeviceAPIs.hpp"
+#include "Conformance/device_code/KernelRunner.hpp"
#include <gpuintrin.h>
#include <math.h>
#include <stddef.h>
-using namespace common;
+using namespace kernels;
//===----------------------------------------------------------------------===//
// Helpers
diff --git a/offload/unittests/Conformance/include/mathtest/TestRunner.hpp b/offload/unittests/Conformance/include/mathtest/TestRunner.hpp
index f89d151..ab17f1d 100644
--- a/offload/unittests/Conformance/include/mathtest/TestRunner.hpp
+++ b/offload/unittests/Conformance/include/mathtest/TestRunner.hpp
@@ -41,11 +41,11 @@ void printPreamble(const TestConfig &Config, size_t Index,
size_t Total) noexcept {
using FunctionConfig = FunctionConfig<Func>;
- llvm::outs() << "[" << (Index + 1) << "/" << Total << "] "
+ llvm::errs() << "[" << (Index + 1) << "/" << Total << "] "
<< "Running conformance test '" << FunctionConfig::Name
<< "' with '" << Config.Provider << "' on '" << Config.Platform
<< "'\n";
- llvm::outs().flush();
+ llvm::errs().flush();
}
template <typename T>
diff --git a/offload/unittests/Conformance/tests/CMakeLists.txt b/offload/unittests/Conformance/tests/CMakeLists.txt
index 5c0c3ba..8c0109ba 100644
--- a/offload/unittests/Conformance/tests/CMakeLists.txt
+++ b/offload/unittests/Conformance/tests/CMakeLists.txt
@@ -1,3 +1,8 @@
+if(NOT TARGET libc)
+ message(WARNING "Cannot run conformance tests without the LLVM C library")
+ return()
+endif()
+
add_conformance_test(acosf AcosfTest.cpp)
add_conformance_test(acoshf AcoshfTest.cpp)
add_conformance_test(asinf AsinfTest.cpp)
diff --git a/offload/unittests/OffloadAPI/common/Fixtures.hpp b/offload/unittests/OffloadAPI/common/Fixtures.hpp
index 24ce1a0..43240fa 100644
--- a/offload/unittests/OffloadAPI/common/Fixtures.hpp
+++ b/offload/unittests/OffloadAPI/common/Fixtures.hpp
@@ -9,6 +9,7 @@
#include <OffloadAPI.h>
#include <OffloadPrint.hpp>
#include <gtest/gtest.h>
+#include <thread>
#include "Environment.hpp"
@@ -57,6 +58,23 @@ inline std::string SanitizeString(const std::string &Str) {
return NewStr;
}
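+// Runs `body` concurrently on 20 threads; each invocation is wrapped in a
+// SCOPED_TRACE carrying the thread index so gtest failures identify the
+// offending thread.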
+template <typename Fn> inline void threadify(Fn body) {
+ std::vector<std::thread> Threads;
+ for (size_t I = 0; I < 20; I++) {
+ Threads.emplace_back(
+ [&body](size_t I) {
+ std::string ScopeMsg{"Thread #"};
+ ScopeMsg.append(std::to_string(I));
+ SCOPED_TRACE(ScopeMsg);
+ body(I);
+ },
+ I);
+ }
+ for (auto &T : Threads) {
+ T.join();
+ }
+}
+
struct OffloadTest : ::testing::Test {
ol_device_handle_t Host = TestEnvironment::getHostDevice();
};
diff --git a/offload/unittests/OffloadAPI/kernel/olLaunchKernel.cpp b/offload/unittests/OffloadAPI/kernel/olLaunchKernel.cpp
index 758f607..1dac8c5 100644
--- a/offload/unittests/OffloadAPI/kernel/olLaunchKernel.cpp
+++ b/offload/unittests/OffloadAPI/kernel/olLaunchKernel.cpp
@@ -104,6 +104,29 @@ TEST_P(olLaunchKernelFooTest, Success) {
ASSERT_SUCCESS(olMemFree(Mem));
}
+TEST_P(olLaunchKernelFooTest, SuccessThreaded) {
+ threadify([&](size_t) {
+ void *Mem;
+ ASSERT_SUCCESS(olMemAlloc(Device, OL_ALLOC_TYPE_MANAGED,
+ LaunchArgs.GroupSize.x * sizeof(uint32_t), &Mem));
+ struct {
+ void *Mem;
+ } Args{Mem};
+
+ ASSERT_SUCCESS(olLaunchKernel(Queue, Device, Kernel, &Args, sizeof(Args),
+ &LaunchArgs));
+
+ ASSERT_SUCCESS(olSyncQueue(Queue));
+
+ uint32_t *Data = (uint32_t *)Mem;
+ for (uint32_t i = 0; i < 64; i++) {
+ ASSERT_EQ(Data[i], i);
+ }
+
+ ASSERT_SUCCESS(olMemFree(Mem));
+ });
+}
+
TEST_P(olLaunchKernelNoArgsTest, Success) {
ASSERT_SUCCESS(
olLaunchKernel(Queue, Device, Kernel, nullptr, 0, &LaunchArgs));
diff --git a/offload/unittests/OffloadAPI/queue/olGetQueueInfo.cpp b/offload/unittests/OffloadAPI/queue/olGetQueueInfo.cpp
index f4fb752..2dccd33 100644
--- a/offload/unittests/OffloadAPI/queue/olGetQueueInfo.cpp
+++ b/offload/unittests/OffloadAPI/queue/olGetQueueInfo.cpp
@@ -20,6 +20,12 @@ TEST_P(olGetQueueInfoTest, SuccessDevice) {
ASSERT_EQ(Device, RetrievedDevice);
}
+TEST_P(olGetQueueInfoTest, SuccessEmpty) {
+ bool Empty;
+ ASSERT_SUCCESS(
+ olGetQueueInfo(Queue, OL_QUEUE_INFO_EMPTY, sizeof(Empty), &Empty));
+}
+
TEST_P(olGetQueueInfoTest, InvalidNullHandle) {
ol_device_handle_t RetrievedDevice;
ASSERT_ERROR(OL_ERRC_INVALID_NULL_HANDLE,
diff --git a/offload/unittests/OffloadAPI/queue/olGetQueueInfoSize.cpp b/offload/unittests/OffloadAPI/queue/olGetQueueInfoSize.cpp
index f1ad9fa..735dad6 100644
--- a/offload/unittests/OffloadAPI/queue/olGetQueueInfoSize.cpp
+++ b/offload/unittests/OffloadAPI/queue/olGetQueueInfoSize.cpp
@@ -19,6 +19,12 @@ TEST_P(olGetQueueInfoSizeTest, SuccessDevice) {
ASSERT_EQ(Size, sizeof(ol_device_handle_t));
}
+TEST_P(olGetQueueInfoSizeTest, SuccessEmpty) {
+ size_t Size = 0;
+ ASSERT_SUCCESS(olGetQueueInfoSize(Queue, OL_QUEUE_INFO_EMPTY, &Size));
+ ASSERT_EQ(Size, sizeof(bool));
+}
+
TEST_P(olGetQueueInfoSizeTest, InvalidNullHandle) {
size_t Size = 0;
ASSERT_ERROR(OL_ERRC_INVALID_NULL_HANDLE,
diff --git a/utils/bazel/llvm-project-overlay/clang/unittests/BUILD.bazel b/utils/bazel/llvm-project-overlay/clang/unittests/BUILD.bazel
index cd66c1f..518f723 100644
--- a/utils/bazel/llvm-project-overlay/clang/unittests/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/clang/unittests/BUILD.bazel
@@ -682,12 +682,13 @@ cc_test(
args = [
# TODO: some tests fail with: "JIT session error: Symbols not found:
# [ _ZnwmPv26__clang_Interpreter_NewTag, __clang_Interpreter_SetValueWithAlloc ]
- "--gtest_filter=-InterpreterTest.*",
+ "--gtest_filter=-InterpreterTest.*:InterpreterTestBase.*",
],
deps = [
"//clang:ast",
"//clang:basic",
"//clang:codegen",
+ "//clang:config",
"//clang:frontend",
"//clang:interpreter",
"//clang:lex",
diff --git a/utils/bazel/llvm-project-overlay/libc/BUILD.bazel b/utils/bazel/llvm-project-overlay/libc/BUILD.bazel
index 6a9bd09..3bfa3b1 100644
--- a/utils/bazel/llvm-project-overlay/libc/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/libc/BUILD.bazel
@@ -1969,41 +1969,6 @@ libc_support_library(
)
libc_support_library(
- name = "range_reduction_double",
- hdrs = [
- "src/math/generic/range_reduction_double_common.h",
- "src/math/generic/range_reduction_double_fma.h",
- "src/math/generic/range_reduction_double_nofma.h",
- ],
- deps = [
- ":__support_common",
- ":__support_fputil_double_double",
- ":__support_fputil_dyadic_float",
- ":__support_fputil_fp_bits",
- ":__support_fputil_multiply_add",
- ":__support_fputil_nearest_integer",
- ":__support_fputil_rounding_mode",
- ":__support_integer_literals",
- ],
-)
-
-libc_support_library(
- name = "sincos_eval",
- hdrs = [
- "src/math/generic/sincos_eval.h",
- ],
- deps = [
- ":__support_common",
- ":__support_fputil_double_double",
- ":__support_fputil_dyadic_float",
- ":__support_fputil_fp_bits",
- ":__support_fputil_multiply_add",
- ":__support_fputil_polyeval",
- ":__support_integer_literals",
- ],
-)
-
-libc_support_library(
name = "sincosf_utils",
hdrs = ["src/math/generic/sincosf_utils.h"],
deps = [
@@ -2414,6 +2379,31 @@ libc_support_library(
)
libc_support_library(
+ name = "__support_math_cbrtf",
+ hdrs = ["src/__support/math/cbrtf.h"],
+ deps = [
+ ":__support_fputil_double_double",
+ ":__support_fputil_polyeval",
+ ":__support_fputil_fenv_impl",
+ ":__support_fputil_dyadic_float",
+ ":__support_integer_literals",
+ ],
+)
+
+libc_support_library(
+ name = "__support_math_cos",
+ hdrs = ["src/__support/math/cos.h"],
+ deps = [
+ ":__support_fputil_multiply_add",
+ ":__support_fputil_except_value_utils",
+ ":__support_macros_optimization",
+ ":__support_macros_properties_cpu_features",
+ ":__support_range_reduction_double",
+ ":__support_sincos_eval",
+ ],
+)
+
+libc_support_library(
name = "__support_math_erff",
hdrs = ["src/__support/math/erff.h"],
deps = [
@@ -2667,6 +2657,41 @@ libc_support_library(
],
)
+libc_support_library(
+ name = "__support_range_reduction_double",
+ hdrs = [
+ "src/__support/math/range_reduction_double_common.h",
+ "src/__support/math/range_reduction_double_fma.h",
+ "src/__support/math/range_reduction_double_nofma.h",
+ ],
+ deps = [
+ ":__support_common",
+ ":__support_fputil_double_double",
+ ":__support_fputil_dyadic_float",
+ ":__support_fputil_fp_bits",
+ ":__support_fputil_multiply_add",
+ ":__support_fputil_nearest_integer",
+ ":__support_fputil_rounding_mode",
+ ":__support_integer_literals",
+ ],
+)
+
+libc_support_library(
+ name = "__support_sincos_eval",
+ hdrs = [
+ "src/__support/math/sincos_eval.h",
+ ],
+ deps = [
+ ":__support_common",
+ ":__support_fputil_double_double",
+ ":__support_fputil_dyadic_float",
+ ":__support_fputil_fp_bits",
+ ":__support_fputil_multiply_add",
+ ":__support_fputil_polyeval",
+ ":__support_integer_literals",
+ ],
+)
+
############################### complex targets ################################
libc_function(
@@ -3089,7 +3114,7 @@ libc_math_function(
libc_math_function(
name = "cbrtf",
additional_deps = [
- ":__support_fputil_polyeval",
+ ":__support_math_cbrtf",
],
)
@@ -3116,11 +3141,7 @@ libc_math_function(name = "copysignf16")
libc_math_function(
name = "cos",
additional_deps = [
- ":__support_fputil_multiply_add",
- ":__support_macros_optimization",
- ":__support_macros_properties_cpu_features",
- ":range_reduction_double",
- ":sincos_eval",
+ ":__support_math_cos",
],
)
@@ -4240,8 +4261,8 @@ libc_math_function(
":__support_fputil_multiply_add",
":__support_macros_optimization",
":__support_macros_properties_cpu_features",
- ":range_reduction_double",
- ":sincos_eval",
+ ":__support_range_reduction_double",
+ ":__support_sincos_eval",
],
)
@@ -4273,8 +4294,8 @@ libc_math_function(
":__support_fputil_multiply_add",
":__support_macros_optimization",
":__support_macros_properties_cpu_features",
- ":range_reduction_double",
- ":sincos_eval",
+ ":__support_range_reduction_double",
+ ":__support_sincos_eval",
],
)
@@ -4368,8 +4389,8 @@ libc_math_function(
":__support_fputil_multiply_add",
":__support_macros_optimization",
":__support_macros_properties_cpu_features",
- ":range_reduction_double",
- ":sincos_eval",
+ ":__support_range_reduction_double",
+ ":__support_sincos_eval",
],
)
diff --git a/utils/bazel/llvm-project-overlay/libc/test/UnitTest/BUILD.bazel b/utils/bazel/llvm-project-overlay/libc/test/UnitTest/BUILD.bazel
index 1565153..24baaf1 100644
--- a/utils/bazel/llvm-project-overlay/libc/test/UnitTest/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/libc/test/UnitTest/BUILD.bazel
@@ -22,11 +22,11 @@ libc_test_library(
"//libc:__support_macros_properties_types",
"//libc:__support_osutil_io",
"//libc:__support_uint128",
- "//libc:hdr_stdint_proxy",
"//libc:func_aligned_alloc",
"//libc:func_free",
"//libc:func_malloc",
"//libc:func_realloc",
+ "//libc:hdr_stdint_proxy",
],
)
@@ -65,12 +65,12 @@ libc_test_library(
"//libc:__support_macros_properties_types",
"//libc:__support_stringutil",
"//libc:__support_uint128",
- "//libc:hdr_stdint_proxy",
"//libc:errno",
"//libc:func_aligned_alloc",
"//libc:func_free",
"//libc:func_malloc",
"//libc:func_realloc",
+ "//libc:hdr_stdint_proxy",
"//libc:llvm_libc_macros_stdfix_macros",
"//llvm:Support",
],
@@ -107,6 +107,7 @@ libc_test_library(
],
deps = [
":LibcUnitTest",
+ ":errno_test_helpers",
":string_utils",
":test_logger",
"//libc:__support_cpp_array",
@@ -119,6 +120,7 @@ libc_test_library(
"//libc:__support_fputil_fp_bits",
"//libc:__support_fputil_fpbits_str",
"//libc:__support_fputil_rounding_mode",
+ "//libc:__support_libc_errno",
"//libc:__support_macros_config",
"//libc:__support_macros_properties_architectures",
"//libc:hdr_fenv_macros",
diff --git a/utils/bazel/llvm-project-overlay/libc/utils/MPFRWrapper/BUILD.bazel b/utils/bazel/llvm-project-overlay/libc/utils/MPFRWrapper/BUILD.bazel
index ff93eb5..dc934a4 100644
--- a/utils/bazel/llvm-project-overlay/libc/utils/MPFRWrapper/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/libc/utils/MPFRWrapper/BUILD.bazel
@@ -77,6 +77,7 @@ libc_test_library(
"//libc:__support_cpp_string_view",
"//libc:__support_cpp_stringstream",
"//libc:__support_cpp_type_traits",
+ "//libc:__support_fputil_bfloat16",
"//libc:__support_fputil_cast",
"//libc:__support_fputil_fp_bits",
"//libc:__support_fputil_fpbits_str",
diff --git a/utils/bazel/llvm-project-overlay/llvm/config.bzl b/utils/bazel/llvm-project-overlay/llvm/config.bzl
index 2309175..3e9c032 100644
--- a/utils/bazel/llvm-project-overlay/llvm/config.bzl
+++ b/utils/bazel/llvm-project-overlay/llvm/config.bzl
@@ -113,7 +113,6 @@ llvm_config_defines = os_defines + builtin_thread_pointer + select({
"LLVM_VERSION_PATCH={}".format(LLVM_VERSION_PATCH),
r'LLVM_VERSION_STRING=\"{}\"'.format(PACKAGE_VERSION),
# Set globally in HandleLLVMOptions.cmake
- "EXPERIMENTAL_KEY_INSTRUCTIONS",
# These shouldn't be needed by the C++11 standard, but are for some
# platforms (e.g. glibc < 2.18. See
# https://sourceware.org/bugzilla/show_bug.cgi?id=15366). These are also
diff --git a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
index 7689300..49694a2 100644
--- a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
@@ -3541,14 +3541,13 @@ cc_library(
":InferTypeOpInterface",
":Support",
":WasmSSAIncGen",
+ ":WasmSSAInterfacesIncGen",
":WasmSSAOpsIncGen",
":WasmSSATypesIncGen",
- ":WasmSSAInterfacesIncGen",
"//llvm:Support",
],
)
-
##---------------------------------------------------------------------------##
# XeGPU dialect.
##---------------------------------------------------------------------------##
@@ -3632,21 +3631,35 @@ gentbl_cc_library(
deps = [":XeGPUAttrTdFiles"],
)
+gentbl_cc_library(
+ name = "XeGPUAttrInterfaceIncGen",
+ tbl_outs = {
+ "include/mlir/Dialect/XeGPU/IR/XeGPUAttrInterface.h.inc": ["-gen-attr-interface-decls"],
+ "include/mlir/Dialect/XeGPU/IR/XeGPUAttrInterface.cpp.inc": ["-gen-attr-interface-defs"],
+ },
+ tblgen = ":mlir-tblgen",
+ td_file = "include/mlir/Dialect/XeGPU/IR/XeGPUAttrs.td",
+ deps = [":XeGPUAttrTdFiles"],
+)
+
cc_library(
name = "XeGPUDialect",
srcs = glob(["lib/Dialect/XeGPU/IR/*.cpp"]),
hdrs = glob(["include/mlir/Dialect/XeGPU/IR/*.h"]),
includes = ["include"],
deps = [
+ ":AffineUtils",
":ArithDialect",
":ArithUtils",
":BytecodeOpInterface",
":DialectUtils",
":IR",
+ ":IndexDialect",
":ShapedOpInterfaces",
":SideEffectInterfaces",
":VectorDialect",
":ViewLikeInterface",
+ ":XeGPUAttrInterfaceIncGen",
":XeGPUEnumsIncGen",
":XeGPUIncGen",
"//llvm:Support",
@@ -10683,6 +10696,7 @@ cc_library(
":AsmParser",
":BufferizationDialect",
":BufferizationTransforms",
+ ":CommonFolders",
":DialectUtils",
":FuncDialect",
":FunctionInterfaces",
@@ -10706,6 +10720,7 @@ cc_library(
":TransformDialectInterfaces",
":TransformDialectUtils",
":TransformUtils",
+ ":UBDialect",
":VectorDialect",
":VectorTransforms",
"//llvm:Support",
@@ -10818,6 +10833,7 @@ cc_library(
":TensorUtils",
":TilingInterface",
":TransformUtils",
+ ":UBDialect",
":ValueBoundsOpInterface",
":VectorDialect",
":VectorToSCF",
diff --git a/utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel
index 9fe42c6d..27b1dbb 100644
--- a/utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel
@@ -382,6 +382,7 @@ cc_library(
"//mlir:BufferizationInterfaces",
"//mlir:BytecodeOpInterface",
"//mlir:CallOpInterfaces",
+ "//mlir:CommonFolders",
"//mlir:ControlFlowInterfaces",
"//mlir:CopyOpInterface",
"//mlir:DLTIDialect",
@@ -1203,11 +1204,13 @@ cc_library(
deps = [
"//mlir:GPUDialect",
"//mlir:IR",
+ "//mlir:IndexDialect",
"//mlir:MemRefDialect",
"//mlir:Pass",
"//mlir:TransformUtils",
"//mlir:VectorTransforms",
"//mlir:XeGPUDialect",
"//mlir:XeGPUTransforms",
+ "//mlir:XeGPUUtils",
],
)
diff --git a/utils/bazel/third_party_build/zlib-ng.BUILD b/utils/bazel/third_party_build/zlib-ng.BUILD
index 055261a..2a10720 100644
--- a/utils/bazel/third_party_build/zlib-ng.BUILD
+++ b/utils/bazel/third_party_build/zlib-ng.BUILD
@@ -2,7 +2,7 @@
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
load("@bazel_skylib//rules:common_settings.bzl", "bool_flag")
-load("@bazel_skylib//rules:expand_template.bzl", "expand_template")
+load("@bazel_skylib//rules:copy_file.bzl", "copy_file")
package(
default_visibility = ["//visibility:public"],
@@ -20,12 +20,12 @@ config_setting(
flag_values = {":llvm_enable_zlib": "true"},
)
-genrule(
+copy_file(
# The input template is identical to the CMake output.
name = "zconf_gen",
- srcs = ["zconf.h.in"],
- outs = ["zconf.h"],
- cmd = "cp $(SRCS) $(OUTS)",
+ src = "zconf.h.in",
+ out = "zconf.h",
+ allow_symlink = True,
)
cc_library(